Per-file source listing from the ceph-main repository (original dataset columns: repo, file, code, file_length, avg_line_length, max_line_length, extension_type).
ceph-main/src/osd/PGStateUtils.cc

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "PGStateUtils.h"
#include "common/Clock.h"
using ceph::Formatter;
/*------NamedState----*/
NamedState::NamedState(PGStateHistory *pgsh, const char *state_name)
: pgsh(pgsh), state_name(state_name), enter_time(ceph_clock_now()) {
if(pgsh) {
pgsh->enter(enter_time, state_name);
}
}
NamedState::~NamedState() {
if(pgsh) {
pgsh->exit(state_name);
}
}
/*---------PGStateHistory---------*/
void PGStateHistory::enter(const utime_t entime, const char* state)
{
if (pi == nullptr) {
pi = std::make_unique<PGStateInstance>();
}
pi->enter_state(entime, state);
}
void PGStateHistory::exit(const char* state) {
pi->setepoch(es.get_osdmap_epoch());
pi->exit_state(ceph_clock_now());
if (pi->empty()) {
reset();
}
}
void PGStateHistory::dump(Formatter* f) const {
f->open_array_section("history");
for (auto pi = buffer.begin(); pi != buffer.end(); ++pi) {
f->open_object_section("epochs");
f->dump_stream("epoch") << (*pi)->this_epoch;
f->open_array_section("states");
for (auto she : (*pi)->state_history) {
f->open_object_section("state");
f->dump_string("state", std::get<2>(she));
f->dump_stream("enter") << std::get<0>(she);
f->dump_stream("exit") << std::get<1>(she);
f->close_section();
}
f->close_section();
f->close_section();
}
f->close_section();
}
ceph-main/src/osd/PGStateUtils.h

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/utime.h"
#include "common/Formatter.h"
#include <stack>
#include <vector>
#include <boost/circular_buffer.hpp>
class PGStateHistory;
struct EpochSource {
virtual epoch_t get_osdmap_epoch() const = 0;
virtual ~EpochSource() {}
};
struct NamedState {
PGStateHistory *pgsh;
const char *state_name;
utime_t enter_time;
const char *get_state_name() { return state_name; }
NamedState(
PGStateHistory *pgsh,
const char *state_name_);
virtual ~NamedState();
};
using state_history_entry = std::tuple<utime_t, utime_t, const char*>;
using embedded_state = std::pair<utime_t, const char*>;
struct PGStateInstance {
// Time spent in pg states
void setepoch(const epoch_t current_epoch) {
this_epoch = current_epoch;
}
void enter_state(const utime_t entime, const char* state) {
embedded_states.push(std::make_pair(entime, state));
}
void exit_state(const utime_t extime) {
embedded_state this_state = embedded_states.top();
state_history.push_back(state_history_entry{
this_state.first, extime, this_state.second});
embedded_states.pop();
}
bool empty() const {
return embedded_states.empty();
}
epoch_t this_epoch;
std::vector<state_history_entry> state_history;
std::stack<embedded_state> embedded_states;
};
class PGStateHistory {
public:
PGStateHistory(const EpochSource &es) : buffer(10), es(es) {}
void enter(const utime_t entime, const char* state);
void exit(const char* state);
void reset() {
buffer.push_back(std::move(pi));
pi = nullptr;
}
void dump(ceph::Formatter* f) const;
const char *get_current_state() const {
if (pi == nullptr) return "unknown";
return std::get<1>(pi->embedded_states.top());
}
private:
std::unique_ptr<PGStateInstance> pi;
boost::circular_buffer<std::unique_ptr<PGStateInstance>> buffer;
const EpochSource &es;
};
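A minimal usage sketch (not part of the Ceph tree) showing how the two classes above cooperate: a NamedState constructed on the stack records its enter time in the PGStateHistory, and its destructor records the exit time; once the innermost state unwinds, the completed PGStateInstance is moved into the circular buffer and can be dumped. FixedEpoch is a made-up EpochSource stub and the state names are illustrative.

struct FixedEpoch : EpochSource {
  epoch_t e;
  explicit FixedEpoch(epoch_t e) : e(e) {}
  epoch_t get_osdmap_epoch() const override { return e; }
};

void demo_state_history(ceph::Formatter *f) {
  FixedEpoch src(42);
  PGStateHistory hist(src);
  {
    NamedState started(&hist, "Started");           // enter "Started"
    NamedState peering(&hist, "Started/Peering");   // nested state
  } // destructors record exit times; the instance moves into the buffer
  hist.dump(f);  // emits the "history" array built in PGStateUtils.cc
}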
ceph-main/src/osd/PGTransaction.h

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef PGTRANSACTION_H
#define PGTRANSACTION_H
#include <map>
#include <memory>
#include <optional>
#include "common/hobject.h"
#include "osd/osd_types.h"
#include "osd/osd_internal_types.h"
#include "common/interval_map.h"
#include "common/inline_variant.h"
/**
* This class represents transactions which can be submitted to
* a PGBackend. For expediency, there are some constraints on
* the operations submitted:
* 1) Rename sources may only be referenced prior to the rename
* operation to the destination.
* 2) The graph formed by edges of source->destination for clones
* (Create) and Renames must be acyclic.
* 3) clone_range sources must not be modified by the same
* transaction
*/
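/*
 * Illustrative sketch (not from the Ceph tree) of a transaction that
 * respects the constraints above: the clone source is only modified after
 * the clone edge is recorded, the source->destination graph is acyclic,
 * and the rename source is a temp object that is not referenced again.
 * The hobject_t values and the bufferlist are made up for illustration:
 *
 *   PGTransaction t;
 *   t.clone(clone_oid, head_oid);              // head -> clone (rule 2)
 *   t.write(head_oid, 0, bl.length(), bl);     // modify source afterwards
 *   t.rename(final_oid, temp_oid);             // temp source, rule 1
 */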
class PGTransaction {
public:
std::map<hobject_t, ObjectContextRef> obc_map;
class ObjectOperation {
public:
struct Init
{
struct None {};
struct Create {};
struct Clone {
hobject_t source;
};
struct Rename {
hobject_t source; // must be temp object
};
};
using InitType = boost::variant<
Init::None,
Init::Create,
Init::Clone,
Init::Rename>;
InitType init_type = Init::None();
bool delete_first = false;
/**
* is_none() && is_delete() indicates that we are deleting an
* object which already exists and not recreating it. delete_first means
* that the transaction logically removes the object.
* There are really 4 cases:
* 1) We are modifying an existing object (is_none() &&
* !is_delete())
* a) If it's an append, we just write into the log entry the old size
* b) If it's an actual overwrite, we save the old versions of the
* extents being overwritten and write those offsets into the log
* entry
* 2) We are removing and then recreating an object (!is_none() && is_delete())
* -- stash
* 3) We are removing an object (is_none() && is_delete()) -- stash
* 4) We are creating an object (!is_none() && !is_delete()) -- create (no
* stash)
*
* Create, Clone, Rename are the three ways we can recreate it.
* ECBackend transaction planning needs this context
* to figure out how to perform the transaction.
*/
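/* Illustrative mapping of calls to the four cases above (sketch only):
 *   t.write(oid, off, len, bl);           // 1) modify an existing object
 *   t.remove(oid); t.create(oid);         // 2) remove then recreate -> stash
 *   t.remove(oid);                        // 3) plain removal -> stash
 *   t.create(oid);                        // 4) fresh object, no stash
 */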
bool deletes_first() const {
return delete_first;
}
bool is_delete() const {
return boost::get<Init::None>(&init_type) != nullptr && delete_first;
}
bool is_none() const {
return boost::get<Init::None>(&init_type) != nullptr && !delete_first;
}
bool is_fresh_object() const {
return boost::get<Init::None>(&init_type) == nullptr;
}
bool is_rename() const {
return boost::get<Init::Rename>(&init_type) != nullptr;
}
bool has_source(hobject_t *source = nullptr) const {
return match(
init_type,
[&](const Init::Clone &op) -> bool {
if (source)
*source = op.source;
return true;
},
[&](const Init::Rename &op) -> bool {
if (source)
*source = op.source;
return true;
},
[&](const Init::None &) -> bool { return false; },
[&](const Init::Create &) -> bool { return false; });
}
bool clear_omap = false;
/**
* truncate
* <lowest, last> ?
*
* truncate is represented as a pair because in the event of
* multiple truncates within a single transaction we need to
* remember the lowest truncate and the final object size
* (the last truncate). We also adjust the buffers map
* to account for truncates overriding previous writes */
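/* Worked example (illustrative): within a single transaction,
 *   t.truncate(oid, 100);    // truncate becomes {100, 100}
 *   t.truncate(oid, 4096);   // becomes {100, 4096}: lowest stays 100,
 *                            // final object size is 4096
 * a later truncate below 100 would reset the pair to {off, off}. */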
std::optional<std::pair<uint64_t, uint64_t> > truncate = std::nullopt;
std::map<std::string, std::optional<ceph::buffer::list> > attr_updates;
enum class OmapUpdateType {Remove, Insert, RemoveRange};
std::vector<std::pair<OmapUpdateType, ceph::buffer::list> > omap_updates;
std::optional<ceph::buffer::list> omap_header;
/// (old, new) -- only valid with no truncate or buffer updates
std::optional<std::pair<std::set<snapid_t>, std::set<snapid_t>>> updated_snaps;
struct alloc_hint_t {
uint64_t expected_object_size;
uint64_t expected_write_size;
uint32_t flags;
};
std::optional<alloc_hint_t> alloc_hint;
struct BufferUpdate {
struct Write {
ceph::buffer::list buffer;
uint32_t fadvise_flags;
};
struct Zero {
uint64_t len;
};
struct CloneRange {
hobject_t from;
uint64_t offset;
uint64_t len;
};
};
using BufferUpdateType = boost::variant<
BufferUpdate::Write,
BufferUpdate::Zero,
BufferUpdate::CloneRange>;
private:
struct SplitMerger {
BufferUpdateType split(
uint64_t offset,
uint64_t len,
const BufferUpdateType &bu) const {
return match(
bu,
[&](const BufferUpdate::Write &w) -> BufferUpdateType {
ceph::buffer::list bl;
bl.substr_of(w.buffer, offset, len);
return BufferUpdate::Write{bl, w.fadvise_flags};
},
[&](const BufferUpdate::Zero &) -> BufferUpdateType {
return BufferUpdate::Zero{len};
},
[&](const BufferUpdate::CloneRange &c) -> BufferUpdateType {
return BufferUpdate::CloneRange{c.from, c.offset + offset, len};
});
}
uint64_t length(
const BufferUpdateType &left) const {
return match(
left,
[&](const BufferUpdate::Write &w) -> uint64_t {
return w.buffer.length();
},
[&](const BufferUpdate::Zero &z) -> uint64_t {
return z.len;
},
[&](const BufferUpdate::CloneRange &c) -> uint64_t {
return c.len;
});
}
bool can_merge(
const BufferUpdateType &left,
const BufferUpdateType &right) const {
return match(
left,
[&](const BufferUpdate::Write &w) -> bool {
auto r = boost::get<BufferUpdate::Write>(&right);
return r != nullptr && (w.fadvise_flags == r->fadvise_flags);
},
[&](const BufferUpdate::Zero &) -> bool {
auto r = boost::get<BufferUpdate::Zero>(&right);
return r != nullptr;
},
[&](const BufferUpdate::CloneRange &c) -> bool {
return false;
});
}
BufferUpdateType merge(
BufferUpdateType &&left,
BufferUpdateType &&right) const {
return match(
left,
[&](const BufferUpdate::Write &w) -> BufferUpdateType {
auto r = boost::get<BufferUpdate::Write>(&right);
ceph_assert(r && w.fadvise_flags == r->fadvise_flags);
ceph::buffer::list bl = w.buffer;
bl.append(r->buffer);
return BufferUpdate::Write{bl, w.fadvise_flags};
},
[&](const BufferUpdate::Zero &z) -> BufferUpdateType {
auto r = boost::get<BufferUpdate::Zero>(&right);
ceph_assert(r);
return BufferUpdate::Zero{z.len + r->len};
},
[&](const BufferUpdate::CloneRange &c) -> BufferUpdateType {
ceph_abort_msg("violates can_merge condition");
return left;
});
}
};
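/* Illustrative behaviour of SplitMerger inside buffer_updates (sketch):
 *   insert(0, 8, Zero{8}); insert(8, 8, Zero{8});
 *     -> adjacent Zeros merge into one Zero{16} covering [0,16)
 *   insert(4, 4, Write{bl, 0});
 *     -> the Zero is split into [0,4) and [8,16) around the new Write
 * Two Writes merge only if their fadvise_flags match; CloneRange never
 * merges (can_merge returns false). */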
public:
using buffer_update_type = interval_map<
uint64_t, BufferUpdateType, SplitMerger>;
buffer_update_type buffer_updates;
friend class PGTransaction;
};
std::map<hobject_t, ObjectOperation> op_map;
private:
ObjectOperation &get_object_op_for_modify(const hobject_t &hoid) {
auto &op = op_map[hoid];
ceph_assert(!op.is_delete());
return op;
}
ObjectOperation &get_object_op(const hobject_t &hoid) {
return op_map[hoid];
}
public:
void add_obc(
ObjectContextRef obc) {
ceph_assert(obc);
obc_map[obc->obs.oi.soid] = obc;
}
/// Sets up state for new object
void create(
const hobject_t &hoid
) {
auto &op = op_map[hoid];
ceph_assert(op.is_none() || op.is_delete());
op.init_type = ObjectOperation::Init::Create();
}
/// Sets up state for target cloned from source
void clone(
const hobject_t &target, ///< [in] obj to clone to
const hobject_t &source ///< [in] obj to clone from
) {
auto &op = op_map[target];
ceph_assert(op.is_none() || op.is_delete());
op.init_type = ObjectOperation::Init::Clone{source};
}
/// Sets up state for target renamed from source
void rename(
const hobject_t &target, ///< [in] to, must not exist, be non-temp
const hobject_t &source ///< [in] source (must be a temp object)
) {
ceph_assert(source.is_temp());
ceph_assert(!target.is_temp());
auto &op = op_map[target];
ceph_assert(op.is_none() || op.is_delete());
bool del_first = op.is_delete();
auto iter = op_map.find(source);
if (iter != op_map.end()) {
op = iter->second;
op_map.erase(iter);
op.delete_first = del_first;
}
op.init_type = ObjectOperation::Init::Rename{source};
}
/// Remove -- must not be called on rename target
void remove(
const hobject_t &hoid ///< [in] obj to remove
) {
auto &op = get_object_op_for_modify(hoid);
if (!op.is_fresh_object()) {
ceph_assert(!op.updated_snaps);
op = ObjectOperation();
op.delete_first = true;
} else {
ceph_assert(!op.is_rename());
op_map.erase(hoid); // make it a noop if it's a fresh object
}
}
void update_snaps(
const hobject_t &hoid, ///< [in] object for snaps
const std::set<snapid_t> &old_snaps,///< [in] old snaps value
const std::set<snapid_t> &new_snaps ///< [in] new snaps value
) {
auto &op = get_object_op(hoid);
ceph_assert(!op.updated_snaps);
ceph_assert(op.buffer_updates.empty());
ceph_assert(!op.truncate);
op.updated_snaps = make_pair(
old_snaps,
new_snaps);
}
/// Clears, truncates
void omap_clear(
const hobject_t &hoid ///< [in] object to clear omap
) {
auto &op = get_object_op_for_modify(hoid);
op.clear_omap = true;
op.omap_updates.clear();
op.omap_header = std::nullopt;
}
void truncate(
const hobject_t &hoid, ///< [in] object
uint64_t off ///< [in] offset to truncate to
) {
auto &op = get_object_op_for_modify(hoid);
ceph_assert(!op.updated_snaps);
op.buffer_updates.erase(
off,
std::numeric_limits<uint64_t>::max() - off);
if (!op.truncate || off < op.truncate->first) {
op.truncate = std::pair<uint64_t, uint64_t>(off, off);
} else {
op.truncate->second = off;
}
}
/// Attr ops
void setattrs(
const hobject_t &hoid, ///< [in] object to write
std::map<std::string, ceph::buffer::list, std::less<>> &attrs ///< [in] attrs, may be cleared
) {
auto &op = get_object_op_for_modify(hoid);
for (auto &[key, val]: attrs) {
auto& d = op.attr_updates[key];
d = val;
d->rebuild();
}
}
void setattr(
const hobject_t &hoid, ///< [in] object to write
const std::string &attrname, ///< [in] attr to write
ceph::buffer::list &bl ///< [in] val to write, may be claimed
) {
auto &op = get_object_op_for_modify(hoid);
auto& d = op.attr_updates[attrname];
d = bl;
d->rebuild();
}
void rmattr(
const hobject_t &hoid, ///< [in] object to write
const std::string &attrname ///< [in] attr to remove
) {
auto &op = get_object_op_for_modify(hoid);
op.attr_updates[attrname] = std::nullopt;
}
/// set alloc hint
void set_alloc_hint(
const hobject_t &hoid, ///< [in] object (must exist)
uint64_t expected_object_size, ///< [in]
uint64_t expected_write_size,
uint32_t flags
) {
auto &op = get_object_op_for_modify(hoid);
op.alloc_hint = ObjectOperation::alloc_hint_t{
expected_object_size, expected_write_size, flags};
}
/// Buffer updates
void write(
const hobject_t &hoid, ///< [in] object to write
uint64_t off, ///< [in] off at which to write
uint64_t len, ///< [in] len to write from bl
ceph::buffer::list &bl, ///< [in] bl to write will be claimed to len
uint32_t fadvise_flags = 0 ///< [in] fadvise hint
) {
auto &op = get_object_op_for_modify(hoid);
ceph_assert(!op.updated_snaps);
ceph_assert(len > 0);
ceph_assert(len == bl.length());
op.buffer_updates.insert(
off,
len,
ObjectOperation::BufferUpdate::Write{bl, fadvise_flags});
}
void clone_range(
const hobject_t &from, ///< [in] from
const hobject_t &to, ///< [in] to
uint64_t fromoff, ///< [in] offset
uint64_t len, ///< [in] len
uint64_t tooff ///< [in] offset
) {
auto &op = get_object_op_for_modify(to);
ceph_assert(!op.updated_snaps);
op.buffer_updates.insert(
tooff,
len,
ObjectOperation::BufferUpdate::CloneRange{from, fromoff, len});
}
void zero(
const hobject_t &hoid, ///< [in] object
uint64_t off, ///< [in] offset to start zeroing at
uint64_t len ///< [in] amount to zero
) {
auto &op = get_object_op_for_modify(hoid);
ceph_assert(!op.updated_snaps);
op.buffer_updates.insert(
off,
len,
ObjectOperation::BufferUpdate::Zero{len});
}
/// Omap updates
void omap_setkeys(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &keys_bl ///< [in] encoded map<string, ceph::buffer::list>
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_updates.emplace_back(
std::make_pair(
ObjectOperation::OmapUpdateType::Insert,
keys_bl));
}
void omap_setkeys(
const hobject_t &hoid, ///< [in] object to write
std::map<std::string, ceph::buffer::list> &keys ///< [in] omap keys, may be cleared
) {
using ceph::encode;
ceph::buffer::list bl;
encode(keys, bl);
omap_setkeys(hoid, bl);
}
void omap_rmkeys(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &keys_bl ///< [in] encode set<string>
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_updates.emplace_back(
std::make_pair(
ObjectOperation::OmapUpdateType::Remove,
keys_bl));
}
void omap_rmkeys(
const hobject_t &hoid, ///< [in] object to write
std::set<std::string> &keys ///< [in] omap keys, may be cleared
) {
using ceph::encode;
ceph::buffer::list bl;
encode(keys, bl);
omap_rmkeys(hoid, bl);
}
void omap_rmkeyrange(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &range_bl ///< [in] encode string[2]
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_updates.emplace_back(
std::make_pair(
ObjectOperation::OmapUpdateType::RemoveRange,
range_bl));
}
void omap_rmkeyrange(
const hobject_t &hoid, ///< [in] object to write
std::string& key_begin, ///< [in] first key in range
std::string& key_end ///< [in] first key past range, range is [first,last)
) {
ceph::buffer::list bl;
::encode(key_begin, bl);
::encode(key_end, bl);
omap_rmkeyrange(hoid, bl);
}
void omap_setheader(
const hobject_t &hoid, ///< [in] object to write
ceph::buffer::list &header ///< [in] header
) {
auto &op = get_object_op_for_modify(hoid);
op.omap_header = header;
}
bool empty() const {
return op_map.empty();
}
uint64_t get_bytes_written() const {
uint64_t ret = 0;
for (auto &&i: op_map) {
for (auto &&j: i.second.buffer_updates) {
ret += j.get_len();
}
}
return ret;
}
void nop(
const hobject_t &hoid ///< [in] obj to which we are doing nothing
) {
get_object_op_for_modify(hoid);
}
/* Calls t() on all pair<hobject_t, ObjectOperation> & such that clone/rename
* sinks are always called before clone sources
*
* TODO: add a fast path for the single object case and possibly the single
* object clone from source case (make_writeable made a clone).
*
* This structure only requires that the source->sink graph be acyclic.
* This is much more general than is actually required by PrimaryLogPG.
* Only 4 flavors of multi-object transactions actually happen:
* 1) rename temp -> object for copyfrom
* 2) clone head -> clone, modify head for make_writeable on normal head write
* 3) clone clone -> head for rollback
* 4) 2 + 3
*
* We can bypass the below logic for single object transactions trivially
* (including case 1 above since temp doesn't show up again).
* For 2-3, we could add something ad-hoc to ensure that they happen in the
* right order, but it actually seems easier to just do the graph construction.
*/
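/* Example of the resulting order (illustrative): for a transaction built as
 *   t.clone(clone_oid, head_oid);   // head -> clone edge
 *   t.write(head_oid, 0, len, bl);
 * safe_create_traverse(f) calls f on clone_oid's entry before head_oid's,
 * so the clone sink is handled before its source is modified.  A source
 * that is not itself in op_map (e.g. the temp source of a rename) is pushed
 * as a root but skipped when popped, since op_map.find() does not find it. */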
template <typename T>
void safe_create_traverse(T &&t) {
std::map<hobject_t, std::list<hobject_t>> dgraph;
std::list<hobject_t> stack;
// Populate stack with roots, dgraph with edges
for (auto &&opair: op_map) {
hobject_t source;
if (opair.second.has_source(&source)) {
auto &l = dgraph[source];
if (l.empty() && !op_map.count(source)) {
/* Source oids not in op_map need to be added as roots
* (but only once!) */
stack.push_back(source);
}
l.push_back(opair.first);
} else {
stack.push_back(opair.first);
}
}
/* Why don't we need to worry about accessing the same node
* twice? dgraph nodes always have in-degree at most 1 because
* the inverse graph nodes (source->dest) can have out-degree
* at most 1 (only one possible source). We do a post-order
* depth-first traversal here to ensure we call f on children
* before parents.
*/
while (!stack.empty()) {
hobject_t &cur = stack.front();
auto diter = dgraph.find(cur);
if (diter == dgraph.end()) {
/* Leaf: pop and call t() */
auto opiter = op_map.find(cur);
if (opiter != op_map.end())
t(*opiter);
stack.pop_front();
} else {
/* Internal node: push children onto stack, remove edge,
* recurse. When this node is encountered again, it'll
* be a leaf */
ceph_assert(!diter->second.empty());
stack.splice(stack.begin(), diter->second);
dgraph.erase(diter);
}
}
}
};
using PGTransactionUPtr = std::unique_ptr<PGTransaction>;
#endif
ceph-main/src/osd/PeeringState.cc

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "PGPeeringEvent.h"
#include "common/ceph_releases.h"
#include "common/dout.h"
#include "PeeringState.h"
#include "messages/MOSDPGRemove.h"
#include "messages/MBackfillReserve.h"
#include "messages/MRecoveryReserve.h"
#include "messages/MOSDScrubReserve.h"
#include "messages/MOSDPGInfo2.h"
#include "messages/MOSDPGTrim.h"
#include "messages/MOSDPGLog.h"
#include "messages/MOSDPGNotify2.h"
#include "messages/MOSDPGQuery2.h"
#include "messages/MOSDPGLease.h"
#include "messages/MOSDPGLeaseAck.h"
#define dout_context cct
#define dout_subsys ceph_subsys_osd
using std::dec;
using std::hex;
using std::make_pair;
using std::map;
using std::ostream;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
using ceph::Formatter;
using ceph::make_message;
BufferedRecoveryMessages::BufferedRecoveryMessages(PeeringCtx &ctx)
// steal messages from ctx
: message_map{std::move(ctx.message_map)}
{}
void BufferedRecoveryMessages::send_notify(int to, const pg_notify_t &n)
{
spg_t pgid(n.info.pgid.pgid, n.to);
send_osd_message(to, TOPNSPC::make_message<MOSDPGNotify2>(pgid, n));
}
void BufferedRecoveryMessages::send_query(
int to,
spg_t to_spgid,
const pg_query_t &q)
{
send_osd_message(to, TOPNSPC::make_message<MOSDPGQuery2>(to_spgid, q));
}
void BufferedRecoveryMessages::send_info(
int to,
spg_t to_spgid,
epoch_t min_epoch,
epoch_t cur_epoch,
const pg_info_t &info,
std::optional<pg_lease_t> lease,
std::optional<pg_lease_ack_t> lease_ack)
{
send_osd_message(
to,
TOPNSPC::make_message<MOSDPGInfo2>(
to_spgid,
info,
cur_epoch,
min_epoch,
lease,
lease_ack)
);
}
void PGPool::update(OSDMapRef map)
{
const pg_pool_t *pi = map->get_pg_pool(id);
if (!pi) {
return; // pool has been deleted
}
info = *pi;
name = map->get_pool_name(id);
bool updated = false;
if ((map->get_epoch() != cached_epoch + 1) ||
(pi->get_snap_epoch() == map->get_epoch())) {
updated = true;
}
if (info.is_pool_snaps_mode() && updated) {
snapc = pi->get_snap_context();
}
cached_epoch = map->get_epoch();
}
/*-------------Peering State Helpers----------------*/
#undef dout_prefix
#define dout_prefix (dpp->gen_prefix(*_dout)) \
<< "PeeringState::" << __func__ << " "
#undef psdout
#define psdout(x) ldout(cct, x)
PeeringState::PeeringState(
CephContext *cct,
pg_shard_t pg_whoami,
spg_t spgid,
const PGPool &_pool,
OSDMapRef curmap,
DoutPrefixProvider *dpp,
PeeringListener *pl)
: state_history(*pl),
cct(cct),
spgid(spgid),
dpp(dpp),
pl(pl),
orig_ctx(0),
osdmap_ref(curmap),
pool(_pool),
pg_whoami(pg_whoami),
info(spgid),
pg_log(cct),
last_require_osd_release(curmap->require_osd_release),
missing_loc(spgid, this, dpp, cct),
machine(this, cct, spgid, dpp, pl, &state_history)
{
machine.initiate();
}
void PeeringState::start_handle(PeeringCtx *new_ctx) {
ceph_assert(!rctx);
ceph_assert(!orig_ctx);
orig_ctx = new_ctx;
if (new_ctx) {
if (messages_pending_flush) {
rctx.emplace(*messages_pending_flush, *new_ctx);
} else {
rctx.emplace(*new_ctx);
}
rctx->start_time = ceph_clock_now();
}
}
void PeeringState::begin_block_outgoing() {
ceph_assert(!messages_pending_flush);
ceph_assert(orig_ctx);
ceph_assert(rctx);
messages_pending_flush.emplace();
rctx.emplace(*messages_pending_flush, *orig_ctx);
}
void PeeringState::clear_blocked_outgoing() {
ceph_assert(orig_ctx);
ceph_assert(rctx);
messages_pending_flush = std::optional<BufferedRecoveryMessages>();
}
void PeeringState::end_block_outgoing() {
ceph_assert(messages_pending_flush);
ceph_assert(orig_ctx);
ceph_assert(rctx);
orig_ctx->accept_buffered_messages(*messages_pending_flush);
rctx.emplace(*orig_ctx);
messages_pending_flush = std::optional<BufferedRecoveryMessages>();
}
void PeeringState::end_handle() {
if (rctx) {
utime_t dur = ceph_clock_now() - rctx->start_time;
machine.event_time += dur;
}
machine.event_count++;
rctx = std::nullopt;
orig_ctx = NULL;
}
void PeeringState::check_recovery_sources(const OSDMapRef& osdmap)
{
/*
* check that any peers we are planning to pull (or are currently
* pulling) objects from are dealt with.
*/
missing_loc.check_recovery_sources(osdmap);
pl->check_recovery_sources(osdmap);
for (auto i = peer_log_requested.begin(); i != peer_log_requested.end();) {
if (!osdmap->is_up(i->osd)) {
psdout(10) << "peer_log_requested removing " << *i << dendl;
peer_log_requested.erase(i++);
} else {
++i;
}
}
for (auto i = peer_missing_requested.begin();
i != peer_missing_requested.end();) {
if (!osdmap->is_up(i->osd)) {
psdout(10) << "peer_missing_requested removing " << *i << dendl;
peer_missing_requested.erase(i++);
} else {
++i;
}
}
}
void PeeringState::update_history(const pg_history_t& new_history)
{
auto mnow = pl->get_mnow();
info.history.refresh_prior_readable_until_ub(mnow, prior_readable_until_ub);
if (info.history.merge(new_history)) {
psdout(20) << "advanced history from " << new_history << dendl;
dirty_info = true;
if (info.history.last_epoch_clean >= info.history.same_interval_since) {
psdout(20) << "clearing past_intervals" << dendl;
past_intervals.clear();
dirty_big_info = true;
}
prior_readable_until_ub = info.history.get_prior_readable_until_ub(mnow);
if (prior_readable_until_ub != ceph::signedspan::zero()) {
dout(20) << "prior_readable_until_ub " << prior_readable_until_ub
<< " (mnow " << mnow << " + "
<< info.history.prior_readable_until_ub << ")" << dendl;
}
}
}
hobject_t PeeringState::earliest_backfill() const
{
hobject_t e = hobject_t::get_max();
for (const pg_shard_t& bt : get_backfill_targets()) {
const pg_info_t &pi = get_peer_info(bt);
e = std::min(pi.last_backfill, e);
}
return e;
}
void PeeringState::purge_strays()
{
if (is_premerge()) {
psdout(10) << "purge_strays " << stray_set << " but premerge, doing nothing"
<< dendl;
return;
}
if (cct->_conf.get_val<bool>("osd_debug_no_purge_strays")) {
return;
}
psdout(10) << "purge_strays " << stray_set << dendl;
bool removed = false;
for (auto p = stray_set.begin(); p != stray_set.end(); ++p) {
ceph_assert(!is_acting_recovery_backfill(*p));
if (get_osdmap()->is_up(p->osd)) {
psdout(10) << "sending PGRemove to osd." << *p << dendl;
vector<spg_t> to_remove;
to_remove.push_back(spg_t(info.pgid.pgid, p->shard));
auto m = TOPNSPC::make_message<MOSDPGRemove>(
get_osdmap_epoch(),
to_remove);
pl->send_cluster_message(p->osd, std::move(m), get_osdmap_epoch());
} else {
psdout(10) << "not sending PGRemove to down osd." << *p << dendl;
}
peer_missing.erase(*p);
peer_info.erase(*p);
missing_loc.remove_stray_recovery_sources(*p);
peer_purged.insert(*p);
removed = true;
}
// if we removed anyone, update peers (which include peer_info)
if (removed)
update_heartbeat_peers();
stray_set.clear();
// clear _requested maps; we may have to peer() again if we discover
// (more) stray content
peer_log_requested.clear();
peer_missing_requested.clear();
}
void PeeringState::query_unfound(Formatter *f, string state)
{
psdout(20) << "Enter PeeringState common QueryUnfound" << dendl;
{
f->dump_string("state", state);
f->dump_bool("available_might_have_unfound", true);
f->open_array_section("might_have_unfound");
for (auto p = might_have_unfound.begin();
p != might_have_unfound.end();
++p) {
if (peer_missing.count(*p)) {
; // Ignore already probed OSDs
} else {
f->open_object_section("osd");
f->dump_stream("osd") << *p;
if (peer_missing_requested.count(*p)) {
f->dump_string("status", "querying");
} else if (!get_osdmap()->is_up(p->osd)) {
f->dump_string("status", "osd is down");
} else {
f->dump_string("status", "not queried");
}
f->close_section();
}
}
f->close_section();
}
psdout(20) << "Exit PeeringState common QueryUnfound" << dendl;
return;
}
bool PeeringState::proc_replica_info(
pg_shard_t from, const pg_info_t &oinfo, epoch_t send_epoch)
{
auto p = peer_info.find(from);
if (p != peer_info.end() && p->second.last_update == oinfo.last_update) {
psdout(10) << " got dup osd." << from << " info "
<< oinfo << ", identical to ours" << dendl;
return false;
}
if (!get_osdmap()->has_been_up_since(from.osd, send_epoch)) {
psdout(10) << " got info " << oinfo << " from down osd." << from
<< " discarding" << dendl;
return false;
}
psdout(10) << " got osd." << from << " " << oinfo << dendl;
ceph_assert(is_primary());
peer_info[from] = oinfo;
might_have_unfound.insert(from);
update_history(oinfo.history);
// stray?
if (!is_up(from) && !is_acting(from)) {
psdout(10) << " osd." << from << " has stray content: " << oinfo << dendl;
stray_set.insert(from);
if (is_clean()) {
purge_strays();
}
}
// was this a new info? if so, update peers!
if (p == peer_info.end())
update_heartbeat_peers();
return true;
}
void PeeringState::remove_down_peer_info(const OSDMapRef &osdmap)
{
// Remove any downed osds from peer_info
bool removed = false;
auto p = peer_info.begin();
while (p != peer_info.end()) {
if (!osdmap->is_up(p->first.osd)) {
psdout(10) << " dropping down osd." << p->first << " info " << p->second << dendl;
peer_missing.erase(p->first);
peer_log_requested.erase(p->first);
peer_missing_requested.erase(p->first);
peer_info.erase(p++);
removed = true;
} else
++p;
}
// Remove any downed osds from peer_purged so we can re-purge if necessary
auto it = peer_purged.begin();
while (it != peer_purged.end()) {
if (!osdmap->is_up(it->osd)) {
psdout(10) << " dropping down osd." << *it << " from peer_purged" << dendl;
peer_purged.erase(it++);
} else {
++it;
}
}
// if we removed anyone, update peers (which include peer_info)
if (removed)
update_heartbeat_peers();
check_recovery_sources(osdmap);
}
void PeeringState::update_heartbeat_peers()
{
if (!is_primary())
return;
set<int> new_peers;
for (unsigned i=0; i<acting.size(); i++) {
if (acting[i] != CRUSH_ITEM_NONE)
new_peers.insert(acting[i]);
}
for (unsigned i=0; i<up.size(); i++) {
if (up[i] != CRUSH_ITEM_NONE)
new_peers.insert(up[i]);
}
for (auto p = peer_info.begin(); p != peer_info.end(); ++p) {
new_peers.insert(p->first.osd);
}
pl->update_heartbeat_peers(std::move(new_peers));
}
void PeeringState::write_if_dirty(ObjectStore::Transaction& t)
{
pl->prepare_write(
info,
last_written_info,
past_intervals,
pg_log,
dirty_info,
dirty_big_info,
last_persisted_osdmap < get_osdmap_epoch(),
t);
if (dirty_info || dirty_big_info) {
last_persisted_osdmap = get_osdmap_epoch();
last_written_info = info;
dirty_info = false;
dirty_big_info = false;
}
}
void PeeringState::advance_map(
OSDMapRef osdmap, OSDMapRef lastmap,
vector<int>& newup, int up_primary,
vector<int>& newacting, int acting_primary,
PeeringCtx &rctx)
{
ceph_assert(lastmap == osdmap_ref);
psdout(10) << "handle_advance_map "
<< newup << "/" << newacting
<< " -- " << up_primary << "/" << acting_primary
<< dendl;
update_osdmap_ref(osdmap);
pool.update(osdmap);
AdvMap evt(
osdmap, lastmap, newup, up_primary,
newacting, acting_primary);
handle_event(evt, &rctx);
if (pool.info.last_change == osdmap_ref->get_epoch()) {
pl->on_pool_change();
}
readable_interval = pool.get_readable_interval(cct->_conf);
last_require_osd_release = osdmap->require_osd_release;
}
void PeeringState::activate_map(PeeringCtx &rctx)
{
psdout(10) << dendl;
ActMap evt;
handle_event(evt, &rctx);
if (osdmap_ref->get_epoch() - last_persisted_osdmap >
cct->_conf->osd_pg_epoch_persisted_max_stale) {
psdout(20) << ": Dirtying info: last_persisted is "
<< last_persisted_osdmap
<< " while current is " << osdmap_ref->get_epoch() << dendl;
dirty_info = true;
} else {
psdout(20) << ": Not dirtying info: last_persisted is "
<< last_persisted_osdmap
<< " while current is " << osdmap_ref->get_epoch() << dendl;
}
write_if_dirty(rctx.transaction);
if (get_osdmap()->check_new_blocklist_entries()) {
pl->check_blocklisted_watchers();
}
}
void PeeringState::set_last_peering_reset()
{
psdout(20) << "set_last_peering_reset " << get_osdmap_epoch() << dendl;
if (last_peering_reset != get_osdmap_epoch()) {
last_peering_reset = get_osdmap_epoch();
psdout(10) << "Clearing blocked outgoing recovery messages" << dendl;
clear_blocked_outgoing();
if (!pl->try_flush_or_schedule_async()) {
psdout(10) << "Beginning to block outgoing recovery messages" << dendl;
begin_block_outgoing();
} else {
psdout(10) << "Not blocking outgoing recovery messages" << dendl;
}
}
}
void PeeringState::complete_flush()
{
flushes_in_progress--;
if (flushes_in_progress == 0) {
pl->on_flushed();
}
}
void PeeringState::check_full_transition(OSDMapRef lastmap, OSDMapRef osdmap)
{
const pg_pool_t *pi = osdmap->get_pg_pool(info.pgid.pool());
if (!pi) {
return; // pool deleted
}
bool changed = false;
if (pi->has_flag(pg_pool_t::FLAG_FULL)) {
const pg_pool_t *opi = lastmap->get_pg_pool(info.pgid.pool());
if (!opi || !opi->has_flag(pg_pool_t::FLAG_FULL)) {
psdout(10) << " pool was marked full in " << osdmap->get_epoch() << dendl;
changed = true;
}
}
if (changed) {
info.history.last_epoch_marked_full = osdmap->get_epoch();
dirty_info = true;
}
}
bool PeeringState::should_restart_peering(
int newupprimary,
int newactingprimary,
const vector<int>& newup,
const vector<int>& newacting,
OSDMapRef lastmap,
OSDMapRef osdmap)
{
if (PastIntervals::is_new_interval(
primary.osd,
newactingprimary,
acting,
newacting,
up_primary.osd,
newupprimary,
up,
newup,
osdmap.get(),
lastmap.get(),
info.pgid.pgid)) {
psdout(20) << "new interval newup " << newup
<< " newacting " << newacting << dendl;
return true;
}
if (!lastmap->is_up(pg_whoami.osd) && osdmap->is_up(pg_whoami.osd)) {
psdout(10) << "osd transitioned from down -> up"
<< dendl;
return true;
}
return false;
}
/* Called before initializing peering during advance_map */
void PeeringState::start_peering_interval(
const OSDMapRef lastmap,
const vector<int>& newup, int new_up_primary,
const vector<int>& newacting, int new_acting_primary,
ObjectStore::Transaction &t)
{
const OSDMapRef osdmap = get_osdmap();
set_last_peering_reset();
vector<int> oldacting, oldup;
int oldrole = get_role();
if (is_primary()) {
pl->clear_ready_to_merge();
}
pg_shard_t old_acting_primary = get_primary();
pg_shard_t old_up_primary = up_primary;
bool was_old_primary = is_primary();
bool was_old_nonprimary = is_nonprimary();
acting.swap(oldacting);
up.swap(oldup);
init_primary_up_acting(
newup,
newacting,
new_up_primary,
new_acting_primary);
if (info.stats.up != up ||
info.stats.acting != acting ||
info.stats.up_primary != new_up_primary ||
info.stats.acting_primary != new_acting_primary) {
info.stats.up = up;
info.stats.up_primary = new_up_primary;
info.stats.acting = acting;
info.stats.acting_primary = new_acting_primary;
info.stats.mapping_epoch = osdmap->get_epoch();
}
pl->clear_publish_stats();
// This will now be remapped during a backfill in cases
// that it would not have been before.
if (up != acting)
state_set(PG_STATE_REMAPPED);
else
state_clear(PG_STATE_REMAPPED);
int role = osdmap->calc_pg_role(pg_whoami, acting);
set_role(role);
// did acting, up, primary|acker change?
if (!lastmap) {
psdout(10) << " no lastmap" << dendl;
dirty_info = true;
dirty_big_info = true;
info.history.same_interval_since = osdmap->get_epoch();
} else {
std::stringstream debug;
ceph_assert(info.history.same_interval_since != 0);
bool new_interval = PastIntervals::check_new_interval(
old_acting_primary.osd,
new_acting_primary,
oldacting, newacting,
old_up_primary.osd,
new_up_primary,
oldup, newup,
info.history.same_interval_since,
info.history.last_epoch_clean,
osdmap.get(),
lastmap.get(),
info.pgid.pgid,
missing_loc.get_recoverable_predicate(),
&past_intervals,
&debug);
psdout(10) << ": check_new_interval output: "
<< debug.str() << dendl;
if (new_interval) {
if (osdmap->get_epoch() == pl->cluster_osdmap_trim_lower_bound() &&
info.history.last_epoch_clean < osdmap->get_epoch()) {
psdout(10) << " map gap, clearing past_intervals and faking" << dendl;
// our information is incomplete and useless; someone else was clean
// after everything we know if osdmaps were trimmed.
past_intervals.clear();
} else {
psdout(10) << " noting past " << past_intervals << dendl;
}
dirty_info = true;
dirty_big_info = true;
info.history.same_interval_since = osdmap->get_epoch();
if (osdmap->have_pg_pool(info.pgid.pgid.pool()) &&
info.pgid.pgid.is_split(lastmap->get_pg_num(info.pgid.pgid.pool()),
osdmap->get_pg_num(info.pgid.pgid.pool()),
nullptr)) {
info.history.last_epoch_split = osdmap->get_epoch();
}
}
}
if (old_up_primary != up_primary ||
oldup != up) {
info.history.same_up_since = osdmap->get_epoch();
}
// this comparison includes primary rank via pg_shard_t
if (old_acting_primary != get_primary()) {
info.history.same_primary_since = osdmap->get_epoch();
}
on_new_interval();
psdout(1) << "up " << oldup << " -> " << up
<< ", acting " << oldacting << " -> " << acting
<< ", acting_primary " << old_acting_primary << " -> "
<< new_acting_primary
<< ", up_primary " << old_up_primary << " -> " << new_up_primary
<< ", role " << oldrole << " -> " << role
<< ", features acting " << acting_features
<< " upacting " << upacting_features
<< dendl;
// deactivate.
state_clear(PG_STATE_ACTIVE);
state_clear(PG_STATE_PEERED);
state_clear(PG_STATE_PREMERGE);
state_clear(PG_STATE_DOWN);
state_clear(PG_STATE_RECOVERY_WAIT);
state_clear(PG_STATE_RECOVERY_TOOFULL);
state_clear(PG_STATE_RECOVERING);
peer_purged.clear();
acting_recovery_backfill.clear();
// reset primary/replica state?
if (was_old_primary || is_primary()) {
pl->clear_want_pg_temp();
} else if (was_old_nonprimary || is_nonprimary()) {
pl->clear_want_pg_temp();
}
clear_primary_state();
pl->on_change(t);
ceph_assert(!deleting);
// should we tell the primary we are here?
send_notify = !is_primary();
if (role != oldrole ||
was_old_primary != is_primary()) {
// did primary change?
if (was_old_primary != is_primary()) {
state_clear(PG_STATE_CLEAN);
}
pl->on_role_change();
} else {
// no role change.
// did primary change?
if (get_primary() != old_acting_primary) {
psdout(10) << oldacting << " -> " << acting
<< ", acting primary "
<< old_acting_primary << " -> " << get_primary()
<< dendl;
} else {
// primary is the same.
if (is_primary()) {
// i am (still) primary. but my replica set changed.
state_clear(PG_STATE_CLEAN);
psdout(10) << oldacting << " -> " << acting
<< ", replicas changed" << dendl;
}
}
}
if (acting.empty() && !up.empty() && up_primary == pg_whoami) {
psdout(10) << " acting empty, but i am up[0], clearing pg_temp" << dendl;
pl->queue_want_pg_temp(acting);
}
}
void PeeringState::on_new_interval()
{
dout(20) << dendl;
const OSDMapRef osdmap = get_osdmap();
// initialize features
acting_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
upacting_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
for (auto p = acting.begin(); p != acting.end(); ++p) {
if (*p == CRUSH_ITEM_NONE)
continue;
uint64_t f = osdmap->get_xinfo(*p).features;
acting_features &= f;
upacting_features &= f;
}
for (auto p = up.begin(); p != up.end(); ++p) {
if (*p == CRUSH_ITEM_NONE)
continue;
upacting_features &= osdmap->get_xinfo(*p).features;
}
psdout(20) << "upacting_features 0x" << std::hex
<< upacting_features << std::dec
<< " from " << acting << "+" << up << dendl;
psdout(20) << "checking missing set deletes flag. missing = "
<< get_pg_log().get_missing() << dendl;
if (!pg_log.get_missing().may_include_deletes &&
!perform_deletes_during_peering()) {
pl->rebuild_missing_set_with_deletes(pg_log);
}
ceph_assert(
pg_log.get_missing().may_include_deletes ==
!perform_deletes_during_peering());
init_hb_stamps();
// update lease bounds for a new interval
auto mnow = pl->get_mnow();
prior_readable_until_ub = std::max(prior_readable_until_ub,
readable_until_ub);
prior_readable_until_ub = info.history.refresh_prior_readable_until_ub(
mnow, prior_readable_until_ub);
psdout(10) << "prior_readable_until_ub "
<< prior_readable_until_ub << " (mnow " << mnow << " + "
<< info.history.prior_readable_until_ub << ")" << dendl;
prior_readable_down_osds.clear(); // we populate this when we build the priorset
readable_until =
readable_until_ub =
readable_until_ub_sent =
readable_until_ub_from_primary = ceph::signedspan::zero();
acting_readable_until_ub.clear();
if (is_primary()) {
acting_readable_until_ub.resize(acting.size(), ceph::signedspan::zero());
}
pl->on_new_interval();
}
void PeeringState::init_primary_up_acting(
const vector<int> &newup,
const vector<int> &newacting,
int new_up_primary,
int new_acting_primary)
{
actingset.clear();
acting = newacting;
for (uint8_t i = 0; i < acting.size(); ++i) {
if (acting[i] != CRUSH_ITEM_NONE)
actingset.insert(
pg_shard_t(
acting[i],
pool.info.is_erasure() ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
upset.clear();
up = newup;
for (uint8_t i = 0; i < up.size(); ++i) {
if (up[i] != CRUSH_ITEM_NONE)
upset.insert(
pg_shard_t(
up[i],
pool.info.is_erasure() ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
if (!pool.info.is_erasure()) {
// replicated
up_primary = pg_shard_t(new_up_primary, shard_id_t::NO_SHARD);
primary = pg_shard_t(new_acting_primary, shard_id_t::NO_SHARD);
} else {
// erasure
up_primary = pg_shard_t();
primary = pg_shard_t();
for (uint8_t i = 0; i < up.size(); ++i) {
if (up[i] == new_up_primary) {
up_primary = pg_shard_t(up[i], shard_id_t(i));
break;
}
}
for (uint8_t i = 0; i < acting.size(); ++i) {
if (acting[i] == new_acting_primary) {
primary = pg_shard_t(acting[i], shard_id_t(i));
break;
}
}
ceph_assert(up_primary.osd == new_up_primary);
ceph_assert(primary.osd == new_acting_primary);
}
}
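// Illustrative example (made-up values): for an erasure-coded pool with
// up = [3, 7, 1] and new_up_primary = 7, the loop above yields
// up_primary = pg_shard_t(7, shard_id_t(1)), i.e. the shard id records the
// position in the up vector, whereas a replicated pool always uses
// shard_id_t::NO_SHARD together with the osd id.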
void PeeringState::init_hb_stamps()
{
if (is_primary()) {
// we care about all other osds in the acting set
hb_stamps.resize(acting.size() - 1);
unsigned i = 0;
for (auto p : acting) {
if (p == CRUSH_ITEM_NONE || p == get_primary().osd) {
continue;
}
hb_stamps[i++] = pl->get_hb_stamps(p);
}
hb_stamps.resize(i);
} else if (is_nonprimary()) {
// we care about just the primary
hb_stamps.resize(1);
hb_stamps[0] = pl->get_hb_stamps(get_primary().osd);
} else {
hb_stamps.clear();
}
dout(10) << "now " << hb_stamps << dendl;
}
void PeeringState::clear_recovery_state()
{
async_recovery_targets.clear();
backfill_targets.clear();
}
void PeeringState::clear_primary_state()
{
psdout(10) << "clear_primary_state" << dendl;
// clear peering state
stray_set.clear();
peer_log_requested.clear();
peer_missing_requested.clear();
peer_info.clear();
peer_bytes.clear();
peer_missing.clear();
peer_last_complete_ondisk.clear();
peer_activated.clear();
min_last_complete_ondisk = eversion_t();
pg_trim_to = eversion_t();
might_have_unfound.clear();
need_up_thru = false;
missing_loc.clear();
pg_log.reset_recovery_pointers();
clear_recovery_state();
last_update_ondisk = eversion_t();
missing_loc.clear();
pl->clear_primary_state();
}
/// return [start,end) bounds for required past_intervals
static pair<epoch_t, epoch_t> get_required_past_interval_bounds(
const pg_info_t &info,
epoch_t oldest_map) {
epoch_t start = std::max(
info.history.last_epoch_clean ? info.history.last_epoch_clean :
info.history.epoch_pool_created,
oldest_map);
epoch_t end = std::max(
info.history.same_interval_since,
info.history.epoch_pool_created);
return make_pair(start, end);
}
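// Worked example (illustrative numbers): with last_epoch_clean = 550,
// epoch_pool_created = 100, oldest_map = 560 and same_interval_since = 600,
// start = max(550, 560) = 560 and end = max(600, 100) = 600, giving the
// required bounds [560, 600).  If last_epoch_clean were 0, the pool
// creation epoch would be used in its place.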
void PeeringState::check_past_interval_bounds() const
{
// cluster_osdmap_trim_lower_bound gives us a bound on needed
// intervals, see doc/dev/osd_internals/past_intervals.rst
auto oldest_epoch = pl->cluster_osdmap_trim_lower_bound();
auto rpib = get_required_past_interval_bounds(
info,
oldest_epoch);
if (rpib.first >= rpib.second) {
// do not warn if the start bound is dictated by oldest_map; the
// past intervals are presumably appropriate given the pg info.
if (!past_intervals.empty() &&
rpib.first > oldest_epoch) {
pl->get_clog_error() << info.pgid << " required past_interval bounds are"
<< " empty [" << rpib << ") but past_intervals is not: "
<< past_intervals;
derr << info.pgid << " required past_interval bounds are"
<< " empty [" << rpib << ") but past_intervals is not: "
<< past_intervals << dendl;
}
} else {
if (past_intervals.empty()) {
pl->get_clog_error() << info.pgid << " required past_interval bounds are"
<< " not empty [" << rpib << ") but past_intervals "
<< past_intervals << " is empty";
derr << info.pgid << " required past_interval bounds are"
<< " not empty [" << rpib << ") but past_intervals "
<< past_intervals << " is empty" << dendl;
ceph_assert(!past_intervals.empty());
}
auto apib = past_intervals.get_bounds();
if (apib.first > rpib.first) {
pl->get_clog_error() << info.pgid << " past_intervals [" << apib
<< ") start interval does not contain the required"
<< " bound [" << rpib << ") start";
derr << info.pgid << " past_intervals [" << apib
<< ") start interval does not contain the required"
<< " bound [" << rpib << ") start" << dendl;
ceph_abort_msg("past_interval start interval mismatch");
}
if (apib.second != rpib.second) {
pl->get_clog_error() << info.pgid << " past_interval bound [" << apib
<< ") end does not match required [" << rpib
<< ") end";
derr << info.pgid << " past_interval bound [" << apib
<< ") end does not match required [" << rpib
<< ") end" << dendl;
ceph_abort_msg("past_interval end mismatch");
}
}
}
int PeeringState::clamp_recovery_priority(int priority, int pool_recovery_priority, int max)
{
static_assert(OSD_RECOVERY_PRIORITY_MIN < OSD_RECOVERY_PRIORITY_MAX, "Invalid priority range");
static_assert(OSD_RECOVERY_PRIORITY_MIN >= 0, "Priority range must match unsigned type");
ceph_assert(max <= OSD_RECOVERY_PRIORITY_MAX);
// User can't set this too high anymore, but might be a legacy value
if (pool_recovery_priority > OSD_POOL_PRIORITY_MAX)
pool_recovery_priority = OSD_POOL_PRIORITY_MAX;
if (pool_recovery_priority < OSD_POOL_PRIORITY_MIN)
pool_recovery_priority = OSD_POOL_PRIORITY_MIN;
// Shift range from min to max to 0 to max - min
pool_recovery_priority += (0 - OSD_POOL_PRIORITY_MIN);
ceph_assert(pool_recovery_priority >= 0 && pool_recovery_priority <= (OSD_POOL_PRIORITY_MAX - OSD_POOL_PRIORITY_MIN));
priority += pool_recovery_priority;
// Clamp to valid range
return std::clamp<int>(priority, OSD_RECOVERY_PRIORITY_MIN, max);
}
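// Illustrative example: assuming the usual OSD_POOL_PRIORITY_MIN/MAX of
// -10/10 (see osd_types.h for the authoritative values), a pool
// recovery_priority of -3 is clamped, shifted to 7 (-3 - (-10)) and added
// to the incoming priority before the final clamp to
// [OSD_RECOVERY_PRIORITY_MIN, max].  Note that even a pool priority of 0
// still adds a constant offset of -OSD_POOL_PRIORITY_MIN.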
unsigned PeeringState::get_recovery_priority()
{
// a higher value -> a higher priority
int ret = OSD_RECOVERY_PRIORITY_BASE;
int base = ret;
if (state & PG_STATE_FORCED_RECOVERY) {
ret = OSD_RECOVERY_PRIORITY_FORCED;
} else {
// XXX: This priority boost isn't so much about inactive, but about data-at-risk
if (is_degraded() && info.stats.avail_no_missing.size() < pool.info.min_size) {
base = OSD_RECOVERY_INACTIVE_PRIORITY_BASE;
// inactive: no. of replicas < min_size, highest priority since it blocks IO
ret = base + (pool.info.min_size - info.stats.avail_no_missing.size());
}
int64_t pool_recovery_priority = 0;
pool.info.opts.get(pool_opts_t::RECOVERY_PRIORITY, &pool_recovery_priority);
ret = clamp_recovery_priority(ret, pool_recovery_priority, max_prio_map[base]);
}
psdout(20) << "recovery priority is " << ret << dendl;
return static_cast<unsigned>(ret);
}
unsigned PeeringState::get_backfill_priority()
{
// a higher value -> a higher priority
int ret = OSD_BACKFILL_PRIORITY_BASE;
int base = ret;
if (state & PG_STATE_FORCED_BACKFILL) {
ret = OSD_BACKFILL_PRIORITY_FORCED;
} else {
if (actingset.size() < pool.info.min_size) {
base = OSD_BACKFILL_INACTIVE_PRIORITY_BASE;
// inactive: no. of replicas < min_size, highest priority since it blocks IO
ret = base + (pool.info.min_size - actingset.size());
} else if (is_undersized()) {
// undersized: OSD_BACKFILL_DEGRADED_PRIORITY_BASE + num missing replicas
ceph_assert(pool.info.size > actingset.size());
base = OSD_BACKFILL_DEGRADED_PRIORITY_BASE;
ret = base + (pool.info.size - actingset.size());
} else if (is_degraded()) {
// degraded: baseline degraded
base = ret = OSD_BACKFILL_DEGRADED_PRIORITY_BASE;
}
// Adjust with pool's recovery priority
int64_t pool_recovery_priority = 0;
pool.info.opts.get(pool_opts_t::RECOVERY_PRIORITY, &pool_recovery_priority);
ret = clamp_recovery_priority(ret, pool_recovery_priority, max_prio_map[base]);
}
psdout(20) << "backfill priority is " << ret << dendl;
return static_cast<unsigned>(ret);
}
unsigned PeeringState::get_delete_priority()
{
auto state = get_osdmap()->get_state(pg_whoami.osd);
if (state & (CEPH_OSD_BACKFILLFULL |
CEPH_OSD_FULL)) {
return OSD_DELETE_PRIORITY_FULL;
} else if (state & CEPH_OSD_NEARFULL) {
return OSD_DELETE_PRIORITY_FULLISH;
} else {
return OSD_DELETE_PRIORITY_NORMAL;
}
}
bool PeeringState::set_force_recovery(bool b)
{
bool did = false;
if (b) {
if (!(state & PG_STATE_FORCED_RECOVERY) &&
(state & (PG_STATE_DEGRADED |
PG_STATE_RECOVERY_WAIT |
PG_STATE_RECOVERING))) {
psdout(20) << "set" << dendl;
state_set(PG_STATE_FORCED_RECOVERY);
pl->publish_stats_to_osd();
did = true;
}
} else if (state & PG_STATE_FORCED_RECOVERY) {
psdout(20) << "clear" << dendl;
state_clear(PG_STATE_FORCED_RECOVERY);
pl->publish_stats_to_osd();
did = true;
}
if (did) {
psdout(20) << "state " << get_current_state()
<< dendl;
pl->update_local_background_io_priority(get_recovery_priority());
}
return did;
}
bool PeeringState::set_force_backfill(bool b)
{
bool did = false;
if (b) {
if (!(state & PG_STATE_FORCED_BACKFILL) &&
(state & (PG_STATE_DEGRADED |
PG_STATE_BACKFILL_WAIT |
PG_STATE_BACKFILLING))) {
psdout(10) << "set" << dendl;
state_set(PG_STATE_FORCED_BACKFILL);
pl->publish_stats_to_osd();
did = true;
}
} else if (state & PG_STATE_FORCED_BACKFILL) {
psdout(10) << "clear" << dendl;
state_clear(PG_STATE_FORCED_BACKFILL);
pl->publish_stats_to_osd();
did = true;
}
if (did) {
psdout(20) << "state " << get_current_state()
<< dendl;
pl->update_local_background_io_priority(get_backfill_priority());
}
return did;
}
void PeeringState::schedule_renew_lease()
{
pl->schedule_renew_lease(
last_peering_reset,
readable_interval / 2);
}
void PeeringState::send_lease()
{
epoch_t epoch = pl->get_osdmap_epoch();
for (auto peer : actingset) {
if (peer == pg_whoami) {
continue;
}
pl->send_cluster_message(
peer.osd,
TOPNSPC::make_message<MOSDPGLease>(epoch,
spg_t(spgid.pgid, peer.shard),
get_lease()),
epoch);
}
}
void PeeringState::proc_lease(const pg_lease_t& l)
{
assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
if (!is_nonprimary()) {
psdout(20) << "no-op, !nonprimary" << dendl;
return;
}
psdout(10) << l << dendl;
if (l.readable_until_ub > readable_until_ub_from_primary) {
readable_until_ub_from_primary = l.readable_until_ub;
}
ceph::signedspan ru = ceph::signedspan::zero();
if (l.readable_until != ceph::signedspan::zero() &&
hb_stamps[0]->peer_clock_delta_ub) {
ru = l.readable_until - *hb_stamps[0]->peer_clock_delta_ub;
psdout(20) << " peer_clock_delta_ub " << *hb_stamps[0]->peer_clock_delta_ub
<< " -> ru " << ru << dendl;
}
if (ru > readable_until) {
readable_until = ru;
psdout(20) << "readable_until now " << readable_until << dendl;
// NOTE: if we ever decide to block/queue ops on the replica,
// we'll need to wake them up here.
}
ceph::signedspan ruub;
if (hb_stamps[0]->peer_clock_delta_lb) {
ruub = l.readable_until_ub - *hb_stamps[0]->peer_clock_delta_lb;
psdout(20) << " peer_clock_delta_lb " << *hb_stamps[0]->peer_clock_delta_lb
<< " -> ruub " << ruub << dendl;
} else {
ruub = pl->get_mnow() + l.interval;
psdout(20) << " no peer_clock_delta_lb -> ruub " << ruub << dendl;
}
if (ruub > readable_until_ub) {
readable_until_ub = ruub;
psdout(20) << "readable_until_ub now " << readable_until_ub
<< dendl;
}
}
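// Illustrative numbers (made up): reading peer_clock_delta_ub as an upper
// bound on (primary clock - local clock), a lease with readable_until = 125s
// on the primary's clock and a delta upper bound of 2s yields a conservative
// local readable_until of 123s.  When no lower bound on the clock delta is
// known, the upper bound falls back to mnow + l.interval as coded above.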
void PeeringState::proc_lease_ack(int from, const pg_lease_ack_t& a)
{
assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
auto now = pl->get_mnow();
bool was_min = false;
for (unsigned i = 0; i < acting.size(); ++i) {
if (from == acting[i]) {
// the lease_ack value is based on the primary's clock
if (a.readable_until_ub > acting_readable_until_ub[i]) {
if (acting_readable_until_ub[i] == readable_until) {
was_min = true;
}
acting_readable_until_ub[i] = a.readable_until_ub;
break;
}
}
}
if (was_min) {
auto old_ru = readable_until;
recalc_readable_until();
if (now >= old_ru) {
pl->recheck_readable();
}
}
}
void PeeringState::proc_renew_lease()
{
assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
renew_lease(pl->get_mnow());
send_lease();
schedule_renew_lease();
}
void PeeringState::recalc_readable_until()
{
assert(is_primary());
ceph::signedspan min = readable_until_ub_sent;
for (unsigned i = 0; i < acting.size(); ++i) {
if (acting[i] == pg_whoami.osd || acting[i] == CRUSH_ITEM_NONE) {
continue;
}
dout(20) << "peer osd." << acting[i]
<< " ruub " << acting_readable_until_ub[i] << dendl;
if (acting_readable_until_ub[i] < min) {
min = acting_readable_until_ub[i];
}
}
readable_until = min;
readable_until_ub = min;
dout(20) << "readable_until[_ub] " << readable_until
<< " (sent " << readable_until_ub_sent << ")" << dendl;
}
bool PeeringState::check_prior_readable_down_osds(const OSDMapRef& map)
{
assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
bool changed = false;
auto p = prior_readable_down_osds.begin();
while (p != prior_readable_down_osds.end()) {
if (map->is_dead(*p)) {
dout(10) << "prior_readable_down_osds osd." << *p
<< " is dead as of epoch " << map->get_epoch()
<< dendl;
p = prior_readable_down_osds.erase(p);
changed = true;
} else {
++p;
}
}
if (changed && prior_readable_down_osds.empty()) {
psdout(10) << " empty prior_readable_down_osds, clearing ub" << dendl;
clear_prior_readable_until_ub();
return true;
}
return false;
}
bool PeeringState::adjust_need_up_thru(const OSDMapRef osdmap)
{
epoch_t up_thru = osdmap->get_up_thru(pg_whoami.osd);
if (need_up_thru &&
up_thru >= info.history.same_interval_since) {
psdout(10) << "adjust_need_up_thru now "
<< up_thru << ", need_up_thru now false" << dendl;
need_up_thru = false;
return true;
}
return false;
}
PastIntervals::PriorSet PeeringState::build_prior()
{
if (1) {
// sanity check
for (auto it = peer_info.begin(); it != peer_info.end(); ++it) {
ceph_assert(info.history.last_epoch_started >=
it->second.history.last_epoch_started);
}
}
const OSDMap &osdmap = *get_osdmap();
PastIntervals::PriorSet prior = past_intervals.get_prior_set(
pool.info.is_erasure(),
info.history.last_epoch_started,
&missing_loc.get_recoverable_predicate(),
[&](epoch_t start, int osd, epoch_t *lost_at) {
const osd_info_t *pinfo = 0;
if (osdmap.exists(osd)) {
pinfo = &osdmap.get_info(osd);
if (lost_at)
*lost_at = pinfo->lost_at;
}
if (osdmap.is_up(osd)) {
return PastIntervals::UP;
} else if (!pinfo) {
return PastIntervals::DNE;
} else if (pinfo->lost_at > start) {
return PastIntervals::LOST;
} else {
return PastIntervals::DOWN;
}
},
up,
acting,
dpp);
if (prior.pg_down) {
state_set(PG_STATE_DOWN);
}
if (get_osdmap()->get_up_thru(pg_whoami.osd) <
info.history.same_interval_since) {
psdout(10) << "up_thru " << get_osdmap()->get_up_thru(pg_whoami.osd)
<< " < same_since " << info.history.same_interval_since
<< ", must notify monitor" << dendl;
need_up_thru = true;
} else {
psdout(10) << "up_thru " << get_osdmap()->get_up_thru(pg_whoami.osd)
<< " >= same_since " << info.history.same_interval_since
<< ", all is well" << dendl;
need_up_thru = false;
}
pl->set_probe_targets(prior.probe);
return prior;
}
bool PeeringState::needs_recovery() const
{
ceph_assert(is_primary());
auto &missing = pg_log.get_missing();
if (missing.num_missing()) {
psdout(10) << "primary has " << missing.num_missing()
<< " missing" << dendl;
return true;
}
ceph_assert(!acting_recovery_backfill.empty());
for (const pg_shard_t& peer : acting_recovery_backfill) {
if (peer == get_primary()) {
continue;
}
auto pm = peer_missing.find(peer);
if (pm == peer_missing.end()) {
psdout(10) << "osd." << peer << " doesn't have missing set"
<< dendl;
continue;
}
if (pm->second.num_missing()) {
psdout(10) << "osd." << peer << " has "
<< pm->second.num_missing() << " missing" << dendl;
return true;
}
}
psdout(10) << "is recovered" << dendl;
return false;
}
bool PeeringState::needs_backfill() const
{
ceph_assert(is_primary());
// We can assume that the only OSDs that might need backfill
// are those in the backfill_targets vector.
for (const pg_shard_t& peer : backfill_targets) {
auto pi = peer_info.find(peer);
ceph_assert(pi != peer_info.end());
if (!pi->second.last_backfill.is_max()) {
psdout(10) << "osd." << peer
<< " has last_backfill " << pi->second.last_backfill << dendl;
return true;
}
}
psdout(10) << "does not need backfill" << dendl;
return false;
}
/**
* Returns whether a particular object can be safely read on this replica
*/
bool PeeringState::can_serve_replica_read(const hobject_t &hoid)
{
ceph_assert(!is_primary());
eversion_t min_last_complete_ondisk = get_min_last_complete_ondisk();
if (!pg_log.get_log().has_write_since(
hoid, min_last_complete_ondisk)) {
psdout(20) << "can be safely read on this replica" << dendl;
return true;
} else {
psdout(20) << "can't read object on this replica" << dendl;
return false;
}
}
/*
* Returns true unless there is a non-lost OSD in might_have_unfound.
*/
bool PeeringState::all_unfound_are_queried_or_lost(
const OSDMapRef osdmap) const
{
ceph_assert(is_primary());
auto peer = might_have_unfound.begin();
auto mend = might_have_unfound.end();
for (; peer != mend; ++peer) {
if (peer_missing.count(*peer))
continue;
auto iter = peer_info.find(*peer);
if (iter != peer_info.end() &&
(iter->second.is_empty() || iter->second.dne()))
continue;
if (!osdmap->exists(peer->osd))
continue;
const osd_info_t &osd_info(osdmap->get_info(peer->osd));
if (osd_info.lost_at <= osd_info.up_from) {
// If there is even one OSD in might_have_unfound that isn't lost, we
// still might retrieve our unfound.
return false;
}
}
psdout(10) << "all_unfound_are_queried_or_lost all of might_have_unfound "
<< might_have_unfound
<< " have been queried or are marked lost" << dendl;
return true;
}
void PeeringState::reject_reservation()
{
pl->unreserve_recovery_space();
pl->send_cluster_message(
primary.osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::REJECT_TOOFULL,
spg_t(info.pgid.pgid, primary.shard),
get_osdmap_epoch()),
get_osdmap_epoch());
}
/**
* find_best_info
*
* Returns an iterator to the best info in infos sorted by:
 *  1) Prefer newer last_update (older for pools that require rollback, i.e. EC)
 *  2) Prefer longer tail if it brings another info into contiguity
 *  3) Prefer complete infos over those still missing objects
 *  4) Prefer current primary
*/
map<pg_shard_t, pg_info_t>::const_iterator PeeringState::find_best_info(
const map<pg_shard_t, pg_info_t> &infos,
bool restrict_to_up_acting,
bool *history_les_bound) const
{
ceph_assert(history_les_bound);
/* See doc/dev/osd_internals/last_epoch_started.rst before attempting
* to make changes to this process. Also, make sure to update it
* when you find bugs! */
epoch_t max_last_epoch_started_found = 0;
for (auto i = infos.begin(); i != infos.end(); ++i) {
if (!cct->_conf->osd_find_best_info_ignore_history_les &&
max_last_epoch_started_found < i->second.history.last_epoch_started) {
*history_les_bound = true;
max_last_epoch_started_found = i->second.history.last_epoch_started;
}
if (!i->second.is_incomplete() &&
max_last_epoch_started_found < i->second.last_epoch_started) {
*history_les_bound = false;
max_last_epoch_started_found = i->second.last_epoch_started;
}
}
eversion_t min_last_update_acceptable = eversion_t::max();
for (auto i = infos.begin(); i != infos.end(); ++i) {
if (max_last_epoch_started_found <= i->second.last_epoch_started) {
if (min_last_update_acceptable > i->second.last_update)
min_last_update_acceptable = i->second.last_update;
}
}
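  // min_last_update_acceptable is the oldest last_update among the infos
  // that participated in the newest last_epoch_started; a log ending before
  // it cannot be authoritative.  For example (illustrative values only), if
  // that epoch was reached by peers with last_update 5'20 and 5'18, the
  // floor is 5'18.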
if (min_last_update_acceptable == eversion_t::max())
return infos.end();
auto best = infos.end();
// find osd with newest last_update (oldest for ec_pool).
// if there are multiples, prefer
// - a longer tail, if it brings another peer into log contiguity
// - the current primary
for (auto p = infos.begin(); p != infos.end(); ++p) {
if (restrict_to_up_acting && !is_up(p->first) &&
!is_acting(p->first))
continue;
// Only consider peers with last_update >= min_last_update_acceptable
if (p->second.last_update < min_last_update_acceptable)
continue;
// Disqualify anyone with a too old last_epoch_started
if (p->second.last_epoch_started < max_last_epoch_started_found)
continue;
// Disqualify anyone who is incomplete (not fully backfilled)
if (p->second.is_incomplete())
continue;
if (best == infos.end()) {
best = p;
continue;
}
// Prefer newer last_update
if (pool.info.require_rollback()) {
if (p->second.last_update > best->second.last_update)
continue;
if (p->second.last_update < best->second.last_update) {
best = p;
continue;
}
} else {
if (p->second.last_update < best->second.last_update)
continue;
if (p->second.last_update > best->second.last_update) {
best = p;
continue;
}
}
// Prefer longer tail
if (p->second.log_tail > best->second.log_tail) {
continue;
} else if (p->second.log_tail < best->second.log_tail) {
best = p;
continue;
}
if (!p->second.has_missing() && best->second.has_missing()) {
psdout(10) << "prefer osd." << p->first
<< " because it is complete while best has missing"
<< dendl;
best = p;
continue;
} else if (p->second.has_missing() && !best->second.has_missing()) {
psdout(10) << "skipping osd." << p->first
<< " because it has missing while best is complete"
<< dendl;
continue;
} else {
// both are complete or have missing
// fall through
}
// prefer current primary (usually the caller), all things being equal
if (p->first == pg_whoami) {
psdout(10) << "calc_acting prefer osd." << p->first
<< " because it is current primary" << dendl;
best = p;
continue;
}
}
return best;
}
void PeeringState::calc_ec_acting(
map<pg_shard_t, pg_info_t>::const_iterator auth_log_shard,
unsigned size,
const vector<int> &acting,
const vector<int> &up,
const map<pg_shard_t, pg_info_t> &all_info,
bool restrict_to_up_acting,
vector<int> *_want,
set<pg_shard_t> *backfill,
set<pg_shard_t> *acting_backfill,
ostream &ss)
{
vector<int> want(size, CRUSH_ITEM_NONE);
map<shard_id_t, set<pg_shard_t> > all_info_by_shard;
for (auto i = all_info.begin();
i != all_info.end();
++i) {
all_info_by_shard[i->first.shard].insert(i->first);
}
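  // all_info_by_shard groups peers by EC shard id so that, when neither
  // up[i] nor acting[i] can serve position i, a stray peer holding that
  // shard can be considered below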
for (uint8_t i = 0; i < want.size(); ++i) {
ss << "For position " << (unsigned)i << ": ";
if (up.size() > (unsigned)i && up[i] != CRUSH_ITEM_NONE &&
!all_info.find(pg_shard_t(up[i], shard_id_t(i)))->second.is_incomplete() &&
all_info.find(pg_shard_t(up[i], shard_id_t(i)))->second.last_update >=
auth_log_shard->second.log_tail) {
ss << " selecting up[i]: " << pg_shard_t(up[i], shard_id_t(i)) << std::endl;
want[i] = up[i];
continue;
}
if (up.size() > (unsigned)i && up[i] != CRUSH_ITEM_NONE) {
ss << " backfilling up[i]: " << pg_shard_t(up[i], shard_id_t(i))
<< " and ";
backfill->insert(pg_shard_t(up[i], shard_id_t(i)));
}
if (acting.size() > (unsigned)i && acting[i] != CRUSH_ITEM_NONE &&
!all_info.find(pg_shard_t(acting[i], shard_id_t(i)))->second.is_incomplete() &&
all_info.find(pg_shard_t(acting[i], shard_id_t(i)))->second.last_update >=
auth_log_shard->second.log_tail) {
ss << " selecting acting[i]: " << pg_shard_t(acting[i], shard_id_t(i)) << std::endl;
want[i] = acting[i];
} else if (!restrict_to_up_acting) {
for (auto j = all_info_by_shard[shard_id_t(i)].begin();
j != all_info_by_shard[shard_id_t(i)].end();
++j) {
ceph_assert(j->shard == i);
if (!all_info.find(*j)->second.is_incomplete() &&
all_info.find(*j)->second.last_update >=
auth_log_shard->second.log_tail) {
ss << " selecting stray: " << *j << std::endl;
want[i] = j->osd;
break;
}
}
if (want[i] == CRUSH_ITEM_NONE)
ss << " failed to fill position " << (int)i << std::endl;
}
}
for (uint8_t i = 0; i < want.size(); ++i) {
if (want[i] != CRUSH_ITEM_NONE) {
acting_backfill->insert(pg_shard_t(want[i], shard_id_t(i)));
}
}
acting_backfill->insert(backfill->begin(), backfill->end());
_want->swap(want);
}
std::pair<map<pg_shard_t, pg_info_t>::const_iterator, eversion_t>
PeeringState::select_replicated_primary(
map<pg_shard_t, pg_info_t>::const_iterator auth_log_shard,
uint64_t force_auth_primary_missing_objects,
const std::vector<int> &up,
pg_shard_t up_primary,
const map<pg_shard_t, pg_info_t> &all_info,
const OSDMapRef osdmap,
ostream &ss)
{
pg_shard_t auth_log_shard_id = auth_log_shard->first;
ss << __func__ << " newest update on osd." << auth_log_shard_id
<< " with " << auth_log_shard->second << std::endl;
// select primary
auto primary = all_info.find(up_primary);
if (up.size() &&
!primary->second.is_incomplete() &&
primary->second.last_update >=
auth_log_shard->second.log_tail) {
assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
auto approx_missing_objects =
primary->second.stats.stats.sum.num_objects_missing;
auto auth_version = auth_log_shard->second.last_update.version;
auto primary_version = primary->second.last_update.version;
if (auth_version > primary_version) {
approx_missing_objects += auth_version - primary_version;
} else {
approx_missing_objects += primary_version - auth_version;
}
if ((uint64_t)approx_missing_objects >
force_auth_primary_missing_objects) {
primary = auth_log_shard;
ss << "up_primary: " << up_primary << ") has approximate "
<< approx_missing_objects
<< "(>" << force_auth_primary_missing_objects <<") "
<< "missing objects, osd." << auth_log_shard_id
<< " selected as primary instead"
<< std::endl;
} else {
ss << "up_primary: " << up_primary << ") selected as primary"
<< std::endl;
}
} else {
ceph_assert(!auth_log_shard->second.is_incomplete());
ss << "up[0] needs backfill, osd." << auth_log_shard_id
<< " selected as primary instead" << std::endl;
primary = auth_log_shard;
}
ss << __func__ << " primary is osd." << primary->first
<< " with " << primary->second << std::endl;
/* We include auth_log_shard->second.log_tail because in GetLog,
* we will request logs back to the min last_update over our
* acting_backfill set, which will result in our log being extended
* as far backwards as necessary to pick up any peers which can
* be log recovered by auth_log_shard's log */
eversion_t oldest_auth_log_entry =
std::min(primary->second.log_tail, auth_log_shard->second.log_tail);
return std::make_pair(primary, oldest_auth_log_entry);
}
/**
* calculate the desired acting set.
*
* Choose an appropriate acting set. Prefer up[0], unless it is
* incomplete, or another osd has a longer tail that allows us to
* bring other up nodes up to date.
*/
void PeeringState::calc_replicated_acting(
map<pg_shard_t, pg_info_t>::const_iterator primary,
eversion_t oldest_auth_log_entry,
unsigned size,
const vector<int> &acting,
const vector<int> &up,
pg_shard_t up_primary,
const map<pg_shard_t, pg_info_t> &all_info,
bool restrict_to_up_acting,
vector<int> *want,
set<pg_shard_t> *backfill,
set<pg_shard_t> *acting_backfill,
const OSDMapRef osdmap,
const PGPool& pool,
ostream &ss)
{
ss << __func__ << (restrict_to_up_acting ? " restrict_to_up_acting" : "")
<< std::endl;
want->push_back(primary->first.osd);
acting_backfill->insert(primary->first);
// select replicas that have log contiguity with primary.
// prefer up, then acting, then any peer_info osds
for (auto i : up) {
pg_shard_t up_cand = pg_shard_t(i, shard_id_t::NO_SHARD);
if (up_cand == primary->first)
continue;
const pg_info_t &cur_info = all_info.find(up_cand)->second;
if (cur_info.is_incomplete() ||
cur_info.last_update < oldest_auth_log_entry) {
ss << " shard " << up_cand << " (up) backfill " << cur_info << std::endl;
backfill->insert(up_cand);
acting_backfill->insert(up_cand);
} else {
want->push_back(i);
acting_backfill->insert(up_cand);
ss << " osd." << i << " (up) accepted " << cur_info << std::endl;
}
}
if (want->size() >= size) {
return;
}
std::vector<std::pair<eversion_t, int>> candidate_by_last_update;
candidate_by_last_update.reserve(acting.size());
// This no longer has backfill OSDs, but they are covered above.
for (auto i : acting) {
pg_shard_t acting_cand(i, shard_id_t::NO_SHARD);
// skip up osds we already considered above
if (acting_cand == primary->first)
continue;
auto up_it = find(up.begin(), up.end(), i);
if (up_it != up.end())
continue;
const pg_info_t &cur_info = all_info.find(acting_cand)->second;
if (cur_info.is_incomplete() ||
cur_info.last_update < oldest_auth_log_entry) {
ss << " shard " << acting_cand << " (acting) REJECTED "
<< cur_info << std::endl;
} else {
candidate_by_last_update.emplace_back(cur_info.last_update, i);
}
}
auto sort_by_eversion =[](const std::pair<eversion_t, int> &lhs,
const std::pair<eversion_t, int> &rhs) {
return lhs.first > rhs.first;
};
// sort by last_update, in descending order.
std::sort(candidate_by_last_update.begin(),
candidate_by_last_update.end(), sort_by_eversion);
for (auto &p: candidate_by_last_update) {
ceph_assert(want->size() < size);
want->push_back(p.second);
pg_shard_t s = pg_shard_t(p.second, shard_id_t::NO_SHARD);
acting_backfill->insert(s);
ss << " shard " << s << " (acting) accepted "
<< all_info.find(s)->second << std::endl;
if (want->size() >= size) {
return;
}
}
if (restrict_to_up_acting) {
return;
}
candidate_by_last_update.clear();
candidate_by_last_update.reserve(all_info.size()); // overestimate but fine
// continue to search stray to find more suitable peers
for (auto &i : all_info) {
// skip up osds we already considered above
if (i.first == primary->first)
continue;
auto up_it = find(up.begin(), up.end(), i.first.osd);
if (up_it != up.end())
continue;
auto acting_it = find(
acting.begin(), acting.end(), i.first.osd);
if (acting_it != acting.end())
continue;
if (i.second.is_incomplete() ||
i.second.last_update < oldest_auth_log_entry) {
ss << " shard " << i.first << " (stray) REJECTED " << i.second
<< std::endl;
} else {
candidate_by_last_update.emplace_back(
i.second.last_update, i.first.osd);
}
}
if (candidate_by_last_update.empty()) {
// save us some effort
return;
}
// sort by last_update, in descending order.
std::sort(candidate_by_last_update.begin(),
candidate_by_last_update.end(), sort_by_eversion);
for (auto &p: candidate_by_last_update) {
ceph_assert(want->size() < size);
want->push_back(p.second);
pg_shard_t s = pg_shard_t(p.second, shard_id_t::NO_SHARD);
acting_backfill->insert(s);
ss << " shard " << s << " (stray) accepted "
<< all_info.find(s)->second << std::endl;
if (want->size() >= size) {
return;
}
}
}
// Defines osd preference order: acting set, then larger last_update
using osd_ord_t = std::tuple<bool, eversion_t>; // <acting, last_update>
using osd_id_t = int;
class bucket_candidates_t {
std::deque<std::pair<osd_ord_t, osd_id_t>> osds;
int selected = 0;
public:
void add_osd(osd_ord_t ord, osd_id_t osd) {
// osds will be added in smallest to largest order
assert(osds.empty() || osds.back().first <= ord);
osds.push_back(std::make_pair(ord, osd));
}
osd_id_t pop_osd() {
ceph_assert(!is_empty());
auto ret = osds.front();
osds.pop_front();
return ret.second;
}
void inc_selected() { selected++; }
unsigned get_num_selected() const { return selected; }
osd_ord_t get_ord() const {
return osds.empty() ? std::make_tuple(false, eversion_t())
: osds.front().first;
}
bool is_empty() const { return osds.empty(); }
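  // orders buckets for the max-heap below: a bucket with fewer OSDs already
  // selected compares greater (note the negated 'selected'), so it is popped
  // first; ties are broken by the ordering token of its next candidate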
bool operator<(const bucket_candidates_t &rhs) const {
return std::make_tuple(-selected, get_ord()) <
std::make_tuple(-rhs.selected, rhs.get_ord());
}
friend std::ostream &operator<<(std::ostream &, const bucket_candidates_t &);
};
std::ostream &operator<<(std::ostream &lhs, const bucket_candidates_t &cand)
{
return lhs << "candidates[" << cand.osds << "]";
}
class bucket_heap_t {
using elem_t = std::reference_wrapper<bucket_candidates_t>;
std::vector<elem_t> heap;
// Max heap -- should emit buckets in order of preference
struct comp {
bool operator()(const elem_t &lhs, const elem_t &rhs) {
return lhs.get() < rhs.get();
}
};
public:
void push_if_nonempty(elem_t e) {
if (!e.get().is_empty()) {
heap.push_back(e);
std::push_heap(heap.begin(), heap.end(), comp());
}
}
elem_t pop() {
std::pop_heap(heap.begin(), heap.end(), comp());
auto ret = heap.back();
heap.pop_back();
return ret;
}
bool is_empty() const { return heap.empty(); }
};
/**
* calc_replicated_acting_stretch
*
* Choose an acting set using as much of the up set as possible; filling
* in the remaining slots so as to maximize the number of crush buckets at
* level pool.info.peering_crush_bucket_barrier represented.
*
* Stretch clusters are a bit special: while they have a "size" the
* same way as normal pools, if we happen to lose a data center
* (we call it a "stretch bucket", but really it'll be a data center or
* a cloud availability zone), we don't actually want to shove
* 2 DC's worth of replication into a single site -- it won't fit!
* So we locally calculate a bucket_max, based
* on the targeted number of stretch buckets for the pool and
* its size. Then we won't pull more than bucket_max from any
* given ancestor even if it leaves us undersized.
* There are two distinct phases: (commented below)
*/
void PeeringState::calc_replicated_acting_stretch(
map<pg_shard_t, pg_info_t>::const_iterator primary,
eversion_t oldest_auth_log_entry,
unsigned size,
const vector<int> &acting,
const vector<int> &up,
pg_shard_t up_primary,
const map<pg_shard_t, pg_info_t> &all_info,
bool restrict_to_up_acting,
vector<int> *want,
set<pg_shard_t> *backfill,
set<pg_shard_t> *acting_backfill,
const OSDMapRef osdmap,
const PGPool& pool,
ostream &ss)
{
ceph_assert(want);
ceph_assert(acting_backfill);
ceph_assert(backfill);
ss << __func__ << (restrict_to_up_acting ? " restrict_to_up_acting" : "")
<< std::endl;
auto used = [want](int osd) {
return std::find(want->begin(), want->end(), osd) != want->end();
};
auto usable_info = [&](const auto &cur_info) mutable {
return !(cur_info.is_incomplete() ||
cur_info.last_update < oldest_auth_log_entry);
};
auto osd_info = [&](int osd) mutable -> const pg_info_t & {
pg_shard_t cand = pg_shard_t(osd, shard_id_t::NO_SHARD);
const pg_info_t &cur_info = all_info.find(cand)->second;
return cur_info;
};
auto usable_osd = [&](int osd) mutable {
return usable_info(osd_info(osd));
};
std::map<int, bucket_candidates_t> ancestors;
auto get_ancestor = [&](int osd) mutable {
int ancestor = osdmap->crush->get_parent_of_type(
osd,
pool.info.peering_crush_bucket_barrier,
pool.info.crush_rule);
return &ancestors[ancestor];
};
unsigned bucket_max = pool.info.size / pool.info.peering_crush_bucket_target;
if (bucket_max * pool.info.peering_crush_bucket_target < pool.info.size) {
++bucket_max;
}
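  // bucket_max is the ceiling of size / peering_crush_bucket_target, e.g.
  // size=4, target=2 -> 2 and size=5, target=2 -> 3; no single stretch
  // bucket may contribute more than bucket_max members to the acting set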
/* 1) Select all usable osds from the up set as well as the primary
*
* We also stash any unusable osds from up into backfill.
*/
auto add_required = [&](int osd) {
if (!used(osd)) {
want->push_back(osd);
acting_backfill->insert(
pg_shard_t(osd, shard_id_t::NO_SHARD));
get_ancestor(osd)->inc_selected();
}
};
add_required(primary->first.osd);
ss << " osd " << primary->first.osd << " primary accepted "
<< osd_info(primary->first.osd) << std::endl;
for (auto upcand: up) {
auto upshard = pg_shard_t(upcand, shard_id_t::NO_SHARD);
auto &curinfo = osd_info(upcand);
if (usable_osd(upcand)) {
ss << " osd " << upcand << " (up) accepted " << curinfo << std::endl;
add_required(upcand);
} else {
ss << " osd " << upcand << " (up) backfill " << curinfo << std::endl;
backfill->insert(upshard);
acting_backfill->insert(upshard);
}
}
if (want->size() >= pool.info.size) { // non-failed CRUSH mappings are valid
ss << " up set sufficient" << std::endl;
return;
}
ss << " up set insufficient, considering remaining osds" << std::endl;
/* 2) Fill out remaining slots from usable osds in all_info
* while maximizing the number of ancestor nodes at the
* barrier_id crush level.
*/
{
std::vector<std::pair<osd_ord_t, osd_id_t>> candidates;
/* To do this, we first filter the set of usable osd into an ordered
* list of usable osds
*/
auto get_osd_ord = [&](bool is_acting, const pg_info_t &info) -> osd_ord_t {
return std::make_tuple(
!is_acting /* acting should sort first */,
info.last_update);
};
for (auto &cand : acting) {
auto &cand_info = osd_info(cand);
if (!used(cand) && usable_info(cand_info)) {
ss << " acting candidate " << cand << " " << cand_info << std::endl;
candidates.push_back(std::make_pair(get_osd_ord(true, cand_info), cand));
}
}
if (!restrict_to_up_acting) {
for (auto &[cand, info] : all_info) {
if (!used(cand.osd) && usable_info(info) &&
(std::find(acting.begin(), acting.end(), cand.osd)
== acting.end())) {
ss << " other candidate " << cand << " " << info << std::endl;
candidates.push_back(
std::make_pair(get_osd_ord(false, info), cand.osd));
}
}
}
std::sort(candidates.begin(), candidates.end());
// We then filter these candidates by ancestor
std::for_each(candidates.begin(), candidates.end(), [&](auto cand) {
get_ancestor(cand.second)->add_osd(cand.first, cand.second);
});
}
auto pop_ancestor = [&](auto &ancestor) {
ceph_assert(!ancestor.is_empty());
auto osd = ancestor.pop_osd();
ss << " accepting candidate " << osd << std::endl;
ceph_assert(!used(osd));
ceph_assert(usable_osd(osd));
want->push_back(osd);
acting_backfill->insert(
pg_shard_t(osd, shard_id_t::NO_SHARD));
ancestor.inc_selected();
};
/* Next, we use the ancestors map to grab a descendant of the
* peering_crush_mandatory_member if not already represented.
*
   * The mandatory member is considered unset when it equals CRUSH_ITEM_NONE.
*/
if (pool.info.peering_crush_mandatory_member != CRUSH_ITEM_NONE) {
auto aiter = ancestors.find(pool.info.peering_crush_mandatory_member);
if (aiter != ancestors.end() &&
!aiter->second.get_num_selected()) {
ss << " adding required ancestor " << aiter->first << std::endl;
ceph_assert(!aiter->second.is_empty()); // wouldn't exist otherwise
pop_ancestor(aiter->second);
}
}
/* We then place the ancestors in a heap ordered by fewest selected
* and then by the ordering token of the next osd */
bucket_heap_t aheap;
std::for_each(ancestors.begin(), ancestors.end(), [&](auto &anc) {
aheap.push_if_nonempty(anc.second);
});
/* and pull from this heap until it's empty or we have enough.
* "We have enough" is a sufficient check here for
* stretch_set_can_peer() because our heap sorting always
* pulls from ancestors with the least number of included OSDs,
* so if it is possible to satisfy the bucket_count constraints we
* will do so.
*/
while (!aheap.is_empty() && want->size() < pool.info.size) {
auto next = aheap.pop();
pop_ancestor(next.get());
if (next.get().get_num_selected() < bucket_max) {
aheap.push_if_nonempty(next);
}
}
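  // a bucket is re-pushed only while its selected count stays below
  // bucket_max, which is what enforces the per-bucket cap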
/* The end result is that we should have as many buckets covered as
* possible while respecting up, the primary selection,
* the pool size (given bucket count constraints),
* and the mandatory member.
*/
}
bool PeeringState::recoverable(const vector<int> &want) const
{
unsigned num_want_acting = 0;
set<pg_shard_t> have;
for (int i = 0; i < (int)want.size(); ++i) {
if (want[i] != CRUSH_ITEM_NONE) {
++num_want_acting;
have.insert(
pg_shard_t(
want[i],
pool.info.is_erasure() ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
}
if (num_want_acting < pool.info.min_size) {
if (!cct->_conf.get_val<bool>("osd_allow_recovery_below_min_size")) {
psdout(10) << "failed, recovery below min size not enabled" << dendl;
return false;
}
}
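  // if we got here with fewer than min_size shards, it is only because
  // osd_allow_recovery_below_min_size is set; the recoverability predicate
  // below must still be satisfied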
if (missing_loc.get_recoverable_predicate()(have)) {
return true;
} else {
psdout(10) << "failed, not recoverable " << dendl;
return false;
}
}
void PeeringState::choose_async_recovery_ec(
const map<pg_shard_t, pg_info_t> &all_info,
const pg_info_t &auth_info,
vector<int> *want,
set<pg_shard_t> *async_recovery,
const OSDMapRef osdmap) const
{
set<pair<int, pg_shard_t> > candidates_by_cost;
for (uint8_t i = 0; i < want->size(); ++i) {
if ((*want)[i] == CRUSH_ITEM_NONE)
continue;
// Considering log entries to recover is accurate enough for
// now. We could use minimum_to_decode_with_cost() later if
// necessary.
pg_shard_t shard_i((*want)[i], shard_id_t(i));
// do not include strays
if (stray_set.find(shard_i) != stray_set.end())
continue;
// Do not include an osd that is not up, since choosing it as
// an async_recovery_target will move it out of the acting set.
// This results in it being identified as a stray during peering,
// because it is no longer in the up or acting set.
if (!is_up(shard_i))
continue;
auto shard_info = all_info.find(shard_i)->second;
// for ec pools we rollback all entries past the authoritative
// last_update *before* activation. This is relatively inexpensive
// compared to recovery, since it is purely local, so treat shards
// past the authoritative last_update the same as those equal to it.
version_t auth_version = auth_info.last_update.version;
version_t candidate_version = shard_info.last_update.version;
assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
auto approx_missing_objects =
shard_info.stats.stats.sum.num_objects_missing;
if (auth_version > candidate_version) {
approx_missing_objects += auth_version - candidate_version;
}
if (static_cast<uint64_t>(approx_missing_objects) >
cct->_conf.get_val<uint64_t>("osd_async_recovery_min_cost")) {
candidates_by_cost.emplace(approx_missing_objects, shard_i);
}
}
psdout(20) << "candidates by cost are: " << candidates_by_cost
<< dendl;
// take out as many osds as we can for async recovery, in order of cost
for (auto rit = candidates_by_cost.rbegin();
rit != candidates_by_cost.rend(); ++rit) {
pg_shard_t cur_shard = rit->second;
vector<int> candidate_want(*want);
candidate_want[cur_shard.shard.id] = CRUSH_ITEM_NONE;
if (recoverable(candidate_want)) {
want->swap(candidate_want);
async_recovery->insert(cur_shard);
}
}
psdout(20) << "result want=" << *want
<< " async_recovery=" << *async_recovery << dendl;
}
void PeeringState::choose_async_recovery_replicated(
const map<pg_shard_t, pg_info_t> &all_info,
const pg_info_t &auth_info,
vector<int> *want,
set<pg_shard_t> *async_recovery,
const OSDMapRef osdmap) const
{
set<pair<int, pg_shard_t> > candidates_by_cost;
for (auto osd_num : *want) {
pg_shard_t shard_i(osd_num, shard_id_t::NO_SHARD);
// do not include strays
if (stray_set.find(shard_i) != stray_set.end())
continue;
// Do not include an osd that is not up, since choosing it as
// an async_recovery_target will move it out of the acting set.
// This results in it being identified as a stray during peering,
// because it is no longer in the up or acting set.
if (!is_up(shard_i))
continue;
auto shard_info = all_info.find(shard_i)->second;
// use the approximate magnitude of the difference in length of
// logs plus historical missing objects as the cost of recovery
version_t auth_version = auth_info.last_update.version;
version_t candidate_version = shard_info.last_update.version;
assert(HAVE_FEATURE(osdmap->get_up_osd_features(), SERVER_NAUTILUS));
auto approx_missing_objects =
shard_info.stats.stats.sum.num_objects_missing;
if (auth_version > candidate_version) {
approx_missing_objects += auth_version - candidate_version;
} else {
approx_missing_objects += candidate_version - auth_version;
}
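    // e.g. (illustrative values only) a shard at version 5'90 against an
    // authoritative 5'100 with 3 objects already missing has an approximate
    // cost of 13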
if (static_cast<uint64_t>(approx_missing_objects) >
cct->_conf.get_val<uint64_t>("osd_async_recovery_min_cost")) {
candidates_by_cost.emplace(approx_missing_objects, shard_i);
}
}
psdout(20) << "candidates by cost are: " << candidates_by_cost
<< dendl;
// take out as many osds as we can for async recovery, in order of cost
for (auto rit = candidates_by_cost.rbegin();
rit != candidates_by_cost.rend(); ++rit) {
if (want->size() <= pool.info.min_size) {
break;
}
pg_shard_t cur_shard = rit->second;
vector<int> candidate_want(*want);
for (auto it = candidate_want.begin(); it != candidate_want.end(); ++it) {
if (*it == cur_shard.osd) {
candidate_want.erase(it);
if (pool.info.stretch_set_can_peer(candidate_want, *osdmap, NULL)) {
// if we're in stretch mode, we can only remove the osd if it doesn't
// break peering limits.
want->swap(candidate_want);
async_recovery->insert(cur_shard);
}
break;
}
}
}
psdout(20) << "result want=" << *want
<< " async_recovery=" << *async_recovery << dendl;
}
/**
* choose acting
*
* calculate the desired acting, and request a change with the monitor
* if it differs from the current acting.
*
* if restrict_to_up_acting=true, we filter out anything that's not in
* up/acting. in order to lift this restriction, we need to
* 1) check whether it's worth switching the acting set any time we get
* a new pg info (not just here, when recovery finishes)
* 2) check whether anything in want_acting went down on each new map
* (and, if so, calculate a new want_acting)
* 3) remove the assertion in PG::PeeringState::Active::react(const AdvMap)
* TODO!
*/
bool PeeringState::choose_acting(pg_shard_t &auth_log_shard_id,
bool restrict_to_up_acting,
bool *history_les_bound,
bool request_pg_temp_change_only)
{
map<pg_shard_t, pg_info_t> all_info(peer_info.begin(), peer_info.end());
all_info[pg_whoami] = info;
if (cct->_conf->subsys.should_gather<dout_subsys, 10>()) {
for (auto p = all_info.begin(); p != all_info.end(); ++p) {
psdout(10) << "all_info osd." << p->first << " "
<< p->second << dendl;
}
}
auto auth_log_shard = find_best_info(all_info, restrict_to_up_acting,
history_les_bound);
if (auth_log_shard == all_info.end()) {
if (up != acting) {
psdout(10) << "no suitable info found (incomplete backfills?),"
<< " reverting to up" << dendl;
want_acting = up;
vector<int> empty;
pl->queue_want_pg_temp(empty);
} else {
psdout(10) << "failed" << dendl;
ceph_assert(want_acting.empty());
}
return false;
}
ceph_assert(!auth_log_shard->second.is_incomplete());
auth_log_shard_id = auth_log_shard->first;
set<pg_shard_t> want_backfill, want_acting_backfill;
vector<int> want;
stringstream ss;
if (pool.info.is_replicated()) {
auto [primary_shard, oldest_log] = select_replicated_primary(
auth_log_shard,
cct->_conf.get_val<uint64_t>(
"osd_force_auth_primary_missing_objects"),
up,
up_primary,
all_info,
get_osdmap(),
ss);
if (pool.info.is_stretch_pool()) {
calc_replicated_acting_stretch(
primary_shard,
oldest_log,
get_osdmap()->get_pg_size(info.pgid.pgid),
acting,
up,
up_primary,
all_info,
restrict_to_up_acting,
&want,
&want_backfill,
&want_acting_backfill,
get_osdmap(),
pool,
ss);
} else {
calc_replicated_acting(
primary_shard,
oldest_log,
get_osdmap()->get_pg_size(info.pgid.pgid),
acting,
up,
up_primary,
all_info,
restrict_to_up_acting,
&want,
&want_backfill,
&want_acting_backfill,
get_osdmap(),
pool,
ss);
}
} else {
calc_ec_acting(
auth_log_shard,
get_osdmap()->get_pg_size(info.pgid.pgid),
acting,
up,
all_info,
restrict_to_up_acting,
&want,
&want_backfill,
&want_acting_backfill,
ss);
}
psdout(10) << ss.str() << dendl;
if (!recoverable(want)) {
want_acting.clear();
return false;
}
set<pg_shard_t> want_async_recovery;
if (HAVE_FEATURE(get_osdmap()->get_up_osd_features(), SERVER_MIMIC)) {
if (pool.info.is_erasure()) {
choose_async_recovery_ec(
all_info, auth_log_shard->second, &want, &want_async_recovery,
get_osdmap());
} else {
choose_async_recovery_replicated(
all_info, auth_log_shard->second, &want, &want_async_recovery,
get_osdmap());
}
}
while (want.size() > pool.info.size) {
// async recovery should have taken out as many osds as it can.
// if not, then always evict the last peer
// (will get synchronously recovered later)
psdout(10) << "evicting osd." << want.back()
<< " from oversized want " << want << dendl;
want.pop_back();
}
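  // want now fits within the pool size; if it differs from the current
  // acting set, request a pg_temp change and retry on a later map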
if (want != acting) {
psdout(10) << "want " << want << " != acting " << acting
<< ", requesting pg_temp change" << dendl;
want_acting = want;
if (!cct->_conf->osd_debug_no_acting_change) {
if (want_acting == up) {
// There can't be any pending backfill if
// want is the same as crush map up OSDs.
ceph_assert(want_backfill.empty());
vector<int> empty;
pl->queue_want_pg_temp(empty);
} else
pl->queue_want_pg_temp(want);
}
return false;
}
if (request_pg_temp_change_only)
return true;
want_acting.clear();
acting_recovery_backfill = want_acting_backfill;
psdout(10) << "acting_recovery_backfill is "
<< acting_recovery_backfill << dendl;
ceph_assert(
backfill_targets.empty() ||
backfill_targets == want_backfill);
if (backfill_targets.empty()) {
// Caller is GetInfo
backfill_targets = want_backfill;
}
// Adding !needs_recovery() to let the async_recovery_targets reset after recovery is complete
ceph_assert(
async_recovery_targets.empty() ||
async_recovery_targets == want_async_recovery ||
!needs_recovery());
if (async_recovery_targets.empty() || !needs_recovery()) {
async_recovery_targets = want_async_recovery;
}
// Will not change if already set because up would have had to change
// Verify that nothing in backfill is in stray_set
for (auto i = want_backfill.begin(); i != want_backfill.end(); ++i) {
ceph_assert(stray_set.find(*i) == stray_set.end());
}
psdout(10) << "choose_acting want=" << want << " backfill_targets="
<< want_backfill << " async_recovery_targets="
<< async_recovery_targets << dendl;
return true;
}
void PeeringState::log_weirdness()
{
if (pg_log.get_tail() != info.log_tail)
pl->get_clog_error() << info.pgid
<< " info mismatch, log.tail " << pg_log.get_tail()
<< " != info.log_tail " << info.log_tail;
if (pg_log.get_head() != info.last_update)
pl->get_clog_error() << info.pgid
<< " info mismatch, log.head " << pg_log.get_head()
<< " != info.last_update " << info.last_update;
if (!pg_log.get_log().empty()) {
// sloppy check
if ((pg_log.get_log().log.begin()->version <= pg_log.get_tail()))
pl->get_clog_error() << info.pgid
<< " log bound mismatch, info (tail,head] ("
<< pg_log.get_tail() << ","
<< pg_log.get_head() << "]"
<< " actual ["
<< pg_log.get_log().log.begin()->version << ","
<< pg_log.get_log().log.rbegin()->version << "]";
}
if (pg_log.get_log().caller_ops.size() > pg_log.get_log().log.size()) {
pl->get_clog_error() << info.pgid
<< " caller_ops.size "
<< pg_log.get_log().caller_ops.size()
<< " > log size " << pg_log.get_log().log.size();
}
}
/*
* Process information from a replica to determine if it could have any
* objects that i need.
*
* TODO: if the missing set becomes very large, this could get expensive.
* Instead, we probably want to just iterate over our unfound set.
*/
bool PeeringState::search_for_missing(
const pg_info_t &oinfo, const pg_missing_t &omissing,
pg_shard_t from,
PeeringCtxWrapper &ctx)
{
uint64_t num_unfound_before = missing_loc.num_unfound();
bool found_missing = missing_loc.add_source_info(
from, oinfo, omissing, ctx.handle);
if (found_missing && num_unfound_before != missing_loc.num_unfound())
pl->publish_stats_to_osd();
  // avoid doing this if the peer is empty.  This is a bit of paranoia
// to avoid doing something rash if add_source_info() above
// incorrectly decided we found something new. (if the peer has
// last_update=0'0 that's impossible.)
if (found_missing &&
oinfo.last_update != eversion_t()) {
pg_info_t tinfo(oinfo);
tinfo.pgid.shard = pg_whoami.shard;
ctx.send_info(
from.osd,
spg_t(info.pgid.pgid, from.shard),
get_osdmap_epoch(), // fixme: use lower epoch?
get_osdmap_epoch(),
tinfo);
}
return found_missing;
}
bool PeeringState::discover_all_missing(
BufferedRecoveryMessages &rctx)
{
auto &missing = pg_log.get_missing();
uint64_t unfound = get_num_unfound();
bool any = false; // did we start any queries
psdout(10) << missing.num_missing() << " missing, "
<< unfound << " unfound"
<< dendl;
auto m = might_have_unfound.begin();
auto mend = might_have_unfound.end();
for (; m != mend; ++m) {
pg_shard_t peer(*m);
if (!get_osdmap()->is_up(peer.osd)) {
psdout(20) << "skipping down osd." << peer << dendl;
continue;
}
if (peer_purged.count(peer)) {
psdout(20) << "skipping purged osd." << peer << dendl;
continue;
}
auto iter = peer_info.find(peer);
if (iter != peer_info.end() &&
(iter->second.is_empty() || iter->second.dne())) {
// ignore empty peers
continue;
}
// If we've requested any of this stuff, the pg_missing_t information
// should be on its way.
  // TODO: coalesce requested_* into a single data structure
if (peer_missing.find(peer) != peer_missing.end()) {
psdout(20) << ": osd." << peer
<< ": we already have pg_missing_t" << dendl;
continue;
}
if (peer_log_requested.find(peer) != peer_log_requested.end()) {
psdout(20) << ": osd." << peer
<< ": in peer_log_requested" << dendl;
continue;
}
if (peer_missing_requested.find(peer) != peer_missing_requested.end()) {
psdout(20) << ": osd." << peer
<< ": in peer_missing_requested" << dendl;
continue;
}
// Request missing
psdout(10) << ": osd." << peer << ": requesting pg_missing_t"
<< dendl;
peer_missing_requested.insert(peer);
rctx.send_query(
peer.osd,
spg_t(info.pgid.pgid, peer.shard),
pg_query_t(
pg_query_t::FULLLOG,
peer.shard, pg_whoami.shard,
info.history, get_osdmap_epoch()));
any = true;
}
return any;
}
/* Build the might_have_unfound set.
*
* This is used by the primary OSD during recovery.
*
* This set tracks the OSDs which might have unfound objects that the primary
* OSD needs. As we receive pg_missing_t from each OSD in might_have_unfound, we
* will remove the OSD from the set.
*/
void PeeringState::build_might_have_unfound()
{
ceph_assert(might_have_unfound.empty());
ceph_assert(is_primary());
psdout(10) << dendl;
check_past_interval_bounds();
might_have_unfound = past_intervals.get_might_have_unfound(
pg_whoami,
pool.info.is_erasure());
// include any (stray) peers
for (auto p = peer_info.begin(); p != peer_info.end(); ++p)
might_have_unfound.insert(p->first);
psdout(15) << ": built " << might_have_unfound << dendl;
}
void PeeringState::activate(
ObjectStore::Transaction& t,
epoch_t activation_epoch,
PeeringCtxWrapper &ctx)
{
ceph_assert(!is_peered());
// twiddle pg state
state_clear(PG_STATE_DOWN);
send_notify = false;
if (is_primary()) {
// only update primary last_epoch_started if we will go active
if (acting_set_writeable()) {
ceph_assert(cct->_conf->osd_find_best_info_ignore_history_les ||
info.last_epoch_started <= activation_epoch);
info.last_epoch_started = activation_epoch;
info.last_interval_started = info.history.same_interval_since;
}
} else if (is_acting(pg_whoami)) {
/* update last_epoch_started on acting replica to whatever the primary sent
* unless it's smaller (could happen if we are going peered rather than
* active, see doc/dev/osd_internals/last_epoch_started.rst) */
if (info.last_epoch_started < activation_epoch) {
info.last_epoch_started = activation_epoch;
info.last_interval_started = info.history.same_interval_since;
}
}
auto &missing = pg_log.get_missing();
min_last_complete_ondisk = eversion_t(0,0); // we don't know (yet)!
if (is_primary()) {
last_update_ondisk = info.last_update;
}
last_update_applied = info.last_update;
last_rollback_info_trimmed_to_applied = pg_log.get_can_rollback_to();
need_up_thru = false;
// write pg info, log
dirty_info = true;
dirty_big_info = true; // maybe
pl->schedule_event_on_commit(
t,
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
ActivateCommitted(
get_osdmap_epoch(),
activation_epoch)));
// init complete pointer
if (missing.num_missing() == 0) {
psdout(10) << "activate - no missing, moving last_complete " << info.last_complete
<< " -> " << info.last_update << dendl;
info.last_complete = info.last_update;
info.stats.stats.sum.num_objects_missing = 0;
pg_log.reset_recovery_pointers();
} else {
psdout(10) << "activate - not complete, " << missing << dendl;
info.stats.stats.sum.num_objects_missing = missing.num_missing();
pg_log.activate_not_complete(info);
}
log_weirdness();
if (is_primary()) {
// initialize snap_trimq
interval_set<snapid_t> to_trim;
auto& removed_snaps_queue = get_osdmap()->get_removed_snaps_queue();
auto p = removed_snaps_queue.find(info.pgid.pgid.pool());
if (p != removed_snaps_queue.end()) {
dout(20) << "activate - purged_snaps " << info.purged_snaps
<< " removed_snaps " << p->second
<< dendl;
for (auto q : p->second) {
to_trim.insert(q.first, q.second);
}
}
interval_set<snapid_t> purged;
purged.intersection_of(to_trim, info.purged_snaps);
to_trim.subtract(purged);
assert(HAVE_FEATURE(upacting_features, SERVER_OCTOPUS));
renew_lease(pl->get_mnow());
// do not schedule until we are actually activated
// adjust purged_snaps: PG may have been inactive while snaps were pruned
    // from the removed_snaps_queue in the osdmap.  update local purged_snaps
    // to reflect only those snaps that we thought were pruned and were still
    // in the queue.
info.purged_snaps.swap(purged);
// start up replicas
if (prior_readable_down_osds.empty()) {
dout(10) << "no prior_readable_down_osds to wait on, clearing ub"
<< dendl;
clear_prior_readable_until_ub();
}
info.history.refresh_prior_readable_until_ub(pl->get_mnow(),
prior_readable_until_ub);
ceph_assert(!acting_recovery_backfill.empty());
for (auto i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
if (*i == pg_whoami) continue;
pg_shard_t peer = *i;
ceph_assert(peer_info.count(peer));
pg_info_t& pi = peer_info[peer];
psdout(10) << "activate peer osd." << peer << " " << pi << dendl;
#if defined(WITH_SEASTAR)
MURef<MOSDPGLog> m;
#else
MRef<MOSDPGLog> m;
#endif
ceph_assert(peer_missing.count(peer));
pg_missing_t& pm = peer_missing[peer];
bool needs_past_intervals = pi.dne();
// Save num_bytes for backfill reservation request, can't be negative
peer_bytes[peer] = std::max<int64_t>(0, pi.stats.stats.sum.num_bytes);
if (pi.last_update == info.last_update) {
// empty log
if (!pi.last_backfill.is_max())
pl->get_clog_info() << info.pgid << " continuing backfill to osd."
<< peer
<< " from (" << pi.log_tail << "," << pi.last_update
<< "] " << pi.last_backfill
<< " to " << info.last_update;
if (!pi.is_empty()) {
psdout(10) << "activate peer osd." << peer
<< " is up to date, queueing in pending_activators" << dendl;
ctx.send_info(
peer.osd,
spg_t(info.pgid.pgid, peer.shard),
get_osdmap_epoch(), // fixme: use lower epoch?
get_osdmap_epoch(),
info,
get_lease());
} else {
psdout(10) << "activate peer osd." << peer
<< " is up to date, but sending pg_log anyway" << dendl;
m = TOPNSPC::make_message<MOSDPGLog>(
i->shard, pg_whoami.shard,
get_osdmap_epoch(), info,
last_peering_reset);
}
} else if (
pg_log.get_tail() > pi.last_update ||
pi.last_backfill == hobject_t() ||
(backfill_targets.count(*i) && pi.last_backfill.is_max())) {
      /* ^ This last case covers a situation where a replica is not contiguous
       * with the auth_log, but is contiguous with our own log.  Reshuffling
       * the active set to handle this would be tricky, so instead we just go
       * ahead and backfill it anyway.  This is probably preferable in any
       * case since the replica in question would have to be significantly
       * behind.
*/
// backfill
pl->get_clog_debug() << info.pgid << " starting backfill to osd." << peer
<< " from (" << pi.log_tail << "," << pi.last_update
<< "] " << pi.last_backfill
<< " to " << info.last_update;
pi.last_update = info.last_update;
pi.last_complete = info.last_update;
pi.set_last_backfill(hobject_t());
pi.last_epoch_started = info.last_epoch_started;
pi.last_interval_started = info.last_interval_started;
pi.history = info.history;
pi.hit_set = info.hit_set;
pi.stats.stats.clear();
pi.stats.stats.sum.num_bytes = peer_bytes[peer];
// initialize peer with our purged_snaps.
pi.purged_snaps = info.purged_snaps;
m = TOPNSPC::make_message<MOSDPGLog>(
i->shard, pg_whoami.shard,
get_osdmap_epoch(), pi,
last_peering_reset /* epoch to create pg at */);
// send some recent log, so that op dup detection works well.
m->log.copy_up_to(cct, pg_log.get_log(),
cct->_conf->osd_max_pg_log_entries);
m->info.log_tail = m->log.tail;
pi.log_tail = m->log.tail; // sigh...
pm.clear();
} else {
// catch up
ceph_assert(pg_log.get_tail() <= pi.last_update);
m = TOPNSPC::make_message<MOSDPGLog>(
i->shard, pg_whoami.shard,
get_osdmap_epoch(), info,
last_peering_reset /* epoch to create pg at */);
// send new stuff to append to replicas log
m->log.copy_after(cct, pg_log.get_log(), pi.last_update);
}
// share past_intervals if we are creating the pg on the replica
// based on whether our info for that peer was dne() *before*
// updating pi.history in the backfill block above.
if (m && needs_past_intervals)
m->past_intervals = past_intervals;
// update local version of peer's missing list!
if (m && pi.last_backfill != hobject_t()) {
for (auto p = m->log.log.begin(); p != m->log.log.end(); ++p) {
if (p->soid <= pi.last_backfill &&
!p->is_error()) {
if (perform_deletes_during_peering() && p->is_delete()) {
pm.rm(p->soid, p->version);
} else {
pm.add_next_event(*p);
}
}
}
}
if (m) {
dout(10) << "activate peer osd." << peer << " sending " << m->log
<< dendl;
m->lease = get_lease();
pl->send_cluster_message(peer.osd, std::move(m), get_osdmap_epoch());
}
// peer now has
pi.last_update = info.last_update;
// update our missing
if (pm.num_missing() == 0) {
pi.last_complete = pi.last_update;
psdout(10) << "activate peer osd." << peer << " " << pi
<< " uptodate" << dendl;
} else {
psdout(10) << "activate peer osd." << peer << " " << pi
<< " missing " << pm << dendl;
}
}
// Set up missing_loc
set<pg_shard_t> complete_shards;
for (auto i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
psdout(20) << "setting up missing_loc from shard " << *i
<< " " << dendl;
if (*i == get_primary()) {
missing_loc.add_active_missing(missing);
if (!missing.have_missing())
complete_shards.insert(*i);
} else {
auto peer_missing_entry = peer_missing.find(*i);
ceph_assert(peer_missing_entry != peer_missing.end());
missing_loc.add_active_missing(peer_missing_entry->second);
if (!peer_missing_entry->second.have_missing() &&
peer_info[*i].last_backfill.is_max())
complete_shards.insert(*i);
}
}
// If necessary, create might_have_unfound to help us find our unfound objects.
// NOTE: It's important that we build might_have_unfound before trimming the
// past intervals.
might_have_unfound.clear();
if (needs_recovery()) {
    // If only one shard has missing objects, we add all of the others as
    // recovery sources.  This is safe because the PGLogs have already been
    // merged locally, and it covers the vast majority of use cases, e.g. one
    // OSD/host being down for a while for hardware repair.
if (complete_shards.size() + 1 == acting_recovery_backfill.size()) {
missing_loc.add_batch_sources_info(complete_shards, ctx.handle);
} else {
missing_loc.add_source_info(pg_whoami, info, pg_log.get_missing(),
ctx.handle);
for (auto i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
if (*i == pg_whoami) continue;
psdout(10) << ": adding " << *i << " as a source" << dendl;
ceph_assert(peer_missing.count(*i));
ceph_assert(peer_info.count(*i));
missing_loc.add_source_info(
*i,
peer_info[*i],
peer_missing[*i],
ctx.handle);
}
}
for (auto i = peer_missing.begin(); i != peer_missing.end(); ++i) {
if (is_acting_recovery_backfill(i->first))
continue;
ceph_assert(peer_info.count(i->first));
search_for_missing(
peer_info[i->first],
i->second,
i->first,
ctx);
}
build_might_have_unfound();
// Always call now so update_calc_stats() will be accurate
discover_all_missing(ctx.msgs);
}
// num_objects_degraded if calculated should reflect this too, unless no
// missing and we are about to go clean.
if (get_osdmap()->get_pg_size(info.pgid.pgid) > actingset.size()) {
state_set(PG_STATE_UNDERSIZED);
}
state_set(PG_STATE_ACTIVATING);
pl->on_activate(std::move(to_trim));
}
if (acting_set_writeable()) {
PGLog::LogEntryHandlerRef rollbacker{pl->get_log_handler(t)};
pg_log.roll_forward(rollbacker.get());
}
}
void PeeringState::share_pg_info()
{
psdout(10) << "share_pg_info" << dendl;
info.history.refresh_prior_readable_until_ub(pl->get_mnow(),
prior_readable_until_ub);
// share new pg_info_t with replicas
ceph_assert(!acting_recovery_backfill.empty());
for (auto pg_shard : acting_recovery_backfill) {
if (pg_shard == pg_whoami) continue;
if (auto peer = peer_info.find(pg_shard); peer != peer_info.end()) {
peer->second.last_epoch_started = info.last_epoch_started;
peer->second.last_interval_started = info.last_interval_started;
peer->second.history.merge(info.history);
}
auto m = TOPNSPC::make_message<MOSDPGInfo2>(spg_t{info.pgid.pgid, pg_shard.shard},
info,
get_osdmap_epoch(),
get_osdmap_epoch(),
std::optional<pg_lease_t>{get_lease()},
std::nullopt);
pl->send_cluster_message(pg_shard.osd, std::move(m), get_osdmap_epoch());
}
}
void PeeringState::merge_log(
ObjectStore::Transaction& t, pg_info_t &oinfo, pg_log_t&& olog,
pg_shard_t from)
{
PGLog::LogEntryHandlerRef rollbacker{pl->get_log_handler(t)};
pg_log.merge_log(
oinfo, std::move(olog), from, info, rollbacker.get(),
dirty_info, dirty_big_info);
}
void PeeringState::rewind_divergent_log(
ObjectStore::Transaction& t, eversion_t newhead)
{
PGLog::LogEntryHandlerRef rollbacker{pl->get_log_handler(t)};
pg_log.rewind_divergent_log(
newhead, info, rollbacker.get(), dirty_info, dirty_big_info);
}
void PeeringState::proc_primary_info(
ObjectStore::Transaction &t, const pg_info_t &oinfo)
{
ceph_assert(!is_primary());
update_history(oinfo.history);
if (!info.stats.stats_invalid && info.stats.stats.sum.num_scrub_errors) {
info.stats.stats.sum.num_scrub_errors = 0;
info.stats.stats.sum.num_shallow_scrub_errors = 0;
info.stats.stats.sum.num_deep_scrub_errors = 0;
dirty_info = true;
}
if (!(info.purged_snaps == oinfo.purged_snaps)) {
psdout(10) << "updating purged_snaps to "
<< oinfo.purged_snaps
<< dendl;
info.purged_snaps = oinfo.purged_snaps;
dirty_info = true;
dirty_big_info = true;
}
}
void PeeringState::proc_master_log(
ObjectStore::Transaction& t, pg_info_t &oinfo,
pg_log_t&& olog, pg_missing_t&& omissing, pg_shard_t from)
{
psdout(10) << "proc_master_log for osd." << from << ": "
<< olog << " " << omissing << dendl;
ceph_assert(!is_peered() && is_primary());
// merge log into our own log to build master log. no need to
// make any adjustments to their missing map; we are taking their
  // log to be authoritative (i.e., their entries are by definition
  // non-divergent).
merge_log(t, oinfo, std::move(olog), from);
peer_info[from] = oinfo;
psdout(10) << " peer osd." << from << " now " << oinfo
<< " " << omissing << dendl;
might_have_unfound.insert(from);
// See doc/dev/osd_internals/last_epoch_started
if (oinfo.last_epoch_started > info.last_epoch_started) {
info.last_epoch_started = oinfo.last_epoch_started;
dirty_info = true;
}
if (oinfo.last_interval_started > info.last_interval_started) {
info.last_interval_started = oinfo.last_interval_started;
dirty_info = true;
}
update_history(oinfo.history);
ceph_assert(cct->_conf->osd_find_best_info_ignore_history_les ||
info.last_epoch_started >= info.history.last_epoch_started);
peer_missing[from].claim(std::move(omissing));
}
void PeeringState::proc_replica_log(
pg_info_t &oinfo,
const pg_log_t &olog,
pg_missing_t&& omissing,
pg_shard_t from)
{
psdout(10) << "proc_replica_log for osd." << from << ": "
<< oinfo << " " << olog << " " << omissing << dendl;
pg_log.proc_replica_log(oinfo, olog, omissing, from);
peer_info[from] = oinfo;
psdout(10) << " peer osd." << from << " now "
<< oinfo << " " << omissing << dendl;
might_have_unfound.insert(from);
for (auto i = omissing.get_items().begin();
i != omissing.get_items().end();
++i) {
psdout(20) << " after missing " << i->first
<< " need " << i->second.need
<< " have " << i->second.have << dendl;
}
peer_missing[from].claim(std::move(omissing));
}
/**
 * Update min_last_complete_ondisk to the minimum last_complete_ondisk
 * version reported by each peer.  If any peer has not reported yet, leave
 * it unchanged.
 */
void PeeringState::calc_min_last_complete_ondisk() {
ceph_assert(!acting_recovery_backfill.empty());
eversion_t min = last_complete_ondisk;
for (const auto& pg_shard : acting_recovery_backfill) {
if (pg_shard == get_primary()) {
continue;
}
if (peer_last_complete_ondisk.count(pg_shard) == 0) {
psdout(20) << "no complete info on: "
<< pg_shard << dendl;
return;
}
if (peer_last_complete_ondisk[pg_shard] < min) {
min = peer_last_complete_ondisk[pg_shard];
}
}
if (min != min_last_complete_ondisk) {
psdout(20) << "last_complete_ondisk is "
<< "updated to: " << min
<< " from: " << min_last_complete_ondisk
<< dendl;
min_last_complete_ondisk = min;
}
}
void PeeringState::fulfill_info(
pg_shard_t from, const pg_query_t &query,
pair<pg_shard_t, pg_info_t> ¬ify_info)
{
ceph_assert(from == primary);
ceph_assert(query.type == pg_query_t::INFO);
// info
psdout(10) << "sending info" << dendl;
notify_info = make_pair(from, info);
}
void PeeringState::fulfill_log(
pg_shard_t from, const pg_query_t &query, epoch_t query_epoch)
{
psdout(10) << "log request from " << from << dendl;
ceph_assert(from == primary);
ceph_assert(query.type != pg_query_t::INFO);
auto mlog = TOPNSPC::make_message<MOSDPGLog>(
from.shard, pg_whoami.shard,
get_osdmap_epoch(),
info, query_epoch);
mlog->missing = pg_log.get_missing();
// primary -> other, when building master log
if (query.type == pg_query_t::LOG) {
psdout(10) << " sending info+missing+log since " << query.since
<< dendl;
if (query.since != eversion_t() && query.since < pg_log.get_tail()) {
pl->get_clog_error() << info.pgid << " got broken pg_query_t::LOG since "
<< query.since
<< " when my log.tail is " << pg_log.get_tail()
<< ", sending full log instead";
mlog->log = pg_log.get_log(); // primary should not have requested this!!
} else
mlog->log.copy_after(cct, pg_log.get_log(), query.since);
}
else if (query.type == pg_query_t::FULLLOG) {
psdout(10) << " sending info+missing+full log" << dendl;
mlog->log = pg_log.get_log();
}
psdout(10) << " sending " << mlog->log << " " << mlog->missing << dendl;
pl->send_cluster_message(from.osd, std::move(mlog), get_osdmap_epoch(), true);
}
void PeeringState::fulfill_query(const MQuery& query, PeeringCtxWrapper &rctx)
{
if (query.query.type == pg_query_t::INFO) {
pair<pg_shard_t, pg_info_t> notify_info;
// note this refreshes our prior_readable_until_ub value
update_history(query.query.history);
fulfill_info(query.from, query.query, notify_info);
rctx.send_notify(
notify_info.first.osd,
pg_notify_t(
notify_info.first.shard, pg_whoami.shard,
query.query_epoch,
get_osdmap_epoch(),
notify_info.second,
past_intervals));
} else {
update_history(query.query.history);
fulfill_log(query.from, query.query, query.query_epoch);
}
}
void PeeringState::try_mark_clean()
{
if (actingset.size() == get_osdmap()->get_pg_size(info.pgid.pgid)) {
state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
state_set(PG_STATE_CLEAN);
info.history.last_epoch_clean = get_osdmap_epoch();
info.history.last_interval_clean = info.history.same_interval_since;
past_intervals.clear();
dirty_big_info = true;
dirty_info = true;
}
if (!is_active() && is_peered()) {
if (is_clean()) {
bool target;
if (pool.info.is_pending_merge(info.pgid.pgid, &target)) {
if (target) {
psdout(10) << "ready to merge (target)" << dendl;
pl->set_ready_to_merge_target(
info.last_update,
info.history.last_epoch_started,
info.history.last_epoch_clean);
} else {
psdout(10) << "ready to merge (source)" << dendl;
pl->set_ready_to_merge_source(info.last_update);
}
}
} else {
psdout(10) << "not clean, not ready to merge" << dendl;
// we should have notified OSD in Active state entry point
}
}
state_clear(PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL);
share_pg_info();
pl->publish_stats_to_osd();
clear_recovery_state();
}
void PeeringState::split_into(
pg_t child_pgid, PeeringState *child, unsigned split_bits)
{
child->update_osdmap_ref(get_osdmap());
child->pool = pool;
// Log
pg_log.split_into(child_pgid, split_bits, &(child->pg_log));
child->info.last_complete = info.last_complete;
info.last_update = pg_log.get_head();
child->info.last_update = child->pg_log.get_head();
child->info.last_user_version = info.last_user_version;
info.log_tail = pg_log.get_tail();
child->info.log_tail = child->pg_log.get_tail();
// reset last_complete, we might have modified pg_log & missing above
pg_log.reset_complete_to(&info);
child->pg_log.reset_complete_to(&child->info);
// Info
child->info.history = info.history;
child->info.history.epoch_created = get_osdmap_epoch();
child->info.purged_snaps = info.purged_snaps;
if (info.last_backfill.is_max()) {
child->info.set_last_backfill(hobject_t::get_max());
} else {
// restart backfill on parent and child to be safe. we could
// probably do better in the bitwise sort case, but it's more
// fragile (there may be special work to do on backfill completion
// in the future).
info.set_last_backfill(hobject_t());
child->info.set_last_backfill(hobject_t());
// restarting backfill implies that the missing set is empty,
// since it is only used for objects prior to last_backfill
pg_log.reset_backfill();
child->pg_log.reset_backfill();
}
child->info.stats = info.stats;
child->info.stats.parent_split_bits = split_bits;
info.stats.stats_invalid = true;
child->info.stats.stats_invalid = true;
child->info.stats.objects_trimmed = 0;
child->info.stats.snaptrim_duration = 0.0;
child->info.last_epoch_started = info.last_epoch_started;
child->info.last_interval_started = info.last_interval_started;
// There can't be recovery/backfill going on now
int primary, up_primary;
vector<int> newup, newacting;
get_osdmap()->pg_to_up_acting_osds(
child->info.pgid.pgid, &newup, &up_primary, &newacting, &primary);
child->init_primary_up_acting(
newup,
newacting,
up_primary,
primary);
child->role = OSDMap::calc_pg_role(pg_whoami, child->acting);
// this comparison includes primary rank via pg_shard_t
if (get_primary() != child->get_primary())
child->info.history.same_primary_since = get_osdmap_epoch();
child->info.stats.up = newup;
child->info.stats.up_primary = up_primary;
child->info.stats.acting = newacting;
child->info.stats.acting_primary = primary;
child->info.stats.mapping_epoch = get_osdmap_epoch();
// History
child->past_intervals = past_intervals;
child->on_new_interval();
child->send_notify = !child->is_primary();
child->dirty_info = true;
child->dirty_big_info = true;
dirty_info = true;
dirty_big_info = true;
}
void PeeringState::merge_from(
map<spg_t,PeeringState *>& sources,
PeeringCtx &rctx,
unsigned split_bits,
const pg_merge_meta_t& last_pg_merge_meta)
{
bool incomplete = false;
if (info.last_complete != info.last_update ||
info.is_incomplete() ||
info.dne()) {
psdout(10) << "target incomplete" << dendl;
incomplete = true;
}
if (last_pg_merge_meta.source_pgid != pg_t()) {
if (info.pgid.pgid != last_pg_merge_meta.source_pgid.get_parent()) {
psdout(10) << "target doesn't match expected parent "
<< last_pg_merge_meta.source_pgid.get_parent()
<< " of source_pgid " << last_pg_merge_meta.source_pgid
<< dendl;
incomplete = true;
}
if (info.last_update != last_pg_merge_meta.target_version) {
psdout(10) << "target version doesn't match expected "
<< last_pg_merge_meta.target_version << dendl;
incomplete = true;
}
}
PGLog::LogEntryHandlerRef handler{pl->get_log_handler(rctx.transaction)};
pg_log.roll_forward(handler.get());
info.last_complete = info.last_update; // to fake out trim()
pg_log.reset_recovery_pointers();
pg_log.trim(info.last_update, info);
vector<PGLog*> log_from;
for (auto& i : sources) {
auto& source = i.second;
if (!source) {
psdout(10) << "source " << i.first << " missing" << dendl;
incomplete = true;
continue;
}
if (source->info.last_complete != source->info.last_update ||
source->info.is_incomplete() ||
source->info.dne()) {
psdout(10) << "source " << source->pg_whoami
<< " incomplete"
<< dendl;
incomplete = true;
}
if (last_pg_merge_meta.source_pgid != pg_t()) {
if (source->info.pgid.pgid != last_pg_merge_meta.source_pgid) {
dout(10) << "source " << source->info.pgid.pgid
<< " doesn't match expected source pgid "
<< last_pg_merge_meta.source_pgid << dendl;
incomplete = true;
}
if (source->info.last_update != last_pg_merge_meta.source_version) {
dout(10) << "source version doesn't match expected "
<< last_pg_merge_meta.source_version << dendl;
incomplete = true;
}
}
// prepare log
PGLog::LogEntryHandlerRef handler{
source->pl->get_log_handler(rctx.transaction)};
source->pg_log.roll_forward(handler.get());
source->info.last_complete = source->info.last_update; // to fake out trim()
source->pg_log.reset_recovery_pointers();
source->pg_log.trim(source->info.last_update, source->info);
log_from.push_back(&source->pg_log);
// combine stats
info.stats.add(source->info.stats);
// pull up last_update
info.last_update = std::max(info.last_update, source->info.last_update);
// adopt source's PastIntervals if target has none. we can do this since
// pgp_num has been reduced prior to the merge, so the OSD mappings for
// the PGs are identical.
if (past_intervals.empty() && !source->past_intervals.empty()) {
psdout(10) << "taking source's past_intervals" << dendl;
past_intervals = source->past_intervals;
}
}
info.last_complete = info.last_update;
info.log_tail = info.last_update;
if (incomplete) {
info.last_backfill = hobject_t();
}
// merge logs
pg_log.merge_from(log_from, info.last_update);
// make sure we have a meaningful last_epoch_started/clean (if we were a
// placeholder)
if (info.history.epoch_created == 0) {
// start with (a) source's history, since these PGs *should* have been
// remapped in concert with each other...
info.history = sources.begin()->second->info.history;
// we use the last_epoch_{started,clean} we got from
// the caller, which are the epochs that were reported when the PGs
// were found to be ready for merge.
info.history.last_epoch_clean = last_pg_merge_meta.last_epoch_clean;
info.history.last_epoch_started = last_pg_merge_meta.last_epoch_started;
info.last_epoch_started = last_pg_merge_meta.last_epoch_started;
psdout(10) << "set les/c to "
<< last_pg_merge_meta.last_epoch_started << "/"
<< last_pg_merge_meta.last_epoch_clean
<< " from pool last_dec_*, source pg history was "
<< sources.begin()->second->info.history
<< dendl;
// above we pulled down the source's history, so we need to check
// history.epoch_created again to confirm that the source is not a
// placeholder too. (peering requires a sane history.same_interval_since
// value for any non-newly-created pg; below we are basically iterating
// back through a series of past maps to fake a merge process, hence we
// need to fix history.same_interval_since first so that
// start_peering_interval() will not complain)
if (info.history.epoch_created == 0) {
dout(10) << "both merge target and source are placeholders,"
<< " set sis to lec " << info.history.last_epoch_clean
<< dendl;
info.history.same_interval_since = info.history.last_epoch_clean;
}
// if the past_intervals start is later than last_epoch_clean, it
// implies the source repeered again but the target didn't, or
// that the source became clean in a later epoch than the target.
// avoid the discrepancy by adjusting the interval start
// backwards to match so that check_past_interval_bounds() will
// not complain.
auto pib = past_intervals.get_bounds();
if (info.history.last_epoch_clean < pib.first) {
psdout(10) << "last_epoch_clean "
<< info.history.last_epoch_clean << " < past_interval start "
<< pib.first << ", adjusting start backwards" << dendl;
past_intervals.adjust_start_backwards(info.history.last_epoch_clean);
}
// Similarly, if the same_interval_since value is later than
// last_epoch_clean, the next interval change will result in a
// past_interval start that is later than last_epoch_clean. This
// can happen if we use the pg_history values from the merge
// source. Adjust the same_interval_since value backwards if that
// happens. (We trust the les and lec values more because they came from
// the real target, whereas the history value we stole from the source.)
if (info.history.last_epoch_started < info.history.same_interval_since) {
psdout(10) << "last_epoch_started "
<< info.history.last_epoch_started << " < same_interval_since "
<< info.history.same_interval_since
<< ", adjusting pg_history backwards" << dendl;
info.history.same_interval_since = info.history.last_epoch_clean;
// make sure same_{up,primary}_since are <= same_interval_since
info.history.same_up_since = std::min(
info.history.same_up_since, info.history.same_interval_since);
info.history.same_primary_since = std::min(
info.history.same_primary_since, info.history.same_interval_since);
}
}
dirty_info = true;
dirty_big_info = true;
}
void PeeringState::start_split_stats(
const set<spg_t>& childpgs, vector<object_stat_sum_t> *out)
{
out->resize(childpgs.size() + 1);
info.stats.stats.sum.split(*out);
}
void PeeringState::finish_split_stats(
const object_stat_sum_t& stats, ObjectStore::Transaction &t)
{
info.stats.stats.sum = stats;
write_if_dirty(t);
}
void PeeringState::update_blocked_by()
{
// set a max on the number of blocking peers we report. if we go
// over, report a random subset. keep the result sorted.
unsigned keep = std::min<unsigned>(
blocked_by.size(), cct->_conf->osd_max_pg_blocked_by);
unsigned skip = blocked_by.size() - keep;
info.stats.blocked_by.clear();
info.stats.blocked_by.resize(keep);
unsigned pos = 0;
for (auto p = blocked_by.begin(); p != blocked_by.end() && keep > 0; ++p) {
if (skip > 0 && (rand() % (skip + keep) < skip)) {
--skip;
} else {
info.stats.blocked_by[pos++] = *p;
--keep;
}
}
}
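// Helpers for erasure-coded stats accounting: test whether any pg_shard_t in
// the set carries the given shard id, and find another pg_shard_t (excluding
// 'skip') that carries it.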
static bool find_shard(const set<pg_shard_t> & pgs, shard_id_t shard)
{
for (auto&p : pgs)
if (p.shard == shard)
return true;
return false;
}
static pg_shard_t get_another_shard(const set<pg_shard_t> & pgs, pg_shard_t skip, shard_id_t shard)
{
for (auto&p : pgs) {
if (p == skip)
continue;
if (p.shard == shard)
return p;
}
return pg_shard_t();
}
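// Recompute derived stats for this PG: log sizes, object copy counts, and the
// degraded/misplaced/unfound object totals, based on the current peer_info,
// peer_missing, and missing_loc state.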
void PeeringState::update_calc_stats()
{
info.stats.version = info.last_update;
info.stats.created = info.history.epoch_created;
info.stats.last_scrub = info.history.last_scrub;
info.stats.last_scrub_stamp = info.history.last_scrub_stamp;
info.stats.last_deep_scrub = info.history.last_deep_scrub;
info.stats.last_deep_scrub_stamp = info.history.last_deep_scrub_stamp;
info.stats.last_clean_scrub_stamp = info.history.last_clean_scrub_stamp;
info.stats.last_epoch_clean = info.history.last_epoch_clean;
info.stats.log_size = pg_log.get_head().version - pg_log.get_tail().version;
info.stats.log_dups_size = pg_log.get_log().dups.size();
info.stats.ondisk_log_size = info.stats.log_size;
info.stats.log_start = pg_log.get_tail();
info.stats.ondisk_log_start = pg_log.get_tail();
info.stats.snaptrimq_len = pl->get_snap_trimq_size();
unsigned num_shards = get_osdmap()->get_pg_size(info.pgid.pgid);
// In the rare case that upset is too large (usually transient), use it as
// the target for the calculations below.
unsigned target = std::max(num_shards, (unsigned)upset.size());
// For an undersized PG, actingset may be larger with OSDs out
unsigned nrep = std::max(actingset.size(), upset.size());
// calc num_object_copies
info.stats.stats.calc_copies(std::max(target, nrep));
info.stats.stats.sum.num_objects_degraded = 0;
info.stats.stats.sum.num_objects_unfound = 0;
info.stats.stats.sum.num_objects_misplaced = 0;
info.stats.avail_no_missing.clear();
info.stats.object_location_counts.clear();
// We should never hit this condition, but if we end up hitting it,
// make sure to update num_objects and set PG_STATE_INCONSISTENT.
if (info.stats.stats.sum.num_objects < 0) {
psdout(0) << "negative num_objects = "
<< info.stats.stats.sum.num_objects << " setting it to 0 "
<< dendl;
info.stats.stats.sum.num_objects = 0;
state_set(PG_STATE_INCONSISTENT);
}
if ((is_remapped() || is_undersized() || !is_clean()) &&
(is_peered()|| is_activating())) {
psdout(20) << "actingset " << actingset << " upset "
<< upset << " acting_recovery_backfill " << acting_recovery_backfill << dendl;
ceph_assert(!acting_recovery_backfill.empty());
bool estimate = false;
// NOTE: we only generate degraded, misplaced and unfound
// values for the summation, not individual stat categories.
int64_t num_objects = info.stats.stats.sum.num_objects;
// Objects missing from up nodes, sorted by # objects.
boost::container::flat_set<pair<int64_t,pg_shard_t>> missing_target_objects;
// Objects missing from nodes not in up, sort by # objects
boost::container::flat_set<pair<int64_t,pg_shard_t>> acting_source_objects;
// Fill missing_target_objects/acting_source_objects
{
int64_t missing;
// Primary first
missing = pg_log.get_missing().num_missing();
ceph_assert(acting_recovery_backfill.count(pg_whoami));
if (upset.count(pg_whoami)) {
missing_target_objects.emplace(missing, pg_whoami);
} else {
acting_source_objects.emplace(missing, pg_whoami);
}
info.stats.stats.sum.num_objects_missing_on_primary = missing;
if (missing == 0)
info.stats.avail_no_missing.push_back(pg_whoami);
psdout(20) << "shard " << pg_whoami
<< " primary objects " << num_objects
<< " missing " << missing
<< dendl;
}
// All other peers
for (auto& peer : peer_info) {
// Primary should not be in the peer_info, skip if it is.
if (peer.first == pg_whoami) continue;
int64_t missing = 0;
int64_t peer_num_objects =
std::max((int64_t)0, peer.second.stats.stats.sum.num_objects);
// Backfill targets always track num_objects accurately;
// all other peers track missing accurately.
if (is_backfill_target(peer.first)) {
missing = std::max((int64_t)0, num_objects - peer_num_objects);
} else {
if (peer_missing.count(peer.first)) {
missing = peer_missing[peer.first].num_missing();
} else {
psdout(20) << "no peer_missing found for "
<< peer.first << dendl;
if (is_recovering()) {
estimate = true;
}
missing = std::max((int64_t)0, num_objects - peer_num_objects);
}
}
if (upset.count(peer.first)) {
missing_target_objects.emplace(missing, peer.first);
} else if (actingset.count(peer.first)) {
acting_source_objects.emplace(missing, peer.first);
}
peer.second.stats.stats.sum.num_objects_missing = missing;
if (missing == 0)
info.stats.avail_no_missing.push_back(peer.first);
psdout(20) << "shard " << peer.first
<< " objects " << peer_num_objects
<< " missing " << missing
<< dendl;
}
// Compute object_location_counts
for (auto& ml: missing_loc.get_missing_locs()) {
info.stats.object_location_counts[ml.second]++;
psdout(30) << ml.first << " object_location_counts["
<< ml.second << "]=" << info.stats.object_location_counts[ml.second]
<< dendl;
}
int64_t not_missing = num_objects - missing_loc.get_missing_locs().size();
if (not_missing) {
// During recovery we know upset == actingset and is being populated
// During backfill we know that all non-missing objects are in the actingset
info.stats.object_location_counts[actingset] = not_missing;
}
psdout(30) << "object_location_counts["
<< upset << "]=" << info.stats.object_location_counts[upset]
<< dendl;
psdout(20) << "object_location_counts "
<< info.stats.object_location_counts << dendl;
// A misplaced object is not stored on the correct OSD
int64_t misplaced = 0;
// a degraded object has fewer replicas or EC shards than the pool specifies.
int64_t degraded = 0;
if (is_recovering()) {
for (auto& sml: missing_loc.get_missing_by_count()) {
for (auto& ml: sml.second) {
int missing_shards;
if (sml.first == shard_id_t::NO_SHARD) {
psdout(20) << "ml " << ml.second
<< " upset size " << upset.size()
<< " up " << ml.first.up << dendl;
missing_shards = (int)upset.size() - ml.first.up;
} else {
// Handle shards not even in upset below
if (!find_shard(upset, sml.first))
continue;
missing_shards = std::max(0, 1 - ml.first.up);
psdout(20) << "shard " << sml.first
<< " ml " << ml.second
<< " missing shards " << missing_shards << dendl;
}
int odegraded = ml.second * missing_shards;
// Copies on other OSDs, but limited to the possible degraded count
int more_osds = std::min(missing_shards, ml.first.other);
int omisplaced = ml.second * more_osds;
ceph_assert(omisplaced <= odegraded);
odegraded -= omisplaced;
misplaced += omisplaced;
degraded += odegraded;
}
}
psdout(20) << "missing based degraded "
<< degraded << dendl;
psdout(20) << "missing based misplaced "
<< misplaced << dendl;
// Handle undersized case
if (pool.info.is_replicated()) {
// Add degraded for missing targets (num_objects missing)
ceph_assert(target >= upset.size());
unsigned needed = target - upset.size();
degraded += num_objects * needed;
} else {
for (unsigned i = 0 ; i < num_shards; ++i) {
shard_id_t shard(i);
if (!find_shard(upset, shard)) {
pg_shard_t pgs = get_another_shard(actingset, pg_shard_t(), shard);
if (pgs != pg_shard_t()) {
int64_t missing;
if (pgs == pg_whoami)
missing = info.stats.stats.sum.num_objects_missing_on_primary;
else
missing = peer_info[pgs].stats.stats.sum.num_objects_missing;
degraded += missing;
misplaced += std::max((int64_t)0, num_objects - missing);
} else {
// No shard anywhere
degraded += num_objects;
}
}
}
}
goto out;
}
// Handle undersized case
if (pool.info.is_replicated()) {
// Add to missing_target_objects
ceph_assert(target >= missing_target_objects.size());
unsigned needed = target - missing_target_objects.size();
if (needed)
missing_target_objects.emplace(num_objects * needed, pg_shard_t(pg_shard_t::NO_OSD));
} else {
for (unsigned i = 0 ; i < num_shards; ++i) {
shard_id_t shard(i);
bool found = false;
for (const auto& t : missing_target_objects) {
if (std::get<1>(t).shard == shard) {
found = true;
break;
}
}
if (!found)
missing_target_objects.emplace(num_objects, pg_shard_t(pg_shard_t::NO_OSD,shard));
}
}
for (const auto& item : missing_target_objects)
psdout(20) << "missing shard " << std::get<1>(item)
<< " missing= " << std::get<0>(item) << dendl;
for (const auto& item : acting_source_objects)
psdout(20) << "acting shard " << std::get<1>(item)
<< " missing= " << std::get<0>(item) << dendl;
// Handle all objects not in missing for remapped
// or backfill
for (auto m = missing_target_objects.rbegin();
m != missing_target_objects.rend(); ++m) {
int64_t extra_missing = -1;
if (pool.info.is_replicated()) {
if (!acting_source_objects.empty()) {
auto extra_copy = acting_source_objects.begin();
extra_missing = std::get<0>(*extra_copy);
acting_source_objects.erase(extra_copy);
}
} else { // Erasure coded
// Use corresponding shard
for (const auto& a : acting_source_objects) {
if (std::get<1>(a).shard == std::get<1>(*m).shard) {
extra_missing = std::get<0>(a);
acting_source_objects.erase(a);
break;
}
}
}
if (extra_missing >= 0 && std::get<0>(*m) >= extra_missing) {
// We don't know which of the objects on the target
// are part of extra_missing, so assume they are all degraded.
misplaced += std::get<0>(*m) - extra_missing;
degraded += extra_missing;
} else {
// 1. extra_missing == -1: more targets than sources, so degraded
// 2. extra_missing > std::get<0>(*m): some of the extra_missing objects
// previously counted as degraded are now present on the target.
degraded += std::get<0>(*m);
}
}
// If there are still acting shards that haven't been accounted for,
// then they are misplaced
for (const auto& a : acting_source_objects) {
int64_t extra_misplaced = std::max((int64_t)0, num_objects - std::get<0>(a));
psdout(20) << "extra acting misplaced " << extra_misplaced
<< dendl;
misplaced += extra_misplaced;
}
out:
// NOTE: Tests use these messages to verify this code
psdout(20) << "degraded " << degraded
<< (estimate ? " (est)": "") << dendl;
psdout(20) << "misplaced " << misplaced
<< (estimate ? " (est)": "")<< dendl;
info.stats.stats.sum.num_objects_degraded = degraded;
info.stats.stats.sum.num_objects_unfound = get_num_unfound();
info.stats.stats.sum.num_objects_misplaced = misplaced;
}
}
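// Build the pg_stat_t we would publish to the OSD/mgr. Returns std::nullopt
// when nothing has changed since the last published stats and the last report
// is still fresh enough; otherwise returns the updated stats to publish.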
std::optional<pg_stat_t> PeeringState::prepare_stats_for_publish(
const std::optional<pg_stat_t> &pg_stats_publish,
const object_stat_collection_t &unstable_stats)
{
if (info.stats.stats.sum.num_scrub_errors) {
psdout(10) << "inconsistent due to " <<
info.stats.stats.sum.num_scrub_errors << " scrub errors" << dendl;
state_set(PG_STATE_INCONSISTENT);
} else {
state_clear(PG_STATE_INCONSISTENT);
state_clear(PG_STATE_FAILED_REPAIR);
}
utime_t now = ceph_clock_now();
if (info.stats.state != state) {
info.stats.last_change = now;
// Optimistic estimate: if we just found out that the PG is inactive,
// assume it was active until now.
if (!(state & PG_STATE_ACTIVE) &&
(info.stats.state & PG_STATE_ACTIVE))
info.stats.last_active = now;
if ((state & PG_STATE_ACTIVE) &&
!(info.stats.state & PG_STATE_ACTIVE))
info.stats.last_became_active = now;
if ((state & (PG_STATE_ACTIVE|PG_STATE_PEERED)) &&
!(info.stats.state & (PG_STATE_ACTIVE|PG_STATE_PEERED)))
info.stats.last_became_peered = now;
info.stats.state = state;
}
update_calc_stats();
if (info.stats.stats.sum.num_objects_degraded) {
state_set(PG_STATE_DEGRADED);
} else {
state_clear(PG_STATE_DEGRADED);
}
update_blocked_by();
pg_stat_t pre_publish = info.stats;
pre_publish.stats.add(unstable_stats);
utime_t cutoff = now;
cutoff -= cct->_conf->osd_pg_stat_report_interval_max;
// share (some of) our purged_snaps via the pg_stats. limit # of intervals
// because we don't want to make the pg_stat_t structures too expensive.
unsigned max = cct->_conf->osd_max_snap_prune_intervals_per_epoch;
unsigned num = 0;
auto i = info.purged_snaps.begin();
while (num < max && i != info.purged_snaps.end()) {
pre_publish.purged_snaps.insert(i.get_start(), i.get_len());
++num;
++i;
}
psdout(20) << "reporting purged_snaps "
<< pre_publish.purged_snaps << dendl;
if (pg_stats_publish && pre_publish == *pg_stats_publish &&
info.stats.last_fresh > cutoff) {
psdout(15) << "publish_stats_to_osd " << pg_stats_publish->reported_epoch
<< ": no change since " << info.stats.last_fresh << dendl;
return std::nullopt;
} else {
// update our stat summary and timestamps
info.stats.reported_epoch = get_osdmap_epoch();
++info.stats.reported_seq;
info.stats.last_fresh = now;
if (info.stats.state & PG_STATE_CLEAN)
info.stats.last_clean = now;
if (info.stats.state & PG_STATE_ACTIVE)
info.stats.last_active = now;
if (info.stats.state & (PG_STATE_ACTIVE|PG_STATE_PEERED))
info.stats.last_peered = now;
info.stats.last_unstale = now;
if ((info.stats.state & PG_STATE_DEGRADED) == 0)
info.stats.last_undegraded = now;
if ((info.stats.state & PG_STATE_UNDERSIZED) == 0)
info.stats.last_fullsized = now;
psdout(15) << "publish_stats_to_osd " << pre_publish.reported_epoch
<< ":" << pre_publish.reported_seq << dendl;
return std::make_optional(std::move(pre_publish));
}
}
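// Initialize a freshly created PG with its role, up/acting sets, history and
// past intervals, then persist the result in the given transaction.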
void PeeringState::init(
int role,
const vector<int>& newup, int new_up_primary,
const vector<int>& newacting, int new_acting_primary,
const pg_history_t& history,
const PastIntervals& pi,
ObjectStore::Transaction &t)
{
psdout(10) << "init role " << role << " up "
<< newup << " acting " << newacting
<< " history " << history
<< " past_intervals " << pi
<< dendl;
set_role(role);
init_primary_up_acting(
newup,
newacting,
new_up_primary,
new_acting_primary);
info.history = history;
past_intervals = pi;
info.stats.up = up;
info.stats.up_primary = new_up_primary;
info.stats.acting = acting;
info.stats.acting_primary = new_acting_primary;
info.stats.mapping_epoch = info.history.same_interval_since;
if (!perform_deletes_during_peering()) {
pg_log.set_missing_may_contain_deletes();
}
on_new_interval();
dirty_info = true;
dirty_big_info = true;
write_if_dirty(t);
}
void PeeringState::dump_peering_state(Formatter *f)
{
f->dump_string("state", get_pg_state_string());
f->dump_unsigned("epoch", get_osdmap_epoch());
f->open_array_section("up");
for (auto p = up.begin(); p != up.end(); ++p)
f->dump_unsigned("osd", *p);
f->close_section();
f->open_array_section("acting");
for (auto p = acting.begin(); p != acting.end(); ++p)
f->dump_unsigned("osd", *p);
f->close_section();
if (!backfill_targets.empty()) {
f->open_array_section("backfill_targets");
for (auto p = backfill_targets.begin(); p != backfill_targets.end(); ++p)
f->dump_stream("shard") << *p;
f->close_section();
}
if (!async_recovery_targets.empty()) {
f->open_array_section("async_recovery_targets");
for (auto p = async_recovery_targets.begin();
p != async_recovery_targets.end();
++p)
f->dump_stream("shard") << *p;
f->close_section();
}
if (!acting_recovery_backfill.empty()) {
f->open_array_section("acting_recovery_backfill");
for (auto p = acting_recovery_backfill.begin();
p != acting_recovery_backfill.end();
++p)
f->dump_stream("shard") << *p;
f->close_section();
}
f->open_object_section("info");
update_calc_stats();
info.dump(f);
f->close_section();
f->open_array_section("peer_info");
for (auto p = peer_info.begin(); p != peer_info.end(); ++p) {
f->open_object_section("info");
f->dump_stream("peer") << p->first;
p->second.dump(f);
f->close_section();
}
f->close_section();
}
void PeeringState::update_stats(
std::function<bool(pg_history_t &, pg_stat_t &)> f,
ObjectStore::Transaction *t) {
if (f(info.history, info.stats)) {
pl->publish_stats_to_osd();
}
if (t) {
dirty_info = true;
write_if_dirty(*t);
}
}
void PeeringState::update_stats_wo_resched(
std::function<void(pg_history_t &, pg_stat_t &)> f)
{
f(info.history, info.stats);
}
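// Append externally generated log entries to the local log, updating the
// local missing set and optionally rolling forward / trimming. Returns true
// if the appended entries invalidate the PG stats.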
bool PeeringState::append_log_entries_update_missing(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObjectStore::Transaction &t, std::optional<eversion_t> trim_to,
std::optional<eversion_t> roll_forward_to)
{
ceph_assert(!entries.empty());
ceph_assert(entries.begin()->version > info.last_update);
PGLog::LogEntryHandlerRef rollbacker{pl->get_log_handler(t)};
bool invalidate_stats =
pg_log.append_new_log_entries(
info.last_backfill,
entries,
rollbacker.get());
if (roll_forward_to && entries.rbegin()->soid > info.last_backfill) {
pg_log.roll_forward(rollbacker.get());
}
if (roll_forward_to && *roll_forward_to > pg_log.get_can_rollback_to()) {
pg_log.roll_forward_to(*roll_forward_to, rollbacker.get());
last_rollback_info_trimmed_to_applied = *roll_forward_to;
}
info.last_update = pg_log.get_head();
if (pg_log.get_missing().num_missing() == 0) {
// advance last_complete since nothing else is missing!
info.last_complete = info.last_update;
}
info.stats.stats_invalid = info.stats.stats_invalid || invalidate_stats;
psdout(20) << "trim_to bool = " << bool(trim_to)
<< " trim_to = " << (trim_to ? *trim_to : eversion_t()) << dendl;
if (trim_to)
pg_log.trim(*trim_to, info);
dirty_info = true;
write_if_dirty(t);
return invalidate_stats;
}
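// Primary-side counterpart: append the new entries locally and mirror the
// resulting updates into peer_missing/peer_info for each
// acting_recovery_backfill peer, rebuilding missing_loc for the affected
// objects when needed.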
void PeeringState::merge_new_log_entries(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObjectStore::Transaction &t,
std::optional<eversion_t> trim_to,
std::optional<eversion_t> roll_forward_to)
{
psdout(10) << entries << dendl;
ceph_assert(is_primary());
bool rebuild_missing = append_log_entries_update_missing(entries, t, trim_to, roll_forward_to);
for (auto i = acting_recovery_backfill.begin();
i != acting_recovery_backfill.end();
++i) {
pg_shard_t peer(*i);
if (peer == pg_whoami) continue;
ceph_assert(peer_missing.count(peer));
ceph_assert(peer_info.count(peer));
pg_missing_t& pmissing(peer_missing[peer]);
psdout(20) << "peer_missing for " << peer
<< " = " << pmissing << dendl;
pg_info_t& pinfo(peer_info[peer]);
bool invalidate_stats = PGLog::append_log_entries_update_missing(
pinfo.last_backfill,
entries,
true,
NULL,
pmissing,
NULL,
dpp);
pinfo.last_update = info.last_update;
pinfo.stats.stats_invalid = pinfo.stats.stats_invalid || invalidate_stats;
rebuild_missing = rebuild_missing || invalidate_stats;
}
if (!rebuild_missing) {
return;
}
for (auto &&i: entries) {
missing_loc.rebuild(
i.soid,
pg_whoami,
acting_recovery_backfill,
info,
pg_log.get_missing(),
peer_missing,
peer_info);
}
}
void PeeringState::add_log_entry(const pg_log_entry_t& e, bool applied)
{
// raise last_complete only if we were previously up to date
if (info.last_complete == info.last_update)
info.last_complete = e.version;
// raise last_update.
ceph_assert(e.version > info.last_update);
info.last_update = e.version;
// raise user_version, if it increased (it may not have been bumped
// by all logged updates)
if (e.user_version > info.last_user_version)
info.last_user_version = e.user_version;
// log mutation
pg_log.add(e, applied);
psdout(10) << "add_log_entry " << e << dendl;
}
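// Append locally prepared log entries for a write to the pg log, maintaining
// the rollforward/crt invariants (backfill and async-recovery peers skip the
// rollforward), then trim the log and persist any dirty info.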
void PeeringState::append_log(
vector<pg_log_entry_t>&& logv,
eversion_t trim_to,
eversion_t roll_forward_to,
eversion_t mlcod,
ObjectStore::Transaction &t,
bool transaction_applied,
bool async)
{
/* The primary has sent an info updating the history, but it may not
* have arrived yet. We want to make sure that we cannot remember this
* write without remembering that it happened in an interval which went
* active in epoch history.last_epoch_started.
*/
if (info.last_epoch_started != info.history.last_epoch_started) {
info.history.last_epoch_started = info.last_epoch_started;
}
if (info.last_interval_started != info.history.last_interval_started) {
info.history.last_interval_started = info.last_interval_started;
}
psdout(10) << "append_log " << pg_log.get_log() << " " << logv << dendl;
PGLog::LogEntryHandlerRef handler{pl->get_log_handler(t)};
if (!transaction_applied) {
/* We must be a backfill or async recovery peer, so it's ok if we apply
* out-of-turn since we won't be considered when
* determining a min possible last_update.
*
* We skip_rollforward() here, which advances the crt, without
* doing an actual rollforward. This avoids cleaning up entries
* from the backend and we do not end up in a situation, where the
* object is deleted before we can _merge_object_divergent_entries().
*/
pg_log.skip_rollforward();
}
for (auto p = logv.begin(); p != logv.end(); ++p) {
add_log_entry(*p, transaction_applied);
/* We don't want to leave the rollforward artifacts around
* here past last_backfill. It's ok for the same reason as
* above */
if (transaction_applied &&
p->soid > info.last_backfill) {
pg_log.roll_forward(handler.get());
}
}
if (transaction_applied && roll_forward_to > pg_log.get_can_rollback_to()) {
pg_log.roll_forward_to(
roll_forward_to,
handler.get());
last_rollback_info_trimmed_to_applied = roll_forward_to;
}
psdout(10) << "approx pg log length = "
<< pg_log.get_log().approx_size() << dendl;
psdout(10) << "dups pg log length = "
<< pg_log.get_log().dups.size() << dendl;
psdout(10) << "transaction_applied = "
<< transaction_applied << dendl;
if (!transaction_applied || async)
psdout(10) << pg_whoami
<< " is async_recovery or backfill target" << dendl;
pg_log.trim(trim_to, info, transaction_applied, async);
// update the local pg, pg log
dirty_info = true;
write_if_dirty(t);
if (!is_primary())
min_last_complete_ondisk = mlcod;
}
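// Record that a missing object has been recovered locally at version v:
// roll forward past v if necessary, advance last_complete, and (on the
// primary) register ourselves as a location for the object.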
void PeeringState::recover_got(
const hobject_t &oid, eversion_t v,
bool is_delete,
ObjectStore::Transaction &t)
{
if (v > pg_log.get_can_rollback_to()) {
/* This can only happen during a repair, and even then, it would
* be one heck of a race. If we are repairing the object, the
* write in question must be fully committed, so it's not valid
* to roll it back anyway (and we'll be rolled forward shortly
* anyway) */
PGLog::LogEntryHandlerRef handler{pl->get_log_handler(t)};
pg_log.roll_forward_to(v, handler.get());
}
psdout(10) << "got missing " << oid << " v " << v << dendl;
pg_log.recover_got(oid, v, info);
if (pg_log.get_log().log.empty()) {
psdout(10) << "last_complete now " << info.last_complete
<< " while log is empty" << dendl;
} else if (pg_log.get_log().complete_to != pg_log.get_log().log.end()) {
psdout(10) << "last_complete now " << info.last_complete
<< " log.complete_to " << pg_log.get_log().complete_to->version
<< dendl;
} else {
psdout(10) << "last_complete now " << info.last_complete
<< " log.complete_to at end" << dendl;
//below is not true in the repair case.
//assert(missing.num_missing() == 0); // otherwise, complete_to was wrong.
ceph_assert(info.last_complete == info.last_update);
}
if (is_primary()) {
ceph_assert(missing_loc.needs_recovery(oid));
if (!is_delete)
missing_loc.add_location(oid, pg_whoami);
}
// update pg
dirty_info = true;
write_if_dirty(t);
}
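// Record backfill progress: advance last_backfill and adopt the stats sent
// with the update, optionally preserving the locally tracked num_bytes.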
void PeeringState::update_backfill_progress(
const hobject_t &updated_backfill,
const pg_stat_t &updated_stats,
bool preserve_local_num_bytes,
ObjectStore::Transaction &t) {
info.set_last_backfill(updated_backfill);
if (preserve_local_num_bytes) {
psdout(25) << "primary " << updated_stats.stats.sum.num_bytes
<< " local " << info.stats.stats.sum.num_bytes << dendl;
int64_t bytes = info.stats.stats.sum.num_bytes;
info.stats = updated_stats;
info.stats.stats.sum.num_bytes = bytes;
} else {
psdout(20) << "final " << updated_stats.stats.sum.num_bytes
<< " replaces local " << info.stats.stats.sum.num_bytes << dendl;
info.stats = updated_stats;
}
dirty_info = true;
write_if_dirty(t);
}
void PeeringState::adjust_purged_snaps(
std::function<void(interval_set<snapid_t> &snaps)> f) {
f(info.purged_snaps);
dirty_info = true;
dirty_big_info = true;
}
void PeeringState::on_peer_recover(
pg_shard_t peer,
const hobject_t &soid,
const eversion_t &version)
{
pl->publish_stats_to_osd();
// done!
peer_missing[peer].got(soid, version);
missing_loc.add_location(soid, peer);
}
void PeeringState::begin_peer_recover(
pg_shard_t peer,
const hobject_t soid)
{
peer_missing[peer].revise_have(soid, eversion_t());
}
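// Force an object to be treated as missing on the given peers (or in the
// local missing set, for the primary) and rebuild its missing_loc entry.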
void PeeringState::force_object_missing(
const set<pg_shard_t> &peers,
const hobject_t &soid,
eversion_t version)
{
for (auto &&peer : peers) {
if (peer != primary) {
peer_missing[peer].add(soid, version, eversion_t(), false);
} else {
pg_log.missing_add(soid, version, eversion_t());
pg_log.reset_complete_to(&info);
pg_log.set_last_requested(0);
}
}
missing_loc.rebuild(
soid,
pg_whoami,
acting_recovery_backfill,
info,
pg_log.get_missing(),
peer_missing,
peer_info);
}
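// Called before submitting an op: keep peer last_update/last_complete in sync
// and, for async recovery targets that are missing the object, record the new
// log events in peer_missing and refresh missing_loc for the object.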
void PeeringState::pre_submit_op(
const hobject_t &hoid,
const vector<pg_log_entry_t>& logv,
eversion_t at_version)
{
if (at_version > eversion_t()) {
for (auto &&i : get_acting_recovery_backfill()) {
if (i == primary) continue;
pg_info_t &pinfo = peer_info[i];
// keep peer_info up to date
if (pinfo.last_complete == pinfo.last_update)
pinfo.last_complete = at_version;
pinfo.last_update = at_version;
}
}
bool requires_missing_loc = false;
for (auto &&i : get_async_recovery_targets()) {
if (i == primary || !get_peer_missing(i).is_missing(hoid))
continue;
requires_missing_loc = true;
for (auto &&entry: logv) {
peer_missing[i].add_next_event(entry);
}
}
if (requires_missing_loc) {
for (auto &&entry: logv) {
psdout(30) << "missing_loc before: "
<< missing_loc.get_locations(entry.soid) << dendl;
missing_loc.add_missing(entry.soid, entry.version,
eversion_t(), entry.is_delete());
// clear out missing_loc
missing_loc.clear_location(entry.soid);
for (auto &i: get_actingset()) {
if (!get_peer_missing(i).is_missing(entry.soid))
missing_loc.add_location(entry.soid, i);
}
psdout(30) << "missing_loc after: "
<< missing_loc.get_locations(entry.soid) << dendl;
}
}
}
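// A recovery write has committed to disk through 'version'. If we are now
// fully caught up, a replica/backfill target notifies the primary (via
// MOSDPGTrim); the primary recomputes min_last_complete_ondisk instead.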
void PeeringState::recovery_committed_to(eversion_t version)
{
psdout(10) << "version " << version
<< " now ondisk" << dendl;
last_complete_ondisk = version;
if (last_complete_ondisk == info.last_update) {
if (!is_primary()) {
// Either we are a replica or backfill target.
// we are fully up to date. tell the primary!
pl->send_cluster_message(
get_primary().osd,
TOPNSPC::make_message<MOSDPGTrim>(
get_osdmap_epoch(),
spg_t(info.pgid.pgid, primary.shard),
last_complete_ondisk),
get_osdmap_epoch());
} else {
calc_min_last_complete_ondisk();
}
}
}
void PeeringState::complete_write(eversion_t v, eversion_t lc)
{
last_update_ondisk = v;
last_complete_ondisk = lc;
calc_min_last_complete_ondisk();
}
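// Compute pg_trim_to: trim the oldest entries down toward the target log
// length, but never past min_last_complete_ondisk or can_rollback_to.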
void PeeringState::calc_trim_to()
{
size_t target = pl->get_target_pg_log_entries();
eversion_t limit = std::min(
min_last_complete_ondisk,
pg_log.get_can_rollback_to());
if (limit != eversion_t() &&
limit != pg_trim_to &&
pg_log.get_log().approx_size() > target) {
size_t num_to_trim = std::min(pg_log.get_log().approx_size() - target,
cct->_conf->osd_pg_log_trim_max);
if (num_to_trim < cct->_conf->osd_pg_log_trim_min &&
cct->_conf->osd_pg_log_trim_max >= cct->_conf->osd_pg_log_trim_min) {
return;
}
auto it = pg_log.get_log().log.begin();
eversion_t new_trim_to;
for (size_t i = 0; i < num_to_trim; ++i) {
new_trim_to = it->version;
++it;
if (new_trim_to > limit) {
new_trim_to = limit;
psdout(10) << "calc_trim_to trimming to min_last_complete_ondisk" << dendl;
break;
}
}
psdout(10) << "calc_trim_to " << pg_trim_to << " -> " << new_trim_to << dendl;
pg_trim_to = new_trim_to;
assert(pg_trim_to <= pg_log.get_head());
assert(pg_trim_to <= min_last_complete_ondisk);
}
}
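// More aggressive variant: trim by entry count, bounded only by the log head,
// can_rollback_to, and last_update_ondisk.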
void PeeringState::calc_trim_to_aggressive()
{
size_t target = pl->get_target_pg_log_entries();
// limit pg log trimming up to the can_rollback_to value
eversion_t limit = std::min({
pg_log.get_head(),
pg_log.get_can_rollback_to(),
last_update_ondisk});
psdout(10) << "limit = " << limit << dendl;
if (limit != eversion_t() &&
limit != pg_trim_to &&
pg_log.get_log().approx_size() > target) {
psdout(10) << "approx pg log length = "
<< pg_log.get_log().approx_size() << dendl;
uint64_t num_to_trim = std::min<uint64_t>(pg_log.get_log().approx_size() - target,
cct->_conf->osd_pg_log_trim_max);
psdout(10) << "num_to_trim = " << num_to_trim << dendl;
if (num_to_trim < cct->_conf->osd_pg_log_trim_min &&
cct->_conf->osd_pg_log_trim_max >= cct->_conf->osd_pg_log_trim_min) {
return;
}
auto it = pg_log.get_log().log.begin(); // oldest log entry
auto rit = pg_log.get_log().log.rbegin();
eversion_t by_n_to_keep; // start from tail
eversion_t by_n_to_trim = eversion_t::max(); // start from head
for (size_t i = 0; it != pg_log.get_log().log.end(); ++it, ++rit) {
i++;
if (i > target && by_n_to_keep == eversion_t()) {
by_n_to_keep = rit->version;
}
if (i >= num_to_trim && by_n_to_trim == eversion_t::max()) {
by_n_to_trim = it->version;
}
if (by_n_to_keep != eversion_t() &&
by_n_to_trim != eversion_t::max()) {
break;
}
}
if (by_n_to_keep == eversion_t()) {
return;
}
pg_trim_to = std::min({by_n_to_keep, by_n_to_trim, limit});
psdout(10) << "pg_trim_to now " << pg_trim_to << dendl;
ceph_assert(pg_trim_to <= pg_log.get_head());
}
}
void PeeringState::apply_op_stats(
const hobject_t &soid,
const object_stat_sum_t &delta_stats)
{
info.stats.stats.add(delta_stats);
info.stats.stats.floor(0);
for (auto i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
pg_shard_t bt = *i;
pg_info_t& pinfo = peer_info[bt];
if (soid <= pinfo.last_backfill)
pinfo.stats.stats.add(delta_stats);
}
}
void PeeringState::update_complete_backfill_object_stats(
const hobject_t &hoid,
const pg_stat_t &stats)
{
for (auto &&bt: get_backfill_targets()) {
pg_info_t& pinfo = peer_info[bt];
// Add stats to all peers that were missing the object
if (hoid > pinfo.last_backfill)
pinfo.stats.add(stats);
}
}
void PeeringState::update_peer_last_backfill(
pg_shard_t peer,
const hobject_t &new_last_backfill)
{
pg_info_t &pinfo = peer_info[peer];
pinfo.last_backfill = new_last_backfill;
if (new_last_backfill.is_max()) {
/* pinfo.stats might be wrong if we did log-based recovery on the
* backfilled portion in addition to continuing backfill.
*/
pinfo.stats = info.stats;
}
}
void PeeringState::set_revert_with_targets(
const hobject_t &soid,
const set<pg_shard_t> &good_peers)
{
for (auto &&peer: good_peers) {
missing_loc.add_location(soid, peer);
}
}
void PeeringState::update_peer_last_complete_ondisk(
pg_shard_t fromosd,
eversion_t lcod) {
psdout(20) << "updating peer_last_complete_ondisk"
<< " of osd: "<< fromosd << " to: "
<< lcod << dendl;
peer_last_complete_ondisk[fromosd] = lcod;
}
void PeeringState::update_last_complete_ondisk(
eversion_t lcod) {
psdout(20) << "updating last_complete_ondisk"
<< " to: " << lcod << dendl;
last_complete_ondisk = lcod;
}
void PeeringState::prepare_backfill_for_missing(
const hobject_t &soid,
const eversion_t &version,
const vector<pg_shard_t> &targets) {
for (auto &&peer: targets) {
peer_missing[peer].add(soid, version, eversion_t(), false);
}
}
void PeeringState::update_hset(const pg_hit_set_history_t &hset_history)
{
info.hit_set = hset_history;
}
/*------------ Peering State Machine----------------*/
#undef dout_prefix
#define dout_prefix (context< PeeringMachine >().dpp->gen_prefix(*_dout) \
<< "state<" << get_state_name() << ">: ")
#undef psdout
#define psdout(x) ldout(context< PeeringMachine >().cct, x)
#define DECLARE_LOCALS \
PeeringState *ps = context< PeeringMachine >().state; \
std::ignore = ps; \
PeeringListener *pl = context< PeeringMachine >().pl; \
std::ignore = pl
/*------Crashed-------*/
PeeringState::Crashed::Crashed(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Crashed")
{
context< PeeringMachine >().log_enter(state_name);
ceph_abort_msg("we got a bad state machine event");
}
/*------Initial-------*/
PeeringState::Initial::Initial(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Initial")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result PeeringState::Initial::react(const MNotifyRec& notify)
{
DECLARE_LOCALS;
ps->proc_replica_info(
notify.from, notify.notify.info, notify.notify.epoch_sent);
ps->set_last_peering_reset();
return transit< Primary >();
}
boost::statechart::result PeeringState::Initial::react(const MInfoRec& i)
{
DECLARE_LOCALS;
ceph_assert(!ps->is_primary());
post_event(i);
return transit< Stray >();
}
boost::statechart::result PeeringState::Initial::react(const MLogRec& i)
{
DECLARE_LOCALS;
ceph_assert(!ps->is_primary());
post_event(i);
return transit< Stray >();
}
void PeeringState::Initial::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_initial_latency, dur);
}
/*------Started-------*/
PeeringState::Started::Started(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result
PeeringState::Started::react(const IntervalFlush&)
{
psdout(10) << "Ending blocked outgoing recovery messages" << dendl;
context< PeeringMachine >().state->end_block_outgoing();
return discard_event();
}
boost::statechart::result PeeringState::Started::react(const AdvMap& advmap)
{
DECLARE_LOCALS;
psdout(10) << "Started advmap" << dendl;
ps->check_full_transition(advmap.lastmap, advmap.osdmap);
if (ps->should_restart_peering(
advmap.up_primary,
advmap.acting_primary,
advmap.newup,
advmap.newacting,
advmap.lastmap,
advmap.osdmap)) {
psdout(10) << "should_restart_peering, transitioning to Reset"
<< dendl;
post_event(advmap);
return transit< Reset >();
}
ps->remove_down_peer_info(advmap.osdmap);
return discard_event();
}
boost::statechart::result PeeringState::Started::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->close_section();
return discard_event();
}
boost::statechart::result PeeringState::Started::react(const QueryUnfound& q)
{
q.f->dump_string("state", "Started");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::Started::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_started_latency, dur);
ps->state_clear(PG_STATE_WAIT | PG_STATE_LAGGY);
}
/*--------Reset---------*/
PeeringState::Reset::Reset(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Reset")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->flushes_in_progress = 0;
ps->set_last_peering_reset();
ps->log_weirdness();
}
boost::statechart::result
PeeringState::Reset::react(const IntervalFlush&)
{
psdout(10) << "Ending blocked outgoing recovery messages" << dendl;
context< PeeringMachine >().state->end_block_outgoing();
return discard_event();
}
boost::statechart::result PeeringState::Reset::react(const AdvMap& advmap)
{
DECLARE_LOCALS;
psdout(10) << "Reset advmap" << dendl;
ps->check_full_transition(advmap.lastmap, advmap.osdmap);
if (ps->should_restart_peering(
advmap.up_primary,
advmap.acting_primary,
advmap.newup,
advmap.newacting,
advmap.lastmap,
advmap.osdmap)) {
psdout(10) << "should restart peering, calling start_peering_interval again"
<< dendl;
ps->start_peering_interval(
advmap.lastmap,
advmap.newup, advmap.up_primary,
advmap.newacting, advmap.acting_primary,
context< PeeringMachine >().get_cur_transaction());
}
ps->remove_down_peer_info(advmap.osdmap);
ps->check_past_interval_bounds();
return discard_event();
}
boost::statechart::result PeeringState::Reset::react(const ActMap&)
{
DECLARE_LOCALS;
if (ps->should_send_notify() && ps->get_primary().osd >= 0) {
ps->info.history.refresh_prior_readable_until_ub(
pl->get_mnow(),
ps->prior_readable_until_ub);
context< PeeringMachine >().send_notify(
ps->get_primary().osd,
pg_notify_t(
ps->get_primary().shard, ps->pg_whoami.shard,
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
ps->info,
ps->past_intervals));
}
ps->update_heartbeat_peers();
return transit< Started >();
}
boost::statechart::result PeeringState::Reset::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->close_section();
return discard_event();
}
boost::statechart::result PeeringState::Reset::react(const QueryUnfound& q)
{
q.f->dump_string("state", "Reset");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::Reset::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_reset_latency, dur);
}
/*-------Start---------*/
PeeringState::Start::Start(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Start")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
if (ps->is_primary()) {
psdout(1) << "transitioning to Primary" << dendl;
post_event(MakePrimary());
} else { //is_stray
psdout(1) << "transitioning to Stray" << dendl;
post_event(MakeStray());
}
}
void PeeringState::Start::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_start_latency, dur);
}
/*---------Primary--------*/
PeeringState::Primary::Primary(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ceph_assert(ps->want_acting.empty());
// set CREATING bit until we have peered for the first time.
if (ps->info.history.last_epoch_started == 0) {
ps->state_set(PG_STATE_CREATING);
// use the history timestamp, which ultimately comes from the
// monitor in the create case.
utime_t t = ps->info.history.last_scrub_stamp;
ps->info.stats.last_fresh = t;
ps->info.stats.last_active = t;
ps->info.stats.last_change = t;
ps->info.stats.last_peered = t;
ps->info.stats.last_clean = t;
ps->info.stats.last_unstale = t;
ps->info.stats.last_undegraded = t;
ps->info.stats.last_fullsized = t;
ps->info.stats.last_scrub_stamp = t;
ps->info.stats.last_deep_scrub_stamp = t;
ps->info.stats.last_clean_scrub_stamp = t;
}
}
boost::statechart::result PeeringState::Primary::react(const MNotifyRec& notevt)
{
DECLARE_LOCALS;
psdout(7) << "handle_pg_notify from osd." << notevt.from << dendl;
ps->proc_replica_info(
notevt.from, notevt.notify.info, notevt.notify.epoch_sent);
return discard_event();
}
boost::statechart::result PeeringState::Primary::react(const ActMap&)
{
DECLARE_LOCALS;
psdout(7) << "handle ActMap primary" << dendl;
pl->publish_stats_to_osd();
return discard_event();
}
boost::statechart::result PeeringState::Primary::react(
const SetForceRecovery&)
{
DECLARE_LOCALS;
ps->set_force_recovery(true);
return discard_event();
}
boost::statechart::result PeeringState::Primary::react(
const UnsetForceRecovery&)
{
DECLARE_LOCALS;
ps->set_force_recovery(false);
return discard_event();
}
boost::statechart::result PeeringState::Primary::react(
const RequestScrub& evt)
{
DECLARE_LOCALS;
if (ps->is_primary()) {
pl->scrub_requested(evt.deep, evt.repair);
psdout(10) << "marking for scrub" << dendl;
}
return discard_event();
}
boost::statechart::result PeeringState::Primary::react(
const SetForceBackfill&)
{
DECLARE_LOCALS;
ps->set_force_backfill(true);
return discard_event();
}
boost::statechart::result PeeringState::Primary::react(
const UnsetForceBackfill&)
{
DECLARE_LOCALS;
ps->set_force_backfill(false);
return discard_event();
}
void PeeringState::Primary::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->want_acting.clear();
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_primary_latency, dur);
pl->clear_primary_state();
ps->state_clear(PG_STATE_CREATING);
}
/*---------Peering--------*/
PeeringState::Peering::Peering(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering"),
history_les_bound(false)
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ceph_assert(!ps->is_peered());
ceph_assert(!ps->is_peering());
ceph_assert(ps->is_primary());
ps->state_set(PG_STATE_PEERING);
}
boost::statechart::result PeeringState::Peering::react(const AdvMap& advmap)
{
DECLARE_LOCALS;
psdout(10) << "Peering advmap" << dendl;
if (prior_set.affected_by_map(*(advmap.osdmap), ps->dpp)) {
psdout(1) << "Peering, affected_by_map, going to Reset" << dendl;
post_event(advmap);
return transit< Reset >();
}
ps->adjust_need_up_thru(advmap.osdmap);
ps->check_prior_readable_down_osds(advmap.osdmap);
return forward_event();
}
boost::statechart::result PeeringState::Peering::react(const QueryState& q)
{
DECLARE_LOCALS;
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->open_array_section("past_intervals");
ps->past_intervals.dump(q.f);
q.f->close_section();
q.f->open_array_section("probing_osds");
for (auto p = prior_set.probe.begin(); p != prior_set.probe.end(); ++p)
q.f->dump_stream("osd") << *p;
q.f->close_section();
if (prior_set.pg_down)
q.f->dump_string("blocked", "peering is blocked due to down osds");
q.f->open_array_section("down_osds_we_would_probe");
for (auto p = prior_set.down.begin(); p != prior_set.down.end(); ++p)
q.f->dump_int("osd", *p);
q.f->close_section();
q.f->open_array_section("peering_blocked_by");
for (auto p = prior_set.blocked_by.begin();
p != prior_set.blocked_by.end();
++p) {
q.f->open_object_section("osd");
q.f->dump_int("osd", p->first);
q.f->dump_int("current_lost_at", p->second);
q.f->dump_string("comment", "starting or marking this osd lost may let us proceed");
q.f->close_section();
}
q.f->close_section();
if (history_les_bound) {
q.f->open_array_section("peering_blocked_by_detail");
q.f->open_object_section("item");
q.f->dump_string("detail","peering_blocked_by_history_les_bound");
q.f->close_section();
q.f->close_section();
}
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::Peering::react(const QueryUnfound& q)
{
q.f->dump_string("state", "Peering");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::Peering::exit()
{
DECLARE_LOCALS;
psdout(10) << "Leaving Peering" << dendl;
context< PeeringMachine >().log_exit(state_name, enter_time);
ps->state_clear(PG_STATE_PEERING);
pl->clear_probe_targets();
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_peering_latency, dur);
}
/*------Backfilling-------*/
PeeringState::Backfilling::Backfilling(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Backfilling")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->backfill_reserved = true;
pl->on_backfill_reserved();
ps->state_clear(PG_STATE_BACKFILL_TOOFULL);
ps->state_clear(PG_STATE_BACKFILL_WAIT);
ps->state_set(PG_STATE_BACKFILLING);
pl->publish_stats_to_osd();
}
void PeeringState::Backfilling::backfill_release_reservations()
{
DECLARE_LOCALS;
pl->cancel_local_background_io_reservation();
for (auto it = ps->backfill_targets.begin();
it != ps->backfill_targets.end();
++it) {
ceph_assert(*it != ps->pg_whoami);
pl->send_cluster_message(
it->osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::RELEASE,
spg_t(ps->info.pgid.pgid, it->shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
}
}
void PeeringState::Backfilling::cancel_backfill()
{
DECLARE_LOCALS;
backfill_release_reservations();
pl->on_backfill_canceled();
}
boost::statechart::result
PeeringState::Backfilling::react(const Backfilled &c)
{
backfill_release_reservations();
return transit<Recovered>();
}
boost::statechart::result
PeeringState::Backfilling::react(const DeferBackfill &c)
{
DECLARE_LOCALS;
psdout(10) << "defer backfill, retry delay " << c.delay << dendl;
ps->state_set(PG_STATE_BACKFILL_WAIT);
ps->state_clear(PG_STATE_BACKFILLING);
cancel_backfill();
pl->schedule_event_after(
std::make_shared<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
RequestBackfill()),
c.delay);
return transit<NotBackfilling>();
}
boost::statechart::result
PeeringState::Backfilling::react(const UnfoundBackfill &c)
{
DECLARE_LOCALS;
psdout(10) << "backfill has unfound, can't continue" << dendl;
ps->state_set(PG_STATE_BACKFILL_UNFOUND);
ps->state_clear(PG_STATE_BACKFILLING);
cancel_backfill();
return transit<NotBackfilling>();
}
boost::statechart::result
PeeringState::Backfilling::react(const RemoteReservationRevokedTooFull &)
{
DECLARE_LOCALS;
ps->state_set(PG_STATE_BACKFILL_TOOFULL);
ps->state_clear(PG_STATE_BACKFILLING);
cancel_backfill();
pl->schedule_event_after(
std::make_shared<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
RequestBackfill()),
ps->cct->_conf->osd_backfill_retry_interval);
return transit<NotBackfilling>();
}
boost::statechart::result
PeeringState::Backfilling::react(const RemoteReservationRevoked &)
{
DECLARE_LOCALS;
ps->state_set(PG_STATE_BACKFILL_WAIT);
cancel_backfill();
if (ps->needs_backfill()) {
return transit<WaitLocalBackfillReserved>();
} else {
// raced with MOSDPGBackfill::OP_BACKFILL_FINISH, ignore
return discard_event();
}
}
void PeeringState::Backfilling::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->backfill_reserved = false;
ps->state_clear(PG_STATE_BACKFILLING);
ps->state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_backfilling_latency, dur);
}
/*--WaitRemoteBackfillReserved--*/
PeeringState::WaitRemoteBackfillReserved::WaitRemoteBackfillReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitRemoteBackfillReserved"),
backfill_osd_it(context< Active >().remote_shards_to_reserve_backfill.begin())
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_set(PG_STATE_BACKFILL_WAIT);
pl->publish_stats_to_osd();
post_event(RemoteBackfillReserved());
}
boost::statechart::result
PeeringState::WaitRemoteBackfillReserved::react(const RemoteBackfillReserved &evt)
{
DECLARE_LOCALS;
int64_t num_bytes = ps->info.stats.stats.sum.num_bytes;
psdout(10) << __func__ << " num_bytes " << num_bytes << dendl;
if (backfill_osd_it !=
context< Active >().remote_shards_to_reserve_backfill.end()) {
// The primary never backfills itself
ceph_assert(*backfill_osd_it != ps->pg_whoami);
pl->send_cluster_message(
backfill_osd_it->osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::REQUEST,
spg_t(context< PeeringMachine >().spgid.pgid, backfill_osd_it->shard),
ps->get_osdmap_epoch(),
ps->get_backfill_priority(),
num_bytes,
ps->peer_bytes[*backfill_osd_it]),
ps->get_osdmap_epoch());
++backfill_osd_it;
} else {
ps->peer_bytes.clear();
post_event(AllBackfillsReserved());
}
return discard_event();
}
void PeeringState::WaitRemoteBackfillReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_waitremotebackfillreserved_latency, dur);
}
void PeeringState::WaitRemoteBackfillReserved::retry()
{
DECLARE_LOCALS;
pl->cancel_local_background_io_reservation();
// Send CANCEL to all previously acquired reservations
set<pg_shard_t>::const_iterator it, begin, end;
begin = context< Active >().remote_shards_to_reserve_backfill.begin();
end = context< Active >().remote_shards_to_reserve_backfill.end();
ceph_assert(begin != end);
for (it = begin; it != backfill_osd_it; ++it) {
// The primary never backfills itself
ceph_assert(*it != ps->pg_whoami);
pl->send_cluster_message(
it->osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::RELEASE,
spg_t(context< PeeringMachine >().spgid.pgid, it->shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
}
ps->state_clear(PG_STATE_BACKFILL_WAIT);
pl->publish_stats_to_osd();
pl->schedule_event_after(
std::make_shared<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
RequestBackfill()),
ps->cct->_conf->osd_backfill_retry_interval);
}
boost::statechart::result
PeeringState::WaitRemoteBackfillReserved::react(const RemoteReservationRejectedTooFull &evt)
{
DECLARE_LOCALS;
ps->state_set(PG_STATE_BACKFILL_TOOFULL);
retry();
return transit<NotBackfilling>();
}
boost::statechart::result
PeeringState::WaitRemoteBackfillReserved::react(const RemoteReservationRevoked &evt)
{
retry();
return transit<NotBackfilling>();
}
/*--WaitLocalBackfillReserved--*/
PeeringState::WaitLocalBackfillReserved::WaitLocalBackfillReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitLocalBackfillReserved")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_set(PG_STATE_BACKFILL_WAIT);
pl->request_local_background_io_reservation(
ps->get_backfill_priority(),
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
LocalBackfillReserved()),
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
DeferBackfill(0.0)));
pl->publish_stats_to_osd();
}
void PeeringState::WaitLocalBackfillReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_waitlocalbackfillreserved_latency, dur);
}
/*----NotBackfilling------*/
PeeringState::NotBackfilling::NotBackfilling(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/NotBackfilling")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_REPAIR);
pl->publish_stats_to_osd();
}
boost::statechart::result PeeringState::NotBackfilling::react(const QueryUnfound& q)
{
DECLARE_LOCALS;
ps->query_unfound(q.f, "NotBackfilling");
return discard_event();
}
boost::statechart::result
PeeringState::NotBackfilling::react(const RemoteBackfillReserved &evt)
{
return discard_event();
}
boost::statechart::result
PeeringState::NotBackfilling::react(const RemoteReservationRejectedTooFull &evt)
{
return discard_event();
}
void PeeringState::NotBackfilling::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_BACKFILL_UNFOUND);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_notbackfilling_latency, dur);
}
/*----NotRecovering------*/
PeeringState::NotRecovering::NotRecovering(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/NotRecovering")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_REPAIR);
pl->publish_stats_to_osd();
}
boost::statechart::result PeeringState::NotRecovering::react(const QueryUnfound& q)
{
DECLARE_LOCALS;
ps->query_unfound(q.f, "NotRecovering");
return discard_event();
}
void PeeringState::NotRecovering::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_RECOVERY_UNFOUND);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_notrecovering_latency, dur);
}
/*---RepNotRecovering----*/
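// Replica-side idle state. Reservation requests from the primary
// (RequestRecoveryPrio / RequestBackfillPrio, handled further below) queue a
// remote reservation and move to the matching RepWait* state.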
PeeringState::RepNotRecovering::RepNotRecovering(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepNotRecovering")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result
PeeringState::RepNotRecovering::react(const RejectTooFullRemoteReservation &evt)
{
DECLARE_LOCALS;
ps->reject_reservation();
post_event(RemoteReservationRejectedTooFull());
return discard_event();
}
void PeeringState::RepNotRecovering::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_repnotrecovering_latency, dur);
}
/*---RepWaitRecoveryReserved--*/
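// Replica waits for its reservation to come through; RemoteRecoveryReserved
// sends GRANT back to the primary and enters RepRecovering, while a cancel
// releases the reservation and returns to RepNotRecovering.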
PeeringState::RepWaitRecoveryReserved::RepWaitRecoveryReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepWaitRecoveryReserved")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result
PeeringState::RepWaitRecoveryReserved::react(const RemoteRecoveryReserved &evt)
{
DECLARE_LOCALS;
pl->send_cluster_message(
ps->primary.osd,
TOPNSPC::make_message<MRecoveryReserve>(
MRecoveryReserve::GRANT,
spg_t(ps->info.pgid.pgid, ps->primary.shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
return transit<RepRecovering>();
}
boost::statechart::result
PeeringState::RepWaitRecoveryReserved::react(
const RemoteReservationCanceled &evt)
{
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->cancel_remote_recovery_reservation();
return transit<RepNotRecovering>();
}
void PeeringState::RepWaitRecoveryReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_repwaitrecoveryreserved_latency, dur);
}
/*-RepWaitBackfillReserved*/
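// Backfill counterpart of RepWaitRecoveryReserved: wait for the reservation,
// then GRANT to the primary; TOOFULL rejections and cancels drop the
// reservation and return to RepNotRecovering.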
PeeringState::RepWaitBackfillReserved::RepWaitBackfillReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepWaitBackfillReserved")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result
PeeringState::RepNotRecovering::react(const RequestBackfillPrio &evt)
{
DECLARE_LOCALS;
if (!pl->try_reserve_recovery_space(
evt.primary_num_bytes, evt.local_num_bytes)) {
post_event(RejectTooFullRemoteReservation());
} else {
PGPeeringEventURef preempt;
if (HAVE_FEATURE(ps->upacting_features, RECOVERY_RESERVATION_2)) {
// older peers will interpret preemption as TOOFULL
preempt = std::make_unique<PGPeeringEvent>(
pl->get_osdmap_epoch(),
pl->get_osdmap_epoch(),
RemoteBackfillPreempted());
}
pl->request_remote_recovery_reservation(
evt.priority,
std::make_unique<PGPeeringEvent>(
pl->get_osdmap_epoch(),
pl->get_osdmap_epoch(),
RemoteBackfillReserved()),
std::move(preempt));
}
return transit<RepWaitBackfillReserved>();
}
boost::statechart::result
PeeringState::RepNotRecovering::react(const RequestRecoveryPrio &evt)
{
DECLARE_LOCALS;
// fall back to a local reckoning of priority if the primary doesn't pass one
// (pre-mimic compat)
int prio = evt.priority ? evt.priority : ps->get_recovery_priority();
PGPeeringEventURef preempt;
if (HAVE_FEATURE(ps->upacting_features, RECOVERY_RESERVATION_2)) {
// older peers can't handle this
preempt = std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
RemoteRecoveryPreempted());
}
pl->request_remote_recovery_reservation(
prio,
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
RemoteRecoveryReserved()),
std::move(preempt));
return transit<RepWaitRecoveryReserved>();
}
void PeeringState::RepWaitBackfillReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_repwaitbackfillreserved_latency, dur);
}
boost::statechart::result
PeeringState::RepWaitBackfillReserved::react(const RemoteBackfillReserved &evt)
{
DECLARE_LOCALS;
pl->send_cluster_message(
ps->primary.osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::GRANT,
spg_t(ps->info.pgid.pgid, ps->primary.shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
return transit<RepRecovering>();
}
boost::statechart::result
PeeringState::RepWaitBackfillReserved::react(
const RejectTooFullRemoteReservation &evt)
{
DECLARE_LOCALS;
ps->reject_reservation();
post_event(RemoteReservationRejectedTooFull());
return discard_event();
}
boost::statechart::result
PeeringState::RepWaitBackfillReserved::react(
const RemoteReservationRejectedTooFull &evt)
{
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->cancel_remote_recovery_reservation();
return transit<RepNotRecovering>();
}
boost::statechart::result
PeeringState::RepWaitBackfillReserved::react(
const RemoteReservationCanceled &evt)
{
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->cancel_remote_recovery_reservation();
return transit<RepNotRecovering>();
}
/*---RepRecovering-------*/
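// Replica is serving recovery/backfill for the primary. Preemption or a
// TOOFULL condition sends a REVOKE back to the primary; the reserved space
// and the reservation itself are released on exit.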
PeeringState::RepRecovering::RepRecovering(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive/RepRecovering")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result
PeeringState::RepRecovering::react(const RemoteRecoveryPreempted &)
{
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->send_cluster_message(
ps->primary.osd,
TOPNSPC::make_message<MRecoveryReserve>(
MRecoveryReserve::REVOKE,
spg_t(ps->info.pgid.pgid, ps->primary.shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
return discard_event();
}
boost::statechart::result
PeeringState::RepRecovering::react(const BackfillTooFull &)
{
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->send_cluster_message(
ps->primary.osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::REVOKE_TOOFULL,
spg_t(ps->info.pgid.pgid, ps->primary.shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
return discard_event();
}
boost::statechart::result
PeeringState::RepRecovering::react(const RemoteBackfillPreempted &)
{
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->send_cluster_message(
ps->primary.osd,
TOPNSPC::make_message<MBackfillReserve>(
MBackfillReserve::REVOKE,
spg_t(ps->info.pgid.pgid, ps->primary.shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
return discard_event();
}
void PeeringState::RepRecovering::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->cancel_remote_recovery_reservation();
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_reprecovering_latency, dur);
}
/*------Activating--------*/
PeeringState::Activating::Activating(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Activating")
{
context< PeeringMachine >().log_enter(state_name);
}
void PeeringState::Activating::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_activating_latency, dur);
}
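/*----WaitLocalRecoveryReserved----*/
// Primary first checks that the acting/recovery/backfill OSDs are not full,
// then queues a local background I/O reservation; RecoveryTooFull defers and
// retries recovery after osd_recovery_retry_interval.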
PeeringState::WaitLocalRecoveryReserved::WaitLocalRecoveryReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitLocalRecoveryReserved")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
// Make sure all nodes that are part of the recovery aren't full
if (!ps->cct->_conf->osd_debug_skip_full_check_in_recovery &&
ps->get_osdmap()->check_full(ps->acting_recovery_backfill)) {
post_event(RecoveryTooFull());
return;
}
ps->state_clear(PG_STATE_RECOVERY_TOOFULL);
ps->state_set(PG_STATE_RECOVERY_WAIT);
pl->request_local_background_io_reservation(
ps->get_recovery_priority(),
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
LocalRecoveryReserved()),
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
DeferRecovery(0.0)));
pl->publish_stats_to_osd();
}
boost::statechart::result
PeeringState::WaitLocalRecoveryReserved::react(const RecoveryTooFull &evt)
{
DECLARE_LOCALS;
ps->state_set(PG_STATE_RECOVERY_TOOFULL);
pl->schedule_event_after(
std::make_shared<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
DoRecovery()),
ps->cct->_conf->osd_recovery_retry_interval);
return transit<NotRecovering>();
}
void PeeringState::WaitLocalRecoveryReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_waitlocalrecoveryreserved_latency, dur);
}
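/*----WaitRemoteRecoveryReserved----*/
// Walk remote_shards_to_reserve_recovery, sending MRecoveryReserve::REQUEST to
// one shard at a time; each RemoteRecoveryReserved ack advances the iterator,
// and AllRemotesReserved is posted once every shard has been reserved.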
PeeringState::WaitRemoteRecoveryReserved::WaitRemoteRecoveryReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/WaitRemoteRecoveryReserved"),
remote_recovery_reservation_it(context< Active >().remote_shards_to_reserve_recovery.begin())
{
context< PeeringMachine >().log_enter(state_name);
post_event(RemoteRecoveryReserved());
}
boost::statechart::result
PeeringState::WaitRemoteRecoveryReserved::react(const RemoteRecoveryReserved &evt) {
DECLARE_LOCALS;
if (remote_recovery_reservation_it !=
context< Active >().remote_shards_to_reserve_recovery.end()) {
ceph_assert(*remote_recovery_reservation_it != ps->pg_whoami);
pl->send_cluster_message(
remote_recovery_reservation_it->osd,
TOPNSPC::make_message<MRecoveryReserve>(
MRecoveryReserve::REQUEST,
spg_t(context< PeeringMachine >().spgid.pgid,
remote_recovery_reservation_it->shard),
ps->get_osdmap_epoch(),
ps->get_recovery_priority()),
ps->get_osdmap_epoch());
++remote_recovery_reservation_it;
} else {
post_event(AllRemotesReserved());
}
return discard_event();
}
void PeeringState::WaitRemoteRecoveryReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_waitremoterecoveryreserved_latency, dur);
}
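/*----Recovering----*/
// Primary-side recovery in progress: RECOVERING is set, and remote/local
// reservations are released when recovery completes, is deferred, switches to
// backfill, or hits unfound objects.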
PeeringState::Recovering::Recovering(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Recovering")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_RECOVERY_WAIT);
ps->state_clear(PG_STATE_RECOVERY_TOOFULL);
ps->state_set(PG_STATE_RECOVERING);
pl->on_recovery_reserved();
ceph_assert(!ps->state_test(PG_STATE_ACTIVATING));
pl->publish_stats_to_osd();
}
void PeeringState::Recovering::release_reservations(bool cancel)
{
DECLARE_LOCALS;
ceph_assert(cancel || !ps->pg_log.get_missing().have_missing());
// release remote reservations
for (auto i = context< Active >().remote_shards_to_reserve_recovery.begin();
i != context< Active >().remote_shards_to_reserve_recovery.end();
++i) {
if (*i == ps->pg_whoami) // skip myself
continue;
pl->send_cluster_message(
i->osd,
TOPNSPC::make_message<MRecoveryReserve>(
MRecoveryReserve::RELEASE,
spg_t(ps->info.pgid.pgid, i->shard),
ps->get_osdmap_epoch()),
ps->get_osdmap_epoch());
}
}
boost::statechart::result
PeeringState::Recovering::react(const AllReplicasRecovered &evt)
{
DECLARE_LOCALS;
ps->state_clear(PG_STATE_FORCED_RECOVERY);
release_reservations();
pl->cancel_local_background_io_reservation();
return transit<Recovered>();
}
boost::statechart::result
PeeringState::Recovering::react(const RequestBackfill &evt)
{
DECLARE_LOCALS;
release_reservations();
ps->state_clear(PG_STATE_FORCED_RECOVERY);
pl->cancel_local_background_io_reservation();
pl->publish_stats_to_osd();
// transition any async_recovery_targets back into acting
// so the pg won't have to stay undersized for long,
// as backfill might take a long time to complete.
if (!ps->async_recovery_targets.empty()) {
pg_shard_t auth_log_shard;
bool history_les_bound = false;
// FIXME: Uh-oh we have to check this return value; choose_acting can fail!
ps->choose_acting(auth_log_shard, true, &history_les_bound);
}
return transit<WaitLocalBackfillReserved>();
}
boost::statechart::result
PeeringState::Recovering::react(const DeferRecovery &evt)
{
DECLARE_LOCALS;
if (!ps->state_test(PG_STATE_RECOVERING)) {
// we may have finished recovery and have an AllReplicasRecovered
// event queued to move us to the next state.
psdout(10) << "got defer recovery but not recovering" << dendl;
return discard_event();
}
psdout(10) << "defer recovery, retry delay " << evt.delay << dendl;
ps->state_set(PG_STATE_RECOVERY_WAIT);
pl->cancel_local_background_io_reservation();
release_reservations(true);
pl->schedule_event_after(
std::make_shared<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
DoRecovery()),
evt.delay);
return transit<NotRecovering>();
}
boost::statechart::result
PeeringState::Recovering::react(const UnfoundRecovery &evt)
{
DECLARE_LOCALS;
psdout(10) << "recovery has unfound, can't continue" << dendl;
ps->state_set(PG_STATE_RECOVERY_UNFOUND);
pl->cancel_local_background_io_reservation();
release_reservations(true);
return transit<NotRecovering>();
}
void PeeringState::Recovering::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
ps->state_clear(PG_STATE_RECOVERING);
pl->get_peering_perf().tinc(rs_recovering_latency, dur);
}
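/*----Recovered----*/
// All replicas recovered; re-evaluate the acting set and, once every replica
// has activated and no async recovery targets remain, post GoClean.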
PeeringState::Recovered::Recovered(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Recovered")
{
pg_shard_t auth_log_shard;
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ceph_assert(!ps->needs_recovery());
// if we finished backfill, all acting are active; recheck if
// DEGRADED | UNDERSIZED is appropriate.
ceph_assert(!ps->acting_recovery_backfill.empty());
if (ps->get_osdmap()->get_pg_size(context< PeeringMachine >().spgid.pgid) <=
ps->acting_recovery_backfill.size()) {
ps->state_clear(PG_STATE_FORCED_BACKFILL | PG_STATE_FORCED_RECOVERY);
pl->publish_stats_to_osd();
}
// adjust acting set? (e.g. because backfill completed...)
bool history_les_bound = false;
if (ps->acting != ps->up && !ps->choose_acting(auth_log_shard,
true, &history_les_bound)) {
ceph_assert(ps->want_acting.size());
} else if (!ps->async_recovery_targets.empty()) {
// FIXME: Uh-oh we have to check this return value; choose_acting can fail!
ps->choose_acting(auth_log_shard, true, &history_les_bound);
}
if (context< Active >().all_replicas_activated &&
ps->async_recovery_targets.empty())
post_event(GoClean());
}
void PeeringState::Recovered::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_recovered_latency, dur);
}
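/*----Clean----*/
// Mark the PG clean and register the on_clean callback on the current
// transaction; CLEAN is cleared again on exit.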
PeeringState::Clean::Clean(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active/Clean")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
if (ps->info.last_complete != ps->info.last_update) {
ceph_abort();
}
ps->try_mark_clean();
context< PeeringMachine >().get_cur_transaction().register_on_commit(
pl->on_clean());
}
void PeeringState::Clean::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_CLEAN);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_clean_latency, dur);
}
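// Build a set with at most one shard per distinct OSD from `in`, skipping
// `skip`; used below so each remote OSD is asked for a recovery/backfill
// reservation only once.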
template <typename T>
set<pg_shard_t> unique_osd_shard_set(const pg_shard_t & skip, const T &in)
{
set<int> osds_found;
set<pg_shard_t> out;
for (auto i = in.begin(); i != in.end(); ++i) {
if (*i != skip && !osds_found.count(i->osd)) {
osds_found.insert(i->osd);
out.insert(*i);
}
}
return out;
}
/*---------Active---------*/
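// Primary active state: activate the PG, then track which shards have
// committed the activation (peer_activated / blocked_by) until
// all_activated_and_committed() fires.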
PeeringState::Active::Active(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Active"),
remote_shards_to_reserve_recovery(
unique_osd_shard_set(
context< PeeringMachine >().state->pg_whoami,
context< PeeringMachine >().state->acting_recovery_backfill)),
remote_shards_to_reserve_backfill(
unique_osd_shard_set(
context< PeeringMachine >().state->pg_whoami,
context< PeeringMachine >().state->backfill_targets)),
all_replicas_activated(false)
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ceph_assert(!ps->backfill_reserved);
ceph_assert(ps->is_primary());
psdout(10) << "In Active, about to call activate" << dendl;
ps->start_flush(context< PeeringMachine >().get_cur_transaction());
ps->activate(context< PeeringMachine >().get_cur_transaction(),
ps->get_osdmap_epoch(),
context< PeeringMachine >().get_recovery_ctx());
// everyone has to commit/ack before we are truly active
ps->blocked_by.clear();
for (auto p = ps->acting_recovery_backfill.begin();
p != ps->acting_recovery_backfill.end();
++p) {
if (p->shard != ps->pg_whoami.shard) {
ps->blocked_by.insert(p->shard);
}
}
pl->publish_stats_to_osd();
psdout(10) << "Activate Finished" << dendl;
}
boost::statechart::result PeeringState::Active::react(const AdvMap& advmap)
{
DECLARE_LOCALS;
if (ps->should_restart_peering(
advmap.up_primary,
advmap.acting_primary,
advmap.newup,
advmap.newacting,
advmap.lastmap,
advmap.osdmap)) {
psdout(10) << "Active advmap interval change, fast return" << dendl;
return forward_event();
}
psdout(10) << "Active advmap" << dendl;
bool need_publish = false;
pl->on_active_advmap(advmap.osdmap);
if (ps->dirty_big_info) {
// share updated purged_snaps to mgr/mon so that we (a) stop reporting
// purged snaps and (b) perhaps share more snaps that we have purged
// but didn't fit in pg_stat_t.
need_publish = true;
ps->share_pg_info();
}
bool need_acting_change = false;
for (size_t i = 0; i < ps->want_acting.size(); i++) {
int osd = ps->want_acting[i];
if (!advmap.osdmap->is_up(osd)) {
pg_shard_t osd_with_shard(osd, shard_id_t(i));
if (!ps->is_acting(osd_with_shard) && !ps->is_up(osd_with_shard)) {
psdout(10) << "Active stray osd." << osd << " in want_acting is down"
<< dendl;
need_acting_change = true;
}
}
}
if (need_acting_change) {
psdout(10) << "Active need acting change, call choose_acting again"
<< dendl;
// possibly because we re-add some strays into the acting set and
// some of them then go down in a subsequent map before we could see
// the map changing the pg temp.
// call choose_acting again to clear them out.
// note that we leave restrict_to_up_acting to false in order to
// not overkill any chosen stray that is still alive.
pg_shard_t auth_log_shard;
bool history_les_bound = false;
ps->remove_down_peer_info(advmap.osdmap);
ps->choose_acting(auth_log_shard, false, &history_les_bound, true);
}
/* Check for changes in pool size (if the acting set changed as a result,
* this does not matter) */
if (advmap.lastmap->get_pg_size(ps->info.pgid.pgid) !=
ps->get_osdmap()->get_pg_size(ps->info.pgid.pgid)) {
if (ps->get_osdmap()->get_pg_size(ps->info.pgid.pgid) <=
ps->actingset.size()) {
ps->state_clear(PG_STATE_UNDERSIZED);
} else {
ps->state_set(PG_STATE_UNDERSIZED);
}
// degraded changes will be detected by call from publish_stats_to_osd()
need_publish = true;
}
// if we haven't reported our PG stats in a long time, do so now.
if (ps->info.stats.reported_epoch + ps->cct->_conf->osd_pg_stat_report_interval_max < advmap.osdmap->get_epoch()) {
psdout(20) << "reporting stats to osd after " << (advmap.osdmap->get_epoch() - ps->info.stats.reported_epoch)
<< " epochs" << dendl;
need_publish = true;
}
if (need_publish)
pl->publish_stats_to_osd();
if (ps->check_prior_readable_down_osds(advmap.osdmap)) {
pl->recheck_readable();
}
return forward_event();
}
boost::statechart::result PeeringState::Active::react(const ActMap&)
{
DECLARE_LOCALS;
psdout(10) << "Active: handling ActMap" << dendl;
ceph_assert(ps->is_primary());
pl->on_active_actmap();
if (ps->have_unfound()) {
// object may have become unfound
ps->discover_all_missing(context<PeeringMachine>().get_recovery_ctx().msgs);
}
uint64_t unfound = ps->missing_loc.num_unfound();
if (unfound > 0 &&
ps->all_unfound_are_queried_or_lost(ps->get_osdmap())) {
if (ps->cct->_conf->osd_auto_mark_unfound_lost) {
pl->get_clog_error() << context< PeeringMachine >().spgid.pgid << " has " << unfound
<< " objects unfound and apparently lost, would automatically "
<< "mark these objects lost but this feature is not yet implemented "
<< "(osd_auto_mark_unfound_lost)";
} else
pl->get_clog_error() << context< PeeringMachine >().spgid.pgid << " has "
<< unfound << " objects unfound and apparently lost";
}
return forward_event();
}
boost::statechart::result PeeringState::Active::react(const MNotifyRec& notevt)
{
DECLARE_LOCALS;
ceph_assert(ps->is_primary());
if (ps->peer_info.count(notevt.from)) {
psdout(10) << "Active: got notify from " << notevt.from
<< ", already have info from that osd, ignoring"
<< dendl;
} else if (ps->peer_purged.count(notevt.from)) {
psdout(10) << "Active: got notify from " << notevt.from
<< ", already purged that peer, ignoring"
<< dendl;
} else {
psdout(10) << "Active: got notify from " << notevt.from
<< ", calling proc_replica_info and discover_all_missing"
<< dendl;
ps->proc_replica_info(
notevt.from, notevt.notify.info, notevt.notify.epoch_sent);
if (ps->have_unfound() || (ps->is_degraded() && ps->might_have_unfound.count(notevt.from))) {
ps->discover_all_missing(
context<PeeringMachine>().get_recovery_ctx().msgs);
}
// check if it is a previously down acting member that's coming back.
// if so, request pg_temp change to trigger a new interval transition
pg_shard_t auth_log_shard;
bool history_les_bound = false;
// FIXME: Uh-oh we have to check this return value; choose_acting can fail!
ps->choose_acting(auth_log_shard, false, &history_les_bound, true);
if (!ps->want_acting.empty() && ps->want_acting != ps->acting) {
psdout(10) << "Active: got notify from previous acting member "
<< notevt.from << ", requesting pg_temp change"
<< dendl;
}
}
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const MTrim& trim)
{
DECLARE_LOCALS;
ceph_assert(ps->is_primary());
// peer is informing us of their last_complete_ondisk
ldout(ps->cct,10) << " replica osd." << trim.from << " lcod " << trim.trim_to << dendl;
ps->update_peer_last_complete_ondisk(pg_shard_t{trim.from, trim.shard},
trim.trim_to);
// trim log when the pg is recovered
ps->calc_min_last_complete_ondisk();
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const MInfoRec& infoevt)
{
DECLARE_LOCALS;
ceph_assert(ps->is_primary());
ceph_assert(!ps->acting_recovery_backfill.empty());
if (infoevt.lease_ack) {
ps->proc_lease_ack(infoevt.from.osd, *infoevt.lease_ack);
}
// don't update history (yet) if we are active and primary; the replica
// may be telling us they have activated (and committed) but we can't
// share that until _everyone_ does the same.
if (ps->is_acting_recovery_backfill(infoevt.from) &&
ps->peer_activated.count(infoevt.from) == 0) {
psdout(10) << " peer osd." << infoevt.from
<< " activated and committed" << dendl;
ps->peer_activated.insert(infoevt.from);
ps->blocked_by.erase(infoevt.from.shard);
pl->publish_stats_to_osd();
if (ps->peer_activated.size() == ps->acting_recovery_backfill.size()) {
all_activated_and_committed();
}
}
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const MLogRec& logevt)
{
DECLARE_LOCALS;
psdout(10) << "searching osd." << logevt.from
<< " log for unfound items" << dendl;
ps->proc_replica_log(
logevt.msg->info, logevt.msg->log, std::move(logevt.msg->missing), logevt.from);
bool got_missing = ps->search_for_missing(
ps->peer_info[logevt.from],
ps->peer_missing[logevt.from],
logevt.from,
context< PeeringMachine >().get_recovery_ctx());
// If there are missing AND we are "fully" active then start recovery now
if (got_missing && ps->state_test(PG_STATE_ACTIVE)) {
post_event(DoRecovery());
}
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const QueryState& q)
{
DECLARE_LOCALS;
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
{
q.f->open_array_section("might_have_unfound");
for (auto p = ps->might_have_unfound.begin();
p != ps->might_have_unfound.end();
++p) {
q.f->open_object_section("osd");
q.f->dump_stream("osd") << *p;
if (ps->peer_missing.count(*p)) {
q.f->dump_string("status", "already probed");
} else if (ps->peer_missing_requested.count(*p)) {
q.f->dump_string("status", "querying");
} else if (!ps->get_osdmap()->is_up(p->osd)) {
q.f->dump_string("status", "osd is down");
} else {
q.f->dump_string("status", "not queried");
}
q.f->close_section();
}
q.f->close_section();
}
{
q.f->open_object_section("recovery_progress");
q.f->open_array_section("backfill_targets");
for (auto p = ps->backfill_targets.begin();
p != ps->backfill_targets.end(); ++p)
q.f->dump_stream("replica") << *p;
q.f->close_section();
pl->dump_recovery_info(q.f);
q.f->close_section();
}
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::Active::react(const QueryUnfound& q)
{
DECLARE_LOCALS;
ps->query_unfound(q.f, "Active");
return discard_event();
}
boost::statechart::result PeeringState::Active::react(
const ActivateCommitted &evt)
{
DECLARE_LOCALS;
ceph_assert(!ps->peer_activated.count(ps->pg_whoami));
ps->peer_activated.insert(ps->pg_whoami);
psdout(10) << "_activate_committed " << evt.epoch
<< " peer_activated now " << ps->peer_activated
<< " last_interval_started "
<< ps->info.history.last_interval_started
<< " last_epoch_started "
<< ps->info.history.last_epoch_started
<< " same_interval_since "
<< ps->info.history.same_interval_since
<< dendl;
ceph_assert(!ps->acting_recovery_backfill.empty());
if (ps->peer_activated.size() == ps->acting_recovery_backfill.size())
all_activated_and_committed();
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const AllReplicasActivated &evt)
{
DECLARE_LOCALS;
pg_t pgid = context< PeeringMachine >().spgid.pgid;
all_replicas_activated = true;
ps->state_clear(PG_STATE_ACTIVATING);
ps->state_clear(PG_STATE_CREATING);
ps->state_clear(PG_STATE_PREMERGE);
bool merge_target;
if (ps->pool.info.is_pending_merge(pgid, &merge_target)) {
ps->state_set(PG_STATE_PEERED);
ps->state_set(PG_STATE_PREMERGE);
if (ps->actingset.size() != ps->get_osdmap()->get_pg_size(pgid)) {
if (merge_target) {
pg_t src = pgid;
src.set_ps(ps->pool.info.get_pg_num_pending());
ceph_assert(src.get_parent() == pgid);
pl->set_not_ready_to_merge_target(pgid, src);
} else {
pl->set_not_ready_to_merge_source(pgid);
}
}
} else if (!ps->acting_set_writeable()) {
ps->state_set(PG_STATE_PEERED);
} else {
ps->state_set(PG_STATE_ACTIVE);
}
auto mnow = pl->get_mnow();
if (ps->prior_readable_until_ub > mnow) {
psdout(10) << " waiting for prior_readable_until_ub "
<< ps->prior_readable_until_ub << " > mnow " << mnow << dendl;
ps->state_set(PG_STATE_WAIT);
pl->queue_check_readable(
ps->last_peering_reset,
ps->prior_readable_until_ub - mnow);
} else {
psdout(10) << " mnow " << mnow << " >= prior_readable_until_ub "
<< ps->prior_readable_until_ub << dendl;
}
if (ps->pool.info.has_flag(pg_pool_t::FLAG_CREATING)) {
pl->send_pg_created(pgid);
}
psdout(1) << __func__ << " AllReplicasActivated Activating complete" << dendl;
ps->info.history.last_epoch_started = ps->info.last_epoch_started;
ps->info.history.last_interval_started = ps->info.last_interval_started;
ps->dirty_info = true;
ps->share_pg_info();
pl->publish_stats_to_osd();
pl->on_activate_complete();
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const RenewLease& rl)
{
DECLARE_LOCALS;
ps->proc_renew_lease();
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const MLeaseAck& la)
{
DECLARE_LOCALS;
ps->proc_lease_ack(la.from, la.lease_ack);
return discard_event();
}
boost::statechart::result PeeringState::Active::react(const CheckReadable &evt)
{
DECLARE_LOCALS;
pl->recheck_readable();
return discard_event();
}
/*
* update info.history.last_epoch_started ONLY after we and all
* replicas have activated AND committed the activate transaction
* (i.e. the peering results are stable on disk).
*/
void PeeringState::Active::all_activated_and_committed()
{
DECLARE_LOCALS;
psdout(10) << "all_activated_and_committed" << dendl;
ceph_assert(ps->is_primary());
ceph_assert(ps->peer_activated.size() == ps->acting_recovery_backfill.size());
ceph_assert(!ps->acting_recovery_backfill.empty());
ceph_assert(ps->blocked_by.empty());
ceph_assert(HAVE_FEATURE(ps->upacting_features, SERVER_OCTOPUS));
// this is overkill when the activation is quick, but when it is slow it
// is important, because the lease was renewed by the activate itself but we
// don't know how long ago that was, and simply scheduling now may leave
// a gap in lease coverage. keep it simple and aggressively renew.
ps->renew_lease(pl->get_mnow());
ps->send_lease();
ps->schedule_renew_lease();
// Degraded?
ps->update_calc_stats();
if (ps->info.stats.stats.sum.num_objects_degraded) {
ps->state_set(PG_STATE_DEGRADED);
} else {
ps->state_clear(PG_STATE_DEGRADED);
}
post_event(PeeringState::AllReplicasActivated());
}
void PeeringState::Active::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
pl->cancel_local_background_io_reservation();
ps->blocked_by.clear();
ps->backfill_reserved = false;
ps->state_clear(PG_STATE_ACTIVATING);
ps->state_clear(PG_STATE_DEGRADED);
ps->state_clear(PG_STATE_UNDERSIZED);
ps->state_clear(PG_STATE_BACKFILL_TOOFULL);
ps->state_clear(PG_STATE_BACKFILL_WAIT);
ps->state_clear(PG_STATE_RECOVERY_WAIT);
ps->state_clear(PG_STATE_RECOVERY_TOOFULL);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_active_latency, dur);
pl->on_active_exit();
}
/*------ReplicaActive-----*/
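// Replica-side active state: activate when instructed by the primary, ack
// leases, merge incoming logs, and trim on request.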
PeeringState::ReplicaActive::ReplicaActive(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ReplicaActive")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->start_flush(context< PeeringMachine >().get_cur_transaction());
}
boost::statechart::result PeeringState::ReplicaActive::react(
const Activate& actevt) {
DECLARE_LOCALS;
psdout(10) << "In ReplicaActive, about to call activate" << dendl;
ps->activate(
context< PeeringMachine >().get_cur_transaction(),
actevt.activation_epoch,
context< PeeringMachine >().get_recovery_ctx());
psdout(10) << "Activate Finished" << dendl;
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(
const ActivateCommitted &evt)
{
DECLARE_LOCALS;
psdout(10) << __func__ << " " << evt.epoch << " telling primary" << dendl;
auto &rctx = context<PeeringMachine>().get_recovery_ctx();
auto epoch = ps->get_osdmap_epoch();
pg_info_t i = ps->info;
i.history.last_epoch_started = evt.activation_epoch;
i.history.last_interval_started = i.history.same_interval_since;
rctx.send_info(
ps->get_primary().osd,
spg_t(ps->info.pgid.pgid, ps->get_primary().shard),
epoch,
epoch,
i,
{}, /* lease */
ps->get_lease_ack());
if (ps->acting_set_writeable()) {
ps->state_set(PG_STATE_ACTIVE);
} else {
ps->state_set(PG_STATE_PEERED);
}
pl->on_activate_committed();
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const MLease& l)
{
DECLARE_LOCALS;
spg_t spgid = context< PeeringMachine >().spgid;
epoch_t epoch = pl->get_osdmap_epoch();
ps->proc_lease(l.lease);
pl->send_cluster_message(
ps->get_primary().osd,
TOPNSPC::make_message<MOSDPGLeaseAck>(epoch,
spg_t(spgid.pgid, ps->get_primary().shard),
ps->get_lease_ack()),
epoch);
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const MInfoRec& infoevt)
{
DECLARE_LOCALS;
ps->proc_primary_info(context<PeeringMachine>().get_cur_transaction(),
infoevt.info);
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const MLogRec& logevt)
{
DECLARE_LOCALS;
psdout(10) << "received log from " << logevt.from << dendl;
ObjectStore::Transaction &t = context<PeeringMachine>().get_cur_transaction();
ps->merge_log(t, logevt.msg->info, std::move(logevt.msg->log), logevt.from);
ceph_assert(ps->pg_log.get_head() == ps->info.last_update);
if (logevt.msg->lease) {
ps->proc_lease(*logevt.msg->lease);
}
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const MTrim& trim)
{
DECLARE_LOCALS;
// primary is instructing us to trim
ps->pg_log.trim(trim.trim_to, ps->info);
ps->dirty_info = true;
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const ActMap&)
{
DECLARE_LOCALS;
if (ps->should_send_notify() && ps->get_primary().osd >= 0) {
ps->info.history.refresh_prior_readable_until_ub(
pl->get_mnow(), ps->prior_readable_until_ub);
context< PeeringMachine >().send_notify(
ps->get_primary().osd,
pg_notify_t(
ps->get_primary().shard, ps->pg_whoami.shard,
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
ps->info,
ps->past_intervals));
}
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(
const MQuery& query)
{
DECLARE_LOCALS;
ps->fulfill_query(query, context<PeeringMachine>().get_recovery_ctx());
return discard_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::ReplicaActive::react(const QueryUnfound& q)
{
q.f->dump_string("state", "ReplicaActive");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::ReplicaActive::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
pl->unreserve_recovery_space();
pl->cancel_remote_recovery_reservation();
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_replicaactive_latency, dur);
ps->min_last_complete_ondisk = eversion_t();
}
/*-------Stray---*/
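// The PG exists here but this OSD is neither primary nor an active replica;
// wait for the primary's info/log to become ReplicaActive, or start deletion
// if the pool no longer exists.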
PeeringState::Stray::Stray(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Stray")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ceph_assert(!ps->is_peered());
ceph_assert(!ps->is_peering());
ceph_assert(!ps->is_primary());
if (!ps->get_osdmap()->have_pg_pool(ps->info.pgid.pgid.pool())) {
ldout(ps->cct,10) << __func__ << " pool is deleted" << dendl;
post_event(DeleteStart());
} else {
ps->start_flush(context< PeeringMachine >().get_cur_transaction());
}
}
boost::statechart::result PeeringState::Stray::react(const MLogRec& logevt)
{
DECLARE_LOCALS;
MOSDPGLog *msg = logevt.msg.get();
psdout(10) << "got info+log from osd." << logevt.from << " " << msg->info << " " << msg->log << dendl;
ObjectStore::Transaction &t = context<PeeringMachine>().get_cur_transaction();
if (msg->info.last_backfill == hobject_t()) {
// restart backfill
ps->info = msg->info;
ps->dirty_info = true;
ps->dirty_big_info = true; // maybe.
PGLog::LogEntryHandlerRef rollbacker{pl->get_log_handler(t)};
ps->pg_log.reset_backfill_claim_log(msg->log, rollbacker.get());
ps->pg_log.reset_backfill();
} else {
ps->merge_log(t, msg->info, std::move(msg->log), logevt.from);
}
if (logevt.msg->lease) {
ps->proc_lease(*logevt.msg->lease);
}
ceph_assert(ps->pg_log.get_head() == ps->info.last_update);
post_event(Activate(logevt.msg->info.last_epoch_started));
return transit<ReplicaActive>();
}
boost::statechart::result PeeringState::Stray::react(const MInfoRec& infoevt)
{
DECLARE_LOCALS;
psdout(10) << "got info from osd." << infoevt.from << " " << infoevt.info << dendl;
if (ps->info.last_update > infoevt.info.last_update) {
// rewind divergent log entries
ObjectStore::Transaction &t = context<PeeringMachine>().get_cur_transaction();
ps->rewind_divergent_log(t, infoevt.info.last_update);
ps->info.stats = infoevt.info.stats;
ps->info.hit_set = infoevt.info.hit_set;
}
if (infoevt.lease) {
ps->proc_lease(*infoevt.lease);
}
ceph_assert(infoevt.info.last_update == ps->info.last_update);
ceph_assert(ps->pg_log.get_head() == ps->info.last_update);
post_event(Activate(infoevt.info.last_epoch_started));
return transit<ReplicaActive>();
}
boost::statechart::result PeeringState::Stray::react(const MQuery& query)
{
DECLARE_LOCALS;
ps->fulfill_query(query, context<PeeringMachine>().get_recovery_ctx());
return discard_event();
}
boost::statechart::result PeeringState::Stray::react(const ActMap&)
{
DECLARE_LOCALS;
if (ps->should_send_notify() && ps->get_primary().osd >= 0) {
ps->info.history.refresh_prior_readable_until_ub(
pl->get_mnow(), ps->prior_readable_until_ub);
context< PeeringMachine >().send_notify(
ps->get_primary().osd,
pg_notify_t(
ps->get_primary().shard, ps->pg_whoami.shard,
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
ps->info,
ps->past_intervals));
}
return discard_event();
}
void PeeringState::Stray::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_stray_latency, dur);
}
/*--------ToDelete----------*/
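// Parent state for PG removal; bumps l_osd_pg_removing for the lifetime of
// the deletion attempt.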
PeeringState::ToDelete::ToDelete(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ToDelete")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
pl->get_perf_logger().inc(l_osd_pg_removing);
}
void PeeringState::ToDelete::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
// note: on a successful removal, this path doesn't execute. see
// do_delete_work().
pl->get_perf_logger().dec(l_osd_pg_removing);
pl->cancel_local_background_io_reservation();
}
/*----WaitDeleteReserved----*/
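// Queue a local background I/O reservation at the current delete priority; an
// ActMap that changes the priority (handled by ToDelete below) re-enters
// ToDelete to re-reserve.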
PeeringState::WaitDeleteReserved::WaitDeleteReserved(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history,
"Started/ToDelete/WaitDeleteReserved")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
context< ToDelete >().priority = ps->get_delete_priority();
pl->cancel_local_background_io_reservation();
pl->request_local_background_io_reservation(
context<ToDelete>().priority,
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
DeleteReserved()),
std::make_unique<PGPeeringEvent>(
ps->get_osdmap_epoch(),
ps->get_osdmap_epoch(),
DeleteInterrupted()));
}
boost::statechart::result PeeringState::ToDelete::react(
const ActMap& evt)
{
DECLARE_LOCALS;
if (ps->get_delete_priority() != priority) {
psdout(10) << __func__ << " delete priority changed, resetting"
<< dendl;
return transit<ToDelete>();
}
return discard_event();
}
void PeeringState::WaitDeleteReserved::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
}
/*----Deleting-----*/
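// Actually remove the PG: roll the log forward, reset backfill info, and
// delete objects in batches via do_delete_work() until it signals completion.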
PeeringState::Deleting::Deleting(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/ToDelete/Deleting")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->deleting = true;
ObjectStore::Transaction &t = context<PeeringMachine>().get_cur_transaction();
// clear log
PGLog::LogEntryHandlerRef rollbacker{pl->get_log_handler(t)};
ps->pg_log.roll_forward(rollbacker.get());
// adjust info to backfill
ps->info.set_last_backfill(hobject_t());
ps->pg_log.reset_backfill();
ps->dirty_info = true;
pl->on_removal(t);
}
boost::statechart::result PeeringState::Deleting::react(
const DeleteSome& evt)
{
DECLARE_LOCALS;
std::pair<ghobject_t, bool> p;
p = pl->do_delete_work(context<PeeringMachine>().get_cur_transaction(),
next);
next = p.first;
return p.second ? discard_event() : terminate();
}
void PeeringState::Deleting::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->deleting = false;
pl->cancel_local_background_io_reservation();
}
/*--------GetInfo---------*/
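// Peering step 1: build the prior set and query pg_info_t from every probe
// target we have not already heard from; GotInfo (or IsDown) moves us on.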
PeeringState::GetInfo::GetInfo(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/GetInfo")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->check_past_interval_bounds();
ps->log_weirdness();
PastIntervals::PriorSet &prior_set = context< Peering >().prior_set;
ceph_assert(ps->blocked_by.empty());
prior_set = ps->build_prior();
ps->prior_readable_down_osds = prior_set.down;
if (ps->prior_readable_down_osds.empty()) {
psdout(10) << " no prior_set down osds, will clear prior_readable_until_ub before activating"
<< dendl;
}
ps->reset_min_peer_features();
get_infos();
if (prior_set.pg_down) {
post_event(IsDown());
} else if (peer_info_requested.empty()) {
post_event(GotInfo());
}
}
void PeeringState::GetInfo::get_infos()
{
DECLARE_LOCALS;
PastIntervals::PriorSet &prior_set = context< Peering >().prior_set;
ps->blocked_by.clear();
for (auto it = prior_set.probe.begin(); it != prior_set.probe.end(); ++it) {
pg_shard_t peer = *it;
if (peer == ps->pg_whoami) {
continue;
}
if (ps->peer_info.count(peer)) {
psdout(10) << " have osd." << peer << " info " << ps->peer_info[peer] << dendl;
continue;
}
if (peer_info_requested.count(peer)) {
psdout(10) << " already requested info from osd." << peer << dendl;
ps->blocked_by.insert(peer.osd);
} else if (!ps->get_osdmap()->is_up(peer.osd)) {
psdout(10) << " not querying info from down osd." << peer << dendl;
} else {
psdout(10) << " querying info from osd." << peer << dendl;
context< PeeringMachine >().send_query(
peer.osd,
pg_query_t(pg_query_t::INFO,
it->shard, ps->pg_whoami.shard,
ps->info.history,
ps->get_osdmap_epoch()));
peer_info_requested.insert(peer);
ps->blocked_by.insert(peer.osd);
}
}
ps->check_prior_readable_down_osds(ps->get_osdmap());
pl->publish_stats_to_osd();
}
boost::statechart::result PeeringState::GetInfo::react(const MNotifyRec& infoevt)
{
DECLARE_LOCALS;
auto p = peer_info_requested.find(infoevt.from);
if (p != peer_info_requested.end()) {
peer_info_requested.erase(p);
ps->blocked_by.erase(infoevt.from.osd);
}
epoch_t old_start = ps->info.history.last_epoch_started;
if (ps->proc_replica_info(
infoevt.from, infoevt.notify.info, infoevt.notify.epoch_sent)) {
// we got something new ...
PastIntervals::PriorSet &prior_set = context< Peering >().prior_set;
if (old_start < ps->info.history.last_epoch_started) {
psdout(10) << " last_epoch_started moved forward, rebuilding prior" << dendl;
prior_set = ps->build_prior();
ps->prior_readable_down_osds = prior_set.down;
// filter out any osds that got dropped from the probe set from
// peer_info_requested. this is less expensive than restarting
// peering (which would re-probe everyone).
auto p = peer_info_requested.begin();
while (p != peer_info_requested.end()) {
if (prior_set.probe.count(*p) == 0) {
psdout(20) << " dropping osd." << *p << " from info_requested, no longer in probe set" << dendl;
peer_info_requested.erase(p++);
} else {
++p;
}
}
get_infos();
}
psdout(20) << "Adding osd: " << infoevt.from.osd << " peer features: "
<< hex << infoevt.features << dec << dendl;
ps->apply_peer_features(infoevt.features);
// are we done getting everything?
if (peer_info_requested.empty() && !prior_set.pg_down) {
psdout(20) << "Common peer features: " << hex << ps->get_min_peer_features() << dec << dendl;
psdout(20) << "Common acting features: " << hex << ps->get_min_acting_features() << dec << dendl;
psdout(20) << "Common upacting features: " << hex << ps->get_min_upacting_features() << dec << dendl;
post_event(GotInfo());
}
}
return discard_event();
}
boost::statechart::result PeeringState::GetInfo::react(const QueryState& q)
{
DECLARE_LOCALS;
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->open_array_section("requested_info_from");
for (auto p = peer_info_requested.begin();
p != peer_info_requested.end();
++p) {
q.f->open_object_section("osd");
q.f->dump_stream("osd") << *p;
if (ps->peer_info.count(*p)) {
q.f->open_object_section("got_info");
ps->peer_info[*p].dump(q.f);
q.f->close_section();
}
q.f->close_section();
}
q.f->close_section();
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::GetInfo::react(const QueryUnfound& q)
{
q.f->dump_string("state", "GetInfo");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::GetInfo::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_getinfo_latency, dur);
ps->blocked_by.clear();
}
/*------GetLog------------*/
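// Peering step 2: choose the acting set and, if another shard holds the
// authoritative log, request enough of it to cover every peer's last_update.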
PeeringState::GetLog::GetLog(my_context ctx)
: my_base(ctx),
NamedState(
context< PeeringMachine >().state_history,
"Started/Primary/Peering/GetLog"),
msg(0)
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->log_weirdness();
// adjust acting?
if (!ps->choose_acting(auth_log_shard, false,
&context< Peering >().history_les_bound)) {
if (!ps->want_acting.empty()) {
post_event(NeedActingChange());
} else {
post_event(IsIncomplete());
}
return;
}
// am i the best?
if (auth_log_shard == ps->pg_whoami) {
post_event(GotLog());
return;
}
const pg_info_t& best = ps->peer_info[auth_log_shard];
// am i broken?
if (ps->info.last_update < best.log_tail) {
psdout(10) << " not contiguous with osd." << auth_log_shard << ", down" << dendl;
post_event(IsIncomplete());
return;
}
// how much log to request?
eversion_t request_log_from = ps->info.last_update;
ceph_assert(!ps->acting_recovery_backfill.empty());
for (auto p = ps->acting_recovery_backfill.begin();
p != ps->acting_recovery_backfill.end();
++p) {
if (*p == ps->pg_whoami) continue;
pg_info_t& ri = ps->peer_info[*p];
if (ri.last_update < ps->info.log_tail && ri.last_update >= best.log_tail &&
ri.last_update < request_log_from)
request_log_from = ri.last_update;
}
// how much?
psdout(10) << " requesting log from osd." << auth_log_shard << dendl;
context<PeeringMachine>().send_query(
auth_log_shard.osd,
pg_query_t(
pg_query_t::LOG,
auth_log_shard.shard, ps->pg_whoami.shard,
request_log_from, ps->info.history,
ps->get_osdmap_epoch()));
ceph_assert(ps->blocked_by.empty());
ps->blocked_by.insert(auth_log_shard.osd);
pl->publish_stats_to_osd();
}
boost::statechart::result PeeringState::GetLog::react(const AdvMap& advmap)
{
// make sure our log source didn't go down. we need to check
// explicitly because it may not be part of the prior set, which
// means the Peering state check won't catch it going down.
if (!advmap.osdmap->is_up(auth_log_shard.osd)) {
psdout(10) << "GetLog: auth_log_shard osd."
<< auth_log_shard.osd << " went down" << dendl;
post_event(advmap);
return transit< Reset >();
}
// let the Peering state do its checks.
return forward_event();
}
boost::statechart::result PeeringState::GetLog::react(const MLogRec& logevt)
{
ceph_assert(!msg);
if (logevt.from != auth_log_shard) {
psdout(10) << "GetLog: discarding log from "
<< "non-auth_log_shard osd." << logevt.from << dendl;
return discard_event();
}
psdout(10) << "GetLog: received master log from osd."
<< logevt.from << dendl;
msg = logevt.msg;
post_event(GotLog());
return discard_event();
}
boost::statechart::result PeeringState::GetLog::react(const GotLog&)
{
DECLARE_LOCALS;
psdout(10) << "leaving GetLog" << dendl;
if (msg) {
psdout(10) << "processing master log" << dendl;
ps->proc_master_log(context<PeeringMachine>().get_cur_transaction(),
msg->info, std::move(msg->log), std::move(msg->missing),
auth_log_shard);
}
ps->start_flush(context< PeeringMachine >().get_cur_transaction());
return transit< GetMissing >();
}
boost::statechart::result PeeringState::GetLog::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->dump_stream("auth_log_shard") << auth_log_shard;
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::GetLog::react(const QueryUnfound& q)
{
q.f->dump_string("state", "GetLog");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::GetLog::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_getlog_latency, dur);
ps->blocked_by.clear();
}
/*------WaitActingChange--------*/
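// Wait for the requested acting-set (pg_temp) change to appear in the osdmap;
// log/info/notify messages are ignored in the meantime.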
PeeringState::WaitActingChange::WaitActingChange(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/WaitActingChange")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result PeeringState::WaitActingChange::react(const AdvMap& advmap)
{
DECLARE_LOCALS;
OSDMapRef osdmap = advmap.osdmap;
psdout(10) << "verifying want_acting " << ps->want_acting << " targets didn't go down" << dendl;
for (auto p = ps->want_acting.begin(); p != ps->want_acting.end(); ++p) {
if (!osdmap->is_up(*p)) {
psdout(10) << " want_acting target osd." << *p << " went down, resetting" << dendl;
post_event(advmap);
return transit< Reset >();
}
}
return forward_event();
}
boost::statechart::result PeeringState::WaitActingChange::react(const MLogRec& logevt)
{
psdout(10) << "In WaitActingChange, ignoring MLogRec" << dendl;
return discard_event();
}
boost::statechart::result PeeringState::WaitActingChange::react(const MInfoRec& evt)
{
psdout(10) << "In WaitActingChange, ignoring MInfoRec" << dendl;
return discard_event();
}
boost::statechart::result PeeringState::WaitActingChange::react(const MNotifyRec& evt)
{
psdout(10) << "In WaitActingChange, ignoring MNotifyRec" << dendl;
return discard_event();
}
boost::statechart::result PeeringState::WaitActingChange::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->dump_string("comment", "waiting for pg acting set to change");
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::WaitActingChange::react(const QueryUnfound& q)
{
q.f->dump_string("state", "WaitActingChange");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::WaitActingChange::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_waitactingchange_latency, dur);
}
/*------Down--------*/
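// Not enough up prior-interval OSDs to peer; record them in blocked_by and
// wait. A notify from a returning OSD can move last_epoch_started forward and
// send us back to GetInfo.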
PeeringState::Down::Down(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/Down")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_PEERING);
ps->state_set(PG_STATE_DOWN);
auto &prior_set = context< Peering >().prior_set;
ceph_assert(ps->blocked_by.empty());
ps->blocked_by.insert(prior_set.down.begin(), prior_set.down.end());
pl->publish_stats_to_osd();
}
void PeeringState::Down::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_DOWN);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_down_latency, dur);
ps->blocked_by.clear();
}
boost::statechart::result PeeringState::Down::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->dump_string("comment",
"not enough up instances of this PG to go active");
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::Down::react(const QueryUnfound& q)
{
q.f->dump_string("state", "Down");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
boost::statechart::result PeeringState::Down::react(const MNotifyRec& infoevt)
{
DECLARE_LOCALS;
ceph_assert(ps->is_primary());
epoch_t old_start = ps->info.history.last_epoch_started;
if (!ps->peer_info.count(infoevt.from) &&
ps->get_osdmap()->has_been_up_since(infoevt.from.osd, infoevt.notify.epoch_sent)) {
ps->update_history(infoevt.notify.info.history);
}
// if we got something new that lets the pg escape the down state
if (ps->info.history.last_epoch_started > old_start) {
psdout(10) << " last_epoch_started moved forward, re-enter getinfo" << dendl;
ps->state_clear(PG_STATE_DOWN);
ps->state_set(PG_STATE_PEERING);
return transit< GetInfo >();
}
return discard_event();
}
/*------Incomplete--------*/
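// Peers are up but none has a usable (complete enough) copy; a lowered
// min_size or new replica info lets us retry via Reset or GetLog.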
PeeringState::Incomplete::Incomplete(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/Incomplete")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_PEERING);
ps->state_set(PG_STATE_INCOMPLETE);
PastIntervals::PriorSet &prior_set = context< Peering >().prior_set;
ceph_assert(ps->blocked_by.empty());
ps->blocked_by.insert(prior_set.down.begin(), prior_set.down.end());
pl->publish_stats_to_osd();
}
boost::statechart::result PeeringState::Incomplete::react(const AdvMap &advmap) {
DECLARE_LOCALS;
int64_t poolnum = ps->info.pgid.pool();
// Reset if min_size became smaller than the previous value; the pg might now be able to go active
if (!advmap.osdmap->have_pg_pool(poolnum) ||
advmap.lastmap->get_pools().find(poolnum)->second.min_size >
advmap.osdmap->get_pools().find(poolnum)->second.min_size) {
post_event(advmap);
return transit< Reset >();
}
return forward_event();
}
boost::statechart::result PeeringState::Incomplete::react(const MNotifyRec& notevt) {
DECLARE_LOCALS;
psdout(7) << "handle_pg_notify from osd." << notevt.from << dendl;
if (ps->proc_replica_info(
notevt.from, notevt.notify.info, notevt.notify.epoch_sent)) {
// We got something new, try again!
return transit< GetLog >();
} else {
return discard_event();
}
}
boost::statechart::result PeeringState::Incomplete::react(
const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->dump_string("comment", "not enough complete instances of this PG");
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::Incomplete::react(const QueryUnfound& q)
{
q.f->dump_string("state", "Incomplete");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::Incomplete::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
ps->state_clear(PG_STATE_INCOMPLETE);
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_incomplete_latency, dur);
ps->blocked_by.clear();
}
/*------GetMissing--------*/
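// Peering step 3: request log+missing from each acting/recovery/backfill peer
// that may have divergent or missing objects; once all replies are in (and
// up_thru is current) we can Activate.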
PeeringState::GetMissing::GetMissing(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/GetMissing")
{
context< PeeringMachine >().log_enter(state_name);
DECLARE_LOCALS;
ps->log_weirdness();
ceph_assert(!ps->acting_recovery_backfill.empty());
eversion_t since;
for (auto i = ps->acting_recovery_backfill.begin();
i != ps->acting_recovery_backfill.end();
++i) {
if (*i == ps->get_primary()) continue;
const pg_info_t& pi = ps->peer_info[*i];
// reset this to make sure the pg_missing_t is initialized and
// has the correct semantics even if we don't need to get a
// missing set from a shard. This way later additions due to
// lost+unfound delete work properly.
ps->peer_missing[*i].may_include_deletes = !ps->perform_deletes_during_peering();
if (pi.is_empty())
continue; // no pg data, nothing divergent
if (pi.last_update < ps->pg_log.get_tail()) {
psdout(10) << " osd." << *i << " is not contiguous, will restart backfill" << dendl;
ps->peer_missing[*i].clear();
continue;
}
if (pi.last_backfill == hobject_t()) {
psdout(10) << " osd." << *i << " will fully backfill; can infer empty missing set" << dendl;
ps->peer_missing[*i].clear();
continue;
}
if (pi.last_update == pi.last_complete && // peer has no missing
pi.last_update == ps->info.last_update) { // peer is up to date
// replica has no missing and identical log as us. no need to
// pull anything.
// FIXME: we can do better here. if last_update==last_complete we
// can infer the rest!
psdout(10) << " osd." << *i << " has no missing, identical log" << dendl;
ps->peer_missing[*i].clear();
continue;
}
// We pull the log from the peer's last_epoch_started to ensure we
// get enough log to detect divergent updates.
since.epoch = pi.last_epoch_started;
ceph_assert(pi.last_update >= ps->info.log_tail); // or else choose_acting() did a bad thing
if (pi.log_tail <= since) {
psdout(10) << " requesting log+missing since " << since << " from osd." << *i << dendl;
context< PeeringMachine >().send_query(
i->osd,
pg_query_t(
pg_query_t::LOG,
i->shard, ps->pg_whoami.shard,
since, ps->info.history,
ps->get_osdmap_epoch()));
} else {
psdout(10) << " requesting fulllog+missing from osd." << *i
<< " (want since " << since << " < log.tail "
<< pi.log_tail << ")" << dendl;
context< PeeringMachine >().send_query(
i->osd, pg_query_t(
pg_query_t::FULLLOG,
i->shard, ps->pg_whoami.shard,
ps->info.history, ps->get_osdmap_epoch()));
}
peer_missing_requested.insert(*i);
ps->blocked_by.insert(i->osd);
}
if (peer_missing_requested.empty()) {
if (ps->need_up_thru) {
psdout(10) << " still need up_thru update before going active"
<< dendl;
post_event(NeedUpThru());
return;
}
// all good!
post_event(Activate(ps->get_osdmap_epoch()));
} else {
pl->publish_stats_to_osd();
}
}
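// In rough terms, the loop above classifies each peer: empty peers and peers
// that are non-contiguous or will fully backfill need no missing set; a peer
// whose last_update equals both its last_complete and our own last_update has
// nothing missing; every other peer is asked for log+missing starting at its
// last_epoch_started, or for the full log if its log_tail is newer than that
// point.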
boost::statechart::result PeeringState::GetMissing::react(const MLogRec& logevt)
{
DECLARE_LOCALS;
peer_missing_requested.erase(logevt.from);
ps->proc_replica_log(logevt.msg->info,
logevt.msg->log,
std::move(logevt.msg->missing),
logevt.from);
if (peer_missing_requested.empty()) {
if (ps->need_up_thru) {
psdout(10) << " still need up_thru update before going active"
<< dendl;
post_event(NeedUpThru());
} else {
psdout(10) << "Got last missing, don't need missing "
<< "posting Activate" << dendl;
post_event(Activate(ps->get_osdmap_epoch()));
}
}
return discard_event();
}
boost::statechart::result PeeringState::GetMissing::react(const QueryState& q)
{
DECLARE_LOCALS;
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->open_array_section("peer_missing_requested");
for (auto p = peer_missing_requested.begin();
p != peer_missing_requested.end();
++p) {
q.f->open_object_section("osd");
q.f->dump_stream("osd") << *p;
if (ps->peer_missing.count(*p)) {
q.f->open_object_section("got_missing");
ps->peer_missing[*p].dump(q.f);
q.f->close_section();
}
q.f->close_section();
}
q.f->close_section();
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::GetMissing::react(const QueryUnfound& q)
{
q.f->dump_string("state", "GetMising");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::GetMissing::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_getmissing_latency, dur);
ps->blocked_by.clear();
}
/*------WaitUpThru--------*/
PeeringState::WaitUpThru::WaitUpThru(my_context ctx)
: my_base(ctx),
NamedState(context< PeeringMachine >().state_history, "Started/Primary/Peering/WaitUpThru")
{
context< PeeringMachine >().log_enter(state_name);
}
boost::statechart::result PeeringState::WaitUpThru::react(const ActMap& am)
{
DECLARE_LOCALS;
if (!ps->need_up_thru) {
post_event(Activate(ps->get_osdmap_epoch()));
}
return forward_event();
}
boost::statechart::result PeeringState::WaitUpThru::react(const MLogRec& logevt)
{
DECLARE_LOCALS;
psdout(10) << "Noting missing from osd." << logevt.from << dendl;
ps->peer_missing[logevt.from].claim(std::move(logevt.msg->missing));
ps->peer_info[logevt.from] = logevt.msg->info;
return discard_event();
}
boost::statechart::result PeeringState::WaitUpThru::react(const QueryState& q)
{
q.f->open_object_section("state");
q.f->dump_string("name", state_name);
q.f->dump_stream("enter_time") << enter_time;
q.f->dump_string("comment", "waiting for osdmap to reflect a new up_thru for this osd");
q.f->close_section();
return forward_event();
}
boost::statechart::result PeeringState::WaitUpThru::react(const QueryUnfound& q)
{
q.f->dump_string("state", "WaitUpThru");
q.f->dump_bool("available_might_have_unfound", false);
return discard_event();
}
void PeeringState::WaitUpThru::exit()
{
context< PeeringMachine >().log_exit(state_name, enter_time);
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
pl->get_peering_perf().tinc(rs_waitupthru_latency, dur);
}
/*----PeeringState::PeeringMachine Methods-----*/
#undef dout_prefix
#define dout_prefix dpp->gen_prefix(*_dout)
void PeeringState::PeeringMachine::log_enter(const char *state_name)
{
DECLARE_LOCALS;
psdout(5) << "enter " << state_name << dendl;
pl->log_state_enter(state_name);
}
void PeeringState::PeeringMachine::log_exit(const char *state_name, utime_t enter_time)
{
DECLARE_LOCALS;
utime_t dur = ceph_clock_now() - enter_time;
psdout(5) << "exit " << state_name << " " << dur << " " << event_count << " " << event_time << dendl;
pl->log_state_exit(state_name, enter_time, event_count, event_time);
event_count = 0;
event_time = utime_t();
}
ostream &operator<<(ostream &out, const PeeringState &ps) {
out << "pg[" << ps.info
<< " " << pg_vector_string(ps.up);
if (ps.acting != ps.up)
out << "/" << pg_vector_string(ps.acting);
if (ps.is_ec_pg())
out << "p" << ps.get_primary();
if (!ps.async_recovery_targets.empty())
out << " async=[" << ps.async_recovery_targets << "]";
if (!ps.backfill_targets.empty())
out << " backfill=[" << ps.backfill_targets << "]";
out << " r=" << ps.get_role();
out << " lpr=" << ps.get_last_peering_reset();
if (ps.deleting)
out << " DELETING";
if (!ps.past_intervals.empty()) {
out << " pi=[" << ps.past_intervals.get_bounds()
<< ")/" << ps.past_intervals.size();
}
if (ps.is_peered()) {
if (ps.last_update_ondisk != ps.info.last_update)
out << " luod=" << ps.last_update_ondisk;
if (ps.last_update_applied != ps.info.last_update)
out << " lua=" << ps.last_update_applied;
}
if (ps.pg_log.get_tail() != ps.info.log_tail ||
ps.pg_log.get_head() != ps.info.last_update)
out << " (info mismatch, " << ps.pg_log.get_log() << ")";
if (!ps.pg_log.get_log().empty()) {
if ((ps.pg_log.get_log().log.begin()->version <= ps.pg_log.get_tail())) {
out << " (log bound mismatch, actual=["
<< ps.pg_log.get_log().log.begin()->version << ","
<< ps.pg_log.get_log().log.rbegin()->version << "]";
out << ")";
}
}
out << " crt=" << ps.pg_log.get_can_rollback_to();
if (ps.last_complete_ondisk != ps.info.last_complete)
out << " lcod " << ps.last_complete_ondisk;
out << " mlcod " << ps.min_last_complete_ondisk;
out << " " << pg_state_string(ps.get_state());
if (ps.should_send_notify())
out << " NOTIFY";
if (ps.prior_readable_until_ub != ceph::signedspan::zero()) {
out << " pruub " << ps.prior_readable_until_ub
<< "@" << ps.get_prior_readable_down_osds();
}
return out;
}
std::vector<pg_shard_t> PeeringState::get_replica_recovery_order() const
{
std::vector<std::pair<unsigned int, pg_shard_t>> replicas_by_num_missing,
async_by_num_missing;
replicas_by_num_missing.reserve(get_acting_recovery_backfill().size() - 1);
for (auto &p : get_acting_recovery_backfill()) {
if (p == get_primary()) {
continue;
}
auto pm = get_peer_missing().find(p);
assert(pm != get_peer_missing().end());
auto nm = pm->second.num_missing();
if (nm != 0) {
if (is_async_recovery_target(p)) {
async_by_num_missing.push_back(make_pair(nm, p));
} else {
replicas_by_num_missing.push_back(make_pair(nm, p));
}
}
}
// sort by number of missing objects, in ascending order.
auto func = [](const std::pair<unsigned int, pg_shard_t> &lhs,
const std::pair<unsigned int, pg_shard_t> &rhs) {
return lhs.first < rhs.first;
};
// acting goes first
std::sort(replicas_by_num_missing.begin(), replicas_by_num_missing.end(), func);
// then async_recovery_targets
std::sort(async_by_num_missing.begin(), async_by_num_missing.end(), func);
replicas_by_num_missing.insert(replicas_by_num_missing.end(),
async_by_num_missing.begin(), async_by_num_missing.end());
std::vector<pg_shard_t> ret;
ret.reserve(replicas_by_num_missing.size());
for (auto p : replicas_by_num_missing) {
ret.push_back(p.second);
}
return ret;
}
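// Illustrative example (hypothetical shards and counts): with acting
// replicas a (3 missing) and b (1 missing) plus async recovery target
// c (2 missing), the function above returns [b, a, c]: acting shards first,
// in ascending order of missing objects, followed by async recovery targets
// ordered the same way.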
| 236,022 | 30.112971 | 120 | cc |
null | ceph-main/src/osd/PeeringState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <boost/statechart/custom_reaction.hpp>
#include <boost/statechart/event.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/state.hpp>
#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/transition.hpp>
#include <boost/statechart/event_base.hpp>
#include <string>
#include <atomic>
#include "include/ceph_assert.h"
#include "include/common_fwd.h"
#include "PGLog.h"
#include "PGStateUtils.h"
#include "PGPeeringEvent.h"
#include "osd_types.h"
#include "osd_types_fmt.h"
#include "os/ObjectStore.h"
#include "OSDMap.h"
#include "MissingLoc.h"
#include "osd/osd_perf_counters.h"
#include "common/ostream_temp.h"
struct PGPool {
epoch_t cached_epoch;
int64_t id;
std::string name;
pg_pool_t info;
SnapContext snapc; // the default pool snapc, ready to go.
PGPool(OSDMapRef map, int64_t i, const pg_pool_t& info,
const std::string& name)
: cached_epoch(map->get_epoch()),
id(i),
name(name),
info(info) {
snapc = info.get_snap_context();
}
void update(OSDMapRef map);
ceph::timespan get_readable_interval(ConfigProxy &conf) const {
double v = 0;
if (info.opts.get(pool_opts_t::READ_LEASE_INTERVAL, &v)) {
return ceph::make_timespan(v);
} else {
auto hbi = conf->osd_heartbeat_grace;
auto fac = conf->osd_pool_default_read_lease_ratio;
return ceph::make_timespan(hbi * fac);
}
}
};
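// Worked example for get_readable_interval() (hypothetical config values,
// for illustration only): with no per-pool READ_LEASE_INTERVAL option set,
// osd_heartbeat_grace = 20 and osd_pool_default_read_lease_ratio = 0.8 give
// a readable interval of 16 seconds.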
template <>
struct fmt::formatter<PGPool> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const PGPool& pool, FormatContext& ctx)
{
return fmt::format_to(ctx.out(),
"{}/{}({})",
pool.id,
pool.name,
pool.info);
}
};
struct PeeringCtx;
// [primary only] content recovery state
struct BufferedRecoveryMessages {
#if defined(WITH_SEASTAR)
std::map<int, std::vector<MessageURef>> message_map;
#else
std::map<int, std::vector<MessageRef>> message_map;
#endif
BufferedRecoveryMessages() = default;
BufferedRecoveryMessages(PeeringCtx &ctx);
void accept_buffered_messages(BufferedRecoveryMessages &m) {
for (auto &[target, ls] : m.message_map) {
auto &ovec = message_map[target];
// put buffered messages in front
ls.reserve(ls.size() + ovec.size());
ls.insert(ls.end(), std::make_move_iterator(ovec.begin()), std::make_move_iterator(ovec.end()));
ovec.clear();
ovec.swap(ls);
}
}
template <class MsgT> // MsgT = MessageRef for ceph-osd and MessageURef for crimson-osd
void send_osd_message(int target, MsgT&& m) {
message_map[target].emplace_back(std::forward<MsgT>(m));
}
void send_notify(int to, const pg_notify_t &n);
void send_query(int to, spg_t spgid, const pg_query_t &q);
void send_info(int to, spg_t to_spgid,
epoch_t min_epoch, epoch_t cur_epoch,
const pg_info_t &info,
std::optional<pg_lease_t> lease = {},
std::optional<pg_lease_ack_t> lease_ack = {});
};
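// Usage sketch (hypothetical names, for illustration only): messages queued
// while outgoing traffic is blocked can later be folded back into the live
// context, ending up ahead of anything queued afterwards:
//
// BufferedRecoveryMessages pending;
// pending.send_notify(peer_osd, notify); // queued, not yet delivered
// live_ctx.accept_buffered_messages(pending); // pending's messages go first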
struct HeartbeatStamps : public RefCountedObject {
mutable ceph::mutex lock = ceph::make_mutex("HeartbeatStamps::lock");
const int osd;
// we maintain an upper and lower bound on the delta between our local
// mono_clock time (minus the startup_time) to the peer OSD's mono_clock
// time (minus its startup_time).
//
// delta is (remote_clock_time - local_clock_time), so that
// local_time + delta -> peer_time, and peer_time - delta -> local_time.
//
// we have an upper and lower bound value on this delta, meaning the
// value of the remote clock is somewhere between [my_time + lb, my_time + ub]
//
// conversely, if we have a remote timestamp T, then that is
// [T - ub, T - lb] in terms of the local clock. i.e., if you are
// subtracting the delta, then take care that you swap the role of the
// lb and ub values.
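// Worked example (illustrative numbers only): with delta bounds
// [lb = +2s, ub = +5s], a local time of 100s maps to a peer time somewhere
// in [102s, 105s], while a peer timestamp of 100s corresponds to a local
// time in [95s, 98s] (the lb and ub roles swap when subtracting, as noted
// above).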
/// lower bound on peer clock - local clock
std::optional<ceph::signedspan> peer_clock_delta_lb;
/// upper bound on peer clock - local clock
std::optional<ceph::signedspan> peer_clock_delta_ub;
/// highest up_from we've seen from this rank
epoch_t up_from = 0;
void print(std::ostream& out) const {
std::lock_guard l(lock);
out << "hbstamp(osd." << osd << " up_from " << up_from
<< " peer_clock_delta [";
if (peer_clock_delta_lb) {
out << *peer_clock_delta_lb;
}
out << ",";
if (peer_clock_delta_ub) {
out << *peer_clock_delta_ub;
}
out << "])";
}
void sent_ping(std::optional<ceph::signedspan> *delta_ub) {
std::lock_guard l(lock);
// the non-primaries need a lower bound on remote clock - local clock. if
// we assume the transit for the last ping_reply was
// instantaneous, that would be (the negative of) our last
// peer_clock_delta_lb value.
if (peer_clock_delta_lb) {
*delta_ub = - *peer_clock_delta_lb;
}
}
void got_ping(epoch_t this_up_from,
ceph::signedspan now,
ceph::signedspan peer_send_stamp,
std::optional<ceph::signedspan> delta_ub,
ceph::signedspan *out_delta_ub) {
std::lock_guard l(lock);
if (this_up_from < up_from) {
return;
}
if (this_up_from > up_from) {
up_from = this_up_from;
}
peer_clock_delta_lb = peer_send_stamp - now;
peer_clock_delta_ub = delta_ub;
*out_delta_ub = - *peer_clock_delta_lb;
}
void got_ping_reply(ceph::signedspan now,
ceph::signedspan peer_send_stamp,
std::optional<ceph::signedspan> delta_ub) {
std::lock_guard l(lock);
peer_clock_delta_lb = peer_send_stamp - now;
peer_clock_delta_ub = delta_ub;
}
private:
FRIEND_MAKE_REF(HeartbeatStamps);
HeartbeatStamps(int o)
: RefCountedObject(NULL),
osd(o) {}
};
using HeartbeatStampsRef = ceph::ref_t<HeartbeatStamps>;
inline std::ostream& operator<<(std::ostream& out, const HeartbeatStamps& hb)
{
hb.print(out);
return out;
}
struct PeeringCtx : BufferedRecoveryMessages {
ObjectStore::Transaction transaction;
HBHandle* handle = nullptr;
PeeringCtx() = default;
PeeringCtx(const PeeringCtx &) = delete;
PeeringCtx &operator=(const PeeringCtx &) = delete;
PeeringCtx(PeeringCtx &&) = default;
PeeringCtx &operator=(PeeringCtx &&) = default;
void reset_transaction() {
transaction = ObjectStore::Transaction();
}
};
/**
* Wraps PeeringCtx to hide the difference between buffering messages to
* be sent after flush or immediately.
*/
struct PeeringCtxWrapper {
utime_t start_time;
BufferedRecoveryMessages &msgs;
ObjectStore::Transaction &transaction;
HBHandle * const handle = nullptr;
PeeringCtxWrapper(PeeringCtx &wrapped) :
msgs(wrapped),
transaction(wrapped.transaction),
handle(wrapped.handle) {}
PeeringCtxWrapper(BufferedRecoveryMessages &buf, PeeringCtx &wrapped)
: msgs(buf),
transaction(wrapped.transaction),
handle(wrapped.handle) {}
PeeringCtxWrapper(PeeringCtxWrapper &&ctx) = default;
template <class MsgT> // MsgT = MessageRef for ceph-osd and MessageURef for crimson-osd
void send_osd_message(int target, MsgT&& m) {
msgs.send_osd_message(target, std::forward<MsgT>(m));
}
void send_notify(int to, const pg_notify_t &n) {
msgs.send_notify(to, n);
}
void send_query(int to, spg_t spgid, const pg_query_t &q) {
msgs.send_query(to, spgid, q);
}
void send_info(int to, spg_t to_spgid,
epoch_t min_epoch, epoch_t cur_epoch,
const pg_info_t &info,
std::optional<pg_lease_t> lease = {},
std::optional<pg_lease_ack_t> lease_ack = {}) {
msgs.send_info(to, to_spgid, min_epoch, cur_epoch, info,
lease, lease_ack);
}
};
/* Encapsulates PG recovery process */
class PeeringState : public MissingLoc::MappingInfo {
public:
struct PeeringListener : public EpochSource {
/// Prepare t with written information
virtual void prepare_write(
pg_info_t &info,
pg_info_t &last_written_info,
PastIntervals &past_intervals,
PGLog &pglog,
bool dirty_info,
bool dirty_big_info,
bool need_write_epoch,
ObjectStore::Transaction &t) = 0;
/// Notify that a scrub has been requested
virtual void scrub_requested(scrub_level_t scrub_level, scrub_type_t scrub_type) = 0;
/// Return current snap_trimq size
virtual uint64_t get_snap_trimq_size() const = 0;
/// Send cluster message to osd
#if defined(WITH_SEASTAR)
virtual void send_cluster_message(
int osd, MessageURef m, epoch_t epoch, bool share_map_update=false) = 0;
#else
virtual void send_cluster_message(
int osd, MessageRef m, epoch_t epoch, bool share_map_update=false) = 0;
#endif
/// Send pg_created to mon
virtual void send_pg_created(pg_t pgid) = 0;
virtual ceph::signedspan get_mnow() const = 0;
virtual HeartbeatStampsRef get_hb_stamps(int peer) = 0;
virtual void schedule_renew_lease(epoch_t plr, ceph::timespan delay) = 0;
virtual void queue_check_readable(epoch_t lpr, ceph::timespan delay) = 0;
virtual void recheck_readable() = 0;
virtual unsigned get_target_pg_log_entries() const = 0;
// ============ Flush state ==================
/**
* try_flush_or_schedule_async()
*
* If true, caller may assume all past operations on this pg
* have been flushed. Else, caller will receive an on_flushed()
* call once the flush has completed.
*/
virtual bool try_flush_or_schedule_async() = 0;
/// Arranges for a commit on t to call on_flushed() once flushed.
virtual void start_flush_on_transaction(
ObjectStore::Transaction &t) = 0;
/// Notification that all outstanding flushes for interval have completed
virtual void on_flushed() = 0;
//============= Recovery ====================
/// Arrange for the event to be queued after delay
virtual void schedule_event_after(
PGPeeringEventRef event,
float delay) = 0;
/**
* request_local_background_io_reservation
*
* Request reservation at priority with on_grant queued on grant
* and on_preempt on preempt
*/
virtual void request_local_background_io_reservation(
unsigned priority,
PGPeeringEventURef on_grant,
PGPeeringEventURef on_preempt) = 0;
/// Modify pending local background reservation request priority
virtual void update_local_background_io_priority(
unsigned priority) = 0;
/// Cancel pending local background reservation request
virtual void cancel_local_background_io_reservation() = 0;
/**
* request_remote_recovery_reservation
*
* Request reservation at priority with on_grant queued on grant
* and on_preempt on preempt
*/
virtual void request_remote_recovery_reservation(
unsigned priority,
PGPeeringEventURef on_grant,
PGPeeringEventURef on_preempt) = 0;
/// Cancel pending remote background reservation request
virtual void cancel_remote_recovery_reservation() = 0;
/// Arrange for on_commit to be queued upon commit of t
virtual void schedule_event_on_commit(
ObjectStore::Transaction &t,
PGPeeringEventRef on_commit) = 0;
//============================ HB =============================
/// Update hb set to peers
virtual void update_heartbeat_peers(std::set<int> peers) = 0;
/// Set targets being probed in this interval
virtual void set_probe_targets(const std::set<pg_shard_t> &probe_set) = 0;
/// Clear targets being probed in this interval
virtual void clear_probe_targets() = 0;
/// Queue for a pg_temp of wanted
virtual void queue_want_pg_temp(const std::vector<int> &wanted) = 0;
/// Clear queue for a pg_temp of wanted
virtual void clear_want_pg_temp() = 0;
/// Arrange for stats to be shipped to mon to be updated for this pg
virtual void publish_stats_to_osd() = 0;
/// Clear stats to be shipped to mon for this pg
virtual void clear_publish_stats() = 0;
/// Notification to check outstanding operation targets
virtual void check_recovery_sources(const OSDMapRef& newmap) = 0;
/// Notification to check outstanding blocklist
virtual void check_blocklisted_watchers() = 0;
/// Notification to clear state associated with primary
virtual void clear_primary_state() = 0;
// =================== Event notification ====================
virtual void on_pool_change() = 0;
virtual void on_role_change() = 0;
virtual void on_change(ObjectStore::Transaction &t) = 0;
virtual void on_activate(interval_set<snapid_t> to_trim) = 0;
virtual void on_activate_complete() = 0;
virtual void on_new_interval() = 0;
virtual Context *on_clean() = 0;
virtual void on_activate_committed() = 0;
virtual void on_active_exit() = 0;
// ====================== PG deletion =======================
/// Notification of removal complete, t must be populated to complete removal
virtual void on_removal(ObjectStore::Transaction &t) = 0;
/// Perform incremental removal work
virtual std::pair<ghobject_t, bool> do_delete_work(
ObjectStore::Transaction &t, ghobject_t _next) = 0;
// ======================= PG Merge =========================
virtual void clear_ready_to_merge() = 0;
virtual void set_not_ready_to_merge_target(pg_t pgid, pg_t src) = 0;
virtual void set_not_ready_to_merge_source(pg_t pgid) = 0;
virtual void set_ready_to_merge_target(eversion_t lu, epoch_t les, epoch_t lec) = 0;
virtual void set_ready_to_merge_source(eversion_t lu) = 0;
// ==================== Map notifications ===================
virtual void on_active_actmap() = 0;
virtual void on_active_advmap(const OSDMapRef &osdmap) = 0;
virtual epoch_t cluster_osdmap_trim_lower_bound() = 0;
// ============ recovery reservation notifications ==========
virtual void on_backfill_reserved() = 0;
virtual void on_backfill_canceled() = 0;
virtual void on_recovery_reserved() = 0;
// ================recovery space accounting ================
virtual bool try_reserve_recovery_space(
int64_t primary_num_bytes, int64_t local_num_bytes) = 0;
virtual void unreserve_recovery_space() = 0;
// ================== Peering log events ====================
/// Get handler for rolling forward/back log entries
virtual PGLog::LogEntryHandlerRef get_log_handler(
ObjectStore::Transaction &t) = 0;
// ============ On disk representation changes ==============
virtual void rebuild_missing_set_with_deletes(PGLog &pglog) = 0;
// ======================= Logging ==========================
virtual PerfCounters &get_peering_perf() = 0;
virtual PerfCounters &get_perf_logger() = 0;
virtual void log_state_enter(const char *state) = 0;
virtual void log_state_exit(
const char *state_name, utime_t enter_time,
uint64_t events, utime_t event_dur) = 0;
virtual void dump_recovery_info(ceph::Formatter *f) const = 0;
virtual OstreamTemp get_clog_info() = 0;
virtual OstreamTemp get_clog_error() = 0;
virtual OstreamTemp get_clog_debug() = 0;
virtual ~PeeringListener() {}
};
struct QueryState : boost::statechart::event< QueryState > {
ceph::Formatter *f;
explicit QueryState(ceph::Formatter *f) : f(f) {}
void print(std::ostream *out) const {
*out << "Query";
}
};
struct QueryUnfound : boost::statechart::event< QueryUnfound > {
ceph::Formatter *f;
explicit QueryUnfound(ceph::Formatter *f) : f(f) {}
void print(std::ostream *out) const {
*out << "QueryUnfound";
}
};
struct AdvMap : boost::statechart::event< AdvMap > {
OSDMapRef osdmap;
OSDMapRef lastmap;
std::vector<int> newup, newacting;
int up_primary, acting_primary;
AdvMap(
OSDMapRef osdmap, OSDMapRef lastmap,
std::vector<int>& newup, int up_primary,
std::vector<int>& newacting, int acting_primary):
osdmap(osdmap), lastmap(lastmap),
newup(newup),
newacting(newacting),
up_primary(up_primary),
acting_primary(acting_primary) {}
void print(std::ostream *out) const {
*out << "AdvMap";
}
};
struct ActMap : boost::statechart::event< ActMap > {
ActMap() : boost::statechart::event< ActMap >() {}
void print(std::ostream *out) const {
*out << "ActMap";
}
};
struct Activate : boost::statechart::event< Activate > {
epoch_t activation_epoch;
explicit Activate(epoch_t q) : boost::statechart::event< Activate >(),
activation_epoch(q) {}
void print(std::ostream *out) const {
*out << "Activate from " << activation_epoch;
}
};
struct ActivateCommitted : boost::statechart::event< ActivateCommitted > {
epoch_t epoch;
epoch_t activation_epoch;
explicit ActivateCommitted(epoch_t e, epoch_t ae)
: boost::statechart::event< ActivateCommitted >(),
epoch(e),
activation_epoch(ae) {}
void print(std::ostream *out) const {
*out << "ActivateCommitted from " << activation_epoch
<< " processed at " << epoch;
}
};
public:
struct UnfoundBackfill : boost::statechart::event<UnfoundBackfill> {
explicit UnfoundBackfill() {}
void print(std::ostream *out) const {
*out << "UnfoundBackfill";
}
};
struct UnfoundRecovery : boost::statechart::event<UnfoundRecovery> {
explicit UnfoundRecovery() {}
void print(std::ostream *out) const {
*out << "UnfoundRecovery";
}
};
struct RequestScrub : boost::statechart::event<RequestScrub> {
scrub_level_t deep;
scrub_type_t repair;
explicit RequestScrub(bool d, bool r) : deep(scrub_level_t(d)), repair(scrub_type_t(r)) {}
void print(std::ostream *out) const {
*out << "RequestScrub(" << ((deep==scrub_level_t::deep) ? "deep" : "shallow")
<< ((repair==scrub_type_t::do_repair) ? " repair)" : ")");
}
};
TrivialEvent(Initialize)
TrivialEvent(GotInfo)
TrivialEvent(NeedUpThru)
TrivialEvent(Backfilled)
TrivialEvent(LocalBackfillReserved)
TrivialEvent(RejectTooFullRemoteReservation)
TrivialEvent(RequestBackfill)
TrivialEvent(RemoteRecoveryPreempted)
TrivialEvent(RemoteBackfillPreempted)
TrivialEvent(BackfillTooFull)
TrivialEvent(RecoveryTooFull)
TrivialEvent(MakePrimary)
TrivialEvent(MakeStray)
TrivialEvent(NeedActingChange)
TrivialEvent(IsIncomplete)
TrivialEvent(IsDown)
TrivialEvent(AllReplicasRecovered)
TrivialEvent(DoRecovery)
TrivialEvent(LocalRecoveryReserved)
TrivialEvent(AllRemotesReserved)
TrivialEvent(AllBackfillsReserved)
TrivialEvent(GoClean)
TrivialEvent(AllReplicasActivated)
TrivialEvent(IntervalFlush)
TrivialEvent(DeleteStart)
TrivialEvent(DeleteSome)
TrivialEvent(SetForceRecovery)
TrivialEvent(UnsetForceRecovery)
TrivialEvent(SetForceBackfill)
TrivialEvent(UnsetForceBackfill)
TrivialEvent(DeleteReserved)
TrivialEvent(DeleteInterrupted)
TrivialEvent(CheckReadable)
void start_handle(PeeringCtx *new_ctx);
void end_handle();
void begin_block_outgoing();
void end_block_outgoing();
void clear_blocked_outgoing();
private:
/* States */
struct Initial;
class PeeringMachine : public boost::statechart::state_machine< PeeringMachine, Initial > {
public:
PeeringState *state;
PGStateHistory *state_history;
CephContext *cct;
spg_t spgid;
DoutPrefixProvider *dpp;
PeeringListener *pl;
utime_t event_time;
uint64_t event_count;
void clear_event_counters() {
event_time = utime_t();
event_count = 0;
}
void log_enter(const char *state_name);
void log_exit(const char *state_name, utime_t enter_time);
PeeringMachine(
PeeringState *state, CephContext *cct,
spg_t spgid,
DoutPrefixProvider *dpp,
PeeringListener *pl,
PGStateHistory *state_history) :
state(state),
state_history(state_history),
cct(cct), spgid(spgid),
dpp(dpp), pl(pl),
event_count(0) {}
/* Accessor functions for state methods */
ObjectStore::Transaction& get_cur_transaction() {
ceph_assert(state->rctx);
return state->rctx->transaction;
}
PeeringCtxWrapper &get_recovery_ctx() {
assert(state->rctx);
return *(state->rctx);
}
void send_notify(int to, const pg_notify_t &n) {
ceph_assert(state->rctx);
state->rctx->send_notify(to, n);
}
void send_query(int to, const pg_query_t &query) {
state->rctx->send_query(
to,
spg_t(spgid.pgid, query.to),
query);
}
};
friend class PeeringMachine;
/* States */
// Initial
// Reset
// Started
// Start
// Primary
// WaitActingChange
// Peering
// GetInfo
// GetLog
// GetMissing
// WaitUpThru
// Incomplete
// Active
// Activating
// Clean
// Recovered
// Backfilling
// WaitRemoteBackfillReserved
// WaitLocalBackfillReserved
// NotBackfilling
// NotRecovering
// Recovering
// WaitRemoteRecoveryReserved
// WaitLocalRecoveryReserved
// ReplicaActive
// RepNotRecovering
// RepRecovering
// RepWaitBackfillReserved
// RepWaitRecoveryReserved
// Stray
// ToDelete
// WaitDeleteReserved
// Deleting
// Crashed
struct Crashed : boost::statechart::state< Crashed, PeeringMachine >, NamedState {
explicit Crashed(my_context ctx);
};
struct Reset;
struct Initial : boost::statechart::state< Initial, PeeringMachine >, NamedState {
explicit Initial(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::transition< Initialize, Reset >,
boost::statechart::custom_reaction< NullEvt >,
boost::statechart::transition< boost::statechart::event_base, Crashed >
> reactions;
boost::statechart::result react(const MNotifyRec&);
boost::statechart::result react(const MInfoRec&);
boost::statechart::result react(const MLogRec&);
boost::statechart::result react(const boost::statechart::event_base&) {
return discard_event();
}
};
struct Reset : boost::statechart::state< Reset, PeeringMachine >, NamedState {
explicit Reset(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< AdvMap >,
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< NullEvt >,
boost::statechart::custom_reaction< IntervalFlush >,
boost::statechart::transition< boost::statechart::event_base, Crashed >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const AdvMap&);
boost::statechart::result react(const ActMap&);
boost::statechart::result react(const IntervalFlush&);
boost::statechart::result react(const boost::statechart::event_base&) {
return discard_event();
}
};
struct Start;
struct Started : boost::statechart::state< Started, PeeringMachine, Start >, NamedState {
explicit Started(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< AdvMap >,
boost::statechart::custom_reaction< IntervalFlush >,
// ignored
boost::statechart::custom_reaction< NullEvt >,
boost::statechart::custom_reaction<SetForceRecovery>,
boost::statechart::custom_reaction<UnsetForceRecovery>,
boost::statechart::custom_reaction<SetForceBackfill>,
boost::statechart::custom_reaction<UnsetForceBackfill>,
boost::statechart::custom_reaction<RequestScrub>,
boost::statechart::custom_reaction<CheckReadable>,
// crash
boost::statechart::transition< boost::statechart::event_base, Crashed >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const AdvMap&);
boost::statechart::result react(const IntervalFlush&);
boost::statechart::result react(const boost::statechart::event_base&) {
return discard_event();
}
};
struct Primary;
struct Stray;
struct Start : boost::statechart::state< Start, Started >, NamedState {
explicit Start(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::transition< MakePrimary, Primary >,
boost::statechart::transition< MakeStray, Stray >
> reactions;
};
struct Peering;
struct WaitActingChange;
struct Incomplete;
struct Down;
struct Primary : boost::statechart::state< Primary, Started, Peering >, NamedState {
explicit Primary(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< MNotifyRec >,
boost::statechart::custom_reaction<SetForceRecovery>,
boost::statechart::custom_reaction<UnsetForceRecovery>,
boost::statechart::custom_reaction<SetForceBackfill>,
boost::statechart::custom_reaction<UnsetForceBackfill>,
boost::statechart::custom_reaction<RequestScrub>
> reactions;
boost::statechart::result react(const ActMap&);
boost::statechart::result react(const MNotifyRec&);
boost::statechart::result react(const SetForceRecovery&);
boost::statechart::result react(const UnsetForceRecovery&);
boost::statechart::result react(const SetForceBackfill&);
boost::statechart::result react(const UnsetForceBackfill&);
boost::statechart::result react(const RequestScrub&);
};
struct WaitActingChange : boost::statechart::state< WaitActingChange, Primary>,
NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< AdvMap >,
boost::statechart::custom_reaction< MLogRec >,
boost::statechart::custom_reaction< MInfoRec >,
boost::statechart::custom_reaction< MNotifyRec >
> reactions;
explicit WaitActingChange(my_context ctx);
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const AdvMap&);
boost::statechart::result react(const MLogRec&);
boost::statechart::result react(const MInfoRec&);
boost::statechart::result react(const MNotifyRec&);
void exit();
};
struct GetInfo;
struct Active;
struct Peering : boost::statechart::state< Peering, Primary, GetInfo >, NamedState {
PastIntervals::PriorSet prior_set;
bool history_les_bound; ///< need osd_find_best_info_ignore_history_les
explicit Peering(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::transition< Activate, Active >,
boost::statechart::custom_reaction< AdvMap >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const AdvMap &advmap);
};
struct WaitLocalRecoveryReserved;
struct Activating;
struct Active : boost::statechart::state< Active, Primary, Activating >, NamedState {
explicit Active(my_context ctx);
void exit();
const std::set<pg_shard_t> remote_shards_to_reserve_recovery;
const std::set<pg_shard_t> remote_shards_to_reserve_backfill;
bool all_replicas_activated;
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< AdvMap >,
boost::statechart::custom_reaction< MInfoRec >,
boost::statechart::custom_reaction< MNotifyRec >,
boost::statechart::custom_reaction< MLogRec >,
boost::statechart::custom_reaction< MTrim >,
boost::statechart::custom_reaction< Backfilled >,
boost::statechart::custom_reaction< ActivateCommitted >,
boost::statechart::custom_reaction< AllReplicasActivated >,
boost::statechart::custom_reaction< DeferRecovery >,
boost::statechart::custom_reaction< DeferBackfill >,
boost::statechart::custom_reaction< UnfoundRecovery >,
boost::statechart::custom_reaction< UnfoundBackfill >,
boost::statechart::custom_reaction< RemoteReservationRevokedTooFull>,
boost::statechart::custom_reaction< RemoteReservationRevoked>,
boost::statechart::custom_reaction< DoRecovery>,
boost::statechart::custom_reaction< RenewLease>,
boost::statechart::custom_reaction< MLeaseAck>,
boost::statechart::custom_reaction< CheckReadable>
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const ActMap&);
boost::statechart::result react(const AdvMap&);
boost::statechart::result react(const MInfoRec& infoevt);
boost::statechart::result react(const MNotifyRec& notevt);
boost::statechart::result react(const MLogRec& logevt);
boost::statechart::result react(const MTrim& trimevt);
boost::statechart::result react(const Backfilled&) {
return discard_event();
}
boost::statechart::result react(const ActivateCommitted&);
boost::statechart::result react(const AllReplicasActivated&);
boost::statechart::result react(const RenewLease&);
boost::statechart::result react(const MLeaseAck&);
boost::statechart::result react(const DeferRecovery& evt) {
return discard_event();
}
boost::statechart::result react(const DeferBackfill& evt) {
return discard_event();
}
boost::statechart::result react(const UnfoundRecovery& evt) {
return discard_event();
}
boost::statechart::result react(const UnfoundBackfill& evt) {
return discard_event();
}
boost::statechart::result react(const RemoteReservationRevokedTooFull&) {
return discard_event();
}
boost::statechart::result react(const RemoteReservationRevoked&) {
return discard_event();
}
boost::statechart::result react(const DoRecovery&) {
return discard_event();
}
boost::statechart::result react(const CheckReadable&);
void all_activated_and_committed();
};
struct Clean : boost::statechart::state< Clean, Active >, NamedState {
typedef boost::mpl::list<
boost::statechart::transition< DoRecovery, WaitLocalRecoveryReserved >,
boost::statechart::custom_reaction<SetForceRecovery>,
boost::statechart::custom_reaction<SetForceBackfill>
> reactions;
explicit Clean(my_context ctx);
void exit();
boost::statechart::result react(const boost::statechart::event_base&) {
return discard_event();
}
};
struct Recovered : boost::statechart::state< Recovered, Active >, NamedState {
typedef boost::mpl::list<
boost::statechart::transition< GoClean, Clean >,
boost::statechart::transition< DoRecovery, WaitLocalRecoveryReserved >,
boost::statechart::custom_reaction< AllReplicasActivated >
> reactions;
explicit Recovered(my_context ctx);
void exit();
boost::statechart::result react(const AllReplicasActivated&) {
post_event(GoClean());
return forward_event();
}
};
struct Backfilling : boost::statechart::state< Backfilling, Active >, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< Backfilled >,
boost::statechart::custom_reaction< DeferBackfill >,
boost::statechart::custom_reaction< UnfoundBackfill >,
boost::statechart::custom_reaction< RemoteReservationRejectedTooFull >,
boost::statechart::custom_reaction< RemoteReservationRevokedTooFull>,
boost::statechart::custom_reaction< RemoteReservationRevoked>
> reactions;
explicit Backfilling(my_context ctx);
boost::statechart::result react(const RemoteReservationRejectedTooFull& evt) {
// for compat with old peers
post_event(RemoteReservationRevokedTooFull());
return discard_event();
}
void backfill_release_reservations();
boost::statechart::result react(const Backfilled& evt);
boost::statechart::result react(const RemoteReservationRevokedTooFull& evt);
boost::statechart::result react(const RemoteReservationRevoked& evt);
boost::statechart::result react(const DeferBackfill& evt);
boost::statechart::result react(const UnfoundBackfill& evt);
void cancel_backfill();
void exit();
};
struct WaitRemoteBackfillReserved : boost::statechart::state< WaitRemoteBackfillReserved, Active >, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< RemoteBackfillReserved >,
boost::statechart::custom_reaction< RemoteReservationRejectedTooFull >,
boost::statechart::custom_reaction< RemoteReservationRevoked >,
boost::statechart::transition< AllBackfillsReserved, Backfilling >
> reactions;
std::set<pg_shard_t>::const_iterator backfill_osd_it;
explicit WaitRemoteBackfillReserved(my_context ctx);
void retry();
void exit();
boost::statechart::result react(const RemoteBackfillReserved& evt);
boost::statechart::result react(const RemoteReservationRejectedTooFull& evt);
boost::statechart::result react(const RemoteReservationRevoked& evt);
};
struct WaitLocalBackfillReserved : boost::statechart::state< WaitLocalBackfillReserved, Active >, NamedState {
typedef boost::mpl::list<
boost::statechart::transition< LocalBackfillReserved, WaitRemoteBackfillReserved >,
boost::statechart::custom_reaction< RemoteBackfillReserved >
> reactions;
explicit WaitLocalBackfillReserved(my_context ctx);
boost::statechart::result react(const RemoteBackfillReserved& evt) {
/* no-op */
return discard_event();
}
void exit();
};
struct NotBackfilling : boost::statechart::state< NotBackfilling, Active>, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::transition< RequestBackfill, WaitLocalBackfillReserved>,
boost::statechart::custom_reaction< RemoteBackfillReserved >,
boost::statechart::custom_reaction< RemoteReservationRejectedTooFull >
> reactions;
explicit NotBackfilling(my_context ctx);
void exit();
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const RemoteBackfillReserved& evt);
boost::statechart::result react(const RemoteReservationRejectedTooFull& evt);
};
struct NotRecovering : boost::statechart::state< NotRecovering, Active>, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::transition< DoRecovery, WaitLocalRecoveryReserved >,
boost::statechart::custom_reaction< DeferRecovery >,
boost::statechart::custom_reaction< UnfoundRecovery >
> reactions;
explicit NotRecovering(my_context ctx);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const DeferRecovery& evt) {
/* no-op */
return discard_event();
}
boost::statechart::result react(const UnfoundRecovery& evt) {
/* no-op */
return discard_event();
}
void exit();
};
struct ToDelete;
struct RepNotRecovering;
struct ReplicaActive : boost::statechart::state< ReplicaActive, Started, RepNotRecovering >, NamedState {
explicit ReplicaActive(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< MQuery >,
boost::statechart::custom_reaction< MInfoRec >,
boost::statechart::custom_reaction< MLogRec >,
boost::statechart::custom_reaction< MTrim >,
boost::statechart::custom_reaction< Activate >,
boost::statechart::custom_reaction< ActivateCommitted >,
boost::statechart::custom_reaction< DeferRecovery >,
boost::statechart::custom_reaction< DeferBackfill >,
boost::statechart::custom_reaction< UnfoundRecovery >,
boost::statechart::custom_reaction< UnfoundBackfill >,
boost::statechart::custom_reaction< RemoteBackfillPreempted >,
boost::statechart::custom_reaction< RemoteRecoveryPreempted >,
boost::statechart::custom_reaction< RecoveryDone >,
boost::statechart::transition<DeleteStart, ToDelete>,
boost::statechart::custom_reaction< MLease >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const MInfoRec& infoevt);
boost::statechart::result react(const MLogRec& logevt);
boost::statechart::result react(const MTrim& trimevt);
boost::statechart::result react(const ActMap&);
boost::statechart::result react(const MQuery&);
boost::statechart::result react(const Activate&);
boost::statechart::result react(const ActivateCommitted&);
boost::statechart::result react(const MLease&);
boost::statechart::result react(const RecoveryDone&) {
return discard_event();
}
boost::statechart::result react(const DeferRecovery& evt) {
return discard_event();
}
boost::statechart::result react(const DeferBackfill& evt) {
return discard_event();
}
boost::statechart::result react(const UnfoundRecovery& evt) {
return discard_event();
}
boost::statechart::result react(const UnfoundBackfill& evt) {
return discard_event();
}
boost::statechart::result react(const RemoteBackfillPreempted& evt) {
return discard_event();
}
boost::statechart::result react(const RemoteRecoveryPreempted& evt) {
return discard_event();
}
};
struct RepRecovering : boost::statechart::state< RepRecovering, ReplicaActive >, NamedState {
typedef boost::mpl::list<
boost::statechart::transition< RecoveryDone, RepNotRecovering >,
// for compat with old peers
boost::statechart::transition< RemoteReservationRejectedTooFull, RepNotRecovering >,
boost::statechart::transition< RemoteReservationCanceled, RepNotRecovering >,
boost::statechart::custom_reaction< BackfillTooFull >,
boost::statechart::custom_reaction< RemoteRecoveryPreempted >,
boost::statechart::custom_reaction< RemoteBackfillPreempted >
> reactions;
explicit RepRecovering(my_context ctx);
boost::statechart::result react(const RemoteRecoveryPreempted &evt);
boost::statechart::result react(const BackfillTooFull &evt);
boost::statechart::result react(const RemoteBackfillPreempted &evt);
void exit();
};
struct RepWaitBackfillReserved : boost::statechart::state< RepWaitBackfillReserved, ReplicaActive >, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< RemoteBackfillReserved >,
boost::statechart::custom_reaction< RejectTooFullRemoteReservation >,
boost::statechart::custom_reaction< RemoteReservationRejectedTooFull >,
boost::statechart::custom_reaction< RemoteReservationCanceled >
> reactions;
explicit RepWaitBackfillReserved(my_context ctx);
void exit();
boost::statechart::result react(const RemoteBackfillReserved &evt);
boost::statechart::result react(const RejectTooFullRemoteReservation &evt);
boost::statechart::result react(const RemoteReservationRejectedTooFull &evt);
boost::statechart::result react(const RemoteReservationCanceled &evt);
};
struct RepWaitRecoveryReserved : boost::statechart::state< RepWaitRecoveryReserved, ReplicaActive >, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< RemoteRecoveryReserved >,
// for compat with old peers
boost::statechart::custom_reaction< RemoteReservationRejectedTooFull >,
boost::statechart::custom_reaction< RemoteReservationCanceled >
> reactions;
explicit RepWaitRecoveryReserved(my_context ctx);
void exit();
boost::statechart::result react(const RemoteRecoveryReserved &evt);
boost::statechart::result react(const RemoteReservationRejectedTooFull &evt) {
// for compat with old peers
post_event(RemoteReservationCanceled());
return discard_event();
}
boost::statechart::result react(const RemoteReservationCanceled &evt);
};
struct RepNotRecovering : boost::statechart::state< RepNotRecovering, ReplicaActive>, NamedState {
typedef boost::mpl::list<
boost::statechart::custom_reaction< RequestRecoveryPrio >,
boost::statechart::custom_reaction< RequestBackfillPrio >,
boost::statechart::custom_reaction< RejectTooFullRemoteReservation >,
boost::statechart::transition< RemoteReservationRejectedTooFull, RepNotRecovering >,
boost::statechart::transition< RemoteReservationCanceled, RepNotRecovering >,
boost::statechart::custom_reaction< RemoteRecoveryReserved >,
boost::statechart::custom_reaction< RemoteBackfillReserved >,
boost::statechart::transition< RecoveryDone, RepNotRecovering > // for compat with pre-reservation peers
> reactions;
explicit RepNotRecovering(my_context ctx);
boost::statechart::result react(const RequestRecoveryPrio &evt);
boost::statechart::result react(const RequestBackfillPrio &evt);
boost::statechart::result react(const RemoteBackfillReserved &evt) {
// my reservation completion raced with a RELEASE from primary
return discard_event();
}
boost::statechart::result react(const RemoteRecoveryReserved &evt) {
// my reservation completion raced with a RELEASE from primary
return discard_event();
}
boost::statechart::result react(const RejectTooFullRemoteReservation &evt);
void exit();
};
struct Recovering : boost::statechart::state< Recovering, Active >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< AllReplicasRecovered >,
boost::statechart::custom_reaction< DeferRecovery >,
boost::statechart::custom_reaction< UnfoundRecovery >,
boost::statechart::custom_reaction< RequestBackfill >
> reactions;
explicit Recovering(my_context ctx);
void exit();
void release_reservations(bool cancel = false);
boost::statechart::result react(const AllReplicasRecovered &evt);
boost::statechart::result react(const DeferRecovery& evt);
boost::statechart::result react(const UnfoundRecovery& evt);
boost::statechart::result react(const RequestBackfill &evt);
};
struct WaitRemoteRecoveryReserved : boost::statechart::state< WaitRemoteRecoveryReserved, Active >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< RemoteRecoveryReserved >,
boost::statechart::transition< AllRemotesReserved, Recovering >
> reactions;
std::set<pg_shard_t>::const_iterator remote_recovery_reservation_it;
explicit WaitRemoteRecoveryReserved(my_context ctx);
boost::statechart::result react(const RemoteRecoveryReserved &evt);
void exit();
};
struct WaitLocalRecoveryReserved : boost::statechart::state< WaitLocalRecoveryReserved, Active >, NamedState {
typedef boost::mpl::list <
boost::statechart::transition< LocalRecoveryReserved, WaitRemoteRecoveryReserved >,
boost::statechart::custom_reaction< RecoveryTooFull >
> reactions;
explicit WaitLocalRecoveryReserved(my_context ctx);
void exit();
boost::statechart::result react(const RecoveryTooFull &evt);
};
struct Activating : boost::statechart::state< Activating, Active >, NamedState {
typedef boost::mpl::list <
boost::statechart::transition< AllReplicasRecovered, Recovered >,
boost::statechart::transition< DoRecovery, WaitLocalRecoveryReserved >,
boost::statechart::transition< RequestBackfill, WaitLocalBackfillReserved >
> reactions;
explicit Activating(my_context ctx);
void exit();
};
struct Stray : boost::statechart::state< Stray, Started >,
NamedState {
explicit Stray(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< MQuery >,
boost::statechart::custom_reaction< MLogRec >,
boost::statechart::custom_reaction< MInfoRec >,
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< RecoveryDone >,
boost::statechart::transition<DeleteStart, ToDelete>
> reactions;
boost::statechart::result react(const MQuery& query);
boost::statechart::result react(const MLogRec& logevt);
boost::statechart::result react(const MInfoRec& infoevt);
boost::statechart::result react(const ActMap&);
boost::statechart::result react(const RecoveryDone&) {
return discard_event();
}
};
struct WaitDeleteReserved;
struct ToDelete : boost::statechart::state<ToDelete, Started, WaitDeleteReserved>, NamedState {
unsigned priority = 0;
typedef boost::mpl::list <
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< ActivateCommitted >,
boost::statechart::custom_reaction< DeleteSome >
> reactions;
explicit ToDelete(my_context ctx);
boost::statechart::result react(const ActMap &evt);
boost::statechart::result react(const DeleteSome &evt) {
// happens if we drop out of Deleting due to reprioritization etc.
return discard_event();
}
boost::statechart::result react(const ActivateCommitted&) {
// Can happen if we were activated as a stray but not actually pulled
// from prior to the pg going clean and sending a delete.
return discard_event();
}
void exit();
};
struct Deleting;
struct WaitDeleteReserved : boost::statechart::state<WaitDeleteReserved,
ToDelete>, NamedState {
typedef boost::mpl::list <
boost::statechart::transition<DeleteReserved, Deleting>
> reactions;
explicit WaitDeleteReserved(my_context ctx);
void exit();
};
struct Deleting : boost::statechart::state<Deleting,
ToDelete>, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< DeleteSome >,
boost::statechart::transition<DeleteInterrupted, WaitDeleteReserved>
> reactions;
ghobject_t next;
explicit Deleting(my_context ctx);
boost::statechart::result react(const DeleteSome &evt);
void exit();
};
struct GetLog;
struct GetInfo : boost::statechart::state< GetInfo, Peering >, NamedState {
std::set<pg_shard_t> peer_info_requested;
explicit GetInfo(my_context ctx);
void exit();
void get_infos();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::transition< GotInfo, GetLog >,
boost::statechart::custom_reaction< MNotifyRec >,
boost::statechart::transition< IsDown, Down >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const MNotifyRec& infoevt);
};
struct GotLog : boost::statechart::event< GotLog > {
GotLog() : boost::statechart::event< GotLog >() {}
};
struct GetLog : boost::statechart::state< GetLog, Peering >, NamedState {
pg_shard_t auth_log_shard;
boost::intrusive_ptr<MOSDPGLog> msg;
explicit GetLog(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< MLogRec >,
boost::statechart::custom_reaction< GotLog >,
boost::statechart::custom_reaction< AdvMap >,
boost::statechart::transition< NeedActingChange, WaitActingChange >,
boost::statechart::transition< IsIncomplete, Incomplete >
> reactions;
boost::statechart::result react(const AdvMap&);
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const MLogRec& logevt);
boost::statechart::result react(const GotLog&);
};
struct WaitUpThru;
struct GetMissing : boost::statechart::state< GetMissing, Peering >, NamedState {
std::set<pg_shard_t> peer_missing_requested;
explicit GetMissing(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< MLogRec >,
boost::statechart::transition< NeedUpThru, WaitUpThru >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const MLogRec& logevt);
};
struct WaitUpThru : boost::statechart::state< WaitUpThru, Peering >, NamedState {
explicit WaitUpThru(my_context ctx);
void exit();
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< ActMap >,
boost::statechart::custom_reaction< MLogRec >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const ActMap& am);
boost::statechart::result react(const MLogRec& logrec);
};
struct Down : boost::statechart::state< Down, Peering>, NamedState {
explicit Down(my_context ctx);
typedef boost::mpl::list <
boost::statechart::custom_reaction< QueryState >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< MNotifyRec >
> reactions;
boost::statechart::result react(const QueryState& q);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const MNotifyRec& infoevt);
void exit();
};
struct Incomplete : boost::statechart::state< Incomplete, Peering>, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< AdvMap >,
boost::statechart::custom_reaction< MNotifyRec >,
boost::statechart::custom_reaction< QueryUnfound >,
boost::statechart::custom_reaction< QueryState >
> reactions;
explicit Incomplete(my_context ctx);
boost::statechart::result react(const AdvMap &advmap);
boost::statechart::result react(const MNotifyRec& infoevt);
boost::statechart::result react(const QueryUnfound& q);
boost::statechart::result react(const QueryState& q);
void exit();
};
PGStateHistory state_history;
CephContext* cct;
spg_t spgid;
DoutPrefixProvider *dpp;
PeeringListener *pl;
/// context passed in by state machine caller
PeeringCtx *orig_ctx;
/// populated if we are buffering messages pending a flush
std::optional<BufferedRecoveryMessages> messages_pending_flush;
/**
* populated between start_handle() and end_handle(), points into
* the message lists for messages_pending_flush while blocking messages
* or into orig_ctx otherwise
*/
std::optional<PeeringCtxWrapper> rctx;
/**
* OSDMap state
*/
OSDMapRef osdmap_ref; ///< Reference to current OSDMap
PGPool pool; ///< Current pool state
epoch_t last_persisted_osdmap = 0; ///< Last osdmap epoch persisted
/**
* Peering state information
*/
int role = -1; ///< 0 = primary, 1 = replica, -1=none.
uint64_t state = 0; ///< PG_STATE_*
pg_shard_t primary; ///< id/shard of primary
pg_shard_t pg_whoami; ///< my id/shard
pg_shard_t up_primary; ///< id/shard of primary of up set
std::vector<int> up; ///< crush mapping without temp pgs
std::set<pg_shard_t> upset; ///< up in set form
std::vector<int> acting; ///< actual acting set for the current interval
std::set<pg_shard_t> actingset; ///< acting in set form
/// union of acting, recovery, and backfill targets
std::set<pg_shard_t> acting_recovery_backfill;
std::vector<HeartbeatStampsRef> hb_stamps;
ceph::signedspan readable_interval = ceph::signedspan::zero();
/// how long we can service reads in this interval
ceph::signedspan readable_until = ceph::signedspan::zero();
/// upper bound on any acting OSDs' readable_until in this interval
ceph::signedspan readable_until_ub = ceph::signedspan::zero();
/// upper bound from prior interval(s)
ceph::signedspan prior_readable_until_ub = ceph::signedspan::zero();
/// pg instances from prior interval(s) that may still be readable
std::set<int> prior_readable_down_osds;
/// [replica] upper bound we got from the primary (primary's clock)
ceph::signedspan readable_until_ub_from_primary = ceph::signedspan::zero();
/// [primary] last upper bound shared by primary to replicas
ceph::signedspan readable_until_ub_sent = ceph::signedspan::zero();
/// [primary] readable ub acked by acting set members
std::vector<ceph::signedspan> acting_readable_until_ub;
bool send_notify = false; ///< True if a notify needs to be sent to the primary
bool dirty_info = false; ///< small info structure on disk out of date
bool dirty_big_info = false; ///< big info structure on disk out of date
pg_info_t info; ///< current pg info
pg_info_t last_written_info; ///< last written info
PastIntervals past_intervals; ///< information about prior pg mappings
PGLog pg_log; ///< pg log
epoch_t last_peering_reset = 0; ///< epoch of last peering reset
/// last_update that has committed; ONLY DEFINED WHEN is_active()
eversion_t last_update_ondisk;
eversion_t last_complete_ondisk; ///< last_complete that has committed.
eversion_t last_update_applied; ///< last_update readable
/// last version to which rollback_info trimming has been applied
eversion_t last_rollback_info_trimmed_to_applied;
/// Counter to determine when pending flushes have completed
unsigned flushes_in_progress = 0;
/**
* Primary state
*/
std::set<pg_shard_t> stray_set; ///< non-acting osds that have PG data.
std::map<pg_shard_t, pg_info_t> peer_info; ///< info from peers (stray or prior)
std::map<pg_shard_t, int64_t> peer_bytes; ///< Peer's num_bytes from peer_info
std::set<pg_shard_t> peer_purged; ///< peers purged
std::map<pg_shard_t, pg_missing_t> peer_missing; ///< peer missing sets
  std::set<pg_shard_t> peer_log_requested; ///< logs I've requested (and start stamps)
std::set<pg_shard_t> peer_missing_requested; ///< missing sets requested
/// features supported by all peers
uint64_t peer_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
/// features supported by acting set
uint64_t acting_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
/// features supported by up and acting
uint64_t upacting_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
  /// most recently consumed osdmap's require_osd_release
ceph_release_t last_require_osd_release;
std::vector<int> want_acting; ///< non-empty while peering needs a new acting set
// acting_recovery_backfill contains shards that are acting,
// async recovery targets, or backfill targets.
std::map<pg_shard_t,eversion_t> peer_last_complete_ondisk;
/// up: min over last_complete_ondisk, peer_last_complete_ondisk
eversion_t min_last_complete_ondisk;
/// point to which the log should be trimmed
eversion_t pg_trim_to;
std::set<int> blocked_by; ///< osds we are blocked by (for pg stats)
bool need_up_thru = false; ///< true if osdmap with updated up_thru needed
/// I deleted these strays; ignore racing PGInfo from them
std::set<pg_shard_t> peer_activated;
std::set<pg_shard_t> backfill_targets; ///< osds to be backfilled
std::set<pg_shard_t> async_recovery_targets; ///< osds to be async recovered
/// osds which might have objects on them which are unfound on the primary
std::set<pg_shard_t> might_have_unfound;
  bool deleting = false; /// true while the PG is being removed or the OSD is shutting down
std::atomic<bool> deleted = {false}; /// true once deletion complete
MissingLoc missing_loc; ///< information about missing objects
bool backfill_reserved = false;
bool backfill_reserving = false;
PeeringMachine machine;
void update_osdmap_ref(OSDMapRef newmap) {
osdmap_ref = std::move(newmap);
}
void update_heartbeat_peers();
void query_unfound(Formatter *f, std::string state);
bool proc_replica_info(
pg_shard_t from, const pg_info_t &oinfo, epoch_t send_epoch);
void remove_down_peer_info(const OSDMapRef &osdmap);
void check_recovery_sources(const OSDMapRef& map);
void set_last_peering_reset();
void check_full_transition(OSDMapRef lastmap, OSDMapRef osdmap);
bool should_restart_peering(
int newupprimary,
int newactingprimary,
const std::vector<int>& newup,
const std::vector<int>& newacting,
OSDMapRef lastmap,
OSDMapRef osdmap);
void start_peering_interval(
const OSDMapRef lastmap,
const std::vector<int>& newup, int up_primary,
const std::vector<int>& newacting, int acting_primary,
ObjectStore::Transaction &t);
void on_new_interval();
void clear_recovery_state();
void clear_primary_state();
void check_past_interval_bounds() const;
bool set_force_recovery(bool b);
bool set_force_backfill(bool b);
/// clip calculated priority to reasonable range
int clamp_recovery_priority(int prio, int pool_recovery_prio, int max);
/// get log recovery reservation priority
unsigned get_recovery_priority();
/// get backfill reservation priority
unsigned get_backfill_priority();
/// get priority for pg deletion
unsigned get_delete_priority();
public:
/**
* recovery_msg_priority_t
*
* Defines priority values for use with recovery messages. The values are
   * chosen to be reasonable for wpq during upgrade scenarios, but are
* actually translated into a class in PGRecoveryMsg::get_scheduler_class()
*/
enum recovery_msg_priority_t : int {
FORCED = 20,
UNDERSIZED = 15,
DEGRADED = 10,
BEST_EFFORT = 5
};
/// get message priority for recovery messages
int get_recovery_op_priority() const {
if (cct->_conf->osd_op_queue == "mclock_scheduler") {
/* For mclock, we use special priority values which will be
* translated into op classes within PGRecoveryMsg::get_scheduler_class
*/
if (is_forced_recovery_or_backfill()) {
return recovery_msg_priority_t::FORCED;
} else if (is_undersized()) {
return recovery_msg_priority_t::UNDERSIZED;
} else if (is_degraded()) {
return recovery_msg_priority_t::DEGRADED;
} else {
return recovery_msg_priority_t::BEST_EFFORT;
}
} else {
/* For WeightedPriorityQueue, we use pool or osd config settings to
* statically set the priority for recovery messages. This special
* handling should probably be removed after Reef */
int64_t pri = 0;
pool.info.opts.get(pool_opts_t::RECOVERY_OP_PRIORITY, &pri);
return pri > 0 ? pri : cct->_conf->osd_recovery_op_priority;
}
}
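  /* Editor's note (not part of the original source): under wpq the priority
   * can come from the pool, e.g. via an admin command along the lines of
   *   ceph osd pool set <pool> recovery_op_priority 5
   * (illustrative), falling back to the osd_recovery_op_priority config
   * option when the pool option is unset or non-positive.
   */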
private:
bool check_prior_readable_down_osds(const OSDMapRef& map);
bool adjust_need_up_thru(const OSDMapRef osdmap);
PastIntervals::PriorSet build_prior();
void reject_reservation();
  // acting set
std::map<pg_shard_t, pg_info_t>::const_iterator find_best_info(
const std::map<pg_shard_t, pg_info_t> &infos,
bool restrict_to_up_acting,
bool *history_les_bound) const;
static void calc_ec_acting(
std::map<pg_shard_t, pg_info_t>::const_iterator auth_log_shard,
unsigned size,
const std::vector<int> &acting,
const std::vector<int> &up,
const std::map<pg_shard_t, pg_info_t> &all_info,
bool restrict_to_up_acting,
std::vector<int> *want,
std::set<pg_shard_t> *backfill,
std::set<pg_shard_t> *acting_backfill,
std::ostream &ss);
static std::pair<std::map<pg_shard_t, pg_info_t>::const_iterator, eversion_t>
select_replicated_primary(
std::map<pg_shard_t, pg_info_t>::const_iterator auth_log_shard,
uint64_t force_auth_primary_missing_objects,
const std::vector<int> &up,
pg_shard_t up_primary,
const std::map<pg_shard_t, pg_info_t> &all_info,
const OSDMapRef osdmap,
std::ostream &ss);
static void calc_replicated_acting(
std::map<pg_shard_t, pg_info_t>::const_iterator primary_shard,
eversion_t oldest_auth_log_entry,
unsigned size,
const std::vector<int> &acting,
const std::vector<int> &up,
pg_shard_t up_primary,
const std::map<pg_shard_t, pg_info_t> &all_info,
bool restrict_to_up_acting,
std::vector<int> *want,
std::set<pg_shard_t> *backfill,
std::set<pg_shard_t> *acting_backfill,
const OSDMapRef osdmap,
const PGPool& pool,
std::ostream &ss);
static void calc_replicated_acting_stretch(
std::map<pg_shard_t, pg_info_t>::const_iterator primary_shard,
eversion_t oldest_auth_log_entry,
unsigned size,
const std::vector<int> &acting,
const std::vector<int> &up,
pg_shard_t up_primary,
const std::map<pg_shard_t, pg_info_t> &all_info,
bool restrict_to_up_acting,
std::vector<int> *want,
std::set<pg_shard_t> *backfill,
std::set<pg_shard_t> *acting_backfill,
const OSDMapRef osdmap,
const PGPool& pool,
std::ostream &ss);
void choose_async_recovery_ec(
const std::map<pg_shard_t, pg_info_t> &all_info,
const pg_info_t &auth_info,
std::vector<int> *want,
std::set<pg_shard_t> *async_recovery,
const OSDMapRef osdmap) const;
void choose_async_recovery_replicated(
const std::map<pg_shard_t, pg_info_t> &all_info,
const pg_info_t &auth_info,
std::vector<int> *want,
std::set<pg_shard_t> *async_recovery,
const OSDMapRef osdmap) const;
bool recoverable(const std::vector<int> &want) const;
bool choose_acting(pg_shard_t &auth_log_shard,
bool restrict_to_up_acting,
bool *history_les_bound,
bool request_pg_temp_change_only = false);
bool search_for_missing(
const pg_info_t &oinfo, const pg_missing_t &omissing,
pg_shard_t fromosd,
PeeringCtxWrapper &rctx);
void build_might_have_unfound();
void log_weirdness();
void activate(
ObjectStore::Transaction& t,
epoch_t activation_epoch,
PeeringCtxWrapper &ctx);
void rewind_divergent_log(ObjectStore::Transaction& t, eversion_t newhead);
void merge_log(
ObjectStore::Transaction& t, pg_info_t &oinfo,
pg_log_t&& olog, pg_shard_t from);
void proc_primary_info(ObjectStore::Transaction &t, const pg_info_t &info);
void proc_master_log(ObjectStore::Transaction& t, pg_info_t &oinfo,
pg_log_t&& olog, pg_missing_t&& omissing,
pg_shard_t from);
void proc_replica_log(pg_info_t &oinfo, const pg_log_t &olog,
pg_missing_t&& omissing, pg_shard_t from);
void calc_min_last_complete_ondisk();
void fulfill_info(
pg_shard_t from, const pg_query_t &query,
std::pair<pg_shard_t, pg_info_t> ¬ify_info);
void fulfill_log(
pg_shard_t from, const pg_query_t &query, epoch_t query_epoch);
void fulfill_query(const MQuery& q, PeeringCtxWrapper &rctx);
void try_mark_clean();
void update_blocked_by();
void update_calc_stats();
void add_log_entry(const pg_log_entry_t& e, bool applied);
void calc_trim_to();
void calc_trim_to_aggressive();
public:
PeeringState(
CephContext *cct,
pg_shard_t pg_whoami,
spg_t spgid,
const PGPool &pool,
OSDMapRef curmap,
DoutPrefixProvider *dpp,
PeeringListener *pl);
/// Process evt
void handle_event(const boost::statechart::event_base &evt,
PeeringCtx *rctx) {
start_handle(rctx);
machine.process_event(evt);
end_handle();
}
/// Process evt
void handle_event(PGPeeringEventRef evt,
PeeringCtx *rctx) {
start_handle(rctx);
machine.process_event(evt->get_event());
end_handle();
}
/// Init fresh instance of PG
void init(
int role,
const std::vector<int>& newup, int new_up_primary,
const std::vector<int>& newacting, int new_acting_primary,
const pg_history_t& history,
const PastIntervals& pi,
ObjectStore::Transaction &t);
/// Init pg instance from disk state
template <typename F>
auto init_from_disk_state(
pg_info_t &&info_from_disk,
PastIntervals &&past_intervals_from_disk,
F &&pg_log_init) {
info = std::move(info_from_disk);
last_written_info = info;
past_intervals = std::move(past_intervals_from_disk);
auto ret = pg_log_init(pg_log);
log_weirdness();
return ret;
}
  /// Set initial primary/acting
void init_primary_up_acting(
const std::vector<int> &newup,
const std::vector<int> &newacting,
int new_up_primary,
int new_acting_primary);
void init_hb_stamps();
  /// Set initial role
void set_role(int r) {
role = r;
}
  /// Set predicates used for determining readable and recoverable
void set_backend_predicates(
IsPGReadablePredicate *is_readable,
IsPGRecoverablePredicate *is_recoverable) {
missing_loc.set_backend_predicates(is_readable, is_recoverable);
}
/// Send current pg_info to peers
void share_pg_info();
/// Get stats for child pgs
void start_split_stats(
const std::set<spg_t>& childpgs, std::vector<object_stat_sum_t> *out);
/// Update new child with stats
void finish_split_stats(
const object_stat_sum_t& stats, ObjectStore::Transaction &t);
/// Split state for child_pgid into *child
void split_into(
pg_t child_pgid, PeeringState *child, unsigned split_bits);
/// Merge state from sources
void merge_from(
std::map<spg_t,PeeringState *>& sources,
PeeringCtx &rctx,
unsigned split_bits,
const pg_merge_meta_t& last_pg_merge_meta);
/// Permit stray replicas to purge now unnecessary state
void purge_strays();
/**
* update_stats
*
   * Mechanism for updating stats and/or history.  Pass t to mark
   * dirty and write out.  The callback f should return true if stats
   * should be published to the OSD.
*/
void update_stats(
std::function<bool(pg_history_t &, pg_stat_t &)> f,
ObjectStore::Transaction *t = nullptr);
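  /* Editor's sketch (not part of the original source): a caller holding the
   * PG lock might bump a stat and request publication like so; the lambda's
   * return value controls publication and passing a transaction marks the
   * info dirty for write-out.  The field touched here is illustrative.
   *
   *   ObjectStore::Transaction t;
   *   update_stats(
   *     [](pg_history_t &history, pg_stat_t &stats) {
   *       stats.last_fresh = ceph_clock_now();
   *       return true;   // publish updated stats to the OSD
   *     },
   *     &t);
   */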
void update_stats_wo_resched(
std::function<void(pg_history_t &, pg_stat_t &)> f);
/**
* adjust_purged_snaps
*
* Mechanism for updating purged_snaps. Marks dirty_info, big_dirty_info.
*/
void adjust_purged_snaps(
std::function<void(interval_set<snapid_t> &snaps)> f);
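  /* Editor's sketch (not part of the original source): the callback mutates
   * the purged_snaps interval set in place, e.g. recording a newly purged
   * range (variable names are hypothetical):
   *
   *   adjust_purged_snaps(
   *     [&](interval_set<snapid_t> &snaps) {
   *       snaps.insert(first_purged, num_purged);
   *     });
   */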
/// Updates info.hit_set to hset_history, does not dirty
void update_hset(const pg_hit_set_history_t &hset_history);
/// Get all pg_shards that needs recovery
std::vector<pg_shard_t> get_replica_recovery_order() const;
/**
* update_history
*
* Merges new_history into info.history clearing past_intervals and
* dirtying as needed.
*
* Calls PeeringListener::on_info_history_change()
*/
void update_history(const pg_history_t& new_history);
/**
* prepare_stats_for_publish
*
* Returns updated pg_stat_t if stats have changed since
* pg_stats_publish adding in unstable_stats.
*
* @param pg_stats_publish the latest pg_stat possessed by caller
* @param unstable_stats additional stats which should be included in the
* returned stats
   * @return the up-to-date stats if they differ from the specified
* @c pg_stats_publish
*/
std::optional<pg_stat_t> prepare_stats_for_publish(
const std::optional<pg_stat_t> &pg_stats_publish,
const object_stat_collection_t &unstable_stats);
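  /* Editor's sketch (not part of the original source): callers keep the last
   * published stats and only forward a freshly returned value, e.g.:
   *
   *   std::optional<pg_stat_t> published;      // caller-side cache
   *   object_stat_collection_t unstable;       // not-yet-persisted deltas
   *   if (auto next = prepare_stats_for_publish(published, unstable); next) {
   *     published = *next;
   *     // hand *next to the OSD for reporting
   *   }
   */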
/**
* Merge entries updating missing as necessary on all
* acting_recovery_backfill logs and missings (also missing_loc)
*/
bool append_log_entries_update_missing(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObjectStore::Transaction &t,
std::optional<eversion_t> trim_to,
std::optional<eversion_t> roll_forward_to);
void append_log_with_trim_to_updated(
std::vector<pg_log_entry_t>&& log_entries,
eversion_t roll_forward_to,
ObjectStore::Transaction &t,
bool transaction_applied,
bool async) {
update_trim_to();
append_log(std::move(log_entries), pg_trim_to, roll_forward_to,
min_last_complete_ondisk, t, transaction_applied, async);
}
/**
* Updates local log to reflect new write from primary.
*/
void append_log(
std::vector<pg_log_entry_t>&& logv,
eversion_t trim_to,
eversion_t roll_forward_to,
eversion_t min_last_complete_ondisk,
ObjectStore::Transaction &t,
bool transaction_applied,
bool async);
/**
* retrieve the min last_backfill among backfill targets
*/
hobject_t earliest_backfill() const;
/**
* Updates local log/missing to reflect new oob log update from primary
*/
void merge_new_log_entries(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObjectStore::Transaction &t,
std::optional<eversion_t> trim_to,
std::optional<eversion_t> roll_forward_to);
/// Update missing set to reflect e (TODOSAM: not sure why this is needed)
void add_local_next_event(const pg_log_entry_t& e) {
pg_log.missing_add_next_entry(e);
}
/// Update log trim boundary
void update_trim_to() {
bool hard_limit = (get_osdmap()->test_flag(CEPH_OSDMAP_PGLOG_HARDLIMIT));
if (hard_limit)
calc_trim_to_aggressive();
else
calc_trim_to();
}
/// Pre-process pending update on hoid represented by logv
void pre_submit_op(
const hobject_t &hoid,
const std::vector<pg_log_entry_t>& logv,
eversion_t at_version);
/// Signal that oid has been locally recovered to version v
void recover_got(
const hobject_t &oid, eversion_t v,
bool is_delete,
ObjectStore::Transaction &t);
/// Signal that oid has been recovered on peer to version
void on_peer_recover(
pg_shard_t peer,
const hobject_t &soid,
const eversion_t &version);
/// Notify that soid is being recovered on peer
void begin_peer_recover(
pg_shard_t peer,
const hobject_t soid);
/// Pull missing sets from all candidate peers
bool discover_all_missing(
BufferedRecoveryMessages &rctx);
  /// Notify that hoid has been fully recovered
void object_recovered(
const hobject_t &hoid,
const object_stat_sum_t &stat_diff) {
info.stats.stats.sum.add(stat_diff);
missing_loc.recovered(hoid);
}
/// Update info/stats to reflect backfill progress
void update_backfill_progress(
const hobject_t &updated_backfill,
const pg_stat_t &updated_stats,
bool preserve_local_num_bytes,
ObjectStore::Transaction &t);
/// Update info/stats to reflect completed backfill on hoid
void update_complete_backfill_object_stats(
const hobject_t &hoid,
const pg_stat_t &stats);
/// Update last_backfill for peer to new_last_backfill
void update_peer_last_backfill(
pg_shard_t peer,
const hobject_t &new_last_backfill);
/// Update info.stats with delta_stats for operation on soid
void apply_op_stats(
const hobject_t &soid,
const object_stat_sum_t &delta_stats);
/**
* force_object_missing
*
   * Force oid on peer to be missing at version.  If the object does not
   * currently need recovery, the provided candidates (if any) or the
   * remainder of the acting set will be deemed to have the object.
*/
void force_object_missing(
const pg_shard_t &peer,
const hobject_t &oid,
eversion_t version) {
force_object_missing(std::set<pg_shard_t>{peer}, oid, version);
}
void force_object_missing(
const std::set<pg_shard_t> &peer,
const hobject_t &oid,
eversion_t version);
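  /* Editor's sketch (not part of the original source): marking a clone
   * missing at a given version on two specific shards (names are
   * hypothetical):
   *
   *   force_object_missing(
   *     std::set<pg_shard_t>{peer_a, peer_b},
   *     clone_oid,
   *     eversion_t(epoch, ver));
   */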
/// Update state prior to backfilling soid on targets
void prepare_backfill_for_missing(
const hobject_t &soid,
const eversion_t &version,
const std::vector<pg_shard_t> &targets);
  /// Set targets with the right version for revert (see recover_primary)
void set_revert_with_targets(
const hobject_t &soid,
const std::set<pg_shard_t> &good_peers);
/// Update lcod for fromosd
void update_peer_last_complete_ondisk(
pg_shard_t fromosd,
eversion_t lcod);
/// Update lcod
void update_last_complete_ondisk(
eversion_t lcod);
/// Update state to reflect recovery up to version
void recovery_committed_to(eversion_t version);
/// Mark recovery complete
void local_recovery_complete() {
info.last_complete = info.last_update;
}
/// Update last_requested pointer to v
void set_last_requested(version_t v) {
pg_log.set_last_requested(v);
}
/// Write dirty state to t
void write_if_dirty(ObjectStore::Transaction& t);
/// Mark write completed to v with persisted lc
void complete_write(eversion_t v, eversion_t lc);
/// Update local write applied pointer
void local_write_applied(eversion_t v) {
last_update_applied = v;
}
/// Updates peering state with new map
void advance_map(
OSDMapRef osdmap, ///< [in] new osdmap
OSDMapRef lastmap, ///< [in] prev osdmap
std::vector<int>& newup, ///< [in] new up set
int up_primary, ///< [in] new up primary
std::vector<int>& newacting, ///< [in] new acting
int acting_primary, ///< [in] new acting primary
PeeringCtx &rctx ///< [out] recovery context
);
/// Activates most recently updated map
void activate_map(
PeeringCtx &rctx ///< [out] recovery context
);
/// resets last_persisted_osdmap
void reset_last_persisted() {
last_persisted_osdmap = 0;
dirty_info = true;
dirty_big_info = true;
}
/// Signal shutdown beginning
void shutdown() {
deleting = true;
}
/// Signal shutdown complete
void set_delete_complete() {
deleted = true;
}
/// Dirty info and write out
void force_write_state(ObjectStore::Transaction &t) {
dirty_info = true;
dirty_big_info = true;
write_if_dirty(t);
}
/// Get current interval's readable_until
ceph::signedspan get_readable_until() const {
return readable_until;
}
/// Get prior intervals' readable_until upper bound
ceph::signedspan get_prior_readable_until_ub() const {
return prior_readable_until_ub;
}
/// Get prior intervals' readable_until down OSDs of note
const std::set<int>& get_prior_readable_down_osds() const {
return prior_readable_down_osds;
}
  /// Reset prior intervals' readable_until upper bound (e.g., because it passed)
void clear_prior_readable_until_ub() {
prior_readable_until_ub = ceph::signedspan::zero();
prior_readable_down_osds.clear();
info.history.prior_readable_until_ub = ceph::signedspan::zero();
}
void renew_lease(ceph::signedspan now) {
bool was_min = (readable_until_ub == readable_until);
readable_until_ub_sent = now + readable_interval;
if (was_min) {
recalc_readable_until();
}
}
void send_lease();
void schedule_renew_lease();
pg_lease_t get_lease() {
return pg_lease_t(readable_until, readable_until_ub_sent, readable_interval);
}
void proc_lease(const pg_lease_t& l);
void proc_lease_ack(int from, const pg_lease_ack_t& la);
void proc_renew_lease();
pg_lease_ack_t get_lease_ack() {
return pg_lease_ack_t(readable_until_ub_from_primary);
}
/// [primary] recalc readable_until[_ub] for the current interval
void recalc_readable_until();
//============================ const helpers ================================
const char *get_current_state() const {
return state_history.get_current_state();
}
epoch_t get_last_peering_reset() const {
return last_peering_reset;
}
eversion_t get_last_rollback_info_trimmed_to_applied() const {
return last_rollback_info_trimmed_to_applied;
}
/// Returns stable reference to internal pool structure
const PGPool &get_pgpool() const {
return pool;
}
/// Returns reference to current osdmap
const OSDMapRef &get_osdmap() const {
ceph_assert(osdmap_ref);
return osdmap_ref;
}
/// Returns epoch of current osdmap
epoch_t get_osdmap_epoch() const {
return get_osdmap()->get_epoch();
}
bool is_ec_pg() const override {
return pool.info.is_erasure();
}
int get_pg_size() const override {
return pool.info.size;
}
bool is_deleting() const {
return deleting;
}
bool is_deleted() const {
return deleted;
}
const std::set<pg_shard_t> &get_upset() const override {
return upset;
}
bool is_acting_recovery_backfill(pg_shard_t osd) const {
return acting_recovery_backfill.count(osd);
}
bool is_acting(pg_shard_t osd) const {
return has_shard(pool.info.is_erasure(), acting, osd);
}
bool is_up(pg_shard_t osd) const {
return has_shard(pool.info.is_erasure(), up, osd);
}
static bool has_shard(bool ec, const std::vector<int>& v, pg_shard_t osd) {
if (ec) {
return v.size() > (unsigned)osd.shard && v[osd.shard] == osd.osd;
} else {
return std::find(v.begin(), v.end(), osd.osd) != v.end();
}
}
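  /* Editor's note (not part of the original source): for erasure-coded pools
   * the shard id is a positional index, so e.g.
   *   has_shard(true, {3, 1, 4}, pg_shard_t(1, shard_id_t(1)))
   * is true only because v[1] == 1; for replicated pools the shard id is
   * ignored and plain membership of osd.osd in v is tested instead.
   */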
const PastIntervals& get_past_intervals() const {
return past_intervals;
}
/// acting osd that is not the primary
bool is_nonprimary() const {
return role >= 0 && pg_whoami != primary;
}
/// primary osd
bool is_primary() const {
return pg_whoami == primary;
}
bool pg_has_reset_since(epoch_t e) const {
return deleted || e < get_last_peering_reset();
}
int get_role() const {
return role;
}
const std::vector<int> &get_acting() const {
return acting;
}
const std::set<pg_shard_t> &get_actingset() const {
return actingset;
}
int get_acting_primary() const {
return primary.osd;
}
pg_shard_t get_primary() const {
return primary;
}
const std::vector<int> &get_up() const {
return up;
}
int get_up_primary() const {
return up_primary.osd;
}
bool is_backfill_target(pg_shard_t osd) const {
return backfill_targets.count(osd);
}
const std::set<pg_shard_t> &get_backfill_targets() const {
return backfill_targets;
}
bool is_async_recovery_target(pg_shard_t peer) const {
return async_recovery_targets.count(peer);
}
const std::set<pg_shard_t> &get_async_recovery_targets() const {
return async_recovery_targets;
}
const std::set<pg_shard_t> &get_acting_recovery_backfill() const {
return acting_recovery_backfill;
}
const PGLog &get_pg_log() const {
return pg_log;
}
bool state_test(uint64_t m) const { return (state & m) != 0; }
void state_set(uint64_t m) { state |= m; }
void state_clear(uint64_t m) { state &= ~m; }
bool is_complete() const { return info.last_complete == info.last_update; }
bool should_send_notify() const { return send_notify; }
uint64_t get_state() const { return state; }
bool is_active() const { return state_test(PG_STATE_ACTIVE); }
bool is_activating() const { return state_test(PG_STATE_ACTIVATING); }
bool is_peering() const { return state_test(PG_STATE_PEERING); }
bool is_down() const { return state_test(PG_STATE_DOWN); }
bool is_recovery_unfound() const {
return state_test(PG_STATE_RECOVERY_UNFOUND);
}
bool is_backfilling() const {
return state_test(PG_STATE_BACKFILLING);
}
bool is_backfill_unfound() const {
return state_test(PG_STATE_BACKFILL_UNFOUND);
}
bool is_incomplete() const { return state_test(PG_STATE_INCOMPLETE); }
bool is_clean() const { return state_test(PG_STATE_CLEAN); }
bool is_degraded() const { return state_test(PG_STATE_DEGRADED); }
bool is_undersized() const { return state_test(PG_STATE_UNDERSIZED); }
bool is_remapped() const { return state_test(PG_STATE_REMAPPED); }
bool is_peered() const {
return state_test(PG_STATE_ACTIVE) || state_test(PG_STATE_PEERED);
}
bool is_recovering() const { return state_test(PG_STATE_RECOVERING); }
bool is_premerge() const { return state_test(PG_STATE_PREMERGE); }
bool is_repair() const { return state_test(PG_STATE_REPAIR); }
bool is_empty() const { return info.last_update == eversion_t(0,0); }
bool get_need_up_thru() const {
return need_up_thru;
}
bool is_forced_recovery_or_backfill() const {
return get_state() & (PG_STATE_FORCED_RECOVERY | PG_STATE_FORCED_BACKFILL);
}
bool is_backfill_reserved() const {
return backfill_reserved;
}
bool is_backfill_reserving() const {
return backfill_reserving;
}
ceph_release_t get_last_require_osd_release() const {
return last_require_osd_release;
}
const pg_info_t &get_info() const {
return info;
}
const decltype(peer_info) &get_peer_info() const {
return peer_info;
}
const decltype(peer_missing) &get_peer_missing() const {
return peer_missing;
}
const pg_missing_const_i &get_peer_missing(const pg_shard_t &peer) const {
if (peer == pg_whoami) {
return pg_log.get_missing();
} else {
assert(peer_missing.count(peer));
return peer_missing.find(peer)->second;
}
}
  const pg_info_t& get_peer_info(pg_shard_t peer) const {
assert(peer_info.count(peer));
return peer_info.find(peer)->second;
}
bool has_peer_info(pg_shard_t peer) const {
return peer_info.count(peer);
}
bool needs_recovery() const;
bool needs_backfill() const;
bool can_serve_replica_read(const hobject_t &hoid);
/**
* Returns whether the current acting set is able to go active
* and serve writes. It needs to satisfy min_size and any
* applicable stretch cluster constraints.
*/
bool acting_set_writeable() {
return (actingset.size() >= pool.info.min_size) &&
(pool.info.stretch_set_can_peer(acting, *get_osdmap(), NULL));
}
/**
* Returns whether all peers which might have unfound objects have been
* queried or marked lost.
*/
bool all_unfound_are_queried_or_lost(const OSDMapRef osdmap) const;
bool all_missing_unfound() const {
const auto& missing = pg_log.get_missing();
if (!missing.have_missing())
return false;
for (auto& m : missing.get_items()) {
if (!missing_loc.is_unfound(m.first))
return false;
}
return true;
}
bool perform_deletes_during_peering() const {
return !(get_osdmap()->test_flag(CEPH_OSDMAP_RECOVERY_DELETES));
}
bool have_unfound() const {
return missing_loc.have_unfound();
}
uint64_t get_num_unfound() const {
return missing_loc.num_unfound();
}
bool have_missing() const {
return pg_log.get_missing().num_missing() > 0;
}
unsigned int get_num_missing() const {
return pg_log.get_missing().num_missing();
}
const MissingLoc &get_missing_loc() const {
return missing_loc;
}
const MissingLoc::missing_by_count_t &get_missing_by_count() const {
return missing_loc.get_missing_by_count();
}
eversion_t get_min_last_complete_ondisk() const {
return min_last_complete_ondisk;
}
eversion_t get_pg_trim_to() const {
return pg_trim_to;
}
eversion_t get_last_update_applied() const {
return last_update_applied;
}
eversion_t get_last_update_ondisk() const {
return last_update_ondisk;
}
bool debug_has_dirty_state() const {
return dirty_info || dirty_big_info;
}
std::string get_pg_state_string() const {
return pg_state_string(state);
}
/// Dump representation of past_intervals to out
void print_past_intervals(std::ostream &out) const {
out << "[" << past_intervals.get_bounds()
<< ")/" << past_intervals.size();
}
void dump_history(ceph::Formatter *f) const {
state_history.dump(f);
}
/// Dump formatted peering status
void dump_peering_state(ceph::Formatter *f);
private:
/// Mask feature vector with feature set from new peer
void apply_peer_features(uint64_t f) { peer_features &= f; }
/// Reset feature vector to default
void reset_min_peer_features() {
peer_features = CEPH_FEATURES_SUPPORTED_DEFAULT;
}
public:
/// Get feature vector common to all known peers with this pg
uint64_t get_min_peer_features() const { return peer_features; }
/// Get feature vector common to acting set
uint64_t get_min_acting_features() const { return acting_features; }
/// Get feature vector common to up/acting set
uint64_t get_min_upacting_features() const { return upacting_features; }
// Flush control interface
private:
/**
* Start additional flush (blocks needs_flush/activation until
* complete_flush is called once for each start_flush call as
* required by start_flush_on_transaction).
*/
void start_flush(ObjectStore::Transaction &t) {
flushes_in_progress++;
pl->start_flush_on_transaction(t);
}
public:
/// True if there are outstanding flushes
bool needs_flush() const {
return flushes_in_progress > 0;
}
/// Must be called once per start_flush
void complete_flush();
friend std::ostream &operator<<(std::ostream &out, const PeeringState &ps);
};
std::ostream &operator<<(std::ostream &out, const PeeringState &ps);
| 85,953 | 33.589135 | 115 | h |
null | ceph-main/src/osd/PrimaryLogPG.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <charconv>
#include <sstream>
#include <utility>
#include <boost/intrusive_ptr.hpp>
#include <boost/tuple/tuple.hpp>
#include "PrimaryLogPG.h"
#include "cls/cas/cls_cas_ops.h"
#include "common/CDC.h"
#include "common/EventTrace.h"
#include "common/ceph_crypto.h"
#include "common/config.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/scrub_types.h"
#include "include/compat.h"
#include "json_spirit/json_spirit_reader.h"
#include "json_spirit/json_spirit_value.h"
#include "messages/MCommandReply.h"
#include "messages/MOSDBackoff.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDPGBackfill.h"
#include "messages/MOSDPGBackfillRemove.h"
#include "messages/MOSDPGLog.h"
#include "messages/MOSDPGScan.h"
#include "messages/MOSDPGTrim.h"
#include "messages/MOSDPGUpdateLogMissing.h"
#include "messages/MOSDPGUpdateLogMissingReply.h"
#include "messages/MOSDRepScrub.h"
#include "messages/MOSDScrubReserve.h"
#include "mon/MonClient.h"
#include "objclass/objclass.h"
#include "osd/ClassHandler.h"
#include "osdc/Objecter.h"
#include "osd/scrubber/PrimaryLogScrub.h"
#include "osd/scrubber/ScrubStore.h"
#include "osd/scrubber/pg_scrubber.h"
#include "OSD.h"
#include "OpRequest.h"
#include "PG.h"
#include "Session.h"
// required includes order:
#include "json_spirit/json_spirit_value.h"
#include "json_spirit/json_spirit_reader.h"
#include "include/ceph_assert.h" // json_spirit clobbers it
#include "include/rados/rados_types.hpp"
#ifdef WITH_LTTNG
#include "tracing/osd.h"
#else
#define tracepoint(...)
#endif
#define dout_context cct
#define dout_subsys ceph_subsys_osd
#define DOUT_PREFIX_ARGS this, osd->whoami, get_osdmap()
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
#include "osd_tracer.h"
MEMPOOL_DEFINE_OBJECT_FACTORY(PrimaryLogPG, replicatedpg, osd);
using std::less;
using std::list;
using std::ostream;
using std::pair;
using std::make_pair;
using std::make_unique;
using std::map;
using std::ostringstream;
using std::set;
using std::string;
using std::string_view;
using std::stringstream;
using std::unique_ptr;
using std::vector;
using ceph::bufferlist;
using ceph::bufferptr;
using ceph::Formatter;
using ceph::decode;
using ceph::decode_noclear;
using ceph::encode;
using ceph::encode_destructively;
using namespace ceph::osd::scheduler;
using TOPNSPC::common::cmd_getval;
using TOPNSPC::common::cmd_getval_or;
template <typename T>
static ostream& _prefix(std::ostream *_dout, T *pg) {
return pg->gen_prefix(*_dout);
}
/**
* The CopyCallback class defines an interface for completions to the
* copy_start code. Users of the copy infrastructure must implement
* one and give an instance of the class to start_copy.
*
* The implementer is responsible for making sure that the CopyCallback
* can associate itself with the correct copy operation.
*/
class PrimaryLogPG::CopyCallback : public GenContext<CopyCallbackResults> {
protected:
CopyCallback() {}
/**
* results.get<0>() is the return code: 0 for success; -ECANCELED if
* the operation was cancelled by the local OSD; -errno for other issues.
* results.get<1>() is a pointer to a CopyResults object, which you are
* responsible for deleting.
*/
void finish(CopyCallbackResults results_) override = 0;
public:
/// Provide the final size of the copied object to the CopyCallback
~CopyCallback() override {}
};
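/* Editor's sketch (not part of the original source): CopyFromCallback further
 * below is the in-tree implementer of this interface.  A minimal shape, per
 * the finish() contract documented above, would be:
 *
 *   struct MyCopyCb : PrimaryLogPG::CopyCallback {
 *     void finish(PrimaryLogPG::CopyCallbackResults results) override {
 *       int r = results.get<0>();
 *       std::unique_ptr<PrimaryLogPG::CopyResults> res(results.get<1>());
 *       // act on r / *res; the callback is responsible for freeing results
 *     }
 *   };
 */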
template <typename T>
class PrimaryLogPG::BlessedGenContext : public GenContext<T> {
PrimaryLogPGRef pg;
unique_ptr<GenContext<T>> c;
epoch_t e;
public:
BlessedGenContext(PrimaryLogPG *pg, GenContext<T> *c, epoch_t e)
: pg(pg), c(c), e(e) {}
void finish(T t) override {
std::scoped_lock locker{*pg};
if (pg->pg_has_reset_since(e))
c.reset();
else
c.release()->complete(t);
}
bool sync_finish(T t) {
// we assume here all blessed/wrapped Contexts can complete synchronously.
c.release()->complete(t);
return true;
}
};
GenContext<ThreadPool::TPHandle&> *PrimaryLogPG::bless_gencontext(
GenContext<ThreadPool::TPHandle&> *c) {
return new BlessedGenContext<ThreadPool::TPHandle&>(
this, c, get_osdmap_epoch());
}
template <typename T>
class PrimaryLogPG::UnlockedBlessedGenContext : public GenContext<T> {
PrimaryLogPGRef pg;
unique_ptr<GenContext<T>> c;
epoch_t e;
public:
UnlockedBlessedGenContext(PrimaryLogPG *pg, GenContext<T> *c, epoch_t e)
: pg(pg), c(c), e(e) {}
void finish(T t) override {
if (pg->pg_has_reset_since(e))
c.reset();
else
c.release()->complete(t);
}
bool sync_finish(T t) {
// we assume here all blessed/wrapped Contexts can complete synchronously.
c.release()->complete(t);
return true;
}
};
GenContext<ThreadPool::TPHandle&> *PrimaryLogPG::bless_unlocked_gencontext(
GenContext<ThreadPool::TPHandle&> *c) {
return new UnlockedBlessedGenContext<ThreadPool::TPHandle&>(
this, c, get_osdmap_epoch());
}
class PrimaryLogPG::BlessedContext : public Context {
PrimaryLogPGRef pg;
unique_ptr<Context> c;
epoch_t e;
public:
BlessedContext(PrimaryLogPG *pg, Context *c, epoch_t e)
: pg(pg), c(c), e(e) {}
void finish(int r) override {
std::scoped_lock locker{*pg};
if (pg->pg_has_reset_since(e))
c.reset();
else
c.release()->complete(r);
}
bool sync_finish(int r) override {
// we assume here all blessed/wrapped Contexts can complete synchronously.
c.release()->complete(r);
return true;
}
};
Context *PrimaryLogPG::bless_context(Context *c) {
return new BlessedContext(this, c, get_osdmap_epoch());
}
class PrimaryLogPG::C_PG_ObjectContext : public Context {
PrimaryLogPGRef pg;
ObjectContext *obc;
public:
C_PG_ObjectContext(PrimaryLogPG *p, ObjectContext *o) :
pg(p), obc(o) {}
void finish(int r) override {
pg->object_context_destructor_callback(obc);
}
};
struct OnReadComplete : public Context {
PrimaryLogPG *pg;
PrimaryLogPG::OpContext *opcontext;
OnReadComplete(
PrimaryLogPG *pg,
PrimaryLogPG::OpContext *ctx) : pg(pg), opcontext(ctx) {}
void finish(int r) override {
opcontext->finish_read(pg);
}
~OnReadComplete() override {}
};
class PrimaryLogPG::C_OSD_AppliedRecoveredObject : public Context {
PrimaryLogPGRef pg;
ObjectContextRef obc;
public:
C_OSD_AppliedRecoveredObject(PrimaryLogPG *p, ObjectContextRef o) :
pg(p), obc(o) {}
bool sync_finish(int r) override {
pg->_applied_recovered_object(obc);
return true;
}
void finish(int r) override {
std::scoped_lock locker{*pg};
pg->_applied_recovered_object(obc);
}
};
class PrimaryLogPG::C_OSD_CommittedPushedObject : public Context {
PrimaryLogPGRef pg;
epoch_t epoch;
eversion_t last_complete;
public:
C_OSD_CommittedPushedObject(
PrimaryLogPG *p, epoch_t epoch, eversion_t lc) :
pg(p), epoch(epoch), last_complete(lc) {
}
void finish(int r) override {
pg->_committed_pushed_object(epoch, last_complete);
}
};
class PrimaryLogPG::C_OSD_AppliedRecoveredObjectReplica : public Context {
PrimaryLogPGRef pg;
public:
explicit C_OSD_AppliedRecoveredObjectReplica(PrimaryLogPG *p) :
pg(p) {}
bool sync_finish(int r) override {
pg->_applied_recovered_object_replica();
return true;
}
void finish(int r) override {
std::scoped_lock locker{*pg};
pg->_applied_recovered_object_replica();
}
};
// OpContext
void PrimaryLogPG::OpContext::start_async_reads(PrimaryLogPG *pg)
{
inflightreads = 1;
list<pair<boost::tuple<uint64_t, uint64_t, unsigned>,
pair<bufferlist*, Context*> > > in;
in.swap(pending_async_reads);
pg->pgbackend->objects_read_async(
obc->obs.oi.soid,
in,
new OnReadComplete(pg, this), pg->get_pool().fast_read);
}
void PrimaryLogPG::OpContext::finish_read(PrimaryLogPG *pg)
{
ceph_assert(inflightreads > 0);
--inflightreads;
if (async_reads_complete()) {
ceph_assert(pg->in_progress_async_reads.size());
ceph_assert(pg->in_progress_async_reads.front().second == this);
pg->in_progress_async_reads.pop_front();
// Restart the op context now that all reads have been
// completed. Read failures will be handled by the op finisher
pg->execute_ctx(this);
}
}
class CopyFromCallback : public PrimaryLogPG::CopyCallback {
public:
PrimaryLogPG::CopyResults *results = nullptr;
PrimaryLogPG::OpContext *ctx;
OSDOp &osd_op;
uint32_t truncate_seq;
uint64_t truncate_size;
bool have_truncate = false;
CopyFromCallback(PrimaryLogPG::OpContext *ctx, OSDOp &osd_op)
: ctx(ctx), osd_op(osd_op) {
}
~CopyFromCallback() override {}
void finish(PrimaryLogPG::CopyCallbackResults results_) override {
results = results_.get<1>();
int r = results_.get<0>();
// Only use truncate_{seq,size} from the original object if the client
    // did not send us these parameters
if (!have_truncate) {
truncate_seq = results->truncate_seq;
truncate_size = results->truncate_size;
}
// for finish_copyfrom
ctx->user_at_version = results->user_version;
if (r >= 0) {
ctx->pg->execute_ctx(ctx);
} else {
if (r != -ECANCELED) { // on cancel just toss it out; client resends
if (ctx->op)
ctx->pg->osd->reply_op_error(ctx->op, r);
} else if (results->should_requeue) {
if (ctx->op)
ctx->pg->requeue_op(ctx->op);
}
ctx->pg->close_op_ctx(ctx);
}
}
bool is_temp_obj_used() {
return results->started_temp_obj;
}
uint64_t get_data_size() {
return results->object_size;
}
void set_truncate(uint32_t seq, uint64_t size) {
truncate_seq = seq;
truncate_size = size;
have_truncate = true;
}
};
struct CopyFromFinisher : public PrimaryLogPG::OpFinisher {
CopyFromCallback *copy_from_callback;
explicit CopyFromFinisher(CopyFromCallback *copy_from_callback)
: copy_from_callback(copy_from_callback) {
}
int execute() override {
// instance will be destructed after this method completes
copy_from_callback->ctx->pg->finish_copyfrom(copy_from_callback);
return 0;
}
};
// ======================
// PGBackend::Listener
void PrimaryLogPG::on_local_recover(
const hobject_t &hoid,
const ObjectRecoveryInfo &_recovery_info,
ObjectContextRef obc,
bool is_delete,
ObjectStore::Transaction *t
)
{
dout(10) << __func__ << ": " << hoid << dendl;
ObjectRecoveryInfo recovery_info(_recovery_info);
clear_object_snap_mapping(t, hoid);
if (!is_delete && recovery_info.soid.is_snap()) {
OSDriver::OSTransaction _t(osdriver.get_transaction(t));
set<snapid_t> snaps;
dout(20) << " snapset " << recovery_info.ss << dendl;
auto p = recovery_info.ss.clone_snaps.find(hoid.snap);
if (p != recovery_info.ss.clone_snaps.end()) {
snaps.insert(p->second.begin(), p->second.end());
dout(20) << " snaps " << snaps << dendl;
snap_mapper.add_oid(
recovery_info.soid,
snaps,
&_t);
} else {
derr << __func__ << " " << hoid << " had no clone_snaps" << dendl;
}
}
if (!is_delete && recovery_state.get_pg_log().get_missing().is_missing(recovery_info.soid) &&
recovery_state.get_pg_log().get_missing().get_items().find(recovery_info.soid)->second.need > recovery_info.version) {
ceph_assert(is_primary());
const pg_log_entry_t *latest = recovery_state.get_pg_log().get_log().objects.find(recovery_info.soid)->second;
if (latest->op == pg_log_entry_t::LOST_REVERT &&
latest->reverting_to == recovery_info.version) {
dout(10) << " got old revert version " << recovery_info.version
<< " for " << *latest << dendl;
recovery_info.version = latest->version;
// update the attr to the revert event version
recovery_info.oi.prior_version = recovery_info.oi.version;
recovery_info.oi.version = latest->version;
bufferlist bl;
encode(recovery_info.oi, bl,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
ceph_assert(!pool.info.is_erasure());
t->setattr(coll, ghobject_t(recovery_info.soid), OI_ATTR, bl);
if (obc)
obc->attr_cache[OI_ATTR] = bl;
}
}
// keep track of active pushes for scrub
++active_pushes;
recovery_state.recover_got(
recovery_info.soid,
recovery_info.version,
is_delete,
*t);
if (is_primary()) {
if (!is_delete) {
obc->obs.exists = true;
bool got = obc->get_recovery_read();
ceph_assert(got);
ceph_assert(recovering.count(obc->obs.oi.soid));
recovering[obc->obs.oi.soid] = obc;
obc->obs.oi = recovery_info.oi; // may have been updated above
}
t->register_on_applied(new C_OSD_AppliedRecoveredObject(this, obc));
publish_stats_to_osd();
release_backoffs(hoid);
if (!is_unreadable_object(hoid)) {
auto unreadable_object_entry = waiting_for_unreadable_object.find(hoid);
if (unreadable_object_entry != waiting_for_unreadable_object.end()) {
dout(20) << " kicking unreadable waiters on " << hoid << dendl;
requeue_ops(unreadable_object_entry->second);
waiting_for_unreadable_object.erase(unreadable_object_entry);
}
}
} else {
t->register_on_applied(
new C_OSD_AppliedRecoveredObjectReplica(this));
}
t->register_on_commit(
new C_OSD_CommittedPushedObject(
this,
get_osdmap_epoch(),
info.last_complete));
}
void PrimaryLogPG::on_global_recover(
const hobject_t &soid,
const object_stat_sum_t &stat_diff,
bool is_delete)
{
recovery_state.object_recovered(soid, stat_diff);
publish_stats_to_osd();
dout(10) << "pushed " << soid << " to all replicas" << dendl;
auto i = recovering.find(soid);
ceph_assert(i != recovering.end());
if (i->second && i->second->rwstate.recovery_read_marker) {
// recover missing won't have had an obc, but it gets filled in
// during on_local_recover
ceph_assert(i->second);
list<OpRequestRef> requeue_list;
i->second->drop_recovery_read(&requeue_list);
requeue_ops(requeue_list);
}
backfills_in_flight.erase(soid);
recovering.erase(i);
finish_recovery_op(soid);
release_backoffs(soid);
auto degraded_object_entry = waiting_for_degraded_object.find(soid);
if (degraded_object_entry != waiting_for_degraded_object.end()) {
dout(20) << " kicking degraded waiters on " << soid << dendl;
requeue_ops(degraded_object_entry->second);
waiting_for_degraded_object.erase(degraded_object_entry);
}
auto unreadable_object_entry = waiting_for_unreadable_object.find(soid);
if (unreadable_object_entry != waiting_for_unreadable_object.end()) {
dout(20) << " kicking unreadable waiters on " << soid << dendl;
requeue_ops(unreadable_object_entry->second);
waiting_for_unreadable_object.erase(unreadable_object_entry);
}
finish_degraded_object(soid);
}
void PrimaryLogPG::schedule_recovery_work(
GenContext<ThreadPool::TPHandle&> *c,
uint64_t cost)
{
osd->queue_recovery_context(
this, c, cost,
recovery_state.get_recovery_op_priority());
}
void PrimaryLogPG::replica_clear_repop_obc(
const vector<pg_log_entry_t> &logv,
ObjectStore::Transaction &t)
{
for (auto &&e: logv) {
/* Have to blast all clones, they share a snapset */
object_contexts.clear_range(
e.soid.get_object_boundary(), e.soid.get_head());
ceph_assert(
snapset_contexts.find(e.soid.get_head()) ==
snapset_contexts.end());
}
}
bool PrimaryLogPG::should_send_op(
pg_shard_t peer,
const hobject_t &hoid) {
if (peer == get_primary())
return true;
ceph_assert(recovery_state.has_peer_info(peer));
bool should_send =
hoid.pool != (int64_t)info.pgid.pool() ||
hoid <= last_backfill_started ||
hoid <= recovery_state.get_peer_info(peer).last_backfill;
if (!should_send) {
ceph_assert(is_backfill_target(peer));
dout(10) << __func__ << " issue_repop shipping empty opt to osd." << peer
<< ", object " << hoid
<< " beyond std::max(last_backfill_started "
<< ", peer_info[peer].last_backfill "
<< recovery_state.get_peer_info(peer).last_backfill
<< ")" << dendl;
return should_send;
}
if (is_async_recovery_target(peer) &&
recovery_state.get_peer_missing(peer).is_missing(hoid)) {
should_send = false;
dout(10) << __func__ << " issue_repop shipping empty opt to osd." << peer
<< ", object " << hoid
<< " which is pending recovery in async_recovery_targets" << dendl;
}
return should_send;
}
ConnectionRef PrimaryLogPG::get_con_osd_cluster(
int peer, epoch_t from_epoch)
{
return osd->get_con_osd_cluster(peer, from_epoch);
}
PerfCounters *PrimaryLogPG::get_logger()
{
return osd->logger;
}
// ====================
// missing objects
bool PrimaryLogPG::is_missing_object(const hobject_t& soid) const
{
return recovery_state.get_pg_log().get_missing().get_items().count(soid);
}
void PrimaryLogPG::maybe_kick_recovery(
const hobject_t &soid)
{
eversion_t v;
bool work_started = false;
if (!recovery_state.get_missing_loc().needs_recovery(soid, &v))
return;
map<hobject_t, ObjectContextRef>::const_iterator p = recovering.find(soid);
if (p != recovering.end()) {
dout(7) << "object " << soid << " v " << v << ", already recovering." << dendl;
} else if (recovery_state.get_missing_loc().is_unfound(soid)) {
dout(7) << "object " << soid << " v " << v << ", is unfound." << dendl;
} else {
dout(7) << "object " << soid << " v " << v << ", recovering." << dendl;
PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
if (is_missing_object(soid)) {
recover_missing(soid, v, CEPH_MSG_PRIO_HIGH, h);
} else if (recovery_state.get_missing_loc().is_deleted(soid)) {
prep_object_replica_deletes(soid, v, h, &work_started);
} else {
prep_object_replica_pushes(soid, v, h, &work_started);
}
pgbackend->run_recovery_op(h, CEPH_MSG_PRIO_HIGH);
}
}
void PrimaryLogPG::wait_for_unreadable_object(
const hobject_t& soid, OpRequestRef op)
{
ceph_assert(is_unreadable_object(soid));
maybe_kick_recovery(soid);
waiting_for_unreadable_object[soid].push_back(op);
op->mark_delayed("waiting for missing object");
osd->logger->inc(l_osd_op_delayed_unreadable);
}
bool PrimaryLogPG::is_degraded_or_backfilling_object(const hobject_t& soid)
{
/* The conditions below may clear (on_local_recover, before we queue
* the transaction) before we actually requeue the degraded waiters
* in on_global_recover after the transaction completes.
*/
if (waiting_for_degraded_object.count(soid))
return true;
if (recovery_state.get_pg_log().get_missing().get_items().count(soid))
return true;
ceph_assert(!get_acting_recovery_backfill().empty());
for (set<pg_shard_t>::iterator i = get_acting_recovery_backfill().begin();
i != get_acting_recovery_backfill().end();
++i) {
if (*i == get_primary()) continue;
pg_shard_t peer = *i;
auto peer_missing_entry = recovery_state.get_peer_missing().find(peer);
// If an object is missing on an async_recovery_target, return false.
// This will not block the op and the object is async recovered later.
if (peer_missing_entry != recovery_state.get_peer_missing().end() &&
peer_missing_entry->second.get_items().count(soid)) {
if (is_async_recovery_target(peer))
continue;
else
return true;
}
// Object is degraded if after last_backfill AND
// we are backfilling it
if (is_backfill_target(peer) &&
recovery_state.get_peer_info(peer).last_backfill <= soid &&
last_backfill_started >= soid &&
backfills_in_flight.count(soid))
return true;
}
return false;
}
bool PrimaryLogPG::is_degraded_on_async_recovery_target(const hobject_t& soid)
{
for (auto &i: get_async_recovery_targets()) {
auto peer_missing_entry = recovery_state.get_peer_missing().find(i);
if (peer_missing_entry != recovery_state.get_peer_missing().end() &&
peer_missing_entry->second.get_items().count(soid)) {
dout(30) << __func__ << " " << soid << dendl;
return true;
}
}
return false;
}
void PrimaryLogPG::wait_for_degraded_object(const hobject_t& soid, OpRequestRef op)
{
ceph_assert(is_degraded_or_backfilling_object(soid) || is_degraded_on_async_recovery_target(soid));
maybe_kick_recovery(soid);
waiting_for_degraded_object[soid].push_back(op);
op->mark_delayed("waiting for degraded object");
osd->logger->inc(l_osd_op_delayed_degraded);
}
void PrimaryLogPG::block_write_on_full_cache(
const hobject_t& _oid, OpRequestRef op)
{
const hobject_t oid = _oid.get_head();
dout(20) << __func__ << ": blocking object " << oid
<< " on full cache" << dendl;
objects_blocked_on_cache_full.insert(oid);
waiting_for_cache_not_full.push_back(op);
op->mark_delayed("waiting for cache not full");
}
void PrimaryLogPG::block_for_clean(
const hobject_t& oid, OpRequestRef op)
{
dout(20) << __func__ << ": blocking object " << oid
<< " on primary repair" << dendl;
waiting_for_clean_to_primary_repair.push_back(op);
op->mark_delayed("waiting for clean to repair");
}
void PrimaryLogPG::block_write_on_snap_rollback(
const hobject_t& oid, ObjectContextRef obc, OpRequestRef op)
{
dout(20) << __func__ << ": blocking object " << oid.get_head()
<< " on snap promotion " << obc->obs.oi.soid << dendl;
// otherwise, we'd have blocked in do_op
ceph_assert(oid.is_head());
ceph_assert(objects_blocked_on_snap_promotion.count(oid) == 0);
/*
* We block the head object here.
*
   * Suppose a read races with a rollback of the head object.
   * Since the two different ops can trigger promote_object() with the same source,
   * they can end up cancelling each other in an infinite loop.
   * To avoid this, we block the head object during the rollback,
   * so the racing read is held until the rollback completes.
* see also: https://tracker.ceph.com/issues/49726
*/
ObjectContextRef head_obc = get_object_context(oid, false);
head_obc->start_block();
objects_blocked_on_snap_promotion[oid] = obc;
wait_for_blocked_object(obc->obs.oi.soid, op);
}
void PrimaryLogPG::block_write_on_degraded_snap(
const hobject_t& snap, OpRequestRef op)
{
dout(20) << __func__ << ": blocking object " << snap.get_head()
<< " on degraded snap " << snap << dendl;
// otherwise, we'd have blocked in do_op
ceph_assert(objects_blocked_on_degraded_snap.count(snap.get_head()) == 0);
objects_blocked_on_degraded_snap[snap.get_head()] = snap.snap;
wait_for_degraded_object(snap, op);
}
bool PrimaryLogPG::maybe_await_blocked_head(
const hobject_t &hoid,
OpRequestRef op)
{
ObjectContextRef obc;
obc = object_contexts.lookup(hoid.get_head());
if (obc) {
if (obc->is_blocked()) {
wait_for_blocked_object(obc->obs.oi.soid, op);
return true;
} else {
return false;
}
}
return false;
}
void PrimaryLogPG::wait_for_blocked_object(const hobject_t& soid, OpRequestRef op)
{
dout(10) << __func__ << " " << soid << " " << *op->get_req() << dendl;
waiting_for_blocked_object[soid].push_back(op);
op->mark_delayed("waiting for blocked object");
}
void PrimaryLogPG::maybe_force_recovery()
{
// no force if not in degraded/recovery/backfill states
if (!is_degraded() &&
!state_test(PG_STATE_RECOVERING |
PG_STATE_RECOVERY_WAIT |
PG_STATE_BACKFILLING |
PG_STATE_BACKFILL_WAIT |
PG_STATE_BACKFILL_TOOFULL))
return;
if (recovery_state.get_pg_log().get_log().approx_size() <
cct->_conf->osd_max_pg_log_entries *
cct->_conf->osd_force_recovery_pg_log_entries_factor)
return;
// find the oldest missing object
version_t min_version = recovery_state.get_pg_log().get_log().head.version;
hobject_t soid;
if (!recovery_state.get_pg_log().get_missing().get_rmissing().empty()) {
min_version = recovery_state.get_pg_log().get_missing().get_rmissing().begin()->first;
soid = recovery_state.get_pg_log().get_missing().get_rmissing().begin()->second;
}
ceph_assert(!get_acting_recovery_backfill().empty());
for (set<pg_shard_t>::iterator it = get_acting_recovery_backfill().begin();
it != get_acting_recovery_backfill().end();
++it) {
if (*it == get_primary()) continue;
pg_shard_t peer = *it;
auto it_missing = recovery_state.get_peer_missing().find(peer);
if (it_missing != recovery_state.get_peer_missing().end() &&
!it_missing->second.get_rmissing().empty()) {
const auto& min_obj = recovery_state.get_peer_missing(peer).get_rmissing().begin();
dout(20) << __func__ << " peer " << peer << " min_version " << min_obj->first
<< " oid " << min_obj->second << dendl;
if (min_version > min_obj->first) {
min_version = min_obj->first;
soid = min_obj->second;
}
}
}
// recover it
if (soid != hobject_t())
maybe_kick_recovery(soid);
}
bool PrimaryLogPG::check_laggy(OpRequestRef& op)
{
assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
SERVER_OCTOPUS));
if (state_test(PG_STATE_WAIT)) {
dout(10) << __func__ << " PG is WAIT state" << dendl;
} else if (!state_test(PG_STATE_LAGGY)) {
auto mnow = osd->get_mnow();
auto ru = recovery_state.get_readable_until();
if (mnow <= ru) {
// not laggy
return true;
}
dout(10) << __func__
<< " mnow " << mnow
<< " > readable_until " << ru << dendl;
if (!is_primary()) {
osd->reply_op_error(op, -EAGAIN);
return false;
}
// go to laggy state
state_set(PG_STATE_LAGGY);
publish_stats_to_osd();
}
dout(10) << __func__ << " not readable" << dendl;
waiting_for_readable.push_back(op);
op->mark_delayed("waiting for readable");
return false;
}
bool PrimaryLogPG::check_laggy_requeue(OpRequestRef& op)
{
assert(HAVE_FEATURE(recovery_state.get_min_upacting_features(),
SERVER_OCTOPUS));
if (!state_test(PG_STATE_WAIT) && !state_test(PG_STATE_LAGGY)) {
return true; // not laggy
}
dout(10) << __func__ << " not readable" << dendl;
waiting_for_readable.push_front(op);
op->mark_delayed("waiting for readable");
return false;
}
void PrimaryLogPG::recheck_readable()
{
if (!is_wait() && !is_laggy()) {
dout(20) << __func__ << " wasn't wait or laggy" << dendl;
return;
}
auto mnow = osd->get_mnow();
bool pub = false;
if (is_wait()) {
auto prior_readable_until_ub = recovery_state.get_prior_readable_until_ub();
if (mnow < prior_readable_until_ub) {
dout(10) << __func__ << " still wait (mnow " << mnow
<< " < prior_readable_until_ub " << prior_readable_until_ub
<< ")" << dendl;
} else {
dout(10) << __func__ << " no longer wait (mnow " << mnow
<< " >= prior_readable_until_ub " << prior_readable_until_ub
<< ")" << dendl;
state_clear(PG_STATE_WAIT);
recovery_state.clear_prior_readable_until_ub();
pub = true;
}
}
if (is_laggy()) {
auto ru = recovery_state.get_readable_until();
if (ru == ceph::signedspan::zero()) {
dout(10) << __func__ << " still laggy (mnow " << mnow
<< ", readable_until zero)" << dendl;
} else if (mnow >= ru) {
dout(10) << __func__ << " still laggy (mnow " << mnow
<< " >= readable_until " << ru << ")" << dendl;
} else {
dout(10) << __func__ << " no longer laggy (mnow " << mnow
<< " < readable_until " << ru << ")" << dendl;
state_clear(PG_STATE_LAGGY);
pub = true;
}
}
if (pub) {
publish_stats_to_osd();
}
if (!is_laggy() && !is_wait()) {
requeue_ops(waiting_for_readable);
}
}
bool PrimaryLogPG::pgls_filter(const PGLSFilter& filter, const hobject_t& sobj)
{
bufferlist bl;
// If filter has expressed an interest in an xattr, load it.
if (!filter.get_xattr().empty()) {
int ret = pgbackend->objects_get_attr(
sobj,
filter.get_xattr(),
&bl);
dout(0) << "getattr (sobj=" << sobj << ", attr=" << filter.get_xattr() << ") returned " << ret << dendl;
if (ret < 0) {
if (ret != -ENODATA || filter.reject_empty_xattr()) {
return false;
}
}
}
return filter.filter(sobj, bl);
}
std::pair<int, std::unique_ptr<const PGLSFilter>>
PrimaryLogPG::get_pgls_filter(bufferlist::const_iterator& iter)
{
string type;
// storing non-const PGLSFilter for the sake of ::init()
std::unique_ptr<PGLSFilter> filter;
try {
decode(type, iter);
}
catch (ceph::buffer::error& e) {
return { -EINVAL, nullptr };
}
if (type.compare("plain") == 0) {
filter = std::make_unique<PGLSPlainFilter>();
} else {
std::size_t dot = type.find('.');
if (dot == std::string::npos || dot == 0 || dot == type.size() - 1) {
return { -EINVAL, nullptr };
}
const std::string class_name = type.substr(0, dot);
const std::string filter_name = type.substr(dot + 1);
ClassHandler::ClassData *cls = NULL;
int r = ClassHandler::get_instance().open_class(class_name, &cls);
if (r != 0) {
derr << "Error opening class '" << class_name << "': "
<< cpp_strerror(r) << dendl;
if (r != -EPERM) // propagate permission error
r = -EINVAL;
return { r, nullptr };
} else {
ceph_assert(cls);
}
ClassHandler::ClassFilter *class_filter = cls->get_filter(filter_name);
if (class_filter == NULL) {
derr << "Error finding filter '" << filter_name << "' in class "
<< class_name << dendl;
return { -EINVAL, nullptr };
}
filter.reset(class_filter->fn());
if (!filter) {
// Object classes are obliged to return us something, but let's
// give an error rather than asserting out.
derr << "Buggy class " << class_name << " failed to construct "
"filter " << filter_name << dendl;
return { -EINVAL, nullptr };
}
}
ceph_assert(filter);
int r = filter->init(iter);
if (r < 0) {
derr << "Error initializing filter " << type << ": "
<< cpp_strerror(r) << dendl;
return { -EINVAL, nullptr };
} else {
// Successfully constructed and initialized, return it.
return std::make_pair(0, std::move(filter));
}
}
// ==========================================================
void PrimaryLogPG::do_command(
const string_view& orig_prefix,
const cmdmap_t& cmdmap,
const bufferlist& idata,
std::function<void(int,const std::string&,bufferlist&)> on_finish)
{
string format;
cmd_getval(cmdmap, "format", format);
auto f(Formatter::create_unique(format, "json-pretty", "json-pretty"));
int ret = 0;
stringstream ss; // stderr error message stream
bufferlist outbl; // if empty at end, we'll dump formatter as output
// get final prefix:
// - ceph pg <pgid> foo -> prefix=pg, cmd=foo
// - ceph tell <pgid> foo -> prefix=foo
string prefix(orig_prefix);
string command;
cmd_getval(cmdmap, "cmd", command);
if (command.size()) {
prefix = command;
}
if (prefix == "query") {
f->open_object_section("pg");
f->dump_stream("snap_trimq") << snap_trimq;
f->dump_unsigned("snap_trimq_len", snap_trimq.size());
recovery_state.dump_peering_state(f.get());
f->open_array_section("recovery_state");
handle_query_state(f.get());
f->close_section();
if (is_primary() && is_active() && m_scrubber) {
m_scrubber->dump_scrubber(f.get(), m_planned_scrub);
}
f->open_object_section("agent_state");
if (agent_state)
agent_state->dump(f.get());
f->close_section();
f->close_section();
}
else if (prefix == "log") {
f->open_object_section("op_log");
f->open_object_section("pg_log_t");
recovery_state.get_pg_log().get_log().dump(f.get());
f->close_section();
f->close_section();
}
else if (prefix == "mark_unfound_lost") {
string mulcmd;
cmd_getval(cmdmap, "mulcmd", mulcmd);
int mode = -1;
if (mulcmd == "revert") {
if (pool.info.is_erasure()) {
ss << "mode must be 'delete' for ec pool";
ret = -EINVAL;
goto out;
}
mode = pg_log_entry_t::LOST_REVERT;
} else if (mulcmd == "delete") {
mode = pg_log_entry_t::LOST_DELETE;
} else {
ss << "mode must be 'revert' or 'delete'; mark not yet implemented";
ret = -EINVAL;
goto out;
}
ceph_assert(mode == pg_log_entry_t::LOST_REVERT ||
mode == pg_log_entry_t::LOST_DELETE);
if (!is_primary()) {
ss << "not primary";
ret = -EROFS;
goto out;
}
uint64_t unfound = recovery_state.get_missing_loc().num_unfound();
if (!unfound) {
ss << "pg has no unfound objects";
goto out; // make command idempotent
}
if (!recovery_state.all_unfound_are_queried_or_lost(get_osdmap())) {
ss << "pg has " << unfound
<< " unfound objects but we haven't probed all sources, not marking lost";
ret = -EINVAL;
goto out;
}
mark_all_unfound_lost(mode, on_finish);
return;
}
else if (prefix == "list_unfound") {
hobject_t offset;
string offset_json;
bool show_offset = false;
if (cmd_getval(cmdmap, "offset", offset_json)) {
json_spirit::Value v;
try {
if (!json_spirit::read(offset_json, v))
throw std::runtime_error("bad json");
offset.decode(v);
} catch (std::runtime_error& e) {
ss << "error parsing offset: " << e.what();
ret = -EINVAL;
goto out;
}
show_offset = true;
}
f->open_object_section("missing");
if (show_offset) {
f->open_object_section("offset");
offset.dump(f.get());
f->close_section();
}
auto &needs_recovery_map = recovery_state.get_missing_loc()
.get_needs_recovery();
f->dump_int("num_missing", needs_recovery_map.size());
f->dump_int("num_unfound", get_num_unfound());
map<hobject_t, pg_missing_item>::const_iterator p =
needs_recovery_map.upper_bound(offset);
{
f->open_array_section("objects");
int32_t num = 0;
for (; p != needs_recovery_map.end() &&
num < cct->_conf->osd_command_max_records;
++p) {
if (recovery_state.get_missing_loc().is_unfound(p->first)) {
f->open_object_section("object");
{
f->open_object_section("oid");
p->first.dump(f.get());
f->close_section();
}
p->second.dump(f.get()); // have, need keys
{
f->open_array_section("locations");
for (auto &&r : recovery_state.get_missing_loc().get_locations(
p->first)) {
f->dump_stream("shard") << r;
}
f->close_section();
}
f->close_section();
num++;
}
}
f->close_section();
}
// Get possible locations of missing objects from pg information
PeeringState::QueryUnfound q(f.get());
recovery_state.handle_event(q, 0);
f->dump_bool("more", p != needs_recovery_map.end());
f->close_section();
}
else if (prefix == "scrub" ||
prefix == "deep_scrub") {
bool deep = (prefix == "deep_scrub");
int64_t time = cmd_getval_or<int64_t>(cmdmap, "time", 0);
if (is_primary()) {
const pg_pool_t *p = &pool.info;
double pool_scrub_max_interval = 0;
double scrub_max_interval;
if (deep) {
p->opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL, &pool_scrub_max_interval);
scrub_max_interval = pool_scrub_max_interval > 0 ?
pool_scrub_max_interval : g_conf()->osd_deep_scrub_interval;
} else {
p->opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &pool_scrub_max_interval);
scrub_max_interval = pool_scrub_max_interval > 0 ?
pool_scrub_max_interval : g_conf()->osd_scrub_max_interval;
}
// Instead of marking must_scrub, trigger a scheduled scrub by backdating the last-scrub stamps
utime_t stamp = ceph_clock_now();
if (time == 0)
stamp -= scrub_max_interval;
else
stamp -= (float)time;
stamp -= 100.0; // push back last scrub more for good measure
if (deep) {
set_last_deep_scrub_stamp(stamp);
}
set_last_scrub_stamp(stamp); // for 'deep' as well, as we use this value to order scrubs
f->open_object_section("result");
f->dump_bool("deep", deep);
f->dump_stream("stamp") << stamp;
f->close_section();
} else {
ss << "Not primary";
ret = -EPERM;
}
outbl.append(ss.str());
}
else if (prefix == "block" || prefix == "unblock" || prefix == "set" ||
prefix == "unset") {
string value;
cmd_getval(cmdmap, "value", value);
if (is_primary()) {
ret = m_scrubber->asok_debug(prefix, value, f.get(), ss);
f->open_object_section("result");
f->dump_bool("success", true);
f->close_section();
} else {
ss << "Not primary";
ret = -EPERM;
}
outbl.append(ss.str());
}
else {
ret = -ENOSYS;
ss << "prefix '" << prefix << "' not implemented";
}
out:
if (ret >= 0 && outbl.length() == 0) {
f->flush(outbl);
}
on_finish(ret, ss.str(), outbl);
}
// ==========================================================
void PrimaryLogPG::do_pg_op(OpRequestRef op)
{
const MOSDOp *m = static_cast<const MOSDOp *>(op->get_req());
ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
dout(10) << "do_pg_op " << *m << dendl;
op->mark_started();
int result = 0;
string cname, mname;
snapid_t snapid = m->get_snapid();
vector<OSDOp> ops = m->ops;
for (vector<OSDOp>::iterator p = ops.begin(); p != ops.end(); ++p) {
std::unique_ptr<const PGLSFilter> filter;
OSDOp& osd_op = *p;
auto bp = p->indata.cbegin();
switch (p->op.op) {
case CEPH_OSD_OP_PGNLS_FILTER:
try {
decode(cname, bp);
decode(mname, bp);
}
catch (const ceph::buffer::error& e) {
dout(0) << "unable to decode PGLS_FILTER description in " << *m << dendl;
result = -EINVAL;
break;
}
std::tie(result, filter) = get_pgls_filter(bp);
if (result < 0)
break;
ceph_assert(filter);
// fall through
case CEPH_OSD_OP_PGNLS:
if (snapid != CEPH_NOSNAP) {
result = -EINVAL;
break;
}
if (get_osdmap()->raw_pg_to_pg(m->get_pg()) != info.pgid.pgid) {
dout(10) << " pgnls pg=" << m->get_pg()
<< " " << get_osdmap()->raw_pg_to_pg(m->get_pg())
<< " != " << info.pgid << dendl;
result = 0; // hmm?
} else {
unsigned list_size = std::min<uint64_t>(cct->_conf->osd_max_pgls,
p->op.pgls.count);
dout(10) << " pgnls pg=" << m->get_pg() << " count " << list_size
<< dendl;
// read into a buffer
vector<hobject_t> sentries;
pg_nls_response_t response;
try {
decode(response.handle, bp);
}
catch (const ceph::buffer::error& e) {
dout(0) << "unable to decode PGNLS handle in " << *m << dendl;
result = -EINVAL;
break;
}
hobject_t next;
hobject_t lower_bound = response.handle;
hobject_t pg_start = info.pgid.pgid.get_hobj_start();
hobject_t pg_end = info.pgid.pgid.get_hobj_end(pool.info.get_pg_num());
dout(10) << " pgnls lower_bound " << lower_bound
<< " pg_end " << pg_end << dendl;
if (((!lower_bound.is_max() && lower_bound >= pg_end) ||
(lower_bound != hobject_t() && lower_bound < pg_start))) {
// this should only happen with a buggy client.
dout(10) << "outside of PG bounds " << pg_start << " .. "
<< pg_end << dendl;
result = -EINVAL;
break;
}
hobject_t current = lower_bound;
int r = pgbackend->objects_list_partial(
current,
list_size,
list_size,
&sentries,
&next);
if (r != 0) {
result = -EINVAL;
break;
}
map<hobject_t, pg_missing_item>::const_iterator missing_iter =
recovery_state.get_pg_log().get_missing().get_items().lower_bound(current);
vector<hobject_t>::iterator ls_iter = sentries.begin();
hobject_t _max = hobject_t::get_max();
while (1) {
const hobject_t &mcand =
missing_iter == recovery_state.get_pg_log().get_missing().get_items().end() ?
_max :
missing_iter->first;
const hobject_t &lcand =
ls_iter == sentries.end() ?
_max :
*ls_iter;
hobject_t candidate;
if (mcand == lcand) {
candidate = mcand;
if (!mcand.is_max()) {
++ls_iter;
++missing_iter;
}
} else if (mcand < lcand) {
candidate = mcand;
ceph_assert(!mcand.is_max());
++missing_iter;
} else {
candidate = lcand;
ceph_assert(!lcand.is_max());
++ls_iter;
}
dout(10) << " pgnls candidate 0x" << std::hex << candidate.get_hash()
<< " vs lower bound 0x" << lower_bound.get_hash()
<< std::dec << dendl;
if (candidate >= next) {
break;
}
if (response.entries.size() == list_size) {
next = candidate;
break;
}
if (candidate.snap != CEPH_NOSNAP)
continue;
// skip internal namespace
if (candidate.get_namespace() == cct->_conf->osd_hit_set_namespace)
continue;
if (recovery_state.get_missing_loc().is_deleted(candidate))
continue;
// skip wrong namespace
if (m->get_hobj().nspace != librados::all_nspaces &&
candidate.get_namespace() != m->get_hobj().nspace)
continue;
if (filter && !pgls_filter(*filter, candidate))
continue;
dout(20) << "pgnls item 0x" << std::hex
<< candidate.get_hash()
<< ", rev 0x" << hobject_t::_reverse_bits(candidate.get_hash())
<< std::dec << " "
<< candidate.oid.name << dendl;
librados::ListObjectImpl item;
item.nspace = candidate.get_namespace();
item.oid = candidate.oid.name;
item.locator = candidate.get_key();
response.entries.push_back(item);
}
if (next.is_max() &&
missing_iter == recovery_state.get_pg_log().get_missing().get_items().end() &&
ls_iter == sentries.end()) {
result = 1;
// Set response.handle to the start of the next PG according
// to the object sort order.
response.handle = info.pgid.pgid.get_hobj_end(pool.info.get_pg_num());
} else {
response.handle = next;
}
dout(10) << "pgnls handle=" << response.handle << dendl;
encode(response, osd_op.outdata);
dout(10) << " pgnls result=" << result << " outdata.length()="
<< osd_op.outdata.length() << dendl;
}
break;
case CEPH_OSD_OP_PGLS_FILTER:
try {
decode(cname, bp);
decode(mname, bp);
}
catch (const ceph::buffer::error& e) {
dout(0) << "unable to decode PGLS_FILTER description in " << *m << dendl;
result = -EINVAL;
break;
}
std::tie(result, filter) = get_pgls_filter(bp);
if (result < 0)
break;
ceph_assert(filter);
// fall through
case CEPH_OSD_OP_PGLS:
if (snapid != CEPH_NOSNAP) {
result = -EINVAL;
break;
}
if (get_osdmap()->raw_pg_to_pg(m->get_pg()) != info.pgid.pgid) {
dout(10) << " pgls pg=" << m->get_pg()
<< " " << get_osdmap()->raw_pg_to_pg(m->get_pg())
<< " != " << info.pgid << dendl;
result = 0; // hmm?
} else {
unsigned list_size = std::min<uint64_t>(cct->_conf->osd_max_pgls,
p->op.pgls.count);
dout(10) << " pgls pg=" << m->get_pg() << " count " << list_size << dendl;
// read into a buffer
vector<hobject_t> sentries;
pg_ls_response_t response;
try {
decode(response.handle, bp);
}
catch (const ceph::buffer::error& e) {
dout(0) << "unable to decode PGLS handle in " << *m << dendl;
result = -EINVAL;
break;
}
hobject_t next;
hobject_t current = response.handle;
int r = pgbackend->objects_list_partial(
current,
list_size,
list_size,
&sentries,
&next);
if (r != 0) {
result = -EINVAL;
break;
}
ceph_assert(snapid == CEPH_NOSNAP || recovery_state.get_pg_log().get_missing().get_items().empty());
map<hobject_t, pg_missing_item>::const_iterator missing_iter =
recovery_state.get_pg_log().get_missing().get_items().lower_bound(current);
vector<hobject_t>::iterator ls_iter = sentries.begin();
hobject_t _max = hobject_t::get_max();
while (1) {
const hobject_t &mcand =
missing_iter == recovery_state.get_pg_log().get_missing().get_items().end() ?
_max :
missing_iter->first;
const hobject_t &lcand =
ls_iter == sentries.end() ?
_max :
*ls_iter;
hobject_t candidate;
if (mcand == lcand) {
candidate = mcand;
if (!mcand.is_max()) {
++ls_iter;
++missing_iter;
}
} else if (mcand < lcand) {
candidate = mcand;
ceph_assert(!mcand.is_max());
++missing_iter;
} else {
candidate = lcand;
ceph_assert(!lcand.is_max());
++ls_iter;
}
if (candidate >= next) {
break;
}
if (response.entries.size() == list_size) {
next = candidate;
break;
}
if (candidate.snap != CEPH_NOSNAP)
continue;
// skip wrong namespace
if (candidate.get_namespace() != m->get_hobj().nspace)
continue;
if (recovery_state.get_missing_loc().is_deleted(candidate))
continue;
if (filter && !pgls_filter(*filter, candidate))
continue;
response.entries.push_back(make_pair(candidate.oid,
candidate.get_key()));
}
if (next.is_max() &&
missing_iter == recovery_state.get_pg_log().get_missing().get_items().end() &&
ls_iter == sentries.end()) {
result = 1;
}
response.handle = next;
encode(response, osd_op.outdata);
dout(10) << " pgls result=" << result << " outdata.length()="
<< osd_op.outdata.length() << dendl;
}
break;
case CEPH_OSD_OP_PG_HITSET_LS:
{
list< pair<utime_t,utime_t> > ls;
for (list<pg_hit_set_info_t>::const_iterator p = info.hit_set.history.begin();
p != info.hit_set.history.end();
++p)
ls.push_back(make_pair(p->begin, p->end));
if (hit_set)
ls.push_back(make_pair(hit_set_start_stamp, utime_t()));
encode(ls, osd_op.outdata);
}
break;
case CEPH_OSD_OP_PG_HITSET_GET:
{
utime_t stamp(osd_op.op.hit_set_get.stamp);
if (hit_set_start_stamp && stamp >= hit_set_start_stamp) {
// read the current in-memory HitSet, not the version we've
// checkpointed.
if (!hit_set) {
result= -ENOENT;
break;
}
encode(*hit_set, osd_op.outdata);
result = osd_op.outdata.length();
} else {
// read an archived HitSet.
hobject_t oid;
for (list<pg_hit_set_info_t>::const_iterator p = info.hit_set.history.begin();
p != info.hit_set.history.end();
++p) {
if (stamp >= p->begin && stamp <= p->end) {
oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
break;
}
}
if (oid == hobject_t()) {
result = -ENOENT;
break;
}
if (!pool.info.is_replicated()) {
// FIXME: EC not supported yet
result = -EOPNOTSUPP;
break;
}
if (is_unreadable_object(oid)) {
wait_for_unreadable_object(oid, op);
return;
}
result = osd->store->read(ch, ghobject_t(oid), 0, 0, osd_op.outdata);
}
}
break;
case CEPH_OSD_OP_SCRUBLS:
result = do_scrub_ls(m, &osd_op);
break;
default:
result = -EINVAL;
break;
}
if (result < 0)
break;
}
// reply
MOSDOpReply *reply = new MOSDOpReply(m, 0, get_osdmap_epoch(),
CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK,
false);
reply->claim_op_out_data(ops);
reply->set_result(result);
reply->set_reply_versions(info.last_update, info.last_user_version);
osd->send_message_osd_client(reply, m->get_connection());
}
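// Handle CEPH_OSD_OP_SCRUBLS: return the scrubber's stored errors for the
// interval the client asked about, -EAGAIN if the interval has changed
// since then, or -ENOENT if the scrubber's store is not initialized.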
int PrimaryLogPG::do_scrub_ls(const MOSDOp *m, OSDOp *osd_op)
{
if (m->get_pg() != info.pgid.pgid) {
dout(10) << " scrubls pg=" << m->get_pg() << " != " << info.pgid << dendl;
return -EINVAL; // hmm?
}
auto bp = osd_op->indata.cbegin();
scrub_ls_arg_t arg;
try {
arg.decode(bp);
} catch (ceph::buffer::error&) {
dout(10) << " corrupted scrub_ls_arg_t" << dendl;
return -EINVAL;
}
int r = 0;
scrub_ls_result_t result = {.interval = info.history.same_interval_since};
if (arg.interval != 0 && arg.interval != info.history.same_interval_since) {
r = -EAGAIN;
} else {
bool store_queried = m_scrubber && m_scrubber->get_store_errors(arg, result);
if (store_queried) {
encode(result, osd_op->outdata);
} else {
// the scrubber's store is not initialized
r = -ENOENT;
}
}
return r;
}
/**
* Grabs locks for OpContext, should be cleaned up in close_op_ctx
*
* @param ctx [in,out] ctx to get locks for
* @return true on success, false if we are queued
*/
bool PrimaryLogPG::get_rw_locks(bool write_ordered, OpContext *ctx)
{
/* If head_obc is set, then !obc->obs.exists and we always take the
* lock on head_obc *before* the lock on obc. Since all callers do
* this (read or write), if we get the first we are guaranteed to
* get the second.
*/
if (write_ordered && ctx->op->may_read()) {
if (ctx->op->may_read_data()) {
ctx->lock_type = RWState::RWEXCL;
} else {
ctx->lock_type = RWState::RWWRITE;
}
} else if (write_ordered) {
ctx->lock_type = RWState::RWWRITE;
} else {
ceph_assert(ctx->op->may_read());
ctx->lock_type = RWState::RWREAD;
}
if (ctx->head_obc) {
ceph_assert(!ctx->obc->obs.exists);
if (!ctx->lock_manager.get_lock_type(
ctx->lock_type,
ctx->head_obc->obs.oi.soid,
ctx->head_obc,
ctx->op)) {
ctx->lock_type = RWState::RWNONE;
return false;
}
}
if (ctx->lock_manager.get_lock_type(
ctx->lock_type,
ctx->obc->obs.oi.soid,
ctx->obc,
ctx->op)) {
return true;
} else {
ceph_assert(!ctx->head_obc);
ctx->lock_type = RWState::RWNONE;
return false;
}
}
/**
* Releases locks
*
* @param manager [in] manager with locks to release
*/
void PrimaryLogPG::release_object_locks(
ObcLockManager &lock_manager) {
std::list<std::pair<ObjectContextRef, std::list<OpRequestRef> > > to_req;
bool requeue_recovery = false;
bool requeue_snaptrim = false;
lock_manager.put_locks(
&to_req,
&requeue_recovery,
&requeue_snaptrim);
if (requeue_recovery)
queue_recovery();
if (requeue_snaptrim)
snap_trimmer_machine.process_event(TrimWriteUnblocked());
if (!to_req.empty()) {
// requeue at front of scrub blocking queue if we are blocked by scrub
for (auto &&p: to_req) {
if (m_scrubber->write_blocked_by_scrub(p.first->obs.oi.soid.get_head())) {
for (auto& op : p.second) {
op->mark_delayed("waiting for scrub");
}
waiting_for_scrub.splice(
waiting_for_scrub.begin(),
p.second,
p.second.begin(),
p.second.end());
} else if (is_laggy()) {
for (auto& op : p.second) {
op->mark_delayed("waiting for readable");
}
waiting_for_readable.splice(
waiting_for_readable.begin(),
p.second,
p.second.begin(),
p.second.end());
} else {
requeue_ops(p.second);
}
}
}
}
PrimaryLogPG::PrimaryLogPG(OSDService *o, OSDMapRef curmap,
const PGPool &_pool,
const map<string,string>& ec_profile, spg_t p) :
PG(o, curmap, _pool, p),
pgbackend(
PGBackend::build_pg_backend(
_pool.info, ec_profile, this, coll_t(p), ch, o->store, cct)),
object_contexts(o->cct, o->cct->_conf->osd_pg_object_context_cache_count),
new_backfill(false),
temp_seq(0),
snap_trimmer_machine(this)
{
recovery_state.set_backend_predicates(
pgbackend->get_is_readable_predicate(),
pgbackend->get_is_recoverable_predicate());
snap_trimmer_machine.initiate();
m_scrubber = make_unique<PrimaryLogScrub>(this);
}
PrimaryLogPG::~PrimaryLogPG()
{
m_scrubber.reset();
}
void PrimaryLogPG::get_src_oloc(const object_t& oid, const object_locator_t& oloc, object_locator_t& src_oloc)
{
src_oloc = oloc;
if (oloc.key.empty())
src_oloc.key = oid.name;
}
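// A client is acking a backoff we sent: clamp the acked range to this
// PG's bounds and record the ack on the client's session.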
void PrimaryLogPG::handle_backoff(OpRequestRef& op)
{
auto m = op->get_req<MOSDBackoff>();
auto session = ceph::ref_cast<Session>(m->get_connection()->get_priv());
if (!session)
return; // drop it.
hobject_t begin = info.pgid.pgid.get_hobj_start();
hobject_t end = info.pgid.pgid.get_hobj_end(pool.info.get_pg_num());
if (begin < m->begin) {
begin = m->begin;
}
if (end > m->end) {
end = m->end;
}
dout(10) << __func__ << " backoff ack id " << m->id
<< " [" << begin << "," << end << ")" << dendl;
session->ack_backoff(cct, m->pgid, m->id, begin, end);
}
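// Top-level dispatch for all messages addressed to this PG: enforce
// per-source ordering while waiting for maps, apply pg-wide backoffs,
// and gate on peering/flush state before handing the message to the
// type-specific handlers below.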
void PrimaryLogPG::do_request(
OpRequestRef& op,
ThreadPool::TPHandle &handle)
{
if (op->osd_trace) {
op->pg_trace.init("pg op", &trace_endpoint, &op->osd_trace);
op->pg_trace.event("do request");
}
// make sure we have a new enough map
auto p = waiting_for_map.find(op->get_source());
if (p != waiting_for_map.end()) {
// preserve ordering
dout(20) << __func__ << " waiting_for_map "
<< p->first << " not empty, queueing" << dendl;
p->second.push_back(op);
op->mark_delayed("waiting_for_map not empty");
return;
}
if (!have_same_or_newer_map(op->min_epoch)) {
dout(20) << __func__ << " min " << op->min_epoch
<< ", queue on waiting_for_map " << op->get_source() << dendl;
waiting_for_map[op->get_source()].push_back(op);
op->mark_delayed("op must wait for map");
osd->request_osdmap_update(op->min_epoch);
return;
}
if (can_discard_request(op)) {
return;
}
// pg-wide backoffs
const Message *m = op->get_req();
int msg_type = m->get_type();
if (m->get_connection()->has_feature(CEPH_FEATURE_RADOS_BACKOFF)) {
auto session = ceph::ref_cast<Session>(m->get_connection()->get_priv());
if (!session)
return; // drop it.
if (msg_type == CEPH_MSG_OSD_OP) {
if (session->check_backoff(cct, info.pgid,
info.pgid.pgid.get_hobj_start(), m)) {
return;
}
bool backoff =
is_down() ||
is_incomplete() ||
(!is_active() && is_peered());
if (g_conf()->osd_backoff_on_peering && !backoff) {
if (is_peering()) {
backoff = true;
}
}
if (backoff) {
add_pg_backoff(session);
return;
}
}
// pg backoff acks at pg-level
if (msg_type == CEPH_MSG_OSD_BACKOFF) {
const MOSDBackoff *ba = static_cast<const MOSDBackoff*>(m);
if (ba->begin != ba->end) {
handle_backoff(op);
return;
}
}
}
if (!is_peered()) {
// Delay unless PGBackend says it's ok
if (pgbackend->can_handle_while_inactive(op)) {
bool handled = pgbackend->handle_message(op);
ceph_assert(handled);
return;
} else {
waiting_for_peered.push_back(op);
op->mark_delayed("waiting for peered");
return;
}
}
if (recovery_state.needs_flush()) {
dout(20) << "waiting for flush on " << *op->get_req() << dendl;
waiting_for_flush.push_back(op);
op->mark_delayed("waiting for flush");
return;
}
ceph_assert(is_peered() && !recovery_state.needs_flush());
if (pgbackend->handle_message(op))
return;
switch (msg_type) {
case CEPH_MSG_OSD_OP:
case CEPH_MSG_OSD_BACKOFF:
if (!is_active()) {
dout(20) << " peered, not active, waiting for active on "
<< *op->get_req() << dendl;
waiting_for_active.push_back(op);
op->mark_delayed("waiting for active");
return;
}
switch (msg_type) {
case CEPH_MSG_OSD_OP:
// verify client features
if ((pool.info.has_tiers() || pool.info.is_tier()) &&
!op->has_feature(CEPH_FEATURE_OSD_CACHEPOOL)) {
osd->reply_op_error(op, -EOPNOTSUPP);
return;
}
do_op(op);
break;
case CEPH_MSG_OSD_BACKOFF:
// object-level backoff acks handled in osdop context
handle_backoff(op);
break;
}
break;
case MSG_OSD_PG_SCAN:
do_scan(op, handle);
break;
case MSG_OSD_PG_BACKFILL:
do_backfill(op);
break;
case MSG_OSD_PG_BACKFILL_REMOVE:
do_backfill_remove(op);
break;
case MSG_OSD_SCRUB_RESERVE:
{
if (!m_scrubber) {
osd->reply_op_error(op, -EAGAIN);
return;
}
auto m = op->get_req<MOSDScrubReserve>();
switch (m->type) {
case MOSDScrubReserve::REQUEST:
m_scrubber->handle_scrub_reserve_request(op);
break;
case MOSDScrubReserve::GRANT:
m_scrubber->handle_scrub_reserve_grant(op, m->from);
break;
case MOSDScrubReserve::REJECT:
m_scrubber->handle_scrub_reserve_reject(op, m->from);
break;
case MOSDScrubReserve::RELEASE:
m_scrubber->handle_scrub_reserve_release(op);
break;
}
}
break;
case MSG_OSD_REP_SCRUB:
replica_scrub(op, handle);
break;
case MSG_OSD_REP_SCRUBMAP:
do_replica_scrub_map(op);
break;
case MSG_OSD_PG_UPDATE_LOG_MISSING:
do_update_log_missing(op);
break;
case MSG_OSD_PG_UPDATE_LOG_MISSING_REPLY:
do_update_log_missing_reply(op);
break;
default:
ceph_abort_msg("bad message type in do_request");
}
}
/** do_op - do an op
* pg lock will be held (if multithreaded)
* osd_lock NOT held.
*/
void PrimaryLogPG::do_op(OpRequestRef& op)
{
FUNCTRACE(cct);
// NOTE: take a non-const pointer here; we must be careful not to
// change anything that will break other reads on m (operator<<).
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
if (m->finish_decode()) {
op->reset_desc(); // for TrackedOp
m->clear_payload();
}
dout(20) << __func__ << ": op " << *m << dendl;
const hobject_t head = m->get_hobj().get_head();
if (!info.pgid.pgid.contains(
info.pgid.pgid.get_split_bits(pool.info.get_pg_num()), head)) {
derr << __func__ << " " << info.pgid.pgid << " does not contain "
<< head << " pg_num " << pool.info.get_pg_num() << " hash "
<< std::hex << head.get_hash() << std::dec << dendl;
osd->clog->warn() << info.pgid.pgid << " does not contain " << head
<< " op " << *m;
ceph_assert(!cct->_conf->osd_debug_misdirected_ops);
return;
}
bool can_backoff =
m->get_connection()->has_feature(CEPH_FEATURE_RADOS_BACKOFF);
ceph::ref_t<Session> session;
if (can_backoff) {
session = static_cast<Session*>(m->get_connection()->get_priv().get());
if (!session.get()) {
dout(10) << __func__ << " no session" << dendl;
return;
}
if (session->check_backoff(cct, info.pgid, head, m)) {
return;
}
}
if (m->has_flag(CEPH_OSD_FLAG_PARALLELEXEC)) {
// not implemented.
dout(20) << __func__ << ": PARALLELEXEC not implemented " << *m << dendl;
osd->reply_op_error(op, -EINVAL);
return;
}
{
int r = op->maybe_init_op_info(*get_osdmap());
if (r) {
osd->reply_op_error(op, r);
return;
}
}
if ((m->get_flags() & (CEPH_OSD_FLAG_BALANCE_READS |
CEPH_OSD_FLAG_LOCALIZE_READS)) &&
op->may_read() &&
!(op->may_write() || op->may_cache())) {
// balanced reads; any replica will do
if (!(is_primary() || is_nonprimary())) {
osd->handle_misdirected_op(this, op);
return;
}
} else {
// normal case; must be primary
if (!is_primary()) {
osd->handle_misdirected_op(this, op);
return;
}
}
if (!check_laggy(op)) {
return;
}
if (!op_has_sufficient_caps(op)) {
osd->reply_op_error(op, -EPERM);
return;
}
if (op->includes_pg_op()) {
return do_pg_op(op);
}
// object name too long?
if (m->get_oid().name.size() > cct->_conf->osd_max_object_name_len) {
dout(4) << "do_op name is longer than "
<< cct->_conf->osd_max_object_name_len
<< " bytes" << dendl;
osd->reply_op_error(op, -ENAMETOOLONG);
return;
}
if (m->get_hobj().get_key().size() > cct->_conf->osd_max_object_name_len) {
dout(4) << "do_op locator is longer than "
<< cct->_conf->osd_max_object_name_len
<< " bytes" << dendl;
osd->reply_op_error(op, -ENAMETOOLONG);
return;
}
if (m->get_hobj().nspace.size() > cct->_conf->osd_max_object_namespace_len) {
dout(4) << "do_op namespace is longer than "
<< cct->_conf->osd_max_object_namespace_len
<< " bytes" << dendl;
osd->reply_op_error(op, -ENAMETOOLONG);
return;
}
if (m->get_hobj().oid.name.empty()) {
dout(4) << "do_op empty oid name is not allowed" << dendl;
osd->reply_op_error(op, -EINVAL);
return;
}
if (int r = osd->store->validate_hobject_key(head)) {
dout(4) << "do_op object " << head << " invalid for backing store: "
<< r << dendl;
osd->reply_op_error(op, r);
return;
}
// blocklisted?
if (get_osdmap()->is_blocklisted(m->get_source_addr())) {
dout(10) << "do_op " << m->get_source_addr() << " is blocklisted" << dendl;
osd->reply_op_error(op, -EBLOCKLISTED);
return;
}
// order this op as a write?
bool write_ordered = op->rwordered();
// discard due to cluster full transition? (we discard any op that
// originates before the cluster or pool is marked full; the client
// will resend after the full flag is removed or if they expect the
// op to succeed despite being full). The exceptions are FULL_FORCE and
// FULL_TRY ops, which there is no reason to discard because they
// bypass all full checks anyway. If this op isn't write or
// read-ordered, we skip.
// FIXME: we exclude mds writes for now.
if (write_ordered && !(m->get_source().is_mds() ||
m->has_flag(CEPH_OSD_FLAG_FULL_TRY) ||
m->has_flag(CEPH_OSD_FLAG_FULL_FORCE)) &&
info.history.last_epoch_marked_full > m->get_map_epoch()) {
dout(10) << __func__ << " discarding op sent before full " << m << " "
<< *m << dendl;
return;
}
// mds should have stopped writing before this point.
// We can't allow OSD to become non-startable even if mds
// could be writing as part of file removals.
if (write_ordered && osd->check_failsafe_full(get_dpp()) &&
!m->has_flag(CEPH_OSD_FLAG_FULL_TRY)) {
dout(10) << __func__ << " fail-safe full check failed, dropping request." << dendl;
return;
}
int64_t poolid = get_pgid().pool();
const pg_pool_t *pi = get_osdmap()->get_pg_pool(poolid);
if (!pi) {
return;
}
if (pi->has_flag(pg_pool_t::FLAG_EIO)) {
// drop op on the floor; the client will handle returning EIO
if (m->has_flag(CEPH_OSD_FLAG_SUPPORTSPOOLEIO)) {
dout(10) << __func__ << " discarding op due to pool EIO flag" << dendl;
} else {
dout(10) << __func__ << " replying EIO due to pool EIO flag" << dendl;
osd->reply_op_error(op, -EIO);
}
return;
}
if (op->may_write()) {
// invalid?
if (m->get_snapid() != CEPH_NOSNAP) {
dout(20) << __func__ << ": write to clone not valid " << *m << dendl;
osd->reply_op_error(op, -EINVAL);
return;
}
// too big?
if (cct->_conf->osd_max_write_size &&
m->get_data_len() > cct->_conf->osd_max_write_size << 20) {
// journal can't hold commit!
derr << "do_op msg data len " << m->get_data_len()
<< " > osd_max_write_size " << (cct->_conf->osd_max_write_size << 20)
<< " on " << *m << dendl;
osd->reply_op_error(op, -OSD_WRITETOOBIG);
return;
}
}
dout(10) << "do_op " << *m
<< (op->may_write() ? " may_write" : "")
<< (op->may_read() ? " may_read" : "")
<< (op->may_cache() ? " may_cache" : "")
<< " -> " << (write_ordered ? "write-ordered" : "read-ordered")
<< " flags " << ceph_osd_flag_string(m->get_flags())
<< dendl;
// missing object?
if (is_unreadable_object(head)) {
if (!is_primary()) {
osd->reply_op_error(op, -EAGAIN);
return;
}
if (can_backoff &&
(g_conf()->osd_backoff_on_degraded ||
(g_conf()->osd_backoff_on_unfound &&
recovery_state.get_missing_loc().is_unfound(head)))) {
add_backoff(session, head, head);
maybe_kick_recovery(head);
} else {
wait_for_unreadable_object(head, op);
}
return;
}
if (write_ordered) {
// degraded object?
if (is_degraded_or_backfilling_object(head)) {
if (can_backoff && g_conf()->osd_backoff_on_degraded) {
add_backoff(session, head, head);
maybe_kick_recovery(head);
} else {
wait_for_degraded_object(head, op);
}
return;
}
if (m_scrubber->is_scrub_active() && m_scrubber->write_blocked_by_scrub(head)) {
dout(20) << __func__ << ": waiting for scrub" << dendl;
waiting_for_scrub.push_back(op);
op->mark_delayed("waiting for scrub");
return;
}
if (!check_laggy_requeue(op)) {
return;
}
// blocked on snap?
if (auto blocked_iter = objects_blocked_on_degraded_snap.find(head);
blocked_iter != std::end(objects_blocked_on_degraded_snap)) {
hobject_t to_wait_on(head);
to_wait_on.snap = blocked_iter->second;
wait_for_degraded_object(to_wait_on, op);
return;
}
if (auto blocked_snap_promote_iter = objects_blocked_on_snap_promotion.find(head);
blocked_snap_promote_iter != std::end(objects_blocked_on_snap_promotion)) {
wait_for_blocked_object(blocked_snap_promote_iter->second->obs.oi.soid, op);
return;
}
if (objects_blocked_on_cache_full.count(head)) {
block_write_on_full_cache(head, op);
return;
}
}
// dup/resent?
if (op->may_write() || op->may_cache()) {
// warning: we will get back *a* request for this reqid, but not
// necessarily the most recent. this happens with flush and
// promote ops, but we can't possibly have both in our log where
// the original request is still not stable on disk, so for our
// purposes here it doesn't matter which one we get.
eversion_t version;
version_t user_version;
int return_code = 0;
vector<pg_log_op_return_item_t> op_returns;
bool got = check_in_progress_op(
m->get_reqid(), &version, &user_version, &return_code, &op_returns);
if (got) {
dout(3) << __func__ << " dup " << m->get_reqid()
<< " version " << version << dendl;
if (already_complete(version)) {
osd->reply_op_error(op, return_code, version, user_version, op_returns);
} else {
dout(10) << " waiting for " << version << " to commit" << dendl;
// always queue ondisk waiters, so that we can requeue if needed
waiting_for_ondisk[version].emplace_back(op, user_version, return_code,
op_returns);
op->mark_delayed("waiting for ondisk");
}
return;
}
}
ObjectContextRef obc;
bool can_create = op->may_write();
hobject_t missing_oid;
// kludge around the fact that LIST_SNAPS uses CEPH_SNAPDIR as its snapid; map such ops onto the head object
const hobject_t& oid =
m->get_snapid() == CEPH_SNAPDIR ? head : m->get_hobj();
// make sure LIST_SNAPS is on CEPH_SNAPDIR and nothing else
for (vector<OSDOp>::iterator p = m->ops.begin(); p != m->ops.end(); ++p) {
OSDOp& osd_op = *p;
if (osd_op.op.op == CEPH_OSD_OP_LIST_SNAPS) {
if (m->get_snapid() != CEPH_SNAPDIR) {
dout(10) << "LIST_SNAPS with incorrect context" << dendl;
osd->reply_op_error(op, -EINVAL);
return;
}
} else {
if (m->get_snapid() == CEPH_SNAPDIR) {
dout(10) << "non-LIST_SNAPS on snapdir" << dendl;
osd->reply_op_error(op, -EINVAL);
return;
}
}
}
// io blocked on obc?
if (!m->has_flag(CEPH_OSD_FLAG_FLUSH) &&
maybe_await_blocked_head(oid, op)) {
return;
}
if (!is_primary()) {
if (!recovery_state.can_serve_replica_read(oid)) {
dout(20) << __func__
<< ": unstable write on replica, bouncing to primary "
<< *m << dendl;
osd->reply_op_error(op, -EAGAIN);
return;
}
dout(20) << __func__ << ": serving replica read on oid " << oid
<< dendl;
}
int r = find_object_context(
oid, &obc, can_create,
m->has_flag(CEPH_OSD_FLAG_MAP_SNAP_CLONE),
&missing_oid);
// LIST_SNAPS needs the ssc too
if (obc &&
m->get_snapid() == CEPH_SNAPDIR &&
!obc->ssc) {
obc->ssc = get_snapset_context(oid, true);
}
if (r == -EAGAIN) {
// If we're not the primary for this PG, just let the -EAGAIN propagate
// to the client. Otherwise, we have to wait for the object.
if (is_primary()) {
// missing the specific snap we need; requeue and wait.
ceph_assert(!op->may_write()); // only happens on a read/cache
wait_for_unreadable_object(missing_oid, op);
return;
}
} else if (r == 0) {
if (is_unreadable_object(obc->obs.oi.soid)) {
dout(10) << __func__ << ": clone " << obc->obs.oi.soid
<< " is unreadable, waiting" << dendl;
wait_for_unreadable_object(obc->obs.oi.soid, op);
return;
}
// degraded object? (the check above was for head; this could be a clone)
if (write_ordered &&
obc->obs.oi.soid.snap != CEPH_NOSNAP &&
is_degraded_or_backfilling_object(obc->obs.oi.soid)) {
dout(10) << __func__ << ": clone " << obc->obs.oi.soid
<< " is degraded, waiting" << dendl;
wait_for_degraded_object(obc->obs.oi.soid, op);
return;
}
}
bool in_hit_set = false;
if (hit_set) {
if (obc.get()) {
if (obc->obs.oi.soid != hobject_t() && hit_set->contains(obc->obs.oi.soid))
in_hit_set = true;
} else {
if (missing_oid != hobject_t() && hit_set->contains(missing_oid))
in_hit_set = true;
}
if (!op->hitset_inserted) {
hit_set->insert(oid);
op->hitset_inserted = true;
if (hit_set->is_full() ||
hit_set_start_stamp + pool.info.hit_set_period <= m->get_recv_stamp()) {
hit_set_persist();
}
}
}
if (agent_state) {
if (agent_choose_mode(false, op))
return;
}
if (obc.get() && obc->obs.exists) {
if (recover_adjacent_clones(obc, op)) {
return;
}
if (maybe_handle_manifest(op,
write_ordered,
obc))
return;
}
if (maybe_handle_cache(op,
write_ordered,
obc,
r,
missing_oid,
false,
in_hit_set))
return;
if (r && (r != -ENOENT || !obc)) {
// copy the reqids for copy get on ENOENT
if (r == -ENOENT &&
(m->ops[0].op.op == CEPH_OSD_OP_COPY_GET)) {
fill_in_copy_get_noent(op, oid, m->ops[0]);
return;
}
dout(20) << __func__ << ": find_object_context got error " << r << dendl;
if (op->may_write() &&
get_osdmap()->require_osd_release >= ceph_release_t::kraken) {
record_write_error(op, oid, nullptr, r);
} else {
osd->reply_op_error(op, r);
}
return;
}
// make sure locator is consistent
object_locator_t oloc(obc->obs.oi.soid);
if (m->get_object_locator() != oloc) {
dout(10) << " provided locator " << m->get_object_locator()
<< " != object's " << obc->obs.oi.soid << dendl;
osd->clog->warn() << "bad locator " << m->get_object_locator()
<< " on object " << oloc
<< " op " << *m;
}
// io blocked on obc?
if (obc->is_blocked() &&
!m->has_flag(CEPH_OSD_FLAG_FLUSH)) {
wait_for_blocked_object(obc->obs.oi.soid, op);
return;
}
dout(25) << __func__ << " oi " << obc->obs.oi << dendl;
OpContext *ctx = new OpContext(op, m->get_reqid(), &m->ops, obc, this);
if (m->has_flag(CEPH_OSD_FLAG_SKIPRWLOCKS)) {
dout(20) << __func__ << ": skipping rw locks" << dendl;
} else if (m->get_flags() & CEPH_OSD_FLAG_FLUSH) {
dout(20) << __func__ << ": part of flush, will ignore write lock" << dendl;
// verify there is in fact a flush in progress
// FIXME: we could make this a stronger test.
map<hobject_t,FlushOpRef>::iterator p = flush_ops.find(obc->obs.oi.soid);
if (p == flush_ops.end()) {
dout(10) << __func__ << " no flush in progress, aborting" << dendl;
reply_ctx(ctx, -EINVAL);
return;
}
} else if (!get_rw_locks(write_ordered, ctx)) {
dout(20) << __func__ << " waiting for rw locks " << dendl;
op->mark_delayed("waiting for rw locks");
close_op_ctx(ctx);
return;
}
dout(20) << __func__ << " obc " << *obc << dendl;
if (r) {
dout(20) << __func__ << " returned an error: " << r << dendl;
if (op->may_write() &&
get_osdmap()->require_osd_release >= ceph_release_t::kraken) {
record_write_error(op, oid, nullptr, r,
ctx->op->allows_returnvec() ? ctx : nullptr);
} else {
osd->reply_op_error(op, r);
}
close_op_ctx(ctx);
return;
}
if (m->has_flag(CEPH_OSD_FLAG_IGNORE_CACHE)) {
ctx->ignore_cache = true;
}
if ((op->may_read()) && (obc->obs.oi.is_lost())) {
// This object is lost. Reading from it returns an error.
dout(20) << __func__ << ": object " << obc->obs.oi.soid
<< " is lost" << dendl;
reply_ctx(ctx, -ENFILE);
return;
}
if (!op->may_write() &&
!op->may_cache() &&
(!obc->obs.exists ||
((m->get_snapid() != CEPH_SNAPDIR) &&
obc->obs.oi.is_whiteout()))) {
// copy the reqids for copy get on ENOENT
if (m->ops[0].op.op == CEPH_OSD_OP_COPY_GET) {
fill_in_copy_get_noent(op, oid, m->ops[0]);
close_op_ctx(ctx);
return;
}
reply_ctx(ctx, -ENOENT);
return;
}
op->mark_started();
execute_ctx(ctx);
utime_t prepare_latency = ceph_clock_now();
prepare_latency -= op->get_dequeued_time();
osd->logger->tinc(l_osd_op_prepare_lat, prepare_latency);
if (op->may_read() && op->may_write()) {
osd->logger->tinc(l_osd_op_rw_prepare_lat, prepare_latency);
} else if (op->may_read()) {
osd->logger->tinc(l_osd_op_r_prepare_lat, prepare_latency);
} else if (op->may_write() || op->may_cache()) {
osd->logger->tinc(l_osd_op_w_prepare_lat, prepare_latency);
}
// force recovery of the oldest missing object if too many logs
maybe_force_recovery();
}
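// Decide how to service an op on a manifest object. Manifest-management
// ops (SET_REDIRECT, SET_CHUNK, etc.) proceed normally (NOOP); redirect
// objects are generally proxied to the redirect target; chunked objects
// are proxied chunk-by-chunk when possible, or promoted if a needed
// chunk is missing, blocking behind recovery/scrub as required.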
PrimaryLogPG::cache_result_t PrimaryLogPG::maybe_handle_manifest_detail(
OpRequestRef op,
bool write_ordered,
ObjectContextRef obc)
{
if (!obc) {
dout(20) << __func__ << ": no obc " << dendl;
return cache_result_t::NOOP;
}
if (!obc->obs.oi.has_manifest()) {
dout(20) << __func__ << ": " << obc->obs.oi.soid
<< " is not manifest object " << dendl;
return cache_result_t::NOOP;
}
if (op->get_req<MOSDOp>()->get_flags() & CEPH_OSD_FLAG_IGNORE_REDIRECT) {
dout(20) << __func__ << ": ignoring redirect due to flag" << dendl;
return cache_result_t::NOOP;
}
// if it is write-ordered and blocked, stop now
if (obc->is_blocked() && write_ordered) {
// we're already doing something with this object
dout(20) << __func__ << " blocked on " << obc->obs.oi.soid << dendl;
return cache_result_t::NOOP;
}
vector<OSDOp> ops = op->get_req<MOSDOp>()->ops;
for (vector<OSDOp>::iterator p = ops.begin(); p != ops.end(); ++p) {
OSDOp& osd_op = *p;
ceph_osd_op& op = osd_op.op;
if (op.op == CEPH_OSD_OP_SET_REDIRECT ||
op.op == CEPH_OSD_OP_SET_CHUNK ||
op.op == CEPH_OSD_OP_UNSET_MANIFEST ||
op.op == CEPH_OSD_OP_TIER_PROMOTE ||
op.op == CEPH_OSD_OP_TIER_FLUSH ||
op.op == CEPH_OSD_OP_TIER_EVICT ||
op.op == CEPH_OSD_OP_ISDIRTY) {
return cache_result_t::NOOP;
}
}
switch (obc->obs.oi.manifest.type) {
case object_manifest_t::TYPE_REDIRECT:
if (op->may_write() || write_ordered) {
do_proxy_write(op, obc);
} else {
// promoted object
if (obc->obs.oi.size != 0) {
return cache_result_t::NOOP;
}
do_proxy_read(op, obc);
}
return cache_result_t::HANDLED_PROXY;
case object_manifest_t::TYPE_CHUNKED:
{
if (can_proxy_chunked_read(op, obc)) {
map<hobject_t,FlushOpRef>::iterator p = flush_ops.find(obc->obs.oi.soid);
if (p != flush_ops.end()) {
do_proxy_chunked_op(op, obc->obs.oi.soid, obc, true);
return cache_result_t::HANDLED_PROXY;
}
do_proxy_chunked_op(op, obc->obs.oi.soid, obc, write_ordered);
return cache_result_t::HANDLED_PROXY;
}
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
ceph_assert(m->get_type() == CEPH_MSG_OSD_OP);
hobject_t head = m->get_hobj();
if (is_degraded_or_backfilling_object(head)) {
dout(20) << __func__ << ": " << head << " is degraded, waiting" << dendl;
wait_for_degraded_object(head, op);
return cache_result_t::BLOCKED_RECOVERY;
}
if (m_scrubber->write_blocked_by_scrub(head)) {
dout(20) << __func__ << ": waiting for scrub" << dendl;
waiting_for_scrub.push_back(op);
op->mark_delayed("waiting for scrub");
return cache_result_t::BLOCKED_RECOVERY;
}
if (!check_laggy_requeue(op)) {
return cache_result_t::BLOCKED_RECOVERY;
}
for (auto& p : obc->obs.oi.manifest.chunk_map) {
if (p.second.is_missing()) {
auto m = op->get_req<MOSDOp>();
const object_locator_t oloc = m->get_object_locator();
promote_object(obc, obc->obs.oi.soid, oloc, op, NULL);
return cache_result_t::BLOCKED_PROMOTE;
}
}
return cache_result_t::NOOP;
}
default:
ceph_abort_msg("unrecognized manifest type");
}
return cache_result_t::NOOP;
}
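// A write failed with an error we want to remember: append an ERROR
// entry carrying the reqid and return code to the pg log, and reply to
// the client once that entry commits.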
void PrimaryLogPG::record_write_error(OpRequestRef op, const hobject_t &soid,
MOSDOpReply *orig_reply, int r,
OpContext *ctx_for_op_returns)
{
dout(20) << __func__ << " r=" << r << dendl;
ceph_assert(op->may_write());
const osd_reqid_t &reqid = op->get_req<MOSDOp>()->get_reqid();
mempool::osd_pglog::list<pg_log_entry_t> entries;
entries.push_back(pg_log_entry_t(pg_log_entry_t::ERROR, soid,
get_next_version(), eversion_t(), 0,
reqid, utime_t(), r));
if (ctx_for_op_returns) {
entries.back().set_op_returns(*ctx_for_op_returns->ops);
dout(20) << __func__ << " op_returns=" << entries.back().op_returns << dendl;
}
struct OnComplete {
PrimaryLogPG *pg;
OpRequestRef op;
boost::intrusive_ptr<MOSDOpReply> orig_reply;
int r;
OnComplete(
PrimaryLogPG *pg,
OpRequestRef op,
MOSDOpReply *orig_reply,
int r)
: pg(pg), op(op),
orig_reply(orig_reply, false /* take over ref */), r(r)
{}
void operator()() {
ldpp_dout(pg, 20) << "finished " << __func__ << " r=" << r << dendl;
auto m = op->get_req<MOSDOp>();
MOSDOpReply *reply = orig_reply.detach();
if (reply == nullptr) {
// callers may pass a null orig_reply; build a commit reply from the
// original request so the client still gets an answer
reply = new MOSDOpReply(m, r, pg->get_osdmap_epoch(),
CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_ACK, true);
}
ldpp_dout(pg, 10) << " sending commit on " << *m << " " << reply << dendl;
pg->osd->send_message_osd_client(reply, m->get_connection());
}
};
ObcLockManager lock_manager;
submit_log_entries(
entries,
std::move(lock_manager),
std::optional<std::function<void(void)> >(
OnComplete(this, op, orig_reply, r)),
op,
r);
}
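// Cache-tier decision point: given the object lookup result for an op,
// decide whether to serve it here, proxy it to the base pool, redirect
// the client, promote the object first, or block (e.g. when the cache
// is full). Returns NOOP when normal processing should continue.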
PrimaryLogPG::cache_result_t PrimaryLogPG::maybe_handle_cache_detail(
OpRequestRef op,
bool write_ordered,
ObjectContextRef obc,
int r, hobject_t missing_oid,
bool must_promote,
bool in_hit_set,
ObjectContextRef *promote_obc)
{
// return quickly if caching is not enabled
if (pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE)
return cache_result_t::NOOP;
if (op &&
op->get_req() &&
op->get_req()->get_type() == CEPH_MSG_OSD_OP &&
(op->get_req<MOSDOp>()->get_flags() &
CEPH_OSD_FLAG_IGNORE_CACHE)) {
dout(20) << __func__ << ": ignoring cache due to flag" << dendl;
return cache_result_t::NOOP;
}
must_promote = must_promote || op->need_promote();
if (obc)
dout(25) << __func__ << " " << obc->obs.oi << " "
<< (obc->obs.exists ? "exists" : "DNE")
<< " missing_oid " << missing_oid
<< " must_promote " << (int)must_promote
<< " in_hit_set " << (int)in_hit_set
<< dendl;
else
dout(25) << __func__ << " (no obc)"
<< " missing_oid " << missing_oid
<< " must_promote " << (int)must_promote
<< " in_hit_set " << (int)in_hit_set
<< dendl;
// if it is write-ordered and blocked, stop now
if (obc.get() && obc->is_blocked() && write_ordered) {
// we're already doing something with this object
dout(20) << __func__ << " blocked on " << obc->obs.oi.soid << dendl;
return cache_result_t::NOOP;
}
if (r == -ENOENT && missing_oid == hobject_t()) {
// we know this object is logically absent (e.g., an undefined clone)
return cache_result_t::NOOP;
}
if (obc.get() && obc->obs.exists) {
osd->logger->inc(l_osd_op_cache_hit);
return cache_result_t::NOOP;
}
if (!is_primary()) {
dout(20) << __func__ << " cache miss; ask the primary" << dendl;
osd->reply_op_error(op, -EAGAIN);
return cache_result_t::REPLIED_WITH_EAGAIN;
}
if (missing_oid == hobject_t() && obc.get()) {
missing_oid = obc->obs.oi.soid;
}
auto m = op->get_req<MOSDOp>();
const object_locator_t oloc = m->get_object_locator();
if (op->need_skip_handle_cache()) {
return cache_result_t::NOOP;
}
OpRequestRef promote_op;
switch (pool.info.cache_mode) {
case pg_pool_t::CACHEMODE_WRITEBACK:
if (agent_state &&
agent_state->evict_mode == TierAgentState::EVICT_MODE_FULL) {
if (!op->may_write() && !op->may_cache() &&
!write_ordered && !must_promote) {
dout(20) << __func__ << " cache pool full, proxying read" << dendl;
do_proxy_read(op);
return cache_result_t::HANDLED_PROXY;
}
dout(20) << __func__ << " cache pool full, waiting" << dendl;
block_write_on_full_cache(missing_oid, op);
return cache_result_t::BLOCKED_FULL;
}
if (must_promote || (!hit_set && !op->need_skip_promote())) {
promote_object(obc, missing_oid, oloc, op, promote_obc);
return cache_result_t::BLOCKED_PROMOTE;
}
if (op->may_write() || op->may_cache()) {
do_proxy_write(op);
// Promote too?
if (!op->need_skip_promote() &&
maybe_promote(obc, missing_oid, oloc, in_hit_set,
pool.info.min_write_recency_for_promote,
OpRequestRef(),
promote_obc)) {
return cache_result_t::BLOCKED_PROMOTE;
}
return cache_result_t::HANDLED_PROXY;
} else {
do_proxy_read(op);
// Avoid duplicate promotion
if (obc.get() && obc->is_blocked()) {
if (promote_obc)
*promote_obc = obc;
return cache_result_t::BLOCKED_PROMOTE;
}
// Promote too?
if (!op->need_skip_promote()) {
(void)maybe_promote(obc, missing_oid, oloc, in_hit_set,
pool.info.min_read_recency_for_promote,
promote_op, promote_obc);
}
return cache_result_t::HANDLED_PROXY;
}
ceph_abort_msg("unreachable");
return cache_result_t::NOOP;
case pg_pool_t::CACHEMODE_READONLY:
// TODO: clean this case up
if (!obc.get() && r == -ENOENT) {
// we don't have the object and op's a read
promote_object(obc, missing_oid, oloc, op, promote_obc);
return cache_result_t::BLOCKED_PROMOTE;
}
if (!r) { // it must be a write
do_cache_redirect(op);
return cache_result_t::HANDLED_REDIRECT;
}
// crap, there was a failure of some kind
return cache_result_t::NOOP;
case pg_pool_t::CACHEMODE_FORWARD:
// this mode is deprecated; proxy instead
case pg_pool_t::CACHEMODE_PROXY:
if (!must_promote) {
if (op->may_write() || op->may_cache() || write_ordered) {
do_proxy_write(op);
return cache_result_t::HANDLED_PROXY;
} else {
do_proxy_read(op);
return cache_result_t::HANDLED_PROXY;
}
}
// ugh, we're forced to promote.
if (agent_state &&
agent_state->evict_mode == TierAgentState::EVICT_MODE_FULL) {
dout(20) << __func__ << " cache pool full, waiting" << dendl;
block_write_on_full_cache(missing_oid, op);
return cache_result_t::BLOCKED_FULL;
}
promote_object(obc, missing_oid, oloc, op, promote_obc);
return cache_result_t::BLOCKED_PROMOTE;
case pg_pool_t::CACHEMODE_READFORWARD:
// this mode is deprecated; proxy instead
case pg_pool_t::CACHEMODE_READPROXY:
// Do writeback to the cache tier for writes
if (op->may_write() || write_ordered || must_promote) {
if (agent_state &&
agent_state->evict_mode == TierAgentState::EVICT_MODE_FULL) {
dout(20) << __func__ << " cache pool full, waiting" << dendl;
block_write_on_full_cache(missing_oid, op);
return cache_result_t::BLOCKED_FULL;
}
promote_object(obc, missing_oid, oloc, op, promote_obc);
return cache_result_t::BLOCKED_PROMOTE;
}
// Otherwise it is a read; proxy it to the base tier
do_proxy_read(op);
return cache_result_t::HANDLED_PROXY;
default:
ceph_abort_msg("unrecognized cache_mode");
}
return cache_result_t::NOOP;
}
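// Decide whether to promote an object into the cache tier based on the
// pool's recency setting: recency 0 always promotes, 1 requires a hit in
// the current hit set, and larger values require hits in that many recent
// hit sets. Promotion is also subject to the OSD-wide promote throttle.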
bool PrimaryLogPG::maybe_promote(ObjectContextRef obc,
const hobject_t& missing_oid,
const object_locator_t& oloc,
bool in_hit_set,
uint32_t recency,
OpRequestRef promote_op,
ObjectContextRef *promote_obc)
{
dout(20) << __func__ << " missing_oid " << missing_oid
<< " in_hit_set " << in_hit_set << dendl;
switch (recency) {
case 0:
break;
case 1:
// Check if in the current hit set
if (in_hit_set) {
break;
} else {
// not promoting
return false;
}
break;
default:
{
unsigned count = (int)in_hit_set;
if (count) {
// Check if in other hit sets
const hobject_t& oid = obc.get() ? obc->obs.oi.soid : missing_oid;
for (map<time_t,HitSetRef>::reverse_iterator itor =
agent_state->hit_set_map.rbegin();
itor != agent_state->hit_set_map.rend();
++itor) {
if (!itor->second->contains(oid)) {
break;
}
++count;
if (count >= recency) {
break;
}
}
}
if (count >= recency) {
break;
}
return false; // not promoting
}
break;
}
if (osd->promote_throttle()) {
dout(10) << __func__ << " promote throttled" << dendl;
return false;
}
promote_object(obc, missing_oid, oloc, promote_op, promote_obc);
return true;
}
void PrimaryLogPG::do_cache_redirect(OpRequestRef op)
{
auto m = op->get_req<MOSDOp>();
int flags = m->get_flags() & (CEPH_OSD_FLAG_ACK|CEPH_OSD_FLAG_ONDISK);
MOSDOpReply *reply = new MOSDOpReply(m, -ENOENT, get_osdmap_epoch(),
flags, false);
request_redirect_t redir(m->get_object_locator(), pool.info.tier_of);
reply->set_redirect(redir);
dout(10) << "sending redirect to pool " << pool.info.tier_of << " for op "
<< *op->get_req() << dendl;
m->get_connection()->send_message(reply);
return;
}
struct C_ProxyRead : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
PrimaryLogPG::ProxyReadOpRef prdop;
utime_t start;
C_ProxyRead(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::ProxyReadOpRef& prd)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), prdop(prd), start(ceph_clock_now())
{}
void finish(int r) override {
if (prdop->canceled)
return;
std::scoped_lock locker{*pg};
if (prdop->canceled) {
return;
}
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->finish_proxy_read(oid, tid, r);
pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now() - start);
}
}
};
struct C_ProxyChunkRead : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
PrimaryLogPG::ProxyReadOpRef prdop;
utime_t start;
ObjectOperation *obj_op;
int op_index = 0;
uint64_t req_offset = 0;
ObjectContextRef obc;
uint64_t req_total_len = 0;
C_ProxyChunkRead(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::ProxyReadOpRef& prd)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), prdop(prd), start(ceph_clock_now()), obj_op(NULL)
{}
void finish(int r) override {
if (prdop->canceled)
return;
std::scoped_lock locker{*pg};
if (prdop->canceled) {
return;
}
if (last_peering_reset == pg->get_last_peering_reset()) {
if (r >= 0) {
if (!prdop->ops[op_index].outdata.length()) {
ceph_assert(req_total_len);
bufferlist list;
bufferptr bptr(req_total_len);
list.push_back(std::move(bptr));
prdop->ops[op_index].outdata.append(list);
}
ceph_assert(obj_op);
uint64_t copy_offset;
if (req_offset >= prdop->ops[op_index].op.extent.offset) {
copy_offset = req_offset - prdop->ops[op_index].op.extent.offset;
} else {
copy_offset = 0;
}
prdop->ops[op_index].outdata.begin(copy_offset).copy_in(
obj_op->ops[0].outdata.length(),
obj_op->ops[0].outdata.c_str());
}
pg->finish_proxy_read(oid, tid, r);
pg->osd->logger->tinc(l_osd_tier_r_lat, ceph_clock_now() - start);
if (obj_op) {
delete obj_op;
}
}
}
};
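// Proxy a read to the backing object: either the redirect target of a
// manifest object or the same object in the base pool (cache tiering).
// The objecter reply is matched back up with the client op in
// finish_proxy_read().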
void PrimaryLogPG::do_proxy_read(OpRequestRef op, ObjectContextRef obc)
{
// NOTE: non-const here because the ProxyReadOp needs mutable refs to
// stash the result in the request's OSDOp vector
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
object_locator_t oloc;
hobject_t soid;
/* extensible tier */
if (obc && obc->obs.exists && obc->obs.oi.has_manifest()) {
switch (obc->obs.oi.manifest.type) {
case object_manifest_t::TYPE_REDIRECT:
oloc = object_locator_t(obc->obs.oi.manifest.redirect_target);
soid = obc->obs.oi.manifest.redirect_target;
break;
default:
ceph_abort_msg("unrecognized manifest type");
}
} else {
/* proxy */
soid = m->get_hobj();
oloc = object_locator_t(m->get_object_locator());
oloc.pool = pool.info.tier_of;
}
unsigned flags = CEPH_OSD_FLAG_IGNORE_CACHE | CEPH_OSD_FLAG_IGNORE_OVERLAY;
// pass through some original flags that make sense.
// - leave out redirection and balancing flags since we are
// already proxying through the primary
// - leave off read/write/exec flags that are derived from the op
flags |= m->get_flags() & (CEPH_OSD_FLAG_RWORDERED |
CEPH_OSD_FLAG_ORDERSNAP |
CEPH_OSD_FLAG_ENFORCE_SNAPC |
CEPH_OSD_FLAG_MAP_SNAP_CLONE);
dout(10) << __func__ << " Start proxy read for " << *m << dendl;
ProxyReadOpRef prdop(std::make_shared<ProxyReadOp>(op, soid, m->ops));
ObjectOperation obj_op;
obj_op.dup(prdop->ops);
if (pool.info.cache_mode == pg_pool_t::CACHEMODE_WRITEBACK &&
(agent_state && agent_state->evict_mode != TierAgentState::EVICT_MODE_FULL)) {
for (unsigned i = 0; i < obj_op.ops.size(); i++) {
ceph_osd_op& op = obj_op.ops[i].op; // reference: the fadvise flag tweaks below must land on the proxied op
switch (op.op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SYNC_READ:
case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_CHECKSUM:
case CEPH_OSD_OP_CMPEXT:
op.flags = (op.flags | CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL) &
~(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED | CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
}
}
}
C_ProxyRead *fin = new C_ProxyRead(this, soid, get_last_peering_reset(),
prdop);
ceph_tid_t tid = osd->objecter->read(
soid.oid, oloc, obj_op,
m->get_snapid(), NULL,
flags, new C_OnFinisher(fin, osd->get_objecter_finisher(get_pg_shard())),
&prdop->user_version,
&prdop->data_offset,
m->get_features());
fin->tid = tid;
prdop->objecter_tid = tid;
proxyread_ops[tid] = prdop;
in_progress_proxy_ops[soid].push_back(op);
}
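// Completion of a proxied read: locate the tracking state by tid, and
// once no proxied reads remain outstanding for this request, assemble a
// read reply from the returned data and send it to the client.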
void PrimaryLogPG::finish_proxy_read(hobject_t oid, ceph_tid_t tid, int r)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<ceph_tid_t, ProxyReadOpRef>::iterator p = proxyread_ops.find(tid);
if (p == proxyread_ops.end()) {
dout(10) << __func__ << " no proxyread_op found" << dendl;
return;
}
ProxyReadOpRef prdop = p->second;
if (tid != prdop->objecter_tid) {
dout(10) << __func__ << " tid " << tid << " != prdop " << prdop
<< " tid " << prdop->objecter_tid << dendl;
return;
}
if (oid != prdop->soid) {
dout(10) << __func__ << " oid " << oid << " != prdop " << prdop
<< " soid " << prdop->soid << dendl;
return;
}
proxyread_ops.erase(tid);
map<hobject_t, list<OpRequestRef>>::iterator q = in_progress_proxy_ops.find(oid);
if (q == in_progress_proxy_ops.end()) {
dout(10) << __func__ << " no in_progress_proxy_ops found" << dendl;
return;
}
ceph_assert(q->second.size());
list<OpRequestRef>::iterator it = std::find(q->second.begin(),
q->second.end(),
prdop->op);
ceph_assert(it != q->second.end());
OpRequestRef op = *it;
q->second.erase(it);
if (q->second.size() == 0) {
in_progress_proxy_ops.erase(oid);
} else if (std::find(q->second.begin(),
q->second.end(),
prdop->op) != q->second.end()) {
/* multiple read case */
dout(20) << __func__ << " " << oid << " is not completed " << dendl;
return;
}
osd->logger->inc(l_osd_tier_proxy_read);
auto m = op->get_req<MOSDOp>();
OpContext *ctx = new OpContext(op, m->get_reqid(), &prdop->ops, this);
ctx->reply = new MOSDOpReply(m, 0, get_osdmap_epoch(), 0, false);
ctx->user_at_version = prdop->user_version;
ctx->data_off = prdop->data_offset;
ctx->ignore_log_op_stats = true;
complete_read_ctx(r, ctx);
}
void PrimaryLogPG::kick_proxy_ops_blocked(hobject_t& soid)
{
map<hobject_t, list<OpRequestRef>>::iterator p = in_progress_proxy_ops.find(soid);
if (p == in_progress_proxy_ops.end())
return;
list<OpRequestRef>& ls = p->second;
dout(10) << __func__ << " " << soid << " requeuing " << ls.size() << " requests" << dendl;
requeue_ops(ls);
in_progress_proxy_ops.erase(p);
}
void PrimaryLogPG::cancel_proxy_read(ProxyReadOpRef prdop,
vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << " " << prdop->soid << dendl;
prdop->canceled = true;
// cancel objecter op, if we can
if (prdop->objecter_tid) {
tids->push_back(prdop->objecter_tid);
for (uint32_t i = 0; i < prdop->ops.size(); i++) {
prdop->ops[i].outdata.clear();
}
proxyread_ops.erase(prdop->objecter_tid);
prdop->objecter_tid = 0;
}
}
void PrimaryLogPG::cancel_proxy_ops(bool requeue, vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << dendl;
// cancel proxy reads
map<ceph_tid_t, ProxyReadOpRef>::iterator p = proxyread_ops.begin();
while (p != proxyread_ops.end()) {
cancel_proxy_read((p++)->second, tids);
}
// cancel proxy writes
map<ceph_tid_t, ProxyWriteOpRef>::iterator q = proxywrite_ops.begin();
while (q != proxywrite_ops.end()) {
cancel_proxy_write((q++)->second, tids);
}
if (requeue) {
map<hobject_t, list<OpRequestRef>>::iterator p =
in_progress_proxy_ops.begin();
while (p != in_progress_proxy_ops.end()) {
list<OpRequestRef>& ls = p->second;
dout(10) << __func__ << " " << p->first << " requeuing " << ls.size()
<< " requests" << dendl;
requeue_ops(ls);
in_progress_proxy_ops.erase(p++);
}
} else {
in_progress_proxy_ops.clear();
}
}
struct C_ProxyWrite_Commit : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
PrimaryLogPG::ProxyWriteOpRef pwop;
C_ProxyWrite_Commit(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::ProxyWriteOpRef& pw)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), pwop(pw)
{}
void finish(int r) override {
if (pwop->canceled)
return;
std::scoped_lock locker{*pg};
if (pwop->canceled) {
return;
}
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->finish_proxy_write(oid, tid, r);
}
}
};
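// Proxy a write to the backing object (redirect target or base pool),
// preserving the client's snap context and original reqid.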
void PrimaryLogPG::do_proxy_write(OpRequestRef op, ObjectContextRef obc)
{
// NOTE: non-const because ProxyWriteOp takes a mutable ref
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
object_locator_t oloc;
SnapContext snapc(m->get_snap_seq(), m->get_snaps());
hobject_t soid;
/* extensible tier */
if (obc && obc->obs.exists && obc->obs.oi.has_manifest()) {
switch (obc->obs.oi.manifest.type) {
case object_manifest_t::TYPE_REDIRECT:
oloc = object_locator_t(obc->obs.oi.manifest.redirect_target);
soid = obc->obs.oi.manifest.redirect_target;
break;
default:
ceph_abort_msg("unrecognized manifest type");
}
} else {
/* proxy */
soid = m->get_hobj();
oloc = object_locator_t(m->get_object_locator());
oloc.pool = pool.info.tier_of;
}
unsigned flags = CEPH_OSD_FLAG_IGNORE_CACHE | CEPH_OSD_FLAG_IGNORE_OVERLAY;
if (!(op->may_write() || op->may_cache())) {
flags |= CEPH_OSD_FLAG_RWORDERED;
}
if (op->allows_returnvec()) {
flags |= CEPH_OSD_FLAG_RETURNVEC;
}
dout(10) << __func__ << " Start proxy write for " << *m << dendl;
ProxyWriteOpRef pwop(std::make_shared<ProxyWriteOp>(op, soid, m->ops, m->get_reqid()));
pwop->ctx = new OpContext(op, m->get_reqid(), &pwop->ops, this);
pwop->mtime = m->get_mtime();
ObjectOperation obj_op;
obj_op.dup(pwop->ops);
C_ProxyWrite_Commit *fin = new C_ProxyWrite_Commit(
this, soid, get_last_peering_reset(), pwop);
ceph_tid_t tid = osd->objecter->mutate(
soid.oid, oloc, obj_op, snapc,
ceph::real_clock::from_ceph_timespec(pwop->mtime),
flags, new C_OnFinisher(fin, osd->get_objecter_finisher(get_pg_shard())),
&pwop->user_version, pwop->reqid);
fin->tid = tid;
pwop->objecter_tid = tid;
proxywrite_ops[tid] = pwop;
in_progress_proxy_ops[soid].push_back(op);
}
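// For a chunked manifest object, split each read in the message into
// per-chunk extents using chunk_map and proxy each piece to the chunk's
// backing object via do_proxy_chunked_read().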
void PrimaryLogPG::do_proxy_chunked_op(OpRequestRef op, const hobject_t& missing_oid,
ObjectContextRef obc, bool write_ordered)
{
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
OSDOp *osd_op = NULL;
for (unsigned int i = 0; i < m->ops.size(); i++) {
osd_op = &m->ops[i];
uint64_t cursor = osd_op->op.extent.offset;
uint64_t op_length = osd_op->op.extent.offset + osd_op->op.extent.length;
uint64_t chunk_length = 0, chunk_index = 0, req_len = 0;
object_manifest_t *manifest = &obc->obs.oi.manifest;
map <uint64_t, map<uint64_t, uint64_t>> chunk_read;
while (cursor < op_length) {
chunk_index = 0;
chunk_length = 0;
/* find the right chunk position for cursor */
for (auto &p : manifest->chunk_map) {
if (p.first <= cursor && p.first + p.second.length > cursor) {
chunk_length = p.second.length;
chunk_index = p.first;
break;
}
}
/* no index */
if (!chunk_index && !chunk_length) {
if (cursor == osd_op->op.extent.offset) {
OpContext *ctx = new OpContext(op, m->get_reqid(), &m->ops, this);
ctx->reply = new MOSDOpReply(m, 0, get_osdmap_epoch(), 0, false);
ctx->data_off = osd_op->op.extent.offset;
ctx->ignore_log_op_stats = true;
complete_read_ctx(0, ctx);
}
break;
}
uint64_t next_length = chunk_length;
/* the size to read -> | op length | */
/* | a chunk | */
if (cursor + next_length > op_length) {
next_length = op_length - cursor;
}
/* the size to read -> | op length | */
/* | a chunk | */
if (cursor + next_length > chunk_index + chunk_length) {
next_length = chunk_index + chunk_length - cursor;
}
chunk_read[cursor] = {{chunk_index, next_length}};
cursor += next_length;
}
req_len = cursor - osd_op->op.extent.offset;
for (auto &p : chunk_read) {
auto chunks = p.second.begin();
dout(20) << __func__ << " chunk_index: " << chunks->first
<< " next_length: " << chunks->second << " cursor: "
<< p.first << dendl;
do_proxy_chunked_read(op, obc, i, chunks->first, p.first, chunks->second, req_len, write_ordered);
}
}
}
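// Completion for refcount operations issued on behalf of a client op:
// unblock the object context, then either re-execute the op on success,
// reply with the error, or (on cancel) drop or requeue the op as requested.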
struct RefCountCallback : public Context {
public:
PrimaryLogPG::OpContext *ctx;
OSDOp& osd_op;
bool requeue = false;
RefCountCallback(PrimaryLogPG::OpContext *ctx, OSDOp &osd_op)
: ctx(ctx), osd_op(osd_op) {}
void finish(int r) override {
// NB: caller must already have pg->lock held
ctx->obc->stop_block();
ctx->pg->kick_object_context_blocked(ctx->obc);
if (r >= 0) {
osd_op.rval = 0;
ctx->pg->execute_ctx(ctx);
} else {
// on cancel simply toss op out,
// or requeue as requested
if (r != -ECANCELED) {
if (ctx->op)
ctx->pg->osd->reply_op_error(ctx->op, r);
} else if (requeue) {
if (ctx->op)
ctx->pg->requeue_op(ctx->op);
}
ctx->pg->close_op_ctx(ctx);
}
}
void set_requeue(bool rq) {
requeue = rq;
}
};
struct SetManifestFinisher : public PrimaryLogPG::OpFinisher {
OSDOp& osd_op;
explicit SetManifestFinisher(OSDOp& osd_op) : osd_op(osd_op) {
}
int execute() override {
return osd_op.rval;
}
};
struct C_SetManifestRefCountDone : public Context {
PrimaryLogPGRef pg;
hobject_t soid;
uint64_t offset;
ceph_tid_t tid = 0;
C_SetManifestRefCountDone(PrimaryLogPG *p,
hobject_t soid, uint64_t offset) :
pg(p), soid(soid), offset(offset) {}
void finish(int r) override {
if (r == -ECANCELED)
return;
std::scoped_lock locker{*pg};
pg->finish_set_manifest_refcount(soid, r, tid, offset);
}
};
struct C_SetDedupChunks : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
uint64_t offset;
C_SetDedupChunks(PrimaryLogPG *p, hobject_t o, epoch_t lpr, uint64_t offset)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), offset(offset)
{}
void finish(int r) override {
if (r == -ECANCELED)
return;
std::scoped_lock locker{*pg};
if (last_peering_reset != pg->get_last_peering_reset()) {
return;
}
pg->finish_set_dedup(oid, r, tid, offset);
}
};
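// Cancel all in-flight manifest refcount operations, collecting their
// objecter tids; completing their callbacks with -ECANCELED drops or
// requeues the originating client ops depending on 'requeue'.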
void PrimaryLogPG::cancel_manifest_ops(bool requeue, vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << dendl;
auto p = manifest_ops.begin();
while (p != manifest_ops.end()) {
auto mop = p->second;
// cancel objecter op, if we can
if (mop->objecter_tid) {
tids->push_back(mop->objecter_tid);
mop->objecter_tid = 0;
} else if (!mop->tids.empty()) {
for (auto &p : mop->tids) {
tids->push_back(p.second);
}
}
if (mop->cb) {
mop->cb->set_requeue(requeue);
mop->cb->complete(-ECANCELED);
}
manifest_ops.erase(p++);
}
}
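// Count how many references to the fingerprint object fp_oid are held by
// this object's head and clones.  Returns -EBUSY if a clone's snap is being
// removed, or -EAGAIN if a clone is unreadable and must be recovered first.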
int PrimaryLogPG::get_manifest_ref_count(ObjectContextRef obc, std::string& fp_oid, OpRequestRef op)
{
int cnt = 0;
// head
for (auto &p : obc->obs.oi.manifest.chunk_map) {
if (p.second.oid.oid.name == fp_oid) {
cnt++;
}
}
// snap
SnapSet& ss = obc->ssc->snapset;
const OSDMapRef& osdmap = get_osdmap();
for (vector<snapid_t>::const_reverse_iterator p = ss.clones.rbegin();
p != ss.clones.rend();
++p) {
object_ref_delta_t refs;
ObjectContextRef obc_l = nullptr;
ObjectContextRef obc_g = nullptr;
hobject_t clone_oid = obc->obs.oi.soid;
clone_oid.snap = *p;
if (osdmap->in_removed_snaps_queue(info.pgid.pgid.pool(), *p)) {
return -EBUSY;
}
if (is_unreadable_object(clone_oid)) {
dout(10) << __func__ << ": " << clone_oid
<< " is unreadable. Need to wait for recovery" << dendl;
wait_for_unreadable_object(clone_oid, op);
return -EAGAIN;
}
ObjectContextRef clone_obc = get_object_context(clone_oid, false);
if (!clone_obc) {
break;
}
if (recover_adjacent_clones(clone_obc, op)) {
return -EAGAIN;
}
get_adjacent_clones(clone_obc, obc_l, obc_g);
clone_obc->obs.oi.manifest.calc_refs_to_inc_on_set(
obc_g ? &(obc_g->obs.oi.manifest) : nullptr ,
nullptr,
refs);
for (auto p = refs.begin(); p != refs.end(); ++p) {
if (p->first.oid.name == fp_oid && p->second > 0) {
cnt += p->second;
}
}
}
return cnt;
}
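// Return true (and queue the op to wait) if a clone adjacent to obc is
// still unreadable; manifest reference counting needs the neighbouring
// clones to be readable before it can proceed.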
bool PrimaryLogPG::recover_adjacent_clones(ObjectContextRef obc, OpRequestRef op)
{
if (!obc->ssc || !obc->ssc->snapset.clones.size()) {
return false;
}
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
bool has_manifest_op = std::any_of(
begin(m->ops),
end(m->ops),
[](const auto& osd_op) {
return osd_op.op.op == CEPH_OSD_OP_SET_CHUNK;
});
if (!obc->obs.oi.manifest.is_chunked() && !has_manifest_op) {
return false;
}
ceph_assert(op);
const SnapSet& snapset = obc->ssc->snapset;
auto s = std::find(snapset.clones.begin(), snapset.clones.end(), obc->obs.oi.soid.snap);
auto is_unreadable_snap = [this, obc, &snapset, op](auto iter) -> bool {
hobject_t cid = obc->obs.oi.soid;
cid.snap = (iter == snapset.clones.end()) ? snapid_t(CEPH_NOSNAP) : *iter;
if (is_unreadable_object(cid)) {
dout(10) << __func__ << ": clone " << cid
<< " is unreadable, waiting" << dendl;
wait_for_unreadable_object(cid, op);
return true;
}
return false;
};
if (s != snapset.clones.begin()) {
if (is_unreadable_snap(s - 1)) {
return true;
}
}
if (s != snapset.clones.end()) {
if (is_unreadable_snap(s + 1)) {
return true;
}
}
return false;
}
ObjectContextRef PrimaryLogPG::get_prev_clone_obc(ObjectContextRef obc)
{
auto s = std::find(obc->ssc->snapset.clones.begin(), obc->ssc->snapset.clones.end(),
obc->obs.oi.soid.snap);
if (s != obc->ssc->snapset.clones.begin()) {
auto s_iter = s - 1;
hobject_t cid = obc->obs.oi.soid;
object_ref_delta_t refs;
cid.snap = *s_iter;
ObjectContextRef cobc = get_object_context(cid, false, NULL);
ceph_assert(cobc);
return cobc;
}
return nullptr;
}
void PrimaryLogPG::dec_refcount(const hobject_t& soid, const object_ref_delta_t& refs)
{
for (auto p = refs.begin(); p != refs.end(); ++p) {
int dec_ref_count = p->second;
ceph_assert(dec_ref_count < 0);
while (dec_ref_count < 0) {
dout(10) << __func__ << ": decrement reference on offset oid: " << p->first << dendl;
refcount_manifest(soid, p->first,
refcount_t::DECREMENT_REF, NULL, std::nullopt);
dec_ref_count++;
}
}
}
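// Locate the clones adjacent to src_obc in snap order: _l is the next older
// clone and _g the next newer clone (or the head), if they exist.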
void PrimaryLogPG::get_adjacent_clones(ObjectContextRef src_obc,
ObjectContextRef& _l, ObjectContextRef& _g)
{
const SnapSet& snapset = src_obc->ssc->snapset;
const object_info_t& oi = src_obc->obs.oi;
auto get_context = [this, &oi, &snapset](auto iter)
-> ObjectContextRef {
hobject_t cid = oi.soid;
cid.snap = (iter == snapset.clones.end()) ? snapid_t(CEPH_NOSNAP) : *iter;
ObjectContextRef obc = get_object_context(cid, false, NULL);
ceph_assert(obc);
return obc;
};
// check adjacent clones
auto s = std::find(snapset.clones.begin(), snapset.clones.end(), oi.soid.snap);
// We *must* find the clone iff it's not head,
// let s == snapset.clones.end() mean head
ceph_assert((s == snapset.clones.end()) == oi.soid.is_head());
if (s != snapset.clones.begin()) {
_l = get_context(s - 1);
}
if (s != snapset.clones.end()) {
_g = get_context(s + 1);
}
}
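// For a set-chunk operation, work out which target chunks gain a reference
// relative to the adjacent clones and issue the increments up front;
// returns true if the op has to wait for those increments to complete.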
bool PrimaryLogPG::inc_refcount_by_set(OpContext* ctx, object_manifest_t& set_chunk,
OSDOp& osd_op)
{
object_ref_delta_t refs;
ObjectContextRef obc_l, obc_g;
get_adjacent_clones(ctx->obc, obc_l, obc_g);
set_chunk.calc_refs_to_inc_on_set(
obc_l ? &(obc_l->obs.oi.manifest) : nullptr,
obc_g ? &(obc_g->obs.oi.manifest) : nullptr,
refs);
bool need_inc_ref = false;
if (!refs.is_empty()) {
ManifestOpRef mop(std::make_shared<ManifestOp>(ctx->obc, nullptr));
for (auto c : set_chunk.chunk_map) {
auto p = refs.find(c.second.oid);
if (p == refs.end()) {
continue;
}
int inc_ref_count = p->second;
if (inc_ref_count > 0) {
/*
       * In the set-chunk case, the first thing we should do is increment
       * the reference count on the target object before updating the
       * object_manifest in object_info_t, so call refcount_manifest directly.
*/
auto target_oid = p->first;
auto offset = c.first;
auto length = c.second.length;
auto* fin = new C_SetManifestRefCountDone(this, ctx->obs->oi.soid, offset);
ceph_tid_t tid = refcount_manifest(ctx->obs->oi.soid, target_oid,
refcount_t::INCREMENT_REF, fin, std::nullopt);
fin->tid = tid;
mop->chunks[target_oid] = make_pair(offset, length);
mop->num_chunks++;
mop->tids[offset] = tid;
if (!ctx->obc->is_blocked()) {
dout(15) << fmt::format("{}: blocking object on rc: tid:{}", __func__, tid) << dendl;
ctx->obc->start_block();
}
need_inc_ref = true;
} else if (inc_ref_count < 0) {
hobject_t src = ctx->obs->oi.soid;
hobject_t tgt = p->first;
ctx->register_on_commit(
[src, tgt, this](){
refcount_manifest(src, tgt, refcount_t::DECREMENT_REF, NULL, std::nullopt);
});
}
}
if (mop->tids.size()) {
mop->cb = new RefCountCallback(ctx, osd_op);
manifest_ops[ctx->obs->oi.soid] = mop;
manifest_ops[ctx->obs->oi.soid]->op = ctx->op;
}
}
return need_inc_ref;
}
void PrimaryLogPG::update_chunk_map_by_dirty(OpContext* ctx) {
/*
   * We should consider two cases here:
   * 1) plain modification: this created dirty regions but didn't update chunk_map.
   * 2) rollback: the head is converted to the clone the rollback targets,
   *    and chunk_map has already been updated.
   * So what we do here is check whether chunk_map entries overlap dirty
   * (non-clean) regions; in the rollback case chunk_map doesn't need to be cleared.
*/
for (auto &p : ctx->obs->oi.manifest.chunk_map) {
if (!ctx->clean_regions.is_clean_region(p.first, p.second.length)) {
ctx->new_obs.oi.manifest.chunk_map.erase(p.first);
if (ctx->new_obs.oi.manifest.chunk_map.empty()) {
ctx->new_obs.oi.manifest.type = object_manifest_t::TYPE_NONE;
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_MANIFEST);
ctx->delta_stats.num_objects_manifest--;
}
}
}
}
void PrimaryLogPG::dec_refcount_by_dirty(OpContext* ctx)
{
object_ref_delta_t refs;
ObjectContextRef cobc = nullptr;
ObjectContextRef obc = ctx->obc;
  // Look at the previous clone, then figure out whether the updated chunks'
  // references need to be dropped
cobc = get_prev_clone_obc(obc);
obc->obs.oi.manifest.calc_refs_to_drop_on_modify(
cobc ? &cobc->obs.oi.manifest : nullptr,
ctx->clean_regions,
refs);
if (!refs.is_empty()) {
hobject_t soid = obc->obs.oi.soid;
ctx->register_on_commit(
[soid, this, refs](){
dec_refcount(soid, refs);
});
}
}
void PrimaryLogPG::dec_all_refcount_manifest(const object_info_t& oi, OpContext* ctx)
{
ceph_assert(oi.has_manifest());
ceph_assert(ctx->obc->ssc);
if (oi.manifest.is_chunked()) {
object_ref_delta_t refs;
ObjectContextRef obc_l, obc_g, obc;
/* in trim_object, oi and ctx can have different oid */
obc = get_object_context(oi.soid, false, NULL);
ceph_assert(obc);
get_adjacent_clones(obc, obc_l, obc_g);
oi.manifest.calc_refs_to_drop_on_removal(
obc_l ? &(obc_l->obs.oi.manifest) : nullptr,
obc_g ? &(obc_g->obs.oi.manifest) : nullptr,
refs);
if (!refs.is_empty()) {
/* dec_refcount will use head object anyway */
hobject_t soid = ctx->obc->obs.oi.soid;
ctx->register_on_commit(
[soid, this, refs](){
dec_refcount(soid, refs);
});
}
} else if (oi.manifest.is_redirect() &&
oi.test_flag(object_info_t::FLAG_REDIRECT_HAS_REFERENCE)) {
ctx->register_on_commit(
[oi, this](){
refcount_manifest(oi.soid, oi.manifest.redirect_target,
refcount_t::DECREMENT_REF, NULL, std::nullopt);
});
}
}
ceph_tid_t PrimaryLogPG::refcount_manifest(hobject_t src_soid, hobject_t tgt_soid, refcount_t type,
Context *cb, std::optional<bufferlist> chunk)
{
unsigned flags = CEPH_OSD_FLAG_IGNORE_CACHE | CEPH_OSD_FLAG_IGNORE_OVERLAY |
CEPH_OSD_FLAG_RWORDERED;
dout(10) << __func__ << " Start refcount from " << src_soid
<< " to " << tgt_soid << dendl;
ObjectOperation obj_op;
bufferlist in;
if (type == refcount_t::INCREMENT_REF) {
cls_cas_chunk_get_ref_op call;
call.source = src_soid.get_head();
::encode(call, in);
obj_op.call("cas", "chunk_get_ref", in);
} else if (type == refcount_t::DECREMENT_REF) {
cls_cas_chunk_put_ref_op call;
call.source = src_soid.get_head();
::encode(call, in);
obj_op.call("cas", "chunk_put_ref", in);
} else if (type == refcount_t::CREATE_OR_GET_REF) {
cls_cas_chunk_create_or_get_ref_op get_call;
get_call.source = src_soid.get_head();
ceph_assert(chunk);
get_call.data = std::move(*chunk);
::encode(get_call, in);
obj_op.call("cas", "chunk_create_or_get_ref", in);
} else {
ceph_assert(0 == "unrecognized type");
}
Context *c = nullptr;
if (cb) {
c = new C_OnFinisher(cb, osd->get_objecter_finisher(get_pg_shard()));
}
object_locator_t oloc(tgt_soid);
ObjectContextRef src_obc = get_object_context(src_soid, false, NULL);
ceph_assert(src_obc);
auto tid = osd->objecter->mutate(
tgt_soid.oid, oloc, obj_op, SnapContext(),
ceph::real_clock::from_ceph_timespec(src_obc->obs.oi.mtime),
flags, c);
return tid;
}
void PrimaryLogPG::do_proxy_chunked_read(OpRequestRef op, ObjectContextRef obc, int op_index,
uint64_t chunk_index, uint64_t req_offset, uint64_t req_length,
uint64_t req_total_len, bool write_ordered)
{
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
object_manifest_t *manifest = &obc->obs.oi.manifest;
if (!manifest->chunk_map.count(chunk_index)) {
return;
}
uint64_t chunk_length = manifest->chunk_map[chunk_index].length;
hobject_t soid = manifest->chunk_map[chunk_index].oid;
hobject_t ori_soid = m->get_hobj();
object_locator_t oloc(soid);
unsigned flags = CEPH_OSD_FLAG_IGNORE_CACHE | CEPH_OSD_FLAG_IGNORE_OVERLAY;
if (write_ordered) {
flags |= CEPH_OSD_FLAG_RWORDERED;
}
if (!chunk_length || soid == hobject_t()) {
return;
}
/* same as do_proxy_read() */
flags |= m->get_flags() & (CEPH_OSD_FLAG_RWORDERED |
CEPH_OSD_FLAG_ORDERSNAP |
CEPH_OSD_FLAG_ENFORCE_SNAPC |
CEPH_OSD_FLAG_MAP_SNAP_CLONE);
dout(10) << __func__ << " Start do chunk proxy read for " << *m
<< " index: " << op_index << " oid: " << soid.oid.name << " req_offset: " << req_offset
<< " req_length: " << req_length << dendl;
ProxyReadOpRef prdop(std::make_shared<ProxyReadOp>(op, ori_soid, m->ops));
ObjectOperation *pobj_op = new ObjectOperation;
OSDOp &osd_op = pobj_op->add_op(m->ops[op_index].op.op);
if (chunk_index <= req_offset) {
osd_op.op.extent.offset = manifest->chunk_map[chunk_index].offset + req_offset - chunk_index;
} else {
ceph_abort_msg("chunk_index > req_offset");
}
osd_op.op.extent.length = req_length;
ObjectOperation obj_op;
obj_op.dup(pobj_op->ops);
C_ProxyChunkRead *fin = new C_ProxyChunkRead(this, ori_soid, get_last_peering_reset(),
prdop);
fin->obj_op = pobj_op;
fin->op_index = op_index;
fin->req_offset = req_offset;
fin->obc = obc;
fin->req_total_len = req_total_len;
ceph_tid_t tid = osd->objecter->read(
soid.oid, oloc, obj_op,
m->get_snapid(), NULL,
flags, new C_OnFinisher(fin, osd->get_objecter_finisher(get_pg_shard())),
&prdop->user_version,
&prdop->data_offset,
m->get_features());
fin->tid = tid;
prdop->objecter_tid = tid;
proxyread_ops[tid] = prdop;
in_progress_proxy_ops[ori_soid].push_back(op);
}
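// A chunked read can be proxied only if every requested extent is covered
// by chunk_map entries that are still missing locally; any other op type or
// uncovered range means the read cannot be proxied.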
bool PrimaryLogPG::can_proxy_chunked_read(OpRequestRef op, ObjectContextRef obc)
{
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
OSDOp *osd_op = NULL;
bool ret = true;
for (unsigned int i = 0; i < m->ops.size(); i++) {
osd_op = &m->ops[i];
ceph_osd_op op = osd_op->op;
switch (op.op) {
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SYNC_READ: {
uint64_t cursor = osd_op->op.extent.offset;
uint64_t remain = osd_op->op.extent.length;
/* requested chunks exist in chunk_map ? */
for (auto &p : obc->obs.oi.manifest.chunk_map) {
if (p.first <= cursor && p.first + p.second.length > cursor) {
if (!p.second.is_missing()) {
return false;
}
if (p.second.length >= remain) {
remain = 0;
break;
} else {
remain = remain - p.second.length;
}
cursor += p.second.length;
}
}
if (remain) {
dout(20) << __func__ << " requested chunks don't exist in chunk_map " << dendl;
return false;
}
continue;
}
default:
return false;
}
}
return ret;
}
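// Completion of a proxied write on the target: drop the tracking state and
// send the commit reply back to the client (unless another instance of the
// same op is still in flight for this object).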
void PrimaryLogPG::finish_proxy_write(hobject_t oid, ceph_tid_t tid, int r)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<ceph_tid_t, ProxyWriteOpRef>::iterator p = proxywrite_ops.find(tid);
if (p == proxywrite_ops.end()) {
dout(10) << __func__ << " no proxywrite_op found" << dendl;
return;
}
ProxyWriteOpRef pwop = p->second;
ceph_assert(tid == pwop->objecter_tid);
ceph_assert(oid == pwop->soid);
proxywrite_ops.erase(tid);
map<hobject_t, list<OpRequestRef> >::iterator q = in_progress_proxy_ops.find(oid);
if (q == in_progress_proxy_ops.end()) {
dout(10) << __func__ << " no in_progress_proxy_ops found" << dendl;
delete pwop->ctx;
pwop->ctx = NULL;
return;
}
list<OpRequestRef>& in_progress_op = q->second;
ceph_assert(in_progress_op.size());
list<OpRequestRef>::iterator it = std::find(in_progress_op.begin(),
in_progress_op.end(),
pwop->op);
ceph_assert(it != in_progress_op.end());
in_progress_op.erase(it);
if (in_progress_op.size() == 0) {
in_progress_proxy_ops.erase(oid);
} else if (std::find(in_progress_op.begin(),
in_progress_op.end(),
pwop->op) != in_progress_op.end()) {
if (pwop->ctx)
delete pwop->ctx;
pwop->ctx = NULL;
dout(20) << __func__ << " " << oid << " tid " << tid
<< " in_progress_op size: "
<< in_progress_op.size() << dendl;
return;
}
osd->logger->inc(l_osd_tier_proxy_write);
auto m = pwop->op->get_req<MOSDOp>();
ceph_assert(m != NULL);
if (!pwop->sent_reply) {
// send commit.
assert(pwop->ctx->reply == nullptr);
MOSDOpReply *reply = new MOSDOpReply(m, r, get_osdmap_epoch(), 0,
true /* we claim it below */);
reply->set_reply_versions(eversion_t(), pwop->user_version);
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
reply->claim_op_out_data(pwop->ops);
dout(10) << " sending commit on " << pwop << " " << reply << dendl;
osd->send_message_osd_client(reply, m->get_connection());
pwop->sent_reply = true;
pwop->ctx->op->mark_commit_sent();
}
delete pwop->ctx;
pwop->ctx = NULL;
}
void PrimaryLogPG::cancel_proxy_write(ProxyWriteOpRef pwop,
vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << " " << pwop->soid << dendl;
pwop->canceled = true;
// cancel objecter op, if we can
if (pwop->objecter_tid) {
tids->push_back(pwop->objecter_tid);
delete pwop->ctx;
pwop->ctx = NULL;
proxywrite_ops.erase(pwop->objecter_tid);
pwop->objecter_tid = 0;
}
}
class PromoteCallback: public PrimaryLogPG::CopyCallback {
ObjectContextRef obc;
PrimaryLogPG *pg;
utime_t start;
public:
PromoteCallback(ObjectContextRef obc_, PrimaryLogPG *pg_)
: obc(obc_),
pg(pg_),
start(ceph_clock_now()) {}
void finish(PrimaryLogPG::CopyCallbackResults results) override {
PrimaryLogPG::CopyResults *results_data = results.get<1>();
int r = results.get<0>();
if (obc->obs.oi.has_manifest() && obc->obs.oi.manifest.is_chunked()) {
pg->finish_promote_manifest(r, results_data, obc);
} else {
pg->finish_promote(r, results_data, obc);
}
pg->osd->logger->tinc(l_osd_tier_promote_lat, ceph_clock_now() - start);
}
};
class PromoteManifestCallback: public PrimaryLogPG::CopyCallback {
ObjectContextRef obc;
PrimaryLogPG *pg;
utime_t start;
PrimaryLogPG::OpContext *ctx;
PrimaryLogPG::CopyCallbackResults promote_results;
public:
PromoteManifestCallback(ObjectContextRef obc_, PrimaryLogPG *pg_, PrimaryLogPG::OpContext *ctx)
: obc(obc_),
pg(pg_),
start(ceph_clock_now()), ctx(ctx) {}
void finish(PrimaryLogPG::CopyCallbackResults results) override {
PrimaryLogPG::CopyResults *results_data = results.get<1>();
int r = results.get<0>();
promote_results = results;
if (obc->obs.oi.has_manifest() && obc->obs.oi.manifest.is_redirect()) {
ctx->user_at_version = results_data->user_version;
}
if (r >= 0) {
ctx->pg->execute_ctx(ctx);
} else {
if (r != -ECANCELED) {
if (ctx->op)
ctx->pg->osd->reply_op_error(ctx->op, r);
} else if (results_data->should_requeue) {
if (ctx->op)
ctx->pg->requeue_op(ctx->op);
}
ctx->pg->close_op_ctx(ctx);
}
pg->osd->logger->tinc(l_osd_tier_promote_lat, ceph_clock_now() - start);
}
friend struct PromoteFinisher;
};
struct PromoteFinisher : public PrimaryLogPG::OpFinisher {
PromoteManifestCallback *promote_callback;
explicit PromoteFinisher(PromoteManifestCallback *promote_callback)
: promote_callback(promote_callback) {
}
int execute() override {
if (promote_callback->ctx->obc->obs.oi.manifest.is_redirect()) {
promote_callback->ctx->pg->finish_promote(promote_callback->promote_results.get<0>(),
promote_callback->promote_results.get<1>(),
promote_callback->obc);
} else if (promote_callback->ctx->obc->obs.oi.manifest.is_chunked()) {
promote_callback->ctx->pg->finish_promote_manifest(promote_callback->promote_results.get<0>(),
promote_callback->promote_results.get<1>(),
promote_callback->obc);
} else {
ceph_abort_msg("unrecognized manifest type");
}
return 0;
}
};
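// Promote an object into this pool by copying it from the base tier (or,
// for manifest objects, from the redirect target / chunk source): start the
// copy, leave the object context blocked, and park the triggering op until
// the promote completes.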
void PrimaryLogPG::promote_object(ObjectContextRef obc,
const hobject_t& missing_oid,
const object_locator_t& oloc,
OpRequestRef op,
ObjectContextRef *promote_obc)
{
hobject_t hoid = obc ? obc->obs.oi.soid : missing_oid;
ceph_assert(hoid != hobject_t());
if (m_scrubber->write_blocked_by_scrub(hoid)) {
dout(10) << __func__ << " " << hoid
<< " blocked by scrub" << dendl;
if (op) {
waiting_for_scrub.push_back(op);
op->mark_delayed("waiting for scrub");
dout(10) << __func__ << " " << hoid
<< " placing op in waiting_for_scrub" << dendl;
} else {
dout(10) << __func__ << " " << hoid
<< " no op, dropping on the floor" << dendl;
}
return;
}
if (op && !check_laggy_requeue(op)) {
return;
}
if (!obc) { // we need to create an ObjectContext
ceph_assert(missing_oid != hobject_t());
obc = get_object_context(missing_oid, true);
}
if (promote_obc)
*promote_obc = obc;
/*
   * If there are proxy reads in flight for the object before the promote
   * completes, don't use DONTNEED.
*/
unsigned src_fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL;
map<hobject_t, list<OpRequestRef>>::iterator q = in_progress_proxy_ops.find(obc->obs.oi.soid);
if (q == in_progress_proxy_ops.end()) {
src_fadvise_flags |= LIBRADOS_OP_FLAG_FADVISE_DONTNEED;
}
CopyCallback *cb;
object_locator_t my_oloc;
hobject_t src_hoid;
if (!obc->obs.oi.has_manifest()) {
my_oloc = oloc;
my_oloc.pool = pool.info.tier_of;
src_hoid = obc->obs.oi.soid;
cb = new PromoteCallback(obc, this);
} else {
if (obc->obs.oi.manifest.is_chunked()) {
src_hoid = obc->obs.oi.soid;
cb = new PromoteCallback(obc, this);
} else if (obc->obs.oi.manifest.is_redirect()) {
object_locator_t src_oloc(obc->obs.oi.manifest.redirect_target);
my_oloc = src_oloc;
src_hoid = obc->obs.oi.manifest.redirect_target;
cb = new PromoteCallback(obc, this);
} else {
ceph_abort_msg("unrecognized manifest type");
}
}
unsigned flags = CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY |
CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE |
CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE |
CEPH_OSD_COPY_FROM_FLAG_RWORDERED;
start_copy(cb, obc, src_hoid, my_oloc, 0, flags,
obc->obs.oi.soid.snap == CEPH_NOSNAP,
src_fadvise_flags, 0);
ceph_assert(obc->is_blocked());
if (op)
wait_for_blocked_object(obc->obs.oi.soid, op);
recovery_state.update_stats(
[](auto &history, auto &stats) {
stats.stats.sum.num_promote++;
return false;
});
}
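// Execute a prepared OpContext: build the transaction via
// prepare_transaction(), finish reads and errors immediately, and for
// writes allocate a RepGather and submit the update to the acting set.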
void PrimaryLogPG::execute_ctx(OpContext *ctx)
{
FUNCTRACE(cct);
dout(10) << __func__ << " " << ctx << dendl;
ctx->reset_obs(ctx->obc);
ctx->update_log_only = false; // reset in case finish_copyfrom() is re-running execute_ctx
OpRequestRef op = ctx->op;
auto m = op->get_req<MOSDOp>();
ObjectContextRef obc = ctx->obc;
const hobject_t& soid = obc->obs.oi.soid;
// this method must be idempotent since we may call it several times
// before we finally apply the resulting transaction.
ctx->op_t.reset(new PGTransaction);
if (op->may_write() || op->may_cache()) {
// snap
if (!(m->has_flag(CEPH_OSD_FLAG_ENFORCE_SNAPC)) &&
pool.info.is_pool_snaps_mode()) {
// use pool's snapc
ctx->snapc = pool.snapc;
} else {
// client specified snapc
ctx->snapc.seq = m->get_snap_seq();
ctx->snapc.snaps = m->get_snaps();
filter_snapc(ctx->snapc.snaps);
}
if ((m->has_flag(CEPH_OSD_FLAG_ORDERSNAP)) &&
ctx->snapc.seq < obc->ssc->snapset.seq) {
dout(10) << " ORDERSNAP flag set and snapc seq " << ctx->snapc.seq
<< " < snapset seq " << obc->ssc->snapset.seq
<< " on " << obc->obs.oi.soid << dendl;
reply_ctx(ctx, -EOLDSNAPC);
return;
}
// version
ctx->at_version = get_next_version();
ctx->mtime = m->get_mtime();
dout(10) << __func__ << " " << soid << " " << *ctx->ops
<< " ov " << obc->obs.oi.version << " av " << ctx->at_version
<< " snapc " << ctx->snapc
<< " snapset " << obc->ssc->snapset
<< dendl;
} else {
dout(10) << __func__ << " " << soid << " " << *ctx->ops
<< " ov " << obc->obs.oi.version
<< dendl;
}
if (!ctx->user_at_version)
ctx->user_at_version = obc->obs.oi.user_version;
dout(30) << __func__ << " user_at_version " << ctx->user_at_version << dendl;
{
#ifdef WITH_LTTNG
osd_reqid_t reqid = ctx->op->get_reqid();
#endif
tracepoint(osd, prepare_tx_enter, reqid.name._type,
reqid.name._num, reqid.tid, reqid.inc);
}
int result = prepare_transaction(ctx);
{
#ifdef WITH_LTTNG
osd_reqid_t reqid = ctx->op->get_reqid();
#endif
tracepoint(osd, prepare_tx_exit, reqid.name._type,
reqid.name._num, reqid.tid, reqid.inc);
}
bool pending_async_reads = !ctx->pending_async_reads.empty();
if (result == -EINPROGRESS || pending_async_reads) {
// come back later.
if (pending_async_reads) {
ceph_assert(pool.info.is_erasure());
in_progress_async_reads.push_back(make_pair(op, ctx));
ctx->start_async_reads(this);
}
return;
}
if (result == -EAGAIN) {
// clean up after the ctx
close_op_ctx(ctx);
return;
}
bool ignore_out_data = false;
if (!ctx->op_t->empty() &&
op->may_write() &&
result >= 0) {
// successful update
if (ctx->op->allows_returnvec()) {
// enforce reasonable bound on the return buffer sizes
for (auto& i : *ctx->ops) {
if (i.outdata.length() > cct->_conf->osd_max_write_op_reply_len) {
dout(10) << __func__ << " op " << i << " outdata overflow" << dendl;
result = -EOVERFLOW; // overall result is overflow
i.rval = -EOVERFLOW;
i.outdata.clear();
}
}
} else {
// legacy behavior -- zero result and return data etc.
ignore_out_data = true;
result = 0;
}
}
// prepare the reply
ctx->reply = new MOSDOpReply(m, result, get_osdmap_epoch(), 0,
ignore_out_data);
dout(20) << __func__ << " alloc reply " << ctx->reply
<< " result " << result << dendl;
// read or error?
if ((ctx->op_t->empty() || result < 0) && !ctx->update_log_only) {
// finish side-effects
if (result >= 0)
do_osd_op_effects(ctx, m->get_connection());
complete_read_ctx(result, ctx);
return;
}
ctx->reply->set_reply_versions(ctx->at_version, ctx->user_at_version);
ceph_assert(op->may_write() || op->may_cache());
// trim log?
recovery_state.update_trim_to();
// verify that we are doing this in order?
if (cct->_conf->osd_debug_op_order && m->get_source().is_client() &&
!pool.info.is_tier() && !pool.info.has_tiers()) {
map<client_t,ceph_tid_t>& cm = debug_op_order[obc->obs.oi.soid];
ceph_tid_t t = m->get_tid();
client_t n = m->get_source().num();
map<client_t,ceph_tid_t>::iterator p = cm.find(n);
if (p == cm.end()) {
dout(20) << " op order client." << n << " tid " << t << " (first)" << dendl;
cm[n] = t;
} else {
dout(20) << " op order client." << n << " tid " << t << " last was " << p->second << dendl;
if (p->second > t) {
derr << "bad op order, already applied " << p->second << " > this " << t << dendl;
ceph_abort_msg("out of order op");
}
p->second = t;
}
}
if (ctx->update_log_only) {
if (result >= 0)
do_osd_op_effects(ctx, m->get_connection());
dout(20) << __func__ << " update_log_only -- result=" << result << dendl;
// save just what we need from ctx
MOSDOpReply *reply = ctx->reply;
ctx->reply = nullptr;
reply->get_header().data_off = (ctx->data_off ? *ctx->data_off : 0);
if (result == -ENOENT) {
reply->set_enoent_reply_versions(info.last_update,
info.last_user_version);
}
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
// append to pg log for dup detection - don't save buffers for now
record_write_error(op, soid, reply, result,
ctx->op->allows_returnvec() ? ctx : nullptr);
close_op_ctx(ctx);
return;
}
// no need to capture PG ref, repop cancel will handle that
// Can capture the ctx by pointer, it's owned by the repop
ctx->register_on_commit(
[m, ctx, this](){
if (ctx->op)
log_op_stats(*ctx->op, ctx->bytes_written, ctx->bytes_read);
if (m && !ctx->sent_reply) {
MOSDOpReply *reply = ctx->reply;
ctx->reply = nullptr;
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
dout(10) << " sending reply on " << *m << " " << reply << dendl;
osd->send_message_osd_client(reply, m->get_connection());
ctx->sent_reply = true;
ctx->op->mark_commit_sent();
}
});
ctx->register_on_success(
[ctx, this]() {
do_osd_op_effects(
ctx,
ctx->op ? ctx->op->get_req()->get_connection() :
ConnectionRef());
});
ctx->register_on_finish(
[ctx]() {
delete ctx;
});
// issue replica writes
ceph_tid_t rep_tid = osd->get_tid();
RepGather *repop = new_repop(ctx, rep_tid);
issue_repop(repop, ctx);
eval_repop(repop);
repop->put();
}
void PrimaryLogPG::close_op_ctx(OpContext *ctx) {
release_object_locks(ctx->lock_manager);
ctx->op_t.reset();
for (auto p = ctx->on_finish.begin(); p != ctx->on_finish.end();
ctx->on_finish.erase(p++)) {
(*p)();
}
delete ctx;
}
void PrimaryLogPG::reply_ctx(OpContext *ctx, int r)
{
if (ctx->op)
osd->reply_op_error(ctx->op, r);
close_op_ctx(ctx);
}
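// Account a completed client op in the OSD perf counters (and the dynamic
// per-client stats, when enabled), split into read, write and rw classes.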
void PrimaryLogPG::log_op_stats(const OpRequest& op,
const uint64_t inb,
const uint64_t outb)
{
auto m = op.get_req<MOSDOp>();
const utime_t now = ceph_clock_now();
const utime_t latency = now - m->get_recv_stamp();
const utime_t process_latency = now - op.get_dequeued_time();
osd->logger->inc(l_osd_op);
osd->logger->inc(l_osd_op_outb, outb);
osd->logger->inc(l_osd_op_inb, inb);
osd->logger->tinc(l_osd_op_lat, latency);
osd->logger->tinc(l_osd_op_process_lat, process_latency);
if (op.may_read() && op.may_write()) {
osd->logger->inc(l_osd_op_rw);
osd->logger->inc(l_osd_op_rw_inb, inb);
osd->logger->inc(l_osd_op_rw_outb, outb);
osd->logger->tinc(l_osd_op_rw_lat, latency);
osd->logger->hinc(l_osd_op_rw_lat_inb_hist, latency.to_nsec(), inb);
osd->logger->hinc(l_osd_op_rw_lat_outb_hist, latency.to_nsec(), outb);
osd->logger->tinc(l_osd_op_rw_process_lat, process_latency);
} else if (op.may_read()) {
osd->logger->inc(l_osd_op_r);
osd->logger->inc(l_osd_op_r_outb, outb);
osd->logger->tinc(l_osd_op_r_lat, latency);
osd->logger->hinc(l_osd_op_r_lat_outb_hist, latency.to_nsec(), outb);
osd->logger->tinc(l_osd_op_r_process_lat, process_latency);
} else if (op.may_write() || op.may_cache()) {
osd->logger->inc(l_osd_op_w);
osd->logger->inc(l_osd_op_w_inb, inb);
osd->logger->tinc(l_osd_op_w_lat, latency);
osd->logger->hinc(l_osd_op_w_lat_inb_hist, latency.to_nsec(), inb);
osd->logger->tinc(l_osd_op_w_process_lat, process_latency);
} else {
ceph_abort();
}
dout(15) << "log_op_stats " << *m
<< " inb " << inb
<< " outb " << outb
<< " lat " << latency << dendl;
if (m_dynamic_perf_stats.is_enabled()) {
m_dynamic_perf_stats.add(osd, info, op, inb, outb, latency);
}
}
void PrimaryLogPG::set_dynamic_perf_stats_queries(
const std::list<OSDPerfMetricQuery> &queries)
{
m_dynamic_perf_stats.set_queries(queries);
}
void PrimaryLogPG::get_dynamic_perf_stats(DynamicPerfStats *stats)
{
std::swap(m_dynamic_perf_stats, *stats);
}
void PrimaryLogPG::do_scan(
OpRequestRef op,
ThreadPool::TPHandle &handle)
{
auto m = op->get_req<MOSDPGScan>();
ceph_assert(m->get_type() == MSG_OSD_PG_SCAN);
dout(10) << "do_scan " << *m << dendl;
op->mark_started();
switch (m->op) {
case MOSDPGScan::OP_SCAN_GET_DIGEST:
{
auto dpp = get_dpp();
if (osd->check_backfill_full(dpp)) {
dout(1) << __func__ << ": Canceling backfill: Full." << dendl;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::BackfillTooFull())));
return;
}
BackfillInterval bi;
bi.begin = m->begin;
      // No need to flush, there won't be any in-progress writes occurring
      // past m->begin
scan_range(
cct->_conf->osd_backfill_scan_min,
cct->_conf->osd_backfill_scan_max,
&bi,
handle);
MOSDPGScan *reply = new MOSDPGScan(
MOSDPGScan::OP_SCAN_DIGEST,
pg_whoami,
get_osdmap_epoch(), m->query_epoch,
spg_t(info.pgid.pgid, get_primary().shard), bi.begin, bi.end);
encode(bi.objects, reply->get_data());
osd->send_message_osd_cluster(reply, m->get_connection());
}
break;
case MOSDPGScan::OP_SCAN_DIGEST:
{
pg_shard_t from = m->from;
// Check that from is in backfill_targets vector
ceph_assert(is_backfill_target(from));
BackfillInterval& bi = peer_backfill_info[from];
bi.begin = m->begin;
bi.end = m->end;
auto p = m->get_data().cbegin();
// take care to preserve ordering!
bi.clear_objects();
decode_noclear(bi.objects, p);
dout(10) << __func__ << " bi.begin=" << bi.begin << " bi.end=" << bi.end
<< " bi.objects.size()=" << bi.objects.size() << dendl;
if (waiting_on_backfill.erase(from)) {
if (waiting_on_backfill.empty()) {
ceph_assert(
peer_backfill_info.size() ==
get_backfill_targets().size());
finish_recovery_op(hobject_t::get_max());
}
} else {
      // we canceled backfill for a while because a target was too full, and
      // this is an extra response from a non-too-full peer
dout(20) << __func__ << " canceled backfill (too full?)" << dendl;
}
}
break;
}
}
void PrimaryLogPG::do_backfill(OpRequestRef op)
{
auto m = op->get_req<MOSDPGBackfill>();
ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL);
dout(10) << "do_backfill " << *m << dendl;
op->mark_started();
switch (m->op) {
case MOSDPGBackfill::OP_BACKFILL_FINISH:
{
ceph_assert(cct->_conf->osd_kill_backfill_at != 1);
MOSDPGBackfill *reply = new MOSDPGBackfill(
MOSDPGBackfill::OP_BACKFILL_FINISH_ACK,
get_osdmap_epoch(),
m->query_epoch,
spg_t(info.pgid.pgid, get_primary().shard));
reply->set_priority(recovery_state.get_recovery_op_priority());
osd->send_message_osd_cluster(reply, m->get_connection());
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
RecoveryDone())));
}
// fall-thru
case MOSDPGBackfill::OP_BACKFILL_PROGRESS:
{
ceph_assert(cct->_conf->osd_kill_backfill_at != 2);
ObjectStore::Transaction t;
recovery_state.update_backfill_progress(
m->last_backfill,
m->stats,
m->op == MOSDPGBackfill::OP_BACKFILL_PROGRESS,
t);
int tr = osd->store->queue_transaction(ch, std::move(t), NULL);
ceph_assert(tr == 0);
}
break;
case MOSDPGBackfill::OP_BACKFILL_FINISH_ACK:
{
ceph_assert(is_primary());
ceph_assert(cct->_conf->osd_kill_backfill_at != 3);
finish_recovery_op(hobject_t::get_max());
}
break;
}
}
void PrimaryLogPG::do_backfill_remove(OpRequestRef op)
{
const MOSDPGBackfillRemove *m = static_cast<const MOSDPGBackfillRemove*>(
op->get_req());
ceph_assert(m->get_type() == MSG_OSD_PG_BACKFILL_REMOVE);
dout(7) << __func__ << " " << m->ls << dendl;
op->mark_started();
ObjectStore::Transaction t;
for (auto& p : m->ls) {
if (is_remote_backfilling()) {
struct stat st;
int r = osd->store->stat(ch, ghobject_t(p.first, ghobject_t::NO_GEN,
pg_whoami.shard) , &st);
if (r == 0) {
sub_local_num_bytes(st.st_size);
int64_t usersize;
if (pool.info.is_erasure()) {
bufferlist bv;
int r = osd->store->getattr(
ch,
ghobject_t(p.first, ghobject_t::NO_GEN, pg_whoami.shard),
OI_ATTR,
bv);
if (r >= 0) {
object_info_t oi(bv);
usersize = oi.size * pgbackend->get_ec_data_chunk_count();
} else {
dout(0) << __func__ << " " << ghobject_t(p.first, ghobject_t::NO_GEN, pg_whoami.shard)
<< " can't get object info" << dendl;
usersize = 0;
}
} else {
usersize = st.st_size;
}
sub_num_bytes(usersize);
dout(10) << __func__ << " " << ghobject_t(p.first, ghobject_t::NO_GEN, pg_whoami.shard)
<< " sub actual data by " << st.st_size
<< " sub num_bytes by " << usersize
<< dendl;
}
}
remove_snap_mapped_object(t, p.first);
}
int r = osd->store->queue_transaction(ch, std::move(t), NULL);
ceph_assert(r == 0);
}
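// Prepare (but do not submit) an OpContext that trims snap_to_trim from the
// clone 'coid': delete the clone when no snaps reference it any more,
// otherwise rewrite its clone_snaps, and update or remove the head snapset.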
int PrimaryLogPG::trim_object(
bool first, const hobject_t &coid, snapid_t snap_to_trim,
PrimaryLogPG::OpContextUPtr *ctxp)
{
*ctxp = NULL;
// load clone info
bufferlist bl;
ObjectContextRef obc = get_object_context(coid, false, NULL);
if (!obc || !obc->ssc || !obc->ssc->exists) {
osd->clog->error() << __func__ << ": Can not trim " << coid
<< " repair needed " << (obc ? "(no obc->ssc or !exists)" : "(no obc)");
return -ENOENT;
}
hobject_t head_oid = coid.get_head();
ObjectContextRef head_obc = get_object_context(head_oid, false);
if (!head_obc) {
osd->clog->error() << __func__ << ": Can not trim " << coid
<< " repair needed, no snapset obc for " << head_oid;
return -ENOENT;
}
SnapSet& snapset = obc->ssc->snapset;
object_info_t &coi = obc->obs.oi;
auto citer = snapset.clone_snaps.find(coid.snap);
if (citer == snapset.clone_snaps.end()) {
osd->clog->error() << "No clone_snaps in snapset " << snapset
<< " for object " << coid << "\n";
return -ENOENT;
}
set<snapid_t> old_snaps(citer->second.begin(), citer->second.end());
if (old_snaps.empty()) {
osd->clog->error() << "No object info snaps for object " << coid;
return -ENOENT;
}
dout(10) << coid << " old_snaps " << old_snaps
<< " old snapset " << snapset << dendl;
if (snapset.seq == 0) {
osd->clog->error() << "No snapset.seq for object " << coid;
return -ENOENT;
}
set<snapid_t> new_snaps;
const OSDMapRef& osdmap = get_osdmap();
for (set<snapid_t>::iterator i = old_snaps.begin();
i != old_snaps.end();
++i) {
if (!osdmap->in_removed_snaps_queue(info.pgid.pgid.pool(), *i) &&
*i != snap_to_trim) {
new_snaps.insert(*i);
}
}
vector<snapid_t>::iterator p = snapset.clones.end();
if (new_snaps.empty()) {
p = std::find(snapset.clones.begin(), snapset.clones.end(), coid.snap);
if (p == snapset.clones.end()) {
osd->clog->error() << "Snap " << coid.snap << " not in clones";
return -ENOENT;
}
}
OpContextUPtr ctx = simple_opc_create(obc);
ctx->head_obc = head_obc;
if (!ctx->lock_manager.get_snaptrimmer_write(
coid,
obc,
first)) {
close_op_ctx(ctx.release());
dout(10) << __func__ << ": Unable to get a wlock on " << coid << dendl;
return -ENOLCK;
}
if (!ctx->lock_manager.get_snaptrimmer_write(
head_oid,
head_obc,
first)) {
close_op_ctx(ctx.release());
dout(10) << __func__ << ": Unable to get a wlock on " << head_oid << dendl;
return -ENOLCK;
}
ctx->at_version = get_next_version();
PGTransaction *t = ctx->op_t.get();
int64_t num_objects_before_trim = ctx->delta_stats.num_objects;
if (new_snaps.empty()) {
// remove clone
dout(10) << coid << " snaps " << old_snaps << " -> "
<< new_snaps << " ... deleting" << dendl;
// ...from snapset
ceph_assert(p != snapset.clones.end());
snapid_t last = coid.snap;
ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(last);
if (p != snapset.clones.begin()) {
// not the oldest... merge overlap into next older clone
vector<snapid_t>::iterator n = p - 1;
hobject_t prev_coid = coid;
prev_coid.snap = *n;
bool adjust_prev_bytes = is_present_clone(prev_coid);
if (adjust_prev_bytes)
ctx->delta_stats.num_bytes -= snapset.get_clone_bytes(*n);
snapset.clone_overlap[*n].intersection_of(
snapset.clone_overlap[*p]);
if (adjust_prev_bytes)
ctx->delta_stats.num_bytes += snapset.get_clone_bytes(*n);
}
ctx->delta_stats.num_objects--;
if (coi.is_dirty())
ctx->delta_stats.num_objects_dirty--;
if (coi.is_omap())
ctx->delta_stats.num_objects_omap--;
if (coi.is_whiteout()) {
dout(20) << __func__ << " trimming whiteout on " << coid << dendl;
ctx->delta_stats.num_whiteouts--;
}
ctx->delta_stats.num_object_clones--;
if (coi.is_cache_pinned())
ctx->delta_stats.num_objects_pinned--;
if (coi.has_manifest()) {
dec_all_refcount_manifest(coi, ctx.get());
ctx->delta_stats.num_objects_manifest--;
}
obc->obs.exists = false;
snapset.clones.erase(p);
snapset.clone_overlap.erase(last);
snapset.clone_size.erase(last);
snapset.clone_snaps.erase(last);
ctx->log.push_back(
pg_log_entry_t(
pg_log_entry_t::DELETE,
coid,
ctx->at_version,
ctx->obs->oi.version,
0,
osd_reqid_t(),
ctx->mtime,
0)
);
t->remove(coid);
t->update_snaps(
coid,
old_snaps,
new_snaps);
coi = object_info_t(coid);
ctx->at_version.version++;
} else {
// save adjusted snaps for this object
dout(10) << coid << " snaps " << old_snaps << " -> " << new_snaps << dendl;
snapset.clone_snaps[coid.snap] =
vector<snapid_t>(new_snaps.rbegin(), new_snaps.rend());
// we still do a 'modify' event on this object just to trigger a
// snapmapper.update ... :(
coi.prior_version = coi.version;
coi.version = ctx->at_version;
bl.clear();
encode(coi, bl, get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
t->setattr(coid, OI_ATTR, bl);
ctx->log.push_back(
pg_log_entry_t(
pg_log_entry_t::MODIFY,
coid,
coi.version,
coi.prior_version,
0,
osd_reqid_t(),
ctx->mtime,
0)
);
ctx->at_version.version++;
t->update_snaps(
coid,
old_snaps,
new_snaps);
}
// save head snapset
dout(10) << coid << " new snapset " << snapset << " on "
<< head_obc->obs.oi << dendl;
if (snapset.clones.empty() &&
(head_obc->obs.oi.is_whiteout() &&
!(head_obc->obs.oi.is_dirty() && pool.info.is_tier()) &&
!head_obc->obs.oi.is_cache_pinned())) {
// NOTE: this arguably constitutes minor interference with the
// tiering agent if this is a cache tier since a snap trim event
// is effectively evicting a whiteout we might otherwise want to
// keep around.
dout(10) << coid << " removing " << head_oid << dendl;
ctx->log.push_back(
pg_log_entry_t(
pg_log_entry_t::DELETE,
head_oid,
ctx->at_version,
head_obc->obs.oi.version,
0,
osd_reqid_t(),
ctx->mtime,
0)
);
dout(10) << "removing snap head" << dendl;
object_info_t& oi = head_obc->obs.oi;
ctx->delta_stats.num_objects--;
if (oi.is_dirty()) {
ctx->delta_stats.num_objects_dirty--;
}
if (oi.is_omap())
ctx->delta_stats.num_objects_omap--;
if (oi.is_whiteout()) {
dout(20) << __func__ << " trimming whiteout on " << oi.soid << dendl;
ctx->delta_stats.num_whiteouts--;
}
if (oi.is_cache_pinned()) {
ctx->delta_stats.num_objects_pinned--;
}
if (oi.has_manifest()) {
ctx->delta_stats.num_objects_manifest--;
dec_all_refcount_manifest(oi, ctx.get());
}
head_obc->obs.exists = false;
head_obc->obs.oi = object_info_t(head_oid);
t->remove(head_oid);
} else {
if (get_osdmap()->require_osd_release < ceph_release_t::octopus) {
// filter SnapSet::snaps for the benefit of pre-octopus
// peers. This is perhaps overly conservative in that I'm not
// certain they need this, but let's be conservative here.
dout(10) << coid << " filtering snapset on " << head_oid << dendl;
snapset.filter(pool.info);
} else {
snapset.snaps.clear();
}
dout(10) << coid << " writing updated snapset on " << head_oid
<< ", snapset is " << snapset << dendl;
ctx->log.push_back(
pg_log_entry_t(
pg_log_entry_t::MODIFY,
head_oid,
ctx->at_version,
head_obc->obs.oi.version,
0,
osd_reqid_t(),
ctx->mtime,
0)
);
head_obc->obs.oi.prior_version = head_obc->obs.oi.version;
head_obc->obs.oi.version = ctx->at_version;
map <string, bufferlist, less<>> attrs;
bl.clear();
encode(snapset, bl);
attrs[SS_ATTR] = std::move(bl);
bl.clear();
encode(head_obc->obs.oi, bl,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
attrs[OI_ATTR] = std::move(bl);
t->setattrs(head_oid, attrs);
}
// Stats reporting - Set number of objects trimmed
if (num_objects_before_trim > ctx->delta_stats.num_objects) {
int64_t num_objects_trimmed =
num_objects_before_trim - ctx->delta_stats.num_objects;
add_objects_trimmed_count(num_objects_trimmed);
}
*ctxp = std::move(ctx);
return 0;
}
void PrimaryLogPG::kick_snap_trim()
{
ceph_assert(is_active());
ceph_assert(is_primary());
if (is_clean() &&
!state_test(PG_STATE_PREMERGE) &&
!snap_trimq.empty()) {
if (get_osdmap()->test_flag(CEPH_OSDMAP_NOSNAPTRIM)) {
dout(10) << __func__ << ": nosnaptrim set, not kicking" << dendl;
} else {
dout(10) << __func__ << ": clean and snaps to trim, kicking" << dendl;
reset_objects_trimmed();
set_snaptrim_begin_stamp();
snap_trimmer_machine.process_event(KickTrim());
}
}
}
void PrimaryLogPG::snap_trimmer_scrub_complete()
{
if (is_primary() && is_active() && is_clean() && !snap_trimq.empty()) {
dout(10) << "scrub finished - requeuing snap_trimmer" << dendl;
snap_trimmer_machine.process_event(ScrubComplete());
}
}
void PrimaryLogPG::snap_trimmer(epoch_t queued)
{
if (recovery_state.is_deleting() || pg_has_reset_since(queued)) {
return;
}
ceph_assert(is_primary());
dout(10) << "snap_trimmer posting" << dendl;
snap_trimmer_machine.process_event(DoSnapWork());
dout(10) << "snap_trimmer complete" << dendl;
return;
}
namespace {
template<typename U, typename V>
int do_cmp_xattr(int op, const U& lhs, const V& rhs)
{
switch (op) {
case CEPH_OSD_CMPXATTR_OP_EQ:
return lhs == rhs;
case CEPH_OSD_CMPXATTR_OP_NE:
return lhs != rhs;
case CEPH_OSD_CMPXATTR_OP_GT:
return lhs > rhs;
case CEPH_OSD_CMPXATTR_OP_GTE:
return lhs >= rhs;
case CEPH_OSD_CMPXATTR_OP_LT:
return lhs < rhs;
case CEPH_OSD_CMPXATTR_OP_LTE:
return lhs <= rhs;
default:
return -EINVAL;
}
}
} // anonymous namespace
int PrimaryLogPG::do_xattr_cmp_u64(int op, uint64_t v1, bufferlist& xattr)
{
uint64_t v2;
if (xattr.length()) {
const char* first = xattr.c_str();
if (auto [p, ec] = std::from_chars(first, first + xattr.length(), v2);
ec != std::errc()) {
return -EINVAL;
}
} else {
v2 = 0;
}
dout(20) << "do_xattr_cmp_u64 '" << v1 << "' vs '" << v2 << "' op " << op << dendl;
return do_cmp_xattr(op, v1, v2);
}
int PrimaryLogPG::do_xattr_cmp_str(int op, string& v1s, bufferlist& xattr)
{
string_view v2s(xattr.c_str(), xattr.length());
dout(20) << "do_xattr_cmp_str '" << v1s << "' vs '" << v2s << "' op " << op << dendl;
return do_cmp_xattr(op, v1s, v2s);
}
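// Implement WRITESAME by repeating the supplied data pattern until it fills
// the requested length and issuing the result as a normal WRITE through
// do_osd_ops().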
int PrimaryLogPG::do_writesame(OpContext *ctx, OSDOp& osd_op)
{
ceph_osd_op& op = osd_op.op;
vector<OSDOp> write_ops(1);
OSDOp& write_op = write_ops[0];
uint64_t write_length = op.writesame.length;
int result = 0;
if (!write_length)
return 0;
if (!op.writesame.data_length || write_length % op.writesame.data_length)
return -EINVAL;
if (op.writesame.data_length != osd_op.indata.length()) {
derr << "invalid length ws data length " << op.writesame.data_length << " actual len " << osd_op.indata.length() << dendl;
return -EINVAL;
}
while (write_length) {
write_op.indata.append(osd_op.indata);
write_length -= op.writesame.data_length;
}
write_op.op.op = CEPH_OSD_OP_WRITE;
write_op.op.extent.offset = op.writesame.offset;
write_op.op.extent.length = op.writesame.length;
result = do_osd_ops(ctx, write_ops);
if (result < 0)
derr << "do_writesame do_osd_ops failed " << result << dendl;
return result;
}
// ========================================================================
// low level osd ops
int PrimaryLogPG::do_tmap2omap(OpContext *ctx, unsigned flags)
{
dout(20) << " convert tmap to omap for " << ctx->new_obs.oi.soid << dendl;
bufferlist header, vals;
int r = _get_tmap(ctx, &header, &vals);
if (r < 0) {
if (r == -ENODATA && (flags & CEPH_OSD_TMAP2OMAP_NULLOK))
r = 0;
return r;
}
vector<OSDOp> ops(3);
ops[0].op.op = CEPH_OSD_OP_TRUNCATE;
ops[0].op.extent.offset = 0;
ops[0].op.extent.length = 0;
ops[1].op.op = CEPH_OSD_OP_OMAPSETHEADER;
ops[1].indata = std::move(header);
ops[2].op.op = CEPH_OSD_OP_OMAPSETVALS;
ops[2].indata = std::move(vals);
return do_osd_ops(ctx, ops);
}
int PrimaryLogPG::do_tmapup_slow(OpContext *ctx, bufferlist::const_iterator& bp,
OSDOp& osd_op, bufferlist& bl)
{
// decode
bufferlist header;
map<string, bufferlist> m;
if (bl.length()) {
auto p = bl.cbegin();
decode(header, p);
decode(m, p);
ceph_assert(p.end());
}
// do the update(s)
while (!bp.end()) {
__u8 op;
string key;
decode(op, bp);
switch (op) {
case CEPH_OSD_TMAP_SET: // insert key
{
decode(key, bp);
bufferlist data;
decode(data, bp);
m[key] = data;
}
break;
case CEPH_OSD_TMAP_RM: // remove key
decode(key, bp);
if (!m.count(key)) {
return -ENOENT;
}
m.erase(key);
break;
case CEPH_OSD_TMAP_RMSLOPPY: // remove key
decode(key, bp);
m.erase(key);
break;
case CEPH_OSD_TMAP_HDR: // update header
{
decode(header, bp);
}
break;
default:
return -EINVAL;
}
}
// reencode
bufferlist obl;
encode(header, obl);
encode(m, obl);
// write it out
vector<OSDOp> nops(1);
OSDOp& newop = nops[0];
newop.op.op = CEPH_OSD_OP_WRITEFULL;
newop.op.extent.offset = 0;
newop.op.extent.length = obl.length();
newop.indata = obl;
do_osd_ops(ctx, nops);
return 0;
}
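// Apply a TMAPUP update stream to the object's tmap: read the current
// contents, merge the (sorted) update ops with the existing keys, fall back
// to do_tmapup_slow() if the updates turn out to be unsorted, and write the
// re-encoded tmap back with WRITEFULL.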
int PrimaryLogPG::do_tmapup(OpContext *ctx, bufferlist::const_iterator& bp, OSDOp& osd_op)
{
bufferlist::const_iterator orig_bp = bp;
int result = 0;
if (bp.end()) {
dout(10) << "tmapup is a no-op" << dendl;
} else {
// read the whole object
vector<OSDOp> nops(1);
OSDOp& newop = nops[0];
newop.op.op = CEPH_OSD_OP_READ;
newop.op.extent.offset = 0;
newop.op.extent.length = 0;
result = do_osd_ops(ctx, nops);
dout(10) << "tmapup read " << newop.outdata.length() << dendl;
dout(30) << " starting is \n";
newop.outdata.hexdump(*_dout);
*_dout << dendl;
auto ip = newop.outdata.cbegin();
bufferlist obl;
dout(30) << "the update command is: \n";
osd_op.indata.hexdump(*_dout);
*_dout << dendl;
// header
bufferlist header;
__u32 nkeys = 0;
if (newop.outdata.length()) {
decode(header, ip);
decode(nkeys, ip);
}
dout(10) << "tmapup header " << header.length() << dendl;
if (!bp.end() && *bp == CEPH_OSD_TMAP_HDR) {
++bp;
decode(header, bp);
dout(10) << "tmapup new header " << header.length() << dendl;
}
encode(header, obl);
dout(20) << "tmapup initial nkeys " << nkeys << dendl;
// update keys
bufferlist newkeydata;
string nextkey, last_in_key;
bufferlist nextval;
bool have_next = false;
if (!ip.end()) {
have_next = true;
decode(nextkey, ip);
decode(nextval, ip);
}
while (!bp.end() && !result) {
__u8 op;
string key;
try {
decode(op, bp);
decode(key, bp);
}
catch (ceph::buffer::error& e) {
return -EINVAL;
}
if (key < last_in_key) {
dout(5) << "tmapup warning: key '" << key << "' < previous key '" << last_in_key
<< "', falling back to an inefficient (unsorted) update" << dendl;
bp = orig_bp;
return do_tmapup_slow(ctx, bp, osd_op, newop.outdata);
}
last_in_key = key;
dout(10) << "tmapup op " << (int)op << " key " << key << dendl;
// skip existing intervening keys
bool key_exists = false;
while (have_next && !key_exists) {
dout(20) << " (have_next=" << have_next << " nextkey=" << nextkey << ")" << dendl;
if (nextkey > key)
break;
if (nextkey < key) {
// copy untouched.
encode(nextkey, newkeydata);
encode(nextval, newkeydata);
dout(20) << " keep " << nextkey << " " << nextval.length() << dendl;
} else {
// don't copy; discard old value. and stop.
dout(20) << " drop " << nextkey << " " << nextval.length() << dendl;
key_exists = true;
nkeys--;
}
if (!ip.end()) {
decode(nextkey, ip);
decode(nextval, ip);
} else {
have_next = false;
}
}
if (op == CEPH_OSD_TMAP_SET) {
bufferlist val;
try {
decode(val, bp);
}
catch (ceph::buffer::error& e) {
return -EINVAL;
}
encode(key, newkeydata);
encode(val, newkeydata);
dout(20) << " set " << key << " " << val.length() << dendl;
nkeys++;
} else if (op == CEPH_OSD_TMAP_CREATE) {
if (key_exists) {
return -EEXIST;
}
bufferlist val;
try {
decode(val, bp);
}
catch (ceph::buffer::error& e) {
return -EINVAL;
}
encode(key, newkeydata);
encode(val, newkeydata);
dout(20) << " create " << key << " " << val.length() << dendl;
nkeys++;
} else if (op == CEPH_OSD_TMAP_RM) {
// do nothing.
if (!key_exists) {
return -ENOENT;
}
} else if (op == CEPH_OSD_TMAP_RMSLOPPY) {
// do nothing
} else {
dout(10) << " invalid tmap op " << (int)op << dendl;
return -EINVAL;
}
}
// copy remaining
if (have_next) {
encode(nextkey, newkeydata);
encode(nextval, newkeydata);
dout(20) << " keep " << nextkey << " " << nextval.length() << dendl;
}
if (!ip.end()) {
bufferlist rest;
rest.substr_of(newop.outdata, ip.get_off(), newop.outdata.length() - ip.get_off());
dout(20) << " keep trailing " << rest.length()
<< " at " << newkeydata.length() << dendl;
newkeydata.claim_append(rest);
}
// encode final key count + key data
dout(20) << "tmapup final nkeys " << nkeys << dendl;
encode(nkeys, obl);
obl.claim_append(newkeydata);
if (0) {
dout(30) << " final is \n";
obl.hexdump(*_dout);
*_dout << dendl;
// sanity check
auto tp = obl.cbegin();
bufferlist h;
decode(h, tp);
map<string,bufferlist> d;
decode(d, tp);
ceph_assert(tp.end());
dout(0) << " **** debug sanity check, looks ok ****" << dendl;
}
// write it out
if (!result) {
dout(20) << "tmapput write " << obl.length() << dendl;
newop.op.op = CEPH_OSD_OP_WRITEFULL;
newop.op.extent.offset = 0;
newop.op.extent.length = obl.length();
newop.indata = obl;
do_osd_ops(ctx, nops);
}
}
return result;
}
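// Reject I/O whose offset or length lies beyond 'max' (osd_max_object_size)
// or whose end would overflow past it; returns -EFBIG on violation,
// otherwise 0.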
static int check_offset_and_length(uint64_t offset, uint64_t length,
uint64_t max, DoutPrefixProvider *dpp)
{
if (offset >= max ||
length > max ||
offset + length > max) {
ldpp_dout(dpp, 10) << __func__ << " "
<< "osd_max_object_size: " << max
<< "; Hard limit of object size is 4GB." << dendl;
return -EFBIG;
}
return 0;
}
struct FillInVerifyExtent : public Context {
ceph_le64 *r;
int32_t *rval;
bufferlist *outdatap;
std::optional<uint32_t> maybe_crc;
uint64_t size;
OSDService *osd;
hobject_t soid;
uint32_t flags;
FillInVerifyExtent(ceph_le64 *r, int32_t *rv, bufferlist *blp,
std::optional<uint32_t> mc, uint64_t size,
OSDService *osd, hobject_t soid, uint32_t flags) :
r(r), rval(rv), outdatap(blp), maybe_crc(mc),
size(size), osd(osd), soid(soid), flags(flags) {}
void finish(int len) override {
if (len < 0) {
*rval = len;
return;
}
*r = len;
*rval = 0;
// whole object? can we verify the checksum?
if (maybe_crc && *r == size) {
uint32_t crc = outdatap->crc32c(-1);
if (maybe_crc != crc) {
osd->clog->error() << std::hex << " full-object read crc 0x" << crc
<< " != expected 0x" << *maybe_crc
<< std::dec << " on " << soid;
if (!(flags & CEPH_OSD_OP_FLAG_FAILOK)) {
*rval = -EIO;
*r = 0;
}
}
}
}
};
struct ToSparseReadResult : public Context {
int* result;
bufferlist* data_bl;
uint64_t data_offset;
ceph_le64* len;
ToSparseReadResult(int* result, bufferlist* bl, uint64_t offset,
ceph_le64* len)
: result(result), data_bl(bl), data_offset(offset),len(len) {}
void finish(int r) override {
if (r < 0) {
*result = r;
return;
}
*result = 0;
*len = r;
bufferlist outdata;
map<uint64_t, uint64_t> extents = {{data_offset, r}};
encode(extents, outdata);
encode_destructively(*data_bl, outdata);
data_bl->swap(outdata);
}
};
template<typename V>
static string list_keys(const map<string, V>& m) {
string s;
for (typename map<string, V>::const_iterator itr = m.begin(); itr != m.end(); ++itr) {
if (!s.empty()) {
s.push_back(',');
}
s.append(itr->first);
}
return s;
}
template<typename T>
static string list_entries(const T& m) {
string s;
for (typename T::const_iterator itr = m.begin(); itr != m.end(); ++itr) {
if (!s.empty()) {
s.push_back(',');
}
s.append(*itr);
}
return s;
}
void PrimaryLogPG::maybe_create_new_object(
OpContext *ctx,
bool ignore_transaction)
{
ObjectState& obs = ctx->new_obs;
if (!obs.exists) {
ctx->delta_stats.num_objects++;
obs.exists = true;
ceph_assert(!obs.oi.is_whiteout());
obs.oi.new_object();
if (!ignore_transaction)
ctx->op_t->create(obs.oi.soid);
} else if (obs.oi.is_whiteout()) {
dout(10) << __func__ << " clearing whiteout on " << obs.oi.soid << dendl;
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_WHITEOUT);
--ctx->delta_stats.num_whiteouts;
}
}
struct ReadFinisher : public PrimaryLogPG::OpFinisher {
OSDOp& osd_op;
explicit ReadFinisher(OSDOp& osd_op) : osd_op(osd_op) {
}
int execute() override {
return osd_op.rval;
}
};
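// Completion for CEPH_OSD_OP_CHECKSUM on erasure-coded pools: first fills in
// the read extent (via FillInVerifyExtent), then computes the requested
// checksum over the returned data.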
struct C_ChecksumRead : public Context {
PrimaryLogPG *primary_log_pg;
OSDOp &osd_op;
Checksummer::CSumType csum_type;
bufferlist init_value_bl;
ceph_le64 read_length;
bufferlist read_bl;
Context *fill_extent_ctx;
C_ChecksumRead(PrimaryLogPG *primary_log_pg, OSDOp &osd_op,
Checksummer::CSumType csum_type, bufferlist &&init_value_bl,
std::optional<uint32_t> maybe_crc, uint64_t size,
OSDService *osd, hobject_t soid, uint32_t flags)
: primary_log_pg(primary_log_pg), osd_op(osd_op),
csum_type(csum_type), init_value_bl(std::move(init_value_bl)),
fill_extent_ctx(new FillInVerifyExtent(&read_length, &osd_op.rval,
&read_bl, maybe_crc, size,
osd, soid, flags)) {
}
~C_ChecksumRead() override {
delete fill_extent_ctx;
}
void finish(int r) override {
fill_extent_ctx->complete(r);
fill_extent_ctx = nullptr;
if (osd_op.rval >= 0) {
bufferlist::const_iterator init_value_bl_it = init_value_bl.begin();
osd_op.rval = primary_log_pg->finish_checksum(osd_op, csum_type,
&init_value_bl_it, read_bl);
}
}
};
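// Handle CEPH_OSD_OP_CHECKSUM: validate the checksum parameters, read the
// requested range (asynchronously for EC pools), and return the per-chunk
// checksums via finish_checksum().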
int PrimaryLogPG::do_checksum(OpContext *ctx, OSDOp& osd_op,
bufferlist::const_iterator *bl_it)
{
dout(20) << __func__ << dendl;
auto& op = osd_op.op;
if (op.checksum.chunk_size > 0) {
if (op.checksum.length == 0) {
dout(10) << __func__ << ": length required when chunk size provided"
<< dendl;
return -EINVAL;
}
if (op.checksum.length % op.checksum.chunk_size != 0) {
dout(10) << __func__ << ": length not aligned to chunk size" << dendl;
return -EINVAL;
}
}
auto& oi = ctx->new_obs.oi;
if (op.checksum.offset == 0 && op.checksum.length == 0) {
// zeroed offset+length implies checksum whole object
op.checksum.length = oi.size;
} else if (op.checksum.offset >= oi.size) {
// read size was trimmed to zero, do nothing
// see PrimaryLogPG::do_read
return 0;
} else if (op.extent.offset + op.extent.length > oi.size) {
op.extent.length = oi.size - op.extent.offset;
if (op.checksum.chunk_size > 0 &&
op.checksum.length % op.checksum.chunk_size != 0) {
dout(10) << __func__ << ": length (trimmed to 0x"
<< std::hex << op.checksum.length
<< ") not aligned to chunk size 0x"
<< op.checksum.chunk_size << std::dec
<< dendl;
return -EINVAL;
}
}
Checksummer::CSumType csum_type;
switch (op.checksum.type) {
case CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH32:
csum_type = Checksummer::CSUM_XXHASH32;
break;
case CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH64:
csum_type = Checksummer::CSUM_XXHASH64;
break;
case CEPH_OSD_CHECKSUM_OP_TYPE_CRC32C:
csum_type = Checksummer::CSUM_CRC32C;
break;
default:
dout(10) << __func__ << ": unknown crc type ("
<< static_cast<uint32_t>(op.checksum.type) << ")" << dendl;
return -EINVAL;
}
size_t csum_init_value_size = Checksummer::get_csum_init_value_size(csum_type);
if (bl_it->get_remaining() < csum_init_value_size) {
dout(10) << __func__ << ": init value not provided" << dendl;
return -EINVAL;
}
bufferlist init_value_bl;
init_value_bl.substr_of(bl_it->get_bl(), bl_it->get_off(),
csum_init_value_size);
*bl_it += csum_init_value_size;
if (pool.info.is_erasure() && op.checksum.length > 0) {
// If there is a data digest and it is possible we are reading
// entire object, pass the digest.
std::optional<uint32_t> maybe_crc;
if (oi.is_data_digest() && op.checksum.offset == 0 &&
op.checksum.length >= oi.size) {
maybe_crc = oi.data_digest;
}
// async read
auto& soid = oi.soid;
auto checksum_ctx = new C_ChecksumRead(this, osd_op, csum_type,
std::move(init_value_bl), maybe_crc,
oi.size, osd, soid, op.flags);
ctx->pending_async_reads.push_back({
{op.checksum.offset, op.checksum.length, op.flags},
{&checksum_ctx->read_bl, checksum_ctx}});
dout(10) << __func__ << ": async_read noted for " << soid << dendl;
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new ReadFinisher(osd_op));
return -EINPROGRESS;
}
// sync read
std::vector<OSDOp> read_ops(1);
auto& read_op = read_ops[0];
if (op.checksum.length > 0) {
read_op.op.op = CEPH_OSD_OP_READ;
read_op.op.flags = op.flags;
read_op.op.extent.offset = op.checksum.offset;
read_op.op.extent.length = op.checksum.length;
read_op.op.extent.truncate_size = 0;
read_op.op.extent.truncate_seq = 0;
int r = do_osd_ops(ctx, read_ops);
if (r < 0) {
derr << __func__ << ": do_osd_ops failed: " << cpp_strerror(r) << dendl;
return r;
}
}
bufferlist::const_iterator init_value_bl_it = init_value_bl.begin();
return finish_checksum(osd_op, csum_type, &init_value_bl_it,
read_op.outdata);
}
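// Compute the per-chunk checksums over read_bl and encode the checksum count
// followed by the checksum values into the op's outdata.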
int PrimaryLogPG::finish_checksum(OSDOp& osd_op,
Checksummer::CSumType csum_type,
bufferlist::const_iterator *init_value_bl_it,
const bufferlist &read_bl) {
dout(20) << __func__ << dendl;
auto& op = osd_op.op;
if (op.checksum.length > 0 && read_bl.length() != op.checksum.length) {
derr << __func__ << ": bytes read " << read_bl.length() << " != "
<< op.checksum.length << dendl;
return -EINVAL;
}
size_t csum_chunk_size = (op.checksum.chunk_size != 0 ?
op.checksum.chunk_size : read_bl.length());
uint32_t csum_count = (csum_chunk_size > 0 ?
read_bl.length() / csum_chunk_size : 0);
bufferlist csum;
bufferptr csum_data;
if (csum_count > 0) {
size_t csum_value_size = Checksummer::get_csum_value_size(csum_type);
csum_data = ceph::buffer::create(csum_value_size * csum_count);
csum_data.zero();
csum.append(csum_data);
switch (csum_type) {
case Checksummer::CSUM_XXHASH32:
{
Checksummer::xxhash32::init_value_t init_value;
decode(init_value, *init_value_bl_it);
Checksummer::calculate<Checksummer::xxhash32>(
init_value, csum_chunk_size, 0, read_bl.length(), read_bl,
&csum_data);
}
break;
case Checksummer::CSUM_XXHASH64:
{
Checksummer::xxhash64::init_value_t init_value;
decode(init_value, *init_value_bl_it);
Checksummer::calculate<Checksummer::xxhash64>(
init_value, csum_chunk_size, 0, read_bl.length(), read_bl,
&csum_data);
}
break;
case Checksummer::CSUM_CRC32C:
{
Checksummer::crc32c::init_value_t init_value;
decode(init_value, *init_value_bl_it);
Checksummer::calculate<Checksummer::crc32c>(
init_value, csum_chunk_size, 0, read_bl.length(), read_bl,
&csum_data);
}
break;
default:
break;
}
}
encode(csum_count, osd_op.outdata);
osd_op.outdata.claim_append(csum);
return 0;
}
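// Completion for CEPH_OSD_OP_CMPEXT on erasure-coded pools: fills in the read
// extent, then compares the returned data against the client-provided bytes.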
struct C_ExtentCmpRead : public Context {
PrimaryLogPG *primary_log_pg;
OSDOp &osd_op;
ceph_le64 read_length{};
bufferlist read_bl;
Context *fill_extent_ctx;
C_ExtentCmpRead(PrimaryLogPG *primary_log_pg, OSDOp &osd_op,
std::optional<uint32_t> maybe_crc, uint64_t size,
OSDService *osd, hobject_t soid, uint32_t flags)
: primary_log_pg(primary_log_pg), osd_op(osd_op),
fill_extent_ctx(new FillInVerifyExtent(&read_length, &osd_op.rval,
&read_bl, maybe_crc, size,
osd, soid, flags)) {
}
~C_ExtentCmpRead() override {
delete fill_extent_ctx;
}
void finish(int r) override {
if (r == -ENOENT) {
osd_op.rval = 0;
read_bl.clear();
delete fill_extent_ctx;
} else {
fill_extent_ctx->complete(r);
}
fill_extent_ctx = nullptr;
if (osd_op.rval >= 0) {
osd_op.rval = primary_log_pg->finish_extent_cmp(osd_op, read_bl);
}
}
};
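// Handle CEPH_OSD_OP_CMPEXT: read the target extent (asynchronously for EC
// pools) and compare it byte-for-byte with the data supplied by the client.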
int PrimaryLogPG::do_extent_cmp(OpContext *ctx, OSDOp& osd_op)
{
dout(20) << __func__ << dendl;
ceph_osd_op& op = osd_op.op;
auto& oi = ctx->new_obs.oi;
uint64_t size = oi.size;
if ((oi.truncate_seq < op.extent.truncate_seq) &&
(op.extent.offset + op.extent.length > op.extent.truncate_size)) {
size = op.extent.truncate_size;
}
if (op.extent.offset >= size) {
op.extent.length = 0;
} else if (op.extent.offset + op.extent.length > size) {
op.extent.length = size - op.extent.offset;
}
if (op.extent.length == 0) {
dout(20) << __func__ << " zero length extent" << dendl;
return finish_extent_cmp(osd_op, bufferlist{});
} else if (!ctx->obs->exists || ctx->obs->oi.is_whiteout()) {
dout(20) << __func__ << " object DNE" << dendl;
return finish_extent_cmp(osd_op, {});
} else if (pool.info.is_erasure()) {
// If there is a data digest and it is possible we are reading
// entire object, pass the digest.
std::optional<uint32_t> maybe_crc;
if (oi.is_data_digest() && op.checksum.offset == 0 &&
op.checksum.length >= oi.size) {
maybe_crc = oi.data_digest;
}
// async read
auto& soid = oi.soid;
auto extent_cmp_ctx = new C_ExtentCmpRead(this, osd_op, maybe_crc, oi.size,
osd, soid, op.flags);
ctx->pending_async_reads.push_back({
{op.extent.offset, op.extent.length, op.flags},
{&extent_cmp_ctx->read_bl, extent_cmp_ctx}});
dout(10) << __func__ << ": async_read noted for " << soid << dendl;
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new ReadFinisher(osd_op));
return -EINPROGRESS;
}
// sync read
vector<OSDOp> read_ops(1);
OSDOp& read_op = read_ops[0];
read_op.op.op = CEPH_OSD_OP_SYNC_READ;
read_op.op.extent.offset = op.extent.offset;
read_op.op.extent.length = op.extent.length;
read_op.op.extent.truncate_seq = op.extent.truncate_seq;
read_op.op.extent.truncate_size = op.extent.truncate_size;
int result = do_osd_ops(ctx, read_ops);
if (result < 0) {
derr << __func__ << " failed " << result << dendl;
return result;
}
return finish_extent_cmp(osd_op, read_op.outdata);
}
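// Compare the client-provided bytes against what was read; a missing or short
// read compares as zeroes.  On the first mismatch return -MAX_ERRNO - idx so
// the caller can recover the offending offset.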
int PrimaryLogPG::finish_extent_cmp(OSDOp& osd_op, const bufferlist &read_bl)
{
for (uint64_t idx = 0; idx < osd_op.indata.length(); ++idx) {
char read_byte = (idx < read_bl.length() ? read_bl[idx] : 0);
if (osd_op.indata[idx] != read_byte) {
return (-MAX_ERRNO - idx);
}
}
return 0;
}
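// Handle CEPH_OSD_OP_READ/SYNC_READ: trim the extent against truncate_seq and
// the object size, then read synchronously (replicated pools) or queue an
// async read (EC pools), verifying the whole-object digest when possible.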
int PrimaryLogPG::do_read(OpContext *ctx, OSDOp& osd_op) {
dout(20) << __func__ << dendl;
auto& op = osd_op.op;
auto& oi = ctx->new_obs.oi;
auto& soid = oi.soid;
__u32 seq = oi.truncate_seq;
uint64_t size = oi.size;
bool trimmed_read = false;
dout(30) << __func__ << " oi.size: " << oi.size << dendl;
dout(30) << __func__ << " oi.truncate_seq: " << oi.truncate_seq << dendl;
dout(30) << __func__ << " op.extent.truncate_seq: " << op.extent.truncate_seq << dendl;
dout(30) << __func__ << " op.extent.truncate_size: " << op.extent.truncate_size << dendl;
// are we beyond truncate_size?
if ( (seq < op.extent.truncate_seq) &&
(op.extent.offset + op.extent.length > op.extent.truncate_size) &&
(size > op.extent.truncate_size) )
size = op.extent.truncate_size;
  if (op.extent.length == 0) // a length of zero means read the whole object
op.extent.length = size;
if (op.extent.offset >= size) {
op.extent.length = 0;
trimmed_read = true;
} else if (op.extent.offset + op.extent.length > size) {
op.extent.length = size - op.extent.offset;
trimmed_read = true;
}
dout(30) << __func__ << "op.extent.length is now " << op.extent.length << dendl;
// read into a buffer
int result = 0;
if (trimmed_read && op.extent.length == 0) {
// read size was trimmed to zero and it is expected to do nothing
// a read operation of 0 bytes does *not* do nothing, this is why
// the trimmed_read boolean is needed
} else if (pool.info.is_erasure()) {
// The initialisation below is required to silence a false positive
// -Wmaybe-uninitialized warning
std::optional<uint32_t> maybe_crc;
// If there is a data digest and it is possible we are reading
// entire object, pass the digest. FillInVerifyExtent will
// will check the oi.size again.
if (oi.is_data_digest() && op.extent.offset == 0 &&
op.extent.length >= oi.size)
maybe_crc = oi.data_digest;
ctx->pending_async_reads.push_back(
make_pair(
boost::make_tuple(op.extent.offset, op.extent.length, op.flags),
make_pair(&osd_op.outdata,
new FillInVerifyExtent(&op.extent.length, &osd_op.rval,
&osd_op.outdata, maybe_crc, oi.size,
osd, soid, op.flags))));
dout(10) << " async_read noted for " << soid << dendl;
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new ReadFinisher(osd_op));
} else {
int r = pgbackend->objects_read_sync(
soid, op.extent.offset, op.extent.length, op.flags, &osd_op.outdata);
// whole object? can we verify the checksum?
if (r >= 0 && op.extent.offset == 0 &&
(uint64_t)r == oi.size && oi.is_data_digest()) {
uint32_t crc = osd_op.outdata.crc32c(-1);
if (oi.data_digest != crc) {
osd->clog->error() << info.pgid << std::hex
<< " full-object read crc 0x" << crc
<< " != expected 0x" << oi.data_digest
<< std::dec << " on " << soid;
r = -EIO; // try repair later
}
}
if (r == -EIO) {
r = rep_repair_primary_object(soid, ctx);
}
if (r >= 0)
op.extent.length = r;
else if (r == -EAGAIN) {
result = -EAGAIN;
} else {
result = r;
op.extent.length = 0;
}
dout(10) << " read got " << r << " / " << op.extent.length
<< " bytes from obj " << soid << dendl;
}
if (result >= 0) {
ctx->delta_stats.num_rd_kb += shift_round_up(op.extent.length, 10);
ctx->delta_stats.num_rd++;
}
return result;
}
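// Handle CEPH_OSD_OP_SPARSE_READ: on EC pools translate it into a normal
// async read; otherwise use fiemap to find allocated extents, read them, and
// verify the whole-object digest when the full object was returned.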
int PrimaryLogPG::do_sparse_read(OpContext *ctx, OSDOp& osd_op) {
dout(20) << __func__ << dendl;
auto& op = osd_op.op;
auto& oi = ctx->new_obs.oi;
auto& soid = oi.soid;
uint64_t size = oi.size;
uint64_t offset = op.extent.offset;
uint64_t length = op.extent.length;
// are we beyond truncate_size?
if ((oi.truncate_seq < op.extent.truncate_seq) &&
(op.extent.offset + op.extent.length > op.extent.truncate_size) &&
(size > op.extent.truncate_size)) {
size = op.extent.truncate_size;
}
if (offset > size) {
length = 0;
} else if (offset + length > size) {
length = size - offset;
}
++ctx->num_read;
if (pool.info.is_erasure()) {
// translate sparse read to a normal one if not supported
if (length > 0) {
ctx->pending_async_reads.push_back(
make_pair(
boost::make_tuple(offset, length, op.flags),
make_pair(
&osd_op.outdata,
new ToSparseReadResult(&osd_op.rval, &osd_op.outdata, offset,
&op.extent.length))));
dout(10) << " async_read (was sparse_read) noted for " << soid << dendl;
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new ReadFinisher(osd_op));
} else {
dout(10) << " sparse read ended up empty for " << soid << dendl;
map<uint64_t, uint64_t> extents;
encode(extents, osd_op.outdata);
}
} else {
// read into a buffer
map<uint64_t, uint64_t> m;
int r = osd->store->fiemap(ch, ghobject_t(soid, ghobject_t::NO_GEN,
info.pgid.shard),
offset, length, m);
if (r < 0) {
return r;
}
bufferlist data_bl;
r = pgbackend->objects_readv_sync(soid, std::move(m), op.flags, &data_bl);
if (r == -EIO) {
r = rep_repair_primary_object(soid, ctx);
}
if (r < 0) {
return r;
}
    // Why does SPARSE_READ need a checksum?  In fact, librbd always uses
    // sparse-read.  At first there may not be many whole objects, but with
    // continued use more and more whole objects exist, so from this point on
    // adding a checksum for sparse-read makes sense.
if ((uint64_t)r == oi.size && oi.is_data_digest()) {
uint32_t crc = data_bl.crc32c(-1);
if (oi.data_digest != crc) {
osd->clog->error() << info.pgid << std::hex
<< " full-object read crc 0x" << crc
<< " != expected 0x" << oi.data_digest
<< std::dec << " on " << soid;
r = rep_repair_primary_object(soid, ctx);
if (r < 0) {
return r;
}
}
}
op.extent.length = r;
encode(m, osd_op.outdata); // re-encode since it might be modified
::encode_destructively(data_bl, osd_op.outdata);
dout(10) << " sparse_read got " << r << " bytes from object "
<< soid << dendl;
}
ctx->delta_stats.num_rd_kb += shift_round_up(op.extent.length, 10);
ctx->delta_stats.num_rd++;
return 0;
}
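// Execute each OSDOp in ops against the object state in ctx, accumulating
// mutations in the PGTransaction and queueing async reads as needed;
// individual ops may set result to -EINPROGRESS when they defer completion
// to an OpFinisher.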
int PrimaryLogPG::do_osd_ops(OpContext *ctx, vector<OSDOp>& ops)
{
int result = 0;
SnapSetContext *ssc = ctx->obc->ssc;
ObjectState& obs = ctx->new_obs;
object_info_t& oi = obs.oi;
const hobject_t& soid = oi.soid;
const bool skip_data_digest = osd->store->has_builtin_csum() &&
osd->osd_skip_data_digest;
PGTransaction* t = ctx->op_t.get();
dout(10) << "do_osd_op " << soid << " " << ops << dendl;
ctx->current_osd_subop_num = 0;
for (auto p = ops.begin(); p != ops.end(); ++p, ctx->current_osd_subop_num++, ctx->processed_subop_count++) {
OSDOp& osd_op = *p;
ceph_osd_op& op = osd_op.op;
OpFinisher* op_finisher = nullptr;
{
auto op_finisher_it = ctx->op_finishers.find(ctx->current_osd_subop_num);
if (op_finisher_it != ctx->op_finishers.end()) {
op_finisher = op_finisher_it->second.get();
}
}
// TODO: check endianness (ceph_le32 vs uint32_t, etc.)
// The fields in ceph_osd_op are little-endian (according to the definition in rados.h),
// but the code in this function seems to treat them as native-endian. What should the
// tracepoints do?
tracepoint(osd, do_osd_op_pre, soid.oid.name.c_str(), soid.snap.val, op.op, ceph_osd_op_name(op.op), op.flags);
dout(10) << "do_osd_op " << osd_op << dendl;
auto bp = osd_op.indata.cbegin();
    // user-visible modification?
switch (op.op) {
// non user-visible modifications
case CEPH_OSD_OP_WATCH:
case CEPH_OSD_OP_CACHE_EVICT:
case CEPH_OSD_OP_CACHE_FLUSH:
case CEPH_OSD_OP_CACHE_TRY_FLUSH:
case CEPH_OSD_OP_UNDIRTY:
case CEPH_OSD_OP_COPY_FROM: // we handle user_version update explicitly
case CEPH_OSD_OP_COPY_FROM2:
case CEPH_OSD_OP_CACHE_PIN:
case CEPH_OSD_OP_CACHE_UNPIN:
case CEPH_OSD_OP_SET_REDIRECT:
case CEPH_OSD_OP_SET_CHUNK:
case CEPH_OSD_OP_TIER_PROMOTE:
case CEPH_OSD_OP_TIER_FLUSH:
case CEPH_OSD_OP_TIER_EVICT:
break;
default:
if (op.op & CEPH_OSD_OP_MODE_WR)
ctx->user_modify = true;
}
// munge -1 truncate to 0 truncate
if (ceph_osd_op_uses_extent(op.op) &&
op.extent.truncate_seq == 1 &&
op.extent.truncate_size == (-1ULL)) {
op.extent.truncate_size = 0;
op.extent.truncate_seq = 0;
}
// munge ZERO -> TRUNCATE? (don't munge to DELETE or we risk hosing attributes)
if (op.op == CEPH_OSD_OP_ZERO &&
obs.exists &&
op.extent.offset < static_cast<Option::size_t>(osd->osd_max_object_size) &&
op.extent.length >= 1 &&
op.extent.length <= static_cast<Option::size_t>(osd->osd_max_object_size) &&
op.extent.offset + op.extent.length >= oi.size) {
if (op.extent.offset >= oi.size) {
// no-op
goto fail;
}
dout(10) << " munging ZERO " << op.extent.offset << "~" << op.extent.length
<< " -> TRUNCATE " << op.extent.offset << " (old size is " << oi.size << ")" << dendl;
op.op = CEPH_OSD_OP_TRUNCATE;
}
switch (op.op) {
// --- READS ---
case CEPH_OSD_OP_CMPEXT:
++ctx->num_read;
tracepoint(osd, do_osd_op_pre_extent_cmp, soid.oid.name.c_str(),
soid.snap.val, oi.size, oi.truncate_seq, op.extent.offset,
op.extent.length, op.extent.truncate_size,
op.extent.truncate_seq);
if (op_finisher == nullptr) {
result = do_extent_cmp(ctx, osd_op);
} else {
result = op_finisher->execute();
}
break;
case CEPH_OSD_OP_SYNC_READ:
if (pool.info.is_erasure()) {
result = -EOPNOTSUPP;
break;
}
// fall through
case CEPH_OSD_OP_READ:
++ctx->num_read;
tracepoint(osd, do_osd_op_pre_read, soid.oid.name.c_str(),
soid.snap.val, oi.size, oi.truncate_seq, op.extent.offset,
op.extent.length, op.extent.truncate_size,
op.extent.truncate_seq);
if (op_finisher == nullptr) {
if (!ctx->data_off) {
ctx->data_off = op.extent.offset;
}
result = do_read(ctx, osd_op);
} else {
result = op_finisher->execute();
}
break;
case CEPH_OSD_OP_CHECKSUM:
++ctx->num_read;
{
tracepoint(osd, do_osd_op_pre_checksum, soid.oid.name.c_str(),
soid.snap.val, oi.size, oi.truncate_seq, op.checksum.type,
op.checksum.offset, op.checksum.length,
op.checksum.chunk_size);
if (op_finisher == nullptr) {
result = do_checksum(ctx, osd_op, &bp);
} else {
result = op_finisher->execute();
}
}
break;
/* map extents */
case CEPH_OSD_OP_MAPEXT:
tracepoint(osd, do_osd_op_pre_mapext, soid.oid.name.c_str(), soid.snap.val, op.extent.offset, op.extent.length);
if (pool.info.is_erasure()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_read;
{
// read into a buffer
bufferlist bl;
int r = osd->store->fiemap(ch, ghobject_t(soid, ghobject_t::NO_GEN,
info.pgid.shard),
op.extent.offset, op.extent.length, bl);
osd_op.outdata = std::move(bl);
if (r < 0)
result = r;
else
ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10);
ctx->delta_stats.num_rd++;
dout(10) << " map_extents done on object " << soid << dendl;
}
break;
    /* sparse read */
case CEPH_OSD_OP_SPARSE_READ:
tracepoint(osd, do_osd_op_pre_sparse_read, soid.oid.name.c_str(),
soid.snap.val, oi.size, oi.truncate_seq, op.extent.offset,
op.extent.length, op.extent.truncate_size,
op.extent.truncate_seq);
if (op_finisher == nullptr) {
result = do_sparse_read(ctx, osd_op);
} else {
result = op_finisher->execute();
}
break;
case CEPH_OSD_OP_CALL:
{
string cname, mname;
bufferlist indata;
try {
bp.copy(op.cls.class_len, cname);
bp.copy(op.cls.method_len, mname);
bp.copy(op.cls.indata_len, indata);
} catch (ceph::buffer::error& e) {
dout(10) << "call unable to decode class + method + indata" << dendl;
dout(30) << "in dump: ";
osd_op.indata.hexdump(*_dout);
*_dout << dendl;
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_call, soid.oid.name.c_str(), soid.snap.val, "???", "???");
break;
}
tracepoint(osd, do_osd_op_pre_call, soid.oid.name.c_str(), soid.snap.val, cname.c_str(), mname.c_str());
ClassHandler::ClassData *cls;
result = ClassHandler::get_instance().open_class(cname, &cls);
ceph_assert(result == 0); // init_op_flags() already verified this works.
ClassHandler::ClassMethod *method = cls->get_method(mname);
if (!method) {
dout(10) << "call method " << cname << "." << mname << " does not exist" << dendl;
result = -EOPNOTSUPP;
break;
}
int flags = method->get_flags();
if (flags & CLS_METHOD_WR)
ctx->user_modify = true;
bufferlist outdata;
dout(10) << "call method " << cname << "." << mname << dendl;
int prev_rd = ctx->num_read;
int prev_wr = ctx->num_write;
result = method->exec((cls_method_context_t)&ctx, indata, outdata);
if (ctx->num_read > prev_rd && !(flags & CLS_METHOD_RD)) {
derr << "method " << cname << "." << mname << " tried to read object but is not marked RD" << dendl;
result = -EIO;
break;
}
if (ctx->num_write > prev_wr && !(flags & CLS_METHOD_WR)) {
derr << "method " << cname << "." << mname << " tried to update object but is not marked WR" << dendl;
result = -EIO;
break;
}
dout(10) << "method called response length=" << outdata.length() << dendl;
op.extent.length = outdata.length();
osd_op.outdata.claim_append(outdata);
dout(30) << "out dump: ";
osd_op.outdata.hexdump(*_dout);
*_dout << dendl;
}
break;
case CEPH_OSD_OP_STAT:
// note: stat does not require RD
{
tracepoint(osd, do_osd_op_pre_stat, soid.oid.name.c_str(), soid.snap.val);
if (obs.exists && !oi.is_whiteout()) {
encode(oi.size, osd_op.outdata);
encode(oi.mtime, osd_op.outdata);
dout(10) << "stat oi has " << oi.size << " " << oi.mtime << dendl;
} else {
result = -ENOENT;
dout(10) << "stat oi object does not exist" << dendl;
}
ctx->delta_stats.num_rd++;
}
break;
case CEPH_OSD_OP_ISDIRTY:
++ctx->num_read;
{
tracepoint(osd, do_osd_op_pre_isdirty, soid.oid.name.c_str(), soid.snap.val);
bool is_dirty = obs.oi.is_dirty();
encode(is_dirty, osd_op.outdata);
ctx->delta_stats.num_rd++;
result = 0;
}
break;
case CEPH_OSD_OP_UNDIRTY:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_undirty, soid.oid.name.c_str(), soid.snap.val);
if (oi.is_dirty()) {
ctx->undirty = true; // see make_writeable()
ctx->modify = true;
ctx->delta_stats.num_wr++;
}
}
break;
case CEPH_OSD_OP_CACHE_TRY_FLUSH:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_try_flush, soid.oid.name.c_str(), soid.snap.val);
if (ctx->lock_type != RWState::RWNONE) {
dout(10) << "cache-try-flush without SKIPRWLOCKS flag set" << dendl;
result = -EINVAL;
break;
}
if (pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE || obs.oi.has_manifest()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = 0;
break;
}
if (oi.is_cache_pinned()) {
dout(10) << "cache-try-flush on a pinned object, consider unpin this object first" << dendl;
result = -EPERM;
break;
}
if (oi.is_dirty()) {
result = start_flush(ctx->op, ctx->obc, false, NULL, std::nullopt);
if (result == -EINPROGRESS)
result = -EAGAIN;
} else {
result = 0;
}
}
break;
case CEPH_OSD_OP_CACHE_FLUSH:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_cache_flush, soid.oid.name.c_str(), soid.snap.val);
if (ctx->lock_type == RWState::RWNONE) {
dout(10) << "cache-flush with SKIPRWLOCKS flag set" << dendl;
result = -EINVAL;
break;
}
if (pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE || obs.oi.has_manifest()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = 0;
break;
}
if (oi.is_cache_pinned()) {
dout(10) << "cache-flush on a pinned object, consider unpin this object first" << dendl;
result = -EPERM;
break;
}
hobject_t missing;
if (oi.is_dirty()) {
result = start_flush(ctx->op, ctx->obc, true, &missing, std::nullopt);
if (result == -EINPROGRESS)
result = -EAGAIN;
} else {
result = 0;
}
// Check special return value which has set missing_return
if (result == -ENOENT) {
dout(10) << __func__ << " CEPH_OSD_OP_CACHE_FLUSH got ENOENT" << dendl;
ceph_assert(!missing.is_min());
wait_for_unreadable_object(missing, ctx->op);
// Error code which is used elsewhere when wait_for_unreadable_object() is used
result = -EAGAIN;
}
}
break;
case CEPH_OSD_OP_CACHE_EVICT:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_cache_evict, soid.oid.name.c_str(), soid.snap.val);
if (pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE || obs.oi.has_manifest()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = 0;
break;
}
if (oi.is_cache_pinned()) {
dout(10) << "cache-evict on a pinned object, consider unpin this object first" << dendl;
result = -EPERM;
break;
}
if (oi.is_dirty()) {
result = -EBUSY;
break;
}
if (!oi.watchers.empty()) {
result = -EBUSY;
break;
}
if (soid.snap == CEPH_NOSNAP) {
result = _verify_no_head_clones(soid, ssc->snapset);
if (result < 0)
break;
}
result = _delete_oid(ctx, true, false);
if (result >= 0) {
// mark that this is a cache eviction to avoid triggering normal
// make_writeable() clone creation in finish_ctx()
ctx->cache_operation = true;
}
osd->logger->inc(l_osd_tier_evict);
}
break;
case CEPH_OSD_OP_GETXATTR:
++ctx->num_read;
{
string aname;
bp.copy(op.xattr.name_len, aname);
tracepoint(osd, do_osd_op_pre_getxattr, soid.oid.name.c_str(), soid.snap.val, aname.c_str());
string name = "_" + aname;
int r = getattr_maybe_cache(
ctx->obc,
name,
&(osd_op.outdata));
if (r >= 0) {
op.xattr.value_len = osd_op.outdata.length();
result = 0;
ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
} else
result = r;
ctx->delta_stats.num_rd++;
}
break;
case CEPH_OSD_OP_GETXATTRS:
++ctx->num_read;
{
tracepoint(osd, do_osd_op_pre_getxattrs, soid.oid.name.c_str(), soid.snap.val);
map<string, bufferlist,less<>> out;
result = getattrs_maybe_cache(
ctx->obc,
&out);
bufferlist bl;
encode(out, bl);
ctx->delta_stats.num_rd_kb += shift_round_up(bl.length(), 10);
ctx->delta_stats.num_rd++;
osd_op.outdata.claim_append(bl);
}
break;
case CEPH_OSD_OP_CMPXATTR:
++ctx->num_read;
{
string aname;
bp.copy(op.xattr.name_len, aname);
tracepoint(osd, do_osd_op_pre_cmpxattr, soid.oid.name.c_str(), soid.snap.val, aname.c_str());
string name = "_" + aname;
name[op.xattr.name_len + 1] = 0;
bufferlist xattr;
result = getattr_maybe_cache(
ctx->obc,
name,
&xattr);
if (result < 0 && result != -EEXIST && result != -ENODATA)
break;
ctx->delta_stats.num_rd++;
ctx->delta_stats.num_rd_kb += shift_round_up(xattr.length(), 10);
switch (op.xattr.cmp_mode) {
case CEPH_OSD_CMPXATTR_MODE_STRING:
{
string val;
bp.copy(op.xattr.value_len, val);
val[op.xattr.value_len] = 0;
dout(10) << "CEPH_OSD_OP_CMPXATTR name=" << name << " val=" << val
<< " op=" << (int)op.xattr.cmp_op << " mode=" << (int)op.xattr.cmp_mode << dendl;
result = do_xattr_cmp_str(op.xattr.cmp_op, val, xattr);
}
break;
case CEPH_OSD_CMPXATTR_MODE_U64:
{
uint64_t u64val;
try {
decode(u64val, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
goto fail;
}
dout(10) << "CEPH_OSD_OP_CMPXATTR name=" << name << " val=" << u64val
<< " op=" << (int)op.xattr.cmp_op << " mode=" << (int)op.xattr.cmp_mode << dendl;
result = do_xattr_cmp_u64(op.xattr.cmp_op, u64val, xattr);
}
break;
default:
dout(10) << "bad cmp mode " << (int)op.xattr.cmp_mode << dendl;
result = -EINVAL;
}
if (!result) {
dout(10) << "comparison returned false" << dendl;
result = -ECANCELED;
break;
}
if (result < 0) {
dout(10) << "comparison returned " << result << " " << cpp_strerror(-result) << dendl;
break;
}
dout(10) << "comparison returned true" << dendl;
}
break;
case CEPH_OSD_OP_ASSERT_VER:
++ctx->num_read;
{
uint64_t ver = op.assert_ver.ver;
tracepoint(osd, do_osd_op_pre_assert_ver, soid.oid.name.c_str(), soid.snap.val, ver);
if (!ver) {
result = -EINVAL;
} else if (ver < oi.user_version) {
result = -ERANGE;
} else if (ver > oi.user_version) {
result = -EOVERFLOW;
}
}
break;
case CEPH_OSD_OP_LIST_WATCHERS:
++ctx->num_read;
{
tracepoint(osd, do_osd_op_pre_list_watchers, soid.oid.name.c_str(), soid.snap.val);
obj_list_watch_response_t resp;
map<pair<uint64_t, entity_name_t>, watch_info_t>::const_iterator oi_iter;
for (oi_iter = oi.watchers.begin(); oi_iter != oi.watchers.end();
++oi_iter) {
dout(20) << "key cookie=" << oi_iter->first.first
<< " entity=" << oi_iter->first.second << " "
<< oi_iter->second << dendl;
ceph_assert(oi_iter->first.first == oi_iter->second.cookie);
ceph_assert(oi_iter->first.second.is_client());
watch_item_t wi(oi_iter->first.second, oi_iter->second.cookie,
oi_iter->second.timeout_seconds, oi_iter->second.addr);
resp.entries.push_back(wi);
}
resp.encode(osd_op.outdata, ctx->get_features());
result = 0;
ctx->delta_stats.num_rd++;
break;
}
case CEPH_OSD_OP_LIST_SNAPS:
++ctx->num_read;
{
tracepoint(osd, do_osd_op_pre_list_snaps, soid.oid.name.c_str(), soid.snap.val);
obj_list_snap_response_t resp;
if (!ssc) {
ssc = ctx->obc->ssc = get_snapset_context(soid, false);
}
ceph_assert(ssc);
dout(20) << " snapset " << ssc->snapset << dendl;
int clonecount = ssc->snapset.clones.size();
clonecount++; // for head
resp.clones.reserve(clonecount);
for (auto clone_iter = ssc->snapset.clones.begin();
clone_iter != ssc->snapset.clones.end(); ++clone_iter) {
clone_info ci;
ci.cloneid = *clone_iter;
hobject_t clone_oid = soid;
clone_oid.snap = *clone_iter;
auto p = ssc->snapset.clone_snaps.find(*clone_iter);
if (p == ssc->snapset.clone_snaps.end()) {
osd->clog->error() << "osd." << osd->whoami
<< ": inconsistent clone_snaps found for oid "
<< soid << " clone " << *clone_iter
<< " snapset " << ssc->snapset;
result = -EINVAL;
break;
}
for (auto q = p->second.rbegin(); q != p->second.rend(); ++q) {
ci.snaps.push_back(*q);
}
dout(20) << " clone " << *clone_iter << " snaps " << ci.snaps << dendl;
map<snapid_t, interval_set<uint64_t> >::const_iterator coi;
coi = ssc->snapset.clone_overlap.find(ci.cloneid);
if (coi == ssc->snapset.clone_overlap.end()) {
osd->clog->error() << "osd." << osd->whoami
<< ": inconsistent clone_overlap found for oid "
<< soid << " clone " << *clone_iter;
result = -EINVAL;
break;
}
const interval_set<uint64_t> &o = coi->second;
ci.overlap.reserve(o.num_intervals());
for (interval_set<uint64_t>::const_iterator r = o.begin();
r != o.end(); ++r) {
ci.overlap.push_back(pair<uint64_t,uint64_t>(r.get_start(),
r.get_len()));
}
map<snapid_t, uint64_t>::const_iterator si;
si = ssc->snapset.clone_size.find(ci.cloneid);
if (si == ssc->snapset.clone_size.end()) {
osd->clog->error() << "osd." << osd->whoami
<< ": inconsistent clone_size found for oid "
<< soid << " clone " << *clone_iter;
result = -EINVAL;
break;
}
ci.size = si->second;
resp.clones.push_back(ci);
}
if (result < 0) {
break;
}
if (!ctx->obc->obs.oi.is_whiteout()) {
ceph_assert(obs.exists);
clone_info ci;
ci.cloneid = CEPH_NOSNAP;
//Size for HEAD is oi.size
ci.size = oi.size;
resp.clones.push_back(ci);
}
resp.seq = ssc->snapset.seq;
resp.encode(osd_op.outdata);
result = 0;
ctx->delta_stats.num_rd++;
break;
}
case CEPH_OSD_OP_NOTIFY:
++ctx->num_read;
{
uint32_t timeout;
bufferlist bl;
try {
uint32_t ver; // obsolete
decode(ver, bp);
decode(timeout, bp);
decode(bl, bp);
} catch (const ceph::buffer::error &e) {
timeout = 0;
}
tracepoint(osd, do_osd_op_pre_notify, soid.oid.name.c_str(), soid.snap.val, timeout);
if (!timeout)
timeout = cct->_conf->osd_default_notify_timeout;
notify_info_t n;
n.timeout = timeout;
n.notify_id = osd->get_next_id(get_osdmap_epoch());
n.cookie = op.notify.cookie;
n.bl = bl;
ctx->notifies.push_back(n);
// return our unique notify id to the client
encode(n.notify_id, osd_op.outdata);
}
break;
case CEPH_OSD_OP_NOTIFY_ACK:
++ctx->num_read;
{
try {
uint64_t notify_id = 0;
uint64_t watch_cookie = 0;
decode(notify_id, bp);
decode(watch_cookie, bp);
bufferlist reply_bl;
if (!bp.end()) {
decode(reply_bl, bp);
}
tracepoint(osd, do_osd_op_pre_notify_ack, soid.oid.name.c_str(), soid.snap.val, notify_id, watch_cookie, "Y");
OpContext::NotifyAck ack(notify_id, watch_cookie, reply_bl);
ctx->notify_acks.push_back(ack);
} catch (const ceph::buffer::error &e) {
tracepoint(osd, do_osd_op_pre_notify_ack, soid.oid.name.c_str(), soid.snap.val, op.watch.cookie, 0, "N");
OpContext::NotifyAck ack(
// op.watch.cookie is actually the notify_id for historical reasons
op.watch.cookie
);
ctx->notify_acks.push_back(ack);
}
}
break;
case CEPH_OSD_OP_SETALLOCHINT:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_setallochint, soid.oid.name.c_str(), soid.snap.val, op.alloc_hint.expected_object_size, op.alloc_hint.expected_write_size);
maybe_create_new_object(ctx);
oi.expected_object_size = op.alloc_hint.expected_object_size;
oi.expected_write_size = op.alloc_hint.expected_write_size;
oi.alloc_hint_flags = op.alloc_hint.flags;
t->set_alloc_hint(soid, op.alloc_hint.expected_object_size,
op.alloc_hint.expected_write_size,
op.alloc_hint.flags);
}
break;
// --- WRITES ---
// -- object data --
case CEPH_OSD_OP_WRITE:
++ctx->num_write;
result = 0;
{ // write
__u32 seq = oi.truncate_seq;
tracepoint(osd, do_osd_op_pre_write, soid.oid.name.c_str(), soid.snap.val, oi.size, seq, op.extent.offset, op.extent.length, op.extent.truncate_size, op.extent.truncate_seq);
if (op.extent.length != osd_op.indata.length()) {
result = -EINVAL;
break;
}
if (pool.info.has_flag(pg_pool_t::FLAG_WRITE_FADVISE_DONTNEED))
op.flags = op.flags | CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
if (pool.info.requires_aligned_append() &&
(op.extent.offset % pool.info.required_alignment() != 0)) {
result = -EOPNOTSUPP;
break;
}
if (!obs.exists) {
if (pool.info.requires_aligned_append() && op.extent.offset) {
result = -EOPNOTSUPP;
break;
}
} else if (op.extent.offset != oi.size &&
pool.info.requires_aligned_append()) {
result = -EOPNOTSUPP;
break;
}
if (seq && (seq > op.extent.truncate_seq) &&
(op.extent.offset + op.extent.length > oi.size)) {
// old write, arrived after trimtrunc
op.extent.length = (op.extent.offset > oi.size ? 0 : oi.size - op.extent.offset);
dout(10) << " old truncate_seq " << op.extent.truncate_seq << " < current " << seq
<< ", adjusting write length to " << op.extent.length << dendl;
bufferlist t;
t.substr_of(osd_op.indata, 0, op.extent.length);
osd_op.indata.swap(t);
}
if (op.extent.truncate_seq > seq) {
// write arrives before trimtrunc
if (obs.exists && !oi.is_whiteout()) {
dout(10) << " truncate_seq " << op.extent.truncate_seq << " > current " << seq
<< ", truncating to " << op.extent.truncate_size << dendl;
t->truncate(soid, op.extent.truncate_size);
oi.truncate_seq = op.extent.truncate_seq;
oi.truncate_size = op.extent.truncate_size;
if (oi.size > op.extent.truncate_size) {
interval_set<uint64_t> trim;
trim.insert(op.extent.truncate_size,
oi.size - op.extent.truncate_size);
ctx->modified_ranges.union_of(trim);
ctx->clean_regions.mark_data_region_dirty(op.extent.truncate_size, oi.size - op.extent.truncate_size);
oi.clear_data_digest();
}
if (op.extent.truncate_size != oi.size) {
truncate_update_size_and_usage(ctx->delta_stats,
oi,
op.extent.truncate_size);
}
} else {
dout(10) << " truncate_seq " << op.extent.truncate_seq << " > current " << seq
<< ", but object is new" << dendl;
oi.truncate_seq = op.extent.truncate_seq;
oi.truncate_size = op.extent.truncate_size;
}
}
result = check_offset_and_length(
op.extent.offset, op.extent.length,
static_cast<Option::size_t>(osd->osd_max_object_size), get_dpp());
if (result < 0)
break;
maybe_create_new_object(ctx);
if (op.extent.length == 0) {
if (op.extent.offset > oi.size) {
t->truncate(
soid, op.extent.offset);
truncate_update_size_and_usage(ctx->delta_stats, oi,
op.extent.offset);
} else {
t->nop(soid);
}
} else {
t->write(
soid, op.extent.offset, op.extent.length, osd_op.indata, op.flags);
}
if (op.extent.offset == 0 && op.extent.length >= oi.size
&& !skip_data_digest) {
obs.oi.set_data_digest(osd_op.indata.crc32c(-1));
} else if (op.extent.offset == oi.size && obs.oi.is_data_digest()) {
if (skip_data_digest) {
obs.oi.clear_data_digest();
} else {
obs.oi.set_data_digest(osd_op.indata.crc32c(obs.oi.data_digest));
}
} else {
obs.oi.clear_data_digest();
}
write_update_size_and_usage(ctx->delta_stats, oi, ctx->modified_ranges,
op.extent.offset, op.extent.length);
ctx->clean_regions.mark_data_region_dirty(op.extent.offset, op.extent.length);
dout(10) << "clean_regions modified" << ctx->clean_regions << dendl;
}
break;
case CEPH_OSD_OP_WRITEFULL:
++ctx->num_write;
result = 0;
{ // write full object
tracepoint(osd, do_osd_op_pre_writefull, soid.oid.name.c_str(), soid.snap.val, oi.size, 0, op.extent.length);
if (op.extent.length != osd_op.indata.length()) {
result = -EINVAL;
break;
}
result = check_offset_and_length(
0, op.extent.length,
static_cast<Option::size_t>(osd->osd_max_object_size), get_dpp());
if (result < 0)
break;
if (pool.info.has_flag(pg_pool_t::FLAG_WRITE_FADVISE_DONTNEED))
op.flags = op.flags | CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
maybe_create_new_object(ctx);
if (pool.info.is_erasure()) {
t->truncate(soid, 0);
} else if (obs.exists && op.extent.length < oi.size) {
t->truncate(soid, op.extent.length);
}
if (op.extent.length) {
t->write(soid, 0, op.extent.length, osd_op.indata, op.flags);
}
if (!skip_data_digest) {
obs.oi.set_data_digest(osd_op.indata.crc32c(-1));
} else {
obs.oi.clear_data_digest();
}
ctx->clean_regions.mark_data_region_dirty(0,
std::max((uint64_t)op.extent.length, oi.size));
write_update_size_and_usage(ctx->delta_stats, oi, ctx->modified_ranges,
0, op.extent.length, true);
}
break;
case CEPH_OSD_OP_WRITESAME:
++ctx->num_write;
tracepoint(osd, do_osd_op_pre_writesame, soid.oid.name.c_str(), soid.snap.val, oi.size, op.writesame.offset, op.writesame.length, op.writesame.data_length);
result = do_writesame(ctx, osd_op);
break;
case CEPH_OSD_OP_ROLLBACK :
++ctx->num_write;
tracepoint(osd, do_osd_op_pre_rollback, soid.oid.name.c_str(), soid.snap.val);
result = _rollback_to(ctx, osd_op);
break;
case CEPH_OSD_OP_ZERO:
tracepoint(osd, do_osd_op_pre_zero, soid.oid.name.c_str(), soid.snap.val, op.extent.offset, op.extent.length);
if (pool.info.requires_aligned_append()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_write;
{ // zero
result = check_offset_and_length(
op.extent.offset, op.extent.length,
static_cast<Option::size_t>(osd->osd_max_object_size), get_dpp());
if (result < 0)
break;
if (op.extent.length && obs.exists && !oi.is_whiteout()) {
t->zero(soid, op.extent.offset, op.extent.length);
interval_set<uint64_t> ch;
ch.insert(op.extent.offset, op.extent.length);
ctx->modified_ranges.union_of(ch);
ctx->clean_regions.mark_data_region_dirty(op.extent.offset, op.extent.length);
ctx->delta_stats.num_wr++;
oi.clear_data_digest();
} else {
// no-op
}
}
break;
case CEPH_OSD_OP_CREATE:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_create, soid.oid.name.c_str(), soid.snap.val);
if (obs.exists && !oi.is_whiteout() &&
(op.flags & CEPH_OSD_OP_FLAG_EXCL)) {
result = -EEXIST; /* this is an exclusive create */
} else {
if (osd_op.indata.length()) {
auto p = osd_op.indata.cbegin();
string category;
try {
decode(category, p);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
goto fail;
}
// category is no longer implemented.
}
maybe_create_new_object(ctx);
t->nop(soid);
}
}
break;
case CEPH_OSD_OP_TRIMTRUNC:
op.extent.offset = op.extent.truncate_size;
// falling through
case CEPH_OSD_OP_TRUNCATE:
tracepoint(osd, do_osd_op_pre_truncate, soid.oid.name.c_str(), soid.snap.val, oi.size, oi.truncate_seq, op.extent.offset, op.extent.length, op.extent.truncate_size, op.extent.truncate_seq);
if (pool.info.requires_aligned_append()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_write;
result = 0;
{
// truncate
if (!obs.exists || oi.is_whiteout()) {
dout(10) << " object dne, truncate is a no-op" << dendl;
break;
}
result = check_offset_and_length(
op.extent.offset, op.extent.length,
static_cast<Option::size_t>(osd->osd_max_object_size), get_dpp());
if (result < 0)
break;
if (op.extent.truncate_seq) {
ceph_assert(op.extent.offset == op.extent.truncate_size);
if (op.extent.truncate_seq <= oi.truncate_seq) {
dout(10) << " truncate seq " << op.extent.truncate_seq << " <= current " << oi.truncate_seq
<< ", no-op" << dendl;
break; // old
}
dout(10) << " truncate seq " << op.extent.truncate_seq << " > current " << oi.truncate_seq
<< ", truncating" << dendl;
oi.truncate_seq = op.extent.truncate_seq;
oi.truncate_size = op.extent.truncate_size;
}
maybe_create_new_object(ctx);
t->truncate(soid, op.extent.offset);
if (oi.size > op.extent.offset) {
interval_set<uint64_t> trim;
trim.insert(op.extent.offset, oi.size-op.extent.offset);
ctx->modified_ranges.union_of(trim);
ctx->clean_regions.mark_data_region_dirty(op.extent.offset, oi.size - op.extent.offset);
} else if (oi.size < op.extent.offset) {
ctx->clean_regions.mark_data_region_dirty(oi.size, op.extent.offset - oi.size);
}
if (op.extent.offset != oi.size) {
truncate_update_size_and_usage(ctx->delta_stats,
oi,
op.extent.offset);
}
ctx->delta_stats.num_wr++;
	// do not set exists, or we will break the above DELETE -> TRUNCATE munging.
oi.clear_data_digest();
}
break;
case CEPH_OSD_OP_DELETE:
++ctx->num_write;
result = 0;
tracepoint(osd, do_osd_op_pre_delete, soid.oid.name.c_str(), soid.snap.val);
{
result = _delete_oid(ctx, false, ctx->ignore_cache);
}
break;
case CEPH_OSD_OP_WATCH:
++ctx->num_write;
result = 0;
{
tracepoint(osd, do_osd_op_pre_watch, soid.oid.name.c_str(), soid.snap.val,
op.watch.cookie, op.watch.op);
if (!obs.exists) {
result = -ENOENT;
break;
}
result = 0;
uint64_t cookie = op.watch.cookie;
entity_name_t entity = ctx->reqid.name;
ObjectContextRef obc = ctx->obc;
dout(10) << "watch " << ceph_osd_watch_op_name(op.watch.op)
<< ": ctx->obc=" << (void *)obc.get() << " cookie=" << cookie
<< " oi.version=" << oi.version.version << " ctx->at_version=" << ctx->at_version << dendl;
dout(10) << "watch: oi.user_version=" << oi.user_version<< dendl;
dout(10) << "watch: peer_addr="
<< ctx->op->get_req()->get_connection()->get_peer_addr() << dendl;
uint32_t timeout = cct->_conf->osd_client_watch_timeout;
if (op.watch.timeout != 0) {
timeout = op.watch.timeout;
}
watch_info_t w(cookie, timeout,
ctx->op->get_req()->get_connection()->get_peer_addr());
if (op.watch.op == CEPH_OSD_WATCH_OP_WATCH ||
op.watch.op == CEPH_OSD_WATCH_OP_LEGACY_WATCH) {
if (oi.watchers.count(make_pair(cookie, entity))) {
dout(10) << " found existing watch " << w << " by " << entity << dendl;
} else {
dout(10) << " registered new watch " << w << " by " << entity << dendl;
oi.watchers[make_pair(cookie, entity)] = w;
	  t->nop(soid); // make sure we update the object_info on disk!
}
bool will_ping = (op.watch.op == CEPH_OSD_WATCH_OP_WATCH);
ctx->watch_connects.push_back(make_pair(w, will_ping));
} else if (op.watch.op == CEPH_OSD_WATCH_OP_RECONNECT) {
if (!oi.watchers.count(make_pair(cookie, entity))) {
result = -ENOTCONN;
break;
}
dout(10) << " found existing watch " << w << " by " << entity << dendl;
ctx->watch_connects.push_back(make_pair(w, true));
} else if (op.watch.op == CEPH_OSD_WATCH_OP_PING) {
/* Note: WATCH with PING doesn't cause may_write() to return true,
* so if there is nothing else in the transaction, this is going
* to run do_osd_op_effects, but not write out a log entry */
if (!oi.watchers.count(make_pair(cookie, entity))) {
result = -ENOTCONN;
break;
}
map<pair<uint64_t,entity_name_t>,WatchRef>::iterator p =
obc->watchers.find(make_pair(cookie, entity));
if (p == obc->watchers.end() ||
!p->second->is_connected()) {
// client needs to reconnect
result = -ETIMEDOUT;
break;
}
dout(10) << " found existing watch " << w << " by " << entity << dendl;
p->second->got_ping(ceph_clock_now());
result = 0;
} else if (op.watch.op == CEPH_OSD_WATCH_OP_UNWATCH) {
map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator oi_iter =
oi.watchers.find(make_pair(cookie, entity));
if (oi_iter != oi.watchers.end()) {
dout(10) << " removed watch " << oi_iter->second << " by "
<< entity << dendl;
oi.watchers.erase(oi_iter);
t->nop(soid); // update oi on disk
ctx->watch_disconnects.push_back(
watch_disconnect_t(cookie, entity, false));
} else {
dout(10) << " can't remove: no watch by " << entity << dendl;
}
}
}
break;
case CEPH_OSD_OP_CACHE_PIN:
tracepoint(osd, do_osd_op_pre_cache_pin, soid.oid.name.c_str(), soid.snap.val);
if ((!pool.info.is_tier() ||
pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE)) {
result = -EINVAL;
dout(10) << " pin object is only allowed on the cache tier " << dendl;
break;
}
++ctx->num_write;
result = 0;
{
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
break;
}
if (!oi.is_cache_pinned()) {
oi.set_flag(object_info_t::FLAG_CACHE_PIN);
ctx->modify = true;
ctx->delta_stats.num_objects_pinned++;
ctx->delta_stats.num_wr++;
}
}
break;
case CEPH_OSD_OP_CACHE_UNPIN:
tracepoint(osd, do_osd_op_pre_cache_unpin, soid.oid.name.c_str(), soid.snap.val);
if ((!pool.info.is_tier() ||
pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE)) {
result = -EINVAL;
dout(10) << " pin object is only allowed on the cache tier " << dendl;
break;
}
++ctx->num_write;
result = 0;
{
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
break;
}
if (oi.is_cache_pinned()) {
oi.clear_flag(object_info_t::FLAG_CACHE_PIN);
ctx->modify = true;
ctx->delta_stats.num_objects_pinned--;
ctx->delta_stats.num_wr++;
}
}
break;
case CEPH_OSD_OP_SET_REDIRECT:
++ctx->num_write;
result = 0;
{
if (pool.info.is_tier()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = -ENOENT;
break;
}
if (get_osdmap()->require_osd_release < ceph_release_t::luminous) {
result = -EOPNOTSUPP;
break;
}
object_t target_name;
object_locator_t target_oloc;
snapid_t target_snapid = (uint64_t)op.copy_from.snapid;
version_t target_version = op.copy_from.src_version;
try {
decode(target_name, bp);
decode(target_oloc, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
goto fail;
}
pg_t raw_pg;
result = get_osdmap()->object_locator_to_pg(target_name, target_oloc, raw_pg);
if (result < 0) {
dout(5) << " pool information is invalid: " << result << dendl;
break;
}
hobject_t target(target_name, target_oloc.key, target_snapid,
raw_pg.ps(), raw_pg.pool(),
target_oloc.nspace);
if (target == soid) {
dout(20) << " set-redirect self is invalid" << dendl;
result = -EINVAL;
break;
}
bool need_reference = (osd_op.op.flags & CEPH_OSD_OP_FLAG_WITH_REFERENCE);
bool has_reference = (oi.flags & object_info_t::FLAG_REDIRECT_HAS_REFERENCE);
if (has_reference) {
result = -EINVAL;
dout(5) << " the object is already a manifest " << dendl;
break;
}
if (op_finisher == nullptr && need_reference) {
// start
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new SetManifestFinisher(osd_op));
ManifestOpRef mop = std::make_shared<ManifestOp>(ctx->obc, new RefCountCallback(ctx, osd_op));
auto* fin = new C_SetManifestRefCountDone(this, soid, 0);
ceph_tid_t tid = refcount_manifest(soid, target,
refcount_t::INCREMENT_REF, fin, std::nullopt);
fin->tid = tid;
mop->num_chunks++;
mop->tids[0] = tid;
manifest_ops[soid] = mop;
ctx->obc->start_block();
result = -EINPROGRESS;
} else {
// finish
if (op_finisher) {
result = op_finisher->execute();
ceph_assert(result == 0);
}
if (!oi.has_manifest() && !oi.manifest.is_redirect())
ctx->delta_stats.num_objects_manifest++;
oi.set_flag(object_info_t::FLAG_MANIFEST);
oi.manifest.redirect_target = target;
oi.manifest.type = object_manifest_t::TYPE_REDIRECT;
t->truncate(soid, 0);
ctx->clean_regions.mark_data_region_dirty(0, oi.size);
if (oi.is_omap() && pool.info.supports_omap()) {
t->omap_clear(soid);
obs.oi.clear_omap_digest();
obs.oi.clear_flag(object_info_t::FLAG_OMAP);
ctx->clean_regions.mark_omap_dirty();
}
write_update_size_and_usage(ctx->delta_stats, oi, ctx->modified_ranges,
0, oi.size, false);
ctx->delta_stats.num_bytes -= oi.size;
oi.size = 0;
oi.new_object();
oi.user_version = target_version;
ctx->user_at_version = target_version;
/* rm_attrs */
map<string,bufferlist,less<>> rmattrs;
result = getattrs_maybe_cache(ctx->obc, &rmattrs);
if (result < 0) {
dout(10) << __func__ << " error: " << cpp_strerror(result) << dendl;
return result;
}
map<string, bufferlist>::iterator iter;
for (iter = rmattrs.begin(); iter != rmattrs.end(); ++iter) {
const string& name = iter->first;
t->rmattr(soid, name);
}
if (!has_reference && need_reference) {
oi.set_flag(object_info_t::FLAG_REDIRECT_HAS_REFERENCE);
}
dout(10) << "set-redirect oid:" << oi.soid << " user_version: " << oi.user_version << dendl;
if (op_finisher) {
ctx->op_finishers.erase(ctx->current_osd_subop_num);
}
}
}
break;
case CEPH_OSD_OP_SET_CHUNK:
++ctx->num_write;
result = 0;
{
if (pool.info.is_tier()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = -ENOENT;
break;
}
if (get_osdmap()->require_osd_release < ceph_release_t::luminous) {
result = -EOPNOTSUPP;
break;
}
if (oi.manifest.is_redirect()) {
result = -EINVAL;
goto fail;
}
object_locator_t tgt_oloc;
uint64_t src_offset, src_length, tgt_offset;
object_t tgt_name;
try {
decode(src_offset, bp);
decode(src_length, bp);
decode(tgt_oloc, bp);
decode(tgt_name, bp);
decode(tgt_offset, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
goto fail;
}
if (!src_length) {
result = -EINVAL;
goto fail;
}
if (src_offset + src_length > oi.size) {
result = -ERANGE;
goto fail;
}
if (!(osd_op.op.flags & CEPH_OSD_OP_FLAG_WITH_REFERENCE)) {
result = -EOPNOTSUPP;
break;
}
if (pool.info.is_erasure()) {
result = -EOPNOTSUPP;
break;
}
for (auto &p : oi.manifest.chunk_map) {
interval_set<uint64_t> chunk;
chunk.insert(p.first, p.second.length);
if (chunk.intersects(src_offset, src_length)) {
dout(20) << __func__ << " overlapped !! offset: " << src_offset << " length: " << src_length
<< " chunk_info: " << p << dendl;
result = -EOPNOTSUPP;
goto fail;
}
}
pg_t raw_pg;
chunk_info_t chunk_info;
result = get_osdmap()->object_locator_to_pg(tgt_name, tgt_oloc, raw_pg);
if (result < 0) {
dout(5) << " pool information is invalid: " << result << dendl;
break;
}
hobject_t target(tgt_name, tgt_oloc.key, snapid_t(),
raw_pg.ps(), raw_pg.pool(),
tgt_oloc.nspace);
bool has_reference = (oi.manifest.chunk_map.find(src_offset) != oi.manifest.chunk_map.end()) &&
(oi.manifest.chunk_map[src_offset].test_flag(chunk_info_t::FLAG_HAS_REFERENCE));
if (has_reference) {
result = -EINVAL;
dout(5) << " the object is already a manifest " << dendl;
break;
}
chunk_info.oid = target;
chunk_info.offset = tgt_offset;
chunk_info.length = src_length;
if (op_finisher == nullptr) {
// start
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new SetManifestFinisher(osd_op));
object_manifest_t set_chunk;
bool need_inc_ref = false;
set_chunk.chunk_map[src_offset] = chunk_info;
need_inc_ref = inc_refcount_by_set(ctx, set_chunk, osd_op);
if (need_inc_ref) {
result = -EINPROGRESS;
break;
}
}
if (op_finisher) {
result = op_finisher->execute();
ceph_assert(result == 0);
}
oi.manifest.chunk_map[src_offset] = chunk_info;
if (!oi.has_manifest() && !oi.manifest.is_chunked())
ctx->delta_stats.num_objects_manifest++;
oi.set_flag(object_info_t::FLAG_MANIFEST);
oi.manifest.type = object_manifest_t::TYPE_CHUNKED;
if (!has_reference) {
oi.manifest.chunk_map[src_offset].set_flag(chunk_info_t::FLAG_HAS_REFERENCE);
}
ctx->modify = true;
ctx->cache_operation = true;
dout(10) << "set-chunked oid:" << oi.soid << " user_version: " << oi.user_version
<< " chunk_info: " << chunk_info << dendl;
if (op_finisher) {
ctx->op_finishers.erase(ctx->current_osd_subop_num);
}
}
break;
case CEPH_OSD_OP_TIER_PROMOTE:
++ctx->num_write;
result = 0;
{
if (pool.info.is_tier()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = -ENOENT;
break;
}
if (get_osdmap()->require_osd_release < ceph_release_t::luminous) {
result = -EOPNOTSUPP;
break;
}
if (!obs.oi.has_manifest()) {
result = 0;
break;
}
if (op_finisher == nullptr) {
PromoteManifestCallback *cb;
object_locator_t my_oloc;
hobject_t src_hoid;
if (obs.oi.manifest.is_chunked()) {
src_hoid = obs.oi.soid;
} else if (obs.oi.manifest.is_redirect()) {
object_locator_t src_oloc(obs.oi.manifest.redirect_target);
my_oloc = src_oloc;
src_hoid = obs.oi.manifest.redirect_target;
} else {
ceph_abort_msg("unrecognized manifest type");
}
cb = new PromoteManifestCallback(ctx->obc, this, ctx);
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new PromoteFinisher(cb));
unsigned flags = CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY |
CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE |
CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE |
CEPH_OSD_COPY_FROM_FLAG_RWORDERED;
unsigned src_fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL;
start_copy(cb, ctx->obc, src_hoid, my_oloc, 0, flags,
obs.oi.soid.snap == CEPH_NOSNAP,
src_fadvise_flags, 0);
dout(10) << "tier-promote oid:" << oi.soid << " manifest: " << obs.oi.manifest << dendl;
result = -EINPROGRESS;
} else {
result = op_finisher->execute();
ceph_assert(result == 0);
ctx->op_finishers.erase(ctx->current_osd_subop_num);
}
}
break;
case CEPH_OSD_OP_TIER_FLUSH:
++ctx->num_write;
result = 0;
{
if (pool.info.is_tier()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = -ENOENT;
break;
}
if (get_osdmap()->require_osd_release < ceph_release_t::octopus) {
result = -EOPNOTSUPP;
break;
}
if (oi.is_dirty() || !obs.oi.has_manifest()) {
result = start_flush(ctx->op, ctx->obc, true, NULL, std::nullopt, true);
if (result == -EINPROGRESS)
result = -EAGAIN;
} else {
result = 0;
}
}
break;
case CEPH_OSD_OP_TIER_EVICT:
++ctx->num_write;
result = 0;
{
if (pool.info.is_tier()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = -ENOENT;
break;
}
if (get_osdmap()->require_osd_release < ceph_release_t::octopus) {
result = -EOPNOTSUPP;
break;
}
if (!obs.oi.has_manifest()) {
result = -EINVAL;
break;
}
	// The chunks already hold references, so it is enough to mark them missing and punch holes in the evicted ranges
for (auto &p : obs.oi.manifest.chunk_map) {
p.second.set_flag(chunk_info_t::FLAG_MISSING);
// punch hole
t->zero(soid, p.first, p.second.length);
interval_set<uint64_t> ch;
ch.insert(p.first, p.second.length);
ctx->modified_ranges.union_of(ch);
ctx->clean_regions.mark_data_region_dirty(p.first, p.second.length);
}
oi.clear_data_digest();
ctx->delta_stats.num_wr++;
ctx->cache_operation = true;
ctx->undirty = true;
osd->logger->inc(l_osd_tier_evict);
}
break;
case CEPH_OSD_OP_UNSET_MANIFEST:
++ctx->num_write;
result = 0;
{
if (pool.info.is_tier()) {
result = -EINVAL;
break;
}
if (!obs.exists) {
result = -ENOENT;
break;
}
if (!oi.has_manifest()) {
result = -EOPNOTSUPP;
break;
}
if (get_osdmap()->require_osd_release < ceph_release_t::luminous) {
result = -EOPNOTSUPP;
break;
}
dec_all_refcount_manifest(oi, ctx);
oi.clear_flag(object_info_t::FLAG_MANIFEST);
oi.manifest = object_manifest_t();
ctx->delta_stats.num_objects_manifest--;
ctx->delta_stats.num_wr++;
ctx->modify = true;
}
break;
// -- object attrs --
case CEPH_OSD_OP_SETXATTR:
++ctx->num_write;
result = 0;
{
if (cct->_conf->osd_max_attr_size > 0 &&
op.xattr.value_len > cct->_conf->osd_max_attr_size) {
tracepoint(osd, do_osd_op_pre_setxattr, soid.oid.name.c_str(), soid.snap.val, "???");
result = -EFBIG;
break;
}
unsigned max_name_len =
std::min<uint64_t>(osd->store->get_max_attr_name_length(),
cct->_conf->osd_max_attr_name_len);
if (op.xattr.name_len > max_name_len) {
result = -ENAMETOOLONG;
break;
}
maybe_create_new_object(ctx);
string aname;
bp.copy(op.xattr.name_len, aname);
tracepoint(osd, do_osd_op_pre_setxattr, soid.oid.name.c_str(), soid.snap.val, aname.c_str());
string name = "_" + aname;
bufferlist bl;
bp.copy(op.xattr.value_len, bl);
t->setattr(soid, name, bl);
ctx->delta_stats.num_wr++;
}
break;
case CEPH_OSD_OP_RMXATTR:
++ctx->num_write;
result = 0;
{
string aname;
bp.copy(op.xattr.name_len, aname);
tracepoint(osd, do_osd_op_pre_rmxattr, soid.oid.name.c_str(), soid.snap.val, aname.c_str());
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
break;
}
string name = "_" + aname;
t->rmattr(soid, name);
ctx->delta_stats.num_wr++;
}
break;
// -- fancy writers --
case CEPH_OSD_OP_APPEND:
{
tracepoint(osd, do_osd_op_pre_append, soid.oid.name.c_str(), soid.snap.val, oi.size, oi.truncate_seq, op.extent.offset, op.extent.length, op.extent.truncate_size, op.extent.truncate_seq);
// just do it inline; this works because we are happy to execute
// fancy op on replicas as well.
vector<OSDOp> nops(1);
OSDOp& newop = nops[0];
newop.op.op = CEPH_OSD_OP_WRITE;
newop.op.extent.offset = oi.size;
newop.op.extent.length = op.extent.length;
newop.op.extent.truncate_seq = oi.truncate_seq;
newop.indata = osd_op.indata;
result = do_osd_ops(ctx, nops);
osd_op.outdata = std::move(newop.outdata);
}
break;
case CEPH_OSD_OP_STARTSYNC:
result = 0;
t->nop(soid);
break;
// -- trivial map --
case CEPH_OSD_OP_TMAPGET:
tracepoint(osd, do_osd_op_pre_tmapget, soid.oid.name.c_str(), soid.snap.val);
if (pool.info.is_erasure()) {
result = -EOPNOTSUPP;
break;
}
{
vector<OSDOp> nops(1);
OSDOp& newop = nops[0];
newop.op.op = CEPH_OSD_OP_SYNC_READ;
newop.op.extent.offset = 0;
newop.op.extent.length = 0;
result = do_osd_ops(ctx, nops);
osd_op.outdata = std::move(newop.outdata);
}
break;
case CEPH_OSD_OP_TMAPPUT:
tracepoint(osd, do_osd_op_pre_tmapput, soid.oid.name.c_str(), soid.snap.val);
if (pool.info.is_erasure()) {
result = -EOPNOTSUPP;
break;
}
{
//_dout_lock.Lock();
//osd_op.data.hexdump(*_dout);
//_dout_lock.Unlock();
// verify sort order
bool unsorted = false;
if (true) {
bufferlist header;
decode(header, bp);
uint32_t n;
decode(n, bp);
string last_key;
while (n--) {
string key;
decode(key, bp);
dout(10) << "tmapput key " << key << dendl;
bufferlist val;
decode(val, bp);
if (key < last_key) {
dout(10) << "TMAPPUT is unordered; resorting" << dendl;
unsorted = true;
break;
}
last_key = key;
}
}
// write it
vector<OSDOp> nops(1);
OSDOp& newop = nops[0];
newop.op.op = CEPH_OSD_OP_WRITEFULL;
newop.op.extent.offset = 0;
newop.op.extent.length = osd_op.indata.length();
newop.indata = osd_op.indata;
if (unsorted) {
bp = osd_op.indata.begin();
bufferlist header;
map<string, bufferlist> m;
decode(header, bp);
decode(m, bp);
ceph_assert(bp.end());
bufferlist newbl;
encode(header, newbl);
encode(m, newbl);
newop.indata = newbl;
}
result = do_osd_ops(ctx, nops);
ceph_assert(result == 0);
}
break;
case CEPH_OSD_OP_TMAPUP:
tracepoint(osd, do_osd_op_pre_tmapup, soid.oid.name.c_str(), soid.snap.val);
if (pool.info.is_erasure()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_write;
result = do_tmapup(ctx, bp, osd_op);
break;
case CEPH_OSD_OP_TMAP2OMAP:
++ctx->num_write;
tracepoint(osd, do_osd_op_pre_tmap2omap, soid.oid.name.c_str(), soid.snap.val);
result = do_tmap2omap(ctx, op.tmap2omap.flags);
break;
// OMAP Read ops
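// CEPH_OSD_OP_OMAPGETKEYS returns at most osd_max_omap_entries_per_request
// keys (also bounded by osd_max_omap_bytes_per_request), encoded as:
//   u32 num, then each key, then a bool 'truncated' flag.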
case CEPH_OSD_OP_OMAPGETKEYS:
++ctx->num_read;
{
string start_after;
uint64_t max_return;
try {
decode(start_after, bp);
decode(max_return, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_omapgetkeys, soid.oid.name.c_str(), soid.snap.val, "???", 0);
goto fail;
}
if (max_return > cct->_conf->osd_max_omap_entries_per_request) {
max_return = cct->_conf->osd_max_omap_entries_per_request;
}
tracepoint(osd, do_osd_op_pre_omapgetkeys, soid.oid.name.c_str(), soid.snap.val, start_after.c_str(), max_return);
bufferlist bl;
uint32_t num = 0;
bool truncated = false;
if (oi.is_omap()) {
ObjectMap::ObjectMapIterator iter = osd->store->get_omap_iterator(
ch, ghobject_t(soid)
);
ceph_assert(iter);
iter->upper_bound(start_after);
for (num = 0; iter->valid(); ++num, iter->next()) {
if (num >= max_return ||
bl.length() >= cct->_conf->osd_max_omap_bytes_per_request) {
truncated = true;
break;
}
encode(iter->key(), bl);
}
} // else return empty out_set
encode(num, osd_op.outdata);
osd_op.outdata.claim_append(bl);
encode(truncated, osd_op.outdata);
ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
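// CEPH_OSD_OP_OMAPGETVALS is the key/value variant: iteration starts just past
// start_after (or at filter_prefix if that sorts later) and stops once keys no
// longer match filter_prefix or the entry/byte limits are reached.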
case CEPH_OSD_OP_OMAPGETVALS:
++ctx->num_read;
{
string start_after;
uint64_t max_return;
string filter_prefix;
try {
decode(start_after, bp);
decode(max_return, bp);
decode(filter_prefix, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_omapgetvals, soid.oid.name.c_str(), soid.snap.val, "???", 0, "???");
goto fail;
}
if (max_return > cct->_conf->osd_max_omap_entries_per_request) {
max_return = cct->_conf->osd_max_omap_entries_per_request;
}
tracepoint(osd, do_osd_op_pre_omapgetvals, soid.oid.name.c_str(), soid.snap.val, start_after.c_str(), max_return, filter_prefix.c_str());
uint32_t num = 0;
bool truncated = false;
bufferlist bl;
if (oi.is_omap()) {
ObjectMap::ObjectMapIterator iter = osd->store->get_omap_iterator(
ch, ghobject_t(soid)
);
if (!iter) {
result = -ENOENT;
goto fail;
}
iter->upper_bound(start_after);
if (filter_prefix > start_after) iter->lower_bound(filter_prefix);
for (num = 0;
iter->valid() &&
iter->key().substr(0, filter_prefix.size()) == filter_prefix;
++num, iter->next()) {
dout(20) << "Found key " << iter->key() << dendl;
if (num >= max_return ||
bl.length() >= cct->_conf->osd_max_omap_bytes_per_request) {
truncated = true;
break;
}
encode(iter->key(), bl);
encode(iter->value(), bl);
}
} // else return empty out_set
encode(num, osd_op.outdata);
osd_op.outdata.claim_append(bl);
encode(truncated, osd_op.outdata);
ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
case CEPH_OSD_OP_OMAPGETHEADER:
tracepoint(osd, do_osd_op_pre_omapgetheader, soid.oid.name.c_str(), soid.snap.val);
if (!oi.is_omap()) {
// return empty header
break;
}
++ctx->num_read;
{
osd->store->omap_get_header(ch, ghobject_t(soid), &osd_op.outdata);
ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
++ctx->num_read;
{
set<string> keys_to_get;
try {
decode(keys_to_get, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_omapgetvalsbykeys, soid.oid.name.c_str(), soid.snap.val, "???");
goto fail;
}
tracepoint(osd, do_osd_op_pre_omapgetvalsbykeys, soid.oid.name.c_str(), soid.snap.val, list_entries(keys_to_get).c_str());
map<string, bufferlist> out;
if (oi.is_omap()) {
osd->store->omap_get_values(ch, ghobject_t(soid), keys_to_get, &out);
} // else return empty omap entries
encode(out, osd_op.outdata);
ctx->delta_stats.num_rd_kb += shift_round_up(osd_op.outdata.length(), 10);
ctx->delta_stats.num_rd++;
}
break;
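// CEPH_OSD_OP_OMAP_CMP evaluates client-supplied assertions (EQ/LT/GT) against
// the stored omap values; the first mismatch fails the op with -ECANCELED.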
case CEPH_OSD_OP_OMAP_CMP:
++ctx->num_read;
{
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
tracepoint(osd, do_osd_op_pre_omap_cmp, soid.oid.name.c_str(), soid.snap.val, "???");
break;
}
map<string, pair<bufferlist, int> > assertions;
try {
decode(assertions, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_omap_cmp, soid.oid.name.c_str(), soid.snap.val, "???");
goto fail;
}
tracepoint(osd, do_osd_op_pre_omap_cmp, soid.oid.name.c_str(), soid.snap.val, list_keys(assertions).c_str());
map<string, bufferlist> out;
if (oi.is_omap()) {
set<string> to_get;
for (map<string, pair<bufferlist, int> >::iterator i = assertions.begin();
i != assertions.end();
++i)
to_get.insert(i->first);
int r = osd->store->omap_get_values(ch, ghobject_t(soid),
to_get, &out);
if (r < 0) {
result = r;
break;
}
} // else leave out empty
// Should set num_rd_kb based on the encoded length of the map
ctx->delta_stats.num_rd++;
int r = 0;
bufferlist empty;
for (map<string, pair<bufferlist, int> >::iterator i = assertions.begin();
i != assertions.end();
++i) {
auto out_entry = out.find(i->first);
bufferlist &bl = (out_entry != out.end()) ?
out_entry->second : empty;
switch (i->second.second) {
case CEPH_OSD_CMPXATTR_OP_EQ:
if (!(bl == i->second.first)) {
r = -ECANCELED;
}
break;
case CEPH_OSD_CMPXATTR_OP_LT:
if (!(bl < i->second.first)) {
r = -ECANCELED;
}
break;
case CEPH_OSD_CMPXATTR_OP_GT:
if (!(bl > i->second.first)) {
r = -ECANCELED;
}
break;
default:
r = -EINVAL;
break;
}
if (r < 0)
break;
}
if (r < 0) {
result = r;
}
}
break;
// OMAP Write ops
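// CEPH_OSD_OP_OMAPSETVALS keeps the client's key/value map encoded as a raw
// bufferlist and hands it to the transaction unchanged; the object gains the
// OMAP flag and its omap digest is invalidated.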
case CEPH_OSD_OP_OMAPSETVALS:
if (!pool.info.supports_omap()) {
result = -EOPNOTSUPP;
tracepoint(osd, do_osd_op_pre_omapsetvals, soid.oid.name.c_str(), soid.snap.val);
break;
}
++ctx->num_write;
result = 0;
{
maybe_create_new_object(ctx);
bufferlist to_set_bl;
try {
decode_str_str_map_to_bl(bp, &to_set_bl);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_omapsetvals, soid.oid.name.c_str(), soid.snap.val);
goto fail;
}
tracepoint(osd, do_osd_op_pre_omapsetvals, soid.oid.name.c_str(), soid.snap.val);
if (cct->_conf->subsys.should_gather<dout_subsys, 20>()) {
dout(20) << "setting vals: " << dendl;
map<string,bufferlist> to_set;
bufferlist::const_iterator pt = to_set_bl.begin();
decode(to_set, pt);
for (map<string, bufferlist>::iterator i = to_set.begin();
i != to_set.end();
++i) {
dout(20) << "\t" << i->first << dendl;
}
}
t->omap_setkeys(soid, to_set_bl);
ctx->clean_regions.mark_omap_dirty();
ctx->delta_stats.num_wr++;
ctx->delta_stats.num_wr_kb += shift_round_up(to_set_bl.length(), 10);
}
obs.oi.set_flag(object_info_t::FLAG_OMAP);
obs.oi.clear_omap_digest();
break;
case CEPH_OSD_OP_OMAPSETHEADER:
tracepoint(osd, do_osd_op_pre_omapsetheader, soid.oid.name.c_str(), soid.snap.val);
if (!pool.info.supports_omap()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_write;
result = 0;
{
maybe_create_new_object(ctx);
t->omap_setheader(soid, osd_op.indata);
ctx->clean_regions.mark_omap_dirty();
ctx->delta_stats.num_wr++;
}
obs.oi.set_flag(object_info_t::FLAG_OMAP);
obs.oi.clear_omap_digest();
break;
case CEPH_OSD_OP_OMAPCLEAR:
tracepoint(osd, do_osd_op_pre_omapclear, soid.oid.name.c_str(), soid.snap.val);
if (!pool.info.supports_omap()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_write;
result = 0;
{
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
break;
}
if (oi.is_omap()) {
t->omap_clear(soid);
ctx->clean_regions.mark_omap_dirty();
ctx->delta_stats.num_wr++;
obs.oi.clear_omap_digest();
obs.oi.clear_flag(object_info_t::FLAG_OMAP);
}
}
break;
case CEPH_OSD_OP_OMAPRMKEYS:
if (!pool.info.supports_omap()) {
result = -EOPNOTSUPP;
tracepoint(osd, do_osd_op_pre_omaprmkeys, soid.oid.name.c_str(), soid.snap.val);
break;
}
++ctx->num_write;
result = 0;
{
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
tracepoint(osd, do_osd_op_pre_omaprmkeys, soid.oid.name.c_str(), soid.snap.val);
break;
}
bufferlist to_rm_bl;
try {
decode_str_set_to_bl(bp, &to_rm_bl);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd, do_osd_op_pre_omaprmkeys, soid.oid.name.c_str(), soid.snap.val);
goto fail;
}
tracepoint(osd, do_osd_op_pre_omaprmkeys, soid.oid.name.c_str(), soid.snap.val);
t->omap_rmkeys(soid, to_rm_bl);
ctx->clean_regions.mark_omap_dirty();
ctx->delta_stats.num_wr++;
}
obs.oi.clear_omap_digest();
break;
case CEPH_OSD_OP_OMAPRMKEYRANGE:
tracepoint(osd, do_osd_op_pre_omaprmkeyrange, soid.oid.name.c_str(), soid.snap.val);
if (!pool.info.supports_omap()) {
result = -EOPNOTSUPP;
break;
}
++ctx->num_write;
result = 0;
{
if (!obs.exists || oi.is_whiteout()) {
result = -ENOENT;
break;
}
std::string key_begin, key_end;
try {
decode(key_begin, bp);
decode(key_end, bp);
} catch (ceph::buffer::error& e) {
result = -EINVAL;
goto fail;
}
t->omap_rmkeyrange(soid, key_begin, key_end);
ctx->clean_regions.mark_omap_dirty();
ctx->delta_stats.num_wr++;
}
obs.oi.clear_omap_digest();
break;
case CEPH_OSD_OP_COPY_GET:
++ctx->num_read;
tracepoint(osd, do_osd_op_pre_copy_get, soid.oid.name.c_str(),
soid.snap.val);
if (op_finisher == nullptr) {
result = do_copy_get(ctx, bp, osd_op, ctx->obc);
} else {
result = op_finisher->execute();
}
break;
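// COPY_FROM/COPY_FROM2 run in two passes: the first decodes the source and
// starts an asynchronous copy (start_copy), returning -EINPROGRESS; when the
// copy completes the op is re-executed and the registered CopyFromFinisher
// applies the result.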
case CEPH_OSD_OP_COPY_FROM:
case CEPH_OSD_OP_COPY_FROM2:
++ctx->num_write;
result = 0;
{
object_t src_name;
object_locator_t src_oloc;
uint32_t truncate_seq = 0;
uint64_t truncate_size = 0;
bool have_truncate = false;
snapid_t src_snapid = (uint64_t)op.copy_from.snapid;
version_t src_version = op.copy_from.src_version;
if ((op.op == CEPH_OSD_OP_COPY_FROM2) &&
(op.copy_from.flags & ~CEPH_OSD_COPY_FROM_FLAGS)) {
dout(20) << "invalid copy-from2 flags 0x"
<< std::hex << (int)op.copy_from.flags << std::dec << dendl;
result = -EINVAL;
break;
}
try {
decode(src_name, bp);
decode(src_oloc, bp);
// check if client sent us truncate_seq and truncate_size
if ((op.op == CEPH_OSD_OP_COPY_FROM2) &&
(op.copy_from.flags & CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ)) {
decode(truncate_seq, bp);
decode(truncate_size, bp);
have_truncate = true;
}
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
tracepoint(osd,
do_osd_op_pre_copy_from,
soid.oid.name.c_str(),
soid.snap.val,
"???",
0,
"???",
"???",
0,
src_snapid,
src_version);
goto fail;
}
tracepoint(osd,
do_osd_op_pre_copy_from,
soid.oid.name.c_str(),
soid.snap.val,
src_name.name.c_str(),
src_oloc.pool,
src_oloc.key.c_str(),
src_oloc.nspace.c_str(),
src_oloc.hash,
src_snapid,
src_version);
if (op_finisher == nullptr) {
// start
pg_t raw_pg;
get_osdmap()->object_locator_to_pg(src_name, src_oloc, raw_pg);
hobject_t src(src_name, src_oloc.key, src_snapid,
raw_pg.ps(), raw_pg.pool(),
src_oloc.nspace);
if (src == soid) {
dout(20) << " copy from self is invalid" << dendl;
result = -EINVAL;
break;
}
CopyFromCallback *cb = new CopyFromCallback(ctx, osd_op);
if (have_truncate)
cb->set_truncate(truncate_seq, truncate_size);
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new CopyFromFinisher(cb));
start_copy(cb, ctx->obc, src, src_oloc, src_version,
op.copy_from.flags,
false,
op.copy_from.src_fadvise_flags,
op.flags);
result = -EINPROGRESS;
} else {
// finish
result = op_finisher->execute();
ceph_assert(result == 0);
// COPY_FROM cannot be executed multiple times -- it must restart
ctx->op_finishers.erase(ctx->current_osd_subop_num);
}
}
break;
default:
tracepoint(osd, do_osd_op_pre_unknown, soid.oid.name.c_str(), soid.snap.val, op.op, ceph_osd_op_name(op.op));
dout(1) << "unrecognized osd op " << op.op
<< " " << ceph_osd_op_name(op.op)
<< dendl;
result = -EOPNOTSUPP;
}
fail:
osd_op.rval = result;
tracepoint(osd, do_osd_op_post, soid.oid.name.c_str(), soid.snap.val, op.op, ceph_osd_op_name(op.op), op.flags, result);
if (result < 0 && (op.flags & CEPH_OSD_OP_FLAG_FAILOK) &&
result != -EAGAIN && result != -EINPROGRESS)
result = 0;
if (result < 0)
break;
}
if (result < 0) {
dout(10) << __func__ << " error: " << cpp_strerror(result) << dendl;
}
return result;
}
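// Read the whole object through a synthesized TMAPGET and split the result
// into the tmap header and the key/value payload.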
int PrimaryLogPG::_get_tmap(OpContext *ctx, bufferlist *header, bufferlist *vals)
{
if (ctx->new_obs.oi.size == 0) {
dout(20) << "unable to get tmap for zero sized " << ctx->new_obs.oi.soid << dendl;
return -ENODATA;
}
vector<OSDOp> nops(1);
OSDOp &newop = nops[0];
newop.op.op = CEPH_OSD_OP_TMAPGET;
do_osd_ops(ctx, nops);
try {
bufferlist::const_iterator i = newop.outdata.begin();
decode(*header, i);
(*vals).substr_of(newop.outdata, i.get_off(), i.get_remaining());
} catch (...) {
dout(20) << "unsuccessful at decoding tmap for " << ctx->new_obs.oi.soid
<< dendl;
return -EINVAL;
}
dout(20) << "successful at decoding tmap for " << ctx->new_obs.oi.soid
<< dendl;
return 0;
}
int PrimaryLogPG::_verify_no_head_clones(const hobject_t& soid,
const SnapSet& ss)
{
// verify that all clones have been evicted
dout(20) << __func__ << " verifying clones are absent "
<< ss << dendl;
for (vector<snapid_t>::const_iterator p = ss.clones.begin();
p != ss.clones.end();
++p) {
hobject_t clone_oid = soid;
clone_oid.snap = *p;
if (is_missing_object(clone_oid))
return -EBUSY;
ObjectContextRef clone_obc = get_object_context(clone_oid, false);
if (clone_obc && clone_obc->obs.exists) {
dout(10) << __func__ << " cannot evict head before clone "
<< clone_oid << dendl;
return -EBUSY;
}
if (copy_ops.count(clone_oid)) {
dout(10) << __func__ << " cannot evict head, pending promote on clone "
<< clone_oid << dendl;
return -EBUSY;
}
}
return 0;
}
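// Delete the head object, or record a whiteout instead when the pool is a
// cache tier or clones still depend on the head (unless no_whiteout forces a
// real delete).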
inline int PrimaryLogPG::_delete_oid(
OpContext *ctx,
bool no_whiteout, // no whiteouts, no matter what.
bool try_no_whiteout) // try not to whiteout
{
SnapSet& snapset = ctx->new_snapset;
ObjectState& obs = ctx->new_obs;
object_info_t& oi = obs.oi;
const hobject_t& soid = oi.soid;
PGTransaction* t = ctx->op_t.get();
// cache: set whiteout on delete?
bool whiteout = false;
if (pool.info.cache_mode != pg_pool_t::CACHEMODE_NONE
&& !no_whiteout
&& !try_no_whiteout) {
whiteout = true;
}
// in luminous or later, we can't delete the head if there are
// clones. we trust the caller passing no_whiteout has already
// verified they don't exist.
if (!snapset.clones.empty() ||
(!ctx->snapc.snaps.empty() && ctx->snapc.snaps[0] > snapset.seq)) {
if (no_whiteout) {
dout(20) << __func__ << " has or will have clones but no_whiteout=1"
<< dendl;
} else {
dout(20) << __func__ << " has or will have clones; will whiteout"
<< dendl;
whiteout = true;
}
}
dout(20) << __func__ << " " << soid << " whiteout=" << (int)whiteout
<< " no_whiteout=" << (int)no_whiteout
<< " try_no_whiteout=" << (int)try_no_whiteout
<< dendl;
if (!obs.exists || (obs.oi.is_whiteout() && whiteout))
return -ENOENT;
t->remove(soid);
if (oi.size > 0) {
interval_set<uint64_t> ch;
ch.insert(0, oi.size);
ctx->modified_ranges.union_of(ch);
ctx->clean_regions.mark_data_region_dirty(0, oi.size);
}
ctx->clean_regions.mark_omap_dirty();
ctx->delta_stats.num_wr++;
if (soid.is_snap()) {
ceph_assert(ctx->obc->ssc->snapset.clone_overlap.count(soid.snap));
ctx->delta_stats.num_bytes -= ctx->obc->ssc->snapset.get_clone_bytes(soid.snap);
} else {
ctx->delta_stats.num_bytes -= oi.size;
}
oi.size = 0;
oi.new_object();
// disconnect all watchers
for (map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator p =
oi.watchers.begin();
p != oi.watchers.end();
++p) {
dout(20) << __func__ << " will disconnect watcher " << p->first << dendl;
ctx->watch_disconnects.push_back(
watch_disconnect_t(p->first.first, p->first.second, true));
}
oi.watchers.clear();
if (whiteout) {
dout(20) << __func__ << " setting whiteout on " << soid << dendl;
oi.set_flag(object_info_t::FLAG_WHITEOUT);
ctx->delta_stats.num_whiteouts++;
t->create(soid);
osd->logger->inc(l_osd_tier_whiteout);
return 0;
}
if (oi.has_manifest()) {
ctx->delta_stats.num_objects_manifest--;
dec_all_refcount_manifest(oi, ctx);
}
// delete the head
ctx->delta_stats.num_objects--;
if (soid.is_snap())
ctx->delta_stats.num_object_clones--;
if (oi.is_whiteout()) {
dout(20) << __func__ << " deleting whiteout on " << soid << dendl;
ctx->delta_stats.num_whiteouts--;
oi.clear_flag(object_info_t::FLAG_WHITEOUT);
}
if (oi.is_cache_pinned()) {
ctx->delta_stats.num_objects_pinned--;
}
obs.exists = false;
return 0;
}
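// Roll the head back to the requested snap: find the clone's object context,
// let the cache/manifest machinery block or promote if necessary, delete the
// head when the snap holds no data, otherwise clone the snapshot's contents
// back over the head via _do_rollback_to.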
int PrimaryLogPG::_rollback_to(OpContext *ctx, OSDOp& op)
{
ObjectState& obs = ctx->new_obs;
object_info_t& oi = obs.oi;
const hobject_t& soid = oi.soid;
snapid_t snapid = (uint64_t)op.op.snap.snapid;
hobject_t missing_oid;
dout(10) << "_rollback_to " << soid << " snapid " << snapid << dendl;
ObjectContextRef rollback_to;
int ret = find_object_context(
hobject_t(soid.oid, soid.get_key(), snapid, soid.get_hash(), info.pgid.pool(),
soid.get_namespace()),
&rollback_to, false, false, &missing_oid);
if (ret == -EAGAIN) {
/* clone must be missing */
ceph_assert(is_degraded_or_backfilling_object(missing_oid) || is_degraded_on_async_recovery_target(missing_oid));
dout(20) << "_rollback_to attempted to roll back to a missing or backfilling clone "
<< missing_oid << " (requested snapid: ) " << snapid << dendl;
block_write_on_degraded_snap(missing_oid, ctx->op);
return ret;
}
{
ObjectContextRef promote_obc;
cache_result_t tier_mode_result;
if (obs.exists && obs.oi.has_manifest()) {
/*
 * In the case of a manifest object, the object_info exists on the base tier
 * at all times, so promote_obc should be equal to rollback_to.
 */
promote_obc = rollback_to;
tier_mode_result =
maybe_handle_manifest_detail(
ctx->op,
true,
rollback_to);
} else {
tier_mode_result =
maybe_handle_cache_detail(
ctx->op,
true,
rollback_to,
ret,
missing_oid,
true,
false,
&promote_obc);
}
switch (tier_mode_result) {
case cache_result_t::NOOP:
break;
case cache_result_t::BLOCKED_PROMOTE:
ceph_assert(promote_obc);
block_write_on_snap_rollback(soid, promote_obc, ctx->op);
return -EAGAIN;
case cache_result_t::BLOCKED_FULL:
block_write_on_full_cache(soid, ctx->op);
return -EAGAIN;
case cache_result_t::REPLIED_WITH_EAGAIN:
ceph_abort_msg("this can't happen, no rollback on replica");
default:
ceph_abort_msg("must promote was set, other values are not valid");
return -EAGAIN;
}
}
if (ret == -ENOENT || (rollback_to && rollback_to->obs.oi.is_whiteout())) {
// there's no snapshot here, or there's no object.
// if there's no snapshot, we delete the object; otherwise, do nothing.
dout(20) << "_rollback_to deleting head on " << soid.oid
<< " because got ENOENT|whiteout on find_object_context" << dendl;
if (ctx->obc->obs.oi.watchers.size()) {
// Cannot delete an object with watchers
ret = -EBUSY;
} else {
_delete_oid(ctx, false, false);
ret = 0;
}
} else if (ret) {
// unexpected: find_object_context should not return any other error here.
ceph_abort_msg("unexpected error code in _rollback_to");
} else { //we got our context, let's use it to do the rollback!
hobject_t& rollback_to_sobject = rollback_to->obs.oi.soid;
if (is_degraded_or_backfilling_object(rollback_to_sobject) ||
is_degraded_on_async_recovery_target(rollback_to_sobject)) {
dout(20) << "_rollback_to attempted to roll back to a degraded object "
<< rollback_to_sobject << " (requested snapid: ) " << snapid << dendl;
block_write_on_degraded_snap(rollback_to_sobject, ctx->op);
ret = -EAGAIN;
} else if (rollback_to->obs.oi.soid.snap == CEPH_NOSNAP) {
// rolling back to the head; we just need to clone it.
ctx->modify = true;
} else {
if (rollback_to->obs.oi.has_manifest() && rollback_to->obs.oi.manifest.is_chunked()) {
/*
* looking at the following case, the foo head needs the reference of chunk4 and chunk5
* in case snap[1] is removed.
*
* Before rollback to snap[1]:
*
* foo snap[1]: [chunk4] [chunk5]
* foo snap[0]: [ chunk2 ]
* foo head : [chunk1] [chunk3]
*
* After:
*
* foo snap[1]: [chunk4] [chunk5]
* foo snap[0]: [ chunk2 ]
* foo head : [chunk4] [chunk5]
*
*/
OpFinisher* op_finisher = nullptr;
auto op_finisher_it = ctx->op_finishers.find(ctx->current_osd_subop_num);
if (op_finisher_it != ctx->op_finishers.end()) {
op_finisher = op_finisher_it->second.get();
}
if (!op_finisher) {
bool need_inc_ref = inc_refcount_by_set(ctx, rollback_to->obs.oi.manifest, op);
if (need_inc_ref) {
ceph_assert(op_finisher_it == ctx->op_finishers.end());
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new SetManifestFinisher(op));
return -EINPROGRESS;
}
} else {
op_finisher->execute();
ctx->op_finishers.erase(ctx->current_osd_subop_num);
}
}
_do_rollback_to(ctx, rollback_to, op);
}
}
return ret;
}
void PrimaryLogPG::_do_rollback_to(OpContext *ctx, ObjectContextRef rollback_to,
OSDOp& op)
{
SnapSet& snapset = ctx->new_snapset;
ObjectState& obs = ctx->new_obs;
object_info_t& oi = obs.oi;
const hobject_t& soid = oi.soid;
PGTransaction* t = ctx->op_t.get();
snapid_t snapid = (uint64_t)op.op.snap.snapid;
hobject_t& rollback_to_sobject = rollback_to->obs.oi.soid;
/* 1) Delete current head
* 2) Clone correct snapshot into head
* 3) Calculate clone_overlaps by following overlaps
* forward from rollback snapshot */
dout(10) << "_do_rollback_to deleting " << soid.oid
<< " and rolling back to old snap" << dendl;
if (obs.exists) {
t->remove(soid);
if (obs.oi.has_manifest()) {
dec_all_refcount_manifest(obs.oi, ctx);
oi.manifest.clear();
oi.manifest.type = object_manifest_t::TYPE_NONE;
oi.clear_flag(object_info_t::FLAG_MANIFEST);
ctx->delta_stats.num_objects_manifest--;
ctx->cache_operation = true; // do not trigger the ref function to recalculate refcounts
}
}
t->clone(soid, rollback_to_sobject);
t->add_obc(rollback_to);
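// Compute the ranges the head still shares with the rollback clone by
// intersecting clone_overlap entries from the rollback snapshot forward;
// anything in the current head outside that intersection is recorded as
// modified.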
map<snapid_t, interval_set<uint64_t> >::iterator iter =
snapset.clone_overlap.lower_bound(snapid);
ceph_assert(iter != snapset.clone_overlap.end());
interval_set<uint64_t> overlaps = iter->second;
for ( ;
iter != snapset.clone_overlap.end();
++iter)
overlaps.intersection_of(iter->second);
if (obs.oi.size > 0) {
interval_set<uint64_t> modified;
modified.insert(0, obs.oi.size);
overlaps.intersection_of(modified);
modified.subtract(overlaps);
ctx->modified_ranges.union_of(modified);
}
// Adjust the cached objectcontext
maybe_create_new_object(ctx, true);
ctx->delta_stats.num_bytes -= obs.oi.size;
ctx->delta_stats.num_bytes += rollback_to->obs.oi.size;
ctx->clean_regions.mark_data_region_dirty(0, std::max(obs.oi.size, rollback_to->obs.oi.size));
ctx->clean_regions.mark_omap_dirty();
obs.oi.size = rollback_to->obs.oi.size;
if (rollback_to->obs.oi.is_data_digest())
obs.oi.set_data_digest(rollback_to->obs.oi.data_digest);
else
obs.oi.clear_data_digest();
if (rollback_to->obs.oi.is_omap_digest())
obs.oi.set_omap_digest(rollback_to->obs.oi.omap_digest);
else
obs.oi.clear_omap_digest();
if (rollback_to->obs.oi.has_manifest() && rollback_to->obs.oi.manifest.is_chunked()) {
obs.oi.set_flag(object_info_t::FLAG_MANIFEST);
obs.oi.manifest.type = rollback_to->obs.oi.manifest.type;
obs.oi.manifest.chunk_map = rollback_to->obs.oi.manifest.chunk_map;
ctx->cache_operation = true;
ctx->delta_stats.num_objects_manifest++;
}
if (rollback_to->obs.oi.is_omap()) {
dout(10) << __func__ << " setting omap flag on " << obs.oi.soid << dendl;
obs.oi.set_flag(object_info_t::FLAG_OMAP);
} else {
dout(10) << __func__ << " clearing omap flag on " << obs.oi.soid << dendl;
obs.oi.clear_flag(object_info_t::FLAG_OMAP);
}
}
void PrimaryLogPG::_make_clone(
OpContext *ctx,
PGTransaction* t,
ObjectContextRef clone_obc,
const hobject_t& head, const hobject_t& coid,
object_info_t *poi)
{
bufferlist bv;
encode(*poi, bv, get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
t->clone(coid, head);
setattr_maybe_cache(clone_obc, t, OI_ATTR, bv);
rmattr_maybe_cache(clone_obc, t, SS_ATTR);
}
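// If the head exists (and this is not a cache-internal op) and the snap
// context carries snaps newer than the snapset's seq, clone the head into a
// new clone object (coid.snap = snapc.seq), account for it in the stats and
// snapset, then trim the newest clone_overlap entry by the ranges this op
// modifies.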
void PrimaryLogPG::make_writeable(OpContext *ctx)
{
const hobject_t& soid = ctx->obs->oi.soid;
SnapContext& snapc = ctx->snapc;
// clone?
ceph_assert(soid.snap == CEPH_NOSNAP);
dout(20) << "make_writeable " << soid << " snapset=" << ctx->new_snapset
<< " snapc=" << snapc << dendl;
bool was_dirty = ctx->obc->obs.oi.is_dirty();
if (ctx->new_obs.exists) {
// we will mark the object dirty
if (ctx->undirty && was_dirty) {
dout(20) << " clearing DIRTY flag" << dendl;
ceph_assert(ctx->new_obs.oi.is_dirty());
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY);
--ctx->delta_stats.num_objects_dirty;
osd->logger->inc(l_osd_tier_clean);
} else if (!was_dirty && !ctx->undirty) {
dout(20) << " setting DIRTY flag" << dendl;
ctx->new_obs.oi.set_flag(object_info_t::FLAG_DIRTY);
++ctx->delta_stats.num_objects_dirty;
osd->logger->inc(l_osd_tier_dirty);
}
} else {
if (was_dirty) {
dout(20) << " deletion, decrementing num_dirty and clearing flag" << dendl;
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY);
--ctx->delta_stats.num_objects_dirty;
}
}
if ((ctx->new_obs.exists &&
ctx->new_obs.oi.is_omap()) &&
(!ctx->obc->obs.exists ||
!ctx->obc->obs.oi.is_omap())) {
++ctx->delta_stats.num_objects_omap;
}
if ((!ctx->new_obs.exists ||
!ctx->new_obs.oi.is_omap()) &&
(ctx->obc->obs.exists &&
ctx->obc->obs.oi.is_omap())) {
--ctx->delta_stats.num_objects_omap;
}
if (ctx->new_snapset.seq > snapc.seq) {
dout(10) << " op snapset is old" << dendl;
}
if ((ctx->obs->exists && !ctx->obs->oi.is_whiteout()) && // head exist(ed)
snapc.snaps.size() && // there are snaps
!ctx->cache_operation &&
snapc.snaps[0] > ctx->new_snapset.seq) { // existing object is old
// clone
hobject_t coid = soid;
coid.snap = snapc.seq;
const auto snaps = [&] {
auto last = find_if_not(
begin(snapc.snaps), end(snapc.snaps),
[&](snapid_t snap_id) { return snap_id > ctx->new_snapset.seq; });
return vector<snapid_t>{begin(snapc.snaps), last};
}();
// prepare clone
object_info_t static_snap_oi(coid);
object_info_t *snap_oi;
if (is_primary()) {
ctx->clone_obc = object_contexts.lookup_or_create(static_snap_oi.soid);
ctx->clone_obc->destructor_callback =
new C_PG_ObjectContext(this, ctx->clone_obc.get());
ctx->clone_obc->obs.oi = static_snap_oi;
ctx->clone_obc->obs.exists = true;
ctx->clone_obc->ssc = ctx->obc->ssc;
ctx->clone_obc->ssc->ref++;
if (pool.info.is_erasure())
ctx->clone_obc->attr_cache = ctx->obc->attr_cache;
snap_oi = &ctx->clone_obc->obs.oi;
if (ctx->obc->obs.oi.has_manifest()) {
if ((ctx->obc->obs.oi.flags & object_info_t::FLAG_REDIRECT_HAS_REFERENCE) &&
ctx->obc->obs.oi.manifest.is_redirect()) {
snap_oi->set_flag(object_info_t::FLAG_MANIFEST);
snap_oi->manifest.type = object_manifest_t::TYPE_REDIRECT;
snap_oi->manifest.redirect_target = ctx->obc->obs.oi.manifest.redirect_target;
} else if (ctx->obc->obs.oi.manifest.is_chunked()) {
snap_oi->set_flag(object_info_t::FLAG_MANIFEST);
snap_oi->manifest.type = object_manifest_t::TYPE_CHUNKED;
snap_oi->manifest.chunk_map = ctx->obc->obs.oi.manifest.chunk_map;
} else {
ceph_abort_msg("unrecognized manifest type");
}
}
bool got = ctx->lock_manager.get_write_greedy(
coid,
ctx->clone_obc,
ctx->op);
ceph_assert(got);
dout(20) << " got greedy write on clone_obc " << *ctx->clone_obc << dendl;
} else {
snap_oi = &static_snap_oi;
}
snap_oi->version = ctx->at_version;
snap_oi->prior_version = ctx->obs->oi.version;
snap_oi->copy_user_bits(ctx->obs->oi);
_make_clone(ctx, ctx->op_t.get(), ctx->clone_obc, soid, coid, snap_oi);
ctx->delta_stats.num_objects++;
if (snap_oi->is_dirty()) {
ctx->delta_stats.num_objects_dirty++;
osd->logger->inc(l_osd_tier_dirty);
}
if (snap_oi->is_omap())
ctx->delta_stats.num_objects_omap++;
if (snap_oi->is_cache_pinned())
ctx->delta_stats.num_objects_pinned++;
if (snap_oi->has_manifest())
ctx->delta_stats.num_objects_manifest++;
ctx->delta_stats.num_object_clones++;
ctx->new_snapset.clones.push_back(coid.snap);
ctx->new_snapset.clone_size[coid.snap] = ctx->obs->oi.size;
ctx->new_snapset.clone_snaps[coid.snap] = snaps;
// clone_overlap should contain an entry for each clone
// (an empty interval_set if there is no overlap)
ctx->new_snapset.clone_overlap[coid.snap];
if (ctx->obs->oi.size) {
ctx->new_snapset.clone_overlap[coid.snap].insert(0, ctx->obs->oi.size);
}
// log clone
dout(10) << " cloning v " << ctx->obs->oi.version
<< " to " << coid << " v " << ctx->at_version
<< " snaps=" << snaps
<< " snapset=" << ctx->new_snapset << dendl;
ctx->log.push_back(pg_log_entry_t(
pg_log_entry_t::CLONE, coid, ctx->at_version,
ctx->obs->oi.version,
ctx->obs->oi.user_version,
osd_reqid_t(), ctx->new_obs.oi.mtime, 0));
encode(snaps, ctx->log.back().snaps);
ctx->at_version.version++;
}
// update most recent clone_overlap and usage stats
if (ctx->new_snapset.clones.size() > 0) {
// clone_overlap records the byte ranges shared between the head and each clone.
// We need to check whether the most recent clone still exists: if it has been
// evicted it is not included in the stats, but its clone_overlap entry still
// exists in the snapset, so we must update the overlap to keep it consistent.
hobject_t last_clone_oid = soid;
last_clone_oid.snap = ctx->new_snapset.clone_overlap.rbegin()->first;
interval_set<uint64_t> &newest_overlap =
ctx->new_snapset.clone_overlap.rbegin()->second;
ctx->modified_ranges.intersection_of(newest_overlap);
if (is_present_clone(last_clone_oid)) {
// modified_ranges is still in use by the clone
ctx->delta_stats.num_bytes += ctx->modified_ranges.size();
}
newest_overlap.subtract(ctx->modified_ranges);
}
if (snapc.seq > ctx->new_snapset.seq) {
// update snapset with latest snap context
ctx->new_snapset.seq = snapc.seq;
if (get_osdmap()->require_osd_release < ceph_release_t::octopus) {
ctx->new_snapset.snaps = snapc.snaps;
} else {
ctx->new_snapset.snaps.clear();
}
}
dout(20) << "make_writeable " << soid
<< " done, snapset=" << ctx->new_snapset << dendl;
}
void PrimaryLogPG::write_update_size_and_usage(object_stat_sum_t& delta_stats, object_info_t& oi,
interval_set<uint64_t>& modified, uint64_t offset,
uint64_t length, bool write_full)
{
interval_set<uint64_t> ch;
if (write_full) {
if (oi.size)
ch.insert(0, oi.size);
} else if (length)
ch.insert(offset, length);
modified.union_of(ch);
if (write_full ||
(offset + length > oi.size && length)) {
uint64_t new_size = offset + length;
delta_stats.num_bytes -= oi.size;
delta_stats.num_bytes += new_size;
oi.size = new_size;
}
delta_stats.num_wr++;
delta_stats.num_wr_kb += shift_round_up(length, 10);
}
void PrimaryLogPG::truncate_update_size_and_usage(
object_stat_sum_t& delta_stats,
object_info_t& oi,
uint64_t truncate_size)
{
if (oi.size != truncate_size) {
delta_stats.num_bytes -= oi.size;
delta_stats.num_bytes += truncate_size;
oi.size = truncate_size;
}
}
void PrimaryLogPG::complete_disconnect_watches(
ObjectContextRef obc,
const list<watch_disconnect_t> &to_disconnect)
{
for (list<watch_disconnect_t>::const_iterator i =
to_disconnect.begin();
i != to_disconnect.end();
++i) {
pair<uint64_t, entity_name_t> watcher(i->cookie, i->name);
auto watchers_entry = obc->watchers.find(watcher);
if (watchers_entry != obc->watchers.end()) {
WatchRef watch = watchers_entry->second;
dout(10) << "do_osd_op_effects disconnect watcher " << watcher << dendl;
obc->watchers.erase(watcher);
watch->remove(i->send_disconnect);
} else {
dout(10) << "do_osd_op_effects disconnect failed to find watcher "
<< watcher << dendl;
}
}
}
void PrimaryLogPG::do_osd_op_effects(OpContext *ctx, const ConnectionRef& conn)
{
entity_name_t entity = ctx->reqid.name;
dout(15) << "do_osd_op_effects " << entity << " con " << conn.get() << dendl;
// disconnects first
complete_disconnect_watches(ctx->obc, ctx->watch_disconnects);
ceph_assert(conn);
auto session = conn->get_priv();
for (list<pair<watch_info_t,bool> >::iterator i = ctx->watch_connects.begin();
i != ctx->watch_connects.end();
++i) {
pair<uint64_t, entity_name_t> watcher(i->first.cookie, entity);
dout(15) << "do_osd_op_effects applying watch connect on session "
<< (session ? session.get() : nullptr) << " watcher " << watcher
<< dendl;
WatchRef watch;
if (ctx->obc->watchers.count(watcher)) {
dout(15) << "do_osd_op_effects found existing watch watcher " << watcher
<< dendl;
watch = ctx->obc->watchers[watcher];
} else {
dout(15) << "do_osd_op_effects new watcher " << watcher
<< dendl;
watch = Watch::makeWatchRef(
this, osd, ctx->obc, i->first.timeout_seconds,
i->first.cookie, entity, conn->get_peer_addr());
ctx->obc->watchers.insert(
make_pair(
watcher,
watch));
}
watch->connect(conn, i->second);
}
for (list<notify_info_t>::iterator p = ctx->notifies.begin();
p != ctx->notifies.end();
++p) {
dout(10) << "do_osd_op_effects, notify " << *p << dendl;
NotifyRef notif(
Notify::makeNotifyRef(
conn,
ctx->reqid.name.num(),
p->bl,
p->timeout,
p->cookie,
p->notify_id,
ctx->obc->obs.oi.user_version,
osd));
for (map<pair<uint64_t, entity_name_t>, WatchRef>::iterator i =
ctx->obc->watchers.begin();
i != ctx->obc->watchers.end();
++i) {
dout(10) << "starting notify on watch " << i->first << dendl;
i->second->start_notify(notif);
}
notif->init();
}
for (list<OpContext::NotifyAck>::iterator p = ctx->notify_acks.begin();
p != ctx->notify_acks.end();
++p) {
if (p->watch_cookie)
dout(10) << "notify_ack " << make_pair(*(p->watch_cookie), p->notify_id) << dendl;
else
dout(10) << "notify_ack " << make_pair("NULL", p->notify_id) << dendl;
for (map<pair<uint64_t, entity_name_t>, WatchRef>::iterator i =
ctx->obc->watchers.begin();
i != ctx->obc->watchers.end();
++i) {
if (i->first.second != entity) continue;
if (p->watch_cookie &&
*(p->watch_cookie) != i->first.first) continue;
dout(10) << "acking notify on watch " << i->first << dendl;
i->second->notify_ack(p->notify_id, p->reply_bl);
}
}
}
hobject_t PrimaryLogPG::generate_temp_object(const hobject_t& target)
{
ostringstream ss;
ss << "temp_" << info.pgid << "_" << get_role()
<< "_" << osd->monc->get_global_id() << "_" << (++temp_seq);
hobject_t hoid = target.make_temp_hobject(ss.str());
dout(20) << __func__ << " " << hoid << dendl;
return hoid;
}
hobject_t PrimaryLogPG::get_temp_recovery_object(
const hobject_t& target,
eversion_t version)
{
ostringstream ss;
ss << "temp_recovering_" << info.pgid // (note this includes the shardid)
<< "_" << version
<< "_" << info.history.same_interval_since
<< "_" << target.snap;
// pgid + version + interval + snapid is unique, and short
hobject_t hoid = target.make_temp_hobject(ss.str());
dout(20) << __func__ << " " << hoid << dendl;
return hoid;
}
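// Run the ops and turn the result into a transaction: failed writes are still
// logged (for dup-op detection), empty transactions short-circuit, pool-full
// conditions are enforced, the head is cloned when a new snap requires it, and
// finish_ctx stamps versions and appends the pg_log entry.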
int PrimaryLogPG::prepare_transaction(OpContext *ctx)
{
ceph_assert(!ctx->ops->empty());
// valid snap context?
if (!ctx->snapc.is_valid()) {
dout(10) << " invalid snapc " << ctx->snapc << dendl;
return -EINVAL;
}
// prepare the actual mutation
int result = do_osd_ops(ctx, *ctx->ops);
if (result < 0) {
if (ctx->op->may_write() &&
get_osdmap()->require_osd_release >= ceph_release_t::kraken) {
// need to save the error code in the pg log, to detect dup ops,
// but do nothing else
ctx->update_log_only = true;
}
return result;
}
// read-op? write-op noop? done?
if (ctx->op_t->empty() && !ctx->modify) {
if (ctx->pending_async_reads.empty())
unstable_stats.add(ctx->delta_stats);
if (ctx->op->may_write() &&
get_osdmap()->require_osd_release >= ceph_release_t::kraken) {
ctx->update_log_only = true;
}
return result;
}
// check for full
if ((ctx->delta_stats.num_bytes > 0 ||
ctx->delta_stats.num_objects > 0) && // FIXME: keys?
pool.info.has_flag(pg_pool_t::FLAG_FULL)) {
auto m = ctx->op->get_req<MOSDOp>();
if (ctx->reqid.name.is_mds() || // FIXME: ignore MDS for now
m->has_flag(CEPH_OSD_FLAG_FULL_FORCE)) {
dout(20) << __func__ << " full, but proceeding due to FULL_FORCE or MDS"
<< dendl;
} else if (m->has_flag(CEPH_OSD_FLAG_FULL_TRY)) {
// they tried, they failed.
dout(20) << __func__ << " full, replying to FULL_TRY op" << dendl;
return pool.info.has_flag(pg_pool_t::FLAG_FULL_QUOTA) ? -EDQUOT : -ENOSPC;
} else {
// drop request
dout(20) << __func__ << " full, dropping request (bad client)" << dendl;
return -EAGAIN;
}
}
const hobject_t& soid = ctx->obs->oi.soid;
// clone, if necessary
if (soid.snap == CEPH_NOSNAP)
make_writeable(ctx);
finish_ctx(ctx,
ctx->new_obs.exists ? pg_log_entry_t::MODIFY :
pg_log_entry_t::DELETE,
result);
return result;
}
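// Stamp user/object versions and mtime, persist OI_ATTR (and SS_ATTR on the
// head), append the pg_log entry (including per-op return values for returnvec
// clients and the dirtied clean_regions), and install the new object state in
// the obc.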
void PrimaryLogPG::finish_ctx(OpContext *ctx, int log_op_type, int result)
{
const hobject_t& soid = ctx->obs->oi.soid;
dout(20) << __func__ << " " << soid << " " << ctx
<< " op " << pg_log_entry_t::get_op_name(log_op_type)
<< dendl;
utime_t now = ceph_clock_now();
// Drop the reference if deduped chunk is modified
if (ctx->new_obs.oi.is_dirty() &&
(ctx->obs->oi.has_manifest() && ctx->obs->oi.manifest.is_chunked()) &&
!ctx->cache_operation &&
log_op_type != pg_log_entry_t::PROMOTE) {
update_chunk_map_by_dirty(ctx);
// If a clone is being created, do not drop the reference for the manifest object
if (!ctx->delta_stats.num_object_clones) {
dec_refcount_by_dirty(ctx);
}
}
// finish and log the op.
if (ctx->user_modify) {
// update the user_version for any modify ops, except for the watch op
ctx->user_at_version = std::max(info.last_user_version, ctx->new_obs.oi.user_version) + 1;
/* In order for new clients and old clients to interoperate properly
* when exchanging versions, we need to lower bound the user_version
* (which our new clients pay proper attention to)
* by the at_version (which is all the old clients can ever see). */
if (ctx->at_version.version > ctx->user_at_version)
ctx->user_at_version = ctx->at_version.version;
ctx->new_obs.oi.user_version = ctx->user_at_version;
}
ctx->bytes_written = ctx->op_t->get_bytes_written();
if (ctx->new_obs.exists) {
ctx->new_obs.oi.version = ctx->at_version;
ctx->new_obs.oi.prior_version = ctx->obs->oi.version;
ctx->new_obs.oi.last_reqid = ctx->reqid;
if (ctx->mtime != utime_t()) {
ctx->new_obs.oi.mtime = ctx->mtime;
dout(10) << " set mtime to " << ctx->new_obs.oi.mtime << dendl;
ctx->new_obs.oi.local_mtime = now;
} else {
dout(10) << " mtime unchanged at " << ctx->new_obs.oi.mtime << dendl;
}
// object_info_t
map <string, bufferlist, less<>> attrs;
bufferlist bv(sizeof(ctx->new_obs.oi));
encode(ctx->new_obs.oi, bv,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
attrs[OI_ATTR] = std::move(bv);
// snapset
if (soid.snap == CEPH_NOSNAP) {
dout(10) << " final snapset " << ctx->new_snapset
<< " in " << soid << dendl;
bufferlist bss;
encode(ctx->new_snapset, bss);
attrs[SS_ATTR] = std::move(bss);
} else {
dout(10) << " no snapset (this is a clone)" << dendl;
}
ctx->op_t->setattrs(soid, attrs);
} else {
// reset cached oi
ctx->new_obs.oi = object_info_t(ctx->obc->obs.oi.soid);
}
// append to log
ctx->log.push_back(
pg_log_entry_t(log_op_type, soid, ctx->at_version,
ctx->obs->oi.version,
ctx->user_at_version, ctx->reqid,
ctx->mtime,
(ctx->op && ctx->op->allows_returnvec()) ? result : 0));
if (ctx->op && ctx->op->allows_returnvec()) {
// also the per-op values
ctx->log.back().set_op_returns(*ctx->ops);
dout(20) << __func__ << " op_returns " << ctx->log.back().op_returns
<< dendl;
}
ctx->log.back().clean_regions = ctx->clean_regions;
dout(20) << __func__ << " object " << soid << " marks clean_regions " << ctx->log.back().clean_regions << dendl;
if (soid.snap < CEPH_NOSNAP) {
switch (log_op_type) {
case pg_log_entry_t::MODIFY:
case pg_log_entry_t::PROMOTE:
case pg_log_entry_t::CLEAN:
dout(20) << __func__ << " encoding snaps from " << ctx->new_snapset
<< dendl;
encode(ctx->new_snapset.clone_snaps[soid.snap], ctx->log.back().snaps);
break;
default:
break;
}
}
if (!ctx->extra_reqids.empty()) {
dout(20) << __func__ << " extra_reqids " << ctx->extra_reqids << " "
<< ctx->extra_reqid_return_codes << dendl;
ctx->log.back().extra_reqids.swap(ctx->extra_reqids);
ctx->log.back().extra_reqid_return_codes.swap(ctx->extra_reqid_return_codes);
}
// apply new object state.
ctx->obc->obs = ctx->new_obs;
if (soid.is_head() && !ctx->obc->obs.exists) {
ctx->obc->ssc->exists = false;
ctx->obc->ssc->snapset = SnapSet();
} else {
ctx->obc->ssc->exists = true;
ctx->obc->ssc->snapset = ctx->new_snapset;
}
}
void PrimaryLogPG::apply_stats(
const hobject_t &soid,
const object_stat_sum_t &delta_stats) {
recovery_state.apply_op_stats(soid, delta_stats);
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
pg_shard_t bt = *i;
const pg_info_t& pinfo = recovery_state.get_peer_info(bt);
if (soid > pinfo.last_backfill && soid <= last_backfill_started) {
pending_backfill_updates[soid].stats.add(delta_stats);
}
}
m_scrubber->stats_of_handled_objects(delta_stats, soid);
}
void PrimaryLogPG::complete_read_ctx(int result, OpContext *ctx)
{
auto m = ctx->op->get_req<MOSDOp>();
ceph_assert(ctx->async_reads_complete());
for (auto p = ctx->ops->begin();
p != ctx->ops->end() && result >= 0; ++p) {
if (p->rval < 0 && !(p->op.flags & CEPH_OSD_OP_FLAG_FAILOK)) {
result = p->rval;
break;
}
ctx->bytes_read += p->outdata.length();
}
ctx->reply->get_header().data_off = (ctx->data_off ? *ctx->data_off : 0);
MOSDOpReply *reply = ctx->reply;
ctx->reply = nullptr;
if (result >= 0) {
if (!ctx->ignore_log_op_stats) {
log_op_stats(*ctx->op, ctx->bytes_written, ctx->bytes_read);
publish_stats_to_osd();
}
// on read, return the current object version
if (ctx->obs) {
reply->set_reply_versions(eversion_t(), ctx->obs->oi.user_version);
} else {
reply->set_reply_versions(eversion_t(), ctx->user_at_version);
}
} else if (result == -ENOENT) {
// on ENOENT, set a floor for what the next user version will be.
reply->set_enoent_reply_versions(info.last_update, info.last_user_version);
}
reply->set_result(result);
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
osd->send_message_osd_client(reply, m->get_connection());
close_op_ctx(ctx);
}
// ========================================================================
// copyfrom
struct C_Copyfrom : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
PrimaryLogPG::CopyOpRef cop; // used for keeping the cop alive
C_Copyfrom(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::CopyOpRef& c)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), cop(c)
{}
void finish(int r) override {
if (r == -ECANCELED)
return;
std::scoped_lock l{*pg};
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->process_copy_chunk(oid, tid, r);
cop.reset();
}
}
};
struct C_CopyFrom_AsyncReadCb : public Context {
OSDOp *osd_op;
object_copy_data_t reply_obj;
uint64_t features;
size_t len;
C_CopyFrom_AsyncReadCb(OSDOp *osd_op, uint64_t features) :
osd_op(osd_op), features(features), len(0) {}
void finish(int r) override {
osd_op->rval = r;
if (r < 0) {
return;
}
ceph_assert(len > 0);
ceph_assert(len <= reply_obj.data.length());
bufferlist bl;
bl.substr_of(reply_obj.data, 0, len);
reply_obj.data.swap(bl);
encode(reply_obj, osd_op->outdata, features);
}
};
struct C_CopyChunk : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
PrimaryLogPG::CopyOpRef cop; // used for keeping the cop alive
uint64_t offset = 0;
C_CopyChunk(PrimaryLogPG *p, hobject_t o, epoch_t lpr,
const PrimaryLogPG::CopyOpRef& c)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), cop(c)
{}
void finish(int r) override {
if (r == -ECANCELED)
return;
std::scoped_lock l{*pg};
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->process_copy_chunk_manifest(oid, tid, r, offset);
cop.reset();
}
}
};
int PrimaryLogPG::do_copy_get(OpContext *ctx, bufferlist::const_iterator& bp,
OSDOp& osd_op, ObjectContextRef &obc)
{
object_info_t& oi = obc->obs.oi;
hobject_t& soid = oi.soid;
int result = 0;
object_copy_cursor_t cursor;
uint64_t out_max;
try {
decode(cursor, bp);
decode(out_max, bp);
}
catch (ceph::buffer::error& e) {
result = -EINVAL;
return result;
}
const MOSDOp *op = reinterpret_cast<const MOSDOp*>(ctx->op->get_req());
uint64_t features = op->get_features();
bool async_read_started = false;
object_copy_data_t _reply_obj;
C_CopyFrom_AsyncReadCb *cb = nullptr;
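// On erasure-coded pools the data read is asynchronous, so the reply object
// lives in the callback and is encoded there once the read completes.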
if (pool.info.is_erasure()) {
cb = new C_CopyFrom_AsyncReadCb(&osd_op, features);
}
object_copy_data_t &reply_obj = cb ? cb->reply_obj : _reply_obj;
// size, mtime
reply_obj.size = oi.size;
reply_obj.mtime = oi.mtime;
ceph_assert(obc->ssc);
if (soid.snap < CEPH_NOSNAP) {
auto p = obc->ssc->snapset.clone_snaps.find(soid.snap);
ceph_assert(p != obc->ssc->snapset.clone_snaps.end()); // warn?
reply_obj.snaps = p->second;
} else {
reply_obj.snap_seq = obc->ssc->snapset.seq;
}
if (oi.is_data_digest()) {
reply_obj.flags |= object_copy_data_t::FLAG_DATA_DIGEST;
reply_obj.data_digest = oi.data_digest;
}
if (oi.is_omap_digest()) {
reply_obj.flags |= object_copy_data_t::FLAG_OMAP_DIGEST;
reply_obj.omap_digest = oi.omap_digest;
}
reply_obj.truncate_seq = oi.truncate_seq;
reply_obj.truncate_size = oi.truncate_size;
// attrs
map<string,bufferlist,less<>>& out_attrs = reply_obj.attrs;
if (!cursor.attr_complete) {
result = getattrs_maybe_cache(
ctx->obc,
&out_attrs);
if (result < 0) {
if (cb) {
delete cb;
}
return result;
}
cursor.attr_complete = true;
dout(20) << " got attrs" << dendl;
}
int64_t left = out_max - osd_op.outdata.length();
// data
bufferlist& bl = reply_obj.data;
if (left > 0 && !cursor.data_complete) {
if (cursor.data_offset < oi.size) {
uint64_t max_read = std::min(oi.size - cursor.data_offset, (uint64_t)left);
if (cb) {
async_read_started = true;
ctx->pending_async_reads.push_back(
make_pair(
boost::make_tuple(cursor.data_offset, max_read, osd_op.op.flags),
make_pair(&bl, cb)));
cb->len = max_read;
ctx->op_finishers[ctx->current_osd_subop_num].reset(
new ReadFinisher(osd_op));
result = -EINPROGRESS;
dout(10) << __func__ << ": async_read noted for " << soid << dendl;
} else {
result = pgbackend->objects_read_sync(
oi.soid, cursor.data_offset, max_read, osd_op.op.flags, &bl);
if (result < 0)
return result;
}
left -= max_read;
cursor.data_offset += max_read;
}
if (cursor.data_offset == oi.size) {
cursor.data_complete = true;
dout(20) << " got data" << dendl;
}
ceph_assert(cursor.data_offset <= oi.size);
}
// omap
uint32_t omap_keys = 0;
if (!pool.info.supports_omap() || !oi.is_omap()) {
cursor.omap_complete = true;
} else {
if (left > 0 && !cursor.omap_complete) {
ceph_assert(cursor.data_complete);
if (cursor.omap_offset.empty()) {
osd->store->omap_get_header(ch, ghobject_t(oi.soid),
&reply_obj.omap_header);
}
bufferlist omap_data;
ObjectMap::ObjectMapIterator iter =
osd->store->get_omap_iterator(ch, ghobject_t(oi.soid));
ceph_assert(iter);
iter->upper_bound(cursor.omap_offset);
for (; iter->valid(); iter->next()) {
++omap_keys;
encode(iter->key(), omap_data);
encode(iter->value(), omap_data);
left -= iter->key().length() + 4 + iter->value().length() + 4;
if (left <= 0)
break;
}
if (omap_keys) {
encode(omap_keys, reply_obj.omap_data);
reply_obj.omap_data.claim_append(omap_data);
}
if (iter->valid()) {
cursor.omap_offset = iter->key();
} else {
cursor.omap_complete = true;
dout(20) << " got omap" << dendl;
}
}
}
if (cursor.is_complete()) {
// include reqids only in the final step. this is a bit fragile
// but it works...
recovery_state.get_pg_log().get_log().get_object_reqids(ctx->obc->obs.oi.soid, 10,
&reply_obj.reqids,
&reply_obj.reqid_return_codes);
dout(20) << " got reqids" << dendl;
}
dout(20) << " cursor.is_complete=" << cursor.is_complete()
<< " " << out_attrs.size() << " attrs"
<< " " << bl.length() << " bytes"
<< " " << reply_obj.omap_header.length() << " omap header bytes"
<< " " << reply_obj.omap_data.length() << " omap data bytes in "
<< omap_keys << " keys"
<< " " << reply_obj.reqids.size() << " reqids"
<< dendl;
reply_obj.cursor = cursor;
if (!async_read_started) {
encode(reply_obj, osd_op.outdata, features);
}
if (cb && !async_read_started) {
delete cb;
}
if (result > 0) {
result = 0;
}
return result;
}
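// COPY_GET on a nonexistent object still replies with the recent request ids
// from the pg log so the client can detect already-applied writes; the result
// is -ENOENT.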
void PrimaryLogPG::fill_in_copy_get_noent(OpRequestRef& op, hobject_t oid,
OSDOp& osd_op)
{
const MOSDOp *m = static_cast<const MOSDOp*>(op->get_req());
uint64_t features = m->get_features();
object_copy_data_t reply_obj;
recovery_state.get_pg_log().get_log().get_object_reqids(oid, 10, &reply_obj.reqids,
&reply_obj.reqid_return_codes);
dout(20) << __func__ << " got reqids " << reply_obj.reqids << dendl;
encode(reply_obj, osd_op.outdata, features);
osd_op.rval = -ENOENT;
MOSDOpReply *reply = new MOSDOpReply(m, 0, get_osdmap_epoch(), 0, false);
reply->set_result(-ENOENT);
reply->add_flags(CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK);
osd->send_message_osd_client(reply, m->get_connection());
}
void PrimaryLogPG::start_copy(CopyCallback *cb, ObjectContextRef obc,
hobject_t src, object_locator_t oloc,
version_t version, unsigned flags,
bool mirror_snapset,
unsigned src_obj_fadvise_flags,
unsigned dest_obj_fadvise_flags)
{
const hobject_t& dest = obc->obs.oi.soid;
dout(10) << __func__ << " " << dest
<< " from " << src << " " << oloc << " v" << version
<< " flags " << flags
<< (mirror_snapset ? " mirror_snapset" : "")
<< dendl;
ceph_assert(!mirror_snapset || src.snap == CEPH_NOSNAP);
// cancel a previous in-progress copy?
if (copy_ops.count(dest)) {
// FIXME: if the src etc match, we could avoid restarting from the
// beginning.
CopyOpRef cop = copy_ops[dest];
vector<ceph_tid_t> tids;
cancel_copy(cop, false, &tids);
osd->objecter->op_cancel(tids, -ECANCELED);
}
CopyOpRef cop(std::make_shared<CopyOp>(cb, obc, src, oloc, version, flags,
mirror_snapset, src_obj_fadvise_flags,
dest_obj_fadvise_flags));
copy_ops[dest] = cop;
dout(20) << fmt::format("{}: blocking {}", __func__, dest) << dendl;
obc->start_block();
if (!obc->obs.oi.has_manifest()) {
_copy_some(obc, cop);
} else {
if (obc->obs.oi.manifest.is_redirect()) {
_copy_some(obc, cop);
} else if (obc->obs.oi.manifest.is_chunked()) {
auto p = obc->obs.oi.manifest.chunk_map.begin();
_copy_some_manifest(obc, cop, p->first);
} else {
ceph_abort_msg("unrecognized manifest type");
}
}
}
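// Fetch the next COPY_GET chunk from the source via the objecter (plus a
// list_snaps read on the first chunk when mirroring the snapset); completion
// re-enters process_copy_chunk through C_Copyfrom.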
void PrimaryLogPG::_copy_some(ObjectContextRef obc, CopyOpRef cop)
{
dout(10) << __func__ << " " << *obc << " " << cop << dendl;
unsigned flags = 0;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_FLUSH)
flags |= CEPH_OSD_FLAG_FLUSH;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE)
flags |= CEPH_OSD_FLAG_IGNORE_CACHE;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY)
flags |= CEPH_OSD_FLAG_IGNORE_OVERLAY;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE)
flags |= CEPH_OSD_FLAG_MAP_SNAP_CLONE;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_RWORDERED)
flags |= CEPH_OSD_FLAG_RWORDERED;
C_GatherBuilder gather(cct);
if (cop->cursor.is_initial() && cop->mirror_snapset) {
// list snaps too.
ceph_assert(cop->src.snap == CEPH_NOSNAP);
ObjectOperation op;
op.list_snaps(&cop->results.snapset, NULL);
ceph_tid_t tid = osd->objecter->read(cop->src.oid, cop->oloc, op,
CEPH_SNAPDIR, NULL,
flags, gather.new_sub(), NULL);
cop->objecter_tid2 = tid;
}
ObjectOperation op;
if (cop->results.user_version) {
op.assert_version(cop->results.user_version);
} else {
// we should learn the version after the first chunk, if we didn't know
// it already!
ceph_assert(cop->cursor.is_initial());
}
op.copy_get(&cop->cursor, get_copy_chunk_size(),
&cop->results.object_size, &cop->results.mtime,
&cop->attrs, &cop->data, &cop->omap_header, &cop->omap_data,
&cop->results.snaps, &cop->results.snap_seq,
&cop->results.flags,
&cop->results.source_data_digest,
&cop->results.source_omap_digest,
&cop->results.reqids,
&cop->results.reqid_return_codes,
&cop->results.truncate_seq,
&cop->results.truncate_size,
&cop->rval);
op.set_last_op_flags(cop->src_obj_fadvise_flags);
C_Copyfrom *fin = new C_Copyfrom(this, obc->obs.oi.soid,
get_last_peering_reset(), cop);
gather.set_finisher(new C_OnFinisher(fin,
osd->get_objecter_finisher(get_pg_shard())));
ceph_tid_t tid = osd->objecter->read(cop->src.oid, cop->oloc, op,
cop->src.snap, NULL,
flags,
gather.new_sub(),
// discover the object version if we don't know it yet
cop->results.user_version ? NULL : &cop->results.user_version);
fin->tid = tid;
cop->objecter_tid = tid;
gather.activate();
}
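// Manifest variant: batch chunk reads from the chunk_map up to roughly
// get_copy_chunk_size() bytes, issuing one objecter read per chunk; each
// completion lands in process_copy_chunk_manifest with its offset.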
void PrimaryLogPG::_copy_some_manifest(ObjectContextRef obc, CopyOpRef cop, uint64_t start_offset)
{
dout(10) << __func__ << " " << *obc << " " << cop << dendl;
unsigned flags = 0;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_FLUSH)
flags |= CEPH_OSD_FLAG_FLUSH;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE)
flags |= CEPH_OSD_FLAG_IGNORE_CACHE;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY)
flags |= CEPH_OSD_FLAG_IGNORE_OVERLAY;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE)
flags |= CEPH_OSD_FLAG_MAP_SNAP_CLONE;
if (cop->flags & CEPH_OSD_COPY_FROM_FLAG_RWORDERED)
flags |= CEPH_OSD_FLAG_RWORDERED;
int num_chunks = 0;
uint64_t last_offset = 0, chunks_size = 0;
object_manifest_t *manifest = &obc->obs.oi.manifest;
map<uint64_t, chunk_info_t>::iterator iter = manifest->chunk_map.find(start_offset);
for (;iter != manifest->chunk_map.end(); ++iter) {
num_chunks++;
chunks_size += iter->second.length;
last_offset = iter->first;
if (get_copy_chunk_size() < chunks_size) {
break;
}
}
cop->num_chunk = num_chunks;
cop->start_offset = start_offset;
cop->last_offset = last_offset;
dout(20) << __func__ << " oid " << obc->obs.oi.soid << " num_chunks: " << num_chunks
<< " start_offset: " << start_offset << " chunks_size: " << chunks_size
<< " last_offset: " << last_offset << dendl;
iter = manifest->chunk_map.find(start_offset);
for (;iter != manifest->chunk_map.end(); ++iter) {
uint64_t obj_offset = iter->first;
uint64_t length = manifest->chunk_map[iter->first].length;
hobject_t soid = manifest->chunk_map[iter->first].oid;
object_locator_t oloc(soid);
CopyCallback * cb = NULL;
CopyOpRef sub_cop(std::make_shared<CopyOp>(cb, ObjectContextRef(), cop->src, oloc,
cop->results.user_version, cop->flags, cop->mirror_snapset,
cop->src_obj_fadvise_flags, cop->dest_obj_fadvise_flags));
sub_cop->cursor.data_offset = obj_offset;
cop->chunk_cops[obj_offset] = sub_cop;
int s = sub_cop->chunk_ops.size();
sub_cop->chunk_ops.resize(s+1);
sub_cop->chunk_ops[s].op.op = CEPH_OSD_OP_READ;
sub_cop->chunk_ops[s].op.extent.offset = manifest->chunk_map[iter->first].offset;
sub_cop->chunk_ops[s].op.extent.length = length;
ObjectOperation op;
op.dup(sub_cop->chunk_ops);
if (cop->results.user_version) {
op.assert_version(cop->results.user_version);
} else {
// we should learn the version after the first chunk, if we didn't know
// it already!
ceph_assert(cop->cursor.is_initial());
}
op.set_last_op_flags(cop->src_obj_fadvise_flags);
C_CopyChunk *fin = new C_CopyChunk(this, obc->obs.oi.soid,
get_last_peering_reset(), cop);
fin->offset = obj_offset;
ceph_tid_t tid = osd->objecter->read(
soid.oid, oloc, op,
sub_cop->src.snap, NULL,
flags,
new C_OnFinisher(fin, osd->get_objecter_finisher(get_pg_shard())),
// discover the object version if we don't know it yet
sub_cop->results.user_version ? NULL : &sub_cop->results.user_version);
fin->tid = tid;
sub_cop->objecter_tid = tid;
dout(20) << __func__ << " tgt_oid: " << soid.oid << " tgt_offset: "
<< manifest->chunk_map[iter->first].offset
<< " length: " << length << " pool id: " << oloc.pool
<< " tid: " << tid << dendl;
if (last_offset <= iter->first) {
break;
}
}
}
void PrimaryLogPG::process_copy_chunk(hobject_t oid, ceph_tid_t tid, int r)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<hobject_t,CopyOpRef>::iterator p = copy_ops.find(oid);
if (p == copy_ops.end()) {
dout(10) << __func__ << " no copy_op found" << dendl;
return;
}
CopyOpRef cop = p->second;
if (tid != cop->objecter_tid) {
dout(10) << __func__ << " tid " << tid << " != cop " << cop
<< " tid " << cop->objecter_tid << dendl;
return;
}
if (cop->omap_data.length() || cop->omap_header.length())
cop->results.has_omap = true;
if (r >= 0 && !pool.info.supports_omap() &&
(cop->omap_data.length() || cop->omap_header.length())) {
r = -EOPNOTSUPP;
}
cop->objecter_tid = 0;
cop->objecter_tid2 = 0; // assume this was ordered before us (if it happened)
ObjectContextRef& cobc = cop->obc;
if (r < 0)
goto out;
ceph_assert(cop->rval >= 0);
if (oid.snap < CEPH_NOSNAP && !cop->results.snaps.empty()) {
// verify snap hasn't been deleted
vector<snapid_t>::iterator p = cop->results.snaps.begin();
while (p != cop->results.snaps.end()) {
// make best effort to sanitize snaps/clones.
if (get_osdmap()->in_removed_snaps_queue(info.pgid.pgid.pool(), *p)) {
dout(10) << __func__ << " clone snap " << *p << " has been deleted"
<< dendl;
for (vector<snapid_t>::iterator q = p + 1;
q != cop->results.snaps.end();
++q)
*(q - 1) = *q;
cop->results.snaps.resize(cop->results.snaps.size() - 1);
} else {
++p;
}
}
if (cop->results.snaps.empty()) {
dout(10) << __func__ << " no more snaps for " << oid << dendl;
r = -ENOENT;
goto out;
}
}
ceph_assert(cop->rval >= 0);
if (!cop->temp_cursor.data_complete) {
cop->results.data_digest = cop->data.crc32c(cop->results.data_digest);
}
if (pool.info.supports_omap() && !cop->temp_cursor.omap_complete) {
if (cop->omap_header.length()) {
cop->results.omap_digest =
cop->omap_header.crc32c(cop->results.omap_digest);
}
if (cop->omap_data.length()) {
bufferlist keys;
keys.substr_of(cop->omap_data, 4, cop->omap_data.length() - 4);
cop->results.omap_digest = keys.crc32c(cop->results.omap_digest);
}
}
if (!cop->temp_cursor.attr_complete) {
for (map<string,bufferlist>::iterator p = cop->attrs.begin();
p != cop->attrs.end();
++p) {
cop->results.attrs[string("_") + p->first] = p->second;
}
cop->attrs.clear();
}
if (!cop->cursor.is_complete()) {
// write out what we have so far
if (cop->temp_cursor.is_initial()) {
ceph_assert(!cop->results.started_temp_obj);
cop->results.started_temp_obj = true;
cop->results.temp_oid = generate_temp_object(oid);
dout(20) << __func__ << " using temp " << cop->results.temp_oid << dendl;
}
ObjectContextRef tempobc = get_object_context(cop->results.temp_oid, true);
OpContextUPtr ctx = simple_opc_create(tempobc);
if (cop->temp_cursor.is_initial()) {
ctx->new_temp_oid = cop->results.temp_oid;
}
_write_copy_chunk(cop, ctx->op_t.get());
simple_opc_submit(std::move(ctx));
dout(10) << __func__ << " fetching more" << dendl;
_copy_some(cobc, cop);
return;
}
// verify digests?
if (cop->results.is_data_digest() || cop->results.is_omap_digest()) {
dout(20) << __func__ << std::hex
<< " got digest: rx data 0x" << cop->results.data_digest
<< " omap 0x" << cop->results.omap_digest
<< ", source: data 0x" << cop->results.source_data_digest
<< " omap 0x" << cop->results.source_omap_digest
<< std::dec
<< " flags " << cop->results.flags
<< dendl;
}
if (cop->results.is_data_digest() &&
cop->results.data_digest != cop->results.source_data_digest) {
derr << __func__ << std::hex << " data digest 0x" << cop->results.data_digest
<< " != source 0x" << cop->results.source_data_digest << std::dec
<< dendl;
osd->clog->error() << info.pgid << " copy from " << cop->src
<< " to " << cop->obc->obs.oi.soid << std::hex
<< " data digest 0x" << cop->results.data_digest
<< " != source 0x" << cop->results.source_data_digest
<< std::dec;
r = -EIO;
goto out;
}
if (cop->results.is_omap_digest() &&
cop->results.omap_digest != cop->results.source_omap_digest) {
derr << __func__ << std::hex
<< " omap digest 0x" << cop->results.omap_digest
<< " != source 0x" << cop->results.source_omap_digest
<< std::dec << dendl;
osd->clog->error() << info.pgid << " copy from " << cop->src
<< " to " << cop->obc->obs.oi.soid << std::hex
<< " omap digest 0x" << cop->results.omap_digest
<< " != source 0x" << cop->results.source_omap_digest
<< std::dec;
r = -EIO;
goto out;
}
if (cct->_conf->osd_debug_inject_copyfrom_error) {
derr << __func__ << " injecting copyfrom failure" << dendl;
r = -EIO;
goto out;
}
cop->results.fill_in_final_tx = std::function<void(PGTransaction*)>(
[this, &cop /* avoid ref cycle */](PGTransaction *t) {
ObjectState& obs = cop->obc->obs;
if (cop->temp_cursor.is_initial()) {
dout(20) << "fill_in_final_tx: writing "
<< "directly to final object" << dendl;
// write directly to final object
cop->results.temp_oid = obs.oi.soid;
_write_copy_chunk(cop, t);
} else {
// finish writing to temp object, then move into place
dout(20) << "fill_in_final_tx: writing to temp object" << dendl;
if (obs.oi.has_manifest() && obs.oi.manifest.is_redirect() && obs.exists) {
/* In redirect manifest case, the object exists in the upper tier.
* So, to avoid a conflict when rename() is called, remove existing
* object first
*/
t->remove(obs.oi.soid);
}
_write_copy_chunk(cop, t);
t->rename(obs.oi.soid, cop->results.temp_oid);
}
t->setattrs(obs.oi.soid, cop->results.attrs);
});
dout(20) << __func__ << " success; committing" << dendl;
out:
dout(20) << __func__ << " complete r = " << cpp_strerror(r) << dendl;
CopyCallbackResults results(r, &cop->results);
cop->cb->complete(results);
copy_ops.erase(cobc->obs.oi.soid);
cobc->stop_block();
if (r < 0 && cop->results.started_temp_obj) {
dout(10) << __func__ << " deleting partial temp object "
<< cop->results.temp_oid << dendl;
ObjectContextRef tempobc = get_object_context(cop->results.temp_oid, true);
OpContextUPtr ctx = simple_opc_create(tempobc);
ctx->op_t->remove(cop->results.temp_oid);
ctx->discard_temp_oid = cop->results.temp_oid;
simple_opc_submit(std::move(ctx));
}
// cancel and requeue proxy ops on this object
if (!r) {
cancel_and_requeue_proxy_ops(cobc->obs.oi.soid);
}
kick_object_context_blocked(cobc);
}
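// Completion for a single chunk read issued by _copy_some_manifest() while
// promoting a chunked (dedup) manifest object.  Once every chunk read in the
// current batch has returned, the data is written into the local object at
// the corresponding offsets and the FLAG_MISSING bits are cleared; then
// either the next batch of chunks is requested or the copy callback is
// completed.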
void PrimaryLogPG::process_copy_chunk_manifest(hobject_t oid, ceph_tid_t tid, int r, uint64_t offset)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<hobject_t,CopyOpRef>::iterator p = copy_ops.find(oid);
if (p == copy_ops.end()) {
dout(10) << __func__ << " no copy_op found" << dendl;
return;
}
CopyOpRef obj_cop = p->second;
CopyOpRef chunk_cop = obj_cop->chunk_cops[offset];
if (tid != chunk_cop->objecter_tid) {
dout(10) << __func__ << " tid " << tid << " != cop " << chunk_cop
<< " tid " << chunk_cop->objecter_tid << dendl;
return;
}
if (chunk_cop->omap_data.length() || chunk_cop->omap_header.length()) {
r = -EOPNOTSUPP;
}
chunk_cop->objecter_tid = 0;
chunk_cop->objecter_tid2 = 0; // assume this ordered before us (if it happened)
ObjectContextRef& cobc = obj_cop->obc;
OSDOp &chunk_data = chunk_cop->chunk_ops[0];
if (r < 0) {
obj_cop->failed = true;
goto out;
}
if (obj_cop->failed) {
return;
}
if (!chunk_data.outdata.length()) {
r = -EIO;
obj_cop->failed = true;
goto out;
}
obj_cop->num_chunk--;
/* check all of the copyop are completed */
if (obj_cop->num_chunk) {
dout(20) << __func__ << " num_chunk: " << obj_cop->num_chunk << dendl;
return;
}
{
OpContextUPtr ctx = simple_opc_create(obj_cop->obc);
if (!ctx->lock_manager.take_write_lock(
obj_cop->obc->obs.oi.soid,
obj_cop->obc)) {
      // a recovery op may be holding the read lock,
      // so we need to wait for recovery to complete
r = -EAGAIN;
obj_cop->failed = true;
close_op_ctx(ctx.release());
goto out;
}
dout(20) << __func__ << " took lock on obc, " << obj_cop->obc->rwstate << dendl;
PGTransaction *t = ctx->op_t.get();
ObjectState& obs = ctx->new_obs;
for (auto p : obj_cop->chunk_cops) {
OSDOp &sub_chunk = p.second->chunk_ops[0];
t->write(cobc->obs.oi.soid,
p.second->cursor.data_offset,
sub_chunk.outdata.length(),
sub_chunk.outdata,
p.second->dest_obj_fadvise_flags);
dout(20) << __func__ << " offset: " << p.second->cursor.data_offset
<< " length: " << sub_chunk.outdata.length() << dendl;
write_update_size_and_usage(ctx->delta_stats, obs.oi, ctx->modified_ranges,
p.second->cursor.data_offset, sub_chunk.outdata.length());
obs.oi.manifest.chunk_map[p.second->cursor.data_offset].clear_flag(chunk_info_t::FLAG_MISSING);
ctx->clean_regions.mark_data_region_dirty(p.second->cursor.data_offset, sub_chunk.outdata.length());
sub_chunk.outdata.clear();
}
obs.oi.clear_data_digest();
ctx->at_version = get_next_version();
finish_ctx(ctx.get(), pg_log_entry_t::PROMOTE);
simple_opc_submit(std::move(ctx));
obj_cop->chunk_cops.clear();
auto p = cobc->obs.oi.manifest.chunk_map.rbegin();
/* check remaining work */
if (p != cobc->obs.oi.manifest.chunk_map.rend()) {
if (obj_cop->last_offset < p->first) {
for (auto &en : cobc->obs.oi.manifest.chunk_map) {
if (obj_cop->last_offset < en.first) {
_copy_some_manifest(cobc, obj_cop, en.first);
return;
}
}
}
}
}
out:
dout(20) << __func__ << " complete r = " << cpp_strerror(r) << dendl;
CopyCallbackResults results(r, &obj_cop->results);
obj_cop->cb->complete(results);
copy_ops.erase(cobc->obs.oi.soid);
cobc->stop_block();
// cancel and requeue proxy ops on this object
if (!r) {
cancel_and_requeue_proxy_ops(cobc->obs.oi.soid);
}
kick_object_context_blocked(cobc);
}
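// Cancel any in-flight proxy reads/writes targeting the given object and
// kick the proxy ops blocked on it so they can be restarted against the
// updated local object (called once a copy/promote has completed).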
void PrimaryLogPG::cancel_and_requeue_proxy_ops(hobject_t oid) {
vector<ceph_tid_t> tids;
for (map<ceph_tid_t, ProxyReadOpRef>::iterator it = proxyread_ops.begin();
it != proxyread_ops.end();) {
if (it->second->soid == oid) {
cancel_proxy_read((it++)->second, &tids);
} else {
++it;
}
}
for (map<ceph_tid_t, ProxyWriteOpRef>::iterator it = proxywrite_ops.begin();
it != proxywrite_ops.end();) {
if (it->second->soid == oid) {
cancel_proxy_write((it++)->second, &tids);
} else {
++it;
}
}
osd->objecter->op_cancel(tids, -ECANCELED);
kick_proxy_ops_blocked(oid);
}
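// Flush the data and omap accumulated so far in the CopyOp into the given
// transaction against the temp (or final) object, creating it on the first
// pass.  On pools that require alignment an unaligned tail is trimmed off
// and the cursor rewound so it is re-fetched on the next pass; temp_cursor
// is then advanced to match cursor.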
void PrimaryLogPG::_write_copy_chunk(CopyOpRef cop, PGTransaction *t)
{
dout(20) << __func__ << " " << cop
<< " " << cop->attrs.size() << " attrs"
<< " " << cop->data.length() << " bytes"
<< " " << cop->omap_header.length() << " omap header bytes"
<< " " << cop->omap_data.length() << " omap data bytes"
<< dendl;
if (!cop->temp_cursor.attr_complete) {
t->create(cop->results.temp_oid);
}
if (!cop->temp_cursor.data_complete) {
ceph_assert(cop->data.length() + cop->temp_cursor.data_offset ==
cop->cursor.data_offset);
if (pool.info.required_alignment() &&
!cop->cursor.data_complete) {
/**
* Trim off the unaligned bit at the end, we'll adjust cursor.data_offset
* to pick it up on the next pass.
*/
ceph_assert(cop->temp_cursor.data_offset %
pool.info.required_alignment() == 0);
if (cop->data.length() % pool.info.required_alignment() != 0) {
uint64_t to_trim =
cop->data.length() % pool.info.required_alignment();
bufferlist bl;
bl.substr_of(cop->data, 0, cop->data.length() - to_trim);
cop->data.swap(bl);
cop->cursor.data_offset -= to_trim;
ceph_assert(cop->data.length() + cop->temp_cursor.data_offset ==
cop->cursor.data_offset);
}
}
if (cop->data.length()) {
t->write(
cop->results.temp_oid,
cop->temp_cursor.data_offset,
cop->data.length(),
cop->data,
cop->dest_obj_fadvise_flags);
}
cop->data.clear();
}
if (pool.info.supports_omap()) {
if (!cop->temp_cursor.omap_complete) {
if (cop->omap_header.length()) {
t->omap_setheader(
cop->results.temp_oid,
cop->omap_header);
cop->omap_header.clear();
}
if (cop->omap_data.length()) {
map<string,bufferlist> omap;
bufferlist::const_iterator p = cop->omap_data.begin();
decode(omap, p);
t->omap_setkeys(cop->results.temp_oid, omap);
cop->omap_data.clear();
}
}
} else {
ceph_assert(cop->omap_header.length() == 0);
ceph_assert(cop->omap_data.length() == 0);
}
cop->temp_cursor = cop->cursor;
}
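// Apply the results of a completed COPY_FROM to the destination object:
// replace any existing object, install the copied content via
// fill_in_final_tx, carry over digests, truncate_seq/size, mtime and request
// ids, clear a whiteout if present, and update size and usage statistics.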
void PrimaryLogPG::finish_copyfrom(CopyFromCallback *cb)
{
OpContext *ctx = cb->ctx;
dout(20) << "finish_copyfrom on " << ctx->obs->oi.soid << dendl;
ObjectState& obs = ctx->new_obs;
if (obs.exists) {
dout(20) << __func__ << ": exists, removing" << dendl;
ctx->op_t->remove(obs.oi.soid);
} else {
ctx->delta_stats.num_objects++;
obs.exists = true;
}
if (cb->is_temp_obj_used()) {
ctx->discard_temp_oid = cb->results->temp_oid;
}
cb->results->fill_in_final_tx(ctx->op_t.get());
// CopyFromCallback fills this in for us
obs.oi.user_version = ctx->user_at_version;
if (cb->results->is_data_digest()) {
obs.oi.set_data_digest(cb->results->data_digest);
} else {
obs.oi.clear_data_digest();
}
if (cb->results->is_omap_digest()) {
obs.oi.set_omap_digest(cb->results->omap_digest);
} else {
obs.oi.clear_omap_digest();
}
obs.oi.truncate_seq = cb->truncate_seq;
obs.oi.truncate_size = cb->truncate_size;
obs.oi.mtime = ceph::real_clock::to_timespec(cb->results->mtime);
ctx->mtime = utime_t();
ctx->extra_reqids = cb->results->reqids;
ctx->extra_reqid_return_codes = cb->results->reqid_return_codes;
// cache: clear whiteout?
if (obs.oi.is_whiteout()) {
dout(10) << __func__ << " clearing whiteout on " << obs.oi.soid << dendl;
obs.oi.clear_flag(object_info_t::FLAG_WHITEOUT);
--ctx->delta_stats.num_whiteouts;
}
if (cb->results->has_omap) {
dout(10) << __func__ << " setting omap flag on " << obs.oi.soid << dendl;
obs.oi.set_flag(object_info_t::FLAG_OMAP);
ctx->clean_regions.mark_omap_dirty();
} else {
dout(10) << __func__ << " clearing omap flag on " << obs.oi.soid << dendl;
obs.oi.clear_flag(object_info_t::FLAG_OMAP);
}
interval_set<uint64_t> ch;
if (obs.oi.size > 0)
ch.insert(0, obs.oi.size);
ctx->modified_ranges.union_of(ch);
ctx->clean_regions.mark_data_region_dirty(0, std::max(obs.oi.size, cb->get_data_size()));
if (cb->get_data_size() != obs.oi.size) {
ctx->delta_stats.num_bytes -= obs.oi.size;
obs.oi.size = cb->get_data_size();
ctx->delta_stats.num_bytes += obs.oi.size;
}
ctx->delta_stats.num_wr++;
ctx->delta_stats.num_wr_kb += shift_round_up(obs.oi.size, 10);
osd->logger->inc(l_osd_copyfrom);
}
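// Completion of a promote (copy from the base tier into the cache tier).
// Filters clone snaps against removed snaps, cleans up any partial temp
// object on error, turns ENOENT on a head into a whiteout, removes a
// trimmed clone from the snapset, and otherwise commits the promoted object
// (clearing a redirect manifest if needed) with a PROMOTE log entry.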
void PrimaryLogPG::finish_promote(int r, CopyResults *results,
ObjectContextRef obc)
{
const hobject_t& soid = obc->obs.oi.soid;
dout(10) << __func__ << " " << soid << " r=" << r
<< " uv" << results->user_version << dendl;
if (r == -ECANCELED) {
return;
}
if (r != -ENOENT && soid.is_snap()) {
if (results->snaps.empty()) {
// we must have read "snap" content from the head object in the
// base pool. use snap_seq to construct what snaps should be
      // for this clone (what it was before we evicted the clean clone
// from this pool, and what it will be when we flush and the
// clone eventually happens in the base pool). we want to use
// snaps in (results->snap_seq,soid.snap]
SnapSet& snapset = obc->ssc->snapset;
for (auto p = snapset.clone_snaps.rbegin();
p != snapset.clone_snaps.rend();
++p) {
for (auto snap : p->second) {
if (snap > soid.snap) {
continue;
}
if (snap <= results->snap_seq) {
break;
}
results->snaps.push_back(snap);
}
}
}
dout(20) << __func__ << " snaps " << results->snaps << dendl;
filter_snapc(results->snaps);
dout(20) << __func__ << " filtered snaps " << results->snaps << dendl;
if (results->snaps.empty()) {
dout(20) << __func__
<< " snaps are empty, clone is invalid,"
<< " setting r to ENOENT" << dendl;
r = -ENOENT;
}
}
if (r < 0 && results->started_temp_obj) {
dout(10) << __func__ << " abort; will clean up partial work" << dendl;
ObjectContextRef tempobc = get_object_context(results->temp_oid, false);
ceph_assert(tempobc);
OpContextUPtr ctx = simple_opc_create(tempobc);
ctx->op_t->remove(results->temp_oid);
simple_opc_submit(std::move(ctx));
results->started_temp_obj = false;
}
if (r == -ENOENT && soid.is_snap()) {
dout(10) << __func__
<< ": enoent while trying to promote clone, " << soid
<< " must have been trimmed, removing from snapset"
<< dendl;
hobject_t head(soid.get_head());
ObjectContextRef obc = get_object_context(head, false);
ceph_assert(obc);
OpContextUPtr tctx = simple_opc_create(obc);
tctx->at_version = get_next_version();
if (get_osdmap()->require_osd_release < ceph_release_t::octopus) {
filter_snapc(tctx->new_snapset.snaps);
} else {
tctx->new_snapset.snaps.clear();
}
vector<snapid_t> new_clones;
map<snapid_t, vector<snapid_t>> new_clone_snaps;
for (vector<snapid_t>::iterator i = tctx->new_snapset.clones.begin();
i != tctx->new_snapset.clones.end();
++i) {
if (*i != soid.snap) {
new_clones.push_back(*i);
auto p = tctx->new_snapset.clone_snaps.find(*i);
if (p != tctx->new_snapset.clone_snaps.end()) {
new_clone_snaps[*i] = p->second;
}
}
}
tctx->new_snapset.clones.swap(new_clones);
tctx->new_snapset.clone_overlap.erase(soid.snap);
tctx->new_snapset.clone_size.erase(soid.snap);
tctx->new_snapset.clone_snaps.swap(new_clone_snaps);
// take RWWRITE lock for duration of our local write. ignore starvation.
if (!tctx->lock_manager.take_write_lock(
head,
obc)) {
ceph_abort_msg("problem!");
}
dout(20) << __func__ << " took lock on obc, " << obc->rwstate << dendl;
finish_ctx(tctx.get(), pg_log_entry_t::PROMOTE);
simple_opc_submit(std::move(tctx));
return;
}
bool whiteout = false;
if (r == -ENOENT) {
ceph_assert(soid.snap == CEPH_NOSNAP); // snap case is above
dout(10) << __func__ << " whiteout " << soid << dendl;
whiteout = true;
}
if (r < 0 && !whiteout) {
derr << __func__ << " unexpected promote error " << cpp_strerror(r) << dendl;
// pass error to everyone blocked on this object
// FIXME: this is pretty sloppy, but at this point we got
// something unexpected and don't have many other options.
map<hobject_t,list<OpRequestRef>>::iterator blocked_iter =
waiting_for_blocked_object.find(soid);
if (blocked_iter != waiting_for_blocked_object.end()) {
while (!blocked_iter->second.empty()) {
osd->reply_op_error(blocked_iter->second.front(), r);
blocked_iter->second.pop_front();
}
waiting_for_blocked_object.erase(blocked_iter);
}
return;
}
osd->promote_finish(results->object_size);
OpContextUPtr tctx = simple_opc_create(obc);
tctx->at_version = get_next_version();
if (!obc->obs.oi.has_manifest()) {
++tctx->delta_stats.num_objects;
}
if (soid.snap < CEPH_NOSNAP)
++tctx->delta_stats.num_object_clones;
tctx->new_obs.exists = true;
tctx->extra_reqids = results->reqids;
tctx->extra_reqid_return_codes = results->reqid_return_codes;
if (obc->obs.oi.has_manifest() && obc->obs.oi.manifest.is_redirect()) {
tctx->new_obs.oi.manifest.type = object_manifest_t::TYPE_NONE;
tctx->new_obs.oi.clear_flag(object_info_t::FLAG_REDIRECT_HAS_REFERENCE);
tctx->new_obs.oi.clear_flag(object_info_t::FLAG_MANIFEST);
tctx->new_obs.oi.manifest.redirect_target = hobject_t();
tctx->delta_stats.num_objects_manifest--;
if (obc->obs.oi.test_flag(object_info_t::FLAG_REDIRECT_HAS_REFERENCE)) {
dec_all_refcount_manifest(obc->obs.oi, tctx.get());
}
}
if (whiteout) {
// create a whiteout
tctx->op_t->create(soid);
tctx->new_obs.oi.set_flag(object_info_t::FLAG_WHITEOUT);
++tctx->delta_stats.num_whiteouts;
dout(20) << __func__ << " creating whiteout on " << soid << dendl;
osd->logger->inc(l_osd_tier_whiteout);
} else {
if (results->has_omap) {
dout(10) << __func__ << " setting omap flag on " << soid << dendl;
tctx->new_obs.oi.set_flag(object_info_t::FLAG_OMAP);
++tctx->delta_stats.num_objects_omap;
}
results->fill_in_final_tx(tctx->op_t.get());
if (results->started_temp_obj) {
tctx->discard_temp_oid = results->temp_oid;
}
tctx->new_obs.oi.size = results->object_size;
tctx->new_obs.oi.user_version = results->user_version;
tctx->new_obs.oi.mtime = ceph::real_clock::to_timespec(results->mtime);
tctx->mtime = utime_t();
if (results->is_data_digest()) {
tctx->new_obs.oi.set_data_digest(results->data_digest);
} else {
tctx->new_obs.oi.clear_data_digest();
}
if (results->object_size)
tctx->clean_regions.mark_data_region_dirty(0, results->object_size);
if (results->is_omap_digest()) {
tctx->new_obs.oi.set_omap_digest(results->omap_digest);
} else {
tctx->new_obs.oi.clear_omap_digest();
}
if (results->has_omap)
tctx->clean_regions.mark_omap_dirty();
tctx->new_obs.oi.truncate_seq = results->truncate_seq;
tctx->new_obs.oi.truncate_size = results->truncate_size;
if (soid.snap != CEPH_NOSNAP) {
ceph_assert(obc->ssc->snapset.clone_snaps.count(soid.snap));
ceph_assert(obc->ssc->snapset.clone_size.count(soid.snap));
ceph_assert(obc->ssc->snapset.clone_size[soid.snap] ==
results->object_size);
ceph_assert(obc->ssc->snapset.clone_overlap.count(soid.snap));
tctx->delta_stats.num_bytes += obc->ssc->snapset.get_clone_bytes(soid.snap);
} else {
tctx->delta_stats.num_bytes += results->object_size;
}
}
if (results->mirror_snapset) {
ceph_assert(tctx->new_obs.oi.soid.snap == CEPH_NOSNAP);
tctx->new_snapset.from_snap_set(
results->snapset,
get_osdmap()->require_osd_release < ceph_release_t::luminous);
}
dout(20) << __func__ << " new_snapset " << tctx->new_snapset << dendl;
// take RWWRITE lock for duration of our local write. ignore starvation.
if (!tctx->lock_manager.take_write_lock(
obc->obs.oi.soid,
obc)) {
ceph_abort_msg("problem!");
}
dout(20) << __func__ << " took lock on obc, " << obc->rwstate << dendl;
finish_ctx(tctx.get(), pg_log_entry_t::PROMOTE);
simple_opc_submit(std::move(tctx));
osd->logger->inc(l_osd_tier_promote);
if (agent_state &&
agent_state->is_idle())
agent_choose_mode();
}
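// Completion of a promote for a manifest (chunked) object.  The data was
// already written back by process_copy_chunk_manifest(), so this only
// propagates errors to blocked ops and updates promote stats and the tier
// agent.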
void PrimaryLogPG::finish_promote_manifest(int r, CopyResults *results,
ObjectContextRef obc)
{
const hobject_t& soid = obc->obs.oi.soid;
dout(10) << __func__ << " " << soid << " r=" << r
<< " uv" << results->user_version << dendl;
if (r == -ECANCELED || r == -EAGAIN) {
return;
}
if (r < 0) {
derr << __func__ << " unexpected promote error " << cpp_strerror(r) << dendl;
// pass error to everyone blocked on this object
// FIXME: this is pretty sloppy, but at this point we got
// something unexpected and don't have many other options.
map<hobject_t,list<OpRequestRef>>::iterator blocked_iter =
waiting_for_blocked_object.find(soid);
if (blocked_iter != waiting_for_blocked_object.end()) {
while (!blocked_iter->second.empty()) {
osd->reply_op_error(blocked_iter->second.front(), r);
blocked_iter->second.pop_front();
}
waiting_for_blocked_object.erase(blocked_iter);
}
return;
}
osd->promote_finish(results->object_size);
osd->logger->inc(l_osd_tier_promote);
if (agent_state &&
agent_state->is_idle())
agent_choose_mode();
}
void PrimaryLogPG::cancel_copy(CopyOpRef cop, bool requeue,
vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << " " << cop->obc->obs.oi.soid
<< " from " << cop->src << " " << cop->oloc
<< " v" << cop->results.user_version << dendl;
// cancel objecter op, if we can
if (cop->objecter_tid) {
tids->push_back(cop->objecter_tid);
cop->objecter_tid = 0;
if (cop->objecter_tid2) {
tids->push_back(cop->objecter_tid2);
cop->objecter_tid2 = 0;
}
}
copy_ops.erase(cop->obc->obs.oi.soid);
cop->obc->stop_block();
kick_object_context_blocked(cop->obc);
cop->results.should_requeue = requeue;
CopyCallbackResults result(-ECANCELED, &cop->results);
cop->cb->complete(result);
// There may still be an objecter callback referencing this copy op.
// That callback will not need the obc since it's been canceled, and
// we need the obc reference to go away prior to flush.
cop->obc = ObjectContextRef();
}
void PrimaryLogPG::cancel_copy_ops(bool requeue, vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << dendl;
map<hobject_t,CopyOpRef>::iterator p = copy_ops.begin();
while (p != copy_ops.end()) {
// requeue this op? can I queue up all of them?
cancel_copy((p++)->second, requeue, tids);
}
}
struct C_gather : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
OSDOp *osd_op;
C_gather(PrimaryLogPG *pg_, hobject_t oid_, epoch_t lpr_, OSDOp *osd_op_) :
pg(pg_), oid(oid_), last_peering_reset(lpr_), osd_op(osd_op_) {}
void finish(int r) override {
if (r == -ECANCELED)
return;
std::scoped_lock locker{*pg};
auto p = pg->cls_gather_ops.find(oid);
if (p == pg->cls_gather_ops.end()) {
// op was cancelled
return;
}
if (last_peering_reset != pg->get_last_peering_reset()) {
return;
}
osd_op->rval = r;
PrimaryLogPG::OpContext *ctx = p->second.ctx;
pg->cls_gather_ops.erase(p);
pg->execute_ctx(ctx);
}
};
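// Issue a class-method call against each of the source objects in the named
// pool and gather the replies into src_obj_buffs.  C_gather re-executes the
// op context once all sub-reads have completed; returns -EINPROGRESS while
// the gather is outstanding.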
int PrimaryLogPG::start_cls_gather(OpContext *ctx, std::map<std::string, bufferlist> *src_obj_buffs, const std::string& pool,
const char *cls, const char *method, bufferlist& inbl)
{
OpRequestRef op = ctx->op;
MOSDOp *m = static_cast<MOSDOp*>(op->get_nonconst_req());
auto pool_id = osd->objecter->with_osdmap(std::mem_fn(&OSDMap::lookup_pg_pool_name), pool);
object_locator_t oloc(pool_id);
ObjectState& obs = ctx->new_obs;
object_info_t& oi = obs.oi;
const hobject_t& soid = oi.soid;
ObjectContextRef obc = get_object_context(soid, false);
C_GatherBuilder gather(cct);
auto [iter, inserted] = cls_gather_ops.emplace(soid, CLSGatherOp(ctx, obc, op));
ceph_assert(inserted);
auto &cgop = iter->second;
for (std::map<std::string, bufferlist>::iterator it = src_obj_buffs->begin(); it != src_obj_buffs->end(); it++) {
std::string oid = it->first;
ObjectOperation obj_op;
obj_op.call(cls, method, inbl);
uint32_t flags = 0;
ceph_tid_t tid = osd->objecter->read(
object_t(oid), oloc, obj_op,
m->get_snapid(), &it->second,
flags, gather.new_sub());
cgop.objecter_tids.push_back(tid);
dout(10) << __func__ << " src=" << oid << ", tgt=" << soid << dendl;
}
C_gather *fin = new C_gather(this, soid, get_last_peering_reset(), &(*ctx->ops)[ctx->current_osd_subop_num]);
gather.set_finisher(new C_OnFinisher(fin,
osd->get_objecter_finisher(get_pg_shard())));
gather.activate();
return -EINPROGRESS;
}
// ========================================================================
// flush
//
// Flush a dirty object in the cache tier by writing it back to the
// base tier. The sequence looks like:
//
// * send a copy-from operation to the base tier to copy the current
// version of the object
// * base tier will pull the object via (perhaps multiple) copy-get(s)
// * on completion, we check if the object has been modified. if so,
// just reply with -EAGAIN.
// * try to take a write lock so we can clear the dirty flag. if this
// fails, wait and retry
// * start a repop that clears the bit.
//
// If we have to wait, we will retry by coming back through the
// start_flush method. We check if a flush is already in progress
// and, if so, try to finish it by rechecking the version and trying
// to clear the dirty bit.
//
// In order for the cache-flush (a write op) to not block the copy-get
// from reading the object, the client *must* set the SKIPRWLOCKS
// flag.
//
// NOTE: normally writes are strictly ordered for the client, but
// flushes are special in that they can be reordered with respect to
// other writes. In particular, we can't have a flush request block
// an update to the cache pool object!
struct C_Flush : public Context {
PrimaryLogPGRef pg;
hobject_t oid;
epoch_t last_peering_reset;
ceph_tid_t tid;
utime_t start;
C_Flush(PrimaryLogPG *p, hobject_t o, epoch_t lpr)
: pg(p), oid(o), last_peering_reset(lpr),
tid(0), start(ceph_clock_now())
{}
void finish(int r) override {
if (r == -ECANCELED)
return;
std::scoped_lock locker{*pg};
if (last_peering_reset == pg->get_last_peering_reset()) {
pg->finish_flush(oid, tid, r);
pg->osd->logger->tinc(l_osd_tier_flush_lat, ceph_clock_now() - start);
}
}
};
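// Start flushing a dirty object into the dedup tier: run content-defined
// chunking over the object, compute a fingerprint object for every chunk,
// and issue CREATE_OR_GET_REF refcount ops for the chunks not already
// referenced by an adjacent clone.  Progress is tracked by a ManifestOp and
// finish_set_dedup() completes the flush once every chunk op has returned.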
int PrimaryLogPG::start_dedup(OpRequestRef op, ObjectContextRef obc)
{
const object_info_t& oi = obc->obs.oi;
const hobject_t& soid = oi.soid;
ceph_assert(obc->is_blocked());
if (oi.size == 0) {
// evicted
return 0;
}
if (pool.info.get_fingerprint_type() == pg_pool_t::TYPE_FINGERPRINT_NONE) {
dout(0) << " fingerprint algorithm is not set " << dendl;
return -EINVAL;
}
if (pool.info.get_dedup_tier() <= 0) {
dout(10) << " dedup tier is not set " << dendl;
return -EINVAL;
}
/*
* The operations to make dedup chunks are tracked by a ManifestOp.
* This op will be finished if all the operations are completed.
*/
ManifestOpRef mop(std::make_shared<ManifestOp>(obc, nullptr));
// cdc
std::map<uint64_t, bufferlist> chunks;
int r = do_cdc(oi, mop->new_manifest.chunk_map, chunks);
if (r < 0) {
return r;
}
if (!chunks.size()) {
return 0;
}
  // The chunks issued here differ from the newly generated chunk_map because
  // chunks that already exist in the previous snap are not issued again.
  // So we need two data structures: the issued chunk list to track in-flight
  // operations, and the new chunk_map that replaces the object's chunk_map
  // once all operations have finished.
object_ref_delta_t refs;
ObjectContextRef obc_l, obc_g;
get_adjacent_clones(obc, obc_l, obc_g);
  // skip chunks whose content already exists at the same offset in the previous snap
mop->new_manifest.calc_refs_to_inc_on_set(
obc_l ? &(obc_l->obs.oi.manifest) : nullptr,
obc_g ? &(obc_g->obs.oi.manifest) : nullptr,
refs);
for (auto p : chunks) {
hobject_t target = mop->new_manifest.chunk_map[p.first].oid;
if (refs.find(target) == refs.end()) {
continue;
}
C_SetDedupChunks *fin = new C_SetDedupChunks(this, soid, get_last_peering_reset(), p.first);
ceph_tid_t tid = refcount_manifest(soid, target, refcount_t::CREATE_OR_GET_REF,
fin, std::move(chunks[p.first]));
mop->chunks[target] = make_pair(p.first, p.second.length());
mop->num_chunks++;
mop->tids[p.first] = tid;
fin->tid = tid;
dout(10) << __func__ << " oid: " << soid << " tid: " << tid
<< " target: " << target << " offset: " << p.first
<< " length: " << p.second.length() << dendl;
}
if (mop->tids.size()) {
manifest_ops[soid] = mop;
manifest_ops[soid]->op = op;
} else {
    // no refcount ops were issued, so there is nothing to wait for
return 0;
}
return -EINPROGRESS;
}
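// Read the whole object and run the pool's content-defined chunking
// algorithm over it.  Fills chunk_map with offset -> chunk_info_t entries
// pointing at the per-chunk fingerprint objects and fills chunks with the
// raw chunk data; returns the total chunked length or a negative error.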
int PrimaryLogPG::do_cdc(const object_info_t& oi,
std::map<uint64_t, chunk_info_t>& chunk_map,
std::map<uint64_t, bufferlist>& chunks)
{
string chunk_algo = pool.info.get_dedup_chunk_algorithm_name();
int64_t chunk_size = pool.info.get_dedup_cdc_chunk_size();
uint64_t total_length = 0;
std::unique_ptr<CDC> cdc = CDC::create(chunk_algo, cbits(chunk_size)-1);
if (!cdc) {
dout(0) << __func__ << " unrecognized chunk-algorithm " << dendl;
return -EINVAL;
}
bufferlist bl;
  /**
   * We do not support an EC pool as the base tier for distributed dedup:
   * EC pools do not implement objects_read_sync(), so supporting them would
   * require reworking this implementation.  We leave that as future work.
   */
int r = pgbackend->objects_read_sync(
oi.soid, 0, oi.size, 0, &bl);
if (r < 0) {
dout(0) << __func__ << " read fail " << oi.soid
<< " len: " << oi.size << " r: " << r << dendl;
return r;
}
if (bl.length() != oi.size) {
dout(0) << __func__ << " bl.length: " << bl.length() << " != oi.size: "
<< oi.size << " during chunking " << dendl;
return -EIO;
}
dout(10) << __func__ << " oid: " << oi.soid << " len: " << bl.length()
<< " oi.size: " << oi.size
<< " chunk_size: " << chunk_size << dendl;
vector<pair<uint64_t, uint64_t>> cdc_chunks;
cdc->calc_chunks(bl, &cdc_chunks);
// get fingerprint
for (auto p : cdc_chunks) {
bufferlist chunk;
chunk.substr_of(bl, p.first, p.second);
auto [ret, target] = get_fpoid_from_chunk(oi.soid, chunk);
if (ret < 0) {
return ret;
}
chunks[p.first] = std::move(chunk);
chunk_map[p.first] = chunk_info_t(0, p.second, target);
total_length += p.second;
}
return total_length;
}
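// Compute the configured fingerprint (SHA1/SHA256/SHA512) of a chunk and map
// it to the hobject_t it will be stored under in the dedup tier.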
std::pair<int, hobject_t> PrimaryLogPG::get_fpoid_from_chunk(
const hobject_t soid, bufferlist& chunk)
{
pg_pool_t::fingerprint_t fp_algo = pool.info.get_fingerprint_type();
if (fp_algo == pg_pool_t::TYPE_FINGERPRINT_NONE) {
return make_pair(-EINVAL, hobject_t());
}
object_t fp_oid = [&fp_algo, &chunk]() -> string {
switch (fp_algo) {
case pg_pool_t::TYPE_FINGERPRINT_SHA1:
return ceph::crypto::digest<ceph::crypto::SHA1>(chunk).to_str();
case pg_pool_t::TYPE_FINGERPRINT_SHA256:
return ceph::crypto::digest<ceph::crypto::SHA256>(chunk).to_str();
case pg_pool_t::TYPE_FINGERPRINT_SHA512:
return ceph::crypto::digest<ceph::crypto::SHA512>(chunk).to_str();
default:
assert(0 == "unrecognized fingerprint type");
return {};
}
}();
pg_t raw_pg;
object_locator_t oloc(soid);
oloc.pool = pool.info.get_dedup_tier();
  // catch the case where dedup_tier has not been set
ceph_assert(oloc.pool > 0);
int ret = get_osdmap()->object_locator_to_pg(fp_oid, oloc, raw_pg);
if (ret < 0) {
return make_pair(ret, hobject_t());
}
hobject_t target(fp_oid, oloc.key, snapid_t(),
raw_pg.ps(), raw_pg.pool(),
oloc.nspace);
return make_pair(0, target);
}
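// Completion for one chunk refcount op issued by start_dedup().  Once all
// chunk ops have returned, the object is marked clean, references to the old
// chunks are dropped, and the new chunk_map is installed via a CLEAN log
// entry; any failure is reported back to the original op instead.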
int PrimaryLogPG::finish_set_dedup(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<hobject_t,ManifestOpRef>::iterator p = manifest_ops.find(oid);
if (p == manifest_ops.end()) {
dout(10) << __func__ << " no manifest_op found" << dendl;
return -EINVAL;
}
ManifestOpRef mop = p->second;
mop->results[offset] = r;
if (r < 0) {
// if any failure occurs, put a mark on the results to recognize the failure
mop->results[0] = r;
}
if (mop->num_chunks != mop->results.size()) {
    // there are still chunk ops in flight
return -EINPROGRESS;
}
ObjectContextRef obc = mop->obc;
ceph_assert(obc);
ceph_assert(obc->is_blocked());
obc->stop_block();
kick_object_context_blocked(obc);
if (mop->results[0] < 0) {
    // one of the chunk ops failed; report the error and give up
ceph_assert(mop->num_chunks == mop->results.size());
manifest_ops.erase(oid);
osd->reply_op_error(mop->op, mop->results[0]);
return -EIO;
}
if (mop->chunks.size()) {
OpContextUPtr ctx = simple_opc_create(obc);
ceph_assert(ctx);
if (ctx->lock_manager.get_lock_type(
RWState::RWWRITE,
oid,
obc,
mop->op)) {
dout(20) << __func__ << " took write lock" << dendl;
} else if (mop->op) {
dout(10) << __func__ << " waiting on write lock " << mop->op << dendl;
close_op_ctx(ctx.release());
return -EAGAIN;
}
ctx->at_version = get_next_version();
ctx->new_obs = obc->obs;
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY);
--ctx->delta_stats.num_objects_dirty;
if (!ctx->obs->oi.has_manifest()) {
ctx->delta_stats.num_objects_manifest++;
ctx->new_obs.oi.set_flag(object_info_t::FLAG_MANIFEST);
ctx->new_obs.oi.manifest.type = object_manifest_t::TYPE_CHUNKED;
}
/*
* Let's assume that there is a manifest snapshotted object, and we issue tier_flush() to head.
* head: [0, 2) aaa <-- tier_flush()
* 20: [0, 2) ddd, [6, 2) bbb, [8, 2) ccc
*
* In this case, if the new chunk_map is as follows,
* new_chunk_map : [0, 2) ddd, [6, 2) bbb, [8, 2) ccc
* we should drop aaa from head by using calc_refs_to_drop_on_removal().
     * So, the procedure is
* 1. calc_refs_to_drop_on_removal()
* 2. register old references to drop after tier_flush() is committed
* 3. update new chunk_map
*/
ObjectCleanRegions c_regions = ctx->clean_regions;
ObjectContextRef cobc = get_prev_clone_obc(obc);
c_regions.mark_fully_dirty();
    // CDC was done over the entire range of the manifest object,
    // so the first thing to do here is drop the references to the old chunks
ObjectContextRef obc_l, obc_g;
get_adjacent_clones(obc, obc_l, obc_g);
// clear all old references
object_ref_delta_t refs;
ctx->obs->oi.manifest.calc_refs_to_drop_on_removal(
obc_l ? &(obc_l->obs.oi.manifest) : nullptr,
obc_g ? &(obc_g->obs.oi.manifest) : nullptr,
refs);
if (!refs.is_empty()) {
ctx->register_on_commit(
[oid, this, refs](){
dec_refcount(oid, refs);
});
}
// set new references
ctx->new_obs.oi.manifest.chunk_map = mop->new_manifest.chunk_map;
finish_ctx(ctx.get(), pg_log_entry_t::CLEAN);
simple_opc_submit(std::move(ctx));
}
if (mop->op)
osd->reply_op_error(mop->op, r);
manifest_ops.erase(oid);
return 0;
}
int PrimaryLogPG::finish_set_manifest_refcount(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<hobject_t,ManifestOpRef>::iterator p = manifest_ops.find(oid);
if (p == manifest_ops.end()) {
dout(10) << __func__ << " no manifest_op found" << dendl;
return -EINVAL;
}
ManifestOpRef mop = p->second;
mop->results[offset] = r;
if (r < 0) {
// if any failure occurs, put a mark on the results to recognize the failure
mop->results[0] = r;
}
if (mop->num_chunks != mop->results.size()) {
    // there are still refcount ops in flight
return -EINPROGRESS;
}
if (mop->cb) {
mop->cb->complete(r);
}
manifest_ops.erase(p);
mop.reset();
return 0;
}
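// Flush a dirty object to the base tier (or via start_dedup() for chunked
// manifest objects and force_dedup).  May piggyback on or cancel an existing
// flush for the same object, and for snapped objects issues a preliminary
// delete with the older SnapContext before the copy-from, as described in
// the comment block above.  Returns -EINPROGRESS once the flush is issued.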
int PrimaryLogPG::start_flush(
OpRequestRef op, ObjectContextRef obc,
bool blocking, hobject_t *pmissing,
std::optional<std::function<void()>> &&on_flush,
bool force_dedup)
{
const object_info_t& oi = obc->obs.oi;
const hobject_t& soid = oi.soid;
dout(10) << __func__ << " " << soid
<< " v" << oi.version
<< " uv" << oi.user_version
<< " " << (blocking ? "blocking" : "non-blocking/best-effort")
<< dendl;
bool preoctopus_compat =
get_osdmap()->require_osd_release < ceph_release_t::octopus;
SnapSet snapset;
if (preoctopus_compat) {
// for pre-octopus compatibility, filter SnapSet::snaps. not
// certain we need this, but let's be conservative.
snapset = obc->ssc->snapset.get_filtered(pool.info);
} else {
// NOTE: change this to a const ref when we remove this compat code
snapset = obc->ssc->snapset;
}
if ((obc->obs.oi.has_manifest() && obc->obs.oi.manifest.is_chunked())
|| force_dedup) {
// current dedup tier only supports blocking operation
if (!blocking) {
return -EOPNOTSUPP;
}
}
  // verify there are no (older) dirty clones
{
dout(20) << " snapset " << snapset << dendl;
vector<snapid_t>::reverse_iterator p = snapset.clones.rbegin();
while (p != snapset.clones.rend() && *p >= soid.snap)
++p;
if (p != snapset.clones.rend()) {
hobject_t next = soid;
next.snap = *p;
ceph_assert(next.snap < soid.snap);
if (recovery_state.get_pg_log().get_missing().is_missing(next)) {
dout(10) << __func__ << " missing clone is " << next << dendl;
if (pmissing)
*pmissing = next;
return -ENOENT;
}
ObjectContextRef older_obc = get_object_context(next, false);
if (older_obc) {
dout(20) << __func__ << " next oldest clone is " << older_obc->obs.oi
<< dendl;
if (older_obc->obs.oi.is_dirty()) {
dout(10) << __func__ << " next oldest clone is dirty: "
<< older_obc->obs.oi << dendl;
return -EBUSY;
}
} else {
dout(20) << __func__ << " next oldest clone " << next
<< " is not present; implicitly clean" << dendl;
}
} else {
dout(20) << __func__ << " no older clones" << dendl;
}
}
if (blocking) {
dout(20) << fmt::format("{}: blocking {}", __func__, soid) << dendl;
obc->start_block();
}
map<hobject_t,FlushOpRef>::iterator p = flush_ops.find(soid);
if (p != flush_ops.end()) {
FlushOpRef fop = p->second;
if (fop->op == op) {
// we couldn't take the write lock on a cache-try-flush before;
// now we are trying again for the lock.
return try_flush_mark_clean(fop);
}
if (fop->flushed_version == obc->obs.oi.user_version &&
(fop->blocking || !blocking)) {
// nonblocking can join anything
// blocking can only join a blocking flush
dout(20) << __func__ << " piggybacking on existing flush " << dendl;
if (op)
fop->dup_ops.push_back(op);
return -EAGAIN; // clean up this ctx; op will retry later
}
// cancel current flush since it will fail anyway, or because we
// are blocking and the existing flush is nonblocking.
dout(20) << __func__ << " canceling previous flush; it will fail" << dendl;
if (fop->op)
osd->reply_op_error(fop->op, -EBUSY);
while (!fop->dup_ops.empty()) {
osd->reply_op_error(fop->dup_ops.front(), -EBUSY);
fop->dup_ops.pop_front();
}
vector<ceph_tid_t> tids;
cancel_flush(fop, false, &tids);
osd->objecter->op_cancel(tids, -ECANCELED);
}
if ((obc->obs.oi.has_manifest() && obc->obs.oi.manifest.is_chunked())
|| force_dedup) {
int r = start_dedup(op, obc);
if (r != -EINPROGRESS) {
if (blocking)
obc->stop_block();
}
return r;
}
/**
* In general, we need to send a delete and a copyfrom.
* Consider snapc 10:[10, 9, 8, 4, 3, 2]:[10(10, 9), 4(4,3,2)]
* where 4 is marked as clean. To flush 10, we have to:
* 1) delete 4:[4,3,2] -- Logically, the object does not exist after 4
* 2) copyfrom 8:[8,4,3,2] -- flush object after snap 8
*
   * There is a complicating case. Suppose there had been a clone 7
* for snaps [7, 6] which has been trimmed since they no longer exist.
* In the base pool, we'd have 5:[4,3,2]:[4(4,3,2)]+head. When we submit
* the delete, the snap will be promoted to 5, and the head will become
* a whiteout. When the copy-from goes through, we'll end up with
* 8:[8,4,3,2]:[4(4,3,2)]+head.
*
* Another complication is the case where there is an interval change
* after doing the delete and the flush but before marking the object
* clean. We'll happily delete head and then recreate it at the same
* sequence number, which works out ok.
*/
SnapContext snapc, dsnapc;
if (snapset.seq != 0) {
if (soid.snap == CEPH_NOSNAP) {
snapc = snapset.get_ssc_as_of(snapset.seq);
} else {
snapid_t min_included_snap;
auto p = snapset.clone_snaps.find(soid.snap);
ceph_assert(p != snapset.clone_snaps.end());
min_included_snap = p->second.back();
snapc = snapset.get_ssc_as_of(min_included_snap - 1);
}
snapid_t prev_snapc = 0;
for (vector<snapid_t>::reverse_iterator citer = snapset.clones.rbegin();
citer != snapset.clones.rend();
++citer) {
if (*citer < soid.snap) {
prev_snapc = *citer;
break;
}
}
dsnapc = snapset.get_ssc_as_of(prev_snapc);
}
object_locator_t base_oloc(soid);
base_oloc.pool = pool.info.tier_of;
if (dsnapc.seq < snapc.seq) {
ObjectOperation o;
o.remove();
osd->objecter->mutate(
soid.oid,
base_oloc,
o,
dsnapc,
ceph::real_clock::from_ceph_timespec(oi.mtime),
(CEPH_OSD_FLAG_IGNORE_OVERLAY |
CEPH_OSD_FLAG_ENFORCE_SNAPC),
NULL /* no callback, we'll rely on the ordering w.r.t the next op */);
}
FlushOpRef fop(std::make_shared<FlushOp>());
fop->obc = obc;
fop->flushed_version = oi.user_version;
fop->blocking = blocking;
fop->on_flush = std::move(on_flush);
fop->op = op;
ObjectOperation o;
if (oi.is_whiteout()) {
fop->removal = true;
o.remove();
} else {
object_locator_t oloc(soid);
o.copy_from(soid.oid.name, soid.snap, oloc, oi.user_version,
CEPH_OSD_COPY_FROM_FLAG_FLUSH |
CEPH_OSD_COPY_FROM_FLAG_IGNORE_OVERLAY |
CEPH_OSD_COPY_FROM_FLAG_IGNORE_CACHE |
CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE,
LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL|LIBRADOS_OP_FLAG_FADVISE_NOCACHE);
    // tell the base tier not to cache the data after this op
if (agent_state && agent_state->evict_mode != TierAgentState::EVICT_MODE_FULL)
o.set_last_op_flags(LIBRADOS_OP_FLAG_FADVISE_DONTNEED);
}
C_Flush *fin = new C_Flush(this, soid, get_last_peering_reset());
ceph_tid_t tid = osd->objecter->mutate(
soid.oid, base_oloc, o, snapc,
ceph::real_clock::from_ceph_timespec(oi.mtime),
CEPH_OSD_FLAG_IGNORE_OVERLAY | CEPH_OSD_FLAG_ENFORCE_SNAPC,
new C_OnFinisher(fin,
osd->get_objecter_finisher(get_pg_shard())));
/* we're under the pg lock and fin->finish() is grabbing that */
fin->tid = tid;
fop->objecter_tid = tid;
flush_ops[soid] = fop;
recovery_state.update_stats(
[&oi](auto &history, auto &stats) {
stats.stats.sum.num_flush++;
stats.stats.sum.num_flush_kb += shift_round_up(oi.size, 10);
return false;
});
return -EINPROGRESS;
}
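// Objecter completion for the copy-from (or delete) issued by start_flush().
// On error the original op and any dups are failed or requeued; on success
// we try to mark the object clean via try_flush_mark_clean().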
void PrimaryLogPG::finish_flush(hobject_t oid, ceph_tid_t tid, int r)
{
dout(10) << __func__ << " " << oid << " tid " << tid
<< " " << cpp_strerror(r) << dendl;
map<hobject_t,FlushOpRef>::iterator p = flush_ops.find(oid);
if (p == flush_ops.end()) {
dout(10) << __func__ << " no flush_op found" << dendl;
return;
}
FlushOpRef fop = p->second;
if (tid != fop->objecter_tid && !fop->obc->obs.oi.has_manifest()) {
dout(10) << __func__ << " tid " << tid << " != fop " << fop
<< " tid " << fop->objecter_tid << dendl;
return;
}
ObjectContextRef obc = fop->obc;
fop->objecter_tid = 0;
if (r < 0 && !(r == -ENOENT && fop->removal)) {
if (fop->op)
osd->reply_op_error(fop->op, -EBUSY);
if (fop->blocking) {
obc->stop_block();
kick_object_context_blocked(obc);
}
if (!fop->dup_ops.empty()) {
dout(20) << __func__ << " requeueing dups" << dendl;
requeue_ops(fop->dup_ops);
}
if (fop->on_flush) {
(*(fop->on_flush))();
fop->on_flush = std::nullopt;
}
flush_ops.erase(oid);
return;
}
r = try_flush_mark_clean(fop);
if (r == -EBUSY && fop->op) {
osd->reply_op_error(fop->op, r);
}
}
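// After a successful flush, try to clear the DIRTY flag on the object.  This
// can fail if the object changed while the flush was in flight, if the write
// is blocked by scrub, or if the write lock cannot be taken, in which case
// the flush is failed or retried.  For chunked manifest objects whose chunk
// map covers the whole object, the local data is also truncated and the
// chunks marked MISSING so only the manifest remains in this tier.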
int PrimaryLogPG::try_flush_mark_clean(FlushOpRef fop)
{
ObjectContextRef obc = fop->obc;
const hobject_t& oid = obc->obs.oi.soid;
if (fop->blocking) {
obc->stop_block();
kick_object_context_blocked(obc);
}
if (fop->flushed_version != obc->obs.oi.user_version ||
!obc->obs.exists) {
if (obc->obs.exists)
dout(10) << __func__ << " flushed_version " << fop->flushed_version
<< " != current " << obc->obs.oi.user_version
<< dendl;
else
dout(10) << __func__ << " object no longer exists" << dendl;
if (!fop->dup_ops.empty()) {
dout(20) << __func__ << " requeueing dups" << dendl;
requeue_ops(fop->dup_ops);
}
if (fop->on_flush) {
(*(fop->on_flush))();
fop->on_flush = std::nullopt;
}
flush_ops.erase(oid);
if (fop->blocking)
osd->logger->inc(l_osd_tier_flush_fail);
else
osd->logger->inc(l_osd_tier_try_flush_fail);
return -EBUSY;
}
if (!fop->blocking &&
m_scrubber->write_blocked_by_scrub(oid)) {
if (fop->op) {
dout(10) << __func__ << " blocked by scrub" << dendl;
requeue_op(fop->op);
requeue_ops(fop->dup_ops);
return -EAGAIN; // will retry
} else {
osd->logger->inc(l_osd_tier_try_flush_fail);
vector<ceph_tid_t> tids;
cancel_flush(fop, false, &tids);
osd->objecter->op_cancel(tids, -ECANCELED);
return -ECANCELED;
}
}
// successfully flushed, can we evict this object?
if (!obc->obs.oi.has_manifest() && !fop->op &&
agent_state && agent_state->evict_mode != TierAgentState::EVICT_MODE_IDLE &&
agent_maybe_evict(obc, true)) {
osd->logger->inc(l_osd_tier_clean);
if (fop->on_flush) {
(*(fop->on_flush))();
fop->on_flush = std::nullopt;
}
flush_ops.erase(oid);
return 0;
}
dout(10) << __func__ << " clearing DIRTY flag for " << oid << dendl;
OpContextUPtr ctx = simple_opc_create(fop->obc);
  // successfully flushed; can we clear the dirty bit?
  // take the write lock manually, since this ctx was created outside the
  // normal op dispatch path and does not hold any locks yet.
if (ctx->lock_manager.get_lock_type(
RWState::RWWRITE,
oid,
obc,
fop->op)) {
dout(20) << __func__ << " took write lock" << dendl;
} else if (fop->op) {
dout(10) << __func__ << " waiting on write lock " << fop->op << " "
<< fop->dup_ops << dendl;
// fop->op is now waiting on the lock; get fop->dup_ops to wait too.
for (auto op : fop->dup_ops) {
bool locked = ctx->lock_manager.get_lock_type(
RWState::RWWRITE,
oid,
obc,
op);
ceph_assert(!locked);
}
close_op_ctx(ctx.release());
return -EAGAIN; // will retry
} else {
dout(10) << __func__ << " failed write lock, no op; failing" << dendl;
close_op_ctx(ctx.release());
osd->logger->inc(l_osd_tier_try_flush_fail);
vector<ceph_tid_t> tids;
cancel_flush(fop, false, &tids);
osd->objecter->op_cancel(tids, -ECANCELED);
return -ECANCELED;
}
if (fop->on_flush) {
ctx->register_on_finish(*(fop->on_flush));
fop->on_flush = std::nullopt;
}
ctx->at_version = get_next_version();
ctx->new_obs = obc->obs;
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_DIRTY);
--ctx->delta_stats.num_objects_dirty;
if (fop->obc->obs.oi.has_manifest()) {
ceph_assert(obc->obs.oi.manifest.is_chunked());
PGTransaction* t = ctx->op_t.get();
uint64_t chunks_size = 0;
for (auto &p : ctx->new_obs.oi.manifest.chunk_map) {
chunks_size += p.second.length;
}
if (ctx->new_obs.oi.is_omap() && pool.info.supports_omap()) {
t->omap_clear(oid);
ctx->new_obs.oi.clear_omap_digest();
ctx->new_obs.oi.clear_flag(object_info_t::FLAG_OMAP);
ctx->clean_regions.mark_omap_dirty();
}
if (obc->obs.oi.size == chunks_size) {
t->truncate(oid, 0);
interval_set<uint64_t> trim;
trim.insert(0, ctx->new_obs.oi.size);
ctx->modified_ranges.union_of(trim);
truncate_update_size_and_usage(ctx->delta_stats,
ctx->new_obs.oi,
0);
ctx->clean_regions.mark_data_region_dirty(0, ctx->new_obs.oi.size);
ctx->new_obs.oi.new_object();
for (auto &p : ctx->new_obs.oi.manifest.chunk_map) {
p.second.set_flag(chunk_info_t::FLAG_MISSING);
}
} else {
for (auto &p : ctx->new_obs.oi.manifest.chunk_map) {
dout(20) << __func__ << " offset: " << p.second.offset
<< " length: " << p.second.length << dendl;
p.second.clear_flag(chunk_info_t::FLAG_MISSING); // CLEAN
}
}
}
finish_ctx(ctx.get(), pg_log_entry_t::CLEAN);
osd->logger->inc(l_osd_tier_clean);
if (!fop->dup_ops.empty() || fop->op) {
dout(20) << __func__ << " requeueing for " << ctx->at_version << dendl;
list<OpRequestRef> ls;
if (fop->op)
ls.push_back(fop->op);
ls.splice(ls.end(), fop->dup_ops);
requeue_ops(ls);
}
simple_opc_submit(std::move(ctx));
flush_ops.erase(oid);
if (fop->blocking)
osd->logger->inc(l_osd_tier_flush);
else
osd->logger->inc(l_osd_tier_try_flush);
return -EINPROGRESS;
}
void PrimaryLogPG::cancel_flush(FlushOpRef fop, bool requeue,
vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << " " << fop->obc->obs.oi.soid << " tid "
<< fop->objecter_tid << dendl;
if (fop->objecter_tid) {
tids->push_back(fop->objecter_tid);
fop->objecter_tid = 0;
}
if (fop->io_tids.size()) {
for (auto &p : fop->io_tids) {
tids->push_back(p.second);
p.second = 0;
}
}
if (fop->blocking && fop->obc->is_blocked()) {
fop->obc->stop_block();
kick_object_context_blocked(fop->obc);
}
if (requeue) {
if (fop->op)
requeue_op(fop->op);
requeue_ops(fop->dup_ops);
}
if (fop->on_flush) {
(*(fop->on_flush))();
fop->on_flush = std::nullopt;
}
flush_ops.erase(fop->obc->obs.oi.soid);
}
void PrimaryLogPG::cancel_flush_ops(bool requeue, vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << dendl;
map<hobject_t,FlushOpRef>::iterator p = flush_ops.begin();
while (p != flush_ops.end()) {
cancel_flush((p++)->second, requeue, tids);
}
}
bool PrimaryLogPG::is_present_clone(hobject_t coid)
{
if (!pool.info.allow_incomplete_clones())
return true;
if (is_missing_object(coid))
return true;
ObjectContextRef obc = get_object_context(coid, false);
return obc && obc->obs.exists;
}
// ========================================================================
// cls gather
//
void PrimaryLogPG::cancel_cls_gather(map<hobject_t,CLSGatherOp>::iterator iter, bool requeue,
vector<ceph_tid_t> *tids)
{
auto &cgop = iter->second;
for (std::vector<ceph_tid_t>::iterator p = cgop.objecter_tids.begin(); p != cgop.objecter_tids.end(); p++) {
tids->push_back(*p);
dout(10) << __func__ << " " << cgop.obc->obs.oi.soid << " tid " << *p << dendl;
}
cgop.objecter_tids.clear();
close_op_ctx(cgop.ctx);
cgop.ctx = NULL;
if (requeue) {
if (cgop.op)
requeue_op(cgop.op);
}
cls_gather_ops.erase(iter);
}
void PrimaryLogPG::cancel_cls_gather_ops(bool requeue, vector<ceph_tid_t> *tids)
{
dout(10) << __func__ << dendl;
map<hobject_t,CLSGatherOp>::iterator p = cls_gather_ops.begin();
while (p != cls_gather_ops.end()) {
cancel_cls_gather(p++, requeue, tids);
}
}
// ========================================================================
// rep op gather
class C_OSD_RepopCommit : public Context {
PrimaryLogPGRef pg;
boost::intrusive_ptr<PrimaryLogPG::RepGather> repop;
public:
C_OSD_RepopCommit(PrimaryLogPG *pg, PrimaryLogPG::RepGather *repop)
: pg(pg), repop(repop) {}
void finish(int) override {
pg->repop_all_committed(repop.get());
}
};
void PrimaryLogPG::repop_all_committed(RepGather *repop)
{
dout(10) << __func__ << ": repop tid " << repop->rep_tid << " all committed "
<< dendl;
repop->all_committed = true;
if (!repop->rep_aborted) {
if (repop->v != eversion_t()) {
recovery_state.complete_write(repop->v, repop->pg_local_last_complete);
}
eval_repop(repop);
}
}
void PrimaryLogPG::op_applied(const eversion_t &applied_version)
{
dout(10) << "op_applied version " << applied_version << dendl;
ceph_assert(applied_version != eversion_t());
ceph_assert(applied_version <= info.last_update);
recovery_state.local_write_applied(applied_version);
if (is_primary() && m_scrubber) {
// if there's a scrub operation waiting for the selected chunk to be fully updated -
// allow it to continue
m_scrubber->on_applied_when_primary(recovery_state.get_last_update_applied());
}
}
void PrimaryLogPG::eval_repop(RepGather *repop)
{
dout(10) << "eval_repop " << *repop
<< (repop->op && repop->op->get_req<MOSDOp>() ? "" : " (no op)") << dendl;
// ondisk?
if (repop->all_committed) {
dout(10) << " commit: " << *repop << dendl;
for (auto p = repop->on_committed.begin();
p != repop->on_committed.end();
repop->on_committed.erase(p++)) {
(*p)();
}
// send dup commits, in order
auto it = waiting_for_ondisk.find(repop->v);
if (it != waiting_for_ondisk.end()) {
ceph_assert(waiting_for_ondisk.begin()->first == repop->v);
for (auto& i : it->second) {
int return_code = repop->r;
if (return_code >= 0) {
return_code = std::get<2>(i);
}
osd->reply_op_error(std::get<0>(i), return_code, repop->v,
std::get<1>(i), std::get<3>(i));
}
waiting_for_ondisk.erase(it);
}
publish_stats_to_osd();
dout(10) << " removing " << *repop << dendl;
ceph_assert(!repop_queue.empty());
dout(20) << " q front is " << *repop_queue.front() << dendl;
if (repop_queue.front() == repop) {
RepGather *to_remove = nullptr;
while (!repop_queue.empty() &&
(to_remove = repop_queue.front())->all_committed) {
repop_queue.pop_front();
for (auto p = to_remove->on_success.begin();
p != to_remove->on_success.end();
to_remove->on_success.erase(p++)) {
(*p)();
}
remove_repop(to_remove);
}
}
}
}
void PrimaryLogPG::issue_repop(RepGather *repop, OpContext *ctx)
{
FUNCTRACE(cct);
const hobject_t& soid = ctx->obs->oi.soid;
dout(7) << "issue_repop rep_tid " << repop->rep_tid
<< " o " << soid
<< dendl;
repop->v = ctx->at_version;
ctx->op_t->add_obc(ctx->obc);
if (ctx->clone_obc) {
ctx->op_t->add_obc(ctx->clone_obc);
}
if (ctx->head_obc) {
ctx->op_t->add_obc(ctx->head_obc);
}
Context *on_all_commit = new C_OSD_RepopCommit(this, repop);
if (!(ctx->log.empty())) {
ceph_assert(ctx->at_version >= projected_last_update);
projected_last_update = ctx->at_version;
}
for (auto &&entry: ctx->log) {
projected_log.add(entry);
}
recovery_state.pre_submit_op(
soid,
ctx->log,
ctx->at_version);
pgbackend->submit_transaction(
soid,
ctx->delta_stats,
ctx->at_version,
std::move(ctx->op_t),
recovery_state.get_pg_trim_to(),
recovery_state.get_min_last_complete_ondisk(),
std::move(ctx->log),
ctx->updated_hset_history,
on_all_commit,
repop->rep_tid,
ctx->reqid,
ctx->op);
}
PrimaryLogPG::RepGather *PrimaryLogPG::new_repop(
OpContext *ctx,
ceph_tid_t rep_tid)
{
if (ctx->op)
dout(10) << "new_repop rep_tid " << rep_tid << " on " << *ctx->op->get_req() << dendl;
else
dout(10) << "new_repop rep_tid " << rep_tid << " (no op)" << dendl;
RepGather *repop = new RepGather(
ctx, rep_tid, info.last_complete);
repop->start = ceph_clock_now();
repop_queue.push_back(&repop->queue_item);
repop->get();
osd->logger->inc(l_osd_op_wip);
dout(10) << __func__ << ": " << *repop << dendl;
return repop;
}
boost::intrusive_ptr<PrimaryLogPG::RepGather> PrimaryLogPG::new_repop(
eversion_t version,
int r,
ObcLockManager &&manager,
OpRequestRef &&op,
std::optional<std::function<void(void)> > &&on_complete)
{
RepGather *repop = new RepGather(
std::move(manager),
std::move(op),
std::move(on_complete),
osd->get_tid(),
info.last_complete,
r);
repop->v = version;
repop->start = ceph_clock_now();
repop_queue.push_back(&repop->queue_item);
osd->logger->inc(l_osd_op_wip);
dout(10) << __func__ << ": " << *repop << dendl;
return boost::intrusive_ptr<RepGather>(repop);
}
void PrimaryLogPG::remove_repop(RepGather *repop)
{
dout(20) << __func__ << " " << *repop << dendl;
for (auto p = repop->on_finish.begin();
p != repop->on_finish.end();
repop->on_finish.erase(p++)) {
(*p)();
}
release_object_locks(
repop->lock_manager);
repop->put();
osd->logger->dec(l_osd_op_wip);
}
PrimaryLogPG::OpContextUPtr PrimaryLogPG::simple_opc_create(ObjectContextRef obc)
{
dout(20) << __func__ << " " << obc->obs.oi.soid << dendl;
ceph_tid_t rep_tid = osd->get_tid();
osd_reqid_t reqid(osd->get_cluster_msgr_name(), 0, rep_tid);
OpContextUPtr ctx(new OpContext(OpRequestRef(), reqid, nullptr, obc, this));
ctx->op_t.reset(new PGTransaction());
ctx->mtime = ceph_clock_now();
return ctx;
}
void PrimaryLogPG::simple_opc_submit(OpContextUPtr ctx)
{
RepGather *repop = new_repop(ctx.get(), ctx->reqid.tid);
dout(20) << __func__ << " " << repop << dendl;
issue_repop(repop, ctx.get());
eval_repop(repop);
recovery_state.update_trim_to();
repop->put();
}
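// Append the given log entries locally and on every shard of the acting set
// without going through the normal op pipeline (used for out-of-band log
// updates such as marking unfound objects lost or recording error log
// entries).  A RepGather tracks the per-shard acks; the optional on_complete
// callback and op are completed once all shards have committed.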
void PrimaryLogPG::submit_log_entries(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObcLockManager &&manager,
std::optional<std::function<void(void)> > &&_on_complete,
OpRequestRef op,
int r)
{
dout(10) << __func__ << " " << entries << dendl;
ceph_assert(is_primary());
eversion_t version;
if (!entries.empty()) {
ceph_assert(entries.rbegin()->version >= projected_last_update);
version = projected_last_update = entries.rbegin()->version;
}
boost::intrusive_ptr<RepGather> repop;
std::optional<std::function<void(void)> > on_complete;
if (get_osdmap()->require_osd_release >= ceph_release_t::jewel) {
repop = new_repop(
version,
r,
std::move(manager),
std::move(op),
std::move(_on_complete));
} else {
on_complete = std::move(_on_complete);
}
pgbackend->call_write_ordered(
[this, entries, repop, on_complete]() {
ObjectStore::Transaction t;
eversion_t old_last_update = info.last_update;
recovery_state.merge_new_log_entries(
entries, t, recovery_state.get_pg_trim_to(),
recovery_state.get_min_last_complete_ondisk());
set<pg_shard_t> waiting_on;
for (set<pg_shard_t>::const_iterator i = get_acting_recovery_backfill().begin();
i != get_acting_recovery_backfill().end();
++i) {
pg_shard_t peer(*i);
if (peer == pg_whoami) continue;
ceph_assert(recovery_state.get_peer_missing().count(peer));
ceph_assert(recovery_state.has_peer_info(peer));
if (get_osdmap()->require_osd_release >= ceph_release_t::jewel) {
ceph_assert(repop);
MOSDPGUpdateLogMissing *m = new MOSDPGUpdateLogMissing(
entries,
spg_t(info.pgid.pgid, i->shard),
pg_whoami.shard,
get_osdmap_epoch(),
get_last_peering_reset(),
repop->rep_tid,
recovery_state.get_pg_trim_to(),
recovery_state.get_min_last_complete_ondisk());
osd->send_message_osd_cluster(
peer.osd, m, get_osdmap_epoch());
waiting_on.insert(peer);
} else {
MOSDPGLog *m = new MOSDPGLog(
peer.shard, pg_whoami.shard,
info.last_update.epoch,
info, get_last_peering_reset());
m->log.log = entries;
m->log.tail = old_last_update;
m->log.head = info.last_update;
osd->send_message_osd_cluster(
peer.osd, m, get_osdmap_epoch());
}
}
ceph_tid_t rep_tid = repop->rep_tid;
waiting_on.insert(pg_whoami);
log_entry_update_waiting_on.insert(
make_pair(
rep_tid,
LogUpdateCtx{std::move(repop), std::move(waiting_on)}
));
struct OnComplete : public Context {
PrimaryLogPGRef pg;
ceph_tid_t rep_tid;
epoch_t epoch;
OnComplete(
PrimaryLogPGRef pg,
ceph_tid_t rep_tid,
epoch_t epoch)
: pg(pg), rep_tid(rep_tid), epoch(epoch) {}
void finish(int) override {
std::scoped_lock l{*pg};
if (!pg->pg_has_reset_since(epoch)) {
auto it = pg->log_entry_update_waiting_on.find(rep_tid);
ceph_assert(it != pg->log_entry_update_waiting_on.end());
auto it2 = it->second.waiting_on.find(pg->pg_whoami);
ceph_assert(it2 != it->second.waiting_on.end());
it->second.waiting_on.erase(it2);
if (it->second.waiting_on.empty()) {
pg->repop_all_committed(it->second.repop.get());
pg->log_entry_update_waiting_on.erase(it);
}
}
}
};
t.register_on_commit(
new OnComplete{this, rep_tid, get_osdmap_epoch()});
int r = osd->store->queue_transaction(ch, std::move(t), NULL);
ceph_assert(r == 0);
op_applied(info.last_update);
});
recovery_state.update_trim_to();
}
void PrimaryLogPG::cancel_log_updates()
{
// get rid of all the LogUpdateCtx so their references to repops are
// dropped
log_entry_update_waiting_on.clear();
}
// -------------------------------------------------------
void PrimaryLogPG::get_watchers(list<obj_watch_item_t> *ls)
{
std::scoped_lock l{*this};
pair<hobject_t, ObjectContextRef> i;
while (object_contexts.get_next(i.first, &i)) {
ObjectContextRef obc(i.second);
get_obc_watchers(obc, *ls);
}
}
void PrimaryLogPG::get_obc_watchers(ObjectContextRef obc, list<obj_watch_item_t> &pg_watchers)
{
for (map<pair<uint64_t, entity_name_t>, WatchRef>::iterator j =
obc->watchers.begin();
j != obc->watchers.end();
++j) {
obj_watch_item_t owi;
owi.obj = obc->obs.oi.soid;
owi.wi.addr = j->second->get_peer_addr();
owi.wi.name = j->second->get_entity();
owi.wi.cookie = j->second->get_cookie();
owi.wi.timeout_seconds = j->second->get_timeout();
dout(30) << "watch: Found oid=" << owi.obj << " addr=" << owi.wi.addr
<< " name=" << owi.wi.name << " cookie=" << owi.wi.cookie << dendl;
pg_watchers.push_back(owi);
}
}
void PrimaryLogPG::check_blocklisted_watchers()
{
dout(20) << "PrimaryLogPG::check_blocklisted_watchers for pg " << get_pgid() << dendl;
pair<hobject_t, ObjectContextRef> i;
while (object_contexts.get_next(i.first, &i))
check_blocklisted_obc_watchers(i.second);
}
void PrimaryLogPG::check_blocklisted_obc_watchers(ObjectContextRef obc)
{
dout(20) << "PrimaryLogPG::check_blocklisted_obc_watchers for obc " << obc->obs.oi.soid << dendl;
for (map<pair<uint64_t, entity_name_t>, WatchRef>::iterator k =
obc->watchers.begin();
k != obc->watchers.end();
) {
    // Advance iterator now so handle_watch_timeout() can erase element
map<pair<uint64_t, entity_name_t>, WatchRef>::iterator j = k++;
dout(30) << "watch: Found " << j->second->get_entity() << " cookie " << j->second->get_cookie() << dendl;
entity_addr_t ea = j->second->get_peer_addr();
dout(30) << "watch: Check entity_addr_t " << ea << dendl;
if (get_osdmap()->is_blocklisted(ea)) {
dout(10) << "watch: Found blocklisted watcher for " << ea << dendl;
ceph_assert(j->second->get_pg() == this);
j->second->unregister_cb();
handle_watch_timeout(j->second);
}
}
}
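// Rebuild in-memory Watch state from the watchers persisted in object_info.
// They start out disconnected; if the client never reconnects they will
// eventually be reaped via handle_watch_timeout().  Watchers belonging to
// blocklisted clients are dropped immediately.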
void PrimaryLogPG::populate_obc_watchers(ObjectContextRef obc)
{
ceph_assert(is_primary() && is_active());
auto it_objects = recovery_state.get_pg_log().get_log().objects.find(obc->obs.oi.soid);
ceph_assert((recovering.count(obc->obs.oi.soid) ||
!is_missing_object(obc->obs.oi.soid)) ||
(it_objects != recovery_state.get_pg_log().get_log().objects.end() && // or this is a revert... see recover_primary()
it_objects->second->op ==
pg_log_entry_t::LOST_REVERT &&
it_objects->second->reverting_to ==
obc->obs.oi.version));
dout(10) << "populate_obc_watchers " << obc->obs.oi.soid << dendl;
ceph_assert(obc->watchers.empty());
// populate unconnected_watchers
for (map<pair<uint64_t, entity_name_t>, watch_info_t>::iterator p =
obc->obs.oi.watchers.begin();
p != obc->obs.oi.watchers.end();
++p) {
utime_t expire = info.stats.last_became_active;
expire += p->second.timeout_seconds;
dout(10) << " unconnected watcher " << p->first << " will expire " << expire << dendl;
WatchRef watch(
Watch::makeWatchRef(
this, osd, obc, p->second.timeout_seconds, p->first.first,
p->first.second, p->second.addr));
watch->disconnect();
obc->watchers.insert(
make_pair(
make_pair(p->first.first, p->first.second),
watch));
}
// Look for watchers from blocklisted clients and drop
check_blocklisted_obc_watchers(obc);
}
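// A watch's grace period expired without the client reconnecting.  If the
// object is writable right now, commit a small MODIFY that removes the
// watcher from object_info; if the object is degraded/backfilling or blocked
// by scrub, defer by re-registering a delayed callback.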
void PrimaryLogPG::handle_watch_timeout(WatchRef watch)
{
ObjectContextRef obc = watch->get_obc(); // handle_watch_timeout owns this ref
dout(10) << "handle_watch_timeout obc " << *obc << dendl;
if (!is_active()) {
dout(10) << "handle_watch_timeout not active, no-op" << dendl;
return;
}
if (!obc->obs.exists) {
dout(10) << __func__ << " object " << obc->obs.oi.soid << " dne" << dendl;
return;
}
if (is_degraded_or_backfilling_object(obc->obs.oi.soid)) {
callbacks_for_degraded_object[obc->obs.oi.soid].push_back(
watch->get_delayed_cb()
);
dout(10) << "handle_watch_timeout waiting for degraded on obj "
<< obc->obs.oi.soid
<< dendl;
return;
}
if (m_scrubber->write_blocked_by_scrub(obc->obs.oi.soid)) {
dout(10) << "handle_watch_timeout waiting for scrub on obj "
<< obc->obs.oi.soid
<< dendl;
m_scrubber->add_callback(
watch->get_delayed_cb() // This callback!
);
return;
}
OpContextUPtr ctx = simple_opc_create(obc);
ctx->at_version = get_next_version();
object_info_t& oi = ctx->new_obs.oi;
oi.watchers.erase(make_pair(watch->get_cookie(),
watch->get_entity()));
list<watch_disconnect_t> watch_disconnects = {
watch_disconnect_t(watch->get_cookie(), watch->get_entity(), true)
};
ctx->register_on_success(
[this, obc, watch_disconnects]() {
complete_disconnect_watches(obc, watch_disconnects);
});
PGTransaction *t = ctx->op_t.get();
ctx->log.push_back(pg_log_entry_t(pg_log_entry_t::MODIFY, obc->obs.oi.soid,
ctx->at_version,
oi.version,
0,
osd_reqid_t(), ctx->mtime, 0));
oi.prior_version = obc->obs.oi.version;
oi.version = ctx->at_version;
bufferlist bl;
encode(oi, bl, get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
t->setattr(obc->obs.oi.soid, OI_ATTR, bl);
// apply new object state.
ctx->obc->obs = ctx->new_obs;
// no ctx->delta_stats
simple_opc_submit(std::move(ctx));
}
ObjectContextRef PrimaryLogPG::create_object_context(const object_info_t& oi,
SnapSetContext *ssc)
{
ObjectContextRef obc(object_contexts.lookup_or_create(oi.soid));
ceph_assert(obc->destructor_callback == NULL);
obc->destructor_callback = new C_PG_ObjectContext(this, obc.get());
obc->obs.oi = oi;
obc->obs.exists = false;
obc->ssc = ssc;
if (ssc)
register_snapset_context(ssc);
dout(10) << "create_object_context " << (void*)obc.get() << " " << oi.soid << " " << dendl;
if (is_active())
populate_obc_watchers(obc);
return obc;
}
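// Look up (or, when can_create is set, construct) the ObjectContext for soid.
// Sources, in order: the in-memory cache, the caller-supplied attrs map (as
// used on the recovery/backfill push path), then OI_ATTR read from the
// backend.  A null ref is returned -- logically -ENOENT -- when the object
// does not exist, its object_info is corrupt, or no SnapSetContext can be
// attached.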
ObjectContextRef PrimaryLogPG::get_object_context(
const hobject_t& soid,
bool can_create,
const map<string, bufferlist, less<>> *attrs)
{
auto it_objects = recovery_state.get_pg_log().get_log().objects.find(soid);
ceph_assert(
attrs || !recovery_state.get_pg_log().get_missing().is_missing(soid) ||
// or this is a revert... see recover_primary()
(it_objects != recovery_state.get_pg_log().get_log().objects.end() &&
it_objects->second->op ==
pg_log_entry_t::LOST_REVERT));
ObjectContextRef obc = object_contexts.lookup(soid);
osd->logger->inc(l_osd_object_ctx_cache_total);
if (obc) {
osd->logger->inc(l_osd_object_ctx_cache_hit);
dout(10) << __func__ << ": found obc in cache: " << *obc
<< dendl;
} else {
dout(10) << __func__ << ": obc NOT found in cache: " << soid << dendl;
// check disk
bufferlist bv;
if (attrs) {
auto it_oi = attrs->find(OI_ATTR);
ceph_assert(it_oi != attrs->end());
bv = it_oi->second;
} else {
int r = pgbackend->objects_get_attr(soid, OI_ATTR, &bv);
if (r < 0) {
if (!can_create) {
dout(10) << __func__ << ": no obc for soid "
<< soid << " and !can_create"
<< dendl;
return ObjectContextRef(); // -ENOENT!
}
dout(10) << __func__ << ": no obc for soid "
<< soid << " but can_create"
<< dendl;
// new object.
object_info_t oi(soid);
SnapSetContext *ssc = get_snapset_context(
soid, true, 0, false);
ceph_assert(ssc);
obc = create_object_context(oi, ssc);
dout(10) << __func__ << ": " << *obc
<< " oi: " << obc->obs.oi
<< " " << *obc->ssc << dendl;
return obc;
}
}
object_info_t oi;
try {
bufferlist::const_iterator bliter = bv.begin();
decode(oi, bliter);
} catch (...) {
dout(0) << __func__ << ": obc corrupt: " << soid << dendl;
return ObjectContextRef(); // -ENOENT!
}
ceph_assert(oi.soid.pool == (int64_t)info.pgid.pool());
obc = object_contexts.lookup_or_create(oi.soid);
obc->destructor_callback = new C_PG_ObjectContext(this, obc.get());
obc->obs.oi = oi;
obc->obs.exists = true;
obc->ssc = get_snapset_context(
soid, true,
soid.has_snapset() ? attrs : 0);
if (is_primary() && is_active())
populate_obc_watchers(obc);
if (pool.info.is_erasure()) {
if (attrs) {
obc->attr_cache = *attrs;
} else {
int r = pgbackend->objects_get_attrs(
soid,
&obc->attr_cache);
ceph_assert(r == 0);
}
}
dout(10) << __func__ << ": creating obc from disk: " << *obc
<< dendl;
}
// XXX: Caller doesn't expect this
if (obc->ssc == NULL) {
derr << __func__ << ": obc->ssc not available, not returning context" << dendl;
return ObjectContextRef(); // -ENOENT!
}
dout(10) << __func__ << ": " << *obc
<< " oi: " << obc->obs.oi
<< " exists: " << (int)obc->obs.exists
<< " " << *obc->ssc << dendl;
return obc;
}
void PrimaryLogPG::context_registry_on_change()
{
pair<hobject_t, ObjectContextRef> i;
while (object_contexts.get_next(i.first, &i)) {
ObjectContextRef obc(i.second);
if (obc) {
for (map<pair<uint64_t, entity_name_t>, WatchRef>::iterator j =
obc->watchers.begin();
j != obc->watchers.end();
obc->watchers.erase(j++)) {
j->second->discard();
}
}
}
}
/*
* If we return an error, and set *pmissing, then promoting that
* object may help.
*
* If we return -EAGAIN, we will always set *pmissing to the missing
* object to wait for.
*
* If we return an error but do not set *pmissing, then we know the
* object does not exist.
*/
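/*
 * Illustrative sketch of the caller's side (names here are hypothetical):
 *
 *   ObjectContextRef obc;
 *   hobject_t wait_oid;
 *   int r = find_object_context(oid, &obc, can_create, false, &wait_oid);
 *   if (r == -EAGAIN) {
 *     // wait_oid names the head/clone to wait for, then retry
 *   } else if (r < 0) {
 *     // object does not exist; if wait_oid was set, promoting it may help
 *   } else {
 *     // obc is valid and readable
 *   }
 */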
int PrimaryLogPG::find_object_context(const hobject_t& oid,
ObjectContextRef *pobc,
bool can_create,
bool map_snapid_to_clone,
hobject_t *pmissing)
{
FUNCTRACE(cct);
ceph_assert(oid.pool == static_cast<int64_t>(info.pgid.pool()));
// want the head?
if (oid.snap == CEPH_NOSNAP) {
ObjectContextRef obc = get_object_context(oid, can_create);
if (!obc) {
if (pmissing)
*pmissing = oid;
return -ENOENT;
}
dout(10) << __func__ << " " << oid
<< " @" << oid.snap
<< " oi=" << obc->obs.oi
<< dendl;
*pobc = obc;
return 0;
}
// we want a snap
hobject_t head = oid.get_head();
SnapSetContext *ssc = get_snapset_context(oid, can_create);
if (!ssc || !(ssc->exists || can_create)) {
dout(20) << __func__ << " " << oid << " no snapset" << dendl;
if (pmissing)
*pmissing = head; // start by getting the head
if (ssc)
put_snapset_context(ssc);
return -ENOENT;
}
if (map_snapid_to_clone) {
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " map_snapid_to_clone=true" << dendl;
if (oid.snap > ssc->snapset.seq) {
// already must be readable
ObjectContextRef obc = get_object_context(head, false);
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " maps to head" << dendl;
*pobc = obc;
put_snapset_context(ssc);
return (obc && obc->obs.exists) ? 0 : -ENOENT;
} else {
vector<snapid_t>::const_iterator citer = std::find(
ssc->snapset.clones.begin(),
ssc->snapset.clones.end(),
oid.snap);
if (citer == ssc->snapset.clones.end()) {
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " maps to nothing" << dendl;
put_snapset_context(ssc);
return -ENOENT;
}
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " maps to " << oid << dendl;
if (recovery_state.get_pg_log().get_missing().is_missing(oid)) {
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " " << oid << " is missing" << dendl;
if (pmissing)
*pmissing = oid;
put_snapset_context(ssc);
return -EAGAIN;
}
ObjectContextRef obc = get_object_context(oid, false);
if (!obc || !obc->obs.exists) {
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " " << oid << " is not present" << dendl;
if (pmissing)
*pmissing = oid;
put_snapset_context(ssc);
return -ENOENT;
}
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset
<< " " << oid << " HIT" << dendl;
*pobc = obc;
put_snapset_context(ssc);
return 0;
}
ceph_abort(); //unreachable
}
dout(10) << __func__ << " " << oid << " @" << oid.snap
<< " snapset " << ssc->snapset << dendl;
// head?
if (oid.snap > ssc->snapset.seq) {
ObjectContextRef obc = get_object_context(head, false);
dout(10) << __func__ << " " << head
<< " want " << oid.snap << " > snapset seq " << ssc->snapset.seq
<< " -- HIT " << obc->obs
<< dendl;
if (!obc->ssc)
obc->ssc = ssc;
else {
ceph_assert(ssc == obc->ssc);
put_snapset_context(ssc);
}
*pobc = obc;
return 0;
}
// which clone would it be?
unsigned k = 0;
while (k < ssc->snapset.clones.size() &&
ssc->snapset.clones[k] < oid.snap)
k++;
if (k == ssc->snapset.clones.size()) {
dout(10) << __func__ << " no clones with last >= oid.snap "
<< oid.snap << " -- DNE" << dendl;
put_snapset_context(ssc);
return -ENOENT;
}
hobject_t soid(oid.oid, oid.get_key(), ssc->snapset.clones[k], oid.get_hash(),
info.pgid.pool(), oid.get_namespace());
if (recovery_state.get_pg_log().get_missing().is_missing(soid)) {
dout(20) << __func__ << " " << soid << " missing, try again later"
<< dendl;
if (pmissing)
*pmissing = soid;
put_snapset_context(ssc);
return -EAGAIN;
}
ObjectContextRef obc = get_object_context(soid, false);
if (!obc || !obc->obs.exists) {
if (pmissing)
*pmissing = soid;
put_snapset_context(ssc);
if (is_primary()) {
if (is_degraded_or_backfilling_object(soid)) {
dout(20) << __func__ << " clone is degraded or backfilling " << soid << dendl;
return -EAGAIN;
} else if (is_degraded_on_async_recovery_target(soid)) {
dout(20) << __func__ << " clone is recovering " << soid << dendl;
return -EAGAIN;
} else {
dout(20) << __func__ << " missing clone " << soid << dendl;
return -ENOENT;
}
} else {
dout(20) << __func__ << " replica missing clone" << soid << dendl;
return -ENOENT;
}
}
if (!obc->ssc) {
obc->ssc = ssc;
} else {
ceph_assert(obc->ssc == ssc);
put_snapset_context(ssc);
}
ssc = 0;
// clone
dout(20) << __func__ << " " << soid
<< " snapset " << obc->ssc->snapset
<< dendl;
snapid_t first, last;
auto p = obc->ssc->snapset.clone_snaps.find(soid.snap);
ceph_assert(p != obc->ssc->snapset.clone_snaps.end());
if (p->second.empty()) {
dout(1) << __func__ << " " << soid << " empty snapset -- DNE" << dendl;
ceph_assert(!cct->_conf->osd_debug_verify_snaps);
return -ENOENT;
}
if (std::find(p->second.begin(), p->second.end(), oid.snap) ==
p->second.end()) {
dout(20) << __func__ << " " << soid << " clone_snaps " << p->second
<< " does not contain " << oid.snap << " -- DNE" << dendl;
return -ENOENT;
}
if (get_osdmap()->in_removed_snaps_queue(info.pgid.pgid.pool(), oid.snap)) {
dout(20) << __func__ << " " << soid << " snap " << oid.snap
<< " in removed_snaps_queue" << " -- DNE" << dendl;
return -ENOENT;
}
dout(20) << __func__ << " " << soid << " clone_snaps " << p->second
<< " contains " << oid.snap << " -- HIT " << obc->obs << dendl;
*pobc = obc;
return 0;
}
void PrimaryLogPG::object_context_destructor_callback(ObjectContext *obc)
{
if (obc->ssc)
put_snapset_context(obc->ssc);
}
void PrimaryLogPG::add_object_context_to_pg_stat(ObjectContextRef obc, pg_stat_t *pgstat)
{
object_info_t& oi = obc->obs.oi;
dout(10) << __func__ << " " << oi.soid << dendl;
ceph_assert(!oi.soid.is_snapdir());
object_stat_sum_t stat;
stat.num_objects++;
if (oi.is_dirty())
stat.num_objects_dirty++;
if (oi.is_whiteout())
stat.num_whiteouts++;
if (oi.is_omap())
stat.num_objects_omap++;
if (oi.is_cache_pinned())
stat.num_objects_pinned++;
if (oi.has_manifest())
stat.num_objects_manifest++;
if (oi.soid.is_snap()) {
stat.num_object_clones++;
if (!obc->ssc)
obc->ssc = get_snapset_context(oi.soid, false);
ceph_assert(obc->ssc);
stat.num_bytes += obc->ssc->snapset.get_clone_bytes(oi.soid.snap);
} else {
stat.num_bytes += oi.size;
}
// add it in
pgstat->stats.sum.add(stat);
}
void PrimaryLogPG::requeue_op_blocked_by_object(const hobject_t &soid) {
map<hobject_t, list<OpRequestRef>>::iterator p = waiting_for_blocked_object.find(soid);
if (p != waiting_for_blocked_object.end()) {
list<OpRequestRef>& ls = p->second;
dout(10) << __func__ << " " << soid << " requeuing " << ls.size() << " requests" << dendl;
requeue_ops(ls);
waiting_for_blocked_object.erase(p);
}
}
void PrimaryLogPG::kick_object_context_blocked(ObjectContextRef obc)
{
const hobject_t& soid = obc->obs.oi.soid;
if (obc->is_blocked()) {
dout(10) << __func__ << " " << soid << " still blocked" << dendl;
return;
}
requeue_op_blocked_by_object(soid);
map<hobject_t, ObjectContextRef>::iterator i =
objects_blocked_on_snap_promotion.find(obc->obs.oi.soid.get_head());
if (i != objects_blocked_on_snap_promotion.end()) {
ceph_assert(i->second == obc);
ObjectContextRef head_obc = get_object_context(i->first, false);
head_obc->stop_block();
// kick blocked ops (head)
requeue_op_blocked_by_object(i->first);
objects_blocked_on_snap_promotion.erase(i);
}
if (obc->requeue_scrub_on_unblock) {
obc->requeue_scrub_on_unblock = false;
dout(20) << __func__ << " requeuing if still active: " << (is_active() ? "yes" : "no") << dendl;
// only requeue if we are still active: we may be unblocking
// because we are resetting for a new peering interval
if (is_active()) {
osd->queue_scrub_unblocking(this, is_scrub_blocking_ops());
}
}
}
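// Look up (or load/create) the refcounted SnapSetContext keyed by the
// object's snapdir.  Every successful call takes a reference, which the
// caller must balance with put_snapset_context().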
SnapSetContext *PrimaryLogPG::get_snapset_context(
const hobject_t& oid,
bool can_create,
const map<string, bufferlist, less<>> *attrs,
bool oid_existed)
{
std::lock_guard l(snapset_contexts_lock);
SnapSetContext *ssc;
map<hobject_t, SnapSetContext*>::iterator p = snapset_contexts.find(
oid.get_snapdir());
if (p != snapset_contexts.end()) {
if (can_create || p->second->exists) {
ssc = p->second;
} else {
return NULL;
}
} else {
bufferlist bv;
if (!attrs) {
int r = -ENOENT;
if (!(oid.is_head() && !oid_existed)) {
r = pgbackend->objects_get_attr(oid.get_head(), SS_ATTR, &bv);
}
if (r < 0 && !can_create)
return NULL;
} else {
auto it_ss = attrs->find(SS_ATTR);
ceph_assert(it_ss != attrs->end());
bv = it_ss->second;
}
ssc = new SnapSetContext(oid.get_snapdir());
_register_snapset_context(ssc);
if (bv.length()) {
bufferlist::const_iterator bvp = bv.begin();
try {
ssc->snapset.decode(bvp);
} catch (const ceph::buffer::error& e) {
dout(0) << __func__ << " Can't decode snapset: " << e.what() << dendl;
return NULL;
}
ssc->exists = true;
} else {
ssc->exists = false;
}
}
ceph_assert(ssc);
ssc->ref++;
return ssc;
}
void PrimaryLogPG::put_snapset_context(SnapSetContext *ssc)
{
std::lock_guard l(snapset_contexts_lock);
--ssc->ref;
if (ssc->ref == 0) {
if (ssc->registered)
snapset_contexts.erase(ssc->oid);
delete ssc;
}
}
/*
* Return values:
* NONE - didn't pull anything
* YES - pulled what the caller wanted
* HEAD - needed to pull head first
*/
enum { PULL_NONE, PULL_HEAD, PULL_YES };
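// Start recovery of one object missing on the primary.  Unfound objects are
// skipped; objects whose missing entry is a delete are removed locally and,
// if a replica still has them, deleted there as well; for clones, the head is
// recovered first so the SnapSet is available.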
int PrimaryLogPG::recover_missing(
const hobject_t &soid, eversion_t v,
int priority,
PGBackend::RecoveryHandle *h)
{
dout(10) << __func__ << " sar: " << scrub_after_recovery << dendl;
if (recovery_state.get_missing_loc().is_unfound(soid)) {
dout(7) << __func__ << " " << soid
<< " v " << v
<< " but it is unfound" << dendl;
return PULL_NONE;
}
if (recovery_state.get_missing_loc().is_deleted(soid)) {
start_recovery_op(soid);
ceph_assert(!recovering.count(soid));
recovering.insert(make_pair(soid, ObjectContextRef()));
epoch_t cur_epoch = get_osdmap_epoch();
remove_missing_object(soid, v, new LambdaContext(
[=, this](int) {
std::scoped_lock locker{*this};
if (!pg_has_reset_since(cur_epoch)) {
bool object_missing = false;
for (const auto& shard : get_acting_recovery_backfill()) {
if (shard == pg_whoami)
continue;
if (recovery_state.get_peer_missing(shard).is_missing(soid)) {
dout(20) << __func__ << ": soid " << soid << " needs to be deleted from replica " << shard << dendl;
object_missing = true;
break;
}
}
if (!object_missing) {
object_stat_sum_t stat_diff;
stat_diff.num_objects_recovered = 1;
if (scrub_after_recovery)
stat_diff.num_objects_repaired = 1;
on_global_recover(soid, stat_diff, true);
} else {
auto recovery_handle = pgbackend->open_recovery_op();
pgbackend->recover_delete_object(soid, v, recovery_handle);
pgbackend->run_recovery_op(recovery_handle, priority);
}
}
}));
return PULL_YES;
}
  // is this a snapped object? if so, consult the snapset... we may not need the entire object!
ObjectContextRef obc;
ObjectContextRef head_obc;
if (soid.snap && soid.snap < CEPH_NOSNAP) {
// do we have the head?
hobject_t head = soid.get_head();
if (recovery_state.get_pg_log().get_missing().is_missing(head)) {
if (recovering.count(head)) {
dout(10) << " missing but already recovering head " << head << dendl;
return PULL_NONE;
} else {
int r = recover_missing(
head, recovery_state.get_pg_log().get_missing().get_items().find(head)->second.need, priority,
h);
if (r != PULL_NONE)
return PULL_HEAD;
return PULL_NONE;
}
}
head_obc = get_object_context(
head,
false,
0);
ceph_assert(head_obc);
}
start_recovery_op(soid);
ceph_assert(!recovering.count(soid));
recovering.insert(make_pair(soid, obc));
int r = pgbackend->recover_object(
soid,
v,
head_obc,
obc,
h);
// This is only a pull which shouldn't return an error
ceph_assert(r >= 0);
return PULL_YES;
}
void PrimaryLogPG::remove_missing_object(const hobject_t &soid,
eversion_t v, Context *on_complete)
{
dout(20) << __func__ << " " << soid << " " << v << dendl;
ceph_assert(on_complete != nullptr);
// delete locally
ObjectStore::Transaction t;
remove_snap_mapped_object(t, soid);
ObjectRecoveryInfo recovery_info;
recovery_info.soid = soid;
recovery_info.version = v;
epoch_t cur_epoch = get_osdmap_epoch();
t.register_on_complete(new LambdaContext(
[=, this](int) {
std::unique_lock locker{*this};
if (!pg_has_reset_since(cur_epoch)) {
ObjectStore::Transaction t2;
on_local_recover(soid, recovery_info, ObjectContextRef(), true, &t2);
t2.register_on_complete(on_complete);
int r = osd->store->queue_transaction(ch, std::move(t2), nullptr);
ceph_assert(r == 0);
locker.unlock();
} else {
locker.unlock();
on_complete->complete(-EAGAIN);
}
}));
int r = osd->store->queue_transaction(ch, std::move(t), nullptr);
ceph_assert(r == 0);
}
void PrimaryLogPG::finish_degraded_object(const hobject_t oid)
{
dout(10) << __func__ << " " << oid << dendl;
if (callbacks_for_degraded_object.count(oid)) {
list<Context*> contexts;
contexts.swap(callbacks_for_degraded_object[oid]);
callbacks_for_degraded_object.erase(oid);
for (list<Context*>::iterator i = contexts.begin();
i != contexts.end();
++i) {
(*i)->complete(0);
}
}
map<hobject_t, snapid_t>::iterator i = objects_blocked_on_degraded_snap.find(
oid.get_head());
if (i != objects_blocked_on_degraded_snap.end() &&
i->second == oid.snap)
objects_blocked_on_degraded_snap.erase(i);
}
void PrimaryLogPG::_committed_pushed_object(
epoch_t epoch, eversion_t last_complete)
{
std::scoped_lock locker{*this};
if (!pg_has_reset_since(epoch)) {
recovery_state.recovery_committed_to(last_complete);
} else {
dout(10) << __func__
<< " pg has changed, not touching last_complete_ondisk" << dendl;
}
}
void PrimaryLogPG::_applied_recovered_object(ObjectContextRef obc)
{
dout(20) << __func__ << dendl;
if (obc) {
dout(20) << "obc = " << *obc << dendl;
}
ceph_assert(active_pushes >= 1);
--active_pushes;
// requeue an active chunky scrub waiting on recovery ops
if (!recovery_state.is_deleting() && active_pushes == 0 &&
is_scrub_active()) {
osd->queue_scrub_pushes_update(this, is_scrub_blocking_ops());
}
}
void PrimaryLogPG::_applied_recovered_object_replica()
{
dout(20) << __func__ << dendl;
ceph_assert(active_pushes >= 1);
--active_pushes;
// requeue an active scrub waiting on recovery ops
if (!recovery_state.is_deleting() && active_pushes == 0 &&
is_scrub_active()) {
osd->queue_scrub_replica_pushes(this, m_scrubber->replica_op_priority());
}
}
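// A pull for soid failed from every shard in 'from': drop the in-flight
// recovery state, mark the object missing on those shards, and if we (the
// primary) were one of the failed sources, record a primary error -- which
// may leave the object unfound.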
void PrimaryLogPG::on_failed_pull(
const set<pg_shard_t> &from,
const hobject_t &soid,
const eversion_t &v)
{
dout(20) << __func__ << ": " << soid << dendl;
ceph_assert(recovering.count(soid));
auto obc = recovering[soid];
if (obc) {
list<OpRequestRef> blocked_ops;
obc->drop_recovery_read(&blocked_ops);
requeue_ops(blocked_ops);
}
recovering.erase(soid);
for (auto&& i : from) {
if (i != pg_whoami) { // we'll get it below in primary_error
recovery_state.force_object_missing(i, soid, v);
}
}
dout(0) << __func__ << " " << soid << " from shard " << from
<< ", reps on " << recovery_state.get_missing_loc().get_locations(soid)
<< " unfound? " << recovery_state.get_missing_loc().is_unfound(soid)
<< dendl;
  finish_recovery_op(soid);  // close out this attempt
finish_degraded_object(soid);
if (from.count(pg_whoami)) {
dout(0) << " primary missing oid " << soid << " version " << v << dendl;
primary_error(soid, v);
backfills_in_flight.erase(soid);
}
}
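// Return the newest version of oid that some shard still has: start from the
// primary's own missing-entry "have" and take the max over the "have" of
// every acting_recovery_backfill peer that is also missing the object.  Used
// by LOST_REVERT to choose a revert target.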
eversion_t PrimaryLogPG::pick_newest_available(const hobject_t& oid)
{
eversion_t v;
pg_missing_item pmi;
bool is_missing = recovery_state.get_pg_log().get_missing().is_missing(oid, &pmi);
ceph_assert(is_missing);
v = pmi.have;
dout(10) << "pick_newest_available " << oid << " " << v << " on osd." << osd->whoami << " (local)" << dendl;
ceph_assert(!get_acting_recovery_backfill().empty());
for (set<pg_shard_t>::iterator i = get_acting_recovery_backfill().begin();
i != get_acting_recovery_backfill().end();
++i) {
if (*i == get_primary()) continue;
pg_shard_t peer = *i;
if (!recovery_state.get_peer_missing(peer).is_missing(oid)) {
continue;
}
eversion_t h = recovery_state.get_peer_missing(peer).get_items().at(oid).have;
dout(10) << "pick_newest_available " << oid << " " << h << " on osd." << peer << dendl;
if (h > v)
v = h;
}
dout(10) << "pick_newest_available " << oid << " " << v << " (newest)" << dendl;
return v;
}
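// Replica-side handler for MOSDPGUpdateLogMissing: append the primary's log
// entries / missing updates in a local transaction and, once it commits (or
// completes, for the legacy cases below), reply with an
// MOSDPGUpdateLogMissingReply carrying our last_complete_ondisk.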
void PrimaryLogPG::do_update_log_missing(OpRequestRef &op)
{
const MOSDPGUpdateLogMissing *m = static_cast<const MOSDPGUpdateLogMissing*>(
op->get_req());
ceph_assert(m->get_type() == MSG_OSD_PG_UPDATE_LOG_MISSING);
ObjectStore::Transaction t;
std::optional<eversion_t> op_trim_to, op_roll_forward_to;
if (m->pg_trim_to != eversion_t())
op_trim_to = m->pg_trim_to;
if (m->pg_roll_forward_to != eversion_t())
op_roll_forward_to = m->pg_roll_forward_to;
dout(20) << __func__
<< " op_trim_to = " << op_trim_to << " op_roll_forward_to = " << op_roll_forward_to << dendl;
recovery_state.append_log_entries_update_missing(
m->entries, t, op_trim_to, op_roll_forward_to);
eversion_t new_lcod = info.last_complete;
Context *complete = new LambdaContext(
[=, this](int) {
const MOSDPGUpdateLogMissing *msg = static_cast<const MOSDPGUpdateLogMissing*>(
op->get_req());
std::scoped_lock locker{*this};
if (!pg_has_reset_since(msg->get_epoch())) {
update_last_complete_ondisk(new_lcod);
MOSDPGUpdateLogMissingReply *reply =
new MOSDPGUpdateLogMissingReply(
spg_t(info.pgid.pgid, primary_shard().shard),
pg_whoami.shard,
msg->get_epoch(),
msg->min_epoch,
msg->get_tid(),
new_lcod);
reply->set_priority(CEPH_MSG_PRIO_HIGH);
msg->get_connection()->send_message(reply);
}
});
if (get_osdmap()->require_osd_release >= ceph_release_t::kraken) {
t.register_on_commit(complete);
} else {
/* Hack to work around the fact that ReplicatedBackend sends
* ack+commit if commit happens first
*
* This behavior is no longer necessary, but we preserve it so old
* primaries can keep their repops in order */
if (pool.info.is_erasure()) {
t.register_on_complete(complete);
} else {
t.register_on_commit(complete);
}
}
int tr = osd->store->queue_transaction(
ch,
std::move(t),
nullptr);
ceph_assert(tr == 0);
op_applied(info.last_update);
}
void PrimaryLogPG::do_update_log_missing_reply(OpRequestRef &op)
{
const MOSDPGUpdateLogMissingReply *m =
static_cast<const MOSDPGUpdateLogMissingReply*>(
op->get_req());
dout(20) << __func__ << " got reply from "
<< m->get_from() << dendl;
auto it = log_entry_update_waiting_on.find(m->get_tid());
if (it != log_entry_update_waiting_on.end()) {
if (it->second.waiting_on.count(m->get_from())) {
it->second.waiting_on.erase(m->get_from());
if (m->last_complete_ondisk != eversion_t()) {
update_peer_last_complete_ondisk(m->get_from(), m->last_complete_ondisk);
}
} else {
osd->clog->error()
<< info.pgid << " got reply "
<< *m << " from shard we are not waiting for "
<< m->get_from();
}
if (it->second.waiting_on.empty()) {
repop_all_committed(it->second.repop.get());
log_entry_update_waiting_on.erase(it);
}
} else {
osd->clog->error()
<< info.pgid << " got reply "
<< *m << " on unknown tid " << m->get_tid();
}
}
/* Mark all unfound objects as lost.
*/
void PrimaryLogPG::mark_all_unfound_lost(
int what,
std::function<void(int,const std::string&,bufferlist&)> on_finish)
{
dout(3) << __func__ << " " << pg_log_entry_t::get_op_name(what) << dendl;
list<hobject_t> oids;
dout(30) << __func__ << ": log before:\n";
recovery_state.get_pg_log().get_log().print(*_dout);
*_dout << dendl;
mempool::osd_pglog::list<pg_log_entry_t> log_entries;
utime_t mtime = ceph_clock_now();
map<hobject_t, pg_missing_item>::const_iterator m =
recovery_state.get_missing_loc().get_needs_recovery().begin();
map<hobject_t, pg_missing_item>::const_iterator mend =
recovery_state.get_missing_loc().get_needs_recovery().end();
ObcLockManager manager;
eversion_t v = get_next_version();
v.epoch = get_osdmap_epoch();
uint64_t num_unfound = recovery_state.get_missing_loc().num_unfound();
while (m != mend) {
const hobject_t &oid(m->first);
if (!recovery_state.get_missing_loc().is_unfound(oid)) {
// We only care about unfound objects
++m;
continue;
}
ObjectContextRef obc;
eversion_t prev;
switch (what) {
case pg_log_entry_t::LOST_MARK:
ceph_abort_msg("actually, not implemented yet!");
break;
case pg_log_entry_t::LOST_REVERT:
prev = pick_newest_available(oid);
if (prev > eversion_t()) {
// log it
pg_log_entry_t e(
pg_log_entry_t::LOST_REVERT, oid, v,
m->second.need, 0, osd_reqid_t(), mtime, 0);
e.reverting_to = prev;
e.mark_unrollbackable();
log_entries.push_back(e);
dout(10) << e << dendl;
// we are now missing the new version; recovery code will sort it out.
++v.version;
++m;
break;
}
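      // no version of this object is available on any shard: fall through
      // and record a LOST_DELETE instead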
case pg_log_entry_t::LOST_DELETE:
{
pg_log_entry_t e(pg_log_entry_t::LOST_DELETE, oid, v, m->second.need,
0, osd_reqid_t(), mtime, 0);
if (get_osdmap()->require_osd_release >= ceph_release_t::jewel) {
if (pool.info.require_rollback()) {
e.mod_desc.try_rmobject(v.version);
} else {
e.mark_unrollbackable();
}
} // otherwise, just do what we used to do
dout(10) << e << dendl;
log_entries.push_back(e);
oids.push_back(oid);
	// If we found a cached context, mark the object as deleted in case
	// we are racing with a new creation.  This can happen if the object
	// was lost after an EIO on the primary.
obc = object_contexts.lookup(oid);
if (obc)
obc->obs.exists = false;
++v.version;
++m;
}
break;
default:
ceph_abort();
}
}
recovery_state.update_stats(
[](auto &history, auto &stats) {
stats.stats_invalid = true;
return false;
});
submit_log_entries(
log_entries,
std::move(manager),
std::optional<std::function<void(void)> >(
[this, oids, num_unfound, on_finish]() {
if (recovery_state.perform_deletes_during_peering()) {
for (auto oid : oids) {
// clear old locations - merge_new_log_entries will have
// handled rebuilding missing_loc for each of these
// objects if we have the RECOVERY_DELETES flag
recovery_state.object_recovered(oid, object_stat_sum_t());
}
}
if (is_recovery_unfound()) {
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::DoRecovery())));
} else if (is_backfill_unfound()) {
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::RequestBackfill())));
} else {
queue_recovery();
}
stringstream ss;
ss << "pg has " << num_unfound
<< " objects unfound and apparently lost marking";
string rs = ss.str();
dout(0) << "do_command r=" << 0 << " " << rs << dendl;
osd->clog->info() << rs;
bufferlist empty;
on_finish(0, rs, empty);
}),
OpRequestRef());
}
void PrimaryLogPG::_split_into(pg_t child_pgid, PG *child, unsigned split_bits)
{
ceph_assert(repop_queue.empty());
}
/*
* pg status change notification
*/
void PrimaryLogPG::apply_and_flush_repops(bool requeue)
{
list<OpRequestRef> rq;
// apply all repops
while (!repop_queue.empty()) {
RepGather *repop = repop_queue.front();
repop_queue.pop_front();
dout(10) << " canceling repop tid " << repop->rep_tid << dendl;
repop->rep_aborted = true;
repop->on_committed.clear();
repop->on_success.clear();
if (requeue) {
if (repop->op) {
dout(10) << " requeuing " << *repop->op->get_req() << dendl;
rq.push_back(repop->op);
repop->op = OpRequestRef();
}
// also requeue any dups, interleaved into position
auto p = waiting_for_ondisk.find(repop->v);
if (p != waiting_for_ondisk.end()) {
dout(10) << " also requeuing ondisk waiters " << p->second << dendl;
for (auto& i : p->second) {
rq.push_back(std::get<0>(i));
}
waiting_for_ondisk.erase(p);
}
}
remove_repop(repop);
}
ceph_assert(repop_queue.empty());
if (requeue) {
requeue_ops(rq);
if (!waiting_for_ondisk.empty()) {
for (auto& i : waiting_for_ondisk) {
for (auto& j : i.second) {
derr << __func__ << ": op " << *(std::get<0>(j)->get_req())
<< " waiting on " << i.first << dendl;
}
}
ceph_assert(waiting_for_ondisk.empty());
}
}
waiting_for_ondisk.clear();
}
void PrimaryLogPG::on_flushed()
{
requeue_ops(waiting_for_flush);
if (!is_peered() || !is_primary()) {
pair<hobject_t, ObjectContextRef> i;
while (object_contexts.get_next(i.first, &i)) {
derr << __func__ << ": object " << i.first << " obc still alive" << dendl;
}
ceph_assert(object_contexts.empty());
}
}
void PrimaryLogPG::on_removal(ObjectStore::Transaction &t)
{
dout(10) << __func__ << dendl;
on_shutdown();
t.register_on_commit(new C_DeleteMore(this, get_osdmap_epoch()));
}
void PrimaryLogPG::clear_async_reads()
{
dout(10) << __func__ << dendl;
for(auto& i : in_progress_async_reads) {
dout(10) << "clear ctx: "
<< "OpRequestRef " << i.first
<< " OpContext " << i.second
<< dendl;
close_op_ctx(i.second);
}
}
void PrimaryLogPG::clear_cache()
{
object_contexts.clear();
}
void PrimaryLogPG::on_shutdown()
{
dout(10) << __func__ << dendl;
if (recovery_queued) {
recovery_queued = false;
osd->clear_queued_recovery(this);
}
m_scrubber->scrub_clear_state();
m_scrubber->rm_from_osd_scrubbing();
vector<ceph_tid_t> tids;
cancel_copy_ops(false, &tids);
cancel_flush_ops(false, &tids);
cancel_proxy_ops(false, &tids);
cancel_manifest_ops(false, &tids);
cancel_cls_gather_ops(false, &tids);
osd->objecter->op_cancel(tids, -ECANCELED);
apply_and_flush_repops(false);
cancel_log_updates();
  // we must remove PGRefs, so do this prior to release_backoffs() callers
clear_backoffs();
// clean up snap trim references
snap_trimmer_machine.process_event(Reset());
pgbackend->on_change();
context_registry_on_change();
object_contexts.clear();
clear_async_reads();
osd->remote_reserver.cancel_reservation(info.pgid);
osd->local_reserver.cancel_reservation(info.pgid);
clear_primary_state();
cancel_recovery();
if (is_primary()) {
osd->clear_ready_to_merge(this);
}
}
void PrimaryLogPG::on_activate_complete()
{
check_local();
// waiters
if (!recovery_state.needs_flush()) {
requeue_ops(waiting_for_peered);
} else if (!waiting_for_peered.empty()) {
dout(10) << __func__ << " flushes in progress, moving "
<< waiting_for_peered.size()
<< " items to waiting_for_flush"
<< dendl;
ceph_assert(waiting_for_flush.empty());
waiting_for_flush.swap(waiting_for_peered);
}
// all clean?
if (needs_recovery()) {
dout(10) << "activate not all replicas are up-to-date, queueing recovery" << dendl;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::DoRecovery())));
} else if (needs_backfill()) {
dout(10) << "activate queueing backfill" << dendl;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::RequestBackfill())));
} else {
dout(10) << "activate all replicas clean, no recovery" << dendl;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::AllReplicasRecovered())));
}
publish_stats_to_osd();
if (get_backfill_targets().size()) {
last_backfill_started = recovery_state.earliest_backfill();
new_backfill = true;
ceph_assert(!last_backfill_started.is_max());
dout(5) << __func__ << ": bft=" << get_backfill_targets()
<< " from " << last_backfill_started << dendl;
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
dout(5) << "target shard " << *i
<< " from " << recovery_state.get_peer_info(*i).last_backfill
<< dendl;
}
}
hit_set_setup();
agent_setup();
}
void PrimaryLogPG::on_change(ObjectStore::Transaction &t)
{
dout(10) << __func__ << dendl;
if (hit_set && hit_set->insert_count() == 0) {
dout(20) << " discarding empty hit_set" << dendl;
hit_set_clear();
}
if (recovery_queued) {
recovery_queued = false;
osd->clear_queued_recovery(this);
}
// requeue everything in the reverse order they should be
// reexamined.
requeue_ops(waiting_for_peered);
requeue_ops(waiting_for_flush);
requeue_ops(waiting_for_active);
requeue_ops(waiting_for_readable);
vector<ceph_tid_t> tids;
cancel_copy_ops(is_primary(), &tids);
cancel_flush_ops(is_primary(), &tids);
cancel_proxy_ops(is_primary(), &tids);
cancel_manifest_ops(is_primary(), &tids);
cancel_cls_gather_ops(is_primary(), &tids);
osd->objecter->op_cancel(tids, -ECANCELED);
// requeue object waiters
for (auto& p : waiting_for_unreadable_object) {
release_backoffs(p.first);
}
if (is_primary()) {
requeue_object_waiters(waiting_for_unreadable_object);
} else {
waiting_for_unreadable_object.clear();
}
for (map<hobject_t,list<OpRequestRef>>::iterator p = waiting_for_degraded_object.begin();
p != waiting_for_degraded_object.end();
waiting_for_degraded_object.erase(p++)) {
release_backoffs(p->first);
if (is_primary())
requeue_ops(p->second);
else
p->second.clear();
finish_degraded_object(p->first);
}
// requeues waiting_for_scrub
m_scrubber->scrub_clear_state();
for (auto p = waiting_for_blocked_object.begin();
p != waiting_for_blocked_object.end();
waiting_for_blocked_object.erase(p++)) {
if (is_primary())
requeue_ops(p->second);
else
p->second.clear();
}
for (auto i = callbacks_for_degraded_object.begin();
i != callbacks_for_degraded_object.end();
) {
finish_degraded_object((i++)->first);
}
ceph_assert(callbacks_for_degraded_object.empty());
if (is_primary()) {
requeue_ops(waiting_for_cache_not_full);
} else {
waiting_for_cache_not_full.clear();
}
objects_blocked_on_cache_full.clear();
for (list<pair<OpRequestRef, OpContext*> >::iterator i =
in_progress_async_reads.begin();
i != in_progress_async_reads.end();
in_progress_async_reads.erase(i++)) {
close_op_ctx(i->second);
if (is_primary())
requeue_op(i->first);
}
// this will requeue ops we were working on but didn't finish, and
// any dups
apply_and_flush_repops(is_primary());
cancel_log_updates();
// do this *after* apply_and_flush_repops so that we catch any newly
// registered watches.
context_registry_on_change();
pgbackend->on_change_cleanup(&t);
m_scrubber->cleanup_store(&t);
pgbackend->on_change();
// clear snap_trimmer state
snap_trimmer_machine.process_event(Reset());
debug_op_order.clear();
unstable_stats.clear();
// we don't want to cache object_contexts through the interval change
// NOTE: we actually assert that all currently live references are dead
// by the time the flush for the next interval completes.
object_contexts.clear();
// should have been cleared above by finishing all of the degraded objects
ceph_assert(objects_blocked_on_degraded_snap.empty());
}
void PrimaryLogPG::plpg_on_role_change()
{
dout(10) << __func__ << dendl;
if (get_role() != 0 && hit_set) {
dout(10) << " clearing hit set" << dendl;
hit_set_clear();
}
}
void PrimaryLogPG::plpg_on_pool_change()
{
dout(10) << __func__ << dendl;
// requeue cache full waiters just in case the cache_mode is
// changing away from writeback mode. note that if we are not
// active the normal requeuing machinery is sufficient (and properly
// ordered).
if (is_active() &&
pool.info.cache_mode != pg_pool_t::CACHEMODE_WRITEBACK &&
!waiting_for_cache_not_full.empty()) {
dout(10) << __func__ << " requeuing full waiters (not in writeback) "
<< dendl;
requeue_ops(waiting_for_cache_not_full);
objects_blocked_on_cache_full.clear();
}
hit_set_setup();
agent_setup();
}
// clear state. called on recovery completion AND cancellation.
void PrimaryLogPG::_clear_recovery_state()
{
#ifdef DEBUG_RECOVERY_OIDS
recovering_oids.clear();
#endif
dout(15) << __func__ << " flags: " << m_planned_scrub << dendl;
last_backfill_started = hobject_t();
set<hobject_t>::iterator i = backfills_in_flight.begin();
while (i != backfills_in_flight.end()) {
backfills_in_flight.erase(i++);
}
list<OpRequestRef> blocked_ops;
for (map<hobject_t, ObjectContextRef>::iterator i = recovering.begin();
i != recovering.end();
recovering.erase(i++)) {
if (i->second) {
i->second->drop_recovery_read(&blocked_ops);
requeue_ops(blocked_ops);
}
}
ceph_assert(backfills_in_flight.empty());
pending_backfill_updates.clear();
ceph_assert(recovering.empty());
pgbackend->clear_recovery_state();
}
void PrimaryLogPG::cancel_pull(const hobject_t &soid)
{
dout(20) << __func__ << ": " << soid << dendl;
ceph_assert(recovering.count(soid));
ObjectContextRef obc = recovering[soid];
if (obc) {
list<OpRequestRef> blocked_ops;
obc->drop_recovery_read(&blocked_ops);
requeue_ops(blocked_ops);
}
recovering.erase(soid);
finish_recovery_op(soid);
release_backoffs(soid);
if (waiting_for_degraded_object.count(soid)) {
dout(20) << " kicking degraded waiters on " << soid << dendl;
requeue_ops(waiting_for_degraded_object[soid]);
waiting_for_degraded_object.erase(soid);
}
if (waiting_for_unreadable_object.count(soid)) {
dout(20) << " kicking unreadable waiters on " << soid << dendl;
requeue_ops(waiting_for_unreadable_object[soid]);
waiting_for_unreadable_object.erase(soid);
}
if (is_missing_object(soid))
recovery_state.set_last_requested(0);
finish_degraded_object(soid);
}
void PrimaryLogPG::check_recovery_sources(const OSDMapRef& osdmap)
{
pgbackend->check_recovery_sources(osdmap);
}
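// Kick off up to 'max' recovery operations: recover objects missing on the
// primary and on replicas, then move on to backfill once nothing is missing.
// *ops_started is set to the number of ops issued; the return value roughly
// indicates whether we are stuck waiting on unfound objects.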
bool PrimaryLogPG::start_recovery_ops(
uint64_t max,
ThreadPool::TPHandle &handle,
uint64_t *ops_started)
{
uint64_t& started = *ops_started;
started = 0;
bool work_in_progress = false;
bool recovery_started = false;
ceph_assert(is_primary());
ceph_assert(is_peered());
ceph_assert(!recovery_state.is_deleting());
ceph_assert(recovery_queued);
recovery_queued = false;
if (!state_test(PG_STATE_RECOVERING) &&
!state_test(PG_STATE_BACKFILLING)) {
/* TODO: I think this case is broken and will make do_recovery()
* unhappy since we're returning false */
dout(10) << "recovery raced and were queued twice, ignoring!" << dendl;
return have_unfound();
}
const auto &missing = recovery_state.get_pg_log().get_missing();
uint64_t num_unfound = get_num_unfound();
if (!recovery_state.have_missing()) {
recovery_state.local_recovery_complete();
}
if (!missing.have_missing() || // Primary does not have missing
// or all of the missing objects are unfound.
recovery_state.all_missing_unfound()) {
// Recover the replicas.
started = recover_replicas(max, handle, &recovery_started);
}
if (!started) {
// We still have missing objects that we should grab from replicas.
started += recover_primary(max, handle);
}
if (!started && num_unfound != get_num_unfound()) {
    // second chance to recover replicas
started = recover_replicas(max, handle, &recovery_started);
}
if (started || recovery_started)
work_in_progress = true;
bool deferred_backfill = false;
if (recovering.empty() &&
state_test(PG_STATE_BACKFILLING) &&
!get_backfill_targets().empty() && started < max &&
missing.num_missing() == 0 &&
waiting_on_backfill.empty()) {
if (get_osdmap()->test_flag(CEPH_OSDMAP_NOBACKFILL)) {
dout(10) << "deferring backfill due to NOBACKFILL" << dendl;
deferred_backfill = true;
} else if (get_osdmap()->test_flag(CEPH_OSDMAP_NOREBALANCE) &&
!is_degraded()) {
dout(10) << "deferring backfill due to NOREBALANCE" << dendl;
deferred_backfill = true;
} else if (!recovery_state.is_backfill_reserved()) {
/* DNMNOTE I think this branch is dead */
dout(10) << "deferring backfill due to !backfill_reserved" << dendl;
if (!backfill_reserving) {
dout(10) << "queueing RequestBackfill" << dendl;
backfill_reserving = true;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::RequestBackfill())));
}
deferred_backfill = true;
} else {
started += recover_backfill(max - started, handle, &work_in_progress);
}
}
dout(10) << " started " << started << dendl;
osd->logger->inc(l_osd_rop, started);
if (!recovering.empty() ||
work_in_progress || recovery_ops_active > 0 || deferred_backfill)
return !work_in_progress && have_unfound();
ceph_assert(recovering.empty());
ceph_assert(recovery_ops_active == 0);
dout(10) << __func__ << " needs_recovery: "
<< recovery_state.get_missing_loc().get_needs_recovery()
<< dendl;
dout(10) << __func__ << " missing_loc: "
<< recovery_state.get_missing_loc().get_missing_locs()
<< dendl;
int unfound = get_num_unfound();
if (unfound) {
dout(10) << " still have " << unfound << " unfound" << dendl;
return true;
}
if (missing.num_missing() > 0) {
// this shouldn't happen!
osd->clog->error() << info.pgid << " Unexpected Error: recovery ending with "
<< missing.num_missing() << ": " << missing.get_items();
return false;
}
if (needs_recovery()) {
// this shouldn't happen!
// We already checked num_missing() so we must have missing replicas
osd->clog->error() << info.pgid
<< " Unexpected Error: recovery ending with missing replicas";
return false;
}
if (state_test(PG_STATE_RECOVERING)) {
state_clear(PG_STATE_RECOVERING);
state_clear(PG_STATE_FORCED_RECOVERY);
if (needs_backfill()) {
dout(10) << "recovery done, queuing backfill" << dendl;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::RequestBackfill())));
} else {
dout(10) << "recovery done, no backfill" << dendl;
state_clear(PG_STATE_FORCED_BACKFILL);
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::AllReplicasRecovered())));
}
} else { // backfilling
state_clear(PG_STATE_BACKFILLING);
state_clear(PG_STATE_FORCED_BACKFILL);
state_clear(PG_STATE_FORCED_RECOVERY);
dout(10) << "recovery done, backfill done" << dendl;
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::Backfilled())));
}
return false;
}
/**
 * Recover objects that the primary itself is missing.
 *
 * Scans the primary's missing set starting from the log's last_requested
 * position, starts up to 'max' pulls (handling LOST_REVERT entries along the
 * way), and returns the number of ops started.
*/
uint64_t PrimaryLogPG::recover_primary(uint64_t max, ThreadPool::TPHandle &handle)
{
ceph_assert(is_primary());
const auto &missing = recovery_state.get_pg_log().get_missing();
dout(10) << __func__ << " recovering " << recovering.size()
<< " in pg,"
<< " missing " << missing << dendl;
dout(25) << __func__ << " " << missing.get_items() << dendl;
// look at log!
pg_log_entry_t *latest = 0;
unsigned started = 0;
int skipped = 0;
PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
map<version_t, hobject_t>::const_iterator p =
missing.get_rmissing().lower_bound(recovery_state.get_pg_log().get_log().last_requested);
while (p != missing.get_rmissing().end()) {
handle.reset_tp_timeout();
hobject_t soid;
version_t v = p->first;
auto it_objects = recovery_state.get_pg_log().get_log().objects.find(p->second);
if (it_objects != recovery_state.get_pg_log().get_log().objects.end()) {
latest = it_objects->second;
ceph_assert(latest->is_update() || latest->is_delete());
soid = latest->soid;
} else {
latest = 0;
soid = p->second;
}
const pg_missing_item& item = missing.get_items().find(p->second)->second;
++p;
hobject_t head = soid.get_head();
eversion_t need = item.need;
dout(10) << __func__ << " "
<< soid << " " << item.need
<< (missing.is_missing(soid) ? " (missing)":"")
<< (missing.is_missing(head) ? " (missing head)":"")
<< (recovering.count(soid) ? " (recovering)":"")
<< (recovering.count(head) ? " (recovering head)":"")
<< dendl;
if (latest) {
switch (latest->op) {
case pg_log_entry_t::CLONE:
/*
* Handling for this special case removed for now, until we
* can correctly construct an accurate SnapSet from the old
* one.
*/
break;
case pg_log_entry_t::LOST_REVERT:
{
if (item.have == latest->reverting_to) {
ObjectContextRef obc = get_object_context(soid, true);
if (obc->obs.oi.version == latest->version) {
// I'm already reverting
dout(10) << " already reverting " << soid << dendl;
} else {
dout(10) << " reverting " << soid << " to " << latest->prior_version << dendl;
obc->obs.oi.version = latest->version;
ObjectStore::Transaction t;
bufferlist b2;
obc->obs.oi.encode(
b2,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
ceph_assert(!pool.info.require_rollback());
t.setattr(coll, ghobject_t(soid), OI_ATTR, b2);
recovery_state.recover_got(
soid,
latest->version,
false,
t);
++active_pushes;
t.register_on_applied(new C_OSD_AppliedRecoveredObject(this, obc));
t.register_on_commit(new C_OSD_CommittedPushedObject(
this,
get_osdmap_epoch(),
info.last_complete));
osd->store->queue_transaction(ch, std::move(t));
continue;
}
} else {
/*
* Pull the old version of the object. Update missing_loc here to have the location
* of the version we want.
*
* This doesn't use the usual missing_loc paths, but that's okay:
* - if we have it locally, we hit the case above, and go from there.
* - if we don't, we always pass through this case during recovery and set up the location
* properly.
* - this way we don't need to mangle the missing code to be general about needing an old
* version...
*/
eversion_t alternate_need = latest->reverting_to;
dout(10) << " need to pull prior_version " << alternate_need << " for revert " << item << dendl;
set<pg_shard_t> good_peers;
for (auto p = recovery_state.get_peer_missing().begin();
p != recovery_state.get_peer_missing().end();
++p) {
if (p->second.is_missing(soid, need) &&
p->second.get_items().at(soid).have == alternate_need) {
good_peers.insert(p->first);
}
}
recovery_state.set_revert_with_targets(
soid,
good_peers);
dout(10) << " will pull " << alternate_need << " or " << need
<< " from one of "
<< recovery_state.get_missing_loc().get_locations(soid)
<< dendl;
}
}
break;
}
}
if (!recovering.count(soid)) {
if (recovering.count(head)) {
++skipped;
} else {
int r = recover_missing(
soid, need, recovery_state.get_recovery_op_priority(), h);
switch (r) {
case PULL_YES:
++started;
break;
case PULL_HEAD:
++started;
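	  // fall through: the head pull counts as started, but the requested
	  // object itself is skipped for now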
case PULL_NONE:
++skipped;
break;
default:
ceph_abort();
}
if (started >= max)
break;
}
}
// only advance last_requested if we haven't skipped anything
if (!skipped)
recovery_state.set_last_requested(v);
}
pgbackend->run_recovery_op(h, recovery_state.get_recovery_op_priority());
return started;
}
bool PrimaryLogPG::primary_error(
const hobject_t& soid, eversion_t v)
{
recovery_state.force_object_missing(pg_whoami, soid, v);
bool uhoh = recovery_state.get_missing_loc().is_unfound(soid);
if (uhoh)
osd->clog->error() << info.pgid << " missing primary copy of "
<< soid << ", unfound";
else
osd->clog->error() << info.pgid << " missing primary copy of "
<< soid
<< ", will try copies on "
<< recovery_state.get_missing_loc().get_locations(soid);
return uhoh;
}
int PrimaryLogPG::prep_object_replica_deletes(
const hobject_t& soid, eversion_t v,
PGBackend::RecoveryHandle *h,
bool *work_started)
{
ceph_assert(is_primary());
dout(10) << __func__ << ": on " << soid << dendl;
ObjectContextRef obc = get_object_context(soid, false);
if (obc) {
if (!obc->get_recovery_read()) {
dout(20) << "replica delete delayed on " << soid
<< "; could not get rw_manager lock" << dendl;
*work_started = true;
return 0;
} else {
dout(20) << "replica delete got recovery read lock on " << soid
<< dendl;
}
}
start_recovery_op(soid);
ceph_assert(!recovering.count(soid));
if (!obc)
recovering.insert(make_pair(soid, ObjectContextRef()));
else
recovering.insert(make_pair(soid, obc));
pgbackend->recover_delete_object(soid, v, h);
return 1;
}
int PrimaryLogPG::prep_object_replica_pushes(
const hobject_t& soid, eversion_t v,
PGBackend::RecoveryHandle *h,
bool *work_started)
{
ceph_assert(is_primary());
dout(10) << __func__ << ": on " << soid << dendl;
if (soid.snap && soid.snap < CEPH_NOSNAP) {
// do we have the head and/or snapdir?
hobject_t head = soid.get_head();
if (recovery_state.get_pg_log().get_missing().is_missing(head)) {
if (recovering.count(head)) {
dout(10) << " missing but already recovering head " << head << dendl;
return 0;
} else {
int r = recover_missing(
head, recovery_state.get_pg_log().get_missing().get_items().find(head)->second.need,
recovery_state.get_recovery_op_priority(), h);
if (r != PULL_NONE)
return 1;
return 0;
}
}
}
// NOTE: we know we will get a valid oloc off of disk here.
ObjectContextRef obc = get_object_context(soid, false);
if (!obc) {
primary_error(soid, v);
return 0;
}
if (!obc->get_recovery_read()) {
dout(20) << "recovery delayed on " << soid
<< "; could not get rw_manager lock" << dendl;
*work_started = true;
return 0;
} else {
dout(20) << "recovery got recovery read lock on " << soid
<< dendl;
}
start_recovery_op(soid);
ceph_assert(!recovering.count(soid));
recovering.insert(make_pair(soid, obc));
int r = pgbackend->recover_object(
soid,
v,
ObjectContextRef(),
obc, // has snapset context
h);
if (r < 0) {
dout(0) << __func__ << " Error " << r << " on oid " << soid << dendl;
on_failed_pull({ pg_whoami }, soid, v);
return 0;
}
return 1;
}
uint64_t PrimaryLogPG::recover_replicas(uint64_t max, ThreadPool::TPHandle &handle,
bool *work_started)
{
dout(10) << __func__ << "(" << max << ")" << dendl;
uint64_t started = 0;
PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
// this is FAR from an optimal recovery order. pretty lame, really.
ceph_assert(!get_acting_recovery_backfill().empty());
  // choose replicas to recover: the replica with the shortest missing list
  // goes first, so we can bring it back to normal ASAP
std::vector<std::pair<unsigned int, pg_shard_t>> replicas_by_num_missing,
async_by_num_missing;
replicas_by_num_missing.reserve(get_acting_recovery_backfill().size() - 1);
for (auto &p: get_acting_recovery_backfill()) {
if (p == get_primary()) {
continue;
}
auto pm = recovery_state.get_peer_missing().find(p);
ceph_assert(pm != recovery_state.get_peer_missing().end());
auto nm = pm->second.num_missing();
if (nm != 0) {
if (is_async_recovery_target(p)) {
async_by_num_missing.push_back(make_pair(nm, p));
} else {
replicas_by_num_missing.push_back(make_pair(nm, p));
}
}
}
// sort by number of missing objects, in ascending order.
auto func = [](const std::pair<unsigned int, pg_shard_t> &lhs,
const std::pair<unsigned int, pg_shard_t> &rhs) {
return lhs.first < rhs.first;
};
// acting goes first
std::sort(replicas_by_num_missing.begin(), replicas_by_num_missing.end(), func);
// then async_recovery_targets
std::sort(async_by_num_missing.begin(), async_by_num_missing.end(), func);
replicas_by_num_missing.insert(replicas_by_num_missing.end(),
async_by_num_missing.begin(), async_by_num_missing.end());
for (auto &replica: replicas_by_num_missing) {
pg_shard_t &peer = replica.second;
ceph_assert(peer != get_primary());
auto pm = recovery_state.get_peer_missing().find(peer);
ceph_assert(pm != recovery_state.get_peer_missing().end());
size_t m_sz = pm->second.num_missing();
dout(10) << " peer osd." << peer << " missing " << m_sz << " objects." << dendl;
dout(20) << " peer osd." << peer << " missing " << pm->second.get_items() << dendl;
// oldest first!
const pg_missing_t &m(pm->second);
for (map<version_t, hobject_t>::const_iterator p = m.get_rmissing().begin();
p != m.get_rmissing().end() && started < max;
++p) {
handle.reset_tp_timeout();
const hobject_t soid(p->second);
if (recovery_state.get_missing_loc().is_unfound(soid)) {
dout(10) << __func__ << ": " << soid << " still unfound" << dendl;
continue;
}
const pg_info_t &pi = recovery_state.get_peer_info(peer);
if (soid > pi.last_backfill) {
if (!recovering.count(soid)) {
derr << __func__ << ": object " << soid << " last_backfill "
<< pi.last_backfill << dendl;
derr << __func__ << ": object added to missing set for backfill, but "
<< "is not in recovering, error!" << dendl;
ceph_abort();
}
continue;
}
if (recovering.count(soid)) {
dout(10) << __func__ << ": already recovering " << soid << dendl;
continue;
}
if (recovery_state.get_missing_loc().is_deleted(soid)) {
dout(10) << __func__ << ": " << soid << " is a delete, removing" << dendl;
map<hobject_t,pg_missing_item>::const_iterator r = m.get_items().find(soid);
started += prep_object_replica_deletes(soid, r->second.need, h, work_started);
continue;
}
if (soid.is_snap() &&
recovery_state.get_pg_log().get_missing().is_missing(
soid.get_head())) {
dout(10) << __func__ << ": " << soid.get_head()
<< " still missing on primary" << dendl;
continue;
}
if (recovery_state.get_pg_log().get_missing().is_missing(soid)) {
dout(10) << __func__ << ": " << soid << " still missing on primary" << dendl;
continue;
}
dout(10) << __func__ << ": recover_object_replicas(" << soid << ")" << dendl;
map<hobject_t,pg_missing_item>::const_iterator r = m.get_items().find(soid);
started += prep_object_replica_pushes(soid, r->second.need, h, work_started);
}
}
pgbackend->run_recovery_op(h, recovery_state.get_recovery_op_priority());
return started;
}
hobject_t PrimaryLogPG::earliest_peer_backfill() const
{
hobject_t e = hobject_t::get_max();
for (const pg_shard_t& peer : get_backfill_targets()) {
const auto iter = peer_backfill_info.find(peer);
ceph_assert(iter != peer_backfill_info.end());
e = std::min(e, iter->second.begin);
}
return e;
}
bool PrimaryLogPG::all_peer_done() const
{
// Primary hasn't got any more objects
ceph_assert(backfill_info.empty());
for (const pg_shard_t& bt : get_backfill_targets()) {
const auto piter = peer_backfill_info.find(bt);
ceph_assert(piter != peer_backfill_info.end());
const BackfillInterval& pbi = piter->second;
// See if peer has more to process
if (!pbi.extends_to_end() || !pbi.empty())
return false;
}
return true;
}
/**
* recover_backfill
*
* Invariants:
*
* backfilled: fully pushed to replica or present in replica's missing set (both
* our copy and theirs).
*
 * All objects on a backfill_target in
 * [MIN,peer_backfill_info[backfill_target].begin) are either not present or
 * already backfilled, and every logically-removed object in that interval has
 * actually been deleted from the target.
 * There may be PG objects in this interval yet to be backfilled.
*
* All objects in PG in [MIN,backfill_info.begin) have been backfilled to all
* backfill_targets. There may be objects on backfill_target(s) yet to be deleted.
*
* For a backfill target, all objects < std::min(peer_backfill_info[target].begin,
* backfill_info.begin) in PG are backfilled. No deleted objects in this
* interval remain on the backfill target.
*
* For a backfill target, all objects <= peer_info[target].last_backfill
* have been backfilled to target
*
* There *MAY* be missing/outdated objects between last_backfill_started and
* std::min(peer_backfill_info[*].begin, backfill_info.begin) in the event that client
* io created objects since the last scan. For this reason, we call
* update_range() again before continuing backfill.
*/
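/*
 * Illustrative example (hypothetical positions): suppose
 * backfill_info.begin == obj7 and peer_backfill_info[t].begin == obj4 for a
 * target t.  Per the invariants above, every PG object below obj7 has been
 * pushed to every backfill target, everything below min(obj4, obj7) == obj4
 * is fully consistent on t (stale deletions included), and t's
 * peer_info.last_backfill is never advanced past an object that has not yet
 * been backfilled to t.
 */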
uint64_t PrimaryLogPG::recover_backfill(
uint64_t max,
ThreadPool::TPHandle &handle, bool *work_started)
{
dout(10) << __func__ << " (" << max << ")"
<< " bft=" << get_backfill_targets()
<< " last_backfill_started " << last_backfill_started
<< (new_backfill ? " new_backfill":"")
<< dendl;
ceph_assert(!get_backfill_targets().empty());
// Initialize from prior backfill state
if (new_backfill) {
// on_activate() was called prior to getting here
ceph_assert(last_backfill_started == recovery_state.earliest_backfill());
new_backfill = false;
// initialize BackfillIntervals
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
peer_backfill_info[*i].reset(
recovery_state.get_peer_info(*i).last_backfill);
}
backfill_info.reset(last_backfill_started);
backfills_in_flight.clear();
pending_backfill_updates.clear();
}
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
dout(10) << "peer osd." << *i
<< " info " << recovery_state.get_peer_info(*i)
<< " interval " << peer_backfill_info[*i].begin
<< "-" << peer_backfill_info[*i].end
<< " " << peer_backfill_info[*i].objects.size() << " objects"
<< dendl;
}
// update our local interval to cope with recent changes
backfill_info.begin = last_backfill_started;
update_range(&backfill_info, handle);
unsigned ops = 0;
vector<boost::tuple<hobject_t, eversion_t, pg_shard_t> > to_remove;
set<hobject_t> add_to_stat;
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
peer_backfill_info[*i].trim_to(
std::max(
recovery_state.get_peer_info(*i).last_backfill,
last_backfill_started));
}
backfill_info.trim_to(last_backfill_started);
PGBackend::RecoveryHandle *h = pgbackend->open_recovery_op();
while (ops < max) {
if (backfill_info.begin <= earliest_peer_backfill() &&
!backfill_info.extends_to_end() && backfill_info.empty()) {
hobject_t next = backfill_info.end;
backfill_info.reset(next);
backfill_info.end = hobject_t::get_max();
update_range(&backfill_info, handle);
backfill_info.trim();
}
dout(20) << " my backfill interval " << backfill_info << dendl;
bool sent_scan = false;
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
pg_shard_t bt = *i;
BackfillInterval& pbi = peer_backfill_info[bt];
dout(20) << " peer shard " << bt << " backfill " << pbi << dendl;
if (pbi.begin <= backfill_info.begin &&
!pbi.extends_to_end() && pbi.empty()) {
dout(10) << " scanning peer osd." << bt << " from " << pbi.end << dendl;
epoch_t e = get_osdmap_epoch();
MOSDPGScan *m = new MOSDPGScan(
MOSDPGScan::OP_SCAN_GET_DIGEST, pg_whoami, e, get_last_peering_reset(),
spg_t(info.pgid.pgid, bt.shard),
pbi.end, hobject_t());
if (cct->_conf->osd_op_queue == "mclock_scheduler") {
/* This guard preserves legacy WeightedPriorityQueue behavior for
* now, but should be removed after Reef */
m->set_priority(recovery_state.get_recovery_op_priority());
}
osd->send_message_osd_cluster(bt.osd, m, get_osdmap_epoch());
ceph_assert(waiting_on_backfill.find(bt) == waiting_on_backfill.end());
waiting_on_backfill.insert(bt);
sent_scan = true;
}
}
// Count simultaneous scans as a single op and let those complete
if (sent_scan) {
ops++;
start_recovery_op(hobject_t::get_max()); // XXX: was pbi.end
break;
}
if (backfill_info.empty() && all_peer_done()) {
dout(10) << " reached end for both local and all peers" << dendl;
break;
}
    // Get the next object to operate on among the peers, and
    // the set of targets to which that object applies.
hobject_t check = earliest_peer_backfill();
if (check < backfill_info.begin) {
set<pg_shard_t> check_targets;
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
pg_shard_t bt = *i;
BackfillInterval& pbi = peer_backfill_info[bt];
if (pbi.begin == check)
check_targets.insert(bt);
}
ceph_assert(!check_targets.empty());
dout(20) << " BACKFILL removing " << check
<< " from peers " << check_targets << dendl;
for (set<pg_shard_t>::iterator i = check_targets.begin();
i != check_targets.end();
++i) {
pg_shard_t bt = *i;
BackfillInterval& pbi = peer_backfill_info[bt];
ceph_assert(pbi.begin == check);
to_remove.push_back(boost::make_tuple(check, pbi.objects.begin()->second, bt));
pbi.pop_front();
}
last_backfill_started = check;
      // Don't increment ops here because deletions
      // are cheap and not replied to, unlike real recovery_ops,
      // and we can't increment ops without requeueing ourselves
      // for recovery.
} else {
eversion_t& obj_v = backfill_info.objects.begin()->second;
vector<pg_shard_t> need_ver_targs, missing_targs, keep_ver_targs, skip_targs;
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
pg_shard_t bt = *i;
BackfillInterval& pbi = peer_backfill_info[bt];
// Find all check peers that have the wrong version
if (check == backfill_info.begin && check == pbi.begin) {
if (pbi.objects.begin()->second != obj_v) {
need_ver_targs.push_back(bt);
} else {
keep_ver_targs.push_back(bt);
}
} else {
const pg_info_t& pinfo = recovery_state.get_peer_info(bt);
	  // Only include peers whose backfill line we have caught up to;
	  // otherwise they only appear to be missing this object
	  // because their pbi.begin > backfill_info.begin.
if (backfill_info.begin > pinfo.last_backfill)
missing_targs.push_back(bt);
else
skip_targs.push_back(bt);
}
}
if (!keep_ver_targs.empty()) {
// These peers have version obj_v
dout(20) << " BACKFILL keeping " << check
<< " with ver " << obj_v
<< " on peers " << keep_ver_targs << dendl;
//assert(!waiting_for_degraded_object.count(check));
}
if (!need_ver_targs.empty() || !missing_targs.empty()) {
ObjectContextRef obc = get_object_context(backfill_info.begin, false);
ceph_assert(obc);
if (obc->get_recovery_read()) {
if (!need_ver_targs.empty()) {
dout(20) << " BACKFILL replacing " << check
<< " with ver " << obj_v
<< " to peers " << need_ver_targs << dendl;
}
if (!missing_targs.empty()) {
dout(20) << " BACKFILL pushing " << backfill_info.begin
<< " with ver " << obj_v
<< " to peers " << missing_targs << dendl;
}
vector<pg_shard_t> all_push = need_ver_targs;
all_push.insert(all_push.end(), missing_targs.begin(), missing_targs.end());
handle.reset_tp_timeout();
int r = prep_backfill_object_push(backfill_info.begin, obj_v, obc, all_push, h);
if (r < 0) {
*work_started = true;
dout(0) << __func__ << " Error " << r << " trying to backfill " << backfill_info.begin << dendl;
break;
}
ops++;
} else {
*work_started = true;
dout(20) << "backfill blocking on " << backfill_info.begin
<< "; could not get rw_manager lock" << dendl;
break;
}
}
dout(20) << "need_ver_targs=" << need_ver_targs
<< " keep_ver_targs=" << keep_ver_targs << dendl;
dout(20) << "backfill_targets=" << get_backfill_targets()
<< " missing_targs=" << missing_targs
<< " skip_targs=" << skip_targs << dendl;
last_backfill_started = backfill_info.begin;
add_to_stat.insert(backfill_info.begin); // XXX: Only one for all pushes?
backfill_info.pop_front();
vector<pg_shard_t> check_targets = need_ver_targs;
check_targets.insert(check_targets.end(), keep_ver_targs.begin(), keep_ver_targs.end());
for (vector<pg_shard_t>::iterator i = check_targets.begin();
i != check_targets.end();
++i) {
pg_shard_t bt = *i;
BackfillInterval& pbi = peer_backfill_info[bt];
pbi.pop_front();
}
}
}
for (set<hobject_t>::iterator i = add_to_stat.begin();
i != add_to_stat.end();
++i) {
ObjectContextRef obc = get_object_context(*i, false);
ceph_assert(obc);
pg_stat_t stat;
add_object_context_to_pg_stat(obc, &stat);
pending_backfill_updates[*i] = stat;
}
map<pg_shard_t,MOSDPGBackfillRemove*> reqs;
for (unsigned i = 0; i < to_remove.size(); ++i) {
handle.reset_tp_timeout();
const hobject_t& oid = to_remove[i].get<0>();
eversion_t v = to_remove[i].get<1>();
pg_shard_t peer = to_remove[i].get<2>();
MOSDPGBackfillRemove *m;
auto it = reqs.find(peer);
if (it != reqs.end()) {
m = it->second;
} else {
m = reqs[peer] = new MOSDPGBackfillRemove(
spg_t(info.pgid.pgid, peer.shard),
get_osdmap_epoch());
if (cct->_conf->osd_op_queue == "mclock_scheduler") {
/* This guard preserves legacy WeightedPriorityQueue behavior for
* now, but should be removed after Reef */
m->set_priority(recovery_state.get_recovery_op_priority());
}
}
m->ls.push_back(make_pair(oid, v));
if (oid <= last_backfill_started)
pending_backfill_updates[oid]; // add empty stat!
}
for (auto p : reqs) {
osd->send_message_osd_cluster(p.first.osd, p.second,
get_osdmap_epoch());
}
pgbackend->run_recovery_op(h, recovery_state.get_recovery_op_priority());
hobject_t backfill_pos =
std::min(backfill_info.begin, earliest_peer_backfill());
dout(5) << "backfill_pos is " << backfill_pos << dendl;
for (set<hobject_t>::iterator i = backfills_in_flight.begin();
i != backfills_in_flight.end();
++i) {
dout(20) << *i << " is still in flight" << dendl;
}
hobject_t next_backfill_to_complete = backfills_in_flight.empty() ?
backfill_pos : *(backfills_in_flight.begin());
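  // Everything strictly below next_backfill_to_complete is finished: it is
  // below both the current scan position and the oldest still-in-flight push,
  // so its pending stats can be folded in and last_backfill advanced past it.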
hobject_t new_last_backfill = recovery_state.earliest_backfill();
dout(10) << "starting new_last_backfill at " << new_last_backfill << dendl;
for (map<hobject_t, pg_stat_t>::iterator i =
pending_backfill_updates.begin();
i != pending_backfill_updates.end() &&
i->first < next_backfill_to_complete;
pending_backfill_updates.erase(i++)) {
dout(20) << " pending_backfill_update " << i->first << dendl;
ceph_assert(i->first > new_last_backfill);
// carried from a previous round – if we are here, then we had to
// be requeued (by e.g. on_global_recover()) and those operations
// are done.
recovery_state.update_complete_backfill_object_stats(
i->first,
i->second);
new_last_backfill = i->first;
}
dout(10) << "possible new_last_backfill at " << new_last_backfill << dendl;
ceph_assert(!pending_backfill_updates.empty() ||
new_last_backfill == last_backfill_started);
if (pending_backfill_updates.empty() &&
backfill_pos.is_max()) {
ceph_assert(backfills_in_flight.empty());
new_last_backfill = backfill_pos;
last_backfill_started = backfill_pos;
}
dout(10) << "final new_last_backfill at " << new_last_backfill << dendl;
// If new_last_backfill == MAX, then we will send OP_BACKFILL_FINISH to
// all the backfill targets. Otherwise, we will move last_backfill up on
  // those targets that need it and send OP_BACKFILL_PROGRESS to them.
for (set<pg_shard_t>::const_iterator i = get_backfill_targets().begin();
i != get_backfill_targets().end();
++i) {
pg_shard_t bt = *i;
const pg_info_t& pinfo = recovery_state.get_peer_info(bt);
if (new_last_backfill > pinfo.last_backfill) {
recovery_state.update_peer_last_backfill(bt, new_last_backfill);
epoch_t e = get_osdmap_epoch();
MOSDPGBackfill *m = NULL;
if (pinfo.last_backfill.is_max()) {
m = new MOSDPGBackfill(
MOSDPGBackfill::OP_BACKFILL_FINISH,
e,
get_last_peering_reset(),
spg_t(info.pgid.pgid, bt.shard));
// Use default priority here, must match sub_op priority
start_recovery_op(hobject_t::get_max());
} else {
m = new MOSDPGBackfill(
MOSDPGBackfill::OP_BACKFILL_PROGRESS,
e,
get_last_peering_reset(),
spg_t(info.pgid.pgid, bt.shard));
// Use default priority here, must match sub_op priority
}
m->last_backfill = pinfo.last_backfill;
m->stats = pinfo.stats;
if (cct->_conf->osd_op_queue == "mclock_scheduler") {
/* This guard preserves legacy WeightedPriorityQueue behavior for
* now, but should be removed after Reef */
m->set_priority(recovery_state.get_recovery_op_priority());
}
osd->send_message_osd_cluster(bt.osd, m, get_osdmap_epoch());
dout(10) << " peer " << bt
<< " num_objects now " << pinfo.stats.stats.sum.num_objects
<< " / " << info.stats.stats.sum.num_objects << dendl;
}
}
if (ops)
*work_started = true;
return ops;
}
int PrimaryLogPG::prep_backfill_object_push(
hobject_t oid, eversion_t v,
ObjectContextRef obc,
vector<pg_shard_t> peers,
PGBackend::RecoveryHandle *h)
{
dout(10) << __func__ << " " << oid << " v " << v << " to peers " << peers << dendl;
ceph_assert(!peers.empty());
backfills_in_flight.insert(oid);
recovery_state.prepare_backfill_for_missing(oid, v, peers);
ceph_assert(!recovering.count(oid));
start_recovery_op(oid);
recovering.insert(make_pair(oid, obc));
int r = pgbackend->recover_object(
oid,
v,
ObjectContextRef(),
obc,
h);
if (r < 0) {
dout(0) << __func__ << " Error " << r << " on oid " << oid << dendl;
on_failed_pull({ pg_whoami }, oid, v);
}
return r;
}
void PrimaryLogPG::update_range(
BackfillInterval *bi,
ThreadPool::TPHandle &handle)
{
int local_min = cct->_conf->osd_backfill_scan_min;
int local_max = cct->_conf->osd_backfill_scan_max;
if (bi->version < info.log_tail) {
dout(10) << __func__<< ": bi is old, rescanning local backfill_info"
<< dendl;
bi->version = info.last_update;
scan_range(local_min, local_max, bi, handle);
}
if (bi->version >= projected_last_update) {
dout(10) << __func__<< ": bi is current " << dendl;
ceph_assert(bi->version == projected_last_update);
} else if (bi->version >= info.log_tail) {
if (recovery_state.get_pg_log().get_log().empty() && projected_log.empty()) {
/* Because we don't move log_tail on split, the log might be
* empty even if log_tail != last_update. However, the only
* way to get here with an empty log is if log_tail is actually
* eversion_t(), because otherwise the entry which changed
* last_update since the last scan would have to be present.
*/
ceph_assert(bi->version == eversion_t());
return;
}
dout(10) << __func__<< ": bi is old, (" << bi->version
<< ") can be updated with log to projected_last_update "
<< projected_last_update << dendl;
auto func = [&](const pg_log_entry_t &e) {
dout(10) << __func__ << ": updating from version " << e.version
<< dendl;
const hobject_t &soid = e.soid;
if (soid >= bi->begin &&
soid < bi->end) {
if (e.is_update()) {
dout(10) << __func__ << ": " << e.soid << " updated to version "
<< e.version << dendl;
bi->objects.erase(e.soid);
bi->objects.insert(
make_pair(
e.soid,
e.version));
} else if (e.is_delete()) {
dout(10) << __func__ << ": " << e.soid << " removed" << dendl;
bi->objects.erase(e.soid);
}
}
};
dout(10) << "scanning pg log first" << dendl;
recovery_state.get_pg_log().get_log().scan_log_after(bi->version, func);
dout(10) << "scanning projected log" << dendl;
projected_log.scan_log_after(bi->version, func);
bi->version = projected_last_update;
} else {
ceph_abort_msg("scan_range should have raised bi->version past log_tail");
}
}
void PrimaryLogPG::scan_range(
int min, int max, BackfillInterval *bi,
ThreadPool::TPHandle &handle)
{
ceph_assert(is_locked());
dout(10) << "scan_range from " << bi->begin << dendl;
bi->clear_objects();
vector<hobject_t> ls;
ls.reserve(max);
int r = pgbackend->objects_list_partial(bi->begin, min, max, &ls, &bi->end);
ceph_assert(r >= 0);
dout(10) << " got " << ls.size() << " items, next " << bi->end << dendl;
dout(20) << ls << dendl;
for (vector<hobject_t>::iterator p = ls.begin(); p != ls.end(); ++p) {
handle.reset_tp_timeout();
ObjectContextRef obc;
if (is_primary())
obc = object_contexts.lookup(*p);
if (obc) {
if (!obc->obs.exists) {
/* If the object does not exist here, it must have been removed
* between the collection_list_partial and here. This can happen
* for the first item in the range, which is usually last_backfill.
*/
continue;
}
bi->objects[*p] = obc->obs.oi.version;
dout(20) << " " << *p << " " << obc->obs.oi.version << dendl;
} else {
bufferlist bl;
int r = pgbackend->objects_get_attr(*p, OI_ATTR, &bl);
/* If the object does not exist here, it must have been removed
* between the collection_list_partial and here. This can happen
* for the first item in the range, which is usually last_backfill.
*/
if (r == -ENOENT)
continue;
ceph_assert(r >= 0);
object_info_t oi(bl);
bi->objects[*p] = oi.version;
dout(20) << " " << *p << " " << oi.version << dendl;
}
}
}
/** check_local
*
* verifies that stray objects have been deleted
*/
void PrimaryLogPG::check_local()
{
dout(10) << __func__ << dendl;
ceph_assert(
info.last_update >=
recovery_state.get_pg_log().get_tail()); // otherwise we need some help!
if (!cct->_conf->osd_debug_verify_stray_on_activate)
return;
// just scan the log.
set<hobject_t> did;
for (list<pg_log_entry_t>::const_reverse_iterator p = recovery_state.get_pg_log().get_log().log.rbegin();
p != recovery_state.get_pg_log().get_log().log.rend();
++p) {
if (did.count(p->soid))
continue;
did.insert(p->soid);
if (p->is_delete() && !is_missing_object(p->soid)) {
dout(10) << " checking " << p->soid
<< " at " << p->version << dendl;
struct stat st;
int r = osd->store->stat(
ch,
ghobject_t(p->soid, ghobject_t::NO_GEN, pg_whoami.shard),
&st);
if (r != -ENOENT) {
derr << __func__ << " " << p->soid << " exists, but should have been "
<< "deleted" << dendl;
ceph_abort_msg("erroneously present object");
}
} else {
// ignore old(+missing) objects
}
}
}
// ===========================
// hit sets
hobject_t PrimaryLogPG::get_hit_set_current_object(utime_t stamp)
{
ostringstream ss;
ss << "hit_set_" << info.pgid.pgid << "_current_" << stamp;
hobject_t hoid(sobject_t(ss.str(), CEPH_NOSNAP), "",
info.pgid.ps(), info.pgid.pool(),
cct->_conf->osd_hit_set_namespace);
dout(20) << __func__ << " " << hoid << dendl;
return hoid;
}
hobject_t PrimaryLogPG::get_hit_set_archive_object(utime_t start,
utime_t end,
bool using_gmt)
{
ostringstream ss;
ss << "hit_set_" << info.pgid.pgid << "_archive_";
if (using_gmt) {
start.gmtime(ss, true /* legacy pre-octopus form */) << "_";
end.gmtime(ss, true /* legacy pre-octopus form */);
} else {
start.localtime(ss, true /* legacy pre-octopus form */) << "_";
end.localtime(ss, true /* legacy pre-octopus form */);
}
hobject_t hoid(sobject_t(ss.str(), CEPH_NOSNAP), "",
info.pgid.ps(), info.pgid.pool(),
cct->_conf->osd_hit_set_namespace);
dout(20) << __func__ << " " << hoid << dendl;
return hoid;
}
void PrimaryLogPG::hit_set_clear()
{
dout(20) << __func__ << dendl;
hit_set.reset();
hit_set_start_stamp = utime_t();
}
void PrimaryLogPG::hit_set_setup()
{
if (!is_active() ||
!is_primary()) {
hit_set_clear();
return;
}
if (is_active() && is_primary() &&
(!pool.info.hit_set_count ||
!pool.info.hit_set_period ||
pool.info.hit_set_params.get_type() == HitSet::TYPE_NONE)) {
hit_set_clear();
// only primary is allowed to remove all the hit set objects
hit_set_remove_all();
return;
}
// FIXME: discard any previous data for now
hit_set_create();
// include any writes we know about from the pg log. this doesn't
// capture reads, but it is better than nothing!
hit_set_apply_log();
}
void PrimaryLogPG::hit_set_remove_all()
{
// If any archives are degraded we skip this
for (auto p = info.hit_set.history.begin();
p != info.hit_set.history.end();
++p) {
hobject_t aoid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
// Once we hit a degraded object just skip
if (is_degraded_or_backfilling_object(aoid))
return;
if (m_scrubber->write_blocked_by_scrub(aoid))
return;
}
if (!info.hit_set.history.empty()) {
auto p = info.hit_set.history.rbegin();
ceph_assert(p != info.hit_set.history.rend());
hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
ceph_assert(!is_degraded_or_backfilling_object(oid));
ObjectContextRef obc = get_object_context(oid, false);
ceph_assert(obc);
OpContextUPtr ctx = simple_opc_create(obc);
ctx->at_version = get_next_version();
ctx->updated_hset_history = info.hit_set;
utime_t now = ceph_clock_now();
ctx->mtime = now;
hit_set_trim(ctx, 0);
simple_opc_submit(std::move(ctx));
}
recovery_state.update_hset(pg_hit_set_history_t());
if (agent_state) {
agent_state->discard_hit_sets();
}
}
void PrimaryLogPG::hit_set_create()
{
utime_t now = ceph_clock_now();
// make a copy of the params to modify
HitSet::Params params(pool.info.hit_set_params);
dout(20) << __func__ << " " << params << dendl;
if (pool.info.hit_set_params.get_type() == HitSet::TYPE_BLOOM) {
BloomHitSet::Params *p =
static_cast<BloomHitSet::Params*>(params.impl.get());
// convert false positive rate so it holds up across the full period
p->set_fpp(p->get_fpp() / pool.info.hit_set_count);
if (p->get_fpp() <= 0.0)
p->set_fpp(.01); // fpp cannot be zero!
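    // e.g. (illustrative numbers): a configured fpp of 0.04 with
    // hit_set_count == 4 gives each bin an fpp of 0.01, so by the union bound
    // the chance of a false positive anywhere in the period stays near the
    // configured 0.04.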
// if we don't have specified size, estimate target size based on the
// previous bin!
if (p->target_size == 0 && hit_set) {
utime_t dur = now - hit_set_start_stamp;
unsigned unique = hit_set->approx_unique_insert_count();
dout(20) << __func__ << " previous set had approx " << unique
<< " unique items over " << dur << " seconds" << dendl;
p->target_size = (double)unique * (double)pool.info.hit_set_period
/ (double)dur;
}
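    // e.g. (illustrative numbers): ~5000 unique inserts observed over a 600s
    // bin with hit_set_period == 1200 estimates target_size ~= 5000 * 1200 /
    // 600 = 10000; the clamps below still bound it to
    // [osd_hit_set_min_size, osd_hit_set_max_size].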
if (p->target_size <
static_cast<uint64_t>(cct->_conf->osd_hit_set_min_size))
p->target_size = cct->_conf->osd_hit_set_min_size;
if (p->target_size
> static_cast<uint64_t>(cct->_conf->osd_hit_set_max_size))
p->target_size = cct->_conf->osd_hit_set_max_size;
p->seed = now.sec();
dout(10) << __func__ << " target_size " << p->target_size
<< " fpp " << p->get_fpp() << dendl;
}
hit_set.reset(new HitSet(params));
hit_set_start_stamp = now;
}
/**
* apply log entries to set
*
* this would only happen after peering, to at least capture writes
* during an interval that was potentially lost.
*/
bool PrimaryLogPG::hit_set_apply_log()
{
if (!hit_set)
return false;
eversion_t to = info.last_update;
eversion_t from = info.hit_set.current_last_update;
if (to <= from) {
dout(20) << __func__ << " no update" << dendl;
return false;
}
dout(20) << __func__ << " " << to << " .. " << info.last_update << dendl;
list<pg_log_entry_t>::const_reverse_iterator p =
recovery_state.get_pg_log().get_log().log.rbegin();
while (p != recovery_state.get_pg_log().get_log().log.rend() && p->version > to)
++p;
while (p != recovery_state.get_pg_log().get_log().log.rend() && p->version > from) {
hit_set->insert(p->soid);
++p;
}
return true;
}
void PrimaryLogPG::hit_set_persist()
{
dout(10) << __func__ << dendl;
bufferlist bl;
unsigned max = pool.info.hit_set_count;
utime_t now = ceph_clock_now();
hobject_t oid;
// If any archives are degraded we skip this persist request
// account for the additional entry being added below
for (auto p = info.hit_set.history.begin();
p != info.hit_set.history.end();
++p) {
hobject_t aoid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
// Once we hit a degraded object just skip further trim
if (is_degraded_or_backfilling_object(aoid))
return;
if (m_scrubber->write_blocked_by_scrub(aoid))
return;
}
// If backfill is in progress and we could possibly overlap with the
// hit_set_* objects, back off. Since these all have
// hobject_t::hash set to pgid.ps(), and those sort first, we can
// look just at that. This is necessary because our transactions
// may include a modify of the new hit_set *and* a delete of the
// old one, and this may span the backfill boundary.
for (set<pg_shard_t>::const_iterator p = get_backfill_targets().begin();
p != get_backfill_targets().end();
++p) {
const pg_info_t& pi = recovery_state.get_peer_info(*p);
if (pi.last_backfill == hobject_t() ||
pi.last_backfill.get_hash() == info.pgid.ps()) {
dout(10) << __func__ << " backfill target osd." << *p
<< " last_backfill has not progressed past pgid ps"
<< dendl;
return;
}
}
pg_hit_set_info_t new_hset = pg_hit_set_info_t(pool.info.use_gmt_hitset);
new_hset.begin = hit_set_start_stamp;
new_hset.end = now;
oid = get_hit_set_archive_object(
new_hset.begin,
new_hset.end,
new_hset.using_gmt);
// If the current object is degraded we skip this persist request
if (m_scrubber->write_blocked_by_scrub(oid))
return;
hit_set->seal();
encode(*hit_set, bl);
dout(20) << __func__ << " archive " << oid << dendl;
if (agent_state) {
agent_state->add_hit_set(new_hset.begin, hit_set);
uint32_t size = agent_state->hit_set_map.size();
if (size >= pool.info.hit_set_count) {
size = pool.info.hit_set_count > 0 ? pool.info.hit_set_count - 1: 0;
}
hit_set_in_memory_trim(size);
}
ObjectContextRef obc = get_object_context(oid, true);
OpContextUPtr ctx = simple_opc_create(obc);
ctx->at_version = get_next_version();
ctx->updated_hset_history = info.hit_set;
pg_hit_set_history_t &updated_hit_set_hist = *(ctx->updated_hset_history);
updated_hit_set_hist.current_last_update = info.last_update;
new_hset.version = ctx->at_version;
updated_hit_set_hist.history.push_back(new_hset);
hit_set_create();
// fabricate an object_info_t and SnapSet
obc->obs.oi.version = ctx->at_version;
obc->obs.oi.mtime = now;
obc->obs.oi.size = bl.length();
obc->obs.exists = true;
obc->obs.oi.set_data_digest(bl.crc32c(-1));
ctx->new_obs = obc->obs;
ctx->new_snapset = obc->ssc->snapset;
ctx->delta_stats.num_objects++;
ctx->delta_stats.num_objects_hit_set_archive++;
ctx->delta_stats.num_bytes += bl.length();
ctx->delta_stats.num_bytes_hit_set_archive += bl.length();
bufferlist bss;
encode(ctx->new_snapset, bss);
bufferlist boi(sizeof(ctx->new_obs.oi));
encode(ctx->new_obs.oi, boi,
get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
ctx->op_t->create(oid);
if (bl.length()) {
ctx->op_t->write(oid, 0, bl.length(), bl, 0);
write_update_size_and_usage(ctx->delta_stats, obc->obs.oi, ctx->modified_ranges,
0, bl.length());
ctx->clean_regions.mark_data_region_dirty(0, bl.length());
}
map<string, bufferlist, std::less<>> attrs = {
{OI_ATTR, std::move(boi)},
{SS_ATTR, std::move(bss)}
};
setattrs_maybe_cache(ctx->obc, ctx->op_t.get(), attrs);
ctx->log.push_back(
pg_log_entry_t(
pg_log_entry_t::MODIFY,
oid,
ctx->at_version,
eversion_t(),
0,
osd_reqid_t(),
ctx->mtime,
0)
);
ctx->log.back().clean_regions = ctx->clean_regions;
hit_set_trim(ctx, max);
simple_opc_submit(std::move(ctx));
}
void PrimaryLogPG::hit_set_trim(OpContextUPtr &ctx, unsigned max)
{
ceph_assert(ctx->updated_hset_history);
pg_hit_set_history_t &updated_hit_set_hist =
*(ctx->updated_hset_history);
for (unsigned num = updated_hit_set_hist.history.size(); num > max; --num) {
list<pg_hit_set_info_t>::iterator p = updated_hit_set_hist.history.begin();
ceph_assert(p != updated_hit_set_hist.history.end());
hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
ceph_assert(!is_degraded_or_backfilling_object(oid));
dout(20) << __func__ << " removing " << oid << dendl;
++ctx->at_version.version;
ctx->log.push_back(
pg_log_entry_t(pg_log_entry_t::DELETE,
oid,
ctx->at_version,
p->version,
0,
osd_reqid_t(),
ctx->mtime,
0));
ctx->op_t->remove(oid);
updated_hit_set_hist.history.pop_front();
ObjectContextRef obc = get_object_context(oid, false);
ceph_assert(obc);
--ctx->delta_stats.num_objects;
--ctx->delta_stats.num_objects_hit_set_archive;
ctx->delta_stats.num_bytes -= obc->obs.oi.size;
ctx->delta_stats.num_bytes_hit_set_archive -= obc->obs.oi.size;
}
}
void PrimaryLogPG::hit_set_in_memory_trim(uint32_t max_in_memory)
{
while (agent_state->hit_set_map.size() > max_in_memory) {
agent_state->remove_oldest_hit_set();
}
}
// =======================================
// cache agent
void PrimaryLogPG::agent_setup()
{
ceph_assert(is_locked());
if (!is_active() ||
!is_primary() ||
state_test(PG_STATE_PREMERGE) ||
pool.info.cache_mode == pg_pool_t::CACHEMODE_NONE ||
pool.info.tier_of < 0 ||
!get_osdmap()->have_pg_pool(pool.info.tier_of)) {
agent_clear();
return;
}
if (!agent_state) {
agent_state.reset(new TierAgentState);
// choose random starting position
agent_state->position = hobject_t();
agent_state->position.pool = info.pgid.pool();
agent_state->position.set_hash(pool.info.get_random_pg_position(
info.pgid.pgid,
rand()));
agent_state->start = agent_state->position;
dout(10) << __func__ << " allocated new state, position "
<< agent_state->position << dendl;
} else {
dout(10) << __func__ << " keeping existing state" << dendl;
}
if (info.stats.stats_invalid) {
osd->clog->warn() << "pg " << info.pgid << " has invalid (post-split) stats; must scrub before tier agent can activate";
}
agent_choose_mode();
}
void PrimaryLogPG::agent_clear()
{
agent_stop();
agent_state.reset(NULL);
}
// Return false if no objects operated on since start of object hash space
bool PrimaryLogPG::agent_work(int start_max, int agent_flush_quota)
{
std::scoped_lock locker{*this};
if (!agent_state) {
dout(10) << __func__ << " no agent state, stopping" << dendl;
return true;
}
ceph_assert(!recovery_state.is_deleting());
if (agent_state->is_idle()) {
dout(10) << __func__ << " idle, stopping" << dendl;
return true;
}
osd->logger->inc(l_osd_agent_wake);
dout(10) << __func__
<< " max " << start_max
<< ", flush " << agent_state->get_flush_mode_name()
<< ", evict " << agent_state->get_evict_mode_name()
<< ", pos " << agent_state->position
<< dendl;
ceph_assert(is_primary());
ceph_assert(is_active());
agent_load_hit_sets();
const pg_pool_t *base_pool = get_osdmap()->get_pg_pool(pool.info.tier_of);
ceph_assert(base_pool);
int ls_min = 1;
int ls_max = cct->_conf->osd_pool_default_cache_max_evict_check_size;
// list some objects. this conveniently lists clones (oldest to
// newest) before heads... the same order we want to flush in.
//
// NOTE: do not flush the Sequencer. we will assume that the
// listing we get back is imprecise.
vector<hobject_t> ls;
hobject_t next;
int r = pgbackend->objects_list_partial(agent_state->position, ls_min, ls_max,
&ls, &next);
ceph_assert(r >= 0);
dout(20) << __func__ << " got " << ls.size() << " objects" << dendl;
int started = 0;
for (vector<hobject_t>::iterator p = ls.begin();
p != ls.end();
++p) {
if (p->nspace == cct->_conf->osd_hit_set_namespace) {
dout(20) << __func__ << " skip (hit set) " << *p << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (is_degraded_or_backfilling_object(*p)) {
dout(20) << __func__ << " skip (degraded) " << *p << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (is_missing_object(p->get_head())) {
dout(20) << __func__ << " skip (missing head) " << *p << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
ObjectContextRef obc = get_object_context(*p, false, NULL);
if (!obc) {
// we didn't flush; we may miss something here.
dout(20) << __func__ << " skip (no obc) " << *p << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (!obc->obs.exists) {
dout(20) << __func__ << " skip (dne) " << obc->obs.oi.soid << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (m_scrubber->range_intersects_scrub(obc->obs.oi.soid,
obc->obs.oi.soid.get_head())) {
dout(20) << __func__ << " skip (scrubbing) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (obc->is_blocked()) {
dout(20) << __func__ << " skip (blocked) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (obc->is_request_pending()) {
dout(20) << __func__ << " skip (request pending) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
// be careful flushing omap to an EC pool.
if (!base_pool->supports_omap() &&
obc->obs.oi.is_omap()) {
dout(20) << __func__ << " skip (omap to EC) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
continue;
}
if (agent_state->evict_mode != TierAgentState::EVICT_MODE_IDLE &&
agent_maybe_evict(obc, false))
++started;
else if (agent_state->flush_mode != TierAgentState::FLUSH_MODE_IDLE &&
agent_flush_quota > 0 && agent_maybe_flush(obc)) {
++started;
--agent_flush_quota;
}
if (started >= start_max) {
// If finishing early, set "next" to the next object
if (++p != ls.end())
next = *p;
break;
}
}
if (++agent_state->hist_age > cct->_conf->osd_agent_hist_halflife) {
dout(20) << __func__ << " resetting atime and temp histograms" << dendl;
agent_state->hist_age = 0;
agent_state->temp_hist.decay();
}
// Total objects operated on so far
int total_started = agent_state->started + started;
bool need_delay = false;
dout(20) << __func__ << " start pos " << agent_state->position
<< " next start pos " << next
<< " started " << total_started << dendl;
// See if we've made a full pass over the object hash space
// This might check at most ls_max objects a second time to notice that
  // we've checked every object at least once.
if (agent_state->position < agent_state->start &&
next >= agent_state->start) {
dout(20) << __func__ << " wrap around " << agent_state->start << dendl;
if (total_started == 0)
need_delay = true;
else
total_started = 0;
agent_state->start = next;
}
agent_state->started = total_started;
// See if we are starting from beginning
if (next.is_max())
agent_state->position = hobject_t();
else
agent_state->position = next;
// Discard old in memory HitSets
hit_set_in_memory_trim(pool.info.hit_set_count);
if (need_delay) {
ceph_assert(agent_state->delaying == false);
agent_delay();
return false;
}
agent_choose_mode();
return true;
}
void PrimaryLogPG::agent_load_hit_sets()
{
if (agent_state->evict_mode == TierAgentState::EVICT_MODE_IDLE) {
return;
}
if (agent_state->hit_set_map.size() < info.hit_set.history.size()) {
dout(10) << __func__ << dendl;
for (auto p = info.hit_set.history.begin();
p != info.hit_set.history.end(); ++p) {
if (agent_state->hit_set_map.count(p->begin.sec()) == 0) {
dout(10) << __func__ << " loading " << p->begin << "-"
<< p->end << dendl;
if (!pool.info.is_replicated()) {
// FIXME: EC not supported here yet
derr << __func__ << " on non-replicated pool" << dendl;
break;
}
hobject_t oid = get_hit_set_archive_object(p->begin, p->end, p->using_gmt);
if (is_unreadable_object(oid)) {
dout(10) << __func__ << " unreadable " << oid << ", waiting" << dendl;
break;
}
ObjectContextRef obc = get_object_context(oid, false);
if (!obc) {
derr << __func__ << ": could not load hitset " << oid << dendl;
break;
}
bufferlist bl;
{
int r = osd->store->read(ch, ghobject_t(oid), 0, 0, bl);
ceph_assert(r >= 0);
}
HitSetRef hs(new HitSet);
bufferlist::const_iterator pbl = bl.begin();
decode(*hs, pbl);
agent_state->add_hit_set(p->begin.sec(), hs);
}
}
}
}
bool PrimaryLogPG::agent_maybe_flush(ObjectContextRef& obc)
{
if (!obc->obs.oi.is_dirty()) {
dout(20) << __func__ << " skip (clean) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
return false;
}
if (obc->obs.oi.is_cache_pinned()) {
dout(20) << __func__ << " skip (cache_pinned) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
return false;
}
utime_t now = ceph_clock_now();
utime_t ob_local_mtime;
if (obc->obs.oi.local_mtime != utime_t()) {
ob_local_mtime = obc->obs.oi.local_mtime;
} else {
ob_local_mtime = obc->obs.oi.mtime;
}
bool evict_mode_full =
(agent_state->evict_mode == TierAgentState::EVICT_MODE_FULL);
if (!evict_mode_full &&
obc->obs.oi.soid.snap == CEPH_NOSNAP && // snaps immutable; don't delay
(ob_local_mtime + utime_t(pool.info.cache_min_flush_age, 0) > now)) {
dout(20) << __func__ << " skip (too young) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
return false;
}
if (osd->agent_is_active_oid(obc->obs.oi.soid)) {
dout(20) << __func__ << " skip (flushing) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
return false;
}
dout(10) << __func__ << " flushing " << obc->obs.oi << dendl;
// FIXME: flush anything dirty, regardless of what distribution of
// ages we expect.
hobject_t oid = obc->obs.oi.soid;
osd->agent_start_op(oid);
// no need to capture a pg ref, can't outlive fop or ctx
std::function<void()> on_flush = [this, oid]() {
osd->agent_finish_op(oid);
};
int result = start_flush(
OpRequestRef(), obc, false, NULL,
on_flush);
if (result != -EINPROGRESS) {
on_flush();
dout(10) << __func__ << " start_flush() failed " << obc->obs.oi
<< " with " << result << dendl;
osd->logger->inc(l_osd_agent_skip);
return false;
}
osd->logger->inc(l_osd_agent_flush);
return true;
}
bool PrimaryLogPG::agent_maybe_evict(ObjectContextRef& obc, bool after_flush)
{
const hobject_t& soid = obc->obs.oi.soid;
if (!after_flush && obc->obs.oi.is_dirty()) {
dout(20) << __func__ << " skip (dirty) " << obc->obs.oi << dendl;
return false;
}
// This is already checked by agent_work() which passes after_flush = false
if (after_flush && m_scrubber->range_intersects_scrub(soid, soid.get_head())) {
dout(20) << __func__ << " skip (scrubbing) " << obc->obs.oi << dendl;
return false;
}
if (!obc->obs.oi.watchers.empty()) {
dout(20) << __func__ << " skip (watchers) " << obc->obs.oi << dendl;
return false;
}
if (obc->is_blocked()) {
dout(20) << __func__ << " skip (blocked) " << obc->obs.oi << dendl;
return false;
}
if (obc->obs.oi.is_cache_pinned()) {
dout(20) << __func__ << " skip (cache_pinned) " << obc->obs.oi << dendl;
return false;
}
if (soid.snap == CEPH_NOSNAP) {
int result = _verify_no_head_clones(soid, obc->ssc->snapset);
if (result < 0) {
dout(20) << __func__ << " skip (clones) " << obc->obs.oi << dendl;
return false;
}
}
if (agent_state->evict_mode != TierAgentState::EVICT_MODE_FULL) {
    // is this object older than cache_min_evict_age?
utime_t now = ceph_clock_now();
utime_t ob_local_mtime;
if (obc->obs.oi.local_mtime != utime_t()) {
ob_local_mtime = obc->obs.oi.local_mtime;
} else {
ob_local_mtime = obc->obs.oi.mtime;
}
if (ob_local_mtime + utime_t(pool.info.cache_min_evict_age, 0) > now) {
dout(20) << __func__ << " skip (too young) " << obc->obs.oi << dendl;
osd->logger->inc(l_osd_agent_skip);
return false;
}
// is this object old and/or cold enough?
int temp = 0;
uint64_t temp_upper = 0, temp_lower = 0;
if (hit_set)
agent_estimate_temp(soid, &temp);
agent_state->temp_hist.add(temp);
agent_state->temp_hist.get_position_micro(temp, &temp_lower, &temp_upper);
dout(20) << __func__
<< " temp " << temp
<< " pos " << temp_lower << "-" << temp_upper
<< ", evict_effort " << agent_state->evict_effort
<< dendl;
dout(30) << "agent_state:\n";
auto f = Formatter::create_unique("");
f->open_object_section("agent_state");
agent_state->dump(f.get());
f->close_section();
f->flush(*_dout);
*_dout << dendl;
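    // The check below skips any object whose temp_upper position is at or
    // below (1000000 - evict_effort); e.g. with evict_effort == 250000 only
    // objects whose position in the temperature histogram exceeds 750000 are
    // considered for eviction on this pass.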
if (1000000 - temp_upper >= agent_state->evict_effort)
return false;
}
dout(10) << __func__ << " evicting " << obc->obs.oi << dendl;
OpContextUPtr ctx = simple_opc_create(obc);
auto null_op_req = OpRequestRef();
if (!ctx->lock_manager.get_lock_type(
RWState::RWWRITE,
obc->obs.oi.soid,
obc,
null_op_req)) {
close_op_ctx(ctx.release());
dout(20) << __func__ << " skip (cannot get lock) " << obc->obs.oi << dendl;
return false;
}
osd->agent_start_evict_op();
ctx->register_on_finish(
[this]() {
osd->agent_finish_evict_op();
});
ctx->at_version = get_next_version();
ceph_assert(ctx->new_obs.exists);
int r = _delete_oid(ctx.get(), true, false);
if (obc->obs.oi.is_omap())
ctx->delta_stats.num_objects_omap--;
ctx->delta_stats.num_evict++;
ctx->delta_stats.num_evict_kb += shift_round_up(obc->obs.oi.size, 10);
if (obc->obs.oi.is_dirty())
--ctx->delta_stats.num_objects_dirty;
ceph_assert(r == 0);
finish_ctx(ctx.get(), pg_log_entry_t::DELETE);
simple_opc_submit(std::move(ctx));
osd->logger->inc(l_osd_tier_evict);
osd->logger->inc(l_osd_agent_evict);
return true;
}
void PrimaryLogPG::agent_stop()
{
dout(20) << __func__ << dendl;
if (agent_state && !agent_state->is_idle()) {
agent_state->evict_mode = TierAgentState::EVICT_MODE_IDLE;
agent_state->flush_mode = TierAgentState::FLUSH_MODE_IDLE;
osd->agent_disable_pg(this, agent_state->evict_effort);
}
}
void PrimaryLogPG::agent_delay()
{
dout(20) << __func__ << dendl;
if (agent_state && !agent_state->is_idle()) {
ceph_assert(agent_state->delaying == false);
agent_state->delaying = true;
osd->agent_disable_pg(this, agent_state->evict_effort);
}
}
void PrimaryLogPG::agent_choose_mode_restart()
{
dout(20) << __func__ << dendl;
std::scoped_lock locker{*this};
if (agent_state && agent_state->delaying) {
agent_state->delaying = false;
agent_choose_mode(true);
}
}
bool PrimaryLogPG::agent_choose_mode(bool restart, OpRequestRef op)
{
bool requeued = false;
// Let delay play out
if (agent_state->delaying) {
dout(20) << __func__ << " " << this << " delaying, ignored" << dendl;
return requeued;
}
TierAgentState::flush_mode_t flush_mode = TierAgentState::FLUSH_MODE_IDLE;
TierAgentState::evict_mode_t evict_mode = TierAgentState::EVICT_MODE_IDLE;
unsigned evict_effort = 0;
if (info.stats.stats_invalid) {
// idle; stats can't be trusted until we scrub.
dout(20) << __func__ << " stats invalid (post-split), idle" << dendl;
goto skip_calc;
}
{
uint64_t divisor = pool.info.get_pg_num_divisor(info.pgid.pgid);
ceph_assert(divisor > 0);
// adjust (effective) user objects down based on the number
// of HitSet objects, which should not count toward our total since
// they cannot be flushed.
uint64_t unflushable = info.stats.stats.sum.num_objects_hit_set_archive;
// also exclude omap objects if ec backing pool
const pg_pool_t *base_pool = get_osdmap()->get_pg_pool(pool.info.tier_of);
ceph_assert(base_pool);
if (!base_pool->supports_omap())
unflushable += info.stats.stats.sum.num_objects_omap;
uint64_t num_user_objects = info.stats.stats.sum.num_objects;
if (num_user_objects > unflushable)
num_user_objects -= unflushable;
else
num_user_objects = 0;
uint64_t num_user_bytes = info.stats.stats.sum.num_bytes;
uint64_t unflushable_bytes = info.stats.stats.sum.num_bytes_hit_set_archive;
num_user_bytes -= unflushable_bytes;
uint64_t num_overhead_bytes = osd->store->estimate_objects_overhead(num_user_objects);
num_user_bytes += num_overhead_bytes;
// also reduce the num_dirty by num_objects_omap
int64_t num_dirty = info.stats.stats.sum.num_objects_dirty;
if (!base_pool->supports_omap()) {
if (num_dirty > info.stats.stats.sum.num_objects_omap)
num_dirty -= info.stats.stats.sum.num_objects_omap;
else
num_dirty = 0;
}
dout(10) << __func__
<< " flush_mode: "
<< TierAgentState::get_flush_mode_name(agent_state->flush_mode)
<< " evict_mode: "
<< TierAgentState::get_evict_mode_name(agent_state->evict_mode)
<< " num_objects: " << info.stats.stats.sum.num_objects
<< " num_bytes: " << info.stats.stats.sum.num_bytes
<< " num_objects_dirty: " << info.stats.stats.sum.num_objects_dirty
<< " num_objects_omap: " << info.stats.stats.sum.num_objects_omap
<< " num_dirty: " << num_dirty
<< " num_user_objects: " << num_user_objects
<< " num_user_bytes: " << num_user_bytes
<< " num_overhead_bytes: " << num_overhead_bytes
<< " pool.info.target_max_bytes: " << pool.info.target_max_bytes
<< " pool.info.target_max_objects: " << pool.info.target_max_objects
<< dendl;
// get dirty, full ratios
uint64_t dirty_micro = 0;
uint64_t full_micro = 0;
if (pool.info.target_max_bytes && num_user_objects > 0) {
uint64_t avg_size = num_user_bytes / num_user_objects;
dirty_micro =
num_dirty * avg_size * 1000000 /
std::max<uint64_t>(pool.info.target_max_bytes / divisor, 1);
full_micro =
num_user_objects * avg_size * 1000000 /
std::max<uint64_t>(pool.info.target_max_bytes / divisor, 1);
}
if (pool.info.target_max_objects > 0) {
uint64_t dirty_objects_micro =
num_dirty * 1000000 /
std::max<uint64_t>(pool.info.target_max_objects / divisor, 1);
if (dirty_objects_micro > dirty_micro)
dirty_micro = dirty_objects_micro;
uint64_t full_objects_micro =
num_user_objects * 1000000 /
std::max<uint64_t>(pool.info.target_max_objects / divisor, 1);
if (full_objects_micro > full_micro)
full_micro = full_objects_micro;
}
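    // e.g. (illustrative numbers): target_max_bytes == 1 TiB over a divisor
    // of 64 PGs gives a 16 GiB per-PG target; with avg_size == 1 MiB, 8000
    // user objects and 4000 dirty objects this yields full_micro ~= 488000
    // and dirty_micro ~= 244000, and the target_max_objects path above can
    // only raise those values since it takes the max.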
dout(20) << __func__ << " dirty " << ((float)dirty_micro / 1000000.0)
<< " full " << ((float)full_micro / 1000000.0)
<< dendl;
// flush mode
uint64_t flush_target = pool.info.cache_target_dirty_ratio_micro;
uint64_t flush_high_target = pool.info.cache_target_dirty_high_ratio_micro;
uint64_t flush_slop = (float)flush_target * cct->_conf->osd_agent_slop;
if (restart || agent_state->flush_mode == TierAgentState::FLUSH_MODE_IDLE) {
flush_target += flush_slop;
flush_high_target += flush_slop;
} else {
flush_target -= std::min(flush_target, flush_slop);
flush_high_target -= std::min(flush_high_target, flush_slop);
}
if (dirty_micro > flush_high_target) {
flush_mode = TierAgentState::FLUSH_MODE_HIGH;
} else if (dirty_micro > flush_target || (!flush_target && num_dirty > 0)) {
flush_mode = TierAgentState::FLUSH_MODE_LOW;
}
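    // e.g. (illustrative numbers): with cache_target_dirty_ratio == 0.4 and
    // osd_agent_slop == 0.02 the slop is 0.008, so an idle agent only enters
    // FLUSH_MODE_LOW once dirty_micro exceeds ~408000 and, once active, keeps
    // flushing until it falls back below ~392000; the slop provides
    // hysteresis so the mode does not flap right at the target.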
// evict mode
uint64_t evict_target = pool.info.cache_target_full_ratio_micro;
uint64_t evict_slop = (float)evict_target * cct->_conf->osd_agent_slop;
if (restart || agent_state->evict_mode == TierAgentState::EVICT_MODE_IDLE)
evict_target += evict_slop;
else
evict_target -= std::min(evict_target, evict_slop);
if (full_micro > 1000000) {
// evict anything clean
evict_mode = TierAgentState::EVICT_MODE_FULL;
evict_effort = 1000000;
} else if (full_micro > evict_target) {
      // set effort in [0..1] range based on where we are between
      // evict_target and completely full
evict_mode = TierAgentState::EVICT_MODE_SOME;
uint64_t over = full_micro - evict_target;
uint64_t span = 1000000 - evict_target;
evict_effort = std::max(over * 1000000 / span,
uint64_t(1000000.0 *
cct->_conf->osd_agent_min_evict_effort));
// quantize effort to avoid too much reordering in the agent_queue.
uint64_t inc = cct->_conf->osd_agent_quantize_effort * 1000000;
ceph_assert(inc > 0);
uint64_t was = evict_effort;
evict_effort -= evict_effort % inc;
if (evict_effort < inc)
evict_effort = inc;
ceph_assert(evict_effort >= inc && evict_effort <= 1000000);
dout(30) << __func__ << " evict_effort " << was << " quantized by " << inc << " to " << evict_effort << dendl;
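      // e.g. (illustrative numbers): evict_target == 800000 and full_micro ==
      // 900000 give over == 100000 and span == 200000, i.e. a raw effort of
      // 500000 (assuming osd_agent_min_evict_effort is below 0.5); with
      // osd_agent_quantize_effort == 0.1 a raw 537000 would likewise be
      // rounded down to 500000 so agent_queue ordering stays stable.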
}
}
skip_calc:
bool old_idle = agent_state->is_idle();
if (flush_mode != agent_state->flush_mode) {
dout(5) << __func__ << " flush_mode "
<< TierAgentState::get_flush_mode_name(agent_state->flush_mode)
<< " -> "
<< TierAgentState::get_flush_mode_name(flush_mode)
<< dendl;
recovery_state.update_stats(
[=, this](auto &history, auto &stats) {
if (flush_mode == TierAgentState::FLUSH_MODE_HIGH) {
osd->agent_inc_high_count();
stats.stats.sum.num_flush_mode_high = 1;
} else if (flush_mode == TierAgentState::FLUSH_MODE_LOW) {
stats.stats.sum.num_flush_mode_low = 1;
}
if (agent_state->flush_mode == TierAgentState::FLUSH_MODE_HIGH) {
osd->agent_dec_high_count();
stats.stats.sum.num_flush_mode_high = 0;
} else if (agent_state->flush_mode == TierAgentState::FLUSH_MODE_LOW) {
stats.stats.sum.num_flush_mode_low = 0;
}
return false;
});
agent_state->flush_mode = flush_mode;
}
if (evict_mode != agent_state->evict_mode) {
dout(5) << __func__ << " evict_mode "
<< TierAgentState::get_evict_mode_name(agent_state->evict_mode)
<< " -> "
<< TierAgentState::get_evict_mode_name(evict_mode)
<< dendl;
if (agent_state->evict_mode == TierAgentState::EVICT_MODE_FULL &&
is_active()) {
if (op)
requeue_op(op);
requeue_ops(waiting_for_flush);
requeue_ops(waiting_for_active);
requeue_ops(waiting_for_readable);
requeue_ops(waiting_for_scrub);
requeue_ops(waiting_for_cache_not_full);
objects_blocked_on_cache_full.clear();
requeued = true;
}
recovery_state.update_stats(
[=, this](auto &history, auto &stats) {
if (evict_mode == TierAgentState::EVICT_MODE_SOME) {
stats.stats.sum.num_evict_mode_some = 1;
} else if (evict_mode == TierAgentState::EVICT_MODE_FULL) {
stats.stats.sum.num_evict_mode_full = 1;
}
if (agent_state->evict_mode == TierAgentState::EVICT_MODE_SOME) {
stats.stats.sum.num_evict_mode_some = 0;
} else if (agent_state->evict_mode == TierAgentState::EVICT_MODE_FULL) {
stats.stats.sum.num_evict_mode_full = 0;
}
return false;
});
agent_state->evict_mode = evict_mode;
}
uint64_t old_effort = agent_state->evict_effort;
if (evict_effort != agent_state->evict_effort) {
dout(5) << __func__ << " evict_effort "
<< ((float)agent_state->evict_effort / 1000000.0)
<< " -> "
<< ((float)evict_effort / 1000000.0)
<< dendl;
agent_state->evict_effort = evict_effort;
}
// NOTE: we are using evict_effort as a proxy for *all* agent effort
// (including flush). This is probably fine (they should be
// correlated) but it is not precisely correct.
if (agent_state->is_idle()) {
if (!restart && !old_idle) {
osd->agent_disable_pg(this, old_effort);
}
} else {
if (restart || old_idle) {
osd->agent_enable_pg(this, agent_state->evict_effort);
} else if (old_effort != agent_state->evict_effort) {
osd->agent_adjust_pg(this, old_effort, agent_state->evict_effort);
}
}
return requeued;
}
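// Temperature model used by agent_maybe_evict(): an object present in the
// current in-memory hit set scores 1000000; each archived hit set (newest
// first, index i) that also contains it adds pool.info.get_grade(i), and the
// scan stops once hit_set_search_last_n containing sets have been found.
// E.g. with hypothetical grades of 500000 and 250000 for the two newest
// archives, an object found only in those two would score 750000.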
void PrimaryLogPG::agent_estimate_temp(const hobject_t& oid, int *temp)
{
ceph_assert(hit_set);
ceph_assert(temp);
*temp = 0;
if (hit_set->contains(oid))
*temp = 1000000;
unsigned i = 0;
int last_n = pool.info.hit_set_search_last_n;
for (map<time_t,HitSetRef>::reverse_iterator p =
agent_state->hit_set_map.rbegin(); last_n > 0 &&
p != agent_state->hit_set_map.rend(); ++p, ++i) {
if (p->second->contains(oid)) {
*temp += pool.info.get_grade(i);
--last_n;
}
}
}
// Dup op detection
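// already_complete(v) relies on repop_queue holding in-progress writes in
// ascending version order: entries with an empty version (copy-from-temp ops)
// are skipped, the walk stops at the first entry newer than v, and v is only
// considered complete if every remaining entry at or below v has already
// been committed by all replicas (all_committed).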
bool PrimaryLogPG::already_complete(eversion_t v)
{
dout(20) << __func__ << ": " << v << dendl;
for (xlist<RepGather*>::iterator i = repop_queue.begin();
!i.end();
++i) {
dout(20) << __func__ << ": " << **i << dendl;
// skip copy from temp object ops
if ((*i)->v == eversion_t()) {
dout(20) << __func__ << ": " << **i
<< " version is empty" << dendl;
continue;
}
if ((*i)->v > v) {
dout(20) << __func__ << ": " << **i
<< " (*i)->v past v" << dendl;
break;
}
if (!(*i)->all_committed) {
dout(20) << __func__ << ": " << **i
<< " not committed, returning false"
<< dendl;
return false;
}
}
dout(20) << __func__ << ": returning true" << dendl;
return true;
}
// ==========================================================================================
// SCRUB
void PrimaryLogPG::do_replica_scrub_map(OpRequestRef op)
{
dout(15) << __func__ << " is scrub active? " << is_scrub_active() << dendl;
op->mark_started();
if (!is_scrub_active()) {
dout(10) << __func__ << " scrub isn't active" << dendl;
return;
}
m_scrubber->map_from_replica(op);
}
bool PrimaryLogPG::_range_available_for_scrub(const hobject_t& begin,
const hobject_t& end)
{
pair<hobject_t, ObjectContextRef> next;
next.second = object_contexts.lookup(begin);
next.first = begin;
bool more = true;
while (more && next.first < end) {
if (next.second && next.second->is_blocked()) {
next.second->requeue_scrub_on_unblock = true;
dout(10) << __func__ << ": scrub delayed, "
<< next.first << " is blocked"
<< dendl;
return false;
}
more = object_contexts.get_next(next.first, &next);
}
return true;
}
int PrimaryLogPG::rep_repair_primary_object(const hobject_t& soid, OpContext *ctx)
{
OpRequestRef op = ctx->op;
// Only supports replicated pools
ceph_assert(!pool.info.is_erasure());
ceph_assert(is_primary());
dout(10) << __func__ << " " << soid
<< " peers osd.{" << get_acting_recovery_backfill() << "}" << dendl;
if (!is_clean()) {
block_for_clean(soid, op);
return -EAGAIN;
}
ceph_assert(!recovery_state.get_pg_log().get_missing().is_missing(soid));
auto& oi = ctx->new_obs.oi;
eversion_t v = oi.version;
if (primary_error(soid, v)) {
dout(0) << __func__ << " No other replicas available for " << soid << dendl;
// XXX: If we knew that there is no down osd which could include this
// object, it would be nice if we could return EIO here.
// If a "never fail" flag was available, that could be used
// for rbd to NOT return EIO until object marked lost.
// Drop through to save this op in case an osd comes up with the object.
}
// Restart the op after object becomes readable again
waiting_for_unreadable_object[soid].push_back(op);
op->mark_delayed("waiting for missing object");
ceph_assert(is_clean());
state_set(PG_STATE_REPAIR);
state_clear(PG_STATE_CLEAN);
queue_peering_event(
PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(
get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::DoRecovery())));
return -EAGAIN;
}
/*---SnapTrimmer Logging---*/
#undef dout_prefix
#define dout_prefix pg->gen_prefix(*_dout)
void PrimaryLogPG::SnapTrimmer::log_enter(const char *state_name)
{
ldout(pg->cct, 20) << "enter " << state_name << dendl;
}
void PrimaryLogPG::SnapTrimmer::log_exit(const char *state_name, utime_t enter_time)
{
ldout(pg->cct, 20) << "exit " << state_name << dendl;
}
bool PrimaryLogPG::SnapTrimmer::permit_trim() {
return
pg->is_clean() &&
!pg->is_scrub_queued_or_active() &&
!pg->snap_trimq.empty();
}
/*---SnapTrimmer states---*/
#undef dout_prefix
#define dout_prefix (context< SnapTrimmer >().pg->gen_prefix(*_dout) \
<< "SnapTrimmer state<" << get_state_name() << ">: ")
/* NotTrimming */
PrimaryLogPG::NotTrimming::NotTrimming(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "NotTrimming")
{
context< SnapTrimmer >().log_enter(state_name);
}
void PrimaryLogPG::NotTrimming::exit()
{
context< SnapTrimmer >().log_exit(state_name, enter_time);
}
boost::statechart::result PrimaryLogPG::NotTrimming::react(const KickTrim&)
{
PrimaryLogPG *pg = context< SnapTrimmer >().pg;
ldout(pg->cct, 10) << "NotTrimming react KickTrim" << dendl;
if (!(pg->is_primary() && pg->is_active())) {
ldout(pg->cct, 10) << "NotTrimming not primary or active" << dendl;
return discard_event();
}
if (!pg->is_clean() ||
pg->snap_trimq.empty()) {
ldout(pg->cct, 10) << "NotTrimming not clean or nothing to trim" << dendl;
return discard_event();
}
if (pg->is_scrub_queued_or_active()) {
ldout(pg->cct, 10) << " scrubbing, will requeue snap_trimmer after" << dendl;
return transit< WaitScrub >();
} else {
return transit< Trimming >();
}
}
boost::statechart::result PrimaryLogPG::WaitReservation::react(const SnapTrimReserved&)
{
PrimaryLogPG *pg = context< SnapTrimmer >().pg;
ldout(pg->cct, 10) << "WaitReservation react SnapTrimReserved" << dendl;
pending = nullptr;
if (!context< SnapTrimmer >().can_trim()) {
post_event(KickTrim());
return transit< NotTrimming >();
}
context<Trimming>().snap_to_trim = pg->snap_trimq.range_start();
ldout(pg->cct, 10) << "NotTrimming: trimming "
<< pg->snap_trimq.range_start()
<< dendl;
return transit< AwaitAsyncWork >();
}
/* AwaitAsyncWork */
PrimaryLogPG::AwaitAsyncWork::AwaitAsyncWork(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "Trimming/AwaitAsyncWork")
{
auto *pg = context< SnapTrimmer >().pg;
context< SnapTrimmer >().log_enter(state_name);
context< SnapTrimmer >().pg->osd->queue_for_snap_trim(pg);
pg->state_set(PG_STATE_SNAPTRIM);
pg->state_clear(PG_STATE_SNAPTRIM_ERROR);
pg->publish_stats_to_osd();
}
boost::statechart::result PrimaryLogPG::AwaitAsyncWork::react(const DoSnapWork&)
{
PrimaryLogPGRef pg = context< SnapTrimmer >().pg;
snapid_t snap_to_trim = context<Trimming>().snap_to_trim;
auto &in_flight = context<Trimming>().in_flight;
ceph_assert(in_flight.empty());
ceph_assert(pg->is_primary() && pg->is_active());
if (!context< SnapTrimmer >().can_trim()) {
ldout(pg->cct, 10) << "something changed, reverting to NotTrimming" << dendl;
post_event(KickTrim());
return transit< NotTrimming >();
}
ldout(pg->cct, 10) << "AwaitAsyncWork: trimming snap " << snap_to_trim << dendl;
vector<hobject_t> to_trim;
unsigned max = pg->cct->_conf->osd_pg_max_concurrent_snap_trims;
// we need to look for at least 1 snaptrim, otherwise we'll misinterpret
// the ENOENT below and erase snap_to_trim.
ceph_assert(max > 0);
to_trim.reserve(max);
int r = pg->snap_mapper.get_next_objects_to_trim(
snap_to_trim,
max,
&to_trim);
if (r != 0 && r != -ENOENT) {
lderr(pg->cct) << "get_next_objects_to_trim returned "
<< cpp_strerror(r) << dendl;
ceph_abort_msg("get_next_objects_to_trim returned an invalid code");
} else if (r == -ENOENT) {
// Done!
ldout(pg->cct, 10) << "got ENOENT" << dendl;
pg->snap_trimq.erase(snap_to_trim);
if (pg->snap_trimq_repeat.count(snap_to_trim)) {
ldout(pg->cct, 10) << " removing from snap_trimq_repeat" << dendl;
pg->snap_trimq_repeat.erase(snap_to_trim);
} else {
ldout(pg->cct, 10) << "adding snap " << snap_to_trim
<< " to purged_snaps"
<< dendl;
ObjectStore::Transaction t;
pg->recovery_state.adjust_purged_snaps(
[snap_to_trim](auto &purged_snaps) {
purged_snaps.insert(snap_to_trim);
});
pg->write_if_dirty(t);
ldout(pg->cct, 10) << "purged_snaps now "
<< pg->info.purged_snaps << ", snap_trimq now "
<< pg->snap_trimq << dendl;
int tr = pg->osd->store->queue_transaction(pg->ch, std::move(t), NULL);
ceph_assert(tr == 0);
pg->recovery_state.share_pg_info();
}
post_event(KickTrim());
pg->set_snaptrim_duration();
return transit< NotTrimming >();
}
ceph_assert(!to_trim.empty());
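  // Issue a trim repop for each object returned by the snap mapper.  On an
  // error we stop issuing new ones and either drain in-flight repops
  // (WaitRepops), wait for the object's write lock (WaitRWLock), or fall
  // back to NotTrimming.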
for (auto &&object: to_trim) {
// Get next
ldout(pg->cct, 10) << "AwaitAsyncWork react trimming " << object << dendl;
OpContextUPtr ctx;
int error = pg->trim_object(in_flight.empty(), object, snap_to_trim, &ctx);
if (error) {
if (error == -ENOLCK) {
ldout(pg->cct, 10) << "could not get write lock on obj "
<< object << dendl;
} else {
pg->state_set(PG_STATE_SNAPTRIM_ERROR);
ldout(pg->cct, 10) << "Snaptrim error=" << error << dendl;
}
if (!in_flight.empty()) {
ldout(pg->cct, 10) << "letting the ones we already started finish" << dendl;
return transit< WaitRepops >();
}
if (error == -ENOLCK) {
ldout(pg->cct, 10) << "waiting for it to clear"
<< dendl;
return transit< WaitRWLock >();
}
return transit< NotTrimming >();
}
in_flight.insert(object);
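    // When this object's trim repop completes, drop it from in_flight; once
    // the set drains, advance the state machine (Reset if a trim error was
    // recorded, RepopsComplete otherwise).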
ctx->register_on_success(
[pg, object, &in_flight]() {
ceph_assert(in_flight.find(object) != in_flight.end());
in_flight.erase(object);
if (in_flight.empty()) {
if (pg->state_test(PG_STATE_SNAPTRIM_ERROR)) {
pg->snap_trimmer_machine.process_event(Reset());
} else {
pg->snap_trimmer_machine.process_event(RepopsComplete());
}
}
});
pg->simple_opc_submit(std::move(ctx));
}
return transit< WaitRepops >();
}
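/*
 * Object attr helpers: writes always go through the PGTransaction; reads may
 * be served from the per-object attr_cache maintained for erasure-coded
 * pools (see getattr_maybe_cache / getattrs_maybe_cache below).
 */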
void PrimaryLogPG::setattr_maybe_cache(
ObjectContextRef obc,
PGTransaction *t,
const string &key,
bufferlist &val)
{
t->setattr(obc->obs.oi.soid, key, val);
}
void PrimaryLogPG::setattrs_maybe_cache(
ObjectContextRef obc,
PGTransaction *t,
map<string, bufferlist, less<>> &attrs)
{
t->setattrs(obc->obs.oi.soid, attrs);
}
void PrimaryLogPG::rmattr_maybe_cache(
ObjectContextRef obc,
PGTransaction *t,
const string &key)
{
t->rmattr(obc->obs.oi.soid, key);
}
int PrimaryLogPG::getattr_maybe_cache(
ObjectContextRef obc,
const string &key,
bufferlist *val)
{
if (pool.info.is_erasure()) {
map<string, bufferlist>::iterator i = obc->attr_cache.find(key);
if (i != obc->attr_cache.end()) {
if (val)
*val = i->second;
return 0;
} else {
if (obc->obs.exists) {
return -ENODATA;
} else {
return -ENOENT;
}
}
}
return pgbackend->objects_get_attr(obc->obs.oi.soid, key, val);
}
int PrimaryLogPG::getattrs_maybe_cache(
ObjectContextRef obc,
map<string, bufferlist, less<>> *out)
{
int r = 0;
ceph_assert(out);
if (pool.info.is_erasure()) {
*out = obc->attr_cache;
} else {
r = pgbackend->objects_get_attrs(obc->obs.oi.soid, out);
}
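  // User xattrs are stored with a leading '_'; keep only those and strip the
  // prefix so callers see the plain xattr names.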
map<string, bufferlist, less<>> tmp;
for (auto& [key, val]: *out) {
if (key.size() > 1 && key[0] == '_') {
tmp[key.substr(1, key.size())] = std::move(val);
}
}
tmp.swap(*out);
return r;
}
bool PrimaryLogPG::check_failsafe_full() {
return osd->check_failsafe_full(get_dpp());
}
bool PrimaryLogPG::maybe_preempt_replica_scrub(const hobject_t& oid)
{
return m_scrubber->write_blocked_by_scrub(oid);
}
void intrusive_ptr_add_ref(PrimaryLogPG *pg) { pg->get("intptr"); }
void intrusive_ptr_release(PrimaryLogPG *pg) { pg->put("intptr"); }
#ifdef PG_DEBUG_REFS
uint64_t get_with_id(PrimaryLogPG *pg) { return pg->get_with_id(); }
void put_with_id(PrimaryLogPG *pg, uint64_t id) { return pg->put_with_id(id); }
#endif
void intrusive_ptr_add_ref(PrimaryLogPG::RepGather *repop) { repop->get(); }
void intrusive_ptr_release(PrimaryLogPG::RepGather *repop) { repop->put(); }
| 483,305 | 29.519449 | 195 | cc |
null | ceph-main/src/osd/PrimaryLogPG.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_REPLICATEDPG_H
#define CEPH_REPLICATEDPG_H
#include <boost/tuple/tuple.hpp>
#include "include/ceph_assert.h"
#include "DynamicPerfStats.h"
#include "OSD.h"
#include "PG.h"
#include "Watch.h"
#include "TierAgentState.h"
#include "messages/MOSDOpReply.h"
#include "common/Checksummer.h"
#include "common/sharedptr_registry.hpp"
#include "common/shared_cache.hpp"
#include "ReplicatedBackend.h"
#include "PGTransaction.h"
#include "cls/cas/cls_cas_ops.h"
class CopyFromCallback;
class PromoteCallback;
struct RefCountCallback;
class PrimaryLogPG;
class PGLSFilter;
class HitSet;
struct TierAgentState;
class OSDService;
void intrusive_ptr_add_ref(PrimaryLogPG *pg);
void intrusive_ptr_release(PrimaryLogPG *pg);
uint64_t get_with_id(PrimaryLogPG *pg);
void put_with_id(PrimaryLogPG *pg, uint64_t id);
#ifdef PG_DEBUG_REFS
typedef TrackedIntPtr<PrimaryLogPG> PrimaryLogPGRef;
#else
typedef boost::intrusive_ptr<PrimaryLogPG> PrimaryLogPGRef;
#endif
struct inconsistent_snapset_wrapper;
class PrimaryLogPG : public PG, public PGBackend::Listener {
friend class OSD;
friend class Watch;
friend class PrimaryLogScrub;
public:
MEMPOOL_CLASS_HELPERS();
/*
* state associated with a copy operation
*/
struct OpContext;
class CopyCallback;
/**
* CopyResults stores the object metadata of interest to a copy initiator.
*/
struct CopyResults {
ceph::real_time mtime; ///< the copy source's mtime
uint64_t object_size; ///< the copied object's size
bool started_temp_obj; ///< true if the callback needs to delete temp object
hobject_t temp_oid; ///< temp object (if any)
/**
* Function to fill in transaction; if non-empty the callback
* must execute it before any other accesses to the object
* (in order to complete the copy).
*/
std::function<void(PGTransaction *)> fill_in_final_tx;
version_t user_version; ///< The copy source's user version
bool should_requeue; ///< op should be requeued on cancel
std::vector<snapid_t> snaps; ///< src's snaps (if clone)
snapid_t snap_seq; ///< src's snap_seq (if head)
librados::snap_set_t snapset; ///< src snapset (if head)
bool mirror_snapset;
bool has_omap;
uint32_t flags; // object_copy_data_t::FLAG_*
uint32_t source_data_digest, source_omap_digest;
uint32_t data_digest, omap_digest;
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > reqids; // [(reqid, user_version)]
    mempool::osd_pglog::map<uint32_t, int> reqid_return_codes; // map reqids by index to error code
std::map<std::string, ceph::buffer::list, std::less<>> attrs; // xattrs
uint64_t truncate_seq;
uint64_t truncate_size;
bool is_data_digest() {
return flags & object_copy_data_t::FLAG_DATA_DIGEST;
}
bool is_omap_digest() {
return flags & object_copy_data_t::FLAG_OMAP_DIGEST;
}
CopyResults()
: object_size(0), started_temp_obj(false),
user_version(0),
should_requeue(false), mirror_snapset(false),
has_omap(false),
flags(0),
source_data_digest(-1), source_omap_digest(-1),
data_digest(-1), omap_digest(-1),
truncate_seq(0), truncate_size(0)
{}
};
struct CopyOp;
typedef std::shared_ptr<CopyOp> CopyOpRef;
struct CopyOp {
CopyCallback *cb;
ObjectContextRef obc;
hobject_t src;
object_locator_t oloc;
unsigned flags;
bool mirror_snapset;
CopyResults results;
ceph_tid_t objecter_tid;
ceph_tid_t objecter_tid2;
object_copy_cursor_t cursor;
std::map<std::string,ceph::buffer::list,std::less<>> attrs;
ceph::buffer::list data;
ceph::buffer::list omap_header;
ceph::buffer::list omap_data;
int rval;
object_copy_cursor_t temp_cursor;
/*
* For CopyOp the process is:
* step1: read the data(attr/omap/data) from the source object
* step2: handle those data(w/ those data create a new object)
* src_obj_fadvise_flags used in step1;
* dest_obj_fadvise_flags used in step2
*/
unsigned src_obj_fadvise_flags;
unsigned dest_obj_fadvise_flags;
std::map<uint64_t, CopyOpRef> chunk_cops;
int num_chunk;
bool failed;
uint64_t start_offset = 0;
uint64_t last_offset = 0;
std::vector<OSDOp> chunk_ops;
CopyOp(CopyCallback *cb_, ObjectContextRef _obc, hobject_t s,
object_locator_t l,
version_t v,
unsigned f,
bool ms,
unsigned src_obj_fadvise_flags,
unsigned dest_obj_fadvise_flags)
: cb(cb_), obc(_obc), src(s), oloc(l), flags(f),
mirror_snapset(ms),
objecter_tid(0),
objecter_tid2(0),
rval(-1),
src_obj_fadvise_flags(src_obj_fadvise_flags),
dest_obj_fadvise_flags(dest_obj_fadvise_flags),
num_chunk(0),
failed(false)
{
results.user_version = v;
results.mirror_snapset = mirror_snapset;
}
};
/**
* The CopyCallback class defines an interface for completions to the
* copy_start code. Users of the copy infrastructure must implement
* one and give an instance of the class to start_copy.
*
* The implementer is responsible for making sure that the CopyCallback
* can associate itself with the correct copy operation.
*/
typedef boost::tuple<int, CopyResults*> CopyCallbackResults;
friend class CopyFromCallback;
friend struct CopyFromFinisher;
friend class PromoteCallback;
friend struct PromoteFinisher;
friend struct C_gather;
struct ProxyReadOp {
OpRequestRef op;
hobject_t soid;
ceph_tid_t objecter_tid;
std::vector<OSDOp> &ops;
version_t user_version;
int data_offset;
bool canceled; ///< true if canceled
ProxyReadOp(OpRequestRef _op, hobject_t oid, std::vector<OSDOp>& _ops)
: op(_op), soid(oid),
objecter_tid(0), ops(_ops),
user_version(0), data_offset(0),
canceled(false) { }
};
typedef std::shared_ptr<ProxyReadOp> ProxyReadOpRef;
struct ProxyWriteOp {
OpContext *ctx;
OpRequestRef op;
hobject_t soid;
ceph_tid_t objecter_tid;
std::vector<OSDOp> &ops;
version_t user_version;
bool sent_reply;
utime_t mtime;
bool canceled;
osd_reqid_t reqid;
ProxyWriteOp(OpRequestRef _op, hobject_t oid, std::vector<OSDOp>& _ops, osd_reqid_t _reqid)
: ctx(NULL), op(_op), soid(oid),
objecter_tid(0), ops(_ops),
user_version(0), sent_reply(false),
canceled(false),
reqid(_reqid) { }
};
typedef std::shared_ptr<ProxyWriteOp> ProxyWriteOpRef;
struct FlushOp {
ObjectContextRef obc; ///< obc we are flushing
OpRequestRef op; ///< initiating op
std::list<OpRequestRef> dup_ops; ///< bandwagon jumpers
version_t flushed_version; ///< user version we are flushing
ceph_tid_t objecter_tid; ///< copy-from request tid
int rval; ///< copy-from result
bool blocking; ///< whether we are blocking updates
bool removal; ///< we are removing the backend object
std::optional<std::function<void()>> on_flush; ///< callback, may be null
// for chunked object
std::map<uint64_t, int> io_results;
std::map<uint64_t, ceph_tid_t> io_tids;
uint64_t chunks;
FlushOp()
: flushed_version(0), objecter_tid(0), rval(0),
blocking(false), removal(false), chunks(0) {}
~FlushOp() { ceph_assert(!on_flush); }
};
typedef std::shared_ptr<FlushOp> FlushOpRef;
struct CLSGatherOp {
OpContext *ctx = nullptr;
ObjectContextRef obc;
OpRequestRef op;
std::vector<ceph_tid_t> objecter_tids;
int rval = 0;
CLSGatherOp(OpContext *ctx_, ObjectContextRef obc_, OpRequestRef op_)
: ctx(ctx_), obc(obc_), op(op_) {}
CLSGatherOp() {}
~CLSGatherOp() {}
};
friend struct RefCountCallback;
struct ManifestOp {
RefCountCallback *cb = nullptr;
ceph_tid_t objecter_tid = 0;
OpRequestRef op;
std::map<uint64_t, int> results;
std::map<uint64_t, ceph_tid_t> tids;
std::map<hobject_t, std::pair<uint64_t, uint64_t>> chunks;
uint64_t num_chunks = 0;
object_manifest_t new_manifest;
ObjectContextRef obc;
ManifestOp(ObjectContextRef obc, RefCountCallback* cb)
: cb(cb), obc(obc) {}
ManifestOp() = delete;
};
typedef std::shared_ptr<ManifestOp> ManifestOpRef;
std::map<hobject_t, ManifestOpRef> manifest_ops;
boost::scoped_ptr<PGBackend> pgbackend;
PGBackend *get_pgbackend() override {
return pgbackend.get();
}
const PGBackend *get_pgbackend() const override {
return pgbackend.get();
}
/// Listener methods
DoutPrefixProvider *get_dpp() override {
return this;
}
void on_local_recover(
const hobject_t &oid,
const ObjectRecoveryInfo &recovery_info,
ObjectContextRef obc,
bool is_delete,
ObjectStore::Transaction *t
) override;
void on_peer_recover(
pg_shard_t peer,
const hobject_t &oid,
const ObjectRecoveryInfo &recovery_info
) override {
recovery_state.on_peer_recover(peer, oid, recovery_info.version);
}
void begin_peer_recover(
pg_shard_t peer,
const hobject_t oid) override {
recovery_state.begin_peer_recover(peer, oid);
}
void on_global_recover(
const hobject_t &oid,
const object_stat_sum_t &stat_diff,
bool is_delete) override;
void on_failed_pull(
const std::set<pg_shard_t> &from,
const hobject_t &soid,
const eversion_t &version) override;
void cancel_pull(const hobject_t &soid) override;
void apply_stats(
const hobject_t &soid,
const object_stat_sum_t &delta_stats) override;
bool primary_error(const hobject_t& soid, eversion_t v);
void remove_missing_object(const hobject_t &oid,
eversion_t v,
Context *on_complete) override;
template<class T> class BlessedGenContext;
template<class T> class UnlockedBlessedGenContext;
class BlessedContext;
Context *bless_context(Context *c) override;
GenContext<ThreadPool::TPHandle&> *bless_gencontext(
GenContext<ThreadPool::TPHandle&> *c) override;
GenContext<ThreadPool::TPHandle&> *bless_unlocked_gencontext(
GenContext<ThreadPool::TPHandle&> *c) override;
void send_message(int to_osd, Message *m) override {
osd->send_message_osd_cluster(to_osd, m, get_osdmap_epoch());
}
void queue_transaction(ObjectStore::Transaction&& t,
OpRequestRef op) override {
osd->store->queue_transaction(ch, std::move(t), op);
}
void queue_transactions(std::vector<ObjectStore::Transaction>& tls,
OpRequestRef op) override {
osd->store->queue_transactions(ch, tls, op, NULL);
}
epoch_t get_interval_start_epoch() const override {
return info.history.same_interval_since;
}
epoch_t get_last_peering_reset_epoch() const override {
return get_last_peering_reset();
}
const std::set<pg_shard_t> &get_acting_recovery_backfill_shards() const override {
return get_acting_recovery_backfill();
}
const std::set<pg_shard_t> &get_acting_shards() const override {
return recovery_state.get_actingset();
}
const std::set<pg_shard_t> &get_backfill_shards() const override {
return get_backfill_targets();
}
std::ostream& gen_dbg_prefix(std::ostream& out) const override {
return gen_prefix(out);
}
const HobjToShardSetMapping& get_missing_loc_shards() const override
{
return recovery_state.get_missing_loc().get_missing_locs();
}
const std::map<pg_shard_t, pg_missing_t> &get_shard_missing() const override {
return recovery_state.get_peer_missing();
}
using PGBackend::Listener::get_shard_missing;
const std::map<pg_shard_t, pg_info_t> &get_shard_info() const override {
return recovery_state.get_peer_info();
}
using PGBackend::Listener::get_shard_info;
const pg_missing_tracker_t &get_local_missing() const override {
return recovery_state.get_pg_log().get_missing();
}
const PGLog &get_log() const override {
return recovery_state.get_pg_log();
}
void add_local_next_event(const pg_log_entry_t& e) override {
recovery_state.add_local_next_event(e);
}
bool pgb_is_primary() const override {
return is_primary();
}
const OSDMapRef& pgb_get_osdmap() const override final {
return get_osdmap();
}
epoch_t pgb_get_osdmap_epoch() const override final {
return get_osdmap_epoch();
}
const pg_info_t &get_info() const override {
return info;
}
const pg_pool_t &get_pool() const override {
return pool.info;
}
ObjectContextRef get_obc(
const hobject_t &hoid,
const std::map<std::string, ceph::buffer::list, std::less<>> &attrs) override {
return get_object_context(hoid, true, &attrs);
}
bool try_lock_for_read(
const hobject_t &hoid,
ObcLockManager &manager) override {
if (is_missing_object(hoid))
return false;
auto obc = get_object_context(hoid, false, nullptr);
if (!obc)
return false;
return manager.try_get_read_lock(hoid, obc);
}
void release_locks(ObcLockManager &manager) override {
release_object_locks(manager);
}
void inc_osd_stat_repaired() override {
osd->inc_osd_stat_repaired();
}
bool pg_is_remote_backfilling() override {
return is_remote_backfilling();
}
void pg_add_local_num_bytes(int64_t num_bytes) override {
add_local_num_bytes(num_bytes);
}
void pg_sub_local_num_bytes(int64_t num_bytes) override {
sub_local_num_bytes(num_bytes);
}
void pg_add_num_bytes(int64_t num_bytes) override {
add_num_bytes(num_bytes);
}
void pg_sub_num_bytes(int64_t num_bytes) override {
sub_num_bytes(num_bytes);
}
void pgb_set_object_snap_mapping(
const hobject_t &soid,
const std::set<snapid_t> &snaps,
ObjectStore::Transaction *t) override {
return update_object_snap_mapping(t, soid, snaps);
}
void pgb_clear_object_snap_mapping(
const hobject_t &soid,
ObjectStore::Transaction *t) override {
return clear_object_snap_mapping(t, soid);
}
void log_operation(
std::vector<pg_log_entry_t>&& logv,
const std::optional<pg_hit_set_history_t> &hset_history,
const eversion_t &trim_to,
const eversion_t &roll_forward_to,
const eversion_t &min_last_complete_ondisk,
bool transaction_applied,
ObjectStore::Transaction &t,
bool async = false) override {
if (is_primary()) {
ceph_assert(trim_to <= recovery_state.get_last_update_ondisk());
}
if (hset_history) {
recovery_state.update_hset(*hset_history);
}
if (transaction_applied) {
update_snap_map(logv, t);
}
auto last = logv.rbegin();
if (is_primary() && last != logv.rend()) {
projected_log.skip_can_rollback_to_to_head();
projected_log.trim(cct, last->version, nullptr, nullptr, nullptr);
}
if (!is_primary() && !is_ec_pg()) {
replica_clear_repop_obc(logv, t);
}
recovery_state.append_log(
std::move(logv), trim_to, roll_forward_to, min_last_complete_ondisk,
t, transaction_applied, async);
}
void replica_clear_repop_obc(
const std::vector<pg_log_entry_t> &logv,
ObjectStore::Transaction &t);
void op_applied(const eversion_t &applied_version) override;
bool should_send_op(
pg_shard_t peer,
const hobject_t &hoid) override;
bool pg_is_undersized() const override {
return is_undersized();
}
bool pg_is_repair() const override {
return is_repair();
}
void update_peer_last_complete_ondisk(
pg_shard_t fromosd,
eversion_t lcod) override {
recovery_state.update_peer_last_complete_ondisk(fromosd, lcod);
}
void update_last_complete_ondisk(
eversion_t lcod) override {
recovery_state.update_last_complete_ondisk(lcod);
}
void update_stats(
const pg_stat_t &stat) override {
recovery_state.update_stats(
[&stat](auto &history, auto &stats) {
stats = stat;
return false;
});
}
void schedule_recovery_work(
GenContext<ThreadPool::TPHandle&> *c,
uint64_t cost) override;
pg_shard_t whoami_shard() const override {
return pg_whoami;
}
spg_t primary_spg_t() const override {
return spg_t(info.pgid.pgid, get_primary().shard);
}
pg_shard_t primary_shard() const override {
return get_primary();
}
uint64_t min_peer_features() const override {
return recovery_state.get_min_peer_features();
}
uint64_t min_upacting_features() const override {
return recovery_state.get_min_upacting_features();
}
void send_message_osd_cluster(
int peer, Message *m, epoch_t from_epoch) override {
osd->send_message_osd_cluster(peer, m, from_epoch);
}
void send_message_osd_cluster(
std::vector<std::pair<int, Message*>>& messages, epoch_t from_epoch) override {
osd->send_message_osd_cluster(messages, from_epoch);
}
void send_message_osd_cluster(
MessageRef m, Connection *con) override {
osd->send_message_osd_cluster(std::move(m), con);
}
void send_message_osd_cluster(
Message *m, const ConnectionRef& con) override {
osd->send_message_osd_cluster(m, con);
}
ConnectionRef get_con_osd_cluster(int peer, epoch_t from_epoch) override;
entity_name_t get_cluster_msgr_name() override {
return osd->get_cluster_msgr_name();
}
PerfCounters *get_logger() override;
ceph_tid_t get_tid() override { return osd->get_tid(); }
OstreamTemp clog_error() override { return osd->clog->error(); }
OstreamTemp clog_warn() override { return osd->clog->warn(); }
/**
* a scrub-map arrived from a replica
*/
void do_replica_scrub_map(OpRequestRef op);
struct watch_disconnect_t {
uint64_t cookie;
entity_name_t name;
bool send_disconnect;
watch_disconnect_t(uint64_t c, entity_name_t n, bool sd)
: cookie(c), name(n), send_disconnect(sd) {}
};
void complete_disconnect_watches(
ObjectContextRef obc,
const std::list<watch_disconnect_t> &to_disconnect);
struct OpFinisher {
virtual ~OpFinisher() {
}
virtual int execute() = 0;
};
/*
* Capture all object state associated with an in-progress read or write.
*/
struct OpContext {
OpRequestRef op;
osd_reqid_t reqid;
std::vector<OSDOp> *ops;
const ObjectState *obs; // Old objectstate
const SnapSet *snapset; // Old snapset
ObjectState new_obs; // resulting ObjectState
SnapSet new_snapset; // resulting SnapSet (in case of a write)
//pg_stat_t new_stats; // resulting Stats
object_stat_sum_t delta_stats;
bool modify; // (force) modification (even if op_t is empty)
bool user_modify; // user-visible modification
bool undirty; // user explicitly un-dirtying this object
bool cache_operation; ///< true if this is a cache eviction
    bool ignore_cache;               ///< true if IGNORE_CACHE flag is set
bool ignore_log_op_stats; // don't log op stats
bool update_log_only; ///< this is a write that returned an error - just record in pg log for dup detection
ObjectCleanRegions clean_regions;
// side effects
std::list<std::pair<watch_info_t,bool> > watch_connects; ///< new watch + will_ping flag
std::list<watch_disconnect_t> watch_disconnects; ///< old watch + send_discon
std::list<notify_info_t> notifies;
struct NotifyAck {
std::optional<uint64_t> watch_cookie;
uint64_t notify_id;
ceph::buffer::list reply_bl;
explicit NotifyAck(uint64_t notify_id) : notify_id(notify_id) {}
NotifyAck(uint64_t notify_id, uint64_t cookie, ceph::buffer::list& rbl)
: watch_cookie(cookie), notify_id(notify_id) {
reply_bl = std::move(rbl);
}
};
std::list<NotifyAck> notify_acks;
uint64_t bytes_written, bytes_read;
utime_t mtime;
SnapContext snapc; // writer snap context
eversion_t at_version; // pg's current version pointer
version_t user_at_version; // pg's current user version pointer
/// index of the current subop - only valid inside of do_osd_ops()
int current_osd_subop_num;
/// total number of subops processed in this context for cls_cxx_subop_version()
int processed_subop_count = 0;
PGTransactionUPtr op_t;
std::vector<pg_log_entry_t> log;
std::optional<pg_hit_set_history_t> updated_hset_history;
interval_set<uint64_t> modified_ranges;
ObjectContextRef obc;
ObjectContextRef clone_obc; // if we created a clone
ObjectContextRef head_obc; // if we also update snapset (see trim_object)
// FIXME: we may want to kill this msgr hint off at some point!
std::optional<int> data_off = std::nullopt;
MOSDOpReply *reply;
PrimaryLogPG *pg;
int num_read; ///< count read ops
int num_write; ///< count update ops
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > extra_reqids;
mempool::osd_pglog::map<uint32_t, int> extra_reqid_return_codes;
hobject_t new_temp_oid, discard_temp_oid; ///< temp objects we should start/stop tracking
std::list<std::function<void()>> on_applied;
std::list<std::function<void()>> on_committed;
std::list<std::function<void()>> on_finish;
std::list<std::function<void()>> on_success;
template <typename F>
void register_on_finish(F &&f) {
on_finish.emplace_back(std::forward<F>(f));
}
template <typename F>
void register_on_success(F &&f) {
on_success.emplace_back(std::forward<F>(f));
}
template <typename F>
void register_on_applied(F &&f) {
on_applied.emplace_back(std::forward<F>(f));
}
template <typename F>
void register_on_commit(F &&f) {
on_committed.emplace_back(std::forward<F>(f));
}
bool sent_reply = false;
// pending async reads <off, len, op_flags> -> <outbl, outr>
std::list<std::pair<boost::tuple<uint64_t, uint64_t, unsigned>,
std::pair<ceph::buffer::list*, Context*> > > pending_async_reads;
int inflightreads;
friend struct OnReadComplete;
void start_async_reads(PrimaryLogPG *pg);
void finish_read(PrimaryLogPG *pg);
bool async_reads_complete() {
return inflightreads == 0;
}
RWState::State lock_type;
ObcLockManager lock_manager;
std::map<int, std::unique_ptr<OpFinisher>> op_finishers;
OpContext(const OpContext& other);
const OpContext& operator=(const OpContext& other);
OpContext(OpRequestRef _op, osd_reqid_t _reqid, std::vector<OSDOp>* _ops,
ObjectContextRef& obc,
PrimaryLogPG *_pg) :
op(_op), reqid(_reqid), ops(_ops),
obs(&obc->obs),
snapset(0),
new_obs(obs->oi, obs->exists),
modify(false), user_modify(false), undirty(false), cache_operation(false),
ignore_cache(false), ignore_log_op_stats(false), update_log_only(false),
bytes_written(0), bytes_read(0), user_at_version(0),
current_osd_subop_num(0),
obc(obc),
reply(NULL), pg(_pg),
num_read(0),
num_write(0),
sent_reply(false),
inflightreads(0),
lock_type(RWState::RWNONE) {
if (obc->ssc) {
new_snapset = obc->ssc->snapset;
snapset = &obc->ssc->snapset;
}
}
OpContext(OpRequestRef _op, osd_reqid_t _reqid,
std::vector<OSDOp>* _ops, PrimaryLogPG *_pg) :
op(_op), reqid(_reqid), ops(_ops), obs(NULL), snapset(0),
modify(false), user_modify(false), undirty(false), cache_operation(false),
ignore_cache(false), ignore_log_op_stats(false), update_log_only(false),
bytes_written(0), bytes_read(0), user_at_version(0),
current_osd_subop_num(0),
reply(NULL), pg(_pg),
num_read(0),
num_write(0),
inflightreads(0),
lock_type(RWState::RWNONE) {}
void reset_obs(ObjectContextRef obc) {
new_obs = ObjectState(obc->obs.oi, obc->obs.exists);
if (obc->ssc) {
new_snapset = obc->ssc->snapset;
snapset = &obc->ssc->snapset;
}
}
~OpContext() {
ceph_assert(!op_t);
if (reply)
reply->put();
for (std::list<std::pair<boost::tuple<uint64_t, uint64_t, unsigned>,
std::pair<ceph::buffer::list*, Context*> > >::iterator i =
pending_async_reads.begin();
i != pending_async_reads.end();
pending_async_reads.erase(i++)) {
delete i->second.second;
}
}
uint64_t get_features() {
if (op && op->get_req()) {
return op->get_req()->get_connection()->get_features();
}
return -1ull;
}
};
using OpContextUPtr = std::unique_ptr<OpContext>;
friend struct OpContext;
/*
* State on the PG primary associated with the replicated mutation
*/
class RepGather {
public:
hobject_t hoid;
OpRequestRef op;
xlist<RepGather*>::item queue_item;
int nref;
eversion_t v;
int r = 0;
ceph_tid_t rep_tid;
bool rep_aborted;
bool all_committed;
utime_t start;
eversion_t pg_local_last_complete;
ObcLockManager lock_manager;
std::list<std::function<void()>> on_committed;
std::list<std::function<void()>> on_success;
std::list<std::function<void()>> on_finish;
RepGather(
OpContext *c, ceph_tid_t rt,
eversion_t lc) :
hoid(c->obc->obs.oi.soid),
op(c->op),
queue_item(this),
nref(1),
rep_tid(rt),
rep_aborted(false),
all_committed(false),
pg_local_last_complete(lc),
lock_manager(std::move(c->lock_manager)),
on_committed(std::move(c->on_committed)),
on_success(std::move(c->on_success)),
on_finish(std::move(c->on_finish)) {}
RepGather(
ObcLockManager &&manager,
OpRequestRef &&o,
std::optional<std::function<void(void)> > &&on_complete,
ceph_tid_t rt,
eversion_t lc,
int r) :
op(o),
queue_item(this),
nref(1),
r(r),
rep_tid(rt),
rep_aborted(false),
all_committed(false),
pg_local_last_complete(lc),
lock_manager(std::move(manager)) {
if (on_complete) {
on_success.push_back(std::move(*on_complete));
}
}
RepGather *get() {
nref++;
return this;
}
void put() {
ceph_assert(nref > 0);
if (--nref == 0) {
delete this;
//generic_dout(0) << "deleting " << this << dendl;
}
}
};
protected:
/**
* Grabs locks for OpContext, should be cleaned up in close_op_ctx
*
* @param ctx [in,out] ctx to get locks for
* @return true on success, false if we are queued
*/
bool get_rw_locks(bool write_ordered, OpContext *ctx);
/**
* Cleans up OpContext
*
* @param ctx [in] ctx to clean up
*/
void close_op_ctx(OpContext *ctx);
/**
* Releases locks
*
* @param manager [in] manager with locks to release
*
* (moved to .cc due to scrubber access)
*/
void release_object_locks(ObcLockManager &lock_manager);
// replica ops
// [primary|tail]
xlist<RepGather*> repop_queue;
friend class C_OSD_RepopCommit;
void repop_all_committed(RepGather *repop);
void eval_repop(RepGather*);
void issue_repop(RepGather *repop, OpContext *ctx);
RepGather *new_repop(
OpContext *ctx,
ceph_tid_t rep_tid);
boost::intrusive_ptr<RepGather> new_repop(
eversion_t version,
int r,
ObcLockManager &&manager,
OpRequestRef &&op,
std::optional<std::function<void(void)> > &&on_complete);
void remove_repop(RepGather *repop);
OpContextUPtr simple_opc_create(ObjectContextRef obc);
void simple_opc_submit(OpContextUPtr ctx);
/**
* Merge entries atomically into all acting_recovery_backfill osds
* adjusting missing and recovery state as necessary.
*
* Also used to store error log entries for dup detection.
*/
void submit_log_entries(
const mempool::osd_pglog::list<pg_log_entry_t> &entries,
ObcLockManager &&manager,
std::optional<std::function<void(void)> > &&on_complete,
OpRequestRef op = OpRequestRef(),
int r = 0);
struct LogUpdateCtx {
boost::intrusive_ptr<RepGather> repop;
std::set<pg_shard_t> waiting_on;
};
void cancel_log_updates();
std::map<ceph_tid_t, LogUpdateCtx> log_entry_update_waiting_on;
// hot/cold tracking
HitSetRef hit_set; ///< currently accumulating HitSet
utime_t hit_set_start_stamp; ///< time the current HitSet started recording
void hit_set_clear(); ///< discard any HitSet state
void hit_set_setup(); ///< initialize HitSet state
void hit_set_create(); ///< create a new HitSet
void hit_set_persist(); ///< persist hit info
bool hit_set_apply_log(); ///< apply log entries to update in-memory HitSet
void hit_set_trim(OpContextUPtr &ctx, unsigned max); ///< discard old HitSets
void hit_set_in_memory_trim(uint32_t max_in_memory); ///< discard old in memory HitSets
void hit_set_remove_all();
hobject_t get_hit_set_current_object(utime_t stamp);
hobject_t get_hit_set_archive_object(utime_t start,
utime_t end,
bool using_gmt);
// agent
boost::scoped_ptr<TierAgentState> agent_state;
void agent_setup(); ///< initialize agent state
bool agent_work(int max) override ///< entry point to do some agent work
{
return agent_work(max, max);
}
bool agent_work(int max, int agent_flush_quota) override;
bool agent_maybe_flush(ObjectContextRef& obc); ///< maybe flush
bool agent_maybe_evict(ObjectContextRef& obc, bool after_flush); ///< maybe evict
void agent_load_hit_sets(); ///< load HitSets, if needed
/// estimate object atime and temperature
///
/// @param oid [in] object name
  /// @param temperature [out] relative temperature (considers both access time and frequency)
void agent_estimate_temp(const hobject_t& oid, int *temperature);
/// stop the agent
void agent_stop() override;
void agent_delay() override;
/// clear agent state
void agent_clear() override;
/// choose (new) agent mode(s), returns true if op is requeued
bool agent_choose_mode(bool restart = false, OpRequestRef op = OpRequestRef());
void agent_choose_mode_restart() override;
/// true if we can send an ondisk/commit for v
bool already_complete(eversion_t v);
// projected object info
SharedLRU<hobject_t, ObjectContext> object_contexts;
  // map from oid.snapdir() to SnapSetContext *
std::map<hobject_t, SnapSetContext*> snapset_contexts;
ceph::mutex snapset_contexts_lock =
ceph::make_mutex("PrimaryLogPG::snapset_contexts_lock");
// debug order that client ops are applied
std::map<hobject_t, std::map<client_t, ceph_tid_t>> debug_op_order;
void populate_obc_watchers(ObjectContextRef obc);
void check_blocklisted_obc_watchers(ObjectContextRef obc);
void check_blocklisted_watchers() override;
void get_watchers(std::list<obj_watch_item_t> *ls) override;
void get_obc_watchers(ObjectContextRef obc, std::list<obj_watch_item_t> &pg_watchers);
public:
void handle_watch_timeout(WatchRef watch);
protected:
ObjectContextRef create_object_context(const object_info_t& oi, SnapSetContext *ssc);
ObjectContextRef get_object_context(
const hobject_t& soid,
bool can_create,
const std::map<std::string, ceph::buffer::list, std::less<>> *attrs = 0
);
void context_registry_on_change();
void object_context_destructor_callback(ObjectContext *obc);
class C_PG_ObjectContext;
int find_object_context(const hobject_t& oid,
ObjectContextRef *pobc,
bool can_create,
bool map_snapid_to_clone=false,
hobject_t *missing_oid=NULL);
void add_object_context_to_pg_stat(ObjectContextRef obc, pg_stat_t *stat);
void get_src_oloc(const object_t& oid, const object_locator_t& oloc, object_locator_t& src_oloc);
SnapSetContext *get_snapset_context(
const hobject_t& oid,
bool can_create,
const std::map<std::string, ceph::buffer::list, std::less<>> *attrs = 0,
    bool oid_existed = true //indicate whether this oid existed in the backend
);
void register_snapset_context(SnapSetContext *ssc) {
std::lock_guard l(snapset_contexts_lock);
_register_snapset_context(ssc);
}
void _register_snapset_context(SnapSetContext *ssc) {
ceph_assert(ceph_mutex_is_locked(snapset_contexts_lock));
if (!ssc->registered) {
ceph_assert(snapset_contexts.count(ssc->oid) == 0);
ssc->registered = true;
snapset_contexts[ssc->oid] = ssc;
}
}
void put_snapset_context(SnapSetContext *ssc);
std::map<hobject_t, ObjectContextRef> recovering;
/*
* Backfill
*
* peer_info[backfill_target].last_backfill == info.last_backfill on the peer.
*
* objects prior to peer_info[backfill_target].last_backfill
* - are on the peer
* - are included in the peer stats
*
* objects \in (last_backfill, last_backfill_started]
* - are on the peer or are in backfills_in_flight
* - are not included in pg stats (yet)
* - have their stats in pending_backfill_updates on the primary
*/
std::set<hobject_t> backfills_in_flight;
std::map<hobject_t, pg_stat_t> pending_backfill_updates;
void dump_recovery_info(ceph::Formatter *f) const override {
f->open_array_section("waiting_on_backfill");
for (std::set<pg_shard_t>::const_iterator p = waiting_on_backfill.begin();
p != waiting_on_backfill.end(); ++p)
f->dump_stream("osd") << *p;
f->close_section();
f->dump_stream("last_backfill_started") << last_backfill_started;
{
f->open_object_section("backfill_info");
backfill_info.dump(f);
f->close_section();
}
{
f->open_array_section("peer_backfill_info");
for (std::map<pg_shard_t, BackfillInterval>::const_iterator pbi =
peer_backfill_info.begin();
pbi != peer_backfill_info.end(); ++pbi) {
f->dump_stream("osd") << pbi->first;
f->open_object_section("BackfillInterval");
pbi->second.dump(f);
f->close_section();
}
f->close_section();
}
{
f->open_array_section("backfills_in_flight");
for (std::set<hobject_t>::const_iterator i = backfills_in_flight.begin();
i != backfills_in_flight.end();
++i) {
f->dump_stream("object") << *i;
}
f->close_section();
}
{
f->open_array_section("recovering");
for (std::map<hobject_t, ObjectContextRef>::const_iterator i = recovering.begin();
i != recovering.end();
++i) {
f->dump_stream("object") << i->first;
}
f->close_section();
}
{
f->open_object_section("pg_backend");
pgbackend->dump_recovery_info(f);
f->close_section();
}
}
/// last backfill operation started
hobject_t last_backfill_started;
bool new_backfill;
int prep_object_replica_pushes(const hobject_t& soid, eversion_t v,
PGBackend::RecoveryHandle *h,
bool *work_started);
int prep_object_replica_deletes(const hobject_t& soid, eversion_t v,
PGBackend::RecoveryHandle *h,
bool *work_started);
void finish_degraded_object(const hobject_t oid);
// Cancels/resets pulls from peer
  void check_recovery_sources(const OSDMapRef& map) override;
int recover_missing(
const hobject_t& oid,
eversion_t v,
int priority,
PGBackend::RecoveryHandle *h);
// low level ops
void _make_clone(
OpContext *ctx,
PGTransaction* t,
ObjectContextRef clone_obc,
const hobject_t& head, const hobject_t& coid,
object_info_t *poi);
void execute_ctx(OpContext *ctx);
void finish_ctx(OpContext *ctx, int log_op_type, int result=0);
void reply_ctx(OpContext *ctx, int err);
void make_writeable(OpContext *ctx);
void log_op_stats(const OpRequest& op, uint64_t inb, uint64_t outb);
void write_update_size_and_usage(object_stat_sum_t& stats, object_info_t& oi,
interval_set<uint64_t>& modified, uint64_t offset,
uint64_t length, bool write_full=false);
inline void truncate_update_size_and_usage(
object_stat_sum_t& delta_stats,
object_info_t& oi,
uint64_t truncate_size);
enum class cache_result_t {
NOOP,
BLOCKED_FULL,
BLOCKED_PROMOTE,
HANDLED_PROXY,
HANDLED_REDIRECT,
REPLIED_WITH_EAGAIN,
BLOCKED_RECOVERY,
};
cache_result_t maybe_handle_cache_detail(OpRequestRef op,
bool write_ordered,
ObjectContextRef obc, int r,
hobject_t missing_oid,
bool must_promote,
bool in_hit_set,
ObjectContextRef *promote_obc);
cache_result_t maybe_handle_manifest_detail(OpRequestRef op,
bool write_ordered,
ObjectContextRef obc);
bool maybe_handle_manifest(OpRequestRef op,
bool write_ordered,
ObjectContextRef obc) {
return cache_result_t::NOOP != maybe_handle_manifest_detail(
op,
write_ordered,
obc);
}
/**
* This helper function is called from do_op if the ObjectContext lookup fails.
* @returns true if the caching code is handling the Op, false otherwise.
*/
bool maybe_handle_cache(OpRequestRef op,
bool write_ordered,
ObjectContextRef obc, int r,
const hobject_t& missing_oid,
bool must_promote,
bool in_hit_set = false) {
return cache_result_t::NOOP != maybe_handle_cache_detail(
op,
write_ordered,
obc,
r,
missing_oid,
must_promote,
in_hit_set,
nullptr);
}
/**
* This helper function checks if a promotion is needed.
*/
bool maybe_promote(ObjectContextRef obc,
const hobject_t& missing_oid,
const object_locator_t& oloc,
bool in_hit_set,
uint32_t recency,
OpRequestRef promote_op,
ObjectContextRef *promote_obc = nullptr);
/**
* This helper function tells the client to redirect their request elsewhere.
*/
void do_cache_redirect(OpRequestRef op);
/**
* This function attempts to start a promote. Either it succeeds,
   * or places op on a wait list. If op is null, failure means that
* this is a noop. If a future user wants to be able to distinguish
* these cases, a return value should be added.
*/
void promote_object(
ObjectContextRef obc, ///< [optional] obc
const hobject_t& missing_object, ///< oid (if !obc)
const object_locator_t& oloc, ///< locator for obc|oid
OpRequestRef op, ///< [optional] client op
ObjectContextRef *promote_obc = nullptr ///< [optional] new obc for object
);
int prepare_transaction(OpContext *ctx);
std::list<std::pair<OpRequestRef, OpContext*> > in_progress_async_reads;
void complete_read_ctx(int result, OpContext *ctx);
// pg on-disk content
void check_local() override;
void _clear_recovery_state() override;
bool start_recovery_ops(
uint64_t max,
ThreadPool::TPHandle &handle, uint64_t *started) override;
uint64_t recover_primary(uint64_t max, ThreadPool::TPHandle &handle);
uint64_t recover_replicas(uint64_t max, ThreadPool::TPHandle &handle,
bool *recovery_started);
hobject_t earliest_peer_backfill() const;
bool all_peer_done() const;
/**
   * @param work_started will be set to true if recover_backfill got anywhere
* @returns the number of operations started
*/
uint64_t recover_backfill(uint64_t max, ThreadPool::TPHandle &handle,
bool *work_started);
/**
* scan a (hash) range of objects in the current pg
*
* @min return at least this many items, unless we are done
* @max return no more than this many items
* @bi.begin first item should be >= this value
   * @bi [out] resulting map of objects to eversion_t's
*/
void scan_range(
int min, int max, BackfillInterval *bi,
ThreadPool::TPHandle &handle
);
/// Update a hash range to reflect changes since the last scan
void update_range(
BackfillInterval *bi, ///< [in,out] interval to update
ThreadPool::TPHandle &handle ///< [in] tp handle
);
int prep_backfill_object_push(
hobject_t oid, eversion_t v, ObjectContextRef obc,
std::vector<pg_shard_t> peers,
PGBackend::RecoveryHandle *h);
void send_remove_op(const hobject_t& oid, eversion_t v, pg_shard_t peer);
class C_OSD_AppliedRecoveredObject;
class C_OSD_CommittedPushedObject;
class C_OSD_AppliedRecoveredObjectReplica;
void _applied_recovered_object(ObjectContextRef obc);
void _applied_recovered_object_replica();
void _committed_pushed_object(epoch_t epoch, eversion_t lc);
void recover_got(hobject_t oid, eversion_t v);
// -- copyfrom --
std::map<hobject_t, CopyOpRef> copy_ops;
int do_copy_get(OpContext *ctx, ceph::buffer::list::const_iterator& bp, OSDOp& op,
ObjectContextRef& obc);
int finish_copy_get();
void fill_in_copy_get_noent(OpRequestRef& op, hobject_t oid,
OSDOp& osd_op);
/**
* To copy an object, call start_copy.
*
* @param cb: The CopyCallback to be activated when the copy is complete
* @param obc: The ObjectContext we are copying into
* @param src: The source object
* @param oloc: the source object locator
* @param version: the version of the source object to copy (0 for any)
*/
void start_copy(CopyCallback *cb, ObjectContextRef obc, hobject_t src,
object_locator_t oloc, version_t version, unsigned flags,
bool mirror_snapset, unsigned src_obj_fadvise_flags,
unsigned dest_obj_fadvise_flags);
void process_copy_chunk(hobject_t oid, ceph_tid_t tid, int r);
void _write_copy_chunk(CopyOpRef cop, PGTransaction *t);
uint64_t get_copy_chunk_size() const {
uint64_t size = cct->_conf->osd_copyfrom_max_chunk;
if (pool.info.required_alignment()) {
uint64_t alignment = pool.info.required_alignment();
if (size % alignment) {
size += alignment - (size % alignment);
}
}
return size;
}
void _copy_some(ObjectContextRef obc, CopyOpRef cop);
void finish_copyfrom(CopyFromCallback *cb);
void finish_promote(int r, CopyResults *results, ObjectContextRef obc);
void cancel_copy(CopyOpRef cop, bool requeue, std::vector<ceph_tid_t> *tids);
void cancel_copy_ops(bool requeue, std::vector<ceph_tid_t> *tids);
friend struct C_Copyfrom;
// -- flush --
std::map<hobject_t, FlushOpRef> flush_ops;
/// start_flush takes ownership of on_flush iff ret == -EINPROGRESS
int start_flush(
OpRequestRef op, ObjectContextRef obc,
bool blocking, hobject_t *pmissing,
std::optional<std::function<void()>> &&on_flush,
bool force_dedup = false);
void finish_flush(hobject_t oid, ceph_tid_t tid, int r);
int try_flush_mark_clean(FlushOpRef fop);
void cancel_flush(FlushOpRef fop, bool requeue, std::vector<ceph_tid_t> *tids);
void cancel_flush_ops(bool requeue, std::vector<ceph_tid_t> *tids);
  /// @return false if the clone has been evicted
bool is_present_clone(hobject_t coid);
friend struct C_Flush;
// -- cls_gather --
std::map<hobject_t, CLSGatherOp> cls_gather_ops;
void cancel_cls_gather(std::map<hobject_t,CLSGatherOp>::iterator iter, bool requeue, std::vector<ceph_tid_t> *tids);
void cancel_cls_gather_ops(bool requeue, std::vector<ceph_tid_t> *tids);
// -- scrub --
bool _range_available_for_scrub(
const hobject_t &begin, const hobject_t &end) override;
void _split_into(pg_t child_pgid, PG *child,
unsigned split_bits) override;
void apply_and_flush_repops(bool requeue);
int do_xattr_cmp_u64(int op, uint64_t v1, ceph::buffer::list& xattr);
int do_xattr_cmp_str(int op, std::string& v1s, ceph::buffer::list& xattr);
// -- checksum --
int do_checksum(OpContext *ctx, OSDOp& osd_op, ceph::buffer::list::const_iterator *bl_it);
int finish_checksum(OSDOp& osd_op, Checksummer::CSumType csum_type,
ceph::buffer::list::const_iterator *init_value_bl_it,
const ceph::buffer::list &read_bl);
friend struct C_ChecksumRead;
int do_extent_cmp(OpContext *ctx, OSDOp& osd_op);
int finish_extent_cmp(OSDOp& osd_op, const ceph::buffer::list &read_bl);
friend struct C_ExtentCmpRead;
int do_read(OpContext *ctx, OSDOp& osd_op);
int do_sparse_read(OpContext *ctx, OSDOp& osd_op);
int do_writesame(OpContext *ctx, OSDOp& osd_op);
bool pgls_filter(const PGLSFilter& filter, const hobject_t& sobj);
std::pair<int, std::unique_ptr<const PGLSFilter>> get_pgls_filter(
ceph::buffer::list::const_iterator& iter);
std::map<hobject_t, std::list<OpRequestRef>> in_progress_proxy_ops;
void kick_proxy_ops_blocked(hobject_t& soid);
void cancel_proxy_ops(bool requeue, std::vector<ceph_tid_t> *tids);
// -- proxyread --
std::map<ceph_tid_t, ProxyReadOpRef> proxyread_ops;
void do_proxy_read(OpRequestRef op, ObjectContextRef obc = NULL);
void finish_proxy_read(hobject_t oid, ceph_tid_t tid, int r);
void cancel_proxy_read(ProxyReadOpRef prdop, std::vector<ceph_tid_t> *tids);
friend struct C_ProxyRead;
// -- proxywrite --
std::map<ceph_tid_t, ProxyWriteOpRef> proxywrite_ops;
void do_proxy_write(OpRequestRef op, ObjectContextRef obc = NULL);
void finish_proxy_write(hobject_t oid, ceph_tid_t tid, int r);
void cancel_proxy_write(ProxyWriteOpRef pwop, std::vector<ceph_tid_t> *tids);
friend struct C_ProxyWrite_Commit;
// -- chunkop --
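  // reference-count operations issued against dedup/manifest target (chunk) objects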
enum class refcount_t {
INCREMENT_REF,
DECREMENT_REF,
CREATE_OR_GET_REF,
};
void do_proxy_chunked_op(OpRequestRef op, const hobject_t& missing_oid,
ObjectContextRef obc, bool write_ordered);
void do_proxy_chunked_read(OpRequestRef op, ObjectContextRef obc, int op_index,
uint64_t chunk_index, uint64_t req_offset, uint64_t req_length,
uint64_t req_total_len, bool write_ordered);
bool can_proxy_chunked_read(OpRequestRef op, ObjectContextRef obc);
void _copy_some_manifest(ObjectContextRef obc, CopyOpRef cop, uint64_t start_offset);
void process_copy_chunk_manifest(hobject_t oid, ceph_tid_t tid, int r, uint64_t offset);
void finish_promote_manifest(int r, CopyResults *results, ObjectContextRef obc);
void cancel_and_requeue_proxy_ops(hobject_t oid);
void cancel_manifest_ops(bool requeue, std::vector<ceph_tid_t> *tids);
ceph_tid_t refcount_manifest(hobject_t src_soid, hobject_t tgt_soid, refcount_t type,
Context *cb, std::optional<bufferlist> chunk);
void dec_all_refcount_manifest(const object_info_t& oi, OpContext* ctx);
void dec_refcount(const hobject_t& soid, const object_ref_delta_t& refs);
void update_chunk_map_by_dirty(OpContext* ctx);
void dec_refcount_by_dirty(OpContext* ctx);
ObjectContextRef get_prev_clone_obc(ObjectContextRef obc);
bool recover_adjacent_clones(ObjectContextRef obc, OpRequestRef op);
void get_adjacent_clones(ObjectContextRef src_obc,
ObjectContextRef& _l, ObjectContextRef& _g);
bool inc_refcount_by_set(OpContext* ctx, object_manifest_t& tgt,
OSDOp& osd_op);
int do_cdc(const object_info_t& oi, std::map<uint64_t, chunk_info_t>& chunk_map,
std::map<uint64_t, bufferlist>& chunks);
int start_dedup(OpRequestRef op, ObjectContextRef obc);
std::pair<int, hobject_t> get_fpoid_from_chunk(const hobject_t soid, bufferlist& chunk);
int finish_set_dedup(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset);
int finish_set_manifest_refcount(hobject_t oid, int r, ceph_tid_t tid, uint64_t offset);
friend struct C_ProxyChunkRead;
friend class PromoteManifestCallback;
friend struct C_CopyChunk;
friend struct RefCountCallback;
friend struct C_SetDedupChunks;
friend struct C_SetManifestRefCountDone;
friend struct SetManifestFinisher;
public:
PrimaryLogPG(OSDService *o, OSDMapRef curmap,
const PGPool &_pool,
const std::map<std::string,std::string>& ec_profile,
spg_t p);
~PrimaryLogPG() override;
void do_command(
const std::string_view& prefix,
const cmdmap_t& cmdmap,
const ceph::buffer::list& idata,
std::function<void(int,const std::string&,ceph::buffer::list&)> on_finish) override;
void clear_cache() override;
int get_cache_obj_count() override {
return object_contexts.get_count();
}
unsigned get_pg_shard() const {
return info.pgid.hash_to_shard(osd->get_num_shards());
}
void do_request(
OpRequestRef& op,
ThreadPool::TPHandle &handle) override;
void do_op(OpRequestRef& op);
void record_write_error(OpRequestRef op, const hobject_t &soid,
MOSDOpReply *orig_reply, int r,
OpContext *ctx_for_op_returns=nullptr);
void do_pg_op(OpRequestRef op);
void do_scan(
OpRequestRef op,
ThreadPool::TPHandle &handle);
void do_backfill(OpRequestRef op);
void do_backfill_remove(OpRequestRef op);
void handle_backoff(OpRequestRef& op);
int trim_object(bool first, const hobject_t &coid, snapid_t snap_to_trim,
OpContextUPtr *ctxp);
void snap_trimmer(epoch_t e) override;
void kick_snap_trim() override;
void snap_trimmer_scrub_complete() override;
int do_osd_ops(OpContext *ctx, std::vector<OSDOp>& ops);
int _get_tmap(OpContext *ctx, ceph::buffer::list *header, ceph::buffer::list *vals);
int do_tmap2omap(OpContext *ctx, unsigned flags);
int do_tmapup(OpContext *ctx, ceph::buffer::list::const_iterator& bp, OSDOp& osd_op);
int do_tmapup_slow(OpContext *ctx, ceph::buffer::list::const_iterator& bp, OSDOp& osd_op, ceph::buffer::list& bl);
void do_osd_op_effects(OpContext *ctx, const ConnectionRef& conn);
int start_cls_gather(OpContext *ctx, std::map<std::string, bufferlist> *src_objs, const std::string& pool,
const char *cls, const char *method, bufferlist& inbl);
private:
int do_scrub_ls(const MOSDOp *op, OSDOp *osd_op);
bool check_src_targ(const hobject_t& soid, const hobject_t& toid) const;
uint64_t temp_seq; ///< last id for naming temp objects
/// generate a new temp object name
hobject_t generate_temp_object(const hobject_t& target);
/// generate a new temp object name (for recovery)
hobject_t get_temp_recovery_object(const hobject_t& target,
eversion_t version) override;
public:
coll_t get_coll() {
return coll;
}
void split_colls(
spg_t child,
int split_bits,
int seed,
const pg_pool_t *pool,
ObjectStore::Transaction &t) override {
coll_t target = coll_t(child);
create_pg_collection(t, child, split_bits);
t.split_collection(
coll,
split_bits,
seed,
target);
init_pg_ondisk(t, child, pool);
}
private:
struct DoSnapWork : boost::statechart::event< DoSnapWork > {
DoSnapWork() : boost::statechart::event < DoSnapWork >() {}
};
struct KickTrim : boost::statechart::event< KickTrim > {
KickTrim() : boost::statechart::event < KickTrim >() {}
};
struct RepopsComplete : boost::statechart::event< RepopsComplete > {
RepopsComplete() : boost::statechart::event < RepopsComplete >() {}
};
struct ScrubComplete : boost::statechart::event< ScrubComplete > {
ScrubComplete() : boost::statechart::event < ScrubComplete >() {}
};
struct TrimWriteUnblocked : boost::statechart::event< TrimWriteUnblocked > {
TrimWriteUnblocked() : boost::statechart::event < TrimWriteUnblocked >() {}
};
struct Reset : boost::statechart::event< Reset > {
Reset() : boost::statechart::event< Reset >() {}
};
struct SnapTrimReserved : boost::statechart::event< SnapTrimReserved > {
SnapTrimReserved() : boost::statechart::event< SnapTrimReserved >() {}
};
struct SnapTrimTimerReady : boost::statechart::event< SnapTrimTimerReady > {
SnapTrimTimerReady() : boost::statechart::event< SnapTrimTimerReady >() {}
};
struct NotTrimming;
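  /*
   * SnapTrimmer state machine overview: NotTrimming reacts to KickTrim by
   * entering Trimming (or WaitScrub while a scrub is queued or active).
   * Trimming starts in WaitReservation; once the OSD-wide snap reservation
   * is granted it cycles AwaitAsyncWork -> WaitRepops -> WaitTrimTimer
   * (detouring through WaitRWLock when an object is write-locked) until the
   * snap is fully trimmed, then falls back to NotTrimming.  Leaving Trimming
   * for any reason (including a Reset event) cancels the reservation.
   */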
struct SnapTrimmer : public boost::statechart::state_machine< SnapTrimmer, NotTrimming > {
PrimaryLogPG *pg;
explicit SnapTrimmer(PrimaryLogPG *pg) : pg(pg) {}
void log_enter(const char *state_name);
void log_exit(const char *state_name, utime_t duration);
bool permit_trim();
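    // can_trim() additionally requires that the osdmap NOSNAPTRIM flag is clear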
bool can_trim() {
return
permit_trim() &&
!pg->get_osdmap()->test_flag(CEPH_OSDMAP_NOSNAPTRIM);
}
} snap_trimmer_machine;
struct WaitReservation;
struct Trimming : boost::statechart::state< Trimming, SnapTrimmer, WaitReservation >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< KickTrim >,
boost::statechart::transition< Reset, NotTrimming >
> reactions;
std::set<hobject_t> in_flight;
snapid_t snap_to_trim;
explicit Trimming(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "Trimming") {
context< SnapTrimmer >().log_enter(state_name);
ceph_assert(context< SnapTrimmer >().permit_trim());
ceph_assert(in_flight.empty());
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
auto *pg = context< SnapTrimmer >().pg;
pg->osd->snap_reserver.cancel_reservation(pg->get_pgid());
pg->state_clear(PG_STATE_SNAPTRIM);
pg->publish_stats_to_osd();
}
boost::statechart::result react(const KickTrim&) {
return discard_event();
}
};
/* SnapTrimmerStates */
struct WaitTrimTimer : boost::statechart::state< WaitTrimTimer, Trimming >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< SnapTrimTimerReady >
> reactions;
Context *wakeup = nullptr;
explicit WaitTrimTimer(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "Trimming/WaitTrimTimer") {
context< SnapTrimmer >().log_enter(state_name);
ceph_assert(context<Trimming>().in_flight.empty());
struct OnTimer : Context {
PrimaryLogPGRef pg;
epoch_t epoch;
OnTimer(PrimaryLogPGRef pg, epoch_t epoch) : pg(pg), epoch(epoch) {}
void finish(int) override {
pg->lock();
if (!pg->pg_has_reset_since(epoch))
pg->snap_trimmer_machine.process_event(SnapTrimTimerReady());
pg->unlock();
}
};
auto *pg = context< SnapTrimmer >().pg;
float osd_snap_trim_sleep = pg->osd->osd->get_osd_snap_trim_sleep();
if (osd_snap_trim_sleep > 0) {
std::lock_guard l(pg->osd->sleep_lock);
wakeup = pg->osd->sleep_timer.add_event_after(
osd_snap_trim_sleep,
new OnTimer{pg, pg->get_osdmap_epoch()});
} else {
post_event(SnapTrimTimerReady());
}
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
auto *pg = context< SnapTrimmer >().pg;
if (wakeup) {
std::lock_guard l(pg->osd->sleep_lock);
pg->osd->sleep_timer.cancel_event(wakeup);
wakeup = nullptr;
}
}
boost::statechart::result react(const SnapTrimTimerReady &) {
wakeup = nullptr;
if (!context< SnapTrimmer >().can_trim()) {
post_event(KickTrim());
return transit< NotTrimming >();
} else {
return transit< AwaitAsyncWork >();
}
}
};
struct WaitRWLock : boost::statechart::state< WaitRWLock, Trimming >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< TrimWriteUnblocked >
> reactions;
explicit WaitRWLock(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "Trimming/WaitRWLock") {
context< SnapTrimmer >().log_enter(state_name);
ceph_assert(context<Trimming>().in_flight.empty());
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
}
boost::statechart::result react(const TrimWriteUnblocked&) {
if (!context< SnapTrimmer >().can_trim()) {
post_event(KickTrim());
return transit< NotTrimming >();
} else {
return transit< AwaitAsyncWork >();
}
}
};
struct WaitRepops : boost::statechart::state< WaitRepops, Trimming >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< RepopsComplete >
> reactions;
explicit WaitRepops(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "Trimming/WaitRepops") {
context< SnapTrimmer >().log_enter(state_name);
ceph_assert(!context<Trimming>().in_flight.empty());
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
}
boost::statechart::result react(const RepopsComplete&) {
if (!context< SnapTrimmer >().can_trim()) {
post_event(KickTrim());
return transit< NotTrimming >();
} else {
return transit< WaitTrimTimer >();
}
}
};
struct AwaitAsyncWork : boost::statechart::state< AwaitAsyncWork, Trimming >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< DoSnapWork >
> reactions;
explicit AwaitAsyncWork(my_context ctx);
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
}
boost::statechart::result react(const DoSnapWork&);
};
struct WaitReservation : boost::statechart::state< WaitReservation, Trimming >, NamedState {
/* WaitReservation is a sub-state of trimming simply so that exiting Trimming
* always cancels the reservation */
typedef boost::mpl::list <
boost::statechart::custom_reaction< SnapTrimReserved >
> reactions;
struct ReservationCB : public Context {
PrimaryLogPGRef pg;
bool canceled;
explicit ReservationCB(PrimaryLogPG *pg) : pg(pg), canceled(false) {}
void finish(int) override {
pg->lock();
if (!canceled)
pg->snap_trimmer_machine.process_event(SnapTrimReserved());
pg->unlock();
}
void cancel() {
ceph_assert(pg->is_locked());
ceph_assert(!canceled);
canceled = true;
}
};
ReservationCB *pending = nullptr;
explicit WaitReservation(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "Trimming/WaitReservation") {
context< SnapTrimmer >().log_enter(state_name);
ceph_assert(context<Trimming>().in_flight.empty());
auto *pg = context< SnapTrimmer >().pg;
pending = new ReservationCB(pg);
pg->osd->snap_reserver.request_reservation(
pg->get_pgid(),
pending,
0);
pg->state_set(PG_STATE_SNAPTRIM_WAIT);
pg->publish_stats_to_osd();
}
boost::statechart::result react(const SnapTrimReserved&);
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
if (pending)
pending->cancel();
pending = nullptr;
auto *pg = context< SnapTrimmer >().pg;
pg->state_clear(PG_STATE_SNAPTRIM_WAIT);
pg->state_clear(PG_STATE_SNAPTRIM_ERROR);
pg->publish_stats_to_osd();
}
};
struct WaitScrub : boost::statechart::state< WaitScrub, SnapTrimmer >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< ScrubComplete >,
boost::statechart::custom_reaction< KickTrim >,
boost::statechart::transition< Reset, NotTrimming >
> reactions;
explicit WaitScrub(my_context ctx)
: my_base(ctx),
NamedState(nullptr, "WaitScrub") {
context< SnapTrimmer >().log_enter(state_name);
}
void exit() {
context< SnapTrimmer >().log_exit(state_name, enter_time);
}
boost::statechart::result react(const ScrubComplete&) {
post_event(KickTrim());
return transit< NotTrimming >();
}
boost::statechart::result react(const KickTrim&) {
return discard_event();
}
};
struct NotTrimming : boost::statechart::state< NotTrimming, SnapTrimmer >, NamedState {
typedef boost::mpl::list <
boost::statechart::custom_reaction< KickTrim >,
boost::statechart::transition< Reset, NotTrimming >
> reactions;
explicit NotTrimming(my_context ctx);
void exit();
boost::statechart::result react(const KickTrim&);
};
int _verify_no_head_clones(const hobject_t& soid,
const SnapSet& ss);
// return true if we're creating a local object, false for a
// whiteout or no change.
void maybe_create_new_object(OpContext *ctx, bool ignore_transaction=false);
int _delete_oid(OpContext *ctx, bool no_whiteout, bool try_no_whiteout);
int _rollback_to(OpContext *ctx, OSDOp& op);
void _do_rollback_to(OpContext *ctx, ObjectContextRef rollback_to,
OSDOp& op);
public:
bool is_missing_object(const hobject_t& oid) const;
bool is_unreadable_object(const hobject_t &oid) const {
return is_missing_object(oid) ||
!recovery_state.get_missing_loc().readable_with_acting(
oid, get_actingset());
}
void maybe_kick_recovery(const hobject_t &soid);
void wait_for_unreadable_object(const hobject_t& oid, OpRequestRef op);
int get_manifest_ref_count(ObjectContextRef obc, std::string& fp_oid, OpRequestRef op);
bool check_laggy(OpRequestRef& op);
bool check_laggy_requeue(OpRequestRef& op);
void recheck_readable() override;
bool is_backfill_target(pg_shard_t osd) const {
return recovery_state.is_backfill_target(osd);
}
const std::set<pg_shard_t> &get_backfill_targets() const {
return recovery_state.get_backfill_targets();
}
bool is_async_recovery_target(pg_shard_t peer) const {
return recovery_state.is_async_recovery_target(peer);
}
const std::set<pg_shard_t> &get_async_recovery_targets() const {
return recovery_state.get_async_recovery_targets();
}
bool is_degraded_or_backfilling_object(const hobject_t& oid);
bool is_degraded_on_async_recovery_target(const hobject_t& soid);
void wait_for_degraded_object(const hobject_t& oid, OpRequestRef op);
void block_write_on_full_cache(
const hobject_t& oid, OpRequestRef op);
void block_for_clean(
const hobject_t& oid, OpRequestRef op);
void block_write_on_snap_rollback(
const hobject_t& oid, ObjectContextRef obc, OpRequestRef op);
void block_write_on_degraded_snap(const hobject_t& oid, OpRequestRef op);
bool maybe_await_blocked_head(const hobject_t &soid, OpRequestRef op);
void wait_for_blocked_object(const hobject_t& soid, OpRequestRef op);
void kick_object_context_blocked(ObjectContextRef obc);
void requeue_op_blocked_by_object(const hobject_t &soid);
void maybe_force_recovery();
void mark_all_unfound_lost(
int what,
std::function<void(int,const std::string&,ceph::buffer::list&)> on_finish);
eversion_t pick_newest_available(const hobject_t& oid);
void do_update_log_missing(
OpRequestRef &op);
void do_update_log_missing_reply(
OpRequestRef &op);
void plpg_on_role_change() override;
void plpg_on_pool_change() override;
void clear_async_reads();
void on_change(ObjectStore::Transaction &t) override;
void on_activate_complete() override;
void on_flushed() override;
void on_removal(ObjectStore::Transaction &t) override;
void on_shutdown() override;
bool check_failsafe_full() override;
bool maybe_preempt_replica_scrub(const hobject_t& oid) override;
int rep_repair_primary_object(const hobject_t& soid, OpContext *ctx);
// attr cache handling
void setattr_maybe_cache(
ObjectContextRef obc,
PGTransaction *t,
const std::string &key,
ceph::buffer::list &val);
void setattrs_maybe_cache(
ObjectContextRef obc,
PGTransaction *t,
std::map<std::string, ceph::buffer::list, std::less<>> &attrs);
void rmattr_maybe_cache(
ObjectContextRef obc,
PGTransaction *t,
const std::string &key);
/**
* getattr_maybe_cache
*
* Populates val (if non-null) with the value of the attr with the specified key.
   * Returns -ENOENT if the object does not exist, or -ENODATA if the object
   * exists but the specified key does not.
*/
int getattr_maybe_cache(
ObjectContextRef obc,
const std::string &key,
ceph::buffer::list *val);
int getattrs_maybe_cache(
ObjectContextRef obc,
std::map<std::string, ceph::buffer::list, std::less<>> *out);
public:
void set_dynamic_perf_stats_queries(
const std::list<OSDPerfMetricQuery> &queries) override;
void get_dynamic_perf_stats(DynamicPerfStats *stats) override;
private:
DynamicPerfStats m_dynamic_perf_stats;
};
inline ostream& operator<<(ostream& out, const PrimaryLogPG::RepGather& repop)
{
out << "repgather(" << &repop
<< " " << repop.v
<< " rep_tid=" << repop.rep_tid
<< " committed?=" << repop.all_committed
<< " r=" << repop.r
<< ")";
return out;
}
inline ostream& operator<<(ostream& out,
const PrimaryLogPG::ProxyWriteOpRef& pwop)
{
out << "proxywrite(" << &pwop
<< " " << pwop->user_version
<< " pwop_tid=" << pwop->objecter_tid;
if (pwop->ctx->op)
out << " op=" << *(pwop->ctx->op->get_req());
out << ")";
return out;
}
void intrusive_ptr_add_ref(PrimaryLogPG::RepGather *repop);
void intrusive_ptr_release(PrimaryLogPG::RepGather *repop);
#endif
| 64,625 | 31.922058 | 118 | h |
null | ceph-main/src/osd/ReplicatedBackend.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/errno.h"
#include "ReplicatedBackend.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDRepOp.h"
#include "messages/MOSDRepOpReply.h"
#include "messages/MOSDPGPush.h"
#include "messages/MOSDPGPull.h"
#include "messages/MOSDPGPushReply.h"
#include "common/EventTrace.h"
#include "include/random.h"
#include "include/util.h"
#include "OSD.h"
#include "osd_tracer.h"
#define dout_context cct
#define dout_subsys ceph_subsys_osd
#define DOUT_PREFIX_ARGS this
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
static ostream& _prefix(std::ostream *_dout, ReplicatedBackend *pgb) {
return pgb->get_parent()->gen_dbg_prefix(*_dout);
}
using std::less;
using std::list;
using std::make_pair;
using std::map;
using std::ostringstream;
using std::set;
using std::pair;
using std::string;
using std::unique_ptr;
using std::vector;
using ceph::bufferhash;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
namespace {
class PG_SendMessageOnConn: public Context {
PGBackend::Listener *pg;
Message *reply;
ConnectionRef conn;
public:
PG_SendMessageOnConn(
PGBackend::Listener *pg,
Message *reply,
ConnectionRef conn) : pg(pg), reply(reply), conn(conn) {}
void finish(int) override {
pg->send_message_osd_cluster(MessageRef(reply, false), conn.get());
}
};
class PG_RecoveryQueueAsync : public Context {
PGBackend::Listener *pg;
unique_ptr<GenContext<ThreadPool::TPHandle&>> c;
uint64_t cost;
public:
PG_RecoveryQueueAsync(
PGBackend::Listener *pg,
GenContext<ThreadPool::TPHandle&> *c,
uint64_t cost) : pg(pg), c(c), cost(cost) {}
void finish(int) override {
pg->schedule_recovery_work(c.release(), cost);
}
};
}
struct ReplicatedBackend::C_OSD_RepModifyCommit : public Context {
ReplicatedBackend *pg;
RepModifyRef rm;
C_OSD_RepModifyCommit(ReplicatedBackend *pg, RepModifyRef r)
: pg(pg), rm(r) {}
void finish(int r) override {
pg->repop_commit(rm);
}
};
static void log_subop_stats(
PerfCounters *logger,
OpRequestRef op, int subop)
{
utime_t latency = ceph_clock_now();
latency -= op->get_req()->get_recv_stamp();
logger->inc(l_osd_sop);
logger->tinc(l_osd_sop_lat, latency);
logger->inc(subop);
if (subop != l_osd_sop_pull) {
uint64_t inb = op->get_req()->get_data().length();
logger->inc(l_osd_sop_inb, inb);
if (subop == l_osd_sop_w) {
logger->inc(l_osd_sop_w_inb, inb);
logger->tinc(l_osd_sop_w_lat, latency);
} else if (subop == l_osd_sop_push) {
logger->inc(l_osd_sop_push_inb, inb);
logger->tinc(l_osd_sop_push_lat, latency);
} else
ceph_abort_msg("no support subop");
} else {
logger->tinc(l_osd_sop_pull_lat, latency);
}
}
ReplicatedBackend::ReplicatedBackend(
PGBackend::Listener *pg,
const coll_t &coll,
ObjectStore::CollectionHandle &c,
ObjectStore *store,
CephContext *cct) :
PGBackend(cct, pg, store, coll, c) {}
void ReplicatedBackend::run_recovery_op(
PGBackend::RecoveryHandle *_h,
int priority)
{
RPGHandle *h = static_cast<RPGHandle *>(_h);
send_pushes(priority, h->pushes);
send_pulls(priority, h->pulls);
send_recovery_deletes(priority, h->deletes);
delete h;
}
int ReplicatedBackend::recover_object(
const hobject_t &hoid,
eversion_t v,
ObjectContextRef head,
ObjectContextRef obc,
RecoveryHandle *_h
)
{
dout(10) << __func__ << ": " << hoid << dendl;
RPGHandle *h = static_cast<RPGHandle *>(_h);
if (get_parent()->get_local_missing().is_missing(hoid)) {
ceph_assert(!obc);
// pull
prepare_pull(
v,
hoid,
head,
h);
} else {
ceph_assert(obc);
int started = start_pushes(
hoid,
obc,
h);
if (started < 0) {
pushing[hoid].clear();
return started;
}
}
return 0;
}
void ReplicatedBackend::check_recovery_sources(const OSDMapRef& osdmap)
{
for(map<pg_shard_t, set<hobject_t> >::iterator i = pull_from_peer.begin();
i != pull_from_peer.end();
) {
if (osdmap->is_down(i->first.osd)) {
dout(10) << "check_recovery_sources resetting pulls from osd." << i->first
<< ", osdmap has it marked down" << dendl;
for (set<hobject_t>::iterator j = i->second.begin();
j != i->second.end();
++j) {
get_parent()->cancel_pull(*j);
clear_pull(pulling.find(*j), false);
}
pull_from_peer.erase(i++);
} else {
++i;
}
}
}
bool ReplicatedBackend::can_handle_while_inactive(OpRequestRef op)
{
dout(10) << __func__ << ": " << *op->get_req() << dendl;
switch (op->get_req()->get_type()) {
case MSG_OSD_PG_PULL:
return true;
default:
return false;
}
}
bool ReplicatedBackend::_handle_message(
OpRequestRef op
)
{
dout(10) << __func__ << ": " << *op->get_req() << dendl;
switch (op->get_req()->get_type()) {
case MSG_OSD_PG_PUSH:
do_push(op);
return true;
case MSG_OSD_PG_PULL:
do_pull(op);
return true;
case MSG_OSD_PG_PUSH_REPLY:
do_push_reply(op);
return true;
case MSG_OSD_REPOP: {
do_repop(op);
return true;
}
case MSG_OSD_REPOPREPLY: {
do_repop_reply(op);
return true;
}
default:
break;
}
return false;
}
void ReplicatedBackend::clear_recovery_state()
{
// clear pushing/pulling maps
for (auto &&i: pushing) {
for (auto &&j: i.second) {
get_parent()->release_locks(j.second.lock_manager);
}
}
pushing.clear();
for (auto &&i: pulling) {
get_parent()->release_locks(i.second.lock_manager);
}
pulling.clear();
pull_from_peer.clear();
}
void ReplicatedBackend::on_change()
{
dout(10) << __func__ << dendl;
for (auto& op : in_progress_ops) {
delete op.second->on_commit;
op.second->on_commit = nullptr;
}
in_progress_ops.clear();
clear_recovery_state();
}
int ReplicatedBackend::objects_read_sync(
const hobject_t &hoid,
uint64_t off,
uint64_t len,
uint32_t op_flags,
bufferlist *bl)
{
return store->read(ch, ghobject_t(hoid), off, len, *bl, op_flags);
}
int ReplicatedBackend::objects_readv_sync(
const hobject_t &hoid,
map<uint64_t, uint64_t>&& m,
uint32_t op_flags,
bufferlist *bl)
{
interval_set<uint64_t> im(std::move(m));
auto r = store->readv(ch, ghobject_t(hoid), im, *bl, op_flags);
if (r >= 0) {
m = std::move(im).detach();
}
return r;
}
void ReplicatedBackend::objects_read_async(
const hobject_t &hoid,
const list<pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
pair<bufferlist*, Context*> > > &to_read,
Context *on_complete,
bool fast_read)
{
ceph_abort_msg("async read is not used by replica pool");
}
class C_OSD_OnOpCommit : public Context {
ReplicatedBackend *pg;
ceph::ref_t<ReplicatedBackend::InProgressOp> op;
public:
C_OSD_OnOpCommit(ReplicatedBackend *pg, ceph::ref_t<ReplicatedBackend::InProgressOp> op)
: pg(pg), op(std::move(op)) {}
void finish(int) override {
pg->op_commit(op);
}
};
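// Flatten a PGTransaction into a plain ObjectStore::Transaction for a
// replicated pool: log entries are marked unrollbackable (encoding any
// updated snaps into the entry), and temp objects created or deleted along
// the way are reported back through *added and *removed.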
void generate_transaction(
PGTransactionUPtr &pgt,
const coll_t &coll,
vector<pg_log_entry_t> &log_entries,
ObjectStore::Transaction *t,
set<hobject_t> *added,
set<hobject_t> *removed,
const ceph_release_t require_osd_release = ceph_release_t::unknown )
{
ceph_assert(t);
ceph_assert(added);
ceph_assert(removed);
for (auto &&le: log_entries) {
le.mark_unrollbackable();
auto oiter = pgt->op_map.find(le.soid);
if (oiter != pgt->op_map.end() && oiter->second.updated_snaps) {
bufferlist bl(oiter->second.updated_snaps->second.size() * 8 + 8);
encode(oiter->second.updated_snaps->second, bl);
le.snaps.swap(bl);
le.snaps.reassign_to_mempool(mempool::mempool_osd_pglog);
}
}
pgt->safe_create_traverse(
[&](pair<const hobject_t, PGTransaction::ObjectOperation> &obj_op) {
const hobject_t &oid = obj_op.first;
const ghobject_t goid =
ghobject_t(oid, ghobject_t::NO_GEN, shard_id_t::NO_SHARD);
const PGTransaction::ObjectOperation &op = obj_op.second;
if (oid.is_temp()) {
if (op.is_fresh_object()) {
added->insert(oid);
} else if (op.is_delete()) {
removed->insert(oid);
}
}
if (op.delete_first) {
t->remove(coll, goid);
}
match(
op.init_type,
[&](const PGTransaction::ObjectOperation::Init::None &) {
},
[&](const PGTransaction::ObjectOperation::Init::Create &op) {
if (require_osd_release >= ceph_release_t::octopus) {
t->create(coll, goid);
} else {
t->touch(coll, goid);
}
},
[&](const PGTransaction::ObjectOperation::Init::Clone &op) {
t->clone(
coll,
ghobject_t(
op.source, ghobject_t::NO_GEN, shard_id_t::NO_SHARD),
goid);
},
[&](const PGTransaction::ObjectOperation::Init::Rename &op) {
ceph_assert(op.source.is_temp());
t->collection_move_rename(
coll,
ghobject_t(
op.source, ghobject_t::NO_GEN, shard_id_t::NO_SHARD),
coll,
goid);
});
if (op.truncate) {
t->truncate(coll, goid, op.truncate->first);
if (op.truncate->first != op.truncate->second)
t->truncate(coll, goid, op.truncate->second);
}
if (!op.attr_updates.empty()) {
map<string, bufferlist, less<>> attrs;
for (auto &&p: op.attr_updates) {
if (p.second)
attrs[p.first] = *(p.second);
else
t->rmattr(coll, goid, p.first);
}
t->setattrs(coll, goid, attrs);
}
if (op.clear_omap)
t->omap_clear(coll, goid);
if (op.omap_header)
t->omap_setheader(coll, goid, *(op.omap_header));
for (auto &&up: op.omap_updates) {
using UpdateType = PGTransaction::ObjectOperation::OmapUpdateType;
switch (up.first) {
case UpdateType::Remove:
t->omap_rmkeys(coll, goid, up.second);
break;
case UpdateType::Insert:
t->omap_setkeys(coll, goid, up.second);
break;
case UpdateType::RemoveRange:
t->omap_rmkeyrange(coll, goid, up.second);
break;
}
}
// updated_snaps doesn't matter since we marked unrollbackable
if (op.alloc_hint) {
auto &hint = *(op.alloc_hint);
t->set_alloc_hint(
coll,
goid,
hint.expected_object_size,
hint.expected_write_size,
hint.flags);
}
for (auto &&extent: op.buffer_updates) {
using BufferUpdate = PGTransaction::ObjectOperation::BufferUpdate;
match(
extent.get_val(),
[&](const BufferUpdate::Write &op) {
t->write(
coll,
goid,
extent.get_off(),
extent.get_len(),
op.buffer,
op.fadvise_flags);
},
[&](const BufferUpdate::Zero &op) {
t->zero(
coll,
goid,
extent.get_off(),
extent.get_len());
},
[&](const BufferUpdate::CloneRange &op) {
ceph_assert(op.len == extent.get_len());
t->clone_range(
coll,
ghobject_t(op.from, ghobject_t::NO_GEN, shard_id_t::NO_SHARD),
goid,
op.offset,
extent.get_len(),
extent.get_off());
});
}
});
}
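// Primary-side entry point for a write: build the local store transaction,
// record an InProgressOp keyed by tid with all acting/recovery/backfill
// shards pending commit, replicate the op via issue_op(), and queue the
// local transaction with an on-commit callback that completes the op.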
void ReplicatedBackend::submit_transaction(
const hobject_t &soid,
const object_stat_sum_t &delta_stats,
const eversion_t &at_version,
PGTransactionUPtr &&_t,
const eversion_t &trim_to,
const eversion_t &min_last_complete_ondisk,
vector<pg_log_entry_t>&& _log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
Context *on_all_commit,
ceph_tid_t tid,
osd_reqid_t reqid,
OpRequestRef orig_op)
{
parent->apply_stats(
soid,
delta_stats);
vector<pg_log_entry_t> log_entries(_log_entries);
ObjectStore::Transaction op_t;
PGTransactionUPtr t(std::move(_t));
set<hobject_t> added, removed;
generate_transaction(
t,
coll,
log_entries,
&op_t,
&added,
&removed,
get_osdmap()->require_osd_release);
ceph_assert(added.size() <= 1);
ceph_assert(removed.size() <= 1);
auto insert_res = in_progress_ops.insert(
make_pair(
tid,
ceph::make_ref<InProgressOp>(
tid, on_all_commit,
orig_op, at_version)
)
);
ceph_assert(insert_res.second);
InProgressOp &op = *insert_res.first->second;
op.waiting_for_commit.insert(
parent->get_acting_recovery_backfill_shards().begin(),
parent->get_acting_recovery_backfill_shards().end());
issue_op(
soid,
at_version,
tid,
reqid,
trim_to,
min_last_complete_ondisk,
added.size() ? *(added.begin()) : hobject_t(),
removed.size() ? *(removed.begin()) : hobject_t(),
log_entries,
hset_history,
&op,
op_t);
add_temp_objs(added);
clear_temp_objs(removed);
parent->log_operation(
std::move(log_entries),
hset_history,
trim_to,
at_version,
min_last_complete_ondisk,
true,
op_t);
op_t.register_on_commit(
parent->bless_context(
new C_OSD_OnOpCommit(this, &op)));
vector<ObjectStore::Transaction> tls;
tls.push_back(std::move(op_t));
parent->queue_transactions(tls, op.op);
if (at_version != eversion_t()) {
parent->op_applied(at_version);
}
}
void ReplicatedBackend::op_commit(const ceph::ref_t<InProgressOp>& op)
{
if (op->on_commit == nullptr) {
// aborted
return;
}
FUNCTRACE(cct);
OID_EVENT_TRACE_WITH_MSG((op && op->op) ? op->op->get_req() : NULL, "OP_COMMIT_BEGIN", true);
dout(10) << __func__ << ": " << op->tid << dendl;
if (op->op) {
op->op->mark_event("op_commit");
op->op->pg_trace.event("op commit");
}
op->waiting_for_commit.erase(get_parent()->whoami_shard());
if (op->waiting_for_commit.empty()) {
op->on_commit->complete(0);
op->on_commit = 0;
in_progress_ops.erase(op->tid);
}
}
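// Handle a replica's MOSDRepOpReply: drop the replying shard from the
// matching InProgressOp's waiting_for_commit set and fire on_commit once
// every shard (including ourselves) has committed.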
void ReplicatedBackend::do_repop_reply(OpRequestRef op)
{
static_cast<MOSDRepOpReply*>(op->get_nonconst_req())->finish_decode();
auto r = op->get_req<MOSDRepOpReply>();
ceph_assert(r->get_header().type == MSG_OSD_REPOPREPLY);
op->mark_started();
// must be replication.
ceph_tid_t rep_tid = r->get_tid();
pg_shard_t from = r->from;
auto iter = in_progress_ops.find(rep_tid);
if (iter != in_progress_ops.end()) {
InProgressOp &ip_op = *iter->second;
const MOSDOp *m = nullptr;
if (ip_op.op)
m = ip_op.op->get_req<MOSDOp>();
if (m)
dout(7) << __func__ << ": tid " << ip_op.tid << " op " //<< *m
<< " ack_type " << (int)r->ack_type
<< " from " << from
<< dendl;
else
dout(7) << __func__ << ": tid " << ip_op.tid << " (no op) "
<< " ack_type " << (int)r->ack_type
<< " from " << from
<< dendl;
// oh, good.
if (r->ack_type & CEPH_OSD_FLAG_ONDISK) {
ceph_assert(ip_op.waiting_for_commit.count(from));
ip_op.waiting_for_commit.erase(from);
if (ip_op.op) {
ip_op.op->mark_event("sub_op_commit_rec");
ip_op.op->pg_trace.event("sub_op_commit_rec");
}
} else {
// legacy peer; ignore
}
parent->update_peer_last_complete_ondisk(
from,
r->get_last_complete_ondisk());
if (ip_op.waiting_for_commit.empty() &&
ip_op.on_commit) {
ip_op.on_commit->complete(0);
ip_op.on_commit = 0;
in_progress_ops.erase(iter);
}
}
}
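// Deep-scrub a single object incrementally: hash its data in
// osd_deep_scrub_stride sized reads, then the omap header and keys in
// batches of osd_deep_scrub_keys, returning -EINPROGRESS until the whole
// object has been digested into the ScrubMap entry.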
int ReplicatedBackend::be_deep_scrub(
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
ScrubMap::object &o)
{
dout(10) << __func__ << " " << poid << " pos " << pos << dendl;
int r;
uint32_t fadvise_flags = CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
CEPH_OSD_OP_FLAG_FADVISE_DONTNEED |
CEPH_OSD_OP_FLAG_BYPASS_CLEAN_CACHE;
utime_t sleeptime;
sleeptime.set_from_double(cct->_conf->osd_debug_deep_scrub_sleep);
if (sleeptime != utime_t()) {
lgeneric_derr(cct) << __func__ << " sleeping for " << sleeptime << dendl;
sleeptime.sleep();
}
ceph_assert(poid == pos.ls[pos.pos]);
if (!pos.data_done()) {
if (pos.data_pos == 0) {
pos.data_hash = bufferhash(-1);
}
const uint64_t stride = cct->_conf->osd_deep_scrub_stride;
bufferlist bl;
r = store->read(
ch,
ghobject_t(
poid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard),
pos.data_pos,
stride, bl,
fadvise_flags);
if (r < 0) {
dout(20) << __func__ << " " << poid << " got "
<< r << " on read, read_error" << dendl;
o.read_error = true;
return 0;
}
if (r > 0) {
pos.data_hash << bl;
}
pos.data_pos += r;
if (static_cast<uint64_t>(r) == stride) {
dout(20) << __func__ << " " << poid << " more data, digest so far 0x"
<< std::hex << pos.data_hash.digest() << std::dec << dendl;
return -EINPROGRESS;
}
// done with bytes
pos.data_pos = -1;
o.digest = pos.data_hash.digest();
o.digest_present = true;
dout(20) << __func__ << " " << poid << " done with data, digest 0x"
<< std::hex << o.digest << std::dec << dendl;
}
// omap header
if (pos.omap_pos.empty()) {
pos.omap_hash = bufferhash(-1);
bufferlist hdrbl;
r = store->omap_get_header(
ch,
ghobject_t(
poid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard),
&hdrbl, true);
if (r == -EIO) {
dout(20) << __func__ << " " << poid << " got "
<< r << " on omap header read, read_error" << dendl;
o.read_error = true;
return 0;
}
if (r == 0 && hdrbl.length()) {
bool encoded = false;
dout(25) << "CRC header " << cleanbin(hdrbl, encoded, true) << dendl;
pos.omap_hash << hdrbl;
}
}
// omap
ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(
ch,
ghobject_t(
poid, ghobject_t::NO_GEN, get_parent()->whoami_shard().shard));
ceph_assert(iter);
if (pos.omap_pos.length()) {
iter->lower_bound(pos.omap_pos);
} else {
iter->seek_to_first();
}
int max = g_conf()->osd_deep_scrub_keys;
while (iter->status() == 0 && iter->valid()) {
pos.omap_bytes += iter->value().length();
++pos.omap_keys;
--max;
// fixme: we can do this more efficiently.
bufferlist bl;
encode(iter->key(), bl);
encode(iter->value(), bl);
pos.omap_hash << bl;
iter->next();
if (iter->valid() && max == 0) {
pos.omap_pos = iter->key();
return -EINPROGRESS;
}
if (iter->status() < 0) {
dout(25) << __func__ << " " << poid
<< " on omap scan, db status error" << dendl;
o.read_error = true;
return 0;
}
}
if (pos.omap_keys > cct->_conf->
osd_deep_scrub_large_omap_object_key_threshold ||
pos.omap_bytes > cct->_conf->
osd_deep_scrub_large_omap_object_value_sum_threshold) {
dout(25) << __func__ << " " << poid
<< " large omap object detected. Object has " << pos.omap_keys
<< " keys and size " << pos.omap_bytes << " bytes" << dendl;
o.large_omap_object_found = true;
o.large_omap_object_key_count = pos.omap_keys;
o.large_omap_object_value_size = pos.omap_bytes;
map.has_large_omap_object_errors = true;
}
o.omap_digest = pos.omap_hash.digest();
o.omap_digest_present = true;
dout(20) << __func__ << " done with " << poid << " omap_digest "
<< std::hex << o.omap_digest << std::dec << dendl;
// Sum up omap usage
if (pos.omap_keys > 0 || pos.omap_bytes > 0) {
dout(25) << __func__ << " adding " << pos.omap_keys << " keys and "
<< pos.omap_bytes << " bytes to pg_stats sums" << dendl;
map.has_omap_keys = true;
o.object_omap_bytes = pos.omap_bytes;
o.object_omap_keys = pos.omap_keys;
}
// done!
return 0;
}
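// Replica side of recovery: apply every PushOp carried by an MOSDPGPush in
// one transaction and answer with an MOSDPGPushReply when it completes.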
void ReplicatedBackend::_do_push(OpRequestRef op)
{
auto m = op->get_req<MOSDPGPush>();
ceph_assert(m->get_type() == MSG_OSD_PG_PUSH);
pg_shard_t from = m->from;
op->mark_started();
vector<PushReplyOp> replies;
ObjectStore::Transaction t;
if (get_parent()->check_failsafe_full()) {
dout(10) << __func__ << " Out of space (failsafe) processing push request." << dendl;
ceph_abort();
}
for (vector<PushOp>::const_iterator i = m->pushes.begin();
i != m->pushes.end();
++i) {
replies.push_back(PushReplyOp());
handle_push(from, *i, &(replies.back()), &t, m->is_repair);
}
MOSDPGPushReply *reply = new MOSDPGPushReply;
reply->from = get_parent()->whoami_shard();
reply->set_priority(m->get_priority());
reply->pgid = get_info().pgid;
reply->map_epoch = m->map_epoch;
reply->min_epoch = m->min_epoch;
reply->replies.swap(replies);
reply->compute_cost(cct);
t.register_on_complete(
new PG_SendMessageOnConn(
get_parent(), reply, m->get_connection()));
get_parent()->queue_transaction(std::move(t));
}
struct C_ReplicatedBackend_OnPullComplete : GenContext<ThreadPool::TPHandle&> {
ReplicatedBackend *bc;
list<ReplicatedBackend::pull_complete_info> to_continue;
int priority;
C_ReplicatedBackend_OnPullComplete(
ReplicatedBackend *bc,
int priority,
list<ReplicatedBackend::pull_complete_info> &&to_continue)
: bc(bc), to_continue(std::move(to_continue)), priority(priority) {}
void finish(ThreadPool::TPHandle &handle) override {
ReplicatedBackend::RPGHandle *h = bc->_open_recovery_op();
for (auto &&i: to_continue) {
auto j = bc->pulling.find(i.hoid);
ceph_assert(j != bc->pulling.end());
ObjectContextRef obc = j->second.obc;
bc->clear_pull(j, false /* already did it */);
int started = bc->start_pushes(i.hoid, obc, h);
if (started < 0) {
bc->pushing[i.hoid].clear();
bc->get_parent()->on_failed_pull(
{ bc->get_parent()->whoami_shard() },
i.hoid, obc->obs.oi.version);
} else if (!started) {
bc->get_parent()->on_global_recover(
i.hoid, i.stat, false);
}
handle.reset_tp_timeout();
}
bc->run_recovery_op(h, priority);
}
/// Estimate total data reads required to perform pushes
uint64_t estimate_push_costs() const {
uint64_t cost = 0;
for (const auto &i: to_continue) {
cost += i.stat.num_bytes_recovered;
}
return cost;
}
};
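// Primary side of recovery: consume the PushOps a peer sent in answer to our
// pulls, queue follow-on pushes for objects that completed (via the
// completion context above), and send another MOSDPGPull for objects that
// still need more data.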
void ReplicatedBackend::_do_pull_response(OpRequestRef op)
{
auto m = op->get_req<MOSDPGPush>();
ceph_assert(m->get_type() == MSG_OSD_PG_PUSH);
pg_shard_t from = m->from;
op->mark_started();
vector<PullOp> replies(1);
if (get_parent()->check_failsafe_full()) {
dout(10) << __func__ << " Out of space (failsafe) processing pull response (push)." << dendl;
ceph_abort();
}
ObjectStore::Transaction t;
list<pull_complete_info> to_continue;
for (vector<PushOp>::const_iterator i = m->pushes.begin();
i != m->pushes.end();
++i) {
bool more = handle_pull_response(from, *i, &(replies.back()), &to_continue, &t);
if (more)
replies.push_back(PullOp());
}
if (!to_continue.empty()) {
C_ReplicatedBackend_OnPullComplete *c =
new C_ReplicatedBackend_OnPullComplete(
this,
m->get_priority(),
std::move(to_continue));
t.register_on_complete(
new PG_RecoveryQueueAsync(
get_parent(),
get_parent()->bless_unlocked_gencontext(c),
std::max<uint64_t>(1, c->estimate_push_costs())));
}
replies.erase(replies.end() - 1);
if (replies.size()) {
MOSDPGPull *reply = new MOSDPGPull;
reply->from = parent->whoami_shard();
reply->set_priority(m->get_priority());
reply->pgid = get_info().pgid;
reply->map_epoch = m->map_epoch;
reply->min_epoch = m->min_epoch;
reply->set_pulls(std::move(replies));
reply->compute_cost(cct);
t.register_on_complete(
new PG_SendMessageOnConn(
get_parent(), reply, m->get_connection()));
}
get_parent()->queue_transaction(std::move(t));
}
void ReplicatedBackend::do_pull(OpRequestRef op)
{
MOSDPGPull *m = static_cast<MOSDPGPull *>(op->get_nonconst_req());
ceph_assert(m->get_type() == MSG_OSD_PG_PULL);
pg_shard_t from = m->from;
map<pg_shard_t, vector<PushOp> > replies;
for (auto& i : m->take_pulls()) {
replies[from].push_back(PushOp());
handle_pull(from, i, &(replies[from].back()));
}
send_pushes(m->get_priority(), replies);
}
void ReplicatedBackend::do_push_reply(OpRequestRef op)
{
auto m = op->get_req<MOSDPGPushReply>();
ceph_assert(m->get_type() == MSG_OSD_PG_PUSH_REPLY);
pg_shard_t from = m->from;
vector<PushOp> replies(1);
for (vector<PushReplyOp>::const_iterator i = m->replies.begin();
i != m->replies.end();
++i) {
bool more = handle_push_reply(from, *i, &(replies.back()));
if (more)
replies.push_back(PushOp());
}
replies.erase(replies.end() - 1);
map<pg_shard_t, vector<PushOp> > _replies;
_replies[from].swap(replies);
send_pushes(m->get_priority(), _replies);
}
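// Build the MOSDRepOp shipped to one replica: it carries the encoded
// transaction (or an empty one when should_send_op() says this peer should
// not get it), the encoded log entries, pg stats, trim bounds and
// temp-object bookkeeping.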
Message * ReplicatedBackend::generate_subop(
const hobject_t &soid,
const eversion_t &at_version,
ceph_tid_t tid,
osd_reqid_t reqid,
eversion_t pg_trim_to,
eversion_t min_last_complete_ondisk,
hobject_t new_temp_oid,
hobject_t discard_temp_oid,
const bufferlist &log_entries,
std::optional<pg_hit_set_history_t> &hset_hist,
ObjectStore::Transaction &op_t,
pg_shard_t peer,
const pg_info_t &pinfo)
{
int acks_wanted = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
// forward the write/update/whatever
MOSDRepOp *wr = new MOSDRepOp(
reqid, parent->whoami_shard(),
spg_t(get_info().pgid.pgid, peer.shard),
soid, acks_wanted,
get_osdmap_epoch(),
parent->get_last_peering_reset_epoch(),
tid, at_version);
// ship resulting transaction, log entries, and pg_stats
if (!parent->should_send_op(peer, soid)) {
ObjectStore::Transaction t;
encode(t, wr->get_data());
} else {
encode(op_t, wr->get_data());
wr->get_header().data_off = op_t.get_data_alignment();
}
wr->logbl = log_entries;
if (pinfo.is_incomplete())
wr->pg_stats = pinfo.stats; // reflects backfill progress
else
wr->pg_stats = get_info().stats;
wr->pg_trim_to = pg_trim_to;
if (HAVE_FEATURE(parent->min_peer_features(), OSD_REPOP_MLCOD)) {
wr->min_last_complete_ondisk = min_last_complete_ondisk;
} else {
/* Some replicas need this field to be at_version. New replicas
* will ignore it */
wr->set_rollback_to(at_version);
}
wr->new_temp_oid = new_temp_oid;
wr->discard_temp_oid = discard_temp_oid;
wr->updated_hit_set_history = hset_hist;
return wr;
}
void ReplicatedBackend::issue_op(
const hobject_t &soid,
const eversion_t &at_version,
ceph_tid_t tid,
osd_reqid_t reqid,
eversion_t pg_trim_to,
eversion_t min_last_complete_ondisk,
hobject_t new_temp_oid,
hobject_t discard_temp_oid,
const vector<pg_log_entry_t> &log_entries,
std::optional<pg_hit_set_history_t> &hset_hist,
InProgressOp *op,
ObjectStore::Transaction &op_t)
{
if (parent->get_acting_recovery_backfill_shards().size() > 1) {
if (op->op) {
op->op->pg_trace.event("issue replication ops");
ostringstream ss;
set<pg_shard_t> replicas = parent->get_acting_recovery_backfill_shards();
replicas.erase(parent->whoami_shard());
ss << "waiting for subops from " << replicas;
op->op->mark_sub_op_sent(ss.str());
}
// avoid doing the same work in generate_subop
bufferlist logs;
encode(log_entries, logs);
for (const auto& shard : get_parent()->get_acting_recovery_backfill_shards()) {
if (shard == parent->whoami_shard()) continue;
const pg_info_t &pinfo = parent->get_shard_info().find(shard)->second;
Message *wr;
wr = generate_subop(
soid,
at_version,
tid,
reqid,
pg_trim_to,
min_last_complete_ondisk,
new_temp_oid,
discard_temp_oid,
logs,
hset_hist,
op_t,
shard,
pinfo);
if (op->op && op->op->pg_trace)
wr->trace.init("replicated op", nullptr, &op->op->pg_trace);
get_parent()->send_message_osd_cluster(
shard.osd, wr, get_osdmap_epoch());
}
}
}
// sub op modify
void ReplicatedBackend::do_repop(OpRequestRef op)
{
static_cast<MOSDRepOp*>(op->get_nonconst_req())->finish_decode();
auto m = op->get_req<MOSDRepOp>();
int msg_type = m->get_type();
ceph_assert(MSG_OSD_REPOP == msg_type);
const hobject_t& soid = m->poid;
dout(10) << __func__ << " " << soid
<< " v " << m->version
           << (m->logbl.length() ? " (transaction)" : " (parallel exec)")
<< " " << m->logbl.length()
<< dendl;
// sanity checks
ceph_assert(m->map_epoch >= get_info().history.same_interval_since);
dout(30) << __func__ << " missing before " << get_parent()->get_log().get_missing().get_items() << dendl;
parent->maybe_preempt_replica_scrub(soid);
int ackerosd = m->get_source().num();
op->mark_started();
RepModifyRef rm(std::make_shared<RepModify>());
rm->op = op;
rm->ackerosd = ackerosd;
rm->last_complete = get_info().last_complete;
rm->epoch_started = get_osdmap_epoch();
ceph_assert(m->logbl.length());
// shipped transaction and log entries
vector<pg_log_entry_t> log;
auto p = const_cast<bufferlist&>(m->get_data()).cbegin();
decode(rm->opt, p);
if (m->new_temp_oid != hobject_t()) {
dout(20) << __func__ << " start tracking temp " << m->new_temp_oid << dendl;
add_temp_obj(m->new_temp_oid);
}
if (m->discard_temp_oid != hobject_t()) {
dout(20) << __func__ << " stop tracking temp " << m->discard_temp_oid << dendl;
if (rm->opt.empty()) {
dout(10) << __func__ << ": removing object " << m->discard_temp_oid
<< " since we won't get the transaction" << dendl;
rm->localt.remove(coll, ghobject_t(m->discard_temp_oid));
}
clear_temp_obj(m->discard_temp_oid);
}
p = const_cast<bufferlist&>(m->logbl).begin();
decode(log, p);
rm->opt.set_fadvise_flag(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
bool update_snaps = false;
if (!rm->opt.empty()) {
// If the opt is non-empty, we infer we are before
// last_backfill (according to the primary, not our
// not-quite-accurate value), and should update the
// collections now. Otherwise, we do it later on push.
update_snaps = true;
}
// flag set to true during async recovery
bool async = false;
pg_missing_tracker_t pmissing = get_parent()->get_local_missing();
if (pmissing.is_missing(soid)) {
async = true;
dout(30) << __func__ << " is_missing " << pmissing.is_missing(soid) << dendl;
for (auto &&e: log) {
dout(30) << " add_next_event entry " << e << dendl;
get_parent()->add_local_next_event(e);
dout(30) << " entry is_delete " << e.is_delete() << dendl;
}
}
parent->update_stats(m->pg_stats);
parent->log_operation(
std::move(log),
m->updated_hit_set_history,
m->pg_trim_to,
m->version, /* Replicated PGs don't have rollback info */
m->min_last_complete_ondisk,
update_snaps,
rm->localt,
async);
rm->opt.register_on_commit(
parent->bless_context(
new C_OSD_RepModifyCommit(this, rm)));
vector<ObjectStore::Transaction> tls;
tls.reserve(2);
tls.push_back(std::move(rm->localt));
tls.push_back(std::move(rm->opt));
parent->queue_transactions(tls, op);
// op is cleaned up by oncommit/onapply when both are executed
dout(30) << __func__ << " missing after" << get_parent()->get_log().get_missing().get_items() << dendl;
}
void ReplicatedBackend::repop_commit(RepModifyRef rm)
{
rm->op->mark_commit_sent();
rm->op->pg_trace.event("sup_op_commit");
rm->committed = true;
// send commit.
auto m = rm->op->get_req<MOSDRepOp>();
ceph_assert(m->get_type() == MSG_OSD_REPOP);
dout(10) << __func__ << " on op " << *m
<< ", sending commit to osd." << rm->ackerosd
<< dendl;
ceph_assert(get_osdmap()->is_up(rm->ackerosd));
get_parent()->update_last_complete_ondisk(rm->last_complete);
MOSDRepOpReply *reply = new MOSDRepOpReply(
m,
get_parent()->whoami_shard(),
0, get_osdmap_epoch(), m->get_min_epoch(), CEPH_OSD_FLAG_ONDISK);
reply->set_last_complete_ondisk(rm->last_complete);
reply->set_priority(CEPH_MSG_PRIO_HIGH); // this better match ack priority!
reply->trace = rm->op->pg_trace;
get_parent()->send_message_osd_cluster(
rm->ackerosd, reply, get_osdmap_epoch());
log_subop_stats(get_parent()->get_logger(), rm->op, l_osd_sop_w);
}
// ===========================================================
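// Compute which byte ranges of a head object actually need to be shipped:
// start from the dirty regions recorded in the missing entry and, where
// allowed, replace ranges with clone_subsets entries that the target can
// clone from an existing clone instead of receiving the bytes.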
void ReplicatedBackend::calc_head_subsets(
ObjectContextRef obc, SnapSet& snapset, const hobject_t& head,
const pg_missing_t& missing,
const hobject_t &last_backfill,
interval_set<uint64_t>& data_subset,
map<hobject_t, interval_set<uint64_t>>& clone_subsets,
ObcLockManager &manager)
{
dout(10) << "calc_head_subsets " << head
<< " clone_overlap " << snapset.clone_overlap << dendl;
uint64_t size = obc->obs.oi.size;
if (size)
data_subset.insert(0, size);
assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
const auto it = missing.get_items().find(head);
assert(it != missing.get_items().end());
data_subset.intersection_of(it->second.clean_regions.get_dirty_regions());
dout(10) << "calc_head_subsets " << head
<< " data_subset " << data_subset << dendl;
if (get_parent()->get_pool().allow_incomplete_clones()) {
dout(10) << __func__ << ": caching (was) enabled, skipping clone subsets" << dendl;
return;
}
if (!cct->_conf->osd_recover_clone_overlap) {
dout(10) << "calc_head_subsets " << head << " -- osd_recover_clone_overlap disabled" << dendl;
return;
}
interval_set<uint64_t> cloning;
interval_set<uint64_t> prev;
hobject_t c = head;
if (size)
prev.insert(0, size);
for (int j=snapset.clones.size()-1; j>=0; j--) {
c.snap = snapset.clones[j];
prev.intersection_of(snapset.clone_overlap[snapset.clones[j]]);
if (!missing.is_missing(c) &&
c < last_backfill &&
get_parent()->try_lock_for_read(c, manager)) {
dout(10) << "calc_head_subsets " << head << " has prev " << c
<< " overlap " << prev << dendl;
cloning = prev;
break;
}
dout(10) << "calc_head_subsets " << head << " does not have prev " << c
<< " overlap " << prev << dendl;
}
cloning.intersection_of(data_subset);
if (cloning.empty()) {
dout(10) << "skipping clone, nothing needs to clone" << dendl;
return;
}
if (cloning.num_intervals() > g_conf().get_val<uint64_t>("osd_recover_clone_overlap_limit")) {
dout(10) << "skipping clone, too many holes" << dendl;
get_parent()->release_locks(manager);
clone_subsets.clear();
cloning.clear();
return;
}
// what's left for us to push?
clone_subsets[c] = cloning;
data_subset.subtract(cloning);
dout(10) << "calc_head_subsets " << head
<< " data_subset " << data_subset
<< " clone_subsets " << clone_subsets << dendl;
}
void ReplicatedBackend::calc_clone_subsets(
SnapSet& snapset, const hobject_t& soid,
const pg_missing_t& missing,
const hobject_t &last_backfill,
interval_set<uint64_t>& data_subset,
map<hobject_t, interval_set<uint64_t>>& clone_subsets,
ObcLockManager &manager)
{
dout(10) << "calc_clone_subsets " << soid
<< " clone_overlap " << snapset.clone_overlap << dendl;
uint64_t size = snapset.clone_size[soid.snap];
if (size)
data_subset.insert(0, size);
if (get_parent()->get_pool().allow_incomplete_clones()) {
dout(10) << __func__ << ": caching (was) enabled, skipping clone subsets" << dendl;
return;
}
if (!cct->_conf->osd_recover_clone_overlap) {
dout(10) << "calc_clone_subsets " << soid << " -- osd_recover_clone_overlap disabled" << dendl;
return;
}
unsigned i;
for (i=0; i < snapset.clones.size(); i++)
if (snapset.clones[i] == soid.snap)
break;
// any overlap with next older clone?
interval_set<uint64_t> cloning;
interval_set<uint64_t> prev;
if (size)
prev.insert(0, size);
for (int j=i-1; j>=0; j--) {
hobject_t c = soid;
c.snap = snapset.clones[j];
prev.intersection_of(snapset.clone_overlap[snapset.clones[j]]);
if (!missing.is_missing(c) &&
c < last_backfill &&
get_parent()->try_lock_for_read(c, manager)) {
dout(10) << "calc_clone_subsets " << soid << " has prev " << c
<< " overlap " << prev << dendl;
clone_subsets[c] = prev;
cloning.union_of(prev);
break;
}
dout(10) << "calc_clone_subsets " << soid << " does not have prev " << c
<< " overlap " << prev << dendl;
}
// overlap with next newest?
interval_set<uint64_t> next;
if (size)
next.insert(0, size);
for (unsigned j=i+1; j<snapset.clones.size(); j++) {
hobject_t c = soid;
c.snap = snapset.clones[j];
next.intersection_of(snapset.clone_overlap[snapset.clones[j-1]]);
if (!missing.is_missing(c) &&
c < last_backfill &&
get_parent()->try_lock_for_read(c, manager)) {
dout(10) << "calc_clone_subsets " << soid << " has next " << c
<< " overlap " << next << dendl;
clone_subsets[c] = next;
cloning.union_of(next);
break;
}
dout(10) << "calc_clone_subsets " << soid << " does not have next " << c
<< " overlap " << next << dendl;
}
if (cloning.num_intervals() > g_conf().get_val<uint64_t>("osd_recover_clone_overlap_limit")) {
dout(10) << "skipping clone, too many holes" << dendl;
get_parent()->release_locks(manager);
clone_subsets.clear();
cloning.clear();
}
// what's left for us to push?
data_subset.subtract(cloning);
dout(10) << "calc_clone_subsets " << soid
<< " data_subset " << data_subset
<< " clone_subsets " << clone_subsets << dendl;
}
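// The primary is missing this object: pick a shard that has it (honouring
// osd_debug_feed_pullee, otherwise a random holder), work out which byte
// ranges are actually needed, and queue a PullOp to that shard in the
// recovery handle while tracking it in pulling/pull_from_peer.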
void ReplicatedBackend::prepare_pull(
eversion_t v,
const hobject_t& soid,
ObjectContextRef headctx,
RPGHandle *h)
{
const auto missing_iter = get_parent()->get_local_missing().get_items().find(soid);
ceph_assert(missing_iter != get_parent()->get_local_missing().get_items().end());
eversion_t _v = missing_iter->second.need;
ceph_assert(_v == v);
const map<hobject_t, set<pg_shard_t>> &missing_loc(
get_parent()->get_missing_loc_shards());
const map<pg_shard_t, pg_missing_t > &peer_missing(
get_parent()->get_shard_missing());
map<hobject_t, set<pg_shard_t>>::const_iterator q = missing_loc.find(soid);
ceph_assert(q != missing_loc.end());
ceph_assert(!q->second.empty());
// pick a pullee
auto p = q->second.end();
if (cct->_conf->osd_debug_feed_pullee >= 0) {
for (auto it = q->second.begin(); it != q->second.end(); it++) {
if (it->osd == cct->_conf->osd_debug_feed_pullee) {
p = it;
break;
}
}
}
if (p == q->second.end()) {
    // probably because the user fed a wrong pullee
p = q->second.begin();
std::advance(p,
ceph::util::generate_random_number<int>(0,
q->second.size() - 1));
}
ceph_assert(get_osdmap()->is_up(p->osd));
pg_shard_t fromshard = *p;
dout(7) << "pull " << soid
<< " v " << v
<< " on osds " << q->second
<< " from osd." << fromshard
<< dendl;
ceph_assert(peer_missing.count(fromshard));
const pg_missing_t &pmissing = peer_missing.find(fromshard)->second;
if (pmissing.is_missing(soid, v)) {
ceph_assert(pmissing.get_items().find(soid)->second.have != v);
dout(10) << "pulling soid " << soid << " from osd " << fromshard
<< " at version " << pmissing.get_items().find(soid)->second.have
<< " rather than at version " << v << dendl;
v = pmissing.get_items().find(soid)->second.have;
ceph_assert(get_parent()->get_log().get_log().objects.count(soid) &&
(get_parent()->get_log().get_log().objects.find(soid)->second->op ==
pg_log_entry_t::LOST_REVERT) &&
(get_parent()->get_log().get_log().objects.find(
soid)->second->reverting_to ==
v));
}
ObjectRecoveryInfo recovery_info;
ObcLockManager lock_manager;
if (soid.is_snap()) {
ceph_assert(!get_parent()->get_local_missing().is_missing(soid.get_head()));
ceph_assert(headctx);
// check snapset
SnapSetContext *ssc = headctx->ssc;
ceph_assert(ssc);
dout(10) << " snapset " << ssc->snapset << dendl;
recovery_info.ss = ssc->snapset;
calc_clone_subsets(
ssc->snapset, soid, get_parent()->get_local_missing(),
get_info().last_backfill,
recovery_info.copy_subset,
recovery_info.clone_subset,
lock_manager);
// FIXME: this may overestimate if we are pulling multiple clones in parallel...
dout(10) << " pulling " << recovery_info << dendl;
ceph_assert(ssc->snapset.clone_size.count(soid.snap));
recovery_info.size = ssc->snapset.clone_size[soid.snap];
recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist();
} else {
// pulling head or unversioned object.
// always pull the whole thing.
recovery_info.copy_subset.insert(0, (uint64_t)-1);
assert(HAVE_FEATURE(parent->min_peer_features(), SERVER_OCTOPUS));
recovery_info.copy_subset.intersection_of(missing_iter->second.clean_regions.get_dirty_regions());
recovery_info.size = ((uint64_t)-1);
recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist();
}
h->pulls[fromshard].push_back(PullOp());
PullOp &op = h->pulls[fromshard].back();
op.soid = soid;
op.recovery_info = recovery_info;
op.recovery_info.soid = soid;
op.recovery_info.version = v;
op.recovery_progress.data_complete = false;
op.recovery_progress.omap_complete = !missing_iter->second.clean_regions.omap_is_dirty();
op.recovery_progress.data_recovered_to = 0;
op.recovery_progress.first = true;
ceph_assert(!pulling.count(soid));
pull_from_peer[fromshard].insert(soid);
pull_info_t &pull_info = pulling[soid];
pull_info.from = fromshard;
pull_info.soid = soid;
pull_info.head_ctx = headctx;
pull_info.recovery_info = op.recovery_info;
pull_info.recovery_progress = op.recovery_progress;
pull_info.cache_dont_need = h->cache_dont_need;
pull_info.lock_manager = std::move(lock_manager);
}
/*
* intelligently push an object to a replica. make use of existing
* clones/heads and dup data ranges where possible.
*/
int ReplicatedBackend::prep_push_to_replica(
ObjectContextRef obc, const hobject_t& soid, pg_shard_t peer,
PushOp *pop, bool cache_dont_need)
{
const object_info_t& oi = obc->obs.oi;
uint64_t size = obc->obs.oi.size;
dout(10) << __func__ << ": " << soid << " v" << oi.version
<< " size " << size << " to osd." << peer << dendl;
map<hobject_t, interval_set<uint64_t>> clone_subsets;
interval_set<uint64_t> data_subset;
ObcLockManager lock_manager;
// are we doing a clone on the replica?
if (soid.snap && soid.snap < CEPH_NOSNAP) {
hobject_t head = soid;
head.snap = CEPH_NOSNAP;
    // try to base the push off of clones that succeed/precede poid
// we need the head (and current SnapSet) locally to do that.
if (get_parent()->get_local_missing().is_missing(head)) {
dout(15) << "push_to_replica missing head " << head << ", pushing raw clone" << dendl;
return prep_push(obc, soid, peer, pop, cache_dont_need);
}
SnapSetContext *ssc = obc->ssc;
ceph_assert(ssc);
dout(15) << "push_to_replica snapset is " << ssc->snapset << dendl;
pop->recovery_info.ss = ssc->snapset;
map<pg_shard_t, pg_missing_t>::const_iterator pm =
get_parent()->get_shard_missing().find(peer);
ceph_assert(pm != get_parent()->get_shard_missing().end());
map<pg_shard_t, pg_info_t>::const_iterator pi =
get_parent()->get_shard_info().find(peer);
ceph_assert(pi != get_parent()->get_shard_info().end());
calc_clone_subsets(
ssc->snapset, soid,
pm->second,
pi->second.last_backfill,
data_subset, clone_subsets,
lock_manager);
} else if (soid.snap == CEPH_NOSNAP) {
// pushing head or unversioned object.
    // base this partially on the replica's clones?
SnapSetContext *ssc = obc->ssc;
ceph_assert(ssc);
dout(15) << "push_to_replica snapset is " << ssc->snapset << dendl;
calc_head_subsets(
obc,
ssc->snapset, soid, get_parent()->get_shard_missing().find(peer)->second,
get_parent()->get_shard_info().find(peer)->second.last_backfill,
data_subset, clone_subsets,
lock_manager);
}
return prep_push(
obc,
soid,
peer,
oi.version,
data_subset,
clone_subsets,
pop,
cache_dont_need,
std::move(lock_manager));
}
int ReplicatedBackend::prep_push(ObjectContextRef obc,
const hobject_t& soid, pg_shard_t peer,
PushOp *pop, bool cache_dont_need)
{
interval_set<uint64_t> data_subset;
if (obc->obs.oi.size)
data_subset.insert(0, obc->obs.oi.size);
map<hobject_t, interval_set<uint64_t>> clone_subsets;
return prep_push(obc, soid, peer,
obc->obs.oi.version, data_subset, clone_subsets,
pop, cache_dont_need, ObcLockManager());
}
int ReplicatedBackend::prep_push(
ObjectContextRef obc,
const hobject_t& soid, pg_shard_t peer,
eversion_t version,
interval_set<uint64_t> &data_subset,
map<hobject_t, interval_set<uint64_t>>& clone_subsets,
PushOp *pop,
bool cache_dont_need,
ObcLockManager &&lock_manager)
{
get_parent()->begin_peer_recover(peer, soid);
const auto pmissing_iter = get_parent()->get_shard_missing().find(peer);
const auto missing_iter = pmissing_iter->second.get_items().find(soid);
assert(missing_iter != pmissing_iter->second.get_items().end());
// take note.
push_info_t &push_info = pushing[soid][peer];
push_info.obc = obc;
push_info.recovery_info.size = obc->obs.oi.size;
push_info.recovery_info.copy_subset = data_subset;
push_info.recovery_info.clone_subset = clone_subsets;
push_info.recovery_info.soid = soid;
push_info.recovery_info.oi = obc->obs.oi;
push_info.recovery_info.ss = pop->recovery_info.ss;
push_info.recovery_info.version = version;
push_info.recovery_info.object_exist = missing_iter->second.clean_regions.object_is_exist();
push_info.recovery_progress.omap_complete = !missing_iter->second.clean_regions.omap_is_dirty();
push_info.lock_manager = std::move(lock_manager);
ObjectRecoveryProgress new_progress;
int r = build_push_op(push_info.recovery_info,
push_info.recovery_progress,
&new_progress,
pop,
&(push_info.stat), cache_dont_need);
if (r < 0)
return r;
push_info.recovery_progress = new_progress;
return 0;
}
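// Stage one chunk of pushed data into the object store. The first chunk of
// a multi-chunk push targets a temp object that is renamed over the real
// object once the final chunk arrives; a single-chunk push writes the
// target directly. Zeros are punched for dirty-but-unwritten ranges and
// overlapping content is cloned from objects already on disk.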
void ReplicatedBackend::submit_push_data(
const ObjectRecoveryInfo &recovery_info,
bool first,
bool complete,
bool clear_omap,
bool cache_dont_need,
interval_set<uint64_t> &data_zeros,
const interval_set<uint64_t> &intervals_included,
bufferlist data_included,
bufferlist omap_header,
const map<string, bufferlist, less<>> &attrs,
const map<string, bufferlist> &omap_entries,
ObjectStore::Transaction *t)
{
hobject_t target_oid;
if (first && complete) {
target_oid = recovery_info.soid;
} else {
target_oid = get_parent()->get_temp_recovery_object(recovery_info.soid,
recovery_info.version);
if (first) {
dout(10) << __func__ << ": Adding oid "
<< target_oid << " in the temp collection" << dendl;
add_temp_obj(target_oid);
}
}
if (first) {
if (!complete) {
t->remove(coll, ghobject_t(target_oid));
t->touch(coll, ghobject_t(target_oid));
object_info_t oi(attrs.at(OI_ATTR));
t->set_alloc_hint(coll, ghobject_t(target_oid),
oi.expected_object_size,
oi.expected_write_size,
oi.alloc_hint_flags);
} else {
if (!recovery_info.object_exist) {
t->remove(coll, ghobject_t(target_oid));
t->touch(coll, ghobject_t(target_oid));
object_info_t oi(attrs.at(OI_ATTR));
t->set_alloc_hint(coll, ghobject_t(target_oid),
oi.expected_object_size,
oi.expected_write_size,
oi.alloc_hint_flags);
}
      // remove xattrs and update them later if overwriting the original object
t->rmattrs(coll, ghobject_t(target_oid));
      // if we need to update the omap, clear the previous content first
if (clear_omap)
t->omap_clear(coll, ghobject_t(target_oid));
}
t->truncate(coll, ghobject_t(target_oid), recovery_info.size);
if (omap_header.length())
t->omap_setheader(coll, ghobject_t(target_oid), omap_header);
struct stat st;
int r = store->stat(ch, ghobject_t(recovery_info.soid), &st);
if (get_parent()->pg_is_remote_backfilling()) {
uint64_t size = 0;
if (r == 0)
size = st.st_size;
// Don't need to do anything if object is still the same size
if (size != recovery_info.oi.size) {
get_parent()->pg_add_local_num_bytes((int64_t)recovery_info.oi.size - (int64_t)size);
get_parent()->pg_add_num_bytes((int64_t)recovery_info.oi.size - (int64_t)size);
dout(10) << __func__ << " " << recovery_info.soid
<< " backfill size " << recovery_info.oi.size
<< " previous size " << size
<< " net size " << recovery_info.oi.size - size
<< dendl;
}
}
if (!complete) {
      // clone overlapping content from the local object
if (recovery_info.object_exist) {
assert(r == 0);
uint64_t local_size = std::min(recovery_info.size, (uint64_t)st.st_size);
interval_set<uint64_t> local_intervals_included, local_intervals_excluded;
if (local_size) {
local_intervals_included.insert(0, local_size);
local_intervals_excluded.intersection_of(local_intervals_included, recovery_info.copy_subset);
local_intervals_included.subtract(local_intervals_excluded);
}
for (interval_set<uint64_t>::const_iterator q = local_intervals_included.begin();
q != local_intervals_included.end();
++q) {
dout(15) << " clone_range " << recovery_info.soid << " "
<< q.get_start() << "~" << q.get_len() << dendl;
t->clone_range(coll, ghobject_t(recovery_info.soid), ghobject_t(target_oid),
q.get_start(), q.get_len(), q.get_start());
}
}
}
}
uint64_t off = 0;
uint32_t fadvise_flags = CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL;
if (cache_dont_need)
fadvise_flags |= CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
// Punch zeros for data, if fiemap indicates nothing but it is marked dirty
if (data_zeros.size() > 0) {
data_zeros.intersection_of(recovery_info.copy_subset);
assert(intervals_included.subset_of(data_zeros));
data_zeros.subtract(intervals_included);
dout(20) << __func__ <<" recovering object " << recovery_info.soid
<< " copy_subset: " << recovery_info.copy_subset
<< " intervals_included: " << intervals_included
<< " data_zeros: " << data_zeros << dendl;
for (auto p = data_zeros.begin(); p != data_zeros.end(); ++p)
t->zero(coll, ghobject_t(target_oid), p.get_start(), p.get_len());
}
for (interval_set<uint64_t>::const_iterator p = intervals_included.begin();
p != intervals_included.end();
++p) {
bufferlist bit;
bit.substr_of(data_included, off, p.get_len());
t->write(coll, ghobject_t(target_oid),
p.get_start(), p.get_len(), bit, fadvise_flags);
off += p.get_len();
}
if (!omap_entries.empty())
t->omap_setkeys(coll, ghobject_t(target_oid), omap_entries);
if (!attrs.empty())
t->setattrs(coll, ghobject_t(target_oid), attrs);
if (complete) {
if (!first) {
dout(10) << __func__ << ": Removing oid "
<< target_oid << " from the temp collection" << dendl;
clear_temp_obj(target_oid);
t->remove(coll, ghobject_t(recovery_info.soid));
t->collection_move_rename(coll, ghobject_t(target_oid),
coll, ghobject_t(recovery_info.soid));
}
submit_push_complete(recovery_info, t);
}
}
void ReplicatedBackend::submit_push_complete(
const ObjectRecoveryInfo &recovery_info,
ObjectStore::Transaction *t)
{
for (map<hobject_t, interval_set<uint64_t>>::const_iterator p =
recovery_info.clone_subset.begin();
p != recovery_info.clone_subset.end();
++p) {
for (interval_set<uint64_t>::const_iterator q = p->second.begin();
q != p->second.end();
++q) {
dout(15) << " clone_range " << p->first << " "
<< q.get_start() << "~" << q.get_len() << dendl;
t->clone_range(coll, ghobject_t(p->first), ghobject_t(recovery_info.soid),
q.get_start(), q.get_len(), q.get_start());
}
}
}
ObjectRecoveryInfo ReplicatedBackend::recalc_subsets(
const ObjectRecoveryInfo& recovery_info,
SnapSetContext *ssc,
ObcLockManager &manager)
{
if (!recovery_info.soid.snap || recovery_info.soid.snap >= CEPH_NOSNAP)
return recovery_info;
ObjectRecoveryInfo new_info = recovery_info;
new_info.copy_subset.clear();
new_info.clone_subset.clear();
ceph_assert(ssc);
get_parent()->release_locks(manager); // might already have locks
calc_clone_subsets(
ssc->snapset, new_info.soid, get_parent()->get_local_missing(),
get_info().last_backfill,
new_info.copy_subset, new_info.clone_subset,
manager);
return new_info;
}
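// Primary handling of one PushOp received in answer to a pull: trim the
// payload to the ranges we still need, stage it with submit_push_data(),
// and either finish local recovery of the object or fill *response with the
// next PullOp. Returns true when another pull round trip is required.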
bool ReplicatedBackend::handle_pull_response(
pg_shard_t from, const PushOp &pop, PullOp *response,
list<pull_complete_info> *to_continue,
ObjectStore::Transaction *t)
{
interval_set<uint64_t> data_included = pop.data_included;
bufferlist data;
data = pop.data;
dout(10) << "handle_pull_response "
<< pop.recovery_info
<< pop.after_progress
<< " data.size() is " << data.length()
<< " data_included: " << data_included
<< dendl;
if (pop.version == eversion_t()) {
// replica doesn't have it!
_failed_pull(from, pop.soid);
return false;
}
const hobject_t &hoid = pop.soid;
ceph_assert((data_included.empty() && data.length() == 0) ||
(!data_included.empty() && data.length() > 0));
auto piter = pulling.find(hoid);
if (piter == pulling.end()) {
return false;
}
pull_info_t &pull_info = piter->second;
if (pull_info.recovery_info.size == (uint64_t(-1))) {
pull_info.recovery_info.size = pop.recovery_info.size;
pull_info.recovery_info.copy_subset.intersection_of(
pop.recovery_info.copy_subset);
}
  // If the primary doesn't have the object info and didn't know the version
if (pull_info.recovery_info.version == eversion_t()) {
pull_info.recovery_info.version = pop.version;
}
bool first = pull_info.recovery_progress.first;
if (first) {
    // The attrs only reference the original bufferlist (decoded from the
    // MOSDPGPush message), whose size is much greater than the attrs needed
    // for recovery. If the obc caches them (get_obc may cache the attrs),
    // the whole original bufferlist cannot be freed until the obc is evicted
    // from the obc cache. So rebuild the bufferlists before caching them.
auto attrset = pop.attrset;
for (auto& a : attrset) {
a.second.rebuild();
}
pull_info.obc = get_parent()->get_obc(pull_info.recovery_info.soid, attrset);
if (attrset.find(SS_ATTR) != attrset.end()) {
bufferlist ssbv = attrset.at(SS_ATTR);
SnapSet ss(ssbv);
assert(!pull_info.obc->ssc->exists || ss.seq == pull_info.obc->ssc->snapset.seq);
}
pull_info.recovery_info.oi = pull_info.obc->obs.oi;
pull_info.recovery_info = recalc_subsets(
pull_info.recovery_info,
pull_info.obc->ssc,
pull_info.lock_manager);
}
interval_set<uint64_t> usable_intervals;
bufferlist usable_data;
trim_pushed_data(pull_info.recovery_info.copy_subset,
data_included,
data,
&usable_intervals,
&usable_data);
data_included = usable_intervals;
data = std::move(usable_data);
pull_info.recovery_progress = pop.after_progress;
dout(10) << "new recovery_info " << pull_info.recovery_info
<< ", new progress " << pull_info.recovery_progress
<< dendl;
interval_set<uint64_t> data_zeros;
uint64_t z_offset = pop.before_progress.data_recovered_to;
uint64_t z_length = pop.after_progress.data_recovered_to - pop.before_progress.data_recovered_to;
if (z_length)
data_zeros.insert(z_offset, z_length);
bool complete = pull_info.is_complete();
bool clear_omap = !pop.before_progress.omap_complete;
submit_push_data(pull_info.recovery_info,
first,
complete,
clear_omap,
pull_info.cache_dont_need,
data_zeros,
data_included,
data,
pop.omap_header,
pop.attrset,
pop.omap_entries,
t);
pull_info.stat.num_keys_recovered += pop.omap_entries.size();
pull_info.stat.num_bytes_recovered += data.length();
get_parent()->get_logger()->inc(l_osd_rbytes, pop.omap_entries.size() + data.length());
if (complete) {
pull_info.stat.num_objects_recovered++;
// XXX: This could overcount if regular recovery is needed right after a repair
if (get_parent()->pg_is_repair()) {
pull_info.stat.num_objects_repaired++;
get_parent()->inc_osd_stat_repaired();
}
clear_pull_from(piter);
to_continue->push_back({hoid, pull_info.stat});
get_parent()->on_local_recover(
hoid, pull_info.recovery_info, pull_info.obc, false, t);
return false;
} else {
response->soid = pop.soid;
response->recovery_info = pull_info.recovery_info;
response->recovery_progress = pull_info.recovery_progress;
return true;
}
}
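// Replica handling of one PushOp: stage the chunk and, once the final chunk
// lands, complete local recovery of the object.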
void ReplicatedBackend::handle_push(
pg_shard_t from, const PushOp &pop, PushReplyOp *response,
ObjectStore::Transaction *t, bool is_repair)
{
dout(10) << "handle_push "
<< pop.recovery_info
<< pop.after_progress
<< dendl;
bufferlist data;
data = pop.data;
bool first = pop.before_progress.first;
bool complete = pop.after_progress.data_complete &&
pop.after_progress.omap_complete;
bool clear_omap = !pop.before_progress.omap_complete;
interval_set<uint64_t> data_zeros;
uint64_t z_offset = pop.before_progress.data_recovered_to;
uint64_t z_length = pop.after_progress.data_recovered_to - pop.before_progress.data_recovered_to;
if (z_length)
data_zeros.insert(z_offset, z_length);
response->soid = pop.recovery_info.soid;
submit_push_data(pop.recovery_info,
first,
complete,
clear_omap,
		   true, // must be a replica
data_zeros,
pop.data_included,
data,
pop.omap_header,
pop.attrset,
pop.omap_entries,
t);
if (complete) {
if (is_repair) {
get_parent()->inc_osd_stat_repaired();
dout(20) << __func__ << " repair complete" << dendl;
}
get_parent()->on_local_recover(
pop.recovery_info.soid,
pop.recovery_info,
ObjectContextRef(), // ok, is replica
false,
t);
}
}
void ReplicatedBackend::send_pushes(int prio, map<pg_shard_t, vector<PushOp> > &pushes)
{
for (map<pg_shard_t, vector<PushOp> >::iterator i = pushes.begin();
i != pushes.end();
++i) {
ConnectionRef con = get_parent()->get_con_osd_cluster(
i->first.osd,
get_osdmap_epoch());
if (!con)
continue;
vector<PushOp>::iterator j = i->second.begin();
while (j != i->second.end()) {
uint64_t cost = 0;
uint64_t pushes = 0;
MOSDPGPush *msg = new MOSDPGPush();
msg->from = get_parent()->whoami_shard();
msg->pgid = get_parent()->primary_spg_t();
msg->map_epoch = get_osdmap_epoch();
msg->min_epoch = get_parent()->get_last_peering_reset_epoch();
msg->set_priority(prio);
msg->is_repair = get_parent()->pg_is_repair();
for (;
(j != i->second.end() &&
cost < cct->_conf->osd_max_push_cost &&
pushes < cct->_conf->osd_max_push_objects) ;
++j) {
dout(20) << __func__ << ": sending push " << *j
<< " to osd." << i->first << dendl;
cost += j->cost(cct);
pushes += 1;
msg->pushes.push_back(*j);
}
msg->set_cost(cost);
get_parent()->send_message_osd_cluster(msg, con);
}
}
}
void ReplicatedBackend::send_pulls(int prio, map<pg_shard_t, vector<PullOp> > &pulls)
{
for (map<pg_shard_t, vector<PullOp> >::iterator i = pulls.begin();
i != pulls.end();
++i) {
ConnectionRef con = get_parent()->get_con_osd_cluster(
i->first.osd,
get_osdmap_epoch());
if (!con)
continue;
dout(20) << __func__ << ": sending pulls " << i->second
<< " to osd." << i->first << dendl;
MOSDPGPull *msg = new MOSDPGPull();
msg->from = parent->whoami_shard();
msg->set_priority(prio);
msg->pgid = get_parent()->primary_spg_t();
msg->map_epoch = get_osdmap_epoch();
msg->min_epoch = get_parent()->get_last_peering_reset_epoch();
msg->set_pulls(std::move(i->second));
msg->compute_cost(cct);
get_parent()->send_message_osd_cluster(msg, con);
}
}
int ReplicatedBackend::build_push_op(const ObjectRecoveryInfo &recovery_info,
const ObjectRecoveryProgress &progress,
ObjectRecoveryProgress *out_progress,
PushOp *out_op,
object_stat_sum_t *stat,
bool cache_dont_need)
{
ObjectRecoveryProgress _new_progress;
if (!out_progress)
out_progress = &_new_progress;
ObjectRecoveryProgress &new_progress = *out_progress;
new_progress = progress;
dout(7) << __func__ << " " << recovery_info.soid
<< " v " << recovery_info.version
<< " size " << recovery_info.size
<< " recovery_info: " << recovery_info
<< dendl;
eversion_t v = recovery_info.version;
object_info_t oi;
if (progress.first) {
int r = store->omap_get_header(ch, ghobject_t(recovery_info.soid), &out_op->omap_header);
if (r < 0) {
dout(1) << __func__ << " get omap header failed: " << cpp_strerror(-r) << dendl;
return r;
}
r = store->getattrs(ch, ghobject_t(recovery_info.soid), out_op->attrset);
if (r < 0) {
dout(1) << __func__ << " getattrs failed: " << cpp_strerror(-r) << dendl;
return r;
}
// Debug
try {
oi.decode(out_op->attrset[OI_ATTR]);
} catch (...) {
dout(0) << __func__ << ": bad object_info_t: " << recovery_info.soid << dendl;
return -EINVAL;
}
// If requestor didn't know the version, use ours
if (v == eversion_t()) {
v = oi.version;
} else if (oi.version != v) {
get_parent()->clog_error() << get_info().pgid << " push "
<< recovery_info.soid << " v "
<< recovery_info.version
<< " failed because local copy is "
<< oi.version;
return -EINVAL;
}
new_progress.first = false;
}
// Once we provide the version subsequent requests will have it, so
// at this point it must be known.
ceph_assert(v != eversion_t());
uint64_t available = cct->_conf->osd_recovery_max_chunk;
if (!progress.omap_complete) {
ObjectMap::ObjectMapIterator iter =
store->get_omap_iterator(ch,
ghobject_t(recovery_info.soid));
ceph_assert(iter);
for (iter->lower_bound(progress.omap_recovered_to);
iter->valid();
iter->next()) {
if (!out_op->omap_entries.empty() &&
((cct->_conf->osd_recovery_max_omap_entries_per_chunk > 0 &&
out_op->omap_entries.size() >= cct->_conf->osd_recovery_max_omap_entries_per_chunk) ||
available <= iter->key().size() + iter->value().length()))
break;
out_op->omap_entries.insert(make_pair(iter->key(), iter->value()));
if ((iter->key().size() + iter->value().length()) <= available)
available -= (iter->key().size() + iter->value().length());
else
available = 0;
}
if (!iter->valid())
new_progress.omap_complete = true;
else
new_progress.omap_recovered_to = iter->key();
}
if (available > 0) {
if (!recovery_info.copy_subset.empty()) {
interval_set<uint64_t> copy_subset = recovery_info.copy_subset;
map<uint64_t, uint64_t> m;
int r = store->fiemap(ch, ghobject_t(recovery_info.soid), 0,
copy_subset.range_end(), m);
if (r >= 0) {
interval_set<uint64_t> fiemap_included(std::move(m));
copy_subset.intersection_of(fiemap_included);
} else {
// intersection of copy_subset and empty interval_set would be empty anyway
copy_subset.clear();
}
out_op->data_included.span_of(copy_subset, progress.data_recovered_to,
available);
// zero filled section, skip to end!
if (out_op->data_included.empty() ||
out_op->data_included.range_end() == copy_subset.range_end())
new_progress.data_recovered_to = recovery_info.copy_subset.range_end();
else
new_progress.data_recovered_to = out_op->data_included.range_end();
}
} else {
out_op->data_included.clear();
}
auto origin_size = out_op->data_included.size();
bufferlist bit;
int r = store->readv(ch, ghobject_t(recovery_info.soid),
out_op->data_included, bit,
cache_dont_need ? CEPH_OSD_OP_FLAG_FADVISE_DONTNEED: 0);
if (cct->_conf->osd_debug_random_push_read_error &&
(rand() % (int)(cct->_conf->osd_debug_random_push_read_error * 100.0)) == 0) {
dout(0) << __func__ << ": inject EIO " << recovery_info.soid << dendl;
r = -EIO;
}
if (r < 0) {
return r;
}
if (out_op->data_included.size() != origin_size) {
dout(10) << __func__ << " some extents get pruned "
<< out_op->data_included.size() << "/" << origin_size
<< dendl;
new_progress.data_complete = true;
}
out_op->data.claim_append(bit);
if (progress.first && !out_op->data_included.empty() &&
out_op->data_included.begin().get_start() == 0 &&
out_op->data.length() == oi.size && oi.is_data_digest()) {
uint32_t crc = out_op->data.crc32c(-1);
if (oi.data_digest != crc) {
dout(0) << __func__ << " " << coll << std::hex
<< " full-object read crc 0x" << crc
<< " != expected 0x" << oi.data_digest
<< std::dec << " on " << recovery_info.soid << dendl;
return -EIO;
}
}
if (new_progress.is_complete(recovery_info)) {
new_progress.data_complete = true;
if (stat) {
stat->num_objects_recovered++;
if (get_parent()->pg_is_repair())
stat->num_objects_repaired++;
}
} else if (progress.first && progress.omap_complete) {
    // Even if the omap is unchanged, we still need to recover the omap when
    // recovery cannot be completed in a single push
new_progress.omap_complete = false;
}
if (stat) {
stat->num_keys_recovered += out_op->omap_entries.size();
stat->num_bytes_recovered += out_op->data.length();
get_parent()->get_logger()->inc(l_osd_rbytes, out_op->omap_entries.size() + out_op->data.length());
}
get_parent()->get_logger()->inc(l_osd_push);
get_parent()->get_logger()->inc(l_osd_push_outb, out_op->data.length());
// send
out_op->version = v;
out_op->soid = recovery_info.soid;
out_op->recovery_info = recovery_info;
out_op->after_progress = new_progress;
out_op->before_progress = progress;
return 0;
}
void ReplicatedBackend::prep_push_op_blank(const hobject_t& soid, PushOp *op)
{
op->recovery_info.version = eversion_t();
op->version = eversion_t();
op->soid = soid;
}
bool ReplicatedBackend::handle_push_reply(
pg_shard_t peer, const PushReplyOp &op, PushOp *reply)
{
const hobject_t &soid = op.soid;
if (pushing.count(soid) == 0) {
dout(10) << "huh, i wasn't pushing " << soid << " to osd." << peer
<< ", or anybody else"
<< dendl;
return false;
} else if (pushing[soid].count(peer) == 0) {
dout(10) << "huh, i wasn't pushing " << soid << " to osd." << peer
<< dendl;
return false;
} else {
push_info_t *push_info = &pushing[soid][peer];
bool error = pushing[soid].begin()->second.recovery_progress.error;
if (!push_info->recovery_progress.data_complete && !error) {
dout(10) << " pushing more from, "
<< push_info->recovery_progress.data_recovered_to
<< " of " << push_info->recovery_info.copy_subset << dendl;
ObjectRecoveryProgress new_progress;
int r = build_push_op(
push_info->recovery_info,
push_info->recovery_progress, &new_progress, reply,
&(push_info->stat));
// Handle the case of a read error right after we wrote, which is
// hopefully extremely rare.
if (r < 0) {
dout(5) << __func__ << ": oid " << soid << " error " << r << dendl;
error = true;
goto done;
}
push_info->recovery_progress = new_progress;
return true;
} else {
// done!
done:
if (!error)
get_parent()->on_peer_recover( peer, soid, push_info->recovery_info);
get_parent()->release_locks(push_info->lock_manager);
object_stat_sum_t stat = push_info->stat;
eversion_t v = push_info->recovery_info.version;
pushing[soid].erase(peer);
push_info = nullptr;
if (pushing[soid].empty()) {
if (!error)
get_parent()->on_global_recover(soid, stat, false);
else
get_parent()->on_failed_pull(
std::set<pg_shard_t>{ get_parent()->whoami_shard() },
soid,
v);
pushing.erase(soid);
} else {
// This looks weird, but we erased the current peer and need to remember
// the error on any other one, while getting more acks.
if (error)
pushing[soid].begin()->second.recovery_progress.error = true;
dout(10) << "pushed " << soid << ", still waiting for push ack from "
<< pushing[soid].size() << " others" << dendl;
}
return false;
}
}
}
void ReplicatedBackend::handle_pull(pg_shard_t peer, PullOp &op, PushOp *reply)
{
const hobject_t &soid = op.soid;
struct stat st;
int r = store->stat(ch, ghobject_t(soid), &st);
if (r != 0) {
get_parent()->clog_error() << get_info().pgid << " "
<< peer << " tried to pull " << soid
<< " but got " << cpp_strerror(-r);
prep_push_op_blank(soid, reply);
} else {
ObjectRecoveryInfo &recovery_info = op.recovery_info;
ObjectRecoveryProgress &progress = op.recovery_progress;
if (progress.first && recovery_info.size == ((uint64_t)-1)) {
// Adjust size and copy_subset
recovery_info.size = st.st_size;
if (st.st_size) {
interval_set<uint64_t> object_range;
object_range.insert(0, st.st_size);
recovery_info.copy_subset.intersection_of(object_range);
} else {
recovery_info.copy_subset.clear();
}
assert(recovery_info.clone_subset.empty());
}
r = build_push_op(recovery_info, progress, 0, reply);
if (r < 0)
prep_push_op_blank(soid, reply);
}
}
/**
* trim received data to remove what we don't want
*
* @param copy_subset intervals we want
 * @param intervals_received intervals we got
 * @param data_received data we got
* @param intervals_usable intervals we want to keep
* @param data_usable matching data we want to keep
*/
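// A small worked example (illustrative values): with copy_subset=[0,0x1000)
// and intervals_received=[0,0x2000), only [0,0x1000) survives into
// *intervals_usable, and *data_usable receives the first 0x1000 bytes of
// data_received; the remainder of the received buffer is dropped.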
void ReplicatedBackend::trim_pushed_data(
const interval_set<uint64_t> ©_subset,
const interval_set<uint64_t> &intervals_received,
bufferlist data_received,
interval_set<uint64_t> *intervals_usable,
bufferlist *data_usable)
{
if (intervals_received.subset_of(copy_subset)) {
*intervals_usable = intervals_received;
*data_usable = data_received;
return;
}
intervals_usable->intersection_of(copy_subset,
intervals_received);
uint64_t off = 0;
for (interval_set<uint64_t>::const_iterator p = intervals_received.begin();
p != intervals_received.end();
++p) {
interval_set<uint64_t> x;
x.insert(p.get_start(), p.get_len());
x.intersection_of(copy_subset);
for (interval_set<uint64_t>::const_iterator q = x.begin();
q != x.end();
++q) {
bufferlist sub;
uint64_t data_off = off + (q.get_start() - p.get_start());
sub.substr_of(data_received, data_off, q.get_len());
data_usable->claim_append(sub);
}
off += p.get_len();
}
}
void ReplicatedBackend::_failed_pull(pg_shard_t from, const hobject_t &soid)
{
dout(20) << __func__ << ": " << soid << " from " << from << dendl;
auto it = pulling.find(soid);
assert(it != pulling.end());
get_parent()->on_failed_pull(
{ from },
soid,
it->second.recovery_info.version);
clear_pull(it);
}
void ReplicatedBackend::clear_pull_from(
map<hobject_t, pull_info_t>::iterator piter)
{
auto from = piter->second.from;
pull_from_peer[from].erase(piter->second.soid);
if (pull_from_peer[from].empty())
pull_from_peer.erase(from);
}
void ReplicatedBackend::clear_pull(
map<hobject_t, pull_info_t>::iterator piter,
bool clear_pull_from_peer)
{
if (clear_pull_from_peer) {
clear_pull_from(piter);
}
get_parent()->release_locks(piter->second.lock_manager);
pulling.erase(piter);
}
int ReplicatedBackend::start_pushes(
const hobject_t &soid,
ObjectContextRef obc,
RPGHandle *h)
{
list< map<pg_shard_t, pg_missing_t>::const_iterator > shards;
dout(20) << __func__ << " soid " << soid << dendl;
// who needs it?
ceph_assert(get_parent()->get_acting_recovery_backfill_shards().size() > 0);
for (set<pg_shard_t>::iterator i =
get_parent()->get_acting_recovery_backfill_shards().begin();
i != get_parent()->get_acting_recovery_backfill_shards().end();
++i) {
if (*i == get_parent()->whoami_shard()) continue;
pg_shard_t peer = *i;
map<pg_shard_t, pg_missing_t>::const_iterator j =
get_parent()->get_shard_missing().find(peer);
ceph_assert(j != get_parent()->get_shard_missing().end());
if (j->second.is_missing(soid)) {
shards.push_back(j);
}
}
  // If more than one read will occur, ignore any request not to cache
bool cache = shards.size() == 1 ? h->cache_dont_need : false;
for (auto j : shards) {
pg_shard_t peer = j->first;
h->pushes[peer].push_back(PushOp());
int r = prep_push_to_replica(obc, soid, peer,
&(h->pushes[peer].back()), cache);
if (r < 0) {
// Back out all failed reads
for (auto k : shards) {
pg_shard_t p = k->first;
dout(10) << __func__ << " clean up peer " << p << dendl;
h->pushes[p].pop_back();
if (p == peer) break;
}
return r;
}
}
return shards.size();
}
| 74,337 | 29.554048 | 107 | cc |
null | ceph-main/src/osd/ReplicatedBackend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef REPBACKEND_H
#define REPBACKEND_H
#include "PGBackend.h"
struct C_ReplicatedBackend_OnPullComplete;
class ReplicatedBackend : public PGBackend {
struct RPGHandle : public PGBackend::RecoveryHandle {
std::map<pg_shard_t, std::vector<PushOp> > pushes;
std::map<pg_shard_t, std::vector<PullOp> > pulls;
};
friend struct C_ReplicatedBackend_OnPullComplete;
public:
ReplicatedBackend(
PGBackend::Listener *pg,
const coll_t &coll,
ObjectStore::CollectionHandle &ch,
ObjectStore *store,
CephContext *cct);
/// @see PGBackend::open_recovery_op
RPGHandle *_open_recovery_op() {
return new RPGHandle();
}
PGBackend::RecoveryHandle *open_recovery_op() override {
return _open_recovery_op();
}
/// @see PGBackend::run_recovery_op
void run_recovery_op(
PGBackend::RecoveryHandle *h,
int priority) override;
/// @see PGBackend::recover_object
int recover_object(
const hobject_t &hoid,
eversion_t v,
ObjectContextRef head,
ObjectContextRef obc,
RecoveryHandle *h
) override;
void check_recovery_sources(const OSDMapRef& osdmap) override;
bool can_handle_while_inactive(OpRequestRef op) override;
/// @see PGBackend::handle_message
bool _handle_message(
OpRequestRef op
) override;
void on_change() override;
void clear_recovery_state() override;
class RPCRecPred : public IsPGRecoverablePredicate {
public:
bool operator()(const std::set<pg_shard_t> &have) const override {
return !have.empty();
}
};
IsPGRecoverablePredicate *get_is_recoverable_predicate() const override {
return new RPCRecPred;
}
class RPCReadPred : public IsPGReadablePredicate {
pg_shard_t whoami;
public:
explicit RPCReadPred(pg_shard_t whoami) : whoami(whoami) {}
bool operator()(const std::set<pg_shard_t> &have) const override {
return have.count(whoami);
}
};
IsPGReadablePredicate *get_is_readable_predicate() const override {
return new RPCReadPred(get_parent()->whoami_shard());
}
void dump_recovery_info(ceph::Formatter *f) const override {
{
f->open_array_section("pull_from_peer");
for (const auto& i : pull_from_peer) {
f->open_object_section("pulling_from");
f->dump_stream("pull_from") << i.first;
{
f->open_array_section("pulls");
for (const auto& j : i.second) {
f->open_object_section("pull_info");
ceph_assert(pulling.count(j));
pulling.find(j)->second.dump(f);
f->close_section();
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
{
f->open_array_section("pushing");
for(const auto& i : pushing) {
f->open_object_section("object");
f->dump_stream("pushing") << i.first;
{
f->open_array_section("pushing_to");
for (const auto& j : i.second) {
f->open_object_section("push_progress");
f->dump_stream("pushing_to") << j.first;
{
f->open_object_section("push_info");
j.second.dump(f);
f->close_section();
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
}
int objects_read_sync(
const hobject_t &hoid,
uint64_t off,
uint64_t len,
uint32_t op_flags,
ceph::buffer::list *bl) override;
int objects_readv_sync(
const hobject_t &hoid,
std::map<uint64_t, uint64_t>&& m,
uint32_t op_flags,
ceph::buffer::list *bl) override;
void objects_read_async(
const hobject_t &hoid,
const std::list<std::pair<boost::tuple<uint64_t, uint64_t, uint32_t>,
std::pair<ceph::buffer::list*, Context*> > > &to_read,
Context *on_complete,
bool fast_read = false) override;
private:
// push
struct push_info_t {
ObjectRecoveryProgress recovery_progress;
ObjectRecoveryInfo recovery_info;
ObjectContextRef obc;
object_stat_sum_t stat;
ObcLockManager lock_manager;
void dump(ceph::Formatter *f) const {
{
f->open_object_section("recovery_progress");
recovery_progress.dump(f);
f->close_section();
}
{
f->open_object_section("recovery_info");
recovery_info.dump(f);
f->close_section();
}
}
};
std::map<hobject_t, std::map<pg_shard_t, push_info_t>> pushing;
// pull
struct pull_info_t {
pg_shard_t from;
hobject_t soid;
ObjectRecoveryProgress recovery_progress;
ObjectRecoveryInfo recovery_info;
ObjectContextRef head_ctx;
ObjectContextRef obc;
object_stat_sum_t stat;
bool cache_dont_need;
ObcLockManager lock_manager;
void dump(ceph::Formatter *f) const {
{
f->open_object_section("recovery_progress");
recovery_progress.dump(f);
f->close_section();
}
{
f->open_object_section("recovery_info");
recovery_info.dump(f);
f->close_section();
}
}
bool is_complete() const {
return recovery_progress.is_complete(recovery_info);
}
};
std::map<hobject_t, pull_info_t> pulling;
// Reverse mapping from osd peer to objects being pulled from that peer
std::map<pg_shard_t, std::set<hobject_t> > pull_from_peer;
void clear_pull(
std::map<hobject_t, pull_info_t>::iterator piter,
bool clear_pull_from_peer = true);
void clear_pull_from(
std::map<hobject_t, pull_info_t>::iterator piter);
void _do_push(OpRequestRef op);
void _do_pull_response(OpRequestRef op);
void do_push(OpRequestRef op) {
if (is_primary()) {
_do_pull_response(op);
} else {
_do_push(op);
}
}
void do_pull(OpRequestRef op);
void do_push_reply(OpRequestRef op);
bool handle_push_reply(pg_shard_t peer, const PushReplyOp &op, PushOp *reply);
void handle_pull(pg_shard_t peer, PullOp &op, PushOp *reply);
struct pull_complete_info {
hobject_t hoid;
object_stat_sum_t stat;
};
bool handle_pull_response(
pg_shard_t from, const PushOp &op, PullOp *response,
std::list<pull_complete_info> *to_continue,
ObjectStore::Transaction *t);
void handle_push(pg_shard_t from, const PushOp &op, PushReplyOp *response,
ObjectStore::Transaction *t, bool is_repair);
static void trim_pushed_data(const interval_set<uint64_t> ©_subset,
const interval_set<uint64_t> &intervals_received,
ceph::buffer::list data_received,
interval_set<uint64_t> *intervals_usable,
ceph::buffer::list *data_usable);
void _failed_pull(pg_shard_t from, const hobject_t &soid);
void send_pushes(int prio, std::map<pg_shard_t, std::vector<PushOp> > &pushes);
void prep_push_op_blank(const hobject_t& soid, PushOp *op);
void send_pulls(
int priority,
std::map<pg_shard_t, std::vector<PullOp> > &pulls);
int build_push_op(const ObjectRecoveryInfo &recovery_info,
const ObjectRecoveryProgress &progress,
ObjectRecoveryProgress *out_progress,
PushOp *out_op,
object_stat_sum_t *stat = 0,
bool cache_dont_need = true);
void submit_push_data(const ObjectRecoveryInfo &recovery_info,
bool first,
bool complete,
bool clear_omap,
bool cache_dont_need,
interval_set<uint64_t> &data_zeros,
const interval_set<uint64_t> &intervals_included,
ceph::buffer::list data_included,
ceph::buffer::list omap_header,
const std::map<std::string, ceph::buffer::list, std::less<>> &attrs,
const std::map<std::string, ceph::buffer::list> &omap_entries,
ObjectStore::Transaction *t);
void submit_push_complete(const ObjectRecoveryInfo &recovery_info,
ObjectStore::Transaction *t);
void calc_clone_subsets(
SnapSet& snapset, const hobject_t& poid, const pg_missing_t& missing,
const hobject_t &last_backfill,
interval_set<uint64_t>& data_subset,
std::map<hobject_t, interval_set<uint64_t>>& clone_subsets,
ObcLockManager &lock_manager);
void prepare_pull(
eversion_t v,
const hobject_t& soid,
ObjectContextRef headctx,
RPGHandle *h);
int start_pushes(
const hobject_t &soid,
ObjectContextRef obj,
RPGHandle *h);
int prep_push_to_replica(
ObjectContextRef obc, const hobject_t& soid, pg_shard_t peer,
PushOp *pop, bool cache_dont_need = true);
int prep_push(
ObjectContextRef obc,
const hobject_t& oid, pg_shard_t dest,
PushOp *op,
bool cache_dont_need);
int prep_push(
ObjectContextRef obc,
const hobject_t& soid, pg_shard_t peer,
eversion_t version,
interval_set<uint64_t> &data_subset,
std::map<hobject_t, interval_set<uint64_t>>& clone_subsets,
PushOp *op,
bool cache,
ObcLockManager &&lock_manager);
void calc_head_subsets(
ObjectContextRef obc, SnapSet& snapset, const hobject_t& head,
const pg_missing_t& missing,
const hobject_t &last_backfill,
interval_set<uint64_t>& data_subset,
std::map<hobject_t, interval_set<uint64_t>>& clone_subsets,
ObcLockManager &lock_manager);
ObjectRecoveryInfo recalc_subsets(
const ObjectRecoveryInfo& recovery_info,
SnapSetContext *ssc,
ObcLockManager &lock_manager);
/**
* Client IO
*/
struct InProgressOp : public RefCountedObject {
ceph_tid_t tid;
std::set<pg_shard_t> waiting_for_commit;
Context *on_commit;
OpRequestRef op;
eversion_t v;
bool done() const {
return waiting_for_commit.empty();
}
private:
FRIEND_MAKE_REF(InProgressOp);
InProgressOp(ceph_tid_t tid, Context *on_commit, OpRequestRef op, eversion_t v)
:
tid(tid), on_commit(on_commit),
op(op), v(v) {}
};
std::map<ceph_tid_t, ceph::ref_t<InProgressOp>> in_progress_ops;
public:
friend class C_OSD_OnOpCommit;
void call_write_ordered(std::function<void(void)> &&cb) override {
// ReplicatedBackend submits writes inline in submit_transaction, so
// we can just call the callback.
cb();
}
void submit_transaction(
const hobject_t &hoid,
const object_stat_sum_t &delta_stats,
const eversion_t &at_version,
PGTransactionUPtr &&t,
const eversion_t &trim_to,
const eversion_t &min_last_complete_ondisk,
std::vector<pg_log_entry_t>&& log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
Context *on_all_commit,
ceph_tid_t tid,
osd_reqid_t reqid,
OpRequestRef op
) override;
private:
Message * generate_subop(
const hobject_t &soid,
const eversion_t &at_version,
ceph_tid_t tid,
osd_reqid_t reqid,
eversion_t pg_trim_to,
eversion_t min_last_complete_ondisk,
hobject_t new_temp_oid,
hobject_t discard_temp_oid,
const ceph::buffer::list &log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
ObjectStore::Transaction &op_t,
pg_shard_t peer,
const pg_info_t &pinfo);
void issue_op(
const hobject_t &soid,
const eversion_t &at_version,
ceph_tid_t tid,
osd_reqid_t reqid,
eversion_t pg_trim_to,
eversion_t min_last_complete_ondisk,
hobject_t new_temp_oid,
hobject_t discard_temp_oid,
const std::vector<pg_log_entry_t> &log_entries,
std::optional<pg_hit_set_history_t> &hset_history,
InProgressOp *op,
ObjectStore::Transaction &op_t);
void op_commit(const ceph::ref_t<InProgressOp>& op);
void do_repop_reply(OpRequestRef op);
void do_repop(OpRequestRef op);
struct RepModify {
OpRequestRef op;
bool committed;
int ackerosd;
eversion_t last_complete;
epoch_t epoch_started;
ObjectStore::Transaction opt, localt;
RepModify() : committed(false), ackerosd(-1),
epoch_started(0) {}
};
typedef std::shared_ptr<RepModify> RepModifyRef;
struct C_OSD_RepModifyCommit;
void repop_commit(RepModifyRef rm);
bool auto_repair_supported() const override { return store->has_builtin_csum(); }
int be_deep_scrub(
const hobject_t &poid,
ScrubMap &map,
ScrubMapBuilder &pos,
ScrubMap::object &o) override;
uint64_t be_get_ondisk_size(uint64_t logical_size) const final {
return logical_size;
}
};
#endif
| 12,369 | 27.634259 | 83 | h |
null | ceph-main/src/osd/Session.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "PG.h"
#include "Session.h"
#include "common/debug.h"
#define dout_context cct
#define dout_subsys ceph_subsys_osd
using std::map;
using std::set;
void Session::clear_backoffs()
{
map<spg_t,map<hobject_t,set<ceph::ref_t<Backoff>>>> ls;
{
std::lock_guard l(backoff_lock);
ls.swap(backoffs);
backoff_count = 0;
}
for (auto& i : ls) {
for (auto& p : i.second) {
for (auto& b : p.second) {
std::lock_guard l(b->lock);
if (b->pg) {
ceph_assert(b->session == this);
ceph_assert(b->is_new() || b->is_acked());
b->pg->rm_backoff(b);
b->pg.reset();
b->session.reset();
} else if (b->session) {
ceph_assert(b->session == this);
ceph_assert(b->is_deleting());
b->session.reset();
}
}
}
}
}
void Session::ack_backoff(
CephContext *cct,
spg_t pgid,
uint64_t id,
const hobject_t& begin,
const hobject_t& end)
{
std::lock_guard l(backoff_lock);
auto p = backoffs.find(pgid);
if (p == backoffs.end()) {
dout(20) << __func__ << " " << pgid << " " << id << " [" << begin << ","
<< end << ") pg not found" << dendl;
return;
}
auto q = p->second.find(begin);
if (q == p->second.end()) {
dout(20) << __func__ << " " << pgid << " " << id << " [" << begin << ","
<< end << ") begin not found" << dendl;
return;
}
for (auto i = q->second.begin(); i != q->second.end(); ++i) {
Backoff *b = (*i).get();
if (b->id == id) {
if (b->is_new()) {
b->state = Backoff::STATE_ACKED;
dout(20) << __func__ << " now " << *b << dendl;
} else if (b->is_deleting()) {
dout(20) << __func__ << " deleting " << *b << dendl;
q->second.erase(i);
--backoff_count;
}
break;
}
}
if (q->second.empty()) {
dout(20) << __func__ << " clearing begin bin " << q->first << dendl;
p->second.erase(q);
if (p->second.empty()) {
dout(20) << __func__ << " clearing pg bin " << p->first << dendl;
backoffs.erase(p);
}
}
ceph_assert(!backoff_count == backoffs.empty());
}
bool Session::check_backoff(
CephContext *cct, spg_t pgid, const hobject_t& oid, const Message *m)
{
auto b = have_backoff(pgid, oid);
if (b) {
dout(10) << __func__ << " session " << this << " has backoff " << *b
<< " for " << *m << dendl;
ceph_assert(!b->is_acked() || !g_conf()->osd_debug_crash_on_ignored_backoff);
return true;
}
// we may race with ms_handle_reset. it clears session->con before removing
// backoffs, so if we see con is cleared here we have to abort this
// request.
if (!con) {
dout(10) << __func__ << " session " << this << " disconnected" << dendl;
return true;
}
return false;
}
| 2,776 | 24.953271 | 81 | cc |
null | ceph-main/src/osd/Session.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_SESSION_H
#define CEPH_OSD_SESSION_H
#include "common/RefCountedObj.h"
#include "common/ceph_mutex.h"
#include "global/global_context.h"
#include "include/spinlock.h"
#include "OSDCap.h"
#include "Watch.h"
#include "OSDMap.h"
#include "PeeringState.h"
//#define PG_DEBUG_REFS
class PG;
#ifdef PG_DEBUG_REFS
#include "common/tracked_int_ptr.hpp"
typedef TrackedIntPtr<PG> PGRef;
#else
typedef boost::intrusive_ptr<PG> PGRef;
#endif
/*
* A Backoff represents one instance of either a PG or an OID
* being plugged at the client. It's refcounted and linked from
* the PG {pg_oid}_backoffs map and from the client Session
* object.
*
 * The Backoff has a lock that protects its internal fields.
*
 * The PG has a backoff_lock that protects its maps to Backoffs.
* This lock is *inside* of Backoff::lock.
*
 * The Session has a backoff_lock that protects its map of pg and
* oid backoffs. This lock is *inside* the Backoff::lock *and*
* PG::backoff_lock.
*
* That's
*
* Backoff::lock
* PG::backoff_lock
* Session::backoff_lock
*
* When the Session goes away, we move our backoff lists aside,
* then we lock each of the Backoffs we
* previously referenced and clear the Session* pointer. If the PG
* is still linked, we unlink it, too.
*
* When the PG clears the backoff, it will send an unblock message
* if the Session* is still non-null, and unlink the session.
*
*/
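/*
 * A minimal sketch (illustrative only) of how that ordering is obeyed when a
 * backoff is torn down while its Backoff::lock is held (the PG's backoff_lock,
 * if needed, is assumed to already be held by the caller):
 *
 *   std::lock_guard l{b->lock};   // Backoff::lock first
 *   b->session->rm_backoff(b);    // takes Session::backoff_lock last
 *   b->session.reset();
 *   b->pg.reset();
 */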
struct Backoff : public RefCountedObject {
enum {
STATE_NEW = 1, ///< backoff in flight to client
STATE_ACKED = 2, ///< backoff acked
STATE_DELETING = 3 ///< backoff deleted, but un-acked
};
std::atomic<int> state = {STATE_NEW};
spg_t pgid; ///< owning pgid
uint64_t id = 0; ///< unique id (within the Session)
bool is_new() const {
return state.load() == STATE_NEW;
}
bool is_acked() const {
return state.load() == STATE_ACKED;
}
bool is_deleting() const {
return state.load() == STATE_DELETING;
}
const char *get_state_name() const {
switch (state.load()) {
case STATE_NEW: return "new";
case STATE_ACKED: return "acked";
case STATE_DELETING: return "deleting";
default: return "???";
}
}
ceph::mutex lock = ceph::make_mutex("Backoff::lock");
// NOTE: the owning PG and session are either
// - *both* set, or
// - both null (teardown), or
// - only session is set (and state == DELETING)
PGRef pg; ///< owning pg
ceph::ref_t<struct Session> session; ///< owning session
hobject_t begin, end; ///< [) range to block, unless ==, then single obj
friend ostream& operator<<(ostream& out, const Backoff& b) {
return out << "Backoff(" << &b << " " << b.pgid << " " << b.id
<< " " << b.get_state_name()
<< " [" << b.begin << "," << b.end << ") "
<< " session " << b.session
<< " pg " << b.pg << ")";
}
private:
FRIEND_MAKE_REF(Backoff);
Backoff(spg_t pgid, PGRef pg, ceph::ref_t<Session> s,
uint64_t i,
const hobject_t& b, const hobject_t& e)
: RefCountedObject(g_ceph_context),
pgid(pgid),
id(i),
pg(pg),
session(std::move(s)),
begin(b),
end(e) {}
};
struct Session : public RefCountedObject {
EntityName entity_name;
OSDCap caps;
ConnectionRef con;
entity_addr_t socket_addr;
WatchConState wstate;
ceph::mutex session_dispatch_lock =
ceph::make_mutex("Session::session_dispatch_lock");
boost::intrusive::list<OpRequest> waiting_on_map;
ceph::spinlock projected_epoch_lock;
epoch_t projected_epoch = 0;
/// protects backoffs; orders inside Backoff::lock *and* PG::backoff_lock
ceph::mutex backoff_lock = ceph::make_mutex("Session::backoff_lock");
  std::atomic<int> backoff_count = {0};  ///< simple count of backoffs
std::map<spg_t, std::map<hobject_t, std::set<ceph::ref_t<Backoff>>>> backoffs;
std::atomic<uint64_t> backoff_seq = {0};
// for heartbeat connections only
int peer = -1;
HeartbeatStampsRef stamps;
entity_addr_t& get_peer_socket_addr() {
return socket_addr;
}
void ack_backoff(
CephContext *cct,
spg_t pgid,
uint64_t id,
const hobject_t& start,
const hobject_t& end);
ceph::ref_t<Backoff> have_backoff(spg_t pgid, const hobject_t& oid) {
if (!backoff_count.load()) {
return nullptr;
}
std::lock_guard l(backoff_lock);
ceph_assert(!backoff_count == backoffs.empty());
auto i = backoffs.find(pgid);
if (i == backoffs.end()) {
return nullptr;
}
auto p = i->second.lower_bound(oid);
if (p != i->second.begin() &&
(p == i->second.end() || p->first > oid)) {
--p;
}
if (p != i->second.end()) {
int r = cmp(oid, p->first);
if (r == 0 || r > 0) {
for (auto& q : p->second) {
if (r == 0 || oid < q->end) {
return &(*q);
}
}
}
}
return nullptr;
}
bool check_backoff(
CephContext *cct, spg_t pgid, const hobject_t& oid, const Message *m);
void add_backoff(ceph::ref_t<Backoff> b) {
std::lock_guard l(backoff_lock);
ceph_assert(!backoff_count == backoffs.empty());
backoffs[b->pgid][b->begin].insert(std::move(b));
++backoff_count;
}
// called by PG::release_*_backoffs and PG::clear_backoffs()
void rm_backoff(const ceph::ref_t<Backoff>& b) {
std::lock_guard l(backoff_lock);
ceph_assert(ceph_mutex_is_locked_by_me(b->lock));
ceph_assert(b->session == this);
auto i = backoffs.find(b->pgid);
if (i != backoffs.end()) {
// may race with clear_backoffs()
auto p = i->second.find(b->begin);
if (p != i->second.end()) {
auto q = p->second.find(b);
if (q != p->second.end()) {
p->second.erase(q);
--backoff_count;
if (p->second.empty()) {
i->second.erase(p);
if (i->second.empty()) {
backoffs.erase(i);
}
}
}
}
}
ceph_assert(!backoff_count == backoffs.empty());
}
void clear_backoffs();
private:
FRIEND_MAKE_REF(Session);
explicit Session(CephContext *cct, Connection *con_) :
RefCountedObject(cct),
con(con_),
socket_addr(con_->get_peer_socket_addr()),
wstate(cct)
{}
};
#endif
| 6,592 | 26.356846 | 80 | h |
null | ceph-main/src/osd/SnapMapReaderI.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file
* \brief Defines the interface for the snap-mapper used by the scrubber.
*/
#include <set>
#include "common/scrub_types.h"
#include "include/expected.hpp"
namespace Scrub {
/*
* snaps-related aux structures:
* the scrub-backend scans the snaps associated with each scrubbed object, and
* fixes corrupted snap-sets.
* The actual access to the PG's snap_mapper, and the actual I/O transactions,
* are performed by the main PgScrubber object.
 * The following aux structures are used to facilitate the required exchanges:
 * - the pre-fix snap-sets are accessed by the scrub-backend, and
 * - a list of fix-orders (either insert or replace operations) is returned.
*/
struct SnapMapReaderI {
struct result_t {
enum class code_t { success, backend_error, not_found, inconsistent };
code_t code{code_t::success};
int backend_error{0}; ///< errno returned by the backend
};
/**
* get SnapMapper's snap-set for a given object
* \returns a set of snaps, or an error code
* \attn: only OBJ_ DB entries are consulted
*/
virtual tl::expected<std::set<snapid_t>, result_t> get_snaps(
const hobject_t& hoid) const = 0;
/**
* get SnapMapper's snap-set for a given object.
* The snaps gleaned from the OBJ_ entry are verified against the
* mapping ('SNA_') entries.
* A mismatch between both sets of entries will result in an error.
* \returns a set of snaps, or an error code.
*/
virtual tl::expected<std::set<snapid_t>, result_t>
get_snaps_check_consistency(const hobject_t& hoid) const = 0;
virtual ~SnapMapReaderI() = default;
};
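/*
 * A minimal usage sketch (illustrative only; 'reader' stands for any
 * SnapMapReaderI implementation, e.g. the SnapMapper):
 *
 *   if (auto snaps = reader.get_snaps_check_consistency(hoid); snaps) {
 *     // *snaps holds the verified std::set<snapid_t>
 *   } else if (snaps.error().code ==
 *              SnapMapReaderI::result_t::code_t::inconsistent) {
 *     // queue a snap_mapper_fix_t (below) so the scrubber can repair it
 *   }
 */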
enum class snap_mapper_op_t {
add,
update,
overwrite, //< the mapper's data is internally inconsistent. Similar
//< to an 'update' operation, but the logs are different.
};
struct snap_mapper_fix_t {
snap_mapper_op_t op;
hobject_t hoid;
std::set<snapid_t> snaps;
std::set<snapid_t> wrong_snaps; // only collected & returned for logging sake
};
} // namespace Scrub
| 2,119 | 29.724638 | 80 | h |
null | ceph-main/src/osd/SnapMapper.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "SnapMapper.h"
#include <fmt/printf.h>
#include <fmt/ranges.h>
#include "global/global_context.h"
#include "osd/osd_types_fmt.h"
#include "SnapMapReaderI.h"
#define dout_context cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix *_dout << "snap_mapper."
using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::decode;
using ceph::encode;
using ceph::timespan_str;
using result_t = Scrub::SnapMapReaderI::result_t;
using code_t = Scrub::SnapMapReaderI::result_t::code_t;
const string SnapMapper::LEGACY_MAPPING_PREFIX = "MAP_";
const string SnapMapper::MAPPING_PREFIX = "SNA_";
const string SnapMapper::OBJECT_PREFIX = "OBJ_";
const char *SnapMapper::PURGED_SNAP_PREFIX = "PSN_";
/*
We have a bidirectional mapping, (1) from each snap+obj to object,
sorted by snapshot, such that we can enumerate to identify all clones
mapped to a particular snapshot, and (2) from object to snaps, so we
can identify which reverse mappings exist for any given object (and,
e.g., clean up on deletion).
"MAP_"
+ ("%016x" % snapid)
+ "_"
+ (".%x" % shard_id)
+ "_"
+ hobject_t::to_str() ("%llx.%8x.%lx.name...." % pool, hash, snap)
-> SnapMapping::Mapping { snap, hoid }
"SNA_"
+ ("%lld" % poolid)
+ "_"
+ ("%016x" % snapid)
+ "_"
+ (".%x" % shard_id)
+ "_"
+ hobject_t::to_str() ("%llx.%8x.%lx.name...." % pool, hash, snap)
-> SnapMapping::Mapping { snap, hoid }
"OBJ_" +
+ (".%x" % shard_id)
+ hobject_t::to_str()
-> SnapMapper::object_snaps { oid, set<snapid_t> }
*/
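/*
  Illustrative example (not an actual stored key): for pool 3 and snap 0x22,
  get_prefix() yields "SNA_3_0000000000000022_", so a per-snap mapping key is

    "SNA_3_0000000000000022_" + shard_prefix + hobject_t::to_str()

  and the matching per-object key is

    "OBJ_" + shard_prefix + hobject_t::to_str()
*/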
#ifdef WITH_SEASTAR
#include "crimson/common/log.h"
#include "crimson/osd/pg_interval_interrupt_condition.h"
template <typename ValuesT = void>
using interruptible_future =
::crimson::interruptible::interruptible_future<
::crimson::osd::IOInterruptCondition, ValuesT>;
using interruptor =
::crimson::interruptible::interruptor<
::crimson::osd::IOInterruptCondition>;
#define CRIMSON_DEBUG(FMT_MSG, ...) crimson::get_logger(ceph_subsys_).debug(FMT_MSG, ##__VA_ARGS__)
int OSDriver::get_keys(
const std::set<std::string> &keys,
std::map<std::string, ceph::buffer::list> *out)
{
CRIMSON_DEBUG("OSDriver::{}:{}", __func__, __LINE__);
using crimson::os::FuturizedStore;
return interruptor::green_get(os->omap_get_values(
ch, hoid, keys
).safe_then([out] (FuturizedStore::Shard::omap_values_t&& vals) {
// just the difference in comparator (`std::less<>` in omap_values_t`)
reinterpret_cast<FuturizedStore::Shard::omap_values_t&>(*out) = std::move(vals);
return 0;
}, FuturizedStore::Shard::read_errorator::all_same_way([] (auto& e) {
assert(e.value() > 0);
return -e.value();
}))); // this requires seastar::thread
CRIMSON_DEBUG("OSDriver::{}:{}", __func__, __LINE__);
}
int OSDriver::get_next(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next)
{
CRIMSON_DEBUG("OSDriver::{}:{}", __func__, __LINE__);
using crimson::os::FuturizedStore;
return interruptor::green_get(os->omap_get_values(
ch, hoid, key
).safe_then_unpack([&key, next] (bool, FuturizedStore::Shard::omap_values_t&& vals) {
CRIMSON_DEBUG("OSDriver::{}:{}", "get_next", __LINE__);
if (auto nit = std::begin(vals); nit == std::end(vals)) {
CRIMSON_DEBUG("OSDriver::{}:{}", "get_next", __LINE__);
return -ENOENT;
} else {
CRIMSON_DEBUG("OSDriver::{}:{}", "get_next", __LINE__);
assert(nit->first > key);
*next = *nit;
return 0;
}
}, FuturizedStore::Shard::read_errorator::all_same_way([] {
CRIMSON_DEBUG("OSDriver::{}:{}", "get_next", __LINE__);
return -EINVAL;
}))); // this requires seastar::thread
CRIMSON_DEBUG("OSDriver::{}:{}", __func__, __LINE__);
}
int OSDriver::get_next_or_current(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next_or_current)
{
CRIMSON_DEBUG("OSDriver::{}:{}", __func__, __LINE__);
using crimson::os::FuturizedStore;
// let's try to get current first
return interruptor::green_get(os->omap_get_values(
ch, hoid, FuturizedStore::Shard::omap_keys_t{key}
).safe_then([&key, next_or_current] (FuturizedStore::Shard::omap_values_t&& vals) {
assert(vals.size() == 1);
*next_or_current = std::make_pair(key, std::move(vals[0]));
return 0;
}, FuturizedStore::Shard::read_errorator::all_same_way(
[next_or_current, &key, this] {
// no current, try next
return get_next(key, next_or_current);
}))); // this requires seastar::thread
CRIMSON_DEBUG("OSDriver::{}:{}", __func__, __LINE__);
}
#else
int OSDriver::get_keys(
const std::set<std::string> &keys,
std::map<std::string, ceph::buffer::list> *out)
{
return os->omap_get_values(ch, hoid, keys, out);
}
int OSDriver::get_next(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next)
{
ObjectMap::ObjectMapIterator iter =
os->get_omap_iterator(ch, hoid);
if (!iter) {
ceph_abort();
return -EINVAL;
}
iter->upper_bound(key);
if (iter->valid()) {
if (next)
*next = make_pair(iter->key(), iter->value());
return 0;
} else {
return -ENOENT;
}
}
int OSDriver::get_next_or_current(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next_or_current)
{
ObjectMap::ObjectMapIterator iter =
os->get_omap_iterator(ch, hoid);
if (!iter) {
ceph_abort();
return -EINVAL;
}
iter->lower_bound(key);
if (iter->valid()) {
if (next_or_current)
*next_or_current = make_pair(iter->key(), iter->value());
return 0;
} else {
return -ENOENT;
}
}
#endif // WITH_SEASTAR
string SnapMapper::get_prefix(int64_t pool, snapid_t snap)
{
static_assert(sizeof(pool) == 8, "assumed by the formatting code");
return fmt::sprintf("%s%lld_%.16X_",
MAPPING_PREFIX,
pool,
snap);
}
string SnapMapper::to_raw_key(
const pair<snapid_t, hobject_t> &in) const
{
return get_prefix(in.second.pool, in.first) + shard_prefix + in.second.to_str();
}
std::string SnapMapper::to_raw_key(snapid_t snap, const hobject_t &clone) const
{
return get_prefix(clone.pool, snap) + shard_prefix + clone.to_str();
}
pair<string, ceph::buffer::list> SnapMapper::to_raw(
const pair<snapid_t, hobject_t> &in) const
{
ceph::buffer::list bl;
encode(Mapping(in), bl);
return make_pair(to_raw_key(in), bl);
}
pair<snapid_t, hobject_t> SnapMapper::from_raw(
const pair<std::string, ceph::buffer::list> &image)
{
using ceph::decode;
Mapping map;
ceph::buffer::list bl(image.second);
auto bp = bl.cbegin();
decode(map, bp);
return make_pair(map.snap, map.hoid);
}
std::pair<snapid_t, hobject_t> SnapMapper::from_raw(
const ceph::buffer::list &image)
{
using ceph::decode;
Mapping map;
auto bp = image.cbegin();
decode(map, bp);
return make_pair(map.snap, map.hoid);
}
bool SnapMapper::is_mapping(const string &to_test)
{
return to_test.substr(0, MAPPING_PREFIX.size()) == MAPPING_PREFIX;
}
string SnapMapper::to_object_key(const hobject_t &hoid) const
{
return OBJECT_PREFIX + shard_prefix + hoid.to_str();
}
void SnapMapper::object_snaps::encode(ceph::buffer::list &bl) const
{
ENCODE_START(1, 1, bl);
encode(oid, bl);
encode(snaps, bl);
ENCODE_FINISH(bl);
}
void SnapMapper::object_snaps::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(oid, bl);
decode(snaps, bl);
DECODE_FINISH(bl);
}
bool SnapMapper::check(const hobject_t &hoid) const
{
if (hoid.match(mask_bits, match)) {
return true;
}
derr << __func__ << " " << hoid << " mask_bits " << mask_bits
<< " match 0x" << std::hex << match << std::dec << " is false"
<< dendl;
return false;
}
int SnapMapper::get_snaps(const hobject_t &oid, object_snaps *out) const
{
auto snaps = get_snaps_common(oid);
if (snaps) {
*out = *snaps;
return 0;
}
switch (auto e = snaps.error(); e.code) {
case code_t::backend_error:
return e.backend_error;
case code_t::not_found:
return -ENOENT;
case code_t::inconsistent:
// As this is a legacy interface, we cannot surprise the user with
// a new error code here.
return -ENOENT;
default:
// Can't happen. Just to keep the compiler happy.
ceph_abort("get_snaps_common() returned invalid error code");
}
}
tl::expected<std::set<snapid_t>, Scrub::SnapMapReaderI::result_t>
SnapMapper::get_snaps(const hobject_t &oid) const
{
auto snaps = get_snaps_common(oid);
if (snaps) {
return snaps->snaps;
}
return tl::unexpected(snaps.error());
}
tl::expected<SnapMapper::object_snaps, Scrub::SnapMapReaderI::result_t>
SnapMapper::get_snaps_common(const hobject_t &oid) const
{
ceph_assert(check(oid));
set<string> keys{to_object_key(oid)};
dout(20) << fmt::format("{}: key string: {} oid:{}", __func__, keys, oid)
<< dendl;
map<string, ceph::buffer::list> got;
int r = backend.get_keys(keys, &got);
if (r < 0) {
dout(10) << __func__ << " " << oid << " got err " << r << dendl;
return tl::unexpected(result_t{code_t::backend_error, r});
}
if (got.empty()) {
dout(10) << __func__ << " " << oid << " got.empty()" << dendl;
return tl::unexpected(result_t{code_t::not_found, -ENOENT});
}
object_snaps out;
auto bp = got.begin()->second.cbegin();
try {
decode(out, bp);
} catch (...) {
dout(1) << __func__ << ": " << oid << " decode failed" << dendl;
return tl::unexpected(result_t{code_t::backend_error, -EIO});
}
dout(20) << __func__ << " " << oid << " " << out.snaps << dendl;
if (out.snaps.empty()) {
dout(1) << __func__ << " " << oid << " empty snapset" << dendl;
ceph_assert(!cct->_conf->osd_debug_verify_snaps);
}
return out;
}
std::set<std::string> SnapMapper::to_raw_keys(
const hobject_t &clone,
const std::set<snapid_t> &snaps) const
{
std::set<std::string> keys;
for (auto snap : snaps) {
keys.insert(to_raw_key(snap, clone));
}
dout(20) << fmt::format(
"{}: clone:{} snaps:{} -> keys: {}", __func__, clone, snaps,
keys)
<< dendl;
return keys;
}
tl::expected<std::set<snapid_t>, result_t>
SnapMapper::get_snaps_check_consistency(const hobject_t &hoid) const
{
// derive the set of snaps from the 'OBJ_' entry
auto obj_snaps = get_snaps(hoid);
if (!obj_snaps) {
return obj_snaps;
}
// make sure we have the expected set of SNA_ entries:
// we have the clone oid and the set of snaps relevant to this clone.
// Let's construct all expected SNA_ key, then fetch them.
auto mapping_keys = to_raw_keys(hoid, *obj_snaps);
map<string, ceph::buffer::list> kvmap;
auto r = backend.get_keys(mapping_keys, &kvmap);
if (r < 0) {
dout(10) << fmt::format(
"{}: backend error ({}) for cobject {}", __func__, r, hoid)
<< dendl;
// that's a backend error, but for the SNA_ entries. Let's treat it as an
// internal consistency error (although a backend error would have made
// sense too).
return tl::unexpected(result_t{code_t::inconsistent, r});
}
std::set<snapid_t> snaps_from_mapping;
for (auto &[k, v] : kvmap) {
dout(20) << __func__ << " " << hoid << " " << k << dendl;
// extract the object ID from the value fetched for an SNA mapping key
auto [sn, obj] = SnapMapper::from_raw(v);
if (obj != hoid) {
dout(1) << fmt::format(
"{}: unexpected object ID {} for key{} (expected {})",
__func__, obj, k, hoid)
<< dendl;
return tl::unexpected(result_t{code_t::inconsistent});
}
snaps_from_mapping.insert(sn);
}
if (snaps_from_mapping != *obj_snaps) {
dout(10) << fmt::format(
"{}: hoid:{} -> mapper internal inconsistency ({} vs {})",
__func__, hoid, *obj_snaps, snaps_from_mapping)
<< dendl;
return tl::unexpected(result_t{code_t::inconsistent});
}
dout(10) << fmt::format(
"{}: snaps for {}: {}", __func__, hoid, snaps_from_mapping)
<< dendl;
return obj_snaps;
}
void SnapMapper::clear_snaps(
const hobject_t &oid,
MapCacher::Transaction<std::string, ceph::buffer::list> *t)
{
dout(20) << __func__ << " " << oid << dendl;
ceph_assert(check(oid));
set<string> to_remove;
to_remove.insert(to_object_key(oid));
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_remove) {
dout(20) << __func__ << " rm " << i << dendl;
}
}
backend.remove_keys(to_remove, t);
}
void SnapMapper::set_snaps(
const hobject_t &oid,
const object_snaps &in,
MapCacher::Transaction<std::string, ceph::buffer::list> *t)
{
ceph_assert(check(oid));
map<string, ceph::buffer::list> to_set;
ceph::buffer::list bl;
encode(in, bl);
to_set[to_object_key(oid)] = bl;
dout(20) << __func__ << " " << oid << " " << in.snaps << dendl;
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_set) {
dout(20) << __func__ << " set " << i.first << dendl;
}
}
backend.set_keys(to_set, t);
}
int SnapMapper::update_snaps(
const hobject_t &oid,
const set<snapid_t> &new_snaps,
const set<snapid_t> *old_snaps_check,
MapCacher::Transaction<std::string, ceph::buffer::list> *t)
{
dout(20) << __func__ << " " << oid << " " << new_snaps
<< " was " << (old_snaps_check ? *old_snaps_check : set<snapid_t>())
<< dendl;
ceph_assert(check(oid));
if (new_snaps.empty())
return remove_oid(oid, t);
object_snaps out;
int r = get_snaps(oid, &out);
// Tolerate missing keys but not disk errors
if (r < 0 && r != -ENOENT)
return r;
if (old_snaps_check)
ceph_assert(out.snaps == *old_snaps_check);
object_snaps in(oid, new_snaps);
set_snaps(oid, in, t);
set<string> to_remove;
for (set<snapid_t>::iterator i = out.snaps.begin();
i != out.snaps.end();
++i) {
if (!new_snaps.count(*i)) {
to_remove.insert(to_raw_key(make_pair(*i, oid)));
}
}
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_remove) {
dout(20) << __func__ << " rm " << i << dendl;
}
}
backend.remove_keys(to_remove, t);
return 0;
}
void SnapMapper::add_oid(
const hobject_t &oid,
const set<snapid_t>& snaps,
MapCacher::Transaction<std::string, ceph::buffer::list> *t)
{
dout(20) << __func__ << " " << oid << " " << snaps << dendl;
ceph_assert(!snaps.empty());
ceph_assert(check(oid));
{
object_snaps out;
int r = get_snaps(oid, &out);
if (r != -ENOENT) {
derr << __func__ << " found existing snaps mapped on " << oid
<< ", removing" << dendl;
ceph_assert(!cct->_conf->osd_debug_verify_snaps);
remove_oid(oid, t);
}
}
object_snaps _snaps(oid, snaps);
set_snaps(oid, _snaps, t);
map<string, ceph::buffer::list> to_add;
for (set<snapid_t>::iterator i = snaps.begin();
i != snaps.end();
++i) {
to_add.insert(to_raw(make_pair(*i, oid)));
}
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_add) {
dout(20) << __func__ << " set " << i.first << dendl;
}
}
backend.set_keys(to_add, t);
}
int SnapMapper::get_next_objects_to_trim(
snapid_t snap,
unsigned max,
vector<hobject_t> *out)
{
ceph_assert(out);
ceph_assert(out->empty());
  // if max were 0 we would return -ENOENT and the caller would mistakenly
  // trim the snaptrim queue
ceph_assert(max > 0);
int r = 0;
/// \todo cache the prefixes-set in update_bits()
for (set<string>::iterator i = prefixes.begin();
i != prefixes.end() && out->size() < max && r == 0;
++i) {
string prefix(get_prefix(pool, snap) + *i);
string pos = prefix;
while (out->size() < max) {
pair<string, ceph::buffer::list> next;
r = backend.get_next(pos, &next);
dout(20) << __func__ << " get_next(" << pos << ") returns " << r
<< " " << next << dendl;
if (r != 0) {
break; // Done
}
if (next.first.substr(0, prefix.size()) !=
prefix) {
break; // Done with this prefix
}
ceph_assert(is_mapping(next.first));
dout(20) << __func__ << " " << next.first << dendl;
pair<snapid_t, hobject_t> next_decoded(from_raw(next));
ceph_assert(next_decoded.first == snap);
ceph_assert(check(next_decoded.second));
out->push_back(next_decoded.second);
pos = next.first;
}
}
if (out->size() == 0) {
return -ENOENT;
} else {
return 0;
}
}
int SnapMapper::remove_oid(
const hobject_t &oid,
MapCacher::Transaction<std::string, ceph::buffer::list> *t)
{
dout(20) << __func__ << " " << oid << dendl;
ceph_assert(check(oid));
return _remove_oid(oid, t);
}
int SnapMapper::_remove_oid(
const hobject_t &oid,
MapCacher::Transaction<std::string, ceph::buffer::list> *t)
{
dout(20) << __func__ << " " << oid << dendl;
object_snaps out;
int r = get_snaps(oid, &out);
if (r < 0)
return r;
clear_snaps(oid, t);
set<string> to_remove;
for (set<snapid_t>::iterator i = out.snaps.begin();
i != out.snaps.end();
++i) {
to_remove.insert(to_raw_key(make_pair(*i, oid)));
}
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (auto& i : to_remove) {
dout(20) << __func__ << " rm " << i << dendl;
}
}
backend.remove_keys(to_remove, t);
return 0;
}
int SnapMapper::get_snaps(
const hobject_t &oid,
std::set<snapid_t> *snaps) const
{
ceph_assert(check(oid));
object_snaps out;
int r = get_snaps(oid, &out);
if (r < 0)
return r;
if (snaps)
snaps->swap(out.snaps);
return 0;
}
// -- purged snaps --
string SnapMapper::make_purged_snap_key(int64_t pool, snapid_t last)
{
return fmt::sprintf("%s_%lld_%016llx",
PURGED_SNAP_PREFIX,
pool,
last);
}
void SnapMapper::make_purged_snap_key_value(
int64_t pool, snapid_t begin, snapid_t end, map<string,ceph::buffer::list> *m)
{
string k = make_purged_snap_key(pool, end - 1);
auto& v = (*m)[k];
ceph::encode(pool, v);
ceph::encode(begin, v);
ceph::encode(end, v);
}
int SnapMapper::_lookup_purged_snap(
CephContext *cct,
OSDriver& backend,
int64_t pool, snapid_t snap,
snapid_t *begin, snapid_t *end)
{
string k = make_purged_snap_key(pool, snap);
std::pair<std::string, ceph::buffer::list> kv;
if (auto ret = backend.get_next_or_current(k, &kv); ret == -ENOENT) {
dout(20) << __func__ << " pool " << pool << " snap " << snap
<< " key '" << k << "' lower_bound not found" << dendl;
return -ENOENT;
}
if (kv.first.find(PURGED_SNAP_PREFIX) != 0) {
dout(20) << __func__ << " pool " << pool << " snap " << snap
<< " key '" << k << "' lower_bound got mismatched prefix '"
<< kv.first << "'" << dendl;
return -ENOENT;
}
ceph::buffer::list v = kv.second;
auto p = v.cbegin();
int64_t gotpool;
decode(gotpool, p);
decode(*begin, p);
decode(*end, p);
if (snap < *begin || snap >= *end) {
dout(20) << __func__ << " pool " << pool << " snap " << snap
<< " found [" << *begin << "," << *end << "), no overlap" << dendl;
return -ENOENT;
}
return 0;
}
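/*
 * Illustrative example of the interval merging below (made-up values): if
 * purged ranges [2,5) and [8,10) are already recorded for a pool and [5,8)
 * is now being recorded, the lookups for snap 4 (begin-1) and snap 8 (end)
 * both hit, so the key for the earlier range's last snap (4) is removed and
 * a single entry covering [2,10) is written under the key for snap 9.
 */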
void SnapMapper::record_purged_snaps(
CephContext *cct,
OSDriver& backend,
OSDriver::OSTransaction&& txn,
map<epoch_t,mempool::osdmap::map<int64_t,snap_interval_set_t>> purged_snaps)
{
dout(10) << __func__ << " purged_snaps " << purged_snaps << dendl;
map<string,ceph::buffer::list> m;
set<string> rm;
for (auto& [epoch, bypool] : purged_snaps) {
// index by (pool, snap)
for (auto& [pool, snaps] : bypool) {
for (auto i = snaps.begin();
i != snaps.end();
++i) {
snapid_t begin = i.get_start();
snapid_t end = i.get_end();
snapid_t before_begin, before_end;
snapid_t after_begin, after_end;
int b = _lookup_purged_snap(cct, backend,
pool, begin - 1, &before_begin, &before_end);
int a = _lookup_purged_snap(cct, backend,
pool, end, &after_begin, &after_end);
if (!b && !a) {
dout(10) << __func__
<< " [" << begin << "," << end << ") - joins ["
<< before_begin << "," << before_end << ") and ["
<< after_begin << "," << after_end << ")" << dendl;
// erase only the begin record; we'll overwrite the end one
rm.insert(make_purged_snap_key(pool, before_end - 1));
make_purged_snap_key_value(pool, before_begin, after_end, &m);
} else if (!b) {
dout(10) << __func__
<< " [" << begin << "," << end << ") - join with earlier ["
<< before_begin << "," << before_end << ")" << dendl;
rm.insert(make_purged_snap_key(pool, before_end - 1));
make_purged_snap_key_value(pool, before_begin, end, &m);
} else if (!a) {
dout(10) << __func__
<< " [" << begin << "," << end << ") - join with later ["
<< after_begin << "," << after_end << ")" << dendl;
// overwrite after record
make_purged_snap_key_value(pool, begin, after_end, &m);
} else {
make_purged_snap_key_value(pool, begin, end, &m);
}
}
}
}
txn.remove_keys(rm);
txn.set_keys(m);
dout(10) << __func__ << " rm " << rm.size() << " keys, set " << m.size()
<< " keys" << dendl;
}
#ifndef WITH_SEASTAR
bool SnapMapper::Scrubber::_parse_p()
{
if (!psit->valid()) {
pool = -1;
return false;
}
if (psit->key().find(PURGED_SNAP_PREFIX) != 0) {
pool = -1;
return false;
}
ceph::buffer::list v = psit->value();
auto p = v.cbegin();
ceph::decode(pool, p);
ceph::decode(begin, p);
ceph::decode(end, p);
dout(20) << __func__ << " purged_snaps pool " << pool
<< " [" << begin << "," << end << ")" << dendl;
psit->next();
return true;
}
bool SnapMapper::Scrubber::_parse_m()
{
if (!mapit->valid()) {
return false;
}
if (mapit->key().find(MAPPING_PREFIX) != 0) {
return false;
}
auto v = mapit->value();
auto p = v.cbegin();
mapping.decode(p);
{
unsigned long long p, s;
long sh;
string k = mapit->key();
int r = sscanf(k.c_str(), "SNA_%lld_%llx.%lx", &p, &s, &sh);
if (r != 1) {
shard = shard_id_t::NO_SHARD;
} else {
shard = shard_id_t(sh);
}
}
dout(20) << __func__ << " mapping pool " << mapping.hoid.pool
<< " snap " << mapping.snap
<< " shard " << shard
<< " " << mapping.hoid << dendl;
mapit->next();
return true;
}
void SnapMapper::Scrubber::run()
{
dout(10) << __func__ << dendl;
psit = store->get_omap_iterator(ch, purged_snaps_hoid);
psit->upper_bound(PURGED_SNAP_PREFIX);
_parse_p();
mapit = store->get_omap_iterator(ch, mapping_hoid);
mapit->upper_bound(MAPPING_PREFIX);
while (_parse_m()) {
// advance to next purged_snaps range?
while (pool >= 0 &&
(mapping.hoid.pool > pool ||
(mapping.hoid.pool == pool && mapping.snap >= end))) {
_parse_p();
}
if (pool < 0) {
dout(10) << __func__ << " passed final purged_snaps interval, rest ok"
<< dendl;
break;
}
if (mapping.hoid.pool < pool ||
mapping.snap < begin) {
// ok
dout(20) << __func__ << " ok " << mapping.hoid
<< " snap " << mapping.snap
<< " precedes pool " << pool
<< " purged_snaps [" << begin << "," << end << ")" << dendl;
} else {
assert(mapping.snap >= begin &&
mapping.snap < end &&
mapping.hoid.pool == pool);
// invalid
dout(10) << __func__ << " stray " << mapping.hoid
<< " snap " << mapping.snap
<< " in pool " << pool
<< " shard " << shard
<< " purged_snaps [" << begin << "," << end << ")" << dendl;
stray.emplace_back(std::tuple<int64_t,snapid_t,uint32_t,shard_id_t>(
pool, mapping.snap, mapping.hoid.get_hash(),
shard
));
}
}
dout(10) << __func__ << " end, found " << stray.size() << " stray" << dendl;
psit = ObjectMap::ObjectMapIterator();
mapit = ObjectMap::ObjectMapIterator();
}
#endif // !WITH_SEASTAR
// -------------------------------------
// legacy conversion/support
string SnapMapper::get_legacy_prefix(snapid_t snap)
{
return fmt::sprintf("%s%.16X_",
LEGACY_MAPPING_PREFIX,
snap);
}
string SnapMapper::to_legacy_raw_key(
const pair<snapid_t, hobject_t> &in)
{
return get_legacy_prefix(in.first) + shard_prefix + in.second.to_str();
}
bool SnapMapper::is_legacy_mapping(const string &to_test)
{
return to_test.substr(0, LEGACY_MAPPING_PREFIX.size()) ==
LEGACY_MAPPING_PREFIX;
}
#ifndef WITH_SEASTAR
/* Octopus modified the SnapMapper key format from
*
* <LEGACY_MAPPING_PREFIX><snapid>_<shardid>_<hobject_t::to_str()>
*
* to
*
* <MAPPING_PREFIX><pool>_<snapid>_<shardid>_<hobject_t::to_str()>
*
* We can't reconstruct the new key format just from the value since the
* Mapping object contains an hobject rather than a ghobject. Instead,
* we exploit the fact that the new format is identical starting at <snapid>.
*
* Note that the original version of this conversion introduced in 94ebe0ea
* had a crucial bug which essentially destroyed legacy keys by mapping
* them to
*
* <MAPPING_PREFIX><poolid>_<snapid>_
*
* without the object-unique suffix.
* See https://tracker.ceph.com/issues/56147
*/
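// Illustrative example of the conversion (shard 1 of an EC pool; prefix
// constants and the hobject encoding are as defined elsewhere in this file):
//
//   legacy key : <LEGACY_MAPPING_PREFIX>0000000000000004_.1_<hobject_t::to_str()>
//   new key    : SNA_<pool>_0000000000000004_.1_<hobject_t::to_str()>
//
// convert_legacy_key() recovers <pool> from the decoded Mapping value and
// re-prefixes everything after the legacy prefix, so the object-unique suffix
// is preserved (unlike the broken conversion noted above).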
std::string SnapMapper::convert_legacy_key(
const std::string& old_key,
const ceph::buffer::list& value)
{
auto old = from_raw(make_pair(old_key, value));
std::string object_suffix = old_key.substr(
SnapMapper::LEGACY_MAPPING_PREFIX.length());
return SnapMapper::MAPPING_PREFIX + std::to_string(old.second.pool)
+ "_" + object_suffix;
}
int SnapMapper::convert_legacy(
CephContext *cct,
ObjectStore *store,
ObjectStore::CollectionHandle& ch,
ghobject_t hoid,
unsigned max)
{
uint64_t n = 0;
ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, hoid);
if (!iter) {
return -EIO;
}
auto start = ceph::mono_clock::now();
iter->upper_bound(SnapMapper::LEGACY_MAPPING_PREFIX);
map<string,ceph::buffer::list> to_set;
while (iter->valid()) {
bool valid = SnapMapper::is_legacy_mapping(iter->key());
if (valid) {
to_set.emplace(
convert_legacy_key(iter->key(), iter->value()),
iter->value());
++n;
iter->next();
}
if (!valid || !iter->valid() || to_set.size() >= max) {
ObjectStore::Transaction t;
t.omap_setkeys(ch->cid, hoid, to_set);
int r = store->queue_transaction(ch, std::move(t));
ceph_assert(r == 0);
to_set.clear();
if (!valid) {
break;
}
dout(10) << __func__ << " converted " << n << " keys" << dendl;
}
}
auto end = ceph::mono_clock::now();
dout(1) << __func__ << " converted " << n << " keys in "
<< timespan_str(end - start) << dendl;
// remove the old keys
{
ObjectStore::Transaction t;
string end = SnapMapper::LEGACY_MAPPING_PREFIX;
++end[end.size()-1]; // turn _ to whatever comes after _
t.omap_rmkeyrange(ch->cid, hoid,
SnapMapper::LEGACY_MAPPING_PREFIX,
end);
int r = store->queue_transaction(ch, std::move(t));
ceph_assert(r == 0);
}
return 0;
}
#endif // !WITH_SEASTAR
| 27,398 | 27.072746 | 99 | cc |
null | ceph-main/src/osd/SnapMapper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef SNAPMAPPER_H
#define SNAPMAPPER_H
#include <cstring>
#include <set>
#include <string>
#include <utility>
#include "common/hobject.h"
#include "common/map_cacher.hpp"
#ifdef WITH_SEASTAR
# include "crimson/os/futurized_store.h"
# include "crimson/os/futurized_collection.h"
#endif
#include "include/buffer.h"
#include "include/encoding.h"
#include "include/object.h"
#include "os/ObjectStore.h"
#include "osd/OSDMap.h"
#include "osd/SnapMapReaderI.h"
class OSDriver : public MapCacher::StoreDriver<std::string, ceph::buffer::list> {
#ifdef WITH_SEASTAR
using ObjectStoreT = crimson::os::FuturizedStore::Shard;
using CollectionHandleT = ObjectStoreT::CollectionRef;
#else
using ObjectStoreT = ObjectStore;
using CollectionHandleT = ObjectStoreT::CollectionHandle;
#endif
ObjectStoreT *os;
CollectionHandleT ch;
ghobject_t hoid;
public:
class OSTransaction : public MapCacher::Transaction<std::string, ceph::buffer::list> {
friend class OSDriver;
coll_t cid;
ghobject_t hoid;
ceph::os::Transaction *t;
OSTransaction(
const coll_t &cid,
const ghobject_t &hoid,
ceph::os::Transaction *t)
: cid(cid), hoid(hoid), t(t) {}
public:
void set_keys(
const std::map<std::string, ceph::buffer::list> &to_set) override {
t->omap_setkeys(cid, hoid, to_set);
}
void remove_keys(
const std::set<std::string> &to_remove) override {
t->omap_rmkeys(cid, hoid, to_remove);
}
void add_callback(
Context *c) override {
t->register_on_applied(c);
}
};
OSTransaction get_transaction(
ceph::os::Transaction *t) const {
return OSTransaction(ch->get_cid(), hoid, t);
}
#ifndef WITH_SEASTAR
OSDriver(ObjectStoreT *os, const coll_t& cid, const ghobject_t &hoid) :
OSDriver(os, os->open_collection(cid), hoid) {}
#endif
OSDriver(ObjectStoreT *os, CollectionHandleT ch, const ghobject_t &hoid) :
os(os),
ch(ch),
hoid(hoid) {}
int get_keys(
const std::set<std::string> &keys,
std::map<std::string, ceph::buffer::list> *out) override;
int get_next(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next) override;
int get_next_or_current(
const std::string &key,
std::pair<std::string, ceph::buffer::list> *next_or_current) override;
};
/**
* SnapMapper
*
* Manages two mappings:
* 1) hobject_t -> {snapid}
* 2) snapid -> {hobject_t}
*
* We accomplish this using two sets of keys:
* 1) OBJECT_PREFIX + obj.str() -> encoding of object_snaps
* 2) MAPPING_PREFIX + poolid + snapid_t + obj.str() -> encoding of std::pair<snapid_t, obj>
*
* The on disk strings and encodings are implemented in to_raw, to_raw_key,
* from_raw, to_object_key.
*
* The object -> {snapid} mapping is primarily included so that the
* SnapMapper state can be verified against the external PG state during
* scrub etc.
*
* The 2) mapping is arranged such that all objects in a particular
* snap will sort together, and so that all objects in a pg for a
* particular snap will group under up to 8 prefixes.
*/
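/*
 * Illustrative key shapes (values made up, formatting details elided):
 *
 *   OBJECT_PREFIX + obj.str()                          -> object_snaps{oid, {4,7}}
 *   MAPPING_PREFIX + "3_0000000000000004_" + obj.str() -> Mapping{4, oid}
 *
 * i.e. get_snaps() answers "which snaps pin this clone" from the first
 * keyspace, while get_next_objects_to_trim() walks the second, where all
 * entries for (pool 3, snap 4) sort together.
 */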
class SnapMapper : public Scrub::SnapMapReaderI {
friend class MapperVerifier; // unit-test support
friend class DirectMapper; // unit-test support
public:
CephContext* cct;
struct object_snaps {
hobject_t oid;
std::set<snapid_t> snaps;
object_snaps(hobject_t oid, const std::set<snapid_t> &snaps)
: oid(oid), snaps(snaps) {}
object_snaps() {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bp);
};
struct Mapping {
snapid_t snap;
hobject_t hoid;
explicit Mapping(const std::pair<snapid_t, hobject_t> &in)
: snap(in.first), hoid(in.second) {}
Mapping() : snap(0) {}
void encode(ceph::buffer::list &bl) const {
ENCODE_START(1, 1, bl);
encode(snap, bl);
encode(hoid, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl) {
DECODE_START(1, bl);
decode(snap, bl);
decode(hoid, bl);
DECODE_FINISH(bl);
}
};
static const std::string LEGACY_MAPPING_PREFIX;
static const std::string MAPPING_PREFIX;
static const std::string OBJECT_PREFIX;
static const char *PURGED_SNAP_EPOCH_PREFIX;
static const char *PURGED_SNAP_PREFIX;
#ifndef WITH_SEASTAR
struct Scrubber {
CephContext *cct;
ObjectStore *store;
ObjectStore::CollectionHandle ch;
ghobject_t mapping_hoid;
ghobject_t purged_snaps_hoid;
ObjectMap::ObjectMapIterator psit;
int64_t pool;
snapid_t begin, end;
bool _parse_p(); ///< advance the purged_snaps pointer
ObjectMap::ObjectMapIterator mapit;
Mapping mapping;
shard_id_t shard;
bool _parse_m(); ///< advance the (object) mapper pointer
std::vector<std::tuple<int64_t, snapid_t, uint32_t, shard_id_t>> stray;
Scrubber(
CephContext *cct,
ObjectStore *store,
ObjectStore::CollectionHandle& ch,
ghobject_t mapping_hoid,
ghobject_t purged_snaps_hoid)
: cct(cct),
store(store),
ch(ch),
mapping_hoid(mapping_hoid),
purged_snaps_hoid(purged_snaps_hoid) {}
void run();
};
static std::string convert_legacy_key(
const std::string& old_key,
const bufferlist& value);
static int convert_legacy(
CephContext *cct,
ObjectStore *store,
ObjectStore::CollectionHandle& ch,
ghobject_t hoid,
unsigned max);
#endif
static void record_purged_snaps(
CephContext *cct,
OSDriver& backend,
OSDriver::OSTransaction&& txn,
std::map<epoch_t,mempool::osdmap::map<int64_t,snap_interval_set_t>> purged_snaps);
private:
static int _lookup_purged_snap(
CephContext *cct,
OSDriver& backend,
int64_t pool, snapid_t snap,
snapid_t *begin, snapid_t *end);
static void make_purged_snap_key_value(
int64_t pool, snapid_t begin,
snapid_t end, std::map<std::string,ceph::buffer::list> *m);
static std::string make_purged_snap_key(int64_t pool, snapid_t last);
  // note: marked 'mutable', as it functions as a cache and is used in some
  // 'const' functions.
mutable MapCacher::MapCacher<std::string, ceph::buffer::list> backend;
static std::string get_legacy_prefix(snapid_t snap);
std::string to_legacy_raw_key(
const std::pair<snapid_t, hobject_t> &to_map);
static bool is_legacy_mapping(const std::string &to_test);
static std::string get_prefix(int64_t pool, snapid_t snap);
std::string to_raw_key(
const std::pair<snapid_t, hobject_t> &to_map) const;
std::string to_raw_key(snapid_t snap, const hobject_t& clone) const;
std::pair<std::string, ceph::buffer::list> to_raw(
const std::pair<snapid_t, hobject_t> &to_map) const;
static bool is_mapping(const std::string &to_test);
static std::pair<snapid_t, hobject_t> from_raw(
const std::pair<std::string, ceph::buffer::list> &image);
static std::pair<snapid_t, hobject_t> from_raw(
const ceph::buffer::list& image);
std::string to_object_key(const hobject_t &hoid) const;
int get_snaps(const hobject_t &oid, object_snaps *out) const;
std::set<std::string> to_raw_keys(
const hobject_t &clone,
const std::set<snapid_t> &snaps) const;
void set_snaps(
const hobject_t &oid,
const object_snaps &out,
MapCacher::Transaction<std::string, ceph::buffer::list> *t);
void clear_snaps(
const hobject_t &oid,
MapCacher::Transaction<std::string, ceph::buffer::list> *t);
// True if hoid belongs in this mapping based on mask_bits and match
bool check(const hobject_t &hoid) const;
int _remove_oid(
const hobject_t &oid, ///< [in] oid to remove
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
);
/// Get snaps (as an 'object_snaps' object) for oid
tl::expected<object_snaps, SnapMapReaderI::result_t> get_snaps_common(
const hobject_t &hoid) const;
public:
static std::string make_shard_prefix(shard_id_t shard) {
if (shard == shard_id_t::NO_SHARD)
return std::string();
char buf[20];
int r = snprintf(buf, sizeof(buf), ".%x", (int)shard);
ceph_assert(r < (int)sizeof(buf));
return std::string(buf, r) + '_';
}
uint32_t mask_bits;
const uint32_t match;
std::string last_key_checked;
const int64_t pool;
const shard_id_t shard;
const std::string shard_prefix;
SnapMapper(
CephContext* cct,
MapCacher::StoreDriver<std::string, ceph::buffer::list> *driver,
uint32_t match, ///< [in] pgid
uint32_t bits, ///< [in] current split bits
int64_t pool, ///< [in] pool
shard_id_t shard ///< [in] shard
)
: cct(cct), backend(driver), mask_bits(bits), match(match), pool(pool),
shard(shard), shard_prefix(make_shard_prefix(shard)) {
update_bits(mask_bits);
}
std::set<std::string> prefixes;
/// Update bits in case of pg split or merge
void update_bits(
uint32_t new_bits ///< [in] new split bits
) {
mask_bits = new_bits;
std::set<std::string> _prefixes = hobject_t::get_prefixes(
mask_bits,
match,
pool);
prefixes.clear();
for (auto i = _prefixes.begin(); i != _prefixes.end(); ++i) {
prefixes.insert(shard_prefix + *i);
}
}
/// Update snaps for oid, empty new_snaps removes the mapping
int update_snaps(
const hobject_t &oid, ///< [in] oid to update
const std::set<snapid_t> &new_snaps, ///< [in] new snap std::set
const std::set<snapid_t> *old_snaps, ///< [in] old snaps (for debugging)
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
    ); ///< @return error, 0 on success
/// Add mapping for oid, must not already be mapped
void add_oid(
const hobject_t &oid, ///< [in] oid to add
const std::set<snapid_t>& new_snaps, ///< [in] snaps
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
);
  /// Returns the next objects (up to max) still mapped to the given snap
int get_next_objects_to_trim(
snapid_t snap, ///< [in] snap to check
unsigned max, ///< [in] max to get
std::vector<hobject_t> *out ///< [out] next objects to trim (must be empty)
); ///< @return error, -ENOENT if no more objects
/// Remove mapping for oid
int remove_oid(
const hobject_t &oid, ///< [in] oid to remove
MapCacher::Transaction<std::string, ceph::buffer::list> *t ///< [out] transaction
); ///< @return error, -ENOENT if the object is not mapped
/// Get snaps for oid
int get_snaps(
const hobject_t &oid, ///< [in] oid to get snaps for
std::set<snapid_t> *snaps ///< [out] snaps
) const; ///< @return error, -ENOENT if oid is not recorded
/// Get snaps for oid - alternative interface
tl::expected<std::set<snapid_t>, SnapMapReaderI::result_t> get_snaps(
const hobject_t &hoid) const final;
/**
* get_snaps_check_consistency
*
* Returns snaps for hoid as in get_snaps(), but additionally validates the
* snap->hobject_t mappings ('SNA_' entries).
*/
tl::expected<std::set<snapid_t>, SnapMapReaderI::result_t>
get_snaps_check_consistency(const hobject_t &hoid) const final;
};
WRITE_CLASS_ENCODER(SnapMapper::object_snaps)
WRITE_CLASS_ENCODER(SnapMapper::Mapping)
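/*
 * Minimal usage sketch (illustrative only; error handling and the ObjectStore
 * plumbing are omitted, and 'store', 'ch', 'snapmapper_oid', 'match', 'bits',
 * 'pool', 'shard' and 'clone_oid' stand in for values the owning PG would
 * supply):
 *
 *   OSDriver driver(store, ch, snapmapper_oid);
 *   SnapMapper mapper(cct, &driver, match, bits, pool, shard);
 *
 *   ceph::os::Transaction t;
 *   auto txn = driver.get_transaction(&t);
 *   mapper.add_oid(clone_oid, {snapid_t(4), snapid_t(7)}, &txn);
 *
 *   std::vector<hobject_t> to_trim;
 *   mapper.get_next_objects_to_trim(snapid_t(4), 16, &to_trim);
 */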
#endif
| 11,687 | 29.83905 | 93 | h |
null | ceph-main/src/osd/TierAgentState.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_TIERAGENT_H
#define CEPH_OSD_TIERAGENT_H
#include <ctime>
#include <list>
#include <map>
#include <utility>
#include "common/Formatter.h"
#include "common/histogram.h"
#include "common/hobject.h"
#include "osd/HitSet.h"
struct TierAgentState {
/// current position iterating across pool
hobject_t position;
/// Count of agent_work since "start" position of object hash space
int started;
hobject_t start;
bool delaying;
/// histogram of ages we've encountered
pow2_hist_t temp_hist;
int hist_age;
/// past HitSet(s) (not current)
std::map<time_t,HitSetRef> hit_set_map;
/// a few recent things we've seen that are clean
std::list<hobject_t> recent_clean;
enum flush_mode_t {
FLUSH_MODE_IDLE, // nothing to flush
    FLUSH_MODE_LOW,   // flush dirty objects at a low speed
    FLUSH_MODE_HIGH,  // flush dirty objects at a high speed
} flush_mode; ///< current flush behavior
static const char *get_flush_mode_name(flush_mode_t m) {
switch (m) {
case FLUSH_MODE_IDLE: return "idle";
case FLUSH_MODE_LOW: return "low";
case FLUSH_MODE_HIGH: return "high";
default: ceph_abort_msg("bad flush mode");
}
}
const char *get_flush_mode_name() const {
return get_flush_mode_name(flush_mode);
}
enum evict_mode_t {
EVICT_MODE_IDLE, // no need to evict anything
EVICT_MODE_SOME, // evict some things as we are near the target
EVICT_MODE_FULL, // evict anything
} evict_mode; ///< current evict behavior
static const char *get_evict_mode_name(evict_mode_t m) {
switch (m) {
case EVICT_MODE_IDLE: return "idle";
case EVICT_MODE_SOME: return "some";
case EVICT_MODE_FULL: return "full";
default: ceph_abort_msg("bad evict mode");
}
}
const char *get_evict_mode_name() const {
return get_evict_mode_name(evict_mode);
}
/// approximate ratio of objects (assuming they are uniformly
  /// distributed) that we should aim to evict.
unsigned evict_effort;
TierAgentState()
: started(0),
delaying(false),
hist_age(0),
flush_mode(FLUSH_MODE_IDLE),
evict_mode(EVICT_MODE_IDLE),
evict_effort(0)
{}
/// false if we have any work to do
bool is_idle() const {
return
delaying ||
(flush_mode == FLUSH_MODE_IDLE &&
evict_mode == EVICT_MODE_IDLE);
}
/// add archived HitSet
void add_hit_set(time_t start, HitSetRef hs) {
hit_set_map.insert(std::make_pair(start, hs));
}
/// remove old/trimmed HitSet
void remove_oldest_hit_set() {
if (!hit_set_map.empty())
hit_set_map.erase(hit_set_map.begin());
}
/// discard all open hit sets
void discard_hit_sets() {
hit_set_map.clear();
}
void dump(ceph::Formatter *f) const {
f->dump_string("flush_mode", get_flush_mode_name());
f->dump_string("evict_mode", get_evict_mode_name());
f->dump_unsigned("evict_effort", evict_effort);
f->dump_stream("position") << position;
f->open_object_section("temp_hist");
temp_hist.dump(f);
f->close_section();
}
};
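/*
 * Illustrative use (not actual agent code): the tiering agent consults this
 * state on each pass, e.g.
 *
 *   if (agent_state.is_idle())
 *     return;                    // nothing to flush or evict right now
 *   if (agent_state.evict_mode == TierAgentState::EVICT_MODE_FULL)
 *     ;                          // evict regardless of object age
 *   dout(20) << "agent modes " << agent_state.get_flush_mode_name()
 *            << "/" << agent_state.get_evict_mode_name() << dendl;
 */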
#endif
| 3,480 | 25.984496 | 72 | h |
null | ceph-main/src/osd/Watch.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "PG.h"
#include "include/types.h"
#include "messages/MWatchNotify.h"
#include <map>
#include "OSD.h"
#include "PrimaryLogPG.h"
#include "Watch.h"
#include "Session.h"
#include "common/config.h"
#define dout_context osd->cct
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
using std::list;
using std::make_pair;
using std::pair;
using std::ostream;
using std::set;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
struct CancelableContext : public Context {
virtual void cancel() = 0;
};
static ostream& _prefix(
std::ostream* _dout,
Notify *notify) {
return notify->gen_dbg_prefix(*_dout);
}
Notify::Notify(
ConnectionRef client,
uint64_t client_gid,
bufferlist &payload,
uint32_t timeout,
uint64_t cookie,
uint64_t notify_id,
uint64_t version,
OSDService *osd)
: client(client),
client_gid(client_gid),
complete(false),
discarded(false),
timed_out(false),
payload(payload),
timeout(timeout),
cookie(cookie),
notify_id(notify_id),
version(version),
osd(osd),
cb(nullptr) {}
NotifyRef Notify::makeNotifyRef(
ConnectionRef client,
uint64_t client_gid,
bufferlist &payload,
uint32_t timeout,
uint64_t cookie,
uint64_t notify_id,
uint64_t version,
OSDService *osd) {
NotifyRef ret(
new Notify(
client, client_gid,
payload, timeout,
cookie, notify_id,
version, osd));
ret->set_self(ret);
return ret;
}
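// Timeout callback for a Notify.  finish() runs on the OSD watch timer, which
// already holds osd->watch_lock: it drops that lock before taking notif->lock
// (do_timeout() in turn drops notif->lock before taking per-PG locks), then
// re-acquires watch_lock before returning to the timer.  cancel() must be
// called with notif->lock held.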
class NotifyTimeoutCB : public CancelableContext {
NotifyRef notif;
bool canceled; // protected by notif lock
public:
explicit NotifyTimeoutCB(NotifyRef notif) : notif(notif), canceled(false) {}
void finish(int) override {
notif->osd->watch_lock.unlock();
notif->lock.lock();
if (!canceled)
notif->do_timeout(); // drops lock
else
notif->lock.unlock();
notif->osd->watch_lock.lock();
}
void cancel() override {
ceph_assert(ceph_mutex_is_locked(notif->lock));
canceled = true;
}
};
void Notify::do_timeout()
{
ceph_assert(ceph_mutex_is_locked(lock));
dout(10) << "timeout" << dendl;
cb = nullptr;
if (is_discarded()) {
lock.unlock();
return;
}
timed_out = true; // we will send the client an error code
maybe_complete_notify();
ceph_assert(complete);
set<WatchRef> _watchers;
_watchers.swap(watchers);
lock.unlock();
for (auto i = _watchers.begin(); i != _watchers.end(); ++i) {
boost::intrusive_ptr<PrimaryLogPG> pg((*i)->get_pg());
pg->lock();
if (!(*i)->is_discarded()) {
(*i)->cancel_notify(self.lock());
}
pg->unlock();
}
}
void Notify::register_cb()
{
ceph_assert(ceph_mutex_is_locked(lock));
{
std::lock_guard l{osd->watch_lock};
cb = new NotifyTimeoutCB(self.lock());
if (!osd->watch_timer.add_event_after(timeout, cb)) {
cb = nullptr;
}
}
}
void Notify::unregister_cb()
{
ceph_assert(ceph_mutex_is_locked(lock));
if (!cb)
return;
cb->cancel();
{
std::lock_guard l{osd->watch_lock};
osd->watch_timer.cancel_event(cb);
cb = nullptr;
}
}
void Notify::start_watcher(WatchRef watch)
{
std::lock_guard l(lock);
dout(10) << "start_watcher" << dendl;
watchers.insert(watch);
}
void Notify::complete_watcher(WatchRef watch, bufferlist& reply_bl)
{
std::lock_guard l(lock);
dout(10) << "complete_watcher" << dendl;
if (is_discarded())
return;
ceph_assert(watchers.count(watch));
watchers.erase(watch);
notify_replies.insert(make_pair(make_pair(watch->get_watcher_gid(),
watch->get_cookie()),
reply_bl));
maybe_complete_notify();
}
void Notify::complete_watcher_remove(WatchRef watch)
{
std::lock_guard l(lock);
dout(10) << __func__ << dendl;
if (is_discarded())
return;
ceph_assert(watchers.count(watch));
watchers.erase(watch);
maybe_complete_notify();
}
void Notify::maybe_complete_notify()
{
dout(10) << "maybe_complete_notify -- "
<< watchers.size()
<< " in progress watchers " << dendl;
if (watchers.empty() || timed_out) {
// prepare reply
bufferlist bl;
encode(notify_replies, bl);
vector<pair<uint64_t,uint64_t>> missed;
missed.reserve(watchers.size());
for (auto& watcher : watchers) {
missed.emplace_back(watcher->get_watcher_gid(),
watcher->get_cookie());
}
encode(missed, bl);
bufferlist empty;
auto* const reply = new MWatchNotify(
cookie,
version,
notify_id,
CEPH_WATCH_EVENT_NOTIFY_COMPLETE,
empty,
client_gid);
reply->set_data(bl);
if (timed_out)
reply->return_code = -ETIMEDOUT;
client->send_message(reply);
unregister_cb();
complete = true;
}
}
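// The notify-complete payload assembled above is, in order:
//   1. multimap<pair<gid,cookie>, bufferlist>  - per-watcher notify_ack payloads
//   2. vector<pair<gid,cookie>>                - watchers that never acked ("missed")
// A notify originator would decode it roughly as follows (illustrative only):
//
//   auto p = reply_bl.cbegin();
//   std::multimap<std::pair<uint64_t,uint64_t>, ceph::buffer::list> acks;
//   std::vector<std::pair<uint64_t,uint64_t>> missed;
//   decode(acks, p);
//   decode(missed, p);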
void Notify::discard()
{
std::lock_guard l(lock);
discarded = true;
unregister_cb();
watchers.clear();
}
void Notify::init()
{
std::lock_guard l(lock);
register_cb();
maybe_complete_notify();
}
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, watch.get())
static ostream& _prefix(
std::ostream* _dout,
Watch *watch) {
return watch->gen_dbg_prefix(*_dout);
}
class HandleWatchTimeout : public CancelableContext {
WatchRef watch;
public:
bool canceled; // protected by watch->pg->lock
explicit HandleWatchTimeout(WatchRef watch) : watch(watch), canceled(false) {}
void cancel() override {
canceled = true;
}
void finish(int) override { ceph_abort(); /* not used */ }
void complete(int) override {
OSDService *osd(watch->osd);
ldout(osd->cct, 10) << "HandleWatchTimeout" << dendl;
boost::intrusive_ptr<PrimaryLogPG> pg(watch->pg);
osd->watch_lock.unlock();
pg->lock();
watch->cb = nullptr;
if (!watch->is_discarded() && !canceled)
watch->pg->handle_watch_timeout(watch);
delete this; // ~Watch requires pg lock!
pg->unlock();
osd->watch_lock.lock();
}
};
class HandleDelayedWatchTimeout : public CancelableContext {
WatchRef watch;
public:
bool canceled;
explicit HandleDelayedWatchTimeout(WatchRef watch) : watch(watch), canceled(false) {}
void cancel() override {
canceled = true;
}
void finish(int) override {
OSDService *osd(watch->osd);
dout(10) << "HandleWatchTimeoutDelayed" << dendl;
ceph_assert(watch->pg->is_locked());
watch->cb = nullptr;
if (!watch->is_discarded() && !canceled)
watch->pg->handle_watch_timeout(watch);
}
};
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
std::ostream& Watch::gen_dbg_prefix(std::ostream& out) {
return pg->gen_prefix(out) << " -- Watch("
<< make_pair(cookie, entity) << ") ";
}
Watch::Watch(
PrimaryLogPG *pg,
OSDService *osd,
ObjectContextRef obc,
uint32_t timeout,
uint64_t cookie,
entity_name_t entity,
const entity_addr_t &addr)
: cb(NULL),
osd(osd),
pg(pg),
obc(obc),
timeout(timeout),
cookie(cookie),
addr(addr),
will_ping(false),
entity(entity),
discarded(false) {
dout(10) << "Watch()" << dendl;
}
Watch::~Watch() {
dout(10) << "~Watch" << dendl;
// users must have called remove() or discard() prior to this point
ceph_assert(!obc);
ceph_assert(!is_connected());
}
Context *Watch::get_delayed_cb()
{
ceph_assert(!cb);
cb = new HandleDelayedWatchTimeout(self.lock());
return cb;
}
void Watch::register_cb()
{
std::lock_guard l(osd->watch_lock);
if (cb) {
dout(15) << "re-registering callback, timeout: " << timeout << dendl;
cb->cancel();
osd->watch_timer.cancel_event(cb);
} else {
dout(15) << "registering callback, timeout: " << timeout << dendl;
}
cb = new HandleWatchTimeout(self.lock());
if (!osd->watch_timer.add_event_after(timeout, cb)) {
cb = nullptr;
}
}
void Watch::unregister_cb()
{
dout(15) << "unregister_cb" << dendl;
if (!cb)
return;
dout(15) << "actually registered, cancelling" << dendl;
cb->cancel();
{
std::lock_guard l(osd->watch_lock);
osd->watch_timer.cancel_event(cb); // harmless if not registered with timer
}
cb = nullptr;
}
void Watch::got_ping(utime_t t)
{
last_ping = t;
if (is_connected()) {
register_cb();
}
}
void Watch::connect(ConnectionRef con, bool _will_ping)
{
if (is_connected(con.get())) {
dout(10) << __func__ << " con " << con << " - already connected" << dendl;
return;
}
dout(10) << __func__ << " con " << con << dendl;
conn = con;
will_ping = _will_ping;
auto priv = con->get_priv();
if (priv) {
auto sessionref = static_cast<Session*>(priv.get());
sessionref->wstate.addWatch(self.lock());
priv.reset();
for (auto i = in_progress_notifies.begin();
i != in_progress_notifies.end();
++i) {
send_notify(i->second);
}
}
if (will_ping) {
last_ping = ceph_clock_now();
register_cb();
} else {
if (!con->get_priv()) {
      // if the session is already nullptr, a !will_ping watch should
      // still register the watch timeout
conn = ConnectionRef();
register_cb();
} else {
unregister_cb();
}
}
}
void Watch::disconnect()
{
dout(10) << "disconnect (con was " << conn << ")" << dendl;
conn = ConnectionRef();
if (!will_ping)
register_cb();
}
void Watch::discard()
{
dout(10) << "discard" << dendl;
for (auto i = in_progress_notifies.begin();
i != in_progress_notifies.end();
++i) {
i->second->discard();
}
discard_state();
}
void Watch::discard_state()
{
ceph_assert(pg->is_locked());
ceph_assert(!discarded);
ceph_assert(obc);
in_progress_notifies.clear();
unregister_cb();
discarded = true;
if (is_connected()) {
if (auto priv = conn->get_priv(); priv) {
auto session = static_cast<Session*>(priv.get());
session->wstate.removeWatch(self.lock());
}
conn = ConnectionRef();
}
obc = ObjectContextRef();
}
bool Watch::is_discarded() const
{
return discarded;
}
void Watch::remove(bool send_disconnect)
{
dout(10) << "remove" << dendl;
if (send_disconnect && is_connected()) {
bufferlist empty;
MWatchNotify *reply(new MWatchNotify(cookie, 0, 0,
CEPH_WATCH_EVENT_DISCONNECT, empty));
conn->send_message(reply);
}
for (auto i = in_progress_notifies.begin();
i != in_progress_notifies.end();
++i) {
i->second->complete_watcher_remove(self.lock());
}
discard_state();
}
void Watch::start_notify(NotifyRef notif)
{
ceph_assert(in_progress_notifies.find(notif->notify_id) ==
in_progress_notifies.end());
if (will_ping) {
utime_t cutoff = ceph_clock_now();
cutoff.sec_ref() -= timeout;
if (last_ping < cutoff) {
dout(10) << __func__ << " " << notif->notify_id
<< " last_ping " << last_ping << " < cutoff " << cutoff
<< ", disconnecting" << dendl;
disconnect();
return;
}
}
dout(10) << "start_notify " << notif->notify_id << dendl;
in_progress_notifies[notif->notify_id] = notif;
notif->start_watcher(self.lock());
if (is_connected())
send_notify(notif);
}
void Watch::cancel_notify(NotifyRef notif)
{
dout(10) << "cancel_notify " << notif->notify_id << dendl;
in_progress_notifies.erase(notif->notify_id);
}
void Watch::send_notify(NotifyRef notif)
{
dout(10) << "send_notify" << dendl;
MWatchNotify *notify_msg = new MWatchNotify(
cookie,
notif->version,
notif->notify_id,
CEPH_WATCH_EVENT_NOTIFY,
notif->payload,
notif->client_gid);
conn->send_message(notify_msg);
}
void Watch::notify_ack(uint64_t notify_id, bufferlist& reply_bl)
{
dout(10) << "notify_ack" << dendl;
auto i = in_progress_notifies.find(notify_id);
if (i != in_progress_notifies.end()) {
i->second->complete_watcher(self.lock(), reply_bl);
in_progress_notifies.erase(i);
}
}
WatchRef Watch::makeWatchRef(
PrimaryLogPG *pg, OSDService *osd,
ObjectContextRef obc, uint32_t timeout, uint64_t cookie, entity_name_t entity, const entity_addr_t& addr)
{
WatchRef ret(new Watch(pg, osd, obc, timeout, cookie, entity, addr));
ret->set_self(ret);
return ret;
}
void WatchConState::addWatch(WatchRef watch)
{
std::lock_guard l(lock);
watches.insert(watch);
}
void WatchConState::removeWatch(WatchRef watch)
{
std::lock_guard l(lock);
watches.erase(watch);
}
void WatchConState::reset(Connection *con)
{
set<WatchRef> _watches;
{
std::lock_guard l(lock);
_watches.swap(watches);
}
for (set<WatchRef>::iterator i = _watches.begin();
i != _watches.end();
++i) {
boost::intrusive_ptr<PrimaryLogPG> pg((*i)->get_pg());
pg->lock();
if (!(*i)->is_discarded()) {
if ((*i)->is_connected(con)) {
(*i)->disconnect();
} else {
lgeneric_derr(cct) << __func__ << " not still connected to " << (*i) << dendl;
}
}
pg->unlock();
}
}
| 12,915 | 22.064286 | 107 | cc |
null | ceph-main/src/osd/Watch.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_WATCH_H
#define CEPH_WATCH_H
#include <set>
#include "msg/Connection.h"
#include "include/Context.h"
enum WatcherState {
WATCHER_PENDING,
WATCHER_NOTIFIED,
};
class OSDService;
class PrimaryLogPG;
void intrusive_ptr_add_ref(PrimaryLogPG *pg);
void intrusive_ptr_release(PrimaryLogPG *pg);
struct ObjectContext;
class MWatchNotify;
class Watch;
typedef std::shared_ptr<Watch> WatchRef;
typedef std::weak_ptr<Watch> WWatchRef;
class Notify;
typedef std::shared_ptr<Notify> NotifyRef;
typedef std::weak_ptr<Notify> WNotifyRef;
struct CancelableContext;
/**
* Notify tracks the progress of a particular notify
*
* References are held by Watch and the timeout callback.
*/
class Notify {
friend class NotifyTimeoutCB;
friend class Watch;
WNotifyRef self;
ConnectionRef client;
uint64_t client_gid;
bool complete;
bool discarded;
bool timed_out; ///< true if the notify timed out
std::set<WatchRef> watchers;
ceph::buffer::list payload;
uint32_t timeout;
uint64_t cookie;
uint64_t notify_id;
uint64_t version;
OSDService *osd;
CancelableContext *cb;
ceph::mutex lock = ceph::make_mutex("Notify::lock");
/// (gid,cookie) -> reply_bl for everyone who acked the notify
std::multimap<std::pair<uint64_t,uint64_t>, ceph::buffer::list> notify_replies;
/// true if this notify is being discarded
bool is_discarded() {
return discarded || complete;
}
/// Sends notify completion if watchers.empty() or timeout
void maybe_complete_notify();
/// Called on Notify timeout
void do_timeout();
Notify(
ConnectionRef client,
uint64_t client_gid,
ceph::buffer::list& payload,
uint32_t timeout,
uint64_t cookie,
uint64_t notify_id,
uint64_t version,
OSDService *osd);
/// registers a timeout callback with the watch_timer
void register_cb();
/// removes the timeout callback, called on completion or cancellation
void unregister_cb();
public:
std::ostream& gen_dbg_prefix(std::ostream& out) {
return out << "Notify(" << std::make_pair(cookie, notify_id) << " "
<< " watchers=" << watchers.size()
<< ") ";
}
void set_self(NotifyRef _self) {
self = _self;
}
static NotifyRef makeNotifyRef(
ConnectionRef client,
uint64_t client_gid,
ceph::buffer::list &payload,
uint32_t timeout,
uint64_t cookie,
uint64_t notify_id,
uint64_t version,
OSDService *osd);
/// Call after creation to initialize
void init();
/// Called once per watcher prior to init()
void start_watcher(
WatchRef watcher ///< [in] watcher to complete
);
/// Called once per NotifyAck
void complete_watcher(
WatchRef watcher, ///< [in] watcher to complete
ceph::buffer::list& reply_bl ///< [in] reply buffer from the notified watcher
);
/// Called when a watcher unregisters or times out
void complete_watcher_remove(
WatchRef watcher ///< [in] watcher to complete
);
/// Called when the notify is canceled due to a new peering interval
void discard();
};
/**
* Watch is a mapping between a Connection and an ObjectContext
*
* References are held by ObjectContext and the timeout callback
*/
class HandleWatchTimeout;
class HandleDelayedWatchTimeout;
class Watch {
WWatchRef self;
friend class HandleWatchTimeout;
friend class HandleDelayedWatchTimeout;
ConnectionRef conn;
CancelableContext *cb;
OSDService *osd;
boost::intrusive_ptr<PrimaryLogPG> pg;
std::shared_ptr<ObjectContext> obc;
std::map<uint64_t, NotifyRef> in_progress_notifies;
// Could have watch_info_t here, but this file includes osd_types.h
uint32_t timeout; ///< timeout in seconds
uint64_t cookie;
entity_addr_t addr;
bool will_ping; ///< is client new enough to ping the watch
utime_t last_ping; ///< last client ping
entity_name_t entity;
bool discarded;
Watch(
PrimaryLogPG *pg, OSDService *osd,
std::shared_ptr<ObjectContext> obc, uint32_t timeout,
uint64_t cookie, entity_name_t entity,
const entity_addr_t& addr);
/// Registers the timeout callback with watch_timer
void register_cb();
/// send a Notify message when connected for notif
void send_notify(NotifyRef notif);
/// Cleans up state on discard or remove (including Connection state, obc)
void discard_state();
public:
/// Unregisters the timeout callback
void unregister_cb();
/// note receipt of a ping
void got_ping(utime_t t);
/// True if currently connected
bool is_connected() const {
return conn.get() != NULL;
}
bool is_connected(Connection *con) const {
return conn.get() == con;
}
/// NOTE: must be called with pg lock held
~Watch();
uint64_t get_watcher_gid() const {
return entity.num();
}
std::ostream& gen_dbg_prefix(std::ostream& out);
static WatchRef makeWatchRef(
PrimaryLogPG *pg, OSDService *osd,
std::shared_ptr<ObjectContext> obc, uint32_t timeout, uint64_t cookie, entity_name_t entity, const entity_addr_t &addr);
void set_self(WatchRef _self) {
self = _self;
}
/// Does not grant a ref count!
boost::intrusive_ptr<PrimaryLogPG> get_pg() { return pg; }
std::shared_ptr<ObjectContext> get_obc() { return obc; }
uint64_t get_cookie() const { return cookie; }
entity_name_t get_entity() const { return entity; }
entity_addr_t get_peer_addr() const { return addr; }
uint32_t get_timeout() const { return timeout; }
/// Generates context for use if watch timeout is delayed by scrub or recovery
Context *get_delayed_cb();
/// Transitions Watch to connected, unregister_cb, resends pending Notifies
void connect(
ConnectionRef con, ///< [in] Reference to new connection
bool will_ping ///< [in] client is new and will send pings
);
/// Transitions watch to disconnected, register_cb
void disconnect();
/// Called if Watch state is discarded due to new peering interval
void discard();
/// True if removed or discarded
bool is_discarded() const;
/// Called on unwatch
void remove(bool send_disconnect);
/// Adds notif as in-progress notify
void start_notify(
NotifyRef notif ///< [in] Reference to new in-progress notify
);
/// Removes timed out notify
void cancel_notify(
NotifyRef notif ///< [in] notify which timed out
);
/// Call when notify_ack received on notify_id
void notify_ack(
uint64_t notify_id, ///< [in] id of acked notify
ceph::buffer::list& reply_bl ///< [in] notify reply buffer
);
};
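/*
 * Typical lifecycle (illustrative only; the PG drives all of this under its
 * own lock, and 'conn', 'notif', etc. are placeholders):
 *
 *   WatchRef w = Watch::makeWatchRef(pg, osd, obc, timeout, cookie, entity, addr);
 *   w->connect(conn, client_will_ping);   // on watch registration
 *   w->start_notify(notif);               // fan a notify out to this watcher
 *   w->notify_ack(notify_id, reply_bl);   // client acked
 *   w->remove(true);                      // on unwatch (or discard() on interval change)
 */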
/**
 * Holds refs to the Watch structures corresponding to a connection
* Lives in the Session object of an OSD connection
*/
class WatchConState {
ceph::mutex lock = ceph::make_mutex("WatchConState");
std::set<WatchRef> watches;
public:
CephContext* cct;
explicit WatchConState(CephContext* cct) : cct(cct) {}
/// Add a watch
void addWatch(
WatchRef watch ///< [in] Ref to new watch object
);
/// Remove a watch
void removeWatch(
WatchRef watch ///< [in] Ref to watch object to remove
);
/// Called on session reset, disconnects watchers
void reset(Connection *con);
};
#endif
| 7,546 | 25.114187 | 124 | h |
null | ceph-main/src/osd/error_code.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include "common/error_code.h"
#include "common/errno.h"
#include "error_code.h"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
class osd_error_category : public ceph::converting_category {
public:
osd_error_category(){}
const char* name() const noexcept override;
const char* message(int ev, char*, std::size_t) const noexcept override;
std::string message(int ev) const override;
boost::system::error_condition default_error_condition(int ev) const noexcept
override;
bool equivalent(int ev, const boost::system::error_condition& c) const
noexcept override;
using ceph::converting_category::equivalent;
int from_code(int ev) const noexcept override;
};
#pragma GCC diagnostic pop
#pragma clang diagnostic pop
const char* osd_error_category::name() const noexcept {
return "osd";
}
const char* osd_error_category::message(int ev, char* buf,
std::size_t len) const noexcept {
if (ev == 0)
return "No error";
switch (static_cast<osd_errc>(ev)) {
case osd_errc::old_snapc:
return "ORDERSNAP flag set; writer has old snapc";
case osd_errc::blocklisted:
return "Blocklisted";
}
if (len) {
auto s = cpp_strerror(ev);
auto n = s.copy(buf, len - 1);
*(buf + n) = '\0';
}
return buf;
}
std::string osd_error_category::message(int ev) const {
if (ev == 0)
return "No error";
switch (static_cast<osd_errc>(ev)) {
case osd_errc::old_snapc:
return "ORDERSNAP flag set; writer has old snapc";
case osd_errc::blocklisted:
return "Blocklisted";
}
return cpp_strerror(ev);
}
boost::system::error_condition osd_error_category::default_error_condition(int ev) const noexcept {
if (ev == static_cast<int>(osd_errc::old_snapc) ||
ev == static_cast<int>(osd_errc::blocklisted))
return { ev, *this };
else
return { ev, boost::system::generic_category() };
}
bool osd_error_category::equivalent(int ev, const boost::system::error_condition& c) const noexcept {
switch (static_cast<osd_errc>(ev)) {
case osd_errc::old_snapc:
return c == boost::system::errc::invalid_argument;
case osd_errc::blocklisted:
return c == boost::system::errc::operation_not_permitted;
}
return default_error_condition(ev) == c;
}
int osd_error_category::from_code(int ev) const noexcept {
return -ev;
}
const boost::system::error_category& osd_category() noexcept {
static const osd_error_category c;
return c;
}
| 3,024 | 27.537736 | 101 | cc |
null | ceph-main/src/osd/error_code.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/system/error_code.hpp>
#include "include/rados.h"
const boost::system::error_category& osd_category() noexcept;
// Since the OSD mostly uses POSIX error codes plus a couple
// additions, this will be a degenerate error category for now that
// mostly forwards to POSIX.
enum class osd_errc {
old_snapc = 85, /* ORDERSNAP flag set; writer has old snapc*/
blocklisted = 108 /* blocklisted */
};
namespace boost::system {
template<>
struct is_error_code_enum<::osd_errc> {
static const bool value = true;
};
template<>
struct is_error_condition_enum<::osd_errc> {
static const bool value = false;
};
}
// implicit conversion:
inline boost::system::error_code make_error_code(osd_errc e) noexcept {
return { static_cast<int>(e), osd_category() };
}
// explicit conversion:
inline boost::system::error_condition make_error_condition(osd_errc e) noexcept {
return { static_cast<int>(e), osd_category() };
}
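// Example (illustrative): because is_error_code_enum<osd_errc> is specialized
// above, an osd_errc converts implicitly to a boost error_code:
//
//   boost::system::error_code ec = osd_errc::blocklisted;
//   assert(ec.category() == osd_category());
//   // compares equal to the generic condition it maps onto:
//   assert(ec == boost::system::errc::operation_not_permitted);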
| 1,422 | 25.351852 | 81 | h |
null | ceph-main/src/osd/objclass.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <cstdarg>
#include <boost/container/small_vector.hpp>
#include "common/ceph_context.h"
#include "common/ceph_releases.h"
#include "common/config.h"
#include "common/debug.h"
#include "objclass/objclass.h"
#include "osd/PrimaryLogPG.h"
#include "osd/ClassHandler.h"
#include "auth/Crypto.h"
#include "common/armor.h"
#define dout_context ClassHandler::get_instance().cct
using std::map;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::encode;
using ceph::real_time;
static constexpr int dout_subsys = ceph_subsys_objclass;
int cls_call(cls_method_context_t hctx, const char *cls, const char *method,
char *indata, int datalen, char **outdata, int *outdatalen)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
bufferlist idata;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_CALL;
op.op.cls.class_len = strlen(cls);
op.op.cls.method_len = strlen(method);
op.op.cls.indata_len = datalen;
op.indata.append(cls, op.op.cls.class_len);
op.indata.append(method, op.op.cls.method_len);
op.indata.append(indata, datalen);
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
if (r < 0)
return r;
*outdata = (char *)malloc(op.outdata.length());
if (!*outdata)
return -ENOMEM;
memcpy(*outdata, op.outdata.c_str(), op.outdata.length());
*outdatalen = op.outdata.length();
return r;
}
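// Minimal usage sketch from inside another registered class method
// (illustrative only; "otherclass"/"othermethod" are placeholders, not real
// registered handlers):
//
//   char *out = nullptr;
//   int outlen = 0;
//   int r = cls_call(hctx, "otherclass", "othermethod",
//                    indata, indatalen, &out, &outlen);
//   if (r >= 0)
//     free(out);   // the reply buffer is malloc()'d for the caller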
int cls_getxattr(cls_method_context_t hctx, const char *name,
char **outdata, int *outdatalen)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_GETXATTR;
op.op.xattr.name_len = strlen(name);
op.indata.append(name, op.op.xattr.name_len);
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
if (r < 0)
return r;
*outdata = (char *)malloc(op.outdata.length());
if (!*outdata)
return -ENOMEM;
memcpy(*outdata, op.outdata.c_str(), op.outdata.length());
*outdatalen = op.outdata.length();
return r;
}
int cls_setxattr(cls_method_context_t hctx, const char *name,
const char *value, int val_len)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_SETXATTR;
op.op.xattr.name_len = strlen(name);
op.op.xattr.value_len = val_len;
op.indata.append(name, op.op.xattr.name_len);
op.indata.append(value, val_len);
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
return r;
}
int cls_read(cls_method_context_t hctx, int ofs, int len,
char **outdata, int *outdatalen)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_SYNC_READ;
ops[0].op.extent.offset = ofs;
ops[0].op.extent.length = len;
int r = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (r < 0)
return r;
*outdata = (char *)malloc(ops[0].outdata.length());
if (!*outdata)
return -ENOMEM;
memcpy(*outdata, ops[0].outdata.c_str(), ops[0].outdata.length());
*outdatalen = ops[0].outdata.length();
return *outdatalen;
}
int cls_get_request_origin(cls_method_context_t hctx, entity_inst_t *origin)
{
PrimaryLogPG::OpContext **pctx = static_cast<PrimaryLogPG::OpContext **>(hctx);
*origin = (*pctx)->op->get_req()->get_orig_source_inst();
return 0;
}
int cls_cxx_create(cls_method_context_t hctx, bool exclusive)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_CREATE;
ops[0].op.flags = (exclusive ? CEPH_OSD_OP_FLAG_EXCL : 0);
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_remove(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_DELETE;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_stat(cls_method_context_t hctx, uint64_t *size, time_t *mtime)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
int ret;
ops[0].op.op = CEPH_OSD_OP_STAT;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = ops[0].outdata.cbegin();
utime_t ut;
uint64_t s;
try {
decode(s, iter);
decode(ut, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
if (size)
*size = s;
if (mtime)
*mtime = ut.sec();
return 0;
}
int cls_cxx_stat2(cls_method_context_t hctx, uint64_t *size, ceph::real_time *mtime)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
int ret;
ops[0].op.op = CEPH_OSD_OP_STAT;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = ops[0].outdata.cbegin();
real_time ut;
uint64_t s;
try {
decode(s, iter);
decode(ut, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
if (size)
*size = s;
if (mtime)
*mtime = ut;
return 0;
}
int cls_cxx_read2(cls_method_context_t hctx, int ofs, int len,
bufferlist *outbl, uint32_t op_flags)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
int ret;
ops[0].op.op = CEPH_OSD_OP_SYNC_READ;
ops[0].op.extent.offset = ofs;
ops[0].op.extent.length = len;
ops[0].op.flags = op_flags;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
*outbl = std::move(ops[0].outdata);
return outbl->length();
}
int cls_cxx_write2(cls_method_context_t hctx, int ofs, int len,
bufferlist *inbl, uint32_t op_flags)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_WRITE;
ops[0].op.extent.offset = ofs;
ops[0].op.extent.length = len;
ops[0].op.flags = op_flags;
ops[0].indata = *inbl;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_write_full(cls_method_context_t hctx, bufferlist *inbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_WRITEFULL;
ops[0].op.extent.offset = 0;
ops[0].op.extent.length = inbl->length();
ops[0].indata = *inbl;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_replace(cls_method_context_t hctx, int ofs, int len, bufferlist *inbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(2);
ops[0].op.op = CEPH_OSD_OP_TRUNCATE;
ops[0].op.extent.offset = 0;
ops[0].op.extent.length = 0;
ops[1].op.op = CEPH_OSD_OP_WRITE;
ops[1].op.extent.offset = ofs;
ops[1].op.extent.length = len;
ops[1].indata = *inbl;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_truncate(cls_method_context_t hctx, int ofs)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_TRUNCATE;
ops[0].op.extent.offset = ofs;
ops[0].op.extent.length = 0;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_write_zero(cls_method_context_t hctx, int ofs, int len)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_ZERO;
ops[0].op.extent.offset = ofs;
ops[0].op.extent.length = len;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_getxattr(cls_method_context_t hctx, const char *name,
bufferlist *outbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_GETXATTR;
op.op.xattr.name_len = strlen(name);
op.indata.append(name, op.op.xattr.name_len);
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
if (r < 0)
return r;
*outbl = std::move(op.outdata);
return outbl->length();
}
int cls_cxx_getxattrs(cls_method_context_t hctx, map<string, bufferlist> *attrset)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_GETXATTRS;
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
if (r < 0)
return r;
auto iter = op.outdata.cbegin();
try {
decode(*attrset, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
return 0;
}
int cls_cxx_setxattr(cls_method_context_t hctx, const char *name,
bufferlist *inbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_SETXATTR;
op.op.xattr.name_len = strlen(name);
op.op.xattr.value_len = inbl->length();
op.indata.append(name, op.op.xattr.name_len);
op.indata.append(*inbl);
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
return r;
}
int cls_cxx_snap_revert(cls_method_context_t hctx, snapid_t snapid)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
ops[0].op.op = CEPH_OSD_OP_ROLLBACK;
ops[0].op.snap.snapid = snapid;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_map_get_all_vals(cls_method_context_t hctx, map<string, bufferlist>* vals,
bool *more)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
int ret;
string start_after;
string filter_prefix;
uint64_t max = (uint64_t)-1;
encode(start_after, op.indata);
encode(max, op.indata);
encode(filter_prefix, op.indata);
op.op.op = CEPH_OSD_OP_OMAPGETVALS;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = op.outdata.cbegin();
try {
decode(*vals, iter);
decode(*more, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
return vals->size();
}
int cls_cxx_map_get_keys(cls_method_context_t hctx, const string &start_obj,
uint64_t max_to_get, set<string> *keys,
bool *more)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
int ret;
encode(start_obj, op.indata);
encode(max_to_get, op.indata);
op.op.op = CEPH_OSD_OP_OMAPGETKEYS;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = op.outdata.cbegin();
try {
decode(*keys, iter);
decode(*more, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
return keys->size();
}
int cls_cxx_map_get_vals(cls_method_context_t hctx, const string &start_obj,
const string &filter_prefix, uint64_t max_to_get,
map<string, bufferlist> *vals, bool *more)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
int ret;
encode(start_obj, op.indata);
encode(max_to_get, op.indata);
encode(filter_prefix, op.indata);
op.op.op = CEPH_OSD_OP_OMAPGETVALS;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = op.outdata.cbegin();
try {
decode(*vals, iter);
decode(*more, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
return vals->size();
}
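// Typical pagination pattern over a large omap (illustrative only):
//
//   std::string after;                 // start after the empty key
//   bool more = true;
//   while (more) {
//     std::map<std::string, bufferlist> vals;
//     int r = cls_cxx_map_get_vals(hctx, after, "" /* no prefix filter */,
//                                  1000, &vals, &more);
//     if (r < 0)
//       return r;
//     for (auto& [k, v] : vals)
//       ; // process k/v
//     if (!vals.empty())
//       after = vals.rbegin()->first;  // resume after the last key seen
//   }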
int cls_cxx_map_read_header(cls_method_context_t hctx, bufferlist *outbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
int ret;
op.op.op = CEPH_OSD_OP_OMAPGETHEADER;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
*outbl = std::move(op.outdata);
return 0;
}
int cls_cxx_map_get_val(cls_method_context_t hctx, const string &key,
bufferlist *outbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
int ret;
set<string> k;
k.insert(key);
encode(k, op.indata);
op.op.op = CEPH_OSD_OP_OMAPGETVALSBYKEYS;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = op.outdata.cbegin();
try {
map<string, bufferlist> m;
decode(m, iter);
map<string, bufferlist>::iterator iter = m.begin();
if (iter == m.end())
return -ENOENT;
*outbl = iter->second;
} catch (ceph::buffer::error& e) {
return -EIO;
}
return 0;
}
int cls_cxx_map_get_vals_by_keys(cls_method_context_t hctx,
const std::set<std::string> &keys,
std::map<std::string, bufferlist> *map)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
int ret;
encode(keys, op.indata);
op.op.op = CEPH_OSD_OP_OMAPGETVALSBYKEYS;
ret = (*pctx)->pg->do_osd_ops(*pctx, ops);
if (ret < 0)
return ret;
auto iter = op.outdata.cbegin();
try {
decode(*map, iter);
  } catch (ceph::buffer::error& e) {
return -EIO;
}
return 0;
}
int cls_cxx_map_set_val(cls_method_context_t hctx, const string &key,
bufferlist *inbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
bufferlist& update_bl = op.indata;
map<string, bufferlist> m;
m[key] = *inbl;
encode(m, update_bl);
op.op.op = CEPH_OSD_OP_OMAPSETVALS;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_map_set_vals(cls_method_context_t hctx,
const std::map<string, bufferlist> *map)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
bufferlist& update_bl = op.indata;
encode(*map, update_bl);
op.op.op = CEPH_OSD_OP_OMAPSETVALS;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_map_clear(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
op.op.op = CEPH_OSD_OP_OMAPCLEAR;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_map_write_header(cls_method_context_t hctx, bufferlist *inbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
op.indata = std::move(*inbl);
op.op.op = CEPH_OSD_OP_OMAPSETHEADER;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_map_remove_range(cls_method_context_t hctx,
const std::string& key_begin,
const std::string& key_end)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
bufferlist& update_bl = op.indata;
::encode(key_begin, update_bl);
::encode(key_end, update_bl);
op.op.op = CEPH_OSD_OP_OMAPRMKEYRANGE;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_map_remove_key(cls_method_context_t hctx, const string &key)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> ops(1);
OSDOp& op = ops[0];
bufferlist& update_bl = op.indata;
set<string> to_rm;
to_rm.insert(key);
encode(to_rm, update_bl);
op.op.op = CEPH_OSD_OP_OMAPRMKEYS;
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
int cls_cxx_list_watchers(cls_method_context_t hctx,
obj_list_watch_response_t *watchers)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
vector<OSDOp> nops(1);
OSDOp& op = nops[0];
int r;
op.op.op = CEPH_OSD_OP_LIST_WATCHERS;
r = (*pctx)->pg->do_osd_ops(*pctx, nops);
if (r < 0)
return r;
auto iter = op.outdata.cbegin();
try {
decode(*watchers, iter);
} catch (ceph::buffer::error& err) {
return -EIO;
}
return 0;
}
uint64_t cls_current_version(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_last_user_version();
}
int cls_current_subop_num(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->processed_subop_count;
}
uint64_t cls_get_features(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_osdmap()->get_up_osd_features();
}
uint64_t cls_get_client_features(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->op->get_req()->get_connection()->get_features();
}
ceph_release_t cls_get_required_osd_release(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_osdmap()->require_osd_release;
}
ceph_release_t cls_get_min_compatible_client(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_osdmap()->get_require_min_compat_client();
}
const ConfigProxy& cls_get_config(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_cct()->_conf;
}
const object_info_t& cls_get_object_info(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->obs->oi;
}
int cls_get_snapset_seq(cls_method_context_t hctx, uint64_t *snap_seq) {
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
if (!ctx->new_obs.exists || (ctx->new_obs.oi.is_whiteout() &&
ctx->obc->ssc->snapset.clones.empty())) {
return -ENOENT;
}
*snap_seq = ctx->obc->ssc->snapset.seq;
return 0;
}
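// cls_get_snapset_seq() above reports -ENOENT when the head object is
// logically absent: either it does not exist at all, or it is a whiteout
// with no clones remaining, in which case there is no meaningful snap
// sequence to return.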
int cls_cxx_chunk_write_and_set(cls_method_context_t hctx, int ofs, int len,
bufferlist *write_inbl, uint32_t op_flags,
bufferlist *set_inbl, int set_len)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext **)hctx;
char cname[] = "cas";
char method[] = "chunk_set";
vector<OSDOp> ops(2);
ops[0].op.op = CEPH_OSD_OP_WRITE;
ops[0].op.extent.offset = ofs;
ops[0].op.extent.length = len;
ops[0].op.flags = op_flags;
ops[0].indata = *write_inbl;
ops[1].op.op = CEPH_OSD_OP_CALL;
ops[1].op.cls.class_len = strlen(cname);
ops[1].op.cls.method_len = strlen(method);
ops[1].op.cls.indata_len = set_len;
ops[1].indata.append(cname, ops[1].op.cls.class_len);
ops[1].indata.append(method, ops[1].op.cls.method_len);
ops[1].indata.append(*set_inbl);
return (*pctx)->pg->do_osd_ops(*pctx, ops);
}
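// cls_cxx_chunk_write_and_set() above builds a compound transaction of two
// sub-ops: a plain CEPH_OSD_OP_WRITE of the supplied extent followed by a
// CEPH_OSD_OP_CALL into the "cas" class' "chunk_set" method, so both are
// applied by a single do_osd_ops() invocation.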
int cls_get_manifest_ref_count(cls_method_context_t hctx, string fp_oid)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_manifest_ref_count(ctx->obc, fp_oid, ctx->op);
}
uint64_t cls_get_osd_min_alloc_size(cls_method_context_t hctx) {
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_min_alloc_size();
}
uint64_t cls_get_pool_stripe_width(cls_method_context_t hctx)
{
PrimaryLogPG::OpContext *ctx = *(PrimaryLogPG::OpContext **)hctx;
return ctx->pg->get_pool().stripe_width;
}
struct GatherFinisher : public PrimaryLogPG::OpFinisher {
std::map<std::string, bufferlist> src_obj_buffs;
OSDOp *osd_op;
GatherFinisher(OSDOp *osd_op_) : osd_op(osd_op_) {}
int execute() override {
return 0;
}
};
int cls_cxx_gather(cls_method_context_t hctx, const std::set<std::string> &src_objs, const std::string& pool,
const char *cls, const char *method, bufferlist& inbl)
{
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext**)hctx;
int subop_num = (*pctx)->current_osd_subop_num;
OSDOp *osd_op = &(*(*pctx)->ops)[subop_num];
auto [iter, inserted] = (*pctx)->op_finishers.emplace(std::make_pair(subop_num, std::make_unique<GatherFinisher>(osd_op)));
assert(inserted);
auto &gather = *static_cast<GatherFinisher*>(iter->second.get());
for (const auto &obj : src_objs) {
gather.src_obj_buffs[obj] = bufferlist();
}
return (*pctx)->pg->start_cls_gather(*pctx, &gather.src_obj_buffs, pool, cls, method, inbl);
}
int cls_cxx_get_gathered_data(cls_method_context_t hctx, std::map<std::string, bufferlist> *results)
{
assert(results);
PrimaryLogPG::OpContext **pctx = (PrimaryLogPG::OpContext**)hctx;
PrimaryLogPG::OpFinisher* op_finisher = nullptr;
int r = 0;
{
auto op_finisher_it = (*pctx)->op_finishers.find((*pctx)->current_osd_subop_num);
if (op_finisher_it != (*pctx)->op_finishers.end()) {
op_finisher = op_finisher_it->second.get();
}
}
if (op_finisher == nullptr) {
results->clear();
} else {
GatherFinisher *gf = (GatherFinisher*)op_finisher;
*results = std::move(gf->src_obj_buffs);
r = gf->osd_op->rval;
}
return r;
}
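// The gather interface is two-phased: cls_cxx_gather() registers a
// GatherFinisher for the current sub-op and calls start_cls_gather(), which
// asynchronously invokes the given class method on each listed source object
// and collects the replies; when the op is re-executed,
// cls_cxx_get_gathered_data() hands back the collected buffers and reports
// the sub-op's rval.  A rough, hypothetical sketch from an object class
// method (the re-entry check is illustrative only):
//
//   if (first_pass)
//     return cls_cxx_gather(hctx, srcs, pool, "myclass", "mymethod", inbl);
//   std::map<std::string, bufferlist> data;
//   int r = cls_cxx_get_gathered_data(hctx, &data);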
// Although at first glance the implementation looks the same as in
// crimson-osd, it's different because of how the dout macro expands.
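// cls_log() below formats into a stack buffer and retries vsnprintf with a
// doubled buffer until the message fits (or the buffer limit MAX_SIZE is
// exceeded), then emits the possibly truncated result at the requested
// debug level via dout.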
int cls_log(int level, const char *format, ...)
{
size_t size = 256;
va_list ap;
while (1) {
boost::container::small_vector<char, 256> buf(size);
va_start(ap, format);
int n = vsnprintf(buf.data(), size, format, ap);
va_end(ap);
#define MAX_SIZE 8196UL
if ((n > -1 && static_cast<size_t>(n) < size) || size > MAX_SIZE) {
dout(ceph::dout::need_dynamic(level)) << buf.data() << dendl;
return n;
}
size *= 2;
}
}
| 21,112 | 25.96424 | 125 | cc |
null | ceph-main/src/osd/object_state.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "osd_types.h"
struct ObjectState {
object_info_t oi;
bool exists; ///< the stored object exists (i.e., we will remember the object_info_t)
ObjectState() : exists(false) {}
ObjectState(const object_info_t &oi_, bool exists_)
: oi(oi_), exists(exists_) {}
ObjectState(object_info_t &&oi_, bool exists_)
: oi(std::move(oi_)), exists(exists_) {}
ObjectState(const hobject_t &obj) : oi(obj), exists(false) {}
};
struct RWState {
enum State {
RWNONE,
RWREAD,
RWWRITE,
RWEXCL,
};
static const char *get_state_name(State s) {
switch (s) {
case RWNONE: return "none";
case RWREAD: return "read";
case RWWRITE: return "write";
case RWEXCL: return "excl";
default: return "???";
}
}
const char *get_state_name() const {
return get_state_name(state);
}
int count; ///< number of readers or writers
int waiters = 0; ///< number waiting
State state:4; ///< rw state
/// if set, restart backfill when we can get a read lock
bool recovery_read_marker:1;
/// if set, requeue snaptrim on lock release
bool snaptrimmer_write_marker:1;
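  // RWState is a small reader/writer/exclusive lock with explicit waiter
  // accounting: `count` tracks the holders of the active mode, while
  // `waiters` records blocked ops so that newcomers back off and do not
  // starve them (see get_read_lock()/get_write_lock() below).  The state
  // drops back to RWNONE once the last holder calls the matching put_*().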
RWState()
: count(0),
state(RWNONE),
recovery_read_marker(false),
snaptrimmer_write_marker(false)
{}
/// this function adjusts the counts if necessary
bool get_read_lock() {
// don't starve anybody!
if (waiters > 0) {
return false;
}
switch (state) {
case RWNONE:
ceph_assert(count == 0);
state = RWREAD;
// fall through
case RWREAD:
count++;
return true;
case RWWRITE:
return false;
case RWEXCL:
return false;
default:
ceph_abort_msg("unhandled case");
return false;
}
}
bool get_write_lock(bool greedy=false) {
if (!greedy) {
// don't starve anybody!
if (waiters > 0 ||
recovery_read_marker) {
return false;
}
}
switch (state) {
case RWNONE:
ceph_assert(count == 0);
state = RWWRITE;
// fall through
case RWWRITE:
count++;
return true;
case RWREAD:
return false;
case RWEXCL:
return false;
default:
ceph_abort_msg("unhandled case");
return false;
}
}
bool get_excl_lock() {
switch (state) {
case RWNONE:
ceph_assert(count == 0);
state = RWEXCL;
count = 1;
return true;
case RWWRITE:
return false;
case RWREAD:
return false;
case RWEXCL:
return false;
default:
ceph_abort_msg("unhandled case");
return false;
}
}
/// same as get_write_lock, but ignore starvation
bool take_write_lock() {
if (state == RWWRITE) {
count++;
return true;
}
return get_write_lock();
}
bool dec() {
ceph_assert(count > 0);
count--;
if (count == 0) {
state = RWNONE;
return true;
} else {
return false;
}
}
bool put_read() {
ceph_assert(state == RWREAD);
return dec();
}
bool put_write() {
ceph_assert(state == RWWRITE);
return dec();
}
bool put_excl() {
ceph_assert(state == RWEXCL);
return dec();
}
void inc_waiters() {
++waiters;
}
void release_waiters() {
waiters = 0;
}
void dec_waiters(int count) {
ceph_assert(waiters >= count);
waiters -= count;
}
bool empty() const { return state == RWNONE; }
bool get_snaptrimmer_write(bool mark_if_unsuccessful) {
if (get_write_lock()) {
return true;
} else {
if (mark_if_unsuccessful)
snaptrimmer_write_marker = true;
return false;
}
}
bool get_recovery_read() {
recovery_read_marker = true;
if (get_read_lock()) {
return true;
}
return false;
}
};
inline std::ostream& operator<<(std::ostream& out, const RWState& rw)
{
return out << "rwstate(" << rw.get_state_name()
<< " n=" << rw.count
<< " w=" << rw.waiters
<< ")";
}
| 4,084 | 20.387435 | 95 | h |
null | ceph-main/src/osd/object_state_fmt.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file fmtlib formatters for some types.h classes
*/
#include "osd/object_state.h"
#include "osd/osd_types_fmt.h"
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
template <>
struct fmt::formatter<ObjectState> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const ObjectState& os, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "exists {} oi {}", os.exists, os.oi);
}
};
| 600 | 24.041667 | 74 | h |
null | ceph-main/src/osd/osd_internal_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSD_INTERNAL_TYPES_H
#define CEPH_OSD_INTERNAL_TYPES_H
#include "osd_types.h"
#include "OpRequest.h"
#include "object_state.h"
/*
* keep tabs on object modifications that are in flight.
* we need to know the projected existence, size, snapset,
* etc., because we don't send writes down to disk until after
* replicas ack.
*/
struct SnapSetContext {
hobject_t oid;
SnapSet snapset;
int ref;
bool registered : 1;
bool exists : 1;
explicit SnapSetContext(const hobject_t& o) :
oid(o), ref(0), registered(false), exists(true) { }
};
inline std::ostream& operator<<(std::ostream& out, const SnapSetContext& ssc)
{
return out << "ssc(" << ssc.oid << " snapset: " << ssc.snapset
<< " ref: " << ssc.ref << " registered: "
<< ssc.registered << " exists: " << ssc.exists << ")";
}
struct ObjectContext;
typedef std::shared_ptr<ObjectContext> ObjectContextRef;
struct ObjectContext {
ObjectState obs;
SnapSetContext *ssc; // may be null
Context *destructor_callback;
public:
// any entity in obs.oi.watchers MUST be in either watchers or unconnected_watchers.
std::map<std::pair<uint64_t, entity_name_t>, WatchRef> watchers;
// attr cache
std::map<std::string, ceph::buffer::list, std::less<>> attr_cache;
RWState rwstate;
std::list<OpRequestRef> waiters; ///< ops waiting on state change
bool get_read(OpRequestRef& op) {
if (rwstate.get_read_lock()) {
return true;
} // else
      // Lock not granted: register the op as a waiter.
waiters.emplace_back(op);
rwstate.inc_waiters();
return false;
}
bool get_write(OpRequestRef& op, bool greedy=false) {
if (rwstate.get_write_lock(greedy)) {
return true;
} // else
if (op) {
waiters.emplace_back(op);
rwstate.inc_waiters();
}
return false;
}
bool get_excl(OpRequestRef& op) {
if (rwstate.get_excl_lock()) {
return true;
} // else
if (op) {
waiters.emplace_back(op);
rwstate.inc_waiters();
}
return false;
}
void wake(std::list<OpRequestRef> *requeue) {
rwstate.release_waiters();
requeue->splice(requeue->end(), waiters);
}
void put_read(std::list<OpRequestRef> *requeue) {
if (rwstate.put_read()) {
wake(requeue);
}
}
void put_write(std::list<OpRequestRef> *requeue) {
if (rwstate.put_write()) {
wake(requeue);
}
}
void put_excl(std::list<OpRequestRef> *requeue) {
if (rwstate.put_excl()) {
wake(requeue);
}
}
bool empty() const { return rwstate.empty(); }
bool get_lock_type(OpRequestRef& op, RWState::State type) {
switch (type) {
case RWState::RWWRITE:
return get_write(op);
case RWState::RWREAD:
return get_read(op);
case RWState::RWEXCL:
return get_excl(op);
default:
ceph_abort_msg("invalid lock type");
return true;
}
}
bool get_write_greedy(OpRequestRef& op) {
return get_write(op, true);
}
bool get_snaptrimmer_write(bool mark_if_unsuccessful) {
return rwstate.get_snaptrimmer_write(mark_if_unsuccessful);
}
bool get_recovery_read() {
return rwstate.get_recovery_read();
}
bool try_get_read_lock() {
return rwstate.get_read_lock();
}
void drop_recovery_read(std::list<OpRequestRef> *ls) {
ceph_assert(rwstate.recovery_read_marker);
put_read(ls);
rwstate.recovery_read_marker = false;
}
void put_lock_type(
RWState::State type,
std::list<OpRequestRef> *to_wake,
bool *requeue_recovery,
bool *requeue_snaptrimmer) {
switch (type) {
case RWState::RWWRITE:
put_write(to_wake);
break;
case RWState::RWREAD:
put_read(to_wake);
break;
case RWState::RWEXCL:
put_excl(to_wake);
break;
default:
ceph_abort_msg("invalid lock type");
}
if (rwstate.empty() && rwstate.recovery_read_marker) {
rwstate.recovery_read_marker = false;
*requeue_recovery = true;
}
if (rwstate.empty() && rwstate.snaptrimmer_write_marker) {
rwstate.snaptrimmer_write_marker = false;
*requeue_snaptrimmer = true;
}
}
bool is_request_pending() {
return !rwstate.empty();
}
ObjectContext()
: ssc(NULL),
destructor_callback(0),
blocked(false), requeue_scrub_on_unblock(false) {}
~ObjectContext() {
ceph_assert(rwstate.empty());
if (destructor_callback)
destructor_callback->complete(0);
}
void start_block() {
ceph_assert(!blocked);
blocked = true;
}
void stop_block() {
ceph_assert(blocked);
blocked = false;
}
bool is_blocked() const {
return blocked;
}
/// in-progress copyfrom ops for this object
bool blocked;
bool requeue_scrub_on_unblock; // true if we need to requeue scrub on unblock
};
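// ObjectContext couples the cached ObjectState/SnapSetContext with the
// per-object RWState: the get_*/put_* wrappers above park blocked ops on
// `waiters`, and put_lock_type() hands them back to the caller for
// requeueing, together with flags asking it to restart recovery or snap
// trimming if the corresponding markers were set while the lock was held.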
inline std::ostream& operator<<(std::ostream& out, const ObjectState& obs)
{
out << obs.oi.soid;
if (!obs.exists)
out << "(dne)";
return out;
}
inline std::ostream& operator<<(std::ostream& out, const ObjectContext& obc)
{
return out << "obc(" << obc.obs << " " << obc.rwstate << ")";
}
class ObcLockManager {
struct ObjectLockState {
ObjectContextRef obc;
RWState::State type;
ObjectLockState(
ObjectContextRef obc,
RWState::State type)
: obc(std::move(obc)), type(type) {}
};
std::map<hobject_t, ObjectLockState> locks;
public:
ObcLockManager() = default;
ObcLockManager(ObcLockManager &&) = default;
ObcLockManager(const ObcLockManager &) = delete;
ObcLockManager &operator=(ObcLockManager &&) = default;
bool empty() const {
return locks.empty();
}
bool get_lock_type(
RWState::State type,
const hobject_t &hoid,
ObjectContextRef& obc,
OpRequestRef& op) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_lock_type(op, type)) {
locks.insert(std::make_pair(hoid, ObjectLockState(obc, type)));
return true;
} else {
return false;
}
}
/// Get write lock, ignore starvation
bool take_write_lock(
const hobject_t &hoid,
ObjectContextRef obc) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->rwstate.take_write_lock()) {
locks.insert(
std::make_pair(
hoid, ObjectLockState(obc, RWState::RWWRITE)));
return true;
} else {
return false;
}
}
/// Get write lock for snap trim
bool get_snaptrimmer_write(
const hobject_t &hoid,
ObjectContextRef obc,
bool mark_if_unsuccessful) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_snaptrimmer_write(mark_if_unsuccessful)) {
locks.insert(
std::make_pair(
hoid, ObjectLockState(obc, RWState::RWWRITE)));
return true;
} else {
return false;
}
}
/// Get write lock greedy
bool get_write_greedy(
const hobject_t &hoid,
ObjectContextRef obc,
OpRequestRef op) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->get_write_greedy(op)) {
locks.insert(
std::make_pair(
hoid, ObjectLockState(obc, RWState::RWWRITE)));
return true;
} else {
return false;
}
}
/// try get read lock
bool try_get_read_lock(
const hobject_t &hoid,
ObjectContextRef obc) {
ceph_assert(locks.find(hoid) == locks.end());
if (obc->try_get_read_lock()) {
locks.insert(
std::make_pair(
hoid,
ObjectLockState(obc, RWState::RWREAD)));
return true;
} else {
return false;
}
}
void put_locks(
std::list<std::pair<ObjectContextRef, std::list<OpRequestRef> > > *to_requeue,
bool *requeue_recovery,
bool *requeue_snaptrimmer) {
for (auto& p: locks) {
std::list<OpRequestRef> _to_requeue;
p.second.obc->put_lock_type(
p.second.type,
&_to_requeue,
requeue_recovery,
requeue_snaptrimmer);
if (to_requeue) {
// We can safely std::move here as the whole `locks` is going
// to die just after the loop.
to_requeue->emplace_back(std::move(p.second.obc),
std::move(_to_requeue));
}
}
locks.clear();
}
~ObcLockManager() {
ceph_assert(locks.empty());
}
};
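// ObcLockManager batches per-object lock ownership for a single operation.
// A rough, illustrative usage (names hypothetical): take the locks up front
// with get_lock_type()/take_write_lock(), and on completion release
// everything via put_locks(), collecting the ops to requeue:
//
//   ObcLockManager lm;
//   if (!lm.get_lock_type(RWState::RWWRITE, hoid, obc, op)) {
//     // op was queued on the obc's waiter list; retry later
//   }
//   std::list<std::pair<ObjectContextRef, std::list<OpRequestRef>>> to_requeue;
//   bool requeue_recovery = false, requeue_snaptrimmer = false;
//   lm.put_locks(&to_requeue, &requeue_recovery, &requeue_snaptrimmer);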
#endif
| 8,217 | 24.054878 | 86 | h |
null | ceph-main/src/osd/osd_op_util.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "osd/osd_op_util.h"
#include "osd/ClassHandler.h"
#include "messages/MOSDOp.h"
using std::ostream;
using std::string;
using std::vector;
using ceph::bufferlist;
bool OpInfo::check_rmw(int flag) const {
ceph_assert(rmw_flags != 0);
return rmw_flags & flag;
}
// Returns true if op performs a read (including of the object_info).
bool OpInfo::may_read() const {
return need_read_cap() || check_rmw(CEPH_OSD_RMW_FLAG_CLASS_READ);
}
bool OpInfo::may_write() const {
return need_write_cap() || check_rmw(CEPH_OSD_RMW_FLAG_CLASS_WRITE);
}
bool OpInfo::may_cache() const { return check_rmw(CEPH_OSD_RMW_FLAG_CACHE); }
bool OpInfo::rwordered_forced() const {
return check_rmw(CEPH_OSD_RMW_FLAG_RWORDERED);
}
bool OpInfo::rwordered() const {
return may_write() || may_cache() || rwordered_forced();
}
bool OpInfo::includes_pg_op() const {
return check_rmw(CEPH_OSD_RMW_FLAG_PGOP);
}
bool OpInfo::need_read_cap() const {
return check_rmw(CEPH_OSD_RMW_FLAG_READ);
}
bool OpInfo::need_write_cap() const {
return check_rmw(CEPH_OSD_RMW_FLAG_WRITE);
}
bool OpInfo::need_promote() const {
return check_rmw(CEPH_OSD_RMW_FLAG_FORCE_PROMOTE);
}
bool OpInfo::need_skip_handle_cache() const {
return check_rmw(CEPH_OSD_RMW_FLAG_SKIP_HANDLE_CACHE);
}
bool OpInfo::need_skip_promote() const {
return check_rmw(CEPH_OSD_RMW_FLAG_SKIP_PROMOTE);
}
bool OpInfo::allows_returnvec() const {
return check_rmw(CEPH_OSD_RMW_FLAG_RETURNVEC);
}
/**
* may_read_data()
*
* Returns true if op reads information other than the object_info. Requires that the
 * osd flush any prior writes before servicing this op. Includes any information not
* cached by the osd in the object_info or snapset.
*/
bool OpInfo::may_read_data() const {
return check_rmw(CEPH_OSD_RMW_FLAG_READ_DATA);
}
void OpInfo::set_rmw_flags(int flags) {
rmw_flags |= flags;
}
void OpInfo::set_read() { set_rmw_flags(CEPH_OSD_RMW_FLAG_READ); }
void OpInfo::set_write() { set_rmw_flags(CEPH_OSD_RMW_FLAG_WRITE); }
void OpInfo::set_class_read() { set_rmw_flags(CEPH_OSD_RMW_FLAG_CLASS_READ); }
void OpInfo::set_class_write() { set_rmw_flags(CEPH_OSD_RMW_FLAG_CLASS_WRITE); }
void OpInfo::set_pg_op() { set_rmw_flags(CEPH_OSD_RMW_FLAG_PGOP); }
void OpInfo::set_cache() { set_rmw_flags(CEPH_OSD_RMW_FLAG_CACHE); }
void OpInfo::set_promote() { set_rmw_flags(CEPH_OSD_RMW_FLAG_FORCE_PROMOTE); }
void OpInfo::set_skip_handle_cache() { set_rmw_flags(CEPH_OSD_RMW_FLAG_SKIP_HANDLE_CACHE); }
void OpInfo::set_skip_promote() { set_rmw_flags(CEPH_OSD_RMW_FLAG_SKIP_PROMOTE); }
void OpInfo::set_force_rwordered() { set_rmw_flags(CEPH_OSD_RMW_FLAG_RWORDERED); }
void OpInfo::set_returnvec() { set_rmw_flags(CEPH_OSD_RMW_FLAG_RETURNVEC); }
void OpInfo::set_read_data() { set_rmw_flags(CEPH_OSD_RMW_FLAG_READ_DATA); }
int OpInfo::set_from_op(
const MOSDOp *m,
const OSDMap &osdmap)
{
// client flags have no bearing on whether an op is a read, write, etc.
clear();
if (m->has_flag(CEPH_OSD_FLAG_RWORDERED)) {
set_force_rwordered();
}
if (m->has_flag(CEPH_OSD_FLAG_RETURNVEC)) {
set_returnvec();
}
return set_from_op(m->ops, m->get_pg(), osdmap);
}
int OpInfo::set_from_op(
const std::vector<OSDOp>& ops,
const pg_t& pg,
const OSDMap &osdmap)
{
vector<OSDOp>::const_iterator iter;
// set bits based on op codes, called methods.
for (iter = ops.begin(); iter != ops.end(); ++iter) {
if ((iter->op.op == CEPH_OSD_OP_WATCH &&
iter->op.watch.op == CEPH_OSD_WATCH_OP_PING)) {
      /* This is a bit odd. PING isn't actually a write. It can't
* result in an update to the object_info. PINGs also aren't
* resent, so there's no reason to write out a log entry.
*
* However, we pipeline them behind writes, so let's force
* the write_ordered flag.
*/
set_force_rwordered();
} else {
if (ceph_osd_op_mode_modify(iter->op.op))
set_write();
}
if (ceph_osd_op_mode_read(iter->op.op)) {
set_read();
if (iter->op.op != CEPH_OSD_OP_STAT) {
set_read_data();
}
}
// set PGOP flag if there are PG ops
if (ceph_osd_op_type_pg(iter->op.op))
set_pg_op();
if (ceph_osd_op_mode_cache(iter->op.op))
set_cache();
// check for ec base pool
int64_t poolid = pg.pool();
const pg_pool_t *pool = osdmap.get_pg_pool(poolid);
if (pool && pool->is_tier()) {
const pg_pool_t *base_pool = osdmap.get_pg_pool(pool->tier_of);
if (base_pool && base_pool->require_rollback()) {
if ((iter->op.op != CEPH_OSD_OP_READ) &&
(iter->op.op != CEPH_OSD_OP_CHECKSUM) &&
(iter->op.op != CEPH_OSD_OP_CMPEXT) &&
(iter->op.op != CEPH_OSD_OP_STAT) &&
(iter->op.op != CEPH_OSD_OP_ISDIRTY) &&
(iter->op.op != CEPH_OSD_OP_UNDIRTY) &&
(iter->op.op != CEPH_OSD_OP_GETXATTR) &&
(iter->op.op != CEPH_OSD_OP_GETXATTRS) &&
(iter->op.op != CEPH_OSD_OP_CMPXATTR) &&
(iter->op.op != CEPH_OSD_OP_ASSERT_VER) &&
(iter->op.op != CEPH_OSD_OP_LIST_WATCHERS) &&
(iter->op.op != CEPH_OSD_OP_LIST_SNAPS) &&
(iter->op.op != CEPH_OSD_OP_SETALLOCHINT) &&
(iter->op.op != CEPH_OSD_OP_WRITEFULL) &&
(iter->op.op != CEPH_OSD_OP_ROLLBACK) &&
(iter->op.op != CEPH_OSD_OP_CREATE) &&
(iter->op.op != CEPH_OSD_OP_DELETE) &&
(iter->op.op != CEPH_OSD_OP_SETXATTR) &&
(iter->op.op != CEPH_OSD_OP_RMXATTR) &&
(iter->op.op != CEPH_OSD_OP_STARTSYNC) &&
(iter->op.op != CEPH_OSD_OP_COPY_GET) &&
(iter->op.op != CEPH_OSD_OP_COPY_FROM) &&
(iter->op.op != CEPH_OSD_OP_COPY_FROM2)) {
set_promote();
}
}
}
switch (iter->op.op) {
case CEPH_OSD_OP_CALL:
{
bufferlist::iterator bp = const_cast<bufferlist&>(iter->indata).begin();
int is_write, is_read;
string cname, mname;
bp.copy(iter->op.cls.class_len, cname);
bp.copy(iter->op.cls.method_len, mname);
ClassHandler::ClassData *cls;
int r = ClassHandler::get_instance().open_class(cname, &cls);
if (r) {
if (r == -ENOENT)
r = -EOPNOTSUPP;
else if (r != -EPERM) // propagate permission errors
r = -EIO;
return r;
}
int flags = cls->get_method_flags(mname);
if (flags < 0) {
if (flags == -ENOENT)
r = -EOPNOTSUPP;
else
r = flags;
return r;
}
is_read = flags & CLS_METHOD_RD;
is_write = flags & CLS_METHOD_WR;
bool is_promote = flags & CLS_METHOD_PROMOTE;
if (is_read)
set_class_read();
if (is_write)
set_class_write();
if (is_promote)
set_promote();
add_class(std::move(cname), std::move(mname), is_read, is_write,
cls->allowed);
break;
}
case CEPH_OSD_OP_WATCH:
      // force the read bit for watch since it depends on previous
// watch state (and may return early if the watch exists) or, in
// the case of ping, is simply a read op.
set_read();
set_read_data();
// fall through
case CEPH_OSD_OP_NOTIFY:
case CEPH_OSD_OP_NOTIFY_ACK:
{
set_promote();
break;
}
case CEPH_OSD_OP_DELETE:
// if we get a delete with FAILOK we can skip handle cache. without
// FAILOK we still need to promote (or do something smarter) to
// determine whether to return ENOENT or 0.
if (iter == ops.begin() &&
iter->op.flags == CEPH_OSD_OP_FLAG_FAILOK) {
set_skip_handle_cache();
}
// skip promotion when proxying a delete op
if (ops.size() == 1) {
set_skip_promote();
}
break;
case CEPH_OSD_OP_CACHE_TRY_FLUSH:
case CEPH_OSD_OP_CACHE_FLUSH:
case CEPH_OSD_OP_CACHE_EVICT:
// If try_flush/flush/evict is the only op, can skip handle cache.
if (ops.size() == 1) {
set_skip_handle_cache();
}
break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SYNC_READ:
case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_CHECKSUM:
case CEPH_OSD_OP_WRITEFULL:
if (ops.size() == 1 &&
(iter->op.flags & CEPH_OSD_OP_FLAG_FADVISE_NOCACHE ||
iter->op.flags & CEPH_OSD_OP_FLAG_FADVISE_DONTNEED)) {
set_skip_promote();
}
break;
// force promotion when pin an object in cache tier
case CEPH_OSD_OP_CACHE_PIN:
set_promote();
break;
default:
break;
}
}
if (rmw_flags == 0)
return -EINVAL;
return 0;
}
ostream& operator<<(ostream& out, const OpInfo::ClassInfo& i)
{
out << "class " << i.class_name << " method " << i.method_name
<< " rd " << i.read << " wr " << i.write << " allowed " << i.allowed;
return out;
}
| 8,816 | 30.045775 | 92 | cc |
null | ceph-main/src/osd/osd_op_util.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <vector>
#include <string>
#include "osd/OSDMap.h"
#include "messages/MOSDOp.h"
class OpInfo {
public:
struct ClassInfo {
ClassInfo(std::string&& class_name, std::string&& method_name,
bool read, bool write, bool allowed) :
class_name(std::move(class_name)), method_name(std::move(method_name)),
read(read), write(write), allowed(allowed)
{}
const std::string class_name;
const std::string method_name;
const bool read, write, allowed;
};
private:
uint64_t rmw_flags = 0;
std::vector<ClassInfo> classes;
void set_rmw_flags(int flags);
void add_class(std::string&& class_name, std::string&& method_name,
bool read, bool write, bool allowed) {
classes.emplace_back(std::move(class_name), std::move(method_name),
read, write, allowed);
}
public:
void clear() {
rmw_flags = 0;
}
uint64_t get_flags() const {
return rmw_flags;
}
bool check_rmw(int flag) const ;
bool may_read() const;
bool may_read_data() const;
bool may_write() const;
bool may_cache() const;
bool rwordered_forced() const;
bool rwordered() const;
bool includes_pg_op() const;
bool need_read_cap() const;
bool need_write_cap() const;
bool need_promote() const;
bool need_skip_handle_cache() const;
bool need_skip_promote() const;
bool allows_returnvec() const;
void set_read();
void set_write();
void set_cache();
void set_class_read();
void set_class_write();
void set_pg_op();
void set_promote();
void set_skip_handle_cache();
void set_skip_promote();
void set_force_rwordered();
void set_returnvec();
void set_read_data();
int set_from_op(
const MOSDOp *m,
const OSDMap &osdmap);
int set_from_op(
const std::vector<OSDOp> &ops,
const pg_t &pg,
const OSDMap &osdmap);
std::vector<ClassInfo> get_classes() const {
return classes;
}
};
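// Typical (illustrative) use from OSD request handling: populate an OpInfo
// from the incoming message, then consult the flag helpers to drive cap
// checks, ordering and cache/tier decisions.  Sketch only, error handling
// elided:
//
//   OpInfo info;
//   int r = info.set_from_op(m, osdmap);   // m is the incoming MOSDOp*
//   if (r < 0)
//     return r;                            // e.g. -EINVAL if no rmw bits were set
//   if (info.need_write_cap()) { /* verify the client's write caps */ }
//   if (info.rwordered())      { /* keep ordering with in-flight writes */ }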
std::ostream& operator<<(std::ostream& out, const OpInfo::ClassInfo& i);
| 2,115 | 22.511111 | 77 | h |
null | ceph-main/src/osd/osd_perf_counters.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "osd_perf_counters.h"
#include "include/common_fwd.h"
PerfCounters *build_osd_logger(CephContext *cct) {
PerfCountersBuilder osd_plb(cct, "osd", l_osd_first, l_osd_last);
// Latency axis configuration for op histograms, values are in nanoseconds
PerfHistogramCommon::axis_config_d op_hist_x_axis_config{
"Latency (usec)",
PerfHistogramCommon::SCALE_LOG2, ///< Latency in logarithmic scale
0, ///< Start at 0
100000, ///< Quantization unit is 100usec
32, ///< Enough to cover much longer than slow requests
};
// Op size axis configuration for op histograms, values are in bytes
PerfHistogramCommon::axis_config_d op_hist_y_axis_config{
"Request size (bytes)",
PerfHistogramCommon::SCALE_LOG2, ///< Request size in logarithmic scale
0, ///< Start at 0
512, ///< Quantization unit is 512 bytes
32, ///< Enough to cover requests larger than GB
};
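  // Taken together, the two axes above define a 32x32 two-dimensional
  // histogram: each sampled operation is bucketed both by latency and by
  // request size, each on a log2 scale above the stated quantization unit.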
// All the basic OSD operation stats are to be considered useful
osd_plb.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
osd_plb.add_u64(
l_osd_op_wip, "op_wip",
"Replication operations currently being processed (primary)");
osd_plb.add_u64_counter(
l_osd_op, "op",
"Client operations",
"ops", PerfCountersBuilder::PRIO_CRITICAL);
osd_plb.add_u64_counter(
l_osd_op_inb, "op_in_bytes",
"Client operations total write size",
"wr", PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
osd_plb.add_u64_counter(
l_osd_op_outb, "op_out_bytes",
"Client operations total read size",
"rd", PerfCountersBuilder::PRIO_INTERESTING, unit_t(UNIT_BYTES));
osd_plb.add_time_avg(
l_osd_op_lat, "op_latency",
"Latency of client operations (including queue time)",
"l", 9);
osd_plb.add_time_avg(
l_osd_op_process_lat, "op_process_latency",
"Latency of client operations (excluding queue time)");
osd_plb.add_time_avg(
l_osd_op_prepare_lat, "op_prepare_latency",
"Latency of client operations (excluding queue time and wait for finished)");
osd_plb.add_u64_counter(
l_osd_op_delayed_unreadable, "op_delayed_unreadable",
"Count of ops delayed due to target object being unreadable");
osd_plb.add_u64_counter(
l_osd_op_delayed_degraded, "op_delayed_degraded",
"Count of ops delayed due to target object being degraded");
osd_plb.add_u64_counter(
l_osd_op_r, "op_r", "Client read operations");
osd_plb.add_u64_counter(
l_osd_op_r_outb, "op_r_out_bytes", "Client data read", NULL, PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
osd_plb.add_time_avg(
l_osd_op_r_lat, "op_r_latency",
"Latency of read operation (including queue time)");
osd_plb.add_u64_counter_histogram(
l_osd_op_r_lat_outb_hist, "op_r_latency_out_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of operation latency (including queue time) + data read");
osd_plb.add_time_avg(
l_osd_op_r_process_lat, "op_r_process_latency",
"Latency of read operation (excluding queue time)");
osd_plb.add_time_avg(
l_osd_op_r_prepare_lat, "op_r_prepare_latency",
"Latency of read operations (excluding queue time and wait for finished)");
osd_plb.add_u64_counter(
l_osd_op_w, "op_w", "Client write operations");
osd_plb.add_u64_counter(
l_osd_op_w_inb, "op_w_in_bytes", "Client data written");
osd_plb.add_time_avg(
l_osd_op_w_lat, "op_w_latency",
"Latency of write operation (including queue time)");
osd_plb.add_u64_counter_histogram(
l_osd_op_w_lat_inb_hist, "op_w_latency_in_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of operation latency (including queue time) + data written");
osd_plb.add_time_avg(
l_osd_op_w_process_lat, "op_w_process_latency",
"Latency of write operation (excluding queue time)");
osd_plb.add_time_avg(
l_osd_op_w_prepare_lat, "op_w_prepare_latency",
"Latency of write operations (excluding queue time and wait for finished)");
osd_plb.add_u64_counter(
l_osd_op_rw, "op_rw",
"Client read-modify-write operations");
osd_plb.add_u64_counter(
l_osd_op_rw_inb, "op_rw_in_bytes",
"Client read-modify-write operations write in", NULL, PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
osd_plb.add_u64_counter(
l_osd_op_rw_outb,"op_rw_out_bytes",
"Client read-modify-write operations read out ", NULL, PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
osd_plb.add_time_avg(
l_osd_op_rw_lat, "op_rw_latency",
"Latency of read-modify-write operation (including queue time)");
osd_plb.add_u64_counter_histogram(
l_osd_op_rw_lat_inb_hist, "op_rw_latency_in_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of rw operation latency (including queue time) + data written");
osd_plb.add_u64_counter_histogram(
l_osd_op_rw_lat_outb_hist, "op_rw_latency_out_bytes_histogram",
op_hist_x_axis_config, op_hist_y_axis_config,
"Histogram of rw operation latency (including queue time) + data read");
osd_plb.add_time_avg(
l_osd_op_rw_process_lat, "op_rw_process_latency",
"Latency of read-modify-write operation (excluding queue time)");
osd_plb.add_time_avg(
l_osd_op_rw_prepare_lat, "op_rw_prepare_latency",
"Latency of read-modify-write operations (excluding queue time and wait for finished)");
osd_plb.add_time_avg(l_osd_op_before_queue_op_lat, "op_before_queue_op_lat",
"Latency of IO before calling queue(before really queue into ShardedOpWq)"); // client io before queue op_wq latency
// Now we move on to some more obscure stats, revert to assuming things
// are low priority unless otherwise specified.
osd_plb.set_prio_default(PerfCountersBuilder::PRIO_DEBUGONLY);
osd_plb.add_time_avg(l_osd_op_before_dequeue_op_lat, "op_before_dequeue_op_lat",
"Latency of IO before calling dequeue_op(already dequeued and get PG lock)"); // client io before dequeue_op latency
osd_plb.add_u64_counter(
l_osd_sop, "subop", "Suboperations");
osd_plb.add_u64_counter(
l_osd_sop_inb, "subop_in_bytes", "Suboperations total size", NULL, 0, unit_t(UNIT_BYTES));
osd_plb.add_time_avg(l_osd_sop_lat, "subop_latency", "Suboperations latency");
osd_plb.add_u64_counter(l_osd_sop_w, "subop_w", "Replicated writes");
osd_plb.add_u64_counter(
l_osd_sop_w_inb, "subop_w_in_bytes", "Replicated written data size", NULL, 0, unit_t(UNIT_BYTES));
osd_plb.add_time_avg(
l_osd_sop_w_lat, "subop_w_latency", "Replicated writes latency");
osd_plb.add_u64_counter(
l_osd_sop_pull, "subop_pull", "Suboperations pull requests");
osd_plb.add_time_avg(
l_osd_sop_pull_lat, "subop_pull_latency", "Suboperations pull latency");
osd_plb.add_u64_counter(
l_osd_sop_push, "subop_push", "Suboperations push messages");
osd_plb.add_u64_counter(
l_osd_sop_push_inb, "subop_push_in_bytes", "Suboperations pushed size", NULL, 0, unit_t(UNIT_BYTES));
osd_plb.add_time_avg(
l_osd_sop_push_lat, "subop_push_latency", "Suboperations push latency");
osd_plb.add_u64_counter(l_osd_pull, "pull", "Pull requests sent");
osd_plb.add_u64_counter(l_osd_push, "push", "Push messages sent");
osd_plb.add_u64_counter(l_osd_push_outb, "push_out_bytes", "Pushed size", NULL, 0, unit_t(UNIT_BYTES));
osd_plb.add_u64_counter(
l_osd_rop, "recovery_ops",
"Started recovery operations",
"rop", PerfCountersBuilder::PRIO_INTERESTING);
osd_plb.add_u64_counter(
l_osd_rbytes, "recovery_bytes",
"recovery bytes",
"rbt", PerfCountersBuilder::PRIO_INTERESTING);
osd_plb.add_time_avg(
l_osd_recovery_push_queue_lat,
"l_osd_recovery_push_queue_latency",
"MOSDPGPush queue latency");
osd_plb.add_time_avg(
l_osd_recovery_push_reply_queue_lat,
"l_osd_recovery_push_reply_queue_latency",
"MOSDPGPushReply queue latency");
osd_plb.add_time_avg(
l_osd_recovery_pull_queue_lat,
"l_osd_recovery_pull_queue_latency",
"MOSDPGPull queue latency");
osd_plb.add_time_avg(
l_osd_recovery_backfill_queue_lat,
"l_osd_recovery_backfill_queue_latency",
"MOSDPGBackfill queue latency");
osd_plb.add_time_avg(
l_osd_recovery_backfill_remove_queue_lat,
"l_osd_recovery_backfill_remove_queue_latency",
"MOSDPGBackfillDelete queue latency");
osd_plb.add_time_avg(
l_osd_recovery_scan_queue_lat,
"l_osd_recovery_scan_queue_latency",
"MOSDPGScan queue latency");
osd_plb.add_time_avg(
l_osd_recovery_queue_lat,
"l_osd_recovery_queue_latency",
"PGRecovery queue latency");
osd_plb.add_time_avg(
l_osd_recovery_context_queue_lat,
"l_osd_recovery_context_queue_latency",
"PGRecoveryContext queue latency");
osd_plb.add_u64(l_osd_loadavg, "loadavg", "CPU load");
osd_plb.add_u64(
l_osd_cached_crc, "cached_crc", "Total number getting crc from crc_cache");
osd_plb.add_u64(
l_osd_cached_crc_adjusted, "cached_crc_adjusted",
"Total number getting crc from crc_cache with adjusting");
osd_plb.add_u64(l_osd_missed_crc, "missed_crc",
"Total number of crc cache misses");
osd_plb.add_u64(l_osd_pg, "numpg", "Placement groups",
"pgs", PerfCountersBuilder::PRIO_USEFUL);
osd_plb.add_u64(
l_osd_pg_primary, "numpg_primary",
"Placement groups for which this osd is primary");
osd_plb.add_u64(
l_osd_pg_replica, "numpg_replica",
"Placement groups for which this osd is replica");
osd_plb.add_u64(
l_osd_pg_stray, "numpg_stray",
"Placement groups ready to be deleted from this osd");
osd_plb.add_u64(
l_osd_pg_removing, "numpg_removing",
"Placement groups queued for local deletion", "pgsr",
PerfCountersBuilder::PRIO_USEFUL);
osd_plb.add_u64(
l_osd_hb_to, "heartbeat_to_peers", "Heartbeat (ping) peers we send to");
osd_plb.add_u64_counter(l_osd_map, "map_messages", "OSD map messages");
osd_plb.add_u64_counter(l_osd_mape, "map_message_epochs", "OSD map epochs");
osd_plb.add_u64_counter(
l_osd_mape_dup, "map_message_epoch_dups", "OSD map duplicates");
osd_plb.add_u64_counter(
l_osd_waiting_for_map, "messages_delayed_for_map",
"Operations waiting for OSD map");
osd_plb.add_u64_counter(
l_osd_map_cache_hit, "osd_map_cache_hit", "osdmap cache hit");
osd_plb.add_u64_counter(
l_osd_map_cache_miss, "osd_map_cache_miss", "osdmap cache miss");
osd_plb.add_u64_counter(
l_osd_map_cache_miss_low, "osd_map_cache_miss_low",
"osdmap cache miss below cache lower bound");
osd_plb.add_u64_avg(
l_osd_map_cache_miss_low_avg, "osd_map_cache_miss_low_avg",
"osdmap cache miss, avg distance below cache lower bound");
osd_plb.add_u64_counter(
l_osd_map_bl_cache_hit, "osd_map_bl_cache_hit",
"OSDMap buffer cache hits");
osd_plb.add_u64_counter(
l_osd_map_bl_cache_miss, "osd_map_bl_cache_miss",
"OSDMap buffer cache misses");
osd_plb.add_u64(
l_osd_stat_bytes, "stat_bytes", "OSD size", "size",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
osd_plb.add_u64(
l_osd_stat_bytes_used, "stat_bytes_used", "Used space", "used",
PerfCountersBuilder::PRIO_USEFUL, unit_t(UNIT_BYTES));
osd_plb.add_u64(l_osd_stat_bytes_avail, "stat_bytes_avail", "Available space", NULL, 0, unit_t(UNIT_BYTES));
osd_plb.add_u64_counter(
l_osd_copyfrom, "copyfrom", "Rados \"copy-from\" operations");
osd_plb.add_u64_counter(l_osd_tier_promote, "tier_promote", "Tier promotions");
osd_plb.add_u64_counter(l_osd_tier_flush, "tier_flush", "Tier flushes");
osd_plb.add_u64_counter(
l_osd_tier_flush_fail, "tier_flush_fail", "Failed tier flushes");
osd_plb.add_u64_counter(
l_osd_tier_try_flush, "tier_try_flush", "Tier flush attempts");
osd_plb.add_u64_counter(
l_osd_tier_try_flush_fail, "tier_try_flush_fail",
"Failed tier flush attempts");
osd_plb.add_u64_counter(
l_osd_tier_evict, "tier_evict", "Tier evictions");
osd_plb.add_u64_counter(
l_osd_tier_whiteout, "tier_whiteout", "Tier whiteouts");
osd_plb.add_u64_counter(
l_osd_tier_dirty, "tier_dirty", "Dirty tier flag set");
osd_plb.add_u64_counter(
l_osd_tier_clean, "tier_clean", "Dirty tier flag cleaned");
osd_plb.add_u64_counter(
l_osd_tier_delay, "tier_delay", "Tier delays (agent waiting)");
osd_plb.add_u64_counter(
l_osd_tier_proxy_read, "tier_proxy_read", "Tier proxy reads");
osd_plb.add_u64_counter(
l_osd_tier_proxy_write, "tier_proxy_write", "Tier proxy writes");
osd_plb.add_u64_counter(
l_osd_agent_wake, "agent_wake", "Tiering agent wake up");
osd_plb.add_u64_counter(
l_osd_agent_skip, "agent_skip", "Objects skipped by agent");
osd_plb.add_u64_counter(
l_osd_agent_flush, "agent_flush", "Tiering agent flushes");
osd_plb.add_u64_counter(
l_osd_agent_evict, "agent_evict", "Tiering agent evictions");
osd_plb.add_u64_counter(
l_osd_object_ctx_cache_hit, "object_ctx_cache_hit", "Object context cache hits");
osd_plb.add_u64_counter(
l_osd_object_ctx_cache_total, "object_ctx_cache_total", "Object context cache lookups");
osd_plb.add_u64_counter(l_osd_op_cache_hit, "op_cache_hit");
osd_plb.add_time_avg(
l_osd_tier_flush_lat, "osd_tier_flush_lat", "Object flush latency");
osd_plb.add_time_avg(
l_osd_tier_promote_lat, "osd_tier_promote_lat", "Object promote latency");
osd_plb.add_time_avg(
l_osd_tier_r_lat, "osd_tier_r_lat", "Object proxy read latency");
osd_plb.add_u64_counter(
l_osd_pg_info, "osd_pg_info", "PG updated its info (using any method)");
osd_plb.add_u64_counter(
l_osd_pg_fastinfo, "osd_pg_fastinfo",
"PG updated its info using fastinfo attr");
osd_plb.add_u64_counter(
l_osd_pg_biginfo, "osd_pg_biginfo", "PG updated its biginfo attr");
return osd_plb.create_perf_counters();
}
PerfCounters *build_recoverystate_perf(CephContext *cct) {
PerfCountersBuilder rs_perf(cct, "recoverystate_perf", rs_first, rs_last);
rs_perf.add_time_avg(rs_initial_latency, "initial_latency", "Initial recovery state latency");
rs_perf.add_time_avg(rs_started_latency, "started_latency", "Started recovery state latency");
rs_perf.add_time_avg(rs_reset_latency, "reset_latency", "Reset recovery state latency");
rs_perf.add_time_avg(rs_start_latency, "start_latency", "Start recovery state latency");
rs_perf.add_time_avg(rs_primary_latency, "primary_latency", "Primary recovery state latency");
rs_perf.add_time_avg(rs_peering_latency, "peering_latency", "Peering recovery state latency");
rs_perf.add_time_avg(rs_backfilling_latency, "backfilling_latency", "Backfilling recovery state latency");
rs_perf.add_time_avg(rs_waitremotebackfillreserved_latency, "waitremotebackfillreserved_latency", "Wait remote backfill reserved recovery state latency");
rs_perf.add_time_avg(rs_waitlocalbackfillreserved_latency, "waitlocalbackfillreserved_latency", "Wait local backfill reserved recovery state latency");
rs_perf.add_time_avg(rs_notbackfilling_latency, "notbackfilling_latency", "Notbackfilling recovery state latency");
rs_perf.add_time_avg(rs_repnotrecovering_latency, "repnotrecovering_latency", "Repnotrecovering recovery state latency");
rs_perf.add_time_avg(rs_repwaitrecoveryreserved_latency, "repwaitrecoveryreserved_latency", "Rep wait recovery reserved recovery state latency");
rs_perf.add_time_avg(rs_repwaitbackfillreserved_latency, "repwaitbackfillreserved_latency", "Rep wait backfill reserved recovery state latency");
rs_perf.add_time_avg(rs_reprecovering_latency, "reprecovering_latency", "RepRecovering recovery state latency");
rs_perf.add_time_avg(rs_activating_latency, "activating_latency", "Activating recovery state latency");
rs_perf.add_time_avg(rs_waitlocalrecoveryreserved_latency, "waitlocalrecoveryreserved_latency", "Wait local recovery reserved recovery state latency");
rs_perf.add_time_avg(rs_waitremoterecoveryreserved_latency, "waitremoterecoveryreserved_latency", "Wait remote recovery reserved recovery state latency");
rs_perf.add_time_avg(rs_recovering_latency, "recovering_latency", "Recovering recovery state latency");
rs_perf.add_time_avg(rs_recovered_latency, "recovered_latency", "Recovered recovery state latency");
rs_perf.add_time_avg(rs_clean_latency, "clean_latency", "Clean recovery state latency");
rs_perf.add_time_avg(rs_active_latency, "active_latency", "Active recovery state latency");
rs_perf.add_time_avg(rs_replicaactive_latency, "replicaactive_latency", "Replicaactive recovery state latency");
rs_perf.add_time_avg(rs_stray_latency, "stray_latency", "Stray recovery state latency");
rs_perf.add_time_avg(rs_getinfo_latency, "getinfo_latency", "Getinfo recovery state latency");
rs_perf.add_time_avg(rs_getlog_latency, "getlog_latency", "Getlog recovery state latency");
rs_perf.add_time_avg(rs_waitactingchange_latency, "waitactingchange_latency", "Waitactingchange recovery state latency");
rs_perf.add_time_avg(rs_incomplete_latency, "incomplete_latency", "Incomplete recovery state latency");
rs_perf.add_time_avg(rs_down_latency, "down_latency", "Down recovery state latency");
rs_perf.add_time_avg(rs_getmissing_latency, "getmissing_latency", "Getmissing recovery state latency");
rs_perf.add_time_avg(rs_waitupthru_latency, "waitupthru_latency", "Waitupthru recovery state latency");
rs_perf.add_time_avg(rs_notrecovering_latency, "notrecovering_latency", "Notrecovering recovery state latency");
return rs_perf.create_perf_counters();
}
| 17,732 | 47.85124 | 156 | cc |
null | ceph-main/src/osd/osd_perf_counters.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "include/common_fwd.h"
#include "common/perf_counters.h"
enum {
l_osd_first = 10000,
l_osd_op_wip,
l_osd_op,
l_osd_op_inb,
l_osd_op_outb,
l_osd_op_lat,
l_osd_op_process_lat,
l_osd_op_prepare_lat,
l_osd_op_r,
l_osd_op_r_outb,
l_osd_op_r_lat,
l_osd_op_r_lat_outb_hist,
l_osd_op_r_process_lat,
l_osd_op_r_prepare_lat,
l_osd_op_w,
l_osd_op_w_inb,
l_osd_op_w_lat,
l_osd_op_w_lat_inb_hist,
l_osd_op_w_process_lat,
l_osd_op_w_prepare_lat,
l_osd_op_rw,
l_osd_op_rw_inb,
l_osd_op_rw_outb,
l_osd_op_rw_lat,
l_osd_op_rw_lat_inb_hist,
l_osd_op_rw_lat_outb_hist,
l_osd_op_rw_process_lat,
l_osd_op_rw_prepare_lat,
l_osd_op_delayed_unreadable,
l_osd_op_delayed_degraded,
l_osd_op_before_queue_op_lat,
l_osd_op_before_dequeue_op_lat,
l_osd_sop,
l_osd_sop_inb,
l_osd_sop_lat,
l_osd_sop_w,
l_osd_sop_w_inb,
l_osd_sop_w_lat,
l_osd_sop_pull,
l_osd_sop_pull_lat,
l_osd_sop_push,
l_osd_sop_push_inb,
l_osd_sop_push_lat,
l_osd_pull,
l_osd_push,
l_osd_push_outb,
l_osd_rop,
l_osd_rbytes,
l_osd_recovery_push_queue_lat,
l_osd_recovery_push_reply_queue_lat,
l_osd_recovery_pull_queue_lat,
l_osd_recovery_backfill_queue_lat,
l_osd_recovery_backfill_remove_queue_lat,
l_osd_recovery_scan_queue_lat,
l_osd_recovery_queue_lat,
l_osd_recovery_context_queue_lat,
l_osd_loadavg,
l_osd_cached_crc,
l_osd_cached_crc_adjusted,
l_osd_missed_crc,
l_osd_pg,
l_osd_pg_primary,
l_osd_pg_replica,
l_osd_pg_stray,
l_osd_pg_removing,
l_osd_hb_to,
l_osd_map,
l_osd_mape,
l_osd_mape_dup,
l_osd_waiting_for_map,
l_osd_map_cache_hit,
l_osd_map_cache_miss,
l_osd_map_cache_miss_low,
l_osd_map_cache_miss_low_avg,
l_osd_map_bl_cache_hit,
l_osd_map_bl_cache_miss,
l_osd_stat_bytes,
l_osd_stat_bytes_used,
l_osd_stat_bytes_avail,
l_osd_copyfrom,
l_osd_tier_promote,
l_osd_tier_flush,
l_osd_tier_flush_fail,
l_osd_tier_try_flush,
l_osd_tier_try_flush_fail,
l_osd_tier_evict,
l_osd_tier_whiteout,
l_osd_tier_dirty,
l_osd_tier_clean,
l_osd_tier_delay,
l_osd_tier_proxy_read,
l_osd_tier_proxy_write,
l_osd_agent_wake,
l_osd_agent_skip,
l_osd_agent_flush,
l_osd_agent_evict,
l_osd_object_ctx_cache_hit,
l_osd_object_ctx_cache_total,
l_osd_op_cache_hit,
l_osd_tier_flush_lat,
l_osd_tier_promote_lat,
l_osd_tier_r_lat,
l_osd_pg_info,
l_osd_pg_fastinfo,
l_osd_pg_biginfo,
l_osd_last,
};
PerfCounters *build_osd_logger(CephContext *cct);
// PeeringState perf counters
enum {
rs_first = 20000,
rs_initial_latency,
rs_started_latency,
rs_reset_latency,
rs_start_latency,
rs_primary_latency,
rs_peering_latency,
rs_backfilling_latency,
rs_waitremotebackfillreserved_latency,
rs_waitlocalbackfillreserved_latency,
rs_notbackfilling_latency,
rs_repnotrecovering_latency,
rs_repwaitrecoveryreserved_latency,
rs_repwaitbackfillreserved_latency,
rs_reprecovering_latency,
rs_activating_latency,
rs_waitlocalrecoveryreserved_latency,
rs_waitremoterecoveryreserved_latency,
rs_recovering_latency,
rs_recovered_latency,
rs_clean_latency,
rs_active_latency,
rs_replicaactive_latency,
rs_stray_latency,
rs_getinfo_latency,
rs_getlog_latency,
rs_waitactingchange_latency,
rs_incomplete_latency,
rs_down_latency,
rs_getmissing_latency,
rs_waitupthru_latency,
rs_notrecovering_latency,
rs_last,
};
PerfCounters *build_recoverystate_perf(CephContext *cct);
| 3,602 | 19.355932 | 70 | h |
null | ceph-main/src/osd/osd_tracer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "osd_tracer.h"
namespace tracing {
namespace osd {
tracing::Tracer tracer;
} // namespace osd
} // namespace tracing
| 228 | 16.615385 | 70 | cc |
null | ceph-main/src/osd/osd_tracer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "common/tracer.h"
namespace tracing {
namespace osd {
extern tracing::Tracer tracer;
} // namespace osd
} // namespace tracing
| 251 | 17 | 70 | h |
null | ceph-main/src/osd/osd_types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include <list>
#include <map>
#include <ostream>
#include <sstream>
#include <set>
#include <string>
#include <utility>
#include <vector>
#include <boost/assign/list_of.hpp>
#include "include/ceph_features.h"
#include "include/encoding.h"
#include "include/stringify.h"
extern "C" {
#include "crush/hash.h"
}
#include "common/Formatter.h"
#include "common/StackStringStream.h"
#include "include/utime_fmt.h"
#include "OSDMap.h"
#include "osd_types.h"
#include "osd_types_fmt.h"
#include "os/Transaction.h"
using std::list;
using std::make_pair;
using std::map;
using std::ostream;
using std::pair;
using std::set;
using std::shared_ptr;
using std::string;
using std::unique_ptr;
using std::vector;
using ceph::bufferlist;
using ceph::decode;
using ceph::decode_nohead;
using ceph::encode;
using ceph::encode_nohead;
using ceph::Formatter;
using ceph::make_timespan;
using ceph::JSONFormatter;
using namespace std::literals;
const char *ceph_osd_flag_name(unsigned flag)
{
switch (flag) {
case CEPH_OSD_FLAG_ACK: return "ack";
case CEPH_OSD_FLAG_ONNVRAM: return "onnvram";
case CEPH_OSD_FLAG_ONDISK: return "ondisk";
case CEPH_OSD_FLAG_RETRY: return "retry";
case CEPH_OSD_FLAG_READ: return "read";
case CEPH_OSD_FLAG_WRITE: return "write";
case CEPH_OSD_FLAG_ORDERSNAP: return "ordersnap";
case CEPH_OSD_FLAG_PEERSTAT_OLD: return "peerstat_old";
case CEPH_OSD_FLAG_BALANCE_READS: return "balance_reads";
case CEPH_OSD_FLAG_PARALLELEXEC: return "parallelexec";
case CEPH_OSD_FLAG_PGOP: return "pgop";
case CEPH_OSD_FLAG_EXEC: return "exec";
case CEPH_OSD_FLAG_EXEC_PUBLIC: return "exec_public";
case CEPH_OSD_FLAG_LOCALIZE_READS: return "localize_reads";
case CEPH_OSD_FLAG_RWORDERED: return "rwordered";
case CEPH_OSD_FLAG_IGNORE_CACHE: return "ignore_cache";
case CEPH_OSD_FLAG_SKIPRWLOCKS: return "skiprwlocks";
case CEPH_OSD_FLAG_IGNORE_OVERLAY: return "ignore_overlay";
case CEPH_OSD_FLAG_FLUSH: return "flush";
case CEPH_OSD_FLAG_MAP_SNAP_CLONE: return "map_snap_clone";
case CEPH_OSD_FLAG_ENFORCE_SNAPC: return "enforce_snapc";
case CEPH_OSD_FLAG_REDIRECTED: return "redirected";
case CEPH_OSD_FLAG_KNOWN_REDIR: return "known_if_redirected";
case CEPH_OSD_FLAG_FULL_TRY: return "full_try";
case CEPH_OSD_FLAG_FULL_FORCE: return "full_force";
case CEPH_OSD_FLAG_IGNORE_REDIRECT: return "ignore_redirect";
case CEPH_OSD_FLAG_RETURNVEC: return "returnvec";
case CEPH_OSD_FLAG_SUPPORTSPOOLEIO: return "supports_pool_eio";
default: return "???";
}
}
string ceph_osd_flag_string(unsigned flags)
{
string s;
for (unsigned i=0; i<32; ++i) {
if (flags & (1u<<i)) {
if (s.length())
s += "+";
s += ceph_osd_flag_name(1u << i);
}
}
if (s.length())
return s;
return string("-");
}
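// Flags are rendered lowest bit first and joined with '+'; an empty flag
// word renders as "-".  The op-flag and alloc-hint helpers below use the
// same scheme.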
const char * ceph_osd_op_flag_name(unsigned flag)
{
const char *name;
switch(flag) {
case CEPH_OSD_OP_FLAG_EXCL:
name = "excl";
break;
case CEPH_OSD_OP_FLAG_FAILOK:
name = "failok";
break;
case CEPH_OSD_OP_FLAG_FADVISE_RANDOM:
name = "fadvise_random";
break;
case CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL:
name = "fadvise_sequential";
break;
case CEPH_OSD_OP_FLAG_FADVISE_WILLNEED:
name = "favise_willneed";
break;
case CEPH_OSD_OP_FLAG_FADVISE_DONTNEED:
name = "fadvise_dontneed";
break;
case CEPH_OSD_OP_FLAG_FADVISE_NOCACHE:
name = "fadvise_nocache";
break;
case CEPH_OSD_OP_FLAG_WITH_REFERENCE:
name = "with_reference";
break;
case CEPH_OSD_OP_FLAG_BYPASS_CLEAN_CACHE:
name = "bypass_clean_cache";
break;
default:
name = "???";
};
return name;
}
string ceph_osd_op_flag_string(unsigned flags)
{
string s;
for (unsigned i=0; i<32; ++i) {
if (flags & (1u<<i)) {
if (s.length())
s += "+";
s += ceph_osd_op_flag_name(1u << i);
}
}
if (s.length())
return s;
return string("-");
}
string ceph_osd_alloc_hint_flag_string(unsigned flags)
{
string s;
for (unsigned i=0; i<32; ++i) {
if (flags & (1u<<i)) {
if (s.length())
s += "+";
s += ceph_osd_alloc_hint_flag_name(1u << i);
}
}
if (s.length())
return s;
return string("-");
}
void pg_shard_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(1, 1, bl);
encode(osd, bl);
encode(shard, bl);
ENCODE_FINISH(bl);
}
void pg_shard_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(osd, bl);
decode(shard, bl);
DECODE_FINISH(bl);
}
ostream &operator<<(ostream &lhs, const pg_shard_t &rhs)
{
if (rhs.is_undefined())
return lhs << "?";
if (rhs.shard == shard_id_t::NO_SHARD)
return lhs << rhs.get_osd();
return lhs << rhs.get_osd() << '(' << (unsigned)(rhs.shard) << ')';
}
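// A pg_shard_t prints as "?" when undefined, as the bare osd id when there
// is no shard (replicated pools), and as "osd(shard)" when a shard id is
// present (as with erasure-coded pools).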
void dump(Formatter* f, const osd_alerts_t& alerts)
{
for (auto& a : alerts) {
string s0 = " osd: ";
s0 += stringify(a.first);
string s;
for (auto& aa : a.second) {
s = s0;
s += " ";
s += aa.first;
s += ":";
s += aa.second;
f->dump_string("alert", s);
}
}
}
// -- osd_reqid_t --
void osd_reqid_t::dump(Formatter *f) const
{
f->dump_stream("name") << name;
f->dump_int("inc", inc);
f->dump_unsigned("tid", tid);
}
void osd_reqid_t::generate_test_instances(list<osd_reqid_t*>& o)
{
o.push_back(new osd_reqid_t);
o.push_back(new osd_reqid_t(entity_name_t::CLIENT(123), 1, 45678));
}
// -- object_locator_t --
void object_locator_t::encode(ceph::buffer::list& bl) const
{
// verify that nobody's corrupted the locator
ceph_assert(hash == -1 || key.empty());
__u8 encode_compat = 3;
ENCODE_START(6, encode_compat, bl);
encode(pool, bl);
int32_t preferred = -1; // tell old code there is no preferred osd (-1).
encode(preferred, bl);
encode(key, bl);
encode(nspace, bl);
encode(hash, bl);
if (hash != -1)
encode_compat = std::max<std::uint8_t>(encode_compat, 6); // need to interpret the hash
ENCODE_FINISH_NEW_COMPAT(bl, encode_compat);
}
void object_locator_t::decode(ceph::buffer::list::const_iterator& p)
{
DECODE_START_LEGACY_COMPAT_LEN(6, 3, 3, p);
if (struct_v < 2) {
int32_t op;
decode(op, p);
pool = op;
int16_t pref;
decode(pref, p);
} else {
decode(pool, p);
int32_t preferred;
decode(preferred, p);
}
decode(key, p);
if (struct_v >= 5)
decode(nspace, p);
if (struct_v >= 6)
decode(hash, p);
else
hash = -1;
DECODE_FINISH(p);
// verify that nobody's corrupted the locator
ceph_assert(hash == -1 || key.empty());
}
void object_locator_t::dump(Formatter *f) const
{
f->dump_int("pool", pool);
f->dump_string("key", key);
f->dump_string("namespace", nspace);
f->dump_int("hash", hash);
}
void object_locator_t::generate_test_instances(list<object_locator_t*>& o)
{
o.push_back(new object_locator_t);
o.push_back(new object_locator_t(123));
o.push_back(new object_locator_t(123, 876));
o.push_back(new object_locator_t(1, "n2"));
o.push_back(new object_locator_t(1234, "", "key"));
o.push_back(new object_locator_t(12, "n1", "key2"));
}
// -- request_redirect_t --
void request_redirect_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(1, 1, bl);
encode(redirect_locator, bl);
encode(redirect_object, bl);
// legacy of the removed osd_instructions member
encode((uint32_t)0, bl);
ENCODE_FINISH(bl);
}
void request_redirect_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(1, bl);
uint32_t legacy_osd_instructions_len;
decode(redirect_locator, bl);
decode(redirect_object, bl);
decode(legacy_osd_instructions_len, bl);
if (legacy_osd_instructions_len) {
bl += legacy_osd_instructions_len;
}
DECODE_FINISH(bl);
}
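// The extra uint32 encoded/decoded above is a placeholder for the removed
// osd_instructions member: it is always written as zero, and any non-zero
// length found while decoding is simply skipped for compatibility.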
void request_redirect_t::dump(Formatter *f) const
{
f->dump_string("object", redirect_object);
f->open_object_section("locator");
redirect_locator.dump(f);
f->close_section(); // locator
}
void request_redirect_t::generate_test_instances(list<request_redirect_t*>& o)
{
object_locator_t loc(1, "redir_obj");
o.push_back(new request_redirect_t());
o.push_back(new request_redirect_t(loc, 0));
o.push_back(new request_redirect_t(loc, "redir_obj"));
o.push_back(new request_redirect_t(loc));
}
void objectstore_perf_stat_t::dump(Formatter *f) const
{
// *_ms values just for compatibility.
f->dump_float("commit_latency_ms", os_commit_latency_ns / 1000000.0);
f->dump_float("apply_latency_ms", os_apply_latency_ns / 1000000.0);
f->dump_unsigned("commit_latency_ns", os_commit_latency_ns);
f->dump_unsigned("apply_latency_ns", os_apply_latency_ns);
}
void objectstore_perf_stat_t::encode(ceph::buffer::list &bl, uint64_t features) const
{
uint8_t target_v = 2;
if (!HAVE_FEATURE(features, OS_PERF_STAT_NS)) {
target_v = 1;
}
ENCODE_START(target_v, target_v, bl);
if (target_v >= 2) {
encode(os_commit_latency_ns, bl);
encode(os_apply_latency_ns, bl);
} else {
constexpr auto NS_PER_MS = std::chrono::nanoseconds(1ms).count();
uint32_t commit_latency_ms = os_commit_latency_ns / NS_PER_MS;
uint32_t apply_latency_ms = os_apply_latency_ns / NS_PER_MS;
encode(commit_latency_ms, bl); // for compatibility with older monitor.
encode(apply_latency_ms, bl); // for compatibility with older monitor.
}
ENCODE_FINISH(bl);
}
void objectstore_perf_stat_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(2, bl);
if (struct_v >= 2) {
decode(os_commit_latency_ns, bl);
decode(os_apply_latency_ns, bl);
} else {
uint32_t commit_latency_ms;
uint32_t apply_latency_ms;
decode(commit_latency_ms, bl);
decode(apply_latency_ms, bl);
constexpr auto NS_PER_MS = std::chrono::nanoseconds(1ms).count();
os_commit_latency_ns = commit_latency_ms * NS_PER_MS;
os_apply_latency_ns = apply_latency_ms * NS_PER_MS;
}
DECODE_FINISH(bl);
}
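// Peers without the OS_PERF_STAT_NS feature only understand millisecond
// granularity, so encode()/decode() above convert between the nanosecond
// fields and the legacy *_ms values depending on the negotiated features.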
void objectstore_perf_stat_t::generate_test_instances(std::list<objectstore_perf_stat_t*>& o)
{
o.push_back(new objectstore_perf_stat_t());
o.push_back(new objectstore_perf_stat_t());
o.back()->os_commit_latency_ns = 20000000;
o.back()->os_apply_latency_ns = 30000000;
}
// -- osd_stat_t --
void osd_stat_t::dump(Formatter *f, bool with_net) const
{
f->dump_unsigned("up_from", up_from);
f->dump_unsigned("seq", seq);
f->dump_unsigned("num_pgs", num_pgs);
f->dump_unsigned("num_osds", num_osds);
f->dump_unsigned("num_per_pool_osds", num_per_pool_osds);
f->dump_unsigned("num_per_pool_omap_osds", num_per_pool_omap_osds);
/// dump legacy stats fields to ensure backward compatibility.
f->dump_unsigned("kb", statfs.kb());
f->dump_unsigned("kb_used", statfs.kb_used_raw());
f->dump_unsigned("kb_used_data", statfs.kb_used_data());
f->dump_unsigned("kb_used_omap", statfs.kb_used_omap());
f->dump_unsigned("kb_used_meta", statfs.kb_used_internal_metadata());
f->dump_unsigned("kb_avail", statfs.kb_avail());
////////////////////
f->open_object_section("statfs");
statfs.dump(f);
f->close_section();
f->open_array_section("hb_peers");
for (auto p : hb_peers)
f->dump_int("osd", p);
f->close_section();
f->dump_int("snap_trim_queue_len", snap_trim_queue_len);
f->dump_int("num_snap_trimming", num_snap_trimming);
f->dump_int("num_shards_repaired", num_shards_repaired);
f->open_object_section("op_queue_age_hist");
op_queue_age_hist.dump(f);
f->close_section();
f->open_object_section("perf_stat");
os_perf_stat.dump(f);
f->close_section();
f->open_array_section("alerts");
::dump(f, os_alerts);
f->close_section();
if (with_net) {
dump_ping_time(f);
}
}
void osd_stat_t::dump_ping_time(Formatter *f) const
{
f->open_array_section("network_ping_times");
for (auto &i : hb_pingtime) {
f->open_object_section("entry");
f->dump_int("osd", i.first);
const time_t lu(i.second.last_update);
char buffer[26];
string lustr(ctime_r(&lu, buffer));
lustr.pop_back(); // Remove trailing \n
f->dump_string("last update", lustr);
f->open_array_section("interfaces");
f->open_object_section("interface");
f->dump_string("interface", "back");
f->open_object_section("average");
f->dump_float("1min", i.second.back_pingtime[0]/1000.0);
f->dump_float("5min", i.second.back_pingtime[1]/1000.0);
f->dump_float("15min", i.second.back_pingtime[2]/1000.0);
f->close_section(); // average
f->open_object_section("min");
f->dump_float("1min", i.second.back_min[0]/1000.0);
f->dump_float("5min", i.second.back_min[1]/1000.0);
f->dump_float("15min", i.second.back_min[2]/1000.0);
f->close_section(); // min
f->open_object_section("max");
f->dump_float("1min", i.second.back_max[0]/1000.0);
f->dump_float("5min", i.second.back_max[1]/1000.0);
f->dump_float("15min", i.second.back_max[2]/1000.0);
f->close_section(); // max
f->dump_float("last", i.second.back_last/1000.0);
f->close_section(); // interface
if (i.second.front_pingtime[0] != 0) {
f->open_object_section("interface");
f->dump_string("interface", "front");
f->open_object_section("average");
f->dump_float("1min", i.second.front_pingtime[0]/1000.0);
f->dump_float("5min", i.second.front_pingtime[1]/1000.0);
f->dump_float("15min", i.second.front_pingtime[2]/1000.0);
f->close_section(); // average
f->open_object_section("min");
f->dump_float("1min", i.second.front_min[0]/1000.0);
f->dump_float("5min", i.second.front_min[1]/1000.0);
f->dump_float("15min", i.second.front_min[2]/1000.0);
f->close_section(); // min
f->open_object_section("max");
f->dump_float("1min", i.second.front_max[0]/1000.0);
f->dump_float("5min", i.second.front_max[1]/1000.0);
f->dump_float("15min", i.second.front_max[2]/1000.0);
f->close_section(); // max
f->dump_float("last", i.second.front_last/1000.0);
f->close_section(); // interface
}
f->close_section(); // interfaces
f->close_section(); // entry
}
  f->close_section(); // network_ping_times
}
void osd_stat_t::encode(ceph::buffer::list &bl, uint64_t features) const
{
ENCODE_START(14, 2, bl);
//////// for compatibility ////////
int64_t kb = statfs.kb();
int64_t kb_used = statfs.kb_used_raw();
int64_t kb_avail = statfs.kb_avail();
encode(kb, bl);
encode(kb_used, bl);
encode(kb_avail, bl);
///////////////////////////////////
encode(snap_trim_queue_len, bl);
encode(num_snap_trimming, bl);
encode(hb_peers, bl);
encode((uint32_t)0, bl);
encode(op_queue_age_hist, bl);
encode(os_perf_stat, bl, features);
encode(up_from, bl);
encode(seq, bl);
encode(num_pgs, bl);
//////// for compatibility ////////
int64_t kb_used_data = statfs.kb_used_data();
int64_t kb_used_omap = statfs.kb_used_omap();
int64_t kb_used_meta = statfs.kb_used_internal_metadata();
encode(kb_used_data, bl);
encode(kb_used_omap, bl);
encode(kb_used_meta, bl);
encode(statfs, bl);
///////////////////////////////////
encode(os_alerts, bl);
encode(num_shards_repaired, bl);
encode(num_osds, bl);
encode(num_per_pool_osds, bl);
encode(num_per_pool_omap_osds, bl);
// hb_pingtime map
encode((int)hb_pingtime.size(), bl);
for (auto i : hb_pingtime) {
encode(i.first, bl); // osd
encode(i.second.last_update, bl);
encode(i.second.back_pingtime[0], bl);
encode(i.second.back_pingtime[1], bl);
encode(i.second.back_pingtime[2], bl);
encode(i.second.back_min[0], bl);
encode(i.second.back_min[1], bl);
encode(i.second.back_min[2], bl);
encode(i.second.back_max[0], bl);
encode(i.second.back_max[1], bl);
encode(i.second.back_max[2], bl);
encode(i.second.back_last, bl);
encode(i.second.front_pingtime[0], bl);
encode(i.second.front_pingtime[1], bl);
encode(i.second.front_pingtime[2], bl);
encode(i.second.front_min[0], bl);
encode(i.second.front_min[1], bl);
encode(i.second.front_min[2], bl);
encode(i.second.front_max[0], bl);
encode(i.second.front_max[1], bl);
encode(i.second.front_max[2], bl);
encode(i.second.front_last, bl);
}
ENCODE_FINISH(bl);
}
void osd_stat_t::decode(ceph::buffer::list::const_iterator &bl)
{
int64_t kb, kb_used,kb_avail;
int64_t kb_used_data, kb_used_omap, kb_used_meta;
DECODE_START_LEGACY_COMPAT_LEN(14, 2, 2, bl);
decode(kb, bl);
decode(kb_used, bl);
decode(kb_avail, bl);
decode(snap_trim_queue_len, bl);
decode(num_snap_trimming, bl);
decode(hb_peers, bl);
vector<int> num_hb_out;
decode(num_hb_out, bl);
if (struct_v >= 3)
decode(op_queue_age_hist, bl);
if (struct_v >= 4)
decode(os_perf_stat, bl);
if (struct_v >= 6) {
decode(up_from, bl);
decode(seq, bl);
}
if (struct_v >= 7) {
decode(num_pgs, bl);
}
if (struct_v >= 8) {
decode(kb_used_data, bl);
decode(kb_used_omap, bl);
decode(kb_used_meta, bl);
} else {
kb_used_data = kb_used;
kb_used_omap = 0;
kb_used_meta = 0;
}
if (struct_v >= 9) {
decode(statfs, bl);
} else {
statfs.reset();
statfs.total = kb << 10;
statfs.available = kb_avail << 10;
    // actually it's totally unexpected to have statfs.total < statfs.available
    // here, but unfortunately legacy generate_test_instances produced such a
    // case, hence inserting some handling rather than an assert
statfs.internally_reserved =
statfs.total > statfs.available ? statfs.total - statfs.available : 0;
kb_used <<= 10;
if ((int64_t)statfs.internally_reserved > kb_used) {
statfs.internally_reserved -= kb_used;
} else {
statfs.internally_reserved = 0;
}
statfs.allocated = kb_used_data << 10;
statfs.omap_allocated = kb_used_omap << 10;
statfs.internal_metadata = kb_used_meta << 10;
}
if (struct_v >= 10) {
decode(os_alerts, bl);
} else {
os_alerts.clear();
}
if (struct_v >= 11) {
decode(num_shards_repaired, bl);
} else {
num_shards_repaired = 0;
}
if (struct_v >= 12) {
decode(num_osds, bl);
decode(num_per_pool_osds, bl);
} else {
num_osds = 0;
num_per_pool_osds = 0;
}
if (struct_v >= 13) {
decode(num_per_pool_omap_osds, bl);
} else {
num_per_pool_omap_osds = 0;
}
hb_pingtime.clear();
if (struct_v >= 14) {
int count;
decode(count, bl);
for (int i = 0 ; i < count ; i++) {
int osd;
decode(osd, bl);
struct Interfaces ifs;
decode(ifs.last_update, bl);
decode(ifs.back_pingtime[0],bl);
decode(ifs.back_pingtime[1], bl);
decode(ifs.back_pingtime[2], bl);
decode(ifs.back_min[0],bl);
decode(ifs.back_min[1], bl);
decode(ifs.back_min[2], bl);
decode(ifs.back_max[0],bl);
decode(ifs.back_max[1], bl);
decode(ifs.back_max[2], bl);
decode(ifs.back_last, bl);
decode(ifs.front_pingtime[0], bl);
decode(ifs.front_pingtime[1], bl);
decode(ifs.front_pingtime[2], bl);
decode(ifs.front_min[0], bl);
decode(ifs.front_min[1], bl);
decode(ifs.front_min[2], bl);
decode(ifs.front_max[0], bl);
decode(ifs.front_max[1], bl);
decode(ifs.front_max[2], bl);
decode(ifs.front_last, bl);
hb_pingtime[osd] = ifs;
}
}
DECODE_FINISH(bl);
}
void osd_stat_t::generate_test_instances(std::list<osd_stat_t*>& o)
{
o.push_back(new osd_stat_t);
o.push_back(new osd_stat_t);
list<store_statfs_t*> ll;
store_statfs_t::generate_test_instances(ll);
o.back()->statfs = *ll.back();
o.back()->hb_peers.push_back(7);
o.back()->snap_trim_queue_len = 8;
o.back()->num_snap_trimming = 99;
o.back()->num_shards_repaired = 101;
o.back()->os_alerts[0].emplace(
"some alert", "some alert details");
o.back()->os_alerts[1].emplace(
"some alert2", "some alert2 details");
struct Interfaces gen_interfaces = {
123456789, { 1000, 900, 800 }, { 990, 890, 790 }, { 1010, 910, 810 }, 1001,
{ 1100, 1000, 900 }, { 1090, 990, 890 }, { 1110, 1010, 910 }, 1101 };
o.back()->hb_pingtime[20] = gen_interfaces;
gen_interfaces = {
987654321, { 100, 200, 300 }, { 90, 190, 290 }, { 110, 210, 310 }, 101 };
o.back()->hb_pingtime[30] = gen_interfaces;
}
// -- pg_t --
int pg_t::print(char *o, int maxlen) const
{
return snprintf(o, maxlen, "%llu.%x", (unsigned long long)pool(), ps());
}
bool pg_t::parse(const char *s)
{
uint64_t ppool;
uint32_t pseed;
int r = sscanf(s, "%llu.%x", (long long unsigned *)&ppool, &pseed);
if (r < 2)
return false;
m_pool = ppool;
m_seed = pseed;
return true;
}
bool spg_t::parse(const char *s)
{
shard = shard_id_t::NO_SHARD;
uint64_t ppool;
uint32_t pseed;
uint32_t pshard;
int r = sscanf(s, "%llu.%x", (long long unsigned *)&ppool, &pseed);
if (r < 2)
return false;
pgid.set_pool(ppool);
pgid.set_ps(pseed);
const char *p = strchr(s, 's');
if (p) {
r = sscanf(p, "s%u", &pshard);
if (r == 1) {
shard = shard_id_t(pshard);
} else {
return false;
}
}
return true;
}
char *spg_t::calc_name(char *buf, const char *suffix_backwords) const
{
while (*suffix_backwords)
*--buf = *suffix_backwords++;
if (!is_no_shard()) {
buf = ritoa<uint8_t, 10>((uint8_t)shard.id, buf);
*--buf = 's';
}
return pgid.calc_name(buf, "");
}
std::string spg_t::calc_name_sring() const
{
char buf[spg_t::calc_name_buf_size];
buf[spg_t::calc_name_buf_size - 1] = '\0';
return string{calc_name(buf + spg_t::calc_name_buf_size - 1, "")};
}
ostream& operator<<(ostream& out, const spg_t &pg)
{
char buf[spg_t::calc_name_buf_size];
buf[spg_t::calc_name_buf_size - 1] = '\0';
out << pg.calc_name(buf + spg_t::calc_name_buf_size - 1, "");
return out;
}
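// Map this PG's seed back into the seed it would have had when the pool
// only had old_pg_num PGs, i.e. the PG this one was split from.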
pg_t pg_t::get_ancestor(unsigned old_pg_num) const
{
int old_bits = cbits(old_pg_num);
int old_mask = (1 << old_bits) - 1;
pg_t ret = *this;
ret.m_seed = ceph_stable_mod(m_seed, old_pg_num, old_mask);
return ret;
}
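// Return true if growing the pool from old_pg_num to new_pg_num would split
// this PG; if so, optionally record the resulting child pgids in *children
// (every seed in [old_pg_num, new_pg_num) that stable-mods back to m_seed).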
bool pg_t::is_split(unsigned old_pg_num, unsigned new_pg_num, set<pg_t> *children) const
{
//ceph_assert(m_seed < old_pg_num);
if (m_seed >= old_pg_num) {
// degenerate case
return false;
}
if (new_pg_num <= old_pg_num)
return false;
bool split = false;
if (true) {
unsigned old_bits = cbits(old_pg_num);
unsigned old_mask = (1 << old_bits) - 1;
for (unsigned n = 1; ; n++) {
unsigned next_bit = (n << (old_bits-1));
unsigned s = next_bit | m_seed;
if (s < old_pg_num || s == m_seed)
continue;
if (s >= new_pg_num)
break;
if ((unsigned)ceph_stable_mod(s, old_pg_num, old_mask) == m_seed) {
split = true;
if (children)
children->insert(pg_t(s, m_pool));
}
}
}
if (false) {
// brute force
int old_bits = cbits(old_pg_num);
int old_mask = (1 << old_bits) - 1;
for (unsigned x = old_pg_num; x < new_pg_num; ++x) {
unsigned o = ceph_stable_mod(x, old_pg_num, old_mask);
if (o == m_seed) {
split = true;
children->insert(pg_t(x, m_pool));
}
}
}
return split;
}
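// Number of hash bits this PG covers for the given pg_num: p if this seed
// falls in one of the already-split (smaller) bins, otherwise p - 1.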
unsigned pg_t::get_split_bits(unsigned pg_num) const {
if (pg_num == 1)
return 0;
ceph_assert(pg_num > 1);
// Find unique p such that pg_num \in [2^(p-1), 2^p)
unsigned p = cbits(pg_num);
ceph_assert(p); // silence coverity #751330
if ((m_seed % (1<<(p-1))) < (pg_num % (1<<(p-1))))
return p;
else
return p - 1;
}
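// A PG is a merge source when pg_num shrinks from old_pg_num to new_pg_num
// and its seed lands in [new_pg_num, old_pg_num); the surviving target is
// found by walking get_parent() until the seed drops below new_pg_num.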
bool pg_t::is_merge_source(
unsigned old_pg_num,
unsigned new_pg_num,
pg_t *parent) const
{
if (m_seed < old_pg_num &&
m_seed >= new_pg_num) {
if (parent) {
pg_t t = *this;
while (t.m_seed >= new_pg_num) {
t = t.get_parent();
}
*parent = t;
}
return true;
}
return false;
}
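// The parent PG is obtained by clearing the highest set bit of the seed,
// i.e. the bit that distinguished this child at split time.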
pg_t pg_t::get_parent() const
{
unsigned bits = cbits(m_seed);
ceph_assert(bits);
pg_t retval = *this;
retval.m_seed &= ~((~0)<<(bits - 1));
return retval;
}
hobject_t pg_t::get_hobj_start() const
{
return hobject_t(object_t(), string(), 0, m_seed, m_pool,
string());
}
hobject_t pg_t::get_hobj_end(unsigned pg_num) const
{
// note: this assumes a bitwise sort; with the legacy nibblewise
// sort a PG did not always cover a single contiguous range of the
// (bit-reversed) hash range.
unsigned bits = get_split_bits(pg_num);
uint64_t rev_start = hobject_t::_reverse_bits(m_seed);
uint64_t rev_end = (rev_start | (0xffffffff >> bits)) + 1;
if (rev_end >= 0x100000000) {
ceph_assert(rev_end == 0x100000000);
return hobject_t::get_max();
} else {
return hobject_t(object_t(), string(), CEPH_NOSNAP,
hobject_t::_reverse_bits(rev_end), m_pool,
string());
}
}
void pg_t::dump(Formatter *f) const
{
f->dump_unsigned("pool", m_pool);
f->dump_unsigned("seed", m_seed);
}
void pg_t::generate_test_instances(list<pg_t*>& o)
{
o.push_back(new pg_t);
o.push_back(new pg_t(1, 2));
o.push_back(new pg_t(13123, 3));
o.push_back(new pg_t(131223, 4));
}
char *pg_t::calc_name(char *buf, const char *suffix_backwords) const
{
while (*suffix_backwords)
*--buf = *suffix_backwords++;
buf = ritoa<uint32_t, 16>(m_seed, buf);
*--buf = '.';
return ritoa<uint64_t, 10>(m_pool, buf);
}
ostream& operator<<(ostream& out, const pg_t &pg)
{
char buf[pg_t::calc_name_buf_size];
buf[pg_t::calc_name_buf_size - 1] = '\0';
out << pg.calc_name(buf + pg_t::calc_name_buf_size - 1, "");
return out;
}
// -- coll_t --
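// Render the collection name into _str_buff: "meta" for the metadata
// collection, "<pgid>_head" for a PG collection and "<pgid>_TEMP" for its
// temp collection.  The suffixes are passed reversed because calc_name()
// fills the buffer from right to left.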
void coll_t::calc_str()
{
switch (type) {
case TYPE_META:
strcpy(_str_buff, "meta");
_str = _str_buff;
break;
case TYPE_PG:
_str_buff[spg_t::calc_name_buf_size - 1] = '\0';
_str = pgid.calc_name(_str_buff + spg_t::calc_name_buf_size - 1, "daeh_");
break;
case TYPE_PG_TEMP:
_str_buff[spg_t::calc_name_buf_size - 1] = '\0';
_str = pgid.calc_name(_str_buff + spg_t::calc_name_buf_size - 1, "PMET_");
break;
default:
ceph_abort_msg("unknown collection type");
}
}
bool coll_t::parse(const std::string& s)
{
if (s == "meta") {
type = TYPE_META;
pgid = spg_t();
removal_seq = 0;
calc_str();
ceph_assert(s == _str);
return true;
}
if (s.find("_head") == s.length() - 5 &&
pgid.parse(s.substr(0, s.length() - 5))) {
type = TYPE_PG;
removal_seq = 0;
calc_str();
ceph_assert(s == _str);
return true;
}
if (s.find("_TEMP") == s.length() - 5 &&
pgid.parse(s.substr(0, s.length() - 5))) {
type = TYPE_PG_TEMP;
removal_seq = 0;
calc_str();
ceph_assert(s == _str);
return true;
}
return false;
}
void coll_t::encode(ceph::buffer::list& bl) const
{
using ceph::encode;
// when changing this, remember to update encoded_size() too.
if (is_temp()) {
// can't express this as v2...
__u8 struct_v = 3;
encode(struct_v, bl);
encode(to_str(), bl);
} else {
__u8 struct_v = 2;
encode(struct_v, bl);
encode((__u8)type, bl);
encode(pgid, bl);
snapid_t snap = CEPH_NOSNAP;
encode(snap, bl);
}
}
size_t coll_t::encoded_size() const
{
size_t r = sizeof(__u8);
if (is_temp()) {
// v3
r += sizeof(__u32);
if (_str) {
r += strlen(_str);
}
} else {
// v2
// 1. type
r += sizeof(__u8);
// 2. pgid
// - encoding header
r += sizeof(ceph_le32) + 2 * sizeof(__u8);
// - pg_t
r += sizeof(__u8) + sizeof(uint64_t) + 2 * sizeof(uint32_t);
// - shard_id_t
r += sizeof(int8_t);
// 3. snapid_t
r += sizeof(uint64_t);
}
return r;
}
void coll_t::decode(ceph::buffer::list::const_iterator& bl)
{
using ceph::decode;
__u8 struct_v;
decode(struct_v, bl);
switch (struct_v) {
case 1:
{
snapid_t snap;
decode(pgid, bl);
decode(snap, bl);
// infer the type
if (pgid == spg_t() && snap == 0) {
type = TYPE_META;
} else {
type = TYPE_PG;
}
removal_seq = 0;
}
break;
case 2:
{
__u8 _type;
snapid_t snap;
decode(_type, bl);
decode(pgid, bl);
decode(snap, bl);
type = (type_t)_type;
removal_seq = 0;
}
break;
case 3:
{
string str;
decode(str, bl);
bool ok = parse(str);
if (!ok)
throw std::domain_error(std::string("unable to parse pg ") + str);
}
break;
default:
{
CachedStackStringStream css;
*css << "coll_t::decode(): don't know how to decode version "
<< struct_v;
throw std::domain_error(css->str());
}
}
}
void coll_t::dump(Formatter *f) const
{
f->dump_unsigned("type_id", (unsigned)type);
if (type != TYPE_META)
f->dump_stream("pgid") << pgid;
f->dump_string("name", to_str());
}
void coll_t::generate_test_instances(list<coll_t*>& o)
{
o.push_back(new coll_t());
o.push_back(new coll_t(spg_t(pg_t(1, 0), shard_id_t::NO_SHARD)));
o.push_back(new coll_t(o.back()->get_temp()));
o.push_back(new coll_t(spg_t(pg_t(3, 2), shard_id_t(12))));
o.push_back(new coll_t(o.back()->get_temp()));
o.push_back(new coll_t());
}
// ---
std::string pg_vector_string(const vector<int32_t> &a)
{
CachedStackStringStream css;
*css << "[";
for (auto i = a.cbegin(); i != a.cend(); ++i) {
if (i != a.begin())
*css << ",";
if (*i != CRUSH_ITEM_NONE)
*css << *i;
else
*css << "NONE";
}
*css << "]";
return css->str();
}
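// Build a '+'-separated string of every state bit set in 'state'; if no
// known bit is set the result is "unknown".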
std::string pg_state_string(uint64_t state)
{
CachedStackStringStream css;
if (state & PG_STATE_STALE)
*css << "stale+";
if (state & PG_STATE_CREATING)
*css << "creating+";
if (state & PG_STATE_ACTIVE)
*css << "active+";
if (state & PG_STATE_ACTIVATING)
*css << "activating+";
if (state & PG_STATE_CLEAN)
*css << "clean+";
if (state & PG_STATE_RECOVERY_WAIT)
*css << "recovery_wait+";
if (state & PG_STATE_RECOVERY_TOOFULL)
*css << "recovery_toofull+";
if (state & PG_STATE_RECOVERING)
*css << "recovering+";
if (state & PG_STATE_FORCED_RECOVERY)
*css << "forced_recovery+";
if (state & PG_STATE_DOWN)
*css << "down+";
if (state & PG_STATE_RECOVERY_UNFOUND)
*css << "recovery_unfound+";
if (state & PG_STATE_BACKFILL_UNFOUND)
*css << "backfill_unfound+";
if (state & PG_STATE_UNDERSIZED)
*css << "undersized+";
if (state & PG_STATE_DEGRADED)
*css << "degraded+";
if (state & PG_STATE_REMAPPED)
*css << "remapped+";
if (state & PG_STATE_PREMERGE)
*css << "premerge+";
if (state & PG_STATE_SCRUBBING)
*css << "scrubbing+";
if (state & PG_STATE_DEEP_SCRUB)
*css << "deep+";
if (state & PG_STATE_INCONSISTENT)
*css << "inconsistent+";
if (state & PG_STATE_PEERING)
*css << "peering+";
if (state & PG_STATE_REPAIR)
*css << "repair+";
if (state & PG_STATE_BACKFILL_WAIT)
*css << "backfill_wait+";
if (state & PG_STATE_BACKFILLING)
*css << "backfilling+";
if (state & PG_STATE_FORCED_BACKFILL)
*css << "forced_backfill+";
if (state & PG_STATE_BACKFILL_TOOFULL)
*css << "backfill_toofull+";
if (state & PG_STATE_INCOMPLETE)
*css << "incomplete+";
if (state & PG_STATE_PEERED)
*css << "peered+";
if (state & PG_STATE_SNAPTRIM)
*css << "snaptrim+";
if (state & PG_STATE_SNAPTRIM_WAIT)
*css << "snaptrim_wait+";
if (state & PG_STATE_SNAPTRIM_ERROR)
*css << "snaptrim_error+";
if (state & PG_STATE_FAILED_REPAIR)
*css << "failed_repair+";
if (state & PG_STATE_LAGGY)
*css << "laggy+";
if (state & PG_STATE_WAIT)
*css << "wait+";
auto ret = css->str();
if (ret.length() > 0)
ret.resize(ret.length() - 1);
else
ret = "unknown";
return ret;
}
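// Inverse of pg_state_string() for a single token: return the matching
// PG_STATE_* bit, 0 for "unknown", or std::nullopt for unrecognized names.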
std::optional<uint64_t> pg_string_state(const std::string& state)
{
std::optional<uint64_t> type;
if (state == "active")
type = PG_STATE_ACTIVE;
else if (state == "clean")
type = PG_STATE_CLEAN;
else if (state == "down")
type = PG_STATE_DOWN;
else if (state == "recovery_unfound")
type = PG_STATE_RECOVERY_UNFOUND;
else if (state == "backfill_unfound")
type = PG_STATE_BACKFILL_UNFOUND;
else if (state == "premerge")
type = PG_STATE_PREMERGE;
else if (state == "scrubbing")
type = PG_STATE_SCRUBBING;
else if (state == "degraded")
type = PG_STATE_DEGRADED;
else if (state == "inconsistent")
type = PG_STATE_INCONSISTENT;
else if (state == "peering")
type = PG_STATE_PEERING;
else if (state == "repair")
type = PG_STATE_REPAIR;
else if (state == "recovering")
type = PG_STATE_RECOVERING;
else if (state == "forced_recovery")
type = PG_STATE_FORCED_RECOVERY;
else if (state == "backfill_wait")
type = PG_STATE_BACKFILL_WAIT;
else if (state == "incomplete")
type = PG_STATE_INCOMPLETE;
else if (state == "stale")
type = PG_STATE_STALE;
else if (state == "remapped")
type = PG_STATE_REMAPPED;
else if (state == "deep")
type = PG_STATE_DEEP_SCRUB;
else if (state == "backfilling")
type = PG_STATE_BACKFILLING;
else if (state == "forced_backfill")
type = PG_STATE_FORCED_BACKFILL;
else if (state == "backfill_toofull")
type = PG_STATE_BACKFILL_TOOFULL;
else if (state == "recovery_wait")
type = PG_STATE_RECOVERY_WAIT;
else if (state == "recovery_toofull")
type = PG_STATE_RECOVERY_TOOFULL;
else if (state == "undersized")
type = PG_STATE_UNDERSIZED;
else if (state == "activating")
type = PG_STATE_ACTIVATING;
else if (state == "peered")
type = PG_STATE_PEERED;
else if (state == "snaptrim")
type = PG_STATE_SNAPTRIM;
else if (state == "snaptrim_wait")
type = PG_STATE_SNAPTRIM_WAIT;
else if (state == "snaptrim_error")
type = PG_STATE_SNAPTRIM_ERROR;
else if (state == "creating")
type = PG_STATE_CREATING;
else if (state == "failed_repair")
type = PG_STATE_FAILED_REPAIR;
else if (state == "laggy")
type = PG_STATE_LAGGY;
else if (state == "wait")
type = PG_STATE_WAIT;
else if (state == "unknown")
type = 0;
else
type = std::nullopt;
return type;
}
// -- eversion_t --
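// Format the version as a fixed-width 31-character key; the buffer is one
// byte larger to hold the trailing NUL written by the char* overload, which
// is trimmed off before returning.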
string eversion_t::get_key_name() const
{
std::string key(32, ' ');
get_key_name(&key[0]);
key.resize(31); // remove the null terminator
return key;
}
// -- pool_snap_info_t --
void pool_snap_info_t::dump(Formatter *f) const
{
f->dump_unsigned("snapid", snapid);
f->dump_stream("stamp") << stamp;
f->dump_string("name", name);
}
void pool_snap_info_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
using ceph::encode;
if ((features & CEPH_FEATURE_PGPOOL3) == 0) {
__u8 struct_v = 1;
encode(struct_v, bl);
encode(snapid, bl);
encode(stamp, bl);
encode(name, bl);
return;
}
ENCODE_START(2, 2, bl);
encode(snapid, bl);
encode(stamp, bl);
encode(name, bl);
ENCODE_FINISH(bl);
}
void pool_snap_info_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(snapid, bl);
decode(stamp, bl);
decode(name, bl);
DECODE_FINISH(bl);
}
void pool_snap_info_t::generate_test_instances(list<pool_snap_info_t*>& o)
{
o.push_back(new pool_snap_info_t);
o.push_back(new pool_snap_info_t);
o.back()->snapid = 1;
o.back()->stamp = utime_t(1, 2);
o.back()->name = "foo";
}
// -- pool_opts_t --
// The order of items in the list is important; therefore, always add new
// options to the end of the list.
typedef std::map<std::string, pool_opts_t::opt_desc_t> opt_mapping_t;
static opt_mapping_t opt_mapping = boost::assign::map_list_of
("scrub_min_interval", pool_opts_t::opt_desc_t(
pool_opts_t::SCRUB_MIN_INTERVAL, pool_opts_t::DOUBLE))
("scrub_max_interval", pool_opts_t::opt_desc_t(
pool_opts_t::SCRUB_MAX_INTERVAL, pool_opts_t::DOUBLE))
("deep_scrub_interval", pool_opts_t::opt_desc_t(
pool_opts_t::DEEP_SCRUB_INTERVAL, pool_opts_t::DOUBLE))
("recovery_priority", pool_opts_t::opt_desc_t(
pool_opts_t::RECOVERY_PRIORITY, pool_opts_t::INT))
("recovery_op_priority", pool_opts_t::opt_desc_t(
pool_opts_t::RECOVERY_OP_PRIORITY, pool_opts_t::INT))
("scrub_priority", pool_opts_t::opt_desc_t(
pool_opts_t::SCRUB_PRIORITY, pool_opts_t::INT))
("compression_mode", pool_opts_t::opt_desc_t(
pool_opts_t::COMPRESSION_MODE, pool_opts_t::STR))
("compression_algorithm", pool_opts_t::opt_desc_t(
pool_opts_t::COMPRESSION_ALGORITHM, pool_opts_t::STR))
("compression_required_ratio", pool_opts_t::opt_desc_t(
pool_opts_t::COMPRESSION_REQUIRED_RATIO, pool_opts_t::DOUBLE))
("compression_max_blob_size", pool_opts_t::opt_desc_t(
pool_opts_t::COMPRESSION_MAX_BLOB_SIZE, pool_opts_t::INT))
("compression_min_blob_size", pool_opts_t::opt_desc_t(
pool_opts_t::COMPRESSION_MIN_BLOB_SIZE, pool_opts_t::INT))
("csum_type", pool_opts_t::opt_desc_t(
pool_opts_t::CSUM_TYPE, pool_opts_t::INT))
("csum_max_block", pool_opts_t::opt_desc_t(
pool_opts_t::CSUM_MAX_BLOCK, pool_opts_t::INT))
("csum_min_block", pool_opts_t::opt_desc_t(
pool_opts_t::CSUM_MIN_BLOCK, pool_opts_t::INT))
("fingerprint_algorithm", pool_opts_t::opt_desc_t(
pool_opts_t::FINGERPRINT_ALGORITHM, pool_opts_t::STR))
("pg_num_min", pool_opts_t::opt_desc_t(
pool_opts_t::PG_NUM_MIN, pool_opts_t::INT))
("target_size_bytes", pool_opts_t::opt_desc_t(
pool_opts_t::TARGET_SIZE_BYTES, pool_opts_t::INT))
("target_size_ratio", pool_opts_t::opt_desc_t(
pool_opts_t::TARGET_SIZE_RATIO, pool_opts_t::DOUBLE))
("pg_autoscale_bias", pool_opts_t::opt_desc_t(
pool_opts_t::PG_AUTOSCALE_BIAS, pool_opts_t::DOUBLE))
("read_lease_interval", pool_opts_t::opt_desc_t(
pool_opts_t::READ_LEASE_INTERVAL, pool_opts_t::DOUBLE))
("dedup_tier", pool_opts_t::opt_desc_t(
pool_opts_t::DEDUP_TIER, pool_opts_t::INT))
("dedup_chunk_algorithm", pool_opts_t::opt_desc_t(
pool_opts_t::DEDUP_CHUNK_ALGORITHM, pool_opts_t::STR))
("dedup_cdc_chunk_size", pool_opts_t::opt_desc_t(
pool_opts_t::DEDUP_CDC_CHUNK_SIZE, pool_opts_t::INT))
("pg_num_max", pool_opts_t::opt_desc_t(
pool_opts_t::PG_NUM_MAX, pool_opts_t::INT));
bool pool_opts_t::is_opt_name(const std::string& name)
{
return opt_mapping.count(name);
}
pool_opts_t::opt_desc_t pool_opts_t::get_opt_desc(const std::string& name)
{
auto i = opt_mapping.find(name);
ceph_assert(i != opt_mapping.end());
return i->second;
}
bool pool_opts_t::is_set(pool_opts_t::key_t key) const
{
return opts.count(key);
}
const pool_opts_t::value_t& pool_opts_t::get(pool_opts_t::key_t key) const
{
auto i = opts.find(key);
ceph_assert(i != opts.end());
return i->second;
}
bool pool_opts_t::unset(pool_opts_t::key_t key) {
return opts.erase(key) > 0;
}
class pool_opts_dumper_t : public boost::static_visitor<> {
public:
pool_opts_dumper_t(const std::string& name_, Formatter* f_) :
name(name_.c_str()), f(f_) {}
void operator()(std::string s) const {
f->dump_string(name, s);
}
void operator()(int64_t i) const {
f->dump_int(name, i);
}
void operator()(double d) const {
f->dump_float(name, d);
}
private:
const char* name;
Formatter* f;
};
void pool_opts_t::dump(const std::string& name, Formatter* f) const
{
const opt_desc_t& desc = get_opt_desc(name);
auto i = opts.find(desc.key);
if (i == opts.end()) {
return;
}
boost::apply_visitor(pool_opts_dumper_t(name, f), i->second);
}
void pool_opts_t::dump(Formatter* f) const
{
for (auto i = opt_mapping.cbegin(); i != opt_mapping.cend(); ++i) {
const std::string& name = i->first;
const opt_desc_t& desc = i->second;
auto j = opts.find(desc.key);
if (j == opts.end()) {
continue;
}
boost::apply_visitor(pool_opts_dumper_t(name, f), j->second);
}
}
class pool_opts_encoder_t : public boost::static_visitor<> {
public:
explicit pool_opts_encoder_t(ceph::buffer::list& bl_, uint64_t features)
: bl(bl_),
features(features) {}
void operator()(const std::string &s) const {
encode(static_cast<int32_t>(pool_opts_t::STR), bl);
encode(s, bl);
}
void operator()(int64_t i) const {
encode(static_cast<int32_t>(pool_opts_t::INT), bl);
if (HAVE_FEATURE(features, SERVER_NAUTILUS)) {
encode(i, bl);
} else {
encode(static_cast<int32_t>(i), bl);
}
}
void operator()(double d) const {
encode(static_cast<int32_t>(pool_opts_t::DOUBLE), bl);
encode(d, bl);
}
private:
ceph::buffer::list& bl;
uint64_t features;
};
void pool_opts_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
unsigned v = 2;
if (!HAVE_FEATURE(features, SERVER_NAUTILUS)) {
v = 1;
}
ENCODE_START(v, 1, bl);
uint32_t n = static_cast<uint32_t>(opts.size());
encode(n, bl);
for (auto i = opts.cbegin(); i != opts.cend(); ++i) {
encode(static_cast<int32_t>(i->first), bl);
boost::apply_visitor(pool_opts_encoder_t(bl, features), i->second);
}
ENCODE_FINISH(bl);
}
void pool_opts_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(1, bl);
__u32 n;
decode(n, bl);
opts.clear();
while (n--) {
int32_t k, t;
decode(k, bl);
decode(t, bl);
if (t == STR) {
std::string s;
decode(s, bl);
opts[static_cast<key_t>(k)] = s;
} else if (t == INT) {
int64_t i;
if (struct_v >= 2) {
decode(i, bl);
} else {
int ii;
decode(ii, bl);
i = ii;
}
opts[static_cast<key_t>(k)] = i;
} else if (t == DOUBLE) {
double d;
decode(d, bl);
opts[static_cast<key_t>(k)] = d;
} else {
ceph_assert(!"invalid type");
}
}
DECODE_FINISH(bl);
}
ostream& operator<<(ostream& out, const pool_opts_t& opts)
{
for (auto i = opt_mapping.begin(); i != opt_mapping.end(); ++i) {
const std::string& name = i->first;
const pool_opts_t::opt_desc_t& desc = i->second;
auto j = opts.opts.find(desc.key);
if (j == opts.opts.end()) {
continue;
}
out << " " << name << " " << j->second;
}
return out;
}
// -- pg_pool_t --
const char *pg_pool_t::APPLICATION_NAME_CEPHFS("cephfs");
const char *pg_pool_t::APPLICATION_NAME_RBD("rbd");
const char *pg_pool_t::APPLICATION_NAME_RGW("rgw");
void pg_pool_t::dump(Formatter *f) const
{
f->dump_stream("create_time") << get_create_time();
f->dump_unsigned("flags", get_flags());
f->dump_string("flags_names", get_flags_string());
f->dump_int("type", get_type());
f->dump_int("size", get_size());
f->dump_int("min_size", get_min_size());
f->dump_int("crush_rule", get_crush_rule());
f->dump_int("peering_crush_bucket_count", peering_crush_bucket_count);
f->dump_int("peering_crush_bucket_target", peering_crush_bucket_target);
f->dump_int("peering_crush_bucket_barrier", peering_crush_bucket_barrier);
f->dump_int("peering_crush_bucket_mandatory_member", peering_crush_mandatory_member);
f->dump_int("object_hash", get_object_hash());
f->dump_string("pg_autoscale_mode",
get_pg_autoscale_mode_name(pg_autoscale_mode));
f->dump_unsigned("pg_num", get_pg_num());
f->dump_unsigned("pg_placement_num", get_pgp_num());
f->dump_unsigned("pg_placement_num_target", get_pgp_num_target());
f->dump_unsigned("pg_num_target", get_pg_num_target());
f->dump_unsigned("pg_num_pending", get_pg_num_pending());
f->dump_object("last_pg_merge_meta", last_pg_merge_meta);
f->dump_stream("last_change") << get_last_change();
f->dump_stream("last_force_op_resend") << get_last_force_op_resend();
f->dump_stream("last_force_op_resend_prenautilus")
<< get_last_force_op_resend_prenautilus();
f->dump_stream("last_force_op_resend_preluminous")
<< get_last_force_op_resend_preluminous();
f->dump_unsigned("auid", get_auid());
f->dump_string("snap_mode", is_pool_snaps_mode() ? "pool" : "selfmanaged");
f->dump_unsigned("snap_seq", get_snap_seq());
f->dump_unsigned("snap_epoch", get_snap_epoch());
f->open_array_section("pool_snaps");
for (auto p = snaps.cbegin(); p != snaps.cend(); ++p) {
f->open_object_section("pool_snap_info");
p->second.dump(f);
f->close_section();
}
f->close_section();
f->dump_stream("removed_snaps") << removed_snaps;
f->dump_unsigned("quota_max_bytes", quota_max_bytes);
f->dump_unsigned("quota_max_objects", quota_max_objects);
f->open_array_section("tiers");
for (auto p = tiers.cbegin(); p != tiers.cend(); ++p)
f->dump_unsigned("pool_id", *p);
f->close_section();
f->dump_int("tier_of", tier_of);
f->dump_int("read_tier", read_tier);
f->dump_int("write_tier", write_tier);
f->dump_string("cache_mode", get_cache_mode_name());
f->dump_unsigned("target_max_bytes", target_max_bytes);
f->dump_unsigned("target_max_objects", target_max_objects);
f->dump_unsigned("cache_target_dirty_ratio_micro",
cache_target_dirty_ratio_micro);
f->dump_unsigned("cache_target_dirty_high_ratio_micro",
cache_target_dirty_high_ratio_micro);
f->dump_unsigned("cache_target_full_ratio_micro",
cache_target_full_ratio_micro);
f->dump_unsigned("cache_min_flush_age", cache_min_flush_age);
f->dump_unsigned("cache_min_evict_age", cache_min_evict_age);
f->dump_string("erasure_code_profile", erasure_code_profile);
f->open_object_section("hit_set_params");
hit_set_params.dump(f);
f->close_section(); // hit_set_params
f->dump_unsigned("hit_set_period", hit_set_period);
f->dump_unsigned("hit_set_count", hit_set_count);
f->dump_bool("use_gmt_hitset", use_gmt_hitset);
f->dump_unsigned("min_read_recency_for_promote", min_read_recency_for_promote);
f->dump_unsigned("min_write_recency_for_promote", min_write_recency_for_promote);
f->dump_unsigned("hit_set_grade_decay_rate", hit_set_grade_decay_rate);
f->dump_unsigned("hit_set_search_last_n", hit_set_search_last_n);
f->open_array_section("grade_table");
for (unsigned i = 0; i < hit_set_count; ++i)
f->dump_unsigned("value", get_grade(i));
f->close_section();
f->dump_unsigned("stripe_width", get_stripe_width());
f->dump_unsigned("expected_num_objects", expected_num_objects);
f->dump_bool("fast_read", fast_read);
f->open_object_section("options");
opts.dump(f);
f->close_section(); // options
f->open_object_section("application_metadata");
for (auto &app_pair : application_metadata) {
f->open_object_section(app_pair.first.c_str());
for (auto &kv_pair : app_pair.second) {
f->dump_string(kv_pair.first.c_str(), kv_pair.second);
}
f->close_section(); // application
}
f->close_section(); // application_metadata
}
void pg_pool_t::convert_to_pg_shards(const vector<int> &from, set<pg_shard_t>* to) const {
for (size_t i = 0; i < from.size(); ++i) {
if (from[i] != CRUSH_ITEM_NONE) {
to->insert(
pg_shard_t(
from[i],
is_erasure() ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
}
}
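// Recompute pg_num_mask/pgp_num_mask as the smallest (2^n - 1) masks that
// cover pg_num-1/pgp_num-1; ceph_stable_mod() uses them to map raw hashes
// onto PGs.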
void pg_pool_t::calc_pg_masks()
{
pg_num_mask = (1 << cbits(pg_num-1)) - 1;
pgp_num_mask = (1 << cbits(pgp_num-1)) - 1;
}
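// Effective number of bins the hash space is divided into at this pgid:
// pg_num itself when pg_num is a power of two, otherwise the full mask+1
// for seeds that have already split and half that for those that have not.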
unsigned pg_pool_t::get_pg_num_divisor(pg_t pgid) const
{
if (pg_num == pg_num_mask + 1)
return pg_num; // power-of-2 split
unsigned mask = pg_num_mask >> 1;
if ((pgid.ps() & mask) < (pg_num & mask))
return pg_num_mask + 1; // smaller bin size (already split)
else
return (pg_num_mask + 1) >> 1; // bigger bin (not yet split)
}
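// True while pg_num_pending < pg_num and pgid participates in the pending
// merge; *target is set to true for the surviving parent PG and false for
// the source PGs in [pg_num_pending, pg_num) that will be removed.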
bool pg_pool_t::is_pending_merge(pg_t pgid, bool *target) const
{
if (pg_num_pending >= pg_num) {
return false;
}
if (pgid.ps() >= pg_num_pending && pgid.ps() < pg_num) {
if (target) {
*target = false;
}
return true;
}
for (unsigned ps = pg_num_pending; ps < pg_num; ++ps) {
if (pg_t(ps, pgid.pool()).get_parent() == pgid) {
if (target) {
*target = true;
}
return true;
}
}
return false;
}
/*
* we have two snap modes:
* - pool snaps
* - snap existence/non-existence defined by snaps[] and snap_seq
* - user managed snaps
* - existence tracked by librados user
*/
bool pg_pool_t::is_pool_snaps_mode() const
{
return has_flag(FLAG_POOL_SNAPS);
}
bool pg_pool_t::is_unmanaged_snaps_mode() const
{
return has_flag(FLAG_SELFMANAGED_SNAPS);
}
bool pg_pool_t::is_removed_snap(snapid_t s) const
{
if (is_pool_snaps_mode())
return s <= get_snap_seq() && snaps.count(s) == 0;
else
return removed_snaps.contains(s);
}
snapid_t pg_pool_t::snap_exists(std::string_view s) const
{
for (auto p = snaps.cbegin(); p != snaps.cend(); ++p)
if (p->second.name == s)
return p->second.snapid;
return 0;
}
void pg_pool_t::add_snap(const char *n, utime_t stamp)
{
ceph_assert(!is_unmanaged_snaps_mode());
flags |= FLAG_POOL_SNAPS;
snapid_t s = get_snap_seq() + 1;
snap_seq = s;
snaps[s].snapid = s;
snaps[s].name = n;
snaps[s].stamp = stamp;
}
uint64_t pg_pool_t::add_unmanaged_snap(bool preoctopus_compat)
{
ceph_assert(!is_pool_snaps_mode());
if (snap_seq == 0) {
if (preoctopus_compat) {
      // kludge for pre-mimic tracking of pool vs selfmanaged snaps. after
      // mimic this field is not decoded but our flag is set; pre-mimic, we
      // have a non-empty removed_snaps to signify a non-pool-snaps pool.
removed_snaps.insert(snapid_t(1));
}
snap_seq = 1;
}
flags |= FLAG_SELFMANAGED_SNAPS;
snap_seq = snap_seq + 1;
return snap_seq;
}
void pg_pool_t::remove_snap(snapid_t s)
{
ceph_assert(snaps.count(s));
snaps.erase(s);
snap_seq = snap_seq + 1;
}
void pg_pool_t::remove_unmanaged_snap(snapid_t s, bool preoctopus_compat)
{
ceph_assert(is_unmanaged_snaps_mode());
++snap_seq;
if (preoctopus_compat) {
removed_snaps.insert(s);
// try to add in the new seq, just to try to keep the interval_set contiguous
if (!removed_snaps.contains(get_snap_seq())) {
removed_snaps.insert(get_snap_seq());
}
}
}
SnapContext pg_pool_t::get_snap_context() const
{
vector<snapid_t> s(snaps.size());
unsigned i = 0;
for (auto p = snaps.crbegin(); p != snaps.crend(); ++p)
s[i++] = p->first;
return SnapContext(get_snap_seq(), s);
}
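// Hash an object key within a namespace: with an empty namespace the key is
// hashed directly, otherwise namespace and key are joined with a 0x1f
// separator before hashing with the pool's object_hash.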
uint32_t pg_pool_t::hash_key(const string& key, const string& ns) const
{
if (ns.empty())
return ceph_str_hash(object_hash, key.data(), key.length());
int nsl = ns.length();
int len = key.length() + nsl + 1;
char buf[len];
memcpy(&buf[0], ns.data(), nsl);
buf[nsl] = '\037';
memcpy(&buf[nsl+1], key.data(), key.length());
return ceph_str_hash(object_hash, &buf[0], len);
}
uint32_t pg_pool_t::raw_hash_to_pg(uint32_t v) const
{
return ceph_stable_mod(v, pg_num, pg_num_mask);
}
/*
* map a raw pg (with full precision ps) into an actual pg, for storage
*/
pg_t pg_pool_t::raw_pg_to_pg(pg_t pg) const
{
pg.set_ps(ceph_stable_mod(pg.ps(), pg_num, pg_num_mask));
return pg;
}
/*
* map raw pg (full precision ps) into a placement seed. include
* pool id in that value so that different pools don't use the same
* seeds.
*/
ps_t pg_pool_t::raw_pg_to_pps(pg_t pg) const
{
if (flags & FLAG_HASHPSPOOL) {
// Hash the pool id so that pool PGs do not overlap.
return
crush_hash32_2(CRUSH_HASH_RJENKINS1,
ceph_stable_mod(pg.ps(), pgp_num, pgp_num_mask),
pg.pool());
} else {
// Legacy behavior; add ps and pool together. This is not a great
// idea because the PGs from each pool will essentially overlap on
// top of each other: 0.5 == 1.4 == 2.3 == ...
return
ceph_stable_mod(pg.ps(), pgp_num, pgp_num_mask) +
pg.pool();
}
}
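// Pick a pseudo-random full-precision hash position that still maps back to
// this PG: hash the seed, keep only the bits above this PG's bin and OR in
// pg.ps().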
uint32_t pg_pool_t::get_random_pg_position(pg_t pg, uint32_t seed) const
{
uint32_t r = crush_hash32_2(CRUSH_HASH_RJENKINS1, seed, 123);
if (pg_num == pg_num_mask + 1) {
r &= ~pg_num_mask;
} else {
unsigned smaller_mask = pg_num_mask >> 1;
if ((pg.ps() & smaller_mask) < (pg_num & smaller_mask)) {
r &= ~pg_num_mask;
} else {
r &= ~smaller_mask;
}
}
r |= pg.ps();
return r;
}
void pg_pool_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
using ceph::encode;
if ((features & CEPH_FEATURE_PGPOOL3) == 0) {
// this encoding matches the old struct ceph_pg_pool
__u8 struct_v = 2;
encode(struct_v, bl);
encode(type, bl);
encode(size, bl);
encode(crush_rule, bl);
encode(object_hash, bl);
encode(pg_num, bl);
encode(pgp_num, bl);
__u32 lpg_num = 0, lpgp_num = 0; // tell old code that there are no localized pgs.
encode(lpg_num, bl);
encode(lpgp_num, bl);
encode(last_change, bl);
encode(snap_seq, bl);
encode(snap_epoch, bl);
__u32 n = snaps.size();
encode(n, bl);
n = removed_snaps.num_intervals();
encode(n, bl);
encode(auid, bl);
encode_nohead(snaps, bl, features);
encode_nohead(removed_snaps, bl);
return;
}
if ((features & CEPH_FEATURE_OSDENC) == 0) {
__u8 struct_v = 4;
encode(struct_v, bl);
encode(type, bl);
encode(size, bl);
encode(crush_rule, bl);
encode(object_hash, bl);
encode(pg_num, bl);
encode(pgp_num, bl);
__u32 lpg_num = 0, lpgp_num = 0; // tell old code that there are no localized pgs.
encode(lpg_num, bl);
encode(lpgp_num, bl);
encode(last_change, bl);
encode(snap_seq, bl);
encode(snap_epoch, bl);
encode(snaps, bl, features);
encode(removed_snaps, bl);
encode(auid, bl);
encode(flags, bl);
encode((uint32_t)0, bl); // crash_replay_interval
return;
}
if ((features & CEPH_FEATURE_OSD_POOLRESEND) == 0) {
// we simply added last_force_op_resend here, which is a fully
// backward compatible change. however, encoding the same map
// differently between monitors triggers scrub noise (even though
    // they are decodable without the feature), so let's be pedantic
// about it.
ENCODE_START(14, 5, bl);
encode(type, bl);
encode(size, bl);
encode(crush_rule, bl);
encode(object_hash, bl);
encode(pg_num, bl);
encode(pgp_num, bl);
__u32 lpg_num = 0, lpgp_num = 0; // tell old code that there are no localized pgs.
encode(lpg_num, bl);
encode(lpgp_num, bl);
encode(last_change, bl);
encode(snap_seq, bl);
encode(snap_epoch, bl);
encode(snaps, bl, features);
encode(removed_snaps, bl);
encode(auid, bl);
encode(flags, bl);
encode((uint32_t)0, bl); // crash_replay_interval
encode(min_size, bl);
encode(quota_max_bytes, bl);
encode(quota_max_objects, bl);
encode(tiers, bl);
encode(tier_of, bl);
__u8 c = cache_mode;
encode(c, bl);
encode(read_tier, bl);
encode(write_tier, bl);
encode(properties, bl);
encode(hit_set_params, bl);
encode(hit_set_period, bl);
encode(hit_set_count, bl);
encode(stripe_width, bl);
encode(target_max_bytes, bl);
encode(target_max_objects, bl);
encode(cache_target_dirty_ratio_micro, bl);
encode(cache_target_full_ratio_micro, bl);
encode(cache_min_flush_age, bl);
encode(cache_min_evict_age, bl);
encode(erasure_code_profile, bl);
ENCODE_FINISH(bl);
return;
}
uint8_t v = 30;
// NOTE: any new encoding dependencies must be reflected by
// SIGNIFICANT_FEATURES
if (!(features & CEPH_FEATURE_NEW_OSDOP_ENCODING)) {
// this was the first post-hammer thing we added; if it's missing, encode
// like hammer.
v = 21;
} else if (!HAVE_FEATURE(features, SERVER_LUMINOUS)) {
v = 24;
} else if (!HAVE_FEATURE(features, SERVER_MIMIC)) {
v = 26;
} else if (!HAVE_FEATURE(features, SERVER_NAUTILUS)) {
v = 27;
} else if (!is_stretch_pool()) {
v = 29;
}
ENCODE_START(v, 5, bl);
encode(type, bl);
encode(size, bl);
encode(crush_rule, bl);
encode(object_hash, bl);
encode(pg_num, bl);
encode(pgp_num, bl);
__u32 lpg_num = 0, lpgp_num = 0; // tell old code that there are no localized pgs.
encode(lpg_num, bl);
encode(lpgp_num, bl);
encode(last_change, bl);
encode(snap_seq, bl);
encode(snap_epoch, bl);
encode(snaps, bl, features);
encode(removed_snaps, bl);
encode(auid, bl);
if (v >= 27) {
encode(flags, bl);
} else {
auto tmp = flags;
tmp &= ~(FLAG_SELFMANAGED_SNAPS | FLAG_POOL_SNAPS | FLAG_CREATING);
encode(tmp, bl);
}
encode((uint32_t)0, bl); // crash_replay_interval
encode(min_size, bl);
encode(quota_max_bytes, bl);
encode(quota_max_objects, bl);
encode(tiers, bl);
encode(tier_of, bl);
__u8 c = cache_mode;
encode(c, bl);
encode(read_tier, bl);
encode(write_tier, bl);
encode(properties, bl);
encode(hit_set_params, bl);
encode(hit_set_period, bl);
encode(hit_set_count, bl);
encode(stripe_width, bl);
encode(target_max_bytes, bl);
encode(target_max_objects, bl);
encode(cache_target_dirty_ratio_micro, bl);
encode(cache_target_full_ratio_micro, bl);
encode(cache_min_flush_age, bl);
encode(cache_min_evict_age, bl);
encode(erasure_code_profile, bl);
encode(last_force_op_resend_preluminous, bl);
encode(min_read_recency_for_promote, bl);
encode(expected_num_objects, bl);
if (v >= 19) {
encode(cache_target_dirty_high_ratio_micro, bl);
}
if (v >= 20) {
encode(min_write_recency_for_promote, bl);
}
if (v >= 21) {
encode(use_gmt_hitset, bl);
}
if (v >= 22) {
encode(fast_read, bl);
}
if (v >= 23) {
encode(hit_set_grade_decay_rate, bl);
encode(hit_set_search_last_n, bl);
}
if (v >= 24) {
encode(opts, bl, features);
}
if (v >= 25) {
encode(last_force_op_resend_prenautilus, bl);
}
if (v >= 26) {
encode(application_metadata, bl);
}
if (v >= 27) {
encode(create_time, bl);
}
if (v >= 28) {
encode(pg_num_target, bl);
encode(pgp_num_target, bl);
encode(pg_num_pending, bl);
encode((epoch_t)0, bl); // pg_num_dec_last_epoch_started from 14.1.[01]
encode((epoch_t)0, bl); // pg_num_dec_last_epoch_clean from 14.1.[01]
encode(last_force_op_resend, bl);
encode(pg_autoscale_mode, bl);
}
if (v >= 29) {
encode(last_pg_merge_meta, bl);
}
if (v >= 30) {
encode(peering_crush_bucket_count, bl);
encode(peering_crush_bucket_target, bl);
encode(peering_crush_bucket_barrier, bl);
encode(peering_crush_mandatory_member, bl);
}
ENCODE_FINISH(bl);
}
void pg_pool_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(30, 5, 5, bl);
decode(type, bl);
decode(size, bl);
decode(crush_rule, bl);
decode(object_hash, bl);
decode(pg_num, bl);
decode(pgp_num, bl);
{
__u32 lpg_num, lpgp_num;
decode(lpg_num, bl);
decode(lpgp_num, bl);
}
decode(last_change, bl);
decode(snap_seq, bl);
decode(snap_epoch, bl);
if (struct_v >= 3) {
decode(snaps, bl);
decode(removed_snaps, bl);
decode(auid, bl);
} else {
__u32 n, m;
decode(n, bl);
decode(m, bl);
decode(auid, bl);
decode_nohead(n, snaps, bl);
decode_nohead(m, removed_snaps, bl);
}
if (struct_v >= 4) {
decode(flags, bl);
uint32_t crash_replay_interval;
decode(crash_replay_interval, bl);
} else {
flags = 0;
}
// upgrade path for selfmanaged vs pool snaps
if (snap_seq > 0 && (flags & (FLAG_SELFMANAGED_SNAPS|FLAG_POOL_SNAPS)) == 0) {
if (!removed_snaps.empty()) {
flags |= FLAG_SELFMANAGED_SNAPS;
} else {
flags |= FLAG_POOL_SNAPS;
}
}
if (struct_v >= 7) {
decode(min_size, bl);
} else {
min_size = size - size/2;
}
if (struct_v >= 8) {
decode(quota_max_bytes, bl);
decode(quota_max_objects, bl);
}
if (struct_v >= 9) {
decode(tiers, bl);
decode(tier_of, bl);
__u8 v;
decode(v, bl);
cache_mode = (cache_mode_t)v;
decode(read_tier, bl);
decode(write_tier, bl);
}
if (struct_v >= 10) {
decode(properties, bl);
}
if (struct_v >= 11) {
decode(hit_set_params, bl);
decode(hit_set_period, bl);
decode(hit_set_count, bl);
} else {
pg_pool_t def;
hit_set_period = def.hit_set_period;
hit_set_count = def.hit_set_count;
}
if (struct_v >= 12) {
decode(stripe_width, bl);
} else {
set_stripe_width(0);
}
if (struct_v >= 13) {
decode(target_max_bytes, bl);
decode(target_max_objects, bl);
decode(cache_target_dirty_ratio_micro, bl);
decode(cache_target_full_ratio_micro, bl);
decode(cache_min_flush_age, bl);
decode(cache_min_evict_age, bl);
} else {
target_max_bytes = 0;
target_max_objects = 0;
cache_target_dirty_ratio_micro = 0;
cache_target_full_ratio_micro = 0;
cache_min_flush_age = 0;
cache_min_evict_age = 0;
}
if (struct_v >= 14) {
decode(erasure_code_profile, bl);
}
if (struct_v >= 15) {
decode(last_force_op_resend_preluminous, bl);
} else {
last_force_op_resend_preluminous = 0;
}
if (struct_v >= 16) {
decode(min_read_recency_for_promote, bl);
} else {
min_read_recency_for_promote = 1;
}
if (struct_v >= 17) {
decode(expected_num_objects, bl);
} else {
expected_num_objects = 0;
}
if (struct_v >= 19) {
decode(cache_target_dirty_high_ratio_micro, bl);
} else {
cache_target_dirty_high_ratio_micro = cache_target_dirty_ratio_micro;
}
if (struct_v >= 20) {
decode(min_write_recency_for_promote, bl);
} else {
min_write_recency_for_promote = 1;
}
if (struct_v >= 21) {
decode(use_gmt_hitset, bl);
} else {
use_gmt_hitset = false;
}
if (struct_v >= 22) {
decode(fast_read, bl);
} else {
fast_read = false;
}
if (struct_v >= 23) {
decode(hit_set_grade_decay_rate, bl);
decode(hit_set_search_last_n, bl);
} else {
hit_set_grade_decay_rate = 0;
hit_set_search_last_n = 1;
}
if (struct_v >= 24) {
decode(opts, bl);
}
if (struct_v >= 25) {
decode(last_force_op_resend_prenautilus, bl);
} else {
last_force_op_resend_prenautilus = last_force_op_resend_preluminous;
}
if (struct_v >= 26) {
decode(application_metadata, bl);
}
if (struct_v >= 27) {
decode(create_time, bl);
}
if (struct_v >= 28) {
decode(pg_num_target, bl);
decode(pgp_num_target, bl);
decode(pg_num_pending, bl);
epoch_t old_merge_last_epoch_clean, old_merge_last_epoch_started;
decode(old_merge_last_epoch_started, bl);
decode(old_merge_last_epoch_clean, bl);
decode(last_force_op_resend, bl);
decode(pg_autoscale_mode, bl);
if (struct_v >= 29) {
decode(last_pg_merge_meta, bl);
} else {
last_pg_merge_meta.last_epoch_clean = old_merge_last_epoch_clean;
last_pg_merge_meta.last_epoch_started = old_merge_last_epoch_started;
}
} else {
pg_num_target = pg_num;
pgp_num_target = pgp_num;
pg_num_pending = pg_num;
last_force_op_resend = last_force_op_resend_prenautilus;
pg_autoscale_mode = pg_autoscale_mode_t::WARN; // default to warn on upgrade
}
if (struct_v >= 30) {
decode(peering_crush_bucket_count, bl);
decode(peering_crush_bucket_target, bl);
decode(peering_crush_bucket_barrier, bl);
decode(peering_crush_mandatory_member, bl);
}
DECODE_FINISH(bl);
calc_pg_masks();
calc_grade_table();
}
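// For stretch pools, peering is only allowed if the OSDs in 'want' span at
// least peering_crush_bucket_count distinct crush buckets of the barrier
// type, and the mandatory member (if any) is one of those buckets.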
bool pg_pool_t::stretch_set_can_peer(const set<int>& want, const OSDMap& osdmap,
std::ostream * out) const
{
if (!is_stretch_pool()) return true;
const uint32_t barrier_id = peering_crush_bucket_barrier;
const uint32_t barrier_count = peering_crush_bucket_count;
set<int> ancestors;
const shared_ptr<CrushWrapper>& crush = osdmap.crush;
for (int osdid : want) {
int ancestor = crush->get_parent_of_type(osdid, barrier_id,
crush_rule);
ancestors.insert(ancestor);
}
if (ancestors.size() < barrier_count) {
if (out) {
*out << __func__ << ": not enough crush buckets with OSDs in want set "
<< want;
}
return false;
} else if (peering_crush_mandatory_member != CRUSH_ITEM_NONE &&
!ancestors.count(peering_crush_mandatory_member)) {
if (out) {
*out << __func__ << ": missing mandatory crush bucket member "
<< peering_crush_mandatory_member;
}
return false;
}
return true;
}
void pg_pool_t::generate_test_instances(list<pg_pool_t*>& o)
{
pg_pool_t a;
o.push_back(new pg_pool_t(a));
a.create_time = utime_t(4,5);
a.type = TYPE_REPLICATED;
a.size = 2;
a.crush_rule = 3;
a.object_hash = 4;
a.pg_num = 6;
a.pgp_num = 4;
a.pgp_num_target = 4;
a.pg_num_target = 5;
a.pg_num_pending = 5;
a.last_pg_merge_meta.last_epoch_started = 2;
a.last_pg_merge_meta.last_epoch_clean = 2;
a.last_change = 9;
a.last_force_op_resend = 123823;
a.last_force_op_resend_preluminous = 123824;
a.snap_seq = 10;
a.snap_epoch = 11;
a.flags = FLAG_POOL_SNAPS;
a.auid = 12;
a.quota_max_bytes = 473;
a.quota_max_objects = 474;
o.push_back(new pg_pool_t(a));
a.snaps[3].name = "asdf";
a.snaps[3].snapid = 3;
a.snaps[3].stamp = utime_t(123, 4);
a.snaps[6].name = "qwer";
a.snaps[6].snapid = 6;
a.snaps[6].stamp = utime_t(23423, 4);
o.push_back(new pg_pool_t(a));
a.flags = FLAG_SELFMANAGED_SNAPS;
a.snaps.clear();
a.removed_snaps.insert(2);
a.quota_max_bytes = 2473;
a.quota_max_objects = 4374;
a.tiers.insert(0);
a.tiers.insert(1);
a.tier_of = 2;
a.cache_mode = CACHEMODE_WRITEBACK;
a.read_tier = 1;
a.write_tier = 1;
a.hit_set_params = HitSet::Params(new BloomHitSet::Params);
a.hit_set_period = 3600;
a.hit_set_count = 8;
a.min_read_recency_for_promote = 1;
a.min_write_recency_for_promote = 1;
a.hit_set_grade_decay_rate = 50;
a.hit_set_search_last_n = 1;
a.calc_grade_table();
a.set_stripe_width(12345);
a.target_max_bytes = 1238132132;
a.target_max_objects = 1232132;
a.cache_target_dirty_ratio_micro = 187232;
a.cache_target_dirty_high_ratio_micro = 309856;
a.cache_target_full_ratio_micro = 987222;
a.cache_min_flush_age = 231;
a.cache_min_evict_age = 2321;
a.erasure_code_profile = "profile in osdmap";
a.expected_num_objects = 123456;
a.fast_read = false;
a.application_metadata = {{"rbd", {{"key", "value"}}}};
o.push_back(new pg_pool_t(a));
}
ostream& operator<<(ostream& out, const pg_pool_t& p)
{
out << p.get_type_name();
if (p.get_type_name() == "erasure") {
out << " profile " << p.erasure_code_profile;
}
out << " size " << p.get_size()
<< " min_size " << p.get_min_size()
<< " crush_rule " << p.get_crush_rule()
<< " object_hash " << p.get_object_hash_name()
<< " pg_num " << p.get_pg_num()
<< " pgp_num " << p.get_pgp_num();
if (p.get_pg_num_target() != p.get_pg_num()) {
out << " pg_num_target " << p.get_pg_num_target();
}
if (p.get_pgp_num_target() != p.get_pgp_num()) {
out << " pgp_num_target " << p.get_pgp_num_target();
}
if (p.get_pg_num_pending() != p.get_pg_num()) {
out << " pg_num_pending " << p.get_pg_num_pending();
}
if (p.pg_autoscale_mode != pg_pool_t::pg_autoscale_mode_t::UNKNOWN) {
out << " autoscale_mode " << p.get_pg_autoscale_mode_name(p.pg_autoscale_mode);
}
out << " last_change " << p.get_last_change();
if (p.get_last_force_op_resend() ||
p.get_last_force_op_resend_prenautilus() ||
p.get_last_force_op_resend_preluminous())
out << " lfor " << p.get_last_force_op_resend() << "/"
<< p.get_last_force_op_resend_prenautilus() << "/"
<< p.get_last_force_op_resend_preluminous();
if (p.get_auid())
out << " owner " << p.get_auid();
if (p.flags)
out << " flags " << p.get_flags_string();
if (p.quota_max_bytes)
out << " max_bytes " << p.quota_max_bytes;
if (p.quota_max_objects)
out << " max_objects " << p.quota_max_objects;
if (!p.tiers.empty())
out << " tiers " << p.tiers;
if (p.is_tier())
out << " tier_of " << p.tier_of;
if (p.has_read_tier())
out << " read_tier " << p.read_tier;
if (p.has_write_tier())
out << " write_tier " << p.write_tier;
if (p.cache_mode)
out << " cache_mode " << p.get_cache_mode_name();
if (p.target_max_bytes)
out << " target_bytes " << p.target_max_bytes;
if (p.target_max_objects)
out << " target_objects " << p.target_max_objects;
if (p.hit_set_params.get_type() != HitSet::TYPE_NONE) {
out << " hit_set " << p.hit_set_params
<< " " << p.hit_set_period << "s"
<< " x" << p.hit_set_count << " decay_rate "
<< p.hit_set_grade_decay_rate
<< " search_last_n " << p.hit_set_search_last_n;
}
if (p.min_read_recency_for_promote)
out << " min_read_recency_for_promote " << p.min_read_recency_for_promote;
if (p.min_write_recency_for_promote)
out << " min_write_recency_for_promote " << p.min_write_recency_for_promote;
out << " stripe_width " << p.get_stripe_width();
if (p.expected_num_objects)
out << " expected_num_objects " << p.expected_num_objects;
if (p.fast_read)
out << " fast_read " << p.fast_read;
out << p.opts;
if (!p.application_metadata.empty()) {
out << " application ";
for (auto it = p.application_metadata.begin();
it != p.application_metadata.end(); ++it) {
if (it != p.application_metadata.begin())
out << ",";
out << it->first;
}
}
return out;
}
// -- object_stat_sum_t --
void object_stat_sum_t::dump(Formatter *f) const
{
f->dump_int("num_bytes", num_bytes);
f->dump_int("num_objects", num_objects);
f->dump_int("num_object_clones", num_object_clones);
f->dump_int("num_object_copies", num_object_copies);
f->dump_int("num_objects_missing_on_primary", num_objects_missing_on_primary);
f->dump_int("num_objects_missing", num_objects_missing);
f->dump_int("num_objects_degraded", num_objects_degraded);
f->dump_int("num_objects_misplaced", num_objects_misplaced);
f->dump_int("num_objects_unfound", num_objects_unfound);
f->dump_int("num_objects_dirty", num_objects_dirty);
f->dump_int("num_whiteouts", num_whiteouts);
f->dump_int("num_read", num_rd);
f->dump_int("num_read_kb", num_rd_kb);
f->dump_int("num_write", num_wr);
f->dump_int("num_write_kb", num_wr_kb);
f->dump_int("num_scrub_errors", num_scrub_errors);
f->dump_int("num_shallow_scrub_errors", num_shallow_scrub_errors);
f->dump_int("num_deep_scrub_errors", num_deep_scrub_errors);
f->dump_int("num_objects_recovered", num_objects_recovered);
f->dump_int("num_bytes_recovered", num_bytes_recovered);
f->dump_int("num_keys_recovered", num_keys_recovered);
f->dump_int("num_objects_omap", num_objects_omap);
f->dump_int("num_objects_hit_set_archive", num_objects_hit_set_archive);
f->dump_int("num_bytes_hit_set_archive", num_bytes_hit_set_archive);
f->dump_int("num_flush", num_flush);
f->dump_int("num_flush_kb", num_flush_kb);
f->dump_int("num_evict", num_evict);
f->dump_int("num_evict_kb", num_evict_kb);
f->dump_int("num_promote", num_promote);
f->dump_int("num_flush_mode_high", num_flush_mode_high);
f->dump_int("num_flush_mode_low", num_flush_mode_low);
f->dump_int("num_evict_mode_some", num_evict_mode_some);
f->dump_int("num_evict_mode_full", num_evict_mode_full);
f->dump_int("num_objects_pinned", num_objects_pinned);
f->dump_int("num_legacy_snapsets", num_legacy_snapsets);
f->dump_int("num_large_omap_objects", num_large_omap_objects);
f->dump_int("num_objects_manifest", num_objects_manifest);
f->dump_int("num_omap_bytes", num_omap_bytes);
f->dump_int("num_omap_keys", num_omap_keys);
f->dump_int("num_objects_repaired", num_objects_repaired);
}
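// On little-endian builds the whole struct is appended as raw bytes, so the
// member order of object_stat_sum_t must stay in sync with the explicit
// field-by-field encoding below (and with decode()).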
void object_stat_sum_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(20, 14, bl);
#if defined(CEPH_LITTLE_ENDIAN)
bl.append((char *)(&num_bytes), sizeof(object_stat_sum_t));
#else
encode(num_bytes, bl);
encode(num_objects, bl);
encode(num_object_clones, bl);
encode(num_object_copies, bl);
encode(num_objects_missing_on_primary, bl);
encode(num_objects_degraded, bl);
encode(num_objects_unfound, bl);
encode(num_rd, bl);
encode(num_rd_kb, bl);
encode(num_wr, bl);
encode(num_wr_kb, bl);
encode(num_scrub_errors, bl);
encode(num_objects_recovered, bl);
encode(num_bytes_recovered, bl);
encode(num_keys_recovered, bl);
encode(num_shallow_scrub_errors, bl);
encode(num_deep_scrub_errors, bl);
encode(num_objects_dirty, bl);
encode(num_whiteouts, bl);
encode(num_objects_omap, bl);
encode(num_objects_hit_set_archive, bl);
encode(num_objects_misplaced, bl);
encode(num_bytes_hit_set_archive, bl);
encode(num_flush, bl);
encode(num_flush_kb, bl);
encode(num_evict, bl);
encode(num_evict_kb, bl);
encode(num_promote, bl);
encode(num_flush_mode_high, bl);
encode(num_flush_mode_low, bl);
encode(num_evict_mode_some, bl);
encode(num_evict_mode_full, bl);
encode(num_objects_pinned, bl);
encode(num_objects_missing, bl);
encode(num_legacy_snapsets, bl);
encode(num_large_omap_objects, bl);
encode(num_objects_manifest, bl);
encode(num_omap_bytes, bl);
encode(num_omap_keys, bl);
encode(num_objects_repaired, bl);
#endif
ENCODE_FINISH(bl);
}
void object_stat_sum_t::decode(ceph::buffer::list::const_iterator& bl)
{
bool decode_finish = false;
static const int STAT_SUM_DECODE_VERSION = 20;
DECODE_START(STAT_SUM_DECODE_VERSION, bl);
#if defined(CEPH_LITTLE_ENDIAN)
if (struct_v == STAT_SUM_DECODE_VERSION) {
bl.copy(sizeof(object_stat_sum_t), (char*)(&num_bytes));
decode_finish = true;
}
#endif
if (!decode_finish) {
decode(num_bytes, bl);
decode(num_objects, bl);
decode(num_object_clones, bl);
decode(num_object_copies, bl);
decode(num_objects_missing_on_primary, bl);
decode(num_objects_degraded, bl);
decode(num_objects_unfound, bl);
decode(num_rd, bl);
decode(num_rd_kb, bl);
decode(num_wr, bl);
decode(num_wr_kb, bl);
decode(num_scrub_errors, bl);
decode(num_objects_recovered, bl);
decode(num_bytes_recovered, bl);
decode(num_keys_recovered, bl);
decode(num_shallow_scrub_errors, bl);
decode(num_deep_scrub_errors, bl);
decode(num_objects_dirty, bl);
decode(num_whiteouts, bl);
decode(num_objects_omap, bl);
decode(num_objects_hit_set_archive, bl);
decode(num_objects_misplaced, bl);
decode(num_bytes_hit_set_archive, bl);
decode(num_flush, bl);
decode(num_flush_kb, bl);
decode(num_evict, bl);
decode(num_evict_kb, bl);
decode(num_promote, bl);
decode(num_flush_mode_high, bl);
decode(num_flush_mode_low, bl);
decode(num_evict_mode_some, bl);
decode(num_evict_mode_full, bl);
decode(num_objects_pinned, bl);
decode(num_objects_missing, bl);
if (struct_v >= 16) {
decode(num_legacy_snapsets, bl);
} else {
num_legacy_snapsets = num_object_clones; // upper bound
}
if (struct_v >= 17) {
decode(num_large_omap_objects, bl);
}
if (struct_v >= 18) {
decode(num_objects_manifest, bl);
}
if (struct_v >= 19) {
decode(num_omap_bytes, bl);
decode(num_omap_keys, bl);
}
if (struct_v >= 20) {
decode(num_objects_repaired, bl);
}
}
DECODE_FINISH(bl);
}
void object_stat_sum_t::generate_test_instances(list<object_stat_sum_t*>& o)
{
object_stat_sum_t a;
a.num_bytes = 1;
a.num_objects = 3;
a.num_object_clones = 4;
a.num_object_copies = 5;
a.num_objects_missing_on_primary = 6;
a.num_objects_missing = 123;
a.num_objects_degraded = 7;
a.num_objects_unfound = 8;
a.num_rd = 9; a.num_rd_kb = 10;
a.num_wr = 11; a.num_wr_kb = 12;
a.num_objects_recovered = 14;
a.num_bytes_recovered = 15;
a.num_keys_recovered = 16;
a.num_deep_scrub_errors = 17;
a.num_shallow_scrub_errors = 18;
a.num_scrub_errors = a.num_deep_scrub_errors + a.num_shallow_scrub_errors;
a.num_objects_dirty = 21;
a.num_whiteouts = 22;
a.num_objects_misplaced = 1232;
a.num_objects_hit_set_archive = 2;
a.num_bytes_hit_set_archive = 27;
a.num_flush = 5;
a.num_flush_kb = 6;
a.num_evict = 7;
a.num_evict_kb = 8;
a.num_promote = 9;
a.num_flush_mode_high = 0;
a.num_flush_mode_low = 1;
a.num_evict_mode_some = 1;
a.num_evict_mode_full = 0;
a.num_objects_pinned = 20;
a.num_large_omap_objects = 5;
a.num_objects_manifest = 2;
a.num_omap_bytes = 20000;
a.num_omap_keys = 200;
a.num_objects_repaired = 300;
o.push_back(new object_stat_sum_t(a));
}
void object_stat_sum_t::add(const object_stat_sum_t& o)
{
num_bytes += o.num_bytes;
num_objects += o.num_objects;
num_object_clones += o.num_object_clones;
num_object_copies += o.num_object_copies;
num_objects_missing_on_primary += o.num_objects_missing_on_primary;
num_objects_missing += o.num_objects_missing;
num_objects_degraded += o.num_objects_degraded;
num_objects_misplaced += o.num_objects_misplaced;
num_rd += o.num_rd;
num_rd_kb += o.num_rd_kb;
num_wr += o.num_wr;
num_wr_kb += o.num_wr_kb;
num_objects_unfound += o.num_objects_unfound;
num_scrub_errors += o.num_scrub_errors;
num_shallow_scrub_errors += o.num_shallow_scrub_errors;
num_deep_scrub_errors += o.num_deep_scrub_errors;
num_objects_recovered += o.num_objects_recovered;
num_bytes_recovered += o.num_bytes_recovered;
num_keys_recovered += o.num_keys_recovered;
num_objects_dirty += o.num_objects_dirty;
num_whiteouts += o.num_whiteouts;
num_objects_omap += o.num_objects_omap;
num_objects_hit_set_archive += o.num_objects_hit_set_archive;
num_bytes_hit_set_archive += o.num_bytes_hit_set_archive;
num_flush += o.num_flush;
num_flush_kb += o.num_flush_kb;
num_evict += o.num_evict;
num_evict_kb += o.num_evict_kb;
num_promote += o.num_promote;
num_flush_mode_high += o.num_flush_mode_high;
num_flush_mode_low += o.num_flush_mode_low;
num_evict_mode_some += o.num_evict_mode_some;
num_evict_mode_full += o.num_evict_mode_full;
num_objects_pinned += o.num_objects_pinned;
num_legacy_snapsets += o.num_legacy_snapsets;
num_large_omap_objects += o.num_large_omap_objects;
num_objects_manifest += o.num_objects_manifest;
num_omap_bytes += o.num_omap_bytes;
num_omap_keys += o.num_omap_keys;
num_objects_repaired += o.num_objects_repaired;
}
void object_stat_sum_t::sub(const object_stat_sum_t& o)
{
num_bytes -= o.num_bytes;
num_objects -= o.num_objects;
num_object_clones -= o.num_object_clones;
num_object_copies -= o.num_object_copies;
num_objects_missing_on_primary -= o.num_objects_missing_on_primary;
num_objects_missing -= o.num_objects_missing;
num_objects_degraded -= o.num_objects_degraded;
num_objects_misplaced -= o.num_objects_misplaced;
num_rd -= o.num_rd;
num_rd_kb -= o.num_rd_kb;
num_wr -= o.num_wr;
num_wr_kb -= o.num_wr_kb;
num_objects_unfound -= o.num_objects_unfound;
num_scrub_errors -= o.num_scrub_errors;
num_shallow_scrub_errors -= o.num_shallow_scrub_errors;
num_deep_scrub_errors -= o.num_deep_scrub_errors;
num_objects_recovered -= o.num_objects_recovered;
num_bytes_recovered -= o.num_bytes_recovered;
num_keys_recovered -= o.num_keys_recovered;
num_objects_dirty -= o.num_objects_dirty;
num_whiteouts -= o.num_whiteouts;
num_objects_omap -= o.num_objects_omap;
num_objects_hit_set_archive -= o.num_objects_hit_set_archive;
num_bytes_hit_set_archive -= o.num_bytes_hit_set_archive;
num_flush -= o.num_flush;
num_flush_kb -= o.num_flush_kb;
num_evict -= o.num_evict;
num_evict_kb -= o.num_evict_kb;
num_promote -= o.num_promote;
num_flush_mode_high -= o.num_flush_mode_high;
num_flush_mode_low -= o.num_flush_mode_low;
num_evict_mode_some -= o.num_evict_mode_some;
num_evict_mode_full -= o.num_evict_mode_full;
num_objects_pinned -= o.num_objects_pinned;
num_legacy_snapsets -= o.num_legacy_snapsets;
num_large_omap_objects -= o.num_large_omap_objects;
num_objects_manifest -= o.num_objects_manifest;
num_omap_bytes -= o.num_omap_bytes;
num_omap_keys -= o.num_omap_keys;
num_objects_repaired -= o.num_objects_repaired;
}
bool operator==(const object_stat_sum_t& l, const object_stat_sum_t& r)
{
return
l.num_bytes == r.num_bytes &&
l.num_objects == r.num_objects &&
l.num_object_clones == r.num_object_clones &&
l.num_object_copies == r.num_object_copies &&
l.num_objects_missing_on_primary == r.num_objects_missing_on_primary &&
l.num_objects_missing == r.num_objects_missing &&
l.num_objects_degraded == r.num_objects_degraded &&
l.num_objects_misplaced == r.num_objects_misplaced &&
l.num_objects_unfound == r.num_objects_unfound &&
l.num_rd == r.num_rd &&
l.num_rd_kb == r.num_rd_kb &&
l.num_wr == r.num_wr &&
l.num_wr_kb == r.num_wr_kb &&
l.num_scrub_errors == r.num_scrub_errors &&
l.num_shallow_scrub_errors == r.num_shallow_scrub_errors &&
l.num_deep_scrub_errors == r.num_deep_scrub_errors &&
l.num_objects_recovered == r.num_objects_recovered &&
l.num_bytes_recovered == r.num_bytes_recovered &&
l.num_keys_recovered == r.num_keys_recovered &&
l.num_objects_dirty == r.num_objects_dirty &&
l.num_whiteouts == r.num_whiteouts &&
l.num_objects_omap == r.num_objects_omap &&
l.num_objects_hit_set_archive == r.num_objects_hit_set_archive &&
l.num_bytes_hit_set_archive == r.num_bytes_hit_set_archive &&
l.num_flush == r.num_flush &&
l.num_flush_kb == r.num_flush_kb &&
l.num_evict == r.num_evict &&
l.num_evict_kb == r.num_evict_kb &&
l.num_promote == r.num_promote &&
l.num_flush_mode_high == r.num_flush_mode_high &&
l.num_flush_mode_low == r.num_flush_mode_low &&
l.num_evict_mode_some == r.num_evict_mode_some &&
l.num_evict_mode_full == r.num_evict_mode_full &&
l.num_objects_pinned == r.num_objects_pinned &&
l.num_legacy_snapsets == r.num_legacy_snapsets &&
l.num_large_omap_objects == r.num_large_omap_objects &&
l.num_objects_manifest == r.num_objects_manifest &&
l.num_omap_bytes == r.num_omap_bytes &&
l.num_omap_keys == r.num_omap_keys &&
l.num_objects_repaired == r.num_objects_repaired;
}
// -- object_stat_collection_t --
void object_stat_collection_t::dump(Formatter *f) const
{
f->open_object_section("stat_sum");
sum.dump(f);
f->close_section();
}
void object_stat_collection_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(2, 2, bl);
encode(sum, bl);
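  // legacy per-category stats: encode a zero count (an empty map)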
encode((__u32)0, bl);
ENCODE_FINISH(bl);
}
void object_stat_collection_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(sum, bl);
{
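    // legacy per-category stats: decode and discard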
map<string,object_stat_sum_t> cat_sum;
decode(cat_sum, bl);
}
DECODE_FINISH(bl);
}
void object_stat_collection_t::generate_test_instances(list<object_stat_collection_t*>& o)
{
object_stat_collection_t a;
o.push_back(new object_stat_collection_t(a));
list<object_stat_sum_t*> l;
object_stat_sum_t::generate_test_instances(l);
for (auto p = l.begin(); p != l.end(); ++p) {
a.add(**p);
o.push_back(new object_stat_collection_t(a));
}
}
// -- pg_stat_t --
bool pg_stat_t::is_acting_osd(int32_t osd, bool primary) const
{
if (primary && osd == acting_primary) {
return true;
} else if (!primary) {
for(auto it = acting.cbegin(); it != acting.cend(); ++it)
{
if (*it == osd)
return true;
}
}
return false;
}
void pg_stat_t::dump(Formatter *f) const
{
f->dump_stream("version") << version;
f->dump_unsigned("reported_seq", reported_seq);
f->dump_unsigned("reported_epoch", reported_epoch);
f->dump_string("state", pg_state_string(state));
f->dump_stream("last_fresh") << last_fresh;
f->dump_stream("last_change") << last_change;
f->dump_stream("last_active") << last_active;
f->dump_stream("last_peered") << last_peered;
f->dump_stream("last_clean") << last_clean;
f->dump_stream("last_became_active") << last_became_active;
f->dump_stream("last_became_peered") << last_became_peered;
f->dump_stream("last_unstale") << last_unstale;
f->dump_stream("last_undegraded") << last_undegraded;
f->dump_stream("last_fullsized") << last_fullsized;
f->dump_unsigned("mapping_epoch", mapping_epoch);
f->dump_stream("log_start") << log_start;
f->dump_stream("ondisk_log_start") << ondisk_log_start;
f->dump_unsigned("created", created);
f->dump_unsigned("last_epoch_clean", last_epoch_clean);
f->dump_stream("parent") << parent;
f->dump_unsigned("parent_split_bits", parent_split_bits);
f->dump_stream("last_scrub") << last_scrub;
f->dump_stream("last_scrub_stamp") << last_scrub_stamp;
f->dump_stream("last_deep_scrub") << last_deep_scrub;
f->dump_stream("last_deep_scrub_stamp") << last_deep_scrub_stamp;
f->dump_stream("last_clean_scrub_stamp") << last_clean_scrub_stamp;
f->dump_int("objects_scrubbed", objects_scrubbed);
f->dump_int("log_size", log_size);
f->dump_int("log_dups_size", log_dups_size);
f->dump_int("ondisk_log_size", ondisk_log_size);
f->dump_bool("stats_invalid", stats_invalid);
f->dump_bool("dirty_stats_invalid", dirty_stats_invalid);
f->dump_bool("omap_stats_invalid", omap_stats_invalid);
f->dump_bool("hitset_stats_invalid", hitset_stats_invalid);
f->dump_bool("hitset_bytes_stats_invalid", hitset_bytes_stats_invalid);
f->dump_bool("pin_stats_invalid", pin_stats_invalid);
f->dump_bool("manifest_stats_invalid", manifest_stats_invalid);
f->dump_unsigned("snaptrimq_len", snaptrimq_len);
f->dump_int("last_scrub_duration", last_scrub_duration);
f->dump_string("scrub_schedule", dump_scrub_schedule());
f->dump_float("scrub_duration", scrub_duration);
f->dump_int("objects_trimmed", objects_trimmed);
f->dump_float("snaptrim_duration", snaptrim_duration);
stats.dump(f);
f->open_array_section("up");
for (auto p = up.cbegin(); p != up.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->open_array_section("acting");
for (auto p = acting.cbegin(); p != acting.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->open_array_section("avail_no_missing");
for (auto p = avail_no_missing.cbegin(); p != avail_no_missing.cend(); ++p)
f->dump_stream("shard") << *p;
f->close_section();
f->open_array_section("object_location_counts");
for (auto p = object_location_counts.cbegin(); p != object_location_counts.cend(); ++p) {
f->open_object_section("entry");
f->dump_stream("shards") << p->first;
f->dump_int("objects", p->second);
f->close_section();
}
f->close_section();
f->open_array_section("blocked_by");
for (auto p = blocked_by.cbegin(); p != blocked_by.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->dump_int("up_primary", up_primary);
f->dump_int("acting_primary", acting_primary);
f->open_array_section("purged_snaps");
for (auto i = purged_snaps.begin(); i != purged_snaps.end(); ++i) {
f->open_object_section("interval");
f->dump_stream("start") << i.get_start();
f->dump_stream("length") << i.get_len();
f->close_section();
}
f->close_section();
}
void pg_stat_t::dump_brief(Formatter *f) const
{
f->dump_string("state", pg_state_string(state));
f->open_array_section("up");
for (auto p = up.cbegin(); p != up.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->open_array_section("acting");
for (auto p = acting.cbegin(); p != acting.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->dump_int("up_primary", up_primary);
f->dump_int("acting_primary", acting_primary);
}
std::string pg_stat_t::dump_scrub_schedule() const
{
if (scrub_sched_status.m_is_active) {
// are we blocked (in fact, stuck) on some locked object?
if (scrub_sched_status.m_sched_status == pg_scrub_sched_status_t::blocked) {
return fmt::format(
"Blocked! locked objects (for {}s)",
scrub_sched_status.m_duration_seconds);
} else {
return fmt::format(
"{}scrubbing for {}s",
((scrub_sched_status.m_is_deep == scrub_level_t::deep) ? "deep " : ""),
scrub_sched_status.m_duration_seconds);
}
}
switch (scrub_sched_status.m_sched_status) {
case pg_scrub_sched_status_t::unknown:
// no reported scrub schedule yet
return "--"s;
case pg_scrub_sched_status_t::not_queued:
return "no scrub is scheduled"s;
case pg_scrub_sched_status_t::scheduled:
return fmt::format(
"{} {}scrub scheduled @ {}",
(scrub_sched_status.m_is_periodic ? "periodic" : "user requested"),
((scrub_sched_status.m_is_deep == scrub_level_t::deep) ? "deep " : ""),
scrub_sched_status.m_scheduled_at);
case pg_scrub_sched_status_t::queued:
return fmt::format(
"queued for {}scrub",
((scrub_sched_status.m_is_deep == scrub_level_t::deep) ? "deep " : ""));
default:
// a bug!
return "SCRUB STATE MISMATCH!"s;
}
}
bool operator==(const pg_scrubbing_status_t& l, const pg_scrubbing_status_t& r)
{
return
l.m_sched_status == r.m_sched_status &&
l.m_scheduled_at == r.m_scheduled_at &&
l.m_duration_seconds == r.m_duration_seconds &&
l.m_is_active == r.m_is_active &&
l.m_is_deep == r.m_is_deep &&
l.m_is_periodic == r.m_is_periodic;
}
void pg_stat_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(29, 22, bl);
encode(version, bl);
encode(reported_seq, bl);
encode(reported_epoch, bl);
encode((__u32)state, bl); // for older peers
encode(log_start, bl);
encode(ondisk_log_start, bl);
encode(created, bl);
encode(last_epoch_clean, bl);
encode(parent, bl);
encode(parent_split_bits, bl);
encode(last_scrub, bl);
encode(last_scrub_stamp, bl);
encode(stats, bl);
encode(log_size, bl);
encode(ondisk_log_size, bl);
encode(up, bl);
encode(acting, bl);
encode(last_fresh, bl);
encode(last_change, bl);
encode(last_active, bl);
encode(last_clean, bl);
encode(last_unstale, bl);
encode(mapping_epoch, bl);
encode(last_deep_scrub, bl);
encode(last_deep_scrub_stamp, bl);
encode(stats_invalid, bl);
encode(last_clean_scrub_stamp, bl);
encode(last_became_active, bl);
encode(dirty_stats_invalid, bl);
encode(up_primary, bl);
encode(acting_primary, bl);
encode(omap_stats_invalid, bl);
encode(hitset_stats_invalid, bl);
encode(blocked_by, bl);
encode(last_undegraded, bl);
encode(last_fullsized, bl);
encode(hitset_bytes_stats_invalid, bl);
encode(last_peered, bl);
encode(last_became_peered, bl);
encode(pin_stats_invalid, bl);
encode(snaptrimq_len, bl);
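  // 'state' is 64 bits; the low half was encoded above for older peers,
  // the high half follows here.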
__u32 top_state = (state >> 32);
encode(top_state, bl);
encode(purged_snaps, bl);
encode(manifest_stats_invalid, bl);
encode(avail_no_missing, bl);
encode(object_location_counts, bl);
encode(last_scrub_duration, bl);
encode(scrub_sched_status.m_scheduled_at, bl);
encode(scrub_sched_status.m_duration_seconds, bl);
encode((__u16)scrub_sched_status.m_sched_status, bl);
encode(scrub_sched_status.m_is_active, bl);
encode((scrub_sched_status.m_is_deep==scrub_level_t::deep), bl);
encode(scrub_sched_status.m_is_periodic, bl);
encode(objects_scrubbed, bl);
encode(scrub_duration, bl);
encode(objects_trimmed, bl);
encode(snaptrim_duration, bl);
encode(log_dups_size, bl);
ENCODE_FINISH(bl);
}
void pg_stat_t::decode(ceph::buffer::list::const_iterator &bl)
{
bool tmp;
uint32_t old_state;
DECODE_START(29, bl);
decode(version, bl);
decode(reported_seq, bl);
decode(reported_epoch, bl);
decode(old_state, bl);
decode(log_start, bl);
decode(ondisk_log_start, bl);
decode(created, bl);
decode(last_epoch_clean, bl);
decode(parent, bl);
decode(parent_split_bits, bl);
decode(last_scrub, bl);
decode(last_scrub_stamp, bl);
decode(stats, bl);
decode(log_size, bl);
decode(ondisk_log_size, bl);
decode(up, bl);
decode(acting, bl);
decode(last_fresh, bl);
decode(last_change, bl);
decode(last_active, bl);
decode(last_clean, bl);
decode(last_unstale, bl);
decode(mapping_epoch, bl);
decode(last_deep_scrub, bl);
decode(last_deep_scrub_stamp, bl);
decode(tmp, bl);
stats_invalid = tmp;
decode(last_clean_scrub_stamp, bl);
decode(last_became_active, bl);
decode(tmp, bl);
dirty_stats_invalid = tmp;
decode(up_primary, bl);
decode(acting_primary, bl);
decode(tmp, bl);
omap_stats_invalid = tmp;
decode(tmp, bl);
hitset_stats_invalid = tmp;
decode(blocked_by, bl);
decode(last_undegraded, bl);
decode(last_fullsized, bl);
decode(tmp, bl);
hitset_bytes_stats_invalid = tmp;
decode(last_peered, bl);
decode(last_became_peered, bl);
decode(tmp, bl);
pin_stats_invalid = tmp;
if (struct_v >= 23) {
decode(snaptrimq_len, bl);
if (struct_v >= 24) {
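      // reassemble the full 64-bit state from the low half decoded above and
      // the high half added in v24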
__u32 top_state;
decode(top_state, bl);
state = (uint64_t)old_state | ((uint64_t)top_state << 32);
decode(purged_snaps, bl);
} else {
state = old_state;
}
if (struct_v >= 25) {
decode(tmp, bl);
manifest_stats_invalid = tmp;
} else {
manifest_stats_invalid = true;
}
if (struct_v >= 26) {
decode(avail_no_missing, bl);
decode(object_location_counts, bl);
}
if (struct_v >= 27) {
decode(last_scrub_duration, bl);
decode(scrub_sched_status.m_scheduled_at, bl);
decode(scrub_sched_status.m_duration_seconds, bl);
__u16 scrub_sched_as_u16;
decode(scrub_sched_as_u16, bl);
scrub_sched_status.m_sched_status = (pg_scrub_sched_status_t)(scrub_sched_as_u16);
decode(tmp, bl);
scrub_sched_status.m_is_active = tmp;
decode(tmp, bl);
scrub_sched_status.m_is_deep = tmp ? scrub_level_t::deep : scrub_level_t::shallow;
decode(tmp, bl);
scrub_sched_status.m_is_periodic = tmp;
decode(objects_scrubbed, bl);
}
if (struct_v >= 28) {
decode(scrub_duration, bl);
decode(objects_trimmed, bl);
decode(snaptrim_duration, bl);
}
if (struct_v >= 29) {
decode(log_dups_size, bl);
}
}
DECODE_FINISH(bl);
}
void pg_stat_t::generate_test_instances(list<pg_stat_t*>& o)
{
pg_stat_t a;
o.push_back(new pg_stat_t(a));
a.version = eversion_t(1, 3);
a.reported_epoch = 1;
a.reported_seq = 2;
a.state = 123;
a.mapping_epoch = 998;
a.last_fresh = utime_t(1002, 1);
a.last_change = utime_t(1002, 2);
a.last_active = utime_t(1002, 3);
a.last_clean = utime_t(1002, 4);
a.last_unstale = utime_t(1002, 5);
a.last_undegraded = utime_t(1002, 7);
a.last_fullsized = utime_t(1002, 8);
a.log_start = eversion_t(1, 4);
a.ondisk_log_start = eversion_t(1, 5);
a.created = 6;
a.last_epoch_clean = 7;
a.parent = pg_t(1, 2);
a.parent_split_bits = 12;
a.last_scrub = eversion_t(9, 10);
a.last_scrub_stamp = utime_t(11, 12);
a.last_deep_scrub = eversion_t(13, 14);
a.last_deep_scrub_stamp = utime_t(15, 16);
a.last_clean_scrub_stamp = utime_t(17, 18);
a.last_scrub_duration = 3617;
a.scrub_duration = 0.003;
a.snaptrimq_len = 1048576;
a.objects_scrubbed = 0;
a.objects_trimmed = 0;
a.snaptrim_duration = 0.123;
list<object_stat_collection_t*> l;
object_stat_collection_t::generate_test_instances(l);
a.stats = *l.back();
a.log_size = 99;
a.ondisk_log_size = 88;
a.up.push_back(123);
a.up_primary = 123;
a.acting.push_back(456);
a.avail_no_missing.push_back(pg_shard_t(456, shard_id_t::NO_SHARD));
set<pg_shard_t> sset = { pg_shard_t(0), pg_shard_t(1) };
a.object_location_counts.insert(make_pair(sset, 10));
sset.insert(pg_shard_t(2));
a.object_location_counts.insert(make_pair(sset, 5));
a.acting_primary = 456;
o.push_back(new pg_stat_t(a));
a.up.push_back(124);
a.up_primary = 124;
a.acting.push_back(124);
a.acting_primary = 124;
a.blocked_by.push_back(155);
a.blocked_by.push_back(156);
o.push_back(new pg_stat_t(a));
}
bool operator==(const pg_stat_t& l, const pg_stat_t& r)
{
return
l.version == r.version &&
l.reported_seq == r.reported_seq &&
l.reported_epoch == r.reported_epoch &&
l.state == r.state &&
l.last_fresh == r.last_fresh &&
l.last_change == r.last_change &&
l.last_active == r.last_active &&
l.last_peered == r.last_peered &&
l.last_clean == r.last_clean &&
l.last_unstale == r.last_unstale &&
l.last_undegraded == r.last_undegraded &&
l.last_fullsized == r.last_fullsized &&
l.log_start == r.log_start &&
l.ondisk_log_start == r.ondisk_log_start &&
l.created == r.created &&
l.last_epoch_clean == r.last_epoch_clean &&
l.parent == r.parent &&
l.parent_split_bits == r.parent_split_bits &&
l.last_scrub == r.last_scrub &&
l.last_deep_scrub == r.last_deep_scrub &&
l.last_scrub_stamp == r.last_scrub_stamp &&
l.last_deep_scrub_stamp == r.last_deep_scrub_stamp &&
l.last_clean_scrub_stamp == r.last_clean_scrub_stamp &&
l.stats == r.stats &&
l.stats_invalid == r.stats_invalid &&
l.log_size == r.log_size &&
l.log_dups_size == r.log_dups_size &&
l.ondisk_log_size == r.ondisk_log_size &&
l.up == r.up &&
l.acting == r.acting &&
l.avail_no_missing == r.avail_no_missing &&
l.object_location_counts == r.object_location_counts &&
l.mapping_epoch == r.mapping_epoch &&
l.blocked_by == r.blocked_by &&
l.last_became_active == r.last_became_active &&
l.last_became_peered == r.last_became_peered &&
l.dirty_stats_invalid == r.dirty_stats_invalid &&
l.omap_stats_invalid == r.omap_stats_invalid &&
l.hitset_stats_invalid == r.hitset_stats_invalid &&
l.hitset_bytes_stats_invalid == r.hitset_bytes_stats_invalid &&
l.up_primary == r.up_primary &&
l.acting_primary == r.acting_primary &&
l.pin_stats_invalid == r.pin_stats_invalid &&
l.manifest_stats_invalid == r.manifest_stats_invalid &&
l.purged_snaps == r.purged_snaps &&
l.snaptrimq_len == r.snaptrimq_len &&
l.last_scrub_duration == r.last_scrub_duration &&
l.scrub_sched_status == r.scrub_sched_status &&
l.objects_scrubbed == r.objects_scrubbed &&
l.scrub_duration == r.scrub_duration &&
l.objects_trimmed == r.objects_trimmed &&
l.snaptrim_duration == r.snaptrim_duration;
}
// -- store_statfs_t --
bool store_statfs_t::operator==(const store_statfs_t& other) const
{
return total == other.total
&& available == other.available
&& allocated == other.allocated
&& internally_reserved == other.internally_reserved
&& data_stored == other.data_stored
&& data_compressed == other.data_compressed
&& data_compressed_allocated == other.data_compressed_allocated
&& data_compressed_original == other.data_compressed_original
&& omap_allocated == other.omap_allocated
&& internal_metadata == other.internal_metadata;
}
void store_statfs_t::dump(Formatter *f) const
{
f->dump_int("total", total);
f->dump_int("available", available);
f->dump_int("internally_reserved", internally_reserved);
f->dump_int("allocated", allocated);
f->dump_int("data_stored", data_stored);
f->dump_int("data_compressed", data_compressed);
f->dump_int("data_compressed_allocated", data_compressed_allocated);
f->dump_int("data_compressed_original", data_compressed_original);
f->dump_int("omap_allocated", omap_allocated);
f->dump_int("internal_metadata", internal_metadata);
}
ostream& operator<<(ostream& out, const store_statfs_t &s)
{
out << std::hex
<< "store_statfs(0x" << s.available
<< "/0x" << s.internally_reserved
<< "/0x" << s.total
<< ", data 0x" << s.data_stored
<< "/0x" << s.allocated
<< ", compress 0x" << s.data_compressed
<< "/0x" << s.data_compressed_allocated
<< "/0x" << s.data_compressed_original
<< ", omap 0x" << s.omap_allocated
<< ", meta 0x" << s.internal_metadata
<< std::dec
<< ")";
return out;
}
void store_statfs_t::generate_test_instances(list<store_statfs_t*>& o)
{
store_statfs_t a;
o.push_back(new store_statfs_t(a));
a.total = 234;
a.available = 123;
a.internally_reserved = 33;
a.allocated = 32;
a.data_stored = 44;
a.data_compressed = 21;
a.data_compressed_allocated = 12;
a.data_compressed_original = 13;
a.omap_allocated = 14;
a.internal_metadata = 15;
o.push_back(new store_statfs_t(a));
}
// -- pool_stat_t --
void pool_stat_t::dump(Formatter *f) const
{
stats.dump(f);
f->open_object_section("store_stats");
store_stats.dump(f);
f->close_section();
f->dump_int("log_size", log_size);
f->dump_int("ondisk_log_size", ondisk_log_size);
f->dump_int("up", up);
f->dump_int("acting", acting);
f->dump_int("num_store_stats", num_store_stats);
}
void pool_stat_t::encode(ceph::buffer::list &bl, uint64_t features) const
{
using ceph::encode;
if ((features & CEPH_FEATURE_OSDENC) == 0) {
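    // legacy encoding for peers without the OSDENC feature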
__u8 v = 4;
encode(v, bl);
encode(stats, bl);
encode(log_size, bl);
encode(ondisk_log_size, bl);
return;
}
ENCODE_START(7, 5, bl);
encode(stats, bl);
encode(log_size, bl);
encode(ondisk_log_size, bl);
encode(up, bl);
encode(acting, bl);
encode(store_stats, bl);
encode(num_store_stats, bl);
ENCODE_FINISH(bl);
}
void pool_stat_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START_LEGACY_COMPAT_LEN(7, 5, 5, bl);
if (struct_v >= 4) {
decode(stats, bl);
decode(log_size, bl);
decode(ondisk_log_size, bl);
if (struct_v >= 6) {
decode(up, bl);
decode(acting, bl);
} else {
up = 0;
acting = 0;
}
if (struct_v >= 7) {
decode(store_stats, bl);
decode(num_store_stats, bl);
} else {
store_stats.reset();
num_store_stats = 0;
}
} else {
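    // pre-v4 encoding: a small fixed set of raw sum fields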
decode(stats.sum.num_bytes, bl);
uint64_t num_kb;
decode(num_kb, bl);
decode(stats.sum.num_objects, bl);
decode(stats.sum.num_object_clones, bl);
decode(stats.sum.num_object_copies, bl);
decode(stats.sum.num_objects_missing_on_primary, bl);
decode(stats.sum.num_objects_degraded, bl);
decode(log_size, bl);
decode(ondisk_log_size, bl);
if (struct_v >= 2) {
decode(stats.sum.num_rd, bl);
decode(stats.sum.num_rd_kb, bl);
decode(stats.sum.num_wr, bl);
decode(stats.sum.num_wr_kb, bl);
}
if (struct_v >= 3) {
decode(stats.sum.num_objects_unfound, bl);
}
}
DECODE_FINISH(bl);
}
void pool_stat_t::generate_test_instances(list<pool_stat_t*>& o)
{
pool_stat_t a;
o.push_back(new pool_stat_t(a));
list<object_stat_collection_t*> l;
object_stat_collection_t::generate_test_instances(l);
list<store_statfs_t*> ll;
store_statfs_t::generate_test_instances(ll);
a.stats = *l.back();
a.store_stats = *ll.back();
a.log_size = 123;
a.ondisk_log_size = 456;
a.acting = 3;
a.up = 4;
a.num_store_stats = 1;
o.push_back(new pool_stat_t(a));
}
// -- pg_history_t --
void pg_history_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(10, 4, bl);
encode(epoch_created, bl);
encode(last_epoch_started, bl);
encode(last_epoch_clean, bl);
encode(last_epoch_split, bl);
encode(same_interval_since, bl);
encode(same_up_since, bl);
encode(same_primary_since, bl);
encode(last_scrub, bl);
encode(last_scrub_stamp, bl);
encode(last_deep_scrub, bl);
encode(last_deep_scrub_stamp, bl);
encode(last_clean_scrub_stamp, bl);
encode(last_epoch_marked_full, bl);
encode(last_interval_started, bl);
encode(last_interval_clean, bl);
encode(epoch_pool_created, bl);
encode(prior_readable_until_ub, bl);
ENCODE_FINISH(bl);
}
void pg_history_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START_LEGACY_COMPAT_LEN(10, 4, 4, bl);
decode(epoch_created, bl);
decode(last_epoch_started, bl);
if (struct_v >= 3)
decode(last_epoch_clean, bl);
else
last_epoch_clean = last_epoch_started; // careful, it's a lie!
decode(last_epoch_split, bl);
decode(same_interval_since, bl);
decode(same_up_since, bl);
decode(same_primary_since, bl);
if (struct_v >= 2) {
decode(last_scrub, bl);
decode(last_scrub_stamp, bl);
}
if (struct_v >= 5) {
decode(last_deep_scrub, bl);
decode(last_deep_scrub_stamp, bl);
}
if (struct_v >= 6) {
decode(last_clean_scrub_stamp, bl);
}
if (struct_v >= 7) {
decode(last_epoch_marked_full, bl);
}
if (struct_v >= 8) {
decode(last_interval_started, bl);
decode(last_interval_clean, bl);
} else {
if (last_epoch_started >= same_interval_since) {
last_interval_started = same_interval_since;
} else {
last_interval_started = last_epoch_started; // best guess
}
if (last_epoch_clean >= same_interval_since) {
last_interval_clean = same_interval_since;
} else {
last_interval_clean = last_epoch_clean; // best guess
}
}
if (struct_v >= 9) {
decode(epoch_pool_created, bl);
} else {
epoch_pool_created = epoch_created;
}
if (struct_v >= 10) {
decode(prior_readable_until_ub, bl);
}
DECODE_FINISH(bl);
}
void pg_history_t::dump(Formatter *f) const
{
f->dump_int("epoch_created", epoch_created);
f->dump_int("epoch_pool_created", epoch_pool_created);
f->dump_int("last_epoch_started", last_epoch_started);
f->dump_int("last_interval_started", last_interval_started);
f->dump_int("last_epoch_clean", last_epoch_clean);
f->dump_int("last_interval_clean", last_interval_clean);
f->dump_int("last_epoch_split", last_epoch_split);
f->dump_int("last_epoch_marked_full", last_epoch_marked_full);
f->dump_int("same_up_since", same_up_since);
f->dump_int("same_interval_since", same_interval_since);
f->dump_int("same_primary_since", same_primary_since);
f->dump_stream("last_scrub") << last_scrub;
f->dump_stream("last_scrub_stamp") << last_scrub_stamp;
f->dump_stream("last_deep_scrub") << last_deep_scrub;
f->dump_stream("last_deep_scrub_stamp") << last_deep_scrub_stamp;
f->dump_stream("last_clean_scrub_stamp") << last_clean_scrub_stamp;
f->dump_float(
"prior_readable_until_ub",
std::chrono::duration<double>(prior_readable_until_ub).count());
}
void pg_history_t::generate_test_instances(list<pg_history_t*>& o)
{
o.push_back(new pg_history_t);
o.push_back(new pg_history_t);
o.back()->epoch_created = 1;
o.back()->epoch_pool_created = 1;
o.back()->last_epoch_started = 2;
o.back()->last_interval_started = 2;
o.back()->last_epoch_clean = 3;
o.back()->last_interval_clean = 2;
o.back()->last_epoch_split = 4;
o.back()->prior_readable_until_ub = make_timespan(3.1415);
o.back()->same_up_since = 5;
o.back()->same_interval_since = 6;
o.back()->same_primary_since = 7;
o.back()->last_scrub = eversion_t(8, 9);
o.back()->last_scrub_stamp = utime_t(10, 11);
o.back()->last_deep_scrub = eversion_t(12, 13);
o.back()->last_deep_scrub_stamp = utime_t(14, 15);
o.back()->last_clean_scrub_stamp = utime_t(16, 17);
o.back()->last_epoch_marked_full = 18;
}
// -- pg_info_t --
void pg_info_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(32, 26, bl);
encode(pgid.pgid, bl);
encode(last_update, bl);
encode(last_complete, bl);
encode(log_tail, bl);
encode(hobject_t(), bl); // old (nibblewise) last_backfill
encode(stats, bl);
history.encode(bl);
encode(purged_snaps, bl);
encode(last_epoch_started, bl);
encode(last_user_version, bl);
encode(hit_set, bl);
encode(pgid.shard, bl);
encode(last_backfill, bl);
encode(true, bl); // was last_backfill_bitwise
encode(last_interval_started, bl);
ENCODE_FINISH(bl);
}
void pg_info_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(32, bl);
decode(pgid.pgid, bl);
decode(last_update, bl);
decode(last_complete, bl);
decode(log_tail, bl);
{
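    // discard the old (nibblewise) last_backfill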
hobject_t old_last_backfill;
decode(old_last_backfill, bl);
}
decode(stats, bl);
history.decode(bl);
decode(purged_snaps, bl);
decode(last_epoch_started, bl);
decode(last_user_version, bl);
decode(hit_set, bl);
decode(pgid.shard, bl);
decode(last_backfill, bl);
{
bool last_backfill_bitwise;
decode(last_backfill_bitwise, bl);
// note: we may see a false value here since the default value for
// the member was false, so it often didn't get set to true until
// peering progressed.
}
if (struct_v >= 32) {
decode(last_interval_started, bl);
} else {
last_interval_started = last_epoch_started;
}
DECODE_FINISH(bl);
}
// -- pg_info_t --
void pg_info_t::dump(Formatter *f) const
{
f->dump_stream("pgid") << pgid;
f->dump_stream("last_update") << last_update;
f->dump_stream("last_complete") << last_complete;
f->dump_stream("log_tail") << log_tail;
f->dump_int("last_user_version", last_user_version);
f->dump_stream("last_backfill") << last_backfill;
f->open_array_section("purged_snaps");
for (interval_set<snapid_t>::const_iterator i=purged_snaps.begin();
i != purged_snaps.end();
++i) {
f->open_object_section("purged_snap_interval");
f->dump_stream("start") << i.get_start();
f->dump_stream("length") << i.get_len();
f->close_section();
}
f->close_section();
f->open_object_section("history");
history.dump(f);
f->close_section();
f->open_object_section("stats");
stats.dump(f);
f->close_section();
f->dump_int("empty", is_empty());
f->dump_int("dne", dne());
f->dump_int("incomplete", is_incomplete());
f->dump_int("last_epoch_started", last_epoch_started);
f->open_object_section("hit_set_history");
hit_set.dump(f);
f->close_section();
}
void pg_info_t::generate_test_instances(list<pg_info_t*>& o)
{
o.push_back(new pg_info_t);
o.push_back(new pg_info_t);
list<pg_history_t*> h;
pg_history_t::generate_test_instances(h);
o.back()->history = *h.back();
o.back()->pgid = spg_t(pg_t(1, 2), shard_id_t::NO_SHARD);
o.back()->last_update = eversion_t(3, 4);
o.back()->last_complete = eversion_t(5, 6);
o.back()->last_user_version = 2;
o.back()->log_tail = eversion_t(7, 8);
o.back()->last_backfill = hobject_t(object_t("objname"), "key", 123, 456, -1, "");
{
list<pg_stat_t*> s;
pg_stat_t::generate_test_instances(s);
o.back()->stats = *s.back();
}
{
list<pg_hit_set_history_t*> s;
pg_hit_set_history_t::generate_test_instances(s);
o.back()->hit_set = *s.back();
}
}
// -- pg_notify_t --
void pg_notify_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(3, 2, bl);
encode(query_epoch, bl);
encode(epoch_sent, bl);
encode(info, bl);
encode(to, bl);
encode(from, bl);
encode(past_intervals, bl);
ENCODE_FINISH(bl);
}
void pg_notify_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(3, bl);
decode(query_epoch, bl);
decode(epoch_sent, bl);
decode(info, bl);
decode(to, bl);
decode(from, bl);
if (struct_v >= 3) {
decode(past_intervals, bl);
}
DECODE_FINISH(bl);
}
void pg_notify_t::dump(Formatter *f) const
{
f->dump_int("from", from);
f->dump_int("to", to);
f->dump_unsigned("query_epoch", query_epoch);
f->dump_unsigned("epoch_sent", epoch_sent);
{
f->open_object_section("info");
info.dump(f);
f->close_section();
}
f->dump_object("past_intervals", past_intervals);
}
void pg_notify_t::generate_test_instances(list<pg_notify_t*>& o)
{
o.push_back(new pg_notify_t(shard_id_t(3), shard_id_t::NO_SHARD, 1, 1,
pg_info_t(), PastIntervals()));
o.push_back(new pg_notify_t(shard_id_t(0), shard_id_t(0), 3, 10,
pg_info_t(), PastIntervals()));
}
ostream &operator<<(ostream &lhs, const pg_notify_t &notify)
{
lhs << "(query:" << notify.query_epoch
<< " sent:" << notify.epoch_sent
<< " " << notify.info;
if (notify.from != shard_id_t::NO_SHARD ||
notify.to != shard_id_t::NO_SHARD)
lhs << " " << (unsigned)notify.from
<< "->" << (unsigned)notify.to;
lhs << " " << notify.past_intervals;
return lhs << ")";
}
// -- pg_interval_t --
void PastIntervals::pg_interval_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(4, 2, bl);
encode(first, bl);
encode(last, bl);
encode(up, bl);
encode(acting, bl);
encode(maybe_went_rw, bl);
encode(primary, bl);
encode(up_primary, bl);
ENCODE_FINISH(bl);
}
void PastIntervals::pg_interval_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(4, 2, 2, bl);
decode(first, bl);
decode(last, bl);
decode(up, bl);
decode(acting, bl);
decode(maybe_went_rw, bl);
if (struct_v >= 3) {
decode(primary, bl);
} else {
if (acting.size())
primary = acting[0];
}
if (struct_v >= 4) {
decode(up_primary, bl);
} else {
if (up.size())
up_primary = up[0];
}
DECODE_FINISH(bl);
}
void PastIntervals::pg_interval_t::dump(Formatter *f) const
{
f->dump_unsigned("first", first);
f->dump_unsigned("last", last);
f->dump_int("maybe_went_rw", maybe_went_rw ? 1 : 0);
f->open_array_section("up");
for (auto p = up.cbegin(); p != up.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->open_array_section("acting");
for (auto p = acting.cbegin(); p != acting.cend(); ++p)
f->dump_int("osd", *p);
f->close_section();
f->dump_int("primary", primary);
f->dump_int("up_primary", up_primary);
}
void PastIntervals::pg_interval_t::generate_test_instances(list<pg_interval_t*>& o)
{
o.push_back(new pg_interval_t);
o.push_back(new pg_interval_t);
o.back()->up.push_back(1);
o.back()->acting.push_back(2);
o.back()->acting.push_back(3);
o.back()->first = 4;
o.back()->last = 5;
o.back()->maybe_went_rw = true;
}
WRITE_CLASS_ENCODER(PastIntervals::pg_interval_t)
/**
* pi_compact_rep
*
* PastIntervals only needs to be able to answer two questions:
* 1) Where should the primary look for unfound objects?
* 2) List a set of subsets of the OSDs such that contacting at least
* one from each subset guarantees we speak to at least one witness
* of any completed write.
*
* Crucially, 2) does not require keeping *all* past intervals. Certainly,
* we don't need to keep any where maybe_went_rw would be false. We also
 * needn't keep two intervals where the acting set in one is a subset
* of the other (only need to keep the smaller of the two sets). In order
* to accurately trim the set of intervals as last_epoch_started changes
* without rebuilding the set from scratch, we'll retain the larger set
 * if it is in an older interval.
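 *
 * For example, a maybe_went_rw interval [21,30] with acting {1,2} supersedes
 * an earlier [10,20] interval with acting {0,1,2}: every member of {1,2} is
 * also in {0,1,2}, so probing the newer, smaller set covers both and only
 * the newer interval needs to be kept.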
*/
struct compact_interval_t {
epoch_t first;
epoch_t last;
set<pg_shard_t> acting;
bool supersedes(const compact_interval_t &other) {
for (auto &&i: acting) {
if (!other.acting.count(i))
return false;
}
return true;
}
void dump(Formatter *f) const {
f->open_object_section("compact_interval_t");
f->dump_stream("first") << first;
f->dump_stream("last") << last;
f->dump_stream("acting") << acting;
f->close_section();
}
void encode(ceph::buffer::list &bl) const {
ENCODE_START(1, 1, bl);
encode(first, bl);
encode(last, bl);
encode(acting, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl) {
DECODE_START(1, bl);
decode(first, bl);
decode(last, bl);
decode(acting, bl);
DECODE_FINISH(bl);
}
static void generate_test_instances(list<compact_interval_t*> & o) {
/* Not going to be used, we'll generate pi_compact_rep directly */
}
};
ostream &operator<<(ostream &o, const compact_interval_t &rhs)
{
return o << "([" << rhs.first << "," << rhs.last
<< "] acting " << rhs.acting << ")";
}
WRITE_CLASS_ENCODER(compact_interval_t)
class pi_compact_rep : public PastIntervals::interval_rep {
epoch_t first = 0;
epoch_t last = 0; // inclusive
set<pg_shard_t> all_participants;
list<compact_interval_t> intervals;
pi_compact_rep(
bool ec_pool,
std::list<PastIntervals::pg_interval_t> &&intervals) {
for (auto &&i: intervals)
add_interval(ec_pool, i);
}
public:
pi_compact_rep() = default;
pi_compact_rep(const pi_compact_rep &) = default;
pi_compact_rep(pi_compact_rep &&) = default;
pi_compact_rep &operator=(const pi_compact_rep &) = default;
pi_compact_rep &operator=(pi_compact_rep &&) = default;
size_t size() const override { return intervals.size(); }
bool empty() const override {
return first > last || (first == 0 && last == 0);
}
void clear() override {
*this = pi_compact_rep();
}
pair<epoch_t, epoch_t> get_bounds() const override {
return make_pair(first, last + 1);
}
void adjust_start_backwards(epoch_t last_epoch_clean) override {
first = last_epoch_clean;
}
set<pg_shard_t> get_all_participants(
bool ec_pool) const override {
return all_participants;
}
void add_interval(
bool ec_pool, const PastIntervals::pg_interval_t &interval) override {
if (first == 0)
first = interval.first;
ceph_assert(interval.last > last);
last = interval.last;
set<pg_shard_t> acting;
for (unsigned i = 0; i < interval.acting.size(); ++i) {
if (interval.acting[i] == CRUSH_ITEM_NONE)
continue;
acting.insert(
pg_shard_t(
interval.acting[i],
ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
all_participants.insert(acting.begin(), acting.end());
if (!interval.maybe_went_rw)
return;
intervals.push_back(
compact_interval_t{interval.first, interval.last, acting});
auto plast = intervals.end();
--plast;
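    // drop any earlier interval whose acting set is a superset of this one's;
    // contacting a member of the new (smaller) set also reaches a member of
    // the superseded set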
for (auto cur = intervals.begin(); cur != plast; ) {
if (plast->supersedes(*cur)) {
intervals.erase(cur++);
} else {
++cur;
}
}
}
unique_ptr<PastIntervals::interval_rep> clone() const override {
return unique_ptr<PastIntervals::interval_rep>(new pi_compact_rep(*this));
}
ostream &print(ostream &out) const override {
return out << "([" << first << "," << last
<< "] all_participants=" << all_participants
<< " intervals=" << intervals << ")";
}
void encode(ceph::buffer::list &bl) const override {
ENCODE_START(1, 1, bl);
encode(first, bl);
encode(last, bl);
encode(all_participants, bl);
encode(intervals, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl) override {
DECODE_START(1, bl);
decode(first, bl);
decode(last, bl);
decode(all_participants, bl);
decode(intervals, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const override {
f->open_object_section("PastIntervals::compact_rep");
f->dump_stream("first") << first;
f->dump_stream("last") << last;
f->open_array_section("all_participants");
for (auto& i : all_participants) {
f->dump_object("pg_shard", i);
}
f->close_section();
f->open_array_section("intervals");
for (auto &&i: intervals) {
i.dump(f);
}
f->close_section();
f->close_section();
}
static void generate_test_instances(list<pi_compact_rep*> &o) {
using ival = PastIntervals::pg_interval_t;
using ivallst = std::list<ival>;
o.push_back(
new pi_compact_rep(
true, ivallst
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
, ival{{ 2}, { 2}, 31, 35, false, 2, 2}
, ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
}));
o.push_back(
new pi_compact_rep(
false, ivallst
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
, ival{{ 2}, { 2}, 31, 35, false, 2, 2}
, ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
}));
o.push_back(
new pi_compact_rep(
true, ivallst
{ ival{{2, 1, 0}, {2, 1, 0}, 10, 20, true, 1, 1}
, ival{{ 0, 2}, { 0, 2}, 21, 30, true, 0, 0}
, ival{{ 0, 2}, {2, 0}, 31, 35, true, 2, 2}
, ival{{ 0, 2}, { 0, 2}, 36, 50, true, 0, 0}
}));
}
void iterate_mayberw_back_to(
epoch_t les,
std::function<void(epoch_t, const set<pg_shard_t> &)> &&f) const override {
for (auto i = intervals.rbegin(); i != intervals.rend(); ++i) {
if (i->last < les)
break;
f(i->first, i->acting);
}
}
virtual ~pi_compact_rep() override {}
};
WRITE_CLASS_ENCODER(pi_compact_rep)
PastIntervals::PastIntervals()
{
past_intervals.reset(new pi_compact_rep);
}
PastIntervals::PastIntervals(const PastIntervals &rhs)
: past_intervals(rhs.past_intervals ?
rhs.past_intervals->clone() :
nullptr) {}
PastIntervals &PastIntervals::operator=(const PastIntervals &rhs)
{
PastIntervals other(rhs);
swap(other);
return *this;
}
ostream& operator<<(ostream& out, const PastIntervals &i)
{
if (i.past_intervals) {
return i.past_intervals->print(out);
} else {
return out << "(empty)";
}
}
ostream& operator<<(ostream& out, const PastIntervals::PriorSet &i)
{
return out << "PriorSet("
<< "ec_pool: " << i.ec_pool
<< ", probe: " << i.probe
<< ", down: " << i.down
<< ", blocked_by: " << i.blocked_by
<< ", pg_down: " << i.pg_down
<< ")";
}
void PastIntervals::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
__u8 type = 0;
decode(type, bl);
switch (type) {
case 0:
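    // no interval representation was encoded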
break;
case 1:
ceph_abort_msg("pi_simple_rep support removed post-luminous");
break;
case 2:
past_intervals.reset(new pi_compact_rep);
past_intervals->decode(bl);
break;
}
DECODE_FINISH(bl);
}
void PastIntervals::generate_test_instances(list<PastIntervals*> &o)
{
{
list<pi_compact_rep *> compact;
pi_compact_rep::generate_test_instances(compact);
for (auto &&i: compact) {
// takes ownership of contents
o.push_back(new PastIntervals(i));
}
}
return;
}
bool PastIntervals::is_new_interval(
int old_acting_primary,
int new_acting_primary,
const vector<int> &old_acting,
const vector<int> &new_acting,
int old_up_primary,
int new_up_primary,
const vector<int> &old_up,
const vector<int> &new_up,
int old_size,
int new_size,
int old_min_size,
int new_min_size,
unsigned old_pg_num,
unsigned new_pg_num,
unsigned old_pg_num_pending,
unsigned new_pg_num_pending,
bool old_sort_bitwise,
bool new_sort_bitwise,
bool old_recovery_deletes,
bool new_recovery_deletes,
uint32_t old_crush_count,
uint32_t new_crush_count,
uint32_t old_crush_target,
uint32_t new_crush_target,
uint32_t old_crush_barrier,
uint32_t new_crush_barrier,
int32_t old_crush_member,
int32_t new_crush_member,
pg_t pgid) {
return old_acting_primary != new_acting_primary ||
new_acting != old_acting ||
old_up_primary != new_up_primary ||
new_up != old_up ||
old_min_size != new_min_size ||
old_size != new_size ||
pgid.is_split(old_pg_num, new_pg_num, 0) ||
// (is or was) pre-merge source
pgid.is_merge_source(old_pg_num_pending, new_pg_num_pending, 0) ||
pgid.is_merge_source(new_pg_num_pending, old_pg_num_pending, 0) ||
// merge source
pgid.is_merge_source(old_pg_num, new_pg_num, 0) ||
// (is or was) pre-merge target
pgid.is_merge_target(old_pg_num_pending, new_pg_num_pending) ||
pgid.is_merge_target(new_pg_num_pending, old_pg_num_pending) ||
// merge target
pgid.is_merge_target(old_pg_num, new_pg_num) ||
old_sort_bitwise != new_sort_bitwise ||
old_recovery_deletes != new_recovery_deletes ||
old_crush_count != new_crush_count ||
old_crush_target != new_crush_target ||
old_crush_barrier != new_crush_barrier ||
old_crush_member != new_crush_member;
}
bool PastIntervals::is_new_interval(
int old_acting_primary,
int new_acting_primary,
const vector<int> &old_acting,
const vector<int> &new_acting,
int old_up_primary,
int new_up_primary,
const vector<int> &old_up,
const vector<int> &new_up,
const OSDMap *osdmap,
const OSDMap *lastmap,
pg_t pgid)
{
const pg_pool_t *plast = lastmap->get_pg_pool(pgid.pool());
if (!plast) {
return false; // after pool is deleted there are no more interval changes
}
const pg_pool_t *pi = osdmap->get_pg_pool(pgid.pool());
if (!pi) {
return true; // pool was deleted this epoch -> (final!) interval change
}
return
is_new_interval(old_acting_primary,
new_acting_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
plast->size,
pi->size,
plast->min_size,
pi->min_size,
plast->get_pg_num(),
pi->get_pg_num(),
plast->get_pg_num_pending(),
pi->get_pg_num_pending(),
lastmap->test_flag(CEPH_OSDMAP_SORTBITWISE),
osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE),
lastmap->test_flag(CEPH_OSDMAP_RECOVERY_DELETES),
osdmap->test_flag(CEPH_OSDMAP_RECOVERY_DELETES),
plast->peering_crush_bucket_count, pi->peering_crush_bucket_count,
plast->peering_crush_bucket_target, pi->peering_crush_bucket_target,
plast->peering_crush_bucket_barrier, pi->peering_crush_bucket_barrier,
plast->peering_crush_mandatory_member, pi->peering_crush_mandatory_member,
pgid);
}
bool PastIntervals::check_new_interval(
int old_acting_primary,
int new_acting_primary,
const vector<int> &old_acting,
const vector<int> &new_acting,
int old_up_primary,
int new_up_primary,
const vector<int> &old_up,
const vector<int> &new_up,
epoch_t same_interval_since,
epoch_t last_epoch_clean,
const OSDMap *osdmap,
const OSDMap *lastmap,
pg_t pgid,
const IsPGRecoverablePredicate &could_have_gone_active,
PastIntervals *past_intervals,
std::ostream *out)
{
/*
* We have to be careful to gracefully deal with situations like
* so. Say we have a power outage or something that takes out both
* OSDs, but the monitor doesn't mark them down in the same epoch.
* The history may look like
*
* 1: A B
* 2: B
* 3: let's say B dies for good, too (say, from the power spike)
* 4: A
*
* which makes it look like B may have applied updates to the PG
* that we need in order to proceed. This sucks...
*
* To minimize the risk of this happening, we CANNOT go active if
* _any_ OSDs in the prior set are down until we send an MOSDAlive
* to the monitor such that the OSDMap sets osd_up_thru to an epoch.
* Then, we have something like
*
* 1: A B
* 2: B up_thru[B]=0
* 3:
* 4: A
*
* -> we can ignore B, bc it couldn't have gone active (up_thru still 0).
*
* or,
*
* 1: A B
* 2: B up_thru[B]=0
* 3: B up_thru[B]=2
* 4:
* 5: A
*
* -> we must wait for B, bc it was alive through 2, and could have
* written to the pg.
*
* If B is really dead, then an administrator will need to manually
* intervene by marking the OSD as "lost."
*/
// remember past interval
// NOTE: a change in the up set primary triggers an interval
// change, even though the interval members in the pg_interval_t
// do not change.
ceph_assert(past_intervals);
ceph_assert(past_intervals->past_intervals);
if (is_new_interval(
old_acting_primary,
new_acting_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
osdmap,
lastmap,
pgid)) {
pg_interval_t i;
i.first = same_interval_since;
i.last = osdmap->get_epoch() - 1;
ceph_assert(i.first <= i.last);
i.acting = old_acting;
i.up = old_up;
i.primary = old_acting_primary;
i.up_primary = old_up_primary;
unsigned num_acting = 0;
for (auto p = i.acting.cbegin(); p != i.acting.cend(); ++p)
if (*p != CRUSH_ITEM_NONE)
++num_acting;
ceph_assert(lastmap->get_pools().count(pgid.pool()));
const pg_pool_t& old_pg_pool = lastmap->get_pools().find(pgid.pool())->second;
set<pg_shard_t> old_acting_shards;
old_pg_pool.convert_to_pg_shards(old_acting, &old_acting_shards);
if (num_acting &&
i.primary != -1 &&
num_acting >= old_pg_pool.min_size &&
(!old_pg_pool.is_stretch_pool() ||
old_pg_pool.stretch_set_can_peer(old_acting, *lastmap, out)) &&
could_have_gone_active(old_acting_shards)) {
if (out)
*out << __func__ << " " << i
<< " up_thru " << lastmap->get_up_thru(i.primary)
<< " up_from " << lastmap->get_up_from(i.primary)
<< " last_epoch_clean " << last_epoch_clean;
if (lastmap->get_up_thru(i.primary) >= i.first &&
lastmap->get_up_from(i.primary) <= i.first) {
i.maybe_went_rw = true;
if (out)
*out << " " << i
<< " : primary up " << lastmap->get_up_from(i.primary)
<< "-" << lastmap->get_up_thru(i.primary)
<< " includes interval"
<< std::endl;
} else if (last_epoch_clean >= i.first &&
last_epoch_clean <= i.last) {
// If the last_epoch_clean is included in this interval, then
// the pg must have been rw (for recovery to have completed).
// This is important because we won't know the _real_
// first_epoch because we stop at last_epoch_clean, and we
// don't want the oldest interval to randomly have
// maybe_went_rw false depending on the relative up_thru vs
// last_epoch_clean timing.
i.maybe_went_rw = true;
if (out)
*out << " " << i
<< " : includes last_epoch_clean " << last_epoch_clean
<< " and presumed to have been rw"
<< std::endl;
} else {
i.maybe_went_rw = false;
if (out)
*out << " " << i
<< " : primary up " << lastmap->get_up_from(i.primary)
<< "-" << lastmap->get_up_thru(i.primary)
<< " does not include interval"
<< std::endl;
}
} else {
i.maybe_went_rw = false;
if (out)
*out << __func__ << " " << i << " : acting set is too small" << std::endl;
}
past_intervals->past_intervals->add_interval(old_pg_pool.is_erasure(), i);
return true;
} else {
return false;
}
}
// true if the given map affects the prior set
bool PastIntervals::PriorSet::affected_by_map(
const OSDMap &osdmap,
const DoutPrefixProvider *dpp) const
{
for (auto p = probe.begin(); p != probe.end(); ++p) {
int o = p->osd;
// did someone in the prior set go down?
if (osdmap.is_down(o) && down.count(o) == 0) {
ldpp_dout(dpp, 10) << "affected_by_map osd." << o << " now down" << dendl;
return true;
}
// did a down osd in cur get (re)marked as lost?
auto r = blocked_by.find(o);
if (r != blocked_by.end()) {
if (!osdmap.exists(o)) {
ldpp_dout(dpp, 10) << "affected_by_map osd." << o << " no longer exists" << dendl;
return true;
}
if (osdmap.get_info(o).lost_at != r->second) {
ldpp_dout(dpp, 10) << "affected_by_map osd." << o << " (re)marked as lost" << dendl;
return true;
}
}
}
// did someone in the prior down set go up?
for (auto p = down.cbegin(); p != down.cend(); ++p) {
int o = *p;
if (osdmap.is_up(o)) {
ldpp_dout(dpp, 10) << "affected_by_map osd." << o << " now up" << dendl;
return true;
}
// did someone in the prior set get lost or destroyed?
if (!osdmap.exists(o)) {
ldpp_dout(dpp, 10) << "affected_by_map osd." << o << " no longer exists" << dendl;
return true;
}
// did a down osd in down get (re)marked as lost?
auto r = blocked_by.find(o);
if (r != blocked_by.end()) {
if (osdmap.get_info(o).lost_at != r->second) {
ldpp_dout(dpp, 10) << "affected_by_map osd." << o << " (re)marked as lost" << dendl;
return true;
}
}
}
return false;
}
ostream& operator<<(ostream& out, const PastIntervals::pg_interval_t& i)
{
out << "interval(" << i.first << "-" << i.last
<< " up " << i.up << "(" << i.up_primary << ")"
<< " acting " << i.acting << "(" << i.primary << ")";
if (i.maybe_went_rw)
out << " maybe_went_rw";
out << ")";
return out;
}
// -- pg_query_t --
void pg_query_t::encode(ceph::buffer::list &bl, uint64_t features) const {
ENCODE_START(3, 3, bl);
encode(type, bl);
encode(since, bl);
history.encode(bl);
encode(epoch_sent, bl);
encode(to, bl);
encode(from, bl);
ENCODE_FINISH(bl);
}
void pg_query_t::decode(ceph::buffer::list::const_iterator &bl) {
DECODE_START(3, bl);
decode(type, bl);
decode(since, bl);
history.decode(bl);
decode(epoch_sent, bl);
decode(to, bl);
decode(from, bl);
DECODE_FINISH(bl);
}
void pg_query_t::dump(Formatter *f) const
{
f->dump_int("from", from);
f->dump_int("to", to);
f->dump_string("type", get_type_name());
f->dump_stream("since") << since;
f->dump_stream("epoch_sent") << epoch_sent;
f->open_object_section("history");
history.dump(f);
f->close_section();
}
void pg_query_t::generate_test_instances(list<pg_query_t*>& o)
{
o.push_back(new pg_query_t());
list<pg_history_t*> h;
pg_history_t::generate_test_instances(h);
o.push_back(new pg_query_t(pg_query_t::INFO, shard_id_t(1), shard_id_t(2), *h.back(), 4));
o.push_back(new pg_query_t(pg_query_t::MISSING, shard_id_t(2), shard_id_t(3), *h.back(), 4));
o.push_back(new pg_query_t(pg_query_t::LOG, shard_id_t(0), shard_id_t(0),
eversion_t(4, 5), *h.back(), 4));
o.push_back(new pg_query_t(pg_query_t::FULLLOG,
shard_id_t::NO_SHARD, shard_id_t::NO_SHARD,
*h.back(), 5));
}
// -- pg_lease_t --
void pg_lease_t::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
encode(readable_until, bl);
encode(readable_until_ub, bl);
encode(interval, bl);
ENCODE_FINISH(bl);
}
void pg_lease_t::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
decode(readable_until, p);
decode(readable_until_ub, p);
decode(interval, p);
DECODE_FINISH(p);
}
void pg_lease_t::dump(Formatter *f) const
{
f->dump_stream("readable_until") << readable_until;
f->dump_stream("readable_until_ub") << readable_until_ub;
f->dump_stream("interval") << interval;
}
void pg_lease_t::generate_test_instances(std::list<pg_lease_t*>& o)
{
o.push_back(new pg_lease_t());
o.push_back(new pg_lease_t());
o.back()->readable_until = make_timespan(1.5);
o.back()->readable_until_ub = make_timespan(3.4);
o.back()->interval = make_timespan(1.0);
}
// -- pg_lease_ack_t --
void pg_lease_ack_t::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
encode(readable_until_ub, bl);
ENCODE_FINISH(bl);
}
void pg_lease_ack_t::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
decode(readable_until_ub, p);
DECODE_FINISH(p);
}
void pg_lease_ack_t::dump(Formatter *f) const
{
f->dump_stream("readable_until_ub") << readable_until_ub;
}
void pg_lease_ack_t::generate_test_instances(std::list<pg_lease_ack_t*>& o)
{
o.push_back(new pg_lease_ack_t());
o.push_back(new pg_lease_ack_t());
o.back()->readable_until_ub = make_timespan(3.4);
}
// -- ObjectModDesc --
void ObjectModDesc::visit(Visitor *visitor) const
{
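  // walk the encoded op stream in order, dispatching each rollback op to the
  // matching visitor callback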
auto bp = bl.cbegin();
try {
while (!bp.end()) {
DECODE_START(max_required_version, bp);
uint8_t code;
decode(code, bp);
switch (code) {
case APPEND: {
uint64_t size;
decode(size, bp);
visitor->append(size);
break;
}
case SETATTRS: {
map<string, std::optional<ceph::buffer::list> > attrs;
decode(attrs, bp);
visitor->setattrs(attrs);
break;
}
case DELETE: {
version_t old_version;
decode(old_version, bp);
visitor->rmobject(old_version);
break;
}
case CREATE: {
visitor->create();
break;
}
case UPDATE_SNAPS: {
set<snapid_t> snaps;
decode(snaps, bp);
visitor->update_snaps(snaps);
break;
}
case TRY_DELETE: {
version_t old_version;
decode(old_version, bp);
visitor->try_rmobject(old_version);
break;
}
case ROLLBACK_EXTENTS: {
vector<pair<uint64_t, uint64_t> > extents;
version_t gen;
decode(gen, bp);
decode(extents, bp);
visitor->rollback_extents(gen,extents);
break;
}
default:
ceph_abort_msg("Invalid rollback code");
}
DECODE_FINISH(bp);
}
} catch (...) {
ceph_abort_msg("Invalid encoding");
}
}
struct DumpVisitor : public ObjectModDesc::Visitor {
Formatter *f;
explicit DumpVisitor(Formatter *f) : f(f) {}
void append(uint64_t old_size) override {
f->open_object_section("op");
f->dump_string("code", "APPEND");
f->dump_unsigned("old_size", old_size);
f->close_section();
}
void setattrs(map<string, std::optional<ceph::buffer::list> > &attrs) override {
f->open_object_section("op");
f->dump_string("code", "SETATTRS");
f->open_array_section("attrs");
for (auto i = attrs.begin(); i != attrs.end(); ++i) {
f->dump_string("attr_name", i->first);
}
f->close_section();
f->close_section();
}
void rmobject(version_t old_version) override {
f->open_object_section("op");
f->dump_string("code", "RMOBJECT");
f->dump_unsigned("old_version", old_version);
f->close_section();
}
void try_rmobject(version_t old_version) override {
f->open_object_section("op");
f->dump_string("code", "TRY_RMOBJECT");
f->dump_unsigned("old_version", old_version);
f->close_section();
}
void create() override {
f->open_object_section("op");
f->dump_string("code", "CREATE");
f->close_section();
}
void update_snaps(const set<snapid_t> &snaps) override {
f->open_object_section("op");
f->dump_string("code", "UPDATE_SNAPS");
f->dump_stream("snaps") << snaps;
f->close_section();
}
void rollback_extents(
version_t gen,
const vector<pair<uint64_t, uint64_t> > &extents) override {
f->open_object_section("op");
f->dump_string("code", "ROLLBACK_EXTENTS");
f->dump_unsigned("gen", gen);
f->dump_stream("snaps") << extents;
f->close_section();
}
};
void ObjectModDesc::dump(Formatter *f) const
{
f->open_object_section("object_mod_desc");
f->dump_bool("can_local_rollback", can_local_rollback);
f->dump_bool("rollback_info_completed", rollback_info_completed);
{
f->open_array_section("ops");
DumpVisitor vis(f);
visit(&vis);
f->close_section();
}
f->close_section();
}
void ObjectModDesc::generate_test_instances(list<ObjectModDesc*>& o)
{
map<string, std::optional<ceph::buffer::list> > attrs;
attrs[OI_ATTR];
attrs[SS_ATTR];
attrs["asdf"];
o.push_back(new ObjectModDesc());
o.back()->append(100);
o.back()->setattrs(attrs);
o.push_back(new ObjectModDesc());
o.back()->rmobject(1001);
o.push_back(new ObjectModDesc());
o.back()->create();
o.back()->setattrs(attrs);
o.push_back(new ObjectModDesc());
o.back()->create();
o.back()->setattrs(attrs);
o.back()->mark_unrollbackable();
o.back()->append(1000);
}
void ObjectModDesc::encode(ceph::buffer::list &_bl) const
{
ENCODE_START(max_required_version, max_required_version, _bl);
encode(can_local_rollback, _bl);
encode(rollback_info_completed, _bl);
encode(bl, _bl);
ENCODE_FINISH(_bl);
}
void ObjectModDesc::decode(ceph::buffer::list::const_iterator &_bl)
{
DECODE_START(2, _bl);
max_required_version = struct_v;
decode(can_local_rollback, _bl);
decode(rollback_info_completed, _bl);
decode(bl, _bl);
// ensure bl does not pin a larger ceph::buffer in memory
bl.rebuild();
bl.reassign_to_mempool(mempool::mempool_osd_pglog);
DECODE_FINISH(_bl);
}
std::atomic<uint32_t> ObjectCleanRegions::max_num_intervals = {10};
void ObjectCleanRegions::set_max_num_intervals(uint32_t num)
{
max_num_intervals = num;
}
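// Cap the number of clean intervals tracked per object: while over the
// limit, drop the shortest clean interval (conservatively treating that
// range as dirty).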
void ObjectCleanRegions::trim()
{
  while (clean_offsets.num_intervals() > max_num_intervals) {
typename interval_set<uint64_t>::iterator shortest_interval = clean_offsets.begin();
if (shortest_interval == clean_offsets.end())
break;
for (typename interval_set<uint64_t>::iterator it = clean_offsets.begin();
it != clean_offsets.end();
++it) {
if (it.get_len() < shortest_interval.get_len())
shortest_interval = it;
}
clean_offsets.erase(shortest_interval);
}
}
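// Combine with another ObjectCleanRegions: a byte range (or the omap) is
// clean only if it is clean in both.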
void ObjectCleanRegions::merge(const ObjectCleanRegions &other)
{
clean_offsets.intersection_of(other.clean_offsets);
clean_omap = clean_omap && other.clean_omap;
trim();
}
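// Mark [offset, offset+len) dirty by intersecting the clean set with
// everything outside that range.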
void ObjectCleanRegions::mark_data_region_dirty(uint64_t offset, uint64_t len)
{
interval_set<uint64_t> clean_region;
clean_region.insert(0, (uint64_t)-1);
clean_region.erase(offset, len);
clean_offsets.intersection_of(clean_region);
trim();
}
bool ObjectCleanRegions::is_clean_region(uint64_t offset, uint64_t len) const
{
return clean_offsets.contains(offset, len);
}
void ObjectCleanRegions::mark_omap_dirty()
{
clean_omap = false;
}
void ObjectCleanRegions::mark_object_new()
{
new_object = true;
}
void ObjectCleanRegions::mark_fully_dirty()
{
mark_data_region_dirty(0, (uint64_t)-1);
mark_omap_dirty();
mark_object_new();
}
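// The dirty set is the complement of the clean offsets over the full
// offset range.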
interval_set<uint64_t> ObjectCleanRegions::get_dirty_regions() const
{
interval_set<uint64_t> dirty_region;
dirty_region.insert(0, (uint64_t)-1);
dirty_region.subtract(clean_offsets);
return dirty_region;
}
bool ObjectCleanRegions::omap_is_dirty() const
{
return !clean_omap;
}
bool ObjectCleanRegions::object_is_exist() const
{
return !new_object;
}
void ObjectCleanRegions::encode(bufferlist &bl) const
{
ENCODE_START(1, 1, bl);
using ceph::encode;
encode(clean_offsets, bl);
encode(clean_omap, bl);
encode(new_object, bl);
ENCODE_FINISH(bl);
}
void ObjectCleanRegions::decode(bufferlist::const_iterator &bl)
{
DECODE_START(1, bl);
using ceph::decode;
decode(clean_offsets, bl);
decode(clean_omap, bl);
decode(new_object, bl);
DECODE_FINISH(bl);
}
void ObjectCleanRegions::dump(Formatter *f) const
{
f->open_object_section("object_clean_regions");
f->dump_stream("clean_offsets") << clean_offsets;
f->dump_bool("clean_omap", clean_omap);
f->dump_bool("new_object", new_object);
f->close_section();
}
void ObjectCleanRegions::generate_test_instances(list<ObjectCleanRegions*>& o)
{
o.push_back(new ObjectCleanRegions());
o.push_back(new ObjectCleanRegions());
o.back()->mark_data_region_dirty(4096, 40960);
o.back()->mark_omap_dirty();
o.back()->mark_object_new();
}
ostream& operator<<(ostream& out, const ObjectCleanRegions& ocr)
{
return out << "clean_offsets: " << ocr.clean_offsets
<< ", clean_omap: " << ocr.clean_omap
<< ", new_object: " << ocr.new_object;
}
// -- pg_log_entry_t --
string pg_log_entry_t::get_key_name() const
{
return version.get_key_name();
}
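// Encode the entry into a nested bufferlist and append its crc32c so that
// decode_with_checksum() can detect corruption.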
void pg_log_entry_t::encode_with_checksum(ceph::buffer::list& bl) const
{
using ceph::encode;
ceph::buffer::list ebl(sizeof(*this)*2);
this->encode(ebl);
__u32 crc = ebl.crc32c(0);
encode(ebl, bl);
encode(crc, bl);
}
void pg_log_entry_t::decode_with_checksum(ceph::buffer::list::const_iterator& p)
{
using ceph::decode;
ceph::buffer::list bl;
decode(bl, p);
__u32 crc;
decode(crc, p);
if (crc != bl.crc32c(0))
throw ceph::buffer::malformed_input("bad checksum on pg_log_entry_t");
auto q = bl.cbegin();
this->decode(q);
}
void pg_log_entry_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(14, 4, bl);
encode(op, bl);
encode(soid, bl);
encode(version, bl);
/**
* Added with reverting_to:
* Previous code used prior_version to encode
* what we now call reverting_to. This will
* allow older code to decode reverting_to
* into prior_version as expected.
*/
if (op == LOST_REVERT)
encode(reverting_to, bl);
else
encode(prior_version, bl);
encode(reqid, bl);
encode(mtime, bl);
if (op == LOST_REVERT)
encode(prior_version, bl);
encode(snaps, bl);
encode(user_version, bl);
encode(mod_desc, bl);
encode(extra_reqids, bl);
if (op == ERROR)
encode(return_code, bl);
if (!extra_reqids.empty())
encode(extra_reqid_return_codes, bl);
encode(clean_regions, bl);
if (op != ERROR)
encode(return_code, bl);
encode(op_returns, bl);
ENCODE_FINISH(bl);
}
void pg_log_entry_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START_LEGACY_COMPAT_LEN(14, 4, 4, bl);
decode(op, bl);
if (struct_v < 2) {
sobject_t old_soid;
decode(old_soid, bl);
soid.oid = old_soid.oid;
soid.snap = old_soid.snap;
invalid_hash = true;
} else {
decode(soid, bl);
}
if (struct_v < 3)
invalid_hash = true;
decode(version, bl);
if (struct_v >= 6 && op == LOST_REVERT)
decode(reverting_to, bl);
else
decode(prior_version, bl);
decode(reqid, bl);
decode(mtime, bl);
if (struct_v < 5)
invalid_pool = true;
if (op == LOST_REVERT) {
if (struct_v >= 6) {
decode(prior_version, bl);
} else {
reverting_to = prior_version;
}
}
if (struct_v >= 7 || // for v >= 7, this is for all ops.
op == CLONE) { // for v < 7, it's only present for CLONE.
decode(snaps, bl);
// ensure snaps does not pin a larger ceph::buffer in memory
snaps.rebuild();
snaps.reassign_to_mempool(mempool::mempool_osd_pglog);
}
if (struct_v >= 8)
decode(user_version, bl);
else
user_version = version.version;
if (struct_v >= 9)
decode(mod_desc, bl);
else
mod_desc.mark_unrollbackable();
if (struct_v >= 10)
decode(extra_reqids, bl);
if (struct_v >= 11 && op == ERROR)
decode(return_code, bl);
if (struct_v >= 12 && !extra_reqids.empty())
decode(extra_reqid_return_codes, bl);
if (struct_v >= 13)
decode(clean_regions, bl);
else
clean_regions.mark_fully_dirty();
if (struct_v >= 14) {
if (op != ERROR) {
decode(return_code, bl);
}
decode(op_returns, bl);
}
DECODE_FINISH(bl);
}
void pg_log_entry_t::dump(Formatter *f) const
{
f->dump_string("op", get_op_name());
f->dump_stream("object") << soid;
f->dump_stream("version") << version;
f->dump_stream("prior_version") << prior_version;
f->dump_stream("reqid") << reqid;
f->open_array_section("extra_reqids");
uint32_t idx = 0;
for (auto p = extra_reqids.begin();
p != extra_reqids.end();
++idx, ++p) {
f->open_object_section("extra_reqid");
f->dump_stream("reqid") << p->first;
f->dump_stream("user_version") << p->second;
auto it = extra_reqid_return_codes.find(idx);
if (it != extra_reqid_return_codes.end()) {
f->dump_int("return_code", it->second);
}
f->close_section();
}
f->close_section();
f->dump_stream("mtime") << mtime;
f->dump_int("return_code", return_code);
if (!op_returns.empty()) {
f->open_array_section("op_returns");
for (auto& i : op_returns) {
f->dump_object("op", i);
}
f->close_section();
}
if (snaps.length() > 0) {
vector<snapid_t> v;
ceph::buffer::list c = snaps;
auto p = c.cbegin();
try {
using ceph::decode;
decode(v, p);
} catch (...) {
v.clear();
}
f->open_object_section("snaps");
for (auto p = v.begin(); p != v.end(); ++p)
f->dump_unsigned("snap", *p);
f->close_section();
}
{
f->open_object_section("mod_desc");
mod_desc.dump(f);
f->close_section();
}
{
f->open_object_section("clean_regions");
clean_regions.dump(f);
f->close_section();
}
}
void pg_log_entry_t::generate_test_instances(list<pg_log_entry_t*>& o)
{
o.push_back(new pg_log_entry_t());
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
o.push_back(new pg_log_entry_t(MODIFY, oid, eversion_t(1,2), eversion_t(3,4),
1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
utime_t(8,9), 0));
o.push_back(new pg_log_entry_t(ERROR, oid, eversion_t(1,2), eversion_t(3,4),
1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
utime_t(8,9), -ENOENT));
}
ostream& operator<<(ostream& out, const pg_log_entry_t& e)
{
out << e.version << " (" << e.prior_version << ") "
<< std::left << std::setw(8) << e.get_op_name() << ' '
<< e.soid << " by " << e.reqid << " " << e.mtime
<< " " << e.return_code;
if (!e.op_returns.empty()) {
out << " " << e.op_returns;
}
if (e.snaps.length()) {
vector<snapid_t> snaps;
ceph::buffer::list c = e.snaps;
auto p = c.cbegin();
try {
decode(snaps, p);
} catch (...) {
snaps.clear();
}
out << " snaps " << snaps;
}
out << " ObjectCleanRegions " << e.clean_regions;
return out;
}
// -- pg_log_dup_t --
std::string pg_log_dup_t::get_key_name() const
{
static const char prefix[] = "dup_";
std::string key(36, ' ');
memcpy(&key[0], prefix, 4);
version.get_key_name(&key[4]);
key.resize(35); // remove the null terminator
return key;
}
void pg_log_dup_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(2, 1, bl);
encode(reqid, bl);
encode(version, bl);
encode(user_version, bl);
encode(return_code, bl);
encode(op_returns, bl);
ENCODE_FINISH(bl);
}
void pg_log_dup_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(2, bl);
decode(reqid, bl);
decode(version, bl);
decode(user_version, bl);
decode(return_code, bl);
if (struct_v >= 2) {
decode(op_returns, bl);
}
DECODE_FINISH(bl);
}
void pg_log_dup_t::dump(Formatter *f) const
{
f->dump_stream("reqid") << reqid;
f->dump_stream("version") << version;
f->dump_stream("user_version") << user_version;
f->dump_stream("return_code") << return_code;
if (!op_returns.empty()) {
f->open_array_section("op_returns");
for (auto& i : op_returns) {
f->dump_object("op", i);
}
f->close_section();
}
}
void pg_log_dup_t::generate_test_instances(list<pg_log_dup_t*>& o)
{
o.push_back(new pg_log_dup_t());
o.push_back(new pg_log_dup_t(eversion_t(1,2),
1,
osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
0));
o.push_back(new pg_log_dup_t(eversion_t(1,2),
2,
osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
-ENOENT));
}
std::ostream& operator<<(std::ostream& out, const pg_log_dup_t& e) {
out << "log_dup(reqid=" << e.reqid <<
" v=" << e.version << " uv=" << e.user_version <<
" rc=" << e.return_code;
if (!e.op_returns.empty()) {
out << " " << e.op_returns;
}
return out << ")";
}
// -- pg_log_t --
// out: pg_log_t that only has entries that apply to import_pgid using curmap
// reject: Entries rejected from "in" are in the reject.log. Other fields not set.
void pg_log_t::filter_log(spg_t import_pgid, const OSDMap &curmap,
const string &hit_set_namespace, const pg_log_t &in,
pg_log_t &out, pg_log_t &reject)
{
out = in;
out.log.clear();
reject.log.clear();
for (auto i = in.log.cbegin(); i != in.log.cend(); ++i) {
// Reject pg log entries for temporary objects
if (i->soid.is_temp()) {
reject.log.push_back(*i);
continue;
}
if (i->soid.nspace != hit_set_namespace) {
object_t oid = i->soid.oid;
object_locator_t loc(i->soid);
pg_t raw_pgid = curmap.object_locator_to_pg(oid, loc);
pg_t pgid = curmap.raw_pg_to_pg(raw_pgid);
if (import_pgid.pgid == pgid) {
out.log.push_back(*i);
} else {
reject.log.push_back(*i);
}
} else {
out.log.push_back(*i);
}
}
}
void pg_log_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(7, 3, bl);
encode(head, bl);
encode(tail, bl);
encode(log, bl);
encode(can_rollback_to, bl);
encode(rollback_info_trimmed_to, bl);
encode(dups, bl);
ENCODE_FINISH(bl);
}
void pg_log_t::decode(ceph::buffer::list::const_iterator &bl, int64_t pool)
{
DECODE_START_LEGACY_COMPAT_LEN(7, 3, 3, bl);
decode(head, bl);
decode(tail, bl);
if (struct_v < 2) {
bool backlog;
decode(backlog, bl);
}
decode(log, bl);
if (struct_v >= 5)
decode(can_rollback_to, bl);
if (struct_v >= 6)
decode(rollback_info_trimmed_to, bl);
else
rollback_info_trimmed_to = tail;
if (struct_v >= 7)
decode(dups, bl);
DECODE_FINISH(bl);
// handle hobject_t format change
if (struct_v < 4) {
for (auto i = log.begin(); i != log.end(); ++i) {
if (!i->soid.is_max() && i->soid.pool == -1)
i->soid.pool = pool;
}
}
}
void pg_log_t::dump(Formatter *f) const
{
f->dump_stream("head") << head;
f->dump_stream("tail") << tail;
f->open_array_section("log");
for (auto p = log.cbegin(); p != log.cend(); ++p) {
f->open_object_section("entry");
p->dump(f);
f->close_section();
}
f->close_section();
f->open_array_section("dups");
for (const auto& entry : dups) {
f->open_object_section("entry");
entry.dump(f);
f->close_section();
}
f->close_section();
}
void pg_log_t::generate_test_instances(list<pg_log_t*>& o)
{
o.push_back(new pg_log_t);
// this is nonsensical:
o.push_back(new pg_log_t);
o.back()->head = eversion_t(1,2);
o.back()->tail = eversion_t(3,4);
list<pg_log_entry_t*> e;
pg_log_entry_t::generate_test_instances(e);
for (auto p = e.begin(); p != e.end(); ++p)
o.back()->log.push_back(**p);
}
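// Shared by copy_after()/copy_up_to(): append dup entries from other (its
// dups plus any log entries at or below target.tail) whose version is within
// the most recent maxdups versions relative to target.head.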
static void _handle_dups(CephContext* cct, pg_log_t &target, const pg_log_t &other, unsigned maxdups)
{
auto earliest_dup_version =
target.head.version < maxdups ? 0u : target.head.version - maxdups + 1;
lgeneric_subdout(cct, osd, 20) << __func__ << " earliest_dup_version "
<< earliest_dup_version << dendl;
for (auto d = other.dups.cbegin(); d != other.dups.cend(); ++d) {
if (d->version.version >= earliest_dup_version) {
lgeneric_subdout(cct, osd, 20)
<< "copy_up_to/copy_after copy dup version "
<< d->version << dendl;
target.dups.push_back(pg_log_dup_t(*d));
}
}
for (auto i = other.log.cbegin(); i != other.log.cend(); ++i) {
ceph_assert(i->version > other.tail);
if (i->version > target.tail)
break;
if (i->version.version >= earliest_dup_version) {
lgeneric_subdout(cct, osd, 20)
<< "copy_up_to/copy_after copy dup from log version "
<< i->version << dendl;
target.dups.push_back(pg_log_dup_t(*i));
}
}
}
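// Copy the log entries from other that are strictly newer than v, scanning
// newest to oldest; tail is set to the version of the newest entry not
// copied. Dups are carried over via _handle_dups().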
void pg_log_t::copy_after(CephContext* cct, const pg_log_t &other, eversion_t v)
{
can_rollback_to = other.can_rollback_to;
head = other.head;
tail = other.tail;
lgeneric_subdout(cct, osd, 20) << __func__ << " v " << v
<< " dups.size()=" << dups.size()
<< " other.dups.size()=" << other.dups.size() << dendl;
for (auto i = other.log.crbegin(); i != other.log.crend(); ++i) {
ceph_assert(i->version > other.tail);
if (i->version <= v) {
// make tail accurate.
tail = i->version;
break;
}
lgeneric_subdout(cct, osd, 20) << __func__ << " copy log version " << i->version << dendl;
log.push_front(*i);
}
_handle_dups(cct, *this, other, cct->_conf->osd_pg_log_dups_tracked);
lgeneric_subdout(cct, osd, 20) << __func__ << " END v " << v
<< " dups.size()=" << dups.size()
<< " other.dups.size()=" << other.dups.size() << dendl;
}
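// Copy at most max of the newest log entries from other; tail is set to the
// version of the first entry left out. Dups are carried over via
// _handle_dups().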
void pg_log_t::copy_up_to(CephContext* cct, const pg_log_t &other, int max)
{
can_rollback_to = other.can_rollback_to;
int n = 0;
head = other.head;
tail = other.tail;
lgeneric_subdout(cct, osd, 20) << __func__ << " max " << max
<< " dups.size()=" << dups.size()
<< " other.dups.size()=" << other.dups.size() << dendl;
for (auto i = other.log.crbegin(); i != other.log.crend(); ++i) {
ceph_assert(i->version > other.tail);
if (n++ >= max) {
tail = i->version;
break;
}
lgeneric_subdout(cct, osd, 20) << __func__ << " copy log version " << i->version << dendl;
log.push_front(*i);
}
_handle_dups(cct, *this, other, cct->_conf->osd_pg_log_dups_tracked);
lgeneric_subdout(cct, osd, 20) << __func__ << " END max " << max
<< " dups.size()=" << dups.size()
<< " other.dups.size()=" << other.dups.size() << dendl;
}
ostream& pg_log_t::print(ostream& out) const
{
out << *this << std::endl;
for (auto p = log.cbegin(); p != log.cend(); ++p)
out << *p << std::endl;
for (const auto& entry : dups) {
out << " dup entry: " << entry << std::endl;
}
return out;
}
// -- pg_missing_t --
ostream& operator<<(ostream& out, const pg_missing_item& i)
{
out << i.need;
if (i.have != eversion_t())
out << "(" << i.have << ")";
out << " flags = " << i.flag_str()
<< " " << i.clean_regions;
return out;
}
// -- object_copy_cursor_t --
void object_copy_cursor_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(1, 1, bl);
encode(attr_complete, bl);
encode(data_offset, bl);
encode(data_complete, bl);
encode(omap_offset, bl);
encode(omap_complete, bl);
ENCODE_FINISH(bl);
}
void object_copy_cursor_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(attr_complete, bl);
decode(data_offset, bl);
decode(data_complete, bl);
decode(omap_offset, bl);
decode(omap_complete, bl);
DECODE_FINISH(bl);
}
void object_copy_cursor_t::dump(Formatter *f) const
{
f->dump_unsigned("attr_complete", (int)attr_complete);
f->dump_unsigned("data_offset", data_offset);
f->dump_unsigned("data_complete", (int)data_complete);
f->dump_string("omap_offset", omap_offset);
f->dump_unsigned("omap_complete", (int)omap_complete);
}
void object_copy_cursor_t::generate_test_instances(list<object_copy_cursor_t*>& o)
{
o.push_back(new object_copy_cursor_t);
o.push_back(new object_copy_cursor_t);
o.back()->attr_complete = true;
o.back()->data_offset = 123;
o.push_back(new object_copy_cursor_t);
o.back()->attr_complete = true;
o.back()->data_complete = true;
o.back()->omap_offset = "foo";
o.push_back(new object_copy_cursor_t);
o.back()->attr_complete = true;
o.back()->data_complete = true;
o.back()->omap_complete = true;
}
// -- object_copy_data_t --
void object_copy_data_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
ENCODE_START(8, 5, bl);
encode(size, bl);
encode(mtime, bl);
encode(attrs, bl);
encode(data, bl);
encode(omap_data, bl);
encode(cursor, bl);
encode(omap_header, bl);
encode(snaps, bl);
encode(snap_seq, bl);
encode(flags, bl);
encode(data_digest, bl);
encode(omap_digest, bl);
encode(reqids, bl);
encode(truncate_seq, bl);
encode(truncate_size, bl);
encode(reqid_return_codes, bl);
ENCODE_FINISH(bl);
}
void object_copy_data_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(8, bl);
if (struct_v < 5) {
// old
decode(size, bl);
decode(mtime, bl);
{
string category;
decode(category, bl); // no longer used
}
decode(attrs, bl);
decode(data, bl);
{
map<string,ceph::buffer::list> omap;
decode(omap, bl);
omap_data.clear();
if (!omap.empty()) {
using ceph::encode;
encode(omap, omap_data);
}
}
decode(cursor, bl);
if (struct_v >= 2)
decode(omap_header, bl);
if (struct_v >= 3) {
decode(snaps, bl);
decode(snap_seq, bl);
} else {
snaps.clear();
snap_seq = 0;
}
if (struct_v >= 4) {
decode(flags, bl);
decode(data_digest, bl);
decode(omap_digest, bl);
}
} else {
// current
decode(size, bl);
decode(mtime, bl);
decode(attrs, bl);
decode(data, bl);
decode(omap_data, bl);
decode(cursor, bl);
decode(omap_header, bl);
decode(snaps, bl);
decode(snap_seq, bl);
if (struct_v >= 4) {
decode(flags, bl);
decode(data_digest, bl);
decode(omap_digest, bl);
}
if (struct_v >= 6) {
decode(reqids, bl);
}
if (struct_v >= 7) {
decode(truncate_seq, bl);
decode(truncate_size, bl);
}
if (struct_v >= 8) {
decode(reqid_return_codes, bl);
}
}
DECODE_FINISH(bl);
}
void object_copy_data_t::generate_test_instances(list<object_copy_data_t*>& o)
{
o.push_back(new object_copy_data_t());
list<object_copy_cursor_t*> cursors;
object_copy_cursor_t::generate_test_instances(cursors);
auto ci = cursors.begin();
o.back()->cursor = **(ci++);
o.push_back(new object_copy_data_t());
o.back()->cursor = **(ci++);
o.push_back(new object_copy_data_t());
o.back()->size = 1234;
o.back()->mtime.set_from_double(1234);
ceph::buffer::ptr bp("there", 5);
ceph::buffer::list bl;
bl.push_back(bp);
o.back()->attrs["hello"] = bl;
ceph::buffer::ptr bp2("not", 3);
ceph::buffer::list bl2;
bl2.push_back(bp2);
map<string,ceph::buffer::list> omap;
omap["why"] = bl2;
using ceph::encode;
encode(omap, o.back()->omap_data);
ceph::buffer::ptr databp("iamsomedatatocontain", 20);
o.back()->data.push_back(databp);
o.back()->omap_header.append("this is an omap header");
o.back()->snaps.push_back(123);
o.back()->reqids.push_back(make_pair(osd_reqid_t(), version_t()));
}
void object_copy_data_t::dump(Formatter *f) const
{
f->open_object_section("cursor");
cursor.dump(f);
f->close_section(); // cursor
f->dump_int("size", size);
f->dump_stream("mtime") << mtime;
/* we should really print out the attrs here, but ceph::buffer::list
const-correctness prevents that */
f->dump_int("attrs_size", attrs.size());
f->dump_int("flags", flags);
f->dump_unsigned("data_digest", data_digest);
f->dump_unsigned("omap_digest", omap_digest);
f->dump_int("omap_data_length", omap_data.length());
f->dump_int("omap_header_length", omap_header.length());
f->dump_int("data_length", data.length());
f->open_array_section("snaps");
for (auto p = snaps.cbegin(); p != snaps.cend(); ++p)
f->dump_unsigned("snap", *p);
f->close_section();
f->open_array_section("reqids");
uint32_t idx = 0;
for (auto p = reqids.begin();
p != reqids.end();
++idx, ++p) {
f->open_object_section("extra_reqid");
f->dump_stream("reqid") << p->first;
f->dump_stream("user_version") << p->second;
auto it = reqid_return_codes.find(idx);
if (it != reqid_return_codes.end()) {
f->dump_int("return_code", it->second);
}
f->close_section();
}
f->close_section();
}
// -- pg_create_t --
void pg_create_t::encode(ceph::buffer::list &bl) const
{
ENCODE_START(1, 1, bl);
encode(created, bl);
encode(parent, bl);
encode(split_bits, bl);
ENCODE_FINISH(bl);
}
void pg_create_t::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(created, bl);
decode(parent, bl);
decode(split_bits, bl);
DECODE_FINISH(bl);
}
void pg_create_t::dump(Formatter *f) const
{
f->dump_unsigned("created", created);
f->dump_stream("parent") << parent;
f->dump_int("split_bits", split_bits);
}
void pg_create_t::generate_test_instances(list<pg_create_t*>& o)
{
o.push_back(new pg_create_t);
o.push_back(new pg_create_t(1, pg_t(3, 4), 2));
}
// -- pg_hit_set_info_t --
void pg_hit_set_info_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(2, 1, bl);
encode(begin, bl);
encode(end, bl);
encode(version, bl);
encode(using_gmt, bl);
ENCODE_FINISH(bl);
}
void pg_hit_set_info_t::decode(ceph::buffer::list::const_iterator& p)
{
DECODE_START(2, p);
decode(begin, p);
decode(end, p);
decode(version, p);
if (struct_v >= 2) {
decode(using_gmt, p);
} else {
using_gmt = false;
}
DECODE_FINISH(p);
}
void pg_hit_set_info_t::dump(Formatter *f) const
{
f->dump_stream("begin") << begin;
f->dump_stream("end") << end;
f->dump_stream("version") << version;
f->dump_stream("using_gmt") << using_gmt;
}
void pg_hit_set_info_t::generate_test_instances(list<pg_hit_set_info_t*>& ls)
{
ls.push_back(new pg_hit_set_info_t);
ls.push_back(new pg_hit_set_info_t);
ls.back()->begin = utime_t(1, 2);
ls.back()->end = utime_t(3, 4);
}
// -- pg_hit_set_history_t --
void pg_hit_set_history_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(1, 1, bl);
encode(current_last_update, bl);
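  // placeholders for fields removed from this struct, kept so the wire
  // format stays unchanged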
{
utime_t dummy_stamp;
encode(dummy_stamp, bl);
}
{
pg_hit_set_info_t dummy_info;
encode(dummy_info, bl);
}
encode(history, bl);
ENCODE_FINISH(bl);
}
void pg_hit_set_history_t::decode(ceph::buffer::list::const_iterator& p)
{
DECODE_START(1, p);
decode(current_last_update, p);
{
utime_t dummy_stamp;
decode(dummy_stamp, p);
}
{
pg_hit_set_info_t dummy_info;
decode(dummy_info, p);
}
decode(history, p);
DECODE_FINISH(p);
}
void pg_hit_set_history_t::dump(Formatter *f) const
{
f->dump_stream("current_last_update") << current_last_update;
f->open_array_section("history");
for (auto p = history.cbegin(); p != history.cend(); ++p) {
f->open_object_section("info");
p->dump(f);
f->close_section();
}
f->close_section();
}
void pg_hit_set_history_t::generate_test_instances(list<pg_hit_set_history_t*>& ls)
{
ls.push_back(new pg_hit_set_history_t);
ls.push_back(new pg_hit_set_history_t);
ls.back()->current_last_update = eversion_t(1, 2);
ls.back()->history.push_back(pg_hit_set_info_t());
}
// -- OSDSuperblock --
void OSDSuperblock::encode(ceph::buffer::list &bl) const
{
ENCODE_START(10, 5, bl);
encode(cluster_fsid, bl);
encode(whoami, bl);
encode(current_epoch, bl);
encode(oldest_map, bl);
encode(newest_map, bl);
encode(weight, bl);
compat_features.encode(bl);
encode(clean_thru, bl);
encode(mounted, bl);
encode(osd_fsid, bl);
encode((epoch_t)0, bl); // epoch_t last_epoch_marked_full
encode((uint32_t)0, bl); // map<int64_t,epoch_t> pool_last_epoch_marked_full
encode(purged_snaps_last, bl);
encode(last_purged_snaps_scrub, bl);
encode(cluster_osdmap_trim_lower_bound, bl);
ENCODE_FINISH(bl);
}
void OSDSuperblock::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START_LEGACY_COMPAT_LEN(10, 5, 5, bl);
if (struct_v < 3) {
string magic;
decode(magic, bl);
}
decode(cluster_fsid, bl);
decode(whoami, bl);
decode(current_epoch, bl);
decode(oldest_map, bl);
decode(newest_map, bl);
decode(weight, bl);
if (struct_v >= 2) {
compat_features.decode(bl);
} else { //upgrade it!
compat_features.incompat.insert(CEPH_OSD_FEATURE_INCOMPAT_BASE);
}
decode(clean_thru, bl);
decode(mounted, bl);
if (struct_v >= 4)
decode(osd_fsid, bl);
if (struct_v >= 6) {
epoch_t last_map_marked_full;
decode(last_map_marked_full, bl);
}
if (struct_v >= 7) {
map<int64_t,epoch_t> pool_last_map_marked_full;
decode(pool_last_map_marked_full, bl);
}
if (struct_v >= 9) {
decode(purged_snaps_last, bl);
decode(last_purged_snaps_scrub, bl);
} else {
purged_snaps_last = 0;
}
if (struct_v >= 10) {
decode(cluster_osdmap_trim_lower_bound, bl);
} else {
cluster_osdmap_trim_lower_bound = 0;
}
DECODE_FINISH(bl);
}
void OSDSuperblock::dump(Formatter *f) const
{
f->dump_stream("cluster_fsid") << cluster_fsid;
f->dump_stream("osd_fsid") << osd_fsid;
f->dump_int("whoami", whoami);
f->dump_int("current_epoch", current_epoch);
f->dump_int("oldest_map", oldest_map);
f->dump_int("newest_map", newest_map);
f->dump_float("weight", weight);
f->open_object_section("compat");
compat_features.dump(f);
f->close_section();
f->dump_int("clean_thru", clean_thru);
f->dump_int("last_epoch_mounted", mounted);
f->dump_unsigned("purged_snaps_last", purged_snaps_last);
f->dump_stream("last_purged_snaps_scrub") << last_purged_snaps_scrub;
f->dump_int("cluster_osdmap_trim_lower_bound",
cluster_osdmap_trim_lower_bound);
}
void OSDSuperblock::generate_test_instances(list<OSDSuperblock*>& o)
{
OSDSuperblock z;
o.push_back(new OSDSuperblock(z));
z.cluster_fsid.parse("01010101-0101-0101-0101-010101010101");
z.osd_fsid.parse("02020202-0202-0202-0202-020202020202");
z.whoami = 3;
z.current_epoch = 4;
z.oldest_map = 5;
z.newest_map = 9;
z.mounted = 8;
z.clean_thru = 7;
o.push_back(new OSDSuperblock(z));
o.push_back(new OSDSuperblock(z));
}
// -- SnapSet --
void SnapSet::encode(ceph::buffer::list& bl) const
{
ENCODE_START(3, 2, bl);
encode(seq, bl);
encode(true, bl); // head_exists
encode(snaps, bl);
encode(clones, bl);
encode(clone_overlap, bl);
encode(clone_size, bl);
encode(clone_snaps, bl);
ENCODE_FINISH(bl);
}
void SnapSet::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
decode(seq, bl);
bl += 1u; // skip legacy head_exists (always true)
decode(snaps, bl);
decode(clones, bl);
decode(clone_overlap, bl);
decode(clone_size, bl);
if (struct_v >= 3) {
decode(clone_snaps, bl);
} else {
clone_snaps.clear();
}
DECODE_FINISH(bl);
}
void SnapSet::dump(Formatter *f) const
{
f->dump_unsigned("seq", seq);
f->open_array_section("clones");
for (auto p = clones.cbegin(); p != clones.cend(); ++p) {
f->open_object_section("clone");
f->dump_unsigned("snap", *p);
auto cs = clone_size.find(*p);
if (cs != clone_size.end())
f->dump_unsigned("size", cs->second);
else
f->dump_string("size", "????");
auto co = clone_overlap.find(*p);
if (co != clone_overlap.end())
f->dump_stream("overlap") << co->second;
else
f->dump_stream("overlap") << "????";
auto q = clone_snaps.find(*p);
if (q != clone_snaps.end()) {
f->open_array_section("snaps");
for (auto s : q->second) {
f->dump_unsigned("snap", s);
}
f->close_section();
}
f->close_section();
}
f->close_section();
}
void SnapSet::generate_test_instances(list<SnapSet*>& o)
{
o.push_back(new SnapSet);
o.push_back(new SnapSet);
o.back()->seq = 123;
o.back()->snaps.push_back(123);
o.back()->snaps.push_back(12);
o.push_back(new SnapSet);
o.back()->seq = 123;
o.back()->snaps.push_back(123);
o.back()->snaps.push_back(12);
o.back()->clones.push_back(12);
o.back()->clone_size[12] = 12345;
o.back()->clone_overlap[12];
o.back()->clone_snaps[12] = {12, 10, 8};
}
ostream& operator<<(ostream& out, const SnapSet& cs)
{
return out << cs.seq << "=" << cs.snaps << ":"
<< cs.clone_snaps;
}
void SnapSet::from_snap_set(const librados::snap_set_t& ss, bool legacy)
{
// NOTE: our reconstruction of snaps (and the snapc) is not strictly
// correct: it will not include snaps that still logically exist
  // but for which no clone is defined. For all
// practical purposes this doesn't matter, since we only use that
// information to clone on the OSD, and we have already moved
// forward past that part of the object history.
seq = ss.seq;
set<snapid_t> _snaps;
set<snapid_t> _clones;
for (auto p = ss.clones.cbegin(); p != ss.clones.cend(); ++p) {
if (p->cloneid != librados::SNAP_HEAD) {
_clones.insert(p->cloneid);
_snaps.insert(p->snaps.begin(), p->snaps.end());
clone_size[p->cloneid] = p->size;
clone_overlap[p->cloneid]; // the entry must exist, even if it's empty.
for (auto q = p->overlap.cbegin(); q != p->overlap.cend(); ++q)
clone_overlap[p->cloneid].insert(q->first, q->second);
if (!legacy) {
// p->snaps is ascending; clone_snaps is descending
vector<snapid_t>& v = clone_snaps[p->cloneid];
for (auto q = p->snaps.rbegin(); q != p->snaps.rend(); ++q) {
v.push_back(*q);
}
}
}
}
// ascending
clones.clear();
clones.reserve(_clones.size());
for (auto p = _clones.begin(); p != _clones.end(); ++p)
clones.push_back(*p);
// descending
snaps.clear();
snaps.reserve(_snaps.size());
for (auto p = _snaps.rbegin();
p != _snaps.rend(); ++p)
snaps.push_back(*p);
}
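// Bytes stored uniquely for this clone: its recorded size minus the bytes
// recorded as overlapping (shared) in clone_overlap.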
uint64_t SnapSet::get_clone_bytes(snapid_t clone) const
{
ceph_assert(clone_size.count(clone));
uint64_t size = clone_size.find(clone)->second;
ceph_assert(clone_overlap.count(clone));
const interval_set<uint64_t> &overlap = clone_overlap.find(clone)->second;
ceph_assert(size >= (uint64_t)overlap.size());
return size - overlap.size();
}
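// Drop snapids the pool has already removed from the snaps vector; the
// clone bookkeeping is left untouched.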
void SnapSet::filter(const pg_pool_t &pinfo)
{
vector<snapid_t> oldsnaps;
oldsnaps.swap(snaps);
for (auto i = oldsnaps.cbegin(); i != oldsnaps.cend(); ++i) {
if (!pinfo.is_removed_snap(*i))
snaps.push_back(*i);
}
}
SnapSet SnapSet::get_filtered(const pg_pool_t &pinfo) const
{
SnapSet ss = *this;
ss.filter(pinfo);
return ss;
}
// -- watch_info_t --
void watch_info_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
ENCODE_START(4, 3, bl);
encode(cookie, bl);
encode(timeout_seconds, bl);
encode(addr, bl, features);
ENCODE_FINISH(bl);
}
void watch_info_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START_LEGACY_COMPAT_LEN(4, 3, 3, bl);
decode(cookie, bl);
if (struct_v < 2) {
uint64_t ver;
decode(ver, bl);
}
decode(timeout_seconds, bl);
if (struct_v >= 4) {
decode(addr, bl);
}
DECODE_FINISH(bl);
}
void watch_info_t::dump(Formatter *f) const
{
f->dump_unsigned("cookie", cookie);
f->dump_unsigned("timeout_seconds", timeout_seconds);
f->open_object_section("addr");
addr.dump(f);
f->close_section();
}
void watch_info_t::generate_test_instances(list<watch_info_t*>& o)
{
o.push_back(new watch_info_t);
o.push_back(new watch_info_t);
o.back()->cookie = 123;
o.back()->timeout_seconds = 99;
entity_addr_t ea;
ea.set_type(entity_addr_t::TYPE_LEGACY);
ea.set_nonce(1);
ea.set_family(AF_INET);
ea.set_in4_quad(0, 127);
ea.set_in4_quad(1, 0);
ea.set_in4_quad(2, 1);
ea.set_in4_quad(3, 2);
ea.set_port(2);
o.back()->addr = ea;
}
// -- chunk_info_t --
void chunk_info_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(1, 1, bl);
encode(offset, bl);
encode(length, bl);
encode(oid, bl);
__u32 _flags = flags;
encode(_flags, bl);
ENCODE_FINISH(bl);
}
void chunk_info_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(1, bl);
decode(offset, bl);
decode(length, bl);
decode(oid, bl);
__u32 _flags;
decode(_flags, bl);
flags = (cflag_t)_flags;
DECODE_FINISH(bl);
}
void chunk_info_t::dump(Formatter *f) const
{
f->dump_unsigned("length", length);
f->open_object_section("oid");
oid.dump(f);
f->close_section();
f->dump_unsigned("flags", flags);
}
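// Chunks with a fingerprint compare equal by fingerprint oid name alone;
// otherwise offset, length and oid name must all match.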
bool chunk_info_t::operator==(const chunk_info_t& cit) const
{
if (has_fingerprint()) {
if (oid.oid.name == cit.oid.oid.name) {
return true;
}
} else {
if (offset == cit.offset && length == cit.length &&
oid.oid.name == cit.oid.oid.name) {
return true;
}
}
return false;
}
bool operator==(const std::pair<const long unsigned int, chunk_info_t> & l,
const std::pair<const long unsigned int, chunk_info_t> & r)
{
return l.first == r.first &&
l.second == r.second;
}
ostream& operator<<(ostream& out, const chunk_info_t& ci)
{
return out << "(len: " << ci.length << " oid: " << ci.oid
<< " offset: " << ci.offset
<< " flags: " << ci.get_flag_string(ci.flags) << ")";
}
// -- object_manifest_t --
std::ostream& operator<<(std::ostream& out, const object_ref_delta_t & ci)
{
return out << ci.ref_delta << std::endl;
}
void object_manifest_t::calc_refs_to_inc_on_set(
const object_manifest_t* _g,
const object_manifest_t* _l,
object_ref_delta_t &refs) const
{
  /* avoid incrementing the same reference on adjacent clones */
auto iter = chunk_map.begin();
auto find_chunk = [](decltype(iter) &i, const object_manifest_t* cur)
-> bool {
if (cur) {
auto c = cur->chunk_map.find(i->first);
if (c != cur->chunk_map.end() && c->second == i->second) {
return true;
}
}
return false;
};
/* If at least a same chunk exists on either _g or _l, do not increment
* the reference
*
* head: [0, 2) ccc, [6, 2) bbb, [8, 2) ccc
* 20: [0, 2) aaa, <- set_chunk
* 30: [0, 2) abc, [6, 2) bbb, [8, 2) ccc
   * --> increment the reference
*
* head: [0, 2) ccc, [6, 2) bbb, [8, 2) ccc
* 20: [0, 2) ccc, <- set_chunk
* 30: [0, 2) abc, [6, 2) bbb, [8, 2) ccc
* --> do not need to increment
*
* head: [0, 2) ccc, [6, 2) bbb, [8, 2) ccc
* 20: [0, 2) ccc, <- set_chunk
* 30: [0, 2) ccc, [6, 2) bbb, [8, 2) ccc
* --> decrement the reference of ccc
*
*/
for (; iter != chunk_map.end(); ++iter) {
auto found_g = find_chunk(iter, _g);
auto found_l = find_chunk(iter, _l);
if (!found_g && !found_l) {
refs.inc_ref(iter->second.oid);
} else if (found_g && found_l) {
refs.dec_ref(iter->second.oid);
}
}
}
void object_manifest_t::calc_refs_to_drop_on_modify(
const object_manifest_t* _l,
const ObjectCleanRegions& clean_regions,
object_ref_delta_t &refs) const
{
for (auto &p : chunk_map) {
if (!clean_regions.is_clean_region(p.first, p.second.length)) {
// has previous snapshot
if (_l) {
/*
* Let's assume that there is a manifest snapshotted object which has three chunks
* head: [0, 2) aaa, [6, 2) bbb, [8, 2) ccc
* 20: [0, 2) aaa, [6, 2) bbb, [8, 2) ccc
*
* If we modify [6, 2) at head, we shouldn't decrement bbb's refcount because
* 20 has the reference for bbb. Therefore, we only drop the reference if two chunks
* (head: [6, 2) and 20: [6, 2)) are different.
*
*/
auto c = _l->chunk_map.find(p.first);
if (c != _l->chunk_map.end()) {
if (p.second == c->second) {
continue;
}
}
refs.dec_ref(p.second.oid);
} else {
// decrement the reference of the updated chunks if the manifest object has no snapshot
refs.dec_ref(p.second.oid);
}
}
}
}
void object_manifest_t::calc_refs_to_drop_on_removal(
const object_manifest_t* _g,
const object_manifest_t* _l,
object_ref_delta_t &refs) const
{
/* At a high level, the rule is that consecutive clones with the same reference
* at the same offset share a reference. As such, removing *this may result
* in removing references in two cases:
* 1) *this has a reference which it shares with neither _g nor _l
* 2) _g and _l have a reference which they share with each other but not
* *this.
*
* For a particular offset, both 1 and 2 can happen.
*
* Notably, this means that to evaluate the reference change from removing
* the object with *this, we only need to look at the two adjacent clones.
*/
  // Paper over possibly missing _g or _l -- a null pointer is semantically
  // the same as an empty chunk_map
static const object_manifest_t empty;
const object_manifest_t &g = _g ? *_g : empty;
const object_manifest_t &l = _l ? *_l : empty;
auto giter = g.chunk_map.begin();
auto iter = chunk_map.begin();
auto liter = l.chunk_map.begin();
// Translate iter, map pair to the current offset, end() -> max
auto get_offset = [](decltype(iter) &i, const object_manifest_t &manifest)
-> uint64_t {
return i == manifest.chunk_map.end() ?
std::numeric_limits<uint64_t>::max() : i->first;
};
/* If current matches the offset at iter, returns the chunk at *iter
* and increments iter. Otherwise, returns nullptr.
*
* current will always be derived from the min of *giter, *iter, and
* *liter on each cycle, so the result will be that each loop iteration
   * will pick up all chunks at the offset being considered, each offset
* will be considered once, and all offsets will be considered.
*/
auto get_chunk = [](
uint64_t current, decltype(iter) &i, const object_manifest_t &manifest)
-> const chunk_info_t * {
if (i == manifest.chunk_map.end() || current != i->first) {
return nullptr;
} else {
return &(i++)->second;
}
};
while (giter != g.chunk_map.end() ||
iter != chunk_map.end() ||
liter != l.chunk_map.end()) {
auto current = std::min(
std::min(get_offset(giter, g), get_offset(iter, *this)),
get_offset(liter, l));
auto gchunk = get_chunk(current, giter, g);
auto chunk = get_chunk(current, iter, *this);
auto lchunk = get_chunk(current, liter, l);
if (gchunk && lchunk && *gchunk == *lchunk &&
(!chunk || *gchunk != *chunk)) {
      // case 2 from above: l and g match, chunk does not
refs.dec_ref(gchunk->oid);
}
if (chunk &&
(!gchunk || chunk->oid != gchunk->oid) &&
(!lchunk || chunk->oid != lchunk->oid)) {
      // case 1 from above: *this matches neither
refs.dec_ref(chunk->oid);
}
}
}
void object_manifest_t::encode(ceph::buffer::list& bl) const
{
ENCODE_START(1, 1, bl);
encode(type, bl);
switch (type) {
case TYPE_NONE: break;
case TYPE_REDIRECT:
encode(redirect_target, bl);
break;
case TYPE_CHUNKED:
encode(chunk_map, bl);
break;
default:
ceph_abort();
}
ENCODE_FINISH(bl);
}
void object_manifest_t::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(1, bl);
decode(type, bl);
switch (type) {
case TYPE_NONE: break;
case TYPE_REDIRECT:
decode(redirect_target, bl);
break;
case TYPE_CHUNKED:
decode(chunk_map, bl);
break;
default:
ceph_abort();
}
DECODE_FINISH(bl);
}
void object_manifest_t::dump(Formatter *f) const
{
f->dump_unsigned("type", type);
if (type == TYPE_REDIRECT) {
f->open_object_section("redirect_target");
redirect_target.dump(f);
f->close_section();
} else if (type == TYPE_CHUNKED) {
f->open_array_section("chunk_map");
for (auto& p : chunk_map) {
f->open_object_section("chunk");
f->dump_unsigned("offset", p.first);
p.second.dump(f);
f->close_section();
}
f->close_section();
}
}
void object_manifest_t::generate_test_instances(list<object_manifest_t*>& o)
{
o.push_back(new object_manifest_t());
o.back()->type = TYPE_REDIRECT;
}
ostream& operator<<(ostream& out, const object_manifest_t& om)
{
out << "manifest(" << om.get_type_name();
if (om.is_redirect()) {
out << " " << om.redirect_target;
} else if (om.is_chunked()) {
out << " " << om.chunk_map;
}
out << ")";
return out;
}
// -- object_info_t --
void object_info_t::copy_user_bits(const object_info_t& other)
{
// these bits are copied from head->clone.
size = other.size;
mtime = other.mtime;
local_mtime = other.local_mtime;
last_reqid = other.last_reqid;
truncate_seq = other.truncate_seq;
truncate_size = other.truncate_size;
flags = other.flags;
user_version = other.user_version;
data_digest = other.data_digest;
omap_digest = other.omap_digest;
}
void object_info_t::encode(ceph::buffer::list& bl, uint64_t features) const
{
object_locator_t myoloc(soid);
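  // build the legacy watchers map (keyed by entity name only) for pre-v11
  // decoders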
map<entity_name_t, watch_info_t> old_watchers;
for (auto i = watchers.cbegin(); i != watchers.cend(); ++i) {
old_watchers.insert(make_pair(i->first.second, i->second));
}
ENCODE_START(17, 8, bl);
encode(soid, bl);
encode(myoloc, bl); //Retained for compatibility
encode((__u32)0, bl); // was category, no longer used
encode(version, bl);
encode(prior_version, bl);
encode(last_reqid, bl);
encode(size, bl);
encode(mtime, bl);
if (soid.snap == CEPH_NOSNAP)
encode(osd_reqid_t(), bl); // used to be wrlock_by
else
encode((uint32_t)0, bl); // was legacy_snaps
encode(truncate_seq, bl);
encode(truncate_size, bl);
encode(is_lost(), bl);
encode(old_watchers, bl, features);
/* shenanigans to avoid breaking backwards compatibility in the disk format.
* When we can, switch this out for simply putting the version_t on disk. */
eversion_t user_eversion(0, user_version);
encode(user_eversion, bl);
encode(test_flag(FLAG_USES_TMAP), bl);
encode(watchers, bl, features);
__u32 _flags = flags;
encode(_flags, bl);
encode(local_mtime, bl);
encode(data_digest, bl);
encode(omap_digest, bl);
encode(expected_object_size, bl);
encode(expected_write_size, bl);
encode(alloc_hint_flags, bl);
if (has_manifest()) {
encode(manifest, bl);
}
ENCODE_FINISH(bl);
}
void object_info_t::decode(ceph::buffer::list::const_iterator& bl)
{
object_locator_t myoloc;
DECODE_START_LEGACY_COMPAT_LEN(17, 8, 8, bl);
map<entity_name_t, watch_info_t> old_watchers;
decode(soid, bl);
decode(myoloc, bl);
{
string category;
decode(category, bl); // no longer used
}
decode(version, bl);
decode(prior_version, bl);
decode(last_reqid, bl);
decode(size, bl);
decode(mtime, bl);
if (soid.snap == CEPH_NOSNAP) {
osd_reqid_t wrlock_by;
decode(wrlock_by, bl);
} else {
vector<snapid_t> legacy_snaps;
decode(legacy_snaps, bl);
}
decode(truncate_seq, bl);
decode(truncate_size, bl);
// if this is struct_v >= 13, we will overwrite this
// below since this field is just here for backwards
// compatibility
__u8 lo;
decode(lo, bl);
flags = (flag_t)lo;
decode(old_watchers, bl);
eversion_t user_eversion;
decode(user_eversion, bl);
user_version = user_eversion.version;
if (struct_v >= 9) {
bool uses_tmap = false;
decode(uses_tmap, bl);
if (uses_tmap)
set_flag(FLAG_USES_TMAP);
} else {
set_flag(FLAG_USES_TMAP);
}
if (struct_v < 10)
soid.pool = myoloc.pool;
if (struct_v >= 11) {
decode(watchers, bl);
} else {
for (auto i = old_watchers.begin(); i != old_watchers.end(); ++i) {
watchers.insert(
make_pair(
make_pair(i->second.cookie, i->first), i->second));
}
}
if (struct_v >= 13) {
__u32 _flags;
decode(_flags, bl);
flags = (flag_t)_flags;
}
if (struct_v >= 14) {
decode(local_mtime, bl);
} else {
local_mtime = utime_t();
}
if (struct_v >= 15) {
decode(data_digest, bl);
decode(omap_digest, bl);
} else {
data_digest = omap_digest = -1;
clear_flag(FLAG_DATA_DIGEST);
clear_flag(FLAG_OMAP_DIGEST);
}
if (struct_v >= 16) {
decode(expected_object_size, bl);
decode(expected_write_size, bl);
decode(alloc_hint_flags, bl);
} else {
expected_object_size = 0;
expected_write_size = 0;
alloc_hint_flags = 0;
}
if (struct_v >= 17) {
if (has_manifest()) {
decode(manifest, bl);
}
}
DECODE_FINISH(bl);
}
void object_info_t::dump(Formatter *f) const
{
f->open_object_section("oid");
soid.dump(f);
f->close_section();
f->dump_stream("version") << version;
f->dump_stream("prior_version") << prior_version;
f->dump_stream("last_reqid") << last_reqid;
f->dump_unsigned("user_version", user_version);
f->dump_unsigned("size", size);
f->dump_stream("mtime") << mtime;
f->dump_stream("local_mtime") << local_mtime;
f->dump_unsigned("lost", (int)is_lost());
vector<string> sv = get_flag_vector(flags);
f->open_array_section("flags");
for (const auto& str: sv) {
f->dump_string("flags", str);
}
f->close_section();
f->dump_unsigned("truncate_seq", truncate_seq);
f->dump_unsigned("truncate_size", truncate_size);
f->dump_format("data_digest", "0x%08x", data_digest);
f->dump_format("omap_digest", "0x%08x", omap_digest);
f->dump_unsigned("expected_object_size", expected_object_size);
f->dump_unsigned("expected_write_size", expected_write_size);
f->dump_unsigned("alloc_hint_flags", alloc_hint_flags);
f->dump_object("manifest", manifest);
f->open_object_section("watchers");
for (auto p = watchers.cbegin(); p != watchers.cend(); ++p) {
CachedStackStringStream css;
*css << p->first.second;
f->open_object_section(css->strv());
p->second.dump(f);
f->close_section();
}
f->close_section();
}
void object_info_t::generate_test_instances(list<object_info_t*>& o)
{
o.push_back(new object_info_t());
// fixme
}
ostream& operator<<(ostream& out, const object_info_t& oi)
{
out << oi.soid << "(" << oi.version
<< " " << oi.last_reqid;
if (oi.flags)
out << " " << oi.get_flag_string();
out << " s " << oi.size;
out << " uv " << oi.user_version;
if (oi.is_data_digest())
out << " dd " << std::hex << oi.data_digest << std::dec;
if (oi.is_omap_digest())
out << " od " << std::hex << oi.omap_digest << std::dec;
out << " alloc_hint [" << oi.expected_object_size
<< " " << oi.expected_write_size
<< " " << oi.alloc_hint_flags << "]";
if (oi.has_manifest())
out << " " << oi.manifest;
out << ")";
return out;
}
// -- ObjectRecovery --
void ObjectRecoveryProgress::encode(ceph::buffer::list &bl) const
{
ENCODE_START(1, 1, bl);
encode(first, bl);
encode(data_complete, bl);
encode(data_recovered_to, bl);
encode(omap_recovered_to, bl);
encode(omap_complete, bl);
ENCODE_FINISH(bl);
}
void ObjectRecoveryProgress::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(first, bl);
decode(data_complete, bl);
decode(data_recovered_to, bl);
decode(omap_recovered_to, bl);
decode(omap_complete, bl);
DECODE_FINISH(bl);
}
ostream &operator<<(ostream &out, const ObjectRecoveryProgress &prog)
{
return prog.print(out);
}
void ObjectRecoveryProgress::generate_test_instances(
list<ObjectRecoveryProgress*>& o)
{
o.push_back(new ObjectRecoveryProgress);
o.back()->first = false;
o.back()->data_complete = true;
o.back()->omap_complete = true;
o.back()->data_recovered_to = 100;
o.push_back(new ObjectRecoveryProgress);
o.back()->first = true;
o.back()->data_complete = false;
o.back()->omap_complete = false;
o.back()->data_recovered_to = 0;
}
ostream &ObjectRecoveryProgress::print(ostream &out) const
{
return out << "ObjectRecoveryProgress("
<< ( first ? "" : "!" ) << "first, "
<< "data_recovered_to:" << data_recovered_to
<< ", data_complete:" << ( data_complete ? "true" : "false" )
<< ", omap_recovered_to:" << omap_recovered_to
<< ", omap_complete:" << ( omap_complete ? "true" : "false" )
<< ", error:" << ( error ? "true" : "false" )
<< ")";
}
void ObjectRecoveryProgress::dump(Formatter *f) const
{
f->dump_int("first?", first);
f->dump_int("data_complete?", data_complete);
f->dump_unsigned("data_recovered_to", data_recovered_to);
f->dump_int("omap_complete?", omap_complete);
f->dump_string("omap_recovered_to", omap_recovered_to);
}
void ObjectRecoveryInfo::encode(ceph::buffer::list &bl, uint64_t features) const
{
ENCODE_START(3, 1, bl);
encode(soid, bl);
encode(version, bl);
encode(size, bl);
encode(oi, bl, features);
encode(ss, bl);
encode(copy_subset, bl);
encode(clone_subset, bl);
encode(object_exist, bl);
ENCODE_FINISH(bl);
}
void ObjectRecoveryInfo::decode(ceph::buffer::list::const_iterator &bl,
int64_t pool)
{
DECODE_START(3, bl);
decode(soid, bl);
decode(version, bl);
decode(size, bl);
decode(oi, bl);
decode(ss, bl);
decode(copy_subset, bl);
decode(clone_subset, bl);
if (struct_v > 2)
decode(object_exist, bl);
else
object_exist = false;
DECODE_FINISH(bl);
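  // struct_v < 2 predates per-pool hobject_t; backfill the pool into soid
  // and the clone_subset keys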
if (struct_v < 2) {
if (!soid.is_max() && soid.pool == -1)
soid.pool = pool;
map<hobject_t, interval_set<uint64_t>> tmp;
tmp.swap(clone_subset);
for (auto i = tmp.begin(); i != tmp.end(); ++i) {
hobject_t first(i->first);
if (!first.is_max() && first.pool == -1)
first.pool = pool;
clone_subset[first].swap(i->second);
}
}
}
void ObjectRecoveryInfo::generate_test_instances(
list<ObjectRecoveryInfo*>& o)
{
o.push_back(new ObjectRecoveryInfo);
o.back()->soid = hobject_t(sobject_t("key", CEPH_NOSNAP));
o.back()->version = eversion_t(0,0);
o.back()->size = 100;
o.back()->object_exist = false;
}
void ObjectRecoveryInfo::dump(Formatter *f) const
{
f->dump_stream("object") << soid;
f->dump_stream("at_version") << version;
f->dump_stream("size") << size;
{
f->open_object_section("object_info");
oi.dump(f);
f->close_section();
}
{
f->open_object_section("snapset");
ss.dump(f);
f->close_section();
}
f->dump_stream("copy_subset") << copy_subset;
f->dump_stream("clone_subset") << clone_subset;
f->dump_stream("object_exist") << object_exist;
}
ostream& operator<<(ostream& out, const ObjectRecoveryInfo &inf)
{
return inf.print(out);
}
ostream &ObjectRecoveryInfo::print(ostream &out) const
{
return out << "ObjectRecoveryInfo("
<< soid << "@" << version
<< ", size: " << size
<< ", copy_subset: " << copy_subset
<< ", clone_subset: " << clone_subset
<< ", snapset: " << ss
<< ", object_exist: " << object_exist
<< ")";
}
// -- PushReplyOp --
void PushReplyOp::generate_test_instances(list<PushReplyOp*> &o)
{
o.push_back(new PushReplyOp);
o.push_back(new PushReplyOp);
o.back()->soid = hobject_t(sobject_t("asdf", 2));
o.push_back(new PushReplyOp);
o.back()->soid = hobject_t(sobject_t("asdf", CEPH_NOSNAP));
}
void PushReplyOp::encode(ceph::buffer::list &bl) const
{
ENCODE_START(1, 1, bl);
encode(soid, bl);
ENCODE_FINISH(bl);
}
void PushReplyOp::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(soid, bl);
DECODE_FINISH(bl);
}
void PushReplyOp::dump(Formatter *f) const
{
f->dump_stream("soid") << soid;
}
ostream &PushReplyOp::print(ostream &out) const
{
return out
<< "PushReplyOp(" << soid
<< ")";
}
ostream& operator<<(ostream& out, const PushReplyOp &op)
{
return op.print(out);
}
uint64_t PushReplyOp::cost(CephContext *cct) const
{
if (cct->_conf->osd_op_queue == "mclock_scheduler") {
/* In general, we really never want to throttle PushReplyOp messages.
* As long as the object is smaller than osd_recovery_max_chunk (8M at
* time of writing this comment, so this is basically always true),
* processing the PushReplyOp does not cost any further IO and simply
     * permits the object to be written to once more.
*
* In the unlikely event that the object is larger than
* osd_recovery_max_chunk (again, 8M at the moment, so never for common
* configurations of rbd and virtually never for cephfs and rgw),
* we *still* want to push out the next portion immediately so that we can
* release the object for IO.
*
* The throttling for this operation on the primary occurs at the point
* where we queue the PGRecoveryContext which calls into recover_missing
* and recover_backfill to initiate pushes.
* See OSD::queue_recovery_context.
*/
return 1;
} else {
/* We retain this legacy behavior for WeightedPriorityQueue. It seems to
* require very large costs for several messages in order to do any
* meaningful amount of throttling. This branch should be removed after
* Reef.
*/
return cct->_conf->osd_push_per_object_cost +
cct->_conf->osd_recovery_max_chunk;
}
}
// -- PullOp --
void PullOp::generate_test_instances(list<PullOp*> &o)
{
o.push_back(new PullOp);
o.push_back(new PullOp);
o.back()->soid = hobject_t(sobject_t("asdf", 2));
o.back()->recovery_info.version = eversion_t(3, 10);
o.push_back(new PullOp);
o.back()->soid = hobject_t(sobject_t("asdf", CEPH_NOSNAP));
o.back()->recovery_info.version = eversion_t(0, 0);
}
void PullOp::encode(ceph::buffer::list &bl, uint64_t features) const
{
ENCODE_START(1, 1, bl);
encode(soid, bl);
encode(recovery_info, bl, features);
encode(recovery_progress, bl);
ENCODE_FINISH(bl);
}
void PullOp::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(soid, bl);
decode(recovery_info, bl);
decode(recovery_progress, bl);
DECODE_FINISH(bl);
}
void PullOp::dump(Formatter *f) const
{
f->dump_stream("soid") << soid;
{
f->open_object_section("recovery_info");
recovery_info.dump(f);
f->close_section();
}
{
f->open_object_section("recovery_progress");
recovery_progress.dump(f);
f->close_section();
}
}
ostream &PullOp::print(ostream &out) const
{
return out
<< "PullOp(" << soid
<< ", recovery_info: " << recovery_info
<< ", recovery_progress: " << recovery_progress
<< ")";
}
ostream& operator<<(ostream& out, const PullOp &op)
{
return op.print(out);
}
uint64_t PullOp::cost(CephContext *cct) const
{
if (cct->_conf->osd_op_queue == "mclock_scheduler") {
return std::clamp<uint64_t>(
recovery_progress.estimate_remaining_data_to_recover(recovery_info),
1,
cct->_conf->osd_recovery_max_chunk);
} else {
/* We retain this legacy behavior for WeightedPriorityQueue. It seems to
* require very large costs for several messages in order to do any
* meaningful amount of throttling. This branch should be removed after
* Reef.
*/
return cct->_conf->osd_push_per_object_cost +
cct->_conf->osd_recovery_max_chunk;
}
}
// -- PushOp --
void PushOp::generate_test_instances(list<PushOp*> &o)
{
o.push_back(new PushOp);
o.push_back(new PushOp);
o.back()->soid = hobject_t(sobject_t("asdf", 2));
o.back()->version = eversion_t(3, 10);
o.push_back(new PushOp);
o.back()->soid = hobject_t(sobject_t("asdf", CEPH_NOSNAP));
o.back()->version = eversion_t(0, 0);
}
void PushOp::encode(ceph::buffer::list &bl, uint64_t features) const
{
ENCODE_START(1, 1, bl);
encode(soid, bl);
encode(version, bl);
encode(data, bl);
encode(data_included, bl);
encode(omap_header, bl);
encode(omap_entries, bl);
encode(attrset, bl);
encode(recovery_info, bl, features);
encode(after_progress, bl);
encode(before_progress, bl);
ENCODE_FINISH(bl);
}
void PushOp::decode(ceph::buffer::list::const_iterator &bl)
{
DECODE_START(1, bl);
decode(soid, bl);
decode(version, bl);
decode(data, bl);
decode(data_included, bl);
decode(omap_header, bl);
decode(omap_entries, bl);
decode(attrset, bl);
decode(recovery_info, bl);
decode(after_progress, bl);
decode(before_progress, bl);
DECODE_FINISH(bl);
}
void PushOp::dump(Formatter *f) const
{
f->dump_stream("soid") << soid;
f->dump_stream("version") << version;
f->dump_int("data_len", data.length());
f->dump_stream("data_included") << data_included;
f->dump_int("omap_header_len", omap_header.length());
f->dump_int("omap_entries_len", omap_entries.size());
f->dump_int("attrset_len", attrset.size());
{
f->open_object_section("recovery_info");
recovery_info.dump(f);
f->close_section();
}
{
f->open_object_section("after_progress");
after_progress.dump(f);
f->close_section();
}
{
f->open_object_section("before_progress");
before_progress.dump(f);
f->close_section();
}
}
ostream &PushOp::print(ostream &out) const
{
return out
<< "PushOp(" << soid
<< ", version: " << version
<< ", data_included: " << data_included
<< ", data_size: " << data.length()
<< ", omap_header_size: " << omap_header.length()
<< ", omap_entries_size: " << omap_entries.size()
<< ", attrset_size: " << attrset.size()
<< ", recovery_info: " << recovery_info
<< ", after_progress: " << after_progress
<< ", before_progress: " << before_progress
<< ")";
}
ostream& operator<<(ostream& out, const PushOp &op)
{
return op.print(out);
}
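// Cost is the number of bytes covered by the included data extents plus the
// omap value bytes, plus a fixed per-object push cost.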
uint64_t PushOp::cost(CephContext *cct) const
{
uint64_t cost = data_included.size();
for (auto i = omap_entries.cbegin(); i != omap_entries.cend(); ++i) {
cost += i->second.length();
}
cost += cct->_conf->osd_push_per_object_cost;
return cost;
}
// -- ScrubMap --
void ScrubMap::merge_incr(const ScrubMap &l)
{
ceph_assert(valid_through == l.incr_since);
valid_through = l.valid_through;
for (auto p = l.objects.cbegin(); p != l.objects.cend(); ++p){
if (p->second.negative) {
auto q = objects.find(p->first);
if (q != objects.end()) {
objects.erase(q);
}
} else {
objects[p->first] = p->second;
}
}
}
void ScrubMap::encode(ceph::buffer::list& bl) const
{
ENCODE_START(3, 2, bl);
encode(objects, bl);
encode((__u32)0, bl); // used to be attrs; now deprecated
ceph::buffer::list old_logbl; // not used
encode(old_logbl, bl);
encode(valid_through, bl);
encode(incr_since, bl);
ENCODE_FINISH(bl);
}
void ScrubMap::decode(ceph::buffer::list::const_iterator& bl, int64_t pool)
{
DECODE_START_LEGACY_COMPAT_LEN(3, 2, 2, bl);
decode(objects, bl);
{
map<string,string> attrs; // deprecated
decode(attrs, bl);
}
ceph::buffer::list old_logbl; // not used
decode(old_logbl, bl);
decode(valid_through, bl);
decode(incr_since, bl);
DECODE_FINISH(bl);
// handle hobject_t upgrade
if (struct_v < 3) {
map<hobject_t, object> tmp;
tmp.swap(objects);
for (auto i = tmp.begin(); i != tmp.end(); ++i) {
hobject_t first(i->first);
if (!first.is_max() && first.pool == -1)
first.pool = pool;
objects[first] = i->second;
}
}
}
void ScrubMap::dump(Formatter *f) const
{
f->dump_stream("valid_through") << valid_through;
f->dump_stream("incremental_since") << incr_since;
f->open_array_section("objects");
for (auto p = objects.cbegin(); p != objects.cend(); ++p) {
f->open_object_section("object");
f->dump_string("name", p->first.oid.name);
f->dump_unsigned("hash", p->first.get_hash());
f->dump_string("key", p->first.get_key());
f->dump_int("snapid", p->first.snap);
p->second.dump(f);
f->close_section();
}
f->close_section();
}
void ScrubMap::generate_test_instances(list<ScrubMap*>& o)
{
o.push_back(new ScrubMap);
o.push_back(new ScrubMap);
o.back()->valid_through = eversion_t(1, 2);
o.back()->incr_since = eversion_t(3, 4);
list<object*> obj;
object::generate_test_instances(obj);
o.back()->objects[hobject_t(object_t("foo"), "fookey", 123, 456, 0, "")] = *obj.back();
obj.pop_back();
o.back()->objects[hobject_t(object_t("bar"), string(), 123, 456, 0, "")] = *obj.back();
}
// -- ScrubMap::object --
void ScrubMap::object::encode(ceph::buffer::list& bl) const
{
bool compat_read_error = read_error || ec_hash_mismatch || ec_size_mismatch;
ENCODE_START(10, 7, bl);
encode(size, bl);
encode(negative, bl);
encode(attrs, bl);
encode(digest, bl);
encode(digest_present, bl);
encode((uint32_t)0, bl); // obsolete nlinks
encode((uint32_t)0, bl); // snapcolls
encode(omap_digest, bl);
encode(omap_digest_present, bl);
encode(compat_read_error, bl);
encode(stat_error, bl);
encode(read_error, bl);
encode(ec_hash_mismatch, bl);
encode(ec_size_mismatch, bl);
encode(large_omap_object_found, bl);
encode(large_omap_object_key_count, bl);
encode(large_omap_object_value_size, bl);
encode(object_omap_bytes, bl);
encode(object_omap_keys, bl);
ENCODE_FINISH(bl);
}
void ScrubMap::object::decode(ceph::buffer::list::const_iterator& bl)
{
DECODE_START(10, bl);
decode(size, bl);
bool tmp, compat_read_error = false;
decode(tmp, bl);
negative = tmp;
decode(attrs, bl);
decode(digest, bl);
decode(tmp, bl);
digest_present = tmp;
{
uint32_t nlinks;
decode(nlinks, bl);
set<snapid_t> snapcolls;
decode(snapcolls, bl);
}
decode(omap_digest, bl);
decode(tmp, bl);
omap_digest_present = tmp;
decode(compat_read_error, bl);
decode(tmp, bl);
stat_error = tmp;
if (struct_v >= 8) {
decode(tmp, bl);
read_error = tmp;
decode(tmp, bl);
ec_hash_mismatch = tmp;
decode(tmp, bl);
ec_size_mismatch = tmp;
}
// If older encoder found a read_error, set read_error
if (compat_read_error && !read_error && !ec_hash_mismatch && !ec_size_mismatch)
read_error = true;
if (struct_v >= 9) {
decode(tmp, bl);
large_omap_object_found = tmp;
decode(large_omap_object_key_count, bl);
decode(large_omap_object_value_size, bl);
}
if (struct_v >= 10) {
decode(object_omap_bytes, bl);
decode(object_omap_keys, bl);
}
DECODE_FINISH(bl);
}
void ScrubMap::object::dump(Formatter *f) const
{
f->dump_int("size", size);
f->dump_int("negative", negative);
f->open_array_section("attrs");
for (auto p = attrs.cbegin(); p != attrs.cend(); ++p) {
f->open_object_section("attr");
f->dump_string("name", p->first);
f->dump_int("length", p->second.length());
f->close_section();
}
f->close_section();
}
void ScrubMap::object::generate_test_instances(list<object*>& o)
{
o.push_back(new object);
o.push_back(new object);
o.back()->negative = true;
o.push_back(new object);
o.back()->size = 123;
o.back()->attrs["foo"] = ceph::buffer::copy("foo", 3);
o.back()->attrs["bar"] = ceph::buffer::copy("barval", 6);
}
// -- OSDOp --
ostream& operator<<(ostream& out, const OSDOp& op)
{
out << ceph_osd_op_name(op.op.op);
if (ceph_osd_op_type_data(op.op.op)) {
// data extent
switch (op.op.op) {
case CEPH_OSD_OP_ASSERT_VER:
out << " v" << op.op.assert_ver.ver;
break;
case CEPH_OSD_OP_TRUNCATE:
out << " " << op.op.extent.offset;
break;
case CEPH_OSD_OP_MASKTRUNC:
case CEPH_OSD_OP_TRIMTRUNC:
out << " " << op.op.extent.truncate_seq << "@"
<< (int64_t)op.op.extent.truncate_size;
break;
case CEPH_OSD_OP_ROLLBACK:
out << " " << snapid_t(op.op.snap.snapid);
break;
case CEPH_OSD_OP_WATCH:
out << " " << ceph_osd_watch_op_name(op.op.watch.op)
<< " cookie " << op.op.watch.cookie;
if (op.op.watch.gen)
out << " gen " << op.op.watch.gen;
break;
case CEPH_OSD_OP_NOTIFY:
out << " cookie " << op.op.notify.cookie;
break;
case CEPH_OSD_OP_COPY_GET:
out << " max " << op.op.copy_get.max;
break;
case CEPH_OSD_OP_COPY_FROM:
out << " ver " << op.op.copy_from.src_version;
break;
case CEPH_OSD_OP_SETALLOCHINT:
out << " object_size " << op.op.alloc_hint.expected_object_size
<< " write_size " << op.op.alloc_hint.expected_write_size;
break;
case CEPH_OSD_OP_READ:
case CEPH_OSD_OP_SPARSE_READ:
case CEPH_OSD_OP_SYNC_READ:
case CEPH_OSD_OP_WRITE:
case CEPH_OSD_OP_WRITEFULL:
case CEPH_OSD_OP_ZERO:
case CEPH_OSD_OP_APPEND:
case CEPH_OSD_OP_MAPEXT:
case CEPH_OSD_OP_CMPEXT:
out << " " << op.op.extent.offset << "~" << op.op.extent.length;
if (op.op.extent.truncate_seq)
out << " [" << op.op.extent.truncate_seq << "@"
<< (int64_t)op.op.extent.truncate_size << "]";
if (op.op.flags)
out << " [" << ceph_osd_op_flag_string(op.op.flags) << "]";
default:
// don't show any arg info
break;
}
} else if (ceph_osd_op_type_attr(op.op.op)) {
// xattr name
if (op.op.xattr.name_len && op.indata.length()) {
out << " ";
op.indata.write(0, op.op.xattr.name_len, out);
}
if (op.op.xattr.value_len)
out << " (" << op.op.xattr.value_len << ")";
if (op.op.op == CEPH_OSD_OP_CMPXATTR)
out << " op " << (int)op.op.xattr.cmp_op
<< " mode " << (int)op.op.xattr.cmp_mode;
} else if (ceph_osd_op_type_exec(op.op.op)) {
// class.method
if (op.op.cls.class_len && op.indata.length()) {
out << " ";
op.indata.write(0, op.op.cls.class_len, out);
out << ".";
op.indata.write(op.op.cls.class_len, op.op.cls.method_len, out);
}
} else if (ceph_osd_op_type_pg(op.op.op)) {
switch (op.op.op) {
case CEPH_OSD_OP_PGLS:
case CEPH_OSD_OP_PGLS_FILTER:
case CEPH_OSD_OP_PGNLS:
case CEPH_OSD_OP_PGNLS_FILTER:
out << " start_epoch " << op.op.pgls.start_epoch;
break;
case CEPH_OSD_OP_PG_HITSET_LS:
break;
case CEPH_OSD_OP_PG_HITSET_GET:
out << " " << utime_t(op.op.hit_set_get.stamp);
break;
case CEPH_OSD_OP_SCRUBLS:
break;
}
}
if (op.indata.length()) {
out << " in=" << op.indata.length() << "b";
}
if (op.outdata.length()) {
out << " out=" << op.outdata.length() << "b";
}
return out;
}
void OSDOp::split_osd_op_vector_out_data(vector<OSDOp>& ops, ceph::buffer::list& in)
{
auto datap = in.begin();
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].op.payload_len) {
datap.copy(ops[i].op.payload_len, ops[i].outdata);
}
}
}
void OSDOp::merge_osd_op_vector_out_data(vector<OSDOp>& ops, ceph::buffer::list& out)
{
for (unsigned i = 0; i < ops.size(); i++) {
ops[i].op.payload_len = ops[i].outdata.length();
if (ops[i].outdata.length()) {
out.append(ops[i].outdata);
}
}
}
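// Builds the omap key/value updates that persist a PG's metadata: the epoch
// key when dirty, either a compact pg_fast_info_t or the full pg_info_t
// (purged_snaps is stored with the big info), and past_intervals when the
// big info is dirty. A stale fastinfo key is scheduled for removal via
// *key_to_remove when last_update did not move forward.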
int prepare_info_keymap(
CephContext* cct,
map<string,bufferlist> *km,
string *key_to_remove,
epoch_t epoch,
pg_info_t &info,
pg_info_t &last_written_info,
PastIntervals &past_intervals,
bool dirty_big_info,
bool dirty_epoch,
bool try_fast_info,
PerfCounters *logger,
DoutPrefixProvider *dpp)
{
if (dirty_epoch) {
encode(epoch, (*km)[string(epoch_key)]);
}
if (logger)
logger->inc(l_osd_pg_info);
// try to do info efficiently?
if (!dirty_big_info && try_fast_info &&
info.last_update > last_written_info.last_update) {
pg_fast_info_t fast;
fast.populate_from(info);
bool did = fast.try_apply_to(&last_written_info);
ceph_assert(did); // we verified last_update increased above
if (info == last_written_info) {
encode(fast, (*km)[string(fastinfo_key)]);
if (logger)
logger->inc(l_osd_pg_fastinfo);
return 0;
}
if (dpp) {
ldpp_dout(dpp, 30) << __func__ << " fastinfo failed, info:\n";
{
JSONFormatter jf(true);
jf.dump_object("info", info);
jf.flush(*_dout);
}
{
*_dout << "\nlast_written_info:\n";
JSONFormatter jf(true);
jf.dump_object("last_written_info", last_written_info);
jf.flush(*_dout);
}
*_dout << dendl;
}
} else if (info.last_update <= last_written_info.last_update) {
// clean up any potentially stale fastinfo key resulting from last_update
// not moving forwards (e.g., a backwards jump during peering)
*key_to_remove = fastinfo_key;
}
last_written_info = info;
// info. store purged_snaps separately.
interval_set<snapid_t> purged_snaps;
purged_snaps.swap(info.purged_snaps);
encode(info, (*km)[string(info_key)]);
purged_snaps.swap(info.purged_snaps);
if (dirty_big_info) {
// potentially big stuff
bufferlist& bigbl = (*km)[string(biginfo_key)];
encode(past_intervals, bigbl);
encode(info.purged_snaps, bigbl);
//dout(20) << "write_info bigbl " << bigbl.length() << dendl;
if (logger)
logger->inc(l_osd_pg_biginfo);
}
return 0;
}
void create_pg_collection(
ceph::os::Transaction& t, spg_t pgid, int bits)
{
coll_t coll(pgid);
t.create_collection(coll, bits);
}
void init_pg_ondisk(
ceph::os::Transaction& t,
spg_t pgid,
const pg_pool_t *pool)
{
coll_t coll(pgid);
if (pool) {
// Give a hint to the PG collection
bufferlist hint;
uint32_t pg_num = pool->get_pg_num();
uint64_t expected_num_objects_pg = pool->expected_num_objects / pg_num;
encode(pg_num, hint);
encode(expected_num_objects_pg, hint);
uint32_t hint_type = ceph::os::Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS;
t.collection_hint(coll, hint_type, hint);
}
ghobject_t pgmeta_oid(pgid.make_pgmeta_oid());
t.touch(coll, pgmeta_oid);
map<string,bufferlist> values;
__u8 struct_v = pg_latest_struct_v;
encode(struct_v, values[string(infover_key)]);
t.omap_setkeys(coll, pgmeta_oid, values);
}
PGLSFilter::PGLSFilter() : cct(nullptr)
{
}
PGLSFilter::~PGLSFilter()
{
}
int PGLSPlainFilter::init(ceph::bufferlist::const_iterator ¶ms)
{
try {
decode(xattr, params);
decode(val, params);
} catch (ceph::buffer::error &e) {
return -EINVAL;
}
return 0;
}
bool PGLSPlainFilter::filter(const hobject_t& obj,
const ceph::bufferlist& xattr_data) const
{
return xattr_data.contents_equal(val.c_str(), val.size());
}
| 211,086 | 27.625848 | 101 | cc |
null | ceph-main/src/osd/osd_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
* Copyright (C) 2013,2014 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OSD_TYPES_H
#define CEPH_OSD_TYPES_H
#include <atomic>
#include <sstream>
#include <cstdio>
#include <memory>
#include <string_view>
#include <boost/scoped_ptr.hpp>
#include <boost/optional/optional_io.hpp>
#include <boost/variant.hpp>
#include <boost/smart_ptr/local_shared_ptr.hpp>
#include "include/rados/rados_types.hpp"
#include "include/mempool.h"
#include "msg/msg_types.h"
#include "include/compat.h"
#include "include/types.h"
#include "include/utime.h"
#include "include/CompatSet.h"
#include "common/ceph_context.h"
#include "common/histogram.h"
#include "include/interval_set.h"
#include "include/inline_memory.h"
#include "common/Formatter.h"
#include "common/bloom_filter.hpp"
#include "common/hobject.h"
#include "common/snap_types.h"
#include "HitSet.h"
#include "Watch.h"
#include "librados/ListObjectImpl.h"
#include "compressor/Compressor.h"
#include "osd_perf_counters.h"
#define CEPH_OSD_ONDISK_MAGIC "ceph osd volume v026"
#define CEPH_OSD_FEATURE_INCOMPAT_BASE CompatSet::Feature(1, "initial feature set(~v.18)")
#define CEPH_OSD_FEATURE_INCOMPAT_PGINFO CompatSet::Feature(2, "pginfo object")
#define CEPH_OSD_FEATURE_INCOMPAT_OLOC CompatSet::Feature(3, "object locator")
#define CEPH_OSD_FEATURE_INCOMPAT_LEC CompatSet::Feature(4, "last_epoch_clean")
#define CEPH_OSD_FEATURE_INCOMPAT_CATEGORIES CompatSet::Feature(5, "categories")
#define CEPH_OSD_FEATURE_INCOMPAT_HOBJECTPOOL CompatSet::Feature(6, "hobjectpool")
#define CEPH_OSD_FEATURE_INCOMPAT_BIGINFO CompatSet::Feature(7, "biginfo")
#define CEPH_OSD_FEATURE_INCOMPAT_LEVELDBINFO CompatSet::Feature(8, "leveldbinfo")
#define CEPH_OSD_FEATURE_INCOMPAT_LEVELDBLOG CompatSet::Feature(9, "leveldblog")
#define CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER CompatSet::Feature(10, "snapmapper")
#define CEPH_OSD_FEATURE_INCOMPAT_SHARDS CompatSet::Feature(11, "sharded objects")
#define CEPH_OSD_FEATURE_INCOMPAT_HINTS CompatSet::Feature(12, "transaction hints")
#define CEPH_OSD_FEATURE_INCOMPAT_PGMETA CompatSet::Feature(13, "pg meta object")
#define CEPH_OSD_FEATURE_INCOMPAT_MISSING CompatSet::Feature(14, "explicit missing set")
#define CEPH_OSD_FEATURE_INCOMPAT_FASTINFO CompatSet::Feature(15, "fastinfo pg attr")
#define CEPH_OSD_FEATURE_INCOMPAT_RECOVERY_DELETES CompatSet::Feature(16, "deletes in missing set")
#define CEPH_OSD_FEATURE_INCOMPAT_SNAPMAPPER2 CompatSet::Feature(17, "new snapmapper key structure")
/// pool priority range set by user
#define OSD_POOL_PRIORITY_MAX 10
#define OSD_POOL_PRIORITY_MIN -OSD_POOL_PRIORITY_MAX
/// min recovery priority for MBackfillReserve
#define OSD_RECOVERY_PRIORITY_MIN 0
/// base backfill priority for MBackfillReserve
#define OSD_BACKFILL_PRIORITY_BASE 100
/// base backfill priority for MBackfillReserve (degraded PG)
#define OSD_BACKFILL_DEGRADED_PRIORITY_BASE 140
/// base recovery priority for MBackfillReserve
#define OSD_RECOVERY_PRIORITY_BASE 180
/// base backfill priority for MBackfillReserve (inactive PG)
#define OSD_BACKFILL_INACTIVE_PRIORITY_BASE 220
/// base recovery priority for MRecoveryReserve (inactive PG)
#define OSD_RECOVERY_INACTIVE_PRIORITY_BASE 220
/// max manually/automatically set recovery priority for MBackfillReserve
#define OSD_RECOVERY_PRIORITY_MAX 253
/// backfill priority for MBackfillReserve, when forced manually
#define OSD_BACKFILL_PRIORITY_FORCED 254
/// recovery priority for MRecoveryReserve, when forced manually
#define OSD_RECOVERY_PRIORITY_FORCED 255
/// priority for pg deletion when osd is not fullish
#define OSD_DELETE_PRIORITY_NORMAL 179
/// priority for pg deletion when osd is approaching full
#define OSD_DELETE_PRIORITY_FULLISH 219
/// priority when more full
#define OSD_DELETE_PRIORITY_FULL 255
static std::map<int, int> max_prio_map = {
{OSD_BACKFILL_PRIORITY_BASE, OSD_BACKFILL_DEGRADED_PRIORITY_BASE - 1},
{OSD_BACKFILL_DEGRADED_PRIORITY_BASE, OSD_RECOVERY_PRIORITY_BASE - 1},
{OSD_RECOVERY_PRIORITY_BASE, OSD_BACKFILL_INACTIVE_PRIORITY_BASE - 1},
{OSD_RECOVERY_INACTIVE_PRIORITY_BASE, OSD_RECOVERY_PRIORITY_MAX},
{OSD_BACKFILL_INACTIVE_PRIORITY_BASE, OSD_RECOVERY_PRIORITY_MAX}
};
typedef hobject_t collection_list_handle_t;
/// convert a single CEPH_OSD_FLAG_* to a std::string
const char *ceph_osd_flag_name(unsigned flag);
/// convert a single CEPH_OSD_OP_FLAG_* to a std::string
const char *ceph_osd_op_flag_name(unsigned flag);
/// convert CEPH_OSD_FLAG_* op flags to a std::string
std::string ceph_osd_flag_string(unsigned flags);
/// convert CEPH_OSD_OP_FLAG_* op flags to a std::string
std::string ceph_osd_op_flag_string(unsigned flags);
/// convert CEPH_OSD_ALLOC_HINT_FLAG_* op flags to a std::string
std::string ceph_osd_alloc_hint_flag_string(unsigned flags);
typedef std::map<std::string,std::string> osd_alert_list_t;
/// map osd id -> alert_list_t
typedef std::map<int, osd_alert_list_t> osd_alerts_t;
void dump(ceph::Formatter* f, const osd_alerts_t& alerts);
typedef interval_set<
snapid_t,
mempool::osdmap::flat_map> snap_interval_set_t;
/**
* osd request identifier
*
* caller name + incarnation# + tid to unique identify this request.
*/
struct osd_reqid_t {
entity_name_t name; // who
ceph_tid_t tid;
int32_t inc; // incarnation
osd_reqid_t()
: tid(0), inc(0)
{}
osd_reqid_t(const entity_name_t& a, int i, ceph_tid_t t)
: name(a), tid(t), inc(i)
{}
DENC(osd_reqid_t, v, p) {
DENC_START(2, 2, p);
denc(v.name, p);
denc(v.tid, p);
denc(v.inc, p);
DENC_FINISH(p);
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<osd_reqid_t*>& o);
};
WRITE_CLASS_DENC(osd_reqid_t)
struct pg_shard_t {
static const int32_t NO_OSD = 0x7fffffff;
int32_t osd;
shard_id_t shard;
pg_shard_t() : osd(-1), shard(shard_id_t::NO_SHARD) {}
explicit pg_shard_t(int osd) : osd(osd), shard(shard_id_t::NO_SHARD) {}
pg_shard_t(int osd, shard_id_t shard) : osd(osd), shard(shard) {}
bool is_undefined() const {
return osd == -1;
}
std::string get_osd() const { return (osd == NO_OSD ? "NONE" : std::to_string(osd)); }
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const {
f->dump_unsigned("osd", osd);
if (shard != shard_id_t::NO_SHARD) {
f->dump_unsigned("shard", shard);
}
}
auto operator<=>(const pg_shard_t&) const = default;
};
WRITE_CLASS_ENCODER(pg_shard_t)
std::ostream& operator<<(std::ostream &lhs, const pg_shard_t &rhs);
using HobjToShardSetMapping = std::map<hobject_t, std::set<pg_shard_t>>;
class IsPGRecoverablePredicate {
public:
/**
* have encodes the shards available
*/
virtual bool operator()(const std::set<pg_shard_t> &have) const = 0;
virtual ~IsPGRecoverablePredicate() {}
};
class IsPGReadablePredicate {
public:
/**
* have encodes the shards available
*/
virtual bool operator()(const std::set<pg_shard_t> &have) const = 0;
virtual ~IsPGReadablePredicate() {}
};
inline std::ostream& operator<<(std::ostream& out, const osd_reqid_t& r) {
return out << r.name << "." << r.inc << ":" << r.tid;
}
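// Illustrative output (sketch): a request from client.4123 with incarnation 0
// and tid 912 prints as "client.4123.0:912".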
inline bool operator==(const osd_reqid_t& l, const osd_reqid_t& r) {
return (l.name == r.name) && (l.inc == r.inc) && (l.tid == r.tid);
}
inline bool operator!=(const osd_reqid_t& l, const osd_reqid_t& r) {
return (l.name != r.name) || (l.inc != r.inc) || (l.tid != r.tid);
}
inline bool operator<(const osd_reqid_t& l, const osd_reqid_t& r) {
return (l.name < r.name) || (l.inc < r.inc) ||
(l.name == r.name && l.inc == r.inc && l.tid < r.tid);
}
inline bool operator<=(const osd_reqid_t& l, const osd_reqid_t& r) {
return (l.name < r.name) || (l.inc < r.inc) ||
(l.name == r.name && l.inc == r.inc && l.tid <= r.tid);
}
inline bool operator>(const osd_reqid_t& l, const osd_reqid_t& r) { return !(l <= r); }
inline bool operator>=(const osd_reqid_t& l, const osd_reqid_t& r) { return !(l < r); }
namespace std {
template<> struct hash<osd_reqid_t> {
size_t operator()(const osd_reqid_t &r) const {
static hash<uint64_t> H;
return H(r.name.num() ^ r.tid ^ r.inc);
}
};
} // namespace std
// -----
// a locator constrains the placement of an object. mainly, which pool
// does it go in.
struct object_locator_t {
// You specify either the hash or the key -- not both
std::int64_t pool; ///< pool id
std::string key; ///< key string (if non-empty)
std::string nspace; ///< namespace
std::int64_t hash; ///< hash position (if >= 0)
explicit object_locator_t()
: pool(-1), hash(-1) {}
explicit object_locator_t(int64_t po)
: pool(po), hash(-1) {}
explicit object_locator_t(int64_t po, int64_t ps)
: pool(po), hash(ps) {}
explicit object_locator_t(int64_t po, std::string_view ns)
: pool(po), nspace(ns), hash(-1) {}
explicit object_locator_t(int64_t po, std::string_view ns, int64_t ps)
: pool(po), nspace(ns), hash(ps) {}
explicit object_locator_t(int64_t po, std::string_view ns, std::string_view s)
: pool(po), key(s), nspace(ns), hash(-1) {}
explicit object_locator_t(const hobject_t& soid)
: pool(soid.pool), key(soid.get_key()), nspace(soid.nspace), hash(-1) {}
int64_t get_pool() const {
return pool;
}
void clear() {
pool = -1;
key = "";
nspace = "";
hash = -1;
}
bool empty() const {
return pool == -1;
}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<object_locator_t*>& o);
};
WRITE_CLASS_ENCODER(object_locator_t)
inline bool operator==(const object_locator_t& l, const object_locator_t& r) {
return l.pool == r.pool && l.key == r.key && l.nspace == r.nspace && l.hash == r.hash;
}
inline bool operator!=(const object_locator_t& l, const object_locator_t& r) {
return !(l == r);
}
inline std::ostream& operator<<(std::ostream& out, const object_locator_t& loc)
{
out << "@" << loc.pool;
if (loc.nspace.length())
out << ";" << loc.nspace;
if (loc.key.length())
out << ":" << loc.key;
return out;
}
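// Illustrative output (sketch): object_locator_t(3, "ns1", "mykey") prints as
// "@3;ns1:mykey"; with an empty namespace and key it is just "@3".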
struct request_redirect_t {
private:
object_locator_t redirect_locator; ///< this is authoritative
std::string redirect_object; ///< If non-empty, the request goes to this object name
friend std::ostream& operator<<(std::ostream& out, const request_redirect_t& redir);
public:
request_redirect_t() {}
explicit request_redirect_t(const object_locator_t& orig, int64_t rpool) :
redirect_locator(orig) { redirect_locator.pool = rpool; }
explicit request_redirect_t(const object_locator_t& rloc) :
redirect_locator(rloc) {}
explicit request_redirect_t(const object_locator_t& orig,
const std::string& robj) :
redirect_locator(orig), redirect_object(robj) {}
bool empty() const { return redirect_locator.empty() &&
redirect_object.empty(); }
void combine_with_locator(object_locator_t& orig, std::string& obj) const {
orig = redirect_locator;
if (!redirect_object.empty())
obj = redirect_object;
}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<request_redirect_t*>& o);
};
WRITE_CLASS_ENCODER(request_redirect_t)
inline std::ostream& operator<<(std::ostream& out, const request_redirect_t& redir) {
out << "object " << redir.redirect_object << ", locator{" << redir.redirect_locator << "}";
return out;
}
// Internal OSD op flags - set by the OSD based on the op types
enum {
CEPH_OSD_RMW_FLAG_READ = (1 << 1),
CEPH_OSD_RMW_FLAG_WRITE = (1 << 2),
CEPH_OSD_RMW_FLAG_CLASS_READ = (1 << 3),
CEPH_OSD_RMW_FLAG_CLASS_WRITE = (1 << 4),
CEPH_OSD_RMW_FLAG_PGOP = (1 << 5),
CEPH_OSD_RMW_FLAG_CACHE = (1 << 6),
CEPH_OSD_RMW_FLAG_FORCE_PROMOTE = (1 << 7),
CEPH_OSD_RMW_FLAG_SKIP_HANDLE_CACHE = (1 << 8),
CEPH_OSD_RMW_FLAG_SKIP_PROMOTE = (1 << 9),
CEPH_OSD_RMW_FLAG_RWORDERED = (1 << 10),
CEPH_OSD_RMW_FLAG_RETURNVEC = (1 << 11),
CEPH_OSD_RMW_FLAG_READ_DATA = (1 << 12),
};
// pg stuff
#define OSD_SUPERBLOCK_GOBJECT ghobject_t(hobject_t(sobject_t(object_t("osd_superblock"), 0)))
#define OSD_SUPERBLOCK_OMAP_KEY "osd_superblock"
// placement seed (a hash value)
typedef uint32_t ps_t;
// old (v1) pg_t encoding (wrap old struct ceph_pg)
struct old_pg_t {
ceph_pg v;
void encode(ceph::buffer::list& bl) const {
ceph::encode_raw(v, bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
ceph::decode_raw(v, bl);
}
};
WRITE_CLASS_ENCODER(old_pg_t)
// placement group id
struct pg_t {
uint64_t m_pool;
uint32_t m_seed;
pg_t() : m_pool(0), m_seed(0) {}
pg_t(ps_t seed, uint64_t pool) :
m_pool(pool), m_seed(seed) {}
// cppcheck-suppress noExplicitConstructor
pg_t(const ceph_pg& cpg) :
m_pool(cpg.pool), m_seed(cpg.ps) {}
// cppcheck-suppress noExplicitConstructor
pg_t(const old_pg_t& opg) {
*this = opg.v;
}
old_pg_t get_old_pg() const {
old_pg_t o;
ceph_assert(m_pool < 0xffffffffull);
o.v.pool = m_pool;
o.v.ps = m_seed;
o.v.preferred = (__s16)-1;
return o;
}
ps_t ps() const {
return m_seed;
}
int64_t pool() const {
return m_pool;
}
static const uint8_t calc_name_buf_size = 36; // max length for max values len("18446744073709551615.ffffffff") + future suffix len("_head") + '\0'
char *calc_name(char *buf, const char *suffix_backwords) const;
void set_ps(ps_t p) {
m_seed = p;
}
void set_pool(uint64_t p) {
m_pool = p;
}
pg_t get_parent() const;
pg_t get_ancestor(unsigned old_pg_num) const;
int print(char *o, int maxlen) const;
bool parse(const char *s);
bool is_split(unsigned old_pg_num, unsigned new_pg_num, std::set<pg_t> *pchildren) const;
bool is_merge_source(unsigned old_pg_num, unsigned new_pg_num, pg_t *parent) const;
bool is_merge_target(unsigned old_pg_num, unsigned new_pg_num) const {
return ps() < new_pg_num && is_split(new_pg_num, old_pg_num, nullptr);
}
  /**
   * Returns b such that for all objects o:
   *   o is in the pg for *this iff
   *   (o.hash & ~((~0)<<b)) == (ps() & ~((~0)<<b))
   * i.e. the low b bits of the object hash match the low b bits of the pg seed.
   */
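  // Illustrative example (sketch): with pg_num = 8 a PG owns exactly the
  // objects whose low 3 hash bits equal its seed, so get_split_bits(8)
  // returns 3.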
unsigned get_split_bits(unsigned pg_num) const;
bool contains(int bits, const ghobject_t& oid) const {
return
(int64_t)m_pool == oid.hobj.get_logical_pool() &&
oid.match(bits, ps());
}
bool contains(int bits, const hobject_t& oid) const {
return
(int64_t)m_pool == oid.get_logical_pool() &&
oid.match(bits, ps());
}
hobject_t get_hobj_start() const;
hobject_t get_hobj_end(unsigned pg_num) const;
// strong ordering is supported
auto operator<=>(const pg_t&) const noexcept = default;
void encode(ceph::buffer::list& bl) const {
using ceph::encode;
__u8 v = 1;
encode(v, bl);
encode(m_pool, bl);
encode(m_seed, bl);
encode((int32_t)-1, bl); // was preferred
}
void decode(ceph::buffer::list::const_iterator& bl) {
using ceph::decode;
__u8 v;
decode(v, bl);
decode(m_pool, bl);
decode(m_seed, bl);
bl += sizeof(int32_t); // was preferred
}
void decode_old(ceph::buffer::list::const_iterator& bl) {
using ceph::decode;
old_pg_t opg;
decode(opg, bl);
*this = opg;
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_t*>& o);
};
WRITE_CLASS_ENCODER(pg_t)
std::ostream& operator<<(std::ostream& out, const pg_t &pg);
namespace std {
template<> struct hash< pg_t >
{
size_t operator()( const pg_t& x ) const
{
static hash<uint32_t> H;
// xor (s32)-1 in there to preserve original m_preferred result (paranoia!)
return H((x.pool() & 0xffffffff) ^ (x.pool() >> 32) ^ x.ps() ^ (int32_t)(-1));
}
};
} // namespace std
struct spg_t {
pg_t pgid;
shard_id_t shard;
spg_t() : shard(shard_id_t::NO_SHARD) {}
spg_t(pg_t pgid, shard_id_t shard) : pgid(pgid), shard(shard) {}
explicit spg_t(pg_t pgid) : pgid(pgid), shard(shard_id_t::NO_SHARD) {}
auto operator<=>(const spg_t&) const = default;
unsigned get_split_bits(unsigned pg_num) const {
return pgid.get_split_bits(pg_num);
}
spg_t get_parent() const {
return spg_t(pgid.get_parent(), shard);
}
ps_t ps() const {
return pgid.ps();
}
uint64_t pool() const {
return pgid.pool();
}
void reset_shard(shard_id_t s) {
shard = s;
}
static const uint8_t calc_name_buf_size = pg_t::calc_name_buf_size + 4; // 36 + len('s') + len("255");
char *calc_name(char *buf, const char *suffix_backwords) const;
// and a (limited) version that uses an internal buffer:
std::string calc_name_sring() const;
bool parse(const char *s);
bool parse(const std::string& s) {
return parse(s.c_str());
}
spg_t get_ancestor(unsigned old_pg_num) const {
return spg_t(pgid.get_ancestor(old_pg_num), shard);
}
bool is_split(unsigned old_pg_num, unsigned new_pg_num,
std::set<spg_t> *pchildren) const {
std::set<pg_t> _children;
std::set<pg_t> *children = pchildren ? &_children : NULL;
bool is_split = pgid.is_split(old_pg_num, new_pg_num, children);
if (pchildren && is_split) {
for (std::set<pg_t>::iterator i = _children.begin();
i != _children.end();
++i) {
pchildren->insert(spg_t(*i, shard));
}
}
return is_split;
}
bool is_merge_target(unsigned old_pg_num, unsigned new_pg_num) const {
return pgid.is_merge_target(old_pg_num, new_pg_num);
}
bool is_merge_source(unsigned old_pg_num, unsigned new_pg_num,
spg_t *parent) const {
spg_t out = *this;
bool r = pgid.is_merge_source(old_pg_num, new_pg_num, &out.pgid);
if (r && parent) {
*parent = out;
}
return r;
}
bool is_no_shard() const {
return shard == shard_id_t::NO_SHARD;
}
ghobject_t make_pgmeta_oid() const {
return ghobject_t::make_pgmeta(pgid.pool(), pgid.ps(), shard);
}
void encode(ceph::buffer::list &bl) const {
ENCODE_START(1, 1, bl);
encode(pgid, bl);
encode(shard, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(pgid, bl);
decode(shard, bl);
DECODE_FINISH(bl);
}
ghobject_t make_temp_ghobject(const std::string& name) const {
return ghobject_t(
hobject_t(object_t(name), "", CEPH_NOSNAP,
pgid.ps(),
hobject_t::get_temp_pool(pgid.pool()),
""),
ghobject_t::NO_GEN,
shard);
}
unsigned hash_to_shard(unsigned num_shards) const {
return ps() % num_shards;
}
};
WRITE_CLASS_ENCODER(spg_t)
namespace std {
template<> struct hash< spg_t >
{
size_t operator()( const spg_t& x ) const
{
static hash<uint32_t> H;
return H(hash<pg_t>()(x.pgid) ^ x.shard);
}
};
} // namespace std
std::ostream& operator<<(std::ostream& out, const spg_t &pg);
// ----------------------
class coll_t {
enum type_t : uint8_t {
TYPE_META = 0,
TYPE_LEGACY_TEMP = 1, /* no longer used */
TYPE_PG = 2,
TYPE_PG_TEMP = 3,
};
type_t type;
spg_t pgid;
uint64_t removal_seq; // note: deprecated, not encoded
char _str_buff[spg_t::calc_name_buf_size];
char *_str;
void calc_str();
coll_t(type_t t, spg_t p, uint64_t r)
: type(t), pgid(p), removal_seq(r) {
calc_str();
}
friend class denc_coll_t;
public:
coll_t() : type(TYPE_META), removal_seq(0)
{
calc_str();
}
coll_t(const coll_t& other)
: type(other.type), pgid(other.pgid), removal_seq(other.removal_seq) {
calc_str();
}
explicit coll_t(spg_t pgid)
: type(TYPE_PG), pgid(pgid), removal_seq(0)
{
calc_str();
}
coll_t& operator=(const coll_t& rhs)
{
this->type = rhs.type;
this->pgid = rhs.pgid;
this->removal_seq = rhs.removal_seq;
this->calc_str();
return *this;
}
// named constructors
static coll_t meta() {
return coll_t();
}
static coll_t pg(spg_t p) {
return coll_t(p);
}
const std::string to_str() const {
return std::string(_str);
}
const char *c_str() const {
return _str;
}
bool parse(const std::string& s);
int operator<(const coll_t &rhs) const {
return type < rhs.type ||
(type == rhs.type && pgid < rhs.pgid);
}
bool is_meta() const {
return type == TYPE_META;
}
bool is_pg_prefix(spg_t *pgid_) const {
if (type == TYPE_PG || type == TYPE_PG_TEMP) {
*pgid_ = pgid;
return true;
}
return false;
}
bool is_pg() const {
return type == TYPE_PG;
}
bool is_pg(spg_t *pgid_) const {
if (type == TYPE_PG) {
*pgid_ = pgid;
return true;
}
return false;
}
bool is_temp() const {
return type == TYPE_PG_TEMP;
}
bool is_temp(spg_t *pgid_) const {
if (type == TYPE_PG_TEMP) {
*pgid_ = pgid;
return true;
}
return false;
}
int64_t pool() const {
return pgid.pool();
}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
size_t encoded_size() const;
inline bool operator==(const coll_t& rhs) const {
// only compare type if meta
if (type != rhs.type)
return false;
if (type == TYPE_META)
return true;
return type == rhs.type && pgid == rhs.pgid;
}
inline bool operator!=(const coll_t& rhs) const {
return !(*this == rhs);
}
// get a TEMP collection that corresponds to the current collection,
// which we presume is a pg collection.
coll_t get_temp() const {
ceph_assert(type == TYPE_PG);
return coll_t(TYPE_PG_TEMP, pgid, 0);
}
ghobject_t get_min_hobj() const {
ghobject_t o;
switch (type) {
case TYPE_PG:
o.hobj.pool = pgid.pool();
o.set_shard(pgid.shard);
break;
case TYPE_META:
o.hobj.pool = -1;
break;
default:
break;
}
return o;
}
unsigned hash_to_shard(unsigned num_shards) const {
if (type == TYPE_PG)
return pgid.hash_to_shard(num_shards);
return 0; // whatever.
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<coll_t*>& o);
};
WRITE_CLASS_ENCODER(coll_t)
inline std::ostream& operator<<(std::ostream& out, const coll_t& c) {
out << c.to_str();
return out;
}
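// Illustrative string forms (sketch, based on calc_str()): coll_t::meta() is
// "meta", the PG collection for pool 2 / seed 0x1 is "2.1_head", and its
// temp collection is "2.1_TEMP".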
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<coll_t> : fmt::ostream_formatter {};
#endif
namespace std {
template<> struct hash<coll_t> {
size_t operator()(const coll_t &c) const {
size_t h = 0;
std::string str(c.to_str());
std::string::const_iterator end(str.end());
for (std::string::const_iterator s = str.begin(); s != end; ++s) {
h += *s;
h += (h << 10);
h ^= (h >> 6);
}
h += (h << 3);
h ^= (h >> 11);
h += (h << 15);
return h;
}
};
} // namespace std
inline std::ostream& operator<<(std::ostream& out, const ceph_object_layout &ol)
{
out << pg_t(ol.ol_pgid);
int su = ol.ol_stripe_unit;
if (su)
out << ".su=" << su;
return out;
}
struct denc_coll_t {
coll_t coll;
auto &get_type() const { return coll.type; }
auto &get_type() { return coll.type; }
auto &get_pgid() const { return coll.pgid; }
auto &get_pgid() { return coll.pgid; }
denc_coll_t() = default;
denc_coll_t(const denc_coll_t &) = default;
denc_coll_t(denc_coll_t &&) = default;
denc_coll_t &operator=(const denc_coll_t &) = default;
denc_coll_t &operator=(denc_coll_t &&) = default;
explicit denc_coll_t(const coll_t &coll) : coll(coll) {}
operator coll_t() const {
return coll;
}
bool operator<(const denc_coll_t &rhs) const {
return coll < rhs.coll;
}
DENC(denc_coll_t, v, p) {
DENC_START(1, 1, p);
denc(v.get_type(), p);
denc(v.get_pgid().pgid.m_pool, p);
denc(v.get_pgid().pgid.m_seed, p);
denc(v.get_pgid().shard.id, p);
DENC_FINISH(p);
}
};
WRITE_CLASS_DENC(denc_coll_t)
// compound rados version type
/* WARNING: If you add a member to eversion_t, please make sure the
 * encode/decode functions still work correctly. On little-endian machines we
 * rely on there being no padding, on both 32-bit and 64-bit builds.
 */
class eversion_t {
public:
version_t version;
epoch_t epoch;
__u32 __pad;
eversion_t() : version(0), epoch(0), __pad(0) {}
eversion_t(epoch_t e, version_t v) : version(v), epoch(e), __pad(0) {}
// cppcheck-suppress noExplicitConstructor
eversion_t(const ceph_eversion& ce) :
version(ce.version),
epoch(ce.epoch),
__pad(0) { }
explicit eversion_t(ceph::buffer::list& bl) : __pad(0) { decode(bl); }
static const eversion_t& max() {
static const eversion_t max(-1,-1);
return max;
}
operator ceph_eversion() {
ceph_eversion c;
c.epoch = epoch;
c.version = version;
return c;
}
std::string get_key_name() const;
// key must point to the beginning of a block of 32 chars
inline void get_key_name(char* key) const {
// Below is equivalent of sprintf("%010u.%020llu");
key[31] = 0;
ritoa<uint64_t, 10, 20>(version, key + 31);
key[10] = '.';
ritoa<uint32_t, 10, 10>(epoch, key + 10);
}
void encode(ceph::buffer::list &bl) const {
#if defined(CEPH_LITTLE_ENDIAN)
bl.append((char *)this, sizeof(version_t) + sizeof(epoch_t));
#else
using ceph::encode;
encode(version, bl);
encode(epoch, bl);
#endif
}
void decode(ceph::buffer::list::const_iterator &bl) {
#if defined(CEPH_LITTLE_ENDIAN)
bl.copy(sizeof(version_t) + sizeof(epoch_t), (char *)this);
#else
using ceph::decode;
decode(version, bl);
decode(epoch, bl);
#endif
}
void decode(ceph::buffer::list& bl) {
auto p = std::cbegin(bl);
decode(p);
}
};
WRITE_CLASS_ENCODER(eversion_t)
inline bool operator==(const eversion_t& l, const eversion_t& r) {
return (l.epoch == r.epoch) && (l.version == r.version);
}
inline bool operator!=(const eversion_t& l, const eversion_t& r) {
return (l.epoch != r.epoch) || (l.version != r.version);
}
inline bool operator<(const eversion_t& l, const eversion_t& r) {
return (l.epoch == r.epoch) ? (l.version < r.version):(l.epoch < r.epoch);
}
inline bool operator<=(const eversion_t& l, const eversion_t& r) {
return (l.epoch == r.epoch) ? (l.version <= r.version):(l.epoch <= r.epoch);
}
inline bool operator>(const eversion_t& l, const eversion_t& r) {
return (l.epoch == r.epoch) ? (l.version > r.version):(l.epoch > r.epoch);
}
inline bool operator>=(const eversion_t& l, const eversion_t& r) {
return (l.epoch == r.epoch) ? (l.version >= r.version):(l.epoch >= r.epoch);
}
inline std::ostream& operator<<(std::ostream& out, const eversion_t& e) {
return out << e.epoch << "'" << e.version;
}
/**
* objectstore_perf_stat_t
*
* current perf information about the osd
*/
struct objectstore_perf_stat_t {
// cur_op_latency is in ns since double add/sub are not associative
uint64_t os_commit_latency_ns;
uint64_t os_apply_latency_ns;
objectstore_perf_stat_t() :
os_commit_latency_ns(0), os_apply_latency_ns(0) {}
bool operator==(const objectstore_perf_stat_t &r) const {
return os_commit_latency_ns == r.os_commit_latency_ns &&
os_apply_latency_ns == r.os_apply_latency_ns;
}
void add(const objectstore_perf_stat_t &o) {
os_commit_latency_ns += o.os_commit_latency_ns;
os_apply_latency_ns += o.os_apply_latency_ns;
}
void sub(const objectstore_perf_stat_t &o) {
os_commit_latency_ns -= o.os_commit_latency_ns;
os_apply_latency_ns -= o.os_apply_latency_ns;
}
void dump(ceph::Formatter *f) const;
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
static void generate_test_instances(std::list<objectstore_perf_stat_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(objectstore_perf_stat_t)
/*
* pg states
*/
#define PG_STATE_CREATING (1ULL << 0) // creating
#define PG_STATE_ACTIVE (1ULL << 1) // i am active. (primary: replicas too)
#define PG_STATE_CLEAN (1ULL << 2) // peers are complete, clean of stray replicas.
#define PG_STATE_DOWN (1ULL << 4) // a needed replica is down, PG offline
#define PG_STATE_RECOVERY_UNFOUND (1ULL << 5) // recovery stopped due to unfound
#define PG_STATE_BACKFILL_UNFOUND (1ULL << 6) // backfill stopped due to unfound
#define PG_STATE_PREMERGE (1ULL << 7) // i am preparing to merge
#define PG_STATE_SCRUBBING (1ULL << 8) // scrubbing
//#define PG_STATE_SCRUBQ (1ULL << 9) // queued for scrub
#define PG_STATE_DEGRADED (1ULL << 10) // pg contains objects with reduced redundancy
#define PG_STATE_INCONSISTENT (1ULL << 11) // pg replicas are inconsistent (but shouldn't be)
#define PG_STATE_PEERING (1ULL << 12) // pg is (re)peering
#define PG_STATE_REPAIR (1ULL << 13) // pg should repair on next scrub
#define PG_STATE_RECOVERING (1ULL << 14) // pg is recovering/migrating objects
#define PG_STATE_BACKFILL_WAIT (1ULL << 15) // [active] reserving backfill
#define PG_STATE_INCOMPLETE (1ULL << 16) // incomplete content, peering failed.
#define PG_STATE_STALE (1ULL << 17) // our state for this pg is stale, unknown.
#define PG_STATE_REMAPPED (1ULL << 18) // pg is explicitly remapped to different OSDs than CRUSH
#define PG_STATE_DEEP_SCRUB (1ULL << 19) // deep scrub: check CRC32 on files
#define PG_STATE_BACKFILLING (1ULL << 20) // [active] backfilling pg content
#define PG_STATE_BACKFILL_TOOFULL (1ULL << 21) // backfill can't proceed: too full
#define PG_STATE_RECOVERY_WAIT (1ULL << 22) // waiting for recovery reservations
#define PG_STATE_UNDERSIZED (1ULL << 23) // pg acting < pool size
#define PG_STATE_ACTIVATING (1ULL << 24) // pg is peered but not yet active
#define PG_STATE_PEERED (1ULL << 25) // peered, cannot go active, can recover
#define PG_STATE_SNAPTRIM (1ULL << 26) // trimming snaps
#define PG_STATE_SNAPTRIM_WAIT (1ULL << 27) // queued to trim snaps
#define PG_STATE_RECOVERY_TOOFULL (1ULL << 28) // recovery can't proceed: too full
#define PG_STATE_SNAPTRIM_ERROR (1ULL << 29) // error stopped trimming snaps
#define PG_STATE_FORCED_RECOVERY (1ULL << 30) // force recovery of this pg before any other
#define PG_STATE_FORCED_BACKFILL (1ULL << 31) // force backfill of this pg before any other
#define PG_STATE_FAILED_REPAIR (1ULL << 32) // A repair failed to fix all errors
#define PG_STATE_LAGGY (1ULL << 33) // PG is laggy/unreachable due to slow/delayed pings
#define PG_STATE_WAIT (1ULL << 34) // PG is waiting for prior intervals' readable period to expire
std::string pg_state_string(uint64_t state);
std::string pg_vector_string(const std::vector<int32_t> &a);
std::optional<uint64_t> pg_string_state(const std::string& state);
/*
* pool_snap_info_t
*
* attributes for a single pool snapshot.
*/
struct pool_snap_info_t {
snapid_t snapid;
utime_t stamp;
std::string name;
void dump(ceph::Formatter *f) const;
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
static void generate_test_instances(std::list<pool_snap_info_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(pool_snap_info_t)
inline std::ostream& operator<<(std::ostream& out, const pool_snap_info_t& si) {
return out << si.snapid << '(' << si.name << ' ' << si.stamp << ')';
}
/*
* pool_opts_t
*
* pool options.
*/
// The order of items in the list is important, therefore,
// you should always add to the end of the list when adding new options.
class pool_opts_t {
public:
enum key_t {
SCRUB_MIN_INTERVAL,
SCRUB_MAX_INTERVAL,
DEEP_SCRUB_INTERVAL,
RECOVERY_PRIORITY,
RECOVERY_OP_PRIORITY,
SCRUB_PRIORITY,
COMPRESSION_MODE,
COMPRESSION_ALGORITHM,
COMPRESSION_REQUIRED_RATIO,
COMPRESSION_MAX_BLOB_SIZE,
COMPRESSION_MIN_BLOB_SIZE,
CSUM_TYPE,
CSUM_MAX_BLOCK,
CSUM_MIN_BLOCK,
FINGERPRINT_ALGORITHM,
PG_NUM_MIN, // min pg_num
TARGET_SIZE_BYTES, // total bytes in pool
TARGET_SIZE_RATIO, // fraction of total cluster
PG_AUTOSCALE_BIAS,
READ_LEASE_INTERVAL,
DEDUP_TIER,
DEDUP_CHUNK_ALGORITHM,
DEDUP_CDC_CHUNK_SIZE,
PG_NUM_MAX, // max pg_num
};
enum type_t {
STR,
INT,
DOUBLE,
};
struct opt_desc_t {
key_t key;
type_t type;
opt_desc_t(key_t k, type_t t) : key(k), type(t) {}
bool operator==(const opt_desc_t& rhs) const {
return key == rhs.key && type == rhs.type;
}
};
typedef boost::variant<std::string,int64_t,double> value_t;
static bool is_opt_name(const std::string& name);
static opt_desc_t get_opt_desc(const std::string& name);
pool_opts_t() : opts() {}
bool is_set(key_t key) const;
template<typename T>
void set(key_t key, const T &val) {
value_t value = val;
opts[key] = value;
}
template<typename T>
bool get(key_t key, T *val) const {
opts_t::const_iterator i = opts.find(key);
if (i == opts.end()) {
return false;
}
*val = boost::get<T>(i->second);
return true;
}
template<typename T>
T value_or(key_t key, T&& default_value) const {
auto i = opts.find(key);
if (i == opts.end()) {
return std::forward<T>(default_value);
}
return boost::get<T>(i->second);
}
const value_t& get(key_t key) const;
bool unset(key_t key);
void dump(const std::string& name, ceph::Formatter *f) const;
void dump(ceph::Formatter *f) const;
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
private:
typedef std::map<key_t, value_t> opts_t;
opts_t opts;
friend std::ostream& operator<<(std::ostream& out, const pool_opts_t& opts);
};
WRITE_CLASS_ENCODER_FEATURES(pool_opts_t)
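// Usage sketch (illustrative, not part of the original interface):
//
//   pool_opts_t opts;
//   opts.set(pool_opts_t::PG_NUM_MIN, static_cast<int64_t>(16));
//   int64_t v = 0;
//   if (opts.get(pool_opts_t::PG_NUM_MIN, &v)) {
//     // v == 16; read with the same type that was stored, since the value
//     // is a boost::variant and boost::get throws on a type mismatch.
//   }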
struct pg_merge_meta_t {
pg_t source_pgid;
epoch_t ready_epoch = 0;
epoch_t last_epoch_started = 0;
epoch_t last_epoch_clean = 0;
eversion_t source_version;
eversion_t target_version;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(source_pgid, bl);
encode(ready_epoch, bl);
encode(last_epoch_started, bl);
encode(last_epoch_clean, bl);
encode(source_version, bl);
encode(target_version, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& p) {
DECODE_START(1, p);
decode(source_pgid, p);
decode(ready_epoch, p);
decode(last_epoch_started, p);
decode(last_epoch_clean, p);
decode(source_version, p);
decode(target_version, p);
DECODE_FINISH(p);
}
void dump(ceph::Formatter *f) const {
f->dump_stream("source_pgid") << source_pgid;
f->dump_unsigned("ready_epoch", ready_epoch);
f->dump_unsigned("last_epoch_started", last_epoch_started);
f->dump_unsigned("last_epoch_clean", last_epoch_clean);
f->dump_stream("source_version") << source_version;
f->dump_stream("target_version") << target_version;
}
};
WRITE_CLASS_ENCODER(pg_merge_meta_t)
class OSDMap;
/*
* pg_pool
*/
struct pg_pool_t {
static const char *APPLICATION_NAME_CEPHFS;
static const char *APPLICATION_NAME_RBD;
static const char *APPLICATION_NAME_RGW;
enum {
TYPE_REPLICATED = 1, // replication
//TYPE_RAID4 = 2, // raid4 (never implemented)
TYPE_ERASURE = 3, // erasure-coded
};
static constexpr uint32_t pg_CRUSH_ITEM_NONE = 0x7fffffff; /* can't import crush.h here */
static std::string_view get_type_name(int t) {
switch (t) {
case TYPE_REPLICATED: return "replicated";
//case TYPE_RAID4: return "raid4";
case TYPE_ERASURE: return "erasure";
default: return "???";
}
}
std::string_view get_type_name() const {
return get_type_name(type);
}
enum {
FLAG_HASHPSPOOL = 1<<0, // hash pg seed and pool together (instead of adding)
FLAG_FULL = 1<<1, // pool is full
FLAG_EC_OVERWRITES = 1<<2, // enables overwrites, once enabled, cannot be disabled
FLAG_INCOMPLETE_CLONES = 1<<3, // may have incomplete clones (bc we are/were an overlay)
FLAG_NODELETE = 1<<4, // pool can't be deleted
FLAG_NOPGCHANGE = 1<<5, // pool's pg and pgp num can't be changed
FLAG_NOSIZECHANGE = 1<<6, // pool's size and min size can't be changed
FLAG_WRITE_FADVISE_DONTNEED = 1<<7, // write mode with LIBRADOS_OP_FLAG_FADVISE_DONTNEED
FLAG_NOSCRUB = 1<<8, // block periodic scrub
FLAG_NODEEP_SCRUB = 1<<9, // block periodic deep-scrub
FLAG_FULL_QUOTA = 1<<10, // pool is currently running out of quota, will set FLAG_FULL too
FLAG_NEARFULL = 1<<11, // pool is nearfull
FLAG_BACKFILLFULL = 1<<12, // pool is backfillfull
FLAG_SELFMANAGED_SNAPS = 1<<13, // pool uses selfmanaged snaps
FLAG_POOL_SNAPS = 1<<14, // pool has pool snaps
FLAG_CREATING = 1<<15, // initial pool PGs are being created
FLAG_EIO = 1<<16, // return EIO for all client ops
FLAG_BULK = 1<<17, //pool is large
// PGs from this pool are allowed to be created on crimson osds.
// Pool features are restricted to those supported by crimson-osd.
// Note, does not prohibit being created on classic osd.
FLAG_CRIMSON = 1<<18,
};
static const char *get_flag_name(uint64_t f) {
switch (f) {
case FLAG_HASHPSPOOL: return "hashpspool";
case FLAG_FULL: return "full";
case FLAG_EC_OVERWRITES: return "ec_overwrites";
case FLAG_INCOMPLETE_CLONES: return "incomplete_clones";
case FLAG_NODELETE: return "nodelete";
case FLAG_NOPGCHANGE: return "nopgchange";
case FLAG_NOSIZECHANGE: return "nosizechange";
case FLAG_WRITE_FADVISE_DONTNEED: return "write_fadvise_dontneed";
case FLAG_NOSCRUB: return "noscrub";
case FLAG_NODEEP_SCRUB: return "nodeep-scrub";
case FLAG_FULL_QUOTA: return "full_quota";
case FLAG_NEARFULL: return "nearfull";
case FLAG_BACKFILLFULL: return "backfillfull";
case FLAG_SELFMANAGED_SNAPS: return "selfmanaged_snaps";
case FLAG_POOL_SNAPS: return "pool_snaps";
case FLAG_CREATING: return "creating";
case FLAG_EIO: return "eio";
case FLAG_BULK: return "bulk";
case FLAG_CRIMSON: return "crimson";
default: return "???";
}
}
static std::string get_flags_string(uint64_t f) {
std::string s;
for (unsigned n=0; f && n<64; ++n) {
if (f & (1ull << n)) {
if (s.length())
s += ",";
s += get_flag_name(1ull << n);
}
}
return s;
}
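  // Illustrative example (sketch): get_flags_string(FLAG_HASHPSPOOL|FLAG_NODELETE)
  // returns "hashpspool,nodelete", walking the bits from lowest to highest.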
std::string get_flags_string() const {
return get_flags_string(flags);
}
static uint64_t get_flag_by_name(const std::string& name) {
if (name == "hashpspool")
return FLAG_HASHPSPOOL;
if (name == "full")
return FLAG_FULL;
if (name == "ec_overwrites")
return FLAG_EC_OVERWRITES;
if (name == "incomplete_clones")
return FLAG_INCOMPLETE_CLONES;
if (name == "nodelete")
return FLAG_NODELETE;
if (name == "nopgchange")
return FLAG_NOPGCHANGE;
if (name == "nosizechange")
return FLAG_NOSIZECHANGE;
if (name == "write_fadvise_dontneed")
return FLAG_WRITE_FADVISE_DONTNEED;
if (name == "noscrub")
return FLAG_NOSCRUB;
if (name == "nodeep-scrub")
return FLAG_NODEEP_SCRUB;
if (name == "full_quota")
return FLAG_FULL_QUOTA;
if (name == "nearfull")
return FLAG_NEARFULL;
if (name == "backfillfull")
return FLAG_BACKFILLFULL;
if (name == "selfmanaged_snaps")
return FLAG_SELFMANAGED_SNAPS;
if (name == "pool_snaps")
return FLAG_POOL_SNAPS;
if (name == "creating")
return FLAG_CREATING;
if (name == "eio")
return FLAG_EIO;
if (name == "bulk")
return FLAG_BULK;
if (name == "crimson")
return FLAG_CRIMSON;
return 0;
}
/// converts the acting/up vector to a set of pg shards
void convert_to_pg_shards(const std::vector<int> &from, std::set<pg_shard_t>* to) const;
typedef enum {
CACHEMODE_NONE = 0, ///< no caching
CACHEMODE_WRITEBACK = 1, ///< write to cache, flush later
CACHEMODE_FORWARD = 2, ///< forward if not in cache
CACHEMODE_READONLY = 3, ///< handle reads, forward writes [not strongly consistent]
CACHEMODE_READFORWARD = 4, ///< forward reads, write to cache flush later
CACHEMODE_READPROXY = 5, ///< proxy reads, write to cache flush later
CACHEMODE_PROXY = 6, ///< proxy if not in cache
} cache_mode_t;
static const char *get_cache_mode_name(cache_mode_t m) {
switch (m) {
case CACHEMODE_NONE: return "none";
case CACHEMODE_WRITEBACK: return "writeback";
case CACHEMODE_FORWARD: return "forward";
case CACHEMODE_READONLY: return "readonly";
case CACHEMODE_READFORWARD: return "readforward";
case CACHEMODE_READPROXY: return "readproxy";
case CACHEMODE_PROXY: return "proxy";
default: return "unknown";
}
}
static cache_mode_t get_cache_mode_from_str(const std::string& s) {
if (s == "none")
return CACHEMODE_NONE;
if (s == "writeback")
return CACHEMODE_WRITEBACK;
if (s == "forward")
return CACHEMODE_FORWARD;
if (s == "readonly")
return CACHEMODE_READONLY;
if (s == "readforward")
return CACHEMODE_READFORWARD;
if (s == "readproxy")
return CACHEMODE_READPROXY;
if (s == "proxy")
return CACHEMODE_PROXY;
return (cache_mode_t)-1;
}
const char *get_cache_mode_name() const {
return get_cache_mode_name(cache_mode);
}
bool cache_mode_requires_hit_set() const {
switch (cache_mode) {
case CACHEMODE_NONE:
case CACHEMODE_FORWARD:
case CACHEMODE_READONLY:
case CACHEMODE_PROXY:
return false;
case CACHEMODE_WRITEBACK:
case CACHEMODE_READFORWARD:
case CACHEMODE_READPROXY:
return true;
default:
ceph_abort_msg("implement me");
}
}
enum class pg_autoscale_mode_t : uint8_t {
OFF = 0,
WARN = 1,
ON = 2,
UNKNOWN = UINT8_MAX,
};
static const char *get_pg_autoscale_mode_name(pg_autoscale_mode_t m) {
switch (m) {
case pg_autoscale_mode_t::OFF: return "off";
case pg_autoscale_mode_t::ON: return "on";
case pg_autoscale_mode_t::WARN: return "warn";
default: return "???";
}
}
static pg_autoscale_mode_t get_pg_autoscale_mode_by_name(const std::string& m) {
if (m == "off") {
return pg_autoscale_mode_t::OFF;
}
if (m == "warn") {
return pg_autoscale_mode_t::WARN;
}
if (m == "on") {
return pg_autoscale_mode_t::ON;
}
return pg_autoscale_mode_t::UNKNOWN;
}
utime_t create_time;
uint64_t flags = 0; ///< FLAG_*
__u8 type = 0; ///< TYPE_*
__u8 size = 0, min_size = 0; ///< number of osds in each pg
__u8 crush_rule = 0; ///< crush placement rule
__u8 object_hash = 0; ///< hash mapping object name to ps
pg_autoscale_mode_t pg_autoscale_mode = pg_autoscale_mode_t::UNKNOWN;
private:
__u32 pg_num = 0, pgp_num = 0; ///< number of pgs
__u32 pg_num_pending = 0; ///< pg_num we are about to merge down to
__u32 pg_num_target = 0; ///< pg_num we should converge toward
__u32 pgp_num_target = 0; ///< pgp_num we should converge toward
public:
std::map<std::string, std::string> properties; ///< OBSOLETE
std::string erasure_code_profile; ///< name of the erasure code profile in OSDMap
  epoch_t last_change = 0; ///< most recent epoch changed, excluding snapshot changes
// If non-zero, require OSDs in at least this many different instances...
uint32_t peering_crush_bucket_count = 0;
// of this bucket type...
uint32_t peering_crush_bucket_barrier = 0;
// including this one
int32_t peering_crush_mandatory_member = pg_CRUSH_ITEM_NONE;
// The per-bucket replica count is calculated with this "target"
// instead of the above crush_bucket_count. This means we can maintain a
// target size of 4 without attempting to place them all in 1 DC
uint32_t peering_crush_bucket_target = 0;
/// last epoch that forced clients to resend
epoch_t last_force_op_resend = 0;
/// last epoch that forced clients to resend (pre-nautilus clients only)
epoch_t last_force_op_resend_prenautilus = 0;
/// last epoch that forced clients to resend (pre-luminous clients only)
epoch_t last_force_op_resend_preluminous = 0;
/// metadata for the most recent PG merge
pg_merge_meta_t last_pg_merge_meta;
snapid_t snap_seq = 0; ///< seq for per-pool snapshot
epoch_t snap_epoch = 0; ///< osdmap epoch of last snap
uint64_t auid = 0; ///< who owns the pg
uint64_t quota_max_bytes = 0; ///< maximum number of bytes for this pool
uint64_t quota_max_objects = 0; ///< maximum number of objects for this pool
/*
* Pool snaps (global to this pool). These define a SnapContext for
* the pool, unless the client manually specifies an alternate
* context.
*/
std::map<snapid_t, pool_snap_info_t> snaps;
/*
* Alternatively, if we are defining non-pool snaps (e.g. via the
* Ceph MDS), we must track @removed_snaps (since @snaps is not
* used). Snaps and removed_snaps are to be used exclusive of each
* other!
*/
interval_set<snapid_t> removed_snaps;
unsigned pg_num_mask = 0, pgp_num_mask = 0;
std::set<uint64_t> tiers; ///< pools that are tiers of us
int64_t tier_of = -1; ///< pool for which we are a tier
// Note that write wins for read+write ops
int64_t read_tier = -1; ///< pool/tier for objecter to direct reads to
int64_t write_tier = -1; ///< pool/tier for objecter to direct writes to
cache_mode_t cache_mode = CACHEMODE_NONE; ///< cache pool mode
bool is_tier() const { return tier_of >= 0; }
bool has_tiers() const { return !tiers.empty(); }
void clear_tier() {
tier_of = -1;
clear_read_tier();
clear_write_tier();
clear_tier_tunables();
}
bool has_read_tier() const { return read_tier >= 0; }
void clear_read_tier() { read_tier = -1; }
bool has_write_tier() const { return write_tier >= 0; }
void clear_write_tier() { write_tier = -1; }
void clear_tier_tunables() {
if (cache_mode != CACHEMODE_NONE)
flags |= FLAG_INCOMPLETE_CLONES;
cache_mode = CACHEMODE_NONE;
target_max_bytes = 0;
target_max_objects = 0;
cache_target_dirty_ratio_micro = 0;
cache_target_dirty_high_ratio_micro = 0;
cache_target_full_ratio_micro = 0;
hit_set_params = HitSet::Params();
hit_set_period = 0;
hit_set_count = 0;
hit_set_grade_decay_rate = 0;
hit_set_search_last_n = 0;
grade_table.resize(0);
}
bool has_snaps() const {
return snaps.size() > 0;
}
bool is_stretch_pool() const {
return peering_crush_bucket_count != 0;
}
bool stretch_set_can_peer(const std::set<int>& want, const OSDMap& osdmap,
std::ostream *out) const;
bool stretch_set_can_peer(const std::vector<int>& want, const OSDMap& osdmap,
std::ostream *out) const {
if (!is_stretch_pool()) return true;
std::set<int> swant;
for (auto i : want) swant.insert(i);
return stretch_set_can_peer(swant, osdmap, out);
}
uint64_t target_max_bytes = 0; ///< tiering: target max pool size
uint64_t target_max_objects = 0; ///< tiering: target max pool size
uint32_t cache_target_dirty_ratio_micro = 0; ///< cache: fraction of target to leave dirty
uint32_t cache_target_dirty_high_ratio_micro = 0; ///< cache: fraction of target to flush with high speed
uint32_t cache_target_full_ratio_micro = 0; ///< cache: fraction of target to fill before we evict in earnest
uint32_t cache_min_flush_age = 0; ///< minimum age (seconds) before we can flush
uint32_t cache_min_evict_age = 0; ///< minimum age (seconds) before we can evict
HitSet::Params hit_set_params; ///< The HitSet params to use on this pool
uint32_t hit_set_period = 0; ///< periodicity of HitSet segments (seconds)
uint32_t hit_set_count = 0; ///< number of periods to retain
bool use_gmt_hitset = true; ///< use gmt to name the hitset archive object
uint32_t min_read_recency_for_promote = 0; ///< minimum number of HitSet to check before promote on read
uint32_t min_write_recency_for_promote = 0; ///< minimum number of HitSet to check before promote on write
  uint32_t hit_set_grade_decay_rate = 0; ///< the newest hit_set carries the highest weight for object
                                         ///< temperature counts; each older hit_set's weight decays
                                         ///< by this percentage relative to the next newer one
  uint32_t hit_set_search_last_n = 0;  ///< accumulate at most N hit_sets for temperature
uint32_t stripe_width = 0; ///< erasure coded stripe size in bytes
  uint64_t expected_num_objects = 0; ///< expected number of objects on this pool; a value of 0 indicates
                                     ///< the user did not specify an expected value
bool fast_read = false; ///< whether turn on fast read on the pool or not
pool_opts_t opts; ///< options
typedef enum {
TYPE_FINGERPRINT_NONE = 0,
TYPE_FINGERPRINT_SHA1 = 1,
TYPE_FINGERPRINT_SHA256 = 2,
TYPE_FINGERPRINT_SHA512 = 3,
} fingerprint_t;
static fingerprint_t get_fingerprint_from_str(const std::string& s) {
if (s == "none")
return TYPE_FINGERPRINT_NONE;
if (s == "sha1")
return TYPE_FINGERPRINT_SHA1;
if (s == "sha256")
return TYPE_FINGERPRINT_SHA256;
if (s == "sha512")
return TYPE_FINGERPRINT_SHA512;
return (fingerprint_t)-1;
}
  fingerprint_t get_fingerprint_type() const {
std::string fp_str;
opts.get(pool_opts_t::FINGERPRINT_ALGORITHM, &fp_str);
return get_fingerprint_from_str(fp_str);
}
const char *get_fingerprint_name() const {
std::string fp_str;
fingerprint_t fp_t;
opts.get(pool_opts_t::FINGERPRINT_ALGORITHM, &fp_str);
fp_t = get_fingerprint_from_str(fp_str);
return get_fingerprint_name(fp_t);
}
static const char *get_fingerprint_name(fingerprint_t m) {
switch (m) {
case TYPE_FINGERPRINT_NONE: return "none";
case TYPE_FINGERPRINT_SHA1: return "sha1";
case TYPE_FINGERPRINT_SHA256: return "sha256";
case TYPE_FINGERPRINT_SHA512: return "sha512";
default: return "unknown";
}
}
typedef enum {
TYPE_DEDUP_CHUNK_NONE = 0,
TYPE_DEDUP_CHUNK_FASTCDC = 1,
TYPE_DEDUP_CHUNK_FIXEDCDC = 2,
} dedup_chunk_algo_t;
static dedup_chunk_algo_t get_dedup_chunk_algorithm_from_str(const std::string& s) {
if (s == "none")
return TYPE_DEDUP_CHUNK_NONE;
if (s == "fastcdc")
return TYPE_DEDUP_CHUNK_FASTCDC;
if (s == "fixed")
return TYPE_DEDUP_CHUNK_FIXEDCDC;
return (dedup_chunk_algo_t)-1;
}
  dedup_chunk_algo_t get_dedup_chunk_algorithm_type() const {
std::string algo_str;
opts.get(pool_opts_t::DEDUP_CHUNK_ALGORITHM, &algo_str);
return get_dedup_chunk_algorithm_from_str(algo_str);
}
const char *get_dedup_chunk_algorithm_name() const {
std::string dedup_chunk_algo_str;
    dedup_chunk_algo_t dedup_chunk_algo;
    opts.get(pool_opts_t::DEDUP_CHUNK_ALGORITHM, &dedup_chunk_algo_str);
    dedup_chunk_algo = get_dedup_chunk_algorithm_from_str(dedup_chunk_algo_str);
    return get_dedup_chunk_algorithm_name(dedup_chunk_algo);
}
static const char *get_dedup_chunk_algorithm_name(dedup_chunk_algo_t m) {
switch (m) {
case TYPE_DEDUP_CHUNK_NONE: return "none";
case TYPE_DEDUP_CHUNK_FASTCDC: return "fastcdc";
case TYPE_DEDUP_CHUNK_FIXEDCDC: return "fixed";
default: return "unknown";
}
}
int64_t get_dedup_tier() const {
int64_t tier_id = 0;
opts.get(pool_opts_t::DEDUP_TIER, &tier_id);
return tier_id;
}
int64_t get_dedup_cdc_chunk_size() const {
int64_t chunk_size = 0;
opts.get(pool_opts_t::DEDUP_CDC_CHUNK_SIZE, &chunk_size);
return chunk_size;
}
/// application -> key/value metadata
std::map<std::string, std::map<std::string, std::string>> application_metadata;
private:
std::vector<uint32_t> grade_table;
public:
uint32_t get_grade(unsigned i) const {
if (grade_table.size() <= i)
return 0;
return grade_table[i];
}
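  // calc_grade_table() precomputes per-hit_set grades starting from 1000000
  // and decaying by hit_set_grade_decay_rate percent per (older) hit_set.
  // For example, with hit_set_grade_decay_rate = 20 and hit_set_count = 4,
  // the table works out to {800000, 640000, 512000, 409600}.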
void calc_grade_table() {
unsigned v = 1000000;
grade_table.resize(hit_set_count);
for (unsigned i = 0; i < hit_set_count; i++) {
v = v * (1 - (hit_set_grade_decay_rate / 100.0));
grade_table[i] = v;
}
}
pg_pool_t() = default;
void dump(ceph::Formatter *f) const;
const utime_t &get_create_time() const { return create_time; }
uint64_t get_flags() const { return flags; }
bool has_flag(uint64_t f) const { return flags & f; }
void set_flag(uint64_t f) { flags |= f; }
void unset_flag(uint64_t f) { flags &= ~f; }
bool require_rollback() const {
return is_erasure();
}
/// true if incomplete clones may be present
bool allow_incomplete_clones() const {
return cache_mode != CACHEMODE_NONE || has_flag(FLAG_INCOMPLETE_CLONES);
}
unsigned get_type() const { return type; }
unsigned get_size() const { return size; }
unsigned get_min_size() const { return min_size; }
int get_crush_rule() const { return crush_rule; }
int get_object_hash() const { return object_hash; }
const char *get_object_hash_name() const {
return ceph_str_hash_name(get_object_hash());
}
epoch_t get_last_change() const { return last_change; }
epoch_t get_last_force_op_resend() const { return last_force_op_resend; }
epoch_t get_last_force_op_resend_prenautilus() const {
return last_force_op_resend_prenautilus;
}
epoch_t get_last_force_op_resend_preluminous() const {
return last_force_op_resend_preluminous;
}
epoch_t get_snap_epoch() const { return snap_epoch; }
snapid_t get_snap_seq() const { return snap_seq; }
uint64_t get_auid() const { return auid; }
void set_snap_seq(snapid_t s) { snap_seq = s; }
void set_snap_epoch(epoch_t e) { snap_epoch = e; }
void set_stripe_width(uint32_t s) { stripe_width = s; }
uint32_t get_stripe_width() const { return stripe_width; }
bool is_replicated() const { return get_type() == TYPE_REPLICATED; }
bool is_erasure() const { return get_type() == TYPE_ERASURE; }
bool supports_omap() const {
return !(get_type() == TYPE_ERASURE);
}
bool requires_aligned_append() const {
return is_erasure() && !has_flag(FLAG_EC_OVERWRITES);
}
uint64_t required_alignment() const { return stripe_width; }
bool allows_ecoverwrites() const {
return has_flag(FLAG_EC_OVERWRITES);
}
bool is_crimson() const {
return has_flag(FLAG_CRIMSON);
}
bool can_shift_osds() const {
switch (get_type()) {
case TYPE_REPLICATED:
return true;
case TYPE_ERASURE:
return false;
default:
ceph_abort_msg("unhandled pool type");
}
}
unsigned get_pg_num() const { return pg_num; }
unsigned get_pgp_num() const { return pgp_num; }
unsigned get_pg_num_target() const { return pg_num_target; }
unsigned get_pgp_num_target() const { return pgp_num_target; }
unsigned get_pg_num_pending() const { return pg_num_pending; }
unsigned get_pg_num_mask() const { return pg_num_mask; }
unsigned get_pgp_num_mask() const { return pgp_num_mask; }
  // if pg_num is not a power of two, pgs are not equally sized.
// return, for a given pg, the fraction (denominator) of the total
// pool size that it represents.
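  // (illustrative example: with pg_num = 12, a pg covers either 1/16 or 1/8
  // of the pool's hash space, so the divisor returned is 16 or 8)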
unsigned get_pg_num_divisor(pg_t pgid) const;
bool is_pending_merge(pg_t pgid, bool *target) const;
void set_pg_num(int p) {
pg_num = p;
pg_num_pending = p;
calc_pg_masks();
}
void set_pgp_num(int p) {
pgp_num = p;
calc_pg_masks();
}
void set_pg_num_pending(int p) {
pg_num_pending = p;
calc_pg_masks();
}
void set_pg_num_target(int p) {
pg_num_target = p;
}
void set_pgp_num_target(int p) {
pgp_num_target = p;
}
void dec_pg_num(pg_t source_pgid,
epoch_t ready_epoch,
eversion_t source_version,
eversion_t target_version,
epoch_t last_epoch_started,
epoch_t last_epoch_clean) {
--pg_num;
last_pg_merge_meta.source_pgid = source_pgid;
last_pg_merge_meta.ready_epoch = ready_epoch;
last_pg_merge_meta.source_version = source_version;
last_pg_merge_meta.target_version = target_version;
last_pg_merge_meta.last_epoch_started = last_epoch_started;
last_pg_merge_meta.last_epoch_clean = last_epoch_clean;
calc_pg_masks();
}
void set_quota_max_bytes(uint64_t m) {
quota_max_bytes = m;
}
uint64_t get_quota_max_bytes() {
return quota_max_bytes;
}
void set_quota_max_objects(uint64_t m) {
quota_max_objects = m;
}
uint64_t get_quota_max_objects() {
return quota_max_objects;
}
void set_last_force_op_resend(uint64_t t) {
last_force_op_resend = t;
last_force_op_resend_prenautilus = t;
last_force_op_resend_preluminous = t;
}
void calc_pg_masks();
/*
* we have two snap modes:
* - pool global snaps
* - snap existence/non-existence defined by snaps[] and snap_seq
* - user managed snaps
* - removal governed by removed_snaps
*
* we know which mode we're using based on whether removed_snaps is empty.
* If nothing has been created, both functions report false.
*/
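  /*
   * Pool snaps are created via the pool snapshot commands (e.g. "rados
   * mksnap"); self-managed (unmanaged) snaps are allocated by clients such
   * as RBD. The two modes are mutually exclusive for a given pool.
   */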
bool is_pool_snaps_mode() const;
bool is_unmanaged_snaps_mode() const;
bool is_removed_snap(snapid_t s) const;
snapid_t snap_exists(std::string_view s) const;
void add_snap(const char *n, utime_t stamp);
uint64_t add_unmanaged_snap(bool preoctopus_compat);
void remove_snap(snapid_t s);
void remove_unmanaged_snap(snapid_t s, bool preoctopus_compat);
SnapContext get_snap_context() const;
/// hash a object name+namespace key to a hash position
uint32_t hash_key(const std::string& key, const std::string& ns) const;
/// round a hash position down to a pg num
uint32_t raw_hash_to_pg(uint32_t v) const;
/*
* map a raw pg (with full precision ps) into an actual pg, for storage
*/
pg_t raw_pg_to_pg(pg_t pg) const;
/*
* map raw pg (full precision ps) into a placement seed. include
* pool id in that value so that different pools don't use the same
* seeds.
*/
ps_t raw_pg_to_pps(pg_t pg) const;
/// choose a random hash position within a pg
uint32_t get_random_pg_position(pg_t pgid, uint32_t seed) const;
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
static void generate_test_instances(std::list<pg_pool_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(pg_pool_t)
std::ostream& operator<<(std::ostream& out, const pg_pool_t& p);
/**
* a summation of object stats
*
* This is just a container for object stats; we don't know what for.
*
 * If you add members to object_stat_sum_t, you should make sure there is
 * no padding between these members.
* You should also modify the padding_check function.
*/
struct object_stat_sum_t {
/**************************************************************************
* WARNING: be sure to update operator==, floor, and split when
* adding/removing fields!
**************************************************************************/
int64_t num_bytes{0}; // in bytes
int64_t num_objects{0};
int64_t num_object_clones{0};
int64_t num_object_copies{0}; // num_objects * num_replicas
int64_t num_objects_missing_on_primary{0};
int64_t num_objects_degraded{0};
int64_t num_objects_unfound{0};
int64_t num_rd{0};
int64_t num_rd_kb{0};
int64_t num_wr{0};
int64_t num_wr_kb{0};
int64_t num_scrub_errors{0}; // total deep and shallow scrub errors
int64_t num_objects_recovered{0};
int64_t num_bytes_recovered{0};
int64_t num_keys_recovered{0};
int64_t num_shallow_scrub_errors{0};
int64_t num_deep_scrub_errors{0};
int64_t num_objects_dirty{0};
int64_t num_whiteouts{0};
int64_t num_objects_omap{0};
int64_t num_objects_hit_set_archive{0};
int64_t num_objects_misplaced{0};
int64_t num_bytes_hit_set_archive{0};
int64_t num_flush{0};
int64_t num_flush_kb{0};
int64_t num_evict{0};
int64_t num_evict_kb{0};
int64_t num_promote{0};
int32_t num_flush_mode_high{0}; // 1 when in high flush mode, otherwise 0
int32_t num_flush_mode_low{0}; // 1 when in low flush mode, otherwise 0
int32_t num_evict_mode_some{0}; // 1 when in evict some mode, otherwise 0
int32_t num_evict_mode_full{0}; // 1 when in evict full mode, otherwise 0
int64_t num_objects_pinned{0};
int64_t num_objects_missing{0};
int64_t num_legacy_snapsets{0}; ///< upper bound on pre-luminous-style SnapSets
int64_t num_large_omap_objects{0};
int64_t num_objects_manifest{0};
int64_t num_omap_bytes{0};
int64_t num_omap_keys{0};
int64_t num_objects_repaired{0};
object_stat_sum_t() = default;
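  // floor() clamps every counter to at least f; callers typically use
  // floor(0) to clean up negative values that can arise from subtracting
  // stats that were accumulated out of order.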
void floor(int64_t f) {
#define FLOOR(x) if (x < f) x = f
FLOOR(num_bytes);
FLOOR(num_objects);
FLOOR(num_object_clones);
FLOOR(num_object_copies);
FLOOR(num_objects_missing_on_primary);
FLOOR(num_objects_missing);
FLOOR(num_objects_degraded);
FLOOR(num_objects_misplaced);
FLOOR(num_objects_unfound);
FLOOR(num_rd);
FLOOR(num_rd_kb);
FLOOR(num_wr);
FLOOR(num_wr_kb);
FLOOR(num_large_omap_objects);
FLOOR(num_objects_manifest);
FLOOR(num_omap_bytes);
FLOOR(num_omap_keys);
FLOOR(num_shallow_scrub_errors);
FLOOR(num_deep_scrub_errors);
num_scrub_errors = num_shallow_scrub_errors + num_deep_scrub_errors;
FLOOR(num_objects_recovered);
FLOOR(num_bytes_recovered);
FLOOR(num_keys_recovered);
FLOOR(num_objects_dirty);
FLOOR(num_whiteouts);
FLOOR(num_objects_omap);
FLOOR(num_objects_hit_set_archive);
FLOOR(num_bytes_hit_set_archive);
FLOOR(num_flush);
FLOOR(num_flush_kb);
FLOOR(num_evict);
FLOOR(num_evict_kb);
FLOOR(num_promote);
FLOOR(num_flush_mode_high);
FLOOR(num_flush_mode_low);
FLOOR(num_evict_mode_some);
FLOOR(num_evict_mode_full);
FLOOR(num_objects_pinned);
FLOOR(num_legacy_snapsets);
FLOOR(num_objects_repaired);
#undef FLOOR
}
void split(std::vector<object_stat_sum_t> &out) const {
#define SPLIT(PARAM) \
for (unsigned i = 0; i < out.size(); ++i) { \
out[i].PARAM = PARAM / out.size(); \
if (i < (PARAM % out.size())) { \
out[i].PARAM++; \
} \
}
#define SPLIT_PRESERVE_NONZERO(PARAM) \
for (unsigned i = 0; i < out.size(); ++i) { \
if (PARAM) \
out[i].PARAM = 1 + PARAM / out.size(); \
else \
out[i].PARAM = 0; \
}
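    // SPLIT distributes a counter across the children as evenly as possible,
    // giving the remainder to the lowest-indexed children: e.g. splitting
    // num_objects = 10 across 3 children yields 4, 3, 3.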
SPLIT(num_bytes);
SPLIT(num_objects);
SPLIT(num_object_clones);
SPLIT(num_object_copies);
SPLIT(num_objects_missing_on_primary);
SPLIT(num_objects_missing);
SPLIT(num_objects_degraded);
SPLIT(num_objects_misplaced);
SPLIT(num_objects_unfound);
SPLIT(num_rd);
SPLIT(num_rd_kb);
SPLIT(num_wr);
SPLIT(num_wr_kb);
SPLIT(num_large_omap_objects);
SPLIT(num_objects_manifest);
SPLIT(num_omap_bytes);
SPLIT(num_omap_keys);
SPLIT(num_objects_repaired);
SPLIT_PRESERVE_NONZERO(num_shallow_scrub_errors);
SPLIT_PRESERVE_NONZERO(num_deep_scrub_errors);
for (unsigned i = 0; i < out.size(); ++i) {
out[i].num_scrub_errors = out[i].num_shallow_scrub_errors +
out[i].num_deep_scrub_errors;
}
SPLIT(num_objects_recovered);
SPLIT(num_bytes_recovered);
SPLIT(num_keys_recovered);
SPLIT(num_objects_dirty);
SPLIT(num_whiteouts);
SPLIT(num_objects_omap);
SPLIT(num_objects_hit_set_archive);
SPLIT(num_bytes_hit_set_archive);
SPLIT(num_flush);
SPLIT(num_flush_kb);
SPLIT(num_evict);
SPLIT(num_evict_kb);
SPLIT(num_promote);
SPLIT(num_flush_mode_high);
SPLIT(num_flush_mode_low);
SPLIT(num_evict_mode_some);
SPLIT(num_evict_mode_full);
SPLIT(num_objects_pinned);
SPLIT_PRESERVE_NONZERO(num_legacy_snapsets);
#undef SPLIT
#undef SPLIT_PRESERVE_NONZERO
}
void clear() {
// FIPS zeroization audit 20191117: this memset is not security related.
memset(this, 0, sizeof(*this));
}
void calc_copies(int nrep) {
num_object_copies = nrep * num_objects;
}
bool is_zero() const {
return mem_is_zero((char*)this, sizeof(*this));
}
void add(const object_stat_sum_t& o);
void sub(const object_stat_sum_t& o);
void dump(ceph::Formatter *f) const;
void padding_check() {
static_assert(
sizeof(object_stat_sum_t) ==
sizeof(num_bytes) +
sizeof(num_objects) +
sizeof(num_object_clones) +
sizeof(num_object_copies) +
sizeof(num_objects_missing_on_primary) +
sizeof(num_objects_degraded) +
sizeof(num_objects_unfound) +
sizeof(num_rd) +
sizeof(num_rd_kb) +
sizeof(num_wr) +
sizeof(num_wr_kb) +
sizeof(num_scrub_errors) +
sizeof(num_large_omap_objects) +
sizeof(num_objects_manifest) +
sizeof(num_omap_bytes) +
sizeof(num_omap_keys) +
sizeof(num_objects_repaired) +
sizeof(num_objects_recovered) +
sizeof(num_bytes_recovered) +
sizeof(num_keys_recovered) +
sizeof(num_shallow_scrub_errors) +
sizeof(num_deep_scrub_errors) +
sizeof(num_objects_dirty) +
sizeof(num_whiteouts) +
sizeof(num_objects_omap) +
sizeof(num_objects_hit_set_archive) +
sizeof(num_objects_misplaced) +
sizeof(num_bytes_hit_set_archive) +
sizeof(num_flush) +
sizeof(num_flush_kb) +
sizeof(num_evict) +
sizeof(num_evict_kb) +
sizeof(num_promote) +
sizeof(num_flush_mode_high) +
sizeof(num_flush_mode_low) +
sizeof(num_evict_mode_some) +
sizeof(num_evict_mode_full) +
sizeof(num_objects_pinned) +
sizeof(num_objects_missing) +
sizeof(num_legacy_snapsets)
,
"object_stat_sum_t have padding");
}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
static void generate_test_instances(std::list<object_stat_sum_t*>& o);
};
WRITE_CLASS_ENCODER(object_stat_sum_t)
bool operator==(const object_stat_sum_t& l, const object_stat_sum_t& r);
/**
* a collection of object stat sums
*
* This is a collection of stat sums over different categories.
*/
struct object_stat_collection_t {
/**************************************************************************
* WARNING: be sure to update the operator== when adding/removing fields! *
**************************************************************************/
object_stat_sum_t sum;
void calc_copies(int nrep) {
sum.calc_copies(nrep);
}
void dump(ceph::Formatter *f) const;
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
static void generate_test_instances(std::list<object_stat_collection_t*>& o);
bool is_zero() const {
return sum.is_zero();
}
void clear() {
sum.clear();
}
void floor(int64_t f) {
sum.floor(f);
}
void add(const object_stat_sum_t& o) {
sum.add(o);
}
void add(const object_stat_collection_t& o) {
sum.add(o.sum);
}
void sub(const object_stat_collection_t& o) {
sum.sub(o.sum);
}
};
WRITE_CLASS_ENCODER(object_stat_collection_t)
inline bool operator==(const object_stat_collection_t& l,
const object_stat_collection_t& r) {
return l.sum == r.sum;
}
enum class scrub_level_t : bool { shallow = false, deep = true };
enum class scrub_type_t : bool { not_repair = false, do_repair = true };
/// is there a scrub in our future?
enum class pg_scrub_sched_status_t : uint16_t {
unknown, ///< status not reported yet
not_queued, ///< not in the OSD's scrub queue. Probably not active.
active, ///< scrubbing
scheduled, ///< scheduled for a scrub at an already determined time
queued, ///< queued to be scrubbed
blocked ///< blocked waiting for objects to be unlocked
};
struct pg_scrubbing_status_t {
utime_t m_scheduled_at{};
int32_t m_duration_seconds{0}; // relevant when scrubbing
pg_scrub_sched_status_t m_sched_status{pg_scrub_sched_status_t::unknown};
bool m_is_active{false};
scrub_level_t m_is_deep{scrub_level_t::shallow};
bool m_is_periodic{true};
};
bool operator==(const pg_scrubbing_status_t& l, const pg_scrubbing_status_t& r);
/** pg_stat
* aggregate stats for a single PG.
*/
struct pg_stat_t {
/**************************************************************************
* WARNING: be sure to update the operator== when adding/removing fields! *
**************************************************************************/
eversion_t version;
version_t reported_seq; // sequence number
epoch_t reported_epoch; // epoch of this report
uint64_t state;
utime_t last_fresh; // last reported
utime_t last_change; // new state != previous state
utime_t last_active; // state & PG_STATE_ACTIVE
utime_t last_peered; // state & PG_STATE_ACTIVE || state & PG_STATE_PEERED
utime_t last_clean; // state & PG_STATE_CLEAN
utime_t last_unstale; // (state & PG_STATE_STALE) == 0
utime_t last_undegraded; // (state & PG_STATE_DEGRADED) == 0
utime_t last_fullsized; // (state & PG_STATE_UNDERSIZED) == 0
eversion_t log_start; // (log_start,version]
eversion_t ondisk_log_start; // there may be more on disk
epoch_t created;
epoch_t last_epoch_clean;
pg_t parent;
__u32 parent_split_bits;
eversion_t last_scrub;
eversion_t last_deep_scrub;
utime_t last_scrub_stamp;
utime_t last_deep_scrub_stamp;
utime_t last_clean_scrub_stamp;
int32_t last_scrub_duration{0};
object_stat_collection_t stats;
int64_t log_size;
int64_t log_dups_size;
int64_t ondisk_log_size; // >= active_log_size
int64_t objects_scrubbed;
double scrub_duration;
std::vector<int32_t> up, acting;
std::vector<pg_shard_t> avail_no_missing;
std::map< std::set<pg_shard_t>, int32_t > object_location_counts;
epoch_t mapping_epoch;
std::vector<int32_t> blocked_by; ///< osds on which the pg is blocked
interval_set<snapid_t> purged_snaps; ///< recently removed snaps that we've purged
utime_t last_became_active;
utime_t last_became_peered;
/// up, acting primaries
int32_t up_primary;
int32_t acting_primary;
// snaptrimq.size() is 64bit, but let's be serious - anything over 50k is
// absurd already, so cap it to 2^32 and save 4 bytes at the same time
uint32_t snaptrimq_len;
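  // (note: pg_stat_t::add() below clamps the aggregated snaptrimq_len at
  // 2^31; see the std::min() in add())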
int64_t objects_trimmed;
double snaptrim_duration;
pg_scrubbing_status_t scrub_sched_status;
bool stats_invalid:1;
/// true if num_objects_dirty is not accurate (because it was not
/// maintained starting from pool creation)
bool dirty_stats_invalid:1;
bool omap_stats_invalid:1;
bool hitset_stats_invalid:1;
bool hitset_bytes_stats_invalid:1;
bool pin_stats_invalid:1;
bool manifest_stats_invalid:1;
pg_stat_t()
: reported_seq(0),
reported_epoch(0),
state(0),
created(0), last_epoch_clean(0),
parent_split_bits(0),
log_size(0), log_dups_size(0),
ondisk_log_size(0),
objects_scrubbed(0),
scrub_duration(0),
mapping_epoch(0),
up_primary(-1),
acting_primary(-1),
snaptrimq_len(0),
objects_trimmed(0),
snaptrim_duration(0.0),
stats_invalid(false),
dirty_stats_invalid(false),
omap_stats_invalid(false),
hitset_stats_invalid(false),
hitset_bytes_stats_invalid(false),
pin_stats_invalid(false),
manifest_stats_invalid(false)
{ }
epoch_t get_effective_last_epoch_clean() const {
if (state & PG_STATE_CLEAN) {
// we are clean as of this report, and should thus take the
// reported epoch
return reported_epoch;
} else {
return last_epoch_clean;
}
}
std::pair<epoch_t, version_t> get_version_pair() const {
return { reported_epoch, reported_seq };
}
void floor(int64_t f) {
stats.floor(f);
if (log_size < f)
log_size = f;
if (ondisk_log_size < f)
ondisk_log_size = f;
if (snaptrimq_len < f)
snaptrimq_len = f;
}
void add_sub_invalid_flags(const pg_stat_t& o) {
    // adding (or subtracting!) invalid stats renders our stats invalid too
stats_invalid |= o.stats_invalid;
dirty_stats_invalid |= o.dirty_stats_invalid;
omap_stats_invalid |= o.omap_stats_invalid;
hitset_stats_invalid |= o.hitset_stats_invalid;
hitset_bytes_stats_invalid |= o.hitset_bytes_stats_invalid;
pin_stats_invalid |= o.pin_stats_invalid;
manifest_stats_invalid |= o.manifest_stats_invalid;
}
void add(const pg_stat_t& o) {
stats.add(o.stats);
log_size += o.log_size;
log_dups_size += o.log_dups_size;
ondisk_log_size += o.ondisk_log_size;
snaptrimq_len = std::min((uint64_t)snaptrimq_len + o.snaptrimq_len,
(uint64_t)(1ull << 31));
add_sub_invalid_flags(o);
}
void sub(const pg_stat_t& o) {
stats.sub(o.stats);
log_size -= o.log_size;
log_dups_size -= o.log_dups_size;
ondisk_log_size -= o.ondisk_log_size;
if (o.snaptrimq_len < snaptrimq_len) {
snaptrimq_len -= o.snaptrimq_len;
} else {
snaptrimq_len = 0;
}
add_sub_invalid_flags(o);
}
bool is_acting_osd(int32_t osd, bool primary) const;
void dump(ceph::Formatter *f) const;
void dump_brief(ceph::Formatter *f) const;
std::string dump_scrub_schedule() const;
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
static void generate_test_instances(std::list<pg_stat_t*>& o);
};
WRITE_CLASS_ENCODER(pg_stat_t)
bool operator==(const pg_stat_t& l, const pg_stat_t& r);
/** store_statfs_t
* ObjectStore full statfs information
*/
struct store_statfs_t
{
uint64_t total = 0; ///< Total bytes
uint64_t available = 0; ///< Free bytes available
uint64_t internally_reserved = 0; ///< Bytes reserved for internal purposes
int64_t allocated = 0; ///< Bytes allocated by the store
int64_t data_stored = 0; ///< Bytes actually stored by the user
int64_t data_compressed = 0; ///< Bytes stored after compression
int64_t data_compressed_allocated = 0; ///< Bytes allocated for compressed data
int64_t data_compressed_original = 0; ///< Bytes that were compressed
int64_t omap_allocated = 0; ///< approx usage of omap data
int64_t internal_metadata = 0; ///< approx usage of internal metadata
void reset() {
*this = store_statfs_t();
}
void floor(int64_t f) {
#define FLOOR(x) if (int64_t(x) < f) x = f
FLOOR(total);
FLOOR(available);
FLOOR(internally_reserved);
FLOOR(allocated);
FLOOR(data_stored);
FLOOR(data_compressed);
FLOOR(data_compressed_allocated);
FLOOR(data_compressed_original);
FLOOR(omap_allocated);
FLOOR(internal_metadata);
#undef FLOOR
}
bool operator ==(const store_statfs_t& other) const;
bool is_zero() const {
return *this == store_statfs_t();
}
uint64_t get_used() const {
return total - available - internally_reserved;
}
// this accumulates both actually used and statfs's internally_reserved
uint64_t get_used_raw() const {
return total - available;
}
float get_used_raw_ratio() const {
if (total) {
return (float)get_used_raw() / (float)total;
} else {
return 0.0;
}
}
// helpers to ease legacy code porting
uint64_t kb_avail() const {
return available >> 10;
}
uint64_t kb() const {
return total >> 10;
}
uint64_t kb_used() const {
return (total - available - internally_reserved) >> 10;
}
uint64_t kb_used_raw() const {
return get_used_raw() >> 10;
}
uint64_t kb_used_data() const {
return allocated >> 10;
}
uint64_t kb_used_omap() const {
return omap_allocated >> 10;
}
uint64_t kb_used_internal_metadata() const {
return internal_metadata >> 10;
}
void add(const store_statfs_t& o) {
total += o.total;
available += o.available;
internally_reserved += o.internally_reserved;
allocated += o.allocated;
data_stored += o.data_stored;
data_compressed += o.data_compressed;
data_compressed_allocated += o.data_compressed_allocated;
data_compressed_original += o.data_compressed_original;
omap_allocated += o.omap_allocated;
internal_metadata += o.internal_metadata;
}
void sub(const store_statfs_t& o) {
total -= o.total;
available -= o.available;
internally_reserved -= o.internally_reserved;
allocated -= o.allocated;
data_stored -= o.data_stored;
data_compressed -= o.data_compressed;
data_compressed_allocated -= o.data_compressed_allocated;
data_compressed_original -= o.data_compressed_original;
omap_allocated -= o.omap_allocated;
internal_metadata -= o.internal_metadata;
}
void dump(ceph::Formatter *f) const;
DENC(store_statfs_t, v, p) {
DENC_START(1, 1, p);
denc(v.total, p);
denc(v.available, p);
denc(v.internally_reserved, p);
denc(v.allocated, p);
denc(v.data_stored, p);
denc(v.data_compressed, p);
denc(v.data_compressed_allocated, p);
denc(v.data_compressed_original, p);
denc(v.omap_allocated, p);
denc(v.internal_metadata, p);
DENC_FINISH(p);
}
static void generate_test_instances(std::list<store_statfs_t*>& o);
};
WRITE_CLASS_DENC(store_statfs_t)
std::ostream &operator<<(std::ostream &lhs, const store_statfs_t &rhs);
/** osd_stat
* aggregate stats for an osd
*/
struct osd_stat_t {
store_statfs_t statfs;
std::vector<int> hb_peers;
int32_t snap_trim_queue_len, num_snap_trimming;
uint64_t num_shards_repaired;
pow2_hist_t op_queue_age_hist;
objectstore_perf_stat_t os_perf_stat;
osd_alerts_t os_alerts;
epoch_t up_from = 0;
uint64_t seq = 0;
uint32_t num_pgs = 0;
uint32_t num_osds = 0;
uint32_t num_per_pool_osds = 0;
uint32_t num_per_pool_omap_osds = 0;
struct Interfaces {
uint32_t last_update; // in seconds
uint32_t back_pingtime[3];
uint32_t back_min[3];
uint32_t back_max[3];
uint32_t back_last;
uint32_t front_pingtime[3];
uint32_t front_min[3];
uint32_t front_max[3];
uint32_t front_last;
};
std::map<int, Interfaces> hb_pingtime; ///< map of osd id to Interfaces
osd_stat_t() : snap_trim_queue_len(0), num_snap_trimming(0),
num_shards_repaired(0) {}
void add(const osd_stat_t& o) {
statfs.add(o.statfs);
snap_trim_queue_len += o.snap_trim_queue_len;
num_snap_trimming += o.num_snap_trimming;
num_shards_repaired += o.num_shards_repaired;
op_queue_age_hist.add(o.op_queue_age_hist);
os_perf_stat.add(o.os_perf_stat);
num_pgs += o.num_pgs;
num_osds += o.num_osds;
num_per_pool_osds += o.num_per_pool_osds;
num_per_pool_omap_osds += o.num_per_pool_omap_osds;
for (const auto& a : o.os_alerts) {
auto& target = os_alerts[a.first];
for (auto& i : a.second) {
target.emplace(i.first, i.second);
}
}
}
void sub(const osd_stat_t& o) {
statfs.sub(o.statfs);
snap_trim_queue_len -= o.snap_trim_queue_len;
num_snap_trimming -= o.num_snap_trimming;
num_shards_repaired -= o.num_shards_repaired;
op_queue_age_hist.sub(o.op_queue_age_hist);
os_perf_stat.sub(o.os_perf_stat);
num_pgs -= o.num_pgs;
num_osds -= o.num_osds;
num_per_pool_osds -= o.num_per_pool_osds;
num_per_pool_omap_osds -= o.num_per_pool_omap_osds;
for (const auto& a : o.os_alerts) {
auto& target = os_alerts[a.first];
for (auto& i : a.second) {
target.erase(i.first);
}
if (target.empty()) {
os_alerts.erase(a.first);
}
}
}
void dump(ceph::Formatter *f, bool with_net = true) const;
void dump_ping_time(ceph::Formatter *f) const;
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
static void generate_test_instances(std::list<osd_stat_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(osd_stat_t)
inline bool operator==(const osd_stat_t& l, const osd_stat_t& r) {
return l.statfs == r.statfs &&
l.snap_trim_queue_len == r.snap_trim_queue_len &&
l.num_snap_trimming == r.num_snap_trimming &&
l.num_shards_repaired == r.num_shards_repaired &&
l.hb_peers == r.hb_peers &&
l.op_queue_age_hist == r.op_queue_age_hist &&
l.os_perf_stat == r.os_perf_stat &&
l.num_pgs == r.num_pgs &&
l.num_osds == r.num_osds &&
l.num_per_pool_osds == r.num_per_pool_osds &&
l.num_per_pool_omap_osds == r.num_per_pool_omap_osds;
}
inline bool operator!=(const osd_stat_t& l, const osd_stat_t& r) {
return !(l == r);
}
inline std::ostream& operator<<(std::ostream& out, const osd_stat_t& s) {
return out << "osd_stat(" << s.statfs << ", "
<< "peers " << s.hb_peers
<< " op hist " << s.op_queue_age_hist.h
<< ")";
}
/*
* summation over an entire pool
*/
struct pool_stat_t {
object_stat_collection_t stats;
store_statfs_t store_stats;
int64_t log_size;
int64_t ondisk_log_size; // >= active_log_size
int32_t up; ///< number of up replicas or shards
int32_t acting; ///< number of acting replicas or shards
int32_t num_store_stats; ///< amount of store_stats accumulated
pool_stat_t() : log_size(0), ondisk_log_size(0), up(0), acting(0),
num_store_stats(0)
{ }
void floor(int64_t f) {
stats.floor(f);
store_stats.floor(f);
if (log_size < f)
log_size = f;
if (ondisk_log_size < f)
ondisk_log_size = f;
if (up < f)
up = f;
if (acting < f)
acting = f;
if (num_store_stats < f)
num_store_stats = f;
}
void add(const store_statfs_t& o) {
store_stats.add(o);
++num_store_stats;
}
void sub(const store_statfs_t& o) {
store_stats.sub(o);
--num_store_stats;
}
void add(const pg_stat_t& o) {
stats.add(o.stats);
log_size += o.log_size;
ondisk_log_size += o.ondisk_log_size;
up += o.up.size();
acting += o.acting.size();
}
void sub(const pg_stat_t& o) {
stats.sub(o.stats);
log_size -= o.log_size;
ondisk_log_size -= o.ondisk_log_size;
up -= o.up.size();
acting -= o.acting.size();
}
bool is_zero() const {
return (stats.is_zero() &&
store_stats.is_zero() &&
log_size == 0 &&
ondisk_log_size == 0 &&
up == 0 &&
acting == 0 &&
num_store_stats == 0);
}
// helper accessors to retrieve used/netto bytes depending on the
// collection method: new per-pool objectstore report or legacy PG
// summation at OSD.
  // In legacy mode the used and netto values are the same. But for the new
  // per-pool collection, 'used' is the amount of space ALLOCATED at all
  // related OSDs and 'netto' is the amount of stored user data.
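  // Illustrative example: for a 3x replicated pool holding 1 GiB of user
  // data, get_allocated_data_bytes(true) reports roughly 3 GiB (raw
  // allocation across all replicas) while get_user_data_bytes(3.0, true)
  // reports roughly 1 GiB.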
uint64_t get_allocated_data_bytes(bool per_pool) const {
if (per_pool) {
return store_stats.allocated;
} else {
// legacy mode, use numbers from 'stats'
return stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
}
}
uint64_t get_allocated_omap_bytes(bool per_pool_omap) const {
if (per_pool_omap) {
return store_stats.omap_allocated;
} else {
      // nautilus-era bluestore does not break out omap usage by pool; report
      // the scrub-collected value instead. this will be imprecise in that it
      // won't account for any storage overhead/efficiency.
return stats.sum.num_omap_bytes;
}
}
uint64_t get_user_data_bytes(float raw_used_rate, ///< space amp factor
bool per_pool) const {
// NOTE: we need the space amp factor so that we can work backwards from
// the raw utilization to the amount of data that the user actually stored.
if (per_pool) {
return raw_used_rate ? store_stats.data_stored / raw_used_rate : 0;
} else {
// legacy mode, use numbers from 'stats'. note that we do NOT use the
// raw_used_rate factor here because we are working from the PG stats
// directly.
return stats.sum.num_bytes + stats.sum.num_bytes_hit_set_archive;
}
}
uint64_t get_user_omap_bytes(float raw_used_rate, ///< space amp factor
bool per_pool_omap) const {
if (per_pool_omap) {
return raw_used_rate ? store_stats.omap_allocated / raw_used_rate : 0;
} else {
// omap usage is lazily reported during scrub; this value may lag.
return stats.sum.num_omap_bytes;
}
}
void dump(ceph::Formatter *f) const;
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
static void generate_test_instances(std::list<pool_stat_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(pool_stat_t)
// -----------------------------------------
/**
* pg_hit_set_info_t - information about a single recorded HitSet
*
* Track basic metadata about a HitSet, like the number of insertions
* and the time range it covers.
*/
struct pg_hit_set_info_t {
utime_t begin, end; ///< time interval
eversion_t version; ///< version this HitSet object was written
bool using_gmt; ///< use gmt for creating the hit_set archive object name
friend bool operator==(const pg_hit_set_info_t& l,
const pg_hit_set_info_t& r) {
return
l.begin == r.begin &&
l.end == r.end &&
l.version == r.version &&
l.using_gmt == r.using_gmt;
}
explicit pg_hit_set_info_t(bool using_gmt = true)
: using_gmt(using_gmt) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_hit_set_info_t*>& o);
};
WRITE_CLASS_ENCODER(pg_hit_set_info_t)
/**
* pg_hit_set_history_t - information about a history of hitsets
*
* Include information about the currently accumulating hit set as well
* as archived/historical ones.
*/
struct pg_hit_set_history_t {
eversion_t current_last_update; ///< last version inserted into current set
std::list<pg_hit_set_info_t> history; ///< archived sets, sorted oldest -> newest
friend bool operator==(const pg_hit_set_history_t& l,
const pg_hit_set_history_t& r) {
return
l.current_last_update == r.current_last_update &&
l.history == r.history;
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_hit_set_history_t*>& o);
};
WRITE_CLASS_ENCODER(pg_hit_set_history_t)
// -----------------------------------------
/**
* pg_history_t - information about recent pg peering/mapping history
*
* This is aggressively shared between OSDs to bound the amount of past
* history they need to worry about.
*/
struct pg_history_t {
epoch_t epoch_created = 0; // epoch in which *pg* was created (pool or pg)
epoch_t epoch_pool_created = 0; // epoch in which *pool* was created
// (note: may be pg creation epoch for
// pre-luminous clusters)
  epoch_t last_epoch_started = 0;  // lower bound on last epoch started (anywhere, not necessarily locally)
// https://docs.ceph.com/docs/master/dev/osd_internals/last_epoch_started/
  epoch_t last_interval_started = 0;  // first epoch of last_epoch_started interval
  epoch_t last_epoch_clean = 0;       // lower bound on last epoch the PG was completely clean.
  epoch_t last_interval_clean = 0;    // first epoch of last_epoch_clean interval
  epoch_t last_epoch_split = 0;       // as parent or child
  epoch_t last_epoch_marked_full = 0; // pool or cluster
/**
* In the event of a map discontinuity, same_*_since may reflect the first
* map the osd has seen in the new map sequence rather than the actual start
* of the interval. This is ok since a discontinuity at epoch e means there
* must have been a clean interval between e and now and that we cannot be
* in the active set during the interval containing e.
*/
  epoch_t same_up_since = 0;       // same up set since
  epoch_t same_interval_since = 0; // same acting AND up set since
  epoch_t same_primary_since = 0;  // same primary at least back through this epoch.
eversion_t last_scrub;
eversion_t last_deep_scrub;
utime_t last_scrub_stamp;
utime_t last_deep_scrub_stamp;
utime_t last_clean_scrub_stamp;
/// upper bound on how long prior interval readable (relative to encode time)
ceph::timespan prior_readable_until_ub = ceph::timespan::zero();
friend bool operator==(const pg_history_t& l, const pg_history_t& r) {
return
l.epoch_created == r.epoch_created &&
l.epoch_pool_created == r.epoch_pool_created &&
l.last_epoch_started == r.last_epoch_started &&
l.last_interval_started == r.last_interval_started &&
l.last_epoch_clean == r.last_epoch_clean &&
l.last_interval_clean == r.last_interval_clean &&
l.last_epoch_split == r.last_epoch_split &&
l.last_epoch_marked_full == r.last_epoch_marked_full &&
l.same_up_since == r.same_up_since &&
l.same_interval_since == r.same_interval_since &&
l.same_primary_since == r.same_primary_since &&
l.last_scrub == r.last_scrub &&
l.last_deep_scrub == r.last_deep_scrub &&
l.last_scrub_stamp == r.last_scrub_stamp &&
l.last_deep_scrub_stamp == r.last_deep_scrub_stamp &&
l.last_clean_scrub_stamp == r.last_clean_scrub_stamp &&
l.prior_readable_until_ub == r.prior_readable_until_ub;
}
pg_history_t() {}
pg_history_t(epoch_t created, utime_t stamp)
: epoch_created(created),
epoch_pool_created(created),
same_up_since(created),
same_interval_since(created),
same_primary_since(created),
last_scrub_stamp(stamp),
last_deep_scrub_stamp(stamp),
last_clean_scrub_stamp(stamp) {}
bool merge(const pg_history_t &other) {
// Here, we only update the fields which cannot be calculated from the OSDmap.
bool modified = false;
if (epoch_created < other.epoch_created) {
epoch_created = other.epoch_created;
modified = true;
}
if (epoch_pool_created < other.epoch_pool_created) {
// FIXME: for jewel compat only; this should either be 0 or always the
// same value across all pg instances.
epoch_pool_created = other.epoch_pool_created;
modified = true;
}
if (last_epoch_started < other.last_epoch_started) {
last_epoch_started = other.last_epoch_started;
modified = true;
}
if (last_interval_started < other.last_interval_started) {
last_interval_started = other.last_interval_started;
// if we are learning about a newer *started* interval, our
// readable_until_ub is obsolete
prior_readable_until_ub = other.prior_readable_until_ub;
modified = true;
} else if (other.last_interval_started == last_interval_started &&
other.prior_readable_until_ub < prior_readable_until_ub) {
      // if other is the *same* interval, then pull our upper bound in
// if they have a tighter bound.
prior_readable_until_ub = other.prior_readable_until_ub;
modified = true;
}
if (last_epoch_clean < other.last_epoch_clean) {
last_epoch_clean = other.last_epoch_clean;
modified = true;
}
if (last_interval_clean < other.last_interval_clean) {
last_interval_clean = other.last_interval_clean;
modified = true;
}
if (last_epoch_split < other.last_epoch_split) {
last_epoch_split = other.last_epoch_split;
modified = true;
}
if (last_epoch_marked_full < other.last_epoch_marked_full) {
last_epoch_marked_full = other.last_epoch_marked_full;
modified = true;
}
if (other.last_scrub > last_scrub) {
last_scrub = other.last_scrub;
modified = true;
}
if (other.last_scrub_stamp > last_scrub_stamp) {
last_scrub_stamp = other.last_scrub_stamp;
modified = true;
}
if (other.last_deep_scrub > last_deep_scrub) {
last_deep_scrub = other.last_deep_scrub;
modified = true;
}
if (other.last_deep_scrub_stamp > last_deep_scrub_stamp) {
last_deep_scrub_stamp = other.last_deep_scrub_stamp;
modified = true;
}
if (other.last_clean_scrub_stamp > last_clean_scrub_stamp) {
last_clean_scrub_stamp = other.last_clean_scrub_stamp;
modified = true;
}
return modified;
}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_history_t*>& o);
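  // refresh_prior_readable_until_ub() converts an absolute upper bound (ub,
  // relative to osd startup) into the relative form stored in
  // prior_readable_until_ub. For example, if now = 10s and ub = 25s, the
  // prior interval may remain readable for another 15s, so 15s is stored and
  // 25s is returned; once ub has passed, zero is stored and returned.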
ceph::signedspan refresh_prior_readable_until_ub(
ceph::signedspan now, ///< now, relative to osd startup_time
ceph::signedspan ub) { ///< ub, relative to osd startup_time
if (now >= ub) {
// prior interval(s) are unreadable; we can zero the upper bound
prior_readable_until_ub = ceph::signedspan::zero();
return ceph::signedspan::zero();
} else {
prior_readable_until_ub = ub - now;
return ub;
}
}
ceph::signedspan get_prior_readable_until_ub(ceph::signedspan now) {
if (prior_readable_until_ub == ceph::signedspan::zero()) {
return ceph::signedspan::zero();
}
return now + prior_readable_until_ub;
}
};
WRITE_CLASS_ENCODER(pg_history_t)
inline std::ostream& operator<<(std::ostream& out, const pg_history_t& h) {
out << "ec=" << h.epoch_created << "/" << h.epoch_pool_created
<< " lis/c=" << h.last_interval_started
<< "/" << h.last_interval_clean
<< " les/c/f=" << h.last_epoch_started << "/" << h.last_epoch_clean
<< "/" << h.last_epoch_marked_full
<< " sis=" << h.same_interval_since;
if (h.prior_readable_until_ub != ceph::timespan::zero()) {
out << " pruub=" << h.prior_readable_until_ub;
}
return out;
}
/**
* pg_info_t - summary of PG statistics.
*
* some notes:
* - last_complete implies we have all objects that existed as of that
* stamp, OR a newer object, OR have already applied a later delete.
* - if last_complete >= log.tail, then we know pg contents thru log.head.
* otherwise, we have no idea what the pg is supposed to contain.
*/
struct pg_info_t {
spg_t pgid;
eversion_t last_update; ///< last object version applied to store.
eversion_t last_complete; ///< last version pg was complete through.
epoch_t last_epoch_started; ///< last epoch at which this pg started on this osd
epoch_t last_interval_started; ///< first epoch of last_epoch_started interval
version_t last_user_version; ///< last user object version applied to store
eversion_t log_tail; ///< oldest log entry.
hobject_t last_backfill; ///< objects >= this and < last_complete may be missing
interval_set<snapid_t> purged_snaps;
pg_stat_t stats;
pg_history_t history;
pg_hit_set_history_t hit_set;
friend bool operator==(const pg_info_t& l, const pg_info_t& r) {
return
l.pgid == r.pgid &&
l.last_update == r.last_update &&
l.last_complete == r.last_complete &&
l.last_epoch_started == r.last_epoch_started &&
l.last_interval_started == r.last_interval_started &&
l.last_user_version == r.last_user_version &&
l.log_tail == r.log_tail &&
l.last_backfill == r.last_backfill &&
l.purged_snaps == r.purged_snaps &&
l.stats == r.stats &&
l.history == r.history &&
l.hit_set == r.hit_set;
}
pg_info_t()
: last_epoch_started(0),
last_interval_started(0),
last_user_version(0),
last_backfill(hobject_t::get_max())
{ }
// cppcheck-suppress noExplicitConstructor
pg_info_t(spg_t p)
: pgid(p),
last_epoch_started(0),
last_interval_started(0),
last_user_version(0),
last_backfill(hobject_t::get_max())
{ }
void set_last_backfill(hobject_t pos) {
last_backfill = pos;
}
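  // quick predicates: is_empty() == no updates applied yet; dne() == the pg
  // was never created; has_missing() == last_complete lags last_update;
  // is_incomplete() == backfill has not finished (last_backfill < MAX).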
bool is_empty() const { return last_update.version == 0; }
bool dne() const { return history.epoch_created == 0; }
bool has_missing() const { return last_complete != last_update; }
bool is_incomplete() const { return !last_backfill.is_max(); }
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_info_t*>& o);
};
WRITE_CLASS_ENCODER(pg_info_t)
inline std::ostream& operator<<(std::ostream& out, const pg_info_t& pgi)
{
out << pgi.pgid << "(";
if (pgi.dne())
out << " DNE";
if (pgi.is_empty())
out << " empty";
else {
out << " v " << pgi.last_update;
if (pgi.last_complete != pgi.last_update)
out << " lc " << pgi.last_complete;
out << " (" << pgi.log_tail << "," << pgi.last_update << "]";
}
if (pgi.is_incomplete())
out << " lb " << pgi.last_backfill;
//out << " c " << pgi.epoch_created;
out << " local-lis/les=" << pgi.last_interval_started
<< "/" << pgi.last_epoch_started;
out << " n=" << pgi.stats.stats.sum.num_objects;
out << " " << pgi.history
<< ")";
return out;
}
/**
* pg_fast_info_t - common pg_info_t fields
*
* These are the fields of pg_info_t (and children) that are updated for
* most IO operations.
*
* ** WARNING **
* Because we rely on these fields to be applied to the normal
* info struct, adding a new field here that is not also new in info
* means that we must set an incompat OSD feature bit!
*/
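/*
 * A sketch of the intended use (inferred from populate_from() and
 * try_apply_to() below): the OSD can persist this compact struct on most IOs
 * instead of re-encoding the full pg_info_t, and fold it back into the full
 * info the next time the full struct is loaded.
 */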
struct pg_fast_info_t {
eversion_t last_update;
eversion_t last_complete;
version_t last_user_version;
struct { // pg_stat_t stats
eversion_t version;
version_t reported_seq;
utime_t last_fresh;
utime_t last_active;
utime_t last_peered;
utime_t last_clean;
utime_t last_unstale;
utime_t last_undegraded;
utime_t last_fullsized;
int64_t log_size; // (also ondisk_log_size, which has the same value)
struct { // object_stat_collection_t stats;
    struct {  // object_stat_sum_t sum
int64_t num_bytes; // in bytes
int64_t num_objects;
int64_t num_object_copies;
int64_t num_rd;
int64_t num_rd_kb;
int64_t num_wr;
int64_t num_wr_kb;
int64_t num_objects_dirty;
} sum;
} stats;
} stats;
void populate_from(const pg_info_t& info) {
last_update = info.last_update;
last_complete = info.last_complete;
last_user_version = info.last_user_version;
stats.version = info.stats.version;
stats.reported_seq = info.stats.reported_seq;
stats.last_fresh = info.stats.last_fresh;
stats.last_active = info.stats.last_active;
stats.last_peered = info.stats.last_peered;
stats.last_clean = info.stats.last_clean;
stats.last_unstale = info.stats.last_unstale;
stats.last_undegraded = info.stats.last_undegraded;
stats.last_fullsized = info.stats.last_fullsized;
stats.log_size = info.stats.log_size;
stats.stats.sum.num_bytes = info.stats.stats.sum.num_bytes;
stats.stats.sum.num_objects = info.stats.stats.sum.num_objects;
stats.stats.sum.num_object_copies = info.stats.stats.sum.num_object_copies;
stats.stats.sum.num_rd = info.stats.stats.sum.num_rd;
stats.stats.sum.num_rd_kb = info.stats.stats.sum.num_rd_kb;
stats.stats.sum.num_wr = info.stats.stats.sum.num_wr;
stats.stats.sum.num_wr_kb = info.stats.stats.sum.num_wr_kb;
stats.stats.sum.num_objects_dirty = info.stats.stats.sum.num_objects_dirty;
}
bool try_apply_to(pg_info_t* info) {
if (last_update <= info->last_update)
return false;
info->last_update = last_update;
info->last_complete = last_complete;
info->last_user_version = last_user_version;
info->stats.version = stats.version;
info->stats.reported_seq = stats.reported_seq;
info->stats.last_fresh = stats.last_fresh;
info->stats.last_active = stats.last_active;
info->stats.last_peered = stats.last_peered;
info->stats.last_clean = stats.last_clean;
info->stats.last_unstale = stats.last_unstale;
info->stats.last_undegraded = stats.last_undegraded;
info->stats.last_fullsized = stats.last_fullsized;
info->stats.log_size = stats.log_size;
info->stats.ondisk_log_size = stats.log_size;
info->stats.stats.sum.num_bytes = stats.stats.sum.num_bytes;
info->stats.stats.sum.num_objects = stats.stats.sum.num_objects;
info->stats.stats.sum.num_object_copies = stats.stats.sum.num_object_copies;
info->stats.stats.sum.num_rd = stats.stats.sum.num_rd;
info->stats.stats.sum.num_rd_kb = stats.stats.sum.num_rd_kb;
info->stats.stats.sum.num_wr = stats.stats.sum.num_wr;
info->stats.stats.sum.num_wr_kb = stats.stats.sum.num_wr_kb;
info->stats.stats.sum.num_objects_dirty = stats.stats.sum.num_objects_dirty;
return true;
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(last_update, bl);
encode(last_complete, bl);
encode(last_user_version, bl);
encode(stats.version, bl);
encode(stats.reported_seq, bl);
encode(stats.last_fresh, bl);
encode(stats.last_active, bl);
encode(stats.last_peered, bl);
encode(stats.last_clean, bl);
encode(stats.last_unstale, bl);
encode(stats.last_undegraded, bl);
encode(stats.last_fullsized, bl);
encode(stats.log_size, bl);
encode(stats.stats.sum.num_bytes, bl);
encode(stats.stats.sum.num_objects, bl);
encode(stats.stats.sum.num_object_copies, bl);
encode(stats.stats.sum.num_rd, bl);
encode(stats.stats.sum.num_rd_kb, bl);
encode(stats.stats.sum.num_wr, bl);
encode(stats.stats.sum.num_wr_kb, bl);
encode(stats.stats.sum.num_objects_dirty, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& p) {
DECODE_START(1, p);
decode(last_update, p);
decode(last_complete, p);
decode(last_user_version, p);
decode(stats.version, p);
decode(stats.reported_seq, p);
decode(stats.last_fresh, p);
decode(stats.last_active, p);
decode(stats.last_peered, p);
decode(stats.last_clean, p);
decode(stats.last_unstale, p);
decode(stats.last_undegraded, p);
decode(stats.last_fullsized, p);
decode(stats.log_size, p);
decode(stats.stats.sum.num_bytes, p);
decode(stats.stats.sum.num_objects, p);
decode(stats.stats.sum.num_object_copies, p);
decode(stats.stats.sum.num_rd, p);
decode(stats.stats.sum.num_rd_kb, p);
decode(stats.stats.sum.num_wr, p);
decode(stats.stats.sum.num_wr_kb, p);
decode(stats.stats.sum.num_objects_dirty, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(pg_fast_info_t)
/**
* PastIntervals -- information needed to determine the PriorSet and
* the might_have_unfound set
*/
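/*
 * An "interval" here is a maximal range of epochs over which the PG's up and
 * acting sets (and the other attributes checked by is_new_interval()) did
 * not change.
 */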
class PastIntervals {
#ifdef WITH_SEASTAR
using OSDMapRef = boost::local_shared_ptr<const OSDMap>;
#else
using OSDMapRef = std::shared_ptr<const OSDMap>;
#endif
public:
struct pg_interval_t {
std::vector<int32_t> up, acting;
epoch_t first, last;
bool maybe_went_rw;
int32_t primary;
int32_t up_primary;
pg_interval_t()
: first(0), last(0),
maybe_went_rw(false),
primary(-1),
up_primary(-1)
{}
pg_interval_t(
std::vector<int32_t> &&up,
std::vector<int32_t> &&acting,
epoch_t first,
epoch_t last,
bool maybe_went_rw,
int32_t primary,
int32_t up_primary)
: up(up), acting(acting), first(first), last(last),
maybe_went_rw(maybe_went_rw), primary(primary), up_primary(up_primary)
{}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_interval_t*>& o);
};
PastIntervals();
PastIntervals(PastIntervals &&rhs) = default;
PastIntervals &operator=(PastIntervals &&rhs) = default;
PastIntervals(const PastIntervals &rhs);
PastIntervals &operator=(const PastIntervals &rhs);
class interval_rep {
public:
virtual size_t size() const = 0;
virtual bool empty() const = 0;
virtual void clear() = 0;
virtual std::pair<epoch_t, epoch_t> get_bounds() const = 0;
virtual std::set<pg_shard_t> get_all_participants(
bool ec_pool) const = 0;
virtual void add_interval(bool ec_pool, const pg_interval_t &interval) = 0;
virtual std::unique_ptr<interval_rep> clone() const = 0;
virtual std::ostream &print(std::ostream &out) const = 0;
virtual void encode(ceph::buffer::list &bl) const = 0;
virtual void decode(ceph::buffer::list::const_iterator &bl) = 0;
virtual void dump(ceph::Formatter *f) const = 0;
virtual void iterate_mayberw_back_to(
epoch_t les,
std::function<void(epoch_t, const std::set<pg_shard_t> &)> &&f) const = 0;
virtual bool has_full_intervals() const { return false; }
virtual void iterate_all_intervals(
std::function<void(const pg_interval_t &)> &&f) const {
ceph_assert(!has_full_intervals());
ceph_abort_msg("not valid for this implementation");
}
virtual void adjust_start_backwards(epoch_t last_epoch_clean) = 0;
virtual ~interval_rep() {}
};
friend class pi_compact_rep;
private:
std::unique_ptr<interval_rep> past_intervals;
explicit PastIntervals(interval_rep *rep) : past_intervals(rep) {}
public:
void add_interval(bool ec_pool, const pg_interval_t &interval) {
ceph_assert(past_intervals);
return past_intervals->add_interval(ec_pool, interval);
}
void encode(ceph::buffer::list &bl) const {
ENCODE_START(1, 1, bl);
if (past_intervals) {
__u8 type = 2;
encode(type, bl);
past_intervals->encode(bl);
} else {
encode((__u8)0, bl);
}
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const {
ceph_assert(past_intervals);
past_intervals->dump(f);
}
static void generate_test_instances(std::list<PastIntervals *> & o);
/**
* Determines whether there is an interval change
*/
static bool is_new_interval(
int old_acting_primary,
int new_acting_primary,
const std::vector<int> &old_acting,
const std::vector<int> &new_acting,
int old_up_primary,
int new_up_primary,
const std::vector<int> &old_up,
const std::vector<int> &new_up,
int old_size,
int new_size,
int old_min_size,
int new_min_size,
unsigned old_pg_num,
unsigned new_pg_num,
unsigned old_pg_num_pending,
unsigned new_pg_num_pending,
bool old_sort_bitwise,
bool new_sort_bitwise,
bool old_recovery_deletes,
bool new_recovery_deletes,
uint32_t old_crush_count,
uint32_t new_crush_count,
uint32_t old_crush_target,
uint32_t new_crush_target,
uint32_t old_crush_barrier,
uint32_t new_crush_barrier,
int32_t old_crush_member,
int32_t new_crush_member,
pg_t pgid
);
/**
* Determines whether there is an interval change
*/
static bool is_new_interval(
int old_acting_primary, ///< [in] primary as of lastmap
    int new_acting_primary,                     ///< [in] primary as of osdmap
const std::vector<int> &old_acting, ///< [in] acting as of lastmap
const std::vector<int> &new_acting, ///< [in] acting as of osdmap
int old_up_primary, ///< [in] up primary of lastmap
int new_up_primary, ///< [in] up primary of osdmap
const std::vector<int> &old_up, ///< [in] up as of lastmap
const std::vector<int> &new_up, ///< [in] up as of osdmap
const OSDMap *osdmap, ///< [in] current map
const OSDMap *lastmap, ///< [in] last map
pg_t pgid ///< [in] pgid for pg
);
/**
* Integrates a new map into *past_intervals, returns true
* if an interval was closed out.
*/
static bool check_new_interval(
int old_acting_primary, ///< [in] primary as of lastmap
int new_acting_primary, ///< [in] primary as of osdmap
const std::vector<int> &old_acting, ///< [in] acting as of lastmap
const std::vector<int> &new_acting, ///< [in] acting as of osdmap
int old_up_primary, ///< [in] up primary of lastmap
int new_up_primary, ///< [in] up primary of osdmap
const std::vector<int> &old_up, ///< [in] up as of lastmap
const std::vector<int> &new_up, ///< [in] up as of osdmap
epoch_t same_interval_since, ///< [in] as of osdmap
epoch_t last_epoch_clean, ///< [in] current
const OSDMap *osdmap, ///< [in] current map
const OSDMap *lastmap, ///< [in] last map
pg_t pgid, ///< [in] pgid for pg
const IsPGRecoverablePredicate &could_have_gone_active, ///< [in] predicate whether the pg can be active
PastIntervals *past_intervals, ///< [out] intervals
std::ostream *out = 0 ///< [out] debug ostream
);
static bool check_new_interval(
int old_acting_primary, ///< [in] primary as of lastmap
int new_acting_primary, ///< [in] primary as of osdmap
const std::vector<int> &old_acting, ///< [in] acting as of lastmap
const std::vector<int> &new_acting, ///< [in] acting as of osdmap
int old_up_primary, ///< [in] up primary of lastmap
int new_up_primary, ///< [in] up primary of osdmap
const std::vector<int> &old_up, ///< [in] up as of lastmap
const std::vector<int> &new_up, ///< [in] up as of osdmap
epoch_t same_interval_since, ///< [in] as of osdmap
epoch_t last_epoch_clean, ///< [in] current
OSDMapRef osdmap, ///< [in] current map
OSDMapRef lastmap, ///< [in] last map
pg_t pgid, ///< [in] pgid for pg
const IsPGRecoverablePredicate &could_have_gone_active, ///< [in] predicate whether the pg can be active
PastIntervals *past_intervals, ///< [out] intervals
std::ostream *out = 0 ///< [out] debug ostream
) {
return check_new_interval(
old_acting_primary, new_acting_primary,
old_acting, new_acting,
old_up_primary, new_up_primary,
old_up, new_up,
same_interval_since, last_epoch_clean,
osdmap.get(), lastmap.get(),
pgid,
could_have_gone_active,
past_intervals,
out);
}
friend std::ostream& operator<<(std::ostream& out, const PastIntervals &i);
template <typename F>
void iterate_mayberw_back_to(
epoch_t les,
F &&f) const {
ceph_assert(past_intervals);
past_intervals->iterate_mayberw_back_to(les, std::forward<F>(f));
}
void clear() {
ceph_assert(past_intervals);
past_intervals->clear();
}
/**
* Should return a value which gives an indication of the amount
* of state contained
*/
size_t size() const {
ceph_assert(past_intervals);
return past_intervals->size();
}
bool empty() const {
ceph_assert(past_intervals);
return past_intervals->empty();
}
void swap(PastIntervals &other) {
using std::swap;
swap(other.past_intervals, past_intervals);
}
/**
* Return all shards which have been in the acting set back to the
* latest epoch to which we have trimmed except for pg_whoami
*/
std::set<pg_shard_t> get_might_have_unfound(
pg_shard_t pg_whoami,
bool ec_pool) const {
ceph_assert(past_intervals);
auto ret = past_intervals->get_all_participants(ec_pool);
ret.erase(pg_whoami);
return ret;
}
/**
* Return all shards which we might want to talk to for peering
*/
std::set<pg_shard_t> get_all_probe(
bool ec_pool) const {
ceph_assert(past_intervals);
return past_intervals->get_all_participants(ec_pool);
}
/* Return the set of epochs [start, end) represented by the
* past_interval set.
*/
std::pair<epoch_t, epoch_t> get_bounds() const {
ceph_assert(past_intervals);
return past_intervals->get_bounds();
}
void adjust_start_backwards(epoch_t last_epoch_clean) {
ceph_assert(past_intervals);
past_intervals->adjust_start_backwards(last_epoch_clean);
}
enum osd_state_t {
UP,
DOWN,
DNE,
LOST
};
struct PriorSet {
bool ec_pool = false;
std::set<pg_shard_t> probe; ///< current+prior OSDs we need to probe.
std::set<int> down; ///< down osds that would normally be in @a probe and might be interesting.
std::map<int, epoch_t> blocked_by; ///< current lost_at values for any OSDs in cur set for which (re)marking them lost would affect cur set
bool pg_down = false; ///< some down osds are included in @a cur; the DOWN pg state bit should be set.
const IsPGRecoverablePredicate* pcontdec = nullptr;
PriorSet() = default;
PriorSet(PriorSet &&) = default;
PriorSet &operator=(PriorSet &&) = default;
PriorSet &operator=(const PriorSet &) = delete;
PriorSet(const PriorSet &) = delete;
bool operator==(const PriorSet &rhs) const {
return (ec_pool == rhs.ec_pool) &&
(probe == rhs.probe) &&
(down == rhs.down) &&
(blocked_by == rhs.blocked_by) &&
(pg_down == rhs.pg_down);
}
bool affected_by_map(
const OSDMap &osdmap,
const DoutPrefixProvider *dpp) const;
// For verifying tests
PriorSet(
bool ec_pool,
std::set<pg_shard_t> probe,
std::set<int> down,
std::map<int, epoch_t> blocked_by,
bool pg_down,
const IsPGRecoverablePredicate *pcontdec)
: ec_pool(ec_pool), probe(probe), down(down), blocked_by(blocked_by),
pg_down(pg_down), pcontdec(pcontdec) {}
private:
template <typename F>
PriorSet(
const PastIntervals &past_intervals,
bool ec_pool,
epoch_t last_epoch_started,
const IsPGRecoverablePredicate *c,
F f,
const std::vector<int> &up,
const std::vector<int> &acting,
const DoutPrefixProvider *dpp);
friend class PastIntervals;
};
template <typename... Args>
PriorSet get_prior_set(Args&&... args) const {
return PriorSet(*this, std::forward<Args>(args)...);
}
};
WRITE_CLASS_ENCODER(PastIntervals)
std::ostream& operator<<(std::ostream& out, const PastIntervals::pg_interval_t& i);
std::ostream& operator<<(std::ostream& out, const PastIntervals &i);
std::ostream& operator<<(std::ostream& out, const PastIntervals::PriorSet &i);
template <typename F>
PastIntervals::PriorSet::PriorSet(
const PastIntervals &past_intervals,
bool ec_pool,
epoch_t last_epoch_started,
const IsPGRecoverablePredicate *c,
F f,
const std::vector<int> &up,
const std::vector<int> &acting,
const DoutPrefixProvider *dpp)
: ec_pool(ec_pool), pg_down(false), pcontdec(c)
{
/*
* We have to be careful to gracefully deal with situations like
* so. Say we have a power outage or something that takes out both
* OSDs, but the monitor doesn't mark them down in the same epoch.
* The history may look like
*
* 1: A B
* 2: B
* 3: let's say B dies for good, too (say, from the power spike)
* 4: A
*
* which makes it look like B may have applied updates to the PG
* that we need in order to proceed. This sucks...
*
* To minimize the risk of this happening, we CANNOT go active if
* _any_ OSDs in the prior set are down until we send an MOSDAlive
* to the monitor such that the OSDMap sets osd_up_thru to an epoch.
* Then, we have something like
*
* 1: A B
* 2: B up_thru[B]=0
* 3:
* 4: A
*
* -> we can ignore B, bc it couldn't have gone active (alive_thru
* still 0).
*
* or,
*
* 1: A B
* 2: B up_thru[B]=0
* 3: B up_thru[B]=2
* 4:
* 5: A
*
* -> we must wait for B, bc it was alive through 2, and could have
* written to the pg.
*
* If B is really dead, then an administrator will need to manually
* intervene by marking the OSD as "lost."
*/
// Include current acting and up nodes... not because they may
// contain old data (this interval hasn't gone active, obviously),
// but because we want their pg_info to inform choose_acting(), and
// so that we know what they do/do not have explicitly before
// sending them any new info/logs/whatever.
for (unsigned i = 0; i < acting.size(); i++) {
if (acting[i] != pg_pool_t::pg_CRUSH_ITEM_NONE)
probe.insert(pg_shard_t(acting[i], ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
// It may be possible to exclude the up nodes, but let's keep them in
// there for now.
for (unsigned i = 0; i < up.size(); i++) {
if (up[i] != pg_pool_t::pg_CRUSH_ITEM_NONE)
probe.insert(pg_shard_t(up[i], ec_pool ? shard_id_t(i) : shard_id_t::NO_SHARD));
}
std::set<pg_shard_t> all_probe = past_intervals.get_all_probe(ec_pool);
ldpp_dout(dpp, 10) << "build_prior all_probe " << all_probe << dendl;
for (auto &&i: all_probe) {
switch (f(0, i.osd, nullptr)) {
case UP: {
probe.insert(i);
break;
}
case DNE:
case LOST:
case DOWN: {
down.insert(i.osd);
break;
}
}
}
past_intervals.iterate_mayberw_back_to(
last_epoch_started,
[&](epoch_t start, const std::set<pg_shard_t> &acting) {
ldpp_dout(dpp, 10) << "build_prior maybe_rw interval:" << start
<< ", acting: " << acting << dendl;
// look at candidate osds during this interval. each falls into
// one of three categories: up, down (but potentially
// interesting), or lost (down, but we won't wait for it).
std::set<pg_shard_t> up_now;
std::map<int, epoch_t> candidate_blocked_by;
// any candidates down now (that might have useful data)
bool any_down_now = false;
// consider ACTING osds
for (auto &&so: acting) {
epoch_t lost_at = 0;
switch (f(start, so.osd, &lost_at)) {
case UP: {
// include past acting osds if they are up.
up_now.insert(so);
break;
}
case DNE: {
ldpp_dout(dpp, 10) << "build_prior prior osd." << so.osd
<< " no longer exists" << dendl;
break;
}
case LOST: {
ldpp_dout(dpp, 10) << "build_prior prior osd." << so.osd
<< " is down, but lost_at " << lost_at << dendl;
up_now.insert(so);
break;
}
case DOWN: {
ldpp_dout(dpp, 10) << "build_prior prior osd." << so.osd
<< " is down" << dendl;
candidate_blocked_by[so.osd] = lost_at;
any_down_now = true;
break;
}
}
}
// if not enough osds survived this interval, and we may have gone rw,
// then we need to wait for one of those osds to recover to
// ensure that we haven't lost any information.
if (!(*pcontdec)(up_now) && any_down_now) {
// fixme: how do we identify a "clean" shutdown anyway?
ldpp_dout(dpp, 10) << "build_prior possibly went active+rw,"
<< " insufficient up; including down osds" << dendl;
ceph_assert(!candidate_blocked_by.empty());
pg_down = true;
blocked_by.insert(
candidate_blocked_by.begin(),
candidate_blocked_by.end());
}
});
ldpp_dout(dpp, 10) << "build_prior final: probe " << probe
<< " down " << down
<< " blocked_by " << blocked_by
<< (pg_down ? " pg_down":"")
<< dendl;
}
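// Illustrative sketch (not part of the interface): the classification
// functor `f` supplied to get_prior_set() maps an (epoch, osd) pair to one
// of the osd_state_t values above and may report the osd's lost_at epoch.
// The OSDMap accessors and surrounding variables below are assumptions for
// illustration only; see the real peering code for the actual logic.
//
//   auto classify = [&](epoch_t start, int osd, epoch_t *lost_at) {
//     if (!osdmap->exists(osd))
//       return PastIntervals::DNE;
//     if (osdmap->is_up(osd))
//       return PastIntervals::UP;
//     if (lost_at)
//       *lost_at = osdmap->get_info(osd).lost_at;
//     return osdmap->get_info(osd).lost_at > start ?
//       PastIntervals::LOST : PastIntervals::DOWN;
//   };
//   auto prior = past_intervals.get_prior_set(
//     ec_pool, last_epoch_started, &recoverable, classify, up, acting, dpp);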
struct pg_notify_t {
epoch_t query_epoch;
epoch_t epoch_sent;
pg_info_t info;
shard_id_t to;
shard_id_t from;
PastIntervals past_intervals;
pg_notify_t() :
query_epoch(0), epoch_sent(0), to(shard_id_t::NO_SHARD),
from(shard_id_t::NO_SHARD) {}
pg_notify_t(
shard_id_t to,
shard_id_t from,
epoch_t query_epoch,
epoch_t epoch_sent,
const pg_info_t &info,
const PastIntervals& pi)
: query_epoch(query_epoch),
epoch_sent(epoch_sent),
info(info), to(to), from(from),
past_intervals(pi) {
ceph_assert(from == info.pgid.shard);
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &p);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_notify_t*> &o);
};
WRITE_CLASS_ENCODER(pg_notify_t)
std::ostream &operator<<(std::ostream &lhs, const pg_notify_t ¬ify);
/**
* pg_query_t - used to ask a peer for information about a pg.
*
* note: if version=0, type=LOG, then we just provide our full log.
*/
struct pg_query_t {
enum {
INFO = 0,
LOG = 1,
MISSING = 4,
FULLLOG = 5,
};
std::string_view get_type_name() const {
switch (type) {
case INFO: return "info";
case LOG: return "log";
case MISSING: return "missing";
case FULLLOG: return "fulllog";
default: return "???";
}
}
__s32 type;
eversion_t since;
pg_history_t history;
epoch_t epoch_sent;
shard_id_t to;
shard_id_t from;
pg_query_t() : type(-1), epoch_sent(0), to(shard_id_t::NO_SHARD),
from(shard_id_t::NO_SHARD) {}
pg_query_t(
int t,
shard_id_t to,
shard_id_t from,
const pg_history_t& h,
epoch_t epoch_sent)
: type(t),
history(h),
epoch_sent(epoch_sent),
to(to), from(from) {
ceph_assert(t != LOG);
}
pg_query_t(
int t,
shard_id_t to,
shard_id_t from,
eversion_t s,
const pg_history_t& h,
epoch_t epoch_sent)
: type(t), since(s), history(h),
epoch_sent(epoch_sent), to(to), from(from) {
ceph_assert(t == LOG);
}
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_query_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(pg_query_t)
inline std::ostream& operator<<(std::ostream& out, const pg_query_t& q) {
out << "query(" << q.get_type_name() << " " << q.since;
if (q.type == pg_query_t::LOG)
out << " " << q.history;
out << " epoch_sent " << q.epoch_sent;
out << ")";
return out;
}
/**
* pg_lease_t - readable lease metadata, from primary -> non-primary
*
* This metadata serves to increase either or both of the lease expiration
* and upper bound on the non-primary.
*/
struct pg_lease_t {
/// pg readable_until value; replicas must not be readable beyond this
ceph::signedspan readable_until = ceph::signedspan::zero();
/// upper bound on any acting osd's readable_until
ceph::signedspan readable_until_ub = ceph::signedspan::zero();
/// duration of the lease (in case clock deltas aren't available)
ceph::signedspan interval = ceph::signedspan::zero();
pg_lease_t() {}
pg_lease_t(ceph::signedspan ru, ceph::signedspan ruub,
ceph::signedspan i)
: readable_until(ru),
readable_until_ub(ruub),
interval(i) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_lease_t*>& o);
friend std::ostream& operator<<(std::ostream& out, const pg_lease_t& l) {
return out << "pg_lease(ru " << l.readable_until
<< " ub " << l.readable_until_ub
<< " int " << l.interval << ")";
}
};
WRITE_CLASS_ENCODER(pg_lease_t)
/**
* pg_lease_ack_t - lease ack, from non-primary -> primary
*
* This metadata acknowledges to the primary what a non-primary's noted
* upper bound is.
*/
struct pg_lease_ack_t {
/// highest upper bound non-primary has recorded (primary's clock)
ceph::signedspan readable_until_ub = ceph::signedspan::zero();
pg_lease_ack_t() {}
pg_lease_ack_t(ceph::signedspan ub)
: readable_until_ub(ub) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_lease_ack_t*>& o);
friend std::ostream& operator<<(std::ostream& out, const pg_lease_ack_t& l) {
return out << "pg_lease_ack(ruub " << l.readable_until_ub << ")";
}
};
WRITE_CLASS_ENCODER(pg_lease_ack_t)
class PGBackend;
class ObjectModDesc {
bool can_local_rollback;
bool rollback_info_completed;
// version required to decode, reflected in encode/decode version
__u8 max_required_version = 1;
public:
class Visitor {
public:
virtual void append(uint64_t old_offset) {}
virtual void setattrs(std::map<std::string, std::optional<ceph::buffer::list>> &attrs) {}
virtual void rmobject(version_t old_version) {}
/**
* Used to support the unfound_lost_delete log event: if the stashed
* version exists, we unstash it, otherwise, we do nothing. This way
* each replica rolls back to whatever state it had prior to the
* mark-unfound-lost-delete attempt
*/
virtual void try_rmobject(version_t old_version) {
rmobject(old_version);
}
virtual void create() {}
virtual void update_snaps(const std::set<snapid_t> &old_snaps) {}
virtual void rollback_extents(
version_t gen,
const std::vector<std::pair<uint64_t, uint64_t> > &extents) {}
virtual ~Visitor() {}
};
void visit(Visitor *visitor) const;
mutable ceph::buffer::list bl;
enum ModID {
APPEND = 1,
SETATTRS = 2,
DELETE = 3,
CREATE = 4,
UPDATE_SNAPS = 5,
TRY_DELETE = 6,
ROLLBACK_EXTENTS = 7
};
ObjectModDesc() : can_local_rollback(true), rollback_info_completed(false) {
bl.reassign_to_mempool(mempool::mempool_osd_pglog);
}
void claim(ObjectModDesc &other) {
bl = std::move(other.bl);
can_local_rollback = other.can_local_rollback;
rollback_info_completed = other.rollback_info_completed;
}
void claim_append(ObjectModDesc &other) {
if (!can_local_rollback || rollback_info_completed)
return;
if (!other.can_local_rollback) {
mark_unrollbackable();
return;
}
bl.claim_append(other.bl);
rollback_info_completed = other.rollback_info_completed;
}
void swap(ObjectModDesc &other) {
bl.swap(other.bl);
using std::swap;
swap(other.can_local_rollback, can_local_rollback);
swap(other.rollback_info_completed, rollback_info_completed);
swap(other.max_required_version, max_required_version);
}
void append_id(ModID id) {
using ceph::encode;
uint8_t _id(id);
encode(_id, bl);
}
void append(uint64_t old_size) {
if (!can_local_rollback || rollback_info_completed)
return;
ENCODE_START(1, 1, bl);
append_id(APPEND);
encode(old_size, bl);
ENCODE_FINISH(bl);
}
void setattrs(std::map<std::string, std::optional<ceph::buffer::list>> &old_attrs) {
if (!can_local_rollback || rollback_info_completed)
return;
ENCODE_START(1, 1, bl);
append_id(SETATTRS);
encode(old_attrs, bl);
ENCODE_FINISH(bl);
}
bool rmobject(version_t deletion_version) {
if (!can_local_rollback || rollback_info_completed)
return false;
ENCODE_START(1, 1, bl);
append_id(DELETE);
encode(deletion_version, bl);
ENCODE_FINISH(bl);
rollback_info_completed = true;
return true;
}
bool try_rmobject(version_t deletion_version) {
if (!can_local_rollback || rollback_info_completed)
return false;
ENCODE_START(1, 1, bl);
append_id(TRY_DELETE);
encode(deletion_version, bl);
ENCODE_FINISH(bl);
rollback_info_completed = true;
return true;
}
void create() {
if (!can_local_rollback || rollback_info_completed)
return;
rollback_info_completed = true;
ENCODE_START(1, 1, bl);
append_id(CREATE);
ENCODE_FINISH(bl);
}
void update_snaps(const std::set<snapid_t> &old_snaps) {
if (!can_local_rollback || rollback_info_completed)
return;
ENCODE_START(1, 1, bl);
append_id(UPDATE_SNAPS);
encode(old_snaps, bl);
ENCODE_FINISH(bl);
}
void rollback_extents(
version_t gen, const std::vector<std::pair<uint64_t, uint64_t> > &extents) {
ceph_assert(can_local_rollback);
ceph_assert(!rollback_info_completed);
if (max_required_version < 2)
max_required_version = 2;
ENCODE_START(2, 2, bl);
append_id(ROLLBACK_EXTENTS);
encode(gen, bl);
encode(extents, bl);
ENCODE_FINISH(bl);
}
// cannot be rolled back
void mark_unrollbackable() {
can_local_rollback = false;
bl.clear();
}
bool can_rollback() const {
return can_local_rollback;
}
bool empty() const {
return can_local_rollback && (bl.length() == 0);
}
bool requires_kraken() const {
return max_required_version >= 2;
}
/**
* Create fresh copy of bl bytes to avoid keeping large buffers around
* in the case that bl contains ptrs which point into a much larger
* message buffer
*/
void trim_bl() const {
if (bl.length() > 0)
bl.rebuild();
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ObjectModDesc*>& o);
};
WRITE_CLASS_ENCODER(ObjectModDesc)
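// Illustrative sketch (hypothetical visitor; old_size/old_attrs are assumed
// variables): a writer records the pre-write state into an ObjectModDesc and
// rollback later replays it through a Visitor. Only members declared above
// are used.
//
//   ObjectModDesc desc;
//   desc.append(old_size);      // remember the size prior to an append
//   desc.setattrs(old_attrs);   // remember prior values of changed xattrs
//
//   struct PrintVisitor : ObjectModDesc::Visitor {
//     void append(uint64_t old_offset) override {
//       std::cout << "would truncate back to " << old_offset << "\n";
//     }
//   };
//   PrintVisitor v;
//   desc.visit(&v);             // replays the recorded ops in order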
class ObjectCleanRegions {
private:
bool new_object;
bool clean_omap;
interval_set<uint64_t> clean_offsets;
static std::atomic<uint32_t> max_num_intervals;
/**
* trim the number of intervals if clean_offsets.num_intervals()
* exceeds the given upper bound max_num_intervals.
* e.g. with max_num_intervals=2 and clean_offsets:{[5~10], [20~5]},
* a new interval [30~10] will evict the shortest one [20~5],
* so clean_offsets finally becomes {[5~10], [30~10]}
*/
void trim();
friend std::ostream& operator<<(std::ostream& out, const ObjectCleanRegions& ocr);
public:
ObjectCleanRegions() : new_object(false), clean_omap(true) {
clean_offsets.insert(0, (uint64_t)-1);
}
ObjectCleanRegions(uint64_t offset, uint64_t len, bool co)
: new_object(false), clean_omap(co) {
clean_offsets.insert(offset, len);
}
bool operator==(const ObjectCleanRegions &orc) const {
return new_object == orc.new_object && clean_omap == orc.clean_omap && clean_offsets == orc.clean_offsets;
}
static void set_max_num_intervals(uint32_t num);
void merge(const ObjectCleanRegions &other);
void mark_data_region_dirty(uint64_t offset, uint64_t len);
void mark_omap_dirty();
void mark_object_new();
void mark_fully_dirty();
interval_set<uint64_t> get_dirty_regions() const;
bool omap_is_dirty() const;
bool object_is_exist() const;
bool is_clean_region(uint64_t offset, uint64_t len) const;
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ObjectCleanRegions*>& o);
};
WRITE_CLASS_ENCODER(ObjectCleanRegions)
std::ostream& operator<<(std::ostream& out, const ObjectCleanRegions& ocr);
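// Illustrative sketch: an ObjectCleanRegions starts out fully clean (all
// offsets and omap) and accumulates dirtiness as writes are logged. Minimal
// usage, using only the members declared above:
//
//   ObjectCleanRegions cr;
//   cr.mark_data_region_dirty(4096, 8192); // bytes [4096, 12288) now dirty
//   cr.mark_omap_dirty();                  // omap no longer clean
//   bool head_clean = cr.is_clean_region(0, 4096);          // true
//   interval_set<uint64_t> dirty = cr.get_dirty_regions();  // {[4096~8192]}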
struct OSDOp {
ceph_osd_op op;
ceph::buffer::list indata, outdata;
errorcode32_t rval;
OSDOp() {
// FIPS zeroization audit 20191115: this memset clean for security
memset(&op, 0, sizeof(ceph_osd_op));
}
OSDOp(const int op_code) {
// FIPS zeroization audit 20191115: this memset clean for security
memset(&op, 0, sizeof(ceph_osd_op));
op.op = op_code;
}
/**
* split a ceph::buffer::list into constituent indata members of a vector of OSDOps
*
* @param ops [out] vector of OSDOps
* @param in [in] combined data buffer
*/
template<typename V>
static void split_osd_op_vector_in_data(V& ops,
ceph::buffer::list& in) {
ceph::buffer::list::iterator datap = in.begin();
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].op.payload_len) {
datap.copy(ops[i].op.payload_len, ops[i].indata);
}
}
}
/**
* merge indata members of a vector of OSDOp into a single ceph::buffer::list
*
* Notably this also encodes certain other OSDOp data into the data
* buffer, including the sobject_t soid.
*
* @param ops [in] vector of OSDOps
* @param out [out] combined data buffer
*/
template<typename V>
static void merge_osd_op_vector_in_data(V& ops, ceph::buffer::list& out) {
for (unsigned i = 0; i < ops.size(); i++) {
if (ops[i].indata.length()) {
ops[i].op.payload_len = ops[i].indata.length();
out.append(ops[i].indata);
}
}
}
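// Illustrative sketch: merge/split act as inverses over the indata members.
// The round trip below is hypothetical; in practice payload_len travels with
// the encoded ops, which is emulated here by copying it across.
//
//   std::vector<OSDOp> ops(1);
//   ops[0].op.op = CEPH_OSD_OP_WRITE;
//   ops[0].indata.append("payload");
//   ceph::buffer::list combined;
//   OSDOp::merge_osd_op_vector_in_data(ops, combined); // sets payload_len
//
//   std::vector<OSDOp> decoded(1);
//   decoded[0].op.payload_len = ops[0].op.payload_len;
//   OSDOp::split_osd_op_vector_in_data(decoded, combined);
//   // decoded[0].indata now holds "payload" again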
/**
* split a ceph::buffer::list into constituent outdata members of a vector of OSDOps
*
* @param ops [out] vector of OSDOps
* @param in [in] combined data buffer
*/
static void split_osd_op_vector_out_data(std::vector<OSDOp>& ops, ceph::buffer::list& in);
/**
* merge outdata members of a vector of OSDOps into a single ceph::buffer::list
*
* @param ops [in] vector of OSDOps
* @param out [out] combined data buffer
*/
static void merge_osd_op_vector_out_data(std::vector<OSDOp>& ops, ceph::buffer::list& out);
/**
* Clear data as much as possible, leave minimal data for historical op dump
*
* @param ops [in] vector of OSDOps
*/
template<typename V>
static void clear_data(V& ops) {
for (unsigned i = 0; i < ops.size(); i++) {
OSDOp& op = ops[i];
op.outdata.clear();
if (ceph_osd_op_type_attr(op.op.op) &&
op.op.xattr.name_len &&
op.indata.length() >= op.op.xattr.name_len) {
ceph::buffer::list bl;
bl.push_back(ceph::buffer::ptr_node::create(op.op.xattr.name_len));
bl.begin().copy_in(op.op.xattr.name_len, op.indata);
op.indata = std::move(bl);
} else if (ceph_osd_op_type_exec(op.op.op) &&
op.op.cls.class_len &&
op.indata.length() >
(op.op.cls.class_len + op.op.cls.method_len)) {
__u8 len = op.op.cls.class_len + op.op.cls.method_len;
ceph::buffer::list bl;
bl.push_back(ceph::buffer::ptr_node::create(len));
bl.begin().copy_in(len, op.indata);
op.indata = std::move(bl);
} else {
op.indata.clear();
}
}
}
};
std::ostream& operator<<(std::ostream& out, const OSDOp& op);
struct pg_log_op_return_item_t {
int32_t rval;
ceph::buffer::list bl;
void encode(ceph::buffer::list& p) const {
using ceph::encode;
encode(rval, p);
encode(bl, p);
}
void decode(ceph::buffer::list::const_iterator& p) {
using ceph::decode;
decode(rval, p);
decode(bl, p);
}
void dump(ceph::Formatter *f) const {
f->dump_int("rval", rval);
f->dump_unsigned("bl_length", bl.length());
}
friend bool operator==(const pg_log_op_return_item_t& lhs,
const pg_log_op_return_item_t& rhs) {
return lhs.rval == rhs.rval &&
lhs.bl.contents_equal(rhs.bl);
}
friend bool operator!=(const pg_log_op_return_item_t& lhs,
const pg_log_op_return_item_t& rhs) {
return !(lhs == rhs);
}
friend std::ostream& operator<<(std::ostream& out, const pg_log_op_return_item_t& i) {
return out << "r=" << i.rval << "+" << i.bl.length() << "b";
}
};
WRITE_CLASS_ENCODER(pg_log_op_return_item_t)
/**
* pg_log_entry_t - single entry/event in pg log
*
*/
struct pg_log_entry_t {
enum {
MODIFY = 1, // some unspecified modification (but not *all* modifications)
CLONE = 2, // cloned object from head
DELETE = 3, // deleted object
//BACKLOG = 4, // event invented by generate_backlog [obsolete]
LOST_REVERT = 5, // lost new version, revert to an older version.
LOST_DELETE = 6, // lost new version, revert to no object (deleted).
LOST_MARK = 7, // lost new version, now EIO
PROMOTE = 8, // promoted object from another tier
CLEAN = 9, // mark an object clean
ERROR = 10, // write that returned an error
};
static const char *get_op_name(int op) {
switch (op) {
case MODIFY:
return "modify";
case PROMOTE:
return "promote";
case CLONE:
return "clone";
case DELETE:
return "delete";
case LOST_REVERT:
return "l_revert";
case LOST_DELETE:
return "l_delete";
case LOST_MARK:
return "l_mark";
case CLEAN:
return "clean";
case ERROR:
return "error";
default:
return "unknown";
}
}
const char *get_op_name() const {
return get_op_name(op);
}
// describes state for a locally-rollbackable entry
ObjectModDesc mod_desc;
ceph::buffer::list snaps; // only for clone entries
hobject_t soid;
osd_reqid_t reqid; // caller+tid to uniquely identify request
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > extra_reqids;
/// map extra_reqids by index to error return code (if any)
mempool::osd_pglog::map<uint32_t, int> extra_reqid_return_codes;
eversion_t version, prior_version, reverting_to;
version_t user_version; // the user version for this entry
utime_t mtime; // this is the _user_ mtime, mind you
int32_t return_code; // only stored for ERRORs for dup detection
std::vector<pg_log_op_return_item_t> op_returns;
__s32 op;
bool invalid_hash; // only when decoding sobject_t based entries
bool invalid_pool; // only when decoding pool-less hobject based entries
ObjectCleanRegions clean_regions;
pg_log_entry_t()
: user_version(0), return_code(0), op(0),
invalid_hash(false), invalid_pool(false) {
snaps.reassign_to_mempool(mempool::mempool_osd_pglog);
}
pg_log_entry_t(int _op, const hobject_t& _soid,
const eversion_t& v, const eversion_t& pv,
version_t uv,
const osd_reqid_t& rid, const utime_t& mt,
int return_code)
: soid(_soid), reqid(rid), version(v), prior_version(pv), user_version(uv),
mtime(mt), return_code(return_code), op(_op),
invalid_hash(false), invalid_pool(false) {
snaps.reassign_to_mempool(mempool::mempool_osd_pglog);
}
bool is_clone() const { return op == CLONE; }
bool is_modify() const { return op == MODIFY; }
bool is_promote() const { return op == PROMOTE; }
bool is_clean() const { return op == CLEAN; }
bool is_lost_revert() const { return op == LOST_REVERT; }
bool is_lost_delete() const { return op == LOST_DELETE; }
bool is_lost_mark() const { return op == LOST_MARK; }
bool is_error() const { return op == ERROR; }
bool is_update() const {
return
is_clone() || is_modify() || is_promote() || is_clean() ||
is_lost_revert() || is_lost_mark();
}
bool is_delete() const {
return op == DELETE || op == LOST_DELETE;
}
bool can_rollback() const {
return mod_desc.can_rollback();
}
void mark_unrollbackable() {
mod_desc.mark_unrollbackable();
}
bool requires_kraken() const {
return mod_desc.requires_kraken();
}
// Errors are only used for dup detection, whereas
// the index by objects is used by recovery, copy_get,
// and other facilities that don't expect or need to
// be aware of error entries.
bool object_is_indexed() const {
return !is_error();
}
bool reqid_is_indexed() const {
return reqid != osd_reqid_t() &&
(op == MODIFY || op == DELETE || op == ERROR);
}
void set_op_returns(const std::vector<OSDOp>& ops) {
op_returns.resize(ops.size());
for (unsigned i = 0; i < ops.size(); ++i) {
op_returns[i].rval = ops[i].rval;
op_returns[i].bl = ops[i].outdata;
}
}
std::string get_key_name() const;
void encode_with_checksum(ceph::buffer::list& bl) const;
void decode_with_checksum(ceph::buffer::list::const_iterator& p);
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_log_entry_t*>& o);
};
WRITE_CLASS_ENCODER(pg_log_entry_t)
std::ostream& operator<<(std::ostream& out, const pg_log_entry_t& e);
struct pg_log_dup_t {
osd_reqid_t reqid; // caller+tid to uniquely identify request
eversion_t version;
version_t user_version; // the user version for this entry
int32_t return_code; // only stored for ERRORs for dup detection
std::vector<pg_log_op_return_item_t> op_returns;
pg_log_dup_t()
: user_version(0), return_code(0)
{}
explicit pg_log_dup_t(const pg_log_entry_t& entry)
: reqid(entry.reqid), version(entry.version),
user_version(entry.user_version),
return_code(entry.return_code),
op_returns(entry.op_returns)
{}
pg_log_dup_t(const eversion_t& v, version_t uv,
const osd_reqid_t& rid, int return_code)
: reqid(rid), version(v), user_version(uv),
return_code(return_code)
{}
std::string get_key_name() const;
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_log_dup_t*>& o);
bool operator==(const pg_log_dup_t &rhs) const {
return reqid == rhs.reqid &&
version == rhs.version &&
user_version == rhs.user_version &&
return_code == rhs.return_code &&
op_returns == rhs.op_returns;
}
bool operator!=(const pg_log_dup_t &rhs) const {
return !(*this == rhs);
}
friend std::ostream& operator<<(std::ostream& out, const pg_log_dup_t& e);
};
WRITE_CLASS_ENCODER(pg_log_dup_t)
std::ostream& operator<<(std::ostream& out, const pg_log_dup_t& e);
/**
* pg_log_t - incremental log of recent pg changes.
*
* serves as a recovery queue for recent changes.
*/
struct pg_log_t {
/*
* head - newest entry (update|delete)
* tail - entry previous to oldest (update|delete) for which we have
* complete negative information.
* i.e. we can infer pg contents for any store whose last_update >= tail.
*/
eversion_t head; // newest entry
eversion_t tail; // version prior to oldest
protected:
// We can rollback rollback-able entries > can_rollback_to
eversion_t can_rollback_to;
// always <= can_rollback_to, indicates how far stashed rollback
// data can be found
eversion_t rollback_info_trimmed_to;
public:
// the actual log
mempool::osd_pglog::list<pg_log_entry_t> log;
// entries just for dup op detection ordered oldest to newest
mempool::osd_pglog::list<pg_log_dup_t> dups;
pg_log_t() = default;
pg_log_t(const eversion_t &last_update,
const eversion_t &log_tail,
const eversion_t &can_rollback_to,
const eversion_t &rollback_info_trimmed_to,
mempool::osd_pglog::list<pg_log_entry_t> &&entries,
mempool::osd_pglog::list<pg_log_dup_t> &&dup_entries)
: head(last_update), tail(log_tail), can_rollback_to(can_rollback_to),
rollback_info_trimmed_to(rollback_info_trimmed_to),
log(std::move(entries)), dups(std::move(dup_entries)) {}
pg_log_t(const eversion_t &last_update,
const eversion_t &log_tail,
const eversion_t &can_rollback_to,
const eversion_t &rollback_info_trimmed_to,
const std::list<pg_log_entry_t> &entries,
const std::list<pg_log_dup_t> &dup_entries)
: head(last_update), tail(log_tail), can_rollback_to(can_rollback_to),
rollback_info_trimmed_to(rollback_info_trimmed_to) {
for (auto &&entry: entries) {
log.push_back(entry);
}
for (auto &&entry: dup_entries) {
dups.push_back(entry);
}
}
void clear() {
eversion_t z;
rollback_info_trimmed_to = can_rollback_to = head = tail = z;
log.clear();
dups.clear();
}
eversion_t get_rollback_info_trimmed_to() const {
return rollback_info_trimmed_to;
}
eversion_t get_can_rollback_to() const {
return can_rollback_to;
}
pg_log_t split_out_child(pg_t child_pgid, unsigned split_bits) {
mempool::osd_pglog::list<pg_log_entry_t> oldlog, childlog;
oldlog.swap(log);
eversion_t old_tail;
unsigned mask = ~((~0)<<split_bits);
for (auto i = oldlog.begin();
i != oldlog.end();
) {
if ((i->soid.get_hash() & mask) == child_pgid.m_seed) {
childlog.push_back(*i);
} else {
log.push_back(*i);
}
oldlog.erase(i++);
}
// osd_reqid is unique, so it doesn't matter if there are extra
// dup entries in each pg. To avoid storing oid with the dup
// entries, just copy the whole list.
auto childdups(dups);
return pg_log_t(
head,
tail,
can_rollback_to,
rollback_info_trimmed_to,
std::move(childlog),
std::move(childdups));
}
mempool::osd_pglog::list<pg_log_entry_t> rewind_from_head(eversion_t newhead) {
ceph_assert(newhead >= tail);
mempool::osd_pglog::list<pg_log_entry_t>::iterator p = log.end();
mempool::osd_pglog::list<pg_log_entry_t> divergent;
while (true) {
if (p == log.begin()) {
// yikes, the whole thing is divergent!
using std::swap;
swap(divergent, log);
break;
}
--p;
if (p->version.version <= newhead.version) {
/*
* look at eversion.version here. we want to avoid a situation like:
* our log: 100'10 (0'0) m 10000004d3a.00000000/head by client4225.1:18529
* new log: 122'10 (0'0) m 10000004d3a.00000000/head by client4225.1:18529
* lower_bound = 100'9
* i.e., same request, different version. If the eversion.version is > the
* lower_bound, it is divergent.
*/
++p;
divergent.splice(divergent.begin(), log, p, log.end());
break;
}
ceph_assert(p->version > newhead);
}
head = newhead;
if (can_rollback_to > newhead)
can_rollback_to = newhead;
if (rollback_info_trimmed_to > newhead)
rollback_info_trimmed_to = newhead;
return divergent;
}
void merge_from(const std::vector<pg_log_t*>& slogs, eversion_t last_update) {
log.clear();
// sort and merge dups
std::multimap<eversion_t,pg_log_dup_t> sorted;
for (auto& d : dups) {
sorted.emplace(d.version, d);
}
for (auto l : slogs) {
for (auto& d : l->dups) {
sorted.emplace(d.version, d);
}
}
dups.clear();
for (auto& i : sorted) {
dups.push_back(i.second);
}
head = last_update;
tail = last_update;
can_rollback_to = last_update;
rollback_info_trimmed_to = last_update;
}
bool empty() const {
return log.empty();
}
bool null() const {
return head.version == 0 && head.epoch == 0;
}
uint64_t approx_size() const {
return head.version - tail.version;
}
static void filter_log(spg_t import_pgid, const OSDMap &curmap,
const std::string &hit_set_namespace, const pg_log_t &in,
pg_log_t &out, pg_log_t &reject);
/**
* copy entries from the tail of another pg_log_t
*
* @param other pg_log_t to copy from
* @param from copy entries after this version
*/
void copy_after(CephContext* cct, const pg_log_t &other, eversion_t from);
/**
* copy up to N entries
*
* @param other source log
* @param max max number of entries to copy
*/
void copy_up_to(CephContext* cct, const pg_log_t &other, int max);
std::ostream& print(std::ostream& out) const;
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl, int64_t pool = -1);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_log_t*>& o);
};
WRITE_CLASS_ENCODER(pg_log_t)
inline std::ostream& operator<<(std::ostream& out, const pg_log_t& log)
{
out << "log((" << log.tail << "," << log.head << "], crt="
<< log.get_can_rollback_to() << ")";
return out;
}
/**
* pg_missing_t - summary of missing objects.
*
* kept in memory, as a supplement to pg_log_t
* also used to pass missing info in messages.
*/
struct pg_missing_item {
eversion_t need, have;
ObjectCleanRegions clean_regions;
enum missing_flags_t {
FLAG_NONE = 0,
FLAG_DELETE = 1,
} flags;
pg_missing_item() : flags(FLAG_NONE) {}
explicit pg_missing_item(eversion_t n) : need(n), flags(FLAG_NONE) {} // have no old version
pg_missing_item(eversion_t n, eversion_t h, bool is_delete=false, bool old_style = false) :
need(n), have(h) {
set_delete(is_delete);
if (old_style)
clean_regions.mark_fully_dirty();
}
void encode(ceph::buffer::list& bl, uint64_t features) const {
using ceph::encode;
if (HAVE_FEATURE(features, SERVER_OCTOPUS)) {
// encoding a zeroed eversion_t to differentiate between OSD_RECOVERY_DELETES,
// SERVER_OCTOPUS and legacy unversioned encoding - a need value of 0'0 is not
// possible. This can be replaced with the legacy encoding
encode(eversion_t(), bl);
encode(eversion_t(-1, -1), bl);
encode(need, bl);
encode(have, bl);
encode(static_cast<uint8_t>(flags), bl);
encode(clean_regions, bl);
} else {
encode(eversion_t(), bl);
encode(need, bl);
encode(have, bl);
encode(static_cast<uint8_t>(flags), bl);
}
}
void decode(ceph::buffer::list::const_iterator& bl) {
using ceph::decode;
eversion_t e, l;
decode(e, bl);
decode(l, bl);
if(l == eversion_t(-1, -1)) {
// SERVER_OCTOPUS (and later) encoding: all fields present
decode(need, bl);
decode(have, bl);
uint8_t f;
decode(f, bl);
flags = static_cast<missing_flags_t>(f);
decode(clean_regions, bl);
} else {
// support OSD_RECOVERY_DELETES
need = l;
decode(have, bl);
uint8_t f;
decode(f, bl);
flags = static_cast<missing_flags_t>(f);
clean_regions.mark_fully_dirty();
}
}
void set_delete(bool is_delete) {
flags = is_delete ? FLAG_DELETE : FLAG_NONE;
}
bool is_delete() const {
return (flags & FLAG_DELETE) == FLAG_DELETE;
}
std::string flag_str() const {
if (flags == FLAG_NONE) {
return "none";
} else {
return "delete";
}
}
void dump(ceph::Formatter *f) const {
f->dump_stream("need") << need;
f->dump_stream("have") << have;
f->dump_stream("flags") << flag_str();
f->dump_stream("clean_regions") << clean_regions;
}
static void generate_test_instances(std::list<pg_missing_item*>& o) {
o.push_back(new pg_missing_item);
o.push_back(new pg_missing_item);
o.back()->need = eversion_t(1, 2);
o.back()->have = eversion_t(1, 1);
o.push_back(new pg_missing_item);
o.back()->need = eversion_t(3, 5);
o.back()->have = eversion_t(3, 4);
o.back()->clean_regions.mark_data_region_dirty(4096, 8192);
o.back()->clean_regions.mark_omap_dirty();
o.back()->flags = FLAG_DELETE;
}
bool operator==(const pg_missing_item &rhs) const {
return need == rhs.need && have == rhs.have && flags == rhs.flags;
}
bool operator!=(const pg_missing_item &rhs) const {
return !(*this == rhs);
}
};
WRITE_CLASS_ENCODER_FEATURES(pg_missing_item)
std::ostream& operator<<(std::ostream& out, const pg_missing_item &item);
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<pg_missing_item> : fmt::ostream_formatter {};
#endif
class pg_missing_const_i {
public:
virtual const std::map<hobject_t, pg_missing_item> &
get_items() const = 0;
virtual const std::map<version_t, hobject_t> &get_rmissing() const = 0;
virtual bool get_may_include_deletes() const = 0;
virtual unsigned int num_missing() const = 0;
virtual bool have_missing() const = 0;
virtual bool is_missing(const hobject_t& oid, pg_missing_item *out = nullptr) const = 0;
virtual bool is_missing(const hobject_t& oid, eversion_t v) const = 0;
virtual ~pg_missing_const_i() {}
};
template <bool Track>
class ChangeTracker {
public:
void changed(const hobject_t &obj) {}
template <typename F>
void get_changed(F &&f) const {}
void flush() {}
bool is_clean() const {
return true;
}
};
template <>
class ChangeTracker<true> {
std::set<hobject_t> _changed;
public:
void changed(const hobject_t &obj) {
_changed.insert(obj);
}
template <typename F>
void get_changed(F &&f) const {
for (auto const &i: _changed) {
f(i);
}
}
void flush() {
_changed.clear();
}
bool is_clean() const {
return _changed.empty();
}
};
template <bool TrackChanges>
class pg_missing_set : public pg_missing_const_i {
using item = pg_missing_item;
std::map<hobject_t, item> missing; // oid -> (need v, have v)
std::map<version_t, hobject_t> rmissing; // v -> oid
ChangeTracker<TrackChanges> tracker;
public:
pg_missing_set() = default;
template <typename missing_type>
pg_missing_set(const missing_type &m) {
missing = m.get_items();
rmissing = m.get_rmissing();
may_include_deletes = m.get_may_include_deletes();
for (auto &&i: missing)
tracker.changed(i.first);
}
bool may_include_deletes = false;
const std::map<hobject_t, item> &get_items() const override {
return missing;
}
const std::map<version_t, hobject_t> &get_rmissing() const override {
return rmissing;
}
bool get_may_include_deletes() const override {
return may_include_deletes;
}
unsigned int num_missing() const override {
return missing.size();
}
bool have_missing() const override {
return !missing.empty();
}
void merge(const pg_log_entry_t& e) {
auto miter = missing.find(e.soid);
if (miter != missing.end() && miter->second.have != eversion_t() && e.version > miter->second.have)
miter->second.clean_regions.merge(e.clean_regions);
}
bool is_missing(const hobject_t& oid, pg_missing_item *out = nullptr) const override {
auto iter = missing.find(oid);
if (iter == missing.end())
return false;
if (out)
*out = iter->second;
return true;
}
bool is_missing(const hobject_t& oid, eversion_t v) const override {
std::map<hobject_t, item>::const_iterator m =
missing.find(oid);
if (m == missing.end())
return false;
const item &item(m->second);
if (item.need > v)
return false;
return true;
}
eversion_t get_oldest_need() const {
if (missing.empty()) {
return eversion_t();
}
auto it = missing.find(rmissing.begin()->second);
ceph_assert(it != missing.end());
return it->second.need;
}
void claim(pg_missing_set&& o) {
static_assert(!TrackChanges, "Can't use claim with TrackChanges");
missing = std::move(o.missing);
rmissing = std::move(o.rmissing);
}
/*
* this needs to be called in log order as we extend the log. it
* assumes missing is accurate up through the previous log entry.
*/
void add_next_event(const pg_log_entry_t& e) {
std::map<hobject_t, item>::iterator missing_it;
missing_it = missing.find(e.soid);
bool is_missing_divergent_item = missing_it != missing.end();
if (e.prior_version == eversion_t() || e.is_clone()) {
// new object.
if (is_missing_divergent_item) { // use iterator
rmissing.erase(missing_it->second.need.version);
// .have = nil
missing_it->second = item(e.version, eversion_t(), e.is_delete());
missing_it->second.clean_regions.mark_fully_dirty();
} else {
// create new element in missing map
// .have = nil
missing[e.soid] = item(e.version, eversion_t(), e.is_delete());
missing[e.soid].clean_regions.mark_fully_dirty();
}
} else if (is_missing_divergent_item) {
// already missing (prior).
rmissing.erase((missing_it->second).need.version);
missing_it->second.need = e.version; // leave .have unchanged.
missing_it->second.set_delete(e.is_delete());
if (e.is_lost_revert())
missing_it->second.clean_regions.mark_fully_dirty();
else
missing_it->second.clean_regions.merge(e.clean_regions);
} else {
// not missing, we must have prior_version (if any)
ceph_assert(!is_missing_divergent_item);
missing[e.soid] = item(e.version, e.prior_version, e.is_delete());
if (e.is_lost_revert())
missing[e.soid].clean_regions.mark_fully_dirty();
else
missing[e.soid].clean_regions = e.clean_regions;
}
rmissing[e.version.version] = e.soid;
tracker.changed(e.soid);
}
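// Illustrative sketch (hypothetical oid/reqid/mtime): a MODIFY entry for an
// object that is not yet missing records need = entry version and
// have = prior_version, and indexes the entry by version:
//
//   pg_log_entry_t e(pg_log_entry_t::MODIFY, oid,
//                    eversion_t(11, 6) /* version */,
//                    eversion_t(10, 5) /* prior_version */,
//                    1 /* user_version */, reqid, mtime, 0);
//   pg_missing_tracker_t missing_set;
//   missing_set.add_next_event(e);
//   // item for oid: need == 11'6, have == 10'5; rmissing[6] == oid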
void revise_need(hobject_t oid, eversion_t need, bool is_delete) {
auto p = missing.find(oid);
if (p != missing.end()) {
rmissing.erase((p->second).need.version);
p->second.need = need; // do not adjust .have
p->second.set_delete(is_delete);
p->second.clean_regions.mark_fully_dirty();
} else {
missing[oid] = item(need, eversion_t(), is_delete);
missing[oid].clean_regions.mark_fully_dirty();
}
rmissing[need.version] = oid;
tracker.changed(oid);
}
void revise_have(hobject_t oid, eversion_t have) {
auto p = missing.find(oid);
if (p != missing.end()) {
tracker.changed(oid);
(p->second).have = have;
}
}
void mark_fully_dirty(const hobject_t& oid) {
auto p = missing.find(oid);
if (p != missing.end()) {
tracker.changed(oid);
(p->second).clean_regions.mark_fully_dirty();
}
}
void add(const hobject_t& oid, eversion_t need, eversion_t have,
bool is_delete) {
missing[oid] = item(need, have, is_delete, true);
rmissing[need.version] = oid;
tracker.changed(oid);
}
void add(const hobject_t& oid, pg_missing_item&& item) {
rmissing[item.need.version] = oid;
missing.insert({oid, std::move(item)});
tracker.changed(oid);
}
void rm(const hobject_t& oid, eversion_t v) {
std::map<hobject_t, item>::iterator p = missing.find(oid);
if (p != missing.end() && p->second.need <= v)
rm(p);
}
void rm(std::map<hobject_t, item>::const_iterator m) {
tracker.changed(m->first);
rmissing.erase(m->second.need.version);
missing.erase(m);
}
void got(const hobject_t& oid, eversion_t v) {
std::map<hobject_t, item>::iterator p = missing.find(oid);
ceph_assert(p != missing.end());
ceph_assert(p->second.need <= v || p->second.is_delete());
got(p);
}
void got(std::map<hobject_t, item>::const_iterator m) {
tracker.changed(m->first);
rmissing.erase(m->second.need.version);
missing.erase(m);
}
void split_into(
pg_t child_pgid,
unsigned split_bits,
pg_missing_set *omissing) {
omissing->may_include_deletes = may_include_deletes;
unsigned mask = ~((~0)<<split_bits);
for (std::map<hobject_t, item>::iterator i = missing.begin();
i != missing.end();
) {
if ((i->first.get_hash() & mask) == child_pgid.m_seed) {
omissing->add(i->first, i->second.need, i->second.have,
i->second.is_delete());
rm(i++);
} else {
++i;
}
}
}
void clear() {
for (auto const &i: missing)
tracker.changed(i.first);
missing.clear();
rmissing.clear();
}
void encode(ceph::buffer::list &bl, uint64_t features) const {
ENCODE_START(5, 2, bl)
encode(missing, bl, features);
encode(may_include_deletes, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl, int64_t pool = -1) {
for (auto const &i: missing)
tracker.changed(i.first);
DECODE_START_LEGACY_COMPAT_LEN(5, 2, 2, bl);
decode(missing, bl);
if (struct_v >= 4) {
decode(may_include_deletes, bl);
}
DECODE_FINISH(bl);
if (struct_v < 3) {
// Handle hobject_t upgrade
std::map<hobject_t, item> tmp;
for (std::map<hobject_t, item>::iterator i =
missing.begin();
i != missing.end();
) {
if (!i->first.is_max() && i->first.pool == -1) {
hobject_t to_insert(i->first);
to_insert.pool = pool;
tmp[to_insert] = i->second;
missing.erase(i++);
} else {
++i;
}
}
missing.insert(tmp.begin(), tmp.end());
}
for (std::map<hobject_t,item>::iterator it =
missing.begin();
it != missing.end();
++it)
rmissing[it->second.need.version] = it->first;
for (auto const &i: missing)
tracker.changed(i.first);
}
void dump(ceph::Formatter *f) const {
f->open_array_section("missing");
for (std::map<hobject_t,item>::const_iterator p =
missing.begin(); p != missing.end(); ++p) {
f->open_object_section("item");
f->dump_stream("object") << p->first;
p->second.dump(f);
f->close_section();
}
f->close_section();
f->dump_bool("may_include_deletes", may_include_deletes);
}
template <typename F>
void filter_objects(F &&f) {
for (auto i = missing.begin(); i != missing.end();) {
if (f(i->first)) {
rm(i++);
} else {
++i;
}
}
}
static void generate_test_instances(std::list<pg_missing_set*>& o) {
o.push_back(new pg_missing_set);
o.back()->may_include_deletes = true;
o.push_back(new pg_missing_set);
o.back()->add(
hobject_t(object_t("foo"), "foo", 123, 456, 0, ""),
eversion_t(5, 6), eversion_t(5, 1), false);
o.back()->may_include_deletes = true;
o.push_back(new pg_missing_set);
o.back()->add(
hobject_t(object_t("foo"), "foo", 123, 456, 0, ""),
eversion_t(5, 6), eversion_t(5, 1), true);
o.back()->may_include_deletes = true;
}
template <typename F>
void get_changed(F &&f) const {
tracker.get_changed(f);
}
void flush() {
tracker.flush();
}
bool is_clean() const {
return tracker.is_clean();
}
template <typename missing_t>
bool debug_verify_from_init(
const missing_t &init_missing,
std::ostream *oss) const {
if (!TrackChanges)
return true;
auto check_missing(init_missing.get_items());
tracker.get_changed([&](const hobject_t &hoid) {
check_missing.erase(hoid);
if (missing.count(hoid)) {
check_missing.insert(*(missing.find(hoid)));
}
});
bool ok = true;
if (check_missing.size() != missing.size()) {
if (oss) {
*oss << "Size mismatch, check: " << check_missing.size()
<< ", actual: " << missing.size() << "\n";
}
ok = false;
}
for (auto &i: missing) {
if (!check_missing.count(i.first)) {
if (oss)
*oss << "check_missing missing " << i.first << "\n";
ok = false;
} else if (check_missing[i.first] != i.second) {
if (oss)
*oss << "check_missing missing item mismatch on " << i.first
<< ", check: " << check_missing[i.first]
<< ", actual: " << i.second << "\n";
ok = false;
}
}
if (oss && !ok) {
*oss << "check_missing: " << check_missing << "\n";
std::set<hobject_t> changed;
tracker.get_changed([&](const hobject_t &hoid) { changed.insert(hoid); });
*oss << "changed: " << changed << "\n";
}
return ok;
}
};
template <bool TrackChanges>
void encode(
const pg_missing_set<TrackChanges> &c, ceph::buffer::list &bl, uint64_t features=0) {
ENCODE_DUMP_PRE();
c.encode(bl, features);
ENCODE_DUMP_POST(cl);
}
template <bool TrackChanges>
void decode(pg_missing_set<TrackChanges> &c, ceph::buffer::list::const_iterator &p) {
c.decode(p);
}
template <bool TrackChanges>
std::ostream& operator<<(std::ostream& out, const pg_missing_set<TrackChanges> &missing)
{
out << "missing(" << missing.num_missing()
<< " may_include_deletes = " << missing.may_include_deletes;
//if (missing.num_lost()) out << ", " << missing.num_lost() << " lost";
out << ")";
return out;
}
using pg_missing_t = pg_missing_set<false>;
using pg_missing_tracker_t = pg_missing_set<true>;
/**
* pg list objects response format
*
*/
template<typename T>
struct pg_nls_response_template {
collection_list_handle_t handle;
std::vector<T> entries;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(handle, bl);
__u32 n = (__u32)entries.size();
encode(n, bl);
for (auto i = entries.begin(); i != entries.end(); ++i) {
encode(i->nspace, bl);
encode(i->oid, bl);
encode(i->locator, bl);
}
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(handle, bl);
__u32 n;
decode(n, bl);
entries.clear();
while (n--) {
T i;
decode(i.nspace, bl);
decode(i.oid, bl);
decode(i.locator, bl);
entries.push_back(i);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const {
f->dump_stream("handle") << handle;
f->open_array_section("entries");
for (auto p = entries.begin(); p != entries.end(); ++p) {
f->open_object_section("object");
f->dump_string("namespace", p->nspace);
f->dump_string("object", p->oid);
f->dump_string("key", p->locator);
f->close_section();
}
f->close_section();
}
static void generate_test_instances(std::list<pg_nls_response_template<T>*>& o) {
o.push_back(new pg_nls_response_template<T>);
o.push_back(new pg_nls_response_template<T>);
o.back()->handle = hobject_t(object_t("hi"), "key", 1, 2, -1, "");
o.back()->entries.push_back(librados::ListObjectImpl("", "one", ""));
o.back()->entries.push_back(librados::ListObjectImpl("", "two", "twokey"));
o.back()->entries.push_back(librados::ListObjectImpl("", "three", ""));
o.push_back(new pg_nls_response_template<T>);
o.back()->handle = hobject_t(object_t("hi"), "key", 3, 4, -1, "");
o.back()->entries.push_back(librados::ListObjectImpl("n1", "n1one", ""));
o.back()->entries.push_back(librados::ListObjectImpl("n1", "n1two", "n1twokey"));
o.back()->entries.push_back(librados::ListObjectImpl("n1", "n1three", ""));
o.push_back(new pg_nls_response_template<T>);
o.back()->handle = hobject_t(object_t("hi"), "key", 5, 6, -1, "");
o.back()->entries.push_back(librados::ListObjectImpl("", "one", ""));
o.back()->entries.push_back(librados::ListObjectImpl("", "two", "twokey"));
o.back()->entries.push_back(librados::ListObjectImpl("", "three", ""));
o.back()->entries.push_back(librados::ListObjectImpl("n1", "n1one", ""));
o.back()->entries.push_back(librados::ListObjectImpl("n1", "n1two", "n1twokey"));
o.back()->entries.push_back(librados::ListObjectImpl("n1", "n1three", ""));
}
};
using pg_nls_response_t = pg_nls_response_template<librados::ListObjectImpl>;
WRITE_CLASS_ENCODER(pg_nls_response_t)
// For backwards compatibility with older OSD requests
struct pg_ls_response_t {
collection_list_handle_t handle;
std::list<std::pair<object_t, std::string> > entries;
void encode(ceph::buffer::list& bl) const {
using ceph::encode;
__u8 v = 1;
encode(v, bl);
encode(handle, bl);
encode(entries, bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
using ceph::decode;
__u8 v;
decode(v, bl);
ceph_assert(v == 1);
decode(handle, bl);
decode(entries, bl);
}
void dump(ceph::Formatter *f) const {
f->dump_stream("handle") << handle;
f->open_array_section("entries");
for (std::list<std::pair<object_t, std::string> >::const_iterator p = entries.begin(); p != entries.end(); ++p) {
f->open_object_section("object");
f->dump_stream("object") << p->first;
f->dump_string("key", p->second);
f->close_section();
}
f->close_section();
}
static void generate_test_instances(std::list<pg_ls_response_t*>& o) {
o.push_back(new pg_ls_response_t);
o.push_back(new pg_ls_response_t);
o.back()->handle = hobject_t(object_t("hi"), "key", 1, 2, -1, "");
o.back()->entries.push_back(std::make_pair(object_t("one"), std::string()));
o.back()->entries.push_back(std::make_pair(object_t("two"), std::string("twokey")));
}
};
WRITE_CLASS_ENCODER(pg_ls_response_t)
/**
* object_copy_cursor_t
*/
struct object_copy_cursor_t {
uint64_t data_offset;
std::string omap_offset;
bool attr_complete;
bool data_complete;
bool omap_complete;
object_copy_cursor_t()
: data_offset(0),
attr_complete(false),
data_complete(false),
omap_complete(false)
{}
bool is_initial() const {
return !attr_complete && data_offset == 0 && omap_offset.empty();
}
bool is_complete() const {
return attr_complete && data_complete && omap_complete;
}
static void generate_test_instances(std::list<object_copy_cursor_t*>& o);
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
};
WRITE_CLASS_ENCODER(object_copy_cursor_t)
/**
* object_copy_data_t
*
* Return data from a copy request. The semantics are a little strange
* as a result of the encoding's heritage.
*
* In particular, the sender unconditionally fills in the cursor (from what
* it receives and sends), the size, and the mtime, but is responsible for
* figuring out whether it should put any data in the attrs, data, or
* omap members (corresponding to xattrs, object data, and the omap entries)
* based on external data (the client includes a max amount to return with
* the copy request). The client then looks into the attrs, data, and/or omap
* based on the contents of the cursor.
*/
struct object_copy_data_t {
enum {
FLAG_DATA_DIGEST = 1<<0,
FLAG_OMAP_DIGEST = 1<<1,
};
object_copy_cursor_t cursor;
uint64_t size;
utime_t mtime;
uint32_t data_digest, omap_digest;
uint32_t flags;
std::map<std::string, ceph::buffer::list, std::less<>> attrs;
ceph::buffer::list data;
ceph::buffer::list omap_header;
ceph::buffer::list omap_data;
/// which snaps we are defined for (if a snap and not the head)
std::vector<snapid_t> snaps;
/// latest snap seq for the object (if head)
snapid_t snap_seq;
/// recent reqids on this object
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > reqids;
/// map reqids by index to error return code (if any)
mempool::osd_pglog::map<uint32_t, int> reqid_return_codes;
uint64_t truncate_seq;
uint64_t truncate_size;
public:
object_copy_data_t() :
size((uint64_t)-1), data_digest(-1),
omap_digest(-1), flags(0),
truncate_seq(0),
truncate_size(0) {}
static void generate_test_instances(std::list<object_copy_data_t*>& o);
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
};
WRITE_CLASS_ENCODER_FEATURES(object_copy_data_t)
/**
* pg creation info
*/
struct pg_create_t {
epoch_t created; // epoch pg created
pg_t parent; // split from parent (if != pg_t())
__s32 split_bits;
pg_create_t()
: created(0), split_bits(0) {}
pg_create_t(unsigned c, pg_t p, int s)
: created(c), parent(p), split_bits(s) {}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<pg_create_t*>& o);
};
WRITE_CLASS_ENCODER(pg_create_t)
// -----------------------------------------
class ObjectExtent {
/**
* ObjectExtents are used for specifying IO behavior against RADOS
* objects when one is using the ObjectCacher.
*
* To use this in a real system, *every member* must be filled
* out correctly. In particular, make sure to initialize the
* oloc correctly, as its default values are deliberate poison
* and will cause internal ObjectCacher asserts.
*
* Similarly, your buffer_extents vector *must* specify a total
* size equal to your length. If the buffer_extents inadvertently
* contain less space than the length member specifies, you
* will get unintelligible asserts deep in the ObjectCacher.
*
* If you are trying to do testing and don't care about actual
* RADOS function, the simplest thing to do is to initialize
* the ObjectExtent (truncate_size can be 0), create a single entry
* in buffer_extents matching the length, and set oloc.pool to 0.
*/
public:
object_t oid; // object id
uint64_t objectno;
uint64_t offset; // in object
uint64_t length; // in object
uint64_t truncate_size; // in object
object_locator_t oloc; // object locator (pool etc)
std::vector<std::pair<uint64_t,uint64_t> > buffer_extents; // off -> len. extents in buffer being mapped (may be fragmented bc of striping!)
ObjectExtent() : objectno(0), offset(0), length(0), truncate_size(0) {}
ObjectExtent(object_t o, uint64_t ono, uint64_t off, uint64_t l, uint64_t ts) :
oid(o), objectno(ono), offset(off), length(l), truncate_size(ts) { }
};
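// Illustrative sketch of the "testing" initialization described in the class
// comment above: a single buffer extent covering the whole length and
// oloc.pool set to 0 (the object name is hypothetical).
//
//   ObjectExtent ex(object_t("obj.0000"), 0 /* objectno */,
//                   0 /* offset */, 4096 /* length */, 0 /* truncate_size */);
//   ex.oloc.pool = 0;
//   ex.buffer_extents.push_back({0, 4096}); // must sum to `length`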
inline std::ostream& operator<<(std::ostream& out, const ObjectExtent &ex)
{
return out << "extent("
<< ex.oid << " (" << ex.objectno << ") in " << ex.oloc
<< " " << ex.offset << "~" << ex.length
<< " -> " << ex.buffer_extents
<< ")";
}
// ---------------------------------------
class OSDSuperblock {
public:
uuid_d cluster_fsid, osd_fsid;
int32_t whoami = -1; // my role in this fs.
epoch_t current_epoch = 0; // most recent epoch
epoch_t oldest_map = 0, newest_map = 0; // oldest/newest maps we have.
double weight = 0.0;
CompatSet compat_features;
// last interval over which i mounted and was then active
epoch_t mounted = 0; // last epoch i mounted
epoch_t clean_thru = 0; // epoch i was active and clean thru
epoch_t purged_snaps_last = 0;
utime_t last_purged_snaps_scrub;
epoch_t cluster_osdmap_trim_lower_bound = 0;
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<OSDSuperblock*>& o);
};
WRITE_CLASS_ENCODER(OSDSuperblock)
inline std::ostream& operator<<(std::ostream& out, const OSDSuperblock& sb)
{
return out << "sb(" << sb.cluster_fsid
<< " osd." << sb.whoami
<< " " << sb.osd_fsid
<< " e" << sb.current_epoch
<< " [" << sb.oldest_map << "," << sb.newest_map << "]"
<< " lci=[" << sb.mounted << "," << sb.clean_thru << "]"
<< " tlb=" << sb.cluster_osdmap_trim_lower_bound
<< ")";
}
// -------
/*
* attached to object head. describes most recent snap context, and
* set of existing clones.
*/
struct SnapSet {
snapid_t seq;
// NOTE: this is for pre-octopus compatibility only! remove in Q release
std::vector<snapid_t> snaps; // descending
std::vector<snapid_t> clones; // ascending
std::map<snapid_t, interval_set<uint64_t> > clone_overlap; // overlap w/ next newest
std::map<snapid_t, uint64_t> clone_size;
std::map<snapid_t, std::vector<snapid_t>> clone_snaps; // descending
SnapSet() : seq(0) {}
explicit SnapSet(ceph::buffer::list& bl) {
auto p = std::cbegin(bl);
decode(p);
}
/// populate SnapSet from a librados::snap_set_t
void from_snap_set(const librados::snap_set_t& ss, bool legacy);
/// get space accounted to clone
uint64_t get_clone_bytes(snapid_t clone) const;
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<SnapSet*>& o);
SnapContext get_ssc_as_of(snapid_t as_of) const {
SnapContext out;
out.seq = as_of;
for (auto p = clone_snaps.rbegin();
p != clone_snaps.rend();
++p) {
for (auto snap : p->second) {
if (snap <= as_of) {
out.snaps.push_back(snap);
}
}
}
return out;
}
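// Illustrative sketch (hypothetical values): with clones [1, 4] and
// clone_snaps = { 1: [1], 4: [4, 3] } (each vector descending),
//
//   SnapContext sc = ss.get_ssc_as_of(3);
//   // sc.seq == 3, sc.snaps == [3, 1]; snap 4 is newer than as_of and is
//   // skipped, and the descending order is preserved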
SnapSet get_filtered(const pg_pool_t &pinfo) const;
void filter(const pg_pool_t &pinfo);
};
WRITE_CLASS_ENCODER(SnapSet)
std::ostream& operator<<(std::ostream& out, const SnapSet& cs);
#define OI_ATTR "_"
#define SS_ATTR "snapset"
struct watch_info_t {
uint64_t cookie;
uint32_t timeout_seconds;
entity_addr_t addr;
watch_info_t() : cookie(0), timeout_seconds(0) { }
watch_info_t(uint64_t c, uint32_t t, const entity_addr_t& a) : cookie(c), timeout_seconds(t), addr(a) {}
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<watch_info_t*>& o);
};
WRITE_CLASS_ENCODER_FEATURES(watch_info_t)
static inline bool operator==(const watch_info_t& l, const watch_info_t& r) {
return l.cookie == r.cookie && l.timeout_seconds == r.timeout_seconds
&& l.addr == r.addr;
}
static inline std::ostream& operator<<(std::ostream& out, const watch_info_t& w) {
return out << "watch(cookie " << w.cookie << " " << w.timeout_seconds << "s"
<< " " << w.addr << ")";
}
struct notify_info_t {
uint64_t cookie;
uint64_t notify_id;
uint32_t timeout;
ceph::buffer::list bl;
};
static inline std::ostream& operator<<(std::ostream& out, const notify_info_t& n) {
return out << "notify(cookie " << n.cookie
<< " notify" << n.notify_id
<< " " << n.timeout << "s)";
}
class object_ref_delta_t {
std::map<hobject_t, int> ref_delta;
public:
object_ref_delta_t() = default;
object_ref_delta_t(const object_ref_delta_t &) = default;
object_ref_delta_t(object_ref_delta_t &&) = default;
object_ref_delta_t(decltype(ref_delta) &&ref_delta)
: ref_delta(std::move(ref_delta)) {}
object_ref_delta_t(const decltype(ref_delta) &ref_delta)
: ref_delta(ref_delta) {}
object_ref_delta_t &operator=(const object_ref_delta_t &) = default;
object_ref_delta_t &operator=(object_ref_delta_t &&) = default;
void dec_ref(const hobject_t &hoid, unsigned num=1) {
mut_ref(hoid, -num);
}
void inc_ref(const hobject_t &hoid, unsigned num=1) {
mut_ref(hoid, num);
}
void mut_ref(const hobject_t &hoid, int num) {
[[maybe_unused]] auto [iter, _] = ref_delta.try_emplace(hoid, 0);
iter->second += num;
if (iter->second == 0)
ref_delta.erase(iter);
}
auto begin() const { return ref_delta.begin(); }
auto end() const { return ref_delta.end(); }
auto find(hobject_t &key) const { return ref_delta.find(key); }
bool operator==(const object_ref_delta_t &rhs) const {
return ref_delta == rhs.ref_delta;
}
bool operator!=(const object_ref_delta_t &rhs) const {
return !(*this == rhs);
}
bool is_empty() {
return ref_delta.empty();
}
uint64_t size() {
return ref_delta.size();
}
friend std::ostream& operator<<(std::ostream& out, const object_ref_delta_t & ci);
};
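// Illustrative sketch (not part of the original interface): how reference
// deltas accumulate and cancel. 'foo' and 'bar' stand in for real chunk
// object names.
inline object_ref_delta_t example_ref_delta(const hobject_t& foo,
                                            const hobject_t& bar)
{
  object_ref_delta_t delta;
  delta.inc_ref(foo);      // foo: +1
  delta.inc_ref(foo);      // foo: +2
  delta.dec_ref(bar);      // bar: -1
  delta.dec_ref(foo, 2);   // foo returns to 0 and is erased from the map
  return delta;            // now holds only {bar: -1}
}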
struct chunk_info_t {
typedef enum {
FLAG_DIRTY = 1,
FLAG_MISSING = 2,
FLAG_HAS_REFERENCE = 4,
FLAG_HAS_FINGERPRINT = 8,
} cflag_t;
uint32_t offset;
uint32_t length;
hobject_t oid;
cflag_t flags; // FLAG_*
chunk_info_t() : offset(0), length(0), flags((cflag_t)0) { }
chunk_info_t(uint32_t offset, uint32_t length, hobject_t oid) :
offset(offset), length(length), oid(oid), flags((cflag_t)0) { }
static std::string get_flag_string(uint64_t flags) {
std::string r;
if (flags & FLAG_DIRTY) {
r += "|dirty";
}
if (flags & FLAG_MISSING) {
r += "|missing";
}
if (flags & FLAG_HAS_REFERENCE) {
r += "|has_reference";
}
if (flags & FLAG_HAS_FINGERPRINT) {
r += "|has_fingerprint";
}
if (r.length())
return r.substr(1);
return r;
}
bool test_flag(cflag_t f) const {
return (flags & f) == f;
}
void set_flag(cflag_t f) {
flags = (cflag_t)(flags | f);
}
void set_flags(cflag_t f) {
flags = f;
}
void clear_flag(cflag_t f) {
flags = (cflag_t)(flags & ~f);
}
void clear_flags() {
flags = (cflag_t)0;
}
bool is_dirty() const {
return test_flag(FLAG_DIRTY);
}
bool is_missing() const {
return test_flag(FLAG_MISSING);
}
bool has_reference() const {
return test_flag(FLAG_HAS_REFERENCE);
}
bool has_fingerprint() const {
return test_flag(FLAG_HAS_FINGERPRINT);
}
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
friend std::ostream& operator<<(std::ostream& out, const chunk_info_t& ci);
bool operator==(const chunk_info_t& cit) const;
bool operator!=(const chunk_info_t& cit) const {
return !(cit == *this);
}
};
WRITE_CLASS_ENCODER(chunk_info_t)
std::ostream& operator<<(std::ostream& out, const chunk_info_t& ci);
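// Illustrative sketch (not part of the original interface): a chunk entry as
// dedup/tiering code might fill it in; the backing object and sizes are
// hypothetical.
inline chunk_info_t example_chunk(const hobject_t& backing_obj)
{
  chunk_info_t ci(0, 4194304, backing_obj);  // first 4 MiB of the object
  ci.set_flag(chunk_info_t::FLAG_HAS_FINGERPRINT);
  ci.set_flag(chunk_info_t::FLAG_HAS_REFERENCE);
  // chunk_info_t::get_flag_string(ci.flags) now reports
  // "has_reference|has_fingerprint"
  return ci;
}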
struct object_info_t;
struct object_manifest_t {
enum {
TYPE_NONE = 0,
TYPE_REDIRECT = 1,
TYPE_CHUNKED = 2,
};
uint8_t type; // redirect, chunked, ...
hobject_t redirect_target;
std::map<uint64_t, chunk_info_t> chunk_map;
object_manifest_t() : type(0) { }
object_manifest_t(uint8_t type, const hobject_t& redirect_target)
: type(type), redirect_target(redirect_target) { }
bool is_empty() const {
return type == TYPE_NONE;
}
bool is_redirect() const {
return type == TYPE_REDIRECT;
}
bool is_chunked() const {
return type == TYPE_CHUNKED;
}
static std::string_view get_type_name(uint8_t m) {
switch (m) {
case TYPE_NONE: return "none";
case TYPE_REDIRECT: return "redirect";
case TYPE_CHUNKED: return "chunked";
default: return "unknown";
}
}
std::string_view get_type_name() const {
return get_type_name(type);
}
void clear() {
type = 0;
redirect_target = hobject_t();
chunk_map.clear();
}
/**
* calc_refs_to_inc_on_set
*
* Takes a manifest and returns the set of refs to
* increment upon set-chunk
*
* l should be nullptr if there are no clones, or
* l and g may each be null if the corresponding clone does not exist.
* *this contains the set of new references to set
*
*/
void calc_refs_to_inc_on_set(
const object_manifest_t* g, ///< [in] manifest for clone > *this
const object_manifest_t* l, ///< [in] manifest for clone < *this
    object_ref_delta_t &delta       ///< [out] set of refs to increment
) const;
/**
* calc_refs_to_drop_on_modify
*
* Takes a manifest and returns the set of refs to
* drop upon modification
*
* l should be nullptr if there are no clones, or
* l may be null if the corresponding clone does not exist.
*
*/
void calc_refs_to_drop_on_modify(
const object_manifest_t* l, ///< [in] manifest for previous clone
const ObjectCleanRegions& clean_regions, ///< [in] clean regions
object_ref_delta_t &delta ///< [out] set of refs to drop
) const;
/**
* calc_refs_to_drop_on_removal
*
* Takes the two adjacent manifests and returns the set of refs to
* drop upon removal of the clone containing *this.
*
* g should be nullptr if *this is on HEAD, l should be nullptr if
* *this is on the oldest clone (or head if there are no clones).
*/
void calc_refs_to_drop_on_removal(
const object_manifest_t* g, ///< [in] manifest for clone > *this
const object_manifest_t* l, ///< [in] manifest for clone < *this
object_ref_delta_t &delta ///< [out] set of refs to drop
) const;
static void generate_test_instances(std::list<object_manifest_t*>& o);
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
void dump(ceph::Formatter *f) const;
friend std::ostream& operator<<(std::ostream& out, const object_info_t& oi);
};
WRITE_CLASS_ENCODER(object_manifest_t)
std::ostream& operator<<(std::ostream& out, const object_manifest_t& oi);
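// Illustrative sketch (not part of the original interface): a minimal chunked
// manifest mapping the object's first two 4 MiB ranges onto hypothetical
// backing objects.
inline object_manifest_t example_manifest(const hobject_t& chunk0,
                                          const hobject_t& chunk1)
{
  object_manifest_t m;
  m.type = object_manifest_t::TYPE_CHUNKED;
  m.chunk_map[0]       = chunk_info_t(0, 4194304, chunk0);
  m.chunk_map[4194304] = chunk_info_t(4194304, 4194304, chunk1);
  // m.is_chunked() is now true and m.get_type_name() returns "chunked"
  return m;
}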
struct object_info_t {
hobject_t soid;
eversion_t version, prior_version;
version_t user_version;
osd_reqid_t last_reqid;
uint64_t size;
utime_t mtime;
utime_t local_mtime; // local mtime
// note: these are currently encoded into a total 16 bits; see
// encode()/decode() for the weirdness.
typedef enum {
FLAG_LOST = 1<<0,
FLAG_WHITEOUT = 1<<1, // object logically does not exist
FLAG_DIRTY = 1<<2, // object has been modified since last flushed or undirtied
FLAG_OMAP = 1<<3, // has (or may have) some/any omap data
FLAG_DATA_DIGEST = 1<<4, // has data crc
FLAG_OMAP_DIGEST = 1<<5, // has omap crc
FLAG_CACHE_PIN = 1<<6, // pin the object in cache tier
FLAG_MANIFEST = 1<<7, // has manifest
FLAG_USES_TMAP = 1<<8, // deprecated; no longer used
FLAG_REDIRECT_HAS_REFERENCE = 1<<9, // has reference
} flag_t;
flag_t flags;
static std::string get_flag_string(flag_t flags) {
std::string s;
std::vector<std::string> sv = get_flag_vector(flags);
for (auto ss : sv) {
s += std::string("|") + ss;
}
if (s.length())
return s.substr(1);
return s;
}
static std::vector<std::string> get_flag_vector(flag_t flags) {
std::vector<std::string> sv;
if (flags & FLAG_LOST)
sv.insert(sv.end(), "lost");
if (flags & FLAG_WHITEOUT)
sv.insert(sv.end(), "whiteout");
if (flags & FLAG_DIRTY)
sv.insert(sv.end(), "dirty");
if (flags & FLAG_USES_TMAP)
sv.insert(sv.end(), "uses_tmap");
if (flags & FLAG_OMAP)
sv.insert(sv.end(), "omap");
if (flags & FLAG_DATA_DIGEST)
sv.insert(sv.end(), "data_digest");
if (flags & FLAG_OMAP_DIGEST)
sv.insert(sv.end(), "omap_digest");
if (flags & FLAG_CACHE_PIN)
sv.insert(sv.end(), "cache_pin");
if (flags & FLAG_MANIFEST)
sv.insert(sv.end(), "manifest");
if (flags & FLAG_REDIRECT_HAS_REFERENCE)
sv.insert(sv.end(), "redirect_has_reference");
return sv;
}
std::string get_flag_string() const {
return get_flag_string(flags);
}
uint64_t truncate_seq, truncate_size;
std::map<std::pair<uint64_t, entity_name_t>, watch_info_t> watchers;
// opportunistic checksums; may or may not be present
__u32 data_digest; ///< data crc32c
__u32 omap_digest; ///< omap crc32c
// alloc hint attribute
uint64_t expected_object_size, expected_write_size;
uint32_t alloc_hint_flags;
struct object_manifest_t manifest;
void copy_user_bits(const object_info_t& other);
bool test_flag(flag_t f) const {
return (flags & f) == f;
}
void set_flag(flag_t f) {
flags = (flag_t)(flags | f);
}
void clear_flag(flag_t f) {
flags = (flag_t)(flags & ~f);
}
bool is_lost() const {
return test_flag(FLAG_LOST);
}
bool is_whiteout() const {
return test_flag(FLAG_WHITEOUT);
}
bool is_dirty() const {
return test_flag(FLAG_DIRTY);
}
bool is_omap() const {
return test_flag(FLAG_OMAP);
}
bool is_data_digest() const {
return test_flag(FLAG_DATA_DIGEST);
}
bool is_omap_digest() const {
return test_flag(FLAG_OMAP_DIGEST);
}
bool is_cache_pinned() const {
return test_flag(FLAG_CACHE_PIN);
}
bool has_manifest() const {
return test_flag(FLAG_MANIFEST);
}
void set_data_digest(__u32 d) {
set_flag(FLAG_DATA_DIGEST);
data_digest = d;
}
void set_omap_digest(__u32 d) {
set_flag(FLAG_OMAP_DIGEST);
omap_digest = d;
}
void clear_data_digest() {
clear_flag(FLAG_DATA_DIGEST);
data_digest = -1;
}
void clear_omap_digest() {
clear_flag(FLAG_OMAP_DIGEST);
omap_digest = -1;
}
void new_object() {
clear_data_digest();
clear_omap_digest();
}
void encode(ceph::buffer::list& bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator& bl);
void decode(const ceph::buffer::list& bl) {
auto p = std::cbegin(bl);
decode(p);
}
void encode_no_oid(ceph::buffer::list& bl, uint64_t features) {
// TODO: drop soid field and remove the denc no_oid methods
auto tmp_oid = hobject_t(hobject_t::get_max());
tmp_oid.swap(soid);
encode(bl, features);
soid = tmp_oid;
}
void decode_no_oid(ceph::buffer::list::const_iterator& bl) {
decode(bl);
ceph_assert(soid.is_max());
}
void decode_no_oid(const ceph::buffer::list& bl) {
auto p = std::cbegin(bl);
decode_no_oid(p);
}
void decode_no_oid(const ceph::buffer::list& bl, const hobject_t& _soid) {
auto p = std::cbegin(bl);
decode_no_oid(p);
soid = _soid;
}
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<object_info_t*>& o);
explicit object_info_t()
: user_version(0), size(0), flags((flag_t)0),
truncate_seq(0), truncate_size(0),
data_digest(-1), omap_digest(-1),
expected_object_size(0), expected_write_size(0),
alloc_hint_flags(0)
{}
explicit object_info_t(const hobject_t& s)
: soid(s),
user_version(0), size(0), flags((flag_t)0),
truncate_seq(0), truncate_size(0),
data_digest(-1), omap_digest(-1),
expected_object_size(0), expected_write_size(0),
alloc_hint_flags(0)
{}
explicit object_info_t(const ceph::buffer::list& bl) {
decode(bl);
}
explicit object_info_t(const ceph::buffer::list& bl, const hobject_t& _soid) {
decode_no_oid(bl);
soid = _soid;
}
};
WRITE_CLASS_ENCODER_FEATURES(object_info_t)
std::ostream& operator<<(std::ostream& out, const object_info_t& oi);
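// Illustrative sketch (not part of the original interface): typical digest
// bookkeeping on an object_info_t; the crc values are hypothetical.
inline void example_oi_digests(object_info_t& oi)
{
  oi.set_data_digest(0x1b2a3c4d);   // whole-object data crc32c
  oi.set_omap_digest(0x12345678);   // omap crc32c
  ceph_assert(oi.is_data_digest() && oi.is_omap_digest());
  // a write that invalidates the cached crcs clears the flags again:
  oi.clear_data_digest();
  oi.new_object();                  // clears both digests
}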
// Object recovery
struct ObjectRecoveryInfo {
hobject_t soid;
eversion_t version;
uint64_t size;
object_info_t oi;
SnapSet ss; // only populated if soid is_snap()
interval_set<uint64_t> copy_subset;
std::map<hobject_t, interval_set<uint64_t>> clone_subset;
bool object_exist;
ObjectRecoveryInfo() : size(0), object_exist(true) { }
static void generate_test_instances(std::list<ObjectRecoveryInfo*>& o);
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl, int64_t pool = -1);
std::ostream &print(std::ostream &out) const;
void dump(ceph::Formatter *f) const;
};
WRITE_CLASS_ENCODER_FEATURES(ObjectRecoveryInfo)
std::ostream& operator<<(std::ostream& out, const ObjectRecoveryInfo &inf);
struct ObjectRecoveryProgress {
uint64_t data_recovered_to;
std::string omap_recovered_to;
bool first;
bool data_complete;
bool omap_complete;
bool error = false;
ObjectRecoveryProgress()
: data_recovered_to(0),
first(true),
data_complete(false), omap_complete(false) { }
bool is_complete(const ObjectRecoveryInfo& info) const {
return (data_recovered_to >= (
info.copy_subset.empty() ?
0 : info.copy_subset.range_end())) &&
omap_complete;
}
uint64_t estimate_remaining_data_to_recover(const ObjectRecoveryInfo& info) const {
// Overestimates in case of clones, but avoids traversing copy_subset
return info.size - data_recovered_to;
}
static void generate_test_instances(std::list<ObjectRecoveryProgress*>& o);
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
std::ostream &print(std::ostream &out) const;
void dump(ceph::Formatter *f) const;
};
WRITE_CLASS_ENCODER(ObjectRecoveryProgress)
std::ostream& operator<<(std::ostream& out, const ObjectRecoveryProgress &prog);
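// Illustrative sketch (not part of the original interface): advancing and
// checking recovery progress. 'info' and 'progress' are assumed to describe
// the same object; the byte count is hypothetical.
inline bool example_advance_recovery(ObjectRecoveryProgress& progress,
                                     const ObjectRecoveryInfo& info,
                                     uint64_t bytes_pushed)
{
  progress.first = false;
  progress.data_recovered_to += bytes_pushed;
  if (progress.data_recovered_to >=
      (info.copy_subset.empty() ? 0 : info.copy_subset.range_end())) {
    progress.data_complete = true;
  }
  return progress.is_complete(info);  // also requires omap_complete
}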
struct PushReplyOp {
hobject_t soid;
static void generate_test_instances(std::list<PushReplyOp*>& o);
void encode(ceph::buffer::list &bl) const;
void decode(ceph::buffer::list::const_iterator &bl);
std::ostream &print(std::ostream &out) const;
void dump(ceph::Formatter *f) const;
uint64_t cost(CephContext *cct) const;
};
WRITE_CLASS_ENCODER(PushReplyOp)
std::ostream& operator<<(std::ostream& out, const PushReplyOp &op);
struct PullOp {
hobject_t soid;
ObjectRecoveryInfo recovery_info;
ObjectRecoveryProgress recovery_progress;
static void generate_test_instances(std::list<PullOp*>& o);
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
std::ostream &print(std::ostream &out) const;
void dump(ceph::Formatter *f) const;
uint64_t cost(CephContext *cct) const;
};
WRITE_CLASS_ENCODER_FEATURES(PullOp)
std::ostream& operator<<(std::ostream& out, const PullOp &op);
struct PushOp {
hobject_t soid;
eversion_t version;
ceph::buffer::list data;
interval_set<uint64_t> data_included;
ceph::buffer::list omap_header;
std::map<std::string, ceph::buffer::list> omap_entries;
std::map<std::string, ceph::buffer::list, std::less<>> attrset;
ObjectRecoveryInfo recovery_info;
ObjectRecoveryProgress before_progress;
ObjectRecoveryProgress after_progress;
static void generate_test_instances(std::list<PushOp*>& o);
void encode(ceph::buffer::list &bl, uint64_t features) const;
void decode(ceph::buffer::list::const_iterator &bl);
std::ostream &print(std::ostream &out) const;
void dump(ceph::Formatter *f) const;
uint64_t cost(CephContext *cct) const;
};
WRITE_CLASS_ENCODER_FEATURES(PushOp)
std::ostream& operator<<(std::ostream& out, const PushOp &op);
/*
* summarize pg contents for purposes of a scrub
*
* If members are added to ScrubMap, make sure to modify swap().
*/
struct ScrubMap {
struct object {
std::map<std::string, ceph::buffer::ptr, std::less<>> attrs;
uint64_t size;
__u32 omap_digest; ///< omap crc32c
__u32 digest; ///< data crc32c
bool negative:1;
bool digest_present:1;
bool omap_digest_present:1;
bool read_error:1;
bool stat_error:1;
bool ec_hash_mismatch:1;
bool ec_size_mismatch:1;
bool large_omap_object_found:1;
uint64_t large_omap_object_key_count = 0;
uint64_t large_omap_object_value_size = 0;
uint64_t object_omap_bytes = 0;
uint64_t object_omap_keys = 0;
object() :
// Init invalid size so it won't match if we get a stat EIO error
size(-1), omap_digest(0), digest(0),
negative(false), digest_present(false), omap_digest_present(false),
read_error(false), stat_error(false), ec_hash_mismatch(false),
ec_size_mismatch(false), large_omap_object_found(false) {}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<object*>& o);
};
WRITE_CLASS_ENCODER(object)
std::map<hobject_t,object> objects;
eversion_t valid_through;
eversion_t incr_since;
bool has_large_omap_object_errors{false};
bool has_omap_keys{false};
void merge_incr(const ScrubMap &l);
void clear_from(const hobject_t& start) {
objects.erase(objects.lower_bound(start), objects.end());
}
void insert(const ScrubMap &r) {
objects.insert(r.objects.begin(), r.objects.end());
}
void swap(ScrubMap &r) {
using std::swap;
swap(objects, r.objects);
swap(valid_through, r.valid_through);
swap(incr_since, r.incr_since);
swap(has_large_omap_object_errors, r.has_large_omap_object_errors);
swap(has_omap_keys, r.has_omap_keys);
}
void encode(ceph::buffer::list& bl) const;
void decode(ceph::buffer::list::const_iterator& bl, int64_t pool=-1);
void dump(ceph::Formatter *f) const;
static void generate_test_instances(std::list<ScrubMap*>& o);
};
WRITE_CLASS_ENCODER(ScrubMap::object)
WRITE_CLASS_ENCODER(ScrubMap)
struct ScrubMapBuilder {
bool deep = false;
std::vector<hobject_t> ls;
size_t pos = 0;
int64_t data_pos = 0;
std::string omap_pos;
int ret = 0;
  ceph::buffer::hash data_hash, omap_hash;  ///< accumulating hash value
uint64_t omap_keys = 0;
uint64_t omap_bytes = 0;
bool empty() {
return ls.empty();
}
bool done() {
return pos >= ls.size();
}
void reset() {
*this = ScrubMapBuilder();
}
bool data_done() {
return data_pos < 0;
}
void next_object() {
++pos;
data_pos = 0;
omap_pos.clear();
omap_keys = 0;
omap_bytes = 0;
}
friend std::ostream& operator<<(std::ostream& out, const ScrubMapBuilder& pos) {
out << "(" << pos.pos << "/" << pos.ls.size();
if (pos.pos < pos.ls.size()) {
out << " " << pos.ls[pos.pos];
}
if (pos.data_pos < 0) {
out << " byte " << pos.data_pos;
}
if (!pos.omap_pos.empty()) {
out << " key " << pos.omap_pos;
}
if (pos.deep) {
out << " deep";
}
if (pos.ret) {
out << " ret " << pos.ret;
}
return out << ")";
}
};
struct watch_item_t {
entity_name_t name;
uint64_t cookie;
uint32_t timeout_seconds;
entity_addr_t addr;
watch_item_t() : cookie(0), timeout_seconds(0) { }
watch_item_t(entity_name_t name, uint64_t cookie, uint32_t timeout,
const entity_addr_t& addr)
: name(name), cookie(cookie), timeout_seconds(timeout),
addr(addr) { }
void encode(ceph::buffer::list &bl, uint64_t features) const {
ENCODE_START(2, 1, bl);
encode(name, bl);
encode(cookie, bl);
encode(timeout_seconds, bl);
encode(addr, bl, features);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator &bl) {
DECODE_START(2, bl);
decode(name, bl);
decode(cookie, bl);
decode(timeout_seconds, bl);
if (struct_v >= 2) {
decode(addr, bl);
}
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const {
f->dump_stream("watcher") << name;
f->dump_int("cookie", cookie);
f->dump_int("timeout", timeout_seconds);
f->open_object_section("addr");
addr.dump(f);
f->close_section();
}
static void generate_test_instances(std::list<watch_item_t*>& o) {
entity_addr_t ea;
ea.set_type(entity_addr_t::TYPE_LEGACY);
ea.set_nonce(1000);
ea.set_family(AF_INET);
ea.set_in4_quad(0, 127);
ea.set_in4_quad(1, 0);
ea.set_in4_quad(2, 0);
ea.set_in4_quad(3, 1);
ea.set_port(1024);
o.push_back(new watch_item_t(entity_name_t(entity_name_t::TYPE_CLIENT, 1), 10, 30, ea));
ea.set_nonce(1001);
ea.set_in4_quad(3, 2);
ea.set_port(1025);
o.push_back(new watch_item_t(entity_name_t(entity_name_t::TYPE_CLIENT, 2), 20, 60, ea));
}
};
WRITE_CLASS_ENCODER_FEATURES(watch_item_t)
struct obj_watch_item_t {
hobject_t obj;
watch_item_t wi;
};
/**
* obj list watch response format
*
*/
struct obj_list_watch_response_t {
std::list<watch_item_t> entries;
void encode(ceph::buffer::list& bl, uint64_t features) const {
ENCODE_START(1, 1, bl);
encode(entries, bl, features);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const {
f->open_array_section("entries");
for (std::list<watch_item_t>::const_iterator p = entries.begin(); p != entries.end(); ++p) {
f->open_object_section("watch");
p->dump(f);
f->close_section();
}
f->close_section();
}
static void generate_test_instances(std::list<obj_list_watch_response_t*>& o) {
entity_addr_t ea;
o.push_back(new obj_list_watch_response_t);
o.push_back(new obj_list_watch_response_t);
std::list<watch_item_t*> test_watchers;
watch_item_t::generate_test_instances(test_watchers);
for (auto &e : test_watchers) {
o.back()->entries.push_back(*e);
delete e;
}
}
};
WRITE_CLASS_ENCODER_FEATURES(obj_list_watch_response_t)
struct clone_info {
snapid_t cloneid;
std::vector<snapid_t> snaps; // ascending
std::vector< std::pair<uint64_t,uint64_t> > overlap;
uint64_t size;
clone_info() : cloneid(CEPH_NOSNAP), size(0) {}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(cloneid, bl);
encode(snaps, bl);
encode(overlap, bl);
encode(size, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(1, bl);
decode(cloneid, bl);
decode(snaps, bl);
decode(overlap, bl);
decode(size, bl);
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const {
if (cloneid == CEPH_NOSNAP)
f->dump_string("cloneid", "HEAD");
else
f->dump_unsigned("cloneid", cloneid.val);
f->open_array_section("snapshots");
for (std::vector<snapid_t>::const_iterator p = snaps.begin(); p != snaps.end(); ++p) {
f->open_object_section("snap");
f->dump_unsigned("id", p->val);
f->close_section();
}
f->close_section();
f->open_array_section("overlaps");
for (std::vector< std::pair<uint64_t,uint64_t> >::const_iterator q = overlap.begin();
q != overlap.end(); ++q) {
f->open_object_section("overlap");
f->dump_unsigned("offset", q->first);
f->dump_unsigned("length", q->second);
f->close_section();
}
f->close_section();
f->dump_unsigned("size", size);
}
static void generate_test_instances(std::list<clone_info*>& o) {
o.push_back(new clone_info);
o.push_back(new clone_info);
o.back()->cloneid = 1;
o.back()->snaps.push_back(1);
o.back()->overlap.push_back(std::pair<uint64_t,uint64_t>(0,4096));
o.back()->overlap.push_back(std::pair<uint64_t,uint64_t>(8192,4096));
o.back()->size = 16384;
o.push_back(new clone_info);
o.back()->cloneid = CEPH_NOSNAP;
o.back()->size = 32768;
}
};
WRITE_CLASS_ENCODER(clone_info)
/**
* obj list snaps response format
*
*/
struct obj_list_snap_response_t {
std::vector<clone_info> clones; // ascending
snapid_t seq;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(2, 1, bl);
encode(clones, bl);
encode(seq, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& bl) {
DECODE_START(2, bl);
decode(clones, bl);
if (struct_v >= 2)
decode(seq, bl);
else
seq = CEPH_NOSNAP;
DECODE_FINISH(bl);
}
void dump(ceph::Formatter *f) const {
f->open_array_section("clones");
for (std::vector<clone_info>::const_iterator p = clones.begin(); p != clones.end(); ++p) {
f->open_object_section("clone");
p->dump(f);
f->close_section();
}
f->dump_unsigned("seq", seq);
f->close_section();
}
static void generate_test_instances(std::list<obj_list_snap_response_t*>& o) {
o.push_back(new obj_list_snap_response_t);
o.push_back(new obj_list_snap_response_t);
clone_info cl;
cl.cloneid = 1;
cl.snaps.push_back(1);
cl.overlap.push_back(std::pair<uint64_t,uint64_t>(0,4096));
cl.overlap.push_back(std::pair<uint64_t,uint64_t>(8192,4096));
cl.size = 16384;
o.back()->clones.push_back(cl);
cl.cloneid = CEPH_NOSNAP;
cl.snaps.clear();
cl.overlap.clear();
cl.size = 32768;
o.back()->clones.push_back(cl);
o.back()->seq = 123;
}
};
WRITE_CLASS_ENCODER(obj_list_snap_response_t)
// PromoteCounter
struct PromoteCounter {
std::atomic<unsigned long long> attempts{0};
std::atomic<unsigned long long> objects{0};
std::atomic<unsigned long long> bytes{0};
void attempt() {
attempts++;
}
void finish(uint64_t size) {
objects++;
bytes += size;
}
void sample_and_attenuate(uint64_t *a, uint64_t *o, uint64_t *b) {
*a = attempts;
*o = objects;
*b = bytes;
attempts = *a / 2;
objects = *o / 2;
bytes = *b / 2;
}
};
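// Illustrative sketch (not part of the original interface): the cache-tier
// agent samples promotion activity and halves the live counters so older
// history decays; the promoted size is hypothetical.
inline void example_promote_sample(PromoteCounter& pc)
{
  pc.attempt();
  pc.finish(4096);  // one promoted object of 4 KiB
  uint64_t attempts, objects, bytes;
  pc.sample_and_attenuate(&attempts, &objects, &bytes);
  // attempts/objects/bytes hold the sampled totals; the counters keep half
}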
struct pool_pg_num_history_t {
/// last epoch updated
epoch_t epoch = 0;
/// poolid -> epoch -> pg_num
std::map<int64_t, std::map<epoch_t,uint32_t>> pg_nums;
/// pair(epoch, poolid)
std::set<std::pair<epoch_t,int64_t>> deleted_pools;
void log_pg_num_change(epoch_t epoch, int64_t pool, uint32_t pg_num) {
pg_nums[pool][epoch] = pg_num;
}
void log_pool_delete(epoch_t epoch, int64_t pool) {
deleted_pools.insert(std::make_pair(epoch, pool));
}
/// prune history based on oldest osdmap epoch in the cluster
void prune(epoch_t oldest_epoch) {
auto i = deleted_pools.begin();
while (i != deleted_pools.end()) {
if (i->first >= oldest_epoch) {
break;
}
pg_nums.erase(i->second);
i = deleted_pools.erase(i);
}
for (auto& j : pg_nums) {
auto k = j.second.lower_bound(oldest_epoch);
// keep this and the entry before it (just to be paranoid)
if (k != j.second.begin()) {
--k;
j.second.erase(j.second.begin(), k);
}
}
}
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(epoch, bl);
encode(pg_nums, bl);
encode(deleted_pools, bl);
ENCODE_FINISH(bl);
}
void decode(ceph::buffer::list::const_iterator& p) {
DECODE_START(1, p);
decode(epoch, p);
decode(pg_nums, p);
decode(deleted_pools, p);
DECODE_FINISH(p);
}
void dump(ceph::Formatter *f) const {
f->dump_unsigned("epoch", epoch);
f->open_object_section("pools");
for (auto& i : pg_nums) {
f->open_object_section("pool");
f->dump_unsigned("pool_id", i.first);
f->open_array_section("changes");
for (auto& j : i.second) {
f->open_object_section("change");
f->dump_unsigned("epoch", j.first);
f->dump_unsigned("pg_num", j.second);
f->close_section();
}
f->close_section();
f->close_section();
}
f->close_section();
f->open_array_section("deleted_pools");
for (auto& i : deleted_pools) {
f->open_object_section("deletion");
f->dump_unsigned("pool_id", i.second);
f->dump_unsigned("epoch", i.first);
f->close_section();
}
f->close_section();
}
static void generate_test_instances(std::list<pool_pg_num_history_t*>& ls) {
ls.push_back(new pool_pg_num_history_t);
}
friend std::ostream& operator<<(std::ostream& out, const pool_pg_num_history_t& h) {
return out << "pg_num_history(e" << h.epoch
<< " pg_nums " << h.pg_nums
<< " deleted_pools " << h.deleted_pools
<< ")";
}
};
WRITE_CLASS_ENCODER(pool_pg_num_history_t)
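// Illustrative sketch (not part of the original interface): recording pg_num
// changes and pruning them once every osdmap older than a given epoch has
// been trimmed cluster-wide. Epochs and pool ids are hypothetical.
inline void example_pg_num_history(pool_pg_num_history_t& h)
{
  h.log_pg_num_change(100, 1, 64);   // pool 1 moved to 64 PGs at epoch 100
  h.log_pg_num_change(120, 1, 128);  // and to 128 PGs at epoch 120
  h.log_pool_delete(130, 2);         // pool 2 was deleted at epoch 130
  h.epoch = 130;
  // with the cluster's oldest osdmap at epoch 125, prune() keeps the entry
  // just before 125 (epoch 120) and drops anything older
  h.prune(125);
}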
// prefix pgmeta_oid keys with _ so that PGLog::read_log_and_missing() can
// easily skip them
static const std::string_view infover_key = "_infover";
static const std::string_view info_key = "_info";
static const std::string_view biginfo_key = "_biginfo";
static const std::string_view epoch_key = "_epoch";
static const std::string_view fastinfo_key = "_fastinfo";
static const __u8 pg_latest_struct_v = 10;
// v10 is the new past_intervals encoding
// v9 was fastinfo_key addition
// v8 was the move to a per-pg pgmeta object
// v7 was SnapMapper addition in 86658392516d5175b2756659ef7ffaaf95b0f8ad
// (first appeared in cuttlefish).
static const __u8 pg_compat_struct_v = 10;
int prepare_info_keymap(
CephContext* cct,
std::map<std::string,ceph::buffer::list> *km,
std::string *key_to_remove,
epoch_t epoch,
pg_info_t &info,
pg_info_t &last_written_info,
PastIntervals &past_intervals,
bool dirty_big_info,
bool dirty_epoch,
bool try_fast_info,
PerfCounters *logger = nullptr,
DoutPrefixProvider *dpp = nullptr);
namespace ceph::os {
class Transaction;
};
void create_pg_collection(
ceph::os::Transaction& t, spg_t pgid, int bits);
void init_pg_ondisk(
ceph::os::Transaction& t, spg_t pgid, const pg_pool_t *pool);
// filter for pg listings
class PGLSFilter {
CephContext* cct;
protected:
std::string xattr;
public:
PGLSFilter();
virtual ~PGLSFilter();
virtual bool filter(const hobject_t &obj,
const ceph::buffer::list& xattr_data) const = 0;
/**
* Arguments passed from the RADOS client. Implementations must
* handle any encoding errors, and return an appropriate error code,
* or 0 on valid input.
*/
virtual int init(ceph::buffer::list::const_iterator ¶ms) = 0;
/**
* xattr key, or empty string. If non-empty, this xattr will be fetched
* and the value passed into ::filter
*/
virtual const std::string& get_xattr() const { return xattr; }
/**
* If true, objects without the named xattr (if xattr name is not empty)
* will be rejected without calling ::filter
*/
virtual bool reject_empty_xattr() const { return true; }
};
class PGLSPlainFilter : public PGLSFilter {
std::string val;
public:
int init(ceph::buffer::list::const_iterator ¶ms) override;
~PGLSPlainFilter() override {}
bool filter(const hobject_t& obj,
const ceph::buffer::list& xattr_data) const override;
};
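// Illustrative sketch (not part of the original interface): the general shape
// of a custom pg-listing filter. This hypothetical filter keeps only objects
// whose configured xattr is present and non-empty; hooking it into the OSD's
// filter instantiation is outside the scope of this header.
class ExampleNonEmptyXattrFilter : public PGLSFilter {
public:
  int init(ceph::buffer::list::const_iterator &params) override {
    try {
      using ceph::decode;
      decode(xattr, params);  // the client names the xattr to inspect
    } catch (const ceph::buffer::error &) {
      return -EINVAL;         // malformed parameters from the client
    }
    return 0;
  }
  bool filter(const hobject_t &obj,
	      const ceph::buffer::list &xattr_data) const override {
    return xattr_data.length() > 0;
  }
};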
// alias name for this structure:
using missing_map_t = std::map<hobject_t,
std::pair<std::optional<uint32_t>,
std::optional<uint32_t>>>;
#endif
| 208,516 | 30.398434 | 150 | h |
null | ceph-main/src/osd/osd_types_fmt.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file fmtlib formatters for some types.h classes
*/
#include "common/hobject_fmt.h"
#include "osd/osd_types.h"
#include <fmt/chrono.h>
#if FMT_VERSION >= 90000
#include <fmt/ostream.h>
#endif
template <>
struct fmt::formatter<osd_reqid_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const osd_reqid_t& req_id, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "{}.{}:{}", req_id.name, req_id.inc,
req_id.tid);
}
};
template <>
struct fmt::formatter<pg_shard_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_shard_t& shrd, FormatContext& ctx) const
{
if (shrd.is_undefined()) {
return fmt::format_to(ctx.out(), "?");
}
if (shrd.shard == shard_id_t::NO_SHARD) {
return fmt::format_to(ctx.out(), "{}", shrd.get_osd());
}
return fmt::format_to(ctx.out(), "{}({})", shrd.get_osd(), shrd.shard);
}
};
template <>
struct fmt::formatter<eversion_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const eversion_t& ev, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "{}'{}", ev.epoch, ev.version);
}
};
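// Illustrative sketch (not part of the original header): with these
// formatters in scope, OSD types can be used directly in fmt format strings.
// The helper name is hypothetical.
inline std::string example_format_versions(const eversion_t& ev,
                                           const osd_reqid_t& reqid)
{
  // eversion_t renders as "<epoch>'<version>", osd_reqid_t as
  // "<name>.<inc>:<tid>"
  return fmt::format("{} from {}", ev, reqid);
}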
template <>
struct fmt::formatter<chunk_info_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const chunk_info_t& ci, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "(len: {} oid: {} offset: {} flags: {})",
ci.length, ci.oid, ci.offset,
ci.get_flag_string(ci.flags));
}
};
template <>
struct fmt::formatter<object_manifest_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const object_manifest_t& om, FormatContext& ctx) const
{
fmt::format_to(ctx.out(), "manifest({}", om.get_type_name());
if (om.is_redirect()) {
fmt::format_to(ctx.out(), " {}", om.redirect_target);
} else if (om.is_chunked()) {
fmt::format_to(ctx.out(), " {}", om.chunk_map);
}
return fmt::format_to(ctx.out(), ")");
}
};
template <>
struct fmt::formatter<object_info_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const object_info_t& oi, FormatContext& ctx) const
{
fmt::format_to(ctx.out(), "{}({} {} {} s {} uv {}", oi.soid, oi.version,
oi.last_reqid, (oi.flags ? oi.get_flag_string() : ""), oi.size,
oi.user_version);
if (oi.is_data_digest()) {
fmt::format_to(ctx.out(), " dd {:x}", oi.data_digest);
}
if (oi.is_omap_digest()) {
fmt::format_to(ctx.out(), " od {:x}", oi.omap_digest);
}
fmt::format_to(ctx.out(), " alloc_hint [{} {} {}]", oi.expected_object_size,
oi.expected_write_size, oi.alloc_hint_flags);
if (oi.has_manifest()) {
fmt::format_to(ctx.out(), " {}", oi.manifest);
}
return fmt::format_to(ctx.out(), ")");
}
};
template <>
struct fmt::formatter<pg_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_t& pg, FormatContext& ctx) const
{
return fmt::format_to(ctx.out(), "{}.{:x}", pg.pool(), pg.m_seed);
}
};
template <>
struct fmt::formatter<spg_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const spg_t& spg, FormatContext& ctx) const
{
if (shard_id_t::NO_SHARD == spg.shard.id) {
return fmt::format_to(ctx.out(), "{}", spg.pgid);
} else {
      return fmt::format_to(ctx.out(), "{}s{}", spg.pgid, spg.shard.id);
}
}
};
template <>
struct fmt::formatter<pg_history_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_history_t& pgh, FormatContext& ctx) const
{
fmt::format_to(ctx.out(),
"ec={}/{} lis/c={}/{} les/c/f={}/{}/{} sis={}",
pgh.epoch_created,
pgh.epoch_pool_created,
pgh.last_interval_started,
pgh.last_interval_clean,
pgh.last_epoch_started,
pgh.last_epoch_clean,
pgh.last_epoch_marked_full,
pgh.same_interval_since);
if (pgh.prior_readable_until_ub != ceph::timespan::zero()) {
return fmt::format_to(ctx.out(),
" pruub={}",
pgh.prior_readable_until_ub);
} else {
return ctx.out();
}
}
};
template <>
struct fmt::formatter<pg_info_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const pg_info_t& pgi, FormatContext& ctx) const
{
fmt::format_to(ctx.out(), "{}({}", pgi.pgid, (pgi.dne() ? " DNE" : ""));
if (pgi.is_empty()) {
fmt::format_to(ctx.out(), " empty");
} else {
fmt::format_to(ctx.out(), " v {}", pgi.last_update);
if (pgi.last_complete != pgi.last_update) {
fmt::format_to(ctx.out(), " lc {}", pgi.last_complete);
}
fmt::format_to(ctx.out(), " ({},{}]", pgi.log_tail, pgi.last_update);
}
if (pgi.is_incomplete()) {
fmt::format_to(ctx.out(), " lb {}", pgi.last_backfill);
}
fmt::format_to(ctx.out(),
" local-lis/les={}/{}",
pgi.last_interval_started,
pgi.last_epoch_started);
return fmt::format_to(ctx.out(),
" n={} {})",
pgi.stats.stats.sum.num_objects,
pgi.history);
}
};
// snaps and snap-sets
template <>
struct fmt::formatter<SnapSet> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
auto it = ctx.begin();
if (it != ctx.end() && *it == 'D') {
verbose = true;
++it;
}
return it;
}
template <typename FormatContext>
auto format(const SnapSet& snps, FormatContext& ctx) const
{
if (verbose) {
// similar to SnapSet::dump()
fmt::format_to(ctx.out(),
"snaps{{{}: clns ({}): ",
snps.seq,
snps.clones.size());
for (auto cln : snps.clones) {
fmt::format_to(ctx.out(), "[{}: sz:", cln);
auto cs = snps.clone_size.find(cln);
if (cs != snps.clone_size.end()) {
fmt::format_to(ctx.out(), "{} ", cs->second);
} else {
fmt::format_to(ctx.out(), "??");
}
auto co = snps.clone_overlap.find(cln);
if (co != snps.clone_overlap.end()) {
fmt::format_to(ctx.out(), "olp:{} ", co->second);
} else {
fmt::format_to(ctx.out(), "olp:?? ");
}
auto cln_snps = snps.clone_snaps.find(cln);
if (cln_snps != snps.clone_snaps.end()) {
fmt::format_to(ctx.out(), "cl-snps:{} ]", cln_snps->second);
} else {
fmt::format_to(ctx.out(), "cl-snps:?? ]");
}
}
return fmt::format_to(ctx.out(), "}}");
} else {
return fmt::format_to(ctx.out(),
"{}={}:{}",
snps.seq,
snps.snaps,
snps.clone_snaps);
}
}
bool verbose{false};
};
template <>
struct fmt::formatter<ScrubMap::object> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
  ///\todo: consider passing the 'D' flag to control snapset dump
template <typename FormatContext>
auto format(const ScrubMap::object& so, FormatContext& ctx) const
{
fmt::format_to(ctx.out(),
"so{{ sz:{} dd:{} od:{} ",
so.size,
so.digest,
so.digest_present);
// note the special handling of (1) OI_ATTR and (2) non-printables
for (auto [k, v] : so.attrs) {
std::string bkstr{v.raw_c_str(), v.raw_length()};
if (k == std::string{OI_ATTR}) {
/// \todo consider parsing the OI args here. Maybe add a specific format
/// specifier
fmt::format_to(ctx.out(), "{{{}:<<OI_ATTR>>({})}} ", k, bkstr.length());
} else if (k == std::string{SS_ATTR}) {
bufferlist bl;
bl.push_back(v);
SnapSet sns{bl};
fmt::format_to(ctx.out(), "{{{}:{:D}}} ", k, sns);
} else {
fmt::format_to(ctx.out(), "{{{}:{}({})}} ", k, bkstr, bkstr.length());
}
}
return fmt::format_to(ctx.out(), "}}");
}
};
template <>
struct fmt::formatter<ScrubMap> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
auto it = ctx.begin();
if (it != ctx.end() && *it == 'D') {
debug_log = true; // list the objects
++it;
}
return it;
}
template <typename FormatContext>
auto format(const ScrubMap& smap, FormatContext& ctx) const
{
fmt::format_to(ctx.out(),
"smap{{ valid:{} incr-since:{} #:{}",
smap.valid_through,
smap.incr_since,
smap.objects.size());
if (debug_log) {
fmt::format_to(ctx.out(), " objects:");
for (const auto& [ho, so] : smap.objects) {
fmt::format_to(ctx.out(), "\n\th.o<{}>:<{}> ", ho, so);
}
fmt::format_to(ctx.out(), "\n");
}
return fmt::format_to(ctx.out(), "}}");
}
bool debug_log{false};
};
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<ObjectRecoveryInfo> : fmt::ostream_formatter {};
template <> struct fmt::formatter<ObjectRecoveryProgress> : fmt::ostream_formatter {};
template <> struct fmt::formatter<PastIntervals> : fmt::ostream_formatter {};
template <> struct fmt::formatter<pg_log_op_return_item_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<watch_info_t> : fmt::ostream_formatter {};
template <> struct fmt::formatter<pg_log_entry_t> : fmt::ostream_formatter {};
template <bool TrackChanges> struct fmt::formatter<pg_missing_set<TrackChanges>> : fmt::ostream_formatter {};
#endif
| 9,683 | 27.482353 | 109 | h |
null | ceph-main/src/osd/recovery_types.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "recovery_types.h"
std::ostream& operator<<(std::ostream& out, const BackfillInterval& bi)
{
out << "BackfillInfo(" << bi.begin << "-" << bi.end
<< " " << bi.objects.size() << " objects";
if (!bi.objects.empty())
out << " " << bi.objects;
out << ")";
return out;
}
| 394 | 22.235294 | 71 | cc |
null | ceph-main/src/osd/recovery_types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <map>
#include "osd_types.h"
/**
* BackfillInterval
*
* Represents the objects in a range [begin, end)
*
* Possible states:
 * 1) begin == end == hobject_t() indicates that the interval is unpopulated
* 2) Else, objects contains all objects in [begin, end)
*/
struct BackfillInterval {
// info about a backfill interval on a peer
eversion_t version; /// version at which the scan occurred
std::map<hobject_t,eversion_t> objects;
hobject_t begin;
hobject_t end;
/// clear content
void clear() {
*this = BackfillInterval();
}
  /// clear only the objects map
void clear_objects() {
objects.clear();
}
/// reinstantiate with a new start+end position and sort order
void reset(hobject_t start) {
clear();
begin = end = start;
}
/// true if there are no objects in this interval
bool empty() const {
return objects.empty();
}
/// true if interval extends to the end of the range
bool extends_to_end() const {
return end.is_max();
}
/// removes items <= soid and adjusts begin to the first object
void trim_to(const hobject_t &soid) {
trim();
while (!objects.empty() &&
objects.begin()->first <= soid) {
pop_front();
}
}
/// Adjusts begin to the first object
void trim() {
if (!objects.empty())
begin = objects.begin()->first;
else
begin = end;
}
/// drop first entry, and adjust @begin accordingly
void pop_front() {
ceph_assert(!objects.empty());
objects.erase(objects.begin());
trim();
}
/// dump
void dump(ceph::Formatter *f) const {
f->dump_stream("begin") << begin;
f->dump_stream("end") << end;
f->open_array_section("objects");
for (std::map<hobject_t, eversion_t>::const_iterator i =
objects.begin();
i != objects.end();
++i) {
f->open_object_section("object");
f->dump_stream("object") << i->first;
f->dump_stream("version") << i->second;
f->close_section();
}
f->close_section();
}
};
std::ostream &operator<<(std::ostream &out, const BackfillInterval &bi);
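// Illustrative sketch (not part of the original header): how a peer's
// backfill interval is consumed. 'bi' is assumed to have been populated by a
// scan; the helper name is hypothetical.
inline void example_consume_interval(BackfillInterval& bi,
                                     const hobject_t& peer_last_backfill)
{
  // drop everything at or before the peer's last_backfill and re-anchor
  // 'begin' at the first remaining object (or at 'end' if none are left)
  bi.trim_to(peer_last_backfill);
  while (!bi.empty()) {
    // process bi.objects.begin()->first / ->second here, then advance
    bi.pop_front();
  }
}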
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<BackfillInterval> : fmt::ostream_formatter {};
#endif
| 2,343 | 22.676768 | 80 | h |
null | ceph-main/src/osd/scrubber_common.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <fmt/ranges.h>
#include "common/scrub_types.h"
#include "include/types.h"
#include "os/ObjectStore.h"
#include "OpRequest.h"
namespace ceph {
class Formatter;
}
struct PGPool;
namespace Scrub {
class ReplicaReservations;
}
/// Facilitating scrub-related object access to private PG data
class ScrubberPasskey {
private:
friend class Scrub::ReplicaReservations;
friend class PrimaryLogScrub;
friend class PgScrubber;
friend class ScrubBackend;
ScrubberPasskey() {}
ScrubberPasskey(const ScrubberPasskey&) = default;
ScrubberPasskey& operator=(const ScrubberPasskey&) = delete;
};
namespace Scrub {
/// high/low OP priority
enum class scrub_prio_t : bool { low_priority = false, high_priority = true };
/// Identifies a specific scrub activation within an interval,
/// see ScrubPGgIF::m_current_token
using act_token_t = uint32_t;
/// "environment" preconditions affecting which PGs are eligible for scrubbing
struct ScrubPreconds {
bool allow_requested_repair_only{false};
bool load_is_low{true};
bool time_permit{true};
bool only_deadlined{false};
};
/// PG services used by the scrubber backend
struct PgScrubBeListener {
virtual ~PgScrubBeListener() = default;
virtual const PGPool& get_pgpool() const = 0;
virtual pg_shard_t get_primary() const = 0;
virtual void force_object_missing(ScrubberPasskey,
const std::set<pg_shard_t>& peer,
const hobject_t& oid,
eversion_t version) = 0;
virtual const pg_info_t& get_pg_info(ScrubberPasskey) const = 0;
// query the PG backend for the on-disk size of an object
virtual uint64_t logical_to_ondisk_size(uint64_t logical_size) const = 0;
  // used to verify our "cleanness" before scrubbing
virtual bool is_waiting_for_unreadable_object() const = 0;
};
} // namespace Scrub
/**
* Flags affecting the scheduling and behaviour of the *next* scrub.
*
* we hold two of these flag collections: one
* for the next scrub, and one frozen at initiation (i.e. in pg::queue_scrub())
*/
struct requested_scrub_t {
// flags to indicate explicitly requested scrubs (by admin):
// bool must_scrub, must_deep_scrub, must_repair, need_auto;
/**
* 'must_scrub' is set by an admin command (or by need_auto).
* Affects the priority of the scrubbing, and the sleep periods
* during the scrub.
*/
bool must_scrub{false};
/**
* scrub must not be aborted.
* Set for explicitly requested scrubs, and for scrubs originated by the
* pairing process with the 'repair' flag set (in the RequestScrub event).
*
* Will be copied into the 'required' scrub flag upon scrub start.
*/
bool req_scrub{false};
/**
* Set from:
* - scrub_requested() with need_auto param set, which only happens in
* - scrub_finish() - if deep_scrub_on_error is set, and we have errors
*
* If set, will prevent the OSD from casually postponing our scrub. When
* scrubbing starts, will cause must_scrub, must_deep_scrub and auto_repair to
* be set.
*/
bool need_auto{false};
/**
* Set for scrub-after-recovery just before we initiate the recovery deep
 * scrub, or if scrub_requested() was called with either need_auto or repair.
* Affects PG_STATE_DEEP_SCRUB.
*/
bool must_deep_scrub{false};
/**
* (An intermediary flag used by pg::sched_scrub() on the first time
* a planned scrub has all its resources). Determines whether the next
* repair/scrub will be 'deep'.
*
 * Note: 'dumped' by PgScrubber::dump() and similar. In practice, being a
 * temporary that is set and reset within the same operation, it will never
 * appear externally as set.
*/
bool time_for_deep{false};
bool deep_scrub_on_error{false};
/**
* If set, we should see must_deep_scrub & must_scrub, too
*
* - 'must_repair' is checked by the OSD when scheduling the scrubs.
* - also checked & cleared at pg::queue_scrub()
*/
bool must_repair{false};
/*
 * the value of auto_repair is determined in sched_scrub() (once per scrub;
 * the previous value is not remembered). Set if
* - allowed by configuration and backend, and
* - must_scrub is not set (i.e. - this is a periodic scrub),
* - time_for_deep was just set
*/
bool auto_repair{false};
/**
* indicating that we are scrubbing post repair to verify everything is fixed.
* Otherwise - PG_STATE_FAILED_REPAIR will be asserted.
*/
bool check_repair{false};
/**
* Used to indicate, both in client-facing listings and internally, that
* the planned scrub will be a deep one.
*/
bool calculated_to_deep{false};
};
std::ostream& operator<<(std::ostream& out, const requested_scrub_t& sf);
template <>
struct fmt::formatter<requested_scrub_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const requested_scrub_t& rs, FormatContext& ctx)
{
return fmt::format_to(ctx.out(),
"(plnd:{}{}{}{}{}{}{}{}{}{})",
rs.must_repair ? " must_repair" : "",
rs.auto_repair ? " auto_repair" : "",
rs.check_repair ? " check_repair" : "",
rs.deep_scrub_on_error ? " deep_scrub_on_error" : "",
rs.must_deep_scrub ? " must_deep_scrub" : "",
rs.must_scrub ? " must_scrub" : "",
rs.time_for_deep ? " time_for_deep" : "",
rs.need_auto ? " need_auto" : "",
rs.req_scrub ? " req_scrub" : "",
rs.calculated_to_deep ? " deep" : "");
}
};
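// Illustrative sketch (not part of the original header): a hypothetical
// helper that marks the next scrub as an operator-requested repair by setting
// the relevant planned-scrub flags.
inline void example_mark_must_repair(requested_scrub_t& planned)
{
  planned.must_scrub = true;
  planned.must_deep_scrub = true;
  planned.must_repair = true;
  planned.req_scrub = true;
  // fmt::format("{}", planned) would now render
  // "(plnd: must_repair must_deep_scrub must_scrub req_scrub)"
}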
/**
* The interface used by the PG when requesting scrub-related info or services
*/
struct ScrubPgIF {
virtual ~ScrubPgIF() = default;
friend std::ostream& operator<<(std::ostream& out, const ScrubPgIF& s)
{
return s.show(out);
}
virtual std::ostream& show(std::ostream& out) const = 0;
// --------------- triggering state-machine events:
virtual void initiate_regular_scrub(epoch_t epoch_queued) = 0;
virtual void initiate_scrub_after_repair(epoch_t epoch_queued) = 0;
virtual void send_scrub_resched(epoch_t epoch_queued) = 0;
virtual void active_pushes_notification(epoch_t epoch_queued) = 0;
virtual void update_applied_notification(epoch_t epoch_queued) = 0;
virtual void digest_update_notification(epoch_t epoch_queued) = 0;
virtual void send_scrub_unblock(epoch_t epoch_queued) = 0;
virtual void send_replica_maps_ready(epoch_t epoch_queued) = 0;
virtual void send_replica_pushes_upd(epoch_t epoch_queued) = 0;
virtual void send_start_replica(epoch_t epoch_queued,
Scrub::act_token_t token) = 0;
virtual void send_sched_replica(epoch_t epoch_queued,
Scrub::act_token_t token) = 0;
virtual void send_chunk_free(epoch_t epoch_queued) = 0;
virtual void send_chunk_busy(epoch_t epoch_queued) = 0;
virtual void send_local_map_done(epoch_t epoch_queued) = 0;
virtual void send_get_next_chunk(epoch_t epoch_queued) = 0;
virtual void send_scrub_is_finished(epoch_t epoch_queued) = 0;
virtual void on_applied_when_primary(const eversion_t& applied_version) = 0;
// --------------------------------------------------
  // currently only used for an assert
  [[nodiscard]] virtual bool are_callbacks_pending() const = 0;
/**
* the scrubber is marked 'active':
* - for the primary: when all replica OSDs grant us the requested resources
* - for replicas: upon receiving the scrub request from the primary
*/
[[nodiscard]] virtual bool is_scrub_active() const = 0;
/**
* 'true' until after the FSM processes the 'scrub-finished' event,
* and scrubbing is completely cleaned-up.
*
* In other words - holds longer than is_scrub_active(), thus preventing
* a rescrubbing of the same PG while the previous scrub has not fully
* terminated.
*/
[[nodiscard]] virtual bool is_queued_or_active() const = 0;
/**
* Manipulate the 'scrubbing request has been queued, or - we are
* actually scrubbing' Scrubber's flag
*
* clear_queued_or_active() will also restart any blocked snaptrimming.
*/
virtual void set_queued_or_active() = 0;
virtual void clear_queued_or_active() = 0;
  /// are we waiting for resource reservation grants from our replicas?
[[nodiscard]] virtual bool is_reserving() const = 0;
/// handle a message carrying a replica map
virtual void map_from_replica(OpRequestRef op) = 0;
virtual void replica_scrub_op(OpRequestRef op) = 0;
virtual void set_op_parameters(const requested_scrub_t&) = 0;
/// stop any active scrubbing (on interval end) and unregister from
/// the OSD scrub queue
virtual void on_new_interval() = 0;
virtual void scrub_clear_state() = 0;
virtual void handle_query_state(ceph::Formatter* f) = 0;
virtual pg_scrubbing_status_t get_schedule() const = 0;
virtual void dump_scrubber(ceph::Formatter* f,
const requested_scrub_t& request_flags) const = 0;
/**
* Return true if soid is currently being scrubbed and pending IOs should
* block. May have a side effect of preempting an in-progress scrub -- will
* return false in that case.
*
* @param soid object to check for ongoing scrub
* @return boolean whether a request on soid should block until scrub
* completion
*/
virtual bool write_blocked_by_scrub(const hobject_t& soid) = 0;
/// Returns whether any objects in the range [begin, end] are being scrubbed
virtual bool range_intersects_scrub(const hobject_t& start,
const hobject_t& end) = 0;
/// the op priority, taken from the primary's request message
virtual Scrub::scrub_prio_t replica_op_priority() const = 0;
/// the priority of the on-going scrub (used when requeuing events)
virtual unsigned int scrub_requeue_priority(
Scrub::scrub_prio_t with_priority) const = 0;
virtual unsigned int scrub_requeue_priority(
Scrub::scrub_prio_t with_priority,
unsigned int suggested_priority) const = 0;
virtual void add_callback(Context* context) = 0;
/// add to scrub statistics, but only if the soid is below the scrub start
virtual void stats_of_handled_objects(const object_stat_sum_t& delta_stats,
const hobject_t& soid) = 0;
/**
* the version of 'scrub_clear_state()' that does not try to invoke FSM
* services (thus can be called from FSM reactions)
*/
virtual void clear_pgscrub_state() = 0;
/**
* triggers the 'RemotesReserved' (all replicas granted scrub resources)
* state-machine event
*/
virtual void send_remotes_reserved(epoch_t epoch_queued) = 0;
/**
* triggers the 'ReservationFailure' (at least one replica denied us the
* requested resources) state-machine event
*/
virtual void send_reservation_failure(epoch_t epoch_queued) = 0;
virtual void cleanup_store(ObjectStore::Transaction* t) = 0;
virtual bool get_store_errors(const scrub_ls_arg_t& arg,
scrub_ls_result_t& res_inout) const = 0;
/**
* force a periodic 'publish_stats_to_osd()' call, to update scrub-related
* counters and statistics.
*/
virtual void update_scrub_stats(
ceph::coarse_real_clock::time_point now_is) = 0;
// --------------- reservations -----------------------------------
/**
* message all replicas with a request to "unreserve" scrub
*/
virtual void unreserve_replicas() = 0;
/**
* "forget" all replica reservations. No messages are sent to the
* previously-reserved.
*
* Used upon interval change. The replicas' state is guaranteed to
* be reset separately by the interval-change event.
*/
virtual void discard_replica_reservations() = 0;
/**
* clear both local and OSD-managed resource reservation flags
*/
virtual void clear_scrub_reservations() = 0;
/**
* Reserve local scrub resources (managed by the OSD)
*
* Fails if OSD's local-scrubs budget was exhausted
* \returns were local resources reserved?
*/
virtual bool reserve_local() = 0;
/**
* if activated as a Primary - register the scrub job with the OSD
* scrub queue
*/
virtual void on_pg_activate(const requested_scrub_t& request_flags) = 0;
/**
* Recalculate the required scrub time.
*
* This function assumes that the queue registration status is up-to-date,
   * i.e. the OSD "knows our name" iff we are the Primary.
*/
virtual void update_scrub_job(const requested_scrub_t& request_flags) = 0;
// on the replica:
virtual void handle_scrub_reserve_request(OpRequestRef op) = 0;
virtual void handle_scrub_reserve_release(OpRequestRef op) = 0;
// and on the primary:
virtual void handle_scrub_reserve_grant(OpRequestRef op, pg_shard_t from) = 0;
virtual void handle_scrub_reserve_reject(OpRequestRef op,
pg_shard_t from) = 0;
virtual void rm_from_osd_scrubbing() = 0;
virtual void scrub_requested(scrub_level_t scrub_level,
scrub_type_t scrub_type,
requested_scrub_t& req_flags) = 0;
// --------------- debugging via the asok ------------------------------
virtual int asok_debug(std::string_view cmd,
std::string param,
Formatter* f,
std::stringstream& ss) = 0;
};
| 13,461 | 31.052381 | 80 | h |
null | ceph-main/src/osd/scheduler/OpScheduler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <ostream>
#include "osd/scheduler/OpScheduler.h"
#include "common/WeightedPriorityQueue.h"
#include "osd/scheduler/mClockScheduler.h"
namespace ceph::osd::scheduler {
OpSchedulerRef make_scheduler(
CephContext *cct, int whoami, uint32_t num_shards, int shard_id,
bool is_rotational, std::string_view osd_objectstore, MonClient *monc)
{
const std::string *type = &cct->_conf->osd_op_queue;
if (*type == "debug_random") {
static const std::string index_lookup[] = { "mclock_scheduler",
"wpq" };
srand(time(NULL));
unsigned which = rand() % (sizeof(index_lookup) / sizeof(index_lookup[0]));
type = &index_lookup[which];
}
// Force the use of 'wpq' scheduler for filestore OSDs.
// The 'mclock_scheduler' is not supported for filestore OSDs.
if (*type == "wpq" || osd_objectstore == "filestore") {
return std::make_unique<
ClassedOpQueueScheduler<WeightedPriorityQueue<OpSchedulerItem, client>>>(
cct,
cct->_conf->osd_op_pq_max_tokens_per_priority,
cct->_conf->osd_op_pq_min_cost
);
} else if (*type == "mclock_scheduler") {
// default is 'mclock_scheduler'
return std::make_unique<
mClockScheduler>(cct, whoami, num_shards, shard_id, is_rotational, monc);
} else {
ceph_assert("Invalid choice of wq" == 0);
}
}
std::ostream &operator<<(std::ostream &lhs, const OpScheduler &rhs) {
rhs.print(lhs);
return lhs;
}
}
| 1,813 | 28.737705 | 79 | cc |
null | ceph-main/src/osd/scheduler/OpScheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <ostream>
#include <variant>
#include "common/ceph_context.h"
#include "mon/MonClient.h"
#include "osd/scheduler/OpSchedulerItem.h"
namespace ceph::osd::scheduler {
using client = uint64_t;
using WorkItem = std::variant<std::monostate, OpSchedulerItem, double>;
/**
* Base interface for classes responsible for choosing
* op processing order in the OSD.
*/
class OpScheduler {
public:
// Enqueue op for scheduling
virtual void enqueue(OpSchedulerItem &&item) = 0;
// Enqueue op for processing as though it were enqueued prior
// to other items already scheduled.
virtual void enqueue_front(OpSchedulerItem &&item) = 0;
// Returns true iff there are no ops scheduled
virtual bool empty() const = 0;
// Return next op to be processed
virtual WorkItem dequeue() = 0;
// Dump formatted representation for the queue
virtual void dump(ceph::Formatter &f) const = 0;
// Print human readable brief description with relevant parameters
virtual void print(std::ostream &out) const = 0;
// Apply config changes to the scheduler (if any)
virtual void update_configuration() = 0;
// Destructor
virtual ~OpScheduler() {};
};
std::ostream &operator<<(std::ostream &lhs, const OpScheduler &);
using OpSchedulerRef = std::unique_ptr<OpScheduler>;
OpSchedulerRef make_scheduler(
CephContext *cct, int whoami, uint32_t num_shards, int shard_id,
bool is_rotational, std::string_view osd_objectstore, MonClient *monc);
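// Illustrative sketch (not part of the original interface): draining a
// scheduler and handling the three WorkItem alternatives. 'on_item' is a
// hypothetical callable invoked for each ready op; the mclock scheduler may
// instead return a double (a future time point) when nothing is due yet.
template <typename OnItem>
void example_drain(OpScheduler &sched, OnItem &&on_item)
{
  while (!sched.empty()) {
    WorkItem wi = sched.dequeue();
    if (auto *item = std::get_if<OpSchedulerItem>(&wi)) {
      on_item(std::move(*item));
    } else if (std::holds_alternative<double>(wi)) {
      break;  // next item not schedulable yet; retry later
    }         // std::monostate: nothing was dequeued this round
  }
}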
/**
* Implements OpScheduler in terms of OpQueue
*
 * Templated on queue type to avoid dynamic dispatch; T should implement
* OpQueue<OpSchedulerItem, client>. This adapter is mainly responsible for
* the boilerplate priority cutoff/strict concept which is needed for
* OpQueue based implementations.
*/
template <typename T>
class ClassedOpQueueScheduler final : public OpScheduler {
unsigned cutoff;
T queue;
static unsigned int get_io_prio_cut(CephContext *cct) {
if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
srand(time(NULL));
return (rand() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
} else if (cct->_conf->osd_op_queue_cut_off == "high") {
return CEPH_MSG_PRIO_HIGH;
} else {
// default / catch-all is 'low'
return CEPH_MSG_PRIO_LOW;
}
}
public:
template <typename... Args>
ClassedOpQueueScheduler(CephContext *cct, Args&&... args) :
cutoff(get_io_prio_cut(cct)),
queue(std::forward<Args>(args)...)
{}
void enqueue(OpSchedulerItem &&item) final {
unsigned priority = item.get_priority();
unsigned cost = item.get_cost();
if (priority >= cutoff)
queue.enqueue_strict(
item.get_owner(), priority, std::move(item));
else
queue.enqueue(
item.get_owner(), priority, cost, std::move(item));
}
void enqueue_front(OpSchedulerItem &&item) final {
unsigned priority = item.get_priority();
unsigned cost = item.get_cost();
if (priority >= cutoff)
queue.enqueue_strict_front(
item.get_owner(),
priority, std::move(item));
else
queue.enqueue_front(
item.get_owner(),
priority, cost, std::move(item));
}
bool empty() const final {
return queue.empty();
}
WorkItem dequeue() final {
return queue.dequeue();
}
void dump(ceph::Formatter &f) const final {
return queue.dump(&f);
}
void print(std::ostream &out) const final {
out << "ClassedOpQueueScheduler(queue=";
queue.print(out);
out << ", cutoff=" << cutoff << ")";
}
void update_configuration() final {
// no-op
}
~ClassedOpQueueScheduler() final {};
};
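// Example instantiation (this is how the "wpq" branch of make_scheduler()
// builds its queue in OpScheduler.cc):
//
//   ClassedOpQueueScheduler<WeightedPriorityQueue<OpSchedulerItem, client>>(
//     cct,
//     cct->_conf->osd_op_pq_max_tokens_per_priority,
//     cct->_conf->osd_op_pq_min_cost);
//
// The trailing constructor arguments are forwarded unchanged to the wrapped
// OpQueue; only the priority cutoff is computed by this adapter itself.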
}
| 4,007 | 25.72 | 76 | h |
null | ceph-main/src/osd/scheduler/OpSchedulerItem.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "osd/scheduler/OpSchedulerItem.h"
#include "osd/OSD.h"
#include "osd/osd_tracer.h"
namespace ceph::osd::scheduler {
std::ostream& operator<<(std::ostream& out, const op_scheduler_class& class_id) {
out << static_cast<size_t>(class_id);
return out;
}
void PGOpItem::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
osd->dequeue_op(pg, op, handle);
pg->unlock();
}
void PGPeeringItem::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
osd->dequeue_peering_evt(sdata, pg.get(), evt, handle);
}
void PGSnapTrim::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
pg->snap_trimmer(epoch_queued);
pg->unlock();
}
void PGScrub::run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
{
pg->scrub(epoch_queued, handle);
pg->unlock();
}
void PGScrubAfterRepair::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->recovery_scrub(epoch_queued, handle);
pg->unlock();
}
void PGScrubResched::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_scrub_resched(epoch_queued, handle);
pg->unlock();
}
void PGScrubResourcesOK::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_resources_granted(epoch_queued, handle);
pg->unlock();
}
void PGScrubDenied::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_resources_denied(epoch_queued, handle);
pg->unlock();
}
void PGScrubPushesUpdate::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_pushes_update(epoch_queued, handle);
pg->unlock();
}
void PGScrubAppliedUpdate::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_applied_update(epoch_queued, handle);
pg->unlock();
}
void PGScrubUnblocked::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_unblocking(epoch_queued, handle);
pg->unlock();
}
void PGScrubDigestUpdate::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_digest_update(epoch_queued, handle);
pg->unlock();
}
void PGScrubGotLocalMap::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_local_map_ready(epoch_queued, handle);
pg->unlock();
}
void PGScrubGotReplMaps::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_replmaps_ready(epoch_queued, handle);
pg->unlock();
}
void PGRepScrub::run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
{
pg->replica_scrub(epoch_queued, activation_index, handle);
pg->unlock();
}
void PGRepScrubResched::run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->replica_scrub_resched(epoch_queued, activation_index, handle);
pg->unlock();
}
void PGScrubReplicaPushes::run([[maybe_unused]] OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_replica_pushes(epoch_queued, handle);
pg->unlock();
}
void PGScrubScrubFinished::run([[maybe_unused]] OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_scrub_is_finished(epoch_queued, handle);
pg->unlock();
}
void PGScrubGetNextChunk::run([[maybe_unused]] OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_get_next_chunk(epoch_queued, handle);
pg->unlock();
}
void PGScrubChunkIsBusy::run([[maybe_unused]] OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_chunk_busy(epoch_queued, handle);
pg->unlock();
}
void PGScrubChunkIsFree::run([[maybe_unused]] OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle)
{
pg->scrub_send_chunk_free(epoch_queued, handle);
pg->unlock();
}
void PGRecovery::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
  osd->logger->tinc(
    l_osd_recovery_queue_lat,
    ceph_clock_now() - time_queued);  // time spent waiting in the queue
osd->do_recovery(pg.get(), epoch_queued, reserved_pushes, priority, handle);
pg->unlock();
}
void PGRecoveryContext::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
  osd->logger->tinc(
    l_osd_recovery_context_queue_lat,
    ceph_clock_now() - time_queued);  // time spent waiting in the queue
c.release()->complete(handle);
pg->unlock();
}
void PGDelete::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
osd->dequeue_delete(sdata, pg.get(), epoch_queued, handle);
}
void PGRecoveryMsg::run(
OSD *osd,
OSDShard *sdata,
PGRef& pg,
ThreadPool::TPHandle &handle)
{
  // queue latency is the time spent between enqueue and dequeue
  auto latency = ceph_clock_now() - time_queued;
  switch (op->get_req()->get_type()) {
  case MSG_OSD_PG_PUSH:
    osd->logger->tinc(l_osd_recovery_push_queue_lat, latency);
    break;
  case MSG_OSD_PG_PUSH_REPLY:
    osd->logger->tinc(l_osd_recovery_push_reply_queue_lat, latency);
    break;
  case MSG_OSD_PG_PULL:
    osd->logger->tinc(l_osd_recovery_pull_queue_lat, latency);
    break;
  case MSG_OSD_PG_BACKFILL:
    osd->logger->tinc(l_osd_recovery_backfill_queue_lat, latency);
    break;
  case MSG_OSD_PG_BACKFILL_REMOVE:
    osd->logger->tinc(l_osd_recovery_backfill_remove_queue_lat, latency);
    break;
  case MSG_OSD_PG_SCAN:
    osd->logger->tinc(l_osd_recovery_scan_queue_lat, latency);
    break;
  }
osd->dequeue_op(pg, op, handle);
pg->unlock();
}
}
| 6,191 | 21.681319 | 88 | cc |
null | ceph-main/src/osd/scheduler/OpSchedulerItem.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <ostream>
#include "include/types.h"
#include "include/utime_fmt.h"
#include "osd/osd_types_fmt.h"
#include "osd/OpRequest.h"
#include "osd/PG.h"
#include "osd/PGPeeringEvent.h"
#include "messages/MOSDOp.h"
class OSD;
struct OSDShard;
namespace ceph::osd::scheduler {
enum class op_scheduler_class : uint8_t {
background_recovery = 0,
background_best_effort,
immediate,
client,
};
std::ostream& operator<<(std::ostream& out, const op_scheduler_class& class_id);
class OpSchedulerItem {
public:
// Abstraction for operations queueable in the op queue
class OpQueueable {
public:
using Ref = std::unique_ptr<OpQueueable>;
/// Items with the same queue token will end up in the same shard
virtual uint32_t get_queue_token() const = 0;
/* Items will be dequeued and locked atomically w.r.t. other items with the
* same ordering token */
virtual const spg_t& get_ordering_token() const = 0;
virtual std::optional<OpRequestRef> maybe_get_op() const {
return std::nullopt;
}
virtual uint64_t get_reserved_pushes() const {
return 0;
}
virtual bool is_peering() const {
return false;
}
virtual bool peering_requires_pg() const {
ceph_abort();
}
virtual const PGCreateInfo *creates_pg() const {
return nullptr;
}
virtual std::ostream &print(std::ostream &rhs) const = 0;
/// and a version geared towards fmt::format use:
virtual std::string print() const = 0;
virtual void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) = 0;
virtual op_scheduler_class get_scheduler_class() const = 0;
virtual ~OpQueueable() {}
friend std::ostream& operator<<(std::ostream& out, const OpQueueable& q) {
return q.print(out);
}
};
private:
OpQueueable::Ref qitem;
int cost;
unsigned priority;
utime_t start_time;
uint64_t owner; ///< global id (e.g., client.XXX)
epoch_t map_epoch; ///< an epoch we expect the PG to exist in
/**
* qos_cost
*
* Set by mClockScheduler iff queued into mclock proper and not the
* high/immediate queues. Represents mClockScheduler's adjusted
* cost value.
*/
uint32_t qos_cost = 0;
/// True iff queued via mclock proper, not the high/immediate queues
bool was_queued_via_mclock() const {
return qos_cost > 0;
}
public:
OpSchedulerItem(
OpQueueable::Ref &&item,
int cost,
unsigned priority,
utime_t start_time,
uint64_t owner,
epoch_t e)
: qitem(std::move(item)),
cost(cost),
priority(priority),
start_time(start_time),
owner(owner),
map_epoch(e) {}
OpSchedulerItem(OpSchedulerItem &&) = default;
OpSchedulerItem(const OpSchedulerItem &) = delete;
OpSchedulerItem &operator=(OpSchedulerItem &&) = default;
OpSchedulerItem &operator=(const OpSchedulerItem &) = delete;
friend struct fmt::formatter<OpSchedulerItem>;
uint32_t get_queue_token() const {
return qitem->get_queue_token();
}
const spg_t& get_ordering_token() const {
return qitem->get_ordering_token();
}
std::optional<OpRequestRef> maybe_get_op() const {
return qitem->maybe_get_op();
}
uint64_t get_reserved_pushes() const {
return qitem->get_reserved_pushes();
}
  void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) {
qitem->run(osd, sdata, pg, handle);
}
unsigned get_priority() const { return priority; }
int get_cost() const { return cost; }
utime_t get_start_time() const { return start_time; }
uint64_t get_owner() const { return owner; }
epoch_t get_map_epoch() const { return map_epoch; }
bool is_peering() const {
return qitem->is_peering();
}
const PGCreateInfo *creates_pg() const {
return qitem->creates_pg();
}
bool peering_requires_pg() const {
return qitem->peering_requires_pg();
}
op_scheduler_class get_scheduler_class() const {
return qitem->get_scheduler_class();
}
void set_qos_cost(uint32_t scaled_cost) {
qos_cost = scaled_cost;
}
friend std::ostream& operator<<(std::ostream& out, const OpSchedulerItem& item) {
out << "OpSchedulerItem("
<< item.get_ordering_token() << " " << *item.qitem;
out << " class_id " << item.get_scheduler_class();
out << " prio " << item.get_priority();
if (item.was_queued_via_mclock()) {
out << " qos_cost " << item.qos_cost;
}
out << " cost " << item.get_cost()
<< " e" << item.get_map_epoch();
if (item.get_reserved_pushes()) {
out << " reserved_pushes " << item.get_reserved_pushes();
}
return out << ")";
}
}; // class OpSchedulerItem
/// Implements boilerplate for operations queued for the pg lock
class PGOpQueueable : public OpSchedulerItem::OpQueueable {
spg_t pgid;
protected:
const spg_t& get_pgid() const {
return pgid;
}
static op_scheduler_class priority_to_scheduler_class(int priority) {
if (priority >= CEPH_MSG_PRIO_HIGH) {
return op_scheduler_class::immediate;
} else if (priority >= PeeringState::recovery_msg_priority_t::DEGRADED) {
return op_scheduler_class::background_recovery;
} else {
return op_scheduler_class::background_best_effort;
}
}
public:
explicit PGOpQueueable(spg_t pg) : pgid(pg) {}
uint32_t get_queue_token() const final {
return get_pgid().ps();
}
const spg_t& get_ordering_token() const final {
return get_pgid();
}
};
class PGOpItem : public PGOpQueueable {
OpRequestRef op;
public:
PGOpItem(spg_t pg, OpRequestRef op) : PGOpQueueable(pg), op(std::move(op)) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGOpItem(op=" << *(op->get_req()) << ")";
}
std::string print() const override {
return fmt::format("PGOpItem(op={})", *(op->get_req()));
}
std::optional<OpRequestRef> maybe_get_op() const final {
return op;
}
op_scheduler_class get_scheduler_class() const final {
auto type = op->get_req()->get_type();
if (type == CEPH_MSG_OSD_OP ||
type == CEPH_MSG_OSD_BACKOFF) {
return op_scheduler_class::client;
} else {
return op_scheduler_class::immediate;
}
}
void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
};
class PGPeeringItem : public PGOpQueueable {
PGPeeringEventRef evt;
public:
PGPeeringItem(spg_t pg, PGPeeringEventRef e) : PGOpQueueable(pg), evt(e) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGPeeringEvent(" << evt->get_desc() << ")";
}
std::string print() const final {
return fmt::format("PGPeeringEvent({})", evt->get_desc());
}
void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
bool is_peering() const override {
return true;
}
bool peering_requires_pg() const override {
return evt->requires_pg;
}
const PGCreateInfo *creates_pg() const override {
return evt->create_info.get();
}
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::immediate;
}
};
class PGSnapTrim : public PGOpQueueable {
epoch_t epoch_queued;
public:
PGSnapTrim(
spg_t pg,
epoch_t epoch_queued)
: PGOpQueueable(pg), epoch_queued(epoch_queued) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGSnapTrim(pgid=" << get_pgid()
<< " epoch_queued=" << epoch_queued
<< ")";
}
std::string print() const final {
return fmt::format(
"PGSnapTrim(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::background_best_effort;
}
};
class PGScrub : public PGOpQueueable {
epoch_t epoch_queued;
public:
PGScrub(
spg_t pg,
epoch_t epoch_queued)
: PGOpQueueable(pg), epoch_queued(epoch_queued) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGScrub(pgid=" << get_pgid()
<< "epoch_queued=" << epoch_queued
<< ")";
}
std::string print() const final {
return fmt::format(
"PGScrub(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::background_best_effort;
}
};
class PGScrubItem : public PGOpQueueable {
protected:
epoch_t epoch_queued;
Scrub::act_token_t activation_index;
std::string_view message_name;
PGScrubItem(spg_t pg, epoch_t epoch_queued, std::string_view derivative_name)
: PGOpQueueable{pg}
, epoch_queued{epoch_queued}
, activation_index{0}
, message_name{derivative_name}
{}
PGScrubItem(spg_t pg,
epoch_t epoch_queued,
Scrub::act_token_t op_index,
std::string_view derivative_name)
: PGOpQueueable{pg}
, epoch_queued{epoch_queued}
, activation_index{op_index}
, message_name{derivative_name}
{}
std::ostream& print(std::ostream& rhs) const final
{
return rhs << message_name << "(pgid=" << get_pgid()
<< "epoch_queued=" << epoch_queued
<< " scrub-token=" << activation_index << ")";
}
std::string print() const override {
return fmt::format(
"{}(pgid={} epoch_queued={} scrub-token={})", message_name, get_pgid(),
epoch_queued, activation_index);
}
void run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
ThreadPool::TPHandle& handle) override = 0;
op_scheduler_class get_scheduler_class() const final
{
return op_scheduler_class::background_best_effort;
}
};
class PGScrubResched : public PGScrubItem {
public:
PGScrubResched(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubResched"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
/**
* all replicas have granted our scrub resources request
*/
class PGScrubResourcesOK : public PGScrubItem {
public:
PGScrubResourcesOK(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubResourcesOK"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
/**
* scrub resources requests denied by replica(s)
*/
class PGScrubDenied : public PGScrubItem {
public:
PGScrubDenied(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubDenied"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
/**
* called when a repair process completes, to initiate scrubbing. No local/remote
* resources are allocated.
*/
class PGScrubAfterRepair : public PGScrubItem {
public:
PGScrubAfterRepair(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubAfterRepair"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubPushesUpdate : public PGScrubItem {
public:
PGScrubPushesUpdate(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubPushesUpdate"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubAppliedUpdate : public PGScrubItem {
public:
PGScrubAppliedUpdate(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubAppliedUpdate"}
{}
void run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
[[maybe_unused]] ThreadPool::TPHandle& handle) final;
};
class PGScrubUnblocked : public PGScrubItem {
public:
PGScrubUnblocked(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubUnblocked"}
{}
void run(OSD* osd,
OSDShard* sdata,
PGRef& pg,
[[maybe_unused]] ThreadPool::TPHandle& handle) final;
};
class PGScrubDigestUpdate : public PGScrubItem {
public:
PGScrubDigestUpdate(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubDigestUpdate"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubGotLocalMap : public PGScrubItem {
public:
PGScrubGotLocalMap(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubGotLocalMap"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubGotReplMaps : public PGScrubItem {
public:
PGScrubGotReplMaps(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubGotReplMaps"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGRepScrub : public PGScrubItem {
public:
PGRepScrub(spg_t pg, epoch_t epoch_queued, Scrub::act_token_t op_token)
: PGScrubItem{pg, epoch_queued, op_token, "PGRepScrub"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGRepScrubResched : public PGScrubItem {
public:
PGRepScrubResched(spg_t pg, epoch_t epoch_queued, Scrub::act_token_t op_token)
: PGScrubItem{pg, epoch_queued, op_token, "PGRepScrubResched"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubReplicaPushes : public PGScrubItem {
public:
PGScrubReplicaPushes(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubReplicaPushes"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubScrubFinished : public PGScrubItem {
public:
PGScrubScrubFinished(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubScrubFinished"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubGetNextChunk : public PGScrubItem {
public:
PGScrubGetNextChunk(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubGetNextChunk"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubChunkIsBusy : public PGScrubItem {
public:
PGScrubChunkIsBusy(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubChunkIsBusy"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGScrubChunkIsFree : public PGScrubItem {
public:
PGScrubChunkIsFree(spg_t pg, epoch_t epoch_queued)
: PGScrubItem{pg, epoch_queued, "PGScrubChunkIsFree"}
{}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle) final;
};
class PGRecovery : public PGOpQueueable {
utime_t time_queued;
epoch_t epoch_queued;
uint64_t reserved_pushes;
int priority;
public:
PGRecovery(
spg_t pg,
epoch_t epoch_queued,
uint64_t reserved_pushes,
int priority)
: PGOpQueueable(pg),
time_queued(ceph_clock_now()),
epoch_queued(epoch_queued),
reserved_pushes(reserved_pushes),
priority(priority) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGRecovery(pgid=" << get_pgid()
<< " epoch_queued=" << epoch_queued
<< " reserved_pushes=" << reserved_pushes
<< ")";
}
std::string print() const final {
return fmt::format(
"PGRecovery(pgid={} epoch_queued={} reserved_pushes={})", get_pgid(),
epoch_queued, reserved_pushes);
}
uint64_t get_reserved_pushes() const final {
return reserved_pushes;
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return priority_to_scheduler_class(priority);
}
};
class PGRecoveryContext : public PGOpQueueable {
utime_t time_queued;
std::unique_ptr<GenContext<ThreadPool::TPHandle&>> c;
epoch_t epoch;
int priority;
public:
PGRecoveryContext(spg_t pgid,
GenContext<ThreadPool::TPHandle&> *c, epoch_t epoch,
int priority)
: PGOpQueueable(pgid),
time_queued(ceph_clock_now()),
c(c), epoch(epoch), priority(priority) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGRecoveryContext(pgid=" << get_pgid()
<< " c=" << c.get() << " epoch=" << epoch
<< ")";
}
std::string print() const final {
return fmt::format(
"PGRecoveryContext(pgid={} c={} epoch={})", get_pgid(), (void*)c.get(), epoch);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return priority_to_scheduler_class(priority);
}
};
class PGDelete : public PGOpQueueable {
epoch_t epoch_queued;
public:
PGDelete(
spg_t pg,
epoch_t epoch_queued)
: PGOpQueueable(pg),
epoch_queued(epoch_queued) {}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGDelete(" << get_pgid()
<< " e" << epoch_queued
<< ")";
}
std::string print() const final {
return fmt::format(
"PGDelete(pgid={} epoch_queued={})", get_pgid(), epoch_queued);
}
void run(
OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final;
op_scheduler_class get_scheduler_class() const final {
return op_scheduler_class::background_best_effort;
}
};
class PGRecoveryMsg : public PGOpQueueable {
utime_t time_queued;
OpRequestRef op;
public:
PGRecoveryMsg(spg_t pg, OpRequestRef op)
: PGOpQueueable(pg), time_queued(ceph_clock_now()), op(std::move(op)) {}
static bool is_recovery_msg(OpRequestRef &op) {
switch (op->get_req()->get_type()) {
case MSG_OSD_PG_PUSH:
case MSG_OSD_PG_PUSH_REPLY:
case MSG_OSD_PG_PULL:
case MSG_OSD_PG_BACKFILL:
case MSG_OSD_PG_BACKFILL_REMOVE:
case MSG_OSD_PG_SCAN:
return true;
default:
return false;
}
}
std::ostream &print(std::ostream &rhs) const final {
return rhs << "PGRecoveryMsg(op=" << *(op->get_req()) << ")";
}
std::string print() const final {
return fmt::format("PGRecoveryMsg(op={})", *(op->get_req()));
}
std::optional<OpRequestRef> maybe_get_op() const final {
return op;
}
op_scheduler_class get_scheduler_class() const final {
return priority_to_scheduler_class(op->get_req()->get_priority());
}
void run(OSD* osd, OSDShard* sdata, PGRef& pg, ThreadPool::TPHandle& handle)
final;
};
} // namespace ceph::osd::scheduler
template <>
struct fmt::formatter<ceph::osd::scheduler::OpSchedulerItem> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(
const ceph::osd::scheduler::OpSchedulerItem& opsi,
FormatContext& ctx) const
{
// matching existing op_scheduler_item_t::operator<<() format
using class_t =
std::underlying_type_t<ceph::osd::scheduler::op_scheduler_class>;
const auto qos_cost = opsi.was_queued_via_mclock()
? fmt::format(" qos_cost {}", opsi.qos_cost)
: "";
const auto pushes =
opsi.get_reserved_pushes()
? fmt::format(" reserved_pushes {}", opsi.get_reserved_pushes())
: "";
return fmt::format_to(
ctx.out(), "OpSchedulerItem({} {} class_id {} prio {}{} cost {} e{}{})",
opsi.get_ordering_token(), opsi.qitem->print(),
static_cast<class_t>(opsi.get_scheduler_class()), opsi.get_priority(),
qos_cost, opsi.get_cost(), opsi.get_map_epoch(), pushes);
}
};
| 19,776 | 28.126657 | 93 | h |
null | ceph-main/src/osd/scheduler/mClockScheduler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <memory>
#include <functional>
#include "osd/scheduler/mClockScheduler.h"
#include "common/dout.h"
namespace dmc = crimson::dmclock;
using namespace std::placeholders;
#define dout_context cct
#define dout_subsys ceph_subsys_mclock
#undef dout_prefix
#define dout_prefix *_dout << "mClockScheduler: "
namespace ceph::osd::scheduler {
mClockScheduler::mClockScheduler(CephContext *cct,
int whoami,
uint32_t num_shards,
int shard_id,
bool is_rotational,
MonClient *monc)
: cct(cct),
whoami(whoami),
num_shards(num_shards),
shard_id(shard_id),
is_rotational(is_rotational),
monc(monc),
scheduler(
std::bind(&mClockScheduler::ClientRegistry::get_info,
&client_registry,
_1),
dmc::AtLimit::Wait,
cct->_conf.get_val<double>("osd_mclock_scheduler_anticipation_timeout"))
{
cct->_conf.add_observer(this);
ceph_assert(num_shards > 0);
set_osd_capacity_params_from_config();
set_config_defaults_from_profile();
client_registry.update_from_config(
cct->_conf, osd_bandwidth_capacity_per_shard);
}
/* ClientRegistry holds the dmclock::ClientInfo configuration parameters
* (reservation (bytes/second), weight (unitless), limit (bytes/second))
* for each IO class in the OSD (client, background_recovery,
* background_best_effort).
*
* mclock expects limit and reservation to have units of <cost>/second
* (bytes/second), but osd_mclock_scheduler_client_(lim|res) are provided
* as ratios of the OSD's capacity. We convert from the one to the other
* using the capacity_per_shard parameter.
*
* Note, mclock profile information will already have been set as a default
* for the osd_mclock_scheduler_client_* parameters prior to calling
* update_from_config -- see set_config_defaults_from_profile().
*/
void mClockScheduler::ClientRegistry::update_from_config(
const ConfigProxy &conf,
const double capacity_per_shard)
{
auto get_res = [&](double res) {
if (res) {
return res * capacity_per_shard;
} else {
return default_min; // min reservation
}
};
auto get_lim = [&](double lim) {
if (lim) {
return lim * capacity_per_shard;
} else {
return default_max; // high limit
}
};
// Set external client infos
double res = conf.get_val<double>(
"osd_mclock_scheduler_client_res");
double lim = conf.get_val<double>(
"osd_mclock_scheduler_client_lim");
uint64_t wgt = conf.get_val<uint64_t>(
"osd_mclock_scheduler_client_wgt");
default_external_client_info.update(
get_res(res),
wgt,
get_lim(lim));
// Set background recovery client infos
res = conf.get_val<double>(
"osd_mclock_scheduler_background_recovery_res");
lim = conf.get_val<double>(
"osd_mclock_scheduler_background_recovery_lim");
wgt = conf.get_val<uint64_t>(
"osd_mclock_scheduler_background_recovery_wgt");
internal_client_infos[
static_cast<size_t>(op_scheduler_class::background_recovery)].update(
get_res(res),
wgt,
get_lim(lim));
// Set background best effort client infos
res = conf.get_val<double>(
"osd_mclock_scheduler_background_best_effort_res");
lim = conf.get_val<double>(
"osd_mclock_scheduler_background_best_effort_lim");
wgt = conf.get_val<uint64_t>(
"osd_mclock_scheduler_background_best_effort_wgt");
internal_client_infos[
static_cast<size_t>(op_scheduler_class::background_best_effort)].update(
get_res(res),
wgt,
get_lim(lim));
}
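// Illustrative example (hypothetical numbers, not necessarily the configured
// defaults): with osd_bandwidth_capacity_per_shard = 31,457,280 bytes/second
// and osd_mclock_scheduler_client_res = 0.5, the client class ends up with a
// reservation of ~15,728,640 bytes/second. A ratio of 0 maps to default_min
// for reservations and to default_max (effectively unlimited) for limits.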
const dmc::ClientInfo *mClockScheduler::ClientRegistry::get_external_client(
const client_profile_id_t &client) const
{
auto ret = external_client_infos.find(client);
if (ret == external_client_infos.end())
return &default_external_client_info;
else
return &(ret->second);
}
const dmc::ClientInfo *mClockScheduler::ClientRegistry::get_info(
const scheduler_id_t &id) const {
switch (id.class_id) {
case op_scheduler_class::immediate:
ceph_assert(0 == "Cannot schedule immediate");
return (dmc::ClientInfo*)nullptr;
case op_scheduler_class::client:
return get_external_client(id.client_profile_id);
default:
ceph_assert(static_cast<size_t>(id.class_id) < internal_client_infos.size());
return &internal_client_infos[static_cast<size_t>(id.class_id)];
}
}
void mClockScheduler::set_osd_capacity_params_from_config()
{
uint64_t osd_bandwidth_capacity;
double osd_iop_capacity;
std::tie(osd_bandwidth_capacity, osd_iop_capacity) = [&, this] {
if (is_rotational) {
return std::make_tuple(
cct->_conf.get_val<Option::size_t>(
"osd_mclock_max_sequential_bandwidth_hdd"),
cct->_conf.get_val<double>("osd_mclock_max_capacity_iops_hdd"));
} else {
return std::make_tuple(
cct->_conf.get_val<Option::size_t>(
"osd_mclock_max_sequential_bandwidth_ssd"),
cct->_conf.get_val<double>("osd_mclock_max_capacity_iops_ssd"));
}
}();
osd_bandwidth_capacity = std::max<uint64_t>(1, osd_bandwidth_capacity);
osd_iop_capacity = std::max<double>(1.0, osd_iop_capacity);
osd_bandwidth_cost_per_io =
static_cast<double>(osd_bandwidth_capacity) / osd_iop_capacity;
osd_bandwidth_capacity_per_shard = static_cast<double>(osd_bandwidth_capacity)
/ static_cast<double>(num_shards);
dout(1) << __func__ << ": osd_bandwidth_cost_per_io: "
<< std::fixed << std::setprecision(2)
<< osd_bandwidth_cost_per_io << " bytes/io"
<< ", osd_bandwidth_capacity_per_shard "
<< osd_bandwidth_capacity_per_shard << " bytes/second"
<< dendl;
}
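// Illustrative example (hypothetical inputs, not necessarily the defaults):
// with a max sequential bandwidth of 157,286,400 bytes/second (150 MiB/s),
// 315 max IOPS and 5 shards, this yields
// osd_bandwidth_cost_per_io ~= 157286400 / 315 ~= 499,322 bytes/io and
// osd_bandwidth_capacity_per_shard = 157286400 / 5 = 31,457,280 bytes/second.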
/**
* profile_t
*
* mclock profile -- 3 params for each of 3 client classes
* 0 (min): specifies no minimum reservation
* 0 (max): specifies no upper limit
*/
struct profile_t {
struct client_config_t {
double reservation;
uint64_t weight;
double limit;
};
client_config_t client;
client_config_t background_recovery;
client_config_t background_best_effort;
};
static std::ostream &operator<<(
std::ostream &lhs, const profile_t::client_config_t &rhs)
{
return lhs << "{res: " << rhs.reservation
<< ", wgt: " << rhs.weight
<< ", lim: " << rhs.limit
<< "}";
}
static std::ostream &operator<<(std::ostream &lhs, const profile_t &rhs)
{
return lhs << "[client: " << rhs.client
<< ", background_recovery: " << rhs.background_recovery
<< ", background_best_effort: " << rhs.background_best_effort
<< "]";
}
void mClockScheduler::set_config_defaults_from_profile()
{
// Let only a single osd shard (id:0) set the profile configs
if (shard_id > 0) {
return;
}
/**
* high_client_ops
*
* Client Allocation:
* reservation: 60% | weight: 2 | limit: 0 (max) |
* Background Recovery Allocation:
* reservation: 40% | weight: 1 | limit: 0 (max) |
* Background Best Effort Allocation:
* reservation: 0 (min) | weight: 1 | limit: 70% |
*/
static constexpr profile_t high_client_ops_profile{
{ .6, 2, 0 },
{ .4, 1, 0 },
{ 0, 1, .7 }
};
/**
* high_recovery_ops
*
* Client Allocation:
* reservation: 30% | weight: 1 | limit: 0 (max) |
* Background Recovery Allocation:
* reservation: 70% | weight: 2 | limit: 0 (max) |
* Background Best Effort Allocation:
* reservation: 0 (min) | weight: 1 | limit: 0 (max) |
*/
static constexpr profile_t high_recovery_ops_profile{
{ .3, 1, 0 },
{ .7, 2, 0 },
{ 0, 1, 0 }
};
/**
* balanced
*
* Client Allocation:
* reservation: 50% | weight: 1 | limit: 0 (max) |
* Background Recovery Allocation:
* reservation: 50% | weight: 1 | limit: 0 (max) |
* Background Best Effort Allocation:
* reservation: 0 (min) | weight: 1 | limit: 90% |
*/
static constexpr profile_t balanced_profile{
{ .5, 1, 0 },
{ .5, 1, 0 },
{ 0, 1, .9 }
};
const profile_t *profile = nullptr;
auto mclock_profile = cct->_conf.get_val<std::string>("osd_mclock_profile");
if (mclock_profile == "high_client_ops") {
profile = &high_client_ops_profile;
dout(10) << "Setting high_client_ops profile " << *profile << dendl;
} else if (mclock_profile == "high_recovery_ops") {
profile = &high_recovery_ops_profile;
dout(10) << "Setting high_recovery_ops profile " << *profile << dendl;
} else if (mclock_profile == "balanced") {
profile = &balanced_profile;
dout(10) << "Setting balanced profile " << *profile << dendl;
} else if (mclock_profile == "custom") {
dout(10) << "Profile set to custom, not setting defaults" << dendl;
return;
} else {
derr << "Invalid mclock profile: " << mclock_profile << dendl;
ceph_assert("Invalid choice of mclock profile" == 0);
return;
}
ceph_assert(nullptr != profile);
auto set_config = [&conf = cct->_conf](const char *key, auto val) {
conf.set_val_default(key, std::to_string(val));
};
set_config("osd_mclock_scheduler_client_res", profile->client.reservation);
set_config("osd_mclock_scheduler_client_wgt", profile->client.weight);
set_config("osd_mclock_scheduler_client_lim", profile->client.limit);
set_config(
"osd_mclock_scheduler_background_recovery_res",
profile->background_recovery.reservation);
set_config(
"osd_mclock_scheduler_background_recovery_wgt",
profile->background_recovery.weight);
set_config(
"osd_mclock_scheduler_background_recovery_lim",
profile->background_recovery.limit);
set_config(
"osd_mclock_scheduler_background_best_effort_res",
profile->background_best_effort.reservation);
set_config(
"osd_mclock_scheduler_background_best_effort_wgt",
profile->background_best_effort.weight);
set_config(
"osd_mclock_scheduler_background_best_effort_lim",
profile->background_best_effort.limit);
cct->_conf.apply_changes(nullptr);
}
uint32_t mClockScheduler::calc_scaled_cost(int item_cost)
{
auto cost = static_cast<uint32_t>(
std::max<int>(
1, // ensure cost is non-zero and positive
item_cost));
auto cost_per_io = static_cast<uint32_t>(osd_bandwidth_cost_per_io);
// Calculate total scaled cost in bytes
return cost_per_io + cost;
}
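// Illustrative example (hypothetical value): with osd_bandwidth_cost_per_io
// ~= 499,322 bytes/io, an item whose raw cost is 65,536 bytes gets a scaled
// cost of 499,322 + 65,536 = 564,858 bytes charged against its class.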
void mClockScheduler::update_configuration()
{
// Apply configuration change. The expectation is that
// at least one of the tracked mclock config option keys
// is modified before calling this method.
cct->_conf.apply_changes(nullptr);
}
void mClockScheduler::dump(ceph::Formatter &f) const
{
// Display queue sizes
f.open_object_section("queue_sizes");
f.dump_int("high_priority_queue", high_priority.size());
f.dump_int("scheduler", scheduler.request_count());
f.close_section();
// client map and queue tops (res, wgt, lim)
std::ostringstream out;
f.open_object_section("mClockClients");
f.dump_int("client_count", scheduler.client_count());
out << scheduler;
f.dump_string("clients", out.str());
f.close_section();
// Display sorted queues (res, wgt, lim)
f.open_object_section("mClockQueues");
f.dump_string("queues", display_queues());
f.close_section();
f.open_object_section("HighPriorityQueue");
for (auto it = high_priority.begin();
it != high_priority.end(); it++) {
f.dump_int("priority", it->first);
f.dump_int("queue_size", it->second.size());
}
f.close_section();
}
void mClockScheduler::enqueue(OpSchedulerItem&& item)
{
auto id = get_scheduler_id(item);
unsigned priority = item.get_priority();
// TODO: move this check into OpSchedulerItem, handle backwards compat
if (op_scheduler_class::immediate == id.class_id) {
enqueue_high(immediate_class_priority, std::move(item));
} else if (priority >= cutoff_priority) {
enqueue_high(priority, std::move(item));
} else {
auto cost = calc_scaled_cost(item.get_cost());
item.set_qos_cost(cost);
dout(20) << __func__ << " " << id
<< " item_cost: " << item.get_cost()
<< " scaled_cost: " << cost
<< dendl;
// Add item to scheduler queue
scheduler.add_request(
std::move(item),
id,
cost);
}
dout(20) << __func__ << " client_count: " << scheduler.client_count()
<< " queue_sizes: [ "
<< " high_priority_queue: " << high_priority.size()
<< " sched: " << scheduler.request_count() << " ]"
<< dendl;
dout(30) << __func__ << " mClockClients: "
<< scheduler
<< dendl;
dout(30) << __func__ << " mClockQueues: { "
<< display_queues() << " }"
<< dendl;
}
void mClockScheduler::enqueue_front(OpSchedulerItem&& item)
{
unsigned priority = item.get_priority();
auto id = get_scheduler_id(item);
if (op_scheduler_class::immediate == id.class_id) {
enqueue_high(immediate_class_priority, std::move(item), true);
} else if (priority >= cutoff_priority) {
enqueue_high(priority, std::move(item), true);
} else {
// mClock does not support enqueue at front, so we use
// the high queue with priority 0
enqueue_high(0, std::move(item), true);
}
}
void mClockScheduler::enqueue_high(unsigned priority,
OpSchedulerItem&& item,
bool front)
{
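  // Note: dequeue() pops from the *back* of each per-priority list, so the
  // normal path (push_front) gives FIFO order within a priority, while
  // front == true (push_back) makes the item the next one dequeued for
  // that priority.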
if (front) {
high_priority[priority].push_back(std::move(item));
} else {
high_priority[priority].push_front(std::move(item));
}
}
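// dequeue() returns the next item to run. When only mClock-managed items are
// queued and none is currently eligible, the WorkItem holds a double: the time
// at which the next request is expected to become schedulable. The caller is
// then expected to retry at or after that time (an assumption about the
// calling shard code, not something enforced here).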
WorkItem mClockScheduler::dequeue()
{
if (!high_priority.empty()) {
auto iter = high_priority.begin();
// invariant: high_priority entries are never empty
assert(!iter->second.empty());
WorkItem ret{std::move(iter->second.back())};
iter->second.pop_back();
if (iter->second.empty()) {
// maintain invariant, high priority entries are never empty
high_priority.erase(iter);
}
ceph_assert(std::get_if<OpSchedulerItem>(&ret));
return ret;
} else {
mclock_queue_t::PullReq result = scheduler.pull_request();
if (result.is_future()) {
return result.getTime();
} else if (result.is_none()) {
ceph_assert(
0 == "Impossible, must have checked empty() first");
return {};
} else {
ceph_assert(result.is_retn());
auto &retn = result.get_retn();
return std::move(*retn.request);
}
}
}
std::string mClockScheduler::display_queues() const
{
std::ostringstream out;
scheduler.display_queues(out);
return out.str();
}
const char** mClockScheduler::get_tracked_conf_keys() const
{
static const char* KEYS[] = {
"osd_mclock_scheduler_client_res",
"osd_mclock_scheduler_client_wgt",
"osd_mclock_scheduler_client_lim",
"osd_mclock_scheduler_background_recovery_res",
"osd_mclock_scheduler_background_recovery_wgt",
"osd_mclock_scheduler_background_recovery_lim",
"osd_mclock_scheduler_background_best_effort_res",
"osd_mclock_scheduler_background_best_effort_wgt",
"osd_mclock_scheduler_background_best_effort_lim",
"osd_mclock_max_capacity_iops_hdd",
"osd_mclock_max_capacity_iops_ssd",
"osd_mclock_max_sequential_bandwidth_hdd",
"osd_mclock_max_sequential_bandwidth_ssd",
"osd_mclock_profile",
NULL
};
return KEYS;
}
void mClockScheduler::handle_conf_change(
const ConfigProxy& conf,
const std::set<std::string> &changed)
{
if (changed.count("osd_mclock_max_capacity_iops_hdd") ||
changed.count("osd_mclock_max_capacity_iops_ssd")) {
set_osd_capacity_params_from_config();
client_registry.update_from_config(
conf, osd_bandwidth_capacity_per_shard);
}
if (changed.count("osd_mclock_max_sequential_bandwidth_hdd") ||
changed.count("osd_mclock_max_sequential_bandwidth_ssd")) {
set_osd_capacity_params_from_config();
client_registry.update_from_config(
conf, osd_bandwidth_capacity_per_shard);
}
if (changed.count("osd_mclock_profile")) {
set_config_defaults_from_profile();
client_registry.update_from_config(
conf, osd_bandwidth_capacity_per_shard);
}
auto get_changed_key = [&changed]() -> std::optional<std::string> {
static const std::vector<std::string> qos_params = {
"osd_mclock_scheduler_client_res",
"osd_mclock_scheduler_client_wgt",
"osd_mclock_scheduler_client_lim",
"osd_mclock_scheduler_background_recovery_res",
"osd_mclock_scheduler_background_recovery_wgt",
"osd_mclock_scheduler_background_recovery_lim",
"osd_mclock_scheduler_background_best_effort_res",
"osd_mclock_scheduler_background_best_effort_wgt",
"osd_mclock_scheduler_background_best_effort_lim"
};
for (auto &qp : qos_params) {
if (changed.count(qp)) {
return qp;
}
}
return std::nullopt;
};
if (auto key = get_changed_key(); key.has_value()) {
auto mclock_profile = cct->_conf.get_val<std::string>("osd_mclock_profile");
if (mclock_profile == "custom") {
client_registry.update_from_config(
conf, osd_bandwidth_capacity_per_shard);
} else {
// Attempt to change QoS parameter for a built-in profile. Restore the
// profile defaults by making one of the OSD shards remove the key from
// config monitor store. Note: monc is included in the check since the
// mock unit test currently doesn't initialize it.
if (shard_id == 0 && monc) {
static const std::vector<std::string> osds = {
"osd",
"osd." + std::to_string(whoami)
};
for (auto osd : osds) {
std::string cmd =
"{"
"\"prefix\": \"config rm\", "
"\"who\": \"" + osd + "\", "
"\"name\": \"" + *key + "\""
"}";
std::vector<std::string> vcmd{cmd};
dout(10) << __func__ << " Removing Key: " << *key
<< " for " << osd << " from Mon db" << dendl;
monc->start_mon_command(vcmd, {}, nullptr, nullptr, nullptr);
}
}
}
    // Alternatively, if the QoS parameter was set ephemerally for this OSD via
    // the 'daemon' or 'tell' interfaces, it must be removed.
if (!cct->_conf.rm_val(*key)) {
dout(10) << __func__ << " Restored " << *key << " to default" << dendl;
cct->_conf.apply_changes(nullptr);
}
}
}
mClockScheduler::~mClockScheduler()
{
cct->_conf.remove_observer(this);
}
}
| 18,719 | 30.252087 | 81 | cc |
null | ceph-main/src/osd/scheduler/mClockScheduler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <functional>
#include <ostream>
#include <map>
#include <vector>
#include "boost/variant.hpp"
#include "dmclock/src/dmclock_server.h"
#include "osd/scheduler/OpScheduler.h"
#include "common/config.h"
#include "common/ceph_context.h"
#include "common/mClockPriorityQueue.h"
#include "osd/scheduler/OpSchedulerItem.h"
namespace ceph::osd::scheduler {
constexpr double default_min = 0.0;
constexpr double default_max = std::numeric_limits<double>::is_iec559 ?
std::numeric_limits<double>::infinity() :
std::numeric_limits<double>::max();
using client_id_t = uint64_t;
using profile_id_t = uint64_t;
struct client_profile_id_t {
client_id_t client_id;
profile_id_t profile_id;
auto operator<=>(const client_profile_id_t&) const = default;
friend std::ostream& operator<<(std::ostream& out,
const client_profile_id_t& client_profile) {
out << " client_id: " << client_profile.client_id
<< " profile_id: " << client_profile.profile_id;
return out;
}
};
struct scheduler_id_t {
op_scheduler_class class_id;
client_profile_id_t client_profile_id;
auto operator<=>(const scheduler_id_t&) const = default;
friend std::ostream& operator<<(std::ostream& out,
const scheduler_id_t& sched_id) {
out << "{ class_id: " << sched_id.class_id
<< sched_id.client_profile_id;
return out << " }";
}
};
/**
* Scheduler implementation based on mclock.
*
* TODO: explain configs
*/
class mClockScheduler : public OpScheduler, md_config_obs_t {
CephContext *cct;
const int whoami;
const uint32_t num_shards;
const int shard_id;
const bool is_rotational;
MonClient *monc;
/**
* osd_bandwidth_cost_per_io
*
* mClock expects all queued items to have a uniform expression of
* "cost". However, IO devices generally have quite different capacity
* for sequential IO vs small random IO. This implementation handles this
   * by expressing all costs as a number of sequential bytes written, adding
   * additional cost for each random IO equal to osd_bandwidth_cost_per_io.
   *
   * Thus, an IO operation requiring a total of <size> bytes to be written
   * across <iops> different locations will have a cost of
* <size> + (osd_bandwidth_cost_per_io * <iops>) bytes.
*
* Set in set_osd_capacity_params_from_config in the constructor and upon
* config change.
*
* Has units bytes/io.
*/
double osd_bandwidth_cost_per_io;
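  // Illustrative example (hypothetical value): with an osd_bandwidth_cost_per_io
  // of roughly 500 KiB, a 64 KiB write touching 2 distinct locations would be
  // charged about 64 KiB + 2 * 500 KiB ~= 1 MiB of equivalent sequential I/O.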
/**
* osd_bandwidth_capacity_per_shard
*
   * mClock expects reservation and limit parameters to be expressed in units
* of cost/second -- which means bytes/second for this implementation.
*
* Rather than expecting users to compute appropriate limit and reservation
* values for each class of OSDs in their cluster, we instead express
   * reservation and limit parameters as ratios of the OSD's maximum capacity.
* osd_bandwidth_capacity_per_shard is that capacity divided by the number
* of shards.
*
* Set in set_osd_capacity_params_from_config in the constructor and upon
* config change.
*
* This value gets passed to ClientRegistry::update_from_config in order
   * to resolve the full reservation and limit parameters for mclock from
* the configured ratios.
*
* Has units bytes/second.
*/
double osd_bandwidth_capacity_per_shard;
class ClientRegistry {
std::array<
crimson::dmclock::ClientInfo,
static_cast<size_t>(op_scheduler_class::immediate)
> internal_client_infos = {
// Placeholder, gets replaced with configured values
crimson::dmclock::ClientInfo(1, 1, 1),
crimson::dmclock::ClientInfo(1, 1, 1)
};
crimson::dmclock::ClientInfo default_external_client_info = {1, 1, 1};
std::map<client_profile_id_t,
crimson::dmclock::ClientInfo> external_client_infos;
const crimson::dmclock::ClientInfo *get_external_client(
const client_profile_id_t &client) const;
public:
/**
* update_from_config
*
     * Sets the mclock parameters (reservation, weight, and limit)
* for each class of IO (background_recovery, background_best_effort,
* and client).
*/
void update_from_config(
const ConfigProxy &conf,
double capacity_per_shard);
const crimson::dmclock::ClientInfo *get_info(
const scheduler_id_t &id) const;
} client_registry;
using mclock_queue_t = crimson::dmclock::PullPriorityQueue<
scheduler_id_t,
OpSchedulerItem,
true,
true,
2>;
using priority_t = unsigned;
using SubQueue = std::map<priority_t,
std::list<OpSchedulerItem>,
std::greater<priority_t>>;
mclock_queue_t scheduler;
/**
* high_priority
*
* Holds entries to be dequeued in strict order ahead of mClock
   * Invariant: the per-priority lists held here are never empty
*/
SubQueue high_priority;
priority_t immediate_class_priority = std::numeric_limits<priority_t>::max();
static scheduler_id_t get_scheduler_id(const OpSchedulerItem &item) {
return scheduler_id_t{
item.get_scheduler_class(),
client_profile_id_t{
item.get_owner(),
0
}
};
}
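  // Note: profile_id is currently hard-coded to 0, so external (client class)
  // requests are distinguished only by their owner, i.e. the global client id.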
static unsigned int get_io_prio_cut(CephContext *cct) {
if (cct->_conf->osd_op_queue_cut_off == "debug_random") {
std::random_device rd;
std::mt19937 random_gen(rd());
return (random_gen() % 2 < 1) ? CEPH_MSG_PRIO_HIGH : CEPH_MSG_PRIO_LOW;
} else if (cct->_conf->osd_op_queue_cut_off == "high") {
return CEPH_MSG_PRIO_HIGH;
} else {
// default / catch-all is 'low'
return CEPH_MSG_PRIO_LOW;
}
}
unsigned cutoff_priority = get_io_prio_cut(cct);
/**
* set_osd_capacity_params_from_config
*
* mClockScheduler uses two parameters, osd_bandwidth_cost_per_io
* and osd_bandwidth_capacity_per_shard, internally. These two
* parameters are derived from config parameters
* osd_mclock_max_capacity_iops_(hdd|ssd) and
* osd_mclock_max_sequential_bandwidth_(hdd|ssd) as well as num_shards.
* Invoking set_osd_capacity_params_from_config() resets those derived
* params based on the current config and should be invoked any time they
* are modified as well as in the constructor. See handle_conf_change().
*/
void set_osd_capacity_params_from_config();
// Set the mclock related config params based on the profile
void set_config_defaults_from_profile();
public:
mClockScheduler(CephContext *cct, int whoami, uint32_t num_shards,
int shard_id, bool is_rotational, MonClient *monc);
~mClockScheduler() override;
/// Calculate scaled cost per item
uint32_t calc_scaled_cost(int cost);
// Helper method to display mclock queues
std::string display_queues() const;
// Enqueue op in the back of the regular queue
void enqueue(OpSchedulerItem &&item) final;
// Enqueue the op in the front of the high priority queue
void enqueue_front(OpSchedulerItem &&item) final;
// Return an op to be dispatch
WorkItem dequeue() final;
// Returns if the queue is empty
bool empty() const final {
return scheduler.empty() && high_priority.empty();
}
// Formatted output of the queue
void dump(ceph::Formatter &f) const final;
void print(std::ostream &ostream) const final {
ostream << "mClockScheduler";
}
// Update data associated with the modified mclock config key(s)
void update_configuration() final;
const char** get_tracked_conf_keys() const final;
void handle_conf_change(const ConfigProxy& conf,
const std::set<std::string> &changed) final;
private:
// Enqueue the op to the high priority queue
void enqueue_high(unsigned prio, OpSchedulerItem &&item, bool front = false);
};
}
| 8,102 | 29.809886 | 79 | h |
null | ceph-main/src/osd/scrubber/PrimaryLogScrub.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "./PrimaryLogScrub.h"
#include <sstream>
#include "common/scrub_types.h"
#include "osd/PeeringState.h"
#include "osd/PrimaryLogPG.h"
#include "osd/osd_types_fmt.h"
#include "scrub_machine.h"
#define dout_context (m_osds->cct)
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
template <class T>
static ostream& _prefix(std::ostream* _dout, T* t)
{
return t->gen_prefix(*_dout);
}
using namespace Scrub;
bool PrimaryLogScrub::get_store_errors(const scrub_ls_arg_t& arg,
scrub_ls_result_t& res_inout) const
{
if (!m_store) {
return false;
}
if (arg.get_snapsets) {
res_inout.vals = m_store->get_snap_errors(m_pg->get_pgid().pool(),
arg.start_after,
arg.max_return);
} else {
res_inout.vals = m_store->get_object_errors(m_pg->get_pgid().pool(),
arg.start_after,
arg.max_return);
}
return true;
}
/// \todo combine the multiple transactions into a single one
void PrimaryLogScrub::submit_digest_fixes(const digests_fixes_t& fixes)
{
// note: the following line was modified from '+=' to '=', as we should not
// encounter previous-chunk digest updates after starting a new chunk
num_digest_updates_pending = fixes.size();
dout(10) << __func__
<< ": num_digest_updates_pending: " << num_digest_updates_pending
<< dendl;
for (auto& [obj, dgs] : fixes) {
ObjectContextRef obc = m_pl_pg->get_object_context(obj, false);
if (!obc) {
m_osds->clog->error() << m_pg_id << " " << m_mode_desc
<< " cannot get object context for object " << obj;
num_digest_updates_pending--;
continue;
}
dout(15) << fmt::format(
"{}: {}, pg[{}] {}/{}", __func__, num_digest_updates_pending,
m_pg_id, obj, dgs)
<< dendl;
if (obc->obs.oi.soid != obj) {
m_osds->clog->error()
<< m_pg_id << " " << m_mode_desc << " " << obj
<< " : object has a valid oi attr with a mismatched name, "
<< " obc->obs.oi.soid: " << obc->obs.oi.soid;
num_digest_updates_pending--;
continue;
}
PrimaryLogPG::OpContextUPtr ctx = m_pl_pg->simple_opc_create(obc);
ctx->at_version = m_pl_pg->get_next_version();
ctx->mtime = utime_t(); // do not update mtime
if (dgs.first) {
ctx->new_obs.oi.set_data_digest(*dgs.first);
} else {
ctx->new_obs.oi.clear_data_digest();
}
if (dgs.second) {
ctx->new_obs.oi.set_omap_digest(*dgs.second);
} else {
ctx->new_obs.oi.clear_omap_digest();
}
m_pl_pg->finish_ctx(ctx.get(), pg_log_entry_t::MODIFY);
ctx->register_on_success([this]() {
if ((num_digest_updates_pending >= 1) &&
(--num_digest_updates_pending == 0)) {
m_osds->queue_scrub_digest_update(m_pl_pg,
m_pl_pg->is_scrub_blocking_ops());
}
});
m_pl_pg->simple_opc_submit(std::move(ctx));
}
}
// a forwarder used by the scrubber backend
void PrimaryLogScrub::add_to_stats(const object_stat_sum_t& stat)
{
m_scrub_cstat.add(stat);
}
void PrimaryLogScrub::_scrub_finish()
{
auto& info = m_pg->get_pg_info(ScrubberPasskey{}); ///< a temporary alias
dout(10) << __func__ << " info stats: "
<< (info.stats.stats_invalid ? "invalid" : "valid")
<< " m_is_repair: " << m_is_repair << dendl;
if (info.stats.stats_invalid) {
m_pl_pg->recovery_state.update_stats([=, this](auto& history, auto& stats) {
stats.stats = m_scrub_cstat;
stats.stats_invalid = false;
return false;
});
if (m_pl_pg->agent_state) {
m_pl_pg->agent_choose_mode();
}
}
dout(10) << m_mode_desc << " got " << m_scrub_cstat.sum.num_objects << "/"
<< info.stats.stats.sum.num_objects << " objects, "
<< m_scrub_cstat.sum.num_object_clones << "/"
<< info.stats.stats.sum.num_object_clones << " clones, "
<< m_scrub_cstat.sum.num_objects_dirty << "/"
<< info.stats.stats.sum.num_objects_dirty << " dirty, "
<< m_scrub_cstat.sum.num_objects_omap << "/"
<< info.stats.stats.sum.num_objects_omap << " omap, "
<< m_scrub_cstat.sum.num_objects_pinned << "/"
<< info.stats.stats.sum.num_objects_pinned << " pinned, "
<< m_scrub_cstat.sum.num_objects_hit_set_archive << "/"
<< info.stats.stats.sum.num_objects_hit_set_archive
<< " hit_set_archive, " << m_scrub_cstat.sum.num_bytes << "/"
<< info.stats.stats.sum.num_bytes << " bytes, "
<< m_scrub_cstat.sum.num_objects_manifest << "/"
<< info.stats.stats.sum.num_objects_manifest << " manifest objects, "
<< m_scrub_cstat.sum.num_bytes_hit_set_archive << "/"
<< info.stats.stats.sum.num_bytes_hit_set_archive
<< " hit_set_archive bytes." << dendl;
if (m_scrub_cstat.sum.num_objects != info.stats.stats.sum.num_objects ||
m_scrub_cstat.sum.num_object_clones !=
info.stats.stats.sum.num_object_clones ||
(m_scrub_cstat.sum.num_objects_dirty !=
info.stats.stats.sum.num_objects_dirty &&
!info.stats.dirty_stats_invalid) ||
(m_scrub_cstat.sum.num_objects_omap !=
info.stats.stats.sum.num_objects_omap &&
!info.stats.omap_stats_invalid) ||
(m_scrub_cstat.sum.num_objects_pinned !=
info.stats.stats.sum.num_objects_pinned &&
!info.stats.pin_stats_invalid) ||
(m_scrub_cstat.sum.num_objects_hit_set_archive !=
info.stats.stats.sum.num_objects_hit_set_archive &&
!info.stats.hitset_stats_invalid) ||
(m_scrub_cstat.sum.num_bytes_hit_set_archive !=
info.stats.stats.sum.num_bytes_hit_set_archive &&
!info.stats.hitset_bytes_stats_invalid) ||
(m_scrub_cstat.sum.num_objects_manifest !=
info.stats.stats.sum.num_objects_manifest &&
!info.stats.manifest_stats_invalid) ||
m_scrub_cstat.sum.num_whiteouts != info.stats.stats.sum.num_whiteouts ||
m_scrub_cstat.sum.num_bytes != info.stats.stats.sum.num_bytes) {
m_osds->clog->error() << info.pgid << " " << m_mode_desc
<< " : stat mismatch, got "
<< m_scrub_cstat.sum.num_objects << "/"
<< info.stats.stats.sum.num_objects << " objects, "
<< m_scrub_cstat.sum.num_object_clones << "/"
<< info.stats.stats.sum.num_object_clones
<< " clones, " << m_scrub_cstat.sum.num_objects_dirty
<< "/" << info.stats.stats.sum.num_objects_dirty
<< " dirty, " << m_scrub_cstat.sum.num_objects_omap
<< "/" << info.stats.stats.sum.num_objects_omap
<< " omap, " << m_scrub_cstat.sum.num_objects_pinned
<< "/" << info.stats.stats.sum.num_objects_pinned
<< " pinned, "
<< m_scrub_cstat.sum.num_objects_hit_set_archive
<< "/"
<< info.stats.stats.sum.num_objects_hit_set_archive
<< " hit_set_archive, "
<< m_scrub_cstat.sum.num_whiteouts << "/"
<< info.stats.stats.sum.num_whiteouts
<< " whiteouts, " << m_scrub_cstat.sum.num_bytes
<< "/" << info.stats.stats.sum.num_bytes << " bytes, "
<< m_scrub_cstat.sum.num_objects_manifest << "/"
<< info.stats.stats.sum.num_objects_manifest
<< " manifest objects, "
<< m_scrub_cstat.sum.num_bytes_hit_set_archive << "/"
<< info.stats.stats.sum.num_bytes_hit_set_archive
<< " hit_set_archive bytes.";
++m_shallow_errors;
if (m_is_repair) {
++m_fixed_count;
m_pl_pg->recovery_state.update_stats([this](auto& history, auto& stats) {
stats.stats = m_scrub_cstat;
stats.dirty_stats_invalid = false;
stats.omap_stats_invalid = false;
stats.hitset_stats_invalid = false;
stats.hitset_bytes_stats_invalid = false;
stats.pin_stats_invalid = false;
stats.manifest_stats_invalid = false;
return false;
});
m_pl_pg->publish_stats_to_osd();
m_pl_pg->recovery_state.share_pg_info();
}
}
// Clear object context cache to get repair information
if (m_is_repair)
m_pl_pg->object_contexts.clear();
}
PrimaryLogScrub::PrimaryLogScrub(PrimaryLogPG* pg) : PgScrubber{pg}, m_pl_pg{pg}
{}
void PrimaryLogScrub::_scrub_clear_state()
{
m_scrub_cstat = object_stat_collection_t();
}
void PrimaryLogScrub::stats_of_handled_objects(
const object_stat_sum_t& delta_stats,
const hobject_t& soid)
{
// We scrub objects in hobject_t order, so objects before m_start have already
// been scrubbed and their stats have already been added to the scrubber.
// Objects after that point haven't been included in the scrubber's stats
// accounting yet, so they will be included when the scrubber gets to that
// object.
if (is_primary() && is_scrub_active()) {
if (soid < m_start) {
dout(20) << fmt::format("{} {} < [{},{})", __func__, soid, m_start, m_end)
<< dendl;
m_scrub_cstat.add(delta_stats);
} else {
dout(25)
<< fmt::format("{} {} >= [{},{})", __func__, soid, m_start, m_end)
<< dendl;
}
}
}
| 8,804 | 32.735632 | 80 | cc |
null | ceph-main/src/osd/scrubber/PrimaryLogScrub.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// the './' includes are marked this way to affect clang-format
#include "./pg_scrubber.h"
#include "debug.h"
#include "common/errno.h"
#include "common/scrub_types.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDRepScrub.h"
#include "messages/MOSDRepScrubMap.h"
#include "messages/MOSDScrubReserve.h"
#include "osd/OSD.h"
#include "scrub_machine.h"
class PrimaryLogPG;
/**
* The derivative of PgScrubber that is used by PrimaryLogPG.
*/
class PrimaryLogScrub : public PgScrubber {
public:
explicit PrimaryLogScrub(PrimaryLogPG* pg);
void _scrub_finish() final;
bool get_store_errors(const scrub_ls_arg_t& arg,
scrub_ls_result_t& res_inout) const final;
void stats_of_handled_objects(const object_stat_sum_t& delta_stats,
const hobject_t& soid) final;
// the interface used by the scrubber-backend:
void add_to_stats(const object_stat_sum_t& stat) final;
void submit_digest_fixes(const digests_fixes_t& fixes) final;
private:
// we know our PG is actually a PrimaryLogPG. Let's alias the pointer to that
// object:
PrimaryLogPG* const m_pl_pg;
// handle our part in stats collection
object_stat_collection_t m_scrub_cstat;
void _scrub_clear_state() final; // which just clears the stats
};
| 1,360 | 25.173077 | 79 | h |
null | ceph-main/src/osd/scrubber/ScrubStore.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ScrubStore.h"
#include "osd/osd_types.h"
#include "common/scrub_types.h"
#include "include/rados/rados_types.hpp"
using std::ostringstream;
using std::string;
using std::vector;
using ceph::bufferlist;
namespace {
ghobject_t make_scrub_object(const spg_t& pgid)
{
ostringstream ss;
ss << "scrub_" << pgid;
return pgid.make_temp_ghobject(ss.str());
}
string first_object_key(int64_t pool)
{
auto hoid = hobject_t(object_t(),
"",
0,
0x00000000,
pool,
"");
hoid.build_hash_cache();
return "SCRUB_OBJ_" + hoid.to_str();
}
// the object_key should be unique across pools
string to_object_key(int64_t pool, const librados::object_id_t& oid)
{
auto hoid = hobject_t(object_t(oid.name),
oid.locator, // key
oid.snap,
0, // hash
pool,
oid.nspace);
hoid.build_hash_cache();
return "SCRUB_OBJ_" + hoid.to_str();
}
string last_object_key(int64_t pool)
{
auto hoid = hobject_t(object_t(),
"",
0,
0xffffffff,
pool,
"");
hoid.build_hash_cache();
return "SCRUB_OBJ_" + hoid.to_str();
}
string first_snap_key(int64_t pool)
{
  // scrub object is per spg_t object, so we can misuse the hash (pg.seed) for
  // representing the minimal and maximum keys. This relies on how
  // hobject_t::to_str() works: hex(pool).hex(revhash).
auto hoid = hobject_t(object_t(),
"",
0,
0x00000000,
pool,
"");
hoid.build_hash_cache();
return "SCRUB_SS_" + hoid.to_str();
}
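// An added note (an assumption spelled out for clarity, not taken verbatim
// from the original sources): since hobject_t::to_str() leads with
// hex(pool).hex(revhash), every per-object key of a pool sorts between the
// key built with hash 0x00000000 (the first_*_key helpers) and the one built
// with hash 0xffffffff (the last_*_key helpers). This is what allows
// Store::get_errors() below to walk a single pool's entries with a plain
// lexicographic range scan.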
string to_snap_key(int64_t pool, const librados::object_id_t& oid)
{
auto hoid = hobject_t(object_t(oid.name),
oid.locator, // key
oid.snap,
0x77777777, // hash
pool,
oid.nspace);
hoid.build_hash_cache();
return "SCRUB_SS_" + hoid.to_str();
}
string last_snap_key(int64_t pool)
{
auto hoid = hobject_t(object_t(),
"",
0,
0xffffffff,
pool,
"");
hoid.build_hash_cache();
return "SCRUB_SS_" + hoid.to_str();
}
}
namespace Scrub {
Store*
Store::create(ObjectStore* store,
ObjectStore::Transaction* t,
const spg_t& pgid,
const coll_t& coll)
{
ceph_assert(store);
ceph_assert(t);
ghobject_t oid = make_scrub_object(pgid);
t->touch(coll, oid);
return new Store{coll, oid, store};
}
Store::Store(const coll_t& coll, const ghobject_t& oid, ObjectStore* store)
: coll(coll),
hoid(oid),
driver(store, coll, hoid),
backend(&driver)
{}
Store::~Store()
{
ceph_assert(results.empty());
}
void Store::add_error(int64_t pool, const inconsistent_obj_wrapper& e)
{
add_object_error(pool, e);
}
void Store::add_object_error(int64_t pool, const inconsistent_obj_wrapper& e)
{
bufferlist bl;
e.encode(bl);
results[to_object_key(pool, e.object)] = bl;
}
void Store::add_error(int64_t pool, const inconsistent_snapset_wrapper& e)
{
add_snap_error(pool, e);
}
void Store::add_snap_error(int64_t pool, const inconsistent_snapset_wrapper& e)
{
bufferlist bl;
e.encode(bl);
results[to_snap_key(pool, e.object)] = bl;
}
bool Store::empty() const
{
return results.empty();
}
void Store::flush(ObjectStore::Transaction* t)
{
if (t) {
OSDriver::OSTransaction txn = driver.get_transaction(t);
backend.set_keys(results, &txn);
}
results.clear();
}
void Store::cleanup(ObjectStore::Transaction* t)
{
t->remove(coll, hoid);
}
std::vector<bufferlist>
Store::get_snap_errors(int64_t pool,
const librados::object_id_t& start,
uint64_t max_return) const
{
const string begin = (start.name.empty() ?
first_snap_key(pool) : to_snap_key(pool, start));
const string end = last_snap_key(pool);
return get_errors(begin, end, max_return);
}
std::vector<bufferlist>
Store::get_object_errors(int64_t pool,
const librados::object_id_t& start,
uint64_t max_return) const
{
const string begin = (start.name.empty() ?
first_object_key(pool) : to_object_key(pool, start));
const string end = last_object_key(pool);
return get_errors(begin, end, max_return);
}
std::vector<bufferlist>
Store::get_errors(const string& begin,
const string& end,
uint64_t max_return) const
{
vector<bufferlist> errors;
auto next = std::make_pair(begin, bufferlist{});
while (max_return && !backend.get_next(next.first, &next)) {
if (next.first >= end)
break;
errors.push_back(next.second);
max_return--;
}
return errors;
}
} // namespace Scrub
| 4,442 | 20.258373 | 79 | cc |
null | ceph-main/src/osd/scrubber/ScrubStore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_SCRUB_RESULT_H
#define CEPH_SCRUB_RESULT_H
#include "common/map_cacher.hpp"
#include "osd/SnapMapper.h" // for OSDriver
namespace librados {
struct object_id_t;
}
struct inconsistent_obj_wrapper;
struct inconsistent_snapset_wrapper;
namespace Scrub {
class Store {
public:
~Store();
static Store* create(ObjectStore* store,
ObjectStore::Transaction* t,
const spg_t& pgid,
const coll_t& coll);
void add_object_error(int64_t pool, const inconsistent_obj_wrapper& e);
void add_snap_error(int64_t pool, const inconsistent_snapset_wrapper& e);
// and a variant-friendly interface:
void add_error(int64_t pool, const inconsistent_obj_wrapper& e);
void add_error(int64_t pool, const inconsistent_snapset_wrapper& e);
bool empty() const;
void flush(ObjectStore::Transaction*);
void cleanup(ObjectStore::Transaction*);
std::vector<ceph::buffer::list> get_snap_errors(
int64_t pool,
const librados::object_id_t& start,
uint64_t max_return) const;
std::vector<ceph::buffer::list> get_object_errors(
int64_t pool,
const librados::object_id_t& start,
uint64_t max_return) const;
private:
Store(const coll_t& coll, const ghobject_t& oid, ObjectStore* store);
std::vector<ceph::buffer::list> get_errors(const std::string& start,
const std::string& end,
uint64_t max_return) const;
private:
const coll_t coll;
const ghobject_t hoid;
// a temp object holding mappings from seq-id to inconsistencies found in
// scrubbing
OSDriver driver;
mutable MapCacher::MapCacher<std::string, ceph::buffer::list> backend;
std::map<std::string, ceph::buffer::list> results;
};
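/*
 * A minimal usage sketch (illustrative only; the transaction 't', the
 * 'store', 'pgid' and 'coll' arguments, and the error wrappers are assumed
 * to be supplied by the caller - this is not part of the original interface
 * documentation):
 *
 * auto* errstore = Scrub::Store::create(store, &t, pgid, coll);
 * errstore->add_error(pool, obj_err); // an inconsistent_obj_wrapper
 * errstore->add_error(pool, snap_err); // an inconsistent_snapset_wrapper
 * errstore->flush(&t); // persist (and clear) the accumulated results
 * ...
 * errstore->cleanup(&t); // remove the backing object when no longer needed
 * delete errstore; // the destructor asserts that the results were flushed
 */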
} // namespace Scrub
#endif // CEPH_SCRUB_RESULT_H
| 1,838 | 27.734375 | 75 | h |
null | ceph-main/src/osd/scrubber/osd_scrub_sched.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "./osd_scrub_sched.h"
#include "osd/OSD.h"
#include "pg_scrubber.h"
using namespace ::std::literals;
// ////////////////////////////////////////////////////////////////////////// //
// ScrubJob
#define dout_context (cct)
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix *_dout << "osd." << whoami << " "
ScrubQueue::ScrubJob::ScrubJob(CephContext* cct, const spg_t& pg, int node_id)
: RefCountedObject{cct}
, pgid{pg}
, whoami{node_id}
, cct{cct}
{}
// debug usage only
ostream& operator<<(ostream& out, const ScrubQueue::ScrubJob& sjob)
{
out << sjob.pgid << ", " << sjob.schedule.scheduled_at
<< " dead: " << sjob.schedule.deadline << " - "
<< sjob.registration_state() << " / failure: " << sjob.resources_failure
<< " / pen. t.o.: " << sjob.penalty_timeout
<< " / queue state: " << ScrubQueue::qu_state_text(sjob.state);
return out;
}
void ScrubQueue::ScrubJob::update_schedule(
const ScrubQueue::scrub_schedule_t& adjusted)
{
schedule = adjusted;
penalty_timeout = utime_t(0, 0); // helps with debugging
// 'updated' is changed here while not holding jobs_lock. That's OK, as
// the (atomic) flag will only be cleared by select_pg_and_scrub() after
// scan_penalized() is called and the job was moved to the to_scrub queue.
updated = true;
dout(10) << fmt::format("{}: pg[{}] adjusted: {:s} ({})", __func__, pgid,
schedule.scheduled_at, registration_state()) << dendl;
}
std::string ScrubQueue::ScrubJob::scheduling_state(utime_t now_is,
bool is_deep_expected) const
{
// if not in the OSD scheduling queues, not a candidate for scrubbing
if (state != qu_state_t::registered) {
return "no scrub is scheduled";
}
// if the time has passed, we are surely in the queue
// (note that for now we do not tell client if 'penalized')
if (now_is > schedule.scheduled_at) {
// we are never sure that the next scrub will indeed be shallow:
return fmt::format("queued for {}scrub", (is_deep_expected ? "deep " : ""));
}
return fmt::format("{}scrub scheduled @ {:s}",
(is_deep_expected ? "deep " : ""),
schedule.scheduled_at);
}
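// Possible outputs (listed here for illustration only): "no scrub is
// scheduled", "queued for deep scrub", or "deep scrub scheduled @ <time>" -
// depending on the job's queue state and on whether its scheduled time has
// already passed.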
// ////////////////////////////////////////////////////////////////////////// //
// ScrubQueue
#undef dout_context
#define dout_context (cct)
#undef dout_prefix
#define dout_prefix \
*_dout << "osd." << osd_service.get_nodeid() << " scrub-queue::" << __func__ \
<< " "
ScrubQueue::ScrubQueue(CephContext* cct, Scrub::ScrubSchedListener& osds)
: cct{cct}
, osd_service{osds}
{
// initialize the daily loadavg with current 15min loadavg
if (double loadavgs[3]; getloadavg(loadavgs, 3) == 3) {
daily_loadavg = loadavgs[2];
} else {
derr << "OSD::init() : couldn't read loadavgs\n" << dendl;
daily_loadavg = 1.0;
}
}
std::optional<double> ScrubQueue::update_load_average()
{
int hb_interval = conf()->osd_heartbeat_interval;
int n_samples = 60 * 24 * 24;
if (hb_interval > 1) {
n_samples /= hb_interval;
if (n_samples < 1)
n_samples = 1;
}
// get CPU load avg
double loadavg;
if (getloadavg(&loadavg, 1) == 1) {
daily_loadavg = (daily_loadavg * (n_samples - 1) + loadavg) / n_samples;
dout(17) << "heartbeat: daily_loadavg " << daily_loadavg << dendl;
return 100 * loadavg;
}
return std::nullopt;
}
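// A worked illustration (the heartbeat value below is an assumption, not a
// quoted default): with osd_heartbeat_interval=6, n_samples = 60*24*24/6 =
// 5760, so each call moves 'daily_loadavg' 1/5760 of the distance towards
// the current 1-minute loadavg - i.e. an exponential moving average over the
// recent heartbeat samples.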
/*
* Modify the scrub job state:
* - if 'registered' (as expected): mark as 'unregistering'. The job will be
* dequeued the next time sched_scrub() is called.
* - if already 'not_registered': shouldn't really happen, but not a problem.
* The state will not be modified.
* - same for 'unregistering'.
*
* Note: not holding the jobs lock
*/
void ScrubQueue::remove_from_osd_queue(ScrubJobRef scrub_job)
{
dout(15) << "removing pg[" << scrub_job->pgid << "] from OSD scrub queue"
<< dendl;
qu_state_t expected_state{qu_state_t::registered};
auto ret =
scrub_job->state.compare_exchange_strong(expected_state,
qu_state_t::unregistering);
if (ret) {
dout(10) << "pg[" << scrub_job->pgid << "] sched-state changed from "
<< qu_state_text(expected_state) << " to "
<< qu_state_text(scrub_job->state) << dendl;
} else {
// job wasn't in state 'registered' coming in
dout(5) << "removing pg[" << scrub_job->pgid
<< "] failed. State was: " << qu_state_text(expected_state)
<< dendl;
}
}
void ScrubQueue::register_with_osd(
ScrubJobRef scrub_job,
const ScrubQueue::sched_params_t& suggested)
{
qu_state_t state_at_entry = scrub_job->state.load();
dout(20) << fmt::format(
"pg[{}] state at entry: <{:.14}>", scrub_job->pgid,
state_at_entry)
<< dendl;
switch (state_at_entry) {
case qu_state_t::registered:
// just updating the schedule?
update_job(scrub_job, suggested);
break;
case qu_state_t::not_registered:
// insertion under lock
{
std::unique_lock lck{jobs_lock};
if (state_at_entry != scrub_job->state) {
lck.unlock();
dout(5) << " scrub job state changed. Retrying." << dendl;
// retry
register_with_osd(scrub_job, suggested);
break;
}
update_job(scrub_job, suggested);
to_scrub.push_back(scrub_job);
scrub_job->in_queues = true;
scrub_job->state = qu_state_t::registered;
}
break;
case qu_state_t::unregistering:
// restore to the to_sched queue
{
// must be under lock, as the job might be removed from the queue
// at any minute
std::lock_guard lck{jobs_lock};
update_job(scrub_job, suggested);
if (scrub_job->state == qu_state_t::not_registered) {
dout(5) << " scrub job state changed to 'not registered'" << dendl;
to_scrub.push_back(scrub_job);
}
scrub_job->in_queues = true;
scrub_job->state = qu_state_t::registered;
}
break;
}
dout(10) << fmt::format(
"pg[{}] sched-state changed from <{:.14}> to <{:.14}> (@{:s})",
scrub_job->pgid, state_at_entry, scrub_job->state.load(),
scrub_job->schedule.scheduled_at)
<< dendl;
}
// look mommy - no locks!
void ScrubQueue::update_job(ScrubJobRef scrub_job,
const ScrubQueue::sched_params_t& suggested)
{
// adjust the suggested scrub time according to OSD-wide status
auto adjusted = adjust_target_time(suggested);
scrub_job->update_schedule(adjusted);
}
ScrubQueue::sched_params_t ScrubQueue::determine_scrub_time(
const requested_scrub_t& request_flags,
const pg_info_t& pg_info,
const pool_opts_t& pool_conf) const
{
ScrubQueue::sched_params_t res;
if (request_flags.must_scrub || request_flags.need_auto) {
// Set the smallest time that isn't utime_t()
res.proposed_time = PgScrubber::scrub_must_stamp();
res.is_must = ScrubQueue::must_scrub_t::mandatory;
// we do not need the interval data in this case
} else if (pg_info.stats.stats_invalid && conf()->osd_scrub_invalid_stats) {
res.proposed_time = time_now();
res.is_must = ScrubQueue::must_scrub_t::mandatory;
} else {
res.proposed_time = pg_info.history.last_scrub_stamp;
res.min_interval = pool_conf.value_or(pool_opts_t::SCRUB_MIN_INTERVAL, 0.0);
res.max_interval = pool_conf.value_or(pool_opts_t::SCRUB_MAX_INTERVAL, 0.0);
}
dout(15) << fmt::format(
"suggested: {:s} hist: {:s} v:{}/{} must:{} pool-min:{} {}",
res.proposed_time, pg_info.history.last_scrub_stamp,
(bool)pg_info.stats.stats_invalid,
conf()->osd_scrub_invalid_stats,
(res.is_must == must_scrub_t::mandatory ? "y" : "n"),
res.min_interval, request_flags)
<< dendl;
return res;
}
// used under jobs_lock
void ScrubQueue::move_failed_pgs(utime_t now_is)
{
int punished_cnt{0}; // for log/debug only
for (auto job = to_scrub.begin(); job != to_scrub.end();) {
if ((*job)->resources_failure) {
auto sjob = *job;
// last time it was scheduled for a scrub, this PG failed in securing
// remote resources. Move it to the secondary scrub queue.
dout(15) << "moving " << sjob->pgid
<< " state: " << ScrubQueue::qu_state_text(sjob->state) << dendl;
// determine the penalty time, after which the job should be reinstated
utime_t after = now_is;
after += conf()->osd_scrub_sleep * 2 + utime_t{300'000ms};
// note: currently - not taking 'deadline' into account when determining
// 'penalty_timeout'.
sjob->penalty_timeout = after;
sjob->resources_failure = false;
sjob->updated = false; // as otherwise will be pardoned immediately
// place in the penalty list, and remove from the to-scrub group
penalized.push_back(sjob);
job = to_scrub.erase(job);
punished_cnt++;
} else {
job++;
}
}
if (punished_cnt) {
dout(15) << "# of jobs penalized: " << punished_cnt << dendl;
}
}
// clang-format off
/*
* Implementation note:
* Clang (10 & 11) produces here efficient table-based code, comparable to using
* a direct index into an array of strings.
* Gcc (11, trunk) is almost as efficient.
*/
std::string_view ScrubQueue::attempt_res_text(Scrub::schedule_result_t v)
{
switch (v) {
case Scrub::schedule_result_t::scrub_initiated: return "scrubbing"sv;
case Scrub::schedule_result_t::none_ready: return "no ready job"sv;
case Scrub::schedule_result_t::no_local_resources: return "local resources shortage"sv;
case Scrub::schedule_result_t::already_started: return "denied as already started"sv;
case Scrub::schedule_result_t::no_such_pg: return "pg not found"sv;
case Scrub::schedule_result_t::bad_pg_state: return "prevented by pg state"sv;
case Scrub::schedule_result_t::preconditions: return "preconditions not met"sv;
}
// g++ (unlike CLANG), requires an extra 'return' here
return "(unknown)"sv;
}
std::string_view ScrubQueue::qu_state_text(qu_state_t st)
{
switch (st) {
case qu_state_t::not_registered: return "not registered w/ OSD"sv;
case qu_state_t::registered: return "registered"sv;
case qu_state_t::unregistering: return "unregistering"sv;
}
// g++ (unlike CLANG), requires an extra 'return' here
return "(unknown)"sv;
}
// clang-format on
/**
* a note regarding 'to_scrub_copy':
* 'to_scrub_copy' is a sorted set of all the ripe jobs from to_copy.
* As we usually expect to refer to only the first job in this set, we could
* consider an alternative implementation:
* - have collect_ripe_jobs() return the copied set without sorting it;
* - loop, performing:
* - use std::min_element() to find a candidate;
* - try that one. If not suitable, discard from 'to_scrub_copy'
*/
Scrub::schedule_result_t ScrubQueue::select_pg_and_scrub(
Scrub::ScrubPreconds& preconds)
{
dout(10) << " reg./pen. sizes: " << to_scrub.size() << " / "
<< penalized.size() << dendl;
utime_t now_is = time_now();
preconds.time_permit = scrub_time_permit(now_is);
preconds.load_is_low = scrub_load_below_threshold();
preconds.only_deadlined = !preconds.time_permit || !preconds.load_is_low;
  // create a list of candidates (copying, as otherwise we might create a deadlock):
// - possibly restore penalized
// - (if we didn't handle directly) remove invalid jobs
// - create a copy of the to_scrub (possibly up to first not-ripe)
  // - same for the penalized (although that will usually be a waste)
// unlock, then try the lists
std::unique_lock lck{jobs_lock};
// pardon all penalized jobs that have deadlined (or were updated)
scan_penalized(restore_penalized, now_is);
restore_penalized = false;
// remove the 'updated' flag from all entries
std::for_each(to_scrub.begin(),
to_scrub.end(),
[](const auto& jobref) -> void { jobref->updated = false; });
// add failed scrub attempts to the penalized list
move_failed_pgs(now_is);
// collect all valid & ripe jobs from the two lists. Note that we must copy,
// as when we use the lists we will not be holding jobs_lock (see
// explanation above)
auto to_scrub_copy = collect_ripe_jobs(to_scrub, now_is);
auto penalized_copy = collect_ripe_jobs(penalized, now_is);
lck.unlock();
// try the regular queue first
auto res = select_from_group(to_scrub_copy, preconds, now_is);
// in the sole scenario in which we've gone over all ripe jobs without success
// - we will try the penalized
if (res == Scrub::schedule_result_t::none_ready && !penalized_copy.empty()) {
res = select_from_group(penalized_copy, preconds, now_is);
dout(10) << "tried the penalized. Res: "
<< ScrubQueue::attempt_res_text(res) << dendl;
restore_penalized = true;
}
dout(15) << dendl;
return res;
}
// must be called under lock
void ScrubQueue::rm_unregistered_jobs(ScrubQContainer& group)
{
std::for_each(group.begin(), group.end(), [](auto& job) {
if (job->state == qu_state_t::unregistering) {
job->in_queues = false;
job->state = qu_state_t::not_registered;
} else if (job->state == qu_state_t::not_registered) {
job->in_queues = false;
}
});
group.erase(std::remove_if(group.begin(), group.end(), invalid_state),
group.end());
}
namespace {
struct cmp_sched_time_t {
bool operator()(const ScrubQueue::ScrubJobRef& lhs,
const ScrubQueue::ScrubJobRef& rhs) const
{
return lhs->schedule.scheduled_at < rhs->schedule.scheduled_at;
}
};
} // namespace
// called under lock
ScrubQueue::ScrubQContainer ScrubQueue::collect_ripe_jobs(
ScrubQContainer& group,
utime_t time_now)
{
rm_unregistered_jobs(group);
// copy ripe jobs
ScrubQueue::ScrubQContainer ripes;
ripes.reserve(group.size());
std::copy_if(group.begin(),
group.end(),
std::back_inserter(ripes),
[time_now](const auto& jobref) -> bool {
return jobref->schedule.scheduled_at <= time_now;
});
std::sort(ripes.begin(), ripes.end(), cmp_sched_time_t{});
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
for (const auto& jobref : group) {
if (jobref->schedule.scheduled_at > time_now) {
dout(20) << " not ripe: " << jobref->pgid << " @ "
<< jobref->schedule.scheduled_at << dendl;
}
}
}
return ripes;
}
// not holding jobs_lock. 'group' is a copy of the actual list.
Scrub::schedule_result_t ScrubQueue::select_from_group(
ScrubQContainer& group,
const Scrub::ScrubPreconds& preconds,
utime_t now_is)
{
dout(15) << "jobs #: " << group.size() << dendl;
for (auto& candidate : group) {
// we expect the first job in the list to be a good candidate (if any)
dout(20) << "try initiating scrub for " << candidate->pgid << dendl;
if (preconds.only_deadlined && (candidate->schedule.deadline.is_zero() ||
candidate->schedule.deadline >= now_is)) {
dout(15) << " not scheduling scrub for " << candidate->pgid << " due to "
<< (preconds.time_permit ? "high load" : "time not permitting")
<< dendl;
continue;
}
// we have a candidate to scrub. We turn to the OSD to verify that the PG
// configuration allows the specified type of scrub, and to initiate the
// scrub.
switch (
osd_service.initiate_a_scrub(candidate->pgid,
preconds.allow_requested_repair_only)) {
case Scrub::schedule_result_t::scrub_initiated:
// the happy path. We are done
dout(20) << " initiated for " << candidate->pgid << dendl;
return Scrub::schedule_result_t::scrub_initiated;
case Scrub::schedule_result_t::already_started:
case Scrub::schedule_result_t::preconditions:
case Scrub::schedule_result_t::bad_pg_state:
// continue with the next job
dout(20) << "failed (state/cond/started) " << candidate->pgid << dendl;
break;
case Scrub::schedule_result_t::no_such_pg:
// The pg is no longer there
dout(20) << "failed (no pg) " << candidate->pgid << dendl;
break;
case Scrub::schedule_result_t::no_local_resources:
// failure to secure local resources. No point in trying the other
// PGs at this time. Note that this is not the same as replica resources
// failure!
dout(20) << "failed (local) " << candidate->pgid << dendl;
return Scrub::schedule_result_t::no_local_resources;
case Scrub::schedule_result_t::none_ready:
// can't happen. Just for the compiler.
dout(5) << "failed !!! " << candidate->pgid << dendl;
return Scrub::schedule_result_t::none_ready;
}
}
dout(20) << " returning 'none ready'" << dendl;
return Scrub::schedule_result_t::none_ready;
}
ScrubQueue::scrub_schedule_t ScrubQueue::adjust_target_time(
const sched_params_t& times) const
{
ScrubQueue::scrub_schedule_t sched_n_dead{
times.proposed_time, times.proposed_time};
if (times.is_must == ScrubQueue::must_scrub_t::not_mandatory) {
// unless explicitly requested, postpone the scrub with a random delay
double scrub_min_interval = times.min_interval > 0
? times.min_interval
: conf()->osd_scrub_min_interval;
double scrub_max_interval = times.max_interval > 0
? times.max_interval
: conf()->osd_scrub_max_interval;
sched_n_dead.scheduled_at += scrub_min_interval;
double r = rand() / (double)RAND_MAX;
sched_n_dead.scheduled_at +=
scrub_min_interval * conf()->osd_scrub_interval_randomize_ratio * r;
if (scrub_max_interval <= 0) {
sched_n_dead.deadline = utime_t{};
} else {
sched_n_dead.deadline += scrub_max_interval;
}
// note: no specific job can be named in the log message
dout(20) << fmt::format(
"not-must. Was:{:s} {{min:{}/{} max:{}/{} ratio:{}}} "
"Adjusted:{:s} ({:s})",
times.proposed_time, fmt::group_digits(times.min_interval),
fmt::group_digits(conf()->osd_scrub_min_interval),
fmt::group_digits(times.max_interval),
fmt::group_digits(conf()->osd_scrub_max_interval),
conf()->osd_scrub_interval_randomize_ratio,
sched_n_dead.scheduled_at, sched_n_dead.deadline)
<< dendl;
}
// else - no log needed. All relevant data will be logged by the caller
return sched_n_dead;
}
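// A worked example (the numbers are assumptions, chosen for illustration
// only): for a non-mandatory job with proposed_time=T, an effective min
// interval of 24h, a max interval of 7d and
// osd_scrub_interval_randomize_ratio=0.5, the adjusted schedule becomes
//   scheduled_at = T + 24h + r * 0.5 * 24h   (r uniform in [0,1])
// i.e. somewhere in [T+24h, T+36h], while deadline = T + 7d.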
std::chrono::milliseconds ScrubQueue::scrub_sleep_time(bool must_scrub) const
{
std::chrono::milliseconds regular_sleep_period{
uint64_t(std::max(0.0, conf()->osd_scrub_sleep) * 1000)};
if (must_scrub || scrub_time_permit(time_now())) {
return regular_sleep_period;
}
// relevant if scrubbing started during allowed time, but continued into
// forbidden hours
std::chrono::milliseconds extended_sleep{
uint64_t(std::max(0.0, conf()->osd_scrub_extended_sleep) * 1000)};
dout(20) << "w/ extended sleep (" << extended_sleep << ")" << dendl;
return std::max(extended_sleep, regular_sleep_period);
}
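// An illustrative scenario (the values are assumptions, not documented
// defaults): with osd_scrub_sleep=0.1 the regular pause between chunks is
// 100ms; if the scrub is not mandatory and has drifted outside the permitted
// hours, the larger of that value and osd_scrub_extended_sleep (in ms) is
// used instead.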
bool ScrubQueue::scrub_load_below_threshold() const
{
double loadavgs[3];
if (getloadavg(loadavgs, 3) != 3) {
dout(10) << __func__ << " couldn't read loadavgs\n" << dendl;
return false;
}
// allow scrub if below configured threshold
long cpus = sysconf(_SC_NPROCESSORS_ONLN);
double loadavg_per_cpu = cpus > 0 ? loadavgs[0] / cpus : loadavgs[0];
if (loadavg_per_cpu < conf()->osd_scrub_load_threshold) {
dout(20) << "loadavg per cpu " << loadavg_per_cpu << " < max "
<< conf()->osd_scrub_load_threshold << " = yes" << dendl;
return true;
}
// allow scrub if below daily avg and currently decreasing
if (loadavgs[0] < daily_loadavg && loadavgs[0] < loadavgs[2]) {
dout(20) << "loadavg " << loadavgs[0] << " < daily_loadavg "
<< daily_loadavg << " and < 15m avg " << loadavgs[2] << " = yes"
<< dendl;
return true;
}
dout(20) << "loadavg " << loadavgs[0] << " >= max "
<< conf()->osd_scrub_load_threshold << " and ( >= daily_loadavg "
<< daily_loadavg << " or >= 15m avg " << loadavgs[2] << ") = no"
<< dendl;
return false;
}
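// An illustrative scenario (the numbers are assumptions, not defaults): on a
// host with 8 online CPUs and osd_scrub_load_threshold=0.5, a 1-minute
// loadavg of 3.2 gives 0.4 per CPU (< 0.5), so scrubbing is allowed by the
// first test; with a loadavg of 6.0 (0.75 per CPU) the decision depends on
// the daily / 15-minute comparisons instead.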
// note: called with jobs_lock held
void ScrubQueue::scan_penalized(bool forgive_all, utime_t time_now)
{
dout(20) << time_now << (forgive_all ? " all " : " - ") << penalized.size()
<< dendl;
// clear dead entries (deleted PGs, or those PGs we are no longer their
// primary)
rm_unregistered_jobs(penalized);
if (forgive_all) {
std::copy(penalized.begin(), penalized.end(), std::back_inserter(to_scrub));
penalized.clear();
} else {
auto forgiven_last = std::partition(
penalized.begin(),
penalized.end(),
[time_now](const auto& e) {
return (*e).updated || ((*e).penalty_timeout <= time_now);
});
std::copy(penalized.begin(), forgiven_last, std::back_inserter(to_scrub));
penalized.erase(penalized.begin(), forgiven_last);
dout(20) << "penalized after screening: " << penalized.size() << dendl;
}
}
// checks for half-closed ranges. Modify the (p < till) to '<=' to check for
// closed.
static inline bool isbetween_modulo(int64_t from, int64_t till, int p)
{
// the 1st condition is because we have defined from==till as "always true"
return (till == from) || ((till >= from) ^ (p >= from) ^ (p < till));
}
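// Worked examples (added for illustration; the hour values are arbitrary),
// matching how scrub_time_permit() below applies this helper to
// osd_scrub_begin/end_hour:
//   isbetween_modulo(23, 6, 23) == true   (start of a wrapped-around window)
//   isbetween_modulo(23, 6, 2)  == true   (still inside the window)
//   isbetween_modulo(23, 6, 12) == false  (outside the window)
//   isbetween_modulo(5, 5, 12)  == true   (from == till means "always")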
bool ScrubQueue::scrub_time_permit(utime_t now) const
{
tm bdt;
time_t tt = now.sec();
localtime_r(&tt, &bdt);
bool day_permit = isbetween_modulo(conf()->osd_scrub_begin_week_day,
conf()->osd_scrub_end_week_day,
bdt.tm_wday);
if (!day_permit) {
dout(20) << "should run between week day "
<< conf()->osd_scrub_begin_week_day << " - "
<< conf()->osd_scrub_end_week_day << " now " << bdt.tm_wday
<< " - no" << dendl;
return false;
}
bool time_permit = isbetween_modulo(conf()->osd_scrub_begin_hour,
conf()->osd_scrub_end_hour,
bdt.tm_hour);
dout(20) << "should run between " << conf()->osd_scrub_begin_hour << " - "
<< conf()->osd_scrub_end_hour << " now (" << bdt.tm_hour
<< ") = " << (time_permit ? "yes" : "no") << dendl;
return time_permit;
}
void ScrubQueue::ScrubJob::dump(ceph::Formatter* f) const
{
f->open_object_section("scrub");
f->dump_stream("pgid") << pgid;
f->dump_stream("sched_time") << schedule.scheduled_at;
f->dump_stream("deadline") << schedule.deadline;
f->dump_bool("forced",
schedule.scheduled_at == PgScrubber::scrub_must_stamp());
f->close_section();
}
void ScrubQueue::dump_scrubs(ceph::Formatter* f) const
{
ceph_assert(f != nullptr);
std::lock_guard lck(jobs_lock);
f->open_array_section("scrubs");
std::for_each(to_scrub.cbegin(), to_scrub.cend(), [&f](const ScrubJobRef& j) {
j->dump(f);
});
std::for_each(penalized.cbegin(),
penalized.cend(),
[&f](const ScrubJobRef& j) { j->dump(f); });
f->close_section();
}
ScrubQueue::ScrubQContainer ScrubQueue::list_registered_jobs() const
{
ScrubQueue::ScrubQContainer all_jobs;
all_jobs.reserve(to_scrub.size() + penalized.size());
dout(20) << " size: " << all_jobs.capacity() << dendl;
std::lock_guard lck{jobs_lock};
std::copy_if(to_scrub.begin(),
to_scrub.end(),
std::back_inserter(all_jobs),
registered_job);
std::copy_if(penalized.begin(),
penalized.end(),
std::back_inserter(all_jobs),
registered_job);
return all_jobs;
}
// ////////////////////////////////////////////////////////////////////////// //
// ScrubQueue - scrub resource management
bool ScrubQueue::can_inc_scrubs() const
{
// consider removing the lock here. Caller already handles delayed
// inc_scrubs_local() failures
std::lock_guard lck{resource_lock};
if (scrubs_local + scrubs_remote < conf()->osd_max_scrubs) {
return true;
}
dout(20) << " == false. " << scrubs_local << " local + " << scrubs_remote
<< " remote >= max " << conf()->osd_max_scrubs << dendl;
return false;
}
bool ScrubQueue::inc_scrubs_local()
{
std::lock_guard lck{resource_lock};
if (scrubs_local + scrubs_remote < conf()->osd_max_scrubs) {
++scrubs_local;
return true;
}
dout(20) << ": " << scrubs_local << " local + " << scrubs_remote
<< " remote >= max " << conf()->osd_max_scrubs << dendl;
return false;
}
void ScrubQueue::dec_scrubs_local()
{
std::lock_guard lck{resource_lock};
dout(20) << ": " << scrubs_local << " -> " << (scrubs_local - 1) << " (max "
<< conf()->osd_max_scrubs << ", remote " << scrubs_remote << ")"
<< dendl;
--scrubs_local;
ceph_assert(scrubs_local >= 0);
}
bool ScrubQueue::inc_scrubs_remote()
{
std::lock_guard lck{resource_lock};
if (scrubs_local + scrubs_remote < conf()->osd_max_scrubs) {
dout(20) << ": " << scrubs_remote << " -> " << (scrubs_remote + 1)
<< " (max " << conf()->osd_max_scrubs << ", local "
<< scrubs_local << ")" << dendl;
++scrubs_remote;
return true;
}
dout(20) << ": " << scrubs_local << " local + " << scrubs_remote
<< " remote >= max " << conf()->osd_max_scrubs << dendl;
return false;
}
void ScrubQueue::dec_scrubs_remote()
{
std::lock_guard lck{resource_lock};
dout(20) << ": " << scrubs_remote << " -> " << (scrubs_remote - 1) << " (max "
<< conf()->osd_max_scrubs << ", local " << scrubs_local << ")"
<< dendl;
--scrubs_remote;
ceph_assert(scrubs_remote >= 0);
}
void ScrubQueue::dump_scrub_reservations(ceph::Formatter* f) const
{
std::lock_guard lck{resource_lock};
f->dump_int("scrubs_local", scrubs_local);
f->dump_int("scrubs_remote", scrubs_remote);
f->dump_int("osd_max_scrubs", conf()->osd_max_scrubs);
}
void ScrubQueue::clear_pg_scrub_blocked(spg_t blocked_pg)
{
dout(5) << fmt::format(": pg {} is unblocked", blocked_pg) << dendl;
--blocked_scrubs_cnt;
ceph_assert(blocked_scrubs_cnt >= 0);
}
void ScrubQueue::mark_pg_scrub_blocked(spg_t blocked_pg)
{
dout(5) << fmt::format(": pg {} is blocked on an object", blocked_pg)
<< dendl;
++blocked_scrubs_cnt;
}
int ScrubQueue::get_blocked_pgs_count() const
{
return blocked_scrubs_cnt;
}
| 25,816 | 30.445798 | 91 | cc |
null | ceph-main/src/osd/scrubber/osd_scrub_sched.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// clang-format off
/*
┌───────────────────────┐
│ OSD │
│ OSDService ─┼───┐
│ │ │
│ │ │
└───────────────────────┘ │ Owns & uses the following
│ ScrubQueue interfaces:
│
│
│ - resource management (*1)
│
│ - environment conditions (*2)
│
│ - scrub scheduling (*3)
│
│
│
│
│
│
ScrubQueue │
┌───────────────────────────▼────────────┐
│ │
│ │
│ ScrubQContainer to_scrub <>────────┼────────┐
│ ScrubQContainer penalized │ │
│ │ │
│ │ │
│ OSD_wide resource counters │ │
│ │ │
│ │ │
│ "env scrub conditions" monitoring │ │
│ │ │
│ │ │
│ │ │
│ │ │
└─▲──────────────────────────────────────┘ │
│ │
│ │
│uses interface <4> │
│ │
│ │
│ ┌──────────────────────────────────┘
│ │ shared ownership of jobs
│ │
│ ┌─────▼──────┐
│ │ScrubJob │
│ │ ├┐
│ │ ││
│ │ │┼┐
│ │ │┼│
└──────┤ │┼┤◄──────┐
│ │┼│ │
│ │┼│ │
│ │┼│ │
└┬───────────┼┼│ │shared ownership
└─┼┼┼┼┼┼┼┼┼┼┼┼│ │
└───────────┘ │
│
│
│
│
┌───────────────────────────────┼─┐
│ <>│
│PgScrubber │
│ │
│ │
│ │
│ │
│ │
└─────────────────────────────────┘
ScrubQueue interfaces (main functions):
<1> - OSD/PG resources management:
- can_inc_scrubs()
- {inc/dec}_scrubs_{local/remote}()
- dump_scrub_reservations()
- {set/clear/is}_reserving_now()
<2> - environment conditions:
- update_loadavg()
- scrub_load_below_threshold()
- scrub_time_permit()
<3> - scheduling scrubs:
- select_pg_and_scrub()
- dump_scrubs()
<4> - manipulating a job's state:
- register_with_osd()
- remove_from_osd_queue()
- update_job()
*/
// clang-format on
#include <atomic>
#include <chrono>
#include <memory>
#include <optional>
#include <vector>
#include "common/RefCountedObj.h"
#include "common/ceph_atomic.h"
#include "osd/osd_types.h"
#include "osd/scrubber_common.h"
#include "include/utime_fmt.h"
#include "osd/osd_types_fmt.h"
#include "utime.h"
class PG;
namespace Scrub {
using namespace ::std::literals;
// possible outcome when trying to select a PG and scrub it
enum class schedule_result_t {
scrub_initiated, // successfully started a scrub
none_ready, // no pg to scrub
no_local_resources, // failure to secure local OSD scrub resource
already_started, // failed, as already started scrubbing this pg
no_such_pg, // can't find this pg
bad_pg_state, // pg state (clean, active, etc.)
preconditions // time, configuration, etc.
};
// the OSD services provided to the scrub scheduler
class ScrubSchedListener {
public:
virtual int get_nodeid() const = 0; // returns the OSD number ('whoami')
/**
* A callback used by the ScrubQueue object to initiate a scrub on a specific
* PG.
*
 * The request might fail for multiple reasons, as ScrubQueue cannot on its
 * own check some of the PG-specific preconditions; those are checked here.
 * See the schedule_result_t definition.
 *
 * @return a Scrub::schedule_result_t detailing either a success, or the
 * failure reason.
*/
virtual schedule_result_t initiate_a_scrub(
spg_t pgid,
bool allow_requested_repair_only) = 0;
virtual ~ScrubSchedListener() {}
};
} // namespace Scrub
/**
* the queue of PGs waiting to be scrubbed.
* Main operations are scheduling/unscheduling a PG to be scrubbed at a certain
* time.
*
* A "penalty" queue maintains those PGs that have failed to reserve the
* resources of their replicas. The PGs in this list will be reinstated into the
* scrub queue when all eligible PGs were already handled, or after a timeout
* (or if their deadline has passed [[disabled at this time]]).
*/
class ScrubQueue {
public:
enum class must_scrub_t { not_mandatory, mandatory };
enum class qu_state_t {
not_registered, // not a primary, thus not considered for scrubbing by this
// OSD (also the temporary state when just created)
registered, // in either of the two queues ('to_scrub' or 'penalized')
unregistering // in the process of being unregistered. Will be finalized
// under lock
};
ScrubQueue(CephContext* cct, Scrub::ScrubSchedListener& osds);
virtual ~ScrubQueue() = default;
struct scrub_schedule_t {
utime_t scheduled_at{};
utime_t deadline{0, 0};
};
struct sched_params_t {
utime_t proposed_time{};
double min_interval{0.0};
double max_interval{0.0};
must_scrub_t is_must{ScrubQueue::must_scrub_t::not_mandatory};
};
struct ScrubJob final : public RefCountedObject {
/**
* a time scheduled for scrub, and a deadline: The scrub could be delayed
 * if system load is too high (but not if after the deadline), or if trying
* to scrub out of scrub hours.
*/
scrub_schedule_t schedule;
/// pg to be scrubbed
const spg_t pgid;
/// the OSD id (for the log)
const int whoami;
ceph::atomic<qu_state_t> state{qu_state_t::not_registered};
/**
* the old 'is_registered'. Set whenever the job is registered with the OSD,
* i.e. is in either the 'to_scrub' or the 'penalized' vectors.
*/
std::atomic_bool in_queues{false};
/// last scrub attempt failed to secure replica resources
bool resources_failure{false};
/**
* 'updated' is a temporary flag, used to create a barrier after
 * 'sched_time' and 'deadline' (or any other job entry) were modified by
 * a different task.
* 'updated' also signals the need to move a job back from the penalized
* queue to the regular one.
*/
std::atomic_bool updated{false};
/**
* the scrubber is waiting for locked objects to be unlocked.
* Set after a grace period has passed.
*/
bool blocked{false};
utime_t blocked_since{};
utime_t penalty_timeout{0, 0};
CephContext* cct;
ScrubJob(CephContext* cct, const spg_t& pg, int node_id);
utime_t get_sched_time() const { return schedule.scheduled_at; }
/**
* relatively low-cost(*) access to the scrub job's state, to be used in
* logging.
* (*) not a low-cost access on x64 architecture
*/
std::string_view state_desc() const
{
return ScrubQueue::qu_state_text(state.load(std::memory_order_relaxed));
}
void update_schedule(const ScrubQueue::scrub_schedule_t& adjusted);
void dump(ceph::Formatter* f) const;
/*
 * as the atomic 'in_queues' appears in many log prints, accessing it for
 * display-only purposes should be made less expensive (this matters on ARM;
 * on x86 a '_relaxed' load produces the same code as a '_seq_cst' one)
*/
std::string_view registration_state() const
{
return in_queues.load(std::memory_order_relaxed) ? "in-queue"
: "not-queued";
}
/**
* access the 'state' directly, for when a distinction between 'registered'
* and 'unregistering' is needed (both have in_queues() == true)
*/
bool is_state_registered() const { return state == qu_state_t::registered; }
/**
* a text description of the "scheduling intentions" of this PG:
* are we already scheduled for a scrub/deep scrub? when?
*/
std::string scheduling_state(utime_t now_is, bool is_deep_expected) const;
friend std::ostream& operator<<(std::ostream& out, const ScrubJob& pg);
};
friend class TestOSDScrub;
friend class ScrubSchedTestWrapper; ///< unit-tests structure
using ScrubJobRef = ceph::ref_t<ScrubJob>;
using ScrubQContainer = std::vector<ScrubJobRef>;
static std::string_view qu_state_text(qu_state_t st);
/**
* called periodically by the OSD to select the first scrub-eligible PG
* and scrub it.
*
* Selection is affected by:
* - time of day: scheduled scrubbing might be configured to only happen
* during certain hours;
* - same for days of the week, and for the system load;
*
* @param preconds: what types of scrub are allowed, given system status &
* config. Some of the preconditions are calculated here.
 * @return Scrub::schedule_result_t::scrub_initiated if a scrub session was
 * successfully initiated. Otherwise - the failure cause.
*
* locking: locks jobs_lock
*/
Scrub::schedule_result_t select_pg_and_scrub(Scrub::ScrubPreconds& preconds);
/**
 * Translate schedule_result_t values into readable text
*/
static std::string_view attempt_res_text(Scrub::schedule_result_t v);
/**
* remove the pg from set of PGs to be scanned for scrubbing.
* To be used if we are no longer the PG's primary, or if the PG is removed.
*/
void remove_from_osd_queue(ScrubJobRef sjob);
/**
* @return the list (not std::set!) of all scrub jobs registered
* (apart from PGs in the process of being removed)
*/
ScrubQContainer list_registered_jobs() const;
/**
* Add the scrub job to the list of jobs (i.e. list of PGs) to be periodically
* scrubbed by the OSD.
* The registration is active as long as the PG exists and the OSD is its
* primary.
*
* See update_job() for the handling of the 'suggested' parameter.
*
* locking: might lock jobs_lock
*/
void register_with_osd(ScrubJobRef sjob, const sched_params_t& suggested);
/**
* modify a scrub-job's scheduled time and deadline
*
* There are 3 argument combinations to consider:
* - 'must' is asserted, and the suggested time is 'scrub_must_stamp':
* the registration will be with "beginning of time" target, making the
 * scrub-job eligible for immediate scrub (given that external conditions
* do not prevent scrubbing)
*
* - 'must' is asserted, and the suggested time is 'now':
* This happens if our stats are unknown. The results are similar to the
* previous scenario.
*
* - not a 'must': we take the suggested time as a basis, and add to it some
* configuration / random delays.
*
* ('must' is sched_params_t.is_must)
*
* locking: not using the jobs_lock
*/
void update_job(ScrubJobRef sjob, const sched_params_t& suggested);
sched_params_t determine_scrub_time(const requested_scrub_t& request_flags,
const pg_info_t& pg_info,
const pool_opts_t& pool_conf) const;
public:
void dump_scrubs(ceph::Formatter* f) const;
/**
* No new scrub session will start while a scrub was initiated on a PG,
* and that PG is trying to acquire replica resources.
*/
void set_reserving_now() { a_pg_is_reserving = true; }
void clear_reserving_now() { a_pg_is_reserving = false; }
bool is_reserving_now() const { return a_pg_is_reserving; }
bool can_inc_scrubs() const;
bool inc_scrubs_local();
void dec_scrubs_local();
bool inc_scrubs_remote();
void dec_scrubs_remote();
void dump_scrub_reservations(ceph::Formatter* f) const;
/// counting the number of PGs stuck while scrubbing, waiting for objects
void mark_pg_scrub_blocked(spg_t blocked_pg);
void clear_pg_scrub_blocked(spg_t blocked_pg);
int get_blocked_pgs_count() const;
/**
* scrub_sleep_time
*
* Returns std::chrono::milliseconds indicating how long to wait between
* chunks.
*
 * Implementation Note: the returned value will be either osd_scrub_sleep or
 * osd_scrub_extended_sleep, depending on the 'must_scrub' parameter and the
 * time of day (see the osd_scrub_begin* config options)
*/
std::chrono::milliseconds scrub_sleep_time(bool must_scrub) const;
/**
* called every heartbeat to update the "daily" load average
*
* @returns a load value for the logger
*/
[[nodiscard]] std::optional<double> update_load_average();
private:
CephContext* cct;
Scrub::ScrubSchedListener& osd_service;
#ifdef WITH_SEASTAR
auto& conf() const { return local_conf(); }
#else
auto& conf() const { return cct->_conf; }
#endif
/**
* jobs_lock protects the job containers and the relevant scrub-jobs state
* variables. Specifically, the following are guaranteed:
* - 'in_queues' is asserted only if the job is in one of the queues;
* - a job will only be in state 'registered' if in one of the queues;
* - no job will be in the two queues simultaneously;
*
* Note that PG locks should not be acquired while holding jobs_lock.
*/
mutable ceph::mutex jobs_lock = ceph::make_mutex("ScrubQueue::jobs_lock");
ScrubQContainer to_scrub; ///< scrub jobs (i.e. PGs) to scrub
ScrubQContainer penalized; ///< those that failed to reserve remote resources
bool restore_penalized{false};
double daily_loadavg{0.0};
static inline constexpr auto registered_job = [](const auto& jobref) -> bool {
return jobref->state == qu_state_t::registered;
};
static inline constexpr auto invalid_state = [](const auto& jobref) -> bool {
return jobref->state == qu_state_t::not_registered;
};
/**
* Are there scrub jobs that should be reinstated?
*/
void scan_penalized(bool forgive_all, utime_t time_now);
/**
* clear dead entries (unregistered, or belonging to removed PGs) from a
* queue. Job state is changed to match new status.
*/
void rm_unregistered_jobs(ScrubQContainer& group);
/**
* the set of all scrub jobs in 'group' which are ready to be scrubbed
* (ready = their scheduled time has passed).
* The scrub jobs in the new collection are sorted according to
* their scheduled time.
*
* Note that the returned container holds independent refs to the
* scrub jobs.
*/
ScrubQContainer collect_ripe_jobs(ScrubQContainer& group, utime_t time_now);
/// scrub resources management lock (guarding scrubs_local & scrubs_remote)
mutable ceph::mutex resource_lock =
ceph::make_mutex("ScrubQueue::resource_lock");
/// the counters used to manage scrub activity parallelism:
int scrubs_local{0};
int scrubs_remote{0};
/**
* The scrubbing of PGs might be delayed if the scrubbed chunk of objects is
* locked by some other operation. A bug might cause this to be an infinite
 * delay. If that happens, the OSD's "scrub resources" (i.e. the
* counters that limit the number of concurrent scrub operations) might
* be exhausted.
* We do issue a cluster-log warning in such occasions, but that message is
* easy to miss. The 'some pg is blocked' global flag is used to note the
* existence of such a situation in the scrub-queue log messages.
*/
std::atomic_int_fast16_t blocked_scrubs_cnt{0};
std::atomic_bool a_pg_is_reserving{false};
[[nodiscard]] bool scrub_load_below_threshold() const;
[[nodiscard]] bool scrub_time_permit(utime_t now) const;
/**
* If the scrub job was not explicitly requested, we postpone it by some
* random length of time.
* And if delaying the scrub - we calculate, based on pool parameters, a
* deadline we should scrub before.
*
* @return a pair of values: the determined scrub time, and the deadline
*/
scrub_schedule_t adjust_target_time(
const sched_params_t& recomputed_params) const;
/**
* Look for scrub jobs that have their 'resources_failure' set. These jobs
* have failed to acquire remote resources last time we've initiated a scrub
* session on them. They are now moved from the 'to_scrub' queue to the
* 'penalized' set.
*
* locking: called with job_lock held
*/
void move_failed_pgs(utime_t now_is);
Scrub::schedule_result_t select_from_group(
ScrubQContainer& group,
const Scrub::ScrubPreconds& preconds,
utime_t now_is);
protected: // used by the unit-tests
/**
* unit-tests will override this function to return a mock time
*/
virtual utime_t time_now() const { return ceph_clock_now(); }
};
template <>
struct fmt::formatter<ScrubQueue::qu_state_t>
: fmt::formatter<std::string_view> {
template <typename FormatContext>
auto format(const ScrubQueue::qu_state_t& s, FormatContext& ctx)
{
auto out = ctx.out();
out = fmt::formatter<string_view>::format(
std::string{ScrubQueue::qu_state_text(s)}, ctx);
return out;
}
};
template <>
struct fmt::formatter<ScrubQueue::ScrubJob> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const ScrubQueue::ScrubJob& sjob, FormatContext& ctx)
{
return fmt::format_to(
ctx.out(),
"pg[{}] @ {:s} (dl:{:s}) - <{}> / failure: {} / pen. t.o.: {:s} / queue "
"state: {:.7}",
sjob.pgid, sjob.schedule.scheduled_at, sjob.schedule.deadline,
sjob.registration_state(), sjob.resources_failure, sjob.penalty_timeout,
sjob.state.load(std::memory_order_relaxed));
}
};
| 18,331 | 31.677362 | 80 | h |
null | ceph-main/src/osd/scrubber/pg_scrubber.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=2 sw=2 smarttab
#include "./pg_scrubber.h" // '.' notation used to affect clang-format order
#include <cmath>
#include <iostream>
#include <vector>
#include <fmt/ranges.h>
#include "debug.h"
#include "common/ceph_time.h"
#include "common/errno.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDRepScrub.h"
#include "messages/MOSDRepScrubMap.h"
#include "messages/MOSDScrubReserve.h"
#include "osd/OSD.h"
#include "osd/PG.h"
#include "include/utime_fmt.h"
#include "osd/osd_types_fmt.h"
#include "ScrubStore.h"
#include "scrub_backend.h"
#include "scrub_machine.h"
using std::list;
using std::pair;
using std::stringstream;
using std::vector;
using namespace Scrub;
using namespace std::chrono;
using namespace std::chrono_literals;
using namespace std::literals;
#define dout_context (m_osds->cct)
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix _prefix(_dout, this)
template <class T>
static ostream& _prefix(std::ostream* _dout, T* t)
{
return t->gen_prefix(*_dout);
}
ostream& operator<<(ostream& out, const scrub_flags_t& sf)
{
if (sf.auto_repair)
out << " AUTO_REPAIR";
if (sf.check_repair)
out << " CHECK_REPAIR";
if (sf.deep_scrub_on_error)
out << " DEEP_SCRUB_ON_ERROR";
if (sf.required)
out << " REQ_SCRUB";
return out;
}
ostream& operator<<(ostream& out, const requested_scrub_t& sf)
{
if (sf.must_repair)
out << " MUST_REPAIR";
if (sf.auto_repair)
out << " planned AUTO_REPAIR";
if (sf.check_repair)
out << " planned CHECK_REPAIR";
if (sf.deep_scrub_on_error)
out << " planned DEEP_SCRUB_ON_ERROR";
if (sf.must_deep_scrub)
out << " MUST_DEEP_SCRUB";
if (sf.must_scrub)
out << " MUST_SCRUB";
if (sf.time_for_deep)
out << " TIME_FOR_DEEP";
if (sf.need_auto)
out << " NEED_AUTO";
if (sf.req_scrub)
out << " planned REQ_SCRUB";
return out;
}
/*
* if the incoming message is from a previous interval, it must mean
* PrimaryLogPG::on_change() was called when that interval ended. We can safely
* discard the stale message.
*/
bool PgScrubber::check_interval(epoch_t epoch_to_verify) const
{
return epoch_to_verify >= m_pg->get_same_interval_since();
}
bool PgScrubber::should_drop_message(OpRequestRef &op) const
{
if (check_interval(op->sent_epoch)) {
return false;
} else {
dout(10) << __func__ << ": discarding message " << *(op->get_req())
<< " from prior interval, epoch " << op->sent_epoch
<< ". Current history.same_interval_since: "
<< m_pg->info.history.same_interval_since << dendl;
return true;
}
}
bool PgScrubber::is_message_relevant(epoch_t epoch_to_verify)
{
if (!m_active) {
// not scrubbing. We can assume that the scrub was already terminated, and
// we can silently discard the incoming event.
return false;
}
// is this a message from before we started this scrub?
if (epoch_to_verify < m_epoch_start) {
return false;
}
// has a new interval started?
if (!check_interval(epoch_to_verify)) {
// if this is a new interval, on_change() has already terminated that
// old scrub.
return false;
}
ceph_assert(is_primary());
// were we instructed to abort?
return verify_against_abort(epoch_to_verify);
}
bool PgScrubber::verify_against_abort(epoch_t epoch_to_verify)
{
if (!should_abort()) {
return true;
}
dout(10) << __func__ << " aborting. incoming epoch: " << epoch_to_verify
<< " vs last-aborted: " << m_last_aborted << dendl;
// if we were not aware of the abort before - kill the scrub.
if (epoch_to_verify >= m_last_aborted) {
scrub_clear_state();
m_last_aborted = std::max(epoch_to_verify, m_epoch_start);
}
return false;
}
bool PgScrubber::should_abort() const
{
// note that set_op_parameters() guarantees that we would never have
// must_scrub set (i.e. possibly have started a scrub even though noscrub
// was set), without having 'required' also set.
if (m_flags.required) {
return false; // not stopping 'required' scrubs for configuration changes
}
// note: deep scrubs are allowed even if 'no-scrub' is set (but not
// 'no-deepscrub')
if (m_is_deep) {
if (get_osdmap()->test_flag(CEPH_OSDMAP_NODEEP_SCRUB) ||
m_pg->pool.info.has_flag(pg_pool_t::FLAG_NODEEP_SCRUB)) {
dout(10) << "nodeep_scrub set, aborting" << dendl;
return true;
}
} else if (get_osdmap()->test_flag(CEPH_OSDMAP_NOSCRUB) ||
m_pg->pool.info.has_flag(pg_pool_t::FLAG_NOSCRUB)) {
dout(10) << "noscrub set, aborting" << dendl;
return true;
}
return false;
}
// initiating state-machine events --------------------------------
/*
* a note re the checks performed before sending scrub-initiating messages:
*
* For those ('StartScrub', 'AfterRepairScrub') scrub-initiation messages that
* possibly were in the queue while the PG changed state and became unavailable
* for scrubbing:
*
* The check_interval() catches all major changes to the PG. As for the other
* conditions we may check (and see is_message_relevant() above):
*
* - we are not 'active' yet, so must not check against is_active(), and:
*
* - the 'abort' flags were just verified (when the triggering message was
* queued). As those are only modified in human speeds - they need not be
* queried again.
*
* Some of the considerations above are also relevant to the replica-side
* initiation
* ('StartReplica' & 'StartReplicaNoWait').
*/
void PgScrubber::initiate_regular_scrub(epoch_t epoch_queued)
{
dout(15) << __func__ << " epoch: " << epoch_queued << dendl;
// we may have lost our Primary status while the message languished in the
// queue
if (check_interval(epoch_queued)) {
dout(10) << "scrubber event -->> StartScrub epoch: " << epoch_queued
<< dendl;
reset_epoch(epoch_queued);
m_fsm->process_event(StartScrub{});
dout(10) << "scrubber event --<< StartScrub" << dendl;
} else {
clear_queued_or_active(); // also restarts snap trimming
}
}
void PgScrubber::dec_scrubs_remote()
{
m_osds->get_scrub_services().dec_scrubs_remote();
}
void PgScrubber::advance_token()
{
m_current_token++;
}
void PgScrubber::initiate_scrub_after_repair(epoch_t epoch_queued)
{
dout(15) << __func__ << " epoch: " << epoch_queued << dendl;
// we may have lost our Primary status while the message languished in the
// queue
if (check_interval(epoch_queued)) {
dout(10) << "scrubber event -->> AfterRepairScrub epoch: " << epoch_queued
<< dendl;
reset_epoch(epoch_queued);
m_fsm->process_event(AfterRepairScrub{});
dout(10) << "scrubber event --<< AfterRepairScrub" << dendl;
} else {
clear_queued_or_active(); // also restarts snap trimming
}
}
void PgScrubber::send_scrub_unblock(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(Unblocked{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_scrub_resched(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(InternalSchedScrub{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_start_replica(epoch_t epoch_queued,
Scrub::act_token_t token)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< " token: " << token << dendl;
if (is_primary()) {
// shouldn't happen. Ignore
dout(1) << "got a replica scrub request while Primary!" << dendl;
return;
}
if (check_interval(epoch_queued) && is_token_current(token)) {
// save us some time by not waiting for updates if there are none
// to wait for. Affects the transition from NotActive into either
// ReplicaWaitUpdates or ActiveReplica.
if (pending_active_pushes())
m_fsm->process_event(StartReplica{});
else
m_fsm->process_event(StartReplicaNoWait{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_sched_replica(epoch_t epoch_queued,
Scrub::act_token_t token)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< " token: " << token << dendl;
if (check_interval(epoch_queued) && is_token_current(token)) {
m_fsm->process_event(SchedReplica{}); // retest for map availability
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::active_pushes_notification(epoch_t epoch_queued)
{
// note: Primary only
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(ActivePushesUpd{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::update_applied_notification(epoch_t epoch_queued)
{
// note: Primary only
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(UpdatesApplied{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::digest_update_notification(epoch_t epoch_queued)
{
// note: Primary only
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(DigestUpdate{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_local_map_done(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(Scrub::IntLocalMapDone{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_replica_maps_ready(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(GotReplicas{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_replica_pushes_upd(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (check_interval(epoch_queued)) {
m_fsm->process_event(ReplicaPushesUpd{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_remotes_reserved(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
// note: scrub is not active yet
if (check_interval(epoch_queued)) {
m_fsm->process_event(RemotesReserved{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_reservation_failure(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (check_interval(epoch_queued)) { // do not check for 'active'!
m_fsm->process_event(ReservationFailure{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_chunk_free(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (check_interval(epoch_queued)) {
m_fsm->process_event(Scrub::SelectedChunkFree{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_chunk_busy(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (check_interval(epoch_queued)) {
m_fsm->process_event(Scrub::ChunkIsBusy{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_get_next_chunk(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
if (is_message_relevant(epoch_queued)) {
m_fsm->process_event(Scrub::NextChunk{});
}
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
void PgScrubber::send_scrub_is_finished(epoch_t epoch_queued)
{
dout(10) << "scrubber event -->> " << __func__ << " epoch: " << epoch_queued
<< dendl;
// can't check for "active"
m_fsm->process_event(Scrub::ScrubFinished{});
dout(10) << "scrubber event --<< " << __func__ << dendl;
}
// -----------------
bool PgScrubber::is_reserving() const
{
return m_fsm->is_reserving();
}
void PgScrubber::reset_epoch(epoch_t epoch_queued)
{
dout(10) << __func__ << " state deep? " << state_test(PG_STATE_DEEP_SCRUB)
<< dendl;
m_fsm->assert_not_active();
m_epoch_start = epoch_queued;
ceph_assert(m_is_deep == state_test(PG_STATE_DEEP_SCRUB));
update_op_mode_text();
}
unsigned int PgScrubber::scrub_requeue_priority(
Scrub::scrub_prio_t with_priority) const
{
unsigned int qu_priority = m_flags.priority;
if (with_priority == Scrub::scrub_prio_t::high_priority) {
qu_priority =
std::max(qu_priority,
(unsigned int)m_pg->get_cct()->_conf->osd_client_op_priority);
}
return qu_priority;
}
unsigned int PgScrubber::scrub_requeue_priority(
Scrub::scrub_prio_t with_priority,
unsigned int suggested_priority) const
{
if (with_priority == Scrub::scrub_prio_t::high_priority) {
suggested_priority =
std::max(suggested_priority,
(unsigned int)m_pg->get_cct()->_conf->osd_client_op_priority);
}
return suggested_priority;
}
// ///////////////////////////////////////////////////////////////////// //
// scrub-op registration handling
/* on_new_interval
*
 * Responsible for resetting any scrub state and releasing any resources.
 * Any in-flight events will be ignored via check_interval/should_drop_message
* or canceled.
*/
void PgScrubber::on_new_interval()
{
dout(10) << fmt::format(
"{}: current role:{} active?{} q/a:{}", __func__,
(is_primary() ? "Primary" : "Replica/other"),
is_scrub_active(), is_queued_or_active())
<< dendl;
m_fsm->process_event(FullReset{});
// we may be the primary
if (is_queued_or_active()) {
clear_pgscrub_state();
}
rm_from_osd_scrubbing();
}
bool PgScrubber::is_scrub_registered() const
{
return m_scrub_job && m_scrub_job->in_queues;
}
std::string_view PgScrubber::registration_state() const
{
if (m_scrub_job) {
return m_scrub_job->registration_state();
}
return "(no sched job)"sv;
}
void PgScrubber::rm_from_osd_scrubbing()
{
if (m_scrub_job && m_scrub_job->is_state_registered()) {
dout(15) << fmt::format(
"{}: prev. state: {}", __func__, registration_state())
<< dendl;
m_osds->get_scrub_services().remove_from_osd_queue(m_scrub_job);
}
}
void PgScrubber::on_pg_activate(const requested_scrub_t& request_flags)
{
ceph_assert(is_primary());
if (!m_scrub_job) {
// we won't have a chance to see more logs from this function, thus:
dout(2) << fmt::format(
"{}: flags:<{}> {}.Reg-state:{:.7}. No scrub-job", __func__,
request_flags, (is_primary() ? "Primary" : "Replica/other"),
registration_state())
<< dendl;
return;
}
ceph_assert(!is_queued_or_active());
auto pre_state = m_scrub_job->state_desc();
auto pre_reg = registration_state();
auto suggested = m_osds->get_scrub_services().determine_scrub_time(
request_flags, m_pg->info, m_pg->get_pgpool().info.opts);
m_osds->get_scrub_services().register_with_osd(m_scrub_job, suggested);
dout(10) << fmt::format(
"{}: <flags:{}> {} <{:.5}>&<{:.10}> --> <{:.5}>&<{:.14}>",
__func__, request_flags,
(is_primary() ? "Primary" : "Replica/other"), pre_reg,
pre_state, registration_state(), m_scrub_job->state_desc())
<< dendl;
}
/*
* A note re the call to publish_stats_to_osd() below:
* - we are called from either request_rescrubbing() or scrub_requested().
* - in both cases - the schedule was modified, and needs to be published;
* - we are a Primary.
* - in the 1st case - the call is made as part of scrub_finish(), which
* guarantees that the PG is locked and the interval is still the same.
* - in the 2nd case - we know the PG state and we know we are only called
* for a Primary.
*/
void PgScrubber::update_scrub_job(const requested_scrub_t& request_flags)
{
dout(10) << fmt::format("{}: flags:<{}>", __func__, request_flags) << dendl;
// verify that the 'in_q' status matches our "Primariority"
if (m_scrub_job && is_primary() && !m_scrub_job->in_queues) {
dout(1) << __func__ << " !!! primary but not scheduled! " << dendl;
}
if (is_primary() && m_scrub_job) {
ceph_assert(m_pg->is_locked());
auto suggested = m_osds->get_scrub_services().determine_scrub_time(
request_flags, m_pg->info, m_pg->get_pgpool().info.opts);
m_osds->get_scrub_services().update_job(m_scrub_job, suggested);
m_pg->publish_stats_to_osd();
}
dout(15) << __func__ << ": done " << registration_state() << dendl;
}
void PgScrubber::scrub_requested(scrub_level_t scrub_level,
scrub_type_t scrub_type,
requested_scrub_t& req_flags)
{
dout(10) << __func__
<< (scrub_level == scrub_level_t::deep ? " deep " : " shallow ")
<< (scrub_type == scrub_type_t::do_repair ? " repair-scrub "
: " not-repair ")
<< " prev stamp: " << m_scrub_job->get_sched_time()
<< " registered? " << registration_state() << dendl;
req_flags.must_scrub = true;
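  // note: a repair request always implies a deep scrub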
req_flags.must_deep_scrub = (scrub_level == scrub_level_t::deep) ||
(scrub_type == scrub_type_t::do_repair);
req_flags.must_repair = (scrub_type == scrub_type_t::do_repair);
// User might intervene, so clear this
req_flags.need_auto = false;
req_flags.req_scrub = true;
dout(20) << __func__ << " pg(" << m_pg_id << ") planned:" << req_flags
<< dendl;
update_scrub_job(req_flags);
}
void PgScrubber::request_rescrubbing(requested_scrub_t& request_flags)
{
dout(10) << __func__ << " flags: " << request_flags << dendl;
request_flags.need_auto = true;
update_scrub_job(request_flags);
}
bool PgScrubber::reserve_local()
{
// try to create the reservation object (which translates into asking the
// OSD for the local scrub resource). If failing - undo it immediately
m_local_osd_resource.emplace(m_osds);
if (m_local_osd_resource->is_reserved()) {
dout(15) << __func__ << ": local resources reserved" << dendl;
return true;
}
dout(10) << __func__ << ": failed to reserve local scrub resources" << dendl;
m_local_osd_resource.reset();
return false;
}
// ----------------------------------------------------------------------------
bool PgScrubber::has_pg_marked_new_updates() const
{
auto last_applied = m_pg->recovery_state.get_last_update_applied();
dout(10) << __func__ << " recovery last: " << last_applied
<< " vs. scrub's: " << m_subset_last_update << dendl;
return last_applied >= m_subset_last_update;
}
void PgScrubber::set_subset_last_update(eversion_t e)
{
m_subset_last_update = e;
dout(15) << __func__ << " last-update: " << e << dendl;
}
void PgScrubber::on_applied_when_primary(const eversion_t& applied_version)
{
// we are only interested in updates if we are the Primary, and in state
// WaitLastUpdate
if (m_fsm->is_accepting_updates() &&
(applied_version >= m_subset_last_update)) {
m_osds->queue_scrub_applied_update(m_pg, m_pg->is_scrub_blocking_ops());
dout(15) << __func__ << " update: " << applied_version
<< " vs. required: " << m_subset_last_update << dendl;
}
}
namespace {
/**
* an aux function to be used in select_range() below, to
* select the correct chunk size based on the type of scrub
*/
int size_from_conf(
bool is_deep,
const ceph::common::ConfigProxy& conf,
std::string_view deep_opt,
std::string_view shallow_opt)
{
if (!is_deep) {
auto sz = conf.get_val<int64_t>(shallow_opt);
if (sz != 0) {
// assuming '0' means that no distinction was yet configured between
// deep and shallow scrubbing
return static_cast<int>(sz);
}
}
return static_cast<int>(conf.get_val<int64_t>(deep_opt));
}
} // anonymous namespace
PgScrubber::scrubber_callback_cancel_token_t
PgScrubber::schedule_callback_after(
ceph::timespan duration, scrubber_callback_t &&cb)
{
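  // the callback runs under the PG lock, and is silently dropped if the
  // scrub interval has changed by the time the timer fires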
std::lock_guard l(m_osds->sleep_lock);
return m_osds->sleep_timer.add_event_after(
duration,
new LambdaContext(
[this, pg=PGRef(m_pg), cb=std::move(cb), epoch=get_osdmap_epoch()] {
pg->lock();
if (check_interval(epoch)) {
cb();
}
pg->unlock();
}));
}
void PgScrubber::cancel_callback(scrubber_callback_cancel_token_t token)
{
std::lock_guard l(m_osds->sleep_lock);
m_osds->sleep_timer.cancel_event(token);
}
LogChannelRef &PgScrubber::get_clog() const
{
return m_osds->clog;
}
int PgScrubber::get_whoami() const
{
return m_osds->whoami;
}
/*
* The selected range is set directly into 'm_start' and 'm_end'
* setting:
* - m_subset_last_update
* - m_max_end
* - end
* - start
*/
bool PgScrubber::select_range()
{
m_be->new_chunk();
/* get the start and end of our scrub chunk
*
* Our scrub chunk has an important restriction we're going to need to
* respect. We can't let head be start or end.
* Using a half-open interval means that if end == head,
* we'd scrub/lock head and the clone right next to head in different
* chunks which would allow us to miss clones created between
* scrubbing that chunk and scrubbing the chunk including head.
* This isn't true for any of the other clones since clones can
* only be created "just to the left of" head. There is one exception
* to this: promotion of clones which always happens to the left of the
* left-most clone, but promote_object checks the scrubber in that
* case, so it should be ok. Also, it's ok to "miss" clones at the
* left end of the range if we are a tier because they may legitimately
* not exist (see _scrub).
*/
const auto& conf = m_pg->get_cct()->_conf;
dout(20) << fmt::format(
"{} {} mins: {}d {}s, max: {}d {}s", __func__,
(m_is_deep ? "D" : "S"),
conf.get_val<int64_t>("osd_scrub_chunk_min"),
conf.get_val<int64_t>("osd_shallow_scrub_chunk_min"),
conf.get_val<int64_t>("osd_scrub_chunk_max"),
conf.get_val<int64_t>("osd_shallow_scrub_chunk_max"))
<< dendl;
const int min_from_conf = size_from_conf(
m_is_deep, conf, "osd_scrub_chunk_min", "osd_shallow_scrub_chunk_min");
const int max_from_conf = size_from_conf(
m_is_deep, conf, "osd_scrub_chunk_max", "osd_shallow_scrub_chunk_max");
const int divisor = static_cast<int>(preemption_data.chunk_divisor());
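  // the configured limits are scaled down by the preemption divisor, but the
  // chunk is never smaller than 3 objects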
const int min_chunk_sz = std::max(3, min_from_conf / divisor);
const int max_chunk_sz = std::max(min_chunk_sz, max_from_conf / divisor);
dout(10) << fmt::format(
"{}: Min: {} Max: {} Div: {}", __func__, min_chunk_sz,
max_chunk_sz, divisor)
<< dendl;
hobject_t start = m_start;
hobject_t candidate_end;
std::vector<hobject_t> objects;
int ret = m_pg->get_pgbackend()->objects_list_partial(
start, min_chunk_sz, max_chunk_sz, &objects, &candidate_end);
ceph_assert(ret >= 0);
if (!objects.empty()) {
hobject_t back = objects.back();
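    // if the end of the chunk falls on a head object whose clones are at the
    // tail of our listing - pull the boundary back, so that the head and its
    // clones will be scrubbed together in the next chunk (see comment above)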
while (candidate_end.is_head() && candidate_end == back.get_head()) {
candidate_end = back;
objects.pop_back();
if (objects.empty()) {
ceph_assert(0 ==
"Somehow we got more than 2 objects which"
"have the same head but are not clones");
}
back = objects.back();
}
if (candidate_end.is_head()) {
ceph_assert(candidate_end != back.get_head());
candidate_end = candidate_end.get_object_boundary();
}
} else {
ceph_assert(candidate_end.is_max());
}
// is that range free for us? if not - we will be rescheduled later by whoever
// triggered us this time
if (!m_pg->_range_available_for_scrub(m_start, candidate_end)) {
// we'll be requeued by whatever made us unavailable for scrub
dout(10) << __func__ << ": scrub blocked somewhere in range "
<< "[" << m_start << ", " << candidate_end << ")" << dendl;
return false;
}
m_end = candidate_end;
if (m_end > m_max_end)
m_max_end = m_end;
dout(15) << __func__ << " range selected: " << m_start << " //// " << m_end
<< " //// " << m_max_end << dendl;
// debug: be 'blocked' if told so by the 'pg scrub_debug block' asok command
if (m_debug_blockrange > 0) {
m_debug_blockrange--;
return false;
}
return true;
}
void PgScrubber::select_range_n_notify()
{
if (select_range()) {
// the next chunk to handle is not blocked
dout(20) << __func__ << ": selection OK" << dendl;
m_osds->queue_scrub_chunk_free(m_pg, Scrub::scrub_prio_t::low_priority);
} else {
// we will wait for the objects range to become available for scrubbing
dout(10) << __func__ << ": selected chunk is busy" << dendl;
m_osds->queue_scrub_chunk_busy(m_pg, Scrub::scrub_prio_t::low_priority);
}
}
bool PgScrubber::write_blocked_by_scrub(const hobject_t& soid)
{
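  // writes to objects outside the range currently being scrubbed are never
  // blocked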
if (soid < m_start || soid >= m_end) {
return false;
}
dout(20) << __func__ << " " << soid << " can preempt? "
<< preemption_data.is_preemptable() << " already preempted? "
<< preemption_data.was_preempted() << dendl;
if (preemption_data.was_preempted()) {
    // we were already preempted. Do not block this write: requests arriving
    // while 'already preempted' is set (but 'preemptable' is no longer set)
    // would otherwise be blocked without being requeued in time.
return false;
}
if (preemption_data.is_preemptable()) {
dout(10) << __func__ << " " << soid << " preempted" << dendl;
// signal the preemption
preemption_data.do_preempt();
m_end = m_start; // free the range we were scrubbing
return false;
}
return true;
}
bool PgScrubber::range_intersects_scrub(const hobject_t& start,
const hobject_t& end)
{
// does [start, end] intersect [scrubber.start, scrubber.m_max_end)
return (start < m_max_end && end >= m_start);
}
eversion_t PgScrubber::search_log_for_updates() const
{
auto& projected = m_pg->projected_log.log;
auto pi = find_if(projected.crbegin(),
projected.crend(),
[this](const auto& e) -> bool {
return e.soid >= m_start && e.soid < m_end;
});
if (pi != projected.crend())
return pi->version;
// there was no relevant update entry in the log
auto& log = m_pg->recovery_state.get_pg_log().get_log().log;
auto p = find_if(log.crbegin(), log.crend(), [this](const auto& e) -> bool {
return e.soid >= m_start && e.soid < m_end;
});
if (p == log.crend())
return eversion_t{};
else
return p->version;
}
void PgScrubber::get_replicas_maps(bool replica_can_preempt)
{
dout(10) << __func__ << " started in epoch/interval: " << m_epoch_start << "/"
<< m_interval_start << " pg same_interval_since: "
<< m_pg->info.history.same_interval_since << dendl;
m_primary_scrubmap_pos.reset();
// ask replicas to scan and send maps
for (const auto& i : m_pg->get_actingset()) {
if (i == m_pg_whoami)
continue;
m_maps_status.mark_replica_map_request(i);
_request_scrub_map(i,
m_subset_last_update,
m_start,
m_end,
m_is_deep,
replica_can_preempt);
}
dout(10) << __func__ << " awaiting" << m_maps_status << dendl;
}
bool PgScrubber::was_epoch_changed() const
{
// for crimson we have m_pg->get_info().history.same_interval_since
dout(10) << __func__ << " epoch_start: " << m_interval_start
<< " from pg: " << m_pg->get_history().same_interval_since << dendl;
return m_interval_start < m_pg->get_history().same_interval_since;
}
void PgScrubber::mark_local_map_ready()
{
m_maps_status.mark_local_map_ready();
}
bool PgScrubber::are_all_maps_available() const
{
return m_maps_status.are_all_maps_available();
}
std::string PgScrubber::dump_awaited_maps() const
{
return m_maps_status.dump();
}
void PgScrubber::update_op_mode_text()
{
auto visible_repair = state_test(PG_STATE_REPAIR);
m_mode_desc =
(visible_repair ? "repair" : (m_is_deep ? "deep-scrub" : "scrub"));
dout(10) << __func__
<< ": repair: visible: " << (visible_repair ? "true" : "false")
<< ", internal: " << (m_is_repair ? "true" : "false")
<< ". Displayed: " << m_mode_desc << dendl;
}
void PgScrubber::_request_scrub_map(pg_shard_t replica,
eversion_t version,
hobject_t start,
hobject_t end,
bool deep,
bool allow_preemption)
{
ceph_assert(replica != m_pg_whoami);
dout(10) << __func__ << " scrubmap from osd." << replica
<< (deep ? " deep" : " shallow") << dendl;
auto repscrubop = new MOSDRepScrub(spg_t(m_pg->info.pgid.pgid, replica.shard),
version,
get_osdmap_epoch(),
m_pg->get_last_peering_reset(),
start,
end,
deep,
allow_preemption,
m_flags.priority,
m_pg->ops_blocked_by_scrub());
// default priority. We want the replica-scrub processed prior to any recovery
// or client io messages (we are holding a lock!)
m_osds->send_message_osd_cluster(replica.osd, repscrubop, get_osdmap_epoch());
}
void PgScrubber::cleanup_store(ObjectStore::Transaction* t)
{
if (!m_store)
return;
struct OnComplete : Context {
std::unique_ptr<Scrub::Store> store;
explicit OnComplete(std::unique_ptr<Scrub::Store>&& store)
: store(std::move(store))
{}
void finish(int) override {}
};
m_store->cleanup(t);
t->register_on_complete(new OnComplete(std::move(m_store)));
ceph_assert(!m_store);
}
void PgScrubber::on_init()
{
// going upwards from 'inactive'
ceph_assert(!is_scrub_active());
m_pg->reset_objects_scrubbed();
preemption_data.reset();
m_interval_start = m_pg->get_history().same_interval_since;
dout(10) << __func__ << " start same_interval:" << m_interval_start << dendl;
m_be = std::make_unique<ScrubBackend>(
*this,
*m_pg,
m_pg_whoami,
m_is_repair,
m_is_deep ? scrub_level_t::deep : scrub_level_t::shallow,
m_pg->get_actingset());
// create a new store
{
ObjectStore::Transaction t;
cleanup_store(&t);
m_store.reset(
Scrub::Store::create(m_pg->osd->store, &t, m_pg->info.pgid, m_pg->coll));
m_pg->osd->store->queue_transaction(m_pg->ch, std::move(t), nullptr);
}
m_start = m_pg->info.pgid.pgid.get_hobj_start();
m_active = true;
++m_sessions_counter;
  // publish the session counter and the fact that we are scrubbing.
m_pg->publish_stats_to_osd();
}
/*
* Note: as on_replica_init() is likely to be called twice (entering
* both ReplicaWaitUpdates & ActiveReplica), its operations should be
* idempotent.
* Now that it includes some state-changing operations, we need to check
* m_active against double-activation.
*/
void PgScrubber::on_replica_init()
{
dout(10) << __func__ << " called with 'active' "
<< (m_active ? "set" : "cleared") << dendl;
if (!m_active) {
m_be = std::make_unique<ScrubBackend>(
*this, *m_pg, m_pg_whoami, m_is_repair,
m_is_deep ? scrub_level_t::deep : scrub_level_t::shallow);
m_active = true;
++m_sessions_counter;
}
}
int PgScrubber::build_primary_map_chunk()
{
epoch_t map_building_since = m_pg->get_osdmap_epoch();
dout(20) << __func__ << ": initiated at epoch " << map_building_since
<< dendl;
auto ret = build_scrub_map_chunk(m_be->get_primary_scrubmap(),
m_primary_scrubmap_pos,
m_start,
m_end,
m_is_deep);
if (ret == -EINPROGRESS) {
// reschedule another round of asking the backend to collect the scrub data
m_osds->queue_for_scrub_resched(m_pg, Scrub::scrub_prio_t::low_priority);
}
return ret;
}
int PgScrubber::build_replica_map_chunk()
{
dout(10) << __func__ << " interval start: " << m_interval_start
<< " current token: " << m_current_token
<< " epoch: " << m_epoch_start << " deep: " << m_is_deep << dendl;
ceph_assert(m_be);
auto ret = build_scrub_map_chunk(replica_scrubmap,
replica_scrubmap_pos,
m_start,
m_end,
m_is_deep);
switch (ret) {
case -EINPROGRESS:
// must wait for the backend to finish. No external event source.
// (note: previous version used low priority here. Now switched to using
// the priority of the original message)
m_osds->queue_for_rep_scrub_resched(m_pg,
m_replica_request_priority,
m_flags.priority,
m_current_token);
break;
case 0: {
// finished!
auto required_fixes = m_be->replica_clean_meta(replica_scrubmap,
m_end.is_max(),
m_start,
get_snap_mapper_accessor());
// actuate snap-mapper changes:
apply_snap_mapper_fixes(required_fixes);
// the local map has been created. Send it to the primary.
// Note: once the message reaches the Primary, it may ask us for another
// chunk - and we better be done with the current scrub. Thus - the
// preparation of the reply message is separate, and we clear the scrub
// state before actually sending it.
auto reply = prep_replica_map_msg(PreemptionNoted::no_preemption);
replica_handling_done();
dout(15) << __func__ << " chunk map sent " << dendl;
send_replica_map(reply);
} break;
default:
// negative retval: build_scrub_map_chunk() signalled an error
// Pre-Pacific code ignored this option, treating it as a success.
// \todo Add an error flag in the returning message.
dout(1) << "Error! Aborting. ActiveReplica::react(SchedReplica) Ret: "
<< ret << dendl;
replica_handling_done();
// only in debug mode for now:
assert(false && "backend error");
break;
};
return ret;
}
int PgScrubber::build_scrub_map_chunk(ScrubMap& map,
ScrubMapBuilder& pos,
hobject_t start,
hobject_t end,
bool deep)
{
dout(10) << __func__ << " [" << start << "," << end << ") "
<< " pos " << pos << " Deep: " << deep << dendl;
// start
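  // (the loop below runs at most once per call: it initializes 'pos' and
  // lists the objects in the requested range)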
while (pos.empty()) {
pos.deep = deep;
map.valid_through = m_pg->info.last_update;
// objects
vector<ghobject_t> rollback_obs;
pos.ret = m_pg->get_pgbackend()->objects_list_range(start,
end,
&pos.ls,
&rollback_obs);
dout(10) << __func__ << " while pos empty " << pos.ret << dendl;
if (pos.ret < 0) {
dout(5) << "objects_list_range error: " << pos.ret << dendl;
return pos.ret;
}
dout(10) << __func__ << " pos.ls.empty()? " << (pos.ls.empty() ? "+" : "-")
<< dendl;
if (pos.ls.empty()) {
break;
}
m_pg->_scan_rollback_obs(rollback_obs);
pos.pos = 0;
return -EINPROGRESS;
}
// scan objects
while (!pos.done()) {
int r = m_pg->get_pgbackend()->be_scan_list(map, pos);
dout(30) << __func__ << " BE returned " << r << dendl;
if (r == -EINPROGRESS) {
dout(20) << __func__ << " in progress" << dendl;
return r;
}
}
// finish
dout(20) << __func__ << " finishing" << dendl;
ceph_assert(pos.done());
repair_oinfo_oid(map);
dout(20) << __func__ << " done, got " << map.objects.size() << " items"
<< dendl;
return 0;
}
/// \todo consider moving repair_oinfo_oid() back to the backend
void PgScrubber::repair_oinfo_oid(ScrubMap& smap)
{
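  // look for objects whose 'object info' attribute names an OID other than
  // the object it is attached to, and rewrite that attribute in place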
for (auto i = smap.objects.rbegin(); i != smap.objects.rend(); ++i) {
const hobject_t& hoid = i->first;
ScrubMap::object& o = i->second;
if (o.attrs.find(OI_ATTR) == o.attrs.end()) {
continue;
}
bufferlist bl;
bl.push_back(o.attrs[OI_ATTR]);
object_info_t oi;
try {
oi.decode(bl);
} catch (...) {
continue;
}
if (oi.soid != hoid) {
ObjectStore::Transaction t;
OSDriver::OSTransaction _t(m_pg->osdriver.get_transaction(&t));
m_osds->clog->error()
<< "osd." << m_pg_whoami << " found object info error on pg " << m_pg_id
<< " oid " << hoid << " oid in object info: " << oi.soid
<< "...repaired";
// Fix object info
oi.soid = hoid;
bl.clear();
encode(oi,
bl,
m_pg->get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr));
bufferptr bp(bl.c_str(), bl.length());
o.attrs[OI_ATTR] = bp;
t.setattr(m_pg->coll, ghobject_t(hoid), OI_ATTR, bl);
int r = m_pg->osd->store->queue_transaction(m_pg->ch, std::move(t));
if (r != 0) {
derr << __func__ << ": queue_transaction got " << cpp_strerror(r)
<< dendl;
}
}
}
}
void PgScrubber::run_callbacks()
{
std::list<Context*> to_run;
to_run.swap(m_callbacks);
for (auto& tr : to_run) {
tr->complete(0);
}
}
void PgScrubber::persist_scrub_results(inconsistent_objs_t&& all_errors)
{
dout(10) << __func__ << " " << all_errors.size() << " errors" << dendl;
for (auto& e : all_errors) {
std::visit([this](auto& e) { m_store->add_error(m_pg->pool.id, e); }, e);
}
ObjectStore::Transaction t;
m_store->flush(&t);
m_osds->store->queue_transaction(m_pg->ch, std::move(t), nullptr);
}
void PgScrubber::apply_snap_mapper_fixes(
const std::vector<snap_mapper_fix_t>& fix_list)
{
dout(15) << __func__ << " " << fix_list.size() << " fixes" << dendl;
if (fix_list.empty()) {
return;
}
ObjectStore::Transaction t;
OSDriver::OSTransaction t_drv(m_pg->osdriver.get_transaction(&t));
for (auto& [fix_op, hoid, snaps, bogus_snaps] : fix_list) {
if (fix_op != snap_mapper_op_t::add) {
// must remove the existing snap-set before inserting the correct one
if (auto r = m_pg->snap_mapper.remove_oid(hoid, &t_drv); r < 0) {
derr << __func__ << ": remove_oid returned " << cpp_strerror(r)
<< dendl;
if (fix_op == snap_mapper_op_t::update) {
// for inconsistent snapmapper objects (i.e. for
// snap_mapper_op_t::inconsistent), we don't fret if we can't remove
// the old entries
ceph_abort();
}
}
m_osds->clog->error() << fmt::format(
"osd.{} found snap mapper error on pg {} oid {} snaps in mapper: {}, "
"oi: "
"{} ...repaired",
m_pg_whoami,
m_pg_id,
hoid,
bogus_snaps,
snaps);
} else {
m_osds->clog->error() << fmt::format(
"osd.{} found snap mapper error on pg {} oid {} snaps missing in "
"mapper, should be: {} ...repaired",
m_pg_whoami,
m_pg_id,
hoid,
snaps);
}
// now - insert the correct snap-set
m_pg->snap_mapper.add_oid(hoid, snaps, &t_drv);
}
// wait for repair to apply to avoid confusing other bits of the system.
{
dout(15) << __func__ << " wait on repair!" << dendl;
ceph::condition_variable my_cond;
ceph::mutex my_lock = ceph::make_mutex("PG::_scan_snaps my_lock");
int e = 0;
bool done{false};
t.register_on_applied_sync(new C_SafeCond(my_lock, my_cond, &done, &e));
if (e = m_pg->osd->store->queue_transaction(m_pg->ch, std::move(t));
e != 0) {
derr << __func__ << ": queue_transaction got " << cpp_strerror(e)
<< dendl;
} else {
std::unique_lock l{my_lock};
my_cond.wait(l, [&done] { return done; });
ceph_assert(m_pg->osd->store); // RRR why?
}
dout(15) << __func__ << " wait on repair - done" << dendl;
}
}
void PgScrubber::maps_compare_n_cleanup()
{
m_pg->add_objects_scrubbed_count(m_be->get_primary_scrubmap().objects.size());
auto required_fixes =
m_be->scrub_compare_maps(m_end.is_max(), get_snap_mapper_accessor());
if (!required_fixes.inconsistent_objs.empty()) {
if (state_test(PG_STATE_REPAIR)) {
dout(10) << __func__ << ": discarding scrub results (repairing)" << dendl;
} else {
// perform the ordered scrub-store I/O:
persist_scrub_results(std::move(required_fixes.inconsistent_objs));
}
}
// actuate snap-mapper changes:
apply_snap_mapper_fixes(required_fixes.snap_fix_list);
auto chunk_err_counts = m_be->get_error_counts();
m_shallow_errors += chunk_err_counts.shallow_errors;
m_deep_errors += chunk_err_counts.deep_errors;
m_start = m_end;
run_callbacks();
// requeue the writes from the chunk that just finished
requeue_waiting();
}
Scrub::preemption_t& PgScrubber::get_preemptor()
{
return preemption_data;
}
/*
* Process note: called for the arriving "give me your map, replica!" request.
* Unlike the original implementation, we do not requeue the Op waiting for
* updates. Instead - we trigger the FSM.
*/
void PgScrubber::replica_scrub_op(OpRequestRef op)
{
op->mark_started();
auto msg = op->get_req<MOSDRepScrub>();
dout(10) << __func__ << " pg:" << m_pg->pg_id
<< " Msg: map_epoch:" << msg->map_epoch
<< " min_epoch:" << msg->min_epoch << " deep?" << msg->deep << dendl;
if (should_drop_message(op)) {
return;
}
replica_scrubmap = ScrubMap{};
replica_scrubmap_pos = ScrubMapBuilder{};
m_replica_min_epoch = msg->min_epoch;
m_start = msg->start;
m_end = msg->end;
m_max_end = msg->end;
m_is_deep = msg->deep;
m_interval_start = m_pg->info.history.same_interval_since;
m_replica_request_priority = msg->high_priority
? Scrub::scrub_prio_t::high_priority
: Scrub::scrub_prio_t::low_priority;
m_flags.priority = msg->priority ? msg->priority : m_pg->get_scrub_priority();
preemption_data.reset();
preemption_data.force_preemptability(msg->allow_preemption);
replica_scrubmap_pos.reset(); // needed? RRR
set_queued_or_active();
m_osds->queue_for_rep_scrub(m_pg,
m_replica_request_priority,
m_flags.priority,
m_current_token);
}
void PgScrubber::set_op_parameters(const requested_scrub_t& request)
{
dout(10) << fmt::format("{}: @ input: {}", __func__, request) << dendl;
set_queued_or_active(); // we are fully committed now.
// write down the epoch of starting a new scrub. Will be used
// to discard stale messages from previous aborted scrubs.
m_epoch_start = m_pg->get_osdmap_epoch();
m_flags.check_repair = request.check_repair;
m_flags.auto_repair = request.auto_repair || request.need_auto;
m_flags.required = request.req_scrub || request.must_scrub;
m_flags.priority = (request.must_scrub || request.need_auto)
? get_pg_cct()->_conf->osd_requested_scrub_priority
: m_pg->get_scrub_priority();
state_set(PG_STATE_SCRUBBING);
// will we be deep-scrubbing?
if (request.calculated_to_deep) {
state_set(PG_STATE_DEEP_SCRUB);
m_is_deep = true;
} else {
m_is_deep = false;
// make sure we got the 'calculated_to_deep' flag right
ceph_assert(!request.must_deep_scrub);
ceph_assert(!request.need_auto);
}
// m_is_repair is set for either 'must_repair' or 'repair-on-the-go' (i.e.
// deep-scrub with the auto_repair configuration flag set). m_is_repair value
// determines the scrubber behavior.
//
// PG_STATE_REPAIR, on the other hand, is only used for status reports (inc.
// the PG status as appearing in the logs).
m_is_repair = request.must_repair || m_flags.auto_repair;
if (request.must_repair) {
state_set(PG_STATE_REPAIR);
update_op_mode_text();
}
  // The publishing here is required for test synchronization.
// The PG state flags were modified.
m_pg->publish_stats_to_osd();
m_flags.deep_scrub_on_error = request.deep_scrub_on_error;
}
ScrubMachineListener::MsgAndEpoch PgScrubber::prep_replica_map_msg(
PreemptionNoted was_preempted)
{
dout(10) << __func__ << " min epoch:" << m_replica_min_epoch << dendl;
auto reply = make_message<MOSDRepScrubMap>(
spg_t(m_pg->info.pgid.pgid, m_pg->get_primary().shard),
m_replica_min_epoch,
m_pg_whoami);
reply->preempted = (was_preempted == PreemptionNoted::preempted);
::encode(replica_scrubmap, reply->get_data());
return ScrubMachineListener::MsgAndEpoch{reply, m_replica_min_epoch};
}
void PgScrubber::send_replica_map(const MsgAndEpoch& preprepared)
{
m_pg->send_cluster_message(m_pg->get_primary().osd,
preprepared.m_msg,
preprepared.m_epoch,
false);
}
void PgScrubber::send_preempted_replica()
{
auto reply = make_message<MOSDRepScrubMap>(
spg_t{m_pg->info.pgid.pgid, m_pg->get_primary().shard},
m_replica_min_epoch,
m_pg_whoami);
reply->preempted = true;
::encode(replica_scrubmap,
reply->get_data()); // skipping this crashes the scrubber
m_pg->send_cluster_message(m_pg->get_primary().osd,
reply,
m_replica_min_epoch,
false);
}
/*
* - if the replica lets us know it was interrupted, we mark the chunk as
* interrupted. The state-machine will react to that when all replica maps are
* received.
* - when all maps are received, we signal the FSM with the GotReplicas event
* (see scrub_send_replmaps_ready()). Note that due to the no-reentrancy
* limitations of the FSM, we do not 'process' the event directly. Instead - it
* is queued for the OSD to handle.
*/
void PgScrubber::map_from_replica(OpRequestRef op)
{
auto m = op->get_req<MOSDRepScrubMap>();
dout(15) << __func__ << " " << *m << dendl;
if (should_drop_message(op)) {
return;
}
// note: we check for active() before map_from_replica() is called. Thus, we
// know m_be is initialized
m_be->decode_received_map(m->from, *m);
auto [is_ok, err_txt] = m_maps_status.mark_arriving_map(m->from);
if (!is_ok) {
// previously an unexpected map was triggering an assert. Now, as scrubs can
// be aborted at any time, the chances of this happening have increased, and
// aborting is not justified
dout(1) << __func__ << err_txt << " from OSD " << m->from << dendl;
return;
}
if (m->preempted) {
dout(10) << __func__ << " replica was preempted, setting flag" << dendl;
preemption_data.do_preempt();
}
if (m_maps_status.are_all_maps_available()) {
dout(15) << __func__ << " all repl-maps available" << dendl;
m_osds->queue_scrub_got_repl_maps(m_pg, m_pg->is_scrub_blocking_ops());
}
}
void PgScrubber::handle_scrub_reserve_request(OpRequestRef op)
{
dout(10) << __func__ << " " << *op->get_req() << dendl;
op->mark_started();
auto request_ep = op->sent_epoch;
dout(20) << fmt::format("{}: request_ep:{} recovery:{}",
__func__,
request_ep,
m_osds->is_recovery_active())
<< dendl;
if (should_drop_message(op)) {
return;
}
/* The primary may unilaterally restart the scrub process without notifying
* replicas. Unconditionally clear any existing state prior to handling
* the new reservation. */
m_fsm->process_event(FullReset{});
bool granted{false};
if (m_pg->cct->_conf->osd_scrub_during_recovery ||
!m_osds->is_recovery_active()) {
granted = m_osds->get_scrub_services().inc_scrubs_remote();
if (granted) {
m_fsm->process_event(ReplicaGrantReservation{});
} else {
dout(20) << __func__ << ": failed to reserve remotely" << dendl;
}
} else {
dout(10) << __func__ << ": recovery is active; not granting" << dendl;
}
dout(10) << __func__ << " reserved? " << (granted ? "yes" : "no") << dendl;
Message* reply = new MOSDScrubReserve(
spg_t(m_pg->info.pgid.pgid, m_pg->get_primary().shard),
request_ep,
granted ? MOSDScrubReserve::GRANT : MOSDScrubReserve::REJECT,
m_pg_whoami);
m_osds->send_message_osd_cluster(reply, op->get_req()->get_connection());
}
void PgScrubber::handle_scrub_reserve_grant(OpRequestRef op, pg_shard_t from)
{
dout(10) << __func__ << " " << *op->get_req() << dendl;
op->mark_started();
if (m_reservations.has_value()) {
m_reservations->handle_reserve_grant(op, from);
} else {
dout(20) << __func__ << ": late/unsolicited reservation grant from osd "
<< from << " (" << op << ")" << dendl;
}
}
void PgScrubber::handle_scrub_reserve_reject(OpRequestRef op, pg_shard_t from)
{
dout(10) << __func__ << " " << *op->get_req() << dendl;
op->mark_started();
if (m_reservations.has_value()) {
// there is an active reservation process. No action is required otherwise.
m_reservations->handle_reserve_reject(op, from);
}
}
void PgScrubber::handle_scrub_reserve_release(OpRequestRef op)
{
dout(10) << __func__ << " " << *op->get_req() << dendl;
op->mark_started();
/*
* this specific scrub session has terminated. All incoming events carrying
* the old tag will be discarded.
*/
m_fsm->process_event(FullReset{});
}
void PgScrubber::discard_replica_reservations()
{
dout(10) << __func__ << dendl;
if (m_reservations.has_value()) {
m_reservations->discard_all();
}
}
void PgScrubber::clear_scrub_reservations()
{
dout(10) << __func__ << dendl;
m_reservations.reset(); // the remote reservations
m_local_osd_resource.reset(); // the local reservation
}
void PgScrubber::message_all_replicas(int32_t opcode, std::string_view op_text)
{
ceph_assert(m_pg->recovery_state.get_backfill_targets().empty());
std::vector<pair<int, Message*>> messages;
messages.reserve(m_pg->get_actingset().size());
epoch_t epch = get_osdmap_epoch();
for (auto& p : m_pg->get_actingset()) {
if (p == m_pg_whoami)
continue;
dout(10) << "scrub requesting " << op_text << " from osd." << p
<< " Epoch: " << epch << dendl;
Message* m = new MOSDScrubReserve(spg_t(m_pg->info.pgid.pgid, p.shard),
epch,
opcode,
m_pg_whoami);
messages.push_back(std::make_pair(p.osd, m));
}
if (!messages.empty()) {
m_osds->send_message_osd_cluster(messages, epch);
}
}
void PgScrubber::unreserve_replicas()
{
dout(10) << __func__ << dendl;
m_reservations.reset();
}
void PgScrubber::on_replica_reservation_timeout()
{
if (m_reservations) {
m_reservations->handle_no_reply_timeout();
}
}
void PgScrubber::set_reserving_now()
{
m_osds->get_scrub_services().set_reserving_now();
}
void PgScrubber::clear_reserving_now()
{
m_osds->get_scrub_services().clear_reserving_now();
}
void PgScrubber::set_queued_or_active()
{
m_queued_or_active = true;
}
void PgScrubber::clear_queued_or_active()
{
if (m_queued_or_active) {
m_queued_or_active = false;
// and just in case snap trimming was blocked by the aborted scrub
m_pg->snap_trimmer_scrub_complete();
}
}
bool PgScrubber::is_queued_or_active() const
{
return m_queued_or_active;
}
void PgScrubber::set_scrub_blocked(utime_t since)
{
ceph_assert(!m_scrub_job->blocked);
// we are called from a time-triggered lambda,
// thus - not under PG-lock
PGRef pg = m_osds->osd->lookup_lock_pg(m_pg_id);
ceph_assert(pg); // 'this' here should not exist if the PG was removed
m_osds->get_scrub_services().mark_pg_scrub_blocked(m_pg_id);
m_scrub_job->blocked_since = since;
m_scrub_job->blocked = true;
m_pg->publish_stats_to_osd();
pg->unlock();
}
void PgScrubber::clear_scrub_blocked()
{
ceph_assert(m_scrub_job->blocked);
m_osds->get_scrub_services().clear_pg_scrub_blocked(m_pg_id);
m_scrub_job->blocked = false;
m_pg->publish_stats_to_osd();
}
/*
* note: only called for the Primary.
*/
void PgScrubber::scrub_finish()
{
dout(10) << __func__ << " before flags: " << m_flags << ". repair state: "
<< (state_test(PG_STATE_REPAIR) ? "repair" : "no-repair")
<< ". deep_scrub_on_error: " << m_flags.deep_scrub_on_error << dendl;
ceph_assert(m_pg->is_locked());
ceph_assert(is_queued_or_active());
m_planned_scrub = requested_scrub_t{};
  // if the repair request comes from auto-repair and the number of errors
  // exceeds the configured limit - cancel the auto-repair
if (m_is_repair && m_flags.auto_repair &&
m_be->authoritative_peers_count() >
static_cast<int>(m_pg->cct->_conf->osd_scrub_auto_repair_num_errors)) {
dout(10) << __func__ << " undoing the repair" << dendl;
state_clear(PG_STATE_REPAIR); // not expected to be set, anyway
m_is_repair = false;
update_op_mode_text();
}
m_be->update_repair_status(m_is_repair);
// if a regular scrub had errors within the limit, do a deep scrub to auto
// repair
bool do_auto_scrub = false;
if (m_flags.deep_scrub_on_error && m_be->authoritative_peers_count() &&
m_be->authoritative_peers_count() <=
static_cast<int>(m_pg->cct->_conf->osd_scrub_auto_repair_num_errors)) {
ceph_assert(!m_is_deep);
do_auto_scrub = true;
dout(15) << __func__ << " Try to auto repair after scrub errors" << dendl;
}
m_flags.deep_scrub_on_error = false;
// type-specific finish (can tally more errors)
_scrub_finish();
/// \todo fix the relevant scrub test so that we would not need the extra log
/// line here (even if the following 'if' is false)
if (m_be->authoritative_peers_count()) {
auto err_msg = fmt::format("{} {} {} missing, {} inconsistent objects",
m_pg->info.pgid,
m_mode_desc,
m_be->m_missing.size(),
m_be->m_inconsistent.size());
dout(2) << err_msg << dendl;
m_osds->clog->error() << fmt::to_string(err_msg);
}
// note that the PG_STATE_REPAIR might have changed above
if (m_be->authoritative_peers_count() && m_is_repair) {
state_clear(PG_STATE_CLEAN);
// we know we have a problem, so it's OK to set the user-visible flag
// even if we only reached here via auto-repair
state_set(PG_STATE_REPAIR);
update_op_mode_text();
m_be->update_repair_status(true);
m_fixed_count += m_be->scrub_process_inconsistent();
}
bool has_error = (m_be->authoritative_peers_count() > 0) && m_is_repair;
{
stringstream oss;
oss << m_pg->info.pgid.pgid << " " << m_mode_desc << " ";
int total_errors = m_shallow_errors + m_deep_errors;
if (total_errors)
oss << total_errors << " errors";
else
oss << "ok";
if (!m_is_deep && m_pg->info.stats.stats.sum.num_deep_scrub_errors)
oss << " ( " << m_pg->info.stats.stats.sum.num_deep_scrub_errors
<< " remaining deep scrub error details lost)";
if (m_is_repair)
oss << ", " << m_fixed_count << " fixed";
if (total_errors)
m_osds->clog->error(oss);
else
m_osds->clog->debug(oss);
}
// Since we don't know which errors were fixed, we can only clear them
// when every one has been fixed.
if (m_is_repair) {
dout(15) << fmt::format("{}: {} errors. {} errors fixed",
__func__,
m_shallow_errors + m_deep_errors,
m_fixed_count)
<< dendl;
if (m_fixed_count == m_shallow_errors + m_deep_errors) {
ceph_assert(m_is_deep);
m_shallow_errors = 0;
m_deep_errors = 0;
dout(20) << __func__ << " All may be fixed" << dendl;
} else if (has_error) {
// Deep scrub in order to get corrected error counts
m_pg->scrub_after_recovery = true;
m_planned_scrub.req_scrub = m_planned_scrub.req_scrub || m_flags.required;
dout(20) << __func__ << " Current 'required': " << m_flags.required
<< " Planned 'req_scrub': " << m_planned_scrub.req_scrub
<< dendl;
} else if (m_shallow_errors || m_deep_errors) {
// We have errors but nothing can be fixed, so there is no repair
// possible.
state_set(PG_STATE_FAILED_REPAIR);
dout(10) << __func__ << " " << (m_shallow_errors + m_deep_errors)
<< " error(s) present with no repair possible" << dendl;
}
}
{
// finish up
ObjectStore::Transaction t;
m_pg->recovery_state.update_stats(
[this](auto& history, auto& stats) {
dout(10) << "m_pg->recovery_state.update_stats() errors:"
<< m_shallow_errors << "/" << m_deep_errors << " deep? "
<< m_is_deep << dendl;
utime_t now = ceph_clock_now();
history.last_scrub = m_pg->recovery_state.get_info().last_update;
history.last_scrub_stamp = now;
if (m_is_deep) {
history.last_deep_scrub = m_pg->recovery_state.get_info().last_update;
history.last_deep_scrub_stamp = now;
}
if (m_is_deep) {
if ((m_shallow_errors == 0) && (m_deep_errors == 0)) {
history.last_clean_scrub_stamp = now;
}
stats.stats.sum.num_shallow_scrub_errors = m_shallow_errors;
stats.stats.sum.num_deep_scrub_errors = m_deep_errors;
auto omap_stats = m_be->this_scrub_omapstats();
stats.stats.sum.num_large_omap_objects =
omap_stats.large_omap_objects;
stats.stats.sum.num_omap_bytes = omap_stats.omap_bytes;
stats.stats.sum.num_omap_keys = omap_stats.omap_keys;
dout(19) << "scrub_finish shard " << m_pg_whoami
<< " num_omap_bytes = " << stats.stats.sum.num_omap_bytes
<< " num_omap_keys = " << stats.stats.sum.num_omap_keys
<< dendl;
} else {
stats.stats.sum.num_shallow_scrub_errors = m_shallow_errors;
// XXX: last_clean_scrub_stamp doesn't mean the pg is not inconsistent
// because of deep-scrub errors
if (m_shallow_errors == 0) {
history.last_clean_scrub_stamp = now;
}
}
stats.stats.sum.num_scrub_errors =
stats.stats.sum.num_shallow_scrub_errors +
stats.stats.sum.num_deep_scrub_errors;
if (m_flags.check_repair) {
m_flags.check_repair = false;
if (m_pg->info.stats.stats.sum.num_scrub_errors) {
state_set(PG_STATE_FAILED_REPAIR);
dout(10) << "scrub_finish "
<< m_pg->info.stats.stats.sum.num_scrub_errors
<< " error(s) still present after re-scrub" << dendl;
}
}
return true;
},
&t);
int tr = m_osds->store->queue_transaction(m_pg->ch, std::move(t), nullptr);
ceph_assert(tr == 0);
}
update_scrub_job(m_planned_scrub);
if (has_error) {
m_pg->queue_peering_event(PGPeeringEventRef(
std::make_shared<PGPeeringEvent>(get_osdmap_epoch(),
get_osdmap_epoch(),
PeeringState::DoRecovery())));
} else {
m_is_repair = false;
state_clear(PG_STATE_REPAIR);
update_op_mode_text();
}
cleanup_on_finish();
if (do_auto_scrub) {
request_rescrubbing(m_planned_scrub);
}
if (m_pg->is_active() && m_pg->is_primary()) {
m_pg->recovery_state.share_pg_info();
}
}
void PgScrubber::on_digest_updates()
{
dout(10) << __func__ << " #pending: " << num_digest_updates_pending << " "
<< (m_end.is_max() ? " <last chunk>" : " <mid chunk>")
<< (is_queued_or_active() ? "" : " ** not marked as scrubbing **")
<< dendl;
if (num_digest_updates_pending > 0) {
// do nothing for now. We will be called again when new updates arrive
return;
}
// got all updates, and finished with this chunk. Any more?
if (m_end.is_max()) {
m_osds->queue_scrub_is_finished(m_pg);
} else {
// go get a new chunk (via "requeue")
preemption_data.reset();
m_osds->queue_scrub_next_chunk(m_pg, m_pg->is_scrub_blocking_ops());
}
}
/*
* note that the flags-set fetched from the PG (m_pg->m_planned_scrub)
* is cleared once scrubbing starts; Some of the values dumped here are
* thus transitory.
*/
void PgScrubber::dump_scrubber(ceph::Formatter* f,
const requested_scrub_t& request_flags) const
{
f->open_object_section("scrubber");
if (m_active) { // TBD replace with PR#42780's test
f->dump_bool("active", true);
dump_active_scrubber(f, state_test(PG_STATE_DEEP_SCRUB));
} else {
f->dump_bool("active", false);
f->dump_bool("must_scrub",
(m_planned_scrub.must_scrub || m_flags.required));
f->dump_bool("must_deep_scrub", request_flags.must_deep_scrub);
f->dump_bool("must_repair", request_flags.must_repair);
f->dump_bool("need_auto", request_flags.need_auto);
f->dump_stream("scrub_reg_stamp") << m_scrub_job->get_sched_time();
// note that we are repeating logic that is coded elsewhere (currently
// PG.cc). This is not optimal.
bool deep_expected =
(ceph_clock_now() >= m_pg->next_deepscrub_interval()) ||
request_flags.must_deep_scrub || request_flags.need_auto;
auto sched_state =
m_scrub_job->scheduling_state(ceph_clock_now(), deep_expected);
f->dump_string("schedule", sched_state);
}
if (m_publish_sessions) {
f->dump_int("test_sequence",
m_sessions_counter); // an ever-increasing number used by tests
}
f->close_section();
}
void PgScrubber::dump_active_scrubber(ceph::Formatter* f, bool is_deep) const
{
f->dump_stream("epoch_start") << m_interval_start;
f->dump_stream("start") << m_start;
f->dump_stream("end") << m_end;
f->dump_stream("max_end") << m_max_end;
f->dump_stream("subset_last_update") << m_subset_last_update;
// note that m_is_deep will be set some time after PG_STATE_DEEP_SCRUB is
// asserted. Thus, using the latter.
f->dump_bool("deep", is_deep);
// dump the scrub-type flags
f->dump_bool("req_scrub", m_flags.required);
f->dump_bool("auto_repair", m_flags.auto_repair);
f->dump_bool("check_repair", m_flags.check_repair);
f->dump_bool("deep_scrub_on_error", m_flags.deep_scrub_on_error);
f->dump_unsigned("priority", m_flags.priority);
f->dump_int("shallow_errors", m_shallow_errors);
f->dump_int("deep_errors", m_deep_errors);
f->dump_int("fixed", m_fixed_count);
{
f->open_array_section("waiting_on_whom");
for (const auto& p : m_maps_status.get_awaited()) {
f->dump_stream("shard") << p;
}
f->close_section();
}
if (m_scrub_job->blocked) {
f->dump_string("schedule", "blocked");
} else {
f->dump_string("schedule", "scrubbing");
}
}
pg_scrubbing_status_t PgScrubber::get_schedule() const
{
if (!m_scrub_job) {
return pg_scrubbing_status_t{};
}
dout(25) << fmt::format("{}: active:{} blocked:{}",
__func__,
m_active,
m_scrub_job->blocked)
<< dendl;
auto now_is = ceph_clock_now();
if (m_active) {
// report current scrub info, including updated duration
if (m_scrub_job->blocked) {
      // scrub is blocked on an object that is held locked - an indication of
      // a bug elsewhere
int32_t blocked_for =
(utime_t{now_is} - m_scrub_job->blocked_since).sec();
return pg_scrubbing_status_t{
utime_t{},
blocked_for,
pg_scrub_sched_status_t::blocked,
true, // active
(m_is_deep ? scrub_level_t::deep : scrub_level_t::shallow),
false};
} else {
int32_t duration = (utime_t{now_is} - scrub_begin_stamp).sec();
return pg_scrubbing_status_t{
utime_t{},
duration,
pg_scrub_sched_status_t::active,
true, // active
(m_is_deep ? scrub_level_t::deep : scrub_level_t::shallow),
false /* is periodic? unknown, actually */};
}
}
if (m_scrub_job->state != ScrubQueue::qu_state_t::registered) {
return pg_scrubbing_status_t{utime_t{},
0,
pg_scrub_sched_status_t::not_queued,
false,
scrub_level_t::shallow,
false};
}
// Will next scrub surely be a deep one? note that deep-scrub might be
// selected even if we report a regular scrub here.
bool deep_expected = (now_is >= m_pg->next_deepscrub_interval()) ||
m_planned_scrub.must_deep_scrub ||
m_planned_scrub.need_auto;
scrub_level_t expected_level =
deep_expected ? scrub_level_t::deep : scrub_level_t::shallow;
bool periodic = !m_planned_scrub.must_scrub && !m_planned_scrub.need_auto &&
!m_planned_scrub.must_deep_scrub;
// are we ripe for scrubbing?
if (now_is > m_scrub_job->schedule.scheduled_at) {
// we are waiting for our turn at the OSD.
return pg_scrubbing_status_t{m_scrub_job->schedule.scheduled_at,
0,
pg_scrub_sched_status_t::queued,
false,
expected_level,
periodic};
}
return pg_scrubbing_status_t{m_scrub_job->schedule.scheduled_at,
0,
pg_scrub_sched_status_t::scheduled,
false,
expected_level,
periodic};
}
void PgScrubber::handle_query_state(ceph::Formatter* f)
{
dout(15) << __func__ << dendl;
f->open_object_section("scrub");
f->dump_stream("scrubber.epoch_start") << m_interval_start;
f->dump_bool("scrubber.active", m_active);
f->dump_stream("scrubber.start") << m_start;
f->dump_stream("scrubber.end") << m_end;
f->dump_stream("scrubber.max_end") << m_max_end;
f->dump_stream("scrubber.subset_last_update") << m_subset_last_update;
f->dump_bool("scrubber.deep", m_is_deep);
{
f->open_array_section("scrubber.waiting_on_whom");
for (const auto& p : m_maps_status.get_awaited()) {
f->dump_stream("shard") << p;
}
f->close_section();
}
f->dump_string("comment", "DEPRECATED - may be removed in the next release");
f->close_section();
}
PgScrubber::~PgScrubber()
{
if (m_scrub_job) {
// make sure the OSD won't try to scrub this one just now
rm_from_osd_scrubbing();
m_scrub_job.reset();
}
}
PgScrubber::PgScrubber(PG* pg)
: m_pg{pg}
, m_pg_id{pg->pg_id}
, m_osds{m_pg->osd}
, m_pg_whoami{pg->pg_whoami}
, m_planned_scrub{pg->get_planned_scrub(ScrubberPasskey{})}
, preemption_data{pg}
{
m_fsm = std::make_unique<ScrubMachine>(m_pg, this);
m_fsm->initiate();
m_scrub_job = ceph::make_ref<ScrubQueue::ScrubJob>(m_osds->cct,
m_pg->pg_id,
m_osds->get_nodeid());
}
void PgScrubber::set_scrub_begin_time()
{
scrub_begin_stamp = ceph_clock_now();
m_osds->clog->debug() << fmt::format(
"{} {} starts",
m_pg->info.pgid.pgid,
m_mode_desc);
}
void PgScrubber::set_scrub_duration()
{
utime_t stamp = ceph_clock_now();
utime_t duration = stamp - scrub_begin_stamp;
m_pg->recovery_state.update_stats([=](auto& history, auto& stats) {
stats.last_scrub_duration = ceill(duration.to_msec() / 1000.0);
stats.scrub_duration = double(duration);
return true;
});
}
void PgScrubber::reserve_replicas()
{
dout(10) << __func__ << dendl;
m_reservations.emplace(
m_pg, m_pg_whoami, m_scrub_job, m_pg->get_cct()->_conf);
}
void PgScrubber::cleanup_on_finish()
{
dout(10) << __func__ << dendl;
ceph_assert(m_pg->is_locked());
state_clear(PG_STATE_SCRUBBING);
state_clear(PG_STATE_DEEP_SCRUB);
clear_scrub_reservations();
requeue_waiting();
reset_internal_state();
m_flags = scrub_flags_t{};
// type-specific state clear
_scrub_clear_state();
// PG state flags changed:
m_pg->publish_stats_to_osd();
}
// uses process_event(), so must be invoked externally
void PgScrubber::scrub_clear_state()
{
dout(10) << __func__ << dendl;
clear_pgscrub_state();
m_fsm->process_event(FullReset{});
}
/*
* note: does not access the state-machine
*/
void PgScrubber::clear_pgscrub_state()
{
dout(10) << __func__ << dendl;
ceph_assert(m_pg->is_locked());
state_clear(PG_STATE_SCRUBBING);
state_clear(PG_STATE_DEEP_SCRUB);
state_clear(PG_STATE_REPAIR);
clear_scrub_reservations();
requeue_waiting();
reset_internal_state();
m_flags = scrub_flags_t{};
// type-specific state clear
_scrub_clear_state();
}
void PgScrubber::replica_handling_done()
{
dout(10) << __func__ << dendl;
state_clear(PG_STATE_SCRUBBING);
state_clear(PG_STATE_DEEP_SCRUB);
reset_internal_state();
}
std::chrono::milliseconds PgScrubber::get_scrub_sleep_time() const
{
return m_osds->get_scrub_services().scrub_sleep_time(
m_flags.required);
}
void PgScrubber::queue_for_scrub_resched(Scrub::scrub_prio_t prio)
{
m_osds->queue_for_scrub_resched(m_pg, prio);
}
/*
* note: performs run_callbacks()
* note: reservations-related variables are not reset here
*/
void PgScrubber::reset_internal_state()
{
dout(10) << __func__ << dendl;
preemption_data.reset();
m_maps_status.reset();
m_start = hobject_t{};
m_end = hobject_t{};
m_max_end = hobject_t{};
m_subset_last_update = eversion_t{};
m_shallow_errors = 0;
m_deep_errors = 0;
m_fixed_count = 0;
run_callbacks();
num_digest_updates_pending = 0;
m_primary_scrubmap_pos.reset();
replica_scrubmap = ScrubMap{};
replica_scrubmap_pos.reset();
m_active = false;
clear_queued_or_active();
++m_sessions_counter;
m_be.reset();
}
bool PgScrubber::is_token_current(Scrub::act_token_t received_token)
{
if (received_token == 0 || received_token == m_current_token) {
return true;
}
dout(5) << __func__ << " obsolete token (" << received_token << " vs current "
<< m_current_token << dendl;
return false;
}
const OSDMapRef& PgScrubber::get_osdmap() const
{
return m_pg->get_osdmap();
}
LoggerSinkSet& PgScrubber::get_logger() const { return *m_osds->clog.get(); }
ostream &operator<<(ostream &out, const PgScrubber &scrubber) {
return out << scrubber.m_flags;
}
std::ostream& PgScrubber::gen_prefix(std::ostream& out) const
{
if (m_pg) {
return m_pg->gen_prefix(out) << "scrubber<" << m_fsm_state_name << ">: ";
} else {
return out << " scrubber [" << m_pg_id << "]: ";
}
}
void PgScrubber::log_cluster_warning(const std::string& warning) const
{
m_osds->clog->do_log(CLOG_WARN, warning);
}
ostream& PgScrubber::show(ostream& out) const
{
return out << " [ " << m_pg_id << ": " << m_flags << " ] ";
}
int PgScrubber::asok_debug(std::string_view cmd,
std::string param,
Formatter* f,
stringstream& ss)
{
dout(10) << __func__ << " cmd: " << cmd << " param: " << param << dendl;
if (cmd == "block") {
// 'm_debug_blockrange' causes the next 'select_range' to report a blocked
// object
m_debug_blockrange = 10; // >1, so that will trigger fast state reports
} else if (cmd == "unblock") {
// send an 'unblock' event, as if a blocked range was freed
m_debug_blockrange = 0;
m_fsm->process_event(Unblocked{});
} else if ((cmd == "set") || (cmd == "unset")) {
if (param == "sessions") {
// set/reset the inclusion of the scrub sessions counter in 'query' output
m_publish_sessions = (cmd == "set");
} else if (param == "block") {
if (cmd == "set") {
// set a flag that will cause the next 'select_range' to report a
// blocked object
m_debug_blockrange = 10; // >1, so that will trigger fast state reports
} else {
// send an 'unblock' event, as if a blocked range was freed
m_debug_blockrange = 0;
m_fsm->process_event(Unblocked{});
}
}
}
return 0;
}
/*
* Note: under PG lock
*/
void PgScrubber::update_scrub_stats(ceph::coarse_real_clock::time_point now_is)
{
using clock = ceph::coarse_real_clock;
using namespace std::chrono;
const seconds period_active = seconds(m_pg->get_cct()->_conf.get_val<int64_t>(
"osd_stats_update_period_scrubbing"));
if (!period_active.count()) {
// a way for the operator to disable these stats updates
return;
}
const seconds period_inactive =
seconds(m_pg->get_cct()->_conf.get_val<int64_t>(
"osd_stats_update_period_not_scrubbing") +
m_pg_id.pgid.m_seed % 30);
// determine the required update period, based on our current state
auto period{period_inactive};
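  // while actively scrubbing - update more often; and even more frequently
  // when in the asok 'blocked' debug mode, to speed up test state reporting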
if (m_active) {
period = m_debug_blockrange ? 2s : period_active;
}
/// \todo use the date library (either the one included in Arrow or directly)
/// to get the formatting of the time_points.
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
// will only create the debug strings if required
char buf[50];
auto printable_last = fmt::localtime(clock::to_time_t(m_last_stat_upd));
strftime(buf, sizeof(buf), "%Y-%m-%dT%T", &printable_last);
dout(20) << fmt::format("{}: period: {}/{}-> {} last:{}",
__func__,
period_active,
period_inactive,
period,
buf)
<< dendl;
}
if (now_is - m_last_stat_upd > period) {
m_pg->publish_stats_to_osd();
m_last_stat_upd = now_is;
}
}
// ///////////////////// preemption_data_t //////////////////////////////////
PgScrubber::preemption_data_t::preemption_data_t(PG* pg) : m_pg{pg}
{
m_left = static_cast<int>(
m_pg->get_cct()->_conf.get_val<uint64_t>("osd_scrub_max_preemptions"));
}
void PgScrubber::preemption_data_t::reset()
{
std::lock_guard<ceph::mutex> lk{m_preemption_lock};
m_preemptable = false;
m_preempted = false;
m_left = static_cast<int>(
m_pg->cct->_conf.get_val<uint64_t>("osd_scrub_max_preemptions"));
m_size_divisor = 1;
}
// ///////////////////// ReplicaReservations //////////////////////////////////
namespace Scrub {
void ReplicaReservations::release_replica(pg_shard_t peer, epoch_t epoch)
{
auto m = new MOSDScrubReserve(spg_t(m_pg_info.pgid.pgid, peer.shard),
epoch,
MOSDScrubReserve::RELEASE,
m_pg->pg_whoami);
m_osds->send_message_osd_cluster(peer.osd, m, epoch);
}
ReplicaReservations::ReplicaReservations(
PG* pg,
pg_shard_t whoami,
ScrubQueue::ScrubJobRef scrubjob,
const ConfigProxy& conf)
: m_pg{pg}
, m_acting_set{pg->get_actingset()}
, m_osds{m_pg->get_pg_osd(ScrubberPasskey())}
, m_pending{static_cast<int>(m_acting_set.size()) - 1}
, m_pg_info{m_pg->get_pg_info(ScrubberPasskey())}
, m_scrub_job{scrubjob}
, m_conf{conf}
{
epoch_t epoch = m_pg->get_osdmap_epoch();
m_log_msg_prefix = fmt::format(
"osd.{} ep: {} scrubber::ReplicaReservations pg[{}]: ", m_osds->whoami,
epoch, pg->pg_id);
m_timeout = conf.get_val<std::chrono::milliseconds>(
"osd_scrub_slow_reservation_response");
if (m_pending <= 0) {
// A special case of no replicas.
// just signal the scrub state-machine to continue
send_all_done();
} else {
// send the reservation requests
for (auto p : m_acting_set) {
if (p == whoami)
continue;
auto m = new MOSDScrubReserve(
spg_t(m_pg_info.pgid.pgid, p.shard), epoch, MOSDScrubReserve::REQUEST,
m_pg->pg_whoami);
m_osds->send_message_osd_cluster(p.osd, m, epoch);
m_waited_for_peers.push_back(p);
dout(10) << __func__ << ": reserve " << p.osd << dendl;
}
}
}
void ReplicaReservations::send_all_done()
{
// stop any pending timeout timer
m_osds->queue_for_scrub_granted(m_pg, scrub_prio_t::low_priority);
}
void ReplicaReservations::send_reject()
{
// stop any pending timeout timer
m_scrub_job->resources_failure = true;
m_osds->queue_for_scrub_denied(m_pg, scrub_prio_t::low_priority);
}
void ReplicaReservations::discard_all()
{
dout(10) << __func__ << ": " << m_reserved_peers << dendl;
m_had_rejections = true; // preventing late-coming responses from triggering
// events
m_reserved_peers.clear();
m_waited_for_peers.clear();
}
/*
* The following holds when update_latecomers() is called:
* - we are still waiting for replies from some of the replicas;
* - we might have already set a timer. If so, we should restart it.
* - we might have received responses from 50% of the replicas.
*/
std::optional<ReplicaReservations::tpoint_t>
ReplicaReservations::update_latecomers(tpoint_t now_is)
{
if (m_reserved_peers.size() > m_waited_for_peers.size()) {
// at least half of the replicas have already responded. Time we flag
// latecomers.
return now_is + m_timeout;
} else {
return std::nullopt;
}
}
ReplicaReservations::~ReplicaReservations()
{
m_had_rejections = true; // preventing late-coming responses from triggering
// events
// send un-reserve messages to all reserved replicas. We do not wait for
// answer (there wouldn't be one). Other incoming messages will be discarded
// on the way, by our owner.
epoch_t epoch = m_pg->get_osdmap_epoch();
for (auto& p : m_reserved_peers) {
release_replica(p, epoch);
}
m_reserved_peers.clear();
// note: the release will follow on the heels of the request. When tried
// otherwise, grants that followed a reject arrived after the whole scrub
// machine-state was reset, causing leaked reservations.
for (auto& p : m_waited_for_peers) {
release_replica(p, epoch);
}
m_waited_for_peers.clear();
}
/**
 * @ATTN we would not reach here if the ReplicaReservations object managed by
* the scrubber was reset.
*/
void ReplicaReservations::handle_reserve_grant(OpRequestRef op, pg_shard_t from)
{
dout(10) << __func__ << ": granted by " << from << dendl;
op->mark_started();
{
    // reduce the number of extra release messages. Not a must, but the log is
// cleaner
auto w = find(m_waited_for_peers.begin(), m_waited_for_peers.end(), from);
if (w != m_waited_for_peers.end())
m_waited_for_peers.erase(w);
}
// are we forced to reject the reservation?
if (m_had_rejections) {
dout(10) << __func__ << ": rejecting late-coming reservation from " << from
<< dendl;
release_replica(from, m_pg->get_osdmap_epoch());
} else if (std::find(m_reserved_peers.begin(),
m_reserved_peers.end(),
from) != m_reserved_peers.end()) {
dout(10) << __func__ << ": already had osd." << from << " reserved"
<< dendl;
} else {
dout(10) << __func__ << ": osd." << from << " scrub reserve = success"
<< dendl;
m_reserved_peers.push_back(from);
// was this response late?
auto now_is = clock::now();
if (m_timeout_point && (now_is > *m_timeout_point)) {
m_osds->clog->warn() << fmt::format(
"osd.{} scrubber pg[{}]: late reservation from osd.{}",
m_osds->whoami,
m_pg->pg_id,
from);
m_timeout_point.reset();
} else {
// possibly set a timer to warn about late-coming reservations
m_timeout_point = update_latecomers(now_is);
}
if (--m_pending == 0) {
send_all_done();
}
}
}
void ReplicaReservations::handle_reserve_reject(OpRequestRef op,
pg_shard_t from)
{
dout(10) << __func__ << ": rejected by " << from << dendl;
dout(15) << __func__ << ": " << *op->get_req() << dendl;
op->mark_started();
{
    // reduce the number of extra release messages. Not a must, but the log is
// cleaner
auto w = find(m_waited_for_peers.begin(), m_waited_for_peers.end(), from);
if (w != m_waited_for_peers.end())
m_waited_for_peers.erase(w);
}
if (m_had_rejections) {
// our failure was already handled when the first rejection arrived
dout(15) << __func__ << ": ignoring late-coming rejection from " << from
<< dendl;
} else if (std::find(m_reserved_peers.begin(),
m_reserved_peers.end(),
from) != m_reserved_peers.end()) {
dout(10) << __func__ << ": already had osd." << from << " reserved"
<< dendl;
} else {
dout(10) << __func__ << ": osd." << from << " scrub reserve = fail"
<< dendl;
m_had_rejections = true; // preventing any additional notifications
send_reject();
}
}
void ReplicaReservations::handle_no_reply_timeout()
{
dout(1) << fmt::format(
"{}: timeout! no reply from {}", __func__, m_waited_for_peers)
<< dendl;
// treat reply timeout as if a REJECT was received
m_had_rejections = true; // preventing any additional notifications
send_reject();
}
std::ostream& ReplicaReservations::gen_prefix(std::ostream& out) const
{
return out << m_log_msg_prefix;
}
// ///////////////////// LocalReservation //////////////////////////////////
// note: no dout()s in LocalReservation functions. Client logs interactions.
LocalReservation::LocalReservation(OSDService* osds) : m_osds{osds}
{
if (m_osds->get_scrub_services().inc_scrubs_local()) {
// a failure is signalled by not having m_holding_local_reservation set
m_holding_local_reservation = true;
}
}
LocalReservation::~LocalReservation()
{
if (m_holding_local_reservation) {
m_holding_local_reservation = false;
m_osds->get_scrub_services().dec_scrubs_local();
}
}
// ///////////////////// MapsCollectionStatus ////////////////////////////////
auto MapsCollectionStatus::mark_arriving_map(pg_shard_t from)
-> std::tuple<bool, std::string_view>
{
auto fe =
std::find(m_maps_awaited_for.begin(), m_maps_awaited_for.end(), from);
if (fe != m_maps_awaited_for.end()) {
// we are indeed waiting for a map from this replica
m_maps_awaited_for.erase(fe);
return std::tuple{true, ""sv};
} else {
return std::tuple{false, " unsolicited scrub-map"sv};
}
}
void MapsCollectionStatus::reset()
{
*this = MapsCollectionStatus{};
}
std::string MapsCollectionStatus::dump() const
{
std::string all;
for (const auto& rp : m_maps_awaited_for) {
all.append(rp.get_osd() + " "s);
}
return all;
}
ostream& operator<<(ostream& out, const MapsCollectionStatus& sf)
{
out << " [ ";
for (const auto& rp : sf.m_maps_awaited_for) {
out << rp.get_osd() << " ";
}
if (!sf.m_local_map_ready) {
out << " local ";
}
return out << " ] ";
}
} // namespace Scrub
| 80,181 | 28.178311 | 80 | cc |
null | ceph-main/src/osd/scrubber/pg_scrubber.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// clang-format off
/*
Main Scrubber interfaces:
┌──────────────────────────────────────────────┬────┐
│ │ │
│ │ │
│ PG │ │
│ │ │
│ │ │
├──────────────────────────────────────────────┘ │
│ │
│ PrimaryLogPG │
└────────────────────────────────┬──────────────────┘
│
│
│ ownes & uses
│
│
│
┌────────────────────────────────▼──────────────────┐
│ <<ScrubPgIF>> │
└───────────────────────────▲───────────────────────┘
│
│
│implements
│
│
│
┌───────────────────────────┴───────────────┬───────┐
│ │ │
│ PgScrubber │ │
│ │ │
│ │ ├───────┐
├───────────────────────────────────────────┘ │ │
│ │ │
│ PrimaryLogScrub │ │
└─────┬───────────────────┬─────────────────────────┘ │
│ │ implements
│ ownes & uses │ │
│ │ ┌─────────────────────────▼──────┐
│ │ │ <<ScrubMachineListener>> │
│ │ └─────────▲──────────────────────┘
│ │ │
│ │ │
│ ▼ │
│ ┌────────────────────────────────┴───────┐
│ │ │
│ │ ScrubMachine │
│ │ │
│ └────────────────────────────────────────┘
│
┌───▼─────────────────────────────────┐
│ │
│ ScrubStore │
│ │
└─────────────────────────────────────┘
*/
// clang-format on
#include <cassert>
#include <chrono>
#include <memory>
#include <mutex>
#include <optional>
#include <string>
#include <string_view>
#include <vector>
#include "osd/PG.h"
#include "osd/scrubber_common.h"
#include "ScrubStore.h"
#include "osd_scrub_sched.h"
#include "scrub_backend.h"
#include "scrub_machine_lstnr.h"
namespace Scrub {
class ScrubMachine;
struct BuildMap;
/**
* Reserving/freeing scrub resources at the replicas.
*
* When constructed - sends reservation requests to the acting_set.
* A rejection triggers a "couldn't acquire the replicas' scrub resources"
* event. All previous requests, whether already granted or not, are explicitly
* released.
*
* Timeouts:
*
* Slow-Secondary Warning:
* Once at least half of the replicas have accepted the reservation, we start
* reporting any secondary that takes too long (more than <conf> milliseconds
 * after the previous response was received) to respond to the reservation request.
* (Why? because we have encountered real-life situations where a specific OSD
* was systematically very slow (e.g. 5 seconds) to respond to the reservation
* requests, slowing the scrub process to a crawl).
*
* Reservation Timeout:
* We limit the total time we wait for the replicas to respond to the
* reservation request. If we don't get all the responses (either Grant or
* Reject) within <conf> milliseconds, we give up and release all the
* reservations we have acquired so far.
* (Why? because we have encountered instances where a reservation request was
* lost - either due to a bug or due to a network issue.)
*
* A note re performance: I've measured a few container alternatives for
* m_reserved_peers, with its specific usage pattern. Std::set is extremely
* slow, as expected. flat_set is only slightly better. Surprisingly -
* std::vector (with no sorting) is better than boost::small_vec. And for
* std::vector: no need to pre-reserve.
*/
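// A rough usage sketch (based on how PgScrubber holds this object as an
// std::optional member; the exact call sites may differ):
//
//   m_reservations.emplace(m_pg, m_pg_whoami, m_scrub_job, conf); // sends REQUESTs
//   // ... incoming MOSDScrubReserve replies are forwarded here:
//   m_reservations->handle_reserve_grant(op, from);  // may signal 'all granted'
//   m_reservations->handle_reserve_reject(op, from); // signals 'denied' (once)
//   m_reservations.reset(); // dtor sends RELEASE to every reserved replica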
class ReplicaReservations {
using clock = std::chrono::system_clock;
using tpoint_t = std::chrono::time_point<clock>;
PG* m_pg;
std::set<pg_shard_t> m_acting_set;
OSDService* m_osds;
std::vector<pg_shard_t> m_waited_for_peers;
std::vector<pg_shard_t> m_reserved_peers;
bool m_had_rejections{false};
int m_pending{-1};
const pg_info_t& m_pg_info;
ScrubQueue::ScrubJobRef m_scrub_job; ///< a ref to this PG's scrub job
const ConfigProxy& m_conf;
// detecting slow peers (see 'slow-secondary' above)
std::chrono::milliseconds m_timeout;
std::optional<tpoint_t> m_timeout_point;
void release_replica(pg_shard_t peer, epoch_t epoch);
void send_all_done(); ///< all reservations are granted
/// notify the scrubber that we have failed to reserve replicas' resources
void send_reject();
std::optional<tpoint_t> update_latecomers(tpoint_t now_is);
public:
std::string m_log_msg_prefix;
/**
* quietly discard all knowledge about existing reservations. No messages
* are sent to peers.
   * To be used upon interval change, as we know that the running scrub is no
* longer relevant, and that the replicas had reset the reservations on
* their side.
*/
void discard_all();
ReplicaReservations(PG* pg,
pg_shard_t whoami,
ScrubQueue::ScrubJobRef scrubjob,
const ConfigProxy& conf);
~ReplicaReservations();
void handle_reserve_grant(OpRequestRef op, pg_shard_t from);
void handle_reserve_reject(OpRequestRef op, pg_shard_t from);
// if timing out on receiving replies from our replicas:
void handle_no_reply_timeout();
std::ostream& gen_prefix(std::ostream& out) const;
};
/**
* wraps the local OSD scrub resource reservation in an RAII wrapper
*/
class LocalReservation {
OSDService* m_osds;
bool m_holding_local_reservation{false};
public:
explicit LocalReservation(OSDService* osds);
~LocalReservation();
bool is_reserved() const { return m_holding_local_reservation; }
};
/**
* Once all replicas' scrub maps are received, we go on to compare the maps.
 * That is - unless we have not yet completed building our own scrub map.
* MapsCollectionStatus combines the status of waiting for both the local map
* and the replicas, without resorting to adding dummy entries into a list.
*/
class MapsCollectionStatus {
bool m_local_map_ready{false};
std::vector<pg_shard_t> m_maps_awaited_for;
public:
[[nodiscard]] bool are_all_maps_available() const
{
return m_local_map_ready && m_maps_awaited_for.empty();
}
void mark_local_map_ready() { m_local_map_ready = true; }
void mark_replica_map_request(pg_shard_t from_whom)
{
m_maps_awaited_for.push_back(from_whom);
}
/// @returns true if indeed waiting for this one. Otherwise: an error string
auto mark_arriving_map(pg_shard_t from) -> std::tuple<bool, std::string_view>;
[[nodiscard]] std::vector<pg_shard_t> get_awaited() const
{
return m_maps_awaited_for;
}
void reset();
std::string dump() const;
friend ostream& operator<<(ostream& out, const MapsCollectionStatus& sf);
};
} // namespace Scrub
/**
* the scrub operation flags. Primary only.
* Set at scrub start. Checked in multiple locations - mostly
* at finish.
*/
struct scrub_flags_t {
unsigned int priority{0};
/**
* set by queue_scrub() if either planned_scrub.auto_repair or
* need_auto were set.
* Tested at scrub end.
*/
bool auto_repair{false};
/// this flag indicates that we are scrubbing post repair to verify everything
/// is fixed
bool check_repair{false};
/// checked at the end of the scrub, to possibly initiate a deep-scrub
bool deep_scrub_on_error{false};
/**
* scrub must not be aborted.
* Set for explicitly requested scrubs, and for scrubs originated by the
* pairing process with the 'repair' flag set (in the RequestScrub event).
*/
bool required{false};
};
ostream& operator<<(ostream& out, const scrub_flags_t& sf);
/**
* The part of PG-scrubbing code that isn't state-machine wiring.
*
* Why the separation? I wish to move to a different FSM implementation. Thus I
* am forced to strongly decouple the state-machine implementation details from
* the actual scrubbing code.
*/
class PgScrubber : public ScrubPgIF,
public ScrubMachineListener,
public ScrubBeListener {
public:
explicit PgScrubber(PG* pg);
friend class ScrubBackend; // will be replaced by a limited interface
// ------------------ the I/F exposed to the PG (ScrubPgIF) -------------
  /// are we waiting for resource reservation grants from our replicas?
[[nodiscard]] bool is_reserving() const final;
void initiate_regular_scrub(epoch_t epoch_queued) final;
void initiate_scrub_after_repair(epoch_t epoch_queued) final;
void send_scrub_resched(epoch_t epoch_queued) final;
void active_pushes_notification(epoch_t epoch_queued) final;
void update_applied_notification(epoch_t epoch_queued) final;
void send_scrub_unblock(epoch_t epoch_queued) final;
void digest_update_notification(epoch_t epoch_queued) final;
void send_replica_maps_ready(epoch_t epoch_queued) final;
void send_start_replica(epoch_t epoch_queued, Scrub::act_token_t token) final;
void send_sched_replica(epoch_t epoch_queued, Scrub::act_token_t token) final;
void send_replica_pushes_upd(epoch_t epoch_queued) final;
/**
* The PG has updated its 'applied version'. It might be that we are waiting
* for this information: after selecting a range of objects to scrub, we've
* marked the latest version of these objects in m_subset_last_update. We will
* not start the map building before we know that the PG has reached this
* version.
*/
void on_applied_when_primary(const eversion_t& applied_version) final;
void send_chunk_free(epoch_t epoch_queued) final;
void send_chunk_busy(epoch_t epoch_queued) final;
void send_local_map_done(epoch_t epoch_queued) final;
void send_get_next_chunk(epoch_t epoch_queued) final;
void send_scrub_is_finished(epoch_t epoch_queued) final;
/**
   * we allow some number of preemptions of the scrub, which means we do
* not block. Then we start to block. Once we start blocking, we do
* not stop until the scrub range is completed.
*/
bool write_blocked_by_scrub(const hobject_t& soid) final;
/// true if the given range intersects the scrub interval in any way
bool range_intersects_scrub(const hobject_t& start,
const hobject_t& end) final;
/**
* we are a replica being asked by the Primary to reserve OSD resources for
* scrubbing
*/
void handle_scrub_reserve_request(OpRequestRef op) final;
void handle_scrub_reserve_grant(OpRequestRef op, pg_shard_t from) final;
void handle_scrub_reserve_reject(OpRequestRef op, pg_shard_t from) final;
void handle_scrub_reserve_release(OpRequestRef op) final;
void discard_replica_reservations() final;
void clear_scrub_reservations() final; // PG::clear... fwds to here
void unreserve_replicas() final;
void on_replica_reservation_timeout() final;
// managing scrub op registration
void update_scrub_job(const requested_scrub_t& request_flags) final;
void rm_from_osd_scrubbing() final;
void on_pg_activate(const requested_scrub_t& request_flags) final;
void scrub_requested(
scrub_level_t scrub_level,
scrub_type_t scrub_type,
requested_scrub_t& req_flags) final;
/**
* Reserve local scrub resources (managed by the OSD)
*
* Fails if OSD's local-scrubs budget was exhausted
* \returns were local resources reserved?
*/
bool reserve_local() final;
void handle_query_state(ceph::Formatter* f) final;
pg_scrubbing_status_t get_schedule() const final;
void dump_scrubber(ceph::Formatter* f,
const requested_scrub_t& request_flags) const final;
// used if we are a replica
void replica_scrub_op(OpRequestRef op) final;
/// the op priority, taken from the primary's request message
Scrub::scrub_prio_t replica_op_priority() const final
{
return m_replica_request_priority;
};
unsigned int scrub_requeue_priority(
Scrub::scrub_prio_t with_priority,
unsigned int suggested_priority) const final;
/// the version that refers to m_flags.priority
unsigned int scrub_requeue_priority(
Scrub::scrub_prio_t with_priority) const final;
void add_callback(Context* context) final { m_callbacks.push_back(context); }
[[nodiscard]] bool are_callbacks_pending() const final // used for an assert
// in PG.cc
{
return !m_callbacks.empty();
}
/// handle a message carrying a replica map
void map_from_replica(OpRequestRef op) final;
void on_new_interval() final;
void scrub_clear_state() final;
bool is_queued_or_active() const final;
/**
* add to scrub statistics, but only if the soid is below the scrub start
*/
void stats_of_handled_objects(const object_stat_sum_t& delta_stats,
const hobject_t& soid) override
{
ceph_assert(false);
}
/**
* finalize the parameters of the initiated scrubbing session:
*
* The "current scrub" flags (m_flags) are set from the 'planned_scrub'
* flag-set; PG_STATE_SCRUBBING, and possibly PG_STATE_DEEP_SCRUB &
* PG_STATE_REPAIR are set.
*/
void set_op_parameters(const requested_scrub_t& request) final;
void cleanup_store(ObjectStore::Transaction* t) final;
bool get_store_errors(const scrub_ls_arg_t& arg,
scrub_ls_result_t& res_inout) const override
{
return false;
}
void update_scrub_stats(ceph::coarse_real_clock::time_point now_is) final;
int asok_debug(std::string_view cmd,
std::string param,
Formatter* f,
std::stringstream& ss) override;
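  /// testing aid (see asok_debug()): when positive, the next 'select_range'
  /// will report a blocked object range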
int m_debug_blockrange{0};
// --------------------------------------------------------------------------
// the I/F used by the state-machine (i.e. the implementation of
// ScrubMachineListener)
CephContext* get_cct() const final { return m_pg->cct; }
LogChannelRef &get_clog() const final;
int get_whoami() const final;
spg_t get_spgid() const final { return m_pg->get_pgid(); }
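  /// schedule 'cb' to run after 'duration'; the returned token can later be
  /// handed to cancel_callback() to abort the callback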
scrubber_callback_cancel_token_t schedule_callback_after(
ceph::timespan duration, scrubber_callback_t &&cb);
void cancel_callback(scrubber_callback_cancel_token_t);
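  /// the grace period allowed for a blocked scrub range: zero (i.e. disabled)
  /// if so configured, a short 4s period if the 'block' debug flag is set,
  /// and the configured value otherwise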
ceph::timespan get_range_blocked_grace() {
int grace = get_pg_cct()->_conf->osd_blocked_scrub_grace_period;
if (grace == 0) {
return ceph::timespan{};
}
ceph::timespan grace_period{
m_debug_blockrange ?
std::chrono::seconds(4) :
std::chrono::seconds{grace}};
return grace_period;
}
[[nodiscard]] bool is_primary() const final
{
return m_pg->recovery_state.is_primary();
}
void set_state_name(const char* name) final
{
m_fsm_state_name = name;
}
void select_range_n_notify() final;
void set_scrub_blocked(utime_t since) final;
void clear_scrub_blocked() final;
/// walk the log to find the latest update that affects our chunk
eversion_t search_log_for_updates() const final;
eversion_t get_last_update_applied() const final
{
return m_pg->recovery_state.get_last_update_applied();
}
int pending_active_pushes() const final { return m_pg->active_pushes; }
void on_init() final;
void on_replica_init() final;
void replica_handling_done() final;
/// the version of 'scrub_clear_state()' that does not try to invoke FSM
/// services (thus can be called from FSM reactions)
void clear_pgscrub_state() final;
std::chrono::milliseconds get_scrub_sleep_time() const final;
void queue_for_scrub_resched(Scrub::scrub_prio_t prio) final;
void get_replicas_maps(bool replica_can_preempt) final;
void on_digest_updates() final;
void scrub_finish() final;
ScrubMachineListener::MsgAndEpoch prep_replica_map_msg(
Scrub::PreemptionNoted was_preempted) final;
void send_replica_map(
const ScrubMachineListener::MsgAndEpoch& preprepared) final;
void send_preempted_replica() final;
void send_remotes_reserved(epoch_t epoch_queued) final;
void send_reservation_failure(epoch_t epoch_queued) final;
/**
* does the PG have newer updates than what we (the scrubber) know?
*/
[[nodiscard]] bool has_pg_marked_new_updates() const final;
void set_subset_last_update(eversion_t e) final;
void maps_compare_n_cleanup() final;
Scrub::preemption_t& get_preemptor() final;
int build_primary_map_chunk() final;
int build_replica_map_chunk() final;
void reserve_replicas() final;
void set_reserving_now() final;
void clear_reserving_now() final;
[[nodiscard]] bool was_epoch_changed() const final;
void set_queued_or_active() final;
/// Clears `m_queued_or_active` and restarts snaptrimming
void clear_queued_or_active() final;
void dec_scrubs_remote() final;
void advance_token() final;
void mark_local_map_ready() final;
[[nodiscard]] bool are_all_maps_available() const final;
std::string dump_awaited_maps() const final;
void set_scrub_begin_time() final;
void set_scrub_duration() final;
utime_t scrub_begin_stamp;
std::ostream& gen_prefix(std::ostream& out) const final;
/// facilitate scrub-backend access to SnapMapper mappings
Scrub::SnapMapReaderI& get_snap_mapper_accessor()
{
return m_pg->snap_mapper;
}
void log_cluster_warning(const std::string& warning) const final;
protected:
bool state_test(uint64_t m) const { return m_pg->state_test(m); }
void state_set(uint64_t m) { m_pg->state_set(m); }
void state_clear(uint64_t m) { m_pg->state_clear(m); }
[[nodiscard]] bool is_scrub_registered() const;
/// the 'is-in-scheduling-queue' status, using relaxed-semantics access to the
/// status
std::string_view registration_state() const;
virtual void _scrub_clear_state() {}
utime_t m_scrub_reg_stamp; ///< stamp we registered for
ScrubQueue::ScrubJobRef m_scrub_job; ///< the scrub-job used by the OSD to
///< schedule us
ostream& show(ostream& out) const override;
public:
// ------------------ the I/F used by the ScrubBackend (ScrubBeListener)
// note: the reason we must have these forwarders, is because of the
// artificial PG vs. PrimaryLogPG distinction. Some of the services used
// by the scrubber backend are PrimaryLog-specific.
void add_to_stats(const object_stat_sum_t& stat) override
{
ceph_assert(0 && "expecting a PrimaryLogScrub object");
}
void submit_digest_fixes(const digests_fixes_t& fixes) override
{
ceph_assert(0 && "expecting a PrimaryLogScrub object");
}
CephContext* get_pg_cct() const final { return m_pg->cct; }
LoggerSinkSet& get_logger() const final;
spg_t get_pgid() const final { return m_pg->get_pgid(); }
/// Returns reference to current osdmap
const OSDMapRef& get_osdmap() const final;
// ---------------------------------------------------------------------------
friend ostream& operator<<(ostream& out, const PgScrubber& scrubber);
static utime_t scrub_must_stamp() { return utime_t(1, 1); }
virtual ~PgScrubber(); // must be defined separately, in the .cc file
[[nodiscard]] bool is_scrub_active() const final { return m_active; }
private:
void reset_internal_state();
bool is_token_current(Scrub::act_token_t received_token);
void requeue_waiting() const { m_pg->requeue_ops(m_pg->waiting_for_scrub); }
/**
* mark down some parameters of the initiated scrub:
* - the epoch when started;
* - the depth of the scrub requested (from the PG_STATE variable)
*/
void reset_epoch(epoch_t epoch_queued);
void run_callbacks();
// 'query' command data for an active scrub
void dump_active_scrubber(ceph::Formatter* f, bool is_deep) const;
// ----- methods used to verify the relevance of incoming events:
/**
* should_drop_message
*
   * Returns false if the message was sent in the current epoch. Otherwise,
* returns true and logs a debug message.
*/
bool should_drop_message(OpRequestRef &op) const;
/**
* is the incoming event still relevant and should be forwarded to the FSM?
*
* It isn't if:
* - (1) we are no longer 'actively scrubbing'; or
* - (2) the message is from an epoch prior to when we started the current
* scrub session; or
* - (3) the message epoch is from a previous interval; or
* - (4) the 'abort' configuration flags were set.
*
* For (1) & (2) - the incoming message is discarded, w/o further action.
*
* For (3): (see check_interval() for a full description) if we have not
* reacted yet to this specific new interval, we do now:
* - replica reservations are silently discarded (we count on the replicas to
* notice the interval change and un-reserve themselves);
* - the scrubbing is halted.
*
* For (4): the message will be discarded, but also:
* if this is the first time we've noticed the 'abort' request, we perform
* the abort.
*
* \returns should the incoming event be processed?
*/
bool is_message_relevant(epoch_t epoch_to_verify);
/**
* check the 'no scrub' configuration options.
*/
[[nodiscard]] bool should_abort() const;
/**
* Check the 'no scrub' configuration flags.
*
* Reset everything if the abort was not handled before.
* @returns false if the message was discarded due to abort flag.
*/
[[nodiscard]] bool verify_against_abort(epoch_t epoch_to_verify);
[[nodiscard]] bool check_interval(epoch_t epoch_to_verify) const;
epoch_t m_last_aborted{}; // last time we've noticed a request to abort
// 'optional', as 'ReplicaReservations' & 'LocalReservation' are
// 'RAII-designed' to guarantee un-reserving when deleted.
std::optional<Scrub::ReplicaReservations> m_reservations;
std::optional<Scrub::LocalReservation> m_local_osd_resource;
void cleanup_on_finish(); // scrub_clear_state() as called for a Primary when
// Active->NotActive
protected:
PG* const m_pg;
/**
* the derivative-specific scrub-finishing touches:
*/
virtual void _scrub_finish() {}
// common code used by build_primary_map_chunk() and
// build_replica_map_chunk():
int build_scrub_map_chunk(ScrubMap& map, // primary or replica?
ScrubMapBuilder& pos,
hobject_t start,
hobject_t end,
bool deep);
std::unique_ptr<Scrub::ScrubMachine> m_fsm;
/// the FSM state, as a string for logging
const char* m_fsm_state_name{nullptr};
const spg_t m_pg_id; ///< a local copy of m_pg->pg_id
OSDService* const m_osds;
const pg_shard_t m_pg_whoami; ///< a local copy of m_pg->pg_whoami;
epoch_t m_interval_start{0}; ///< interval's 'from' of when scrubbing was
///< first scheduled
void repair_oinfo_oid(ScrubMap& smap);
/*
   * the exact epoch when the scrubbing actually started (i.e. once the
   * no-scrub configuration checks were cleared here). Incoming events are
   * verified against this, with
* stale events discarded.
*/
epoch_t m_epoch_start{0}; ///< the actual epoch when scrubbing started
/**
* (replica) a tag identifying a specific scrub "session". Incremented
* whenever the Primary releases the replica scrub resources. When the scrub
* session is terminated (even if the interval remains unchanged, as might
* happen following an asok no-scrub command), stale scrub-resched messages
* triggered by the backend will be discarded.
*/
Scrub::act_token_t m_current_token{1};
/**
* (primary/replica) a test aid. A counter that is incremented whenever a
* scrub starts, and again when it terminates. Exposed as part of the 'pg
* query' command, to be used by test scripts.
*
* @ATTN: not guaranteed to be accurate. To be only used for tests. This is
* why it is initialized to a meaningless number;
*/
int32_t m_sessions_counter{
(int32_t)((int64_t)(this) & 0x0000'0000'00ff'fff0)};
  bool m_publish_sessions{false};  ///< will the sessions counter be part of
                                   ///< the 'query' output?
scrub_flags_t m_flags;
/// a reference to the details of the next scrub (as requested and managed by
/// the PG)
requested_scrub_t& m_planned_scrub;
bool m_active{false};
/**
* a flag designed to prevent the initiation of a second scrub on a PG for
* which scrubbing has been initiated.
*
* set once scrubbing was initiated (i.e. - even before the FSM event that
* will trigger a state-change out of Inactive was handled), and only reset
* once the FSM is back in Inactive.
* In other words - its ON period encompasses:
* - the time period covered today by 'queued', and
* - the time when m_active is set, and
* - all the time from scrub_finish() calling update_stats() till the
* FSM handles the 'finished' event
*
* Compared with 'm_active', this flag is asserted earlier and remains ON for
* longer.
*/
bool m_queued_or_active{false};
eversion_t m_subset_last_update{};
std::unique_ptr<Scrub::Store> m_store;
int num_digest_updates_pending{0};
hobject_t m_start, m_end; ///< note: half-closed: [start,end)
/// Returns epoch of current osdmap
epoch_t get_osdmap_epoch() const { return get_osdmap()->get_epoch(); }
// collected statistics
int m_shallow_errors{0};
int m_deep_errors{0};
int m_fixed_count{0};
protected:
/**
* 'm_is_deep' - is the running scrub a deep one?
*
* Note that most of the code directly checks PG_STATE_DEEP_SCRUB, which is
* primary-only (and is set earlier - when scheduling the scrub). 'm_is_deep'
* is meaningful both for the primary and the replicas, and is used as a
* parameter when building the scrub maps.
*/
bool m_is_deep{false};
/**
* If set: affects the backend & scrubber-backend functions called after all
* scrub maps are available.
*
* Replaces code that directly checks PG_STATE_REPAIR (which was meant to be
* a "user facing" status display only).
*/
bool m_is_repair{false};
/**
* User-readable summary of the scrubber's current mode of operation. Used for
* both osd.*.log and the cluster log.
   * One of:
   *  "repair"
   *  "deep-scrub"
   *  "scrub"
*
   * Note: based on PG_STATE_REPAIR, and not on m_is_repair. I.e. an
   * auto_repair scrub will show as "deep-scrub" and not as "repair" (until
   * the first error is detected).
*/
std::string_view m_mode_desc;
void update_op_mode_text();
private:
/**
* initiate a deep-scrub after the current scrub ended with errors.
*/
void request_rescrubbing(requested_scrub_t& req_flags);
/*
* Select a range of objects to scrub.
*
* By:
* - setting tentative range based on conf and divisor
* - requesting a partial list of elements from the backend;
* - handling some head/clones issues
*
* The selected range is set directly into 'm_start' and 'm_end'
*/
bool select_range();
std::list<Context*> m_callbacks;
/**
* send a replica (un)reservation request to the acting set
*
* @param opcode - one of MOSDScrubReserve::REQUEST
* or MOSDScrubReserve::RELEASE
*/
void message_all_replicas(int32_t opcode, std::string_view op_text);
hobject_t m_max_end; ///< Largest end that may have been sent to replicas
ScrubMapBuilder m_primary_scrubmap_pos;
void _request_scrub_map(pg_shard_t replica,
eversion_t version,
hobject_t start,
hobject_t end,
bool deep,
bool allow_preemption);
Scrub::MapsCollectionStatus m_maps_status;
void persist_scrub_results(inconsistent_objs_t&& all_errors);
void apply_snap_mapper_fixes(
const std::vector<Scrub::snap_mapper_fix_t>& fix_list);
// our latest periodic 'publish_stats_to_osd()'. Required frequency depends on
// scrub state.
ceph::coarse_real_clock::time_point m_last_stat_upd{};
// ------------ members used if we are a replica
epoch_t m_replica_min_epoch; ///< the min epoch needed to handle this message
ScrubMapBuilder replica_scrubmap_pos;
ScrubMap replica_scrubmap;
// the backend, handling the details of comparing maps & fixing objects
std::unique_ptr<ScrubBackend> m_be;
/**
* we mark the request priority as it arrived. It influences the queuing
* priority when we wait for local updates
*/
Scrub::scrub_prio_t m_replica_request_priority;
/**
* the 'preemption' "state-machine".
* Note: I was considering an orthogonal sub-machine implementation, but as
* the state diagram is extremely simple, the added complexity wasn't
* justified.
*/
class preemption_data_t : public Scrub::preemption_t {
public:
explicit preemption_data_t(PG* pg); // the PG access is used for conf
// access (and logs)
[[nodiscard]] bool is_preemptable() const final { return m_preemptable; }
preemption_data_t(const preemption_data_t&) = delete;
preemption_data_t(preemption_data_t&&) = delete;
bool do_preempt() final
{
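      // a cheap unlocked check first; 'preemptable' is re-verified under the
      // lock before the preemption is actually recorded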
if (m_preempted || !m_preemptable)
return false;
std::lock_guard<ceph::mutex> lk{m_preemption_lock};
if (!m_preemptable)
return false;
m_preempted = true;
return true;
}
/// same as 'do_preempt()' but w/o checks (as once a replica
/// was preempted, we cannot continue)
void replica_preempted() { m_preempted = true; }
void enable_preemption()
{
std::lock_guard<ceph::mutex> lk{m_preemption_lock};
if (are_preemptions_left() && !m_preempted) {
m_preemptable = true;
}
}
/// used by a replica to set preemptability state according to the Primary's
/// request
void force_preemptability(bool is_allowed)
{
// note: no need to lock for a replica
m_preempted = false;
m_preemptable = is_allowed;
}
bool disable_and_test() final
{
std::lock_guard<ceph::mutex> lk{m_preemption_lock};
m_preemptable = false;
return m_preempted;
}
[[nodiscard]] bool was_preempted() const { return m_preempted; }
[[nodiscard]] size_t chunk_divisor() const { return m_size_divisor; }
void reset();
void adjust_parameters() final
{
std::lock_guard<ceph::mutex> lk{m_preemption_lock};
if (m_preempted) {
m_preempted = false;
m_preemptable = adjust_left();
} else {
m_preemptable = are_preemptions_left();
}
}
private:
PG* m_pg;
mutable ceph::mutex m_preemption_lock = ceph::make_mutex("preemption_lock");
bool m_preemptable{false};
bool m_preempted{false};
int m_left;
size_t m_size_divisor{1};
bool are_preemptions_left() const { return m_left > 0; }
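    /// consume one allowed preemption and double the chunk-size divisor, so
    /// that the next chunk selected will be smaller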
bool adjust_left()
{
if (m_left > 0) {
--m_left;
m_size_divisor *= 2;
}
return m_left > 0;
}
};
preemption_data_t preemption_data;
};
| 31,558 | 30.092611 | 81 | h |
null | ceph-main/src/osd/scrubber/scrub_backend.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=2 sw=2 smarttab
#include "./scrub_backend.h"
#include <algorithm>
#include <fmt/ranges.h>
#include "common/debug.h"
#include "include/utime_fmt.h"
#include "messages/MOSDRepScrubMap.h"
#include "osd/ECUtil.h"
#include "osd/OSD.h"
#include "osd/PG.h"
#include "osd/PrimaryLogPG.h"
#include "osd/osd_types_fmt.h"
#include "pg_scrubber.h"
using std::set;
using std::stringstream;
using std::vector;
using namespace Scrub;
using namespace std::chrono;
using namespace std::chrono_literals;
using namespace std::literals;
#define dout_context (m_scrubber.get_pg_cct())
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix ScrubBackend::logger_prefix(_dout, this)
std::ostream& ScrubBackend::logger_prefix(std::ostream* out,
const ScrubBackend* t)
{
return t->m_scrubber.gen_prefix(*out) << " b.e.: ";
}
// ////////////////////////////////////////////////////////////////////////// //
// for a Primary
ScrubBackend::ScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep,
const std::set<pg_shard_t>& acting)
: m_scrubber{scrubber}
, m_pg{pg}
, m_pg_whoami{i_am}
, m_repair{repair}
, m_depth{shallow_or_deep}
, m_pg_id{scrubber.get_pgid()}
, m_pool{m_pg.get_pgpool()}
, m_incomplete_clones_allowed{m_pool.info.allow_incomplete_clones()}
, m_conf{m_scrubber.get_pg_cct()->_conf}
, clog{m_scrubber.get_logger()}
{
m_formatted_id = m_pg_id.calc_name_sring();
m_acting_but_me.reserve(acting.size());
std::copy_if(acting.begin(),
acting.end(),
std::back_inserter(m_acting_but_me),
[i_am](const pg_shard_t& shard) { return shard != i_am; });
m_is_replicated = m_pool.info.is_replicated();
m_mode_desc =
(m_repair ? "repair"sv
: (m_depth == scrub_level_t::deep ? "deep-scrub"sv : "scrub"sv));
}
// for a Replica
ScrubBackend::ScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep)
: m_scrubber{scrubber}
, m_pg{pg}
, m_pg_whoami{i_am}
, m_repair{repair}
, m_depth{shallow_or_deep}
, m_pg_id{scrubber.get_pgid()}
, m_pool{m_pg.get_pgpool()}
, m_conf{m_scrubber.get_pg_cct()->_conf}
, clog{m_scrubber.get_logger()}
{
m_formatted_id = m_pg_id.calc_name_sring();
m_is_replicated = m_pool.info.is_replicated();
m_mode_desc =
(m_repair ? "repair"sv
: (m_depth == scrub_level_t::deep ? "deep-scrub"sv : "scrub"sv));
}
uint64_t ScrubBackend::logical_to_ondisk_size(uint64_t logical_size) const
{
return m_pg.logical_to_ondisk_size(logical_size);
}
void ScrubBackend::update_repair_status(bool should_repair)
{
dout(15) << __func__
<< ": repair state set to :" << (should_repair ? "true" : "false")
<< dendl;
m_repair = should_repair;
m_mode_desc =
(m_repair ? "repair"sv
: (m_depth == scrub_level_t::deep ? "deep-scrub"sv : "scrub"sv));
}
void ScrubBackend::new_chunk()
{
dout(15) << __func__ << dendl;
this_chunk.emplace(m_pg_whoami);
}
ScrubMap& ScrubBackend::get_primary_scrubmap()
{
return this_chunk->received_maps[m_pg_whoami];
}
void ScrubBackend::merge_to_authoritative_set()
{
dout(15) << __func__ << dendl;
ceph_assert(m_scrubber.is_primary());
ceph_assert(this_chunk->authoritative_set.empty() &&
"the scrubber-backend should be empty");
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 15>()) {
for (const auto& rpl : m_acting_but_me) {
dout(15) << fmt::format("{}: replica {} has {} items",
__func__,
rpl,
this_chunk->received_maps[rpl].objects.size())
<< dendl;
}
}
// Construct the authoritative set of objects
for (const auto& map : this_chunk->received_maps) {
std::transform(map.second.objects.begin(),
map.second.objects.end(),
std::inserter(this_chunk->authoritative_set,
this_chunk->authoritative_set.end()),
[](const auto& i) { return i.first; });
}
}
ScrubMap& ScrubBackend::my_map()
{
return this_chunk->received_maps[m_pg_whoami];
}
void ScrubBackend::decode_received_map(pg_shard_t from,
const MOSDRepScrubMap& msg)
{
auto p = const_cast<bufferlist&>(msg.get_data()).cbegin();
this_chunk->received_maps[from].decode(p, m_pool.id);
dout(15) << __func__ << ": decoded map from : " << from
<< ": versions: " << this_chunk->received_maps[from].valid_through
<< " / " << msg.get_map_epoch() << dendl;
}
std::vector<snap_mapper_fix_t> ScrubBackend::replica_clean_meta(
ScrubMap& repl_map,
bool max_reached,
const hobject_t& start,
SnapMapReaderI& snaps_getter)
{
dout(15) << __func__ << ": REPL META # " << m_cleaned_meta_map.objects.size()
<< " objects" << dendl;
ceph_assert(!m_cleaned_meta_map.objects.size());
m_cleaned_meta_map.clear_from(start); // RRR how can this be required?
m_cleaned_meta_map.insert(repl_map);
auto for_meta_scrub = clean_meta_map(m_cleaned_meta_map, max_reached);
return scan_snaps(for_meta_scrub, snaps_getter);
}
// /////////////////////////////////////////////////////////////////////////////
//
// comparing the maps
//
// /////////////////////////////////////////////////////////////////////////////
objs_fix_list_t ScrubBackend::scrub_compare_maps(
bool max_reached,
SnapMapReaderI& snaps_getter)
{
dout(10) << __func__ << " has maps, analyzing" << dendl;
ceph_assert(m_scrubber.is_primary());
// construct authoritative scrub map for type-specific scrubbing
m_cleaned_meta_map.insert(my_map());
merge_to_authoritative_set();
// collect some omap statistics into m_omap_stats
omap_checks();
update_authoritative();
auto for_meta_scrub = clean_meta_map(m_cleaned_meta_map, max_reached);
// ok, do the pg-type specific scrubbing
// (Validates consistency of the object info and snap sets)
scrub_snapshot_metadata(for_meta_scrub);
return objs_fix_list_t{std::move(this_chunk->m_inconsistent_objs),
scan_snaps(for_meta_scrub, snaps_getter)};
}
void ScrubBackend::omap_checks()
{
const bool needs_omap_check = std::any_of(
this_chunk->received_maps.begin(),
this_chunk->received_maps.end(),
[](const auto& m) -> bool {
return m.second.has_large_omap_object_errors || m.second.has_omap_keys;
});
if (!needs_omap_check) {
return; // Nothing to do
}
stringstream wss;
// Iterate through objects and update omap stats
for (const auto& ho : this_chunk->authoritative_set) {
for (const auto& [srd, smap] : this_chunk->received_maps) {
if (srd != m_pg_whoami) {
// Only set omap stats for the primary
continue;
}
auto it = smap.objects.find(ho);
if (it == smap.objects.end()) {
continue;
}
const ScrubMap::object& smap_obj = it->second;
m_omap_stats.omap_bytes += smap_obj.object_omap_bytes;
m_omap_stats.omap_keys += smap_obj.object_omap_keys;
if (smap_obj.large_omap_object_found) {
auto osdmap = m_scrubber.get_osdmap();
pg_t pg;
osdmap->map_to_pg(ho.pool, ho.oid.name, ho.get_key(), ho.nspace, &pg);
pg_t mpg = osdmap->raw_pg_to_pg(pg);
m_omap_stats.large_omap_objects++;
wss << "Large omap object found. Object: " << ho << " PG: " << pg
<< " (" << mpg << ")"
<< " Key count: " << smap_obj.large_omap_object_key_count
<< " Size (bytes): " << smap_obj.large_omap_object_value_size
<< '\n';
break;
}
}
}
if (!wss.str().empty()) {
dout(5) << __func__ << ": " << wss.str() << dendl;
clog.warn(wss);
}
}
/*
* update_authoritative() updates:
*
* - m_auth_peers: adds obj-> list of pairs < scrub-map, shard>
*
* - m_cleaned_meta_map: replaces [obj] entry with:
* the relevant object in the scrub-map of the "selected" (back-most) peer
*/
void ScrubBackend::update_authoritative()
{
dout(10) << __func__ << dendl;
if (m_acting_but_me.empty()) {
return;
}
compare_smaps(); // note: might cluster-log errors
// update the session-wide m_auth_peers with the list of good
// peers for each object (i.e. the ones that are in this_chunks's auth list)
for (auto& [obj, peers] : this_chunk->authoritative) {
auth_peers_t good_peers;
for (auto& peer : peers) {
good_peers.emplace_back(this_chunk->received_maps[peer].objects[obj],
peer);
}
m_auth_peers.emplace(obj, std::move(good_peers));
}
for (const auto& [obj, peers] : this_chunk->authoritative) {
m_cleaned_meta_map.objects.erase(obj);
m_cleaned_meta_map.objects.insert(
*(this_chunk->received_maps[peers.back()].objects.find(obj)));
}
}
int ScrubBackend::scrub_process_inconsistent()
{
dout(20) << fmt::format("{}: {} (m_repair:{}) good peers tbl #: {}",
__func__,
m_mode_desc,
m_repair,
m_auth_peers.size())
<< dendl;
ceph_assert(!m_auth_peers.empty());
  // the 'authoritative' map only stores objects that are missing or inconsistent.
// some tests expect an error message that does not contain the __func__ and
// PG:
auto err_msg = fmt::format("{} {} {} missing, {} inconsistent objects",
m_formatted_id,
m_mode_desc,
m_missing.size(),
m_inconsistent.size());
dout(4) << err_msg << dendl;
clog.error() << err_msg;
ceph_assert(m_repair);
int fixed_cnt{0};
for (const auto& [hobj, shrd_list] : m_auth_peers) {
auto missing_entry = m_missing.find(hobj);
if (missing_entry != m_missing.end()) {
repair_object(hobj, shrd_list, missing_entry->second);
fixed_cnt += missing_entry->second.size();
}
if (m_inconsistent.count(hobj)) {
repair_object(hobj, shrd_list, m_inconsistent[hobj]);
fixed_cnt += m_inconsistent[hobj].size();
}
}
return fixed_cnt;
}
void ScrubBackend::repair_object(const hobject_t& soid,
const auth_peers_t& ok_peers,
const set<pg_shard_t>& bad_peers)
{
if (g_conf()->subsys.should_gather<ceph_subsys_osd, 20>()) {
// log the good peers
set<pg_shard_t> ok_shards; // the shards from the ok_peers list
for (const auto& peer : ok_peers) {
ok_shards.insert(peer.second);
}
dout(10) << fmt::format(
"repair_object {} bad_peers osd.{{{}}}, ok_peers osd.{{{}}}",
soid,
bad_peers,
ok_shards)
<< dendl;
}
const ScrubMap::object& po = ok_peers.back().first;
object_info_t oi;
try {
bufferlist bv;
if (po.attrs.count(OI_ATTR)) {
bv.push_back(po.attrs.find(OI_ATTR)->second);
}
auto bliter = bv.cbegin();
decode(oi, bliter);
} catch (...) {
dout(0) << __func__
<< ": Need version of replica, bad object_info_t: " << soid
<< dendl;
ceph_abort();
}
if (bad_peers.count(m_pg.get_primary())) {
// We should only be scrubbing if the PG is clean.
ceph_assert(!m_pg.is_waiting_for_unreadable_object());
dout(10) << __func__ << ": primary = " << m_pg.get_primary() << dendl;
}
// No need to pass ok_peers, they must not be missing the object, so
// force_object_missing will add them to missing_loc anyway
m_pg.force_object_missing(ScrubberPasskey{}, bad_peers, soid, oi.version);
}
// /////////////////////////////////////////////////////////////////////////////
//
// components formerly of PGBackend::be_compare_scrubmaps()
//
// /////////////////////////////////////////////////////////////////////////////
using usable_t = shard_as_auth_t::usable_t;
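// the number of digests (data & omap) recorded in an object_info. Used below
// as a tie-breaker when two candidate shards carry the same object version.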
static inline int dcount(const object_info_t& oi)
{
return (oi.is_data_digest() ? 1 : 0) + (oi.is_omap_digest() ? 1 : 0);
}
auth_selection_t ScrubBackend::select_auth_object(const hobject_t& ho,
stringstream& errstream)
{
// Create a list of shards (with the Primary first, so that it will be
// auth-copy, all other things being equal)
/// \todo: consider sorting the candidate shards by the conditions for
/// selecting best auth source below. Then - stopping on the first one
/// that is auth eligible.
/// This creates an issue with 'digest_match' that should be handled.
std::list<pg_shard_t> shards;
for (const auto& [srd, smap] : this_chunk->received_maps) {
if (srd != m_pg_whoami) {
shards.push_back(srd);
}
}
shards.push_front(m_pg_whoami);
auth_selection_t ret_auth;
ret_auth.auth = this_chunk->received_maps.end();
eversion_t auth_version;
for (auto& l : shards) {
auto shard_ret = possible_auth_shard(ho, l, ret_auth.shard_map);
// digest_match will only be true if computed digests are the same
if (auth_version != eversion_t() &&
ret_auth.auth->second.objects[ho].digest_present &&
shard_ret.digest.has_value() &&
ret_auth.auth->second.objects[ho].digest != *shard_ret.digest) {
ret_auth.digest_match = false;
dout(10) << fmt::format(
"{}: digest_match = false, {} data_digest 0x{:x} != "
"data_digest 0x{:x}",
__func__,
ho,
ret_auth.auth->second.objects[ho].digest,
*shard_ret.digest)
<< dendl;
}
dout(20)
<< fmt::format("{}: {} shard {} got:{:D}", __func__, ho, l, shard_ret)
<< dendl;
if (shard_ret.possible_auth == shard_as_auth_t::usable_t::not_usable) {
// Don't use this particular shard due to previous errors
// XXX: For now we can't pick one shard for repair and another's object
// info or snapset
ceph_assert(shard_ret.error_text.length());
errstream << m_pg_id.pgid << " shard " << l << " soid " << ho << " : "
<< shard_ret.error_text << "\n";
} else if (shard_ret.possible_auth ==
shard_as_auth_t::usable_t::not_found) {
// do not emit the returned error message to the log
dout(15) << fmt::format("{}: {} not found on shard {}", __func__, ho, l)
<< dendl;
} else {
dout(30) << fmt::format("{}: consider using {} srv: {} oi soid: {}",
__func__,
l,
shard_ret.oi.version,
shard_ret.oi.soid)
<< dendl;
// consider using this shard as authoritative. Is it more recent?
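      // (on a version tie - prefer the shard whose object-info carries more
      // digests; see dcount() above)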
if (auth_version == eversion_t() || shard_ret.oi.version > auth_version ||
(shard_ret.oi.version == auth_version &&
dcount(shard_ret.oi) > dcount(ret_auth.auth_oi))) {
dout(20) << fmt::format("{}: using {} moved auth oi {:p} <-> {:p}",
__func__,
l,
(void*)&ret_auth.auth_oi,
(void*)&shard_ret.oi)
<< dendl;
ret_auth.auth = shard_ret.auth_iter;
ret_auth.auth_shard = ret_auth.auth->first;
ret_auth.auth_oi = shard_ret.oi;
auth_version = shard_ret.oi.version;
ret_auth.is_auth_available = true;
}
}
}
dout(10) << fmt::format("{}: selecting osd {} for obj {} with oi {}",
__func__,
ret_auth.auth_shard,
ho,
ret_auth.auth_oi)
<< dendl;
return ret_auth;
}
using set_sinfo_err_t = void (shard_info_wrapper::*)();
inline static const char* sep(bool& prev_err)
{
if (prev_err) {
return ", ";
} else {
prev_err = true;
return "";
}
}
// retval: should we continue with the tests
static inline bool dup_error_cond(bool& prev_err,
bool continue_on_err,
bool pred,
shard_info_wrapper& si,
set_sinfo_err_t sete,
std::string_view msg,
stringstream& errstream)
{
if (pred) {
(si.*sete)();
errstream << sep(prev_err) << msg;
return continue_on_err;
}
return true;
}
/**
* calls a shard_info_wrapper function, but only if the error predicate is
* true.
* Returns a copy of the error status.
*/
static inline bool test_error_cond(bool error_pred,
shard_info_wrapper& si,
set_sinfo_err_t sete)
{
if (error_pred) {
(si.*sete)();
}
return error_pred;
}
shard_as_auth_t ScrubBackend::possible_auth_shard(const hobject_t& obj,
const pg_shard_t& srd,
shard_info_map_t& shard_map)
{
// 'maps' (originally called with this_chunk->maps): this_chunk->maps
// 'auth_oi' (called with 'auth_oi', which wasn't initialized at call site)
// - create and return
// 'shard_map' - the one created in select_auth_object()
// - used to access the 'shard_info'
const auto j = this_chunk->received_maps.find(srd);
const auto& j_shard = j->first;
const auto& j_smap = j->second;
auto i = j_smap.objects.find(obj);
if (i == j_smap.objects.end()) {
return shard_as_auth_t{};
}
const auto& smap_obj = i->second;
auto& shard_info = shard_map[j_shard];
if (j_shard == m_pg_whoami) {
shard_info.primary = true;
}
stringstream errstream; // for this shard
bool err{false};
dup_error_cond(err,
true,
smap_obj.read_error,
shard_info,
&shard_info_wrapper::set_read_error,
"candidate had a read error"sv,
errstream);
dup_error_cond(err,
true,
smap_obj.ec_hash_mismatch,
shard_info,
&shard_info_wrapper::set_ec_hash_mismatch,
"candidate had an ec hash mismatch"sv,
errstream);
dup_error_cond(err,
true,
smap_obj.ec_size_mismatch,
shard_info,
&shard_info_wrapper::set_ec_size_mismatch,
"candidate had an ec size mismatch"sv,
errstream);
if (!dup_error_cond(err,
false,
smap_obj.stat_error,
shard_info,
&shard_info_wrapper::set_stat_error,
"candidate had a stat error"sv,
errstream)) {
// With stat_error no further checking
// We don't need to also see a missing_object_info_attr
return shard_as_auth_t{errstream.str()};
}
// We won't pick an auth copy if the snapset is missing or won't decode.
ceph_assert(!obj.is_snapdir());
if (obj.is_head()) {
auto k = smap_obj.attrs.find(SS_ATTR);
if (dup_error_cond(err,
false,
(k == smap_obj.attrs.end()),
shard_info,
&shard_info_wrapper::set_snapset_missing,
"candidate had a missing snapset key"sv,
errstream)) {
bufferlist ss_bl;
SnapSet snapset;
ss_bl.push_back(k->second);
try {
auto bliter = ss_bl.cbegin();
decode(snapset, bliter);
} catch (...) {
// invalid snapset, probably corrupt
dup_error_cond(err,
false,
true,
shard_info,
&shard_info_wrapper::set_snapset_corrupted,
"candidate had a corrupt snapset"sv,
errstream);
}
} else {
// debug@dev only
dout(30) << fmt::format(
"{} missing snap addr: {:p} shard_info: {:p} er: {:x}",
__func__,
(void*)&smap_obj,
(void*)&shard_info,
shard_info.errors)
<< dendl;
}
}
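  // for erasure-coded pools: the candidate must also carry a valid (i.e.
  // present and decodable) hash-info attribute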
if (!m_is_replicated) {
auto k = smap_obj.attrs.find(ECUtil::get_hinfo_key());
if (dup_error_cond(err,
false,
(k == smap_obj.attrs.end()),
shard_info,
&shard_info_wrapper::set_hinfo_missing,
"candidate had a missing hinfo key"sv,
errstream)) {
bufferlist hk_bl;
ECUtil::HashInfo hi;
hk_bl.push_back(k->second);
try {
auto bliter = hk_bl.cbegin();
decode(hi, bliter);
} catch (...) {
dup_error_cond(err,
false,
true,
shard_info,
&shard_info_wrapper::set_hinfo_corrupted,
"candidate had a corrupt hinfo"sv,
errstream);
}
}
}
object_info_t oi;
{
auto k = smap_obj.attrs.find(OI_ATTR);
if (!dup_error_cond(err,
false,
(k == smap_obj.attrs.end()),
shard_info,
&shard_info_wrapper::set_info_missing,
"candidate had a missing info key"sv,
errstream)) {
// no object info on object, probably corrupt
return shard_as_auth_t{errstream.str()};
}
bufferlist bl;
bl.push_back(k->second);
try {
auto bliter = bl.cbegin();
decode(oi, bliter);
} catch (...) {
// invalid object info, probably corrupt
if (!dup_error_cond(err,
false,
true,
shard_info,
&shard_info_wrapper::set_info_corrupted,
"candidate had a corrupt info"sv,
errstream)) {
return shard_as_auth_t{errstream.str()};
}
}
}
// This is automatically corrected in repair_oinfo_oid()
ceph_assert(oi.soid == obj);
if (test_error_cond(smap_obj.size != logical_to_ondisk_size(oi.size),
shard_info,
&shard_info_wrapper::set_obj_size_info_mismatch)) {
errstream << sep(err) << "candidate size " << smap_obj.size << " info size "
<< logical_to_ondisk_size(oi.size) << " mismatch";
}
std::optional<uint32_t> digest;
if (smap_obj.digest_present) {
digest = smap_obj.digest;
}
if (shard_info.errors) {
ceph_assert(err);
return shard_as_auth_t{errstream.str(), digest};
}
ceph_assert(!err);
// note that the error text is made available to the caller, even
// for a successful shard selection
return shard_as_auth_t{oi, j, errstream.str(), digest};
}
// re-implementation of PGBackend::be_compare_scrubmaps()
void ScrubBackend::compare_smaps()
{
dout(10) << __func__
<< ": authoritative-set #: " << this_chunk->authoritative_set.size()
<< dendl;
std::for_each(this_chunk->authoritative_set.begin(),
this_chunk->authoritative_set.end(),
[this](const auto& ho) {
if (auto maybe_clust_err = compare_obj_in_maps(ho);
maybe_clust_err) {
clog.error() << *maybe_clust_err;
}
});
}
std::optional<std::string> ScrubBackend::compare_obj_in_maps(
const hobject_t& ho)
{
// clear per-object data:
this_chunk->cur_inconsistent.clear();
this_chunk->cur_missing.clear();
this_chunk->fix_digest = false;
stringstream candidates_errors;
auto auth_res = select_auth_object(ho, candidates_errors);
if (candidates_errors.str().size()) {
// a collection of shard-specific errors detected while
// finding the best shard to serve as authoritative
clog.error() << candidates_errors.str();
}
inconsistent_obj_wrapper object_error{ho};
if (!auth_res.is_auth_available) {
// no auth selected
object_error.set_version(0);
object_error.set_auth_missing(ho,
this_chunk->received_maps,
auth_res.shard_map,
this_chunk->m_error_counts.shallow_errors,
this_chunk->m_error_counts.deep_errors,
m_pg_whoami);
if (object_error.has_deep_errors()) {
this_chunk->m_error_counts.deep_errors++;
} else if (object_error.has_shallow_errors()) {
this_chunk->m_error_counts.shallow_errors++;
}
this_chunk->m_inconsistent_objs.push_back(std::move(object_error));
return fmt::format("{} soid {} : failed to pick suitable object info\n",
m_scrubber.get_pgid().pgid,
ho);
}
stringstream errstream;
auto& auth = auth_res.auth;
// an auth source was selected
object_error.set_version(auth_res.auth_oi.user_version);
ScrubMap::object& auth_object = auth->second.objects[ho];
ceph_assert(!this_chunk->fix_digest);
auto [auths, objerrs] =
match_in_shards(ho, auth_res, object_error, errstream);
auto opt_ers =
for_empty_auth_list(std::move(auths),
std::move(objerrs),
auth,
ho,
errstream);
if (opt_ers.has_value()) {
// At this point auth_list is populated, so we add the object error
// shards as inconsistent.
inconsistents(ho,
auth_object,
auth_res.auth_oi,
std::move(*opt_ers),
errstream);
} else {
// both the auth & errs containers are empty
errstream << m_pg_id << " soid " << ho << " : empty auth list\n";
}
if (object_error.has_deep_errors()) {
this_chunk->m_error_counts.deep_errors++;
} else if (object_error.has_shallow_errors()) {
this_chunk->m_error_counts.shallow_errors++;
}
if (object_error.errors || object_error.union_shards.errors) {
this_chunk->m_inconsistent_objs.push_back(std::move(object_error));
}
if (errstream.str().empty()) {
return std::nullopt;
} else {
return errstream.str();
}
}
std::optional<ScrubBackend::auth_and_obj_errs_t>
ScrubBackend::for_empty_auth_list(std::list<pg_shard_t>&& auths,
std::set<pg_shard_t>&& obj_errors,
shard_to_scrubmap_t::iterator auth,
const hobject_t& ho,
stringstream& errstream)
{
if (auths.empty()) {
if (obj_errors.empty()) {
errstream << m_pg_id << " soid " << ho
<< " : failed to pick suitable auth object\n";
return std::nullopt;
}
// Object errors exist and nothing in auth_list
// Prefer the auth shard, otherwise take first from list.
pg_shard_t shard;
if (obj_errors.count(auth->first)) {
shard = auth->first;
} else {
shard = *(obj_errors.begin());
}
auths.push_back(shard);
obj_errors.erase(shard);
}
return ScrubBackend::auth_and_obj_errs_t{std::move(auths),
std::move(obj_errors)};
}
/// \todo replace the errstream with a member of this_chunk. Better be a
/// fmt::buffer. Then - we can use it directly in should_fix_digest()
void ScrubBackend::inconsistents(const hobject_t& ho,
ScrubMap::object& auth_object,
object_info_t& auth_oi,
auth_and_obj_errs_t&& auth_n_errs,
stringstream& errstream)
{
auto& object_errors = auth_n_errs.object_errors;
auto& auth_list = auth_n_errs.auth_list;
this_chunk->cur_inconsistent.insert(object_errors.begin(),
object_errors.end()); // merge?
dout(15) << fmt::format(
"{}: object errors #: {} auth list #: {} cur_missing #: {} "
"cur_incon #: {}",
__func__,
object_errors.size(),
auth_list.size(),
this_chunk->cur_missing.size(),
this_chunk->cur_inconsistent.size())
<< dendl;
if (!this_chunk->cur_missing.empty()) {
m_missing[ho] = this_chunk->cur_missing;
}
if (!this_chunk->cur_inconsistent.empty()) {
m_inconsistent[ho] = this_chunk->cur_inconsistent;
}
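  // fix_digest is set by match_in_shards() when all the replicas match each
  // other, but the recorded object-info digest is wrong; in that case we just
  // queue a digest update based on the auth object's on-disk digests.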
if (this_chunk->fix_digest) {
ceph_assert(auth_object.digest_present);
std::optional<uint32_t> data_digest{auth_object.digest};
std::optional<uint32_t> omap_digest;
if (auth_object.omap_digest_present) {
omap_digest = auth_object.omap_digest;
}
this_chunk->missing_digest.push_back(
make_pair(ho, make_pair(data_digest, omap_digest)));
}
if (!this_chunk->cur_inconsistent.empty() ||
!this_chunk->cur_missing.empty()) {
this_chunk->authoritative[ho] = auth_list;
} else if (!this_chunk->fix_digest && m_is_replicated) {
auto is_to_fix =
should_fix_digest(ho, auth_object, auth_oi, m_repair, errstream);
switch (is_to_fix) {
case digest_fixing_t::no:
break;
case digest_fixing_t::if_aged: {
utime_t age = this_chunk->started - auth_oi.local_mtime;
// \todo find out 'age_limit' only once
const auto age_limit = m_conf->osd_deep_scrub_update_digest_min_age;
if (age <= age_limit) {
dout(20) << __func__ << ": missing digest but age (" << age
<< ") < conf (" << age_limit << ") on " << ho << dendl;
break;
}
}
[[fallthrough]];
case digest_fixing_t::force:
std::optional<uint32_t> data_digest;
if (auth_object.digest_present) {
data_digest = auth_object.digest;
dout(20) << __func__ << ": will update data digest on " << ho
<< dendl;
}
std::optional<uint32_t> omap_digest;
if (auth_object.omap_digest_present) {
omap_digest = auth_object.omap_digest;
dout(20) << __func__ << ": will update omap digest on " << ho
<< dendl;
}
this_chunk->missing_digest.push_back(
make_pair(ho, make_pair(data_digest, omap_digest)));
break;
}
}
}
/// \todo consider changing to use format() and to return the strings
ScrubBackend::digest_fixing_t ScrubBackend::should_fix_digest(
const hobject_t& ho,
const ScrubMap::object& auth_object,
const object_info_t& auth_oi,
bool repair_flag,
stringstream& errstream)
{
digest_fixing_t update{digest_fixing_t::no};
if (auth_object.digest_present && !auth_oi.is_data_digest()) {
dout(15) << __func__ << " missing data digest on " << ho << dendl;
update = digest_fixing_t::if_aged;
}
if (auth_object.omap_digest_present && !auth_oi.is_omap_digest()) {
dout(15) << __func__ << " missing omap digest on " << ho << dendl;
update = digest_fixing_t::if_aged;
}
// recorded digest != actual digest?
if (auth_oi.is_data_digest() && auth_object.digest_present &&
auth_oi.data_digest != auth_object.digest) {
errstream << m_pg_id << " recorded data digest 0x" << std::hex
<< auth_oi.data_digest << " != on disk 0x" << auth_object.digest
<< std::dec << " on " << auth_oi.soid << "\n";
if (repair_flag)
update = digest_fixing_t::force;
}
if (auth_oi.is_omap_digest() && auth_object.omap_digest_present &&
auth_oi.omap_digest != auth_object.omap_digest) {
errstream << m_pg_id << " recorded omap digest 0x" << std::hex
<< auth_oi.omap_digest << " != on disk 0x"
<< auth_object.omap_digest << std::dec << " on " << auth_oi.soid
<< "\n";
if (repair_flag)
update = digest_fixing_t::force;
}
return update;
}
ScrubBackend::auth_and_obj_errs_t ScrubBackend::match_in_shards(
const hobject_t& ho,
auth_selection_t& auth_sel,
inconsistent_obj_wrapper& obj_result,
stringstream& errstream)
{
std::list<pg_shard_t> auth_list; // out "param" to
std::set<pg_shard_t> object_errors; // be returned
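  // walk over the whole working set of maps: the Primary's own scrub-map plus
  // all the replicas' maps received for this chunk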
for (auto& [srd, smap] : this_chunk->received_maps) {
if (srd == auth_sel.auth_shard) {
auth_sel.shard_map[auth_sel.auth_shard].selected_oi = true;
}
if (smap.objects.count(ho)) {
// the scrub-map has our object
auth_sel.shard_map[srd].set_object(smap.objects[ho]);
// Compare
stringstream ss;
const auto& auth_object = auth_sel.auth->second.objects[ho];
const bool discrep_found = compare_obj_details(auth_sel.auth_shard,
auth_object,
auth_sel.auth_oi,
smap.objects[ho],
auth_sel.shard_map[srd],
obj_result,
ss,
ho.has_snapset());
dout(20) << fmt::format(
"{}: {}{} <{}:{}> shards: {} {} {}", __func__,
(m_repair ? "repair " : ""),
(m_is_replicated ? "replicated " : ""), srd,
(srd == auth_sel.auth_shard ? "auth" : "-"),
auth_sel.shard_map.size(),
(auth_sel.digest_match ? " digest_match " : " "),
(auth_sel.shard_map[srd].only_data_digest_mismatch_info()
? "'info mismatch info'"
: ""))
<< dendl;
if (discrep_found) {
dout(10) << fmt::format(
"{}: <{}> auth:{} ({}/{}) vs {} ({}/{}) {}", __func__, ho,
auth_sel.auth_shard, auth_object.omap_digest_present,
auth_object.omap_digest, srd,
smap.objects[ho].omap_digest_present ? true : false,
smap.objects[ho].omap_digest, ss.str())
<< dendl;
}
      // If all the replicas match each other, but they do not match the
      // object_info, we can repair it by using the missing_digest mechanism
if (m_repair && m_is_replicated && (srd == auth_sel.auth_shard) &&
auth_sel.shard_map.size() > 1 && auth_sel.digest_match &&
auth_sel.shard_map[srd].only_data_digest_mismatch_info() &&
auth_object.digest_present) {
// Set in missing_digests
this_chunk->fix_digest = true;
// Clear the error
auth_sel.shard_map[srd].clear_data_digest_mismatch_info();
errstream << m_pg_id << " soid " << ho
<< " : repairing object info data_digest"
<< "\n";
}
// Some errors might have already been set in select_auth_object()
if (auth_sel.shard_map[srd].errors != 0) {
this_chunk->cur_inconsistent.insert(srd);
if (auth_sel.shard_map[srd].has_deep_errors()) {
this_chunk->m_error_counts.deep_errors++;
} else {
this_chunk->m_error_counts.shallow_errors++;
}
if (discrep_found) {
// Only true if compare_obj_details() found errors and put something
// in ss
errstream << m_pg_id << " shard " << srd << " soid " << ho << " : "
<< ss.str() << "\n";
}
} else if (discrep_found) {
// Track possible shards to use as authoritative, if needed
// There are errors, without identifying the shard
object_errors.insert(srd);
errstream << m_pg_id << " soid " << ho << " : " << ss.str() << "\n";
} else {
        // XXX: the auth shard may end up on this list even though we do not
        // actually know that it holds the "correct" data.
auth_list.push_back(srd);
}
} else {
this_chunk->cur_missing.insert(srd);
auth_sel.shard_map[srd].set_missing();
auth_sel.shard_map[srd].primary = (srd == m_pg_whoami);
// Can't have any other errors if there is no information available
this_chunk->m_error_counts.shallow_errors++;
errstream << m_pg_id << " shard " << srd << " " << ho << " : missing\n";
}
obj_result.add_shard(srd, auth_sel.shard_map[srd]);
dout(20) << __func__ << ": (debug) soid " << ho << " : " << errstream.str()
<< dendl;
}
dout(15) << fmt::format("{}: auth_list: {} #: {}; obj-errs#: {}",
__func__,
auth_list,
auth_list.size(),
object_errors.size())
<< dendl;
return {auth_list, object_errors};
}
// == PGBackend::be_compare_scrub_objects()
bool ScrubBackend::compare_obj_details(pg_shard_t auth_shard,
const ScrubMap::object& auth,
const object_info_t& auth_oi,
const ScrubMap::object& candidate,
shard_info_wrapper& shard_result,
inconsistent_obj_wrapper& obj_result,
stringstream& errstream,
bool has_snapset)
{
fmt::memory_buffer out;
bool error{false};
// ------------------------------------------------------------------------
if (auth.digest_present && candidate.digest_present &&
auth.digest != candidate.digest) {
fmt::format_to(std::back_inserter(out),
"data_digest {:#x} != data_digest {:#x} from shard {}",
candidate.digest,
auth.digest,
auth_shard);
error = true;
obj_result.set_data_digest_mismatch();
}
if (auth.omap_digest_present && candidate.omap_digest_present &&
auth.omap_digest != candidate.omap_digest) {
fmt::format_to(std::back_inserter(out),
"{}omap_digest {:#x} != omap_digest {:#x} from shard {}",
sep(error),
candidate.omap_digest,
auth.omap_digest,
auth_shard);
obj_result.set_omap_digest_mismatch();
}
// for replicated:
if (m_is_replicated) {
if (auth_oi.is_data_digest() && candidate.digest_present &&
auth_oi.data_digest != candidate.digest) {
fmt::format_to(std::back_inserter(out),
"{}data_digest {:#x} != data_digest {:#x} from auth oi {}",
sep(error),
candidate.digest,
auth_oi.data_digest,
auth_oi);
shard_result.set_data_digest_mismatch_info();
}
// for replicated:
if (auth_oi.is_omap_digest() && candidate.omap_digest_present &&
auth_oi.omap_digest != candidate.omap_digest) {
fmt::format_to(std::back_inserter(out),
"{}omap_digest {:#x} != omap_digest {:#x} from auth oi {}",
sep(error),
candidate.omap_digest,
auth_oi.omap_digest,
auth_oi);
shard_result.set_omap_digest_mismatch_info();
}
}
// ------------------------------------------------------------------------
if (candidate.stat_error) {
if (error) {
errstream << fmt::to_string(out);
}
return error;
}
// ------------------------------------------------------------------------
if (!shard_result.has_info_missing() && !shard_result.has_info_corrupted()) {
auto can_attr = candidate.attrs.find(OI_ATTR);
ceph_assert(can_attr != candidate.attrs.end());
bufferlist can_bl;
can_bl.push_back(can_attr->second);
auto auth_attr = auth.attrs.find(OI_ATTR);
ceph_assert(auth_attr != auth.attrs.end());
bufferlist auth_bl;
auth_bl.push_back(auth_attr->second);
if (!can_bl.contents_equal(auth_bl)) {
fmt::format_to(std::back_inserter(out),
"{}object info inconsistent ",
sep(error));
obj_result.set_object_info_inconsistency();
}
}
if (has_snapset) {
if (!shard_result.has_snapset_missing() &&
!shard_result.has_snapset_corrupted()) {
auto can_attr = candidate.attrs.find(SS_ATTR);
ceph_assert(can_attr != candidate.attrs.end());
bufferlist can_bl;
can_bl.push_back(can_attr->second);
auto auth_attr = auth.attrs.find(SS_ATTR);
ceph_assert(auth_attr != auth.attrs.end());
bufferlist auth_bl;
auth_bl.push_back(auth_attr->second);
if (!can_bl.contents_equal(auth_bl)) {
fmt::format_to(std::back_inserter(out),
"{}snapset inconsistent ",
sep(error));
obj_result.set_snapset_inconsistency();
}
}
}
// ------------------------------------------------------------------------
if (!m_is_replicated) {
if (!shard_result.has_hinfo_missing() &&
!shard_result.has_hinfo_corrupted()) {
auto can_hi = candidate.attrs.find(ECUtil::get_hinfo_key());
ceph_assert(can_hi != candidate.attrs.end());
bufferlist can_bl;
can_bl.push_back(can_hi->second);
auto auth_hi = auth.attrs.find(ECUtil::get_hinfo_key());
ceph_assert(auth_hi != auth.attrs.end());
bufferlist auth_bl;
auth_bl.push_back(auth_hi->second);
if (!can_bl.contents_equal(auth_bl)) {
fmt::format_to(std::back_inserter(out),
"{}hinfo inconsistent ",
sep(error));
obj_result.set_hinfo_inconsistency();
}
}
}
// ------------------------------------------------------------------------
// sizes:
uint64_t oi_size = logical_to_ondisk_size(auth_oi.size);
if (oi_size != candidate.size) {
fmt::format_to(std::back_inserter(out),
"{}size {} != size {} from auth oi {}",
sep(error),
candidate.size,
oi_size,
auth_oi);
shard_result.set_size_mismatch_info();
}
if (auth.size != candidate.size) {
fmt::format_to(std::back_inserter(out),
"{}size {} != size {} from shard {}",
sep(error),
candidate.size,
auth.size,
auth_shard);
obj_result.set_size_mismatch();
}
// If the replica is too large and we didn't already count it for this object
if (candidate.size > m_conf->osd_max_object_size &&
!obj_result.has_size_too_large()) {
fmt::format_to(std::back_inserter(out),
"{}size {} > {} is too large",
sep(error),
candidate.size,
m_conf->osd_max_object_size);
obj_result.set_size_too_large();
}
// ------------------------------------------------------------------------
// comparing the attributes:
for (const auto& [k, v] : auth.attrs) {
if (k == OI_ATTR || k[0] != '_') {
// We check system keys separately
continue;
}
auto cand = candidate.attrs.find(k);
if (cand == candidate.attrs.end()) {
fmt::format_to(std::back_inserter(out),
"{}attr name mismatch '{}'",
sep(error),
k);
obj_result.set_attr_name_mismatch();
} else if (cand->second.cmp(v)) {
fmt::format_to(std::back_inserter(out),
"{}attr value mismatch '{}'",
sep(error),
k);
obj_result.set_attr_value_mismatch();
}
}
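  // and the reverse direction: user attributes that exist on the candidate
  // shard but are missing from the auth shard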
for (const auto& [k, v] : candidate.attrs) {
if (k == OI_ATTR || k[0] != '_') {
// We check system keys separately
continue;
}
auto in_auth = auth.attrs.find(k);
if (in_auth == auth.attrs.end()) {
fmt::format_to(std::back_inserter(out),
"{}attr name mismatch '{}'",
sep(error),
k);
obj_result.set_attr_name_mismatch();
}
}
if (error) {
errstream << fmt::to_string(out);
}
return error;
}
static inline bool doing_clones(
const std::optional<SnapSet>& snapset,
const vector<snapid_t>::reverse_iterator& curclone)
{
return snapset && curclone != snapset->clones.rend();
}
// /////////////////////////////////////////////////////////////////////////////
//
// final checking & fixing - scrub_snapshot_metadata()
//
// /////////////////////////////////////////////////////////////////////////////
/*
* Validate consistency of the object info and snap sets.
*
* We are sort of comparing 2 lists. The main loop is on objmap.objects. But
* the comparison of the objects is against multiple snapset.clones. There are
* multiple clone lists and in between lists we expect head.
*
* Example
*
* objects expected
* ======= =======
* obj1 snap 1 head, unexpected obj1 snap 1
* obj2 head head, match
* [SnapSet clones 6 4 2 1]
* obj2 snap 7 obj2 snap 6, unexpected obj2 snap 7
* obj2 snap 6 obj2 snap 6, match
* obj2 snap 4 obj2 snap 4, match
* obj3 head obj2 snap 2 (expected), obj2 snap 1 (expected), match
* [Snapset clones 3 1]
* obj3 snap 3 obj3 snap 3 match
* obj3 snap 1 obj3 snap 1 match
* obj4 head head, match
* [Snapset clones 4]
* EOL obj4 snap 4, (expected)
*/
void ScrubBackend::scrub_snapshot_metadata(ScrubMap& map)
{
dout(10) << __func__ << " num stat obj "
<< m_pg.get_pg_info(ScrubberPasskey{}).stats.stats.sum.num_objects
<< dendl;
std::optional<snapid_t> all_clones; // Unspecified snapid_t or std::nullopt
// traverse in reverse order.
std::optional<hobject_t> head;
std::optional<SnapSet> snapset; // If initialized so will head (above)
vector<snapid_t>::reverse_iterator
curclone; // Defined only if snapset initialized
int missing = 0;
inconsistent_snapset_wrapper soid_error, head_error;
int soid_error_count = 0;
for (auto p = map.objects.rbegin(); p != map.objects.rend(); ++p) {
const hobject_t& soid = p->first;
ceph_assert(!soid.is_snapdir());
soid_error = inconsistent_snapset_wrapper{soid};
object_stat_sum_t stat;
stat.num_objects++;
if (soid.nspace == m_conf->osd_hit_set_namespace)
stat.num_objects_hit_set_archive++;
if (soid.is_snap()) {
// it's a clone
stat.num_object_clones++;
}
// basic checks.
std::optional<object_info_t> oi;
if (!p->second.attrs.count(OI_ATTR)) {
oi = std::nullopt;
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : no '" << OI_ATTR << "' attr";
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_info_missing();
} else {
bufferlist bv;
bv.push_back(p->second.attrs[OI_ATTR]);
try {
oi = object_info_t(bv);
} catch (ceph::buffer::error& e) {
oi = std::nullopt;
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : can't decode '" << OI_ATTR << "' attr "
<< e.what();
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_info_corrupted();
        soid_error.set_info_missing(); // Not available either
}
}
if (oi) {
if (logical_to_ondisk_size(oi->size) != p->second.size) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : on disk size (" << p->second.size
<< ") does not match object info size (" << oi->size
<< ") adjusted for ondisk to ("
<< logical_to_ondisk_size(oi->size) << ")";
soid_error.set_size_mismatch();
this_chunk->m_error_counts.shallow_errors++;
}
dout(20) << m_mode_desc << " " << soid << " " << *oi << dendl;
      // A clone's num_bytes will be added later, once we have the snapset
if (!soid.is_snap()) {
stat.num_bytes += oi->size;
}
if (soid.nspace == m_conf->osd_hit_set_namespace)
stat.num_bytes_hit_set_archive += oi->size;
if (oi->is_dirty())
++stat.num_objects_dirty;
if (oi->is_whiteout())
++stat.num_whiteouts;
if (oi->is_omap())
++stat.num_objects_omap;
if (oi->is_cache_pinned())
++stat.num_objects_pinned;
if (oi->has_manifest())
++stat.num_objects_manifest;
}
// Check for any problems while processing clones
if (doing_clones(snapset, curclone)) {
std::optional<snapid_t> target;
// Expecting an object with snap for current head
if (soid.has_snapset() || soid.get_head() != head->get_head()) {
dout(10) << __func__ << " " << m_mode_desc << " " << m_pg_id
<< " new object " << soid << " while processing " << *head
<< dendl;
target = all_clones;
} else {
ceph_assert(soid.is_snap());
target = soid.snap;
}
// Log any clones we were expecting to be there up to target
      // This will set missing, but will be a no-op if soid.snap == **curclone.
missing +=
process_clones_to(head, snapset, target, &curclone, head_error);
}
bool expected;
// Check doing_clones() again in case we ran process_clones_to()
if (doing_clones(snapset, curclone)) {
// A head would have processed all clones above
// or all greater than *curclone.
ceph_assert(soid.is_snap() && *curclone <= soid.snap);
      // After the processing above, the clone's snap should match the expected curclone
expected = (*curclone == soid.snap);
} else {
// If we aren't doing clones any longer, then expecting head
expected = soid.has_snapset();
}
if (!expected) {
// If we couldn't read the head's snapset, just ignore clones
if (head && !snapset) {
clog.error() << m_mode_desc << " " << m_pg_id
<< " " << soid
<< " : clone ignored due to missing snapset";
} else {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : is an unexpected clone";
}
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_headless();
this_chunk->m_inconsistent_objs.push_back(std::move(soid_error));
++soid_error_count;
if (head && soid.get_head() == head->get_head())
head_error.set_clone(soid.snap);
continue;
}
// new snapset?
if (soid.has_snapset()) {
if (missing) {
log_missing(missing, head, __func__);
}
// Save previous head error information
if (head && (head_error.errors || soid_error_count)) {
this_chunk->m_inconsistent_objs.push_back(std::move(head_error));
}
// Set this as a new head object
head = soid;
missing = 0;
head_error = soid_error;
soid_error_count = 0;
dout(20) << __func__ << " " << m_mode_desc << " new head " << head
<< dendl;
if (p->second.attrs.count(SS_ATTR) == 0) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : no '" << SS_ATTR << "' attr";
this_chunk->m_error_counts.shallow_errors++;
snapset = std::nullopt;
head_error.set_snapset_missing();
} else {
bufferlist bl;
bl.push_back(p->second.attrs[SS_ATTR]);
auto blp = bl.cbegin();
try {
snapset = SnapSet(); // Initialize optional<> before decoding into it
decode(*snapset, blp);
head_error.ss_bl.push_back(p->second.attrs[SS_ATTR]);
} catch (ceph::buffer::error& e) {
snapset = std::nullopt;
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : can't decode '" << SS_ATTR << "' attr "
<< e.what();
this_chunk->m_error_counts.shallow_errors++;
head_error.set_snapset_corrupted();
}
}
if (snapset) {
// what will be next?
curclone = snapset->clones.rbegin();
if (!snapset->clones.empty()) {
dout(20) << " snapset " << *snapset << dendl;
if (snapset->seq == 0) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : snaps.seq not set";
this_chunk->m_error_counts.shallow_errors++;
head_error.set_snapset_error();
}
}
}
} else {
ceph_assert(soid.is_snap());
ceph_assert(head);
ceph_assert(snapset);
ceph_assert(soid.snap == *curclone);
dout(20) << __func__ << " " << m_mode_desc << " matched clone " << soid
<< dendl;
if (snapset->clone_size.count(soid.snap) == 0) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : is missing in clone_size";
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_size_mismatch();
} else {
if (oi && oi->size != snapset->clone_size[soid.snap]) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : size " << oi->size << " != clone_size "
<< snapset->clone_size[*curclone];
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_size_mismatch();
}
if (snapset->clone_overlap.count(soid.snap) == 0) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : is missing in clone_overlap";
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_size_mismatch();
} else {
// This checking is based on get_clone_bytes(). The first 2 asserts
// can't happen because we know we have a clone_size and
// a clone_overlap. Now we check that the interval_set won't
// cause the last assert.
uint64_t size = snapset->clone_size.find(soid.snap)->second;
const interval_set<uint64_t>& overlap =
snapset->clone_overlap.find(soid.snap)->second;
bool bad_interval_set = false;
for (interval_set<uint64_t>::const_iterator i = overlap.begin();
i != overlap.end();
++i) {
if (size < i.get_len()) {
bad_interval_set = true;
break;
}
size -= i.get_len();
}
if (bad_interval_set) {
clog.error() << m_mode_desc << " " << m_pg_id << " " << soid
<< " : bad interval_set in clone_overlap";
this_chunk->m_error_counts.shallow_errors++;
soid_error.set_size_mismatch();
} else {
stat.num_bytes += snapset->get_clone_bytes(soid.snap);
}
}
}
// what's next?
++curclone;
if (soid_error.errors) {
this_chunk->m_inconsistent_objs.push_back(std::move(soid_error));
++soid_error_count;
}
}
m_scrubber.add_to_stats(stat);
}
if (doing_clones(snapset, curclone)) {
dout(10) << __func__ << " " << m_mode_desc << " " << m_pg_id
<< " No more objects while processing " << *head << dendl;
missing +=
process_clones_to(head, snapset, all_clones, &curclone, head_error);
}
  // There could be missing clones found by the test above, or even
  // before dropping out of the loop, for the last head.
if (missing) {
log_missing(missing, head, __func__);
}
if (head && (head_error.errors || soid_error_count)) {
this_chunk->m_inconsistent_objs.push_back(std::move(head_error));
}
// fix data/omap digests
m_scrubber.submit_digest_fixes(this_chunk->missing_digest);
dout(10) << __func__ << " (" << m_mode_desc << ") finish" << dendl;
}
int ScrubBackend::process_clones_to(
const std::optional<hobject_t>& head,
const std::optional<SnapSet>& snapset,
std::optional<snapid_t> target,
vector<snapid_t>::reverse_iterator* curclone,
inconsistent_snapset_wrapper& e)
{
ceph_assert(head);
ceph_assert(snapset);
int missing_count = 0;
  // NOTE: clones are in descending order; hence the '**curclone > *target' test here
hobject_t next_clone(*head);
while (doing_clones(snapset, *curclone) &&
(!target || **curclone > *target)) {
++missing_count;
// it is okay to be missing one or more clones in a cache tier.
// skip higher-numbered clones in the list.
if (!m_incomplete_clones_allowed) {
next_clone.snap = **curclone;
clog.error() << m_mode_desc << " " << m_pg_id << " " << *head
<< " : expected clone " << next_clone << " " << m_missing
<< " missing";
this_chunk->m_error_counts.shallow_errors++;
e.set_clone_missing(next_clone.snap);
}
// Clones are descending
++(*curclone);
}
return missing_count;
}
void ScrubBackend::log_missing(int missing,
const std::optional<hobject_t>& head,
const char* logged_func_name)
{
ceph_assert(head);
if (m_incomplete_clones_allowed) {
dout(20) << logged_func_name << " " << m_mode_desc << " " << m_pg_id << " "
<< *head << " skipped " << missing << " clone(s) in cache tier"
<< dendl;
} else {
clog.info() << m_mode_desc << " " << m_pg_id << " " << *head << " : "
<< missing << " missing clone(s)";
}
}
// ////////////////////////////////////////////////////////////////////////////////
std::vector<snap_mapper_fix_t> ScrubBackend::scan_snaps(
ScrubMap& smap,
SnapMapReaderI& snaps_getter)
{
std::vector<snap_mapper_fix_t> out_orders;
hobject_t head;
SnapSet snapset;
// Test qa/standalone/scrub/osd-scrub-snaps.sh greps for the strings
// in this function
dout(15) << "_scan_snaps starts" << dendl;
for (auto i = smap.objects.rbegin(); i != smap.objects.rend(); ++i) {
const hobject_t& hoid = i->first;
ScrubMap::object& o = i->second;
dout(20) << __func__ << " " << hoid << dendl;
ceph_assert(!hoid.is_snapdir());
if (hoid.is_head()) {
// parse the SnapSet
bufferlist bl;
if (o.attrs.find(SS_ATTR) == o.attrs.end()) {
// no snaps for this head
continue;
}
bl.push_back(o.attrs[SS_ATTR]);
auto p = bl.cbegin();
try {
decode(snapset, p);
} catch (...) {
dout(20) << fmt::format("{}: failed to decode the snapset ({})",
__func__,
hoid)
<< dendl;
continue;
}
head = hoid.get_head();
continue;
}
    /// \todo document why 'head' is guaranteed to be initialized at this point
if (hoid.snap < CEPH_MAXSNAP) {
if (hoid.get_head() != head) {
derr << __func__ << " no head for " << hoid << " (have " << head << ")"
<< dendl;
continue;
}
// the 'hoid' is a clone hoid at this point. The 'snapset' below was taken
// from the corresponding head hoid.
auto maybe_fix_order = scan_object_snaps(hoid, snapset, snaps_getter);
if (maybe_fix_order) {
out_orders.push_back(std::move(*maybe_fix_order));
}
}
}
dout(15) << __func__ << " " << out_orders.size() << " fix orders" << dendl;
return out_orders;
}
std::optional<snap_mapper_fix_t> ScrubBackend::scan_object_snaps(
const hobject_t& hoid,
const SnapSet& snapset,
SnapMapReaderI& snaps_getter)
{
using result_t = Scrub::SnapMapReaderI::result_t;
dout(15) << fmt::format("{}: obj:{} snapset:{}", __func__, hoid, snapset)
<< dendl;
auto p = snapset.clone_snaps.find(hoid.snap);
if (p == snapset.clone_snaps.end()) {
derr << __func__ << " no clone_snaps for " << hoid << " in " << snapset
<< dendl;
return std::nullopt;
}
set<snapid_t> obj_snaps{p->second.begin(), p->second.end()};
// clang-format off
// validate both that the mapper contains the correct snaps for the object
// and that it is internally consistent.
// possible outcomes:
//
// Error scenarios:
// - SnapMapper index of object snaps does not match that stored in head
// object snapset attribute:
// we should delete the snapmapper entry and re-add it.
// - no mapping found for the object's snaps:
// we should add the missing mapper entries.
// - the snapmapper set for this object is internally inconsistent (e.g.
// the OBJ_ entries do not match the SNA_ entries). We remove
// whatever entries are there, and redo the DB content for this object.
//
// And
// There is the "happy path": cur_snaps == obj_snaps. Nothing to do there.
// clang-format on
auto cur_snaps = snaps_getter.get_snaps_check_consistency(hoid);
if (!cur_snaps) {
switch (auto e = cur_snaps.error(); e.code) {
case result_t::code_t::backend_error:
derr << __func__ << ": get_snaps returned "
<< cpp_strerror(e.backend_error) << " for " << hoid << dendl;
ceph_abort();
case result_t::code_t::not_found:
dout(10) << __func__ << ": no snaps for " << hoid << ". Adding."
<< dendl;
return snap_mapper_fix_t{snap_mapper_op_t::add, hoid, obj_snaps, {}};
case result_t::code_t::inconsistent:
dout(10) << __func__ << ": inconsistent snapmapper data for " << hoid
<< ". Recreating." << dendl;
return snap_mapper_fix_t{
snap_mapper_op_t::overwrite, hoid, obj_snaps, {}};
default:
dout(10) << __func__ << ": error (" << cpp_strerror(e.backend_error)
<< ") fetching snapmapper data for " << hoid << ". Recreating."
<< dendl;
return snap_mapper_fix_t{
snap_mapper_op_t::overwrite, hoid, obj_snaps, {}};
}
__builtin_unreachable();
}
if (*cur_snaps == obj_snaps) {
dout(20) << fmt::format(
"{}: {}: snapset match SnapMapper's ({})", __func__, hoid,
obj_snaps)
<< dendl;
return std::nullopt;
}
// add this object to the list of snapsets that needs fixing. Note
// that we also collect the existing (bogus) list, for logging purposes
dout(20) << fmt::format(
"{}: obj {}: was: {} updating to: {}", __func__, hoid,
*cur_snaps, obj_snaps)
<< dendl;
return snap_mapper_fix_t{
snap_mapper_op_t::update, hoid, obj_snaps, *cur_snaps};
}
/*
* Building a map of objects suitable for snapshot validation.
*
* We are moving all "full" clone sets, i.e. the head and (preceding it, as
* snapshots precede the head entry) the clone entries, into 'for_meta_scrub'.
* That collection, not containing partial items, will be scrubbed by
* scrub_snapshot_metadata().
*
* What's left in m_cleaned_meta_map is the leftover partial items that need to
* be completed before they can be processed.
*/
ScrubMap ScrubBackend::clean_meta_map(ScrubMap& cleaned, bool max_reached)
{
ScrubMap for_meta_scrub;
if (max_reached || cleaned.objects.empty()) {
cleaned.swap(for_meta_scrub);
} else {
auto iter = cleaned.objects.end();
--iter; // not empty, see 'if' clause
auto begin = cleaned.objects.begin();
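    // If the last object is a head, the map ends with a complete clone set and
    // everything can be handed over; otherwise walk back to the first object
    // of the trailing (possibly incomplete) clone set, which is left behind in
    // 'cleaned' for the next chunk.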
if (iter->first.has_snapset()) {
++iter;
} else {
while (iter != begin) {
auto next = iter--;
if (next->first.get_head() != iter->first.get_head()) {
++iter;
break;
}
}
}
for_meta_scrub.objects.insert(begin, iter);
cleaned.objects.erase(begin, iter);
}
return for_meta_scrub;
}
| 64,012 | 31.743223 | 83 | cc |
null | ceph-main/src/osd/scrubber/scrub_backend.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
// clang-format off
/*
+------------------------+
| |
| PgScrubber |
| |-----------------------------+
| | |
+------------------------+ | ownes & uses
| PrimaryLogScrub | |
+------------------------+ |
|
|
v
+-------------------------------------------+
|ScrubBackend |
+----------------+ |============ |
| this_chunk | | |
| (scrub_chunk_t)|<-------| + decode_received_map() |
+----------------+ | + scrub_compare_maps() |
| + scan_snaps() |
| ..... |
| |
| |
+--------------------/-------------\--------+
--/ / \
--/ | |
--/ / \
-/ uses | uses |
uses --/ / \
--/ / |
--/ | \
v v v
PgBackend PG/PrimaryLogPG OSD Services
*/
// clang-format on
#include <fmt/core.h>
#include <fmt/format.h>
#include <string_view>
#include "common/LogClient.h"
#include "osd/OSDMap.h"
#include "osd/osd_types_fmt.h"
#include "osd/scrubber_common.h"
#include "osd/SnapMapReaderI.h"
struct ScrubMap;
class PG;
class PgScrubber;
struct PGPool;
using Scrub::PgScrubBeListener;
using data_omap_digests_t =
std::pair<std::optional<uint32_t>, std::optional<uint32_t>>;
/// a list of fixes to be performed on objects' digests
using digests_fixes_t = std::vector<std::pair<hobject_t, data_omap_digests_t>>;
using shard_info_map_t = std::map<pg_shard_t, shard_info_wrapper>;
using shard_to_scrubmap_t = std::map<pg_shard_t, ScrubMap>;
using auth_peers_t = std::vector<std::pair<ScrubMap::object, pg_shard_t>>;
using wrapped_err_t =
std::variant<inconsistent_obj_wrapper, inconsistent_snapset_wrapper>;
using inconsistent_objs_t = std::vector<wrapped_err_t>;
/// omap-specific stats
struct omap_stat_t {
int large_omap_objects{0};
int64_t omap_bytes{0};
int64_t omap_keys{0};
};
struct error_counters_t {
int shallow_errors{0};
int deep_errors{0};
};
// the PgScrubber services used by the backend
struct ScrubBeListener {
virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
virtual CephContext* get_pg_cct() const = 0;
virtual LoggerSinkSet& get_logger() const = 0;
virtual bool is_primary() const = 0;
virtual spg_t get_pgid() const = 0;
virtual const OSDMapRef& get_osdmap() const = 0;
virtual void add_to_stats(const object_stat_sum_t& stat) = 0;
virtual void submit_digest_fixes(const digests_fixes_t& fixes) = 0;
virtual ~ScrubBeListener() = default;
};
// As the main scrub-backend entry point - scrub_compare_maps() - must
// be able to return both a list of snap fixes and a list of inconsistent
// objects:
struct objs_fix_list_t {
inconsistent_objs_t inconsistent_objs;
std::vector<Scrub::snap_mapper_fix_t> snap_fix_list;
};
/**
* A structure used internally by select_auth_object()
*
* Conveys the usability of a specific shard as an auth source.
*/
struct shard_as_auth_t {
// note: 'not_found' differs from 'not_usable' in that 'not_found'
// does not carry an error message to be cluster-logged.
enum class usable_t : uint8_t { not_usable, not_found, usable };
// the ctor used when the shard should not be considered as auth
explicit shard_as_auth_t(std::string err_msg)
: possible_auth{usable_t::not_usable}
, error_text{err_msg}
, oi{}
, auth_iter{}
, digest{std::nullopt}
{}
// the object cannot be found on the shard
explicit shard_as_auth_t()
: possible_auth{usable_t::not_found}
, error_text{}
, oi{}
, auth_iter{}
, digest{std::nullopt}
{}
shard_as_auth_t(std::string err_msg, std::optional<uint32_t> data_digest)
: possible_auth{usable_t::not_usable}
, error_text{err_msg}
, oi{}
, auth_iter{}
, digest{data_digest}
{}
// possible auth candidate
shard_as_auth_t(const object_info_t& anoi,
shard_to_scrubmap_t::iterator it,
std::string err_msg,
std::optional<uint32_t> data_digest)
: possible_auth{usable_t::usable}
, error_text{err_msg}
, oi{anoi}
, auth_iter{it}
, digest{data_digest}
{}
usable_t possible_auth;
std::string error_text;
object_info_t oi;
shard_to_scrubmap_t::iterator auth_iter;
std::optional<uint32_t> digest;
// when used for Crimson, we'll probably want to return 'digest_match' (and
// other in/out arguments) via this struct
};
// the format specifier {D} is used to request debug output
template <>
struct fmt::formatter<shard_as_auth_t> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
auto it = ctx.begin();
if (it != ctx.end()) {
debug_log = (*it++) == 'D';
}
return it;
}
template <typename FormatContext>
auto format(shard_as_auth_t const& as_auth, FormatContext& ctx)
{
if (debug_log) {
      // note: an 'if' chain is used, as it is hard to consistently (on all
      // compilers) avoid warnings when using a switch with multiple return paths
if (as_auth.possible_auth == shard_as_auth_t::usable_t::not_usable) {
return fmt::format_to(ctx.out(),
"{{shard-not-usable:{}}}",
as_auth.error_text);
}
if (as_auth.possible_auth == shard_as_auth_t::usable_t::not_found) {
return fmt::format_to(ctx.out(), "{{shard-not-found}}");
}
return fmt::format_to(ctx.out(),
"{{shard-usable: soid:{} {{txt:{}}} }}",
as_auth.oi.soid,
as_auth.error_text);
} else {
return fmt::format_to(
ctx.out(),
"usable:{} soid:{} {{txt:{}}}",
(as_auth.possible_auth == shard_as_auth_t::usable_t::usable) ? "yes"
: "no",
as_auth.oi.soid,
as_auth.error_text);
}
}
bool debug_log{false};
};
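// A usage sketch (illustrative; not part of the original interface): given the
// parse() above, fmt::format("{:D}", as_auth) selects the verbose debug form,
// while fmt::format("{}", as_auth) produces the terse representation.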
struct auth_selection_t {
  shard_to_scrubmap_t::iterator auth; ///< an iter into one of this_chunk->received_maps
pg_shard_t auth_shard; // set to auth->first
object_info_t auth_oi;
shard_info_map_t shard_map;
bool is_auth_available{false}; ///< managed to select an auth' source?
bool digest_match{true}; ///< do all (existing) digests match?
};
// note: some scrub tests are sensitive to the specific format of
// auth_selection_t listing in the logs
template <>
struct fmt::formatter<auth_selection_t> {
template <typename ParseContext>
constexpr auto parse(ParseContext& ctx)
{
return ctx.begin();
}
template <typename FormatContext>
auto format(auth_selection_t const& aus, FormatContext& ctx)
{
return fmt::format_to(ctx.out(),
" {{AU-S: {}->{:x} OI({:x}:{}) {} dm:{}}} ",
aus.auth->first,
(uint64_t)(&aus.auth->second),
(uint64_t)(&aus.auth_oi),
aus.auth_oi,
aus.shard_map.size(),
aus.digest_match);
}
};
/**
* the back-end data that is per-chunk
*
* Created by the Scrubber after all replicas' maps have arrived.
*/
struct scrub_chunk_t {
explicit scrub_chunk_t(pg_shard_t i_am) { received_maps[i_am] = ScrubMap{}; }
/// the working set of scrub maps: the received maps, plus
/// Primary's own map.
std::map<pg_shard_t, ScrubMap> received_maps;
/// a collection of all objs mentioned in the maps
std::set<hobject_t> authoritative_set;
utime_t started{ceph_clock_now()};
digests_fixes_t missing_digest;
/// Map from object with errors to good peers
std::map<hobject_t, std::list<pg_shard_t>> authoritative;
inconsistent_objs_t m_inconsistent_objs;
/// shallow/deep error counters
error_counters_t m_error_counts;
// these must be reset for each element:
std::set<pg_shard_t> cur_missing;
std::set<pg_shard_t> cur_inconsistent;
bool fix_digest{false};
};
/**
* ScrubBackend wraps the data and operations required for the back-end part of
* the scrubbing (i.e. for comparing the maps and fixing objects).
*
* Created anew upon each initiation of a scrub session.
*/
class ScrubBackend {
public:
// Primary constructor
ScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep,
const std::set<pg_shard_t>& acting);
// Replica constructor: no primary map
ScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep);
friend class PgScrubber;
friend class TestScrubBackend;
/**
* reset the per-chunk data structure (scrub_chunk_t).
* Create an empty scrub-map for this shard, and place it
* in the appropriate entry in 'received_maps'.
*
* @returns a pointer to the newly created ScrubMap.
*/
void new_chunk();
ScrubMap& get_primary_scrubmap();
/**
* sets Backend's m_repair flag (setting m_mode_desc to a corresponding
* string)
*/
void update_repair_status(bool should_repair);
std::vector<Scrub::snap_mapper_fix_t> replica_clean_meta(
ScrubMap& smap,
bool max_reached,
const hobject_t& start,
Scrub::SnapMapReaderI& snaps_getter);
/**
* decode the arriving MOSDRepScrubMap message, placing the replica's
* scrub-map into received_maps[from].
*
* @param from replica
*/
void decode_received_map(pg_shard_t from, const MOSDRepScrubMap& msg);
objs_fix_list_t scrub_compare_maps(bool max_reached,
Scrub::SnapMapReaderI& snaps_getter);
int scrub_process_inconsistent();
const omap_stat_t& this_scrub_omapstats() const { return m_omap_stats; }
int authoritative_peers_count() const { return m_auth_peers.size(); };
std::ostream& logger_prefix(std::ostream* _dout, const ScrubBackend* t);
private:
// set/constructed at the ctor():
ScrubBeListener& m_scrubber;
Scrub::PgScrubBeListener& m_pg;
const pg_shard_t m_pg_whoami;
bool m_repair;
const scrub_level_t m_depth;
const spg_t m_pg_id;
std::vector<pg_shard_t> m_acting_but_me; // primary only
bool m_is_replicated{true};
std::string_view m_mode_desc;
std::string m_formatted_id;
const PGPool& m_pool;
bool m_incomplete_clones_allowed{false};
/// collecting some scrub-session-wide omap stats
omap_stat_t m_omap_stats;
/// Mapping from object with errors to good peers
std::map<hobject_t, auth_peers_t> m_auth_peers;
// shorthands:
ConfigProxy& m_conf;
LoggerSinkSet& clog;
private:
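  /// the result of match_in_shards(): the shards that may serve as auth
  /// sources, plus the shards that showed object-level errors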
struct auth_and_obj_errs_t {
std::list<pg_shard_t> auth_list;
std::set<pg_shard_t> object_errors;
};
std::optional<scrub_chunk_t> this_chunk;
/// Maps from objects with errors to missing peers
HobjToShardSetMapping m_missing; // used by scrub_process_inconsistent()
/// Maps from objects with errors to inconsistent peers
HobjToShardSetMapping m_inconsistent; // used by scrub_process_inconsistent()
/// Cleaned std::map pending snap metadata scrub
ScrubMap m_cleaned_meta_map{};
/// a reference to the primary map
ScrubMap& my_map();
/// shallow/deep error counters
error_counters_t get_error_counts() const { return this_chunk->m_error_counts; }
/**
* merge_to_authoritative_set() updates
* - this_chunk->maps[from] with the replicas' scrub-maps;
* - this_chunk->authoritative_set as a union of all the maps' objects;
*/
void merge_to_authoritative_set();
// note: used by both Primary & replicas
static ScrubMap clean_meta_map(ScrubMap& cleaned, bool max_reached);
void compare_smaps();
/// might return error messages to be cluster-logged
std::optional<std::string> compare_obj_in_maps(const hobject_t& ho);
void omap_checks();
std::optional<auth_and_obj_errs_t> for_empty_auth_list(
std::list<pg_shard_t>&& auths,
std::set<pg_shard_t>&& obj_errors,
shard_to_scrubmap_t::iterator auth,
const hobject_t& ho,
std::stringstream& errstream);
auth_and_obj_errs_t match_in_shards(const hobject_t& ho,
auth_selection_t& auth_sel,
inconsistent_obj_wrapper& obj_result,
std::stringstream& errstream);
// returns: true if a discrepancy was found
bool compare_obj_details(pg_shard_t auth_shard,
const ScrubMap::object& auth,
const object_info_t& auth_oi,
const ScrubMap::object& candidate,
shard_info_wrapper& shard_result,
inconsistent_obj_wrapper& obj_result,
std::stringstream& errorstream,
bool has_snapset);
void repair_object(const hobject_t& soid,
const auth_peers_t& ok_peers,
const std::set<pg_shard_t>& bad_peers);
/**
* An auxiliary used by select_auth_object() to test a specific shard
* as a possible auth candidate.
* @param ho the hobject for which we are looking for an auth source
* @param srd the candidate shard
* @param shard_map [out] a collection of shard_info-s per shard.
* possible_auth_shard() might set error flags in the relevant (this shard's)
* entry.
*/
shard_as_auth_t possible_auth_shard(const hobject_t& ho,
const pg_shard_t& srd,
shard_info_map_t& shard_map);
auth_selection_t select_auth_object(const hobject_t& ho,
std::stringstream& errstream);
enum class digest_fixing_t { no, if_aged, force };
/*
* an aux used by inconsistents() to determine whether to fix the digest
*/
[[nodiscard]] digest_fixing_t should_fix_digest(
const hobject_t& ho,
const ScrubMap::object& auth_object,
const object_info_t& auth_oi,
bool repair_flag,
std::stringstream& errstream);
void inconsistents(const hobject_t& ho,
ScrubMap::object& auth_object,
object_info_t& auth_oi, // consider moving to object
auth_and_obj_errs_t&& auth_n_errs,
std::stringstream& errstream);
int process_clones_to(const std::optional<hobject_t>& head,
const std::optional<SnapSet>& snapset,
std::optional<snapid_t> target,
std::vector<snapid_t>::reverse_iterator* curclone,
inconsistent_snapset_wrapper& e);
/**
* Validate consistency of the object info and snap sets.
*/
void scrub_snapshot_metadata(ScrubMap& map);
/**
* Updates the "global" (i.e. - not 'per-chunk') databases:
* - in m_authoritative: a list of good peers for each "problem" object in
* the current chunk;
* - in m_cleaned_meta_map: a "cleaned" version of the object (the one from
* the selected shard).
*/
void update_authoritative();
void log_missing(int missing,
const std::optional<hobject_t>& head,
const char* logged_func_name);
/**
* returns a list of snaps "fix orders"
*/
std::vector<Scrub::snap_mapper_fix_t> scan_snaps(
ScrubMap& smap,
Scrub::SnapMapReaderI& snaps_getter);
/**
* an aux used by scan_snaps(), possibly returning a fix-order
* for a specific hobject.
*/
std::optional<Scrub::snap_mapper_fix_t> scan_object_snaps(
const hobject_t& hoid,
const SnapSet& snapset,
Scrub::SnapMapReaderI& snaps_getter);
// accessing the PG backend for this translation service
uint64_t logical_to_ondisk_size(uint64_t logical_size) const;
};
template <>
struct fmt::formatter<data_omap_digests_t> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const data_omap_digests_t& dg, FormatContext& ctx)
{
// can't use value_or() due to different output types
if (std::get<0>(dg).has_value()) {
fmt::format_to(ctx.out(), "[{:#x}/", std::get<0>(dg).value());
} else {
fmt::format_to(ctx.out(), "[---/");
}
if (std::get<1>(dg).has_value()) {
return fmt::format_to(ctx.out(), "{:#x}]", std::get<1>(dg).value());
} else {
return fmt::format_to(ctx.out(), "---]");
}
}
};
template <>
struct fmt::formatter<std::pair<hobject_t, data_omap_digests_t>> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const std::pair<hobject_t, data_omap_digests_t>& x,
FormatContext& ctx) const
{
return fmt::format_to(ctx.out(),
"{{ {} - {} }}",
std::get<0>(x),
std::get<1>(x));
}
};
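// Usage sketch (illustrative only; not part of the original header): the
// formatters above allow logging a digests pair directly, e.g.
//   data_omap_digests_t dg{0x1234abcd, std::nullopt};
//   fmt::format("{}", dg);  // -> "[0x1234abcd/---]"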
| 18,408 | 32.169369 | 82 | h |
null | ceph-main/src/osd/scrubber/scrub_machine.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <chrono>
#include <typeinfo>
#include <boost/core/demangle.hpp>
#include "osd/OSD.h"
#include "osd/OpRequest.h"
#include "ScrubStore.h"
#include "scrub_machine.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_osd
#undef dout_prefix
#define dout_prefix *_dout << " scrubberFSM "
using namespace std::chrono;
using namespace std::chrono_literals;
#define DECLARE_LOCALS \
auto& machine = context<ScrubMachine>(); \
std::ignore = machine; \
ScrubMachineListener* scrbr = machine.m_scrbr; \
std::ignore = scrbr; \
auto pg_id = machine.m_pg_id; \
std::ignore = pg_id;
NamedSimply::NamedSimply(ScrubMachineListener* scrubber, const char* name)
{
scrubber->set_state_name(name);
}
namespace Scrub {
// --------- trace/debug auxiliaries -------------------------------
void on_event_creation(std::string_view nm)
{
dout(20) << " event: --vvvv---- " << nm << dendl;
}
void on_event_discard(std::string_view nm)
{
dout(20) << " event: --^^^^---- " << nm << dendl;
}
void ScrubMachine::assert_not_active() const
{
ceph_assert(state_cast<const NotActive*>());
}
bool ScrubMachine::is_reserving() const
{
return state_cast<const ReservingReplicas*>();
}
bool ScrubMachine::is_accepting_updates() const
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
ceph_assert(scrbr->is_primary());
return state_cast<const WaitLastUpdate*>();
}
// for the rest of the code in this file - we know what PG we are dealing with:
#undef dout_prefix
#define dout_prefix _prefix(_dout, this->context<ScrubMachine>())
template <class T>
static ostream& _prefix(std::ostream* _dout, T& t)
{
return t.gen_prefix(*_dout);
}
std::ostream& ScrubMachine::gen_prefix(std::ostream& out) const
{
return m_scrbr->gen_prefix(out) << "FSM: ";
}
// ////////////// the actual actions
// ----------------------- NotActive -----------------------------------------
NotActive::NotActive(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "NotActive")
{
dout(10) << "-- state -->> NotActive" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->clear_queued_or_active();
}
sc::result NotActive::react(const StartScrub&)
{
dout(10) << "NotActive::react(const StartScrub&)" << dendl;
DECLARE_LOCALS;
scrbr->set_scrub_begin_time();
return transit<ReservingReplicas>();
}
sc::result NotActive::react(const AfterRepairScrub&)
{
dout(10) << "NotActive::react(const AfterRepairScrub&)" << dendl;
DECLARE_LOCALS;
scrbr->set_scrub_begin_time();
return transit<ReservingReplicas>();
}
// ----------------------- ReservingReplicas ---------------------------------
ReservingReplicas::ReservingReplicas(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ReservingReplicas")
{
dout(10) << "-- state -->> ReservingReplicas" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
// prevent the OSD from starting another scrub while we are trying to secure
// replicas resources
scrbr->set_reserving_now();
scrbr->reserve_replicas();
auto timeout = scrbr->get_cct()->_conf.get_val<
std::chrono::milliseconds>("osd_scrub_reservation_timeout");
if (timeout.count() > 0) {
// Start a timer to handle case where the replicas take a long time to
// ack the reservation. See ReservationTimeout handler below.
m_timeout_token = machine.schedule_timer_event_after<ReservationTimeout>(
timeout);
}
}
ReservingReplicas::~ReservingReplicas()
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->clear_reserving_now();
}
sc::result ReservingReplicas::react(const ReservationTimeout&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "ReservingReplicas::react(const ReservationTimeout&)" << dendl;
dout(10)
<< "PgScrubber: " << scrbr->get_spgid()
<< " timeout on reserving replicas (since " << entered_at
<< ")" << dendl;
scrbr->get_clog()->warn()
<< "osd." << scrbr->get_whoami()
<< " PgScrubber: " << scrbr->get_spgid()
<< " timeout on reserving replicsa (since " << entered_at
<< ")";
scrbr->on_replica_reservation_timeout();
return discard_event();
}
sc::result ReservingReplicas::react(const ReservationFailure&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "ReservingReplicas::react(const ReservationFailure&)" << dendl;
// the Scrubber must release all resources and abort the scrubbing
scrbr->clear_pgscrub_state();
return transit<NotActive>();
}
/**
* note: the event poster is handling the scrubber reset
*/
sc::result ReservingReplicas::react(const FullReset&)
{
dout(10) << "ReservingReplicas::react(const FullReset&)" << dendl;
return transit<NotActive>();
}
// ----------------------- ActiveScrubbing -----------------------------------
ActiveScrubbing::ActiveScrubbing(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ActiveScrubbing")
{
dout(10) << "-- state -->> ActiveScrubbing" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->on_init();
}
/**
* upon exiting the Active state
*/
ActiveScrubbing::~ActiveScrubbing()
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(15) << __func__ << dendl;
scrbr->unreserve_replicas();
scrbr->clear_queued_or_active();
}
/*
* The only source of an InternalError event as of now is the BuildMap state,
* when encountering a backend error.
* We kill the scrub and reset the FSM.
*/
sc::result ActiveScrubbing::react(const InternalError&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << __func__ << dendl;
scrbr->clear_pgscrub_state();
return transit<NotActive>();
}
sc::result ActiveScrubbing::react(const FullReset&)
{
dout(10) << "ActiveScrubbing::react(const FullReset&)" << dendl;
// caller takes care of clearing the scrubber & FSM states
return transit<NotActive>();
}
// ----------------------- RangeBlocked -----------------------------------
/*
* Blocked. Will be released by kick_object_context_blocked() (or upon
* an abort)
*
* Note: we are never expected to be waiting for long for a blocked object.
* Unfortunately we know from experience that a bug elsewhere might result
* in an indefinite wait in this state, for an object that is never released.
* If that happens, all we can do is to issue a warning message to help
* with the debugging.
*/
RangeBlocked::RangeBlocked(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/RangeBlocked")
{
dout(10) << "-- state -->> Act/RangeBlocked" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
auto grace = scrbr->get_range_blocked_grace();
if (grace == ceph::timespan{}) {
// we will not be sending any alarms re the blocked object
dout(10)
<< __func__
<< ": blocked-alarm disabled ('osd_blocked_scrub_grace_period' set to 0)"
<< dendl;
} else {
// Schedule an event to warn that the pg has been blocked for longer than
// the timeout, see RangeBlockedAlarm handler below
dout(20) << fmt::format(": timeout:{}",
std::chrono::duration_cast<seconds>(grace))
<< dendl;
m_timeout_token = machine.schedule_timer_event_after<RangeBlockedAlarm>(
grace);
}
}
sc::result RangeBlocked::react(const RangeBlockedAlarm&)
{
DECLARE_LOCALS;
char buf[50];
std::time_t now_c = ceph::coarse_real_clock::to_time_t(entered_at);
strftime(buf, sizeof(buf), "%Y-%m-%dT%H:%M:%S", std::localtime(&now_c));
dout(10)
<< "PgScrubber: " << scrbr->get_spgid()
<< " blocked on an object for too long (since " << buf << ")" << dendl;
scrbr->get_clog()->warn()
<< "osd." << scrbr->get_whoami()
<< " PgScrubber: " << scrbr->get_spgid()
<< " blocked on an object for too long (since " << buf
<< ")";
scrbr->set_scrub_blocked(utime_t{now_c, 0});
return discard_event();
}
// ----------------------- PendingTimer -----------------------------------
/**
* Sleeping till timer reactivation - or just requeuing
*/
PendingTimer::PendingTimer(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/PendingTimer")
{
dout(10) << "-- state -->> Act/PendingTimer" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
auto sleep_time = scrbr->get_scrub_sleep_time();
if (sleep_time.count()) {
// the following log line is used by osd-scrub-test.sh
dout(20) << __func__ << " scrub state is PendingTimer, sleeping" << dendl;
dout(20) << "PgScrubber: " << scrbr->get_spgid()
<< " sleeping for " << sleep_time << dendl;
m_sleep_timer = machine.schedule_timer_event_after<SleepComplete>(
sleep_time);
} else {
scrbr->queue_for_scrub_resched(Scrub::scrub_prio_t::high_priority);
}
}
sc::result PendingTimer::react(const SleepComplete&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "PendingTimer::react(const SleepComplete&)" << dendl;
auto slept_for = ceph::coarse_real_clock::now() - entered_at;
dout(20) << "PgScrubber: " << scrbr->get_spgid()
<< " slept for " << slept_for << dendl;
scrbr->queue_for_scrub_resched(Scrub::scrub_prio_t::low_priority);
return discard_event();
}
// ----------------------- NewChunk -----------------------------------
/**
* Preconditions:
* - preemption data was set
* - epoch start was updated
*/
NewChunk::NewChunk(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/NewChunk")
{
dout(10) << "-- state -->> Act/NewChunk" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->get_preemptor().adjust_parameters();
// choose range to work on
// select_range_n_notify() will signal either SelectedChunkFree or
// ChunkIsBusy. If 'busy', we transition to Blocked, and wait for the
// range to become available.
scrbr->select_range_n_notify();
}
sc::result NewChunk::react(const SelectedChunkFree&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "NewChunk::react(const SelectedChunkFree&)" << dendl;
scrbr->set_subset_last_update(scrbr->search_log_for_updates());
return transit<WaitPushes>();
}
// ----------------------- WaitPushes -----------------------------------
WaitPushes::WaitPushes(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/WaitPushes")
{
dout(10) << " -- state -->> Act/WaitPushes" << dendl;
post_event(ActivePushesUpd{});
}
/*
* Triggered externally, by the entity that had an update re pushes
*/
sc::result WaitPushes::react(const ActivePushesUpd&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10)
<< "WaitPushes::react(const ActivePushesUpd&) pending_active_pushes: "
<< scrbr->pending_active_pushes() << dendl;
if (!scrbr->pending_active_pushes()) {
// done waiting
return transit<WaitLastUpdate>();
}
return discard_event();
}
// ----------------------- WaitLastUpdate -----------------------------------
WaitLastUpdate::WaitLastUpdate(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/WaitLastUpdate")
{
dout(10) << " -- state -->> Act/WaitLastUpdate" << dendl;
post_event(UpdatesApplied{});
}
/**
* Note:
 * Updates are locally readable immediately. Thus, on the replicas we do not
 * need to wait for the update notifications before scrubbing. For the Primary
 * it's a bit different: on EC (and only there) rmw operations have an
 * additional read roundtrip. That means that on the Primary we need to wait
 * for last_update_applied (the replica side, even on EC, is still safe,
 * since the actual transaction will already be readable by commit time).
*/
void WaitLastUpdate::on_new_updates(const UpdatesApplied&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "WaitLastUpdate::on_new_updates(const UpdatesApplied&)" << dendl;
if (scrbr->has_pg_marked_new_updates()) {
post_event(InternalAllUpdates{});
} else {
// will be requeued by op_applied
dout(10) << "wait for EC read/modify/writes to queue" << dendl;
}
}
/*
* request maps from the replicas in the acting set
*/
sc::result WaitLastUpdate::react(const InternalAllUpdates&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "WaitLastUpdate::react(const InternalAllUpdates&)" << dendl;
scrbr->get_replicas_maps(scrbr->get_preemptor().is_preemptable());
return transit<BuildMap>();
}
// ----------------------- BuildMap -----------------------------------
BuildMap::BuildMap(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/BuildMap")
{
dout(10) << " -- state -->> Act/BuildMap" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
// no need to check for an epoch change, as all possible flows that brought
// us here have a check_interval() verification of their final event.
if (scrbr->get_preemptor().was_preempted()) {
// we were preempted, either directly or by a replica
dout(10) << __func__ << " preempted!!!" << dendl;
scrbr->mark_local_map_ready();
post_event(IntBmPreempted{});
} else {
auto ret = scrbr->build_primary_map_chunk();
if (ret == -EINPROGRESS) {
// must wait for the backend to finish. No specific event provided.
// build_primary_map_chunk() has already requeued us.
dout(20) << "waiting for the backend..." << dendl;
} else if (ret < 0) {
dout(10) << "BuildMap::BuildMap() Error! Aborting. Ret: " << ret << dendl;
post_event(InternalError{});
} else {
// the local map was created
post_event(IntLocalMapDone{});
}
}
}
sc::result BuildMap::react(const IntLocalMapDone&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "BuildMap::react(const IntLocalMapDone&)" << dendl;
scrbr->mark_local_map_ready();
return transit<WaitReplicas>();
}
// ----------------------- DrainReplMaps -----------------------------------
DrainReplMaps::DrainReplMaps(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/DrainReplMaps")
{
dout(10) << "-- state -->> Act/DrainReplMaps" << dendl;
// we may have got all maps already. Send the event that will make us check.
post_event(GotReplicas{});
}
sc::result DrainReplMaps::react(const GotReplicas&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "DrainReplMaps::react(const GotReplicas&)" << dendl;
if (scrbr->are_all_maps_available()) {
// NewChunk will handle the preemption that brought us to this state
return transit<PendingTimer>();
}
dout(15) << "DrainReplMaps::react(const GotReplicas&): still draining "
"incoming maps: "
<< scrbr->dump_awaited_maps() << dendl;
return discard_event();
}
// ----------------------- WaitReplicas -----------------------------------
WaitReplicas::WaitReplicas(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/WaitReplicas")
{
dout(10) << "-- state -->> Act/WaitReplicas" << dendl;
post_event(GotReplicas{});
}
/**
 * note: now that maps_compare_n_cleanup() is "futurized"(*), and we remain in
 * this state for a while even after we got all our maps, we must prevent
 * are_all_maps_available() (actually - the code after the if()) from being
 * called more than once.
 * This is basically a separate state, but it's too transitory and artificial
 * to justify the cost of a separate state.
 * (*) "futurized" - in Crimson, the call to maps_compare_n_cleanup() returns
 * immediately after initiating the process. The actual completion of the
 * maps comparison etc. is signalled via an event. As we share the code with
 * the "classic" OSD, here too maps_compare_n_cleanup() is responsible for
 * signalling the completion of the processing.
 */
sc::result WaitReplicas::react(const GotReplicas&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "WaitReplicas::react(const GotReplicas&)" << dendl;
if (!all_maps_already_called && scrbr->are_all_maps_available()) {
dout(10) << "WaitReplicas::react(const GotReplicas&) got all" << dendl;
all_maps_already_called = true;
// were we preempted?
if (scrbr->get_preemptor().disable_and_test()) { // a test&set
dout(10) << "WaitReplicas::react(const GotReplicas&) PREEMPTED!" << dendl;
return transit<PendingTimer>();
} else {
scrbr->maps_compare_n_cleanup();
return transit<WaitDigestUpdate>();
}
} else {
return discard_event();
}
}
sc::result WaitReplicas::react(const DigestUpdate&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
auto warn_msg =
"WaitReplicas::react(const DigestUpdate&): Unexpected DigestUpdate event"s;
dout(10) << warn_msg << dendl;
scrbr->log_cluster_warning(warn_msg);
return discard_event();
}
// ----------------------- WaitDigestUpdate -----------------------------------
WaitDigestUpdate::WaitDigestUpdate(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "Act/WaitDigestUpdate")
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "-- state -->> Act/WaitDigestUpdate" << dendl;
// perform an initial check: maybe we already
// have all the updates we need:
// (note that DigestUpdate is usually an external event)
post_event(DigestUpdate{});
}
sc::result WaitDigestUpdate::react(const DigestUpdate&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "WaitDigestUpdate::react(const DigestUpdate&)" << dendl;
// on_digest_updates() will either:
// - do nothing - if we are still waiting for updates, or
// - finish the scrubbing of the current chunk, and:
// - send NextChunk, or
// - send ScrubFinished
scrbr->on_digest_updates();
return discard_event();
}
sc::result WaitDigestUpdate::react(const ScrubFinished&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "WaitDigestUpdate::react(const ScrubFinished&)" << dendl;
scrbr->set_scrub_duration();
scrbr->scrub_finish();
return transit<NotActive>();
}
ScrubMachine::ScrubMachine(PG* pg, ScrubMachineListener* pg_scrub)
: m_pg_id{pg->pg_id}
, m_scrbr{pg_scrub}
{}
ScrubMachine::~ScrubMachine() = default;
// -------- for replicas -----------------------------------------------------
// ----------------------- ReservedReplica --------------------------------
ReservedReplica::ReservedReplica(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ReservedReplica")
{
dout(10) << "-- state -->> ReservedReplica" << dendl;
}
ReservedReplica::~ReservedReplica()
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->dec_scrubs_remote();
scrbr->advance_token();
}
// ----------------------- ReplicaIdle --------------------------------
ReplicaIdle::ReplicaIdle(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ReplicaIdle")
{
dout(10) << "-- state -->> ReplicaIdle" << dendl;
}
ReplicaIdle::~ReplicaIdle()
{
}
// ----------------------- ReplicaActiveOp --------------------------------
ReplicaActiveOp::ReplicaActiveOp(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ReplicaActiveOp")
{
dout(10) << "-- state -->> ReplicaActiveOp" << dendl;
}
ReplicaActiveOp::~ReplicaActiveOp()
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->replica_handling_done();
}
// ----------------------- ReplicaWaitUpdates --------------------------------
ReplicaWaitUpdates::ReplicaWaitUpdates(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ReplicaWaitUpdates")
{
dout(10) << "-- state -->> ReplicaWaitUpdates" << dendl;
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
scrbr->on_replica_init();
}
/*
* Triggered externally, by the entity that had an update re pushes
*/
sc::result ReplicaWaitUpdates::react(const ReplicaPushesUpd&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "ReplicaWaitUpdates::react(const ReplicaPushesUpd&): "
<< scrbr->pending_active_pushes() << dendl;
if (scrbr->pending_active_pushes() == 0) {
// done waiting
return transit<ReplicaBuildingMap>();
}
return discard_event();
}
// ----------------------- ReplicaBuildingMap -----------------------------------
ReplicaBuildingMap::ReplicaBuildingMap(my_context ctx)
: my_base(ctx)
, NamedSimply(context<ScrubMachine>().m_scrbr, "ReplicaBuildingMap")
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "-- state -->> ReplicaBuildingMap" << dendl;
// and as we might have skipped ReplicaWaitUpdates:
scrbr->on_replica_init();
post_event(SchedReplica{});
}
sc::result ReplicaBuildingMap::react(const SchedReplica&)
{
DECLARE_LOCALS; // 'scrbr' & 'pg_id' aliases
dout(10) << "ReplicaBuildingMap::react(const SchedReplica&). is_preemptable? "
<< scrbr->get_preemptor().is_preemptable() << dendl;
if (scrbr->get_preemptor().was_preempted()) {
dout(10) << "replica scrub job preempted" << dendl;
scrbr->send_preempted_replica();
scrbr->replica_handling_done();
return transit<ReplicaIdle>();
}
// start or check progress of build_replica_map_chunk()
auto ret_init = scrbr->build_replica_map_chunk();
if (ret_init != -EINPROGRESS) {
return transit<ReplicaIdle>();
}
return discard_event();
}
} // namespace Scrub
| 21,529 | 29.027894 | 81 | cc |
null | ceph-main/src/osd/scrubber/scrub_machine.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <boost/statechart/custom_reaction.hpp>
#include <boost/statechart/deferral.hpp>
#include <boost/statechart/event.hpp>
#include <boost/statechart/event_base.hpp>
#include <boost/statechart/in_state_reaction.hpp>
#include <boost/statechart/simple_state.hpp>
#include <boost/statechart/state.hpp>
#include <boost/statechart/state_machine.hpp>
#include <boost/statechart/transition.hpp>
#include "common/version.h"
#include "include/Context.h"
#include "osd/scrubber_common.h"
#include "scrub_machine_lstnr.h"
/// a wrapper that sets the FSM state description used by the
/// PgScrubber
/// \todo consider using the full NamedState as in Peering
struct NamedSimply {
explicit NamedSimply(ScrubMachineListener* scrubber, const char* name);
};
class PG; // holding a pointer to that one - just for testing
class PgScrubber;
namespace Scrub {
namespace sc = ::boost::statechart;
namespace mpl = ::boost::mpl;
//
// EVENTS
//
void on_event_creation(std::string_view nm);
void on_event_discard(std::string_view nm);
#define MEV(E) \
struct E : sc::event<E> { \
inline static int actv{0}; \
E() \
{ \
if (!actv++) \
on_event_creation(#E); \
} \
~E() \
{ \
if (!--actv) \
on_event_discard(#E); \
} \
void print(std::ostream* out) const { *out << #E; } \
std::string_view print() const { return #E; } \
};
/// all replicas have granted our reserve request
MEV(RemotesReserved)
/// a reservation request has failed
MEV(ReservationFailure)
/// reservations have timed out
MEV(ReservationTimeout)
/// initiate a new scrubbing session (relevant if we are a Primary)
MEV(StartScrub)
/// initiate a new scrubbing session. Only triggered at Recovery completion
MEV(AfterRepairScrub)
/// triggered when the PG unblocked an object that was marked for scrubbing.
/// Via the PGScrubUnblocked op
MEV(Unblocked)
MEV(InternalSchedScrub)
MEV(RangeBlockedAlarm)
MEV(SleepComplete)
MEV(SelectedChunkFree)
MEV(ChunkIsBusy)
/// Update to active_pushes. 'active_pushes' represents recovery that
/// is in-flight to the local ObjectStore
MEV(ActivePushesUpd)
/// (Primary only) all updates are committed
MEV(UpdatesApplied)
/// the internal counterpart of UpdatesApplied
MEV(InternalAllUpdates)
/// got a map from a replica
MEV(GotReplicas)
/// internal - BuildMap preempted. Required, as detected within the ctor
MEV(IntBmPreempted)
MEV(InternalError)
MEV(IntLocalMapDone)
/// external. called upon success of a MODIFY op. See
/// scrub_snapshot_metadata()
MEV(DigestUpdate)
/// event emitted when the replica grants a reservation to the primary
MEV(ReplicaGrantReservation)
/// initiating replica scrub
MEV(StartReplica)
/// 'start replica' when there are no pending updates
MEV(StartReplicaNoWait)
MEV(SchedReplica)
/// Update to active_pushes. 'active_pushes' represents recovery
/// that is in-flight to the local ObjectStore
MEV(ReplicaPushesUpd)
/// guarantee that the FSM is in the quiescent state (i.e. NotActive)
MEV(FullReset)
/// finished handling this chunk. Go get the next one
MEV(NextChunk)
/// all chunks handled
MEV(ScrubFinished)
//
// STATES
//
struct NotActive; ///< the quiescent state. No active scrubbing.
struct ReservingReplicas; ///< securing scrub resources from replicas' OSDs
struct ActiveScrubbing; ///< the active state for a Primary. A sub-machine.
struct ReplicaIdle; ///< Initial reserved replica state
struct ReplicaBuildingMap; ///< an active state for a replica.
class ScrubMachine : public sc::state_machine<ScrubMachine, NotActive> {
public:
friend class PgScrubber;
public:
explicit ScrubMachine(PG* pg, ScrubMachineListener* pg_scrub);
~ScrubMachine();
spg_t m_pg_id;
ScrubMachineListener* m_scrbr;
std::ostream& gen_prefix(std::ostream& out) const;
void assert_not_active() const;
[[nodiscard]] bool is_reserving() const;
[[nodiscard]] bool is_accepting_updates() const;
private:
/**
* scheduled_event_state_t
*
* Heap allocated, ref-counted state shared between scheduled event callback
* and timer_event_token_t. Ensures that callback and timer_event_token_t
   * can be safely destroyed in either order while still allowing for
* cancellation.
*/
struct scheduled_event_state_t {
bool canceled = false;
ScrubMachineListener::scrubber_callback_cancel_token_t cb_token = nullptr;
operator bool() const {
return nullptr != cb_token;
}
~scheduled_event_state_t() {
/* For the moment, this assert encodes an assumption that we always
* retain the token until the event either fires or is canceled.
      * If a user needs/wants to relax that requirement, this assert can
* be removed */
assert(!cb_token);
}
};
public:
/**
* timer_event_token_t
*
* Represents in-flight timer event. Destroying the object or invoking
* release() directly will cancel the in-flight timer event preventing it
* from being delivered. The intended usage is to invoke
* schedule_timer_event_after in the constructor of the state machine state
* intended to handle the event and assign the returned timer_event_token_t
   * to a member of that state. That way, exiting the state will implicitly
   * cancel the event. See RangeBlocked::m_timeout_token and
* RangeBlockedAlarm for an example usage.
*/
class timer_event_token_t {
friend ScrubMachine;
// invariant: (bool)parent == (bool)event_state
ScrubMachine *parent = nullptr;
std::shared_ptr<scheduled_event_state_t> event_state;
timer_event_token_t(
ScrubMachine *parent,
std::shared_ptr<scheduled_event_state_t> event_state)
: parent(parent), event_state(event_state) {
assert(*this);
}
void swap(timer_event_token_t &rhs) {
std::swap(parent, rhs.parent);
std::swap(event_state, rhs.event_state);
}
public:
timer_event_token_t() = default;
timer_event_token_t(timer_event_token_t &&rhs) {
swap(rhs);
assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
}
timer_event_token_t &operator=(timer_event_token_t &&rhs) {
swap(rhs);
assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
return *this;
}
operator bool() const {
assert(static_cast<bool>(parent) == static_cast<bool>(event_state));
return parent;
}
void release() {
if (*this) {
if (*event_state) {
parent->m_scrbr->cancel_callback(event_state->cb_token);
event_state->canceled = true;
event_state->cb_token = nullptr;
}
event_state.reset();
parent = nullptr;
}
}
~timer_event_token_t() {
release();
}
};
/**
* schedule_timer_event_after
*
   * Schedules event EventT{Args...} to be delivered 'duration' in the future.
   * The implementation implicitly drops the event on interval change. The
* returned timer_event_token_t can be used to cancel the event prior to
* its delivery -- it should generally be embedded as a member in the state
* intended to handle the event. See the comment on timer_event_token_t
* for further information.
*/
template <typename EventT, typename... Args>
timer_event_token_t schedule_timer_event_after(
ceph::timespan duration, Args&&... args) {
auto token = std::make_shared<scheduled_event_state_t>();
token->cb_token = m_scrbr->schedule_callback_after(
duration,
[this, token, event=EventT(std::forward<Args>(args)...)] {
if (!token->canceled) {
token->cb_token = nullptr;
process_event(std::move(event));
} else {
assert(nullptr == token->cb_token);
}
}
);
return timer_event_token_t{this, token};
}
};
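// Illustrative usage sketch for schedule_timer_event_after() and
// timer_event_token_t (not part of the original interface; the event and
// state names below are hypothetical). A state arms a timer on entry and
// holds the returned token as a member, so leaving the state cancels the
// pending event automatically. The real users of this pattern are
// RangeBlocked / RangeBlockedAlarm and PendingTimer / SleepComplete below.
//
//   MEV(ExampleAlarm)
//
//   struct ExampleState : sc::state<ExampleState, ScrubMachine>, NamedSimply {
//     explicit ExampleState(my_context ctx)
//         : my_base(ctx)
//         , NamedSimply(context<ScrubMachine>().m_scrbr, "ExampleState")
//     {
//       // deliver ExampleAlarm in 30 seconds, unless the token is released
//       // (e.g. by exiting this state) first
//       m_alarm_token =
//           context<ScrubMachine>().schedule_timer_event_after<ExampleAlarm>(
//               std::chrono::seconds{30});
//     }
//     using reactions = mpl::list<sc::custom_reaction<ExampleAlarm>>;
//     sc::result react(const ExampleAlarm&) { return discard_event(); }
//     // destroying the token (on state exit) cancels the pending event
//     ScrubMachine::timer_event_token_t m_alarm_token;
//   };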
/**
* The Scrubber's base (quiescent) state.
* Scrubbing is triggered by one of the following events:
*
* - (standard scenario for a Primary): 'StartScrub'. Initiates the OSDs
* resources reservation process. Will be issued by PG::scrub(), following a
* queued "PGScrub" op.
*
* - a special end-of-recovery Primary scrub event ('AfterRepairScrub').
*
* - (for a replica) 'StartReplica' or 'StartReplicaNoWait', triggered by
* an incoming MOSDRepScrub message.
*
* note (20.8.21): originally, AfterRepairScrub was triggering a scrub without
* waiting for replica resources to be acquired. But once replicas started
* using the resource-request to identify and tag the scrub session, this
* bypass cannot be supported anymore.
*/
struct NotActive : sc::state<NotActive, ScrubMachine>, NamedSimply {
explicit NotActive(my_context ctx);
using reactions =
mpl::list<sc::custom_reaction<StartScrub>,
// a scrubbing that was initiated at recovery completion:
sc::custom_reaction<AfterRepairScrub>,
sc::transition<ReplicaGrantReservation, ReplicaIdle>>;
sc::result react(const StartScrub&);
sc::result react(const AfterRepairScrub&);
};
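// How the machine is driven (a sketch; 'm_fsm' is a hypothetical pointer to
// the ScrubMachine instance owned by the scrubber): the PgScrubber translates
// incoming messages and scheduling decisions into the events listed above,
// e.g.
//
//   m_fsm->process_event(StartScrub{});
//
// and the reactions declared by the currently-active state select the
// transition to perform.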
struct ReservingReplicas : sc::state<ReservingReplicas, ScrubMachine>,
NamedSimply {
explicit ReservingReplicas(my_context ctx);
~ReservingReplicas();
using reactions = mpl::list<sc::custom_reaction<FullReset>,
// all replicas granted our resources request
sc::transition<RemotesReserved, ActiveScrubbing>,
sc::custom_reaction<ReservationTimeout>,
sc::custom_reaction<ReservationFailure>>;
ceph::coarse_real_clock::time_point entered_at =
ceph::coarse_real_clock::now();
ScrubMachine::timer_event_token_t m_timeout_token;
sc::result react(const FullReset&);
sc::result react(const ReservationTimeout&);
/// at least one replica denied us the scrub resources we've requested
sc::result react(const ReservationFailure&);
};
// the "active" sub-states
/// the objects range is blocked
struct RangeBlocked;
/// either delaying the scrub by some time and requeuing, or just requeuing
struct PendingTimer;
/// select a chunk to scrub, and verify its availability
struct NewChunk;
struct WaitPushes;
struct WaitLastUpdate;
struct BuildMap;
/// a problem during BuildMap. Wait for all replicas to report, then restart.
struct DrainReplMaps;
/// wait for all replicas to report
struct WaitReplicas;
struct WaitDigestUpdate;
struct ActiveScrubbing
: sc::state<ActiveScrubbing, ScrubMachine, PendingTimer>, NamedSimply {
explicit ActiveScrubbing(my_context ctx);
~ActiveScrubbing();
using reactions = mpl::list<sc::custom_reaction<InternalError>,
sc::custom_reaction<FullReset>>;
sc::result react(const FullReset&);
sc::result react(const InternalError&);
};
struct RangeBlocked : sc::state<RangeBlocked, ActiveScrubbing>, NamedSimply {
explicit RangeBlocked(my_context ctx);
using reactions = mpl::list<
sc::custom_reaction<RangeBlockedAlarm>,
sc::transition<Unblocked, PendingTimer>>;
ceph::coarse_real_clock::time_point entered_at =
ceph::coarse_real_clock::now();
ScrubMachine::timer_event_token_t m_timeout_token;
sc::result react(const RangeBlockedAlarm &);
};
/**
* PendingTimer
*
 * Represents the period between chunks. Waits for get_scrub_sleep_time() (if
 * non-zero) by scheduling a SleepComplete event, and then queues an
 * InternalSchedScrub to start the next chunk.
*/
struct PendingTimer : sc::state<PendingTimer, ActiveScrubbing>, NamedSimply {
explicit PendingTimer(my_context ctx);
using reactions = mpl::list<
sc::transition<InternalSchedScrub, NewChunk>,
sc::custom_reaction<SleepComplete>>;
ceph::coarse_real_clock::time_point entered_at =
ceph::coarse_real_clock::now();
ScrubMachine::timer_event_token_t m_sleep_timer;
sc::result react(const SleepComplete&);
};
struct NewChunk : sc::state<NewChunk, ActiveScrubbing>, NamedSimply {
explicit NewChunk(my_context ctx);
using reactions = mpl::list<sc::transition<ChunkIsBusy, RangeBlocked>,
sc::custom_reaction<SelectedChunkFree>>;
sc::result react(const SelectedChunkFree&);
};
/**
* initiate the update process for this chunk
*
 * Wait for 'active_pushes' to clear.
* 'active_pushes' represents recovery that is in-flight to the local
* Objectstore, hence scrub waits until the correct data is readable
* (in-flight data to the Objectstore is not readable until written to
* disk, termed 'applied' here)
*/
struct WaitPushes : sc::state<WaitPushes, ActiveScrubbing>, NamedSimply {
explicit WaitPushes(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<ActivePushesUpd>>;
sc::result react(const ActivePushesUpd&);
};
struct WaitLastUpdate : sc::state<WaitLastUpdate, ActiveScrubbing>,
NamedSimply {
explicit WaitLastUpdate(my_context ctx);
void on_new_updates(const UpdatesApplied&);
using reactions =
mpl::list<sc::custom_reaction<InternalAllUpdates>,
sc::in_state_reaction<UpdatesApplied,
WaitLastUpdate,
&WaitLastUpdate::on_new_updates>>;
sc::result react(const InternalAllUpdates&);
};
struct BuildMap : sc::state<BuildMap, ActiveScrubbing>, NamedSimply {
explicit BuildMap(my_context ctx);
// possible error scenarios:
// - an error reported by the backend will trigger an 'InternalError' event,
// handled by our parent state;
// - if preempted, we switch to DrainReplMaps, where we will wait for all
// replicas to send their maps before acknowledging the preemption;
// - an interval change will be handled by the relevant 'send-event'
// functions, and will translated into a 'FullReset' event.
using reactions = mpl::list<sc::transition<IntBmPreempted, DrainReplMaps>,
// looping, waiting for the backend to finish:
sc::transition<InternalSchedScrub, BuildMap>,
sc::custom_reaction<IntLocalMapDone>>;
sc::result react(const IntLocalMapDone&);
};
/*
* "drain" scrub-maps responses from replicas
*/
struct DrainReplMaps : sc::state<DrainReplMaps, ActiveScrubbing>, NamedSimply {
explicit DrainReplMaps(my_context ctx);
using reactions =
// all replicas are accounted for:
mpl::list<sc::custom_reaction<GotReplicas>>;
sc::result react(const GotReplicas&);
};
struct WaitReplicas : sc::state<WaitReplicas, ActiveScrubbing>, NamedSimply {
explicit WaitReplicas(my_context ctx);
using reactions = mpl::list<
// all replicas are accounted for:
sc::custom_reaction<GotReplicas>,
sc::custom_reaction<DigestUpdate>>;
sc::result react(const GotReplicas&);
sc::result react(const DigestUpdate&);
bool all_maps_already_called{false}; // see comment in react code
};
struct WaitDigestUpdate : sc::state<WaitDigestUpdate, ActiveScrubbing>,
NamedSimply {
explicit WaitDigestUpdate(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<DigestUpdate>,
sc::custom_reaction<ScrubFinished>,
sc::transition<NextChunk, PendingTimer>>;
sc::result react(const DigestUpdate&);
sc::result react(const ScrubFinished&);
};
// ----------------------------- the "replica active" states
/**
* ReservedReplica
*
 * Parent state for replica states. Controls the lifecycle of
 * PgScrubber::m_reservations.
*/
struct ReservedReplica : sc::state<ReservedReplica, ScrubMachine, ReplicaIdle>,
NamedSimply {
explicit ReservedReplica(my_context ctx);
~ReservedReplica();
using reactions = mpl::list<sc::transition<FullReset, NotActive>>;
};
struct ReplicaWaitUpdates;
/**
* ReplicaIdle
*
* Replica is waiting for a map request.
*/
struct ReplicaIdle : sc::state<ReplicaIdle, ReservedReplica>,
NamedSimply {
explicit ReplicaIdle(my_context ctx);
~ReplicaIdle();
using reactions = mpl::list<
sc::transition<StartReplica, ReplicaWaitUpdates>,
sc::transition<StartReplicaNoWait, ReplicaBuildingMap>>;
};
/**
 * ReplicaActiveOp
*
* Lifetime matches handling for a single map request op
*/
struct ReplicaActiveOp
: sc::state<ReplicaActiveOp, ReservedReplica, ReplicaWaitUpdates>,
NamedSimply {
explicit ReplicaActiveOp(my_context ctx);
~ReplicaActiveOp();
};
/*
* Waiting for 'active_pushes' to complete
*
* When in this state:
* - the details of the Primary's request were internalized by PgScrubber;
* - 'active' scrubbing is set
*/
struct ReplicaWaitUpdates : sc::state<ReplicaWaitUpdates, ReservedReplica>,
NamedSimply {
explicit ReplicaWaitUpdates(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<ReplicaPushesUpd>>;
sc::result react(const ReplicaPushesUpd&);
};
struct ReplicaBuildingMap : sc::state<ReplicaBuildingMap, ReservedReplica>
, NamedSimply {
explicit ReplicaBuildingMap(my_context ctx);
using reactions = mpl::list<sc::custom_reaction<SchedReplica>>;
sc::result react(const SchedReplica&);
};
} // namespace Scrub
| 17,326 | 28.977509 | 80 | h |
null | ceph-main/src/osd/scrubber/scrub_machine_lstnr.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/**
* \file the PgScrubber interface used by the scrub FSM
*/
#include "common/version.h"
#include "include/Context.h"
#include "osd/osd_types.h"
struct ScrubMachineListener;
namespace Scrub {
enum class PreemptionNoted { no_preemption, preempted };
/// the interface exposed by the PgScrubber into its internal
/// preemption_data object
struct preemption_t {
virtual ~preemption_t() = default;
preemption_t() = default;
preemption_t(const preemption_t&) = delete;
preemption_t(preemption_t&&) = delete;
[[nodiscard]] virtual bool is_preemptable() const = 0;
[[nodiscard]] virtual bool was_preempted() const = 0;
virtual void adjust_parameters() = 0;
/**
* Try to preempt the scrub.
* 'true' (i.e. - preempted) if:
* preemptable && not already preempted
*/
virtual bool do_preempt() = 0;
/**
* disables preemptions.
* Returns 'true' if we were already preempted
*/
virtual bool disable_and_test() = 0;
};
} // namespace Scrub
struct ScrubMachineListener {
virtual CephContext *get_cct() const = 0;
virtual LogChannelRef &get_clog() const = 0;
virtual int get_whoami() const = 0;
virtual spg_t get_spgid() const = 0;
using scrubber_callback_t = std::function<void(void)>;
using scrubber_callback_cancel_token_t = Context*;
/**
* schedule_callback_after
*
   * cb will be invoked after at least 'duration' time has elapsed.
* Interface implementation is responsible for maintaining and locking
* a PG reference. cb will be silently discarded if the interval has changed
* between the call to schedule_callback_after and when the pg is locked.
*
* Returns an associated token to be used in cancel_callback below.
*/
virtual scrubber_callback_cancel_token_t schedule_callback_after(
ceph::timespan duration, scrubber_callback_t &&cb) = 0;
/**
* cancel_callback
*
   * Attempts to cancel the callback to which the passed token is associated.
   * cancel_callback is best effort; the callback may still fire.
   * cancel_callback guarantees that exactly one of two things will happen:
* - the callback is destroyed and will not be invoked
* - the callback will be invoked
*/
virtual void cancel_callback(scrubber_callback_cancel_token_t) = 0;
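  // Caller-side sketch (illustrative only; 'listener' is a hypothetical
  // ScrubMachineListener*): exactly one of "the callback runs" / "the
  // callback is destroyed without running" will happen.
  //
  //   auto token = listener->schedule_callback_after(
  //       std::chrono::seconds{5}, [] { /* timed work */ });
  //   ...
  //   listener->cancel_callback(token);  // best effort; it may still fire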
virtual ceph::timespan get_range_blocked_grace() = 0;
struct MsgAndEpoch {
MessageRef m_msg;
epoch_t m_epoch;
};
virtual ~ScrubMachineListener() = default;
/// set the string we'd use in logs to convey the current state-machine
/// state.
virtual void set_state_name(const char* name) = 0;
[[nodiscard]] virtual bool is_primary() const = 0;
virtual void select_range_n_notify() = 0;
/// walk the log to find the latest update that affects our chunk
virtual eversion_t search_log_for_updates() const = 0;
virtual eversion_t get_last_update_applied() const = 0;
virtual int pending_active_pushes() const = 0;
virtual int build_primary_map_chunk() = 0;
virtual int build_replica_map_chunk() = 0;
virtual void on_init() = 0;
virtual void on_replica_init() = 0;
virtual void replica_handling_done() = 0;
/// the version of 'scrub_clear_state()' that does not try to invoke FSM
/// services (thus can be called from FSM reactions)
virtual void clear_pgscrub_state() = 0;
/// Get time to sleep before next scrub
virtual std::chrono::milliseconds get_scrub_sleep_time() const = 0;
/// Queues InternalSchedScrub for later
virtual void queue_for_scrub_resched(Scrub::scrub_prio_t prio) = 0;
/**
* Ask all replicas for their scrub maps for the current chunk.
*/
virtual void get_replicas_maps(bool replica_can_preempt) = 0;
virtual void on_digest_updates() = 0;
/// the part that actually finalizes a scrub
virtual void scrub_finish() = 0;
/**
* Prepare a MOSDRepScrubMap message carrying the requested scrub map
* @param was_preempted - were we preempted?
* @return the message, and the current value of 'm_replica_min_epoch' (which
* is used when sending the message, but will be overwritten before that).
*/
[[nodiscard]] virtual MsgAndEpoch prep_replica_map_msg(
Scrub::PreemptionNoted was_preempted) = 0;
/**
* Send to the primary the pre-prepared message containing the requested map
*/
virtual void send_replica_map(const MsgAndEpoch& preprepared) = 0;
/**
* Let the primary know that we were preempted while trying to build the
* requested map.
*/
virtual void send_preempted_replica() = 0;
[[nodiscard]] virtual bool has_pg_marked_new_updates() const = 0;
virtual void set_subset_last_update(eversion_t e) = 0;
[[nodiscard]] virtual bool was_epoch_changed() const = 0;
virtual Scrub::preemption_t& get_preemptor() = 0;
/**
* a "technical" collection of the steps performed once all
* rep maps are available:
* - the maps are compared
* - the scrub region markers (start_ & end_) are advanced
* - callbacks and ops that were pending are allowed to run
*/
virtual void maps_compare_n_cleanup() = 0;
/**
* order the PgScrubber to initiate the process of reserving replicas' scrub
* resources.
*/
virtual void reserve_replicas() = 0;
virtual void unreserve_replicas() = 0;
virtual void on_replica_reservation_timeout() = 0;
virtual void set_scrub_begin_time() = 0;
virtual void set_scrub_duration() = 0;
/**
   * No new scrub session will start while a scrub was initiated on a PG,
   * and that PG is trying to acquire replica resources.
   * set_reserving_now()/clear_reserving_now() let the OSD scrub-queue know
* we are busy reserving.
*/
virtual void set_reserving_now() = 0;
virtual void clear_reserving_now() = 0;
/**
* Manipulate the 'I am being scrubbed now' Scrubber's flag
*/
virtual void set_queued_or_active() = 0;
virtual void clear_queued_or_active() = 0;
/// Release remote scrub reservation
virtual void dec_scrubs_remote() = 0;
/// Advance replica token
virtual void advance_token() = 0;
/**
* Our scrubbing is blocked, waiting for an excessive length of time for
* our target chunk to be unlocked. We will set the corresponding flags,
* both in the OSD_wide scrub-queue object, and in our own scrub-job object.
* Both flags are used to report the unhealthy state in the log and in
* response to scrub-queue queries.
*/
virtual void set_scrub_blocked(utime_t since) = 0;
virtual void clear_scrub_blocked() = 0;
/**
* the FSM interface into the "are we waiting for maps, either our own or from
* replicas" state.
* The FSM can only:
* - mark the local map as available, and
* - query status
*/
virtual void mark_local_map_ready() = 0;
[[nodiscard]] virtual bool are_all_maps_available() const = 0;
/// a log/debug interface
virtual std::string dump_awaited_maps() const = 0;
/// exposed to be used by the scrub_machine logger
virtual std::ostream& gen_prefix(std::ostream& out) const = 0;
/// sending cluster-log warnings
virtual void log_cluster_warning(const std::string& msg) const = 0;
};
| 7,198 | 29.121339 | 80 | h |
null | ceph-main/src/osdc/Filer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <mutex>
#include <algorithm>
#include "Filer.h"
#include "osd/OSDMap.h"
#include "Striper.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "messages/MOSDMap.h"
#include "msg/Messenger.h"
#include "include/Context.h"
#include "common/Finisher.h"
#include "common/config.h"
#define dout_subsys ceph_subsys_filer
#undef dout_prefix
#define dout_prefix *_dout << objecter->messenger->get_myname() << ".filer "
using std::hex;
using std::dec;
using std::vector;
class Filer::C_Probe : public Context {
public:
Filer *filer;
Probe *probe;
object_t oid;
uint64_t size;
ceph::real_time mtime;
C_Probe(Filer *f, Probe *p, object_t o) : filer(f), probe(p), oid(o),
size(0) {}
void finish(int r) override {
if (r == -ENOENT) {
r = 0;
ceph_assert(size == 0);
}
bool probe_complete;
{
Probe::unique_lock pl(probe->lock);
if (r != 0) {
probe->err = r;
}
probe_complete = filer->_probed(probe, oid, size, mtime, pl);
ceph_assert(!pl.owns_lock());
}
if (probe_complete) {
probe->onfinish->complete(probe->err);
delete probe;
}
}
};
int Filer::probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end, // LB, when !fwd
ceph::real_time *pmtime,
bool fwd,
int flags,
Context *onfinish)
{
ldout(cct, 10) << "probe " << (fwd ? "fwd ":"bwd ")
<< hex << ino << dec
<< " starting from " << start_from
<< dendl;
ceph_assert(snapid); // (until there is a non-NOSNAP write)
Probe *probe = new Probe(ino, *layout, snapid, start_from, end, pmtime,
flags, fwd, onfinish);
return probe_impl(probe, layout, start_from, end);
}
int Filer::probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end, // LB, when !fwd
utime_t *pmtime,
bool fwd,
int flags,
Context *onfinish)
{
ldout(cct, 10) << "probe " << (fwd ? "fwd ":"bwd ")
<< hex << ino << dec
<< " starting from " << start_from
<< dendl;
ceph_assert(snapid); // (until there is a non-NOSNAP write)
Probe *probe = new Probe(ino, *layout, snapid, start_from, end, pmtime,
flags, fwd, onfinish);
return probe_impl(probe, layout, start_from, end);
}
int Filer::probe_impl(Probe* probe, const file_layout_t *layout,
uint64_t start_from, uint64_t *end) // LB, when !fwd
{
  // period (bytes before we jump onto a new set of object(s))
uint64_t period = layout->get_period();
// start with 1+ periods.
probe->probing_len = period;
if (probe->fwd) {
if (start_from % period)
probe->probing_len += period - (start_from % period);
} else {
ceph_assert(start_from > *end);
if (start_from % period)
probe->probing_len -= period - (start_from % period);
probe->probing_off -= probe->probing_len;
}
Probe::unique_lock pl(probe->lock);
_probe(probe, pl);
ceph_assert(!pl.owns_lock());
return 0;
}
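/*
 * Worked example for the probing-length computation above (illustrative;
 * assumes object_size = 4 MiB and stripe_count = 1, so the period is 4 MiB):
 * a forward probe starting at offset 6 MiB gets
 *   probing_len = 4 MiB + (4 MiB - 6 MiB % 4 MiB) = 6 MiB,
 * i.e. the first pass stats the objects covering [6 MiB, 12 MiB). If every
 * probed object turns out to be full, _probed() keeps advancing one full
 * period at a time until it finds an object that ends short, which marks
 * the end of the striped file.
 */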
/**
 * probe->lock must be initially locked; this function will release it
*/
void Filer::_probe(Probe *probe, Probe::unique_lock& pl)
{
ceph_assert(pl.owns_lock() && pl.mutex() == &probe->lock);
ldout(cct, 10) << "_probe " << hex << probe->ino << dec
<< " " << probe->probing_off << "~" << probe->probing_len
<< dendl;
// map range onto objects
probe->known_size.clear();
probe->probing.clear();
Striper::file_to_extents(cct, probe->ino, &probe->layout, probe->probing_off,
probe->probing_len, 0, probe->probing);
std::vector<ObjectExtent> stat_extents;
for (auto p = probe->probing.begin(); p != probe->probing.end(); ++p) {
ldout(cct, 10) << "_probe probing " << p->oid << dendl;
probe->ops.insert(p->oid);
stat_extents.push_back(*p);
}
pl.unlock();
for (std::vector<ObjectExtent>::iterator i = stat_extents.begin();
i != stat_extents.end(); ++i) {
C_Probe *c = new C_Probe(this, probe, i->oid);
objecter->stat(i->oid, i->oloc, probe->snapid, &c->size, &c->mtime,
probe->flags | CEPH_OSD_FLAG_RWORDERED,
new C_OnFinisher(c, finisher));
}
}
/**
* probe->lock must be initially held, and will be released by this function.
*
* @return true if probe is complete and Probe object may be freed.
*/
bool Filer::_probed(Probe *probe, const object_t& oid, uint64_t size,
ceph::real_time mtime, Probe::unique_lock& pl)
{
ceph_assert(pl.owns_lock() && pl.mutex() == &probe->lock);
ldout(cct, 10) << "_probed " << probe->ino << " object " << oid
<< " has size " << size << " mtime " << mtime << dendl;
probe->known_size[oid] = size;
if (mtime > probe->max_mtime)
probe->max_mtime = mtime;
ceph_assert(probe->ops.count(oid));
probe->ops.erase(oid);
if (!probe->ops.empty()) {
pl.unlock();
return false; // waiting for more!
}
if (probe->err) { // we hit an error, propagate back up
pl.unlock();
return true;
}
// analyze!
uint64_t end = 0;
if (!probe->fwd) {
std::reverse(probe->probing.begin(), probe->probing.end());
}
for (auto p = probe->probing.begin(); p != probe->probing.end(); ++p) {
uint64_t shouldbe = p->length + p->offset;
ldout(cct, 10) << "_probed " << probe->ino << " object " << hex
<< p->oid << dec << " should be " << shouldbe
<< ", actual is " << probe->known_size[p->oid]
<< dendl;
if (!probe->found_size) {
ceph_assert(probe->known_size[p->oid] <= shouldbe);
if ((probe->fwd && probe->known_size[p->oid] == shouldbe) ||
(!probe->fwd && probe->known_size[p->oid] == 0 &&
probe->probing_off > 0))
continue; // keep going
// aha, we found the end!
// calc offset into buffer_extent to get distance from probe->from.
uint64_t oleft = probe->known_size[p->oid] - p->offset;
for (auto i = p->buffer_extents.begin();
i != p->buffer_extents.end();
++i) {
if (oleft <= (uint64_t)i->second) {
end = probe->probing_off + i->first + oleft;
ldout(cct, 10) << "_probed end is in buffer_extent " << i->first
<< "~" << i->second << " off " << oleft
<< ", from was " << probe->probing_off << ", end is "
<< end << dendl;
probe->found_size = true;
ldout(cct, 10) << "_probed found size at " << end << dendl;
*probe->psize = end;
if (!probe->pmtime &&
!probe->pumtime) // stop if we don't need mtime too
break;
}
oleft -= i->second;
}
}
break;
}
if (!probe->found_size || (probe->probing_off && (probe->pmtime ||
probe->pumtime))) {
// keep probing!
ldout(cct, 10) << "_probed probing further" << dendl;
uint64_t period = probe->layout.get_period();
if (probe->fwd) {
probe->probing_off += probe->probing_len;
ceph_assert(probe->probing_off % period == 0);
probe->probing_len = period;
} else {
// previous period.
ceph_assert(probe->probing_off % period == 0);
probe->probing_len = period;
probe->probing_off -= period;
}
_probe(probe, pl);
ceph_assert(!pl.owns_lock());
return false;
} else if (probe->pmtime) {
ldout(cct, 10) << "_probed found mtime " << probe->max_mtime << dendl;
*probe->pmtime = probe->max_mtime;
} else if (probe->pumtime) {
ldout(cct, 10) << "_probed found mtime " << probe->max_mtime << dendl;
*probe->pumtime = ceph::real_clock::to_ceph_timespec(probe->max_mtime);
}
// done!
pl.unlock();
return true;
}
// -----------------------
struct PurgeRange {
std::mutex lock;
typedef std::lock_guard<std::mutex> lock_guard;
typedef std::unique_lock<std::mutex> unique_lock;
inodeno_t ino;
file_layout_t layout;
SnapContext snapc;
uint64_t first, num;
ceph::real_time mtime;
int flags;
Context *oncommit;
int uncommitted;
int err = 0;
PurgeRange(inodeno_t i, const file_layout_t& l, const SnapContext& sc,
uint64_t fo, uint64_t no, ceph::real_time t, int fl,
Context *fin)
: ino(i), layout(l), snapc(sc), first(fo), num(no), mtime(t), flags(fl),
oncommit(fin), uncommitted(0) {}
};
int Filer::purge_range(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t first_obj, uint64_t num_obj,
ceph::real_time mtime,
int flags,
Context *oncommit)
{
ceph_assert(num_obj > 0);
// single object? easy!
if (num_obj == 1) {
object_t oid = file_object_t(ino, first_obj);
object_locator_t oloc = OSDMap::file_to_object_locator(*layout);
ldout(cct, 10) << "purge_range removing " << oid << dendl;
objecter->remove(oid, oloc, snapc, mtime, flags, oncommit);
return 0;
}
PurgeRange *pr = new PurgeRange(ino, *layout, snapc, first_obj,
num_obj, mtime, flags, oncommit);
_do_purge_range(pr, 0, 0);
return 0;
}
struct C_PurgeRange : public Context {
Filer *filer;
PurgeRange *pr;
C_PurgeRange(Filer *f, PurgeRange *p) : filer(f), pr(p) {}
void finish(int r) override {
filer->_do_purge_range(pr, 1, r);
}
};
void Filer::_do_purge_range(PurgeRange *pr, int fin, int err)
{
PurgeRange::unique_lock prl(pr->lock);
if (err && err != -ENOENT)
pr->err = err;
pr->uncommitted -= fin;
ldout(cct, 10) << "_do_purge_range " << pr->ino << " objects " << pr->first
<< "~" << pr->num << " uncommitted " << pr->uncommitted
<< dendl;
if (pr->num == 0 && pr->uncommitted == 0) {
pr->oncommit->complete(pr->err);
prl.unlock();
delete pr;
return;
}
std::vector<object_t> remove_oids;
int max = cct->_conf->filer_max_purge_ops - pr->uncommitted;
while (pr->num > 0 && max > 0) {
remove_oids.push_back(file_object_t(pr->ino, pr->first));
pr->uncommitted++;
pr->first++;
pr->num--;
max--;
}
prl.unlock();
// Issue objecter ops outside pr->lock to avoid lock dependency loop
for (const auto& oid : remove_oids) {
object_locator_t oloc = OSDMap::file_to_object_locator(pr->layout);
objecter->remove(oid, oloc, pr->snapc, pr->mtime, pr->flags,
new C_OnFinisher(new C_PurgeRange(this, pr), finisher));
}
}
// -----------------------
struct TruncRange {
std::mutex lock;
typedef std::lock_guard<std::mutex> lock_guard;
typedef std::unique_lock<std::mutex> unique_lock;
inodeno_t ino;
file_layout_t layout;
SnapContext snapc;
ceph::real_time mtime;
int flags;
Context *oncommit;
int uncommitted;
uint64_t offset;
uint64_t length;
uint32_t truncate_seq;
TruncRange(inodeno_t i, const file_layout_t& l, const SnapContext& sc,
ceph::real_time t, int fl, Context *fin,
uint64_t off, uint64_t len, uint32_t ts)
: ino(i), layout(l), snapc(sc), mtime(t), flags(fl), oncommit(fin),
uncommitted(0), offset(off), length(len), truncate_seq(ts) {}
};
void Filer::truncate(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
__u32 truncate_seq,
ceph::real_time mtime,
int flags,
Context *oncommit)
{
uint64_t period = layout->get_period();
uint64_t num_objs = Striper::get_num_objects(*layout, len + (offset % period));
if (num_objs == 1) {
vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
osdc_opvec ops(1);
ops[0].op.op = CEPH_OSD_OP_TRIMTRUNC;
ops[0].op.extent.truncate_seq = truncate_seq;
ops[0].op.extent.truncate_size = extents[0].offset;
objecter->_modify(extents[0].oid, extents[0].oloc, ops, mtime, snapc,
flags, oncommit);
return;
}
if (len > 0 && (offset + len) % period)
len += period - ((offset + len) % period);
TruncRange *tr = new TruncRange(ino, *layout, snapc, mtime, flags, oncommit,
offset, len, truncate_seq);
_do_truncate_range(tr, 0);
}
struct C_TruncRange : public Context {
Filer *filer;
TruncRange *tr;
C_TruncRange(Filer *f, TruncRange *t) : filer(f), tr(t) {}
void finish(int r) override {
filer->_do_truncate_range(tr, 1);
}
};
void Filer::_do_truncate_range(TruncRange *tr, int fin)
{
TruncRange::unique_lock trl(tr->lock);
tr->uncommitted -= fin;
ldout(cct, 10) << "_do_truncate_range " << tr->ino << " objects " << tr->offset
<< "~" << tr->length << " uncommitted " << tr->uncommitted
<< dendl;
if (tr->length == 0 && tr->uncommitted == 0) {
tr->oncommit->complete(0);
trl.unlock();
delete tr;
return;
}
vector<ObjectExtent> extents;
int max = cct->_conf->filer_max_truncate_ops - tr->uncommitted;
if (max > 0 && tr->length > 0) {
uint64_t len = tr->layout.get_period() * max;
if (len > tr->length)
len = tr->length;
uint64_t offset = tr->offset + tr->length - len;
Striper::file_to_extents(cct, tr->ino, &tr->layout, offset, len, 0, extents);
tr->uncommitted += extents.size();
tr->length -= len;
}
trl.unlock();
// Issue objecter ops outside tr->lock to avoid lock dependency loop
for (const auto& p : extents) {
osdc_opvec ops(1);
ops[0].op.op = CEPH_OSD_OP_TRIMTRUNC;
ops[0].op.extent.truncate_size = p.offset;
ops[0].op.extent.truncate_seq = tr->truncate_seq;
objecter->_modify(p.oid, p.oloc, ops, tr->mtime, tr->snapc, tr->flags,
new C_OnFinisher(new C_TruncRange(this, tr), finisher));
}
}
| 13,736 | 27.14959 | 81 | cc |
null | ceph-main/src/osdc/Filer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_FILER_H
#define CEPH_FILER_H
/*** Filer
*
* stripe file ranges onto objects.
* build list<ObjectExtent> for the objecter or objectcacher.
*
* also, provide convenience methods that call objecter for you.
*
* "files" are identified by ino.
*/
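/*
 * Usage sketch (illustrative only; 'objecter', 'finisher', 'ino', 'layout'
 * and 'onfinish' are hypothetical values owned by the caller):
 *
 *   Filer filer(objecter, finisher);
 *   ceph::buffer::list bl;
 *   // read 1 MiB starting at file offset 0; 'onfinish' runs on completion
 *   filer.read(ino, &layout, CEPH_NOSNAP, 0, 1 << 20, &bl, 0, onfinish);
 *
 * Internally the byte range is striped into ObjectExtents via
 * Striper::file_to_extents() and handed to the Objecter as a
 * scatter/gather read.
 */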
#include <mutex>
#include "include/types.h"
#include "common/ceph_time.h"
#include "osd/OSDMap.h"
#include "Objecter.h"
#include "Striper.h"
class Context;
class Messenger;
class OSDMap;
class Finisher;
/**** Filer interface ***/
class Filer {
CephContext *cct;
Objecter *objecter;
Finisher *finisher;
// probes
struct Probe {
std::mutex lock;
typedef std::lock_guard<std::mutex> lock_guard;
typedef std::unique_lock<std::mutex> unique_lock;
inodeno_t ino;
file_layout_t layout;
snapid_t snapid;
uint64_t *psize;
ceph::real_time *pmtime;
utime_t *pumtime;
int flags;
bool fwd;
Context *onfinish;
std::vector<ObjectExtent> probing;
uint64_t probing_off, probing_len;
std::map<object_t, uint64_t> known_size;
ceph::real_time max_mtime;
std::set<object_t> ops;
int err;
bool found_size;
Probe(inodeno_t i, const file_layout_t &l, snapid_t sn,
uint64_t f, uint64_t *e, ceph::real_time *m, int fl, bool fw,
Context *c) :
ino(i), layout(l), snapid(sn),
psize(e), pmtime(m), pumtime(nullptr), flags(fl), fwd(fw), onfinish(c),
probing_off(f), probing_len(0),
err(0), found_size(false) {}
Probe(inodeno_t i, const file_layout_t &l, snapid_t sn,
uint64_t f, uint64_t *e, utime_t *m, int fl, bool fw,
Context *c) :
ino(i), layout(l), snapid(sn),
psize(e), pmtime(nullptr), pumtime(m), flags(fl), fwd(fw),
onfinish(c), probing_off(f), probing_len(0),
err(0), found_size(false) {}
};
class C_Probe;
void _probe(Probe *p, Probe::unique_lock& pl);
bool _probed(Probe *p, const object_t& oid, uint64_t size,
ceph::real_time mtime, Probe::unique_lock& pl);
public:
Filer(const Filer& other);
const Filer operator=(const Filer& other);
Filer(Objecter *o, Finisher *f) : cct(o->cct), objecter(o), finisher(f) {}
~Filer() {}
bool is_active() {
return objecter->is_active(); // || (oc && oc->is_active());
}
/*** async file interface. scatter/gather as needed. ***/
void read(inodeno_t ino,
const file_layout_t *layout,
snapid_t snap,
uint64_t offset,
uint64_t len,
ceph::buffer::list *bl, // ptr to data
int flags,
Context *onfinish,
int op_flags = 0) {
ceph_assert(snap); // (until there is a non-NOSNAP write)
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
objecter->sg_read(extents, snap, bl, flags, onfinish, op_flags);
}
void read_trunc(inodeno_t ino,
const file_layout_t *layout,
snapid_t snap,
uint64_t offset,
uint64_t len,
ceph::buffer::list *bl, // ptr to data
int flags,
uint64_t truncate_size,
__u32 truncate_seq,
Context *onfinish,
int op_flags = 0) {
ceph_assert(snap); // (until there is a non-NOSNAP write)
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, truncate_size,
extents);
objecter->sg_read_trunc(extents, snap, bl, flags,
truncate_size, truncate_seq, onfinish, op_flags);
}
void write(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::buffer::list& bl,
ceph::real_time mtime,
int flags,
Context *oncommit,
int op_flags = 0) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
objecter->sg_write(extents, snapc, bl, mtime, flags, oncommit, op_flags);
}
void write_trunc(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::buffer::list& bl,
ceph::real_time mtime,
int flags,
uint64_t truncate_size,
__u32 truncate_seq,
Context *oncommit,
int op_flags = 0) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, truncate_size,
extents);
objecter->sg_write_trunc(extents, snapc, bl, mtime, flags,
truncate_size, truncate_seq, oncommit, op_flags);
}
void truncate(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
__u32 truncate_seq,
ceph::real_time mtime,
int flags,
Context *oncommit);
void _do_truncate_range(struct TruncRange *pr, int fin);
void zero(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::real_time mtime,
int flags,
bool keep_first,
Context *oncommit) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, ino, layout, offset, len, 0, extents);
if (extents.size() == 1) {
if (extents[0].offset == 0 && extents[0].length == layout->object_size
&& (!keep_first || extents[0].objectno != 0))
objecter->remove(extents[0].oid, extents[0].oloc,
snapc, mtime, flags, oncommit);
else
objecter->zero(extents[0].oid, extents[0].oloc, extents[0].offset,
extents[0].length, snapc, mtime, flags, oncommit);
} else {
C_GatherBuilder gcom(cct, oncommit);
for (auto p = extents.begin(); p != extents.end(); ++p) {
if (p->offset == 0 && p->length == layout->object_size &&
(!keep_first || p->objectno != 0))
objecter->remove(p->oid, p->oloc,
snapc, mtime, flags,
oncommit ? gcom.new_sub():0);
else
objecter->zero(p->oid, p->oloc, p->offset, p->length,
snapc, mtime, flags,
oncommit ? gcom.new_sub():0);
}
gcom.activate();
}
}
void zero(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t offset,
uint64_t len,
ceph::real_time mtime,
int flags,
Context *oncommit) {
zero(ino, layout,
snapc, offset,
len, mtime,
flags, false,
oncommit);
}
// purge range of ino.### objects
int purge_range(inodeno_t ino,
const file_layout_t *layout,
const SnapContext& snapc,
uint64_t first_obj, uint64_t num_obj,
ceph::real_time mtime,
int flags, Context *oncommit);
void _do_purge_range(struct PurgeRange *pr, int fin, int err);
/*
* probe
* specify direction,
   *  and whether we stop when we find data or a hole.
*/
int probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end,
ceph::real_time *mtime,
bool fwd,
int flags,
Context *onfinish);
int probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end,
bool fwd,
int flags,
Context *onfinish) {
return probe(ino, layout, snapid, start_from, end,
(ceph::real_time* )0, fwd, flags, onfinish);
}
int probe(inodeno_t ino,
const file_layout_t *layout,
snapid_t snapid,
uint64_t start_from,
uint64_t *end,
utime_t *mtime,
bool fwd,
int flags,
Context *onfinish);
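  /*
   * Usage sketch (illustrative only; 'filer', 'ino', 'layout' and 'onfinish'
   * are hypothetical): probe forward from offset 0 to locate the apparent
   * end of the striped file; '*end' (and '*mtime', if requested) are filled
   * in before 'onfinish' is completed.
   *
   *   uint64_t end = 0;
   *   ceph::real_time mtime;
   *   filer.probe(ino, &layout, CEPH_NOSNAP, 0, &end, &mtime, true, 0,
   *               onfinish);
   */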
private:
int probe_impl(Probe* probe, const file_layout_t *layout,
uint64_t start_from, uint64_t *end);
};
#endif // !CEPH_FILER_H
| 7,905 | 25.265781 | 77 | h |
null | ceph-main/src/osdc/Journaler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/perf_counters.h"
#include "common/dout.h"
#include "include/Context.h"
#include "msg/Messenger.h"
#include "osdc/Journaler.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "common/Finisher.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << objecter->messenger->get_myname() \
<< ".journaler." << name << (readonly ? "(ro) ":"(rw) ")
using namespace std;
using std::chrono::seconds;
class Journaler::C_DelayFlush : public Context {
Journaler *journaler;
public:
explicit C_DelayFlush(Journaler *j) : journaler(j) {}
void finish(int r) override {
journaler->_do_delayed_flush();
}
};
void Journaler::set_readonly()
{
lock_guard l(lock);
ldout(cct, 1) << "set_readonly" << dendl;
readonly = true;
}
void Journaler::set_writeable()
{
lock_guard l(lock);
ldout(cct, 1) << "set_writeable" << dendl;
readonly = false;
}
void Journaler::create(file_layout_t *l, stream_format_t const sf)
{
lock_guard lk(lock);
ceph_assert(!readonly);
state = STATE_ACTIVE;
stream_format = sf;
journal_stream.set_format(sf);
_set_layout(l);
prezeroing_pos = prezero_pos = write_pos = flush_pos =
safe_pos = read_pos = requested_pos = received_pos =
expire_pos = trimming_pos = trimmed_pos =
next_safe_pos = layout.get_period();
ldout(cct, 1) << "created blank journal at inode 0x" << std::hex << ino
<< std::dec << ", format=" << stream_format << dendl;
}
void Journaler::set_layout(file_layout_t const *l)
{
lock_guard lk(lock);
_set_layout(l);
}
void Journaler::_set_layout(file_layout_t const *l)
{
layout = *l;
if (layout.pool_id != pg_pool) {
// user can reset pool id through cephfs-journal-tool
lderr(cct) << "may got older pool id from header layout" << dendl;
ceph_abort();
}
last_written.layout = layout;
last_committed.layout = layout;
// prefetch intelligently.
// (watch out, this is big if you use big objects or weird striping)
uint64_t periods = cct->_conf.get_val<uint64_t>("journaler_prefetch_periods");
fetch_len = layout.get_period() * periods;
}
/***************** HEADER *******************/
ostream& operator<<(ostream &out, const Journaler::Header &h)
{
return out << "loghead(trim " << h.trimmed_pos
<< ", expire " << h.expire_pos
<< ", write " << h.write_pos
<< ", stream_format " << (int)(h.stream_format)
<< ")";
}
class Journaler::C_ReadHead : public Context {
Journaler *ls;
public:
bufferlist bl;
explicit C_ReadHead(Journaler *l) : ls(l) {}
void finish(int r) override {
ls->_finish_read_head(r, bl);
}
};
class Journaler::C_RereadHead : public Context {
Journaler *ls;
Context *onfinish;
public:
bufferlist bl;
C_RereadHead(Journaler *l, Context *onfinish_) : ls (l),
onfinish(onfinish_) {}
void finish(int r) override {
ls->_finish_reread_head(r, bl, onfinish);
}
};
class Journaler::C_ProbeEnd : public Context {
Journaler *ls;
public:
uint64_t end;
explicit C_ProbeEnd(Journaler *l) : ls(l), end(-1) {}
void finish(int r) override {
ls->_finish_probe_end(r, end);
}
};
class Journaler::C_ReProbe : public Context {
Journaler *ls;
C_OnFinisher *onfinish;
public:
uint64_t end;
C_ReProbe(Journaler *l, C_OnFinisher *onfinish_) :
ls(l), onfinish(onfinish_), end(0) {}
void finish(int r) override {
ls->_finish_reprobe(r, end, onfinish);
}
};
void Journaler::recover(Context *onread)
{
lock_guard l(lock);
if (is_stopping()) {
onread->complete(-EAGAIN);
return;
}
ldout(cct, 1) << "recover start" << dendl;
ceph_assert(state != STATE_ACTIVE);
ceph_assert(readonly);
if (onread)
waitfor_recover.push_back(wrap_finisher(onread));
if (state != STATE_UNDEF) {
ldout(cct, 1) << "recover - already recovering" << dendl;
return;
}
ldout(cct, 1) << "read_head" << dendl;
state = STATE_READHEAD;
C_ReadHead *fin = new C_ReadHead(this);
_read_head(fin, &fin->bl);
}
void Journaler::_read_head(Context *on_finish, bufferlist *bl)
{
// lock is locked
ceph_assert(state == STATE_READHEAD || state == STATE_REREADHEAD);
object_t oid = file_object_t(ino, 0);
object_locator_t oloc(pg_pool);
objecter->read_full(oid, oloc, CEPH_NOSNAP, bl, 0, wrap_finisher(on_finish));
}
void Journaler::reread_head(Context *onfinish)
{
lock_guard l(lock);
_reread_head(wrap_finisher(onfinish));
}
/**
* Re-read the head from disk, and set the write_pos, expire_pos, trimmed_pos
* from the on-disk header. This switches the state to STATE_REREADHEAD for
* the duration, and you shouldn't start a re-read while other operations are
* in-flight, nor start other operations while a re-read is in progress.
* Also, don't call this until the Journaler has finished its recovery and has
* gone STATE_ACTIVE!
*/
void Journaler::_reread_head(Context *onfinish)
{
ldout(cct, 10) << "reread_head" << dendl;
ceph_assert(state == STATE_ACTIVE);
state = STATE_REREADHEAD;
C_RereadHead *fin = new C_RereadHead(this, onfinish);
_read_head(fin, &fin->bl);
}
void Journaler::_finish_reread_head(int r, bufferlist& bl, Context *finish)
{
lock_guard l(lock);
if (is_stopping()) {
finish->complete(-EAGAIN);
return;
}
  // the on-disk header has been read into 'bl'
ceph_assert(bl.length() || r < 0 );
// unpack header
if (r == 0) {
Header h;
auto p = bl.cbegin();
try {
decode(h, p);
} catch (const buffer::error &e) {
finish->complete(-EINVAL);
return;
}
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
= h.write_pos;
expire_pos = h.expire_pos;
trimmed_pos = trimming_pos = h.trimmed_pos;
init_headers(h);
state = STATE_ACTIVE;
}
finish->complete(r);
}
void Journaler::_finish_read_head(int r, bufferlist& bl)
{
lock_guard l(lock);
if (is_stopping())
return;
ceph_assert(state == STATE_READHEAD);
if (r!=0) {
ldout(cct, 0) << "error getting journal off disk" << dendl;
list<Context*> ls;
ls.swap(waitfor_recover);
finish_contexts(cct, ls, r);
return;
}
if (bl.length() == 0) {
ldout(cct, 1) << "_finish_read_head r=" << r
<< " read 0 bytes, assuming empty log" << dendl;
state = STATE_ACTIVE;
list<Context*> ls;
ls.swap(waitfor_recover);
finish_contexts(cct, ls, 0);
return;
}
// unpack header
bool corrupt = false;
Header h;
auto p = bl.cbegin();
try {
decode(h, p);
if (h.magic != magic) {
ldout(cct, 0) << "on disk magic '" << h.magic << "' != my magic '"
<< magic << "'" << dendl;
corrupt = true;
} else if (h.write_pos < h.expire_pos || h.expire_pos < h.trimmed_pos) {
ldout(cct, 0) << "Corrupt header (bad offsets): " << h << dendl;
corrupt = true;
}
} catch (const buffer::error &e) {
corrupt = true;
}
if (corrupt) {
list<Context*> ls;
ls.swap(waitfor_recover);
finish_contexts(cct, ls, -EINVAL);
return;
}
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos
= h.write_pos;
read_pos = requested_pos = received_pos = expire_pos = h.expire_pos;
trimmed_pos = trimming_pos = h.trimmed_pos;
init_headers(h);
_set_layout(&h.layout);
stream_format = h.stream_format;
journal_stream.set_format(h.stream_format);
ldout(cct, 1) << "_finish_read_head " << h
<< ". probing for end of log (from " << write_pos << ")..."
<< dendl;
C_ProbeEnd *fin = new C_ProbeEnd(this);
state = STATE_PROBING;
_probe(fin, &fin->end);
}
void Journaler::_probe(Context *finish, uint64_t *end)
{
// lock is locked
ldout(cct, 1) << "probing for end of the log" << dendl;
ceph_assert(state == STATE_PROBING || state == STATE_REPROBING);
// probe the log
filer.probe(ino, &layout, CEPH_NOSNAP,
write_pos, end, true, 0, wrap_finisher(finish));
}
void Journaler::_reprobe(C_OnFinisher *finish)
{
ldout(cct, 10) << "reprobe" << dendl;
ceph_assert(state == STATE_ACTIVE);
state = STATE_REPROBING;
C_ReProbe *fin = new C_ReProbe(this, finish);
_probe(fin, &fin->end);
}
void Journaler::_finish_reprobe(int r, uint64_t new_end,
C_OnFinisher *onfinish)
{
lock_guard l(lock);
if (is_stopping()) {
onfinish->complete(-EAGAIN);
return;
}
ceph_assert(new_end >= write_pos || r < 0);
ldout(cct, 1) << "_finish_reprobe new_end = " << new_end
<< " (header had " << write_pos << ")."
<< dendl;
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = new_end;
state = STATE_ACTIVE;
onfinish->complete(r);
}
void Journaler::_finish_probe_end(int r, uint64_t end)
{
lock_guard l(lock);
if (is_stopping())
return;
ceph_assert(state == STATE_PROBING);
if (r < 0) { // error in probing
goto out;
}
if (((int64_t)end) == -1) {
end = write_pos;
ldout(cct, 1) << "_finish_probe_end write_pos = " << end << " (header had "
<< write_pos << "). log was empty. recovered." << dendl;
ceph_abort(); // hrm.
} else {
ceph_assert(end >= write_pos);
ldout(cct, 1) << "_finish_probe_end write_pos = " << end
<< " (header had " << write_pos << "). recovered."
<< dendl;
}
state = STATE_ACTIVE;
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = end;
out:
// done.
list<Context*> ls;
ls.swap(waitfor_recover);
finish_contexts(cct, ls, r);
}
class Journaler::C_RereadHeadProbe : public Context
{
Journaler *ls;
C_OnFinisher *final_finish;
public:
C_RereadHeadProbe(Journaler *l, C_OnFinisher *finish) :
ls(l), final_finish(finish) {}
void finish(int r) override {
ls->_finish_reread_head_and_probe(r, final_finish);
}
};
void Journaler::reread_head_and_probe(Context *onfinish)
{
lock_guard l(lock);
ceph_assert(state == STATE_ACTIVE);
_reread_head(new C_RereadHeadProbe(this, wrap_finisher(onfinish)));
}
void Journaler::_finish_reread_head_and_probe(int r, C_OnFinisher *onfinish)
{
// Expect to be called back from finish_reread_head, which already takes lock
// lock is locked
if (is_stopping()) {
onfinish->complete(-EAGAIN);
return;
}
  // Let the caller know that the operation failed, or was intentionally
  // failed because the caller has been blocklisted.
if (r == -EBLOCKLISTED) {
onfinish->complete(r);
return;
}
ceph_assert(!r); //if we get an error, we're boned
_reprobe(onfinish);
}
// WRITING
class Journaler::C_WriteHead : public Context {
public:
Journaler *ls;
Header h;
C_OnFinisher *oncommit;
C_WriteHead(Journaler *l, Header& h_, C_OnFinisher *c) : ls(l), h(h_),
oncommit(c) {}
void finish(int r) override {
ls->_finish_write_head(r, h, oncommit);
}
};
void Journaler::write_head(Context *oncommit)
{
lock_guard l(lock);
_write_head(oncommit);
}
void Journaler::_write_head(Context *oncommit)
{
ceph_assert(!readonly);
ceph_assert(state == STATE_ACTIVE);
last_written.trimmed_pos = trimmed_pos;
last_written.expire_pos = expire_pos;
last_written.unused_field = expire_pos;
last_written.write_pos = safe_pos;
last_written.stream_format = stream_format;
ldout(cct, 10) << "write_head " << last_written << dendl;
// Avoid persisting bad pointers in case of bugs
ceph_assert(last_written.write_pos >= last_written.expire_pos);
ceph_assert(last_written.expire_pos >= last_written.trimmed_pos);
last_wrote_head = ceph::real_clock::now();
bufferlist bl;
encode(last_written, bl);
SnapContext snapc;
object_t oid = file_object_t(ino, 0);
object_locator_t oloc(pg_pool);
objecter->write_full(oid, oloc, snapc, bl, ceph::real_clock::now(), 0,
wrap_finisher(new C_WriteHead(
this, last_written,
wrap_finisher(oncommit))),
0, 0, write_iohint);
}
void Journaler::_finish_write_head(int r, Header &wrote,
C_OnFinisher *oncommit)
{
lock_guard l(lock);
if (r < 0) {
lderr(cct) << "_finish_write_head got " << cpp_strerror(r) << dendl;
handle_write_error(r);
return;
}
ceph_assert(!readonly);
ldout(cct, 10) << "_finish_write_head " << wrote << dendl;
last_committed = wrote;
if (oncommit) {
oncommit->complete(r);
}
_trim(); // trim?
}
/***************** WRITING *******************/
class Journaler::C_Flush : public Context {
Journaler *ls;
uint64_t start;
ceph::real_time stamp;
public:
C_Flush(Journaler *l, int64_t s, ceph::real_time st)
: ls(l), start(s), stamp(st) {}
void finish(int r) override {
ls->_finish_flush(r, start, stamp);
}
};
void Journaler::_finish_flush(int r, uint64_t start, ceph::real_time stamp)
{
lock_guard l(lock);
ceph_assert(!readonly);
if (r < 0) {
lderr(cct) << "_finish_flush got " << cpp_strerror(r) << dendl;
handle_write_error(r);
return;
}
ceph_assert(start < flush_pos);
// calc latency?
if (logger) {
ceph::timespan lat = ceph::real_clock::now() - stamp;
logger->tinc(logger_key_lat, lat);
}
// adjust safe_pos
auto it = pending_safe.find(start);
ceph_assert(it != pending_safe.end());
uint64_t min_next_safe_pos = pending_safe.begin()->second;
pending_safe.erase(it);
if (pending_safe.empty())
safe_pos = next_safe_pos;
else
safe_pos = min_next_safe_pos;
ldout(cct, 10) << "_finish_flush safe from " << start
<< ", pending_safe " << pending_safe
<< ", (prezeroing/prezero)/write/flush/safe positions now "
<< "(" << prezeroing_pos << "/" << prezero_pos << ")/"
<< write_pos << "/" << flush_pos << "/" << safe_pos
<< dendl;
// kick waiters <= safe_pos
if (!waitfor_safe.empty()) {
list<Context*> ls;
while (!waitfor_safe.empty()) {
auto it = waitfor_safe.begin();
if (it->first > safe_pos)
break;
ls.splice(ls.end(), it->second);
waitfor_safe.erase(it);
}
finish_contexts(cct, ls);
}
}
uint64_t Journaler::append_entry(bufferlist& bl)
{
unique_lock l(lock);
ceph_assert(!readonly);
uint32_t s = bl.length();
// append
size_t delta = bl.length() + journal_stream.get_envelope_size();
// write_buf space is nearly full
if (!write_buf_throttle.get_or_fail(delta)) {
l.unlock();
ldout(cct, 10) << "write_buf_throttle wait, delta " << delta << dendl;
write_buf_throttle.get(delta);
l.lock();
}
ldout(cct, 20) << "write_buf_throttle get, delta " << delta << dendl;
size_t wrote = journal_stream.write(bl, &write_buf, write_pos);
ldout(cct, 10) << "append_entry len " << s << " to " << write_pos << "~"
<< wrote << dendl;
write_pos += wrote;
// flush previous object?
uint64_t su = get_layout_period();
ceph_assert(su > 0);
uint64_t write_off = write_pos % su;
uint64_t write_obj = write_pos / su;
uint64_t flush_obj = flush_pos / su;
if (write_obj != flush_obj) {
ldout(cct, 10) << " flushing completed object(s) (su " << su << " wro "
<< write_obj << " flo " << flush_obj << ")" << dendl;
_do_flush(write_buf.length() - write_off);
    // if _do_flush() skips flushing some data, it still makes a best effort
    // to update next_safe_pos.
if (write_buf.length() > 0 &&
write_buf.length() <= wrote) { // the unflushed data are within this entry
// set next_safe_pos to end of previous entry
next_safe_pos = write_pos - wrote;
}
}
return write_pos;
}
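// Illustrative example (not from the original source; numbers are
// hypothetical), to make the envelope/throttle accounting concrete:
// assuming the RESILIENT stream format, appending a 100-byte entry
// consumes 100 + JOURNAL_ENVELOPE_RESILIENT = 100 + 20 = 120 bytes of
// write_buf_throttle budget and advances write_pos by the same 120 bytes
// (8-byte sentinel + 4-byte size + 100-byte payload + 8-byte start_ptr).
// The throttle budget is only returned later, in _do_flush(), once those
// bytes have been handed off to the Objecter.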
void Journaler::_do_flush(unsigned amount)
{
if (is_stopping())
return;
if (write_pos == flush_pos)
return;
ceph_assert(write_pos > flush_pos);
ceph_assert(!readonly);
// flush
uint64_t len = write_pos - flush_pos;
ceph_assert(len == write_buf.length());
if (amount && amount < len)
len = amount;
// zero at least two full periods ahead. this ensures
// that the next object will not exist.
uint64_t period = get_layout_period();
if (flush_pos + len + 2*period > prezero_pos) {
_issue_prezero();
int64_t newlen = prezero_pos - flush_pos - period;
if (newlen <= 0) {
ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
<< " already too close to prezero_pos " << prezero_pos
<< ", zeroing first" << dendl;
waiting_for_zero_pos = flush_pos + len;
return;
}
if (static_cast<uint64_t>(newlen) < len) {
ldout(cct, 10) << "_do_flush wanted to do " << flush_pos << "~" << len
<< " but hit prezero_pos " << prezero_pos
<< ", will do " << flush_pos << "~" << newlen << dendl;
waiting_for_zero_pos = flush_pos + len;
len = newlen;
}
}
ldout(cct, 10) << "_do_flush flushing " << flush_pos << "~" << len << dendl;
// submit write for anything pending
// flush _start_ pos to _finish_flush
ceph::real_time now = ceph::real_clock::now();
SnapContext snapc;
Context *onsafe = new C_Flush(this, flush_pos, now); // on COMMIT
pending_safe[flush_pos] = next_safe_pos;
bufferlist write_bl;
// adjust pointers
if (len == write_buf.length()) {
write_bl.swap(write_buf);
next_safe_pos = write_pos;
} else {
write_buf.splice(0, len, &write_bl);
// Keys of waitfor_safe map are journal entry boundaries.
// Try finding a journal entry that we are actually flushing
// and set next_safe_pos to end of it. This is best effort.
    // The one we found may not be the latest flushing entry.
auto p = waitfor_safe.lower_bound(flush_pos + len);
if (p != waitfor_safe.end()) {
if (p->first > flush_pos + len && p != waitfor_safe.begin())
--p;
if (p->first <= flush_pos + len && p->first > next_safe_pos)
next_safe_pos = p->first;
}
}
filer.write(ino, &layout, snapc,
flush_pos, len, write_bl, ceph::real_clock::now(),
0,
wrap_finisher(onsafe), write_iohint);
flush_pos += len;
ceph_assert(write_buf.length() == write_pos - flush_pos);
write_buf_throttle.put(len);
ldout(cct, 20) << "write_buf_throttle put, len " << len << dendl;
ldout(cct, 10)
<< "_do_flush (prezeroing/prezero)/write/flush/safe pointers now at "
<< "(" << prezeroing_pos << "/" << prezero_pos << ")/" << write_pos
<< "/" << flush_pos << "/" << safe_pos << dendl;
_issue_prezero();
}
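// Illustrative example (numbers are hypothetical), to make the prezero
// headroom check concrete: with a 4 MB layout period, flush_pos = 10 MB,
// len = 1 MB and prezero_pos = 16 MB, the check
// "flush_pos + len + 2*period > prezero_pos" is 10 + 1 + 8 = 19 > 16, so
// _issue_prezero() is kicked; newlen = 16 - 10 - 4 = 2 MB, which is >= len,
// so the full 1 MB is still flushed.  Had prezero_pos been only 13 MB,
// newlen would be negative and the flush would wait for zeroing instead
// (waiting_for_zero_pos = 11 MB).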
void Journaler::wait_for_flush(Context *onsafe)
{
lock_guard l(lock);
if (is_stopping()) {
if (onsafe)
onsafe->complete(-EAGAIN);
return;
}
_wait_for_flush(onsafe);
}
void Journaler::_wait_for_flush(Context *onsafe)
{
ceph_assert(!readonly);
// all flushed and safe?
if (write_pos == safe_pos) {
ceph_assert(write_buf.length() == 0);
ldout(cct, 10)
<< "flush nothing to flush, (prezeroing/prezero)/write/flush/safe "
"pointers at " << "(" << prezeroing_pos << "/" << prezero_pos << ")/"
<< write_pos << "/" << flush_pos << "/" << safe_pos << dendl;
if (onsafe) {
finisher->queue(onsafe, 0);
}
return;
}
// queue waiter
if (onsafe) {
waitfor_safe[write_pos].push_back(wrap_finisher(onsafe));
}
}
void Journaler::flush(Context *onsafe)
{
lock_guard l(lock);
if (is_stopping()) {
if (onsafe)
onsafe->complete(-EAGAIN);
return;
}
_flush(wrap_finisher(onsafe));
}
void Journaler::_flush(C_OnFinisher *onsafe)
{
ceph_assert(!readonly);
if (write_pos == flush_pos) {
ceph_assert(write_buf.length() == 0);
ldout(cct, 10) << "flush nothing to flush, (prezeroing/prezero)/write/"
"flush/safe pointers at " << "(" << prezeroing_pos << "/" << prezero_pos
<< ")/" << write_pos << "/" << flush_pos << "/" << safe_pos
<< dendl;
if (onsafe) {
onsafe->complete(0);
}
} else {
_do_flush();
_wait_for_flush(onsafe);
}
// write head?
if (_write_head_needed()) {
_write_head();
}
}
bool Journaler::_write_head_needed()
{
return last_wrote_head + seconds(cct->_conf.get_val<int64_t>("journaler_write_head_interval"))
< ceph::real_clock::now();
}
/*************** prezeroing ******************/
struct C_Journaler_Prezero : public Context {
Journaler *journaler;
uint64_t from, len;
C_Journaler_Prezero(Journaler *j, uint64_t f, uint64_t l)
: journaler(j), from(f), len(l) {}
void finish(int r) override {
journaler->_finish_prezero(r, from, len);
}
};
void Journaler::_issue_prezero()
{
ceph_assert(prezeroing_pos >= flush_pos);
uint64_t num_periods = cct->_conf.get_val<uint64_t>("journaler_prezero_periods");
/*
* issue zero requests based on write_pos, even though the invariant
* is that we zero ahead of flush_pos.
*/
uint64_t period = get_layout_period();
uint64_t to = write_pos + period * num_periods + period - 1;
to -= to % period;
if (prezeroing_pos >= to) {
ldout(cct, 20) << "_issue_prezero target " << to << " <= prezeroing_pos "
<< prezeroing_pos << dendl;
return;
}
while (prezeroing_pos < to) {
uint64_t len;
if (prezeroing_pos % period == 0) {
len = period;
ldout(cct, 10) << "_issue_prezero removing " << prezeroing_pos << "~"
<< period << " (full period)" << dendl;
} else {
len = period - (prezeroing_pos % period);
ldout(cct, 10) << "_issue_prezero zeroing " << prezeroing_pos << "~"
<< len << " (partial period)" << dendl;
}
SnapContext snapc;
Context *c = wrap_finisher(new C_Journaler_Prezero(this, prezeroing_pos,
len));
filer.zero(ino, &layout, snapc, prezeroing_pos, len,
ceph::real_clock::now(), 0, c);
prezeroing_pos += len;
}
}
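// Illustrative example (numbers are hypothetical): with a 4 MB period,
// write_pos = 10 MB and journaler_prezero_periods = 5, the target is
// to = 10 + 5*4 + 4 MB - 1 byte, rounded down to a period boundary,
// i.e. 32 MB.  Zeroing is therefore issued from prezeroing_pos up to
// 32 MB, one period-aligned chunk at a time (with a partial chunk first
// if prezeroing_pos is not period-aligned).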
// Lock cycle because we get called out of objecter callback (holding
// objecter read lock), but there are also cases where we take the journaler
// lock before calling into objecter to do I/O.
void Journaler::_finish_prezero(int r, uint64_t start, uint64_t len)
{
lock_guard l(lock);
ldout(cct, 10) << "_prezeroed to " << start << "~" << len
<< ", prezeroing/prezero was " << prezeroing_pos << "/"
<< prezero_pos << ", pending " << pending_zero
<< dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "_prezeroed got " << cpp_strerror(r) << dendl;
handle_write_error(r);
return;
}
ceph_assert(r == 0 || r == -ENOENT);
if (start == prezero_pos) {
prezero_pos += len;
while (!pending_zero.empty() &&
pending_zero.begin().get_start() == prezero_pos) {
interval_set<uint64_t>::iterator b(pending_zero.begin());
prezero_pos += b.get_len();
pending_zero.erase(b);
}
if (waiting_for_zero_pos > flush_pos) {
_do_flush(waiting_for_zero_pos - flush_pos);
}
if (prezero_pos == prezeroing_pos &&
!waitfor_prezero.empty()) {
list<Context*> ls;
ls.swap(waitfor_prezero);
finish_contexts(cct, ls, 0);
}
} else {
pending_zero.insert(start, len);
}
ldout(cct, 10) << "_prezeroed prezeroing/prezero now " << prezeroing_pos
<< "/" << prezero_pos
<< ", pending " << pending_zero
<< dendl;
}
void Journaler::wait_for_prezero(Context *onfinish)
{
ceph_assert(onfinish);
lock_guard l(lock);
if (prezero_pos == prezeroing_pos) {
finisher->queue(onfinish, 0);
return;
}
waitfor_prezero.push_back(wrap_finisher(onfinish));
}
/***************** READING *******************/
class Journaler::C_Read : public Context {
Journaler *ls;
uint64_t offset;
uint64_t length;
public:
bufferlist bl;
C_Read(Journaler *j, uint64_t o, uint64_t l) : ls(j), offset(o), length(l) {}
void finish(int r) override {
ls->_finish_read(r, offset, length, bl);
}
};
class Journaler::C_RetryRead : public Context {
Journaler *ls;
public:
explicit C_RetryRead(Journaler *l) : ls(l) {}
void finish(int r) override {
// Should only be called from waitfor_safe i.e. already inside lock
    // (ls->lock is locked)
ls->_prefetch();
}
};
void Journaler::_finish_read(int r, uint64_t offset, uint64_t length,
bufferlist& bl)
{
lock_guard l(lock);
if (r < 0) {
ldout(cct, 0) << "_finish_read got error " << r << dendl;
error = r;
} else {
ldout(cct, 10) << "_finish_read got " << offset << "~" << bl.length()
<< dendl;
if (bl.length() < length) {
ldout(cct, 0) << "_finish_read got less than expected (" << length << ")"
<< dendl;
error = -EINVAL;
}
}
if (error) {
if (on_readable) {
C_OnFinisher *f = on_readable;
on_readable = 0;
f->complete(error);
}
return;
}
prefetch_buf[offset].swap(bl);
try {
_assimilate_prefetch();
} catch (const buffer::error &err) {
lderr(cct) << "_decode error from assimilate_prefetch" << dendl;
error = -EINVAL;
if (on_readable) {
C_OnFinisher *f = on_readable;
on_readable = 0;
f->complete(error);
}
return;
}
_prefetch();
}
void Journaler::_assimilate_prefetch()
{
bool was_readable = readable;
bool got_any = false;
while (!prefetch_buf.empty()) {
map<uint64_t,bufferlist>::iterator p = prefetch_buf.begin();
if (p->first != received_pos) {
uint64_t gap = p->first - received_pos;
ldout(cct, 10) << "_assimilate_prefetch gap of " << gap
<< " from received_pos " << received_pos
<< " to first prefetched buffer " << p->first << dendl;
break;
}
ldout(cct, 10) << "_assimilate_prefetch " << p->first << "~"
<< p->second.length() << dendl;
received_pos += p->second.length();
read_buf.claim_append(p->second);
ceph_assert(received_pos <= requested_pos);
prefetch_buf.erase(p);
got_any = true;
}
if (got_any) {
ldout(cct, 10) << "_assimilate_prefetch read_buf now " << read_pos << "~"
<< read_buf.length() << ", read pointers read_pos=" << read_pos
<< " received_pos=" << received_pos << " requested_pos=" << requested_pos
<< dendl;
// Update readability (this will also hit any decode errors resulting
// from bad data)
readable = _have_next_entry();
}
if ((got_any && !was_readable && readable) || read_pos == write_pos) {
// readable!
ldout(cct, 10) << "_finish_read now readable (or at journal end) readable="
<< readable << " read_pos=" << read_pos << " write_pos="
<< write_pos << dendl;
if (on_readable) {
C_OnFinisher *f = on_readable;
on_readable = 0;
f->complete(0);
}
}
}
void Journaler::_issue_read(uint64_t len)
{
// stuck at safe_pos? (this is needed if we are reading the tail of
// a journal we are also writing to)
ceph_assert(requested_pos <= safe_pos);
if (requested_pos == safe_pos) {
ldout(cct, 10) << "_issue_read requested_pos = safe_pos = " << safe_pos
<< ", waiting" << dendl;
ceph_assert(write_pos > requested_pos);
if (pending_safe.empty()) {
_flush(NULL);
}
// Make sure keys of waitfor_safe map are journal entry boundaries.
// The key we used here is either next_safe_pos or old value of
// next_safe_pos. next_safe_pos is always set to journal entry
// boundary.
auto p = pending_safe.rbegin();
if (p != pending_safe.rend())
waitfor_safe[p->second].push_back(new C_RetryRead(this));
else
waitfor_safe[next_safe_pos].push_back(new C_RetryRead(this));
return;
}
// don't read too much
if (requested_pos + len > safe_pos) {
len = safe_pos - requested_pos;
ldout(cct, 10) << "_issue_read reading only up to safe_pos " << safe_pos
<< dendl;
}
// go.
ldout(cct, 10) << "_issue_read reading " << requested_pos << "~" << len
<< ", read pointers read_pos=" << read_pos << " received_pos=" << received_pos
<< " requested_pos+len=" << (requested_pos+len) << dendl;
// step by period (object). _don't_ do a single big filer.read()
// here because it will wait for all object reads to complete before
// giving us back any data. this way we can process whatever bits
// come in that are contiguous.
uint64_t period = get_layout_period();
while (len > 0) {
uint64_t e = requested_pos + period;
e -= e % period;
uint64_t l = e - requested_pos;
if (l > len)
l = len;
C_Read *c = new C_Read(this, requested_pos, l);
filer.read(ino, &layout, CEPH_NOSNAP, requested_pos, l, &c->bl, 0,
wrap_finisher(c), CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
requested_pos += l;
len -= l;
}
}
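// Illustrative example (numbers are hypothetical): with a 4 MB period,
// requested_pos = 3 MB and len = 6 MB, the loop issues three reads that
// never span an object boundary: 3 MB~1 MB (up to the 4 MB boundary),
// 4 MB~4 MB (one full object), and 8 MB~1 MB.  Each read completes
// independently via C_Read, so contiguous data can be assimilated as
// soon as it arrives.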
void Journaler::_prefetch()
{
if (is_stopping())
return;
ldout(cct, 10) << "_prefetch" << dendl;
// prefetch
uint64_t pf;
if (temp_fetch_len) {
ldout(cct, 10) << "_prefetch temp_fetch_len " << temp_fetch_len << dendl;
pf = temp_fetch_len;
temp_fetch_len = 0;
} else {
pf = fetch_len;
}
uint64_t raw_target = read_pos + pf;
// read full log segments, so increase if necessary
uint64_t period = get_layout_period();
uint64_t remainder = raw_target % period;
uint64_t adjustment = remainder ? period - remainder : 0;
uint64_t target = raw_target + adjustment;
// don't read past the log tail
if (target > write_pos)
target = write_pos;
if (requested_pos < target) {
uint64_t len = target - requested_pos;
ldout(cct, 10) << "_prefetch " << pf << " requested_pos " << requested_pos
<< " < target " << target << " (" << raw_target
<< "), prefetching " << len << dendl;
if (pending_safe.empty() && write_pos > safe_pos) {
// If we are reading and writing the journal, then we may need
// to issue a flush if one isn't already in progress.
// Avoid doing a flush every time so that if we do write/read/write/read
// we don't end up flushing after every write.
ldout(cct, 10) << "_prefetch: requested_pos=" << requested_pos
<< ", read_pos=" << read_pos
<< ", write_pos=" << write_pos
<< ", safe_pos=" << safe_pos << dendl;
_do_flush();
}
_issue_read(len);
}
}
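// Illustrative example (numbers are hypothetical): with read_pos = 3 MB,
// fetch_len = 10 MB and a 4 MB period, raw_target = 13 MB; the 1 MB
// remainder is rounded up by 3 MB so target = 16 MB (a whole number of
// objects), then capped at write_pos.  If requested_pos is still 3 MB,
// a 13 MB read is issued via _issue_read().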
/*
* _have_next_entry() - return true if next entry is ready.
*/
bool Journaler::_have_next_entry()
{
// anything to read?
if (read_pos == write_pos)
return false;
// Check if the retrieve bytestream has enough for an entry
uint64_t need;
if (journal_stream.readable(read_buf, &need)) {
return true;
}
  ldout(cct, 10) << "_have_next_entry read_buf.length() == " << read_buf.length()
<< ", but need " << need << " for next entry; fetch_len is "
<< fetch_len << dendl;
// partial fragment at the end?
if (received_pos == write_pos) {
ldout(cct, 10) << "_have_next_entry() detected partial entry at tail, "
"adjusting write_pos to " << read_pos << dendl;
// adjust write_pos
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = read_pos;
ceph_assert(write_buf.length() == 0);
ceph_assert(waitfor_safe.empty());
// reset read state
requested_pos = received_pos = read_pos;
read_buf.clear();
// FIXME: truncate on disk?
return false;
}
if (need > fetch_len) {
temp_fetch_len = need;
ldout(cct, 10) << "_have_next_entry noting temp_fetch_len " << temp_fetch_len
<< dendl;
}
ldout(cct, 10) << "_have_next_entry: not readable, returning false" << dendl;
return false;
}
/*
* is_readable() - kickstart prefetch, too
*/
bool Journaler::is_readable()
{
lock_guard l(lock);
return _is_readable();
}
bool Journaler::_is_readable()
{
if (error != 0) {
return false;
}
bool r = readable;
_prefetch();
return r;
}
class Journaler::C_EraseFinish : public Context {
Journaler *journaler;
C_OnFinisher *completion;
public:
C_EraseFinish(Journaler *j, C_OnFinisher *c) : journaler(j), completion(c) {}
void finish(int r) override {
journaler->_finish_erase(r, completion);
}
};
/**
* Entirely erase the journal, including header. For use when you
* have already made a copy of the journal somewhere else.
*/
void Journaler::erase(Context *completion)
{
lock_guard l(lock);
// Async delete the journal data
uint64_t first = trimmed_pos / get_layout_period();
uint64_t num = (write_pos - trimmed_pos) / get_layout_period() + 2;
filer.purge_range(ino, &layout, SnapContext(), first, num,
ceph::real_clock::now(), 0,
wrap_finisher(new C_EraseFinish(
this, wrap_finisher(completion))));
// We will not start the operation to delete the header until
// _finish_erase has seen the data deletion succeed: otherwise if
// there was an error deleting data we might prematurely delete the
// header thereby lose our reference to the data.
}
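// Illustrative example (numbers are hypothetical): with a 4 MB period,
// trimmed_pos = 8 MB and write_pos = 21 MB, first = 8/4 = 2 and
// num = (21-8)/4 + 2 = 5, so objects 2..6 are purged; the "+ 2" roughly
// covers the partially written tail object plus one object of slack.
// The header (object 0) is only purged afterwards, in _finish_erase().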
void Journaler::_finish_erase(int data_result, C_OnFinisher *completion)
{
lock_guard l(lock);
if (is_stopping()) {
completion->complete(-EAGAIN);
return;
}
if (data_result == 0) {
// Async delete the journal header
filer.purge_range(ino, &layout, SnapContext(), 0, 1,
ceph::real_clock::now(),
0, wrap_finisher(completion));
} else {
lderr(cct) << "Failed to delete journal " << ino << " data: "
<< cpp_strerror(data_result) << dendl;
completion->complete(data_result);
}
}
/* try_read_entry(bl)
* read entry into bl if it's ready.
* otherwise, do nothing.
*/
bool Journaler::try_read_entry(bufferlist& bl)
{
lock_guard l(lock);
if (!readable) {
ldout(cct, 10) << "try_read_entry at " << read_pos << " not readable"
<< dendl;
return false;
}
uint64_t start_ptr;
size_t consumed;
try {
consumed = journal_stream.read(read_buf, &bl, &start_ptr);
if (stream_format >= JOURNAL_FORMAT_RESILIENT) {
ceph_assert(start_ptr == read_pos);
}
} catch (const buffer::error &e) {
lderr(cct) << __func__ << ": decode error from journal_stream" << dendl;
error = -EINVAL;
return false;
}
ldout(cct, 10) << "try_read_entry at " << read_pos << " read "
<< read_pos << "~" << consumed << " (have "
<< read_buf.length() << ")" << dendl;
read_pos += consumed;
try {
// We were readable, we might not be any more
readable = _have_next_entry();
} catch (const buffer::error &e) {
lderr(cct) << __func__ << ": decode error from _have_next_entry" << dendl;
error = -EINVAL;
return false;
}
// prefetch?
_prefetch();
// If bufferlist consists of discontiguous memory, decoding types whose
// denc_traits needs contiguous memory is inefficient. The bufferlist may
// get copied to temporary memory multiple times (copy_shallow() in
// src/include/denc.h actually does deep copy)
if (bl.get_num_buffers() > 1)
bl.rebuild();
return true;
}
void Journaler::wait_for_readable(Context *onreadable)
{
lock_guard l(lock);
_wait_for_readable(onreadable);
}
void Journaler::_wait_for_readable(Context *onreadable)
{
if (is_stopping()) {
finisher->queue(onreadable, -EAGAIN);
return;
}
ceph_assert(on_readable == 0);
if (!readable) {
ldout(cct, 10) << "wait_for_readable at " << read_pos << " onreadable "
<< onreadable << dendl;
on_readable = wrap_finisher(onreadable);
} else {
// race with OSD reply
finisher->queue(onreadable, 0);
}
}
bool Journaler::have_waiter() const
{
return on_readable != nullptr;
}
/***************** TRIMMING *******************/
class Journaler::C_Trim : public Context {
Journaler *ls;
uint64_t to;
public:
C_Trim(Journaler *l, int64_t t) : ls(l), to(t) {}
void finish(int r) override {
ls->_finish_trim(r, to);
}
};
void Journaler::trim()
{
lock_guard l(lock);
_trim();
}
void Journaler::_trim()
{
if (is_stopping())
return;
ceph_assert(!readonly);
uint64_t period = get_layout_period();
uint64_t trim_to = last_committed.expire_pos;
trim_to -= trim_to % period;
ldout(cct, 10) << "trim last_commited head was " << last_committed
<< ", can trim to " << trim_to
<< dendl;
if (trim_to == 0 || trim_to == trimming_pos) {
ldout(cct, 10) << "trim already trimmed/trimming to "
<< trimmed_pos << "/" << trimming_pos << dendl;
return;
}
if (trimming_pos > trimmed_pos) {
ldout(cct, 10) << "trim already trimming atm, try again later. "
"trimmed/trimming is " << trimmed_pos << "/" << trimming_pos << dendl;
return;
}
// trim
ceph_assert(trim_to <= write_pos);
ceph_assert(trim_to <= expire_pos);
ceph_assert(trim_to > trimming_pos);
ldout(cct, 10) << "trim trimming to " << trim_to
<< ", trimmed/trimming/expire are "
<< trimmed_pos << "/" << trimming_pos << "/" << expire_pos
<< dendl;
// delete range of objects
uint64_t first = trimming_pos / period;
uint64_t num = (trim_to - trimming_pos) / period;
SnapContext snapc;
filer.purge_range(ino, &layout, snapc, first, num,
ceph::real_clock::now(), 0,
wrap_finisher(new C_Trim(this, trim_to)));
trimming_pos = trim_to;
}
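// Illustrative example (numbers are hypothetical): with a 4 MB period,
// last_committed.expire_pos = 13 MB rounds down to trim_to = 12 MB.
// If trimming_pos is currently 4 MB, first = 1 and num = 2, so objects
// 1 and 2 are purged and trimming_pos advances to 12 MB; trimmed_pos
// only catches up in _finish_trim() once the purge completes.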
void Journaler::_finish_trim(int r, uint64_t to)
{
lock_guard l(lock);
ceph_assert(!readonly);
ldout(cct, 10) << "_finish_trim trimmed_pos was " << trimmed_pos
<< ", trimmed/trimming/expire now "
<< to << "/" << trimming_pos << "/" << expire_pos
<< dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "_finish_trim got " << cpp_strerror(r) << dendl;
handle_write_error(r);
return;
}
ceph_assert(r >= 0 || r == -ENOENT);
ceph_assert(to <= trimming_pos);
ceph_assert(to > trimmed_pos);
trimmed_pos = to;
}
void Journaler::handle_write_error(int r)
{
// lock is locked
lderr(cct) << "handle_write_error " << cpp_strerror(r) << dendl;
if (on_write_error) {
on_write_error->complete(r);
on_write_error = NULL;
called_write_error = true;
} else if (called_write_error) {
/* We don't call error handler more than once, subsequent errors
* are dropped -- this is okay as long as the error handler does
* something dramatic like respawn */
lderr(cct) << __func__ << ": multiple write errors, handler already called"
<< dendl;
} else {
ceph_abort_msg("unhandled write error");
}
}
/**
* Test whether the 'read_buf' byte stream has enough data to read
* an entry
*
 * sets '*need' to the number of bytes needed to advance (enough
 * to get the next header if the header was unavailable, or enough to get the
 * whole next entry if the header was available but the body wasn't).
*/
bool JournalStream::readable(bufferlist &read_buf, uint64_t *need) const
{
ceph_assert(need != NULL);
uint32_t entry_size = 0;
uint64_t entry_sentinel = 0;
auto p = read_buf.cbegin();
// Do we have enough data to decode an entry prefix?
if (format >= JOURNAL_FORMAT_RESILIENT) {
*need = sizeof(entry_size) + sizeof(entry_sentinel);
} else {
*need = sizeof(entry_size);
}
if (read_buf.length() >= *need) {
if (format >= JOURNAL_FORMAT_RESILIENT) {
decode(entry_sentinel, p);
if (entry_sentinel != sentinel) {
throw buffer::malformed_input("Invalid sentinel");
}
}
decode(entry_size, p);
} else {
return false;
}
// Do we have enough data to decode an entry prefix, payload and suffix?
if (format >= JOURNAL_FORMAT_RESILIENT) {
*need = JOURNAL_ENVELOPE_RESILIENT + entry_size;
} else {
*need = JOURNAL_ENVELOPE_LEGACY + entry_size;
}
if (read_buf.length() >= *need) {
return true; // No more bytes needed
}
return false;
}
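// Illustrative example (sizes are hypothetical): in RESILIENT format the
// prefix is an 8-byte sentinel plus a 4-byte size, so with only 10 bytes
// buffered *need is set to 12 and we return false.  Once >= 12 bytes are
// buffered and the size decodes to, say, 100, *need becomes
// JOURNAL_ENVELOPE_RESILIENT + 100 = 120, and we return true only when
// the buffer holds the full 120-byte envelope.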
/**
* Consume one entry from a journal byte stream 'from', splicing a
* serialized LogEvent blob into 'entry'.
*
 * 'entry' must be non-null and point to an empty bufferlist.
*
* 'from' must contain sufficient valid data (i.e. readable is true).
*
* 'start_ptr' will be set to the entry's start pointer, if the collection
 * format provides it.  It must not be null.
*
* @returns The number of bytes consumed from the `from` byte stream. Note
* that this is not equal to the length of `entry`, which contains
* the inner serialized LogEvent and not the envelope.
*/
size_t JournalStream::read(bufferlist &from, bufferlist *entry,
uint64_t *start_ptr)
{
ceph_assert(start_ptr != NULL);
ceph_assert(entry != NULL);
ceph_assert(entry->length() == 0);
uint32_t entry_size = 0;
// Consume envelope prefix: entry_size and entry_sentinel
auto from_ptr = from.cbegin();
if (format >= JOURNAL_FORMAT_RESILIENT) {
uint64_t entry_sentinel = 0;
decode(entry_sentinel, from_ptr);
    // Assert rather than checking cleanly, because the precondition of this
    // fn is that readable() has already returned true
ceph_assert(entry_sentinel == sentinel);
}
decode(entry_size, from_ptr);
// Read out the payload
from_ptr.copy(entry_size, *entry);
// Consume the envelope suffix (start_ptr)
if (format >= JOURNAL_FORMAT_RESILIENT) {
decode(*start_ptr, from_ptr);
} else {
*start_ptr = 0;
}
// Trim the input buffer to discard the bytes we have consumed
from.splice(0, from_ptr.get_off());
return from_ptr.get_off();
}
/**
* Append one entry
*/
size_t JournalStream::write(bufferlist &entry, bufferlist *to,
uint64_t const &start_ptr)
{
ceph_assert(to != NULL);
uint32_t const entry_size = entry.length();
if (format >= JOURNAL_FORMAT_RESILIENT) {
encode(sentinel, *to);
}
encode(entry_size, *to);
to->claim_append(entry);
if (format >= JOURNAL_FORMAT_RESILIENT) {
encode(start_ptr, *to);
}
if (format >= JOURNAL_FORMAT_RESILIENT) {
return JOURNAL_ENVELOPE_RESILIENT + entry_size;
} else {
return JOURNAL_ENVELOPE_LEGACY + entry_size;
}
}
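// Hedged usage sketch (not part of the original source): a write/read
// round trip through a JournalStream looks roughly like
//
//   JournalStream js(JOURNAL_FORMAT_RESILIENT);
//   bufferlist entry, stream;
//   encode(some_event, entry);                 // payload from the caller
//   size_t wrote = js.write(entry, &stream, start_ptr);
//   ...
//   uint64_t need, ptr;
//   if (js.readable(stream, &need)) {
//     bufferlist out;
//     size_t consumed = js.read(stream, &out, &ptr);  // consumed == wrote
//   }
//
// where some_event and start_ptr stand in for whatever the caller
// (e.g. the MDS log) is journaling.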
/**
* set write error callback
*
* Set a callback/context to trigger if we get a write error from
* the objecter. This may be from an explicit request (e.g., flush)
* or something async the journaler did on its own (e.g., journal
* header update).
*
* It is only used once; if the caller continues to use the
* Journaler and wants to hear about errors, it needs to reset the
* error_handler.
*
* @param c callback/context to trigger on error
*/
void Journaler::set_write_error_handler(Context *c) {
lock_guard l(lock);
ceph_assert(!on_write_error);
on_write_error = wrap_finisher(c);
called_write_error = false;
}
/**
* Wrap a context in a C_OnFinisher, if it is non-NULL
*
* Utility function to avoid lots of error-prone and verbose
* NULL checking on contexts passed in.
*/
C_OnFinisher *Journaler::wrap_finisher(Context *c)
{
if (c != NULL) {
return new C_OnFinisher(c, finisher);
} else {
return NULL;
}
}
void Journaler::shutdown()
{
lock_guard l(lock);
ldout(cct, 1) << __func__ << dendl;
state = STATE_STOPPING;
readable = false;
// Kick out anyone reading from journal
error = -EAGAIN;
if (on_readable) {
C_OnFinisher *f = on_readable;
on_readable = 0;
f->complete(-EAGAIN);
}
list<Context*> ls;
ls.swap(waitfor_recover);
finish_contexts(cct, ls, -ESHUTDOWN);
std::map<uint64_t, std::list<Context*> >::iterator i;
for (i = waitfor_safe.begin(); i != waitfor_safe.end(); ++i) {
finish_contexts(cct, i->second, -EAGAIN);
}
waitfor_safe.clear();
}
void Journaler::check_isreadable()
{
std::unique_lock l(lock);
while (!_is_readable() &&
get_read_pos() < get_write_pos() &&
!get_error()) {
C_SaferCond readable_waiter;
_wait_for_readable(&readable_waiter);
l.unlock();
readable_waiter.wait();
l.lock();
}
  return;
}
| 43,617 | 25.726716 | 96 | cc |
null | ceph-main/src/osdc/Journaler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* Journaler
*
* This class stripes a serial log over objects on the store. Four
* logical pointers:
*
* write_pos - where we're writing new entries
* unused_field - where we're reading old entires
* expire_pos - what is deemed "old" by user
* trimmed_pos - where we're expiring old items
*
* trimmed_pos <= expire_pos <= unused_field <= write_pos.
*
* Often, unused_field <= write_pos (as with MDS log). During
* recovery, write_pos is undefined until the end of the log is
* discovered.
*
* A "head" struct at the beginning of the log is used to store
* metadata at regular intervals. The basic invariants include:
*
* head.unused_field <= unused_field -- the head may "lag", since
* it's updated lazily.
* head.write_pos <= write_pos
* head.expire_pos <= expire_pos
* head.trimmed_pos <= trimmed_pos
*
* More significantly,
*
* head.expire_pos >= trimmed_pos -- this ensures we can find the
* "beginning" of the log as last
* recorded, before it is trimmed.
* trimming will block until a
* sufficiently current expire_pos
* is committed.
*
* To recover log state, we simply start at the last write_pos in the
* head, and probe the object sequence sizes until we read the end.
*
* Head struct is stored in the first object. Actual journal starts
* after layout.period() bytes.
*
*/
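/* Illustrative example (positions are hypothetical): a healthy journal
 * might have trimmed_pos = 4 MB, expire_pos = 6 MB, unused_field
 * (historically the read position) = 9 MB and write_pos = 12 MB,
 * satisfying trimmed_pos <= expire_pos <= unused_field <= write_pos.
 * The on-disk head may lag behind all of these (e.g. head.write_pos =
 * 10 MB), which is why recovery probes forward from head.write_pos to
 * find the true end of the log.
 */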
#ifndef CEPH_JOURNALER_H
#define CEPH_JOURNALER_H
#include <list>
#include <map>
#include "Objecter.h"
#include "Filer.h"
#include "common/Timer.h"
#include "common/Throttle.h"
#include "include/common_fwd.h"
class Context;
class Finisher;
class C_OnFinisher;
typedef __u8 stream_format_t;
// Legacy envelope is leading uint32_t size
enum StreamFormat {
JOURNAL_FORMAT_LEGACY = 0,
JOURNAL_FORMAT_RESILIENT = 1,
// Insert new formats here, before COUNT
JOURNAL_FORMAT_COUNT
};
// Highest journal format version that we support
#define JOURNAL_FORMAT_MAX (JOURNAL_FORMAT_COUNT - 1)
// Legacy envelope is leading uint32_t size
#define JOURNAL_ENVELOPE_LEGACY (sizeof(uint32_t))
// Resilient envelope is leading uint64_t sentinel, uint32_t size,
// trailing uint64_t start_ptr
#define JOURNAL_ENVELOPE_RESILIENT (sizeof(uint32_t) + sizeof(uint64_t) + \
sizeof(uint64_t))
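// Illustrative example: for a 100-byte entry the on-disk footprint is
// 100 + JOURNAL_ENVELOPE_LEGACY = 104 bytes in legacy format, and
// 100 + JOURNAL_ENVELOPE_RESILIENT = 120 bytes in resilient format
// (8-byte sentinel + 4-byte size + payload + 8-byte start_ptr).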
/**
* Represents a collection of entries serialized in a byte stream.
*
* Each entry consists of:
* - a blob (used by the next level up as a serialized LogEvent)
* - a uint64_t (used by the next level up as a pointer to the start
* of the entry in the collection bytestream)
*/
class JournalStream
{
stream_format_t format;
public:
JournalStream(stream_format_t format_) : format(format_) {}
void set_format(stream_format_t format_) {format = format_;}
bool readable(bufferlist &bl, uint64_t *need) const;
size_t read(bufferlist &from, bufferlist *to, uint64_t *start_ptr);
size_t write(bufferlist &entry, bufferlist *to, uint64_t const &start_ptr);
size_t get_envelope_size() const {
if (format >= JOURNAL_FORMAT_RESILIENT) {
return JOURNAL_ENVELOPE_RESILIENT;
} else {
return JOURNAL_ENVELOPE_LEGACY;
}
}
// A magic number for the start of journal entries, so that we can
// identify them in damaged journals.
static const uint64_t sentinel = 0x3141592653589793;
};
class Journaler {
public:
// this goes at the head of the log "file".
class Header {
public:
uint64_t trimmed_pos;
uint64_t expire_pos;
uint64_t unused_field;
uint64_t write_pos;
std::string magic;
file_layout_t layout; //< The mapping from byte stream offsets
// to RADOS objects
stream_format_t stream_format; //< The encoding of LogEvents
// within the journal byte stream
Header(const char *m="") :
trimmed_pos(0), expire_pos(0), unused_field(0), write_pos(0), magic(m),
stream_format(-1) {
}
void encode(bufferlist &bl) const {
ENCODE_START(2, 2, bl);
encode(magic, bl);
encode(trimmed_pos, bl);
encode(expire_pos, bl);
encode(unused_field, bl);
encode(write_pos, bl);
encode(layout, bl, 0); // encode in legacy format
encode(stream_format, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator &bl) {
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(magic, bl);
decode(trimmed_pos, bl);
decode(expire_pos, bl);
decode(unused_field, bl);
decode(write_pos, bl);
decode(layout, bl);
if (struct_v > 1) {
decode(stream_format, bl);
} else {
stream_format = JOURNAL_FORMAT_LEGACY;
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const {
f->open_object_section("journal_header");
{
f->dump_string("magic", magic);
f->dump_unsigned("write_pos", write_pos);
f->dump_unsigned("expire_pos", expire_pos);
f->dump_unsigned("trimmed_pos", trimmed_pos);
f->dump_unsigned("stream_format", stream_format);
f->dump_object("layout", layout);
}
f->close_section(); // journal_header
}
static void generate_test_instances(std::list<Header*> &ls)
{
ls.push_back(new Header());
ls.push_back(new Header());
ls.back()->trimmed_pos = 1;
ls.back()->expire_pos = 2;
ls.back()->unused_field = 3;
ls.back()->write_pos = 4;
ls.back()->magic = "magique";
ls.push_back(new Header());
ls.back()->stream_format = JOURNAL_FORMAT_RESILIENT;
}
};
WRITE_CLASS_ENCODER(Header)
uint32_t get_stream_format() const {
return stream_format;
}
Header last_committed;
private:
// me
CephContext *cct;
std::mutex lock;
const std::string name;
typedef std::lock_guard<std::mutex> lock_guard;
typedef std::unique_lock<std::mutex> unique_lock;
Finisher *finisher;
Header last_written;
inodeno_t ino;
int64_t pg_pool;
bool readonly;
file_layout_t layout;
uint32_t stream_format;
JournalStream journal_stream;
const char *magic;
Objecter *objecter;
Filer filer;
PerfCounters *logger;
int logger_key_lat;
class C_DelayFlush;
C_DelayFlush *delay_flush_event;
/*
* Do a flush as a result of a C_DelayFlush context.
*/
void _do_delayed_flush()
{
ceph_assert(delay_flush_event != NULL);
lock_guard l(lock);
delay_flush_event = NULL;
_do_flush();
}
// my state
static const int STATE_UNDEF = 0;
static const int STATE_READHEAD = 1;
static const int STATE_PROBING = 2;
static const int STATE_ACTIVE = 3;
static const int STATE_REREADHEAD = 4;
static const int STATE_REPROBING = 5;
static const int STATE_STOPPING = 6;
int state;
int error;
void _write_head(Context *oncommit=NULL);
void _wait_for_flush(Context *onsafe);
void _trim();
// header
ceph::real_time last_wrote_head;
void _finish_write_head(int r, Header &wrote, C_OnFinisher *oncommit);
class C_WriteHead;
friend class C_WriteHead;
void _reread_head(Context *onfinish);
void _set_layout(file_layout_t const *l);
std::list<Context*> waitfor_recover;
void _read_head(Context *on_finish, bufferlist *bl);
void _finish_read_head(int r, bufferlist& bl);
void _finish_reread_head(int r, bufferlist& bl, Context *finish);
void _probe(Context *finish, uint64_t *end);
void _finish_probe_end(int r, uint64_t end);
void _reprobe(C_OnFinisher *onfinish);
void _finish_reprobe(int r, uint64_t end, C_OnFinisher *onfinish);
void _finish_reread_head_and_probe(int r, C_OnFinisher *onfinish);
class C_ReadHead;
friend class C_ReadHead;
class C_ProbeEnd;
friend class C_ProbeEnd;
class C_RereadHead;
friend class C_RereadHead;
class C_ReProbe;
friend class C_ReProbe;
class C_RereadHeadProbe;
friend class C_RereadHeadProbe;
// writer
uint64_t prezeroing_pos;
uint64_t prezero_pos; ///< we zero journal space ahead of write_pos to
// avoid problems with tail probing
uint64_t write_pos; ///< logical write position, where next entry
// will go
uint64_t flush_pos; ///< where we will flush. if
/// write_pos>flush_pos, we're buffering writes.
uint64_t safe_pos; ///< what has been committed safely to disk.
uint64_t next_safe_pos; /// start position of the first entry that isn't
/// being fully flushed. If we don't flush any
// partial entry, it's equal to flush_pos.
bufferlist write_buf; ///< write buffer. flush_pos +
/// write_buf.length() == write_pos.
// protect write_buf from bufferlist _len overflow
Throttle write_buf_throttle;
uint64_t waiting_for_zero_pos;
interval_set<uint64_t> pending_zero; // non-contig bits we've zeroed
std::list<Context*> waitfor_prezero;
std::map<uint64_t, uint64_t> pending_safe; // flush_pos -> safe_pos
// when safe through given offset
std::map<uint64_t, std::list<Context*> > waitfor_safe;
void _flush(C_OnFinisher *onsafe);
void _do_flush(unsigned amount=0);
void _finish_flush(int r, uint64_t start, ceph::real_time stamp);
class C_Flush;
friend class C_Flush;
// reader
uint64_t read_pos; // logical read position, where next entry starts.
uint64_t requested_pos; // what we've requested from OSD.
uint64_t received_pos; // what we've received from OSD.
  // read buffer. read_pos + read_buf.length() == received_pos.
bufferlist read_buf;
std::map<uint64_t,bufferlist> prefetch_buf;
uint64_t fetch_len; // how much to read at a time
uint64_t temp_fetch_len;
// for wait_for_readable()
C_OnFinisher *on_readable;
C_OnFinisher *on_write_error;
bool called_write_error;
// read completion callback
void _finish_read(int r, uint64_t offset, uint64_t length, bufferlist &bl);
void _finish_retry_read(int r);
void _assimilate_prefetch();
void _issue_read(uint64_t len); // read some more
void _prefetch(); // maybe read ahead
class C_Read;
friend class C_Read;
class C_RetryRead;
friend class C_RetryRead;
// trimmer
uint64_t expire_pos; // what we're allowed to trim to
uint64_t trimming_pos; // what we've requested to trim through
uint64_t trimmed_pos; // what has been trimmed
bool readable;
void _finish_trim(int r, uint64_t to);
class C_Trim;
friend class C_Trim;
void _issue_prezero();
void _finish_prezero(int r, uint64_t from, uint64_t len);
friend struct C_Journaler_Prezero;
// only init_headers when following or first reading off-disk
void init_headers(Header& h) {
ceph_assert(readonly ||
state == STATE_READHEAD ||
state == STATE_REREADHEAD);
last_written = last_committed = h;
}
/**
* handle a write error
*
* called when we get an objecter error on a write.
*
* @param r error code
*/
void handle_write_error(int r);
bool _have_next_entry();
void _finish_erase(int data_result, C_OnFinisher *completion);
class C_EraseFinish;
friend class C_EraseFinish;
C_OnFinisher *wrap_finisher(Context *c);
uint32_t write_iohint; // the fadvise flags for write op, see
                         // CEPH_OSD_OP_FADVISE_*
public:
Journaler(const std::string &name_, inodeno_t ino_, int64_t pool,
const char *mag, Objecter *obj, PerfCounters *l, int lkey, Finisher *f) :
last_committed(mag),
cct(obj->cct), name(name_), finisher(f), last_written(mag),
ino(ino_), pg_pool(pool), readonly(true),
stream_format(-1), journal_stream(-1),
magic(mag),
objecter(obj), filer(objecter, f), logger(l), logger_key_lat(lkey),
delay_flush_event(0),
state(STATE_UNDEF), error(0),
prezeroing_pos(0), prezero_pos(0), write_pos(0), flush_pos(0),
safe_pos(0), next_safe_pos(0),
write_buf_throttle(cct, "write_buf_throttle", UINT_MAX - (UINT_MAX >> 3)),
waiting_for_zero_pos(0),
read_pos(0), requested_pos(0), received_pos(0),
fetch_len(0), temp_fetch_len(0),
on_readable(0), on_write_error(NULL), called_write_error(false),
expire_pos(0), trimming_pos(0), trimmed_pos(0), readable(false),
write_iohint(0)
{
}
/* reset
*
* NOTE: we assume the caller knows/has ensured that any objects in
* our sequence do not exist.. e.g. after a MKFS. this is _not_ an
* "erase" method.
*/
void reset() {
lock_guard l(lock);
ceph_assert(state == STATE_ACTIVE);
readonly = true;
delay_flush_event = NULL;
state = STATE_UNDEF;
error = 0;
prezeroing_pos = 0;
prezero_pos = 0;
write_pos = 0;
flush_pos = 0;
safe_pos = 0;
next_safe_pos = 0;
read_pos = 0;
requested_pos = 0;
received_pos = 0;
fetch_len = 0;
ceph_assert(!on_readable);
expire_pos = 0;
trimming_pos = 0;
trimmed_pos = 0;
waiting_for_zero_pos = 0;
}
// Asynchronous operations
// =======================
void erase(Context *completion);
void create(file_layout_t *layout, stream_format_t const sf);
void recover(Context *onfinish);
void reread_head(Context *onfinish);
void reread_head_and_probe(Context *onfinish);
void write_head(Context *onsave=0);
void wait_for_flush(Context *onsafe = 0);
void flush(Context *onsafe = 0);
void wait_for_readable(Context *onfinish);
void _wait_for_readable(Context *onfinish);
bool have_waiter() const;
void wait_for_prezero(Context *onfinish);
// Synchronous setters
// ===================
void set_layout(file_layout_t const *l);
void set_readonly();
void set_writeable();
void set_write_pos(uint64_t p) {
lock_guard l(lock);
prezeroing_pos = prezero_pos = write_pos = flush_pos = safe_pos = next_safe_pos = p;
}
void set_read_pos(uint64_t p) {
lock_guard l(lock);
// we can't cope w/ in-progress read right now.
ceph_assert(requested_pos == received_pos);
read_pos = requested_pos = received_pos = p;
read_buf.clear();
}
uint64_t append_entry(bufferlist& bl);
void set_expire_pos(uint64_t ep) {
lock_guard l(lock);
expire_pos = ep;
}
void set_trimmed_pos(uint64_t p) {
lock_guard l(lock);
trimming_pos = trimmed_pos = p;
}
bool _write_head_needed();
bool write_head_needed() {
lock_guard l(lock);
return _write_head_needed();
}
void trim();
void trim_tail() {
lock_guard l(lock);
ceph_assert(!readonly);
_issue_prezero();
}
void set_write_error_handler(Context *c);
void set_write_iohint(uint32_t iohint_flags) {
write_iohint = iohint_flags;
}
/**
* Cause any ongoing waits to error out with -EAGAIN, set error
* to -EAGAIN.
*/
void shutdown();
public:
// Synchronous getters
// ===================
// TODO: need some locks on reads for true safety
uint64_t get_layout_period() const {
return layout.get_period();
}
file_layout_t& get_layout() { return layout; }
bool is_active() { return state == STATE_ACTIVE; }
bool is_stopping() { return state == STATE_STOPPING; }
int get_error() { return error; }
bool is_readonly() { return readonly; }
bool is_readable();
bool _is_readable();
bool try_read_entry(bufferlist& bl);
uint64_t get_write_pos() const { return write_pos; }
uint64_t get_write_safe_pos() const { return safe_pos; }
uint64_t get_read_pos() const { return read_pos; }
uint64_t get_expire_pos() const { return expire_pos; }
uint64_t get_trimmed_pos() const { return trimmed_pos; }
size_t get_journal_envelope_size() const {
return journal_stream.get_envelope_size();
}
void check_isreadable();
};
WRITE_CLASS_ENCODER(Journaler::Header)
#endif
| 16,039 | 28.377289 | 88 | h |
null | ceph-main/src/osdc/ObjectCacher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <limits.h>
#include "msg/Messenger.h"
#include "ObjectCacher.h"
#include "WritebackHandler.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "include/ceph_assert.h"
#define MAX_FLUSH_UNDER_LOCK 20  ///< max bh's we start writeback on
                                 ///< while holding the lock
#define BUFFER_MEMORY_WEIGHT CEPH_PAGE_SHIFT // memory usage of BufferHead, count in (1<<n)
using std::chrono::seconds;
using std::list;
using std::map;
using std::make_pair;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
using namespace std::literals;
/*** ObjectCacher::BufferHead ***/
/*** ObjectCacher::Object ***/
#define dout_subsys ceph_subsys_objectcacher
#undef dout_prefix
#define dout_prefix *_dout << "objectcacher.object(" << oid << ") "
class ObjectCacher::C_ReadFinish : public Context {
ObjectCacher *oc;
int64_t poolid;
sobject_t oid;
loff_t start;
uint64_t length;
xlist<C_ReadFinish*>::item set_item;
bool trust_enoent;
ceph_tid_t tid;
ZTracer::Trace trace;
public:
bufferlist bl;
C_ReadFinish(ObjectCacher *c, Object *ob, ceph_tid_t t, loff_t s,
uint64_t l, const ZTracer::Trace &trace) :
oc(c), poolid(ob->oloc.pool), oid(ob->get_soid()), start(s), length(l),
set_item(this), trust_enoent(true),
tid(t), trace(trace) {
ob->reads.push_back(&set_item);
}
void finish(int r) override {
oc->bh_read_finish(poolid, oid, tid, start, length, bl, r, trust_enoent);
trace.event("finish");
// object destructor clears the list
if (set_item.is_on_list())
set_item.remove_myself();
}
void distrust_enoent() {
trust_enoent = false;
}
};
class ObjectCacher::C_RetryRead : public Context {
ObjectCacher *oc;
OSDRead *rd;
ObjectSet *oset;
Context *onfinish;
ZTracer::Trace trace;
public:
C_RetryRead(ObjectCacher *_oc, OSDRead *r, ObjectSet *os, Context *c,
const ZTracer::Trace &trace)
: oc(_oc), rd(r), oset(os), onfinish(c), trace(trace) {
}
void finish(int r) override {
if (r >= 0) {
r = oc->_readx(rd, oset, onfinish, false, &trace);
}
if (r == 0) {
// read is still in-progress
return;
}
trace.event("finish");
if (onfinish) {
onfinish->complete(r);
}
}
};
ObjectCacher::BufferHead *ObjectCacher::Object::split(BufferHead *left,
loff_t off)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
ldout(oc->cct, 20) << "split " << *left << " at " << off << dendl;
// split off right
ObjectCacher::BufferHead *right = new BufferHead(this);
  // inherit the dontneed/nocache hints so the right half is still cleaned
  // up automatically if it is accessed later.
right->set_dontneed(left->get_dontneed());
right->set_nocache(left->get_nocache());
right->last_write_tid = left->last_write_tid;
right->last_read_tid = left->last_read_tid;
right->set_state(left->get_state());
right->set_error(left->error);
right->snapc = left->snapc;
right->set_journal_tid(left->journal_tid);
loff_t newleftlen = off - left->start();
right->set_start(off);
right->set_length(left->length() - newleftlen);
// shorten left
oc->bh_stat_sub(left);
left->set_length(newleftlen);
oc->bh_stat_add(left);
// add right
oc->bh_add(this, right);
// split buffers too
bufferlist bl;
bl = std::move(left->bl);
if (bl.length()) {
ceph_assert(bl.length() == (left->length() + right->length()));
right->bl.substr_of(bl, left->length(), right->length());
left->bl.substr_of(bl, 0, left->length());
}
// move read waiters
if (!left->waitfor_read.empty()) {
auto start_remove = left->waitfor_read.begin();
while (start_remove != left->waitfor_read.end() &&
start_remove->first < right->start())
++start_remove;
for (auto p = start_remove; p != left->waitfor_read.end(); ++p) {
ldout(oc->cct, 20) << "split moving waiters at byte " << p->first
<< " to right bh" << dendl;
right->waitfor_read[p->first].swap( p->second );
ceph_assert(p->second.empty());
}
left->waitfor_read.erase(start_remove, left->waitfor_read.end());
}
ldout(oc->cct, 20) << "split left is " << *left << dendl;
ldout(oc->cct, 20) << "split right is " << *right << dendl;
return right;
}
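// Illustrative example (offsets are hypothetical): splitting a clean
// bufferhead covering [0, 8192) at off = 4096 shortens the original to
// [0, 4096) and creates a new right-hand bh covering [4096, 8192); the
// backing bufferlist is divided accordingly and any waitfor_read waiters
// at offsets >= 4096 move to the right-hand bh.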
void ObjectCacher::Object::merge_left(BufferHead *left, BufferHead *right)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
ldout(oc->cct, 10) << "merge_left " << *left << " + " << *right << dendl;
if (left->get_journal_tid() == 0) {
left->set_journal_tid(right->get_journal_tid());
}
right->set_journal_tid(0);
oc->bh_remove(this, right);
oc->bh_stat_sub(left);
left->set_length(left->length() + right->length());
oc->bh_stat_add(left);
// data
left->bl.claim_append(right->bl);
// version
// note: this is sorta busted, but should only be used for dirty buffers
left->last_write_tid = std::max( left->last_write_tid, right->last_write_tid );
left->last_write = std::max( left->last_write, right->last_write );
left->set_dontneed(right->get_dontneed() ? left->get_dontneed() : false);
left->set_nocache(right->get_nocache() ? left->get_nocache() : false);
// waiters
for (auto p = right->waitfor_read.begin();
p != right->waitfor_read.end();
++p)
left->waitfor_read[p->first].splice(left->waitfor_read[p->first].begin(),
p->second );
// hose right
delete right;
ldout(oc->cct, 10) << "merge_left result " << *left << dendl;
}
bool ObjectCacher::Object::can_merge_bh(BufferHead *left, BufferHead *right)
{
if (left->end() != right->start() ||
left->get_state() != right->get_state() ||
!left->can_merge_journal(right))
return false;
if (left->is_tx() && left->last_write_tid != right->last_write_tid)
return false;
return true;
}
void ObjectCacher::Object::try_merge_bh(BufferHead *bh)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
ldout(oc->cct, 10) << "try_merge_bh " << *bh << dendl;
// do not merge rx buffers; last_read_tid may not match
if (bh->is_rx())
return;
// to the left?
auto p = data.find(bh->start());
ceph_assert(p->second == bh);
if (p != data.begin()) {
--p;
if (can_merge_bh(p->second, bh)) {
merge_left(p->second, bh);
bh = p->second;
} else {
++p;
}
}
// to the right?
ceph_assert(p->second == bh);
++p;
if (p != data.end() && can_merge_bh(bh, p->second))
merge_left(bh, p->second);
maybe_rebuild_buffer(bh);
}
void ObjectCacher::Object::maybe_rebuild_buffer(BufferHead *bh)
{
auto& bl = bh->bl;
if (bl.get_num_buffers() <= 1)
return;
auto wasted = bl.get_wasted_space();
if (wasted * 2 > bl.length() &&
wasted > (1U << BUFFER_MEMORY_WEIGHT))
bl.rebuild();
}
/*
 * return true if the given byte range is entirely cached
 */
bool ObjectCacher::Object::is_cached(loff_t cur, loff_t left) const
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
auto p = data_lower_bound(cur);
while (left > 0) {
if (p == data.end())
return false;
if (p->first <= cur) {
// have part of it
loff_t lenfromcur = std::min(p->second->end() - cur, left);
cur += lenfromcur;
left -= lenfromcur;
++p;
continue;
} else if (p->first > cur) {
// gap
return false;
} else
ceph_abort();
}
return true;
}
/*
 * return true if all of this object's cached data lies within [off, off+len]
 */
bool ObjectCacher::Object::include_all_cached_data(loff_t off, loff_t len)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
if (data.empty())
return true;
auto first = data.begin();
auto last = data.rbegin();
if (first->second->start() >= off && last->second->end() <= (off + len))
return true;
else
return false;
}
/*
* map a range of bytes into buffer_heads.
* - create missing buffer_heads as necessary.
*/
int ObjectCacher::Object::map_read(ObjectExtent &ex,
map<loff_t, BufferHead*>& hits,
map<loff_t, BufferHead*>& missing,
map<loff_t, BufferHead*>& rx,
map<loff_t, BufferHead*>& errors)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
ldout(oc->cct, 10) << "map_read " << ex.oid << " "
<< ex.offset << "~" << ex.length << dendl;
loff_t cur = ex.offset;
loff_t left = ex.length;
auto p = data_lower_bound(ex.offset);
while (left > 0) {
// at end?
if (p == data.end()) {
// rest is a miss.
BufferHead *n = new BufferHead(this);
n->set_start(cur);
n->set_length(left);
oc->bh_add(this, n);
if (complete) {
oc->mark_zero(n);
hits[cur] = n;
ldout(oc->cct, 20) << "map_read miss+complete+zero " << left << " left, " << *n << dendl;
} else {
missing[cur] = n;
ldout(oc->cct, 20) << "map_read miss " << left << " left, " << *n << dendl;
}
cur += left;
ceph_assert(cur == (loff_t)ex.offset + (loff_t)ex.length);
break; // no more.
}
if (p->first <= cur) {
// have it (or part of it)
BufferHead *e = p->second;
if (e->is_clean() ||
e->is_dirty() ||
e->is_tx() ||
e->is_zero()) {
hits[cur] = e; // readable!
ldout(oc->cct, 20) << "map_read hit " << *e << dendl;
} else if (e->is_rx()) {
rx[cur] = e; // missing, not readable.
ldout(oc->cct, 20) << "map_read rx " << *e << dendl;
} else if (e->is_error()) {
errors[cur] = e;
ldout(oc->cct, 20) << "map_read error " << *e << dendl;
} else {
ceph_abort();
}
loff_t lenfromcur = std::min(e->end() - cur, left);
cur += lenfromcur;
left -= lenfromcur;
++p;
continue; // more?
} else if (p->first > cur) {
// gap.. miss
loff_t next = p->first;
BufferHead *n = new BufferHead(this);
loff_t len = std::min(next - cur, left);
n->set_start(cur);
n->set_length(len);
oc->bh_add(this,n);
if (complete) {
oc->mark_zero(n);
hits[cur] = n;
ldout(oc->cct, 20) << "map_read gap+complete+zero " << *n << dendl;
} else {
missing[cur] = n;
ldout(oc->cct, 20) << "map_read gap " << *n << dendl;
}
cur += std::min(left, n->length());
left -= std::min(left, n->length());
continue; // more?
} else {
ceph_abort();
}
}
return 0;
}
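// Illustrative example (offsets are hypothetical): reading extent
// [0, 8192) from an object that only caches a clean bh at [4096, 8192)
// produces missing[0] = a newly created bh [0, 4096) and
// hits[4096] = the existing bh; if the object were marked complete, the
// new bh would instead be zero-filled and returned as a hit.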
void ObjectCacher::Object::audit_buffers()
{
loff_t offset = 0;
for (auto it = data.begin(); it != data.end(); ++it) {
if (it->first != it->second->start()) {
lderr(oc->cct) << "AUDIT FAILURE: map position " << it->first
<< " does not match bh start position: "
<< *it->second << dendl;
ceph_assert(it->first == it->second->start());
}
if (it->first < offset) {
lderr(oc->cct) << "AUDIT FAILURE: " << it->first << " " << *it->second
<< " overlaps with previous bh " << *((--it)->second)
<< dendl;
ceph_assert(it->first >= offset);
}
BufferHead *bh = it->second;
for (auto w_it = bh->waitfor_read.begin();
w_it != bh->waitfor_read.end(); ++w_it) {
if (w_it->first < bh->start() ||
w_it->first >= bh->start() + bh->length()) {
lderr(oc->cct) << "AUDIT FAILURE: waiter at " << w_it->first
<< " is not within bh " << *bh << dendl;
ceph_assert(w_it->first >= bh->start());
ceph_assert(w_it->first < bh->start() + bh->length());
}
}
offset = it->first + it->second->length();
}
}
/*
* map a range of extents on an object's buffer cache.
* - combine any bh's we're writing into one
* - break up bufferheads that don't fall completely within the range
* //no! - return a bh that includes the write. may also include
* other dirty data to left and/or right.
*/
ObjectCacher::BufferHead *ObjectCacher::Object::map_write(ObjectExtent &ex,
ceph_tid_t tid)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
BufferHead *final = 0;
ldout(oc->cct, 10) << "map_write oex " << ex.oid
<< " " << ex.offset << "~" << ex.length << dendl;
loff_t cur = ex.offset;
loff_t left = ex.length;
auto p = data_lower_bound(ex.offset);
while (left > 0) {
loff_t max = left;
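// each pass handles one of three cases: we are past the last bh
// (create or extend the trailing bh), we overlap an existing bh
// (split and merge as needed), or there is a gap before the next bh
// (grow the current bh across it).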
// at end ?
if (p == data.end()) {
if (final == NULL) {
final = new BufferHead(this);
replace_journal_tid(final, tid);
final->set_start( cur );
final->set_length( max );
oc->bh_add(this, final);
ldout(oc->cct, 10) << "map_write adding trailing bh " << *final << dendl;
} else {
oc->bh_stat_sub(final);
final->set_length(final->length() + max);
oc->bh_stat_add(final);
}
left -= max;
cur += max;
continue;
}
ldout(oc->cct, 10) << "cur is " << cur << ", p is " << *p->second << dendl;
//oc->verify_stats();
if (p->first <= cur) {
BufferHead *bh = p->second;
ldout(oc->cct, 10) << "map_write bh " << *bh << " intersected" << dendl;
if (p->first < cur) {
ceph_assert(final == 0);
if (cur + max >= bh->end()) {
// we want right bit (one splice)
final = split(bh, cur); // just split it, take right half.
maybe_rebuild_buffer(bh);
replace_journal_tid(final, tid);
++p;
ceph_assert(p->second == final);
} else {
// we want middle bit (two splices)
final = split(bh, cur);
maybe_rebuild_buffer(bh);
++p;
ceph_assert(p->second == final);
auto right = split(final, cur+max);
maybe_rebuild_buffer(right);
replace_journal_tid(final, tid);
}
} else {
ceph_assert(p->first == cur);
if (bh->length() <= max) {
// whole bufferhead, piece of cake.
} else {
// we want left bit (one splice)
auto right = split(bh, cur + max); // just split
maybe_rebuild_buffer(right);
}
if (final) {
oc->mark_dirty(bh);
oc->mark_dirty(final);
--p; // move iterator back to final
ceph_assert(p->second == final);
replace_journal_tid(bh, tid);
merge_left(final, bh);
} else {
final = bh;
replace_journal_tid(final, tid);
}
}
// keep going.
loff_t lenfromcur = final->end() - cur;
cur += lenfromcur;
left -= lenfromcur;
++p;
continue;
} else {
// gap!
loff_t next = p->first;
loff_t glen = std::min(next - cur, max);
ldout(oc->cct, 10) << "map_write gap " << cur << "~" << glen << dendl;
if (final) {
oc->bh_stat_sub(final);
final->set_length(final->length() + glen);
oc->bh_stat_add(final);
} else {
final = new BufferHead(this);
replace_journal_tid(final, tid);
final->set_start( cur );
final->set_length( glen );
oc->bh_add(this, final);
}
cur += glen;
left -= glen;
continue; // more?
}
}
// set version
ceph_assert(final);
ceph_assert(final->get_journal_tid() == tid);
ldout(oc->cct, 10) << "map_write final is " << *final << dendl;
return final;
}
void ObjectCacher::Object::replace_journal_tid(BufferHead *bh,
ceph_tid_t tid) {
ceph_tid_t bh_tid = bh->get_journal_tid();
ceph_assert(tid == 0 || bh_tid <= tid);
if (bh_tid != 0 && bh_tid != tid) {
// inform journal that it should not expect a writeback from this extent
oc->writeback_handler.overwrite_extent(get_oid(), bh->start(),
bh->length(), bh_tid, tid);
}
bh->set_journal_tid(tid);
}
void ObjectCacher::Object::truncate(loff_t s)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
ldout(oc->cct, 10) << "truncate " << *this << " to " << s << dendl;
std::list<Context*> waiting_for_read;
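// walk the bh map from the end: split any bh straddling the truncation
// point, then drop every bh that starts at or beyond it, collecting
// their read waiters so they can be restarted below.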
while (!data.empty()) {
BufferHead *bh = data.rbegin()->second;
if (bh->end() <= s)
break;
// split bh at truncation point?
if (bh->start() < s) {
split(bh, s);
maybe_rebuild_buffer(bh);
continue;
}
// remove bh entirely
ceph_assert(bh->start() >= s);
for ([[maybe_unused]] auto& [off, ctxs] : bh->waitfor_read) {
waiting_for_read.splice(waiting_for_read.end(), ctxs);
}
bh->waitfor_read.clear();
replace_journal_tid(bh, 0);
oc->bh_remove(this, bh);
delete bh;
}
if (!waiting_for_read.empty()) {
ldout(oc->cct, 10) << "restarting reads post-truncate" << dendl;
}
finish_contexts(oc->cct, waiting_for_read, 0);
}
void ObjectCacher::Object::discard(loff_t off, loff_t len,
C_GatherBuilder* commit_gather)
{
ceph_assert(ceph_mutex_is_locked(oc->lock));
ldout(oc->cct, 10) << "discard " << *this << " " << off << "~" << len
<< dendl;
if (!exists) {
ldout(oc->cct, 10) << " setting exists on " << *this << dendl;
exists = true;
}
if (complete) {
ldout(oc->cct, 10) << " clearing complete on " << *this << dendl;
complete = false;
}
std::list<Context*> waiting_for_read;
auto p = data_lower_bound(off);
while (p != data.end()) {
BufferHead *bh = p->second;
if (bh->start() >= off + len)
break;
// split bh at truncation point?
if (bh->start() < off) {
split(bh, off);
maybe_rebuild_buffer(bh);
++p;
continue;
}
ceph_assert(bh->start() >= off);
if (bh->end() > off + len) {
auto right = split(bh, off + len);
maybe_rebuild_buffer(right);
}
++p;
ldout(oc->cct, 10) << "discard " << *this << " bh " << *bh << dendl;
replace_journal_tid(bh, 0);
if (bh->is_tx() && commit_gather != nullptr) {
// wait for the writeback to commit
waitfor_commit[bh->last_write_tid].emplace_back(commit_gather->new_sub());
} else if (bh->is_rx()) {
// cannot remove bh with in-flight read, but we can ensure the
// read won't overwrite the discard
bh->last_read_tid = ++oc->last_read_tid;
bh->bl.clear();
bh->set_nocache(true);
oc->mark_zero(bh);
// we should mark all rx bhs as zero
continue;
} else {
for ([[maybe_unused]] auto& [off, ctxs] : bh->waitfor_read) {
waiting_for_read.splice(waiting_for_read.end(), ctxs);
}
bh->waitfor_read.clear();
}
oc->bh_remove(this, bh);
delete bh;
}
if (!waiting_for_read.empty()) {
ldout(oc->cct, 10) << "restarting reads post-discard" << dendl;
}
finish_contexts(oc->cct, waiting_for_read, 0); /* restart reads */
}
/*** ObjectCacher ***/
#undef dout_prefix
#define dout_prefix *_dout << "objectcacher "
ObjectCacher::ObjectCacher(CephContext *cct_, string name,
WritebackHandler& wb, ceph::mutex& l,
flush_set_callback_t flush_callback,
void *flush_callback_arg, uint64_t max_bytes,
uint64_t max_objects, uint64_t max_dirty,
uint64_t target_dirty, double max_dirty_age,
bool block_writes_upfront)
: perfcounter(NULL),
cct(cct_), writeback_handler(wb), name(name), lock(l),
max_dirty(max_dirty), target_dirty(target_dirty),
max_size(max_bytes), max_objects(max_objects),
max_dirty_age(ceph::make_timespan(max_dirty_age)),
block_writes_upfront(block_writes_upfront),
trace_endpoint("ObjectCacher"),
flush_set_callback(flush_callback),
flush_set_callback_arg(flush_callback_arg),
last_read_tid(0), flusher_stop(false), flusher_thread(this),finisher(cct),
stat_clean(0), stat_zero(0), stat_dirty(0), stat_rx(0), stat_tx(0),
stat_missing(0), stat_error(0), stat_dirty_waiting(0),
stat_nr_dirty_waiters(0), reads_outstanding(0)
{
perf_start();
finisher.start();
scattered_write = writeback_handler.can_scattered_write();
}
ObjectCacher::~ObjectCacher()
{
finisher.stop();
perf_stop();
// we should be empty.
for (auto i = objects.begin(); i != objects.end(); ++i)
ceph_assert(i->empty());
ceph_assert(bh_lru_rest.lru_get_size() == 0);
ceph_assert(bh_lru_dirty.lru_get_size() == 0);
ceph_assert(ob_lru.lru_get_size() == 0);
ceph_assert(dirty_or_tx_bh.empty());
}
void ObjectCacher::perf_start()
{
string n = "objectcacher-" + name;
PerfCountersBuilder plb(cct, n, l_objectcacher_first, l_objectcacher_last);
plb.add_u64_counter(l_objectcacher_cache_ops_hit,
"cache_ops_hit", "Hit operations");
plb.add_u64_counter(l_objectcacher_cache_ops_miss,
"cache_ops_miss", "Miss operations");
plb.add_u64_counter(l_objectcacher_cache_bytes_hit,
"cache_bytes_hit", "Hit data", NULL, 0, unit_t(UNIT_BYTES));
plb.add_u64_counter(l_objectcacher_cache_bytes_miss,
"cache_bytes_miss", "Miss data", NULL, 0, unit_t(UNIT_BYTES));
plb.add_u64_counter(l_objectcacher_data_read,
"data_read", "Read data");
plb.add_u64_counter(l_objectcacher_data_written,
"data_written", "Data written to cache");
plb.add_u64_counter(l_objectcacher_data_flushed,
"data_flushed", "Data flushed");
plb.add_u64_counter(l_objectcacher_overwritten_in_flush,
"data_overwritten_while_flushing",
"Data overwritten while flushing");
plb.add_u64_counter(l_objectcacher_write_ops_blocked, "write_ops_blocked",
"Write operations, delayed due to dirty limits");
plb.add_u64_counter(l_objectcacher_write_bytes_blocked,
"write_bytes_blocked",
"Write data blocked on dirty limit", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time(l_objectcacher_write_time_blocked, "write_time_blocked",
"Time spent blocking a write due to dirty limits");
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
}
void ObjectCacher::perf_stop()
{
ceph_assert(perfcounter);
cct->get_perfcounters_collection()->remove(perfcounter);
delete perfcounter;
}
/* private */
ObjectCacher::Object *ObjectCacher::get_object(sobject_t oid,
uint64_t object_no,
ObjectSet *oset,
object_locator_t &l,
uint64_t truncate_size,
uint64_t truncate_seq)
{
// XXX: Add handling of nspace in object_locator_t in cache
ceph_assert(ceph_mutex_is_locked(lock));
// have it?
if ((uint32_t)l.pool < objects.size()) {
if (objects[l.pool].count(oid)) {
Object *o = objects[l.pool][oid];
o->object_no = object_no;
o->truncate_size = truncate_size;
o->truncate_seq = truncate_seq;
return o;
}
} else {
objects.resize(l.pool+1);
}
// create it.
Object *o = new Object(this, oid, object_no, oset, l, truncate_size,
truncate_seq);
objects[l.pool][oid] = o;
ob_lru.lru_insert_top(o);
return o;
}
void ObjectCacher::close_object(Object *ob)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << "close_object " << *ob << dendl;
ceph_assert(ob->can_close());
// ok!
ob_lru.lru_remove(ob);
objects[ob->oloc.pool].erase(ob->get_soid());
ob->set_item.remove_myself();
delete ob;
}
void ObjectCacher::bh_read(BufferHead *bh, int op_flags,
const ZTracer::Trace &parent_trace)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 7) << "bh_read on " << *bh << " outstanding reads "
<< reads_outstanding << dendl;
ZTracer::Trace trace;
if (parent_trace.valid()) {
trace.init("", &trace_endpoint, &parent_trace);
trace.copy_name("bh_read " + bh->ob->get_oid().name);
trace.event("start");
}
mark_rx(bh);
bh->last_read_tid = ++last_read_tid;
// finisher
C_ReadFinish *onfinish = new C_ReadFinish(this, bh->ob, bh->last_read_tid,
bh->start(), bh->length(), trace);
// go
writeback_handler.read(bh->ob->get_oid(), bh->ob->get_object_number(),
bh->ob->get_oloc(), bh->start(), bh->length(),
bh->ob->get_snap(), &onfinish->bl,
bh->ob->truncate_size, bh->ob->truncate_seq,
op_flags, trace, onfinish);
++reads_outstanding;
}
void ObjectCacher::bh_read_finish(int64_t poolid, sobject_t oid,
ceph_tid_t tid, loff_t start,
uint64_t length, bufferlist &bl, int r,
bool trust_enoent)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 7) << "bh_read_finish "
<< oid
<< " tid " << tid
<< " " << start << "~" << length
<< " (bl is " << bl.length() << ")"
<< " returned " << r
<< " outstanding reads " << reads_outstanding
<< dendl;
if (r >= 0 && bl.length() < length) {
ldout(cct, 7) << "bh_read_finish " << oid << " padding " << start << "~"
<< length << " with " << length - bl.length() << " bytes of zeroes"
<< dendl;
bl.append_zero(length - bl.length());
}
list<Context*> ls;
int err = 0;
if (objects[poolid].count(oid) == 0) {
ldout(cct, 7) << "bh_read_finish no object cache" << dendl;
} else {
Object *ob = objects[poolid][oid];
if (r == -ENOENT && !ob->complete) {
// wake up *all* rx waiters, or else we risk reordering
// identical reads. e.g.
// read 1~1
// reply to unrelated 3~1 -> !exists
// read 1~1 -> immediate ENOENT
// reply to first 1~1 -> ooo ENOENT
bool allzero = true;
for (auto p = ob->data.begin(); p != ob->data.end(); ++p) {
BufferHead *bh = p->second;
for (auto p = bh->waitfor_read.begin();
p != bh->waitfor_read.end();
++p)
ls.splice(ls.end(), p->second);
bh->waitfor_read.clear();
if (!bh->is_zero() && !bh->is_rx())
allzero = false;
}
// just pass through and retry all waiters if we don't trust
// -ENOENT for this read
if (trust_enoent) {
ldout(cct, 7)
<< "bh_read_finish ENOENT, marking complete and !exists on " << *ob
<< dendl;
ob->complete = true;
ob->exists = false;
/* If all the bhs are effectively zero, get rid of them. All
* the waiters will be retried and get -ENOENT immediately, so
* it's safe to clean up the unneeded bh's now. Since we know
* it's safe to remove them now, do so, so they aren't hanging
* around waiting for more -ENOENTs from rados while the cache
* is being shut down.
*
* Only do this when all the bhs are rx or clean, to match the
* condition in _readx(). If there are any non-rx or non-clean
* bhs, _readx() will wait for the final result instead of
* returning -ENOENT immediately.
*/
if (allzero) {
ldout(cct, 10)
<< "bh_read_finish ENOENT and allzero, getting rid of "
<< "bhs for " << *ob << dendl;
auto p = ob->data.begin();
while (p != ob->data.end()) {
BufferHead *bh = p->second;
// current iterator will be invalidated by bh_remove()
++p;
bh_remove(ob, bh);
delete bh;
}
}
}
}
// apply to bh's!
loff_t opos = start;
while (true) {
auto p = ob->data_lower_bound(opos);
if (p == ob->data.end())
break;
if (opos >= start+(loff_t)length) {
ldout(cct, 20) << "break due to opos " << opos << " >= start+length "
<< start << "+" << length << "=" << start+(loff_t)length
<< dendl;
break;
}
BufferHead *bh = p->second;
ldout(cct, 20) << "checking bh " << *bh << dendl;
// finishers?
for (auto it = bh->waitfor_read.begin();
it != bh->waitfor_read.end();
++it)
ls.splice(ls.end(), it->second);
bh->waitfor_read.clear();
if (bh->start() > opos) {
ldout(cct, 1) << "bh_read_finish skipping gap "
<< opos << "~" << bh->start() - opos
<< dendl;
opos = bh->start();
continue;
}
if (!bh->is_rx()) {
ldout(cct, 10) << "bh_read_finish skipping non-rx " << *bh << dendl;
opos = bh->end();
continue;
}
if (bh->last_read_tid != tid) {
ldout(cct, 10) << "bh_read_finish bh->last_read_tid "
<< bh->last_read_tid << " != tid " << tid
<< ", skipping" << dendl;
opos = bh->end();
continue;
}
ceph_assert(opos >= bh->start());
ceph_assert(bh->start() == opos); // we don't merge rx bh's... yet!
ceph_assert(bh->length() <= start+(loff_t)length-opos);
if (bh->error < 0)
err = bh->error;
opos = bh->end();
if (r == -ENOENT) {
if (trust_enoent) {
ldout(cct, 10) << "bh_read_finish removing " << *bh << dendl;
bh_remove(ob, bh);
delete bh;
} else {
ldout(cct, 10) << "skipping untrusted -ENOENT and will retry for "
<< *bh << dendl;
}
continue;
}
if (r < 0) {
bh->error = r;
mark_error(bh);
} else {
bh->bl.substr_of(bl,
bh->start() - start,
bh->length());
mark_clean(bh);
}
ldout(cct, 10) << "bh_read_finish read " << *bh << dendl;
ob->try_merge_bh(bh);
}
}
// called with lock held.
ldout(cct, 20) << "finishing waiters " << ls << dendl;
finish_contexts(cct, ls, err);
retry_waiting_reads();
--reads_outstanding;
read_cond.notify_all();
}
void ObjectCacher::bh_write_adjacencies(BufferHead *bh, ceph::real_time cutoff,
int64_t *max_amount, int *max_count)
{
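// gather dirty bhs adjacent to bh (same object, last_write at or before
// cutoff) from the sorted dirty_or_tx_bh set -- scanning forward, then
// backward -- until the optional count/byte budgets are exhausted, and
// issue them as a single scattered write.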
list<BufferHead*> blist;
int count = 0;
int64_t total_len = 0;
set<BufferHead*, BufferHead::ptr_lt>::iterator it = dirty_or_tx_bh.find(bh);
ceph_assert(it != dirty_or_tx_bh.end());
for (set<BufferHead*, BufferHead::ptr_lt>::iterator p = it;
p != dirty_or_tx_bh.end();
++p) {
BufferHead *obh = *p;
if (obh->ob != bh->ob)
break;
if (obh->is_dirty() && obh->last_write <= cutoff) {
blist.push_back(obh);
++count;
total_len += obh->length();
if ((max_count && count > *max_count) ||
(max_amount && total_len > *max_amount))
break;
}
}
while (it != dirty_or_tx_bh.begin()) {
--it;
BufferHead *obh = *it;
if (obh->ob != bh->ob)
break;
if (obh->is_dirty() && obh->last_write <= cutoff) {
blist.push_front(obh);
++count;
total_len += obh->length();
if ((max_count && count > *max_count) ||
(max_amount && total_len > *max_amount))
break;
}
}
if (max_count)
*max_count -= count;
if (max_amount)
*max_amount -= total_len;
bh_write_scattered(blist);
}
class ObjectCacher::C_WriteCommit : public Context {
ObjectCacher *oc;
int64_t poolid;
sobject_t oid;
vector<pair<loff_t, uint64_t> > ranges;
ZTracer::Trace trace;
public:
ceph_tid_t tid = 0;
C_WriteCommit(ObjectCacher *c, int64_t _poolid, sobject_t o, loff_t s,
uint64_t l, const ZTracer::Trace &trace) :
oc(c), poolid(_poolid), oid(o), trace(trace) {
ranges.push_back(make_pair(s, l));
}
C_WriteCommit(ObjectCacher *c, int64_t _poolid, sobject_t o,
vector<pair<loff_t, uint64_t> >& _ranges) :
oc(c), poolid(_poolid), oid(o), tid(0) {
ranges.swap(_ranges);
}
void finish(int r) override {
oc->bh_write_commit(poolid, oid, ranges, tid, r);
trace.event("finish");
}
};
void ObjectCacher::bh_write_scattered(list<BufferHead*>& blist)
{
ceph_assert(ceph_mutex_is_locked(lock));
Object *ob = blist.front()->ob;
ob->get();
ceph::real_time last_write;
SnapContext snapc;
vector<pair<loff_t, uint64_t> > ranges;
vector<pair<uint64_t, bufferlist> > io_vec;
ranges.reserve(blist.size());
io_vec.reserve(blist.size());
uint64_t total_len = 0;
for (list<BufferHead*>::iterator p = blist.begin(); p != blist.end(); ++p) {
BufferHead *bh = *p;
ldout(cct, 7) << "bh_write_scattered " << *bh << dendl;
ceph_assert(bh->ob == ob);
ceph_assert(bh->bl.length() == bh->length());
ranges.push_back(pair<loff_t, uint64_t>(bh->start(), bh->length()));
int n = io_vec.size();
io_vec.resize(n + 1);
io_vec[n].first = bh->start();
io_vec[n].second = bh->bl;
total_len += bh->length();
if (bh->snapc.seq > snapc.seq)
snapc = bh->snapc;
if (bh->last_write > last_write)
last_write = bh->last_write;
}
C_WriteCommit *oncommit = new C_WriteCommit(this, ob->oloc.pool, ob->get_soid(), ranges);
ceph_tid_t tid = writeback_handler.write(ob->get_oid(), ob->get_oloc(),
io_vec, snapc, last_write,
ob->truncate_size, ob->truncate_seq,
oncommit);
oncommit->tid = tid;
ob->last_write_tid = tid;
for (list<BufferHead*>::iterator p = blist.begin(); p != blist.end(); ++p) {
BufferHead *bh = *p;
bh->last_write_tid = tid;
mark_tx(bh);
}
if (perfcounter)
perfcounter->inc(l_objectcacher_data_flushed, total_len);
}
void ObjectCacher::bh_write(BufferHead *bh, const ZTracer::Trace &parent_trace)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 7) << "bh_write " << *bh << dendl;
bh->ob->get();
ZTracer::Trace trace;
if (parent_trace.valid()) {
trace.init("", &trace_endpoint, &parent_trace);
trace.copy_name("bh_write " + bh->ob->get_oid().name);
trace.event("start");
}
// finishers
C_WriteCommit *oncommit = new C_WriteCommit(this, bh->ob->oloc.pool,
bh->ob->get_soid(), bh->start(),
bh->length(), trace);
// go
ceph_tid_t tid = writeback_handler.write(bh->ob->get_oid(),
bh->ob->get_oloc(),
bh->start(), bh->length(),
bh->snapc, bh->bl, bh->last_write,
bh->ob->truncate_size,
bh->ob->truncate_seq,
bh->journal_tid, trace, oncommit);
ldout(cct, 20) << " tid " << tid << " on " << bh->ob->get_oid() << dendl;
// set bh last_write_tid
oncommit->tid = tid;
bh->ob->last_write_tid = tid;
bh->last_write_tid = tid;
if (perfcounter) {
perfcounter->inc(l_objectcacher_data_flushed, bh->length());
}
mark_tx(bh);
}
void ObjectCacher::bh_write_commit(int64_t poolid, sobject_t oid,
vector<pair<loff_t, uint64_t> >& ranges,
ceph_tid_t tid, int r)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 7) << "bh_write_commit " << oid << " tid " << tid
<< " ranges " << ranges << " returned " << r << dendl;
if (objects[poolid].count(oid) == 0) {
ldout(cct, 7) << "bh_write_commit no object cache" << dendl;
return;
}
Object *ob = objects[poolid][oid];
int was_dirty_or_tx = ob->oset->dirty_or_tx;
for (vector<pair<loff_t, uint64_t> >::iterator p = ranges.begin();
p != ranges.end();
++p) {
loff_t start = p->first;
uint64_t length = p->second;
if (!ob->exists) {
ldout(cct, 10) << "bh_write_commit marking exists on " << *ob << dendl;
ob->exists = true;
if (writeback_handler.may_copy_on_write(ob->get_oid(), start, length,
ob->get_snap())) {
ldout(cct, 10) << "bh_write_commit may copy on write, clearing "
"complete on " << *ob << dendl;
ob->complete = false;
}
}
vector<pair<loff_t, BufferHead*>> hit;
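// bhs marked clean are collected here and only merged after the scan,
// since merge_left may delete entries out from under the iterator.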
// apply to bh's!
for (map<loff_t, BufferHead*>::const_iterator p = ob->data_lower_bound(start);
p != ob->data.end();
++p) {
BufferHead *bh = p->second;
if (bh->start() >= start+(loff_t)length)
break;
// make sure bh is tx
if (!bh->is_tx()) {
ldout(cct, 10) << "bh_write_commit skipping non-tx " << *bh << dendl;
continue;
}
// make sure bh tid matches
if (bh->last_write_tid != tid) {
ceph_assert(bh->last_write_tid > tid);
ldout(cct, 10) << "bh_write_commit newer tid on " << *bh << dendl;
continue;
}
// we don't merge tx buffers; the tx buffer should fall within the range
ceph_assert(bh->start() >= start);
ceph_assert(bh->end() <= start+(loff_t)length);
if (r >= 0) {
// ok! mark bh clean and error-free
mark_clean(bh);
bh->set_journal_tid(0);
if (bh->get_nocache())
bh_lru_rest.lru_bottouch(bh);
hit.push_back(make_pair(bh->start(), bh));
ldout(cct, 10) << "bh_write_commit clean " << *bh << dendl;
} else {
mark_dirty(bh);
ldout(cct, 10) << "bh_write_commit marking dirty again due to error "
<< *bh << " r = " << r << " " << cpp_strerror(-r)
<< dendl;
}
}
for (auto& p : hit) {
// p.second may be merged and deleted in merge_left
if (ob->data.count(p.first))
ob->try_merge_bh(p.second);
}
}
// update last_commit.
ceph_assert(ob->last_commit_tid < tid);
ob->last_commit_tid = tid;
// waiters?
list<Context*> ls;
if (ob->waitfor_commit.count(tid)) {
ls.splice(ls.begin(), ob->waitfor_commit[tid]);
ob->waitfor_commit.erase(tid);
}
// is the entire object set now clean and fully committed?
ObjectSet *oset = ob->oset;
ob->put();
if (flush_set_callback &&
was_dirty_or_tx > 0 &&
oset->dirty_or_tx == 0) { // nothing dirty/tx
flush_set_callback(flush_set_callback_arg, oset);
}
if (!ls.empty())
finish_contexts(cct, ls, r);
}
void ObjectCacher::flush(ZTracer::Trace *trace, loff_t amount)
{
ceph_assert(trace != nullptr);
ceph_assert(ceph_mutex_is_locked(lock));
ceph::real_time cutoff = ceph::real_clock::now();
ldout(cct, 10) << "flush " << amount << dendl;
/*
* NOTE: we aren't actually pulling things off the LRU here, just
* looking at the tail item. Then we call bh_write, which moves it
* to the other LRU, so that we can call
* lru_dirty.lru_get_next_expire() again.
*/
int64_t left = amount;
while (amount == 0 || left > 0) {
BufferHead *bh = static_cast<BufferHead*>(
bh_lru_dirty.lru_get_next_expire());
if (!bh) break;
if (bh->last_write > cutoff) break;
if (scattered_write) {
bh_write_adjacencies(bh, cutoff, amount > 0 ? &left : NULL, NULL);
} else {
left -= bh->length();
bh_write(bh, *trace);
}
}
}
void ObjectCacher::trim()
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << "trim start: bytes: max " << max_size << " clean "
<< get_stat_clean() << ", objects: max " << max_objects
<< " current " << ob_lru.lru_get_size() << dendl;
uint64_t max_clean_bh = max_size >> BUFFER_MEMORY_WEIGHT;
uint64_t nr_clean_bh = bh_lru_rest.lru_get_size() - bh_lru_rest.lru_get_num_pinned();
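// in addition to the byte limit, bound the number of clean bhs:
// max_clean_bh is max_size scaled down by BUFFER_MEMORY_WEIGHT, which
// keeps per-buffer overhead in check when the cache holds many small
// buffers.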
while (get_stat_clean() > 0 &&
((uint64_t)get_stat_clean() > max_size ||
nr_clean_bh > max_clean_bh)) {
BufferHead *bh = static_cast<BufferHead*>(bh_lru_rest.lru_expire());
if (!bh)
break;
ldout(cct, 10) << "trim trimming " << *bh << dendl;
ceph_assert(bh->is_clean() || bh->is_zero() || bh->is_error());
Object *ob = bh->ob;
bh_remove(ob, bh);
delete bh;
--nr_clean_bh;
if (ob->complete) {
ldout(cct, 10) << "trim clearing complete on " << *ob << dendl;
ob->complete = false;
}
}
while (ob_lru.lru_get_size() > max_objects) {
Object *ob = static_cast<Object*>(ob_lru.lru_expire());
if (!ob)
break;
ldout(cct, 10) << "trim trimming " << *ob << dendl;
close_object(ob);
}
ldout(cct, 10) << "trim finish: max " << max_size << " clean "
<< get_stat_clean() << ", objects: max " << max_objects
<< " current " << ob_lru.lru_get_size() << dendl;
}
/* public */
bool ObjectCacher::is_cached(ObjectSet *oset, vector<ObjectExtent>& extents,
snapid_t snapid)
{
ceph_assert(ceph_mutex_is_locked(lock));
for (vector<ObjectExtent>::iterator ex_it = extents.begin();
ex_it != extents.end();
++ex_it) {
ldout(cct, 10) << "is_cached " << *ex_it << dendl;
// get Object cache
sobject_t soid(ex_it->oid, snapid);
Object *o = get_object_maybe(soid, ex_it->oloc);
if (!o)
return false;
if (!o->is_cached(ex_it->offset, ex_it->length))
return false;
}
return true;
}
/*
* returns # bytes read (if in cache). onfinish is untouched (caller
* must delete it)
* returns 0 if doing async read
*/
int ObjectCacher::readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
ZTracer::Trace *parent_trace)
{
ZTracer::Trace trace;
if (parent_trace != nullptr) {
trace.init("read", &trace_endpoint, parent_trace);
trace.event("start");
}
int r =_readx(rd, oset, onfinish, true, &trace);
if (r < 0) {
trace.event("finish");
}
return r;
}
int ObjectCacher::_readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
bool external_call, ZTracer::Trace *trace)
{
ceph_assert(trace != nullptr);
ceph_assert(ceph_mutex_is_locked(lock));
bool success = true;
int error = 0;
uint64_t bytes_in_cache = 0;
uint64_t bytes_not_in_cache = 0;
uint64_t total_bytes_read = 0;
map<uint64_t, bufferlist> stripe_map; // final buffer offset -> substring
bool dontneed = rd->fadvise_flags & LIBRADOS_OP_FLAG_FADVISE_DONTNEED;
bool nocache = rd->fadvise_flags & LIBRADOS_OP_FLAG_FADVISE_NOCACHE;
/*
* WARNING: we can only meaningfully return ENOENT if the read request
* passed in a single ObjectExtent. Any caller who wants ENOENT instead of
* zeroed buffers needs to feed single extents into readx().
*/
ceph_assert(!oset->return_enoent || rd->extents.size() == 1);
for (vector<ObjectExtent>::iterator ex_it = rd->extents.begin();
ex_it != rd->extents.end();
++ex_it) {
ldout(cct, 10) << "readx " << *ex_it << dendl;
total_bytes_read += ex_it->length;
// get Object cache
sobject_t soid(ex_it->oid, rd->snap);
Object *o = get_object(soid, ex_it->objectno, oset, ex_it->oloc,
ex_it->truncate_size, oset->truncate_seq);
if (external_call)
touch_ob(o);
// does not exist and no hits?
if (oset->return_enoent && !o->exists) {
ldout(cct, 10) << "readx object !exists, 1 extent..." << dendl;
// should we worry about COW underneath us?
if (writeback_handler.may_copy_on_write(soid.oid, ex_it->offset,
ex_it->length, soid.snap)) {
ldout(cct, 20) << "readx may copy on write" << dendl;
bool wait = false;
list<BufferHead*> blist;
for (map<loff_t, BufferHead*>::iterator bh_it = o->data.begin();
bh_it != o->data.end();
++bh_it) {
BufferHead *bh = bh_it->second;
if (bh->is_dirty() || bh->is_tx()) {
ldout(cct, 10) << "readx flushing " << *bh << dendl;
wait = true;
if (bh->is_dirty()) {
if (scattered_write)
blist.push_back(bh);
else
bh_write(bh, *trace);
}
}
}
if (scattered_write && !blist.empty())
bh_write_scattered(blist);
if (wait) {
ldout(cct, 10) << "readx waiting on tid " << o->last_write_tid
<< " on " << *o << dendl;
o->waitfor_commit[o->last_write_tid].push_back(
new C_RetryRead(this,rd, oset, onfinish, *trace));
// FIXME: perfcounter!
return 0;
}
}
// can we return ENOENT?
bool allzero = true;
for (map<loff_t, BufferHead*>::iterator bh_it = o->data.begin();
bh_it != o->data.end();
++bh_it) {
ldout(cct, 20) << "readx ob has bh " << *bh_it->second << dendl;
if (!bh_it->second->is_zero() && !bh_it->second->is_rx()) {
allzero = false;
break;
}
}
if (allzero) {
ldout(cct, 10) << "readx ob has all zero|rx, returning ENOENT"
<< dendl;
delete rd;
if (dontneed)
bottouch_ob(o);
return -ENOENT;
}
}
// map extent into bufferheads
map<loff_t, BufferHead*> hits, missing, rx, errors;
o->map_read(*ex_it, hits, missing, rx, errors);
if (external_call) {
// retry reading error buffers
missing.insert(errors.begin(), errors.end());
} else {
// some reads had errors, fail later so completions
// are cleaned up properly
// TODO: make read path not call _readx for every completion
hits.insert(errors.begin(), errors.end());
}
if (!missing.empty() || !rx.empty()) {
// read missing
map<loff_t, BufferHead*>::iterator last = missing.end();
for (map<loff_t, BufferHead*>::iterator bh_it = missing.begin();
bh_it != missing.end();
++bh_it) {
uint64_t rx_bytes = static_cast<uint64_t>(
stat_rx + bh_it->second->length());
bytes_not_in_cache += bh_it->second->length();
if (!waitfor_read.empty() || (stat_rx > 0 && rx_bytes > max_size)) {
// cache is full with concurrent reads -- wait for rx's to complete
// to constrain memory growth (especially during copy-ups)
if (success) {
ldout(cct, 10) << "readx missed, waiting on cache to complete "
<< waitfor_read.size() << " blocked reads, "
<< (std::max(rx_bytes, max_size) - max_size)
<< " read bytes" << dendl;
waitfor_read.push_back(new C_RetryRead(this, rd, oset, onfinish,
*trace));
}
bh_remove(o, bh_it->second);
delete bh_it->second;
} else {
bh_it->second->set_nocache(nocache);
bh_read(bh_it->second, rd->fadvise_flags, *trace);
if ((success && onfinish) || last != missing.end())
last = bh_it;
}
success = false;
}
// add the waiter to the last missing bh to avoid waking up early, since
// reads complete in order
if (last != missing.end()) {
ldout(cct, 10) << "readx missed, waiting on " << *last->second
<< " off " << last->first << dendl;
last->second->waitfor_read[last->first].push_back(
new C_RetryRead(this, rd, oset, onfinish, *trace) );
}
// bump rx
for (map<loff_t, BufferHead*>::iterator bh_it = rx.begin();
bh_it != rx.end();
++bh_it) {
touch_bh(bh_it->second); // bump in lru, so we don't lose it.
if (success && onfinish) {
ldout(cct, 10) << "readx missed, waiting on " << *bh_it->second
<< " off " << bh_it->first << dendl;
bh_it->second->waitfor_read[bh_it->first].push_back(
new C_RetryRead(this, rd, oset, onfinish, *trace) );
}
bytes_not_in_cache += bh_it->second->length();
success = false;
}
for (map<loff_t, BufferHead*>::iterator bh_it = hits.begin();
bh_it != hits.end(); ++bh_it)
// bump in lru, so we don't lose it on a later read
touch_bh(bh_it->second);
} else {
ceph_assert(!hits.empty());
// make a plain list
for (map<loff_t, BufferHead*>::iterator bh_it = hits.begin();
bh_it != hits.end();
++bh_it) {
BufferHead *bh = bh_it->second;
ldout(cct, 10) << "readx hit bh " << *bh << dendl;
if (bh->is_error() && bh->error)
error = bh->error;
bytes_in_cache += bh->length();
if (bh->get_nocache() && bh->is_clean())
bh_lru_rest.lru_bottouch(bh);
else
touch_bh(bh);
// must come after touch_bh because touch_bh sets dontneed to false
if (dontneed &&
((loff_t)ex_it->offset <= bh->start() &&
(bh->end() <=(loff_t)(ex_it->offset + ex_it->length)))) {
bh->set_dontneed(true); //if dirty
if (bh->is_clean())
bh_lru_rest.lru_bottouch(bh);
}
}
if (!error) {
// create reverse map of buffer offset -> object for the
// eventual result. this is over a single ObjectExtent, so we
// know that
// - the bh's are contiguous
// - the buffer frags need not be (and almost certainly aren't)
loff_t opos = ex_it->offset;
map<loff_t, BufferHead*>::iterator bh_it = hits.begin();
ceph_assert(bh_it->second->start() <= opos);
uint64_t bhoff = opos - bh_it->second->start();
vector<pair<uint64_t,uint64_t> >::iterator f_it
= ex_it->buffer_extents.begin();
uint64_t foff = 0;
while (1) {
BufferHead *bh = bh_it->second;
ceph_assert(opos == (loff_t)(bh->start() + bhoff));
uint64_t len = std::min(f_it->second - foff, bh->length() - bhoff);
ldout(cct, 10) << "readx rmap opos " << opos << ": " << *bh << " +"
<< bhoff << " frag " << f_it->first << "~"
<< f_it->second << " +" << foff << "~" << len
<< dendl;
bufferlist bit;
// put substr here first, since substr_of clobbers, and we
// may get multiple bh's at this stripe_map position
if (bh->is_zero()) {
stripe_map[f_it->first].append_zero(len);
} else {
bit.substr_of(bh->bl,
opos - bh->start(),
len);
stripe_map[f_it->first].claim_append(bit);
}
opos += len;
bhoff += len;
foff += len;
if (opos == bh->end()) {
++bh_it;
bhoff = 0;
}
if (foff == f_it->second) {
++f_it;
foff = 0;
}
if (bh_it == hits.end()) break;
if (f_it == ex_it->buffer_extents.end())
break;
}
ceph_assert(f_it == ex_it->buffer_extents.end());
ceph_assert(opos == (loff_t)ex_it->offset + (loff_t)ex_it->length);
}
if (dontneed && o->include_all_cached_data(ex_it->offset, ex_it->length))
bottouch_ob(o);
}
}
if (!success) {
if (perfcounter && external_call) {
perfcounter->inc(l_objectcacher_data_read, total_bytes_read);
perfcounter->inc(l_objectcacher_cache_bytes_miss, bytes_not_in_cache);
perfcounter->inc(l_objectcacher_cache_ops_miss);
}
if (onfinish) {
ldout(cct, 20) << "readx defer " << rd << dendl;
} else {
ldout(cct, 20) << "readx drop " << rd << " (no complete, but no waiter)"
<< dendl;
delete rd;
}
return 0; // wait!
}
if (perfcounter && external_call) {
perfcounter->inc(l_objectcacher_data_read, total_bytes_read);
perfcounter->inc(l_objectcacher_cache_bytes_hit, bytes_in_cache);
perfcounter->inc(l_objectcacher_cache_ops_hit);
}
// no misses... success! do the read.
ldout(cct, 10) << "readx has all buffers" << dendl;
// ok, assemble into result buffer.
uint64_t pos = 0;
if (rd->bl && !error) {
rd->bl->clear();
for (map<uint64_t,bufferlist>::iterator i = stripe_map.begin();
i != stripe_map.end();
++i) {
ceph_assert(pos == i->first);
ldout(cct, 10) << "readx adding buffer len " << i->second.length()
<< " at " << pos << dendl;
pos += i->second.length();
rd->bl->claim_append(i->second);
ceph_assert(rd->bl->length() == pos);
}
ldout(cct, 10) << "readx result is " << rd->bl->length() << dendl;
} else if (!error) {
ldout(cct, 10) << "readx no bufferlist ptr (readahead?), done." << dendl;
map<uint64_t,bufferlist>::reverse_iterator i = stripe_map.rbegin();
pos = i->first + i->second.length();
}
// done with read.
int ret = error ? error : pos;
ldout(cct, 20) << "readx done " << rd << " " << ret << dendl;
ceph_assert(pos <= (uint64_t) INT_MAX);
delete rd;
trim();
return ret;
}
void ObjectCacher::retry_waiting_reads()
{
list<Context *> ls;
ls.swap(waitfor_read);
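// complete the blocked readers in order; if a retried read blocks again
// (repopulating waitfor_read), stop and splice the remainder back so the
// original ordering is preserved.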
while (!ls.empty() && waitfor_read.empty()) {
Context *ctx = ls.front();
ls.pop_front();
ctx->complete(0);
}
waitfor_read.splice(waitfor_read.end(), ls);
}
int ObjectCacher::writex(OSDWrite *wr, ObjectSet *oset, Context *onfreespace,
ZTracer::Trace *parent_trace)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph::real_time now = ceph::real_clock::now();
uint64_t bytes_written = 0;
uint64_t bytes_written_in_flush = 0;
bool dontneed = wr->fadvise_flags & LIBRADOS_OP_FLAG_FADVISE_DONTNEED;
bool nocache = wr->fadvise_flags & LIBRADOS_OP_FLAG_FADVISE_NOCACHE;
ZTracer::Trace trace;
if (parent_trace != nullptr) {
trace.init("write", &trace_endpoint, parent_trace);
trace.event("start");
}
list<Context*> wait_for_reads;
for (vector<ObjectExtent>::iterator ex_it = wr->extents.begin();
ex_it != wr->extents.end();
++ex_it) {
// get object cache
sobject_t soid(ex_it->oid, CEPH_NOSNAP);
Object *o = get_object(soid, ex_it->objectno, oset, ex_it->oloc,
ex_it->truncate_size, oset->truncate_seq);
// map it all into a single bufferhead.
BufferHead *bh = o->map_write(*ex_it, wr->journal_tid);
bool missing = bh->is_missing();
bh->snapc = wr->snapc;
// readers that need to be woken up due to an overwrite
for (auto& [_, wait_for_read] : bh->waitfor_read) {
wait_for_reads.splice(wait_for_reads.end(), wait_for_read);
}
bh->waitfor_read.clear();
bytes_written += ex_it->length;
if (bh->is_tx()) {
bytes_written_in_flush += ex_it->length;
}
// adjust buffer pointers (ie "copy" data into my cache)
// this is over a single ObjectExtent, so we know that
// - there is one contiguous bh
// - the buffer frags need not be (and almost certainly aren't)
// note: i assume striping is monotonic... no jumps backwards, ever!
loff_t opos = ex_it->offset;
for (vector<pair<uint64_t, uint64_t> >::iterator f_it
= ex_it->buffer_extents.begin();
f_it != ex_it->buffer_extents.end();
++f_it) {
ldout(cct, 10) << "writex writing " << f_it->first << "~"
<< f_it->second << " into " << *bh << " at " << opos
<< dendl;
uint64_t bhoff = opos - bh->start();
ceph_assert(f_it->second <= bh->length() - bhoff);
// get the frag we're mapping in
bufferlist frag;
frag.substr_of(wr->bl, f_it->first, f_it->second);
// keep anything left of bhoff
if (!bhoff)
bh->bl.swap(frag);
else
bh->bl.claim_append(frag);
opos += f_it->second;
}
// ok, now bh is dirty.
mark_dirty(bh);
if (dontneed)
bh->set_dontneed(true);
else if (nocache && missing)
bh->set_nocache(true);
else
touch_bh(bh);
bh->last_write = now;
o->try_merge_bh(bh);
}
if (perfcounter) {
perfcounter->inc(l_objectcacher_data_written, bytes_written);
if (bytes_written_in_flush) {
perfcounter->inc(l_objectcacher_overwritten_in_flush,
bytes_written_in_flush);
}
}
int r = _wait_for_write(wr, bytes_written, oset, &trace, onfreespace);
delete wr;
finish_contexts(cct, wait_for_reads, 0);
//verify_stats();
trim();
return r;
}
class ObjectCacher::C_WaitForWrite : public Context {
public:
C_WaitForWrite(ObjectCacher *oc, uint64_t len,
const ZTracer::Trace &trace, Context *onfinish) :
m_oc(oc), m_len(len), m_trace(trace), m_onfinish(onfinish) {}
void finish(int r) override;
private:
ObjectCacher *m_oc;
uint64_t m_len;
ZTracer::Trace m_trace;
Context *m_onfinish;
};
void ObjectCacher::C_WaitForWrite::finish(int r)
{
std::lock_guard l(m_oc->lock);
m_oc->_maybe_wait_for_writeback(m_len, &m_trace);
m_onfinish->complete(r);
}
void ObjectCacher::_maybe_wait_for_writeback(uint64_t len,
ZTracer::Trace *trace)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph::mono_time start = ceph::mono_clock::now();
int blocked = 0;
// wait for writeback?
// - wait for dirty and tx bytes (relative to the max_dirty threshold)
// - do not wait for bytes other waiters are waiting on. this means that
// threads do not wait for each other. this effectively allows the cache
// size to balloon proportional to the data that is in flight.
uint64_t max_dirty_bh = max_dirty >> BUFFER_MEMORY_WEIGHT;
while (get_stat_dirty() + get_stat_tx() > 0 &&
(((uint64_t)(get_stat_dirty() + get_stat_tx()) >=
max_dirty + get_stat_dirty_waiting()) ||
(dirty_or_tx_bh.size() >=
max_dirty_bh + get_stat_nr_dirty_waiters()))) {
if (blocked == 0) {
trace->event("start wait for writeback");
}
ldout(cct, 10) << __func__ << " waiting for dirty|tx "
<< (get_stat_dirty() + get_stat_tx()) << " >= max "
<< max_dirty << " + dirty_waiting "
<< get_stat_dirty_waiting() << dendl;
flusher_cond.notify_all();
stat_dirty_waiting += len;
++stat_nr_dirty_waiters;
std::unique_lock l{lock, std::adopt_lock};
stat_cond.wait(l);
l.release();
stat_dirty_waiting -= len;
--stat_nr_dirty_waiters;
++blocked;
ldout(cct, 10) << __func__ << " woke up" << dendl;
}
if (blocked > 0) {
trace->event("finish wait for writeback");
}
if (blocked && perfcounter) {
perfcounter->inc(l_objectcacher_write_ops_blocked);
perfcounter->inc(l_objectcacher_write_bytes_blocked, len);
ceph::timespan blocked = ceph::mono_clock::now() - start;
perfcounter->tinc(l_objectcacher_write_time_blocked, blocked);
}
}
// blocking wait for write.
int ObjectCacher::_wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset,
ZTracer::Trace *trace, Context *onfreespace)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(trace != nullptr);
int ret = 0;
if (max_dirty > 0 && !(wr->fadvise_flags & LIBRADOS_OP_FLAG_FADVISE_FUA)) {
if (block_writes_upfront) {
_maybe_wait_for_writeback(len, trace);
if (onfreespace)
onfreespace->complete(0);
} else {
ceph_assert(onfreespace);
finisher.queue(new C_WaitForWrite(this, len, *trace, onfreespace));
}
} else {
// write-thru! flush what we just wrote.
ceph::condition_variable cond;
bool done = false;
Context *fin = block_writes_upfront ?
new C_Cond(cond, &done, &ret) : onfreespace;
ceph_assert(fin);
bool flushed = flush_set(oset, wr->extents, trace, fin);
ceph_assert(!flushed); // we just dirtied it, and didn't drop our lock!
ldout(cct, 10) << "wait_for_write waiting on write-thru of " << len
<< " bytes" << dendl;
if (block_writes_upfront) {
std::unique_lock l{lock, std::adopt_lock};
cond.wait(l, [&done] { return done; });
l.release();
ldout(cct, 10) << "wait_for_write woke up, ret " << ret << dendl;
if (onfreespace)
onfreespace->complete(ret);
}
}
// start writeback anyway?
if (get_stat_dirty() > 0 && (uint64_t) get_stat_dirty() > target_dirty) {
ldout(cct, 10) << "wait_for_write " << get_stat_dirty() << " > target "
<< target_dirty << ", nudging flusher" << dendl;
flusher_cond.notify_all();
}
return ret;
}
void ObjectCacher::flusher_entry()
{
ldout(cct, 10) << "flusher start" << dendl;
std::unique_lock l{lock};
while (!flusher_stop) {
loff_t all = get_stat_tx() + get_stat_rx() + get_stat_clean() +
get_stat_dirty();
ldout(cct, 11) << "flusher "
<< all << " / " << max_size << ": "
<< get_stat_tx() << " tx, "
<< get_stat_rx() << " rx, "
<< get_stat_clean() << " clean, "
<< get_stat_dirty() << " dirty ("
<< target_dirty << " target, "
<< max_dirty << " max)"
<< dendl;
loff_t actual = get_stat_dirty() + get_stat_dirty_waiting();
ZTracer::Trace trace;
if (cct->_conf->osdc_blkin_trace_all) {
trace.init("flusher", &trace_endpoint);
trace.event("start");
}
if (actual > 0 && (uint64_t) actual > target_dirty) {
// flush some dirty pages
ldout(cct, 10) << "flusher " << get_stat_dirty() << " dirty + "
<< get_stat_dirty_waiting() << " dirty_waiting > target "
<< target_dirty << ", flushing some dirty bhs" << dendl;
flush(&trace, actual - target_dirty);
} else {
// check tail of lru for old dirty items
ceph::real_time cutoff = ceph::real_clock::now();
cutoff -= max_dirty_age;
BufferHead *bh = 0;
int max = MAX_FLUSH_UNDER_LOCK;
while ((bh = static_cast<BufferHead*>(bh_lru_dirty.
lru_get_next_expire())) != 0 &&
bh->last_write <= cutoff &&
max > 0) {
ldout(cct, 10) << "flusher flushing aged dirty bh " << *bh << dendl;
if (scattered_write) {
bh_write_adjacencies(bh, cutoff, NULL, &max);
} else {
bh_write(bh, trace);
--max;
}
}
if (!max) {
// back off the lock to avoid starving other threads
trace.event("backoff");
l.unlock();
l.lock();
continue;
}
}
trace.event("finish");
if (flusher_stop)
break;
flusher_cond.wait_for(l, 1s);
}
/* Wait for reads to finish. This is only possible if handling
* -ENOENT made some read completions finish before their rados read
* came back. If we don't wait for them, and destroy the cache, when
* the rados reads do come back their callback will try to access the
* no-longer-valid ObjectCacher.
*/
read_cond.wait(l, [this] {
if (reads_outstanding > 0) {
ldout(cct, 10) << "Waiting for all reads to complete. Number left: "
<< reads_outstanding << dendl;
return false;
} else {
return true;
}
});
ldout(cct, 10) << "flusher finish" << dendl;
}
// -------------------------------------------------
bool ObjectCacher::set_is_empty(ObjectSet *oset)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (oset->objects.empty())
return true;
for (xlist<Object*>::iterator p = oset->objects.begin(); !p.end(); ++p)
if (!(*p)->is_empty())
return false;
return true;
}
bool ObjectCacher::set_is_cached(ObjectSet *oset)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (oset->objects.empty())
return false;
for (xlist<Object*>::iterator p = oset->objects.begin();
!p.end(); ++p) {
Object *ob = *p;
for (map<loff_t,BufferHead*>::iterator q = ob->data.begin();
q != ob->data.end();
++q) {
BufferHead *bh = q->second;
if (!bh->is_dirty() && !bh->is_tx())
return true;
}
}
return false;
}
bool ObjectCacher::set_is_dirty_or_committing(ObjectSet *oset)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (oset->objects.empty())
return false;
for (xlist<Object*>::iterator i = oset->objects.begin();
!i.end(); ++i) {
Object *ob = *i;
for (map<loff_t,BufferHead*>::iterator p = ob->data.begin();
p != ob->data.end();
++p) {
BufferHead *bh = p->second;
if (bh->is_dirty() || bh->is_tx())
return true;
}
}
return false;
}
// purge. non-blocking. violently removes dirty buffers from cache.
void ObjectCacher::purge(Object *ob)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << "purge " << *ob << dendl;
ob->truncate(0);
}
// flush. non-blocking. no callback.
// true if clean, already flushed.
// false if we wrote something.
// be sloppy about the ranges and flush any buffer it touches
bool ObjectCacher::flush(Object *ob, loff_t offset, loff_t length,
ZTracer::Trace *trace)
{
ceph_assert(trace != nullptr);
ceph_assert(ceph_mutex_is_locked(lock));
list<BufferHead*> blist;
bool clean = true;
ldout(cct, 10) << "flush " << *ob << " " << offset << "~" << length << dendl;
for (map<loff_t,BufferHead*>::const_iterator p = ob->data_lower_bound(offset);
p != ob->data.end();
++p) {
BufferHead *bh = p->second;
ldout(cct, 20) << "flush " << *bh << dendl;
if (length && bh->start() > offset+length) {
break;
}
if (bh->is_tx()) {
clean = false;
continue;
}
if (!bh->is_dirty()) {
continue;
}
if (scattered_write)
blist.push_back(bh);
else
bh_write(bh, *trace);
clean = false;
}
if (scattered_write && !blist.empty())
bh_write_scattered(blist);
return clean;
}
bool ObjectCacher::_flush_set_finish(C_GatherBuilder *gather,
Context *onfinish)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (gather->has_subs()) {
gather->set_finisher(onfinish);
gather->activate();
return false;
}
ldout(cct, 10) << "flush_set has no dirty|tx bhs" << dendl;
onfinish->complete(0);
return true;
}
// flush. non-blocking, takes callback.
// returns true if already flushed
bool ObjectCacher::flush_set(ObjectSet *oset, Context *onfinish)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(onfinish != NULL);
if (oset->objects.empty()) {
ldout(cct, 10) << "flush_set on " << oset << " dne" << dendl;
onfinish->complete(0);
return true;
}
ldout(cct, 10) << "flush_set " << oset << dendl;
// we'll need to wait for all objects to flush!
C_GatherBuilder gather(cct);
set<Object*> waitfor_commit;
list<BufferHead*> blist;
Object *last_ob = NULL;
set<BufferHead*, BufferHead::ptr_lt>::const_iterator it, p, q;
// Buffer heads in dirty_or_tx_bh are sorted in ObjectSet/Object/offset
// order. But items in oset->objects are not sorted. So the iterator can
// point to any buffer head in the ObjectSet
BufferHead key(*oset->objects.begin());
it = dirty_or_tx_bh.lower_bound(&key);
p = q = it;
bool backwards = true;
if (it != dirty_or_tx_bh.begin())
--it;
else
backwards = false;
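// forward pass: flush dirty bhs from the lower bound onward; the
// backward pass below walks from just before the lower bound to catch
// bhs of this oset that sort earlier than the key.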
for (; p != dirty_or_tx_bh.end(); p = q) {
++q;
BufferHead *bh = *p;
if (bh->ob->oset != oset)
break;
waitfor_commit.insert(bh->ob);
if (bh->is_dirty()) {
if (scattered_write) {
if (last_ob != bh->ob) {
if (!blist.empty()) {
bh_write_scattered(blist);
blist.clear();
}
last_ob = bh->ob;
}
blist.push_back(bh);
} else {
bh_write(bh, {});
}
}
}
if (backwards) {
for(p = q = it; true; p = q) {
if (q != dirty_or_tx_bh.begin())
--q;
else
backwards = false;
BufferHead *bh = *p;
if (bh->ob->oset != oset)
break;
waitfor_commit.insert(bh->ob);
if (bh->is_dirty()) {
if (scattered_write) {
if (last_ob != bh->ob) {
if (!blist.empty()) {
bh_write_scattered(blist);
blist.clear();
}
last_ob = bh->ob;
}
blist.push_front(bh);
} else {
bh_write(bh, {});
}
}
if (!backwards)
break;
}
}
if (scattered_write && !blist.empty())
bh_write_scattered(blist);
for (set<Object*>::iterator i = waitfor_commit.begin();
i != waitfor_commit.end(); ++i) {
Object *ob = *i;
// we'll need to gather...
ldout(cct, 10) << "flush_set " << oset << " will wait for ack tid "
<< ob->last_write_tid << " on " << *ob << dendl;
ob->waitfor_commit[ob->last_write_tid].push_back(gather.new_sub());
}
return _flush_set_finish(&gather, onfinish);
}
// flush. non-blocking, takes callback.
// returns true if already flushed
bool ObjectCacher::flush_set(ObjectSet *oset, vector<ObjectExtent>& exv,
ZTracer::Trace *trace, Context *onfinish)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(trace != nullptr);
ceph_assert(onfinish != NULL);
if (oset->objects.empty()) {
ldout(cct, 10) << "flush_set on " << oset << " dne" << dendl;
onfinish->complete(0);
return true;
}
ldout(cct, 10) << "flush_set " << oset << " on " << exv.size()
<< " ObjectExtents" << dendl;
// we'll need to wait for all objects to flush!
C_GatherBuilder gather(cct);
for (vector<ObjectExtent>::iterator p = exv.begin();
p != exv.end();
++p) {
ObjectExtent &ex = *p;
sobject_t soid(ex.oid, CEPH_NOSNAP);
if (objects[oset->poolid].count(soid) == 0)
continue;
Object *ob = objects[oset->poolid][soid];
ldout(cct, 20) << "flush_set " << oset << " ex " << ex << " ob " << soid
<< " " << ob << dendl;
if (!flush(ob, ex.offset, ex.length, trace)) {
// we'll need to gather...
ldout(cct, 10) << "flush_set " << oset << " will wait for ack tid "
<< ob->last_write_tid << " on " << *ob << dendl;
ob->waitfor_commit[ob->last_write_tid].push_back(gather.new_sub());
}
}
return _flush_set_finish(&gather, onfinish);
}
// flush all dirty data. non-blocking, takes callback.
// returns true if already flushed
bool ObjectCacher::flush_all(Context *onfinish)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(onfinish != NULL);
ldout(cct, 10) << "flush_all " << dendl;
// we'll need to wait for all objects to flush!
C_GatherBuilder gather(cct);
set<Object*> waitfor_commit;
list<BufferHead*> blist;
Object *last_ob = NULL;
set<BufferHead*, BufferHead::ptr_lt>::iterator next, it;
next = it = dirty_or_tx_bh.begin();
while (it != dirty_or_tx_bh.end()) {
++next;
BufferHead *bh = *it;
waitfor_commit.insert(bh->ob);
if (bh->is_dirty()) {
if (scattered_write) {
if (last_ob != bh->ob) {
if (!blist.empty()) {
bh_write_scattered(blist);
blist.clear();
}
last_ob = bh->ob;
}
blist.push_back(bh);
} else {
bh_write(bh, {});
}
}
it = next;
}
if (scattered_write && !blist.empty())
bh_write_scattered(blist);
for (set<Object*>::iterator i = waitfor_commit.begin();
i != waitfor_commit.end();
++i) {
Object *ob = *i;
// we'll need to gather...
ldout(cct, 10) << "flush_all will wait for ack tid "
<< ob->last_write_tid << " on " << *ob << dendl;
ob->waitfor_commit[ob->last_write_tid].push_back(gather.new_sub());
}
return _flush_set_finish(&gather, onfinish);
}
void ObjectCacher::purge_set(ObjectSet *oset)
{
ceph_assert(ceph_mutex_is_locked(lock));
if (oset->objects.empty()) {
ldout(cct, 10) << "purge_set on " << oset << " dne" << dendl;
return;
}
ldout(cct, 10) << "purge_set " << oset << dendl;
const bool were_dirty = oset->dirty_or_tx > 0;
for (xlist<Object*>::iterator i = oset->objects.begin();
!i.end(); ++i) {
Object *ob = *i;
purge(ob);
}
// Although we have purged rather than flushed, the caller should still
// drop any resources associated with dirty data.
ceph_assert(oset->dirty_or_tx == 0);
if (flush_set_callback && were_dirty) {
flush_set_callback(flush_set_callback_arg, oset);
}
}
loff_t ObjectCacher::release(Object *ob)
{
ceph_assert(ceph_mutex_is_locked(lock));
list<BufferHead*> clean;
loff_t o_unclean = 0;
for (map<loff_t,BufferHead*>::iterator p = ob->data.begin();
p != ob->data.end();
++p) {
BufferHead *bh = p->second;
if (bh->is_clean() || bh->is_zero() || bh->is_error())
clean.push_back(bh);
else
o_unclean += bh->length();
}
for (list<BufferHead*>::iterator p = clean.begin();
p != clean.end();
++p) {
bh_remove(ob, *p);
delete *p;
}
if (ob->can_close()) {
ldout(cct, 10) << "release trimming " << *ob << dendl;
close_object(ob);
ceph_assert(o_unclean == 0);
return 0;
}
if (ob->complete) {
ldout(cct, 10) << "release clearing complete on " << *ob << dendl;
ob->complete = false;
}
if (!ob->exists) {
ldout(cct, 10) << "release setting exists on " << *ob << dendl;
ob->exists = true;
}
return o_unclean;
}
loff_t ObjectCacher::release_set(ObjectSet *oset)
{
ceph_assert(ceph_mutex_is_locked(lock));
// return # bytes not clean (and thus not released).
loff_t unclean = 0;
if (oset->objects.empty()) {
ldout(cct, 10) << "release_set on " << oset << " dne" << dendl;
return 0;
}
ldout(cct, 10) << "release_set " << oset << dendl;
xlist<Object*>::iterator q;
for (xlist<Object*>::iterator p = oset->objects.begin();
!p.end(); ) {
q = p;
++q;
Object *ob = *p;
loff_t o_unclean = release(ob);
unclean += o_unclean;
if (o_unclean)
ldout(cct, 10) << "release_set " << oset << " " << *ob
<< " has " << o_unclean << " bytes left"
<< dendl;
p = q;
}
if (unclean) {
ldout(cct, 10) << "release_set " << oset
<< ", " << unclean << " bytes left" << dendl;
}
return unclean;
}
uint64_t ObjectCacher::release_all()
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << "release_all" << dendl;
uint64_t unclean = 0;
vector<ceph::unordered_map<sobject_t, Object*> >::iterator i
= objects.begin();
while (i != objects.end()) {
ceph::unordered_map<sobject_t, Object*>::iterator p = i->begin();
while (p != i->end()) {
ceph::unordered_map<sobject_t, Object*>::iterator n = p;
++n;
Object *ob = p->second;
loff_t o_unclean = release(ob);
unclean += o_unclean;
if (o_unclean)
ldout(cct, 10) << "release_all " << *ob
<< " has " << o_unclean << " bytes left"
<< dendl;
p = n;
}
++i;
}
if (unclean) {
ldout(cct, 10) << "release_all unclean " << unclean << " bytes left"
<< dendl;
}
return unclean;
}
void ObjectCacher::clear_nonexistence(ObjectSet *oset)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << "clear_nonexistence() " << oset << dendl;
for (xlist<Object*>::iterator p = oset->objects.begin();
!p.end(); ++p) {
Object *ob = *p;
if (!ob->exists) {
ldout(cct, 10) << " setting exists and complete on " << *ob << dendl;
ob->exists = true;
ob->complete = false;
}
for (xlist<C_ReadFinish*>::iterator q = ob->reads.begin();
!q.end(); ++q) {
C_ReadFinish *comp = *q;
comp->distrust_enoent();
}
}
}
/**
* discard object extents from an ObjectSet by removing the objects in
* exls from the in-memory oset.
*/
void ObjectCacher::discard_set(ObjectSet *oset, const vector<ObjectExtent>& exls)
{
ceph_assert(ceph_mutex_is_locked(lock));
bool was_dirty = oset->dirty_or_tx > 0;
_discard(oset, exls, nullptr);
_discard_finish(oset, was_dirty, nullptr);
}
/**
* discard object extents from an ObjectSet by removing the objects in
* exls from the in-memory oset. If the bh is in TX state, the discard
* will wait for the write to commit prior to invoking on_finish.
*/
void ObjectCacher::discard_writeback(ObjectSet *oset,
const vector<ObjectExtent>& exls,
Context* on_finish)
{
ceph_assert(ceph_mutex_is_locked(lock));
bool was_dirty = oset->dirty_or_tx > 0;
C_GatherBuilder gather(cct);
_discard(oset, exls, &gather);
if (gather.has_subs()) {
bool flushed = was_dirty && oset->dirty_or_tx == 0;
gather.set_finisher(new LambdaContext(
[this, oset, flushed, on_finish](int) {
ceph_assert(ceph_mutex_is_locked(lock));
if (flushed && flush_set_callback)
flush_set_callback(flush_set_callback_arg, oset);
if (on_finish)
on_finish->complete(0);
}));
gather.activate();
return;
}
_discard_finish(oset, was_dirty, on_finish);
}
void ObjectCacher::_discard(ObjectSet *oset, const vector<ObjectExtent>& exls,
C_GatherBuilder* gather)
{
if (oset->objects.empty()) {
ldout(cct, 10) << __func__ << " on " << oset << " dne" << dendl;
return;
}
ldout(cct, 10) << __func__ << " " << oset << dendl;
for (auto& ex : exls) {
ldout(cct, 10) << __func__ << " " << oset << " ex " << ex << dendl;
sobject_t soid(ex.oid, CEPH_NOSNAP);
if (objects[oset->poolid].count(soid) == 0)
continue;
Object *ob = objects[oset->poolid][soid];
ob->discard(ex.offset, ex.length, gather);
}
}
void ObjectCacher::_discard_finish(ObjectSet *oset, bool was_dirty,
Context* on_finish)
{
ceph_assert(ceph_mutex_is_locked(lock));
// did we truncate off dirty data?
if (flush_set_callback && was_dirty && oset->dirty_or_tx == 0) {
flush_set_callback(flush_set_callback_arg, oset);
}
// notify that in-flight writeback has completed
if (on_finish != nullptr) {
on_finish->complete(0);
}
}
void ObjectCacher::verify_stats() const
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 10) << "verify_stats" << dendl;
loff_t clean = 0, zero = 0, dirty = 0, rx = 0, tx = 0, missing = 0,
error = 0;
for (vector<ceph::unordered_map<sobject_t, Object*> >::const_iterator i
= objects.begin();
i != objects.end();
++i) {
for (ceph::unordered_map<sobject_t, Object*>::const_iterator p
= i->begin();
p != i->end();
++p) {
Object *ob = p->second;
for (map<loff_t, BufferHead*>::const_iterator q = ob->data.begin();
q != ob->data.end();
++q) {
BufferHead *bh = q->second;
switch (bh->get_state()) {
case BufferHead::STATE_MISSING:
missing += bh->length();
break;
case BufferHead::STATE_CLEAN:
clean += bh->length();
break;
case BufferHead::STATE_ZERO:
zero += bh->length();
break;
case BufferHead::STATE_DIRTY:
dirty += bh->length();
break;
case BufferHead::STATE_TX:
tx += bh->length();
break;
case BufferHead::STATE_RX:
rx += bh->length();
break;
case BufferHead::STATE_ERROR:
error += bh->length();
break;
default:
ceph_abort();
}
}
}
}
ldout(cct, 10) << " clean " << clean << " rx " << rx << " tx " << tx
<< " dirty " << dirty << " missing " << missing
<< " error " << error << dendl;
ceph_assert(clean == stat_clean);
ceph_assert(rx == stat_rx);
ceph_assert(tx == stat_tx);
ceph_assert(dirty == stat_dirty);
ceph_assert(missing == stat_missing);
ceph_assert(zero == stat_zero);
ceph_assert(error == stat_error);
}
void ObjectCacher::bh_stat_add(BufferHead *bh)
{
ceph_assert(ceph_mutex_is_locked(lock));
switch (bh->get_state()) {
case BufferHead::STATE_MISSING:
stat_missing += bh->length();
break;
case BufferHead::STATE_CLEAN:
stat_clean += bh->length();
break;
case BufferHead::STATE_ZERO:
stat_zero += bh->length();
break;
case BufferHead::STATE_DIRTY:
stat_dirty += bh->length();
bh->ob->dirty_or_tx += bh->length();
bh->ob->oset->dirty_or_tx += bh->length();
break;
case BufferHead::STATE_TX:
stat_tx += bh->length();
bh->ob->dirty_or_tx += bh->length();
bh->ob->oset->dirty_or_tx += bh->length();
break;
case BufferHead::STATE_RX:
stat_rx += bh->length();
break;
case BufferHead::STATE_ERROR:
stat_error += bh->length();
break;
default:
ceph_abort_msg("bh_stat_add: invalid bufferhead state");
}
if (get_stat_dirty_waiting() > 0)
stat_cond.notify_all();
}
void ObjectCacher::bh_stat_sub(BufferHead *bh)
{
ceph_assert(ceph_mutex_is_locked(lock));
switch (bh->get_state()) {
case BufferHead::STATE_MISSING:
stat_missing -= bh->length();
break;
case BufferHead::STATE_CLEAN:
stat_clean -= bh->length();
break;
case BufferHead::STATE_ZERO:
stat_zero -= bh->length();
break;
case BufferHead::STATE_DIRTY:
stat_dirty -= bh->length();
bh->ob->dirty_or_tx -= bh->length();
bh->ob->oset->dirty_or_tx -= bh->length();
break;
case BufferHead::STATE_TX:
stat_tx -= bh->length();
bh->ob->dirty_or_tx -= bh->length();
bh->ob->oset->dirty_or_tx -= bh->length();
break;
case BufferHead::STATE_RX:
stat_rx -= bh->length();
break;
case BufferHead::STATE_ERROR:
stat_error -= bh->length();
break;
default:
ceph_abort_msg("bh_stat_sub: invalid bufferhead state");
}
}
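// Transition a BufferHead to a new state. Besides swapping the stat
// counters (bh_stat_sub then bh_stat_add), this moves the bh between the
// clean and dirty LRUs, keeps dirty_or_tx_bh in sync so flushing can find
// it, and clears any stale read error when leaving STATE_ERROR.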
void ObjectCacher::bh_set_state(BufferHead *bh, int s)
{
ceph_assert(ceph_mutex_is_locked(lock));
int state = bh->get_state();
// move between lru lists?
if (s == BufferHead::STATE_DIRTY && state != BufferHead::STATE_DIRTY) {
bh_lru_rest.lru_remove(bh);
bh_lru_dirty.lru_insert_top(bh);
  } else if (s != BufferHead::STATE_DIRTY && state == BufferHead::STATE_DIRTY) {
bh_lru_dirty.lru_remove(bh);
if (bh->get_dontneed())
bh_lru_rest.lru_insert_bot(bh);
else
bh_lru_rest.lru_insert_top(bh);
}
if ((s == BufferHead::STATE_TX ||
s == BufferHead::STATE_DIRTY) &&
state != BufferHead::STATE_TX &&
state != BufferHead::STATE_DIRTY) {
dirty_or_tx_bh.insert(bh);
} else if ((state == BufferHead::STATE_TX ||
state == BufferHead::STATE_DIRTY) &&
s != BufferHead::STATE_TX &&
s != BufferHead::STATE_DIRTY) {
dirty_or_tx_bh.erase(bh);
}
if (s != BufferHead::STATE_ERROR &&
state == BufferHead::STATE_ERROR) {
bh->error = 0;
}
// set state
bh_stat_sub(bh);
bh->set_state(s);
bh_stat_add(bh);
}
void ObjectCacher::bh_add(Object *ob, BufferHead *bh)
{
ceph_assert(ceph_mutex_is_locked(lock));
ldout(cct, 30) << "bh_add " << *ob << " " << *bh << dendl;
ob->add_bh(bh);
if (bh->is_dirty()) {
bh_lru_dirty.lru_insert_top(bh);
dirty_or_tx_bh.insert(bh);
} else {
if (bh->get_dontneed())
bh_lru_rest.lru_insert_bot(bh);
else
bh_lru_rest.lru_insert_top(bh);
}
if (bh->is_tx()) {
dirty_or_tx_bh.insert(bh);
}
bh_stat_add(bh);
}
void ObjectCacher::bh_remove(Object *ob, BufferHead *bh)
{
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(bh->get_journal_tid() == 0);
ldout(cct, 30) << "bh_remove " << *ob << " " << *bh << dendl;
ob->remove_bh(bh);
if (bh->is_dirty()) {
bh_lru_dirty.lru_remove(bh);
dirty_or_tx_bh.erase(bh);
} else {
bh_lru_rest.lru_remove(bh);
}
if (bh->is_tx()) {
dirty_or_tx_bh.erase(bh);
}
bh_stat_sub(bh);
if (get_stat_dirty_waiting() > 0)
stat_cond.notify_all();
}
| 79,577 | 27.339744 | 97 | cc |
null | ceph-main/src/osdc/ObjectCacher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OBJECTCACHER_H
#define CEPH_OBJECTCACHER_H
#include "include/types.h"
#include "include/lru.h"
#include "include/Context.h"
#include "include/xlist.h"
#include "include/common_fwd.h"
#include "common/Cond.h"
#include "common/Finisher.h"
#include "common/Thread.h"
#include "common/zipkin_trace.h"
#include "Objecter.h"
#include "Striper.h"
class WritebackHandler;
enum {
l_objectcacher_first = 25000,
l_objectcacher_cache_ops_hit, // ops we satisfy completely from cache
l_objectcacher_cache_ops_miss, // ops we don't satisfy completely from cache
l_objectcacher_cache_bytes_hit, // bytes read directly from cache
l_objectcacher_cache_bytes_miss, // bytes we couldn't read directly
// from cache
l_objectcacher_data_read, // total bytes read out
l_objectcacher_data_written, // bytes written to cache
l_objectcacher_data_flushed, // bytes flushed to WritebackHandler
l_objectcacher_overwritten_in_flush, // bytes overwritten while
// flushing is in progress
l_objectcacher_write_ops_blocked, // total write ops we delayed due
// to dirty limits
l_objectcacher_write_bytes_blocked, // total number of write bytes
// we delayed due to dirty
// limits
l_objectcacher_write_time_blocked, // total time in seconds spent
// blocking a write due to dirty
// limits
l_objectcacher_last,
};
class ObjectCacher {
PerfCounters *perfcounter;
public:
CephContext *cct;
class Object;
struct ObjectSet;
class C_ReadFinish;
typedef void (*flush_set_callback_t) (void *p, ObjectSet *oset);
// read scatter/gather
struct OSDRead {
std::vector<ObjectExtent> extents;
snapid_t snap;
ceph::buffer::list *bl;
int fadvise_flags;
OSDRead(snapid_t s, ceph::buffer::list *b, int f)
: snap(s), bl(b), fadvise_flags(f) {}
};
OSDRead *prepare_read(snapid_t snap, ceph::buffer::list *b, int f) const {
return new OSDRead(snap, b, f);
}
// write scatter/gather
struct OSDWrite {
std::vector<ObjectExtent> extents;
SnapContext snapc;
ceph::buffer::list bl;
ceph::real_time mtime;
int fadvise_flags;
ceph_tid_t journal_tid;
OSDWrite(const SnapContext& sc, const ceph::buffer::list& b, ceph::real_time mt,
int f, ceph_tid_t _journal_tid)
: snapc(sc), bl(b), mtime(mt), fadvise_flags(f),
journal_tid(_journal_tid) {}
};
OSDWrite *prepare_write(const SnapContext& sc,
const ceph::buffer::list &b,
ceph::real_time mt,
int f,
ceph_tid_t journal_tid) const {
return new OSDWrite(sc, b, mt, f, journal_tid);
}
// ******* BufferHead *********
class BufferHead : public LRUObject {
public:
// states
static const int STATE_MISSING = 0;
static const int STATE_CLEAN = 1;
static const int STATE_ZERO = 2; // NOTE: these are *clean* zeros
static const int STATE_DIRTY = 3;
static const int STATE_RX = 4;
static const int STATE_TX = 5;
static const int STATE_ERROR = 6; // a read error occurred
private:
// my fields
int state;
int ref;
struct {
loff_t start, length; // bh extent in object
} ex;
    bool dontneed; //indicates the bh is not needed by anyone
    bool nocache; //indicates the bh is not needed by this caller
public:
Object *ob;
ceph::buffer::list bl;
ceph_tid_t last_write_tid; // version of bh (if non-zero)
ceph_tid_t last_read_tid; // tid of last read op (if any)
ceph::real_time last_write;
SnapContext snapc;
ceph_tid_t journal_tid;
int error; // holds return value for failed reads
std::map<loff_t, std::list<Context*> > waitfor_read;
// cons
explicit BufferHead(Object *o) :
state(STATE_MISSING),
ref(0),
dontneed(false),
nocache(false),
ob(o),
last_write_tid(0),
last_read_tid(0),
journal_tid(0),
error(0) {
ex.start = ex.length = 0;
}
// extent
loff_t start() const { return ex.start; }
void set_start(loff_t s) { ex.start = s; }
loff_t length() const { return ex.length; }
void set_length(loff_t l) { ex.length = l; }
loff_t end() const { return ex.start + ex.length; }
loff_t last() const { return end() - 1; }
// states
void set_state(int s) {
if (s == STATE_RX || s == STATE_TX) get();
if (state == STATE_RX || state == STATE_TX) put();
state = s;
}
int get_state() const { return state; }
inline int get_error() const {
return error;
}
inline void set_error(int _error) {
error = _error;
}
inline ceph_tid_t get_journal_tid() const {
return journal_tid;
}
inline void set_journal_tid(ceph_tid_t _journal_tid) {
journal_tid = _journal_tid;
}
bool is_missing() const { return state == STATE_MISSING; }
bool is_dirty() const { return state == STATE_DIRTY; }
bool is_clean() const { return state == STATE_CLEAN; }
bool is_zero() const { return state == STATE_ZERO; }
bool is_tx() const { return state == STATE_TX; }
bool is_rx() const { return state == STATE_RX; }
bool is_error() const { return state == STATE_ERROR; }
// reference counting
int get() {
ceph_assert(ref >= 0);
if (ref == 0) lru_pin();
return ++ref;
}
int put() {
ceph_assert(ref > 0);
if (ref == 1) lru_unpin();
--ref;
return ref;
}
void set_dontneed(bool v) {
dontneed = v;
}
bool get_dontneed() const {
return dontneed;
}
void set_nocache(bool v) {
nocache = v;
}
bool get_nocache() const {
return nocache;
}
inline bool can_merge_journal(BufferHead *bh) const {
return (get_journal_tid() == bh->get_journal_tid());
}
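    // Strict weak ordering over BufferHead pointers: by ObjectSet, then
    // Object, then start offset. dirty_or_tx_bh uses this ordering so
    // iteration visits buffers of the same object in offset order, which
    // helps scattered writeback batch adjacent buffers.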
struct ptr_lt {
bool operator()(const BufferHead* l, const BufferHead* r) const {
const Object *lob = l->ob;
const Object *rob = r->ob;
const ObjectSet *loset = lob->oset;
const ObjectSet *roset = rob->oset;
if (loset != roset)
return loset < roset;
if (lob != rob)
return lob < rob;
if (l->start() != r->start())
return l->start() < r->start();
return l < r;
}
};
};
// ******* Object *********
class Object : public LRUObject {
private:
// ObjectCacher::Object fields
int ref;
ObjectCacher *oc;
sobject_t oid;
friend struct ObjectSet;
public:
uint64_t object_no;
ObjectSet *oset;
xlist<Object*>::item set_item;
object_locator_t oloc;
uint64_t truncate_size, truncate_seq;
bool complete;
bool exists;
std::map<loff_t, BufferHead*> data;
ceph_tid_t last_write_tid; // version of bh (if non-zero)
ceph_tid_t last_commit_tid; // last update committed.
int dirty_or_tx;
std::map< ceph_tid_t, std::list<Context*> > waitfor_commit;
xlist<C_ReadFinish*> reads;
Object(const Object&) = delete;
Object& operator=(const Object&) = delete;
Object(ObjectCacher *_oc, sobject_t o, uint64_t ono, ObjectSet *os,
object_locator_t& l, uint64_t ts, uint64_t tq) :
ref(0),
oc(_oc),
oid(o), object_no(ono), oset(os), set_item(this), oloc(l),
truncate_size(ts), truncate_seq(tq),
complete(false), exists(true),
last_write_tid(0), last_commit_tid(0),
dirty_or_tx(0) {
// add to set
os->objects.push_back(&set_item);
}
~Object() {
reads.clear();
ceph_assert(ref == 0);
ceph_assert(data.empty());
ceph_assert(dirty_or_tx == 0);
set_item.remove_myself();
}
sobject_t get_soid() const { return oid; }
object_t get_oid() { return oid.oid; }
snapid_t get_snap() { return oid.snap; }
ObjectSet *get_object_set() const { return oset; }
std::string get_namespace() { return oloc.nspace; }
uint64_t get_object_number() const { return object_no; }
const object_locator_t& get_oloc() const { return oloc; }
void set_object_locator(object_locator_t& l) { oloc = l; }
bool can_close() const {
if (lru_is_expireable()) {
ceph_assert(data.empty());
ceph_assert(waitfor_commit.empty());
return true;
}
return false;
}
/**
* Check buffers and waiters for consistency
* - no overlapping buffers
* - index in map matches BH
* - waiters fall within BH
*/
void audit_buffers();
/**
* find first buffer that includes or follows an offset
*
* @param offset object byte offset
* @return iterator pointing to buffer, or data.end()
*/
std::map<loff_t,BufferHead*>::const_iterator data_lower_bound(loff_t offset) const {
auto p = data.lower_bound(offset);
if (p != data.begin() &&
(p == data.end() || p->first > offset)) {
--p; // might overlap!
if (p->first + p->second->length() <= offset)
++p; // doesn't overlap.
}
return p;
}
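    // Illustrative only (not upstream code): mapping scans typically start
    // from data_lower_bound() and walk forward while buffers still overlap
    // the extent, e.g.
    //
    //   auto p = data_lower_bound(ex.offset);
    //   while (p != data.end() &&
    //          p->first < (loff_t)(ex.offset + ex.length)) {
    //     /* classify p->second as hit/miss/rx ... */
    //     ++p;
    //   }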
// bh
// add to my map
void add_bh(BufferHead *bh) {
if (data.empty())
get();
ceph_assert(data.count(bh->start()) == 0);
data[bh->start()] = bh;
}
void remove_bh(BufferHead *bh) {
ceph_assert(data.count(bh->start()));
data.erase(bh->start());
if (data.empty())
put();
}
bool is_empty() const { return data.empty(); }
// mid-level
BufferHead *split(BufferHead *bh, loff_t off);
void merge_left(BufferHead *left, BufferHead *right);
bool can_merge_bh(BufferHead *left, BufferHead *right);
void try_merge_bh(BufferHead *bh);
void maybe_rebuild_buffer(BufferHead *bh);
bool is_cached(loff_t off, loff_t len) const;
bool include_all_cached_data(loff_t off, loff_t len);
int map_read(ObjectExtent &ex,
std::map<loff_t, BufferHead*>& hits,
std::map<loff_t, BufferHead*>& missing,
std::map<loff_t, BufferHead*>& rx,
std::map<loff_t, BufferHead*>& errors);
BufferHead *map_write(ObjectExtent &ex, ceph_tid_t tid);
void replace_journal_tid(BufferHead *bh, ceph_tid_t tid);
void truncate(loff_t s);
void discard(loff_t off, loff_t len, C_GatherBuilder* commit_gather);
// reference counting
int get() {
ceph_assert(ref >= 0);
if (ref == 0) lru_pin();
return ++ref;
}
int put() {
ceph_assert(ref > 0);
if (ref == 1) lru_unpin();
--ref;
return ref;
}
};
struct ObjectSet {
void *parent;
inodeno_t ino;
uint64_t truncate_seq, truncate_size;
int64_t poolid;
xlist<Object*> objects;
int dirty_or_tx;
bool return_enoent;
ObjectSet(void *p, int64_t _poolid, inodeno_t i)
: parent(p), ino(i), truncate_seq(0),
truncate_size(0), poolid(_poolid), dirty_or_tx(0),
return_enoent(false) {}
};
// ******* ObjectCacher *********
// ObjectCacher fields
private:
WritebackHandler& writeback_handler;
bool scattered_write;
std::string name;
ceph::mutex& lock;
uint64_t max_dirty, target_dirty, max_size, max_objects;
ceph::timespan max_dirty_age;
bool block_writes_upfront;
ZTracer::Endpoint trace_endpoint;
flush_set_callback_t flush_set_callback;
void *flush_set_callback_arg;
// indexed by pool_id
std::vector<ceph::unordered_map<sobject_t, Object*> > objects;
std::list<Context*> waitfor_read;
ceph_tid_t last_read_tid;
std::set<BufferHead*, BufferHead::ptr_lt> dirty_or_tx_bh;
LRU bh_lru_dirty, bh_lru_rest;
LRU ob_lru;
ceph::condition_variable flusher_cond;
bool flusher_stop;
void flusher_entry();
class FlusherThread : public Thread {
ObjectCacher *oc;
public:
explicit FlusherThread(ObjectCacher *o) : oc(o) {}
void *entry() override {
oc->flusher_entry();
return 0;
}
} flusher_thread;
Finisher finisher;
// objects
Object *get_object_maybe(sobject_t oid, object_locator_t &l) {
// have it?
if (((uint32_t)l.pool < objects.size()) &&
(objects[l.pool].count(oid)))
return objects[l.pool][oid];
return NULL;
}
Object *get_object(sobject_t oid, uint64_t object_no, ObjectSet *oset,
object_locator_t &l, uint64_t truncate_size,
uint64_t truncate_seq);
void close_object(Object *ob);
// bh stats
ceph::condition_variable stat_cond;
loff_t stat_clean;
loff_t stat_zero;
loff_t stat_dirty;
loff_t stat_rx;
loff_t stat_tx;
loff_t stat_missing;
loff_t stat_error;
loff_t stat_dirty_waiting; // bytes that writers are waiting on to write
size_t stat_nr_dirty_waiters;
void verify_stats() const;
void bh_stat_add(BufferHead *bh);
void bh_stat_sub(BufferHead *bh);
loff_t get_stat_tx() const { return stat_tx; }
loff_t get_stat_rx() const { return stat_rx; }
loff_t get_stat_dirty() const { return stat_dirty; }
loff_t get_stat_clean() const { return stat_clean; }
loff_t get_stat_zero() const { return stat_zero; }
loff_t get_stat_dirty_waiting() const { return stat_dirty_waiting; }
size_t get_stat_nr_dirty_waiters() const { return stat_nr_dirty_waiters; }
void touch_bh(BufferHead *bh) {
if (bh->is_dirty())
bh_lru_dirty.lru_touch(bh);
else
bh_lru_rest.lru_touch(bh);
bh->set_dontneed(false);
bh->set_nocache(false);
touch_ob(bh->ob);
}
void touch_ob(Object *ob) {
ob_lru.lru_touch(ob);
}
void bottouch_ob(Object *ob) {
ob_lru.lru_bottouch(ob);
}
// bh states
void bh_set_state(BufferHead *bh, int s);
void copy_bh_state(BufferHead *bh1, BufferHead *bh2) {
bh_set_state(bh2, bh1->get_state());
}
void mark_missing(BufferHead *bh) {
bh_set_state(bh,BufferHead::STATE_MISSING);
}
void mark_clean(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_CLEAN);
}
void mark_zero(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_ZERO);
}
void mark_rx(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_RX);
}
  void mark_tx(BufferHead *bh) {
    bh_set_state(bh, BufferHead::STATE_TX);
  }
void mark_error(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_ERROR);
}
void mark_dirty(BufferHead *bh) {
bh_set_state(bh, BufferHead::STATE_DIRTY);
bh_lru_dirty.lru_touch(bh);
//bh->set_dirty_stamp(ceph_clock_now());
}
void bh_add(Object *ob, BufferHead *bh);
void bh_remove(Object *ob, BufferHead *bh);
// io
void bh_read(BufferHead *bh, int op_flags,
const ZTracer::Trace &parent_trace);
void bh_write(BufferHead *bh, const ZTracer::Trace &parent_trace);
void bh_write_scattered(std::list<BufferHead*>& blist);
void bh_write_adjacencies(BufferHead *bh, ceph::real_time cutoff,
int64_t *amount, int *max_count);
void trim();
void flush(ZTracer::Trace *trace, loff_t amount=0);
/**
* flush a range of buffers
*
* Flush any buffers that intersect the specified extent. If len==0,
* flush *all* buffers for the object.
*
* @param o object
* @param off start offset
* @param len extent length, or 0 for entire object
* @return true if object was already clean/flushed.
*/
bool flush(Object *o, loff_t off, loff_t len,
ZTracer::Trace *trace);
loff_t release(Object *o);
void purge(Object *o);
int64_t reads_outstanding;
ceph::condition_variable read_cond;
int _readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
bool external_call, ZTracer::Trace *trace);
void retry_waiting_reads();
public:
void bh_read_finish(int64_t poolid, sobject_t oid, ceph_tid_t tid,
loff_t offset, uint64_t length,
ceph::buffer::list &bl, int r,
bool trust_enoent);
void bh_write_commit(int64_t poolid, sobject_t oid,
std::vector<std::pair<loff_t, uint64_t> >& ranges,
ceph_tid_t t, int r);
class C_WriteCommit;
class C_WaitForWrite;
void perf_start();
void perf_stop();
ObjectCacher(CephContext *cct_, std::string name, WritebackHandler& wb, ceph::mutex& l,
flush_set_callback_t flush_callback,
void *flush_callback_arg,
uint64_t max_bytes, uint64_t max_objects,
uint64_t max_dirty, uint64_t target_dirty, double max_age,
bool block_writes_upfront);
~ObjectCacher();
void start() {
flusher_thread.create("flusher");
}
void stop() {
ceph_assert(flusher_thread.is_started());
lock.lock(); // hmm.. watch out for deadlock!
flusher_stop = true;
flusher_cond.notify_all();
lock.unlock();
flusher_thread.join();
}
class C_RetryRead;
// non-blocking. async.
/**
* @note total read size must be <= INT_MAX, since
* the return value is total bytes read
*/
int readx(OSDRead *rd, ObjectSet *oset, Context *onfinish,
ZTracer::Trace *parent_trace = nullptr);
int writex(OSDWrite *wr, ObjectSet *oset, Context *onfreespace,
ZTracer::Trace *parent_trace = nullptr);
bool is_cached(ObjectSet *oset, std::vector<ObjectExtent>& extents,
snapid_t snapid);
private:
// write blocking
int _wait_for_write(OSDWrite *wr, uint64_t len, ObjectSet *oset,
ZTracer::Trace *trace, Context *onfreespace);
void _maybe_wait_for_writeback(uint64_t len, ZTracer::Trace *trace);
bool _flush_set_finish(C_GatherBuilder *gather, Context *onfinish);
void _discard(ObjectSet *oset, const std::vector<ObjectExtent>& exls,
C_GatherBuilder* gather);
void _discard_finish(ObjectSet *oset, bool was_dirty, Context* on_finish);
public:
bool set_is_empty(ObjectSet *oset);
bool set_is_cached(ObjectSet *oset);
bool set_is_dirty_or_committing(ObjectSet *oset);
bool flush_set(ObjectSet *oset, Context *onfinish=0);
bool flush_set(ObjectSet *oset, std::vector<ObjectExtent>& ex,
ZTracer::Trace *trace, Context *onfinish = 0);
bool flush_all(Context *onfinish = 0);
void purge_set(ObjectSet *oset);
// returns # of bytes not released (ie non-clean)
loff_t release_set(ObjectSet *oset);
uint64_t release_all();
void discard_set(ObjectSet *oset, const std::vector<ObjectExtent>& ex);
void discard_writeback(ObjectSet *oset, const std::vector<ObjectExtent>& ex,
Context* on_finish);
/**
* Retry any in-flight reads that get -ENOENT instead of marking
* them zero, and get rid of any cached -ENOENTs.
* After this is called and the cache's lock is unlocked,
* any new requests will treat -ENOENT normally.
*/
void clear_nonexistence(ObjectSet *oset);
// cache sizes
void set_max_dirty(uint64_t v) {
max_dirty = v;
}
void set_target_dirty(int64_t v) {
target_dirty = v;
}
void set_max_size(int64_t v) {
max_size = v;
}
void set_max_dirty_age(double a) {
max_dirty_age = ceph::make_timespan(a);
}
void set_max_objects(int64_t v) {
max_objects = v;
}
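  // Runtime cache limits, roughly: max_dirty is the ceiling writers block
  // against (see _maybe_wait_for_writeback), target_dirty is the level the
  // flusher writes back down to, max_dirty_age bounds how long a buffer may
  // stay dirty, and max_size/max_objects cap the footprint trimmed by trim().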
// file functions
/*** async+caching (non-blocking) file interface ***/
int file_is_cached(ObjectSet *oset, file_layout_t *layout,
snapid_t snapid, loff_t offset, uint64_t len) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, extents);
return is_cached(oset, extents, snapid);
}
int file_read(ObjectSet *oset, file_layout_t *layout, snapid_t snapid,
loff_t offset, uint64_t len, ceph::buffer::list *bl, int flags,
Context *onfinish) {
OSDRead *rd = prepare_read(snapid, bl, flags);
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, rd->extents);
return readx(rd, oset, onfinish);
}
int file_write(ObjectSet *oset, file_layout_t *layout,
const SnapContext& snapc, loff_t offset, uint64_t len,
ceph::buffer::list& bl, ceph::real_time mtime, int flags) {
OSDWrite *wr = prepare_write(snapc, bl, mtime, flags, 0);
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, wr->extents);
return writex(wr, oset, nullptr);
}
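  // Illustrative only: a minimal write-then-flush sketch. `oset`, `layout`,
  // `snapc`, `off` and `bl` are assumptions of the example, and the cacher's
  // lock is assumed to be held by the caller.
  //
  //   oc->file_write(oset, &layout, snapc, off, bl.length(), bl,
  //                  ceph::real_clock::now(), 0);
  //   oc->file_flush(oset, &layout, snapc, off, bl.length(),
  //                  new LambdaContext([](int r) { /* flushed */ }));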
bool file_flush(ObjectSet *oset, file_layout_t *layout,
const SnapContext& snapc, loff_t offset, uint64_t len,
Context *onfinish) {
std::vector<ObjectExtent> extents;
Striper::file_to_extents(cct, oset->ino, layout, offset, len,
oset->truncate_size, extents);
ZTracer::Trace trace;
return flush_set(oset, extents, &trace, onfinish);
}
};
inline std::ostream& operator<<(std::ostream &out,
const ObjectCacher::BufferHead &bh)
{
out << "bh[ " << &bh << " "
<< bh.start() << "~" << bh.length()
<< " " << bh.ob
<< " (" << bh.bl.length() << ")"
<< " v " << bh.last_write_tid;
if (bh.get_journal_tid() != 0) {
out << " j " << bh.get_journal_tid();
}
if (bh.is_tx()) out << " tx";
if (bh.is_rx()) out << " rx";
if (bh.is_dirty()) out << " dirty";
if (bh.is_clean()) out << " clean";
if (bh.is_zero()) out << " zero";
if (bh.is_missing()) out << " missing";
if (bh.bl.length() > 0) out << " firstbyte=" << (int)bh.bl[0];
if (bh.error) out << " error=" << bh.error;
out << "]";
out << " waiters = {";
for (auto it = bh.waitfor_read.begin(); it != bh.waitfor_read.end(); ++it) {
out << " " << it->first << "->[";
for (auto lit = it->second.begin();
lit != it->second.end(); ++lit) {
out << *lit << ", ";
}
out << "]";
}
out << "}";
return out;
}
inline std::ostream& operator<<(std::ostream &out,
const ObjectCacher::ObjectSet &os)
{
return out << "objectset[" << os.ino
<< " ts " << os.truncate_seq << "/" << os.truncate_size
<< " objects " << os.objects.size()
<< " dirty_or_tx " << os.dirty_or_tx
<< "]";
}
inline std::ostream& operator<<(std::ostream &out,
const ObjectCacher::Object &ob)
{
out << "object["
<< ob.get_soid() << " oset " << ob.oset << std::dec
<< " wr " << ob.last_write_tid << "/" << ob.last_commit_tid;
if (ob.complete)
out << " COMPLETE";
if (!ob.exists)
out << " !EXISTS";
out << "]";
return out;
}
#endif
| 21,970 | 27.095908 | 89 | h |
null | ceph-main/src/osdc/Objecter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <algorithm>
#include <cerrno>
#include "Objecter.h"
#include "osd/OSDMap.h"
#include "osd/error_code.h"
#include "Filer.h"
#include "mon/MonClient.h"
#include "mon/error_code.h"
#include "msg/Messenger.h"
#include "msg/Message.h"
#include "messages/MPing.h"
#include "messages/MOSDOp.h"
#include "messages/MOSDOpReply.h"
#include "messages/MOSDBackoff.h"
#include "messages/MOSDMap.h"
#include "messages/MPoolOp.h"
#include "messages/MPoolOpReply.h"
#include "messages/MGetPoolStats.h"
#include "messages/MGetPoolStatsReply.h"
#include "messages/MStatfs.h"
#include "messages/MStatfsReply.h"
#include "messages/MMonCommand.h"
#include "messages/MCommand.h"
#include "messages/MCommandReply.h"
#include "messages/MWatchNotify.h"
#include "common/Cond.h"
#include "common/config.h"
#include "common/perf_counters.h"
#include "common/scrub_types.h"
#include "include/str_list.h"
#include "common/errno.h"
#include "common/EventTrace.h"
#include "common/async/waiter.h"
#include "error_code.h"
using std::list;
using std::make_pair;
using std::map;
using std::ostream;
using std::ostringstream;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
using ceph::decode;
using ceph::encode;
using ceph::Formatter;
using std::defer_lock;
using std::scoped_lock;
using std::shared_lock;
using std::unique_lock;
using ceph::real_time;
using ceph::real_clock;
using ceph::mono_clock;
using ceph::mono_time;
using ceph::timespan;
using ceph::shunique_lock;
using ceph::acquire_shared;
using ceph::acquire_unique;
namespace bc = boost::container;
namespace bs = boost::system;
namespace ca = ceph::async;
namespace cb = ceph::buffer;
#define dout_subsys ceph_subsys_objecter
#undef dout_prefix
#define dout_prefix *_dout << messenger->get_myname() << ".objecter "
enum {
l_osdc_first = 123200,
l_osdc_op_active,
l_osdc_op_laggy,
l_osdc_op_send,
l_osdc_op_send_bytes,
l_osdc_op_resend,
l_osdc_op_reply,
l_osdc_op_latency,
l_osdc_op_inflight,
l_osdc_oplen_avg,
l_osdc_op,
l_osdc_op_r,
l_osdc_op_w,
l_osdc_op_rmw,
l_osdc_op_pg,
l_osdc_osdop_stat,
l_osdc_osdop_create,
l_osdc_osdop_read,
l_osdc_osdop_write,
l_osdc_osdop_writefull,
l_osdc_osdop_writesame,
l_osdc_osdop_append,
l_osdc_osdop_zero,
l_osdc_osdop_truncate,
l_osdc_osdop_delete,
l_osdc_osdop_mapext,
l_osdc_osdop_sparse_read,
l_osdc_osdop_clonerange,
l_osdc_osdop_getxattr,
l_osdc_osdop_setxattr,
l_osdc_osdop_cmpxattr,
l_osdc_osdop_rmxattr,
l_osdc_osdop_resetxattrs,
l_osdc_osdop_call,
l_osdc_osdop_watch,
l_osdc_osdop_notify,
l_osdc_osdop_src_cmpxattr,
l_osdc_osdop_pgls,
l_osdc_osdop_pgls_filter,
l_osdc_osdop_other,
l_osdc_linger_active,
l_osdc_linger_send,
l_osdc_linger_resend,
l_osdc_linger_ping,
l_osdc_poolop_active,
l_osdc_poolop_send,
l_osdc_poolop_resend,
l_osdc_poolstat_active,
l_osdc_poolstat_send,
l_osdc_poolstat_resend,
l_osdc_statfs_active,
l_osdc_statfs_send,
l_osdc_statfs_resend,
l_osdc_command_active,
l_osdc_command_send,
l_osdc_command_resend,
l_osdc_map_epoch,
l_osdc_map_full,
l_osdc_map_inc,
l_osdc_osd_sessions,
l_osdc_osd_session_open,
l_osdc_osd_session_close,
l_osdc_osd_laggy,
l_osdc_osdop_omap_wr,
l_osdc_osdop_omap_rd,
l_osdc_osdop_omap_del,
l_osdc_last,
};
namespace {
inline bs::error_code osdcode(int r) {
return (r < 0) ? bs::error_code(-r, osd_category()) : bs::error_code();
}
}
// config obs ----------------------------
class Objecter::RequestStateHook : public AdminSocketHook {
Objecter *m_objecter;
public:
explicit RequestStateHook(Objecter *objecter);
int call(std::string_view command, const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& ss,
cb::list& out) override;
};
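// Completion locks are sharded by a hash of the object name: completions
// for the same object serialize on one mutex while completions for
// different objects can proceed in parallel.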
std::unique_lock<std::mutex> Objecter::OSDSession::get_lock(object_t& oid)
{
if (oid.name.empty())
return {};
static constexpr uint32_t HASH_PRIME = 1021;
uint32_t h = ceph_str_hash_linux(oid.name.c_str(), oid.name.size())
% HASH_PRIME;
return {completion_locks[h % num_locks], std::defer_lock};
}
const char** Objecter::get_tracked_conf_keys() const
{
static const char *config_keys[] = {
"crush_location",
"rados_mon_op_timeout",
"rados_osd_op_timeout",
NULL
};
return config_keys;
}
void Objecter::handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed)
{
if (changed.count("crush_location")) {
update_crush_location();
}
if (changed.count("rados_mon_op_timeout")) {
mon_timeout = conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
}
if (changed.count("rados_osd_op_timeout")) {
osd_timeout = conf.get_val<std::chrono::seconds>("rados_osd_op_timeout");
}
}
void Objecter::update_crush_location()
{
unique_lock wl(rwlock);
crush_location = cct->crush_location.get_location();
}
// messages ------------------------------
/*
* initialize only internal data structures, don't initiate cluster interaction
*/
void Objecter::init()
{
ceph_assert(!initialized);
if (!logger) {
PerfCountersBuilder pcb(cct, "objecter", l_osdc_first, l_osdc_last);
pcb.add_u64(l_osdc_op_active, "op_active", "Operations active", "actv",
PerfCountersBuilder::PRIO_CRITICAL);
pcb.add_u64(l_osdc_op_laggy, "op_laggy", "Laggy operations");
pcb.add_u64_counter(l_osdc_op_send, "op_send", "Sent operations");
pcb.add_u64_counter(l_osdc_op_send_bytes, "op_send_bytes", "Sent data", NULL, 0, unit_t(UNIT_BYTES));
pcb.add_u64_counter(l_osdc_op_resend, "op_resend", "Resent operations");
pcb.add_u64_counter(l_osdc_op_reply, "op_reply", "Operation reply");
pcb.add_time_avg(l_osdc_op_latency, "op_latency", "Operation latency");
pcb.add_u64(l_osdc_op_inflight, "op_inflight", "Operations in flight");
pcb.add_u64_avg(l_osdc_oplen_avg, "oplen_avg", "Average length of operation vector");
pcb.add_u64_counter(l_osdc_op, "op", "Operations");
pcb.add_u64_counter(l_osdc_op_r, "op_r", "Read operations", "rd",
PerfCountersBuilder::PRIO_CRITICAL);
pcb.add_u64_counter(l_osdc_op_w, "op_w", "Write operations", "wr",
PerfCountersBuilder::PRIO_CRITICAL);
pcb.add_u64_counter(l_osdc_op_rmw, "op_rmw", "Read-modify-write operations",
"rdwr", PerfCountersBuilder::PRIO_INTERESTING);
pcb.add_u64_counter(l_osdc_op_pg, "op_pg", "PG operation");
pcb.add_u64_counter(l_osdc_osdop_stat, "osdop_stat", "Stat operations");
pcb.add_u64_counter(l_osdc_osdop_create, "osdop_create",
"Create object operations");
pcb.add_u64_counter(l_osdc_osdop_read, "osdop_read", "Read operations");
pcb.add_u64_counter(l_osdc_osdop_write, "osdop_write", "Write operations");
pcb.add_u64_counter(l_osdc_osdop_writefull, "osdop_writefull",
"Write full object operations");
pcb.add_u64_counter(l_osdc_osdop_writesame, "osdop_writesame",
"Write same operations");
pcb.add_u64_counter(l_osdc_osdop_append, "osdop_append",
"Append operation");
pcb.add_u64_counter(l_osdc_osdop_zero, "osdop_zero",
"Set object to zero operations");
pcb.add_u64_counter(l_osdc_osdop_truncate, "osdop_truncate",
"Truncate object operations");
pcb.add_u64_counter(l_osdc_osdop_delete, "osdop_delete",
"Delete object operations");
pcb.add_u64_counter(l_osdc_osdop_mapext, "osdop_mapext",
"Map extent operations");
pcb.add_u64_counter(l_osdc_osdop_sparse_read, "osdop_sparse_read",
"Sparse read operations");
pcb.add_u64_counter(l_osdc_osdop_clonerange, "osdop_clonerange",
"Clone range operations");
pcb.add_u64_counter(l_osdc_osdop_getxattr, "osdop_getxattr",
"Get xattr operations");
pcb.add_u64_counter(l_osdc_osdop_setxattr, "osdop_setxattr",
"Set xattr operations");
pcb.add_u64_counter(l_osdc_osdop_cmpxattr, "osdop_cmpxattr",
"Xattr comparison operations");
pcb.add_u64_counter(l_osdc_osdop_rmxattr, "osdop_rmxattr",
"Remove xattr operations");
pcb.add_u64_counter(l_osdc_osdop_resetxattrs, "osdop_resetxattrs",
"Reset xattr operations");
pcb.add_u64_counter(l_osdc_osdop_call, "osdop_call",
"Call (execute) operations");
pcb.add_u64_counter(l_osdc_osdop_watch, "osdop_watch",
"Watch by object operations");
pcb.add_u64_counter(l_osdc_osdop_notify, "osdop_notify",
"Notify about object operations");
pcb.add_u64_counter(l_osdc_osdop_src_cmpxattr, "osdop_src_cmpxattr",
"Extended attribute comparison in multi operations");
pcb.add_u64_counter(l_osdc_osdop_pgls, "osdop_pgls");
pcb.add_u64_counter(l_osdc_osdop_pgls_filter, "osdop_pgls_filter");
pcb.add_u64_counter(l_osdc_osdop_other, "osdop_other", "Other operations");
pcb.add_u64(l_osdc_linger_active, "linger_active",
"Active lingering operations");
pcb.add_u64_counter(l_osdc_linger_send, "linger_send",
"Sent lingering operations");
pcb.add_u64_counter(l_osdc_linger_resend, "linger_resend",
"Resent lingering operations");
pcb.add_u64_counter(l_osdc_linger_ping, "linger_ping",
"Sent pings to lingering operations");
pcb.add_u64(l_osdc_poolop_active, "poolop_active",
"Active pool operations");
pcb.add_u64_counter(l_osdc_poolop_send, "poolop_send",
"Sent pool operations");
pcb.add_u64_counter(l_osdc_poolop_resend, "poolop_resend",
"Resent pool operations");
pcb.add_u64(l_osdc_poolstat_active, "poolstat_active",
"Active get pool stat operations");
pcb.add_u64_counter(l_osdc_poolstat_send, "poolstat_send",
"Pool stat operations sent");
pcb.add_u64_counter(l_osdc_poolstat_resend, "poolstat_resend",
"Resent pool stats");
pcb.add_u64(l_osdc_statfs_active, "statfs_active", "Statfs operations");
pcb.add_u64_counter(l_osdc_statfs_send, "statfs_send", "Sent FS stats");
pcb.add_u64_counter(l_osdc_statfs_resend, "statfs_resend",
"Resent FS stats");
pcb.add_u64(l_osdc_command_active, "command_active", "Active commands");
pcb.add_u64_counter(l_osdc_command_send, "command_send",
"Sent commands");
pcb.add_u64_counter(l_osdc_command_resend, "command_resend",
"Resent commands");
pcb.add_u64(l_osdc_map_epoch, "map_epoch", "OSD map epoch");
pcb.add_u64_counter(l_osdc_map_full, "map_full",
"Full OSD maps received");
pcb.add_u64_counter(l_osdc_map_inc, "map_inc",
"Incremental OSD maps received");
pcb.add_u64(l_osdc_osd_sessions, "osd_sessions",
"Open sessions"); // open sessions
pcb.add_u64_counter(l_osdc_osd_session_open, "osd_session_open",
"Sessions opened");
pcb.add_u64_counter(l_osdc_osd_session_close, "osd_session_close",
"Sessions closed");
pcb.add_u64(l_osdc_osd_laggy, "osd_laggy", "Laggy OSD sessions");
pcb.add_u64_counter(l_osdc_osdop_omap_wr, "omap_wr",
"OSD OMAP write operations");
pcb.add_u64_counter(l_osdc_osdop_omap_rd, "omap_rd",
"OSD OMAP read operations");
pcb.add_u64_counter(l_osdc_osdop_omap_del, "omap_del",
"OSD OMAP delete operations");
logger = pcb.create_perf_counters();
cct->get_perfcounters_collection()->add(logger);
}
m_request_state_hook = new RequestStateHook(this);
auto admin_socket = cct->get_admin_socket();
int ret = admin_socket->register_command("objecter_requests",
m_request_state_hook,
"show in-progress osd requests");
/* Don't warn on EEXIST, happens if multiple ceph clients
* are instantiated from one process */
if (ret < 0 && ret != -EEXIST) {
lderr(cct) << "error registering admin socket command: "
<< cpp_strerror(ret) << dendl;
}
update_crush_location();
cct->_conf.add_observer(this);
initialized = true;
}
/*
* ok, cluster interaction can happen
*/
void Objecter::start(const OSDMap* o)
{
shared_lock rl(rwlock);
start_tick();
if (o) {
osdmap->deepish_copy_from(*o);
prune_pg_mapping(osdmap->get_pools());
} else if (osdmap->get_epoch() == 0) {
_maybe_request_map();
}
}
void Objecter::shutdown()
{
ceph_assert(initialized);
unique_lock wl(rwlock);
initialized = false;
wl.unlock();
cct->_conf.remove_observer(this);
wl.lock();
while (!osd_sessions.empty()) {
auto p = osd_sessions.begin();
close_session(p->second);
}
while(!check_latest_map_lingers.empty()) {
auto i = check_latest_map_lingers.begin();
i->second->put();
check_latest_map_lingers.erase(i->first);
}
while(!check_latest_map_ops.empty()) {
auto i = check_latest_map_ops.begin();
i->second->put();
check_latest_map_ops.erase(i->first);
}
while(!check_latest_map_commands.empty()) {
auto i = check_latest_map_commands.begin();
i->second->put();
check_latest_map_commands.erase(i->first);
}
while(!poolstat_ops.empty()) {
auto i = poolstat_ops.begin();
delete i->second;
poolstat_ops.erase(i->first);
}
while(!statfs_ops.empty()) {
auto i = statfs_ops.begin();
delete i->second;
statfs_ops.erase(i->first);
}
while(!pool_ops.empty()) {
auto i = pool_ops.begin();
delete i->second;
pool_ops.erase(i->first);
}
ldout(cct, 20) << __func__ << " clearing up homeless session..." << dendl;
while(!homeless_session->linger_ops.empty()) {
auto i = homeless_session->linger_ops.begin();
ldout(cct, 10) << " linger_op " << i->first << dendl;
LingerOp *lop = i->second;
{
std::unique_lock swl(homeless_session->lock);
_session_linger_op_remove(homeless_session, lop);
}
linger_ops.erase(lop->linger_id);
linger_ops_set.erase(lop);
lop->put();
}
while(!homeless_session->ops.empty()) {
auto i = homeless_session->ops.begin();
ldout(cct, 10) << " op " << i->first << dendl;
auto op = i->second;
{
std::unique_lock swl(homeless_session->lock);
_session_op_remove(homeless_session, op);
}
op->put();
}
while(!homeless_session->command_ops.empty()) {
auto i = homeless_session->command_ops.begin();
ldout(cct, 10) << " command_op " << i->first << dendl;
auto cop = i->second;
{
std::unique_lock swl(homeless_session->lock);
_session_command_op_remove(homeless_session, cop);
}
cop->put();
}
if (tick_event) {
if (timer.cancel_event(tick_event)) {
ldout(cct, 10) << " successfully canceled tick" << dendl;
}
tick_event = 0;
}
if (logger) {
cct->get_perfcounters_collection()->remove(logger);
delete logger;
logger = NULL;
}
// Let go of Objecter write lock so timer thread can shutdown
wl.unlock();
// Outside of lock to avoid cycle WRT calls to RequestStateHook
// This is safe because we guarantee no concurrent calls to
// shutdown() with the ::initialized check at start.
if (m_request_state_hook) {
auto admin_socket = cct->get_admin_socket();
admin_socket->unregister_commands(m_request_state_hook);
delete m_request_state_hook;
m_request_state_hook = NULL;
}
}
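// Linger (watch/notify) ops are not resent in place. _send_linger builds a
// fresh Op each time: either the original registration op vector, or a
// single CEPH_OSD_WATCH_OP_RECONNECT op if the watch is already registered.
// Any registration op still pending from a previous attempt is cancelled
// before the new one is submitted.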
void Objecter::_send_linger(LingerOp *info,
ceph::shunique_lock<ceph::shared_mutex>& sul)
{
ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
fu2::unique_function<Op::OpSig> oncommit;
osdc_opvec opv;
std::shared_lock watchl(info->watch_lock);
cb::list *poutbl = nullptr;
if (info->registered && info->is_watch) {
ldout(cct, 15) << "send_linger " << info->linger_id << " reconnect"
<< dendl;
opv.push_back(OSDOp());
opv.back().op.op = CEPH_OSD_OP_WATCH;
opv.back().op.watch.cookie = info->get_cookie();
opv.back().op.watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
opv.back().op.watch.gen = ++info->register_gen;
oncommit = CB_Linger_Reconnect(this, info);
} else {
ldout(cct, 15) << "send_linger " << info->linger_id << " register"
<< dendl;
opv = info->ops;
// TODO Augment ca::Completion with an equivalent of
// target so we can handle these cases better.
auto c = std::make_unique<CB_Linger_Commit>(this, info);
if (!info->is_watch) {
info->notify_id = 0;
poutbl = &c->outbl;
}
oncommit = [c = std::move(c)](bs::error_code ec) mutable {
std::move(*c)(ec);
};
}
watchl.unlock();
auto o = new Op(info->target.base_oid, info->target.base_oloc,
std::move(opv), info->target.flags | CEPH_OSD_FLAG_READ,
std::move(oncommit), info->pobjver);
o->outbl = poutbl;
o->snapid = info->snap;
o->snapc = info->snapc;
o->mtime = info->mtime;
o->target = info->target;
o->tid = ++last_tid;
// do not resend this; we will send a new op to reregister
o->should_resend = false;
o->ctx_budgeted = true;
if (info->register_tid) {
// repeat send. cancel old registration op, if any.
std::unique_lock sl(info->session->lock);
if (info->session->ops.count(info->register_tid)) {
auto o = info->session->ops[info->register_tid];
_op_cancel_map_check(o);
_cancel_linger_op(o);
}
sl.unlock();
}
_op_submit_with_budget(o, sul, &info->register_tid, &info->ctx_budget);
logger->inc(l_osdc_linger_send);
}
void Objecter::_linger_commit(LingerOp *info, bs::error_code ec,
cb::list& outbl)
{
std::unique_lock wl(info->watch_lock);
ldout(cct, 10) << "_linger_commit " << info->linger_id << dendl;
if (info->on_reg_commit) {
info->on_reg_commit->defer(std::move(info->on_reg_commit),
ec, cb::list{});
info->on_reg_commit.reset();
}
if (ec && info->on_notify_finish) {
info->on_notify_finish->defer(std::move(info->on_notify_finish),
ec, cb::list{});
info->on_notify_finish.reset();
}
// only tell the user the first time we do this
info->registered = true;
info->pobjver = NULL;
if (!info->is_watch) {
// make note of the notify_id
auto p = outbl.cbegin();
try {
decode(info->notify_id, p);
ldout(cct, 10) << "_linger_commit notify_id=" << info->notify_id
<< dendl;
}
catch (cb::error& e) {
}
}
}
class CB_DoWatchError {
Objecter *objecter;
boost::intrusive_ptr<Objecter::LingerOp> info;
bs::error_code ec;
public:
CB_DoWatchError(Objecter *o, Objecter::LingerOp *i,
bs::error_code ec)
: objecter(o), info(i), ec(ec) {
info->_queued_async();
}
void operator()() {
std::unique_lock wl(objecter->rwlock);
bool canceled = info->canceled;
wl.unlock();
if (!canceled) {
info->handle(ec, 0, info->get_cookie(), 0, {});
}
info->finished_async();
}
};
bs::error_code Objecter::_normalize_watch_error(bs::error_code ec)
{
// translate ENOENT -> ENOTCONN so that a delete->disconnection
// notification and a failure to reconnect because we raced with
// the delete appear the same to the user.
if (ec == bs::errc::no_such_file_or_directory)
ec = bs::error_code(ENOTCONN, osd_category());
return ec;
}
void Objecter::_linger_reconnect(LingerOp *info, bs::error_code ec)
{
ldout(cct, 10) << __func__ << " " << info->linger_id << " = " << ec
<< " (last_error " << info->last_error << ")" << dendl;
std::unique_lock wl(info->watch_lock);
if (ec) {
if (!info->last_error) {
ec = _normalize_watch_error(ec);
if (info->handle) {
boost::asio::defer(finish_strand, CB_DoWatchError(this, info, ec));
}
}
}
info->last_error = ec;
}
void Objecter::_send_linger_ping(LingerOp *info)
{
// rwlock is locked unique
// info->session->lock is locked
if (cct->_conf->objecter_inject_no_watch_ping) {
ldout(cct, 10) << __func__ << " " << info->linger_id << " SKIPPING"
<< dendl;
return;
}
if (osdmap->test_flag(CEPH_OSDMAP_PAUSERD)) {
ldout(cct, 10) << __func__ << " PAUSERD" << dendl;
return;
}
ceph::coarse_mono_time now = ceph::coarse_mono_clock::now();
ldout(cct, 10) << __func__ << " " << info->linger_id << " now " << now
<< dendl;
osdc_opvec opv(1);
opv[0].op.op = CEPH_OSD_OP_WATCH;
opv[0].op.watch.cookie = info->get_cookie();
opv[0].op.watch.op = CEPH_OSD_WATCH_OP_PING;
opv[0].op.watch.gen = info->register_gen;
Op *o = new Op(info->target.base_oid, info->target.base_oloc,
std::move(opv), info->target.flags | CEPH_OSD_FLAG_READ,
CB_Linger_Ping(this, info, now),
nullptr, nullptr);
o->target = info->target;
o->should_resend = false;
_send_op_account(o);
o->tid = ++last_tid;
_session_op_assign(info->session, o);
_send_op(o);
info->ping_tid = o->tid;
logger->inc(l_osdc_linger_ping);
}
void Objecter::_linger_ping(LingerOp *info, bs::error_code ec, ceph::coarse_mono_time sent,
uint32_t register_gen)
{
std::unique_lock l(info->watch_lock);
ldout(cct, 10) << __func__ << " " << info->linger_id
<< " sent " << sent << " gen " << register_gen << " = " << ec
<< " (last_error " << info->last_error
<< " register_gen " << info->register_gen << ")" << dendl;
if (info->register_gen == register_gen) {
if (!ec) {
info->watch_valid_thru = sent;
} else if (ec && !info->last_error) {
ec = _normalize_watch_error(ec);
info->last_error = ec;
if (info->handle) {
boost::asio::defer(finish_strand, CB_DoWatchError(this, info, ec));
}
}
} else {
ldout(cct, 20) << " ignoring old gen" << dendl;
}
}
tl::expected<ceph::timespan,
bs::error_code> Objecter::linger_check(LingerOp *info)
{
std::shared_lock l(info->watch_lock);
ceph::coarse_mono_time stamp = info->watch_valid_thru;
if (!info->watch_pending_async.empty())
stamp = std::min(info->watch_valid_thru, info->watch_pending_async.front());
auto age = ceph::coarse_mono_clock::now() - stamp;
ldout(cct, 10) << __func__ << " " << info->linger_id
<< " err " << info->last_error
<< " age " << age << dendl;
if (info->last_error)
return tl::unexpected(info->last_error);
// return a safe upper bound (we are truncating to ms)
return age;
}
void Objecter::linger_cancel(LingerOp *info)
{
unique_lock wl(rwlock);
_linger_cancel(info);
info->put();
}
void Objecter::_linger_cancel(LingerOp *info)
{
// rwlock is locked unique
ldout(cct, 20) << __func__ << " linger_id=" << info->linger_id << dendl;
if (!info->canceled) {
OSDSession *s = info->session;
std::unique_lock sl(s->lock);
_session_linger_op_remove(s, info);
sl.unlock();
linger_ops.erase(info->linger_id);
linger_ops_set.erase(info);
ceph_assert(linger_ops.size() == linger_ops_set.size());
info->canceled = true;
info->put();
logger->dec(l_osdc_linger_active);
}
}
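// Typical watch lifecycle as a client such as librados would drive it
// (illustrative sketch, not code from this file; the ObjectOperation is
// assumed to be populated with a CEPH_OSD_OP_WATCH op by the caller):
//
//   LingerOp *op = objecter->linger_register(oid, oloc, 0);
//   ObjectOperation wr; // caller fills in the WATCH op
//   objecter->linger_watch(op, wr, snapc, ceph::real_clock::now(), inbl,
//                          std::move(oncommit), nullptr);
//   ...
//   objecter->linger_cancel(op); // tears the watch down again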
Objecter::LingerOp *Objecter::linger_register(const object_t& oid,
const object_locator_t& oloc,
int flags)
{
unique_lock l(rwlock);
// Acquire linger ID
auto info = new LingerOp(this, ++max_linger_id);
info->target.base_oid = oid;
info->target.base_oloc = oloc;
if (info->target.base_oloc.key == oid)
info->target.base_oloc.key.clear();
info->target.flags = flags;
info->watch_valid_thru = ceph::coarse_mono_clock::now();
ldout(cct, 10) << __func__ << " info " << info
<< " linger_id " << info->linger_id
<< " cookie " << info->get_cookie()
<< dendl;
linger_ops[info->linger_id] = info;
linger_ops_set.insert(info);
ceph_assert(linger_ops.size() == linger_ops_set.size());
info->get(); // for the caller
return info;
}
ceph_tid_t Objecter::linger_watch(LingerOp *info,
ObjectOperation& op,
const SnapContext& snapc,
real_time mtime,
cb::list& inbl,
decltype(info->on_reg_commit)&& oncommit,
version_t *objver)
{
info->is_watch = true;
info->snapc = snapc;
info->mtime = mtime;
info->target.flags |= CEPH_OSD_FLAG_WRITE;
info->ops = op.ops;
info->inbl = inbl;
info->pobjver = objver;
info->on_reg_commit = std::move(oncommit);
info->ctx_budget = take_linger_budget(info);
shunique_lock sul(rwlock, ceph::acquire_unique);
_linger_submit(info, sul);
logger->inc(l_osdc_linger_active);
op.clear();
return info->linger_id;
}
ceph_tid_t Objecter::linger_notify(LingerOp *info,
ObjectOperation& op,
snapid_t snap, cb::list& inbl,
decltype(LingerOp::on_reg_commit)&& onfinish,
version_t *objver)
{
info->snap = snap;
info->target.flags |= CEPH_OSD_FLAG_READ;
info->ops = op.ops;
info->inbl = inbl;
info->pobjver = objver;
info->on_reg_commit = std::move(onfinish);
info->ctx_budget = take_linger_budget(info);
shunique_lock sul(rwlock, ceph::acquire_unique);
_linger_submit(info, sul);
logger->inc(l_osdc_linger_active);
op.clear();
return info->linger_id;
}
void Objecter::_linger_submit(LingerOp *info,
ceph::shunique_lock<ceph::shared_mutex>& sul)
{
ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
ceph_assert(info->linger_id);
ceph_assert(info->ctx_budget != -1); // caller needs to have taken budget already!
// Populate Op::target
OSDSession *s = NULL;
int r = _calc_target(&info->target, nullptr);
switch (r) {
case RECALC_OP_TARGET_POOL_EIO:
_check_linger_pool_eio(info);
return;
}
// Create LingerOp<->OSDSession relation
r = _get_session(info->target.osd, &s, sul);
ceph_assert(r == 0);
unique_lock sl(s->lock);
_session_linger_op_assign(s, info);
sl.unlock();
put_session(s);
_send_linger(info, sul);
}
struct CB_DoWatchNotify {
Objecter *objecter;
boost::intrusive_ptr<Objecter::LingerOp> info;
boost::intrusive_ptr<MWatchNotify> msg;
CB_DoWatchNotify(Objecter *o, Objecter::LingerOp *i, MWatchNotify *m)
: objecter(o), info(i), msg(m) {
info->_queued_async();
}
void operator()() {
objecter->_do_watch_notify(std::move(info), std::move(msg));
}
};
void Objecter::handle_watch_notify(MWatchNotify *m)
{
shared_lock l(rwlock);
if (!initialized) {
return;
}
LingerOp *info = reinterpret_cast<LingerOp*>(m->cookie);
if (linger_ops_set.count(info) == 0) {
ldout(cct, 7) << __func__ << " cookie " << m->cookie << " dne" << dendl;
return;
}
std::unique_lock wl(info->watch_lock);
if (m->opcode == CEPH_WATCH_EVENT_DISCONNECT) {
if (!info->last_error) {
info->last_error = bs::error_code(ENOTCONN, osd_category());
if (info->handle) {
boost::asio::defer(finish_strand, CB_DoWatchError(this, info,
info->last_error));
}
}
} else if (!info->is_watch) {
// we have CEPH_WATCH_EVENT_NOTIFY_COMPLETE; we can do this inline
// since we know the only user (librados) is safe to call in
// fast-dispatch context
if (info->notify_id &&
info->notify_id != m->notify_id) {
ldout(cct, 10) << __func__ << " reply notify " << m->notify_id
<< " != " << info->notify_id << ", ignoring" << dendl;
} else if (info->on_notify_finish) {
info->on_notify_finish->defer(
std::move(info->on_notify_finish),
osdcode(m->return_code), std::move(m->get_data()));
// if we race with reconnect we might get a second notify; only
// notify the caller once!
info->on_notify_finish = nullptr;
}
} else {
boost::asio::defer(finish_strand, CB_DoWatchNotify(this, info, m));
}
}
void Objecter::_do_watch_notify(boost::intrusive_ptr<LingerOp> info,
boost::intrusive_ptr<MWatchNotify> m)
{
ldout(cct, 10) << __func__ << " " << *m << dendl;
shared_lock l(rwlock);
ceph_assert(initialized);
if (info->canceled) {
l.unlock();
goto out;
}
// notify completion?
ceph_assert(info->is_watch);
ceph_assert(info->handle);
ceph_assert(m->opcode != CEPH_WATCH_EVENT_DISCONNECT);
l.unlock();
switch (m->opcode) {
case CEPH_WATCH_EVENT_NOTIFY:
info->handle({}, m->notify_id, m->cookie, m->notifier_gid, std::move(m->bl));
break;
}
out:
info->finished_async();
}
bool Objecter::ms_dispatch(Message *m)
{
ldout(cct, 10) << __func__ << " " << cct << " " << *m << dendl;
switch (m->get_type()) {
  // these we exclusively handle
case CEPH_MSG_OSD_OPREPLY:
handle_osd_op_reply(static_cast<MOSDOpReply*>(m));
return true;
case CEPH_MSG_OSD_BACKOFF:
handle_osd_backoff(static_cast<MOSDBackoff*>(m));
return true;
case CEPH_MSG_WATCH_NOTIFY:
handle_watch_notify(static_cast<MWatchNotify*>(m));
m->put();
return true;
case MSG_COMMAND_REPLY:
if (m->get_source().type() == CEPH_ENTITY_TYPE_OSD) {
handle_command_reply(static_cast<MCommandReply*>(m));
return true;
} else {
return false;
}
case MSG_GETPOOLSTATSREPLY:
handle_get_pool_stats_reply(static_cast<MGetPoolStatsReply*>(m));
return true;
case CEPH_MSG_POOLOP_REPLY:
handle_pool_op_reply(static_cast<MPoolOpReply*>(m));
return true;
case CEPH_MSG_STATFS_REPLY:
handle_fs_stats_reply(static_cast<MStatfsReply*>(m));
return true;
// these we give others a chance to inspect
// MDS, OSD
case CEPH_MSG_OSD_MAP:
handle_osd_map(static_cast<MOSDMap*>(m));
return false;
}
return false;
}
void Objecter::_scan_requests(
OSDSession *s,
bool skipped_map,
bool cluster_full,
map<int64_t, bool> *pool_full_map,
map<ceph_tid_t, Op*>& need_resend,
list<LingerOp*>& need_resend_linger,
map<ceph_tid_t, CommandOp*>& need_resend_command,
ceph::shunique_lock<ceph::shared_mutex>& sul)
{
ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
list<LingerOp*> unregister_lingers;
std::unique_lock sl(s->lock);
// check for changed linger mappings (_before_ regular ops)
auto lp = s->linger_ops.begin();
while (lp != s->linger_ops.end()) {
auto op = lp->second;
ceph_assert(op->session == s);
// check_linger_pool_dne() may touch linger_ops; prevent iterator
// invalidation
++lp;
ldout(cct, 10) << " checking linger op " << op->linger_id << dendl;
bool unregister, force_resend_writes = cluster_full;
int r = _recalc_linger_op_target(op, sul);
if (pool_full_map)
force_resend_writes = force_resend_writes ||
(*pool_full_map)[op->target.base_oloc.pool];
switch (r) {
case RECALC_OP_TARGET_NO_ACTION:
if (!skipped_map && !force_resend_writes)
break;
// -- fall-thru --
case RECALC_OP_TARGET_NEED_RESEND:
need_resend_linger.push_back(op);
_linger_cancel_map_check(op);
break;
case RECALC_OP_TARGET_POOL_DNE:
_check_linger_pool_dne(op, &unregister);
if (unregister) {
ldout(cct, 10) << " need to unregister linger op "
<< op->linger_id << dendl;
op->get();
unregister_lingers.push_back(op);
}
break;
case RECALC_OP_TARGET_POOL_EIO:
_check_linger_pool_eio(op);
ldout(cct, 10) << " need to unregister linger op "
<< op->linger_id << dendl;
op->get();
unregister_lingers.push_back(op);
break;
}
}
// check for changed request mappings
auto p = s->ops.begin();
while (p != s->ops.end()) {
Op *op = p->second;
++p; // check_op_pool_dne() may touch ops; prevent iterator invalidation
ldout(cct, 10) << " checking op " << op->tid << dendl;
_prune_snapc(osdmap->get_new_removed_snaps(), op);
bool force_resend_writes = cluster_full;
if (pool_full_map)
force_resend_writes = force_resend_writes ||
(*pool_full_map)[op->target.base_oloc.pool];
int r = _calc_target(&op->target,
op->session ? op->session->con.get() : nullptr);
switch (r) {
case RECALC_OP_TARGET_NO_ACTION:
if (!skipped_map && !(force_resend_writes && op->target.respects_full()))
break;
// -- fall-thru --
case RECALC_OP_TARGET_NEED_RESEND:
_session_op_remove(op->session, op);
need_resend[op->tid] = op;
_op_cancel_map_check(op);
break;
case RECALC_OP_TARGET_POOL_DNE:
_check_op_pool_dne(op, &sl);
break;
case RECALC_OP_TARGET_POOL_EIO:
_check_op_pool_eio(op, &sl);
break;
}
}
// commands
auto cp = s->command_ops.begin();
while (cp != s->command_ops.end()) {
auto c = cp->second;
++cp;
ldout(cct, 10) << " checking command " << c->tid << dendl;
bool force_resend_writes = cluster_full;
if (pool_full_map)
force_resend_writes = force_resend_writes ||
(*pool_full_map)[c->target_pg.pool()];
int r = _calc_command_target(c, sul);
switch (r) {
case RECALC_OP_TARGET_NO_ACTION:
// resend if skipped map; otherwise do nothing.
if (!skipped_map && !force_resend_writes)
break;
// -- fall-thru --
case RECALC_OP_TARGET_NEED_RESEND:
need_resend_command[c->tid] = c;
_session_command_op_remove(c->session, c);
_command_cancel_map_check(c);
break;
case RECALC_OP_TARGET_POOL_DNE:
case RECALC_OP_TARGET_OSD_DNE:
case RECALC_OP_TARGET_OSD_DOWN:
_check_command_map_dne(c);
break;
}
}
sl.unlock();
for (auto iter = unregister_lingers.begin();
iter != unregister_lingers.end();
++iter) {
_linger_cancel(*iter);
(*iter)->put();
}
}
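// Apply incoming OSD maps one epoch at a time (incrementals when available,
// full maps otherwise), rescan every session for ops whose mapping or pool
// state changed, then resend the affected ops, linger ops and commands and
// wake any waiters queued on waiting_for_map.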
void Objecter::handle_osd_map(MOSDMap *m)
{
ceph::shunique_lock sul(rwlock, acquire_unique);
if (!initialized)
return;
ceph_assert(osdmap);
if (m->fsid != monc->get_fsid()) {
ldout(cct, 0) << "handle_osd_map fsid " << m->fsid
<< " != " << monc->get_fsid() << dendl;
return;
}
bool was_pauserd = osdmap->test_flag(CEPH_OSDMAP_PAUSERD);
bool cluster_full = _osdmap_full_flag();
bool was_pausewr = osdmap->test_flag(CEPH_OSDMAP_PAUSEWR) || cluster_full ||
_osdmap_has_pool_full();
map<int64_t, bool> pool_full_map;
for (auto it = osdmap->get_pools().begin();
it != osdmap->get_pools().end(); ++it)
pool_full_map[it->first] = _osdmap_pool_full(it->second);
list<LingerOp*> need_resend_linger;
map<ceph_tid_t, Op*> need_resend;
map<ceph_tid_t, CommandOp*> need_resend_command;
if (m->get_last() <= osdmap->get_epoch()) {
ldout(cct, 3) << "handle_osd_map ignoring epochs ["
<< m->get_first() << "," << m->get_last()
<< "] <= " << osdmap->get_epoch() << dendl;
} else {
ldout(cct, 3) << "handle_osd_map got epochs ["
<< m->get_first() << "," << m->get_last()
<< "] > " << osdmap->get_epoch() << dendl;
if (osdmap->get_epoch()) {
bool skipped_map = false;
// we want incrementals
for (epoch_t e = osdmap->get_epoch() + 1;
e <= m->get_last();
e++) {
if (osdmap->get_epoch() == e-1 &&
m->incremental_maps.count(e)) {
ldout(cct, 3) << "handle_osd_map decoding incremental epoch " << e
<< dendl;
OSDMap::Incremental inc(m->incremental_maps[e]);
osdmap->apply_incremental(inc);
emit_blocklist_events(inc);
logger->inc(l_osdc_map_inc);
}
else if (m->maps.count(e)) {
ldout(cct, 3) << "handle_osd_map decoding full epoch " << e << dendl;
auto new_osdmap = std::make_unique<OSDMap>();
new_osdmap->decode(m->maps[e]);
emit_blocklist_events(*osdmap, *new_osdmap);
osdmap = std::move(new_osdmap);
logger->inc(l_osdc_map_full);
}
else {
if (e >= m->cluster_osdmap_trim_lower_bound) {
ldout(cct, 3) << "handle_osd_map requesting missing epoch "
<< osdmap->get_epoch()+1 << dendl;
_maybe_request_map();
break;
}
ldout(cct, 3) << "handle_osd_map missing epoch "
<< osdmap->get_epoch()+1
<< ", jumping to "
<< m->cluster_osdmap_trim_lower_bound << dendl;
e = m->cluster_osdmap_trim_lower_bound - 1;
skipped_map = true;
continue;
}
logger->set(l_osdc_map_epoch, osdmap->get_epoch());
prune_pg_mapping(osdmap->get_pools());
cluster_full = cluster_full || _osdmap_full_flag();
update_pool_full_map(pool_full_map);
// check all outstanding requests on every epoch
for (auto& i : need_resend) {
_prune_snapc(osdmap->get_new_removed_snaps(), i.second);
}
_scan_requests(homeless_session, skipped_map, cluster_full,
&pool_full_map, need_resend,
need_resend_linger, need_resend_command, sul);
for (auto p = osd_sessions.begin();
p != osd_sessions.end(); ) {
auto s = p->second;
_scan_requests(s, skipped_map, cluster_full,
&pool_full_map, need_resend,
need_resend_linger, need_resend_command, sul);
++p;
// osd down or addr change?
if (!osdmap->is_up(s->osd) ||
(s->con &&
s->con->get_peer_addrs() != osdmap->get_addrs(s->osd))) {
close_session(s);
}
}
ceph_assert(e == osdmap->get_epoch());
}
} else {
// first map. we want the full thing.
if (m->maps.count(m->get_last())) {
for (auto p = osd_sessions.begin();
p != osd_sessions.end(); ++p) {
OSDSession *s = p->second;
_scan_requests(s, false, false, NULL, need_resend,
need_resend_linger, need_resend_command, sul);
}
ldout(cct, 3) << "handle_osd_map decoding full epoch "
<< m->get_last() << dendl;
osdmap->decode(m->maps[m->get_last()]);
prune_pg_mapping(osdmap->get_pools());
_scan_requests(homeless_session, false, false, NULL,
need_resend, need_resend_linger,
need_resend_command, sul);
} else {
ldout(cct, 3) << "handle_osd_map hmm, i want a full map, requesting"
<< dendl;
monc->sub_want("osdmap", 0, CEPH_SUBSCRIBE_ONETIME);
monc->renew_subs();
}
}
}
// make sure need_resend targets reflect latest map
for (auto p = need_resend.begin(); p != need_resend.end(); ) {
Op *op = p->second;
if (op->target.epoch < osdmap->get_epoch()) {
ldout(cct, 10) << __func__ << " checking op " << p->first << dendl;
int r = _calc_target(&op->target, nullptr);
if (r == RECALC_OP_TARGET_POOL_DNE) {
p = need_resend.erase(p);
_check_op_pool_dne(op, nullptr);
} else {
++p;
}
} else {
++p;
}
}
bool pauserd = osdmap->test_flag(CEPH_OSDMAP_PAUSERD);
bool pausewr = osdmap->test_flag(CEPH_OSDMAP_PAUSEWR) || _osdmap_full_flag()
|| _osdmap_has_pool_full();
// was/is paused?
if (was_pauserd || was_pausewr || pauserd || pausewr ||
osdmap->get_epoch() < epoch_barrier) {
_maybe_request_map();
}
// resend requests
for (auto p = need_resend.begin();
p != need_resend.end(); ++p) {
auto op = p->second;
auto s = op->session;
bool mapped_session = false;
if (!s) {
int r = _map_session(&op->target, &s, sul);
ceph_assert(r == 0);
mapped_session = true;
} else {
get_session(s);
}
std::unique_lock sl(s->lock);
if (mapped_session) {
_session_op_assign(s, op);
}
if (op->should_resend) {
if (!op->session->is_homeless() && !op->target.paused) {
logger->inc(l_osdc_op_resend);
_send_op(op);
}
} else {
_op_cancel_map_check(op);
_cancel_linger_op(op);
}
sl.unlock();
put_session(s);
}
for (auto p = need_resend_linger.begin();
p != need_resend_linger.end(); ++p) {
LingerOp *op = *p;
ceph_assert(op->session);
if (!op->session->is_homeless()) {
logger->inc(l_osdc_linger_resend);
_send_linger(op, sul);
}
}
for (auto p = need_resend_command.begin();
p != need_resend_command.end(); ++p) {
auto c = p->second;
if (c->target.osd >= 0) {
_assign_command_session(c, sul);
if (c->session && !c->session->is_homeless()) {
_send_command(c);
}
}
}
_dump_active();
// finish any Contexts that were waiting on a map update
auto p = waiting_for_map.begin();
while (p != waiting_for_map.end() &&
p->first <= osdmap->get_epoch()) {
//go through the list and call the onfinish methods
for (auto& [c, ec] : p->second) {
ca::post(std::move(c), ec);
}
waiting_for_map.erase(p++);
}
monc->sub_got("osdmap", osdmap->get_epoch());
if (!waiting_for_map.empty()) {
_maybe_request_map();
}
}
void Objecter::enable_blocklist_events()
{
unique_lock wl(rwlock);
blocklist_events_enabled = true;
}
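/**
 * Hand accumulated blocklist events to the caller.
 *
 * If *events is empty we simply swap it with our pending set;
 * otherwise the pending entries are merged in and then cleared, so
 * every event is reported exactly once.
 */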
void Objecter::consume_blocklist_events(std::set<entity_addr_t> *events)
{
unique_lock wl(rwlock);
if (events->empty()) {
events->swap(blocklist_events);
} else {
for (const auto &i : blocklist_events) {
events->insert(i);
}
blocklist_events.clear();
}
}
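// Record addresses newly blocklisted by an incremental map so they can
// later be picked up via consume_blocklist_events().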
void Objecter::emit_blocklist_events(const OSDMap::Incremental &inc)
{
if (!blocklist_events_enabled) {
return;
}
for (const auto &i : inc.new_blocklist) {
blocklist_events.insert(i.first);
}
}
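// When we jump between full maps, diff the old and new blocklists
// (including range blocklists) and queue any newly added addresses.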
void Objecter::emit_blocklist_events(const OSDMap &old_osd_map,
const OSDMap &new_osd_map)
{
if (!blocklist_events_enabled) {
return;
}
std::set<entity_addr_t> old_set;
std::set<entity_addr_t> new_set;
std::set<entity_addr_t> old_range_set;
std::set<entity_addr_t> new_range_set;
old_osd_map.get_blocklist(&old_set, &old_range_set);
new_osd_map.get_blocklist(&new_set, &new_range_set);
std::set<entity_addr_t> delta_set;
std::set_difference(
new_set.begin(), new_set.end(), old_set.begin(), old_set.end(),
std::inserter(delta_set, delta_set.begin()));
std::set_difference(
new_range_set.begin(), new_range_set.end(),
old_range_set.begin(), old_range_set.end(),
std::inserter(delta_set, delta_set.begin()));
blocklist_events.insert(delta_set.begin(), delta_set.end());
}
// op pool check
void Objecter::CB_Op_Map_Latest::operator()(bs::error_code e,
version_t latest, version_t)
{
if (e == bs::errc::resource_unavailable_try_again ||
e == bs::errc::operation_canceled)
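    // ignore callback; we will retry in resend_mon_ops()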
return;
lgeneric_subdout(objecter->cct, objecter, 10)
<< "op_map_latest r=" << e << " tid=" << tid
<< " latest " << latest << dendl;
unique_lock wl(objecter->rwlock);
auto iter = objecter->check_latest_map_ops.find(tid);
if (iter == objecter->check_latest_map_ops.end()) {
lgeneric_subdout(objecter->cct, objecter, 10)
<< "op_map_latest op "<< tid << " not found" << dendl;
return;
}
Op *op = iter->second;
objecter->check_latest_map_ops.erase(iter);
lgeneric_subdout(objecter->cct, objecter, 20)
<< "op_map_latest op "<< op << dendl;
if (op->map_dne_bound == 0)
op->map_dne_bound = latest;
unique_lock sl(op->session->lock, defer_lock);
objecter->_check_op_pool_dne(op, &sl);
op->put();
}
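// Look up a pool snapshot by name; on success fill *snap with its id
// and return 0, otherwise return -ENOENT.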
int Objecter::pool_snap_by_name(int64_t poolid, const char *snap_name,
snapid_t *snap) const
{
shared_lock rl(rwlock);
auto& pools = osdmap->get_pools();
auto iter = pools.find(poolid);
if (iter == pools.end()) {
return -ENOENT;
}
const pg_pool_t& pg_pool = iter->second;
for (auto p = pg_pool.snaps.begin();
p != pg_pool.snaps.end();
++p) {
if (p->second.name == snap_name) {
*snap = p->first;
return 0;
}
}
return -ENOENT;
}
int Objecter::pool_snap_get_info(int64_t poolid, snapid_t snap,
pool_snap_info_t *info) const
{
shared_lock rl(rwlock);
auto& pools = osdmap->get_pools();
auto iter = pools.find(poolid);
if (iter == pools.end()) {
return -ENOENT;
}
const pg_pool_t& pg_pool = iter->second;
auto p = pg_pool.snaps.find(snap);
if (p == pg_pool.snaps.end())
return -ENOENT;
*info = p->second;
return 0;
}
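// List the ids of all snapshots defined on a pool; returns -ENOENT if
// the pool does not exist.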
int Objecter::pool_snap_list(int64_t poolid, vector<uint64_t> *snaps)
{
shared_lock rl(rwlock);
const pg_pool_t *pi = osdmap->get_pg_pool(poolid);
if (!pi)
return -ENOENT;
for (auto p = pi->snaps.begin();
p != pi->snaps.end();
++p) {
snaps->push_back(p->first);
}
return 0;
}
// sl may be unlocked.
void Objecter::_check_op_pool_dne(Op *op, std::unique_lock<std::shared_mutex> *sl)
{
// rwlock is locked unique
if (op->target.pool_ever_existed) {
// the pool previously existed and now it does not, which means it
// was deleted.
op->map_dne_bound = osdmap->get_epoch();
ldout(cct, 10) << "check_op_pool_dne tid " << op->tid
<< " pool previously exists but now does not"
<< dendl;
} else {
ldout(cct, 10) << "check_op_pool_dne tid " << op->tid
<< " current " << osdmap->get_epoch()
<< " map_dne_bound " << op->map_dne_bound
<< dendl;
}
if (op->map_dne_bound > 0) {
if (osdmap->get_epoch() >= op->map_dne_bound) {
// we had a new enough map
ldout(cct, 10) << "check_op_pool_dne tid " << op->tid
<< " concluding pool " << op->target.base_pgid.pool()
<< " dne" << dendl;
if (op->has_completion()) {
num_in_flight--;
op->complete(osdc_errc::pool_dne, -ENOENT);
}
OSDSession *s = op->session;
if (s) {
ceph_assert(s != NULL);
ceph_assert(sl->mutex() == &s->lock);
bool session_locked = sl->owns_lock();
if (!session_locked) {
sl->lock();
}
_finish_op(op, 0);
if (!session_locked) {
sl->unlock();
}
} else {
_finish_op(op, 0); // no session
}
}
} else {
_send_op_map_check(op);
}
}
// sl may be unlocked.
void Objecter::_check_op_pool_eio(Op *op, std::unique_lock<std::shared_mutex> *sl)
{
// rwlock is locked unique
// the pool has the EIO flag set; fail the op immediately
ldout(cct, 10) << "check_op_pool_eio tid " << op->tid
<< " concluding pool " << op->target.base_pgid.pool()
<< " has eio" << dendl;
if (op->has_completion()) {
num_in_flight--;
op->complete(osdc_errc::pool_eio, -EIO);
}
OSDSession *s = op->session;
if (s) {
ceph_assert(s != NULL);
ceph_assert(sl->mutex() == &s->lock);
bool session_locked = sl->owns_lock();
if (!session_locked) {
sl->lock();
}
_finish_op(op, 0);
if (!session_locked) {
sl->unlock();
}
} else {
_finish_op(op, 0); // no session
}
}
void Objecter::_send_op_map_check(Op *op)
{
// rwlock is locked unique
// ask the monitor
if (check_latest_map_ops.count(op->tid) == 0) {
op->get();
check_latest_map_ops[op->tid] = op;
monc->get_version("osdmap", CB_Op_Map_Latest(this, op->tid));
}
}
void Objecter::_op_cancel_map_check(Op *op)
{
// rwlock is locked unique
auto iter = check_latest_map_ops.find(op->tid);
if (iter != check_latest_map_ops.end()) {
Op *op = iter->second;
op->put();
check_latest_map_ops.erase(iter);
}
}
// linger pool check
void Objecter::CB_Linger_Map_Latest::operator()(bs::error_code e,
version_t latest,
version_t)
{
if (e == bs::errc::resource_unavailable_try_again ||
e == bs::errc::operation_canceled) {
// ignore callback; we will retry in resend_mon_ops()
return;
}
unique_lock wl(objecter->rwlock);
auto iter = objecter->check_latest_map_lingers.find(linger_id);
if (iter == objecter->check_latest_map_lingers.end()) {
return;
}
auto op = iter->second;
objecter->check_latest_map_lingers.erase(iter);
if (op->map_dne_bound == 0)
op->map_dne_bound = latest;
bool unregister;
objecter->_check_linger_pool_dne(op, &unregister);
if (unregister) {
objecter->_linger_cancel(op);
}
op->put();
}
void Objecter::_check_linger_pool_dne(LingerOp *op, bool *need_unregister)
{
// rwlock is locked unique
*need_unregister = false;
if (op->register_gen > 0) {
ldout(cct, 10) << "_check_linger_pool_dne linger_id " << op->linger_id
<< " pool previously existed but now does not"
<< dendl;
op->map_dne_bound = osdmap->get_epoch();
} else {
ldout(cct, 10) << "_check_linger_pool_dne linger_id " << op->linger_id
<< " current " << osdmap->get_epoch()
<< " map_dne_bound " << op->map_dne_bound
<< dendl;
}
if (op->map_dne_bound > 0) {
if (osdmap->get_epoch() >= op->map_dne_bound) {
std::unique_lock wl{op->watch_lock};
if (op->on_reg_commit) {
op->on_reg_commit->defer(std::move(op->on_reg_commit),
osdc_errc::pool_dne, cb::list{});
op->on_reg_commit = nullptr;
}
if (op->on_notify_finish) {
op->on_notify_finish->defer(std::move(op->on_notify_finish),
osdc_errc::pool_dne, cb::list{});
op->on_notify_finish = nullptr;
}
*need_unregister = true;
}
} else {
_send_linger_map_check(op);
}
}
void Objecter::_check_linger_pool_eio(LingerOp *op)
{
// rwlock is locked unique
std::unique_lock wl{op->watch_lock};
if (op->on_reg_commit) {
op->on_reg_commit->defer(std::move(op->on_reg_commit),
osdc_errc::pool_dne, cb::list{});
op->on_reg_commit = nullptr;
}
if (op->on_notify_finish) {
op->on_notify_finish->defer(std::move(op->on_notify_finish),
osdc_errc::pool_dne, cb::list{});
op->on_notify_finish = nullptr;
}
}
void Objecter::_send_linger_map_check(LingerOp *op)
{
// ask the monitor
if (check_latest_map_lingers.count(op->linger_id) == 0) {
op->get();
check_latest_map_lingers[op->linger_id] = op;
monc->get_version("osdmap", CB_Linger_Map_Latest(this, op->linger_id));
}
}
void Objecter::_linger_cancel_map_check(LingerOp *op)
{
// rwlock is locked unique
auto iter = check_latest_map_lingers.find(op->linger_id);
if (iter != check_latest_map_lingers.end()) {
LingerOp *op = iter->second;
op->put();
check_latest_map_lingers.erase(iter);
}
}
// command pool check
void Objecter::CB_Command_Map_Latest::operator()(bs::error_code e,
version_t latest, version_t)
{
if (e == bs::errc::resource_unavailable_try_again ||
e == bs::errc::operation_canceled) {
// ignore callback; we will retry in resend_mon_ops()
return;
}
unique_lock wl(objecter->rwlock);
auto iter = objecter->check_latest_map_commands.find(tid);
if (iter == objecter->check_latest_map_commands.end()) {
return;
}
auto c = iter->second;
objecter->check_latest_map_commands.erase(iter);
if (c->map_dne_bound == 0)
c->map_dne_bound = latest;
unique_lock sul(c->session->lock);
objecter->_check_command_map_dne(c);
sul.unlock();
c->put();
}
void Objecter::_check_command_map_dne(CommandOp *c)
{
// rwlock is locked unique
// session is locked unique
ldout(cct, 10) << "_check_command_map_dne tid " << c->tid
<< " current " << osdmap->get_epoch()
<< " map_dne_bound " << c->map_dne_bound
<< dendl;
if (c->map_dne_bound > 0) {
if (osdmap->get_epoch() >= c->map_dne_bound) {
_finish_command(c, osdcode(c->map_check_error),
std::move(c->map_check_error_str), {});
}
} else {
_send_command_map_check(c);
}
}
void Objecter::_send_command_map_check(CommandOp *c)
{
// rwlock is locked unique
// session is locked unique
// ask the monitor
if (check_latest_map_commands.count(c->tid) == 0) {
c->get();
check_latest_map_commands[c->tid] = c;
monc->get_version("osdmap", CB_Command_Map_Latest(this, c->tid));
}
}
void Objecter::_command_cancel_map_check(CommandOp *c)
{
// rwlock is locked unique
auto iter = check_latest_map_commands.find(c->tid);
if (iter != check_latest_map_commands.end()) {
auto c = iter->second;
c->put();
check_latest_map_commands.erase(iter);
}
}
/**
* Look up OSDSession by OSD id.
*
* @returns 0 on success, or -EAGAIN if the lock context requires
* promotion to write.
*/
int Objecter::_get_session(int osd, OSDSession **session,
shunique_lock<ceph::shared_mutex>& sul)
{
ceph_assert(sul && sul.mutex() == &rwlock);
if (osd < 0) {
*session = homeless_session;
ldout(cct, 20) << __func__ << " osd=" << osd << " returning homeless"
<< dendl;
return 0;
}
auto p = osd_sessions.find(osd);
if (p != osd_sessions.end()) {
auto s = p->second;
s->get();
*session = s;
ldout(cct, 20) << __func__ << " s=" << s << " osd=" << osd << " "
<< s->get_nref() << dendl;
return 0;
}
if (!sul.owns_lock()) {
return -EAGAIN;
}
auto s = new OSDSession(cct, osd);
osd_sessions[osd] = s;
s->con = messenger->connect_to_osd(osdmap->get_addrs(osd));
s->con->set_priv(RefCountedPtr{s});
logger->inc(l_osdc_osd_session_open);
logger->set(l_osdc_osd_sessions, osd_sessions.size());
s->get();
*session = s;
ldout(cct, 20) << __func__ << " s=" << s << " osd=" << osd << " "
<< s->get_nref() << dendl;
return 0;
}
void Objecter::put_session(Objecter::OSDSession *s)
{
if (s && !s->is_homeless()) {
ldout(cct, 20) << __func__ << " s=" << s << " osd=" << s->osd << " "
<< s->get_nref() << dendl;
s->put();
}
}
void Objecter::get_session(Objecter::OSDSession *s)
{
ceph_assert(s != NULL);
if (!s->is_homeless()) {
ldout(cct, 20) << __func__ << " s=" << s << " osd=" << s->osd << " "
<< s->get_nref() << dendl;
s->get();
}
}
void Objecter::_reopen_session(OSDSession *s)
{
// rwlock is locked unique
// s->lock is locked
auto addrs = osdmap->get_addrs(s->osd);
ldout(cct, 10) << "reopen_session osd." << s->osd << " session, addr now "
<< addrs << dendl;
if (s->con) {
s->con->set_priv(NULL);
s->con->mark_down();
logger->inc(l_osdc_osd_session_close);
}
s->con = messenger->connect_to_osd(addrs);
s->con->set_priv(RefCountedPtr{s});
s->incarnation++;
logger->inc(l_osdc_osd_session_open);
}
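/**
 * Tear down the session to an OSD (e.g. because it went down or its
 * address changed).  Any ops, linger ops and commands still attached
 * to the session are reassigned to the homeless session so they can be
 * resent once a usable mapping is available.
 */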
void Objecter::close_session(OSDSession *s)
{
// rwlock is locked unique
ldout(cct, 10) << "close_session for osd." << s->osd << dendl;
if (s->con) {
s->con->set_priv(NULL);
s->con->mark_down();
logger->inc(l_osdc_osd_session_close);
}
unique_lock sl(s->lock);
std::list<LingerOp*> homeless_lingers;
std::list<CommandOp*> homeless_commands;
std::list<Op*> homeless_ops;
while (!s->linger_ops.empty()) {
auto i = s->linger_ops.begin();
ldout(cct, 10) << " linger_op " << i->first << dendl;
homeless_lingers.push_back(i->second);
_session_linger_op_remove(s, i->second);
}
while (!s->ops.empty()) {
auto i = s->ops.begin();
ldout(cct, 10) << " op " << i->first << dendl;
homeless_ops.push_back(i->second);
_session_op_remove(s, i->second);
}
while (!s->command_ops.empty()) {
auto i = s->command_ops.begin();
ldout(cct, 10) << " command_op " << i->first << dendl;
homeless_commands.push_back(i->second);
_session_command_op_remove(s, i->second);
}
osd_sessions.erase(s->osd);
sl.unlock();
put_session(s);
// Assign any leftover ops to the homeless session
{
unique_lock hsl(homeless_session->lock);
for (auto i = homeless_lingers.begin();
i != homeless_lingers.end(); ++i) {
_session_linger_op_assign(homeless_session, *i);
}
for (auto i = homeless_ops.begin();
i != homeless_ops.end(); ++i) {
_session_op_assign(homeless_session, *i);
}
for (auto i = homeless_commands.begin();
i != homeless_commands.end(); ++i) {
_session_command_op_assign(homeless_session, *i);
}
}
logger->set(l_osdc_osd_sessions, osd_sessions.size());
}
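// Block the calling thread until our osdmap has reached at least epoch e.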
void Objecter::wait_for_osd_map(epoch_t e)
{
unique_lock l(rwlock);
if (osdmap->get_epoch() >= e) {
l.unlock();
return;
}
ca::waiter<bs::error_code> w;
waiting_for_map[e].emplace_back(OpCompletion::create(
service.get_executor(),
w.ref()),
bs::error_code{});
l.unlock();
w.wait();
}
void Objecter::_get_latest_version(epoch_t oldest, epoch_t newest,
std::unique_ptr<OpCompletion> fin,
std::unique_lock<ceph::shared_mutex>&& l)
{
ceph_assert(fin);
if (osdmap->get_epoch() >= newest) {
ldout(cct, 10) << __func__ << " latest " << newest << ", have it" << dendl;
l.unlock();
ca::defer(std::move(fin), bs::error_code{});
} else {
ldout(cct, 10) << __func__ << " latest " << newest << ", waiting" << dendl;
_wait_for_new_map(std::move(fin), newest, bs::error_code{});
l.unlock();
}
}
void Objecter::maybe_request_map()
{
shared_lock rl(rwlock);
_maybe_request_map();
}
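/**
 * Subscribe to the next osdmap from the monitor.  While the cluster is
 * full or paused we subscribe continuously (so we notice the condition
 * clearing); otherwise a one-shot subscription is enough.
 */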
void Objecter::_maybe_request_map()
{
// rwlock is locked
int flag = 0;
if (_osdmap_full_flag()
|| osdmap->test_flag(CEPH_OSDMAP_PAUSERD)
|| osdmap->test_flag(CEPH_OSDMAP_PAUSEWR)) {
ldout(cct, 10) << "_maybe_request_map subscribing (continuous) to next "
"osd map (FULL flag is set)" << dendl;
} else {
ldout(cct, 10)
<< "_maybe_request_map subscribing (onetime) to next osd map" << dendl;
flag = CEPH_SUBSCRIBE_ONETIME;
}
epoch_t epoch = osdmap->get_epoch() ? osdmap->get_epoch()+1 : 0;
if (monc->sub_want("osdmap", epoch, flag)) {
monc->renew_subs();
}
}
void Objecter::_wait_for_new_map(std::unique_ptr<OpCompletion> c, epoch_t epoch,
bs::error_code ec)
{
// rwlock is locked unique
waiting_for_map[epoch].emplace_back(std::move(c), ec);
_maybe_request_map();
}
/**
* Use this together with wait_for_map: this is a pre-check to avoid
* allocating a Context for wait_for_map if we can see that we
* definitely already have the epoch.
*
* This does *not* replace the need to handle the return value of
* wait_for_map: just because we don't have it in this pre-check
* doesn't mean we won't have it when calling back into wait_for_map,
* since the objecter lock is dropped in between.
*/
bool Objecter::have_map(const epoch_t epoch)
{
shared_lock rl(rwlock);
if (osdmap->get_epoch() >= epoch) {
return true;
} else {
return false;
}
}
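/**
 * Requeue work attached to a session after it has been reopened: drop
 * its backoffs, resend ops in tid order (cancelling those that should
 * not be resent), collect linger ops into lresend for the caller to
 * resend, and resend any command ops.
 */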
void Objecter::_kick_requests(OSDSession *session,
map<uint64_t, LingerOp *>& lresend)
{
// rwlock is locked unique
// clear backoffs
session->backoffs.clear();
session->backoffs_by_id.clear();
// resend ops
map<ceph_tid_t,Op*> resend; // resend in tid order
for (auto p = session->ops.begin(); p != session->ops.end();) {
Op *op = p->second;
++p;
if (op->should_resend) {
if (!op->target.paused)
resend[op->tid] = op;
} else {
_op_cancel_map_check(op);
_cancel_linger_op(op);
}
}
logger->inc(l_osdc_op_resend, resend.size());
while (!resend.empty()) {
_send_op(resend.begin()->second);
resend.erase(resend.begin());
}
// resend lingers
logger->inc(l_osdc_linger_resend, session->linger_ops.size());
for (auto j = session->linger_ops.begin();
j != session->linger_ops.end(); ++j) {
LingerOp *op = j->second;
op->get();
ceph_assert(lresend.count(j->first) == 0);
lresend[j->first] = op;
}
// resend commands
logger->inc(l_osdc_command_resend, session->command_ops.size());
map<uint64_t,CommandOp*> cresend; // resend in order
for (auto k = session->command_ops.begin();
k != session->command_ops.end(); ++k) {
cresend[k->first] = k->second;
}
while (!cresend.empty()) {
_send_command(cresend.begin()->second);
cresend.erase(cresend.begin());
}
}
void Objecter::_linger_ops_resend(map<uint64_t, LingerOp *>& lresend,
unique_lock<ceph::shared_mutex>& ul)
{
ceph_assert(ul.owns_lock());
shunique_lock sul(std::move(ul));
while (!lresend.empty()) {
LingerOp *op = lresend.begin()->second;
if (!op->canceled) {
_send_linger(op, sul);
}
op->put();
lresend.erase(lresend.begin());
}
ul = sul.release_to_unique();
}
void Objecter::start_tick()
{
ceph_assert(tick_event == 0);
tick_event =
timer.add_event(ceph::make_timespan(cct->_conf->objecter_tick_interval),
&Objecter::tick, this);
}
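/**
 * Periodic timer callback: flag requests older than objecter_timeout
 * as laggy, send pings for registered watches, and ping OSDs that have
 * laggy ops, linger ops or commands outstanding so lossy session
 * resets are detected.  Reschedules itself unless shutdown raced with
 * us.
 */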
void Objecter::tick()
{
shared_lock rl(rwlock);
ldout(cct, 10) << "tick" << dendl;
// we are only called by C_Tick
tick_event = 0;
if (!initialized) {
// we raced with shutdown
ldout(cct, 10) << __func__ << " raced with shutdown" << dendl;
return;
}
set<OSDSession*> toping;
// look for laggy requests
auto cutoff = ceph::coarse_mono_clock::now();
cutoff -= ceph::make_timespan(cct->_conf->objecter_timeout); // timeout
unsigned laggy_ops = 0;
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
auto s = siter->second;
scoped_lock l(s->lock);
bool found = false;
for (auto p = s->ops.begin(); p != s->ops.end(); ++p) {
auto op = p->second;
ceph_assert(op->session);
if (op->stamp < cutoff) {
ldout(cct, 2) << " tid " << p->first << " on osd." << op->session->osd
<< " is laggy" << dendl;
found = true;
++laggy_ops;
}
}
for (auto p = s->linger_ops.begin();
p != s->linger_ops.end();
++p) {
auto op = p->second;
std::unique_lock wl(op->watch_lock);
ceph_assert(op->session);
ldout(cct, 10) << " pinging osd that serves lingering tid " << p->first
<< " (osd." << op->session->osd << ")" << dendl;
found = true;
if (op->is_watch && op->registered && !op->last_error)
_send_linger_ping(op);
}
for (auto p = s->command_ops.begin();
p != s->command_ops.end();
++p) {
auto op = p->second;
ceph_assert(op->session);
ldout(cct, 10) << " pinging osd that serves command tid " << p->first
<< " (osd." << op->session->osd << ")" << dendl;
found = true;
}
if (found)
toping.insert(s);
}
if (num_homeless_ops || !toping.empty()) {
_maybe_request_map();
}
logger->set(l_osdc_op_laggy, laggy_ops);
logger->set(l_osdc_osd_laggy, toping.size());
if (!toping.empty()) {
// send a ping to these osds, to ensure we detect any session resets
// (osd reply message policy is lossy)
for (auto i = toping.begin(); i != toping.end(); ++i) {
(*i)->con->send_message(new MPing);
}
}
// Make sure we don't reschedule if we wake up after shutdown
if (initialized) {
tick_event = timer.reschedule_me(ceph::make_timespan(
cct->_conf->objecter_tick_interval));
}
}
void Objecter::resend_mon_ops()
{
unique_lock wl(rwlock);
ldout(cct, 10) << "resend_mon_ops" << dendl;
for (auto p = poolstat_ops.begin(); p != poolstat_ops.end(); ++p) {
_poolstat_submit(p->second);
logger->inc(l_osdc_poolstat_resend);
}
for (auto p = statfs_ops.begin(); p != statfs_ops.end(); ++p) {
_fs_stats_submit(p->second);
logger->inc(l_osdc_statfs_resend);
}
for (auto p = pool_ops.begin(); p != pool_ops.end(); ++p) {
_pool_op_submit(p->second);
logger->inc(l_osdc_poolop_resend);
}
for (auto p = check_latest_map_ops.begin();
p != check_latest_map_ops.end();
++p) {
monc->get_version("osdmap", CB_Op_Map_Latest(this, p->second->tid));
}
for (auto p = check_latest_map_lingers.begin();
p != check_latest_map_lingers.end();
++p) {
monc->get_version("osdmap", CB_Linger_Map_Latest(this, p->second->linger_id));
}
for (auto p = check_latest_map_commands.begin();
p != check_latest_map_commands.end();
++p) {
monc->get_version("osdmap", CB_Command_Map_Latest(this, p->second->tid));
}
}
// read | write ---------------------------
void Objecter::op_submit(Op *op, ceph_tid_t *ptid, int *ctx_budget)
{
shunique_lock rl(rwlock, ceph::acquire_shared);
ceph_tid_t tid = 0;
if (!ptid)
ptid = &tid;
op->trace.event("op submit");
_op_submit_with_budget(op, rl, ptid, ctx_budget);
}
void Objecter::_op_submit_with_budget(Op *op,
shunique_lock<ceph::shared_mutex>& sul,
ceph_tid_t *ptid,
int *ctx_budget)
{
ceph_assert(initialized);
ceph_assert(op->ops.size() == op->out_bl.size());
ceph_assert(op->ops.size() == op->out_rval.size());
ceph_assert(op->ops.size() == op->out_handler.size());
// throttle. before we look at any state, because
// _take_op_budget() may drop our lock while it blocks.
if (!op->ctx_budgeted || (ctx_budget && (*ctx_budget == -1))) {
int op_budget = _take_op_budget(op, sul);
// take and pass out the budget for the first OP
// in the context session
if (ctx_budget && (*ctx_budget == -1)) {
*ctx_budget = op_budget;
}
}
if (osd_timeout > timespan(0)) {
if (op->tid == 0)
op->tid = ++last_tid;
auto tid = op->tid;
op->ontimeout = timer.add_event(osd_timeout,
[this, tid]() {
op_cancel(tid, -ETIMEDOUT); });
}
_op_submit(op, sul, ptid);
}
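// Bump in-flight counters and the per-op-type perf counters for an op
// that is about to be sent.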
void Objecter::_send_op_account(Op *op)
{
inflight_ops++;
// add to gather set(s)
if (op->has_completion()) {
num_in_flight++;
} else {
ldout(cct, 20) << " note: not requesting reply" << dendl;
}
logger->inc(l_osdc_op_active);
logger->inc(l_osdc_op);
logger->inc(l_osdc_oplen_avg, op->ops.size());
if ((op->target.flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)) ==
(CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE))
logger->inc(l_osdc_op_rmw);
else if (op->target.flags & CEPH_OSD_FLAG_WRITE)
logger->inc(l_osdc_op_w);
else if (op->target.flags & CEPH_OSD_FLAG_READ)
logger->inc(l_osdc_op_r);
if (op->target.flags & CEPH_OSD_FLAG_PGOP)
logger->inc(l_osdc_op_pg);
for (auto p = op->ops.begin(); p != op->ops.end(); ++p) {
int code = l_osdc_osdop_other;
switch (p->op.op) {
case CEPH_OSD_OP_STAT: code = l_osdc_osdop_stat; break;
case CEPH_OSD_OP_CREATE: code = l_osdc_osdop_create; break;
case CEPH_OSD_OP_READ: code = l_osdc_osdop_read; break;
case CEPH_OSD_OP_WRITE: code = l_osdc_osdop_write; break;
case CEPH_OSD_OP_WRITEFULL: code = l_osdc_osdop_writefull; break;
case CEPH_OSD_OP_WRITESAME: code = l_osdc_osdop_writesame; break;
case CEPH_OSD_OP_APPEND: code = l_osdc_osdop_append; break;
case CEPH_OSD_OP_ZERO: code = l_osdc_osdop_zero; break;
case CEPH_OSD_OP_TRUNCATE: code = l_osdc_osdop_truncate; break;
case CEPH_OSD_OP_DELETE: code = l_osdc_osdop_delete; break;
case CEPH_OSD_OP_MAPEXT: code = l_osdc_osdop_mapext; break;
case CEPH_OSD_OP_SPARSE_READ: code = l_osdc_osdop_sparse_read; break;
case CEPH_OSD_OP_GETXATTR: code = l_osdc_osdop_getxattr; break;
case CEPH_OSD_OP_SETXATTR: code = l_osdc_osdop_setxattr; break;
case CEPH_OSD_OP_CMPXATTR: code = l_osdc_osdop_cmpxattr; break;
case CEPH_OSD_OP_RMXATTR: code = l_osdc_osdop_rmxattr; break;
case CEPH_OSD_OP_RESETXATTRS: code = l_osdc_osdop_resetxattrs; break;
// OMAP read operations
case CEPH_OSD_OP_OMAPGETVALS:
case CEPH_OSD_OP_OMAPGETKEYS:
case CEPH_OSD_OP_OMAPGETHEADER:
case CEPH_OSD_OP_OMAPGETVALSBYKEYS:
case CEPH_OSD_OP_OMAP_CMP: code = l_osdc_osdop_omap_rd; break;
// OMAP write operations
case CEPH_OSD_OP_OMAPSETVALS:
case CEPH_OSD_OP_OMAPSETHEADER: code = l_osdc_osdop_omap_wr; break;
// OMAP del operations
case CEPH_OSD_OP_OMAPCLEAR:
case CEPH_OSD_OP_OMAPRMKEYS: code = l_osdc_osdop_omap_del; break;
case CEPH_OSD_OP_CALL: code = l_osdc_osdop_call; break;
case CEPH_OSD_OP_WATCH: code = l_osdc_osdop_watch; break;
case CEPH_OSD_OP_NOTIFY: code = l_osdc_osdop_notify; break;
}
if (code)
logger->inc(code);
}
}
void Objecter::_op_submit(Op *op, shunique_lock<ceph::shared_mutex>& sul, ceph_tid_t *ptid)
{
// rwlock is locked
ldout(cct, 10) << __func__ << " op " << op << dendl;
// pick target
ceph_assert(op->session == NULL);
OSDSession *s = NULL;
bool check_for_latest_map = false;
int r = _calc_target(&op->target, nullptr);
switch(r) {
case RECALC_OP_TARGET_POOL_DNE:
check_for_latest_map = true;
break;
case RECALC_OP_TARGET_POOL_EIO:
if (op->has_completion()) {
op->complete(osdc_errc::pool_eio, -EIO);
}
return;
}
// Try to get a session, including a retry if we need to take write lock
r = _get_session(op->target.osd, &s, sul);
if (r == -EAGAIN ||
(check_for_latest_map && sul.owns_lock_shared()) ||
cct->_conf->objecter_debug_inject_relock_delay) {
epoch_t orig_epoch = osdmap->get_epoch();
sul.unlock();
if (cct->_conf->objecter_debug_inject_relock_delay) {
sleep(1);
}
sul.lock();
if (orig_epoch != osdmap->get_epoch()) {
// map changed; recalculate mapping
ldout(cct, 10) << __func__ << " relock raced with osdmap, recalc target"
<< dendl;
check_for_latest_map = _calc_target(&op->target, nullptr)
== RECALC_OP_TARGET_POOL_DNE;
if (s) {
put_session(s);
s = NULL;
r = -EAGAIN;
}
}
}
if (r == -EAGAIN) {
ceph_assert(s == NULL);
r = _get_session(op->target.osd, &s, sul);
}
ceph_assert(r == 0);
ceph_assert(s); // may be homeless
_send_op_account(op);
// send?
ceph_assert(op->target.flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE));
bool need_send = false;
if (op->target.paused) {
ldout(cct, 10) << " tid " << op->tid << " op " << op << " is paused"
<< dendl;
_maybe_request_map();
} else if (!s->is_homeless()) {
need_send = true;
} else {
_maybe_request_map();
}
unique_lock sl(s->lock);
if (op->tid == 0)
op->tid = ++last_tid;
ldout(cct, 10) << "_op_submit oid " << op->target.base_oid
<< " '" << op->target.base_oloc << "' '"
<< op->target.target_oloc << "' " << op->ops << " tid "
<< op->tid << " osd." << (!s->is_homeless() ? s->osd : -1)
<< dendl;
_session_op_assign(s, op);
if (need_send) {
_send_op(op);
}
// Last chance to touch Op here, after giving up session lock it can
// be freed at any time by response handler.
ceph_tid_t tid = op->tid;
if (check_for_latest_map) {
_send_op_map_check(op);
}
if (ptid)
*ptid = tid;
op = NULL;
sl.unlock();
put_session(s);
ldout(cct, 5) << num_in_flight << " in flight" << dendl;
}
int Objecter::op_cancel(OSDSession *s, ceph_tid_t tid, int r)
{
ceph_assert(initialized);
unique_lock sl(s->lock);
auto p = s->ops.find(tid);
if (p == s->ops.end()) {
ldout(cct, 10) << __func__ << " tid " << tid << " dne in session "
<< s->osd << dendl;
return -ENOENT;
}
#if 0
if (s->con) {
ldout(cct, 20) << " revoking rx ceph::buffer for " << tid
<< " on " << s->con << dendl;
s->con->revoke_rx_buffer(tid);
}
#endif
ldout(cct, 10) << __func__ << " tid " << tid << " in session " << s->osd
<< dendl;
Op *op = p->second;
if (op->has_completion()) {
num_in_flight--;
op->complete(osdcode(r), r);
}
_op_cancel_map_check(op);
_finish_op(op, r);
sl.unlock();
return 0;
}
int Objecter::op_cancel(ceph_tid_t tid, int r)
{
int ret = 0;
unique_lock wl(rwlock);
ret = _op_cancel(tid, r);
return ret;
}
int Objecter::op_cancel(const vector<ceph_tid_t>& tids, int r)
{
unique_lock wl(rwlock);
ldout(cct,10) << __func__ << " " << tids << dendl;
for (auto tid : tids) {
_op_cancel(tid, r);
}
return 0;
}
int Objecter::_op_cancel(ceph_tid_t tid, int r)
{
int ret = 0;
ldout(cct, 5) << __func__ << ": cancelling tid " << tid << " r=" << r
<< dendl;
start:
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
OSDSession *s = siter->second;
shared_lock sl(s->lock);
if (s->ops.find(tid) != s->ops.end()) {
sl.unlock();
ret = op_cancel(s, tid, r);
if (ret == -ENOENT) {
/* oh no! raced, maybe tid moved to another session, restarting */
goto start;
}
return ret;
}
}
ldout(cct, 5) << __func__ << ": tid " << tid
<< " not found in live sessions" << dendl;
// Handle case where the op is in homeless session
shared_lock sl(homeless_session->lock);
if (homeless_session->ops.find(tid) != homeless_session->ops.end()) {
sl.unlock();
ret = op_cancel(homeless_session, tid, r);
if (ret == -ENOENT) {
/* oh no! raced, maybe tid moved to another session, restarting */
goto start;
} else {
return ret;
}
} else {
sl.unlock();
}
ldout(cct, 5) << __func__ << ": tid " << tid
<< " not found in homeless session" << dendl;
return ret;
}
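/**
 * Cancel every outstanding write op, optionally restricted to a single
 * pool.  Returns the osdmap epoch at which the cancellations took
 * effect, or -1 if no write ops were found.
 */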
epoch_t Objecter::op_cancel_writes(int r, int64_t pool)
{
unique_lock wl(rwlock);
std::vector<ceph_tid_t> to_cancel;
bool found = false;
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
OSDSession *s = siter->second;
shared_lock sl(s->lock);
for (auto op_i = s->ops.begin();
op_i != s->ops.end(); ++op_i) {
if (op_i->second->target.flags & CEPH_OSD_FLAG_WRITE
&& (pool == -1 || op_i->second->target.target_oloc.pool == pool)) {
to_cancel.push_back(op_i->first);
}
}
sl.unlock();
for (auto titer = to_cancel.begin(); titer != to_cancel.end(); ++titer) {
int cancel_result = op_cancel(s, *titer, r);
// We hold rwlock across search and cancellation, so cancels
// should always succeed
ceph_assert(cancel_result == 0);
}
if (!found && to_cancel.size())
found = true;
to_cancel.clear();
}
const epoch_t epoch = osdmap->get_epoch();
wl.unlock();
if (found) {
return epoch;
} else {
return -1;
}
}
bool Objecter::is_pg_changed(
int oldprimary,
const vector<int>& oldacting,
int newprimary,
const vector<int>& newacting,
bool any_change)
{
if (OSDMap::primary_changed_broken( // https://tracker.ceph.com/issues/43213
oldprimary,
oldacting,
newprimary,
newacting))
return true;
if (any_change && oldacting != newacting)
return true;
return false; // same primary (tho replicas may have changed)
}
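// Decide whether an op should be held back right now: pause flags for
// its direction, a full cluster or pool (when the op respects full),
// or an osdmap epoch below the epoch barrier.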
bool Objecter::target_should_be_paused(op_target_t *t)
{
const pg_pool_t *pi = osdmap->get_pg_pool(t->base_oloc.pool);
bool pauserd = osdmap->test_flag(CEPH_OSDMAP_PAUSERD);
bool pausewr = osdmap->test_flag(CEPH_OSDMAP_PAUSEWR) ||
(t->respects_full() && (_osdmap_full_flag() || _osdmap_pool_full(*pi)));
return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
(t->flags & CEPH_OSD_FLAG_WRITE && pausewr) ||
(osdmap->get_epoch() < epoch_barrier);
}
/**
* Locking public accessor for _osdmap_full_flag
*/
bool Objecter::osdmap_full_flag() const
{
shared_lock rl(rwlock);
return _osdmap_full_flag();
}
bool Objecter::osdmap_pool_full(const int64_t pool_id) const
{
shared_lock rl(rwlock);
if (_osdmap_full_flag()) {
return true;
}
return _osdmap_pool_full(pool_id);
}
bool Objecter::_osdmap_pool_full(const int64_t pool_id) const
{
const pg_pool_t *pool = osdmap->get_pg_pool(pool_id);
if (pool == NULL) {
ldout(cct, 4) << __func__ << ": DNE pool " << pool_id << dendl;
return false;
}
return _osdmap_pool_full(*pool);
}
bool Objecter::_osdmap_has_pool_full() const
{
for (auto it = osdmap->get_pools().begin();
it != osdmap->get_pools().end(); ++it) {
if (_osdmap_pool_full(it->second))
return true;
}
return false;
}
/**
* Wrapper around osdmap->test_flag for special handling of the FULL flag.
*/
bool Objecter::_osdmap_full_flag() const
{
// Ignore the FULL flag if the caller has disabled honor_pool_full
return osdmap->test_flag(CEPH_OSDMAP_FULL) && honor_pool_full;
}
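// Merge the current per-pool full state into pool_full_map.  A pool
// stays marked full if it was full in any epoch processed so far.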
void Objecter::update_pool_full_map(map<int64_t, bool>& pool_full_map)
{
for (map<int64_t, pg_pool_t>::const_iterator it
= osdmap->get_pools().begin();
it != osdmap->get_pools().end(); ++it) {
if (pool_full_map.find(it->first) == pool_full_map.end()) {
pool_full_map[it->first] = _osdmap_pool_full(it->second);
} else {
pool_full_map[it->first] = _osdmap_pool_full(it->second) ||
pool_full_map[it->first];
}
}
}
int64_t Objecter::get_object_hash_position(int64_t pool, const string& key,
const string& ns)
{
shared_lock rl(rwlock);
const pg_pool_t *p = osdmap->get_pg_pool(pool);
if (!p)
return -ENOENT;
return p->hash_key(key, ns);
}
int64_t Objecter::get_object_pg_hash_position(int64_t pool, const string& key,
const string& ns)
{
shared_lock rl(rwlock);
const pg_pool_t *p = osdmap->get_pg_pool(pool);
if (!p)
return -ENOENT;
return p->raw_hash_to_pg(p->hash_key(key, ns));
}
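/**
 * Strip any snaps that the new osdmap reports as removed from an op's
 * snap context, so a resent write never references a deleted snapshot.
 */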
void Objecter::_prune_snapc(
const mempool::osdmap::map<int64_t,
snap_interval_set_t>& new_removed_snaps,
Op *op)
{
bool match = false;
auto i = new_removed_snaps.find(op->target.base_pgid.pool());
if (i != new_removed_snaps.end()) {
for (auto s : op->snapc.snaps) {
if (i->second.contains(s)) {
match = true;
break;
}
}
if (match) {
vector<snapid_t> new_snaps;
for (auto s : op->snapc.snaps) {
if (!i->second.contains(s)) {
new_snaps.push_back(s);
}
}
op->snapc.snaps.swap(new_snaps);
ldout(cct,10) << __func__ << " op " << op->tid << " snapc " << op->snapc
<< " (was " << new_snaps << ")" << dendl;
}
}
}
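/**
 * (Re)compute the target PG, acting set and destination OSD for an op
 * target.  Returns RECALC_OP_TARGET_NO_ACTION if nothing relevant
 * changed, RECALC_OP_TARGET_NEED_RESEND if the mapping changed (or a
 * resend is forced) and the op must be resent, or
 * RECALC_OP_TARGET_POOL_DNE / RECALC_OP_TARGET_POOL_EIO on pool errors.
 */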
int Objecter::_calc_target(op_target_t *t, Connection *con, bool any_change)
{
// rwlock is locked
bool is_read = t->flags & CEPH_OSD_FLAG_READ;
bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
t->epoch = osdmap->get_epoch();
ldout(cct,20) << __func__ << " epoch " << t->epoch
<< " base " << t->base_oid << " " << t->base_oloc
<< " precalc_pgid " << (int)t->precalc_pgid
<< " pgid " << t->base_pgid
<< (is_read ? " is_read" : "")
<< (is_write ? " is_write" : "")
<< dendl;
const pg_pool_t *pi = osdmap->get_pg_pool(t->base_oloc.pool);
if (!pi) {
t->osd = -1;
return RECALC_OP_TARGET_POOL_DNE;
}
if (pi->has_flag(pg_pool_t::FLAG_EIO)) {
return RECALC_OP_TARGET_POOL_EIO;
}
ldout(cct,30) << __func__ << " base pi " << pi
<< " pg_num " << pi->get_pg_num() << dendl;
bool force_resend = false;
if (osdmap->get_epoch() == pi->last_force_op_resend) {
if (t->last_force_resend < pi->last_force_op_resend) {
t->last_force_resend = pi->last_force_op_resend;
force_resend = true;
} else if (t->last_force_resend == 0) {
force_resend = true;
}
}
// apply tiering
t->target_oid = t->base_oid;
t->target_oloc = t->base_oloc;
if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
if (is_read && pi->has_read_tier())
t->target_oloc.pool = pi->read_tier;
if (is_write && pi->has_write_tier())
t->target_oloc.pool = pi->write_tier;
pi = osdmap->get_pg_pool(t->target_oloc.pool);
if (!pi) {
t->osd = -1;
return RECALC_OP_TARGET_POOL_DNE;
}
}
pg_t pgid;
if (t->precalc_pgid) {
ceph_assert(t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY);
ceph_assert(t->base_oid.name.empty()); // make sure this is a pg op
ceph_assert(t->base_oloc.pool == (int64_t)t->base_pgid.pool());
pgid = t->base_pgid;
} else {
int ret = osdmap->object_locator_to_pg(t->target_oid, t->target_oloc,
pgid);
if (ret == -ENOENT) {
t->osd = -1;
return RECALC_OP_TARGET_POOL_DNE;
}
}
ldout(cct,20) << __func__ << " target " << t->target_oid << " "
<< t->target_oloc << " -> pgid " << pgid << dendl;
ldout(cct,30) << __func__ << " target pi " << pi
<< " pg_num " << pi->get_pg_num() << dendl;
t->pool_ever_existed = true;
int size = pi->size;
int min_size = pi->min_size;
unsigned pg_num = pi->get_pg_num();
unsigned pg_num_mask = pi->get_pg_num_mask();
unsigned pg_num_pending = pi->get_pg_num_pending();
int up_primary, acting_primary;
vector<int> up, acting;
ps_t actual_ps = ceph_stable_mod(pgid.ps(), pg_num, pg_num_mask);
pg_t actual_pgid(actual_ps, pgid.pool());
if (!lookup_pg_mapping(actual_pgid, osdmap->get_epoch(), &up, &up_primary,
&acting, &acting_primary)) {
osdmap->pg_to_up_acting_osds(actual_pgid, &up, &up_primary,
&acting, &acting_primary);
pg_mapping_t pg_mapping(osdmap->get_epoch(),
up, up_primary, acting, acting_primary);
update_pg_mapping(actual_pgid, std::move(pg_mapping));
}
bool sort_bitwise = osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE);
bool recovery_deletes = osdmap->test_flag(CEPH_OSDMAP_RECOVERY_DELETES);
unsigned prev_seed = ceph_stable_mod(pgid.ps(), t->pg_num, t->pg_num_mask);
pg_t prev_pgid(prev_seed, pgid.pool());
if (any_change && PastIntervals::is_new_interval(
t->acting_primary,
acting_primary,
t->acting,
acting,
t->up_primary,
up_primary,
t->up,
up,
t->size,
size,
t->min_size,
min_size,
t->pg_num,
pg_num,
t->pg_num_pending,
pg_num_pending,
t->sort_bitwise,
sort_bitwise,
t->recovery_deletes,
recovery_deletes,
t->peering_crush_bucket_count,
pi->peering_crush_bucket_count,
t->peering_crush_bucket_target,
pi->peering_crush_bucket_target,
t->peering_crush_bucket_barrier,
pi->peering_crush_bucket_barrier,
t->peering_crush_mandatory_member,
pi->peering_crush_mandatory_member,
prev_pgid)) {
force_resend = true;
}
bool unpaused = false;
bool should_be_paused = target_should_be_paused(t);
if (t->paused && !should_be_paused) {
unpaused = true;
}
if (t->paused != should_be_paused) {
ldout(cct, 10) << __func__ << " paused " << t->paused
<< " -> " << should_be_paused << dendl;
t->paused = should_be_paused;
}
bool legacy_change =
t->pgid != pgid ||
is_pg_changed(
t->acting_primary, t->acting, acting_primary, acting,
t->used_replica || any_change);
bool split_or_merge = false;
if (t->pg_num) {
split_or_merge =
prev_pgid.is_split(t->pg_num, pg_num, nullptr) ||
prev_pgid.is_merge_source(t->pg_num, pg_num, nullptr) ||
prev_pgid.is_merge_target(t->pg_num, pg_num);
}
if (legacy_change || split_or_merge || force_resend) {
t->pgid = pgid;
t->acting = std::move(acting);
t->acting_primary = acting_primary;
t->up_primary = up_primary;
t->up = std::move(up);
t->size = size;
t->min_size = min_size;
t->pg_num = pg_num;
t->pg_num_mask = pg_num_mask;
t->pg_num_pending = pg_num_pending;
spg_t spgid(actual_pgid);
if (pi->is_erasure()) {
for (uint8_t i = 0; i < t->acting.size(); ++i) {
if (t->acting[i] == acting_primary) {
spgid.reset_shard(shard_id_t(i));
break;
}
}
}
t->actual_pgid = spgid;
t->sort_bitwise = sort_bitwise;
t->recovery_deletes = recovery_deletes;
t->peering_crush_bucket_count = pi->peering_crush_bucket_count;
t->peering_crush_bucket_target = pi->peering_crush_bucket_target;
t->peering_crush_bucket_barrier = pi->peering_crush_bucket_barrier;
t->peering_crush_mandatory_member = pi->peering_crush_mandatory_member;
ldout(cct, 10) << __func__ << " "
<< " raw pgid " << pgid << " -> actual " << t->actual_pgid
<< " acting " << t->acting
<< " primary " << acting_primary << dendl;
t->used_replica = false;
if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
CEPH_OSD_FLAG_LOCALIZE_READS)) &&
!is_write && pi->is_replicated() && t->acting.size() > 1) {
int osd;
ceph_assert(is_read && t->acting[0] == acting_primary);
if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
int p = rand() % t->acting.size();
if (p)
t->used_replica = true;
osd = t->acting[p];
ldout(cct, 10) << " chose random osd." << osd << " of " << t->acting
<< dendl;
} else {
// look for a local replica. prefer the primary if the
// distance is the same.
int best = -1;
int best_locality = 0;
for (unsigned i = 0; i < t->acting.size(); ++i) {
int locality = osdmap->crush->get_common_ancestor_distance(
cct, t->acting[i], crush_location);
ldout(cct, 20) << __func__ << " localize: rank " << i
<< " osd." << t->acting[i]
<< " locality " << locality << dendl;
if (i == 0 ||
(locality >= 0 && best_locality >= 0 &&
locality < best_locality) ||
(best_locality < 0 && locality >= 0)) {
best = i;
best_locality = locality;
if (i)
t->used_replica = true;
}
}
ceph_assert(best >= 0);
osd = t->acting[best];
}
t->osd = osd;
} else {
t->osd = acting_primary;
}
}
if (legacy_change || unpaused || force_resend) {
return RECALC_OP_TARGET_NEED_RESEND;
}
if (split_or_merge &&
(osdmap->require_osd_release >= ceph_release_t::luminous ||
HAVE_FEATURE(osdmap->get_xinfo(acting_primary).features,
RESEND_ON_SPLIT))) {
return RECALC_OP_TARGET_NEED_RESEND;
}
return RECALC_OP_TARGET_NO_ACTION;
}
int Objecter::_map_session(op_target_t *target, OSDSession **s,
shunique_lock<ceph::shared_mutex>& sul)
{
_calc_target(target, nullptr);
return _get_session(target->osd, s, sul);
}
void Objecter::_session_op_assign(OSDSession *to, Op *op)
{
// to->lock is locked
ceph_assert(op->session == NULL);
ceph_assert(op->tid);
get_session(to);
op->session = to;
to->ops[op->tid] = op;
if (to->is_homeless()) {
num_homeless_ops++;
}
ldout(cct, 15) << __func__ << " " << to->osd << " " << op->tid << dendl;
}
void Objecter::_session_op_remove(OSDSession *from, Op *op)
{
ceph_assert(op->session == from);
// from->lock is locked
if (from->is_homeless()) {
num_homeless_ops--;
}
from->ops.erase(op->tid);
put_session(from);
op->session = NULL;
ldout(cct, 15) << __func__ << " " << from->osd << " " << op->tid << dendl;
}
void Objecter::_session_linger_op_assign(OSDSession *to, LingerOp *op)
{
// to->lock is locked unique
ceph_assert(op->session == NULL);
if (to->is_homeless()) {
num_homeless_ops++;
}
get_session(to);
op->session = to;
to->linger_ops[op->linger_id] = op;
ldout(cct, 15) << __func__ << " " << to->osd << " " << op->linger_id
<< dendl;
}
void Objecter::_session_linger_op_remove(OSDSession *from, LingerOp *op)
{
ceph_assert(from == op->session);
// from->lock is locked unique
if (from->is_homeless()) {
num_homeless_ops--;
}
from->linger_ops.erase(op->linger_id);
put_session(from);
op->session = NULL;
ldout(cct, 15) << __func__ << " " << from->osd << " " << op->linger_id
<< dendl;
}
void Objecter::_session_command_op_remove(OSDSession *from, CommandOp *op)
{
ceph_assert(from == op->session);
// from->lock is locked
if (from->is_homeless()) {
num_homeless_ops--;
}
from->command_ops.erase(op->tid);
put_session(from);
op->session = NULL;
ldout(cct, 15) << __func__ << " " << from->osd << " " << op->tid << dendl;
}
void Objecter::_session_command_op_assign(OSDSession *to, CommandOp *op)
{
// to->lock is locked
ceph_assert(op->session == NULL);
ceph_assert(op->tid);
if (to->is_homeless()) {
num_homeless_ops++;
}
get_session(to);
op->session = to;
to->command_ops[op->tid] = op;
ldout(cct, 15) << __func__ << " " << to->osd << " " << op->tid << dendl;
}
int Objecter::_recalc_linger_op_target(LingerOp *linger_op,
shunique_lock<ceph::shared_mutex>& sul)
{
// rwlock is locked unique
int r = _calc_target(&linger_op->target, nullptr, true);
if (r == RECALC_OP_TARGET_NEED_RESEND) {
ldout(cct, 10) << "recalc_linger_op_target tid " << linger_op->linger_id
<< " pgid " << linger_op->target.pgid
<< " acting " << linger_op->target.acting << dendl;
OSDSession *s = NULL;
r = _get_session(linger_op->target.osd, &s, sul);
ceph_assert(r == 0);
if (linger_op->session != s) {
// NB locking two sessions (s and linger_op->session) at the
// same time here is only safe because we are the only one that
// takes two, and we are holding rwlock for write. We use
// std::shared_mutex in OSDSession because lockdep doesn't know
// that.
unique_lock sl(s->lock);
_session_linger_op_remove(linger_op->session, linger_op);
_session_linger_op_assign(s, linger_op);
}
put_session(s);
return RECALC_OP_TARGET_NEED_RESEND;
}
return r;
}
void Objecter::_cancel_linger_op(Op *op)
{
ldout(cct, 15) << "cancel_op " << op->tid << dendl;
ceph_assert(!op->should_resend);
if (op->has_completion()) {
op->onfinish = nullptr;
num_in_flight--;
}
_finish_op(op, 0);
}
void Objecter::_finish_op(Op *op, int r)
{
ldout(cct, 15) << __func__ << " " << op->tid << dendl;
// op->session->lock is locked unique or op->session is null
if (!op->ctx_budgeted && op->budget >= 0) {
put_op_budget_bytes(op->budget);
op->budget = -1;
}
if (op->ontimeout && r != -ETIMEDOUT)
timer.cancel_event(op->ontimeout);
if (op->session) {
_session_op_remove(op->session, op);
}
logger->dec(l_osdc_op_active);
ceph_assert(check_latest_map_ops.find(op->tid) == check_latest_map_ops.end());
inflight_ops--;
op->put();
}
Objecter::MOSDOp *Objecter::_prepare_osd_op(Op *op)
{
// rwlock is locked
int flags = op->target.flags;
flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
flags |= CEPH_OSD_FLAG_SUPPORTSPOOLEIO;
// Nothing checks this any longer, but needed for compatibility with
// pre-luminous osds
flags |= CEPH_OSD_FLAG_ONDISK;
if (!honor_pool_full)
flags |= CEPH_OSD_FLAG_FULL_FORCE;
op->target.paused = false;
op->stamp = ceph::coarse_mono_clock::now();
hobject_t hobj = op->target.get_hobj();
auto m = new MOSDOp(client_inc, op->tid,
hobj, op->target.actual_pgid,
osdmap->get_epoch(),
flags, op->features);
m->set_snapid(op->snapid);
m->set_snap_seq(op->snapc.seq);
m->set_snaps(op->snapc.snaps);
m->ops = op->ops;
m->set_mtime(op->mtime);
m->set_retry_attempt(op->attempts++);
if (!op->trace.valid() && cct->_conf->osdc_blkin_trace_all) {
op->trace.init("op", &trace_endpoint);
}
if (op->priority)
m->set_priority(op->priority);
else
m->set_priority(cct->_conf->osd_client_op_priority);
if (op->reqid != osd_reqid_t()) {
m->set_reqid(op->reqid);
}
logger->inc(l_osdc_op_send);
ssize_t sum = 0;
for (unsigned i = 0; i < m->ops.size(); i++) {
sum += m->ops[i].indata.length();
}
logger->inc(l_osdc_op_send_bytes, sum);
return m;
}
void Objecter::_send_op(Op *op)
{
// rwlock is locked
// op->session->lock is locked
// backoff?
auto p = op->session->backoffs.find(op->target.actual_pgid);
if (p != op->session->backoffs.end()) {
hobject_t hoid = op->target.get_hobj();
auto q = p->second.lower_bound(hoid);
if (q != p->second.begin()) {
--q;
if (hoid >= q->second.end) {
++q;
}
}
if (q != p->second.end()) {
ldout(cct, 20) << __func__ << " ? " << q->first << " [" << q->second.begin
<< "," << q->second.end << ")" << dendl;
int r = cmp(hoid, q->second.begin);
if (r == 0 || (r > 0 && hoid < q->second.end)) {
ldout(cct, 10) << __func__ << " backoff " << op->target.actual_pgid
<< " id " << q->second.id << " on " << hoid
<< ", queuing " << op << " tid " << op->tid << dendl;
return;
}
}
}
ceph_assert(op->tid > 0);
MOSDOp *m = _prepare_osd_op(op);
if (op->target.actual_pgid != m->get_spg()) {
ldout(cct, 10) << __func__ << " " << op->tid << " pgid change from "
<< m->get_spg() << " to " << op->target.actual_pgid
<< ", updating and reencoding" << dendl;
m->set_spg(op->target.actual_pgid);
m->clear_payload(); // reencode
}
ldout(cct, 15) << "_send_op " << op->tid << " to "
<< op->target.actual_pgid << " on osd." << op->session->osd
<< dendl;
ConnectionRef con = op->session->con;
ceph_assert(con);
#if 0
// preallocated rx ceph::buffer?
if (op->con) {
ldout(cct, 20) << " revoking rx ceph::buffer for " << op->tid << " on "
<< op->con << dendl;
op->con->revoke_rx_buffer(op->tid);
}
if (op->outbl &&
op->ontimeout == 0 && // only post rx_buffer if no timeout; see #9582
op->outbl->length()) {
op->outbl->invalidate_crc(); // messenger writes through c_str()
ldout(cct, 20) << " posting rx ceph::buffer for " << op->tid << " on " << con
<< dendl;
op->con = con;
op->con->post_rx_buffer(op->tid, *op->outbl);
}
#endif
op->incarnation = op->session->incarnation;
if (op->trace.valid()) {
m->trace.init("op msg", nullptr, &op->trace);
}
op->session->con->send_message(m);
}
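// Estimate the throttle budget an op will consume: bytes to be written
// for write ops, and the expected amount of data returned (extent
// length or xattr name/value sizes) for read ops.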
int Objecter::calc_op_budget(const bc::small_vector_base<OSDOp>& ops)
{
int op_budget = 0;
for (auto i = ops.begin(); i != ops.end(); ++i) {
if (i->op.op & CEPH_OSD_OP_MODE_WR) {
op_budget += i->indata.length();
} else if (ceph_osd_op_mode_read(i->op.op)) {
if (ceph_osd_op_uses_extent(i->op.op)) {
if ((int64_t)i->op.extent.length > 0)
op_budget += (int64_t)i->op.extent.length;
} else if (ceph_osd_op_type_attr(i->op.op)) {
op_budget += i->op.xattr.name_len + i->op.xattr.value_len;
}
}
}
return op_budget;
}
void Objecter::_throttle_op(Op *op,
shunique_lock<ceph::shared_mutex>& sul,
int op_budget)
{
ceph_assert(sul && sul.mutex() == &rwlock);
bool locked_for_write = sul.owns_lock();
if (!op_budget)
op_budget = calc_op_budget(op->ops);
if (!op_throttle_bytes.get_or_fail(op_budget)) { //couldn't take right now
sul.unlock();
op_throttle_bytes.get(op_budget);
if (locked_for_write)
sul.lock();
else
sul.lock_shared();
}
if (!op_throttle_ops.get_or_fail(1)) { //couldn't take right now
sul.unlock();
op_throttle_ops.get(1);
if (locked_for_write)
sul.lock();
else
sul.lock_shared();
}
}
int Objecter::take_linger_budget(LingerOp *info)
{
return 1;
}
/* This function DOES put the passed message before returning */
void Objecter::handle_osd_op_reply(MOSDOpReply *m)
{
ldout(cct, 10) << "in handle_osd_op_reply" << dendl;
// get pio
ceph_tid_t tid = m->get_tid();
shunique_lock sul(rwlock, ceph::acquire_shared);
if (!initialized) {
m->put();
return;
}
ConnectionRef con = m->get_connection();
auto priv = con->get_priv();
auto s = static_cast<OSDSession*>(priv.get());
if (!s || s->con != con) {
ldout(cct, 7) << __func__ << " no session on con " << con << dendl;
m->put();
return;
}
unique_lock sl(s->lock);
map<ceph_tid_t, Op *>::iterator iter = s->ops.find(tid);
if (iter == s->ops.end()) {
ldout(cct, 7) << "handle_osd_op_reply " << tid
<< (m->is_ondisk() ? " ondisk" : (m->is_onnvram() ?
" onnvram" : " ack"))
<< " ... stray" << dendl;
sl.unlock();
m->put();
return;
}
ldout(cct, 7) << "handle_osd_op_reply " << tid
<< (m->is_ondisk() ? " ondisk" :
(m->is_onnvram() ? " onnvram" : " ack"))
<< " uv " << m->get_user_version()
<< " in " << m->get_pg()
<< " attempt " << m->get_retry_attempt()
<< dendl;
Op *op = iter->second;
op->trace.event("osd op reply");
if (retry_writes_after_first_reply && op->attempts == 1 &&
(op->target.flags & CEPH_OSD_FLAG_WRITE)) {
ldout(cct, 7) << "retrying write after first reply: " << tid << dendl;
if (op->has_completion()) {
num_in_flight--;
}
_session_op_remove(s, op);
sl.unlock();
_op_submit(op, sul, NULL);
m->put();
return;
}
if (m->get_retry_attempt() >= 0) {
if (m->get_retry_attempt() != (op->attempts - 1)) {
ldout(cct, 7) << " ignoring reply from attempt "
<< m->get_retry_attempt()
<< " from " << m->get_source_inst()
<< "; last attempt " << (op->attempts - 1) << " sent to "
<< op->session->con->get_peer_addr() << dendl;
m->put();
sl.unlock();
return;
}
} else {
// we don't know the request attempt because the server is old, so
// just accept this one. we may do ACK callbacks we shouldn't
// have, but that is better than doing callbacks out of order.
}
decltype(op->onfinish) onfinish;
int rc = m->get_result();
if (m->is_redirect_reply()) {
ldout(cct, 5) << " got redirect reply; redirecting" << dendl;
if (op->has_completion())
num_in_flight--;
_session_op_remove(s, op);
sl.unlock();
// FIXME: two redirects could race and reorder
op->tid = 0;
m->get_redirect().combine_with_locator(op->target.target_oloc,
op->target.target_oid.name);
op->target.flags |= (CEPH_OSD_FLAG_REDIRECTED |
CEPH_OSD_FLAG_IGNORE_CACHE |
CEPH_OSD_FLAG_IGNORE_OVERLAY);
_op_submit(op, sul, NULL);
m->put();
return;
}
if (rc == -EAGAIN) {
ldout(cct, 7) << " got -EAGAIN, resubmitting" << dendl;
if (op->has_completion())
num_in_flight--;
_session_op_remove(s, op);
sl.unlock();
op->tid = 0;
op->target.flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
CEPH_OSD_FLAG_LOCALIZE_READS);
op->target.pgid = pg_t();
_op_submit(op, sul, NULL);
m->put();
return;
}
sul.unlock();
if (op->objver)
*op->objver = m->get_user_version();
if (op->reply_epoch)
*op->reply_epoch = m->get_map_epoch();
if (op->data_offset)
*op->data_offset = m->get_header().data_off;
// got data?
if (op->outbl) {
#if 0
if (op->con)
op->con->revoke_rx_buffer(op->tid);
#endif
auto& bl = m->get_data();
if (op->outbl->length() == bl.length() &&
bl.get_num_buffers() <= 1) {
// this is here to keep previous users who *relied* on getting data
// read into existing buffers happy. Notably,
// libradosstriper::RadosStriperImpl::aio_read().
ldout(cct,10) << __func__ << " copying resulting " << bl.length()
<< " into existing ceph::buffer of length " << op->outbl->length()
<< dendl;
cb::list t;
t = std::move(*op->outbl);
t.invalidate_crc(); // we're overwriting the raw buffers via c_str()
bl.begin().copy(bl.length(), t.c_str());
op->outbl->substr_of(t, 0, bl.length());
} else {
m->claim_data(*op->outbl);
}
op->outbl = 0;
}
// per-op result demuxing
vector<OSDOp> out_ops;
m->claim_ops(out_ops);
if (out_ops.size() != op->ops.size())
ldout(cct, 0) << "WARNING: tid " << op->tid << " reply ops " << out_ops
<< " != request ops " << op->ops
<< " from " << m->get_source_inst() << dendl;
ceph_assert(op->ops.size() == op->out_bl.size());
ceph_assert(op->ops.size() == op->out_rval.size());
ceph_assert(op->ops.size() == op->out_ec.size());
ceph_assert(op->ops.size() == op->out_handler.size());
auto pb = op->out_bl.begin();
auto pr = op->out_rval.begin();
auto pe = op->out_ec.begin();
auto ph = op->out_handler.begin();
ceph_assert(op->out_bl.size() == op->out_rval.size());
ceph_assert(op->out_bl.size() == op->out_handler.size());
auto p = out_ops.begin();
for (unsigned i = 0;
p != out_ops.end() && pb != op->out_bl.end();
++i, ++p, ++pb, ++pr, ++pe, ++ph) {
ldout(cct, 10) << " op " << i << " rval " << p->rval
<< " len " << p->outdata.length() << dendl;
if (*pb)
**pb = p->outdata;
// set rval before running handlers so that handlers
// can change it if e.g. decoding fails
if (*pr)
**pr = ceph_to_hostos_errno(p->rval);
if (*pe)
**pe = p->rval < 0 ? bs::error_code(-p->rval, osd_category()) :
bs::error_code();
if (*ph) {
std::move((*ph))(p->rval < 0 ?
bs::error_code(-p->rval, osd_category()) :
bs::error_code(),
p->rval, p->outdata);
}
}
// NOTE: we assume that since we only request ONDISK ever we will
// only ever get back one (type of) ack ever.
if (op->has_completion()) {
num_in_flight--;
onfinish = std::move(op->onfinish);
op->onfinish = nullptr;
}
logger->inc(l_osdc_op_reply);
logger->tinc(l_osdc_op_latency, ceph::coarse_mono_time::clock::now() - op->stamp);
logger->set(l_osdc_op_inflight, num_in_flight);
/* get it before we call _finish_op() */
auto completion_lock = s->get_lock(op->target.base_oid);
ldout(cct, 15) << "handle_osd_op_reply completed tid " << tid << dendl;
_finish_op(op, 0);
ldout(cct, 5) << num_in_flight << " in flight" << dendl;
// serialize completions
if (completion_lock.mutex()) {
completion_lock.lock();
}
sl.unlock();
// do callbacks
if (Op::has_completion(onfinish)) {
Op::complete(std::move(onfinish), osdcode(rc), rc);
}
if (completion_lock.mutex()) {
completion_lock.unlock();
}
m->put();
}
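/**
 * Handle an MOSDBackoff from an OSD: register and acknowledge a new
 * block, or process an unblock and resend any ops it was holding back.
 * Puts the message before returning.
 */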
void Objecter::handle_osd_backoff(MOSDBackoff *m)
{
ldout(cct, 10) << __func__ << " " << *m << dendl;
shunique_lock sul(rwlock, ceph::acquire_shared);
if (!initialized) {
m->put();
return;
}
ConnectionRef con = m->get_connection();
auto priv = con->get_priv();
auto s = static_cast<OSDSession*>(priv.get());
if (!s || s->con != con) {
ldout(cct, 7) << __func__ << " no session on con " << con << dendl;
m->put();
return;
}
get_session(s);
unique_lock sl(s->lock);
switch (m->op) {
case CEPH_OSD_BACKOFF_OP_BLOCK:
{
// register
OSDBackoff& b = s->backoffs[m->pgid][m->begin];
s->backoffs_by_id.insert(make_pair(m->id, &b));
b.pgid = m->pgid;
b.id = m->id;
b.begin = m->begin;
b.end = m->end;
// ack with original backoff's epoch so that the osd can discard this if
// there was a pg split.
auto r = new MOSDBackoff(m->pgid, m->map_epoch,
CEPH_OSD_BACKOFF_OP_ACK_BLOCK,
m->id, m->begin, m->end);
// this priority must match the MOSDOps from _prepare_osd_op
r->set_priority(cct->_conf->osd_client_op_priority);
con->send_message(r);
}
break;
case CEPH_OSD_BACKOFF_OP_UNBLOCK:
{
auto p = s->backoffs_by_id.find(m->id);
if (p != s->backoffs_by_id.end()) {
OSDBackoff *b = p->second;
if (b->begin != m->begin &&
b->end != m->end) {
lderr(cct) << __func__ << " got " << m->pgid << " id " << m->id
<< " unblock on ["
<< m->begin << "," << m->end << ") but backoff is ["
<< b->begin << "," << b->end << ")" << dendl;
// hrmpf, unblock it anyway.
}
ldout(cct, 10) << __func__ << " unblock backoff " << b->pgid
<< " id " << b->id
<< " [" << b->begin << "," << b->end
<< ")" << dendl;
auto spgp = s->backoffs.find(b->pgid);
ceph_assert(spgp != s->backoffs.end());
spgp->second.erase(b->begin);
if (spgp->second.empty()) {
s->backoffs.erase(spgp);
}
s->backoffs_by_id.erase(p);
// check for any ops to resend
for (auto& q : s->ops) {
if (q.second->target.actual_pgid == m->pgid) {
int r = q.second->target.contained_by(m->begin, m->end);
ldout(cct, 20) << __func__ << " contained_by " << r << " on "
<< q.second->target.get_hobj() << dendl;
if (r) {
_send_op(q.second);
}
}
}
} else {
lderr(cct) << __func__ << " " << m->pgid << " id " << m->id
<< " unblock on ["
<< m->begin << "," << m->end << ") but backoff dne" << dendl;
}
}
break;
default:
ldout(cct, 10) << __func__ << " unrecognized op " << (int)m->op << dendl;
}
sul.unlock();
sl.unlock();
m->put();
put_session(s);
}
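// Reposition an object listing at a raw 32-bit hash position (or, in the
// overload below, at an explicit hobject_t cursor) and recompute the pg the
// cursor falls in.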
uint32_t Objecter::list_nobjects_seek(NListContext *list_context,
uint32_t pos)
{
shared_lock rl(rwlock);
list_context->pos = hobject_t(object_t(), string(), CEPH_NOSNAP,
pos, list_context->pool_id, string());
ldout(cct, 10) << __func__ << " " << list_context
<< " pos " << pos << " -> " << list_context->pos << dendl;
pg_t actual = osdmap->raw_pg_to_pg(pg_t(pos, list_context->pool_id));
list_context->current_pg = actual.ps();
list_context->at_end_of_pool = false;
return pos;
}
uint32_t Objecter::list_nobjects_seek(NListContext *list_context,
const hobject_t& cursor)
{
shared_lock rl(rwlock);
ldout(cct, 10) << "list_nobjects_seek " << list_context << dendl;
list_context->pos = cursor;
list_context->at_end_of_pool = false;
pg_t actual = osdmap->raw_pg_to_pg(pg_t(cursor.get_hash(), list_context->pool_id));
list_context->current_pg = actual.ps();
list_context->sort_bitwise = true;
return list_context->current_pg;
}
void Objecter::list_nobjects_get_cursor(NListContext *list_context,
hobject_t *cursor)
{
shared_lock rl(rwlock);
if (list_context->list.empty()) {
*cursor = list_context->pos;
} else {
const librados::ListObjectImpl& entry = list_context->list.front();
const string *key = (entry.locator.empty() ? &entry.oid : &entry.locator);
uint32_t h = osdmap->get_pg_pool(list_context->pool_id)->hash_key(*key, entry.nspace);
*cursor = hobject_t(entry.oid, entry.locator, list_context->pool_snap_seq, h, list_context->pool_id, entry.nspace);
}
}
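// Fetch the next batch of objects for an ongoing pool listing: cope with
// pool deletion, sortbitwise changes and pg_num changes, then issue a PGNLS
// read on the pg holding the current cursor. _nlist_reply() continues from
// there.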
void Objecter::list_nobjects(NListContext *list_context, Context *onfinish)
{
ldout(cct, 10) << __func__ << " pool_id " << list_context->pool_id
<< " pool_snap_seq " << list_context->pool_snap_seq
<< " max_entries " << list_context->max_entries
<< " list_context " << list_context
<< " onfinish " << onfinish
<< " current_pg " << list_context->current_pg
<< " pos " << list_context->pos << dendl;
shared_lock rl(rwlock);
const pg_pool_t *pool = osdmap->get_pg_pool(list_context->pool_id);
if (!pool) { // pool is gone
rl.unlock();
put_nlist_context_budget(list_context);
onfinish->complete(-ENOENT);
return;
}
int pg_num = pool->get_pg_num();
bool sort_bitwise = osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE);
if (list_context->pos.is_min()) {
list_context->starting_pg_num = 0;
list_context->sort_bitwise = sort_bitwise;
list_context->starting_pg_num = pg_num;
}
if (list_context->sort_bitwise != sort_bitwise) {
list_context->pos = hobject_t(
object_t(), string(), CEPH_NOSNAP,
list_context->current_pg, list_context->pool_id, string());
list_context->sort_bitwise = sort_bitwise;
ldout(cct, 10) << " hobject sort order changed, restarting this pg at "
<< list_context->pos << dendl;
}
if (list_context->starting_pg_num != pg_num) {
if (!sort_bitwise) {
// start reading from the beginning; the pgs have changed
ldout(cct, 10) << " pg_num changed; restarting with " << pg_num << dendl;
list_context->pos = collection_list_handle_t();
}
list_context->starting_pg_num = pg_num;
}
if (list_context->pos.is_max()) {
ldout(cct, 20) << __func__ << " end of pool, list "
<< list_context->list << dendl;
if (list_context->list.empty()) {
list_context->at_end_of_pool = true;
}
// release the listing context's budget once all
// OPs (in the session) are finished
put_nlist_context_budget(list_context);
onfinish->complete(0);
return;
}
ObjectOperation op;
op.pg_nls(list_context->max_entries, list_context->filter,
list_context->pos, osdmap->get_epoch());
list_context->bl.clear();
auto onack = new C_NList(list_context, onfinish, this);
object_locator_t oloc(list_context->pool_id, list_context->nspace);
// note current_pg in case we don't have (or lose) SORTBITWISE
list_context->current_pg = pool->raw_hash_to_pg(list_context->pos.get_hash());
rl.unlock();
pg_read(list_context->current_pg, oloc, op,
&list_context->bl, 0, onack, &onack->epoch,
&list_context->ctx_budget);
}
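// Completion path for list_nobjects(): decode the PGNLS response, advance
// the cursor (stepping pgs by hand for legacy !sortbitwise clusters),
// accumulate entries, and either call the caller back or fetch more.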
void Objecter::_nlist_reply(NListContext *list_context, int r,
Context *final_finish, epoch_t reply_epoch)
{
ldout(cct, 10) << __func__ << " " << list_context << dendl;
auto iter = list_context->bl.cbegin();
pg_nls_response_t response;
decode(response, iter);
if (!iter.end()) {
    // legacy OSDs append an extra_info blob here; decode and discard it.
cb::list legacy_extra_info;
decode(legacy_extra_info, iter);
}
// if the osd returns 1 (newer code), or handle MAX, it means we
// hit the end of the pg.
if ((response.handle.is_max() || r == 1) &&
!list_context->sort_bitwise) {
// legacy OSD and !sortbitwise, figure out the next PG on our own
++list_context->current_pg;
if (list_context->current_pg == list_context->starting_pg_num) {
// end of pool
list_context->pos = hobject_t::get_max();
} else {
// next pg
list_context->pos = hobject_t(object_t(), string(), CEPH_NOSNAP,
list_context->current_pg,
list_context->pool_id, string());
}
} else {
list_context->pos = response.handle;
}
int response_size = response.entries.size();
ldout(cct, 20) << " response.entries.size " << response_size
<< ", response.entries " << response.entries
<< ", handle " << response.handle
<< ", tentative new pos " << list_context->pos << dendl;
if (response_size) {
std::move(response.entries.begin(), response.entries.end(),
std::back_inserter(list_context->list));
response.entries.clear();
}
if (list_context->list.size() >= list_context->max_entries) {
ldout(cct, 20) << " hit max, returning results so far, "
<< list_context->list << dendl;
// release the listing context's budget once all
// OPs (in the session) are finished
put_nlist_context_budget(list_context);
final_finish->complete(0);
return;
}
// continue!
list_nobjects(list_context, final_finish);
}
void Objecter::put_nlist_context_budget(NListContext *list_context)
{
if (list_context->ctx_budget >= 0) {
ldout(cct, 10) << " release listing context's budget " <<
list_context->ctx_budget << dendl;
put_op_budget_bytes(list_context->ctx_budget);
list_context->ctx_budget = -1;
}
}
// snapshots
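// Submit a POOL_OP_CREATE_SNAP to the monitors; fails immediately if the
// pool does not exist or the snap name is already taken.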
void Objecter::create_pool_snap(int64_t pool, std::string_view snap_name,
decltype(PoolOp::onfinish)&& onfinish)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "create_pool_snap; pool: " << pool << "; snap: "
<< snap_name << dendl;
const pg_pool_t *p = osdmap->get_pg_pool(pool);
if (!p) {
onfinish->defer(std::move(onfinish), osdc_errc::pool_dne, cb::list{});
return;
}
if (p->snap_exists(snap_name)) {
onfinish->defer(std::move(onfinish), osdc_errc::snapshot_exists,
cb::list{});
return;
}
auto op = new PoolOp;
op->tid = ++last_tid;
op->pool = pool;
op->name = snap_name;
op->onfinish = std::move(onfinish);
op->pool_op = POOL_OP_CREATE_SNAP;
pool_ops[op->tid] = op;
pool_op_submit(op);
}
struct CB_SelfmanagedSnap {
std::unique_ptr<ca::Completion<void(bs::error_code, snapid_t)>> fin;
CB_SelfmanagedSnap(decltype(fin)&& fin)
: fin(std::move(fin)) {}
void operator()(bs::error_code ec, const cb::list& bl) {
snapid_t snapid = 0;
if (!ec) {
try {
auto p = bl.cbegin();
decode(snapid, p);
} catch (const cb::error& e) {
ec = e.code();
}
}
fin->defer(std::move(fin), ec, snapid);
}
};
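// Allocate a new self-managed snapid: submits POOL_OP_CREATE_UNMANAGED_SNAP
// and decodes the returned snapid via CB_SelfmanagedSnap above.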
void Objecter::allocate_selfmanaged_snap(
int64_t pool,
std::unique_ptr<ca::Completion<void(bs::error_code, snapid_t)>> onfinish)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "allocate_selfmanaged_snap; pool: " << pool << dendl;
auto op = new PoolOp;
op->tid = ++last_tid;
op->pool = pool;
op->onfinish = PoolOp::OpComp::create(
service.get_executor(),
CB_SelfmanagedSnap(std::move(onfinish)));
op->pool_op = POOL_OP_CREATE_UNMANAGED_SNAP;
pool_ops[op->tid] = op;
pool_op_submit(op);
}
void Objecter::delete_pool_snap(
int64_t pool, std::string_view snap_name,
decltype(PoolOp::onfinish)&& onfinish)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "delete_pool_snap; pool: " << pool << "; snap: "
<< snap_name << dendl;
const pg_pool_t *p = osdmap->get_pg_pool(pool);
if (!p) {
onfinish->defer(std::move(onfinish), osdc_errc::pool_dne, cb::list{});
return;
}
if (!p->snap_exists(snap_name)) {
onfinish->defer(std::move(onfinish), osdc_errc::snapshot_dne, cb::list{});
return;
}
auto op = new PoolOp;
op->tid = ++last_tid;
op->pool = pool;
op->name = snap_name;
op->onfinish = std::move(onfinish);
op->pool_op = POOL_OP_DELETE_SNAP;
pool_ops[op->tid] = op;
pool_op_submit(op);
}
void Objecter::delete_selfmanaged_snap(int64_t pool, snapid_t snap,
decltype(PoolOp::onfinish)&& onfinish)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "delete_selfmanaged_snap; pool: " << pool << "; snap: "
<< snap << dendl;
auto op = new PoolOp;
op->tid = ++last_tid;
op->pool = pool;
op->onfinish = std::move(onfinish);
op->pool_op = POOL_OP_DELETE_UNMANAGED_SNAP;
op->snapid = snap;
pool_ops[op->tid] = op;
pool_op_submit(op);
}
void Objecter::create_pool(std::string_view name,
decltype(PoolOp::onfinish)&& onfinish,
int crush_rule)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "create_pool name=" << name << dendl;
if (osdmap->lookup_pg_pool_name(name) >= 0) {
onfinish->defer(std::move(onfinish), osdc_errc::pool_exists, cb::list{});
return;
}
auto op = new PoolOp;
op->tid = ++last_tid;
op->pool = 0;
op->name = name;
op->onfinish = std::move(onfinish);
op->pool_op = POOL_OP_CREATE;
pool_ops[op->tid] = op;
op->crush_rule = crush_rule;
pool_op_submit(op);
}
void Objecter::delete_pool(int64_t pool,
decltype(PoolOp::onfinish)&& onfinish)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "delete_pool " << pool << dendl;
if (!osdmap->have_pg_pool(pool))
onfinish->defer(std::move(onfinish), osdc_errc::pool_dne, cb::list{});
else
_do_delete_pool(pool, std::move(onfinish));
}
void Objecter::delete_pool(std::string_view pool_name,
decltype(PoolOp::onfinish)&& onfinish)
{
unique_lock wl(rwlock);
ldout(cct, 10) << "delete_pool " << pool_name << dendl;
int64_t pool = osdmap->lookup_pg_pool_name(pool_name);
if (pool < 0)
// This only returns one error: -ENOENT.
onfinish->defer(std::move(onfinish), osdc_errc::pool_dne, cb::list{});
else
_do_delete_pool(pool, std::move(onfinish));
}
void Objecter::_do_delete_pool(int64_t pool,
decltype(PoolOp::onfinish)&& onfinish)
{
auto op = new PoolOp;
op->tid = ++last_tid;
op->pool = pool;
op->name = "delete";
op->onfinish = std::move(onfinish);
op->pool_op = POOL_OP_DELETE;
pool_ops[op->tid] = op;
pool_op_submit(op);
}
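// Arm the optional mon op timeout and hand the PoolOp to _pool_op_submit(),
// which builds the MPoolOp and sends it to the monitors.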
void Objecter::pool_op_submit(PoolOp *op)
{
// rwlock is locked
if (mon_timeout > timespan(0)) {
op->ontimeout = timer.add_event(mon_timeout,
[this, op]() {
pool_op_cancel(op->tid, -ETIMEDOUT); });
}
_pool_op_submit(op);
}
void Objecter::_pool_op_submit(PoolOp *op)
{
// rwlock is locked unique
ldout(cct, 10) << "pool_op_submit " << op->tid << dendl;
auto m = new MPoolOp(monc->get_fsid(), op->tid, op->pool,
op->name, op->pool_op,
last_seen_osdmap_version);
if (op->snapid) m->snapid = op->snapid;
if (op->crush_rule) m->crush_rule = op->crush_rule;
monc->send_mon_message(m);
op->last_submit = ceph::coarse_mono_clock::now();
logger->inc(l_osdc_poolop_send);
}
/**
* Handle a reply to a PoolOp message. Check that we sent the message
* and give the caller responsibility for the returned cb::list.
* Then either call the finisher or stash the PoolOp, depending on if we
* have a new enough map.
* Lastly, clean up the message and PoolOp.
*/
void Objecter::handle_pool_op_reply(MPoolOpReply *m)
{
int rc = m->replyCode;
auto ec = rc < 0 ? bs::error_code(-rc, mon_category()) : bs::error_code();
FUNCTRACE(cct);
shunique_lock sul(rwlock, acquire_shared);
if (!initialized) {
sul.unlock();
m->put();
return;
}
ldout(cct, 10) << "handle_pool_op_reply " << *m << dendl;
ceph_tid_t tid = m->get_tid();
auto iter = pool_ops.find(tid);
if (iter != pool_ops.end()) {
PoolOp *op = iter->second;
ldout(cct, 10) << "have request " << tid << " at " << op << " Op: "
<< ceph_pool_op_name(op->pool_op) << dendl;
cb::list bl{std::move(m->response_data)};
if (m->version > last_seen_osdmap_version)
last_seen_osdmap_version = m->version;
if (osdmap->get_epoch() < m->epoch) {
sul.unlock();
sul.lock();
// recheck op existence since we have let go of rwlock
// (for promotion) above.
iter = pool_ops.find(tid);
if (iter == pool_ops.end())
goto done; // op is gone.
if (osdmap->get_epoch() < m->epoch) {
ldout(cct, 20) << "waiting for client to reach epoch " << m->epoch
<< " before calling back" << dendl;
_wait_for_new_map(OpCompletion::create(
service.get_executor(),
[o = std::move(op->onfinish),
bl = std::move(bl)](
bs::error_code ec) mutable {
o->defer(std::move(o), ec, bl);
}),
m->epoch,
ec);
} else {
// map epoch changed, probably because a MOSDMap message
// sneaked in. Do caller-specified callback now or else
// we lose it forever.
ceph_assert(op->onfinish);
op->onfinish->defer(std::move(op->onfinish), ec, std::move(bl));
}
} else {
ceph_assert(op->onfinish);
op->onfinish->defer(std::move(op->onfinish), ec, std::move(bl));
}
op->onfinish = nullptr;
if (!sul.owns_lock()) {
sul.unlock();
sul.lock();
}
iter = pool_ops.find(tid);
if (iter != pool_ops.end()) {
_finish_pool_op(op, 0);
}
} else {
ldout(cct, 10) << "unknown request " << tid << dendl;
}
done:
// Not strictly necessary, since we'll release it on return.
sul.unlock();
ldout(cct, 10) << "done" << dendl;
m->put();
}
int Objecter::pool_op_cancel(ceph_tid_t tid, int r)
{
ceph_assert(initialized);
unique_lock wl(rwlock);
auto it = pool_ops.find(tid);
if (it == pool_ops.end()) {
ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
return -ENOENT;
}
ldout(cct, 10) << __func__ << " tid " << tid << dendl;
PoolOp *op = it->second;
if (op->onfinish)
op->onfinish->defer(std::move(op->onfinish), osdcode(r), cb::list{});
_finish_pool_op(op, r);
return 0;
}
void Objecter::_finish_pool_op(PoolOp *op, int r)
{
// rwlock is locked unique
pool_ops.erase(op->tid);
logger->set(l_osdc_poolop_active, pool_ops.size());
if (op->ontimeout && r != -ETIMEDOUT) {
timer.cancel_event(op->ontimeout);
}
delete op;
}
// pool stats
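// Request per-pool statistics from the monitors (MGetPoolStats); replies are
// delivered through handle_get_pool_stats_reply().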
void Objecter::get_pool_stats(
const std::vector<std::string>& pools,
decltype(PoolStatOp::onfinish)&& onfinish)
{
ldout(cct, 10) << "get_pool_stats " << pools << dendl;
auto op = new PoolStatOp;
op->tid = ++last_tid;
op->pools = pools;
op->onfinish = std::move(onfinish);
if (mon_timeout > timespan(0)) {
op->ontimeout = timer.add_event(mon_timeout,
[this, op]() {
pool_stat_op_cancel(op->tid,
-ETIMEDOUT); });
} else {
op->ontimeout = 0;
}
unique_lock wl(rwlock);
poolstat_ops[op->tid] = op;
logger->set(l_osdc_poolstat_active, poolstat_ops.size());
_poolstat_submit(op);
}
void Objecter::_poolstat_submit(PoolStatOp *op)
{
ldout(cct, 10) << "_poolstat_submit " << op->tid << dendl;
monc->send_mon_message(new MGetPoolStats(monc->get_fsid(), op->tid,
op->pools,
last_seen_pgmap_version));
op->last_submit = ceph::coarse_mono_clock::now();
logger->inc(l_osdc_poolstat_send);
}
void Objecter::handle_get_pool_stats_reply(MGetPoolStatsReply *m)
{
ldout(cct, 10) << "handle_get_pool_stats_reply " << *m << dendl;
ceph_tid_t tid = m->get_tid();
unique_lock wl(rwlock);
if (!initialized) {
m->put();
return;
}
auto iter = poolstat_ops.find(tid);
if (iter != poolstat_ops.end()) {
PoolStatOp *op = poolstat_ops[tid];
ldout(cct, 10) << "have request " << tid << " at " << op << dendl;
if (m->version > last_seen_pgmap_version) {
last_seen_pgmap_version = m->version;
}
op->onfinish->defer(std::move(op->onfinish), bs::error_code{},
std::move(m->pool_stats), m->per_pool);
_finish_pool_stat_op(op, 0);
} else {
ldout(cct, 10) << "unknown request " << tid << dendl;
}
ldout(cct, 10) << "done" << dendl;
m->put();
}
int Objecter::pool_stat_op_cancel(ceph_tid_t tid, int r)
{
ceph_assert(initialized);
unique_lock wl(rwlock);
auto it = poolstat_ops.find(tid);
if (it == poolstat_ops.end()) {
ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
return -ENOENT;
}
ldout(cct, 10) << __func__ << " tid " << tid << dendl;
auto op = it->second;
if (op->onfinish)
op->onfinish->defer(std::move(op->onfinish), osdcode(r),
bc::flat_map<std::string, pool_stat_t>{}, false);
_finish_pool_stat_op(op, r);
return 0;
}
void Objecter::_finish_pool_stat_op(PoolStatOp *op, int r)
{
// rwlock is locked unique
poolstat_ops.erase(op->tid);
logger->set(l_osdc_poolstat_active, poolstat_ops.size());
if (op->ontimeout && r != -ETIMEDOUT)
timer.cancel_event(op->ontimeout);
delete op;
}
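// Request cluster-wide statfs data from the monitors, optionally scoped to a
// single data pool.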
void Objecter::get_fs_stats(std::optional<int64_t> poolid,
decltype(StatfsOp::onfinish)&& onfinish)
{
ldout(cct, 10) << "get_fs_stats" << dendl;
unique_lock l(rwlock);
auto op = new StatfsOp;
op->tid = ++last_tid;
op->data_pool = poolid;
op->onfinish = std::move(onfinish);
if (mon_timeout > timespan(0)) {
op->ontimeout = timer.add_event(mon_timeout,
[this, op]() {
statfs_op_cancel(op->tid,
-ETIMEDOUT); });
} else {
op->ontimeout = 0;
}
statfs_ops[op->tid] = op;
logger->set(l_osdc_statfs_active, statfs_ops.size());
_fs_stats_submit(op);
}
void Objecter::_fs_stats_submit(StatfsOp *op)
{
// rwlock is locked unique
ldout(cct, 10) << "fs_stats_submit" << op->tid << dendl;
monc->send_mon_message(new MStatfs(monc->get_fsid(), op->tid,
op->data_pool,
last_seen_pgmap_version));
op->last_submit = ceph::coarse_mono_clock::now();
logger->inc(l_osdc_statfs_send);
}
void Objecter::handle_fs_stats_reply(MStatfsReply *m)
{
unique_lock wl(rwlock);
if (!initialized) {
m->put();
return;
}
ldout(cct, 10) << "handle_fs_stats_reply " << *m << dendl;
ceph_tid_t tid = m->get_tid();
if (statfs_ops.count(tid)) {
StatfsOp *op = statfs_ops[tid];
ldout(cct, 10) << "have request " << tid << " at " << op << dendl;
if (m->h.version > last_seen_pgmap_version)
last_seen_pgmap_version = m->h.version;
op->onfinish->defer(std::move(op->onfinish), bs::error_code{}, m->h.st);
_finish_statfs_op(op, 0);
} else {
ldout(cct, 10) << "unknown request " << tid << dendl;
}
m->put();
ldout(cct, 10) << "done" << dendl;
}
int Objecter::statfs_op_cancel(ceph_tid_t tid, int r)
{
ceph_assert(initialized);
unique_lock wl(rwlock);
auto it = statfs_ops.find(tid);
if (it == statfs_ops.end()) {
ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
return -ENOENT;
}
ldout(cct, 10) << __func__ << " tid " << tid << dendl;
auto op = it->second;
if (op->onfinish)
op->onfinish->defer(std::move(op->onfinish), osdcode(r), ceph_statfs{});
_finish_statfs_op(op, r);
return 0;
}
void Objecter::_finish_statfs_op(StatfsOp *op, int r)
{
// rwlock is locked unique
statfs_ops.erase(op->tid);
logger->set(l_osdc_statfs_active, statfs_ops.size());
if (op->ontimeout && r != -ETIMEDOUT)
timer.cancel_event(op->ontimeout);
delete op;
}
// scatter/gather
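// Stitch the per-extent buffers of a striped read back into one bufferlist
// and complete the caller with the number of bytes read.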
void Objecter::_sg_read_finish(vector<ObjectExtent>& extents,
vector<cb::list>& resultbl,
cb::list *bl, Context *onfinish)
{
// all done
ldout(cct, 15) << "_sg_read_finish" << dendl;
if (extents.size() > 1) {
Striper::StripedReadResult r;
auto bit = resultbl.begin();
for (auto eit = extents.begin();
eit != extents.end();
++eit, ++bit) {
r.add_partial_result(cct, *bit, eit->buffer_extents);
}
bl->clear();
r.assemble_result(cct, *bl, false);
} else {
ldout(cct, 15) << " only one frag" << dendl;
*bl = std::move(resultbl[0]);
}
// done
uint64_t bytes_read = bl->length();
ldout(cct, 7) << "_sg_read_finish " << bytes_read << " bytes" << dendl;
if (onfinish) {
onfinish->complete(bytes_read);// > 0 ? bytes_read:m->get_result());
}
}
void Objecter::ms_handle_connect(Connection *con)
{
ldout(cct, 10) << "ms_handle_connect " << con << dendl;
if (!initialized)
return;
if (con->get_peer_type() == CEPH_ENTITY_TYPE_MON)
resend_mon_ops();
}
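// On an OSD connection reset, reopen the session and queue its outstanding
// ops and linger ops for resend, unless our map already has that osd down.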
bool Objecter::ms_handle_reset(Connection *con)
{
if (!initialized)
return false;
if (con->get_peer_type() == CEPH_ENTITY_TYPE_OSD) {
unique_lock wl(rwlock);
auto priv = con->get_priv();
auto session = static_cast<OSDSession*>(priv.get());
if (session) {
ldout(cct, 1) << "ms_handle_reset " << con << " session " << session
<< " osd." << session->osd << dendl;
      // the session may already have been closed if a newly handled
      // osdmap marked the osd down
if (!(initialized && osdmap->is_up(session->osd))) {
ldout(cct, 1) << "ms_handle_reset aborted,initialized=" << initialized << dendl;
wl.unlock();
return false;
}
map<uint64_t, LingerOp *> lresend;
unique_lock sl(session->lock);
_reopen_session(session);
_kick_requests(session, lresend);
sl.unlock();
_linger_ops_resend(lresend, wl);
wl.unlock();
maybe_request_map();
}
return true;
}
return false;
}
void Objecter::ms_handle_remote_reset(Connection *con)
{
/*
* treat these the same.
*/
ms_handle_reset(con);
}
bool Objecter::ms_handle_refused(Connection *con)
{
// just log for now
if (osdmap && (con->get_peer_type() == CEPH_ENTITY_TYPE_OSD)) {
int osd = osdmap->identify_osd(con->get_peer_addr());
if (osd >= 0) {
ldout(cct, 1) << "ms_handle_refused on osd." << osd << dendl;
}
}
return false;
}
void Objecter::op_target_t::dump(Formatter *f) const
{
f->dump_stream("pg") << pgid;
f->dump_int("osd", osd);
f->dump_stream("object_id") << base_oid;
f->dump_stream("object_locator") << base_oloc;
f->dump_stream("target_object_id") << target_oid;
f->dump_stream("target_object_locator") << target_oloc;
f->dump_int("paused", (int)paused);
f->dump_int("used_replica", (int)used_replica);
f->dump_int("precalc_pgid", (int)precalc_pgid);
}
void Objecter::_dump_active(OSDSession *s)
{
for (auto p = s->ops.begin(); p != s->ops.end(); ++p) {
Op *op = p->second;
ldout(cct, 20) << op->tid << "\t" << op->target.pgid
<< "\tosd." << (op->session ? op->session->osd : -1)
<< "\t" << op->target.base_oid
<< "\t" << op->ops << dendl;
}
}
void Objecter::_dump_active()
{
ldout(cct, 20) << "dump_active .. " << num_homeless_ops << " homeless"
<< dendl;
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
auto s = siter->second;
shared_lock sl(s->lock);
_dump_active(s);
sl.unlock();
}
_dump_active(homeless_session);
}
void Objecter::dump_active()
{
shared_lock rl(rwlock);
_dump_active();
rl.unlock();
}
void Objecter::dump_requests(Formatter *fmt)
{
// Read-lock on Objecter held here
fmt->open_object_section("requests");
dump_ops(fmt);
dump_linger_ops(fmt);
dump_pool_ops(fmt);
dump_pool_stat_ops(fmt);
dump_statfs_ops(fmt);
dump_command_ops(fmt);
fmt->close_section(); // requests object
}
void Objecter::_dump_ops(const OSDSession *s, Formatter *fmt)
{
for (auto p = s->ops.begin(); p != s->ops.end(); ++p) {
Op *op = p->second;
auto age = std::chrono::duration<double>(ceph::coarse_mono_clock::now() - op->stamp);
fmt->open_object_section("op");
fmt->dump_unsigned("tid", op->tid);
op->target.dump(fmt);
fmt->dump_stream("last_sent") << op->stamp;
fmt->dump_float("age", age.count());
fmt->dump_int("attempts", op->attempts);
fmt->dump_stream("snapid") << op->snapid;
fmt->dump_stream("snap_context") << op->snapc;
fmt->dump_stream("mtime") << op->mtime;
fmt->open_array_section("osd_ops");
for (auto it = op->ops.begin(); it != op->ops.end(); ++it) {
fmt->dump_stream("osd_op") << *it;
}
fmt->close_section(); // osd_ops array
fmt->close_section(); // op object
}
}
void Objecter::dump_ops(Formatter *fmt)
{
// Read-lock on Objecter held
fmt->open_array_section("ops");
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
OSDSession *s = siter->second;
shared_lock sl(s->lock);
_dump_ops(s, fmt);
sl.unlock();
}
_dump_ops(homeless_session, fmt);
fmt->close_section(); // ops array
}
void Objecter::_dump_linger_ops(const OSDSession *s, Formatter *fmt)
{
for (auto p = s->linger_ops.begin(); p != s->linger_ops.end(); ++p) {
auto op = p->second;
fmt->open_object_section("linger_op");
fmt->dump_unsigned("linger_id", op->linger_id);
op->target.dump(fmt);
fmt->dump_stream("snapid") << op->snap;
fmt->dump_stream("registered") << op->registered;
fmt->close_section(); // linger_op object
}
}
void Objecter::dump_linger_ops(Formatter *fmt)
{
// We have a read-lock on the objecter
fmt->open_array_section("linger_ops");
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
auto s = siter->second;
shared_lock sl(s->lock);
_dump_linger_ops(s, fmt);
sl.unlock();
}
_dump_linger_ops(homeless_session, fmt);
fmt->close_section(); // linger_ops array
}
void Objecter::_dump_command_ops(const OSDSession *s, Formatter *fmt)
{
for (auto p = s->command_ops.begin(); p != s->command_ops.end(); ++p) {
auto op = p->second;
fmt->open_object_section("command_op");
fmt->dump_unsigned("command_id", op->tid);
fmt->dump_int("osd", op->session ? op->session->osd : -1);
fmt->open_array_section("command");
for (auto q = op->cmd.begin(); q != op->cmd.end(); ++q)
fmt->dump_string("word", *q);
fmt->close_section();
if (op->target_osd >= 0)
fmt->dump_int("target_osd", op->target_osd);
else
fmt->dump_stream("target_pg") << op->target_pg;
fmt->close_section(); // command_op object
}
}
void Objecter::dump_command_ops(Formatter *fmt)
{
// We have a read-lock on the Objecter here
fmt->open_array_section("command_ops");
for (auto siter = osd_sessions.begin();
siter != osd_sessions.end(); ++siter) {
auto s = siter->second;
shared_lock sl(s->lock);
_dump_command_ops(s, fmt);
sl.unlock();
}
_dump_command_ops(homeless_session, fmt);
fmt->close_section(); // command_ops array
}
void Objecter::dump_pool_ops(Formatter *fmt) const
{
fmt->open_array_section("pool_ops");
for (auto p = pool_ops.begin(); p != pool_ops.end(); ++p) {
auto op = p->second;
fmt->open_object_section("pool_op");
fmt->dump_unsigned("tid", op->tid);
fmt->dump_int("pool", op->pool);
fmt->dump_string("name", op->name);
fmt->dump_int("operation_type", op->pool_op);
fmt->dump_unsigned("crush_rule", op->crush_rule);
fmt->dump_stream("snapid") << op->snapid;
fmt->dump_stream("last_sent") << op->last_submit;
fmt->close_section(); // pool_op object
}
fmt->close_section(); // pool_ops array
}
void Objecter::dump_pool_stat_ops(Formatter *fmt) const
{
fmt->open_array_section("pool_stat_ops");
for (auto p = poolstat_ops.begin();
p != poolstat_ops.end();
++p) {
PoolStatOp *op = p->second;
fmt->open_object_section("pool_stat_op");
fmt->dump_unsigned("tid", op->tid);
fmt->dump_stream("last_sent") << op->last_submit;
fmt->open_array_section("pools");
for (const auto& it : op->pools) {
fmt->dump_string("pool", it);
}
fmt->close_section(); // pools array
fmt->close_section(); // pool_stat_op object
}
fmt->close_section(); // pool_stat_ops array
}
void Objecter::dump_statfs_ops(Formatter *fmt) const
{
fmt->open_array_section("statfs_ops");
for (auto p = statfs_ops.begin(); p != statfs_ops.end(); ++p) {
auto op = p->second;
fmt->open_object_section("statfs_op");
fmt->dump_unsigned("tid", op->tid);
fmt->dump_stream("last_sent") << op->last_submit;
fmt->close_section(); // statfs_op object
}
fmt->close_section(); // statfs_ops array
}
Objecter::RequestStateHook::RequestStateHook(Objecter *objecter) :
m_objecter(objecter)
{
}
int Objecter::RequestStateHook::call(std::string_view command,
const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& ss,
cb::list& out)
{
shared_lock rl(m_objecter->rwlock);
m_objecter->dump_requests(f);
return 0;
}
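// Ask the monitors to add (or remove) this client's first address to/from
// the osd blocklist; only used by test code.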
void Objecter::blocklist_self(bool set)
{
ldout(cct, 10) << "blocklist_self " << (set ? "add" : "rm") << dendl;
vector<string> cmd;
cmd.push_back("{\"prefix\":\"osd blocklist\", ");
if (set)
cmd.push_back("\"blocklistop\":\"add\",");
else
cmd.push_back("\"blocklistop\":\"rm\",");
stringstream ss;
// this is somewhat imprecise in that we are blocklisting our first addr only
ss << messenger->get_myaddrs().front().get_legacy_str();
cmd.push_back("\"addr\":\"" + ss.str() + "\"");
auto m = new MMonCommand(monc->get_fsid());
m->cmd = cmd;
// NOTE: no fallback to legacy blacklist command implemented here
// since this is only used for test code.
monc->send_mon_message(m);
}
// commands
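// Handle an MCommandReply for a previously submitted OSD command: ignore
// stale or mismatched replies, resend on EAGAIN, otherwise finish the
// CommandOp with the returned status and data.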
void Objecter::handle_command_reply(MCommandReply *m)
{
unique_lock wl(rwlock);
if (!initialized) {
m->put();
return;
}
ConnectionRef con = m->get_connection();
auto priv = con->get_priv();
auto s = static_cast<OSDSession*>(priv.get());
if (!s || s->con != con) {
ldout(cct, 7) << __func__ << " no session on con " << con << dendl;
m->put();
return;
}
shared_lock sl(s->lock);
auto p = s->command_ops.find(m->get_tid());
if (p == s->command_ops.end()) {
ldout(cct, 10) << "handle_command_reply tid " << m->get_tid()
<< " not found" << dendl;
m->put();
sl.unlock();
return;
}
CommandOp *c = p->second;
if (!c->session ||
m->get_connection() != c->session->con) {
ldout(cct, 10) << "handle_command_reply tid " << m->get_tid()
<< " got reply from wrong connection "
<< m->get_connection() << " " << m->get_source_inst()
<< dendl;
m->put();
sl.unlock();
return;
}
if (m->r == -EAGAIN) {
ldout(cct,10) << __func__ << " tid " << m->get_tid()
<< " got EAGAIN, requesting map and resending" << dendl;
// NOTE: This might resend twice... once now, and once again when
// we get an updated osdmap and the PG is found to have moved.
_maybe_request_map();
_send_command(c);
m->put();
sl.unlock();
return;
}
sl.unlock();
unique_lock sul(s->lock);
_finish_command(c, m->r < 0 ? bs::error_code(-m->r, osd_category()) :
bs::error_code(), std::move(m->rs),
std::move(m->get_data()));
sul.unlock();
m->put();
}
Objecter::LingerOp::LingerOp(Objecter *o, uint64_t linger_id)
: objecter(o),
linger_id(linger_id),
watch_lock(ceph::make_shared_mutex(
fmt::format("LingerOp::watch_lock #{}", linger_id)))
{}
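// Submit an OSD command: assign it to the homeless session, compute its
// target, bind it to the right session, arm the osd timeout and send it (or
// wait for a newer map if it has no target yet).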
void Objecter::submit_command(CommandOp *c, ceph_tid_t *ptid)
{
shunique_lock sul(rwlock, ceph::acquire_unique);
ceph_tid_t tid = ++last_tid;
ldout(cct, 10) << "_submit_command " << tid << " " << c->cmd << dendl;
c->tid = tid;
{
unique_lock hs_wl(homeless_session->lock);
_session_command_op_assign(homeless_session, c);
}
_calc_command_target(c, sul);
_assign_command_session(c, sul);
if (osd_timeout > timespan(0)) {
c->ontimeout = timer.add_event(osd_timeout,
[this, c, tid]() {
command_op_cancel(
c->session, tid,
osdc_errc::timed_out); });
}
if (!c->session->is_homeless()) {
_send_command(c);
} else {
_maybe_request_map();
}
if (c->map_check_error)
_send_command_map_check(c);
if (ptid)
*ptid = tid;
logger->inc(l_osdc_command_active);
}
int Objecter::_calc_command_target(CommandOp *c,
shunique_lock<ceph::shared_mutex>& sul)
{
ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
c->map_check_error = 0;
// ignore overlays, just like we do with pg ops
c->target.flags |= CEPH_OSD_FLAG_IGNORE_OVERLAY;
if (c->target_osd >= 0) {
if (!osdmap->exists(c->target_osd)) {
c->map_check_error = -ENOENT;
c->map_check_error_str = "osd dne";
c->target.osd = -1;
return RECALC_OP_TARGET_OSD_DNE;
}
if (osdmap->is_down(c->target_osd)) {
c->map_check_error = -ENXIO;
c->map_check_error_str = "osd down";
c->target.osd = -1;
return RECALC_OP_TARGET_OSD_DOWN;
}
c->target.osd = c->target_osd;
} else {
int ret = _calc_target(&(c->target), nullptr, true);
if (ret == RECALC_OP_TARGET_POOL_DNE) {
c->map_check_error = -ENOENT;
c->map_check_error_str = "pool dne";
c->target.osd = -1;
return ret;
} else if (ret == RECALC_OP_TARGET_OSD_DOWN) {
c->map_check_error = -ENXIO;
c->map_check_error_str = "osd down";
c->target.osd = -1;
return ret;
}
}
OSDSession *s;
int r = _get_session(c->target.osd, &s, sul);
ceph_assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
if (c->session != s) {
put_session(s);
return RECALC_OP_TARGET_NEED_RESEND;
}
put_session(s);
ldout(cct, 20) << "_recalc_command_target " << c->tid << " no change, "
<< c->session << dendl;
return RECALC_OP_TARGET_NO_ACTION;
}
void Objecter::_assign_command_session(CommandOp *c,
shunique_lock<ceph::shared_mutex>& sul)
{
ceph_assert(sul.owns_lock() && sul.mutex() == &rwlock);
OSDSession *s;
int r = _get_session(c->target.osd, &s, sul);
ceph_assert(r != -EAGAIN); /* shouldn't happen as we're holding the write lock */
if (c->session != s) {
if (c->session) {
OSDSession *cs = c->session;
unique_lock csl(cs->lock);
_session_command_op_remove(c->session, c);
csl.unlock();
}
unique_lock sl(s->lock);
_session_command_op_assign(s, c);
}
put_session(s);
}
void Objecter::_send_command(CommandOp *c)
{
ldout(cct, 10) << "_send_command " << c->tid << dendl;
ceph_assert(c->session);
ceph_assert(c->session->con);
auto m = new MCommand(monc->monmap.fsid);
m->cmd = c->cmd;
m->set_data(c->inbl);
m->set_tid(c->tid);
c->session->con->send_message(m);
logger->inc(l_osdc_command_send);
}
int Objecter::command_op_cancel(OSDSession *s, ceph_tid_t tid,
bs::error_code ec)
{
ceph_assert(initialized);
unique_lock wl(rwlock);
auto it = s->command_ops.find(tid);
if (it == s->command_ops.end()) {
ldout(cct, 10) << __func__ << " tid " << tid << " dne" << dendl;
return -ENOENT;
}
ldout(cct, 10) << __func__ << " tid " << tid << dendl;
CommandOp *op = it->second;
_command_cancel_map_check(op);
unique_lock sl(op->session->lock);
_finish_command(op, ec, {}, {});
sl.unlock();
return 0;
}
void Objecter::_finish_command(CommandOp *c, bs::error_code ec,
string&& rs, cb::list&& bl)
{
// rwlock is locked unique
// session lock is locked
ldout(cct, 10) << "_finish_command " << c->tid << " = " << ec << " "
<< rs << dendl;
if (c->onfinish)
c->onfinish->defer(std::move(c->onfinish), ec, std::move(rs), std::move(bl));
if (c->ontimeout && ec != bs::errc::timed_out)
timer.cancel_event(c->ontimeout);
_session_command_op_remove(c->session, c);
c->put();
logger->dec(l_osdc_command_active);
}
Objecter::OSDSession::~OSDSession()
{
// Caller is responsible for re-assigning or
// destroying any ops that were assigned to us
ceph_assert(ops.empty());
ceph_assert(linger_ops.empty());
ceph_assert(command_ops.empty());
}
Objecter::Objecter(CephContext *cct,
Messenger *m, MonClient *mc,
boost::asio::io_context& service) :
Dispatcher(cct), messenger(m), monc(mc), service(service)
{
mon_timeout = cct->_conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
osd_timeout = cct->_conf.get_val<std::chrono::seconds>("rados_osd_op_timeout");
}
Objecter::~Objecter()
{
ceph_assert(homeless_session->get_nref() == 1);
ceph_assert(num_homeless_ops == 0);
homeless_session->put();
ceph_assert(osd_sessions.empty());
ceph_assert(poolstat_ops.empty());
ceph_assert(statfs_ops.empty());
ceph_assert(pool_ops.empty());
ceph_assert(waiting_for_map.empty());
ceph_assert(linger_ops.empty());
ceph_assert(check_latest_map_lingers.empty());
ceph_assert(check_latest_map_ops.empty());
ceph_assert(check_latest_map_commands.empty());
ceph_assert(!m_request_state_hook);
ceph_assert(!logger);
}
/**
* Wait until this OSD map epoch is received before
* sending any more operations to OSDs. Use this
* when it is known that the client can't trust
* anything from before this epoch (e.g. due to
* client blocklist at this epoch).
*/
void Objecter::set_epoch_barrier(epoch_t epoch)
{
unique_lock wl(rwlock);
ldout(cct, 7) << __func__ << ": barrier " << epoch << " (was "
<< epoch_barrier << ") current epoch " << osdmap->get_epoch()
<< dendl;
if (epoch > epoch_barrier) {
epoch_barrier = epoch;
_maybe_request_map();
}
}
hobject_t Objecter::enumerate_objects_begin()
{
return hobject_t();
}
hobject_t Objecter::enumerate_objects_end()
{
return hobject_t::get_max();
}
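// Bookkeeping for enumerate_objects(): carries the end bound, filter, budget
// and the final completion; invoking it releases any op budget still held.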
template<typename T>
struct EnumerationContext {
Objecter* objecter;
const hobject_t end;
const cb::list filter;
uint32_t max;
const object_locator_t oloc;
std::vector<T> ls;
private:
fu2::unique_function<void(bs::error_code,
std::vector<T>,
hobject_t) &&> on_finish;
public:
epoch_t epoch = 0;
int budget = -1;
EnumerationContext(Objecter* objecter,
hobject_t end, cb::list filter,
uint32_t max, object_locator_t oloc,
decltype(on_finish) on_finish)
: objecter(objecter), end(std::move(end)), filter(std::move(filter)),
max(max), oloc(std::move(oloc)), on_finish(std::move(on_finish)) {}
void operator()(bs::error_code ec,
std::vector<T> v,
hobject_t h) && {
if (budget >= 0) {
objecter->put_op_budget_bytes(budget);
budget = -1;
}
std::move(on_finish)(ec, std::move(v), std::move(h));
}
};
template<typename T>
struct CB_EnumerateReply {
cb::list bl;
Objecter* objecter;
std::unique_ptr<EnumerationContext<T>> ctx;
CB_EnumerateReply(Objecter* objecter,
std::unique_ptr<EnumerationContext<T>>&& ctx) :
objecter(objecter), ctx(std::move(ctx)) {}
void operator()(bs::error_code ec) {
objecter->_enumerate_reply(std::move(bl), ec, std::move(ctx));
}
};
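// Enumerate objects of one pool/namespace in hash order, starting at 'start'
// and stopping at 'end' or after at most 'max' entries; requires the
// SORTBITWISE cluster flag. Results and the cursor to resume from are
// handed to on_finish.
//
// Illustrative caller sketch only (the surrounding variable names are
// hypothetical, not part of this file):
//
//   objecter->enumerate_objects<neorados::Entry>(
//       pool_id, ns,
//       objecter->enumerate_objects_begin(),
//       objecter->enumerate_objects_end(),
//       1000, {},   // max entries, no filter
//       [](bs::error_code ec, std::vector<neorados::Entry> v, hobject_t next) {
//         // consume v; resume from 'next' unless it is the max hobject
//       });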
template<typename T>
void Objecter::enumerate_objects(
int64_t pool_id,
std::string_view ns,
hobject_t start,
hobject_t end,
const uint32_t max,
const cb::list& filter_bl,
fu2::unique_function<void(bs::error_code,
std::vector<T>,
hobject_t) &&> on_finish) {
if (!end.is_max() && start > end) {
lderr(cct) << __func__ << ": start " << start << " > end " << end << dendl;
std::move(on_finish)(osdc_errc::precondition_violated, {}, {});
return;
}
if (max < 1) {
lderr(cct) << __func__ << ": result size may not be zero" << dendl;
std::move(on_finish)(osdc_errc::precondition_violated, {}, {});
return;
}
if (start.is_max()) {
std::move(on_finish)({}, {}, {});
return;
}
shared_lock rl(rwlock);
ceph_assert(osdmap->get_epoch());
if (!osdmap->test_flag(CEPH_OSDMAP_SORTBITWISE)) {
rl.unlock();
lderr(cct) << __func__ << ": SORTBITWISE cluster flag not set" << dendl;
std::move(on_finish)(osdc_errc::not_supported, {}, {});
return;
}
const pg_pool_t* p = osdmap->get_pg_pool(pool_id);
if (!p) {
lderr(cct) << __func__ << ": pool " << pool_id << " DNE in osd epoch "
<< osdmap->get_epoch() << dendl;
rl.unlock();
std::move(on_finish)(osdc_errc::pool_dne, {}, {});
return;
} else {
rl.unlock();
}
_issue_enumerate(start,
std::make_unique<EnumerationContext<T>>(
this, std::move(end), filter_bl,
max, object_locator_t{pool_id, ns},
std::move(on_finish)));
}
template
void Objecter::enumerate_objects<librados::ListObjectImpl>(
int64_t pool_id,
std::string_view ns,
hobject_t start,
hobject_t end,
const uint32_t max,
const cb::list& filter_bl,
fu2::unique_function<void(bs::error_code,
std::vector<librados::ListObjectImpl>,
hobject_t) &&> on_finish);
template
void Objecter::enumerate_objects<neorados::Entry>(
int64_t pool_id,
std::string_view ns,
hobject_t start,
hobject_t end,
const uint32_t max,
const cb::list& filter_bl,
fu2::unique_function<void(bs::error_code,
std::vector<neorados::Entry>,
hobject_t) &&> on_finish);
template<typename T>
void Objecter::_issue_enumerate(hobject_t start,
std::unique_ptr<EnumerationContext<T>> ctx) {
ObjectOperation op;
auto c = ctx.get();
op.pg_nls(c->max, c->filter, start, osdmap->get_epoch());
auto on_ack = std::make_unique<CB_EnumerateReply<T>>(this, std::move(ctx));
// I hate having to do this. Try to find a cleaner way
// later.
auto epoch = &c->epoch;
auto budget = &c->budget;
auto pbl = &on_ack->bl;
// Issue. See you later in _enumerate_reply
pg_read(start.get_hash(),
c->oloc, op, pbl, 0,
Op::OpComp::create(service.get_executor(),
[c = std::move(on_ack)]
(bs::error_code ec) mutable {
(*c)(ec);
}), epoch, budget);
}
template
void Objecter::_issue_enumerate<librados::ListObjectImpl>(
hobject_t start,
std::unique_ptr<EnumerationContext<librados::ListObjectImpl>> ctx);
template
void Objecter::_issue_enumerate<neorados::Entry>(
hobject_t start, std::unique_ptr<EnumerationContext<neorados::Entry>> ctx);
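// Decode one PGNLS batch for enumerate_objects(): trim entries beyond 'end',
// honour the remaining 'max', and either complete the context or issue the
// next batch starting from the computed cursor.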
template<typename T>
void Objecter::_enumerate_reply(
cb::list&& bl,
bs::error_code ec,
std::unique_ptr<EnumerationContext<T>>&& ctx)
{
if (ec) {
std::move(*ctx)(ec, {}, {});
return;
}
// Decode the results
auto iter = bl.cbegin();
pg_nls_response_template<T> response;
try {
response.decode(iter);
if (!iter.end()) {
// extra_info isn't used anywhere. We do this solely to preserve
// backward compatibility
cb::list legacy_extra_info;
decode(legacy_extra_info, iter);
}
} catch (const bs::system_error& e) {
std::move(*ctx)(e.code(), {}, {});
return;
}
shared_lock rl(rwlock);
auto pool = osdmap->get_pg_pool(ctx->oloc.get_pool());
rl.unlock();
if (!pool) {
// pool is gone, drop any results which are now meaningless.
std::move(*ctx)(osdc_errc::pool_dne, {}, {});
return;
}
hobject_t next;
  if (response.handle <= ctx->end) {
next = response.handle;
} else {
next = ctx->end;
// drop anything after 'end'
while (!response.entries.empty()) {
uint32_t hash = response.entries.back().locator.empty() ?
pool->hash_key(response.entries.back().oid,
response.entries.back().nspace) :
pool->hash_key(response.entries.back().locator,
response.entries.back().nspace);
hobject_t last(response.entries.back().oid,
response.entries.back().locator,
CEPH_NOSNAP,
hash,
ctx->oloc.get_pool(),
response.entries.back().nspace);
if (last < ctx->end)
break;
response.entries.pop_back();
}
}
if (response.entries.size() <= ctx->max) {
ctx->max -= response.entries.size();
std::move(response.entries.begin(), response.entries.end(),
std::back_inserter(ctx->ls));
} else {
auto i = response.entries.begin();
while (ctx->max > 0) {
ctx->ls.push_back(std::move(*i));
--(ctx->max);
++i;
}
uint32_t hash =
i->locator.empty() ?
pool->hash_key(i->oid, i->nspace) :
pool->hash_key(i->locator, i->nspace);
next = hobject_t{i->oid, i->locator,
CEPH_NOSNAP,
hash,
ctx->oloc.get_pool(),
i->nspace};
}
if (next == ctx->end || ctx->max == 0) {
std::move(*ctx)(ec, std::move(ctx->ls), std::move(next));
} else {
_issue_enumerate(next, std::move(ctx));
}
}
template
void Objecter::_enumerate_reply<librados::ListObjectImpl>(
cb::list&& bl,
bs::error_code ec,
std::unique_ptr<EnumerationContext<librados::ListObjectImpl>>&& ctx);
template
void Objecter::_enumerate_reply<neorados::Entry>(
cb::list&& bl,
bs::error_code ec,
std::unique_ptr<EnumerationContext<neorados::Entry>>&& ctx);
namespace {
using namespace librados;
template <typename T>
void do_decode(std::vector<T>& items, std::vector<cb::list>& bls)
{
for (auto bl : bls) {
auto p = bl.cbegin();
T t;
decode(t, p);
items.push_back(t);
}
}
struct C_ObjectOperation_scrub_ls : public Context {
cb::list bl;
uint32_t* interval;
std::vector<inconsistent_obj_t> *objects = nullptr;
std::vector<inconsistent_snapset_t> *snapsets = nullptr;
int* rval;
C_ObjectOperation_scrub_ls(uint32_t* interval,
std::vector<inconsistent_obj_t>* objects,
int* rval)
: interval(interval), objects(objects), rval(rval) {}
C_ObjectOperation_scrub_ls(uint32_t* interval,
std::vector<inconsistent_snapset_t>* snapsets,
int* rval)
: interval(interval), snapsets(snapsets), rval(rval) {}
void finish(int r) override {
if (r < 0 && r != -EAGAIN) {
if (rval)
*rval = r;
return;
}
if (rval)
*rval = 0;
try {
decode();
} catch (cb::error&) {
if (rval)
*rval = -EIO;
}
}
private:
void decode() {
scrub_ls_result_t result;
auto p = bl.cbegin();
result.decode(p);
*interval = result.interval;
if (objects) {
do_decode(*objects, result.vals);
} else {
do_decode(*snapsets, result.vals);
}
}
};
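// Shared helper for the two scrub_ls() overloads below: append a
// CEPH_OSD_OP_SCRUBLS op, encode the argument and wire up a decoder for the
// returned inconsistency list.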
template <typename T>
void do_scrub_ls(::ObjectOperation* op,
const scrub_ls_arg_t& arg,
std::vector<T> *items,
uint32_t* interval,
int* rval)
{
OSDOp& osd_op = op->add_op(CEPH_OSD_OP_SCRUBLS);
op->flags |= CEPH_OSD_FLAG_PGOP;
ceph_assert(interval);
arg.encode(osd_op.indata);
unsigned p = op->ops.size() - 1;
auto h = new C_ObjectOperation_scrub_ls{interval, items, rval};
op->set_handler(h);
op->out_bl[p] = &h->bl;
op->out_rval[p] = rval;
}
}
void ::ObjectOperation::scrub_ls(const librados::object_id_t& start_after,
uint64_t max_to_get,
std::vector<librados::inconsistent_obj_t>* objects,
uint32_t* interval,
int* rval)
{
scrub_ls_arg_t arg = {*interval, 0, start_after, max_to_get};
do_scrub_ls(this, arg, objects, interval, rval);
}
void ::ObjectOperation::scrub_ls(const librados::object_id_t& start_after,
uint64_t max_to_get,
std::vector<librados::inconsistent_snapset_t> *snapsets,
uint32_t *interval,
int *rval)
{
scrub_ls_arg_t arg = {*interval, 1, start_after, max_to_get};
do_scrub_ls(this, arg, snapsets, interval, rval);
}
| 150,013 | 26.626888 | 119 | cc |
null | ceph-main/src/osdc/Objecter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_OBJECTER_H
#define CEPH_OBJECTER_H
#include <condition_variable>
#include <list>
#include <map>
#include <mutex>
#include <memory>
#include <sstream>
#include <string>
#include <string_view>
#include <type_traits>
#include <variant>
#include <boost/container/small_vector.hpp>
#include <boost/asio.hpp>
#include <fmt/format.h>
#include "include/buffer.h"
#include "include/ceph_assert.h"
#include "include/ceph_fs.h"
#include "include/common_fwd.h"
#include "include/expected.hpp"
#include "include/types.h"
#include "include/rados/rados_types.hpp"
#include "include/function2.hpp"
#include "include/neorados/RADOS_Decodable.hpp"
#include "common/admin_socket.h"
#include "common/async/completion.h"
#include "common/ceph_time.h"
#include "common/ceph_mutex.h"
#include "common/ceph_timer.h"
#include "common/config_obs.h"
#include "common/shunique_lock.h"
#include "common/zipkin_trace.h"
#include "common/Throttle.h"
#include "mon/MonClient.h"
#include "messages/MOSDOp.h"
#include "msg/Dispatcher.h"
#include "osd/OSDMap.h"
class Context;
class Messenger;
class MonClient;
class Message;
class MPoolOpReply;
class MGetPoolStatsReply;
class MStatfsReply;
class MCommandReply;
class MWatchNotify;
struct ObjectOperation;
template<typename T>
struct EnumerationContext;
template<typename t>
struct CB_EnumerateReply;
inline constexpr std::size_t osdc_opvec_len = 2;
using osdc_opvec = boost::container::small_vector<OSDOp, osdc_opvec_len>;
// -----------------------------------------
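// ObjectOperation builds a compound OSD operation: each add_* call appends
// one OSDOp together with index-aligned slots for its output buffer, return
// value, error code and completion handler.
//
// Minimal usage sketch (illustrative only; submission through the Objecter
// happens elsewhere):
//
//   ObjectOperation op;
//   uint64_t size = 0;
//   ceph::real_time mtime;
//   int stat_rval = 0;
//   op.stat(&size, &mtime, &stat_rval);
//   ceph::buffer::list outbl;
//   op.read(0, 4096, &outbl, nullptr, nullptr);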
struct ObjectOperation {
osdc_opvec ops;
int flags = 0;
int priority = 0;
boost::container::small_vector<ceph::buffer::list*, osdc_opvec_len> out_bl;
boost::container::small_vector<
fu2::unique_function<void(boost::system::error_code, int,
const ceph::buffer::list& bl) &&>,
osdc_opvec_len> out_handler;
boost::container::small_vector<int*, osdc_opvec_len> out_rval;
boost::container::small_vector<boost::system::error_code*,
osdc_opvec_len> out_ec;
ObjectOperation() = default;
ObjectOperation(const ObjectOperation&) = delete;
ObjectOperation& operator =(const ObjectOperation&) = delete;
ObjectOperation(ObjectOperation&&) = default;
ObjectOperation& operator =(ObjectOperation&&) = default;
~ObjectOperation() = default;
size_t size() const {
return ops.size();
}
void clear() {
ops.clear();
flags = 0;
priority = 0;
out_bl.clear();
out_handler.clear();
out_rval.clear();
out_ec.clear();
}
void set_last_op_flags(int flags) {
ceph_assert(!ops.empty());
ops.rbegin()->op.flags = flags;
}
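  // Attach (or fold onto) the completion handler of the most recently added
  // op; multiple handlers on one op are chained in insertion order.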
void set_handler(fu2::unique_function<void(boost::system::error_code, int,
const ceph::buffer::list&) &&> f) {
if (f) {
if (out_handler.back()) {
// This happens seldom enough that we may as well keep folding
// functions together when we get another one rather than
// using a container.
out_handler.back() =
[f = std::move(f),
         g = std::move(out_handler.back())]
(boost::system::error_code ec, int r,
const ceph::buffer::list& bl) mutable {
std::move(g)(ec, r, bl);
std::move(f)(ec, r, bl);
};
} else {
out_handler.back() = std::move(f);
}
}
ceph_assert(ops.size() == out_handler.size());
}
void set_handler(Context *c) {
if (c)
set_handler([c = std::unique_ptr<Context>(c)](boost::system::error_code,
int r,
const ceph::buffer::list&) mutable {
c.release()->complete(r);
});
}
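  // Append a new OSDOp and grow the parallel out_* vectors so per-op outputs
  // stay index-aligned with 'ops'.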
OSDOp& add_op(int op) {
ops.emplace_back();
ops.back().op.op = op;
out_bl.push_back(nullptr);
ceph_assert(ops.size() == out_bl.size());
out_handler.emplace_back();
ceph_assert(ops.size() == out_handler.size());
out_rval.push_back(nullptr);
ceph_assert(ops.size() == out_rval.size());
out_ec.push_back(nullptr);
ceph_assert(ops.size() == out_ec.size());
return ops.back();
}
void add_data(int op, uint64_t off, uint64_t len, ceph::buffer::list& bl) {
OSDOp& osd_op = add_op(op);
osd_op.op.extent.offset = off;
osd_op.op.extent.length = len;
osd_op.indata.claim_append(bl);
}
void add_writesame(int op, uint64_t off, uint64_t write_len,
ceph::buffer::list& bl) {
OSDOp& osd_op = add_op(op);
osd_op.op.writesame.offset = off;
osd_op.op.writesame.length = write_len;
osd_op.op.writesame.data_length = bl.length();
osd_op.indata.claim_append(bl);
}
void add_xattr(int op, const char *name, const ceph::buffer::list& data) {
OSDOp& osd_op = add_op(op);
osd_op.op.xattr.name_len = (name ? strlen(name) : 0);
osd_op.op.xattr.value_len = data.length();
if (name)
osd_op.indata.append(name, osd_op.op.xattr.name_len);
osd_op.indata.append(data);
}
void add_xattr_cmp(int op, const char *name, uint8_t cmp_op,
uint8_t cmp_mode, const ceph::buffer::list& data) {
OSDOp& osd_op = add_op(op);
osd_op.op.xattr.name_len = (name ? strlen(name) : 0);
osd_op.op.xattr.value_len = data.length();
osd_op.op.xattr.cmp_op = cmp_op;
osd_op.op.xattr.cmp_mode = cmp_mode;
if (name)
osd_op.indata.append(name, osd_op.op.xattr.name_len);
osd_op.indata.append(data);
}
void add_xattr(int op, std::string_view name, const ceph::buffer::list& data) {
OSDOp& osd_op = add_op(op);
osd_op.op.xattr.name_len = name.size();
osd_op.op.xattr.value_len = data.length();
osd_op.indata.append(name.data(), osd_op.op.xattr.name_len);
osd_op.indata.append(data);
}
void add_xattr_cmp(int op, std::string_view name, uint8_t cmp_op,
uint8_t cmp_mode, const ceph::buffer::list& data) {
OSDOp& osd_op = add_op(op);
osd_op.op.xattr.name_len = name.size();
osd_op.op.xattr.value_len = data.length();
osd_op.op.xattr.cmp_op = cmp_op;
osd_op.op.xattr.cmp_mode = cmp_mode;
if (!name.empty())
osd_op.indata.append(name.data(), osd_op.op.xattr.name_len);
osd_op.indata.append(data);
}
void add_call(int op, std::string_view cname, std::string_view method,
const ceph::buffer::list &indata,
ceph::buffer::list *outbl, Context *ctx, int *prval) {
OSDOp& osd_op = add_op(op);
unsigned p = ops.size() - 1;
set_handler(ctx);
out_bl[p] = outbl;
out_rval[p] = prval;
osd_op.op.cls.class_len = cname.size();
osd_op.op.cls.method_len = method.size();
osd_op.op.cls.indata_len = indata.length();
osd_op.indata.append(cname.data(), osd_op.op.cls.class_len);
osd_op.indata.append(method.data(), osd_op.op.cls.method_len);
osd_op.indata.append(indata);
}
void add_call(int op, std::string_view cname, std::string_view method,
const ceph::buffer::list &indata,
fu2::unique_function<void(boost::system::error_code,
const ceph::buffer::list&) &&> f) {
OSDOp& osd_op = add_op(op);
set_handler([f = std::move(f)](boost::system::error_code ec,
int,
const ceph::buffer::list& bl) mutable {
std::move(f)(ec, bl);
});
osd_op.op.cls.class_len = cname.size();
osd_op.op.cls.method_len = method.size();
osd_op.op.cls.indata_len = indata.length();
osd_op.indata.append(cname.data(), osd_op.op.cls.class_len);
osd_op.indata.append(method.data(), osd_op.op.cls.method_len);
osd_op.indata.append(indata);
}
void add_call(int op, std::string_view cname, std::string_view method,
const ceph::buffer::list &indata,
fu2::unique_function<void(boost::system::error_code, int,
const ceph::buffer::list&) &&> f) {
OSDOp& osd_op = add_op(op);
set_handler([f = std::move(f)](boost::system::error_code ec,
int r,
const ceph::buffer::list& bl) mutable {
std::move(f)(ec, r, bl);
});
osd_op.op.cls.class_len = cname.size();
osd_op.op.cls.method_len = method.size();
osd_op.op.cls.indata_len = indata.length();
osd_op.indata.append(cname.data(), osd_op.op.cls.class_len);
osd_op.indata.append(method.data(), osd_op.op.cls.method_len);
osd_op.indata.append(indata);
}
void add_pgls(int op, uint64_t count, collection_list_handle_t cookie,
epoch_t start_epoch) {
using ceph::encode;
OSDOp& osd_op = add_op(op);
osd_op.op.pgls.count = count;
osd_op.op.pgls.start_epoch = start_epoch;
encode(cookie, osd_op.indata);
}
void add_pgls_filter(int op, uint64_t count, const ceph::buffer::list& filter,
collection_list_handle_t cookie, epoch_t start_epoch) {
using ceph::encode;
OSDOp& osd_op = add_op(op);
osd_op.op.pgls.count = count;
osd_op.op.pgls.start_epoch = start_epoch;
std::string cname = "pg";
std::string mname = "filter";
encode(cname, osd_op.indata);
encode(mname, osd_op.indata);
osd_op.indata.append(filter);
encode(cookie, osd_op.indata);
}
void add_alloc_hint(int op, uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags) {
OSDOp& osd_op = add_op(op);
osd_op.op.alloc_hint.expected_object_size = expected_object_size;
osd_op.op.alloc_hint.expected_write_size = expected_write_size;
osd_op.op.alloc_hint.flags = flags;
}
// ------
// pg
void pg_ls(uint64_t count, ceph::buffer::list& filter,
collection_list_handle_t cookie, epoch_t start_epoch) {
if (filter.length() == 0)
add_pgls(CEPH_OSD_OP_PGLS, count, cookie, start_epoch);
else
add_pgls_filter(CEPH_OSD_OP_PGLS_FILTER, count, filter, cookie,
start_epoch);
flags |= CEPH_OSD_FLAG_PGOP;
}
void pg_nls(uint64_t count, const ceph::buffer::list& filter,
collection_list_handle_t cookie, epoch_t start_epoch) {
if (filter.length() == 0)
add_pgls(CEPH_OSD_OP_PGNLS, count, cookie, start_epoch);
else
add_pgls_filter(CEPH_OSD_OP_PGNLS_FILTER, count, filter, cookie,
start_epoch);
flags |= CEPH_OSD_FLAG_PGOP;
}
void scrub_ls(const librados::object_id_t& start_after,
uint64_t max_to_get,
std::vector<librados::inconsistent_obj_t> *objects,
uint32_t *interval,
int *rval);
void scrub_ls(const librados::object_id_t& start_after,
uint64_t max_to_get,
std::vector<librados::inconsistent_snapset_t> *objects,
uint32_t *interval,
int *rval);
void create(bool excl) {
OSDOp& o = add_op(CEPH_OSD_OP_CREATE);
o.op.flags = (excl ? CEPH_OSD_OP_FLAG_EXCL : 0);
}
struct CB_ObjectOperation_stat {
ceph::buffer::list bl;
uint64_t *psize;
ceph::real_time *pmtime;
time_t *ptime;
struct timespec *pts;
int *prval;
boost::system::error_code* pec;
CB_ObjectOperation_stat(uint64_t *ps, ceph::real_time *pm, time_t *pt, struct timespec *_pts,
int *prval, boost::system::error_code* pec)
: psize(ps), pmtime(pm), ptime(pt), pts(_pts), prval(prval), pec(pec) {}
void operator()(boost::system::error_code ec, int r, const ceph::buffer::list& bl) {
using ceph::decode;
if (r >= 0) {
auto p = bl.cbegin();
try {
uint64_t size;
ceph::real_time mtime;
decode(size, p);
decode(mtime, p);
if (psize)
*psize = size;
if (pmtime)
*pmtime = mtime;
if (ptime)
*ptime = ceph::real_clock::to_time_t(mtime);
if (pts)
*pts = ceph::real_clock::to_timespec(mtime);
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
}
}
};
void stat(uint64_t *psize, ceph::real_time *pmtime, int *prval) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, pmtime, nullptr, nullptr, prval,
nullptr));
out_rval.back() = prval;
}
void stat(uint64_t *psize, ceph::real_time *pmtime,
boost::system::error_code* ec) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, pmtime, nullptr, nullptr,
nullptr, ec));
out_ec.back() = ec;
}
void stat(uint64_t *psize, time_t *ptime, int *prval) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, nullptr, ptime, nullptr, prval,
nullptr));
out_rval.back() = prval;
}
void stat(uint64_t *psize, struct timespec *pts, int *prval) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, nullptr, nullptr, pts, prval, nullptr));
out_rval.back() = prval;
}
void stat(uint64_t *psize, ceph::real_time *pmtime, std::nullptr_t) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, pmtime, nullptr, nullptr, nullptr,
nullptr));
}
void stat(uint64_t *psize, time_t *ptime, std::nullptr_t) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, nullptr, ptime, nullptr, nullptr,
nullptr));
}
void stat(uint64_t *psize, struct timespec *pts, std::nullptr_t) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, nullptr, nullptr, pts, nullptr,
nullptr));
}
void stat(uint64_t *psize, std::nullptr_t, std::nullptr_t) {
add_op(CEPH_OSD_OP_STAT);
set_handler(CB_ObjectOperation_stat(psize, nullptr, nullptr, nullptr,
nullptr, nullptr));
}
// object cmpext
struct CB_ObjectOperation_cmpext {
int* prval = nullptr;
boost::system::error_code* ec = nullptr;
std::size_t* s = nullptr;
explicit CB_ObjectOperation_cmpext(int *prval)
: prval(prval) {}
CB_ObjectOperation_cmpext(boost::system::error_code* ec, std::size_t* s)
: ec(ec), s(s) {}
void operator()(boost::system::error_code ec, int r, const ceph::buffer::list&) {
if (prval)
*prval = r;
if (this->ec)
*this->ec = ec;
if (s)
*s = static_cast<std::size_t>(-(MAX_ERRNO - r));
}
};
void cmpext(uint64_t off, ceph::buffer::list& cmp_bl, int *prval) {
add_data(CEPH_OSD_OP_CMPEXT, off, cmp_bl.length(), cmp_bl);
set_handler(CB_ObjectOperation_cmpext(prval));
out_rval.back() = prval;
}
void cmpext(uint64_t off, ceph::buffer::list&& cmp_bl, boost::system::error_code* ec,
std::size_t* s) {
add_data(CEPH_OSD_OP_CMPEXT, off, cmp_bl.length(), cmp_bl);
set_handler(CB_ObjectOperation_cmpext(ec, s));
out_ec.back() = ec;
}
// Used by C API
void cmpext(uint64_t off, uint64_t cmp_len, const char *cmp_buf, int *prval) {
ceph::buffer::list cmp_bl;
cmp_bl.append(cmp_buf, cmp_len);
add_data(CEPH_OSD_OP_CMPEXT, off, cmp_len, cmp_bl);
set_handler(CB_ObjectOperation_cmpext(prval));
out_rval.back() = prval;
}
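// Illustrative cmpext() usage sketch (hypothetical names). The supplied
// buffer is compared against the object's data at the given offset; on a
// mismatch the result encodes where the data differs rather than a plain
// errno, which is why the error_code overload also exposes a std::size_t:
//
// ceph::buffer::list expected;
// expected.append("expected-bytes");
// int rval = 0;
// op.cmpext(0 /*off*/, expected, &rval);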
void read(uint64_t off, uint64_t len, ceph::buffer::list *pbl, int *prval,
Context* ctx) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_READ, off, len, bl);
unsigned p = ops.size() - 1;
out_bl[p] = pbl;
out_rval[p] = prval;
set_handler(ctx);
}
void read(uint64_t off, uint64_t len, boost::system::error_code* ec,
ceph::buffer::list* pbl) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_READ, off, len, bl);
out_ec.back() = ec;
out_bl.back() = pbl;
}
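// Illustrative read() usage sketch using the error_code flavour (hypothetical
// names). The output buffer is filled in when the compound op completes:
//
// boost::system::error_code ec;
// ceph::buffer::list out;
// op.read(0 /*off*/, 4096 /*len*/, &ec, &out);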
template<typename Ex>
struct CB_ObjectOperation_sparse_read {
ceph::buffer::list* data_bl;
Ex* extents;
int* prval;
boost::system::error_code* pec;
CB_ObjectOperation_sparse_read(ceph::buffer::list* data_bl,
Ex* extents,
int* prval,
boost::system::error_code* pec)
: data_bl(data_bl), extents(extents), prval(prval), pec(pec) {}
void operator()(boost::system::error_code ec, int r, const ceph::buffer::list& bl) {
auto iter = bl.cbegin();
if (r >= 0) {
// NOTE: it's possible the sub-op has not been executed but the result
// code remains zeroed. Avoid the costly exception handling on a
// potential IO path.
if (bl.length() > 0) {
try {
decode(*extents, iter);
decode(*data_bl, iter);
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
} else if (prval) {
*prval = -EIO;
if (pec)
*pec = buffer::errc::end_of_buffer;
}
}
}
};
void sparse_read(uint64_t off, uint64_t len, std::map<uint64_t, uint64_t>* m,
ceph::buffer::list* data_bl, int* prval,
uint64_t truncate_size = 0, uint32_t truncate_seq = 0) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_SPARSE_READ, off, len, bl);
OSDOp& o = *ops.rbegin();
o.op.extent.truncate_size = truncate_size;
o.op.extent.truncate_seq = truncate_seq;
set_handler(CB_ObjectOperation_sparse_read(data_bl, m, prval, nullptr));
out_rval.back() = prval;
}
void sparse_read(uint64_t off, uint64_t len,
boost::system::error_code* ec,
std::vector<std::pair<uint64_t, uint64_t>>* m,
ceph::buffer::list* data_bl) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_SPARSE_READ, off, len, bl);
set_handler(CB_ObjectOperation_sparse_read(data_bl, m, nullptr, ec));
out_ec.back() = ec;
}
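// Illustrative sparse_read() usage sketch (hypothetical names). The extent
// map lists <offset, length> pairs of non-hole data, and data_bl holds the
// corresponding bytes concatenated in the same order:
//
// std::map<uint64_t, uint64_t> extents;
// ceph::buffer::list data;
// int rval = 0;
// op.sparse_read(0, 1 << 20, &extents, &data, &rval);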
void write(uint64_t off, ceph::buffer::list& bl,
uint64_t truncate_size,
uint32_t truncate_seq) {
add_data(CEPH_OSD_OP_WRITE, off, bl.length(), bl);
OSDOp& o = *ops.rbegin();
o.op.extent.truncate_size = truncate_size;
o.op.extent.truncate_seq = truncate_seq;
}
void write(uint64_t off, ceph::buffer::list& bl) {
write(off, bl, 0, 0);
}
void write_full(ceph::buffer::list& bl) {
add_data(CEPH_OSD_OP_WRITEFULL, 0, bl.length(), bl);
}
void writesame(uint64_t off, uint64_t write_len, ceph::buffer::list& bl) {
add_writesame(CEPH_OSD_OP_WRITESAME, off, write_len, bl);
}
void append(ceph::buffer::list& bl) {
add_data(CEPH_OSD_OP_APPEND, 0, bl.length(), bl);
}
void zero(uint64_t off, uint64_t len) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_ZERO, off, len, bl);
}
void truncate(uint64_t off) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_TRUNCATE, off, 0, bl);
}
void remove() {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_DELETE, 0, 0, bl);
}
void mapext(uint64_t off, uint64_t len) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_MAPEXT, off, len, bl);
}
void sparse_read(uint64_t off, uint64_t len) {
ceph::buffer::list bl;
add_data(CEPH_OSD_OP_SPARSE_READ, off, len, bl);
}
void checksum(uint8_t type, const ceph::buffer::list &init_value_bl,
uint64_t off, uint64_t len, size_t chunk_size,
ceph::buffer::list *pbl, int *prval, Context *ctx) {
OSDOp& osd_op = add_op(CEPH_OSD_OP_CHECKSUM);
osd_op.op.checksum.offset = off;
osd_op.op.checksum.length = len;
osd_op.op.checksum.type = type;
osd_op.op.checksum.chunk_size = chunk_size;
osd_op.indata.append(init_value_bl);
unsigned p = ops.size() - 1;
out_bl[p] = pbl;
out_rval[p] = prval;
set_handler(ctx);
}
// object attrs
void getxattr(const char *name, ceph::buffer::list *pbl, int *prval) {
ceph::buffer::list bl;
add_xattr(CEPH_OSD_OP_GETXATTR, name, bl);
unsigned p = ops.size() - 1;
out_bl[p] = pbl;
out_rval[p] = prval;
}
void getxattr(std::string_view name, boost::system::error_code* ec,
buffer::list *pbl) {
ceph::buffer::list bl;
add_xattr(CEPH_OSD_OP_GETXATTR, name, bl);
out_bl.back() = pbl;
out_ec.back() = ec;
}
template<typename Vals>
struct CB_ObjectOperation_decodevals {
uint64_t max_entries;
Vals* pattrs;
bool* ptruncated;
int* prval;
boost::system::error_code* pec;
CB_ObjectOperation_decodevals(uint64_t m, Vals* pa,
bool *pt, int *pr,
boost::system::error_code* pec)
: max_entries(m), pattrs(pa), ptruncated(pt), prval(pr), pec(pec) {
if (ptruncated) {
*ptruncated = false;
}
}
void operator()(boost::system::error_code ec, int r, const ceph::buffer::list& bl) {
if (r >= 0) {
auto p = bl.cbegin();
try {
if (pattrs)
decode(*pattrs, p);
if (ptruncated) {
Vals ignore;
if (!pattrs) {
decode(ignore, p);
pattrs = &ignore;
}
if (!p.end()) {
decode(*ptruncated, p);
} else {
// The OSD did not provide this. Since old OSDs do not
// enforce omap result limits either, we can infer it from
// the size of the result
*ptruncated = (pattrs->size() == max_entries);
}
}
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
}
}
};
template<typename Keys>
struct CB_ObjectOperation_decodekeys {
uint64_t max_entries;
Keys* pattrs;
bool *ptruncated;
int *prval;
boost::system::error_code* pec;
CB_ObjectOperation_decodekeys(uint64_t m, Keys* pa, bool *pt,
int *pr, boost::system::error_code* pec)
: max_entries(m), pattrs(pa), ptruncated(pt), prval(pr), pec(pec) {
if (ptruncated) {
*ptruncated = false;
}
}
void operator()(boost::system::error_code ec, int r, const ceph::buffer::list& bl) {
if (r >= 0) {
using ceph::decode;
auto p = bl.cbegin();
try {
if (pattrs)
decode(*pattrs, p);
if (ptruncated) {
Keys ignore;
if (!pattrs) {
decode(ignore, p);
pattrs = &ignore;
}
if (!p.end()) {
decode(*ptruncated, p);
} else {
// the OSD did not provide this. since old OSDs do not
// enforce omap result limits either, we can infer it from
// the size of the result
*ptruncated = (pattrs->size() == max_entries);
}
}
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
}
}
};
struct CB_ObjectOperation_decodewatchers {
std::list<obj_watch_t>* pwatchers;
int* prval;
boost::system::error_code* pec;
CB_ObjectOperation_decodewatchers(std::list<obj_watch_t>* pw, int* pr,
boost::system::error_code* pec)
: pwatchers(pw), prval(pr), pec(pec) {}
void operator()(boost::system::error_code ec, int r,
const ceph::buffer::list& bl) {
if (r >= 0) {
auto p = bl.cbegin();
try {
obj_list_watch_response_t resp;
decode(resp, p);
if (pwatchers) {
for (const auto& watch_item : resp.entries) {
obj_watch_t ow;
std::string sa = watch_item.addr.get_legacy_str();
strncpy(ow.addr, sa.c_str(), sizeof(ow.addr) - 1);
ow.addr[sizeof(ow.addr) - 1] = '\0';
ow.watcher_id = watch_item.name.num();
ow.cookie = watch_item.cookie;
ow.timeout_seconds = watch_item.timeout_seconds;
pwatchers->push_back(std::move(ow));
}
}
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
}
}
};
struct CB_ObjectOperation_decodewatchersneo {
std::vector<neorados::ObjWatcher>* pwatchers;
int* prval;
boost::system::error_code* pec;
CB_ObjectOperation_decodewatchersneo(std::vector<neorados::ObjWatcher>* pw,
int* pr,
boost::system::error_code* pec)
: pwatchers(pw), prval(pr), pec(pec) {}
void operator()(boost::system::error_code ec, int r,
const ceph::buffer::list& bl) {
if (r >= 0) {
auto p = bl.cbegin();
try {
obj_list_watch_response_t resp;
decode(resp, p);
if (pwatchers) {
for (const auto& watch_item : resp.entries) {
neorados::ObjWatcher ow;
ow.addr = watch_item.addr.get_legacy_str();
ow.watcher_id = watch_item.name.num();
ow.cookie = watch_item.cookie;
ow.timeout_seconds = watch_item.timeout_seconds;
pwatchers->push_back(std::move(ow));
}
}
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
}
}
};
struct CB_ObjectOperation_decodesnaps {
librados::snap_set_t *psnaps;
neorados::SnapSet *neosnaps;
int *prval;
boost::system::error_code* pec;
CB_ObjectOperation_decodesnaps(librados::snap_set_t* ps,
neorados::SnapSet* ns, int* pr,
boost::system::error_code* pec)
: psnaps(ps), neosnaps(ns), prval(pr), pec(pec) {}
void operator()(boost::system::error_code ec, int r, const ceph::buffer::list& bl) {
if (r >= 0) {
using ceph::decode;
auto p = bl.cbegin();
try {
obj_list_snap_response_t resp;
decode(resp, p);
if (psnaps) {
psnaps->clones.clear();
for (auto ci = resp.clones.begin();
ci != resp.clones.end();
++ci) {
librados::clone_info_t clone;
clone.cloneid = ci->cloneid;
clone.snaps.reserve(ci->snaps.size());
clone.snaps.insert(clone.snaps.end(), ci->snaps.begin(),
ci->snaps.end());
clone.overlap = ci->overlap;
clone.size = ci->size;
psnaps->clones.push_back(clone);
}
psnaps->seq = resp.seq;
}
if (neosnaps) {
neosnaps->clones.clear();
for (auto&& c : resp.clones) {
neorados::CloneInfo clone;
clone.cloneid = std::move(c.cloneid);
clone.snaps.reserve(c.snaps.size());
std::move(c.snaps.begin(), c.snaps.end(),
std::back_inserter(clone.snaps));
clone.overlap = c.overlap;
clone.size = c.size;
neosnaps->clones.push_back(std::move(clone));
}
neosnaps->seq = resp.seq;
}
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
if (pec)
*pec = e.code();
}
}
}
};
void getxattrs(std::map<std::string,ceph::buffer::list> *pattrs, int *prval) {
add_op(CEPH_OSD_OP_GETXATTRS);
if (pattrs || prval) {
set_handler(CB_ObjectOperation_decodevals(0, pattrs, nullptr, prval,
nullptr));
out_rval.back() = prval;
}
}
void getxattrs(boost::system::error_code* ec,
boost::container::flat_map<std::string, ceph::buffer::list> *pattrs) {
add_op(CEPH_OSD_OP_GETXATTRS);
set_handler(CB_ObjectOperation_decodevals(0, pattrs, nullptr, nullptr, ec));
out_ec.back() = ec;
}
void setxattr(const char *name, const ceph::buffer::list& bl) {
add_xattr(CEPH_OSD_OP_SETXATTR, name, bl);
}
void setxattr(std::string_view name, const ceph::buffer::list& bl) {
add_xattr(CEPH_OSD_OP_SETXATTR, name, bl);
}
void setxattr(const char *name, const std::string& s) {
ceph::buffer::list bl;
bl.append(s);
add_xattr(CEPH_OSD_OP_SETXATTR, name, bl);
}
void cmpxattr(const char *name, uint8_t cmp_op, uint8_t cmp_mode,
const ceph::buffer::list& bl) {
add_xattr_cmp(CEPH_OSD_OP_CMPXATTR, name, cmp_op, cmp_mode, bl);
}
void cmpxattr(std::string_view name, uint8_t cmp_op, uint8_t cmp_mode,
const ceph::buffer::list& bl) {
add_xattr_cmp(CEPH_OSD_OP_CMPXATTR, name, cmp_op, cmp_mode, bl);
}
void rmxattr(const char *name) {
ceph::buffer::list bl;
add_xattr(CEPH_OSD_OP_RMXATTR, name, bl);
}
void rmxattr(std::string_view name) {
ceph::buffer::list bl;
add_xattr(CEPH_OSD_OP_RMXATTR, name, bl);
}
void setxattrs(std::map<std::string, ceph::buffer::list>& attrs) {
using ceph::encode;
ceph::buffer::list bl;
encode(attrs, bl);
add_xattr(CEPH_OSD_OP_RESETXATTRS, 0, bl.length());
}
void resetxattrs(const char *prefix, std::map<std::string, ceph::buffer::list>& attrs) {
using ceph::encode;
ceph::buffer::list bl;
encode(attrs, bl);
add_xattr(CEPH_OSD_OP_RESETXATTRS, prefix, bl);
}
// trivialmap
void tmap_update(ceph::buffer::list& bl) {
add_data(CEPH_OSD_OP_TMAPUP, 0, 0, bl);
}
// objectmap
void omap_get_keys(const std::string &start_after,
uint64_t max_to_get,
std::set<std::string> *out_set,
bool *ptruncated,
int *prval) {
using ceph::encode;
OSDOp &op = add_op(CEPH_OSD_OP_OMAPGETKEYS);
ceph::buffer::list bl;
encode(start_after, bl);
encode(max_to_get, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
if (prval || ptruncated || out_set) {
set_handler(CB_ObjectOperation_decodekeys(max_to_get, out_set, ptruncated, prval,
nullptr));
out_rval.back() = prval;
}
}
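// Illustrative omap_get_keys() usage sketch (hypothetical names). When
// 'truncated' comes back true, more keys remain; pass the last key received
// as 'start_after' on a follow-up call to continue the listing:
//
// std::set<std::string> keys;
// bool truncated = false;
// int rval = 0;
// op.omap_get_keys("" /*start_after*/, 100 /*max_to_get*/, &keys, &truncated, &rval);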
void omap_get_keys(std::optional<std::string_view> start_after,
uint64_t max_to_get,
boost::system::error_code* ec,
boost::container::flat_set<std::string> *out_set,
bool *ptruncated) {
OSDOp& op = add_op(CEPH_OSD_OP_OMAPGETKEYS);
ceph::buffer::list bl;
encode(start_after ? *start_after : std::string_view{}, bl);
encode(max_to_get, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
set_handler(
CB_ObjectOperation_decodekeys(max_to_get, out_set, ptruncated, nullptr,
ec));
out_ec.back() = ec;
}
void omap_get_vals(const std::string &start_after,
const std::string &filter_prefix,
uint64_t max_to_get,
std::map<std::string, ceph::buffer::list> *out_set,
bool *ptruncated,
int *prval) {
using ceph::encode;
OSDOp &op = add_op(CEPH_OSD_OP_OMAPGETVALS);
ceph::buffer::list bl;
encode(start_after, bl);
encode(max_to_get, bl);
encode(filter_prefix, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
if (prval || out_set || ptruncated) {
set_handler(CB_ObjectOperation_decodevals(max_to_get, out_set, ptruncated,
prval, nullptr));
out_rval.back() = prval;
}
}
void omap_get_vals(std::optional<std::string_view> start_after,
std::optional<std::string_view> filter_prefix,
uint64_t max_to_get,
boost::system::error_code* ec,
boost::container::flat_map<std::string, ceph::buffer::list> *out_set,
bool *ptruncated) {
OSDOp &op = add_op(CEPH_OSD_OP_OMAPGETVALS);
ceph::buffer::list bl;
encode(start_after ? *start_after : std::string_view{}, bl);
encode(max_to_get, bl);
encode(filter_prefix ? *filter_prefix : std::string_view{}, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
set_handler(CB_ObjectOperation_decodevals(max_to_get, out_set, ptruncated,
nullptr, ec));
out_ec.back() = ec;
}
void omap_get_vals_by_keys(const std::set<std::string> &to_get,
std::map<std::string, ceph::buffer::list> *out_set,
int *prval) {
OSDOp &op = add_op(CEPH_OSD_OP_OMAPGETVALSBYKEYS);
ceph::buffer::list bl;
encode(to_get, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
if (prval || out_set) {
set_handler(CB_ObjectOperation_decodevals(0, out_set, nullptr, prval,
nullptr));
out_rval.back() = prval;
}
}
void omap_get_vals_by_keys(
const boost::container::flat_set<std::string>& to_get,
boost::system::error_code* ec,
boost::container::flat_map<std::string, ceph::buffer::list> *out_set) {
OSDOp &op = add_op(CEPH_OSD_OP_OMAPGETVALSBYKEYS);
ceph::buffer::list bl;
encode(to_get, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
set_handler(CB_ObjectOperation_decodevals(0, out_set, nullptr, nullptr,
ec));
out_ec.back() = ec;
}
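// Illustrative omap_get_vals_by_keys() usage sketch (hypothetical names).
// Keys that do not exist on the object are simply absent from the output map:
//
// std::set<std::string> want{"alpha", "beta"};
// std::map<std::string, ceph::buffer::list> got;
// int rval = 0;
// op.omap_get_vals_by_keys(want, &got, &rval);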
void omap_cmp(const std::map<std::string, std::pair<ceph::buffer::list,int> > &assertions,
int *prval) {
using ceph::encode;
OSDOp &op = add_op(CEPH_OSD_OP_OMAP_CMP);
ceph::buffer::list bl;
encode(assertions, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
if (prval) {
unsigned p = ops.size() - 1;
out_rval[p] = prval;
}
}
void omap_cmp(const boost::container::flat_map<
std::string, std::pair<ceph::buffer::list, int>>& assertions,
boost::system::error_code *ec) {
OSDOp &op = add_op(CEPH_OSD_OP_OMAP_CMP);
ceph::buffer::list bl;
encode(assertions, bl);
op.op.extent.offset = 0;
op.op.extent.length = bl.length();
op.indata.claim_append(bl);
out_ec.back() = ec;
}
struct C_ObjectOperation_copyget : public Context {
ceph::buffer::list bl;
object_copy_cursor_t *cursor;
uint64_t *out_size;
ceph::real_time *out_mtime;
std::map<std::string,ceph::buffer::list,std::less<>> *out_attrs;
ceph::buffer::list *out_data, *out_omap_header, *out_omap_data;
std::vector<snapid_t> *out_snaps;
snapid_t *out_snap_seq;
uint32_t *out_flags;
uint32_t *out_data_digest;
uint32_t *out_omap_digest;
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > *out_reqids;
mempool::osd_pglog::map<uint32_t, int> *out_reqid_return_codes;
uint64_t *out_truncate_seq;
uint64_t *out_truncate_size;
int *prval;
C_ObjectOperation_copyget(object_copy_cursor_t *c,
uint64_t *s,
ceph::real_time *m,
std::map<std::string,ceph::buffer::list,std::less<>> *a,
ceph::buffer::list *d, ceph::buffer::list *oh,
ceph::buffer::list *o,
std::vector<snapid_t> *osnaps,
snapid_t *osnap_seq,
uint32_t *flags,
uint32_t *dd,
uint32_t *od,
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > *oreqids,
mempool::osd_pglog::map<uint32_t, int> *oreqid_return_codes,
uint64_t *otseq,
uint64_t *otsize,
int *r)
: cursor(c),
out_size(s), out_mtime(m),
out_attrs(a), out_data(d), out_omap_header(oh),
out_omap_data(o), out_snaps(osnaps), out_snap_seq(osnap_seq),
out_flags(flags), out_data_digest(dd), out_omap_digest(od),
out_reqids(oreqids),
out_reqid_return_codes(oreqid_return_codes),
out_truncate_seq(otseq),
out_truncate_size(otsize),
prval(r) {}
void finish(int r) override {
using ceph::decode;
// reqids are copied on ENOENT
if (r < 0 && r != -ENOENT)
return;
try {
auto p = bl.cbegin();
object_copy_data_t copy_reply;
decode(copy_reply, p);
if (r == -ENOENT) {
if (out_reqids)
*out_reqids = copy_reply.reqids;
return;
}
if (out_size)
*out_size = copy_reply.size;
if (out_mtime)
*out_mtime = ceph::real_clock::from_ceph_timespec(copy_reply.mtime);
if (out_attrs)
*out_attrs = copy_reply.attrs;
if (out_data)
out_data->claim_append(copy_reply.data);
if (out_omap_header)
out_omap_header->claim_append(copy_reply.omap_header);
if (out_omap_data)
*out_omap_data = copy_reply.omap_data;
if (out_snaps)
*out_snaps = copy_reply.snaps;
if (out_snap_seq)
*out_snap_seq = copy_reply.snap_seq;
if (out_flags)
*out_flags = copy_reply.flags;
if (out_data_digest)
*out_data_digest = copy_reply.data_digest;
if (out_omap_digest)
*out_omap_digest = copy_reply.omap_digest;
if (out_reqids)
*out_reqids = copy_reply.reqids;
if (out_reqid_return_codes)
*out_reqid_return_codes = copy_reply.reqid_return_codes;
if (out_truncate_seq)
*out_truncate_seq = copy_reply.truncate_seq;
if (out_truncate_size)
*out_truncate_size = copy_reply.truncate_size;
*cursor = copy_reply.cursor;
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
}
}
};
void copy_get(object_copy_cursor_t *cursor,
uint64_t max,
uint64_t *out_size,
ceph::real_time *out_mtime,
std::map<std::string,ceph::buffer::list,std::less<>> *out_attrs,
ceph::buffer::list *out_data,
ceph::buffer::list *out_omap_header,
ceph::buffer::list *out_omap_data,
std::vector<snapid_t> *out_snaps,
snapid_t *out_snap_seq,
uint32_t *out_flags,
uint32_t *out_data_digest,
uint32_t *out_omap_digest,
mempool::osd_pglog::vector<std::pair<osd_reqid_t, version_t> > *out_reqids,
mempool::osd_pglog::map<uint32_t, int> *out_reqid_return_codes,
uint64_t *truncate_seq,
uint64_t *truncate_size,
int *prval) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_COPY_GET);
osd_op.op.copy_get.max = max;
encode(*cursor, osd_op.indata);
encode(max, osd_op.indata);
unsigned p = ops.size() - 1;
out_rval[p] = prval;
C_ObjectOperation_copyget *h =
new C_ObjectOperation_copyget(cursor, out_size, out_mtime,
out_attrs, out_data, out_omap_header,
out_omap_data, out_snaps, out_snap_seq,
out_flags, out_data_digest,
out_omap_digest, out_reqids,
out_reqid_return_codes, truncate_seq,
truncate_size, prval);
out_bl[p] = &h->bl;
set_handler(h);
}
void undirty() {
add_op(CEPH_OSD_OP_UNDIRTY);
}
struct C_ObjectOperation_isdirty : public Context {
ceph::buffer::list bl;
bool *pisdirty;
int *prval;
C_ObjectOperation_isdirty(bool *p, int *r)
: pisdirty(p), prval(r) {}
void finish(int r) override {
using ceph::decode;
if (r < 0)
return;
try {
auto p = bl.cbegin();
bool isdirty;
decode(isdirty, p);
if (pisdirty)
*pisdirty = isdirty;
} catch (const ceph::buffer::error& e) {
if (prval)
*prval = -EIO;
}
}
};
void is_dirty(bool *pisdirty, int *prval) {
add_op(CEPH_OSD_OP_ISDIRTY);
unsigned p = ops.size() - 1;
out_rval[p] = prval;
C_ObjectOperation_isdirty *h =
new C_ObjectOperation_isdirty(pisdirty, prval);
out_bl[p] = &h->bl;
set_handler(h);
}
struct C_ObjectOperation_hit_set_ls : public Context {
ceph::buffer::list bl;
std::list< std::pair<time_t, time_t> > *ptls;
std::list< std::pair<ceph::real_time, ceph::real_time> > *putls;
int *prval;
C_ObjectOperation_hit_set_ls(std::list< std::pair<time_t, time_t> > *t,
std::list< std::pair<ceph::real_time,
ceph::real_time> > *ut,
int *r)
: ptls(t), putls(ut), prval(r) {}
void finish(int r) override {
using ceph::decode;
if (r < 0)
return;
try {
auto p = bl.cbegin();
std::list< std::pair<ceph::real_time, ceph::real_time> > ls;
decode(ls, p);
if (ptls) {
ptls->clear();
for (auto p = ls.begin(); p != ls.end(); ++p)
// round initial timestamp up to the next full second to
// keep this a valid interval.
ptls->push_back(
std::make_pair(ceph::real_clock::to_time_t(
ceph::ceil(p->first,
// Sadly, no time literals until C++14.
std::chrono::seconds(1))),
ceph::real_clock::to_time_t(p->second)));
}
if (putls)
putls->swap(ls);
} catch (const ceph::buffer::error& e) {
r = -EIO;
}
if (prval)
*prval = r;
}
};
/**
* List available HitSets.
*
* We will get back a list of time intervals. Note that the most
* recent range may have an empty end timestamp if it is still
* accumulating.
*
* @param pls [out] list of time intervals
* @param prval [out] return value
*/
void hit_set_ls(std::list< std::pair<time_t, time_t> > *pls, int *prval) {
add_op(CEPH_OSD_OP_PG_HITSET_LS);
unsigned p = ops.size() - 1;
out_rval[p] = prval;
C_ObjectOperation_hit_set_ls *h =
new C_ObjectOperation_hit_set_ls(pls, NULL, prval);
out_bl[p] = &h->bl;
set_handler(h);
}
void hit_set_ls(std::list<std::pair<ceph::real_time, ceph::real_time> > *pls,
int *prval) {
add_op(CEPH_OSD_OP_PG_HITSET_LS);
unsigned p = ops.size() - 1;
out_rval[p] = prval;
C_ObjectOperation_hit_set_ls *h =
new C_ObjectOperation_hit_set_ls(NULL, pls, prval);
out_bl[p] = &h->bl;
set_handler(h);
}
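// Illustrative hit_set_ls() usage sketch (hypothetical names). Each pair is
// the time interval covered by one archived HitSet; per the comment above,
// the most recent interval may still be accumulating:
//
// std::list<std::pair<time_t, time_t>> intervals;
// int rval = 0;
// op.hit_set_ls(&intervals, &rval);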
/**
* get HitSet
*
* Return an encoded HitSet that includes the provided time
* interval.
*
* @param stamp [in] timestamp
* @param pbl [out] target buffer for encoded HitSet
* @param prval [out] return value
*/
void hit_set_get(ceph::real_time stamp, ceph::buffer::list *pbl, int *prval) {
OSDOp& op = add_op(CEPH_OSD_OP_PG_HITSET_GET);
op.op.hit_set_get.stamp = ceph::real_clock::to_ceph_timespec(stamp);
unsigned p = ops.size() - 1;
out_rval[p] = prval;
out_bl[p] = pbl;
}
void omap_get_header(ceph::buffer::list *bl, int *prval) {
add_op(CEPH_OSD_OP_OMAPGETHEADER);
unsigned p = ops.size() - 1;
out_bl[p] = bl;
out_rval[p] = prval;
}
void omap_get_header(boost::system::error_code* ec, ceph::buffer::list *bl) {
add_op(CEPH_OSD_OP_OMAPGETHEADER);
out_bl.back() = bl;
out_ec.back() = ec;
}
void omap_set(const std::map<std::string, ceph::buffer::list> &map) {
ceph::buffer::list bl;
encode(map, bl);
add_data(CEPH_OSD_OP_OMAPSETVALS, 0, bl.length(), bl);
}
void omap_set(const boost::container::flat_map<std::string, ceph::buffer::list>& map) {
ceph::buffer::list bl;
encode(map, bl);
add_data(CEPH_OSD_OP_OMAPSETVALS, 0, bl.length(), bl);
}
void omap_set_header(ceph::buffer::list &bl) {
add_data(CEPH_OSD_OP_OMAPSETHEADER, 0, bl.length(), bl);
}
void omap_clear() {
add_op(CEPH_OSD_OP_OMAPCLEAR);
}
void omap_rm_keys(const std::set<std::string> &to_remove) {
using ceph::encode;
ceph::buffer::list bl;
encode(to_remove, bl);
add_data(CEPH_OSD_OP_OMAPRMKEYS, 0, bl.length(), bl);
}
void omap_rm_keys(const boost::container::flat_set<std::string>& to_remove) {
ceph::buffer::list bl;
encode(to_remove, bl);
add_data(CEPH_OSD_OP_OMAPRMKEYS, 0, bl.length(), bl);
}
void omap_rm_range(std::string_view key_begin, std::string_view key_end) {
ceph::buffer::list bl;
using ceph::encode;
encode(key_begin, bl);
encode(key_end, bl);
add_data(CEPH_OSD_OP_OMAPRMKEYRANGE, 0, bl.length(), bl);
}
// object classes
void call(const char *cname, const char *method, ceph::buffer::list &indata) {
add_call(CEPH_OSD_OP_CALL, cname, method, indata, NULL, NULL, NULL);
}
void call(const char *cname, const char *method, ceph::buffer::list &indata,
ceph::buffer::list *outdata, Context *ctx, int *prval) {
add_call(CEPH_OSD_OP_CALL, cname, method, indata, outdata, ctx, prval);
}
void call(std::string_view cname, std::string_view method,
const ceph::buffer::list& indata, boost::system::error_code* ec) {
add_call(CEPH_OSD_OP_CALL, cname, method, indata, NULL, NULL, NULL);
out_ec.back() = ec;
}
void call(std::string_view cname, std::string_view method, const ceph::buffer::list& indata,
boost::system::error_code* ec, ceph::buffer::list *outdata) {
add_call(CEPH_OSD_OP_CALL, cname, method, indata, outdata, nullptr, nullptr);
out_ec.back() = ec;
}
void call(std::string_view cname, std::string_view method,
const ceph::buffer::list& indata,
fu2::unique_function<void (boost::system::error_code,
const ceph::buffer::list&) &&> f) {
add_call(CEPH_OSD_OP_CALL, cname, method, indata, std::move(f));
}
void call(std::string_view cname, std::string_view method,
const ceph::buffer::list& indata,
fu2::unique_function<void (boost::system::error_code, int,
const ceph::buffer::list&) &&> f) {
add_call(CEPH_OSD_OP_CALL, cname, method, indata, std::move(f));
}
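// Illustrative object-class call() usage sketch (the class/method names and
// payload here are hypothetical; the named class must be loaded on the OSDs):
//
// ceph::buffer::list in, out;
// boost::system::error_code ec;
// op.call("myclass", "mymethod", in, &ec, &out);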
// watch/notify
void watch(uint64_t cookie, __u8 op, uint32_t timeout = 0) {
OSDOp& osd_op = add_op(CEPH_OSD_OP_WATCH);
osd_op.op.watch.cookie = cookie;
osd_op.op.watch.op = op;
osd_op.op.watch.timeout = timeout;
}
void notify(uint64_t cookie, uint32_t prot_ver, uint32_t timeout,
ceph::buffer::list &bl, ceph::buffer::list *inbl) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_NOTIFY);
osd_op.op.notify.cookie = cookie;
encode(prot_ver, *inbl);
encode(timeout, *inbl);
encode(bl, *inbl);
osd_op.indata.append(*inbl);
}
void notify_ack(uint64_t notify_id, uint64_t cookie,
ceph::buffer::list& reply_bl) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_NOTIFY_ACK);
ceph::buffer::list bl;
encode(notify_id, bl);
encode(cookie, bl);
encode(reply_bl, bl);
osd_op.indata.append(bl);
}
void list_watchers(std::list<obj_watch_t> *out,
int *prval) {
add_op(CEPH_OSD_OP_LIST_WATCHERS);
if (prval || out) {
set_handler(CB_ObjectOperation_decodewatchers(out, prval, nullptr));
out_rval.back() = prval;
}
}
void list_watchers(std::vector<neorados::ObjWatcher>* out,
boost::system::error_code* ec) {
add_op(CEPH_OSD_OP_LIST_WATCHERS);
set_handler(CB_ObjectOperation_decodewatchersneo(out, nullptr, ec));
out_ec.back() = ec;
}
void list_snaps(librados::snap_set_t *out, int *prval,
boost::system::error_code* ec = nullptr) {
add_op(CEPH_OSD_OP_LIST_SNAPS);
if (prval || out || ec) {
set_handler(CB_ObjectOperation_decodesnaps(out, nullptr, prval, ec));
out_rval.back() = prval;
out_ec.back() = ec;
}
}
void list_snaps(neorados::SnapSet *out, int *prval,
boost::system::error_code* ec = nullptr) {
add_op(CEPH_OSD_OP_LIST_SNAPS);
if (prval || out || ec) {
set_handler(CB_ObjectOperation_decodesnaps(nullptr, out, prval, ec));
out_rval.back() = prval;
out_ec.back() = ec;
}
}
void assert_version(uint64_t ver) {
OSDOp& osd_op = add_op(CEPH_OSD_OP_ASSERT_VER);
osd_op.op.assert_ver.ver = ver;
}
void cmpxattr(const char *name, const ceph::buffer::list& val,
int op, int mode) {
add_xattr(CEPH_OSD_OP_CMPXATTR, name, val);
OSDOp& o = *ops.rbegin();
o.op.xattr.cmp_op = op;
o.op.xattr.cmp_mode = mode;
}
void rollback(uint64_t snapid) {
OSDOp& osd_op = add_op(CEPH_OSD_OP_ROLLBACK);
osd_op.op.snap.snapid = snapid;
}
void copy_from(object_t src, snapid_t snapid, object_locator_t src_oloc,
version_t src_version, unsigned flags,
unsigned src_fadvise_flags) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_COPY_FROM);
osd_op.op.copy_from.snapid = snapid;
osd_op.op.copy_from.src_version = src_version;
osd_op.op.copy_from.flags = flags;
osd_op.op.copy_from.src_fadvise_flags = src_fadvise_flags;
encode(src, osd_op.indata);
encode(src_oloc, osd_op.indata);
}
void copy_from2(object_t src, snapid_t snapid, object_locator_t src_oloc,
version_t src_version, unsigned flags,
uint32_t truncate_seq, uint64_t truncate_size,
unsigned src_fadvise_flags) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_COPY_FROM2);
osd_op.op.copy_from.snapid = snapid;
osd_op.op.copy_from.src_version = src_version;
osd_op.op.copy_from.flags = flags;
osd_op.op.copy_from.src_fadvise_flags = src_fadvise_flags;
encode(src, osd_op.indata);
encode(src_oloc, osd_op.indata);
encode(truncate_seq, osd_op.indata);
encode(truncate_size, osd_op.indata);
}
/**
* writeback content to backing tier
*
* If object is marked dirty in the cache tier, write back content
* to backing tier. If the object is clean this is a no-op.
*
* If writeback races with an update, the update will block.
*
* use with IGNORE_CACHE to avoid triggering promote.
*/
void cache_flush() {
add_op(CEPH_OSD_OP_CACHE_FLUSH);
}
/**
* writeback content to backing tier
*
* If object is marked dirty in the cache tier, write back content
* to backing tier. If the object is clean this is a no-op.
*
* If writeback races with an update, return EAGAIN. Requires that
* the SKIPRWLOCKS flag be set.
*
* use with IGNORE_CACHE to avoid triggering promote.
*/
void cache_try_flush() {
add_op(CEPH_OSD_OP_CACHE_TRY_FLUSH);
}
/**
* evict object from cache tier
*
* If object is marked clean, remove the object from the cache tier.
* Otherwise, return EBUSY.
*
* use with IGNORE_CACHE to avoid triggering promote.
*/
void cache_evict() {
add_op(CEPH_OSD_OP_CACHE_EVICT);
}
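// Illustrative cache-tier maintenance sketch. Per the comments above, these
// ops are normally submitted with the IGNORE_CACHE flag so that they do not
// themselves trigger a promotion into the cache tier:
//
// ObjectOperation op;
// op.cache_try_flush(); // or op.cache_flush() / op.cache_evict()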
/*
* Extensible tier
*/
void set_redirect(object_t tgt, snapid_t snapid, object_locator_t tgt_oloc,
version_t tgt_version, int flag) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_SET_REDIRECT);
osd_op.op.copy_from.snapid = snapid;
osd_op.op.copy_from.src_version = tgt_version;
encode(tgt, osd_op.indata);
encode(tgt_oloc, osd_op.indata);
set_last_op_flags(flag);
}
void set_chunk(uint64_t src_offset, uint64_t src_length, object_locator_t tgt_oloc,
object_t tgt_oid, uint64_t tgt_offset, int flag) {
using ceph::encode;
OSDOp& osd_op = add_op(CEPH_OSD_OP_SET_CHUNK);
encode(src_offset, osd_op.indata);
encode(src_length, osd_op.indata);
encode(tgt_oloc, osd_op.indata);
encode(tgt_oid, osd_op.indata);
encode(tgt_offset, osd_op.indata);
set_last_op_flags(flag);
}
void tier_promote() {
add_op(CEPH_OSD_OP_TIER_PROMOTE);
}
void unset_manifest() {
add_op(CEPH_OSD_OP_UNSET_MANIFEST);
}
void tier_flush() {
add_op(CEPH_OSD_OP_TIER_FLUSH);
}
void tier_evict() {
add_op(CEPH_OSD_OP_TIER_EVICT);
}
void set_alloc_hint(uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags) {
add_alloc_hint(CEPH_OSD_OP_SETALLOCHINT, expected_object_size,
expected_write_size, flags);
// CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
// not worth a feature bit. Set FAILOK per-op flag to make
// sure older osds don't trip over an unsupported opcode.
set_last_op_flags(CEPH_OSD_OP_FLAG_FAILOK);
}
template<typename V>
void dup(V& sops) {
ops.clear();
std::copy(sops.begin(), sops.end(),
std::back_inserter(ops));
out_bl.resize(sops.size());
out_handler.resize(sops.size());
out_rval.resize(sops.size());
out_ec.resize(sops.size());
for (uint32_t i = 0; i < sops.size(); i++) {
out_bl[i] = &sops[i].outdata;
out_rval[i] = &sops[i].rval;
out_ec[i] = nullptr;
}
}
/**
* Pin/unpin an object in cache tier
*/
void cache_pin() {
add_op(CEPH_OSD_OP_CACHE_PIN);
}
void cache_unpin() {
add_op(CEPH_OSD_OP_CACHE_UNPIN);
}
};
inline std::ostream& operator <<(std::ostream& m, const ObjectOperation& oo) {
auto i = oo.ops.cbegin();
m << '[';
while (i != oo.ops.cend()) {
if (i != oo.ops.cbegin())
m << ' ';
m << *i;
++i;
}
m << ']';
return m;
}
// ----------------
class Objecter : public md_config_obs_t, public Dispatcher {
using MOSDOp = _mosdop::MOSDOp<osdc_opvec>;
public:
using OpSignature = void(boost::system::error_code);
using OpCompletion = ceph::async::Completion<OpSignature>;
// config observer bits
const char** get_tracked_conf_keys() const override;
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override;
public:
Messenger *messenger;
MonClient *monc;
boost::asio::io_context& service;
// This guarantees sequenced, one-at-a-time execution, and apparently
// people sometimes depend on this.
boost::asio::io_context::strand finish_strand{service};
ZTracer::Endpoint trace_endpoint{"0.0.0.0", 0, "Objecter"};
private:
std::unique_ptr<OSDMap> osdmap{std::make_unique<OSDMap>()};
public:
using Dispatcher::cct;
std::multimap<std::string,std::string> crush_location;
std::atomic<bool> initialized{false};
private:
std::atomic<uint64_t> last_tid{0};
std::atomic<unsigned> inflight_ops{0};
std::atomic<int> client_inc{-1};
uint64_t max_linger_id{0};
std::atomic<unsigned> num_in_flight{0};
std::atomic<int> global_op_flags{0}; // flags which are applied to each IO op
bool keep_balanced_budget = false;
bool honor_pool_full = true;
// If this is true, accumulate a set of blocklisted entities
// to be drained by consume_blocklist_events.
bool blocklist_events_enabled = false;
std::set<entity_addr_t> blocklist_events;
struct pg_mapping_t {
epoch_t epoch = 0;
std::vector<int> up;
int up_primary = -1;
std::vector<int> acting;
int acting_primary = -1;
pg_mapping_t() {}
pg_mapping_t(epoch_t epoch, const std::vector<int>& up, int up_primary,
const std::vector<int>& acting, int acting_primary)
: epoch(epoch), up(up), up_primary(up_primary),
acting(acting), acting_primary(acting_primary) {}
};
ceph::shared_mutex pg_mapping_lock =
ceph::make_shared_mutex("Objecter::pg_mapping_lock");
// pool -> pg mapping
std::map<int64_t, std::vector<pg_mapping_t>> pg_mappings;
// convenient accessors
bool lookup_pg_mapping(const pg_t& pg, epoch_t epoch, std::vector<int> *up,
int *up_primary, std::vector<int> *acting,
int *acting_primary) {
std::shared_lock l{pg_mapping_lock};
auto it = pg_mappings.find(pg.pool());
if (it == pg_mappings.end())
return false;
auto& mapping_array = it->second;
if (pg.ps() >= mapping_array.size())
return false;
if (mapping_array[pg.ps()].epoch != epoch) // stale
return false;
auto& pg_mapping = mapping_array[pg.ps()];
*up = pg_mapping.up;
*up_primary = pg_mapping.up_primary;
*acting = pg_mapping.acting;
*acting_primary = pg_mapping.acting_primary;
return true;
}
void update_pg_mapping(const pg_t& pg, pg_mapping_t&& pg_mapping) {
std::lock_guard l{pg_mapping_lock};
auto& mapping_array = pg_mappings[pg.pool()];
ceph_assert(pg.ps() < mapping_array.size());
mapping_array[pg.ps()] = std::move(pg_mapping);
}
void prune_pg_mapping(const mempool::osdmap::map<int64_t,pg_pool_t>& pools) {
std::lock_guard l{pg_mapping_lock};
for (auto& pool : pools) {
auto& mapping_array = pg_mappings[pool.first];
size_t pg_num = pool.second.get_pg_num();
if (mapping_array.size() != pg_num) {
// catch both pg_num increasing & decreasing
mapping_array.resize(pg_num);
}
}
for (auto it = pg_mappings.begin(); it != pg_mappings.end(); ) {
if (!pools.count(it->first)) {
// pool is gone
pg_mappings.erase(it++);
continue;
}
it++;
}
}
public:
void maybe_request_map();
void enable_blocklist_events();
private:
void _maybe_request_map();
version_t last_seen_osdmap_version = 0;
version_t last_seen_pgmap_version = 0;
mutable ceph::shared_mutex rwlock =
ceph::make_shared_mutex("Objecter::rwlock");
ceph::timer<ceph::coarse_mono_clock> timer;
PerfCounters* logger = nullptr;
uint64_t tick_event = 0;
void start_tick();
void tick();
void update_crush_location();
class RequestStateHook;
RequestStateHook *m_request_state_hook = nullptr;
public:
/*** track pending operations ***/
// read
struct OSDSession;
struct op_target_t {
int flags = 0;
epoch_t epoch = 0; ///< latest epoch we calculated the mapping
object_t base_oid;
object_locator_t base_oloc;
object_t target_oid;
object_locator_t target_oloc;
///< true if we are directed at base_pgid, not base_oid
bool precalc_pgid = false;
///< true if we have ever mapped to a valid pool
bool pool_ever_existed = false;
///< explicit pg target, if any
pg_t base_pgid;
pg_t pgid; ///< last (raw) pg we mapped to
spg_t actual_pgid; ///< last (actual) spg_t we mapped to
unsigned pg_num = 0; ///< last pg_num we mapped to
unsigned pg_num_mask = 0; ///< last pg_num_mask we mapped to
unsigned pg_num_pending = 0; ///< last pending pg_num we mapped to
std::vector<int> up; ///< set of up osds for last pg we mapped to
std::vector<int> acting; ///< set of acting osds for last pg we mapped to
int up_primary = -1; ///< last up_primary we mapped to
int acting_primary = -1; ///< last acting_primary we mapped to
int size = -1; ///< the size of the pool when we were last mapped
int min_size = -1; ///< the min size of the pool when we were last mapped
bool sort_bitwise = false; ///< whether the hobject_t sort order is bitwise
bool recovery_deletes = false; ///< whether the deletes are performed during recovery instead of peering
uint32_t peering_crush_bucket_count = 0;
uint32_t peering_crush_bucket_target = 0;
uint32_t peering_crush_bucket_barrier = 0;
int32_t peering_crush_mandatory_member = CRUSH_ITEM_NONE;
bool used_replica = false;
bool paused = false;
int osd = -1; ///< the final target osd, or -1
epoch_t last_force_resend = 0;
op_target_t(const object_t& oid, const object_locator_t& oloc, int flags)
: flags(flags),
base_oid(oid),
base_oloc(oloc)
{}
explicit op_target_t(pg_t pgid)
: base_oloc(pgid.pool(), pgid.ps()),
precalc_pgid(true),
base_pgid(pgid)
{}
op_target_t() = default;
hobject_t get_hobj() {
return hobject_t(target_oid,
target_oloc.key,
CEPH_NOSNAP,
target_oloc.hash >= 0 ? target_oloc.hash : pgid.ps(),
target_oloc.pool,
target_oloc.nspace);
}
bool contained_by(const hobject_t& begin, const hobject_t& end) {
hobject_t h = get_hobj();
int r = cmp(h, begin);
return r == 0 || (r > 0 && h < end);
}
bool respects_full() const {
return
(flags & (CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_RWORDERED)) &&
!(flags & (CEPH_OSD_FLAG_FULL_TRY | CEPH_OSD_FLAG_FULL_FORCE));
}
void dump(ceph::Formatter *f) const;
};
std::unique_ptr<ceph::async::Completion<void(boost::system::error_code)>>
OpContextVert(Context* c) {
if (c)
return ceph::async::Completion<void(boost::system::error_code)>::create(
service.get_executor(),
[c = std::unique_ptr<Context>(c)]
(boost::system::error_code e) mutable {
c.release()->complete(e);
});
else
return nullptr;
}
template<typename T>
std::unique_ptr<ceph::async::Completion<void(boost::system::error_code, T)>>
OpContextVert(Context* c, T* p) {
if (c || p)
return
ceph::async::Completion<void(boost::system::error_code, T)>::create(
service.get_executor(),
[c = std::unique_ptr<Context>(c), p]
(boost::system::error_code e, T r) mutable {
if (p)
*p = std::move(r);
if (c)
c.release()->complete(ceph::from_error_code(e));
});
else
return nullptr;
}
template<typename T>
std::unique_ptr<ceph::async::Completion<void(boost::system::error_code, T)>>
OpContextVert(Context* c, T& p) {
if (c)
return ceph::async::Completion<
void(boost::system::error_code, T)>::create(
service.get_executor(),
[c = std::unique_ptr<Context>(c), &p]
(boost::system::error_code e, T r) mutable {
p = std::move(r);
if (c)
c.release()->complete(ceph::from_error_code(e));
});
else
return nullptr;
}
struct Op : public RefCountedObject {
OSDSession *session = nullptr;
int incarnation = 0;
op_target_t target;
ConnectionRef con = nullptr; // for rx buffer only
uint64_t features = CEPH_FEATURES_SUPPORTED_DEFAULT; // explicitly specified op features
osdc_opvec ops;
snapid_t snapid = CEPH_NOSNAP;
SnapContext snapc;
ceph::real_time mtime;
ceph::buffer::list *outbl = nullptr;
boost::container::small_vector<ceph::buffer::list*, osdc_opvec_len> out_bl;
boost::container::small_vector<
fu2::unique_function<void(boost::system::error_code, int,
const ceph::buffer::list& bl) &&>,
osdc_opvec_len> out_handler;
boost::container::small_vector<int*, osdc_opvec_len> out_rval;
boost::container::small_vector<boost::system::error_code*,
osdc_opvec_len> out_ec;
int priority = 0;
using OpSig = void(boost::system::error_code);
using OpComp = ceph::async::Completion<OpSig>;
// Due to an irregularity of cmpxattr, we actually need the 'int'
// value for onfinish for legacy librados users. As such just
// preserve the Context* in this one case. That way we can have
// our callers just pass in a unique_ptr<OpComp> and not deal with
// our signature in Objecter being different than the exposed
// signature in RADOS.
//
// Add a function for the linger case, where we want better
// semantics than Context, but still need to be under the completion_lock.
std::variant<std::unique_ptr<OpComp>, fu2::unique_function<OpSig>,
Context*> onfinish;
uint64_t ontimeout = 0;
ceph_tid_t tid = 0;
int attempts = 0;
version_t *objver;
epoch_t *reply_epoch = nullptr;
ceph::coarse_mono_time stamp;
epoch_t map_dne_bound = 0;
int budget = -1;
/// true if we should resend this message on failure
bool should_resend = true;
/// true if the throttle budget is get/put on a series of OPs
/// instead of on a per-OP basis. When this flag is set, the budget is
/// acquired before sending the very first OP of the series and
/// released upon receiving the last OP reply.
bool ctx_budgeted = false;
int *data_offset;
osd_reqid_t reqid; // explicitly setting reqid
ZTracer::Trace trace;
static bool has_completion(decltype(onfinish)& f) {
return std::visit([](auto&& arg) { return bool(arg);}, f);
}
bool has_completion() {
return has_completion(onfinish);
}
static void complete(decltype(onfinish)&& f, boost::system::error_code ec,
int r) {
std::visit([ec, r](auto&& arg) {
if constexpr (std::is_same_v<std::decay_t<decltype(arg)>,
Context*>) {
arg->complete(r);
} else if constexpr (std::is_same_v<std::decay_t<decltype(arg)>,
fu2::unique_function<OpSig>>) {
std::move(arg)(ec);
} else {
arg->defer(std::move(arg), ec);
}
}, std::move(f));
}
void complete(boost::system::error_code ec, int r) {
complete(std::move(onfinish), ec, r);
}
Op(const object_t& o, const object_locator_t& ol, osdc_opvec&& _ops,
int f, std::unique_ptr<OpComp>&& fin,
version_t *ov, int *offset = nullptr,
ZTracer::Trace *parent_trace = nullptr) :
target(o, ol, f),
ops(std::move(_ops)),
out_bl(ops.size(), nullptr),
out_handler(ops.size()),
out_rval(ops.size(), nullptr),
out_ec(ops.size(), nullptr),
onfinish(std::move(fin)),
objver(ov),
data_offset(offset) {
if (target.base_oloc.key == o)
target.base_oloc.key.clear();
if (parent_trace && parent_trace->valid()) {
trace.init("op", nullptr, parent_trace);
trace.event("start");
}
}
Op(const object_t& o, const object_locator_t& ol, osdc_opvec&& _ops,
int f, Context* fin, version_t *ov, int *offset = nullptr,
ZTracer::Trace *parent_trace = nullptr) :
target(o, ol, f),
ops(std::move(_ops)),
out_bl(ops.size(), nullptr),
out_handler(ops.size()),
out_rval(ops.size(), nullptr),
out_ec(ops.size(), nullptr),
onfinish(fin),
objver(ov),
data_offset(offset) {
if (target.base_oloc.key == o)
target.base_oloc.key.clear();
if (parent_trace && parent_trace->valid()) {
trace.init("op", nullptr, parent_trace);
trace.event("start");
}
}
Op(const object_t& o, const object_locator_t& ol, osdc_opvec&& _ops,
int f, fu2::unique_function<OpSig>&& fin, version_t *ov, int *offset = nullptr,
ZTracer::Trace *parent_trace = nullptr) :
target(o, ol, f),
ops(std::move(_ops)),
out_bl(ops.size(), nullptr),
out_handler(ops.size()),
out_rval(ops.size(), nullptr),
out_ec(ops.size(), nullptr),
onfinish(std::move(fin)),
objver(ov),
data_offset(offset) {
if (target.base_oloc.key == o)
target.base_oloc.key.clear();
if (parent_trace && parent_trace->valid()) {
trace.init("op", nullptr, parent_trace);
trace.event("start");
}
}
bool operator<(const Op& other) const {
return tid < other.tid;
}
private:
~Op() override {
trace.event("finish");
}
};
struct CB_Op_Map_Latest {
Objecter *objecter;
ceph_tid_t tid;
CB_Op_Map_Latest(Objecter *o, ceph_tid_t t) : objecter(o), tid(t) {}
void operator()(boost::system::error_code err, version_t latest, version_t);
};
struct CB_Command_Map_Latest {
Objecter *objecter;
uint64_t tid;
CB_Command_Map_Latest(Objecter *o, ceph_tid_t t) : objecter(o), tid(t) {}
void operator()(boost::system::error_code err, version_t latest, version_t);
};
struct C_Stat : public Context {
ceph::buffer::list bl;
uint64_t *psize;
ceph::real_time *pmtime;
Context *fin;
C_Stat(uint64_t *ps, ceph::real_time *pm, Context *c) :
psize(ps), pmtime(pm), fin(c) {}
void finish(int r) override {
using ceph::decode;
if (r >= 0) {
auto p = bl.cbegin();
uint64_t s;
ceph::real_time m;
decode(s, p);
decode(m, p);
if (psize)
*psize = s;
if (pmtime)
*pmtime = m;
}
fin->complete(r);
}
};
struct C_GetAttrs : public Context {
ceph::buffer::list bl;
std::map<std::string,ceph::buffer::list>& attrset;
Context *fin;
C_GetAttrs(std::map<std::string, ceph::buffer::list>& set, Context *c) : attrset(set),
fin(c) {}
void finish(int r) override {
using ceph::decode;
if (r >= 0) {
auto p = bl.cbegin();
decode(attrset, p);
}
fin->complete(r);
}
};
// Pools and statistics
struct NListContext {
collection_list_handle_t pos;
// these are for !sortbitwise compat only
int current_pg = 0;
int starting_pg_num = 0;
bool sort_bitwise = false;
bool at_end_of_pool = false; ///< publicly visible end flag
int64_t pool_id = -1;
int pool_snap_seq = 0;
uint64_t max_entries = 0;
std::string nspace;
ceph::buffer::list bl; // raw data read to here
std::list<librados::ListObjectImpl> list;
ceph::buffer::list filter;
// The budget associated with this context. Once it is set (>= 0),
// the budget is not get/released on a per-OP basis; instead the budget
// is acquired before sending the first OP and released upon receiving
// the last op reply.
int ctx_budget = -1;
bool at_end() const {
return at_end_of_pool;
}
uint32_t get_pg_hash_position() const {
return pos.get_hash();
}
};
struct C_NList : public Context {
NListContext *list_context;
Context *final_finish;
Objecter *objecter;
epoch_t epoch;
C_NList(NListContext *lc, Context * finish, Objecter *ob) :
list_context(lc), final_finish(finish), objecter(ob), epoch(0) {}
void finish(int r) override {
if (r >= 0) {
objecter->_nlist_reply(list_context, r, final_finish, epoch);
} else {
final_finish->complete(r);
}
}
};
struct PoolStatOp {
ceph_tid_t tid;
std::vector<std::string> pools;
using OpSig = void(boost::system::error_code,
boost::container::flat_map<std::string, pool_stat_t>,
bool);
using OpComp = ceph::async::Completion<OpSig>;
std::unique_ptr<OpComp> onfinish;
std::uint64_t ontimeout;
ceph::coarse_mono_time last_submit;
};
struct StatfsOp {
ceph_tid_t tid;
std::optional<int64_t> data_pool;
using OpSig = void(boost::system::error_code,
const struct ceph_statfs);
using OpComp = ceph::async::Completion<OpSig>;
std::unique_ptr<OpComp> onfinish;
uint64_t ontimeout;
ceph::coarse_mono_time last_submit;
};
struct PoolOp {
ceph_tid_t tid = 0;
int64_t pool = 0;
std::string name;
using OpSig = void(boost::system::error_code, ceph::buffer::list);
using OpComp = ceph::async::Completion<OpSig>;
std::unique_ptr<OpComp> onfinish;
uint64_t ontimeout = 0;
int pool_op = 0;
int16_t crush_rule = 0;
snapid_t snapid = 0;
ceph::coarse_mono_time last_submit;
PoolOp() {}
};
// -- osd commands --
struct CommandOp : public RefCountedObject {
OSDSession *session = nullptr;
ceph_tid_t tid = 0;
std::vector<std::string> cmd;
ceph::buffer::list inbl;
// target_osd == -1 means target_pg is valid
const int target_osd = -1;
const pg_t target_pg;
op_target_t target;
epoch_t map_dne_bound = 0;
int map_check_error = 0; // error to return if map check fails
const char *map_check_error_str = nullptr;
using OpSig = void(boost::system::error_code, std::string,
ceph::buffer::list);
using OpComp = ceph::async::Completion<OpSig>;
std::unique_ptr<OpComp> onfinish;
uint64_t ontimeout = 0;
ceph::coarse_mono_time last_submit;
CommandOp(
int target_osd,
std::vector<std::string>&& cmd,
ceph::buffer::list&& inbl,
decltype(onfinish)&& onfinish)
: cmd(std::move(cmd)),
inbl(std::move(inbl)),
target_osd(target_osd),
onfinish(std::move(onfinish)) {}
CommandOp(
pg_t pgid,
std::vector<std::string>&& cmd,
ceph::buffer::list&& inbl,
decltype(onfinish)&& onfinish)
: cmd(std::move(cmd)),
inbl(std::move(inbl)),
target_pg(pgid),
target(pgid),
onfinish(std::move(onfinish)) {}
};
void submit_command(CommandOp *c, ceph_tid_t *ptid);
int _calc_command_target(CommandOp *c,
ceph::shunique_lock<ceph::shared_mutex> &sul);
void _assign_command_session(CommandOp *c,
ceph::shunique_lock<ceph::shared_mutex> &sul);
void _send_command(CommandOp *c);
int command_op_cancel(OSDSession *s, ceph_tid_t tid,
boost::system::error_code ec);
void _finish_command(CommandOp *c, boost::system::error_code ec,
std::string&& rs, ceph::buffer::list&& bl);
void handle_command_reply(MCommandReply *m);
// -- lingering ops --
struct LingerOp : public RefCountedObject {
Objecter *objecter;
uint64_t linger_id{0};
op_target_t target{object_t(), object_locator_t(), 0};
snapid_t snap{CEPH_NOSNAP};
SnapContext snapc;
ceph::real_time mtime;
osdc_opvec ops;
ceph::buffer::list inbl;
version_t *pobjver{nullptr};
bool is_watch{false};
ceph::coarse_mono_time watch_valid_thru; ///< send time for last acked ping
boost::system::error_code last_error; ///< error from last failed ping|reconnect, if any
ceph::shared_mutex watch_lock;
// queue of pending async operations, with the timestamp of
// when they were queued.
std::list<ceph::coarse_mono_time> watch_pending_async;
uint32_t register_gen{0};
bool registered{false};
bool canceled{false};
using OpSig = void(boost::system::error_code, ceph::buffer::list);
using OpComp = ceph::async::Completion<OpSig>;
std::unique_ptr<OpComp> on_reg_commit;
std::unique_ptr<OpComp> on_notify_finish;
uint64_t notify_id{0};
fu2::unique_function<void(boost::system::error_code,
uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
ceph::buffer::list&& bl)> handle;
OSDSession *session{nullptr};
int ctx_budget{-1};
ceph_tid_t register_tid{0};
ceph_tid_t ping_tid{0};
epoch_t map_dne_bound{0};
void _queued_async() {
// watch_lock must be held for write (unique lock)
watch_pending_async.push_back(ceph::coarse_mono_clock::now());
}
void finished_async() {
std::unique_lock l(watch_lock);
ceph_assert(!watch_pending_async.empty());
watch_pending_async.pop_front();
}
LingerOp(Objecter *o, uint64_t linger_id);
const LingerOp& operator=(const LingerOp& r) = delete;
LingerOp(const LingerOp& o) = delete;
uint64_t get_cookie() {
return reinterpret_cast<uint64_t>(this);
}
};
struct CB_Linger_Commit {
Objecter *objecter;
boost::intrusive_ptr<LingerOp> info;
ceph::buffer::list outbl; // used for notify only
CB_Linger_Commit(Objecter *o, LingerOp *l) : objecter(o), info(l) {}
~CB_Linger_Commit() = default;
void operator()(boost::system::error_code ec) {
objecter->_linger_commit(info.get(), ec, outbl);
}
};
struct CB_Linger_Reconnect {
Objecter *objecter;
boost::intrusive_ptr<LingerOp> info;
CB_Linger_Reconnect(Objecter *o, LingerOp *l) : objecter(o), info(l) {}
~CB_Linger_Reconnect() = default;
void operator()(boost::system::error_code ec) {
objecter->_linger_reconnect(info.get(), ec);
info.reset();
}
};
struct CB_Linger_Ping {
Objecter *objecter;
boost::intrusive_ptr<LingerOp> info;
ceph::coarse_mono_time sent;
uint32_t register_gen;
CB_Linger_Ping(Objecter *o, LingerOp *l, ceph::coarse_mono_time s)
: objecter(o), info(l), sent(s), register_gen(info->register_gen) {}
void operator()(boost::system::error_code ec) {
objecter->_linger_ping(info.get(), ec, sent, register_gen);
info.reset();
}
};
struct CB_Linger_Map_Latest {
Objecter *objecter;
uint64_t linger_id;
CB_Linger_Map_Latest(Objecter *o, uint64_t id) : objecter(o), linger_id(id) {}
void operator()(boost::system::error_code err, version_t latest, version_t);
};
// -- osd sessions --
struct OSDBackoff {
spg_t pgid;
uint64_t id;
hobject_t begin, end;
};
struct OSDSession : public RefCountedObject {
// pending ops
std::map<ceph_tid_t,Op*> ops;
std::map<uint64_t, LingerOp*> linger_ops;
std::map<ceph_tid_t,CommandOp*> command_ops;
// backoffs
std::map<spg_t,std::map<hobject_t,OSDBackoff>> backoffs;
std::map<uint64_t,OSDBackoff*> backoffs_by_id;
int osd;
// NB locking two sessions at the same time is only safe because
// it is only done in _recalc_linger_op_target with s and
// linger_op->session, and it holds rwlock for write. We disable
// lockdep (using std::shared_mutex) because lockdep doesn't know
// that.
std::shared_mutex lock;
int incarnation;
ConnectionRef con;
int num_locks;
std::unique_ptr<std::mutex[]> completion_locks;
OSDSession(CephContext *cct, int o) :
osd(o), incarnation(0), con(NULL),
num_locks(cct->_conf->objecter_completion_locks_per_session),
completion_locks(new std::mutex[num_locks]) {}
~OSDSession() override;
bool is_homeless() { return (osd == -1); }
std::unique_lock<std::mutex> get_lock(object_t& oid);
};
std::map<int,OSDSession*> osd_sessions;
bool osdmap_full_flag() const;
bool osdmap_pool_full(const int64_t pool_id) const;
private:
/**
* Test pg_pool_t::FLAG_FULL on a pool
*
* @return true if the pool exists and has the flag set, or
* the global full flag is set, else false
*/
bool _osdmap_pool_full(const int64_t pool_id) const;
bool _osdmap_pool_full(const pg_pool_t &p) const {
return p.has_flag(pg_pool_t::FLAG_FULL) && honor_pool_full;
}
void update_pool_full_map(std::map<int64_t, bool>& pool_full_map);
std::map<uint64_t, LingerOp*> linger_ops;
// we use this just to confirm a cookie is valid before dereferencing the ptr
std::set<LingerOp*> linger_ops_set;
std::map<ceph_tid_t,PoolStatOp*> poolstat_ops;
std::map<ceph_tid_t,StatfsOp*> statfs_ops;
std::map<ceph_tid_t,PoolOp*> pool_ops;
std::atomic<unsigned> num_homeless_ops{0};
OSDSession* homeless_session = new OSDSession(cct, -1);
// ops waiting for an osdmap with a new pool or confirmation that
// the pool does not exist (may be expanded to other uses later)
std::map<uint64_t, LingerOp*> check_latest_map_lingers;
std::map<ceph_tid_t, Op*> check_latest_map_ops;
std::map<ceph_tid_t, CommandOp*> check_latest_map_commands;
std::map<epoch_t,
std::vector<std::pair<std::unique_ptr<OpCompletion>,
boost::system::error_code>>> waiting_for_map;
ceph::timespan mon_timeout;
ceph::timespan osd_timeout;
MOSDOp *_prepare_osd_op(Op *op);
void _send_op(Op *op);
void _send_op_account(Op *op);
void _cancel_linger_op(Op *op);
void _finish_op(Op *op, int r);
static bool is_pg_changed(
int oldprimary,
const std::vector<int>& oldacting,
int newprimary,
const std::vector<int>& newacting,
bool any_change=false);
enum recalc_op_target_result {
RECALC_OP_TARGET_NO_ACTION = 0,
RECALC_OP_TARGET_NEED_RESEND,
RECALC_OP_TARGET_POOL_DNE,
RECALC_OP_TARGET_OSD_DNE,
RECALC_OP_TARGET_OSD_DOWN,
RECALC_OP_TARGET_POOL_EIO,
};
bool _osdmap_full_flag() const;
bool _osdmap_has_pool_full() const;
void _prune_snapc(
const mempool::osdmap::map<int64_t, snap_interval_set_t>& new_removed_snaps,
Op *op);
bool target_should_be_paused(op_target_t *op);
int _calc_target(op_target_t *t, Connection *con,
bool any_change = false);
int _map_session(op_target_t *op, OSDSession **s,
ceph::shunique_lock<ceph::shared_mutex>& lc);
void _session_op_assign(OSDSession *s, Op *op);
void _session_op_remove(OSDSession *s, Op *op);
void _session_linger_op_assign(OSDSession *to, LingerOp *op);
void _session_linger_op_remove(OSDSession *from, LingerOp *op);
void _session_command_op_assign(OSDSession *to, CommandOp *op);
void _session_command_op_remove(OSDSession *from, CommandOp *op);
int _assign_op_target_session(Op *op, ceph::shunique_lock<ceph::shared_mutex>& lc,
bool src_session_locked,
bool dst_session_locked);
int _recalc_linger_op_target(LingerOp *op,
ceph::shunique_lock<ceph::shared_mutex>& lc);
void _linger_submit(LingerOp *info,
ceph::shunique_lock<ceph::shared_mutex>& sul);
void _send_linger(LingerOp *info,
ceph::shunique_lock<ceph::shared_mutex>& sul);
void _linger_commit(LingerOp *info, boost::system::error_code ec,
ceph::buffer::list& outbl);
void _linger_reconnect(LingerOp *info, boost::system::error_code ec);
void _send_linger_ping(LingerOp *info);
void _linger_ping(LingerOp *info, boost::system::error_code ec,
ceph::coarse_mono_time sent, uint32_t register_gen);
boost::system::error_code _normalize_watch_error(boost::system::error_code ec);
friend class CB_Objecter_GetVersion;
friend class CB_DoWatchError;
public:
template<typename CT>
auto linger_callback_flush(CT&& ct) {
boost::asio::async_completion<CT, void(void)> init(ct);
boost::asio::defer(finish_strand, std::move(init.completion_handler));
return init.result.get();
}
private:
void _check_op_pool_dne(Op *op, std::unique_lock<std::shared_mutex> *sl);
void _check_op_pool_eio(Op *op, std::unique_lock<std::shared_mutex> *sl);
void _send_op_map_check(Op *op);
void _op_cancel_map_check(Op *op);
void _check_linger_pool_dne(LingerOp *op, bool *need_unregister);
void _check_linger_pool_eio(LingerOp *op);
void _send_linger_map_check(LingerOp *op);
void _linger_cancel_map_check(LingerOp *op);
void _check_command_map_dne(CommandOp *op);
void _send_command_map_check(CommandOp *op);
void _command_cancel_map_check(CommandOp *op);
void _kick_requests(OSDSession *session, std::map<uint64_t, LingerOp *>& lresend);
void _linger_ops_resend(std::map<uint64_t, LingerOp *>& lresend,
std::unique_lock<ceph::shared_mutex>& ul);
int _get_session(int osd, OSDSession **session,
ceph::shunique_lock<ceph::shared_mutex>& sul);
void put_session(OSDSession *s);
void get_session(OSDSession *s);
void _reopen_session(OSDSession *session);
void close_session(OSDSession *session);
void _nlist_reply(NListContext *list_context, int r, Context *final_finish,
epoch_t reply_epoch);
void resend_mon_ops();
/**
* handle a budget for in-flight ops
* budget is taken whenever an op goes into the ops std::map
* and returned whenever an op is removed from the std::map
* If throttle_op needs to throttle it will unlock client_lock.
*/
int calc_op_budget(const boost::container::small_vector_base<OSDOp>& ops);
void _throttle_op(Op *op, ceph::shunique_lock<ceph::shared_mutex>& sul,
int op_size = 0);
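  // _take_op_budget() pairs with put_op_budget_bytes(): the byte budget and
  // op slot taken when an op is submitted are returned when the op is
  // finished or cancelled.  With keep_balanced_budget set, _throttle_op()
  // may block (see the note above about unlocking) until enough budget has
  // been returned by completing ops.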
int _take_op_budget(Op *op, ceph::shunique_lock<ceph::shared_mutex>& sul) {
ceph_assert(sul && sul.mutex() == &rwlock);
int op_budget = calc_op_budget(op->ops);
if (keep_balanced_budget) {
_throttle_op(op, sul, op_budget);
} else { // update take_linger_budget to match this!
op_throttle_bytes.take(op_budget);
op_throttle_ops.take(1);
}
op->budget = op_budget;
return op_budget;
}
int take_linger_budget(LingerOp *info);
void put_op_budget_bytes(int op_budget) {
ceph_assert(op_budget >= 0);
op_throttle_bytes.put(op_budget);
op_throttle_ops.put(1);
}
void put_nlist_context_budget(NListContext *list_context);
Throttle op_throttle_bytes{cct, "objecter_bytes",
static_cast<int64_t>(
cct->_conf->objecter_inflight_op_bytes)};
Throttle op_throttle_ops{cct, "objecter_ops",
static_cast<int64_t>(
cct->_conf->objecter_inflight_ops)};
public:
Objecter(CephContext *cct, Messenger *m, MonClient *mc,
boost::asio::io_context& service);
~Objecter() override;
void init();
void start(const OSDMap *o = nullptr);
void shutdown();
  // This template replaces osdmap_(get)|(put)_read. Simply wrap
// whatever functionality you want to use the OSDMap in a lambda like:
//
// with_osdmap([](const OSDMap& o) { o.do_stuff(); });
//
// or
//
// auto t = with_osdmap([&](const OSDMap& o) { return o.lookup_stuff(x); });
//
// Do not call into something that will try to lock the OSDMap from
// here or you will have great woe and misery.
template<typename Callback, typename...Args>
decltype(auto) with_osdmap(Callback&& cb, Args&&... args) {
std::shared_lock l(rwlock);
return std::forward<Callback>(cb)(*osdmap, std::forward<Args>(args)...);
}
/**
* Tell the objecter to throttle outgoing ops according to its
* budget (in _conf). If you do this, ops can block, in
* which case it will unlock client_lock and sleep until
* incoming messages reduce the used budget low enough for
* the ops to continue going; then it will lock client_lock again.
*/
void set_balanced_budget() { keep_balanced_budget = true; }
void unset_balanced_budget() { keep_balanced_budget = false; }
void set_honor_pool_full() { honor_pool_full = true; }
void unset_honor_pool_full() { honor_pool_full = false; }
void _scan_requests(
OSDSession *s,
bool skipped_map,
bool cluster_full,
std::map<int64_t, bool> *pool_full_map,
std::map<ceph_tid_t, Op*>& need_resend,
std::list<LingerOp*>& need_resend_linger,
std::map<ceph_tid_t, CommandOp*>& need_resend_command,
ceph::shunique_lock<ceph::shared_mutex>& sul);
int64_t get_object_hash_position(int64_t pool, const std::string& key,
const std::string& ns);
int64_t get_object_pg_hash_position(int64_t pool, const std::string& key,
const std::string& ns);
// messages
public:
bool ms_dispatch(Message *m) override;
bool ms_can_fast_dispatch_any() const override {
return true;
}
bool ms_can_fast_dispatch(const Message *m) const override {
switch (m->get_type()) {
case CEPH_MSG_OSD_OPREPLY:
case CEPH_MSG_WATCH_NOTIFY:
return true;
default:
return false;
}
}
void ms_fast_dispatch(Message *m) override {
if (!ms_dispatch(m)) {
m->put();
}
}
void handle_osd_op_reply(class MOSDOpReply *m);
void handle_osd_backoff(class MOSDBackoff *m);
void handle_watch_notify(class MWatchNotify *m);
void handle_osd_map(class MOSDMap *m);
void wait_for_osd_map(epoch_t e=0);
template<typename CompletionToken>
auto wait_for_osd_map(CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, void()> init(token);
std::unique_lock l(rwlock);
if (osdmap->get_epoch()) {
l.unlock();
boost::asio::post(std::move(init.completion_handler));
} else {
waiting_for_map[0].emplace_back(
OpCompletion::create(
service.get_executor(),
[c = std::move(init.completion_handler)]
(boost::system::error_code) mutable {
std::move(c)();
}), boost::system::error_code{});
l.unlock();
}
return init.result.get();
}
  /**
   * Get the entities blocklisted since this was last called, and
   * reset the accumulated set.
   *
   * Uses a std::set because the typical use case is to compare some
   * other list of clients to see which overlap with the blocklisted
   * addrs.
   */
void consume_blocklist_events(std::set<entity_addr_t> *events);
int pool_snap_by_name(int64_t poolid,
const char *snap_name,
snapid_t *snap) const;
int pool_snap_get_info(int64_t poolid, snapid_t snap,
pool_snap_info_t *info) const;
int pool_snap_list(int64_t poolid, std::vector<uint64_t> *snaps);
private:
void emit_blocklist_events(const OSDMap::Incremental &inc);
void emit_blocklist_events(const OSDMap &old_osd_map,
const OSDMap &new_osd_map);
// low-level
void _op_submit(Op *op, ceph::shunique_lock<ceph::shared_mutex>& lc,
ceph_tid_t *ptid);
void _op_submit_with_budget(Op *op,
ceph::shunique_lock<ceph::shared_mutex>& lc,
ceph_tid_t *ptid,
int *ctx_budget = NULL);
// public interface
public:
void op_submit(Op *op, ceph_tid_t *ptid = NULL, int *ctx_budget = NULL);
bool is_active() {
std::shared_lock l(rwlock);
return !((!inflight_ops) && linger_ops.empty() &&
poolstat_ops.empty() && statfs_ops.empty());
}
/**
* Output in-flight requests
*/
void _dump_active(OSDSession *s);
void _dump_active();
void dump_active();
void dump_requests(ceph::Formatter *fmt);
void _dump_ops(const OSDSession *s, ceph::Formatter *fmt);
void dump_ops(ceph::Formatter *fmt);
void _dump_linger_ops(const OSDSession *s, ceph::Formatter *fmt);
void dump_linger_ops(ceph::Formatter *fmt);
void _dump_command_ops(const OSDSession *s, ceph::Formatter *fmt);
void dump_command_ops(ceph::Formatter *fmt);
void dump_pool_ops(ceph::Formatter *fmt) const;
void dump_pool_stat_ops(ceph::Formatter *fmt) const;
void dump_statfs_ops(ceph::Formatter *fmt) const;
int get_client_incarnation() const { return client_inc; }
void set_client_incarnation(int inc) { client_inc = inc; }
bool have_map(epoch_t epoch);
struct CB_Objecter_GetVersion {
Objecter *objecter;
std::unique_ptr<OpCompletion> fin;
CB_Objecter_GetVersion(Objecter *o, std::unique_ptr<OpCompletion> c)
: objecter(o), fin(std::move(c)) {}
void operator()(boost::system::error_code ec, version_t newest,
version_t oldest) {
if (ec == boost::system::errc::resource_unavailable_try_again) {
// try again as instructed
objecter->_wait_for_latest_osdmap(std::move(*this));
} else if (ec) {
ceph::async::post(std::move(fin), ec);
} else {
auto l = std::unique_lock(objecter->rwlock);
objecter->_get_latest_version(oldest, newest, std::move(fin),
std::move(l));
}
}
};
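  /**
   * Wait until the local osdmap has reached at least 'epoch'.  If the map
   * is already new enough the completion is posted immediately; otherwise
   * the monitor is asked for the latest map version first.
   *
   * Illustrative use only (any Asio completion token should work):
   *
   *   objecter->wait_for_map(e, boost::asio::use_future).get();
   */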
template<typename CompletionToken>
auto wait_for_map(epoch_t epoch, CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, OpSignature> init(token);
if (osdmap->get_epoch() >= epoch) {
boost::asio::post(service,
ceph::async::bind_handler(
std::move(init.completion_handler),
boost::system::error_code()));
} else {
monc->get_version("osdmap",
CB_Objecter_GetVersion(
this,
OpCompletion::create(service.get_executor(),
std::move(init.completion_handler))));
}
return init.result.get();
}
void _wait_for_new_map(std::unique_ptr<OpCompletion>, epoch_t epoch,
boost::system::error_code = {});
private:
void _wait_for_latest_osdmap(CB_Objecter_GetVersion&& c) {
monc->get_version("osdmap", std::move(c));
}
public:
template<typename CompletionToken>
auto wait_for_latest_osdmap(CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, OpSignature> init(token);
monc->get_version("osdmap",
CB_Objecter_GetVersion(
this,
OpCompletion::create(service.get_executor(),
std::move(init.completion_handler))));
return init.result.get();
}
void wait_for_latest_osdmap(std::unique_ptr<OpCompletion> c) {
monc->get_version("osdmap",
CB_Objecter_GetVersion(this, std::move(c)));
}
template<typename CompletionToken>
auto get_latest_version(epoch_t oldest, epoch_t newest,
CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, OpSignature> init(token);
{
std::unique_lock wl(rwlock);
_get_latest_version(oldest, newest,
OpCompletion::create(
service.get_executor(),
std::move(init.completion_handler)),
std::move(wl));
}
return init.result.get();
}
  void _get_latest_version(epoch_t oldest, epoch_t newest,
std::unique_ptr<OpCompletion> fin,
std::unique_lock<ceph::shared_mutex>&& ul);
/** Get the current set of global op flags */
int get_global_op_flags() const { return global_op_flags; }
  /** Add a flag to the global op flags; not really an atomic operation */
void add_global_op_flags(int flag) {
global_op_flags.fetch_or(flag);
}
/** Clear the passed flags from the global op flag set */
void clear_global_op_flag(int flags) {
global_op_flags.fetch_and(~flags);
}
/// cancel an in-progress request with the given return code
private:
int op_cancel(OSDSession *s, ceph_tid_t tid, int r);
int _op_cancel(ceph_tid_t tid, int r);
public:
int op_cancel(ceph_tid_t tid, int r);
int op_cancel(const std::vector<ceph_tid_t>& tidls, int r);
/**
* Any write op which is in progress at the start of this call shall no
* longer be in progress when this call ends. Operations started after the
* start of this call may still be in progress when this call ends.
*
* @return the latest possible epoch in which a cancelled op could have
* existed, or -1 if nothing was cancelled.
*/
epoch_t op_cancel_writes(int r, int64_t pool=-1);
// commands
void osd_command(int osd, std::vector<std::string> cmd,
ceph::buffer::list inbl, ceph_tid_t *ptid,
decltype(CommandOp::onfinish)&& onfinish) {
ceph_assert(osd >= 0);
auto c = new CommandOp(
osd,
std::move(cmd),
std::move(inbl),
std::move(onfinish));
submit_command(c, ptid);
}
template<typename CompletionToken>
auto osd_command(int osd, std::vector<std::string> cmd,
ceph::buffer::list inbl, ceph_tid_t *ptid,
CompletionToken&& token) {
boost::asio::async_completion<CompletionToken,
CommandOp::OpSig> init(token);
osd_command(osd, std::move(cmd), std::move(inbl), ptid,
CommandOp::OpComp::create(service.get_executor(),
std::move(init.completion_handler)));
return init.result.get();
}
void pg_command(pg_t pgid, std::vector<std::string> cmd,
ceph::buffer::list inbl, ceph_tid_t *ptid,
decltype(CommandOp::onfinish)&& onfinish) {
auto *c = new CommandOp(
pgid,
std::move(cmd),
std::move(inbl),
std::move(onfinish));
submit_command(c, ptid);
}
template<typename CompletionToken>
auto pg_command(pg_t pgid, std::vector<std::string> cmd,
ceph::buffer::list inbl, ceph_tid_t *ptid,
CompletionToken&& token) {
boost::asio::async_completion<CompletionToken,
CommandOp::OpSig> init(token);
pg_command(pgid, std::move(cmd), std::move(inbl), ptid,
CommandOp::OpComp::create(service.get_executor(),
std::move(init.completion_handler)));
return init.result.get();
}
// mid-level helpers
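  // A minimal, illustrative flow for the helpers below -- 'op' is an
  // ObjectOperation the caller has already populated and 'fin' is a
  // caller-supplied Context*:
  //
  //   ceph_tid_t tid = objecter->mutate(oid, oloc, op, snapc,
  //                                     ceph::real_clock::now(), 0, fin);
  //
  // The ObjectOperation's per-op output buffers and handlers are moved
  // into the resulting Op, and 'op' is cleared afterwards.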
Op *prepare_mutate_op(
const object_t& oid, const object_locator_t& oloc,
ObjectOperation& op, const SnapContext& snapc,
ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
osd_reqid_t reqid = osd_reqid_t(),
ZTracer::Trace *parent_trace = nullptr) {
Op *o = new Op(oid, oloc, std::move(op.ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver,
nullptr, parent_trace);
o->priority = op.priority;
o->mtime = mtime;
o->snapc = snapc;
o->out_rval.swap(op.out_rval);
o->out_bl.swap(op.out_bl);
o->out_handler.swap(op.out_handler);
o->out_ec.swap(op.out_ec);
o->reqid = reqid;
op.clear();
return o;
}
ceph_tid_t mutate(
const object_t& oid, const object_locator_t& oloc,
ObjectOperation& op, const SnapContext& snapc,
ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
osd_reqid_t reqid = osd_reqid_t()) {
Op *o = prepare_mutate_op(oid, oloc, op, snapc, mtime, flags,
oncommit, objver, reqid);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
void mutate(const object_t& oid, const object_locator_t& oloc,
ObjectOperation&& op, const SnapContext& snapc,
ceph::real_time mtime, int flags,
std::unique_ptr<Op::OpComp>&& oncommit,
version_t *objver = NULL, osd_reqid_t reqid = osd_reqid_t(),
ZTracer::Trace *parent_trace = nullptr) {
Op *o = new Op(oid, oloc, std::move(op.ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, std::move(oncommit), objver,
nullptr, parent_trace);
o->priority = op.priority;
o->mtime = mtime;
o->snapc = snapc;
o->out_bl.swap(op.out_bl);
o->out_handler.swap(op.out_handler);
o->out_rval.swap(op.out_rval);
o->out_ec.swap(op.out_ec);
o->reqid = reqid;
op.clear();
op_submit(o);
}
Op *prepare_read_op(
const object_t& oid, const object_locator_t& oloc,
ObjectOperation& op,
snapid_t snapid, ceph::buffer::list *pbl, int flags,
Context *onack, version_t *objver = NULL,
int *data_offset = NULL,
uint64_t features = 0,
ZTracer::Trace *parent_trace = nullptr) {
Op *o = new Op(oid, oloc, std::move(op.ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, onack, objver,
data_offset, parent_trace);
o->priority = op.priority;
o->snapid = snapid;
o->outbl = pbl;
if (!o->outbl && op.size() == 1 && op.out_bl[0] && op.out_bl[0]->length())
o->outbl = op.out_bl[0];
o->out_bl.swap(op.out_bl);
o->out_handler.swap(op.out_handler);
o->out_rval.swap(op.out_rval);
o->out_ec.swap(op.out_ec);
op.clear();
return o;
}
ceph_tid_t read(
const object_t& oid, const object_locator_t& oloc,
ObjectOperation& op,
snapid_t snapid, ceph::buffer::list *pbl, int flags,
Context *onack, version_t *objver = NULL,
int *data_offset = NULL,
uint64_t features = 0) {
Op *o = prepare_read_op(oid, oloc, op, snapid, pbl, flags, onack, objver,
data_offset);
if (features)
o->features = features;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
void read(const object_t& oid, const object_locator_t& oloc,
ObjectOperation&& op, snapid_t snapid, ceph::buffer::list *pbl,
int flags, std::unique_ptr<Op::OpComp>&& onack,
version_t *objver = nullptr, int *data_offset = nullptr,
uint64_t features = 0, ZTracer::Trace *parent_trace = nullptr) {
Op *o = new Op(oid, oloc, std::move(op.ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, std::move(onack), objver,
data_offset, parent_trace);
o->priority = op.priority;
o->snapid = snapid;
o->outbl = pbl;
// XXX
if (!o->outbl && op.size() == 1 && op.out_bl[0] && op.out_bl[0]->length()) {
o->outbl = op.out_bl[0];
}
o->out_bl.swap(op.out_bl);
o->out_handler.swap(op.out_handler);
o->out_rval.swap(op.out_rval);
o->out_ec.swap(op.out_ec);
if (features)
o->features = features;
op.clear();
op_submit(o);
}
Op *prepare_pg_read_op(
uint32_t hash, object_locator_t oloc,
ObjectOperation& op, ceph::buffer::list *pbl, int flags,
Context *onack, epoch_t *reply_epoch,
int *ctx_budget) {
Op *o = new Op(object_t(), oloc,
std::move(op.ops),
flags | global_op_flags | CEPH_OSD_FLAG_READ |
CEPH_OSD_FLAG_IGNORE_OVERLAY,
onack, NULL);
o->target.precalc_pgid = true;
o->target.base_pgid = pg_t(hash, oloc.pool);
o->priority = op.priority;
o->snapid = CEPH_NOSNAP;
o->outbl = pbl;
o->out_bl.swap(op.out_bl);
o->out_handler.swap(op.out_handler);
o->out_rval.swap(op.out_rval);
o->out_ec.swap(op.out_ec);
o->reply_epoch = reply_epoch;
if (ctx_budget) {
// budget is tracked by listing context
o->ctx_budgeted = true;
}
op.clear();
return o;
}
ceph_tid_t pg_read(
uint32_t hash, object_locator_t oloc,
ObjectOperation& op, ceph::buffer::list *pbl, int flags,
Context *onack, epoch_t *reply_epoch,
int *ctx_budget) {
Op *o = prepare_pg_read_op(hash, oloc, op, pbl, flags,
onack, reply_epoch, ctx_budget);
ceph_tid_t tid;
op_submit(o, &tid, ctx_budget);
return tid;
}
ceph_tid_t pg_read(
uint32_t hash, object_locator_t oloc,
ObjectOperation& op, ceph::buffer::list *pbl, int flags,
std::unique_ptr<Op::OpComp>&& onack, epoch_t *reply_epoch, int *ctx_budget) {
ceph_tid_t tid;
Op *o = new Op(object_t(), oloc,
std::move(op.ops),
flags | global_op_flags | CEPH_OSD_FLAG_READ |
CEPH_OSD_FLAG_IGNORE_OVERLAY,
std::move(onack), nullptr);
o->target.precalc_pgid = true;
o->target.base_pgid = pg_t(hash, oloc.pool);
o->priority = op.priority;
o->snapid = CEPH_NOSNAP;
o->outbl = pbl;
o->out_bl.swap(op.out_bl);
o->out_handler.swap(op.out_handler);
o->out_rval.swap(op.out_rval);
o->out_ec.swap(op.out_ec);
o->reply_epoch = reply_epoch;
if (ctx_budget) {
// budget is tracked by listing context
o->ctx_budgeted = true;
}
op_submit(o, &tid, ctx_budget);
op.clear();
return tid;
}
// caller owns a ref
LingerOp *linger_register(const object_t& oid, const object_locator_t& oloc,
int flags);
ceph_tid_t linger_watch(LingerOp *info,
ObjectOperation& op,
const SnapContext& snapc, ceph::real_time mtime,
ceph::buffer::list& inbl,
decltype(info->on_reg_commit)&& oncommit,
version_t *objver);
ceph_tid_t linger_watch(LingerOp *info,
ObjectOperation& op,
const SnapContext& snapc, ceph::real_time mtime,
ceph::buffer::list& inbl,
Context* onfinish,
version_t *objver) {
return linger_watch(info, op, snapc, mtime, inbl,
OpContextVert<ceph::buffer::list>(onfinish, nullptr), objver);
}
ceph_tid_t linger_notify(LingerOp *info,
ObjectOperation& op,
snapid_t snap, ceph::buffer::list& inbl,
decltype(LingerOp::on_reg_commit)&& onfinish,
version_t *objver);
ceph_tid_t linger_notify(LingerOp *info,
ObjectOperation& op,
snapid_t snap, ceph::buffer::list& inbl,
ceph::buffer::list *poutbl,
Context* onack,
version_t *objver) {
return linger_notify(info, op, snap, inbl,
OpContextVert(onack, poutbl),
objver);
}
tl::expected<ceph::timespan,
boost::system::error_code> linger_check(LingerOp *info);
void linger_cancel(LingerOp *info); // releases a reference
void _linger_cancel(LingerOp *info);
void _do_watch_notify(boost::intrusive_ptr<LingerOp> info,
boost::intrusive_ptr<MWatchNotify> m);
/**
* set up initial ops in the op std::vector, and allocate a final op slot.
*
* The caller is responsible for filling in the final ops_count ops.
*
* @param ops op std::vector
* @param ops_count number of final ops the caller will fill in
* @param extra_ops pointer to [array of] initial op[s]
* @return index of final op (for caller to fill in)
*/
int init_ops(boost::container::small_vector_base<OSDOp>& ops, int ops_count,
ObjectOperation *extra_ops) {
int i;
int extra = 0;
if (extra_ops)
extra = extra_ops->ops.size();
ops.resize(ops_count + extra);
for (i=0; i<extra; i++) {
ops[i] = extra_ops->ops[i];
}
return i;
}
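  // For example: init_ops(ops, 1, nullptr) resizes 'ops' to a single slot
  // and returns 0; with an extra_ops holding two ops it resizes to three,
  // copies the extras into ops[0..1] and returns 2, leaving ops[2] for the
  // caller to fill (prepare_stat_op() below is a typical user).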
// high-level helpers
Op *prepare_stat_op(
const object_t& oid, const object_locator_t& oloc,
snapid_t snap, uint64_t *psize, ceph::real_time *pmtime,
int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_STAT;
C_Stat *fin = new C_Stat(psize, pmtime, onfinish);
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, fin, objver);
o->snapid = snap;
o->outbl = &fin->bl;
return o;
}
ceph_tid_t stat(
const object_t& oid, const object_locator_t& oloc,
snapid_t snap, uint64_t *psize, ceph::real_time *pmtime,
int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
Op *o = prepare_stat_op(oid, oloc, snap, psize, pmtime, flags,
onfinish, objver, extra_ops);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_read_op(
const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snap, ceph::buffer::list *pbl,
int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0,
ZTracer::Trace *parent_trace = nullptr) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_READ;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = len;
ops[i].op.extent.truncate_size = 0;
ops[i].op.extent.truncate_seq = 0;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, onfinish, objver,
nullptr, parent_trace);
o->snapid = snap;
o->outbl = pbl;
return o;
}
ceph_tid_t read(
const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snap, ceph::buffer::list *pbl,
int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
Op *o = prepare_read_op(oid, oloc, off, len, snap, pbl, flags,
onfinish, objver, extra_ops, op_flags);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_cmpext_op(
const object_t& oid, const object_locator_t& oloc,
uint64_t off, ceph::buffer::list &cmp_bl,
snapid_t snap, int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_CMPEXT;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = cmp_bl.length();
ops[i].op.extent.truncate_size = 0;
ops[i].op.extent.truncate_seq = 0;
ops[i].indata = cmp_bl;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, onfinish, objver);
o->snapid = snap;
return o;
}
ceph_tid_t cmpext(
const object_t& oid, const object_locator_t& oloc,
uint64_t off, ceph::buffer::list &cmp_bl,
snapid_t snap, int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
Op *o = prepare_cmpext_op(oid, oloc, off, cmp_bl, snap,
flags, onfinish, objver, extra_ops, op_flags);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t read_trunc(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snap,
ceph::buffer::list *pbl, int flags, uint64_t trunc_size,
__u32 trunc_seq, Context *onfinish,
version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_READ;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = len;
ops[i].op.extent.truncate_size = trunc_size;
ops[i].op.extent.truncate_seq = trunc_seq;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, onfinish, objver);
o->snapid = snap;
o->outbl = pbl;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t mapext(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snap, ceph::buffer::list *pbl,
int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_MAPEXT;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = len;
ops[i].op.extent.truncate_size = 0;
ops[i].op.extent.truncate_seq = 0;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, onfinish, objver);
o->snapid = snap;
o->outbl = pbl;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t getxattr(const object_t& oid, const object_locator_t& oloc,
const char *name, snapid_t snap, ceph::buffer::list *pbl, int flags,
Context *onfinish,
version_t *objver = NULL, ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_GETXATTR;
ops[i].op.xattr.name_len = (name ? strlen(name) : 0);
ops[i].op.xattr.value_len = 0;
if (name)
ops[i].indata.append(name, ops[i].op.xattr.name_len);
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, onfinish, objver);
o->snapid = snap;
o->outbl = pbl;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t getxattrs(const object_t& oid, const object_locator_t& oloc,
snapid_t snap, std::map<std::string,ceph::buffer::list>& attrset,
int flags, Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_GETXATTRS;
C_GetAttrs *fin = new C_GetAttrs(attrset, onfinish);
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_READ, fin, objver);
o->snapid = snap;
o->outbl = &fin->bl;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t read_full(const object_t& oid, const object_locator_t& oloc,
snapid_t snap, ceph::buffer::list *pbl, int flags,
Context *onfinish, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
return read(oid, oloc, 0, 0, snap, pbl, flags | global_op_flags |
CEPH_OSD_FLAG_READ, onfinish, objver, extra_ops);
}
// writes
ceph_tid_t _modify(const object_t& oid, const object_locator_t& oloc,
osdc_opvec& ops,
ceph::real_time mtime,
const SnapContext& snapc, int flags,
Context *oncommit,
version_t *objver = NULL) {
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_write_op(
const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0,
ZTracer::Trace *parent_trace = nullptr) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_WRITE;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = len;
ops[i].op.extent.truncate_size = 0;
ops[i].op.extent.truncate_seq = 0;
ops[i].indata = bl;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, std::move(oncommit), objver,
nullptr, parent_trace);
o->mtime = mtime;
o->snapc = snapc;
return o;
}
ceph_tid_t write(
const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
Op *o = prepare_write_op(oid, oloc, off, len, snapc, bl, mtime, flags,
oncommit, objver, extra_ops, op_flags);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_append_op(
const object_t& oid, const object_locator_t& oloc,
uint64_t len, const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime, int flags,
Context *oncommit,
version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_APPEND;
ops[i].op.extent.offset = 0;
ops[i].op.extent.length = len;
ops[i].op.extent.truncate_size = 0;
ops[i].op.extent.truncate_seq = 0;
ops[i].indata = bl;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
return o;
}
ceph_tid_t append(
const object_t& oid, const object_locator_t& oloc,
uint64_t len, const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime, int flags,
Context *oncommit,
version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
Op *o = prepare_append_op(oid, oloc, len, snapc, bl, mtime, flags,
oncommit, objver, extra_ops);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t write_trunc(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime, int flags,
uint64_t trunc_size, __u32 trunc_seq,
Context *oncommit,
version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_WRITE;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = len;
ops[i].op.extent.truncate_size = trunc_size;
ops[i].op.extent.truncate_seq = trunc_seq;
ops[i].indata = bl;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_write_full_op(
const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, const ceph::buffer::list &bl,
ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_WRITEFULL;
ops[i].op.extent.offset = 0;
ops[i].op.extent.length = bl.length();
ops[i].indata = bl;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
return o;
}
ceph_tid_t write_full(
const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, const ceph::buffer::list &bl,
ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
Op *o = prepare_write_full_op(oid, oloc, snapc, bl, mtime, flags,
oncommit, objver, extra_ops, op_flags);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_writesame_op(
const object_t& oid, const object_locator_t& oloc,
uint64_t write_len, uint64_t off,
const SnapContext& snapc, const ceph::buffer::list &bl,
ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_WRITESAME;
ops[i].op.writesame.offset = off;
ops[i].op.writesame.length = write_len;
ops[i].op.writesame.data_length = bl.length();
ops[i].indata = bl;
ops[i].op.flags = op_flags;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
return o;
}
ceph_tid_t writesame(
const object_t& oid, const object_locator_t& oloc,
uint64_t write_len, uint64_t off,
const SnapContext& snapc, const ceph::buffer::list &bl,
ceph::real_time mtime, int flags,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL, int op_flags = 0) {
Op *o = prepare_writesame_op(oid, oloc, write_len, off, snapc, bl,
mtime, flags, oncommit, objver,
extra_ops, op_flags);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t trunc(const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, ceph::real_time mtime, int flags,
uint64_t trunc_size, __u32 trunc_seq,
Context *oncommit, version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_TRUNCATE;
ops[i].op.extent.offset = trunc_size;
ops[i].op.extent.truncate_size = trunc_size;
ops[i].op.extent.truncate_seq = trunc_seq;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t zero(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len, const SnapContext& snapc,
ceph::real_time mtime, int flags, Context *oncommit,
version_t *objver = NULL, ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_ZERO;
ops[i].op.extent.offset = off;
ops[i].op.extent.length = len;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t rollback_object(const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, snapid_t snapid,
ceph::real_time mtime, Context *oncommit,
version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_ROLLBACK;
ops[i].op.snap.snapid = snapid;
Op *o = new Op(oid, oloc, std::move(ops), CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t create(const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, ceph::real_time mtime, int global_flags,
int create_flags, Context *oncommit,
version_t *objver = NULL,
ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_CREATE;
ops[i].op.flags = create_flags;
Op *o = new Op(oid, oloc, std::move(ops), global_flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
Op *prepare_remove_op(
const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, ceph::real_time mtime, int flags,
Context *oncommit,
version_t *objver = NULL, ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_DELETE;
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
return o;
}
ceph_tid_t remove(
const object_t& oid, const object_locator_t& oloc,
const SnapContext& snapc, ceph::real_time mtime, int flags,
Context *oncommit,
version_t *objver = NULL, ObjectOperation *extra_ops = NULL) {
Op *o = prepare_remove_op(oid, oloc, snapc, mtime, flags,
oncommit, objver, extra_ops);
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t setxattr(const object_t& oid, const object_locator_t& oloc,
const char *name, const SnapContext& snapc, const ceph::buffer::list &bl,
ceph::real_time mtime, int flags,
Context *oncommit,
version_t *objver = NULL, ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_SETXATTR;
ops[i].op.xattr.name_len = (name ? strlen(name) : 0);
ops[i].op.xattr.value_len = bl.length();
if (name)
ops[i].indata.append(name, ops[i].op.xattr.name_len);
ops[i].indata.append(bl);
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit,
objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
ceph_tid_t removexattr(const object_t& oid, const object_locator_t& oloc,
const char *name, const SnapContext& snapc,
ceph::real_time mtime, int flags,
Context *oncommit,
version_t *objver = NULL, ObjectOperation *extra_ops = NULL) {
osdc_opvec ops;
int i = init_ops(ops, 1, extra_ops);
ops[i].op.op = CEPH_OSD_OP_RMXATTR;
ops[i].op.xattr.name_len = (name ? strlen(name) : 0);
ops[i].op.xattr.value_len = 0;
if (name)
ops[i].indata.append(name, ops[i].op.xattr.name_len);
Op *o = new Op(oid, oloc, std::move(ops), flags | global_op_flags |
CEPH_OSD_FLAG_WRITE, oncommit, objver);
o->mtime = mtime;
o->snapc = snapc;
ceph_tid_t tid;
op_submit(o, &tid);
return tid;
}
void list_nobjects(NListContext *p, Context *onfinish);
uint32_t list_nobjects_seek(NListContext *p, uint32_t pos);
uint32_t list_nobjects_seek(NListContext *list_context, const hobject_t& c);
void list_nobjects_get_cursor(NListContext *list_context, hobject_t *c);
hobject_t enumerate_objects_begin();
hobject_t enumerate_objects_end();
template<typename T>
friend struct EnumerationContext;
template<typename T>
friend struct CB_EnumerateReply;
template<typename T>
void enumerate_objects(
int64_t pool_id,
std::string_view ns,
hobject_t start,
hobject_t end,
const uint32_t max,
const ceph::buffer::list& filter_bl,
fu2::unique_function<void(boost::system::error_code,
std::vector<T>,
hobject_t) &&> on_finish);
template<typename T>
void _issue_enumerate(hobject_t start,
std::unique_ptr<EnumerationContext<T>>);
template<typename T>
void _enumerate_reply(
ceph::buffer::list&& bl,
boost::system::error_code ec,
std::unique_ptr<EnumerationContext<T>>&& ectx);
// -------------------------
// pool ops
private:
void pool_op_submit(PoolOp *op);
void _pool_op_submit(PoolOp *op);
void _finish_pool_op(PoolOp *op, int r);
void _do_delete_pool(int64_t pool,
decltype(PoolOp::onfinish)&& onfinish);
public:
void create_pool_snap(int64_t pool, std::string_view snapName,
decltype(PoolOp::onfinish)&& onfinish);
void create_pool_snap(int64_t pool, std::string_view snapName,
Context* c) {
create_pool_snap(pool, snapName,
OpContextVert<ceph::buffer::list>(c, nullptr));
}
void allocate_selfmanaged_snap(int64_t pool,
std::unique_ptr<ceph::async::Completion<
void(boost::system::error_code,
snapid_t)>> onfinish);
void allocate_selfmanaged_snap(int64_t pool, snapid_t* psnapid,
Context* c) {
allocate_selfmanaged_snap(pool,
OpContextVert(c, psnapid));
}
void delete_pool_snap(int64_t pool, std::string_view snapName,
decltype(PoolOp::onfinish)&& onfinish);
void delete_pool_snap(int64_t pool, std::string_view snapName,
Context* c) {
delete_pool_snap(pool, snapName,
OpContextVert<ceph::buffer::list>(c, nullptr));
}
void delete_selfmanaged_snap(int64_t pool, snapid_t snap,
decltype(PoolOp::onfinish)&& onfinish);
void delete_selfmanaged_snap(int64_t pool, snapid_t snap,
Context* c) {
delete_selfmanaged_snap(pool, snap,
OpContextVert<ceph::buffer::list>(c, nullptr));
}
void create_pool(std::string_view name,
decltype(PoolOp::onfinish)&& onfinish,
int crush_rule=-1);
void create_pool(std::string_view name, Context *onfinish,
int crush_rule=-1) {
create_pool(name,
OpContextVert<ceph::buffer::list>(onfinish, nullptr),
crush_rule);
}
void delete_pool(int64_t pool,
decltype(PoolOp::onfinish)&& onfinish);
void delete_pool(int64_t pool,
Context* onfinish) {
delete_pool(pool, OpContextVert<ceph::buffer::list>(onfinish, nullptr));
}
void delete_pool(std::string_view name,
decltype(PoolOp::onfinish)&& onfinish);
void delete_pool(std::string_view name,
Context* onfinish) {
delete_pool(name, OpContextVert<ceph::buffer::list>(onfinish, nullptr));
}
void handle_pool_op_reply(MPoolOpReply *m);
int pool_op_cancel(ceph_tid_t tid, int r);
// --------------------------
// pool stats
private:
void _poolstat_submit(PoolStatOp *op);
public:
void handle_get_pool_stats_reply(MGetPoolStatsReply *m);
void get_pool_stats(const std::vector<std::string>& pools,
decltype(PoolStatOp::onfinish)&& onfinish);
template<typename CompletionToken>
auto get_pool_stats(const std::vector<std::string>& pools,
CompletionToken&& token) {
boost::asio::async_completion<CompletionToken,
PoolStatOp::OpSig> init(token);
get_pool_stats(pools,
PoolStatOp::OpComp::create(
service.get_executor(),
std::move(init.completion_handler)));
return init.result.get();
}
int pool_stat_op_cancel(ceph_tid_t tid, int r);
void _finish_pool_stat_op(PoolStatOp *op, int r);
// ---------------------------
// df stats
private:
void _fs_stats_submit(StatfsOp *op);
public:
void handle_fs_stats_reply(MStatfsReply *m);
void get_fs_stats(std::optional<int64_t> poolid,
decltype(StatfsOp::onfinish)&& onfinish);
template<typename CompletionToken>
auto get_fs_stats(std::optional<int64_t> poolid,
CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, StatfsOp::OpSig> init(token);
get_fs_stats(poolid,
StatfsOp::OpComp::create(service.get_executor(),
std::move(init.completion_handler)));
return init.result.get();
}
void get_fs_stats(struct ceph_statfs& result, std::optional<int64_t> poolid,
Context *onfinish) {
get_fs_stats(poolid, OpContextVert(onfinish, result));
}
int statfs_op_cancel(ceph_tid_t tid, int r);
void _finish_statfs_op(StatfsOp *op, int r);
// ---------------------------
// some scatter/gather hackery
void _sg_read_finish(std::vector<ObjectExtent>& extents,
std::vector<ceph::buffer::list>& resultbl,
ceph::buffer::list *bl, Context *onfinish);
struct C_SGRead : public Context {
Objecter *objecter;
std::vector<ObjectExtent> extents;
std::vector<ceph::buffer::list> resultbl;
ceph::buffer::list *bl;
Context *onfinish;
C_SGRead(Objecter *ob,
std::vector<ObjectExtent>& e, std::vector<ceph::buffer::list>& r, ceph::buffer::list *b,
Context *c) :
objecter(ob), bl(b), onfinish(c) {
extents.swap(e);
resultbl.swap(r);
}
void finish(int r) override {
objecter->_sg_read_finish(extents, resultbl, bl, onfinish);
}
};
void sg_read_trunc(std::vector<ObjectExtent>& extents, snapid_t snap,
ceph::buffer::list *bl, int flags, uint64_t trunc_size,
__u32 trunc_seq, Context *onfinish, int op_flags = 0) {
if (extents.size() == 1) {
read_trunc(extents[0].oid, extents[0].oloc, extents[0].offset,
extents[0].length, snap, bl, flags, extents[0].truncate_size,
trunc_seq, onfinish, 0, 0, op_flags);
} else {
C_GatherBuilder gather(cct);
std::vector<ceph::buffer::list> resultbl(extents.size());
int i=0;
for (auto p = extents.begin(); p != extents.end(); ++p) {
read_trunc(p->oid, p->oloc, p->offset, p->length, snap, &resultbl[i++],
flags, p->truncate_size, trunc_seq, gather.new_sub(),
0, 0, op_flags);
}
gather.set_finisher(new C_SGRead(this, extents, resultbl, bl, onfinish));
gather.activate();
}
}
void sg_read(std::vector<ObjectExtent>& extents, snapid_t snap, ceph::buffer::list *bl,
int flags, Context *onfinish, int op_flags = 0) {
sg_read_trunc(extents, snap, bl, flags, 0, 0, onfinish, op_flags);
}
void sg_write_trunc(std::vector<ObjectExtent>& extents, const SnapContext& snapc,
const ceph::buffer::list& bl, ceph::real_time mtime, int flags,
uint64_t trunc_size, __u32 trunc_seq,
Context *oncommit, int op_flags = 0) {
if (extents.size() == 1) {
write_trunc(extents[0].oid, extents[0].oloc, extents[0].offset,
extents[0].length, snapc, bl, mtime, flags,
extents[0].truncate_size, trunc_seq, oncommit,
0, 0, op_flags);
} else {
C_GatherBuilder gcom(cct, oncommit);
auto it = bl.cbegin();
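      // For each object extent, gather its bytes out of the caller's single
      // source bl: buffer_extents records where this extent's data sits in
      // that buffer, which need not be contiguous, hence the seek-and-copy
      // loop below.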
for (auto p = extents.begin(); p != extents.end(); ++p) {
ceph::buffer::list cur;
for (auto bit = p->buffer_extents.begin();
bit != p->buffer_extents.end();
++bit) {
if (it.get_off() != bit->first) {
it.seek(bit->first);
}
it.copy(bit->second, cur);
}
ceph_assert(cur.length() == p->length);
write_trunc(p->oid, p->oloc, p->offset, p->length,
snapc, cur, mtime, flags, p->truncate_size, trunc_seq,
oncommit ? gcom.new_sub():0,
0, 0, op_flags);
}
gcom.activate();
}
}
void sg_write(std::vector<ObjectExtent>& extents, const SnapContext& snapc,
const ceph::buffer::list& bl, ceph::real_time mtime, int flags,
Context *oncommit, int op_flags = 0) {
sg_write_trunc(extents, snapc, bl, mtime, flags, 0, 0, oncommit,
op_flags);
}
void ms_handle_connect(Connection *con) override;
bool ms_handle_reset(Connection *con) override;
void ms_handle_remote_reset(Connection *con) override;
bool ms_handle_refused(Connection *con) override;
void blocklist_self(bool set);
private:
epoch_t epoch_barrier = 0;
bool retry_writes_after_first_reply =
cct->_conf->objecter_retry_writes_after_first_reply;
public:
void set_epoch_barrier(epoch_t epoch);
PerfCounters *get_logger() {
return logger;
}
};
#endif
| 126,879 | 31.326115 | 108 | h |
null | ceph-main/src/osdc/Striper.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "Striper.h"
#include "include/types.h"
#include "include/buffer.h"
#include "osd/OSDMap.h"
#include "common/config.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_striper
#undef dout_prefix
#define dout_prefix *_dout << "striper "
using std::make_pair;
using std::map;
using std::pair;
using ceph::bufferlist;
namespace {
object_t format_oid(const char* object_format, uint64_t object_no) {
char buf[strlen(object_format) + 32];
snprintf(buf, sizeof(buf), object_format, (long long unsigned)object_no);
return object_t(buf);
}
struct OrderByObject {
constexpr bool operator()(uint64_t object_no,
const striper::LightweightObjectExtent& rhs) const {
return object_no < rhs.object_no;
}
constexpr bool operator()(const striper::LightweightObjectExtent& lhs,
uint64_t object_no) const {
return lhs.object_no < object_no;
}
};
template <typename I>
void add_partial_sparse_result(
CephContext *cct,
std::map<uint64_t, std::pair<ceph::buffer::list, uint64_t> >* partial,
uint64_t* total_intended_len, bufferlist& bl, I* it, const I& end_it,
uint64_t* bl_off, uint64_t tofs, uint64_t tlen) {
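  // Walk the target buffer range [tofs, tofs+tlen) against the sparse-read
  // extent map: bytes not covered by any extent (past the end of the map, or
  // in a gap before the next extent) are recorded as holes (length only,
  // zero-filled later at assembly time), while covered bytes are spliced out
  // of 'bl' into the matching partial-result slot.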
ldout(cct, 30) << " be " << tofs << "~" << tlen << dendl;
auto& s = *it;
while (tlen > 0) {
ldout(cct, 20) << " t " << tofs << "~" << tlen
<< " bl has " << bl.length()
<< " off " << *bl_off << dendl;
if (s == end_it) {
ldout(cct, 20) << " s at end" << dendl;
auto& r = (*partial)[tofs];
r.second = tlen;
*total_intended_len += r.second;
break;
}
ldout(cct, 30) << " s " << s->first << "~" << s->second << dendl;
// skip zero-length extent
if (s->second == 0) {
ldout(cct, 30) << " s len 0, skipping" << dendl;
++s;
continue;
}
if (s->first > *bl_off) {
// gap in sparse read result
pair<bufferlist, uint64_t>& r = (*partial)[tofs];
size_t gap = std::min<size_t>(s->first - *bl_off, tlen);
ldout(cct, 20) << " s gap " << gap << ", skipping" << dendl;
r.second = gap;
*total_intended_len += r.second;
*bl_off += gap;
tofs += gap;
tlen -= gap;
if (tlen == 0) {
continue;
}
}
ceph_assert(s->first <= *bl_off);
size_t left = (s->first + s->second) - *bl_off;
size_t actual = std::min<size_t>(left, tlen);
if (actual > 0) {
ldout(cct, 20) << " s has " << actual << ", copying" << dendl;
pair<bufferlist, uint64_t>& r = (*partial)[tofs];
bl.splice(0, actual, &r.first);
r.second = actual;
*total_intended_len += r.second;
*bl_off += actual;
tofs += actual;
tlen -= actual;
}
if (actual == left) {
ldout(cct, 30) << " s advancing" << dendl;
++s;
}
}
}
} // anonymous namespace
void Striper::file_to_extents(CephContext *cct, const char *object_format,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::vector<ObjectExtent>& extents,
uint64_t buffer_offset)
{
striper::LightweightObjectExtents lightweight_object_extents;
file_to_extents(cct, layout, offset, len, trunc_size, buffer_offset,
&lightweight_object_extents);
// convert lightweight object extents to heavyweight version
extents.reserve(lightweight_object_extents.size());
for (auto& lightweight_object_extent : lightweight_object_extents) {
auto& object_extent = extents.emplace_back(
object_t(format_oid(object_format, lightweight_object_extent.object_no)),
lightweight_object_extent.object_no,
lightweight_object_extent.offset, lightweight_object_extent.length,
lightweight_object_extent.truncate_size);
object_extent.oloc = OSDMap::file_to_object_locator(*layout);
object_extent.buffer_extents.reserve(
lightweight_object_extent.buffer_extents.size());
object_extent.buffer_extents.insert(
object_extent.buffer_extents.end(),
lightweight_object_extent.buffer_extents.begin(),
lightweight_object_extent.buffer_extents.end());
}
}
void Striper::file_to_extents(
CephContext *cct, const char *object_format,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
map<object_t,std::vector<ObjectExtent> >& object_extents,
uint64_t buffer_offset)
{
striper::LightweightObjectExtents lightweight_object_extents;
file_to_extents(cct, layout, offset, len, trunc_size, buffer_offset,
&lightweight_object_extents);
// convert lightweight object extents to heavyweight version
for (auto& lightweight_object_extent : lightweight_object_extents) {
auto oid = format_oid(object_format, lightweight_object_extent.object_no);
auto& object_extent = object_extents[oid].emplace_back(
oid, lightweight_object_extent.object_no,
lightweight_object_extent.offset, lightweight_object_extent.length,
lightweight_object_extent.truncate_size);
object_extent.oloc = OSDMap::file_to_object_locator(*layout);
object_extent.buffer_extents.reserve(
lightweight_object_extent.buffer_extents.size());
object_extent.buffer_extents.insert(
object_extent.buffer_extents.end(),
lightweight_object_extent.buffer_extents.begin(),
lightweight_object_extent.buffer_extents.end());
}
}
void Striper::file_to_extents(
CephContext *cct, const file_layout_t *layout, uint64_t offset,
uint64_t len, uint64_t trunc_size, uint64_t buffer_offset,
striper::LightweightObjectExtents* object_extents) {
ldout(cct, 10) << "file_to_extents " << offset << "~" << len << dendl;
ceph_assert(len > 0);
/*
* we want only one extent per object! this means that each extent
* we read may map into different bits of the final read
* buffer.. hence buffer_extents
*/
__u32 object_size = layout->object_size;
__u32 su = layout->stripe_unit;
__u32 stripe_count = layout->stripe_count;
ceph_assert(object_size >= su);
if (stripe_count == 1) {
ldout(cct, 20) << " sc is one, reset su to os" << dendl;
su = object_size;
}
uint64_t stripes_per_object = object_size / su;
ldout(cct, 20) << " su " << su << " sc " << stripe_count << " os "
<< object_size << " stripes_per_object " << stripes_per_object
<< dendl;
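  // Worked example (purely illustrative): with su = 4096, stripe_count = 4
  // and object_size = 16384 (so stripes_per_object = 4), file offset 20480
  // is block 5 -> stripe 1, stripe position 1, object set 0 -> object 1,
  // landing at x_offset = 4096 within that object.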
uint64_t cur = offset;
uint64_t left = len;
while (left > 0) {
// layout into objects
uint64_t blockno = cur / su; // which block
// which horizontal stripe (Y)
uint64_t stripeno = blockno / stripe_count;
// which object in the object set (X)
uint64_t stripepos = blockno % stripe_count;
// which object set
uint64_t objectsetno = stripeno / stripes_per_object;
// object id
uint64_t objectno = objectsetno * stripe_count + stripepos;
// map range into object
uint64_t block_start = (stripeno % stripes_per_object) * su;
uint64_t block_off = cur % su;
uint64_t max = su - block_off;
uint64_t x_offset = block_start + block_off;
uint64_t x_len;
if (left > max)
x_len = max;
else
x_len = left;
ldout(cct, 20) << " off " << cur << " blockno " << blockno << " stripeno "
<< stripeno << " stripepos " << stripepos << " objectsetno "
<< objectsetno << " objectno " << objectno
<< " block_start " << block_start << " block_off "
<< block_off << " " << x_offset << "~" << x_len
<< dendl;
striper::LightweightObjectExtent* ex = nullptr;
auto it = std::upper_bound(object_extents->begin(), object_extents->end(),
objectno, OrderByObject());
striper::LightweightObjectExtents::reverse_iterator rev_it(it);
if (rev_it == object_extents->rend() ||
rev_it->object_no != objectno ||
rev_it->offset + rev_it->length != x_offset) {
// expect up to "stripe-width - 1" vector shifts in the worst-case
ex = &(*object_extents->emplace(
it, objectno, x_offset, x_len,
object_truncate_size(cct, layout, objectno, trunc_size)));
ldout(cct, 20) << " added new " << *ex << dendl;
} else {
ex = &(*rev_it);
ceph_assert(ex->offset + ex->length == x_offset);
ldout(cct, 20) << " adding in to " << *ex << dendl;
ex->length += x_len;
}
ex->buffer_extents.emplace_back(cur - offset + buffer_offset, x_len);
ldout(cct, 15) << "file_to_extents " << *ex << dendl;
// ldout(cct, 0) << "map: ino " << ino << " oid " << ex.oid << " osd "
// << ex.osd << " offset " << ex.offset << " len " << ex.len
// << " ... left " << left << dendl;
left -= x_len;
cur += x_len;
}
}
void Striper::extent_to_file(CephContext *cct, file_layout_t *layout,
uint64_t objectno, uint64_t off, uint64_t len,
std::vector<pair<uint64_t, uint64_t> >& extents)
{
ldout(cct, 10) << "extent_to_file " << objectno << " " << off << "~"
<< len << dendl;
__u32 object_size = layout->object_size;
__u32 su = layout->stripe_unit;
__u32 stripe_count = layout->stripe_count;
ceph_assert(object_size >= su);
uint64_t stripes_per_object = object_size / su;
ldout(cct, 20) << " stripes_per_object " << stripes_per_object << dendl;
uint64_t off_in_block = off % su;
extents.reserve(len / su + 1);
while (len > 0) {
uint64_t stripepos = objectno % stripe_count;
uint64_t objectsetno = objectno / stripe_count;
uint64_t stripeno = off / su + objectsetno * stripes_per_object;
uint64_t blockno = stripeno * stripe_count + stripepos;
uint64_t extent_off = blockno * su + off_in_block;
uint64_t extent_len = std::min(len, su - off_in_block);
extents.push_back(make_pair(extent_off, extent_len));
ldout(cct, 20) << " object " << off << "~" << extent_len
<< " -> file " << extent_off << "~" << extent_len
<< dendl;
off_in_block = 0;
off += extent_len;
len -= extent_len;
}
}
uint64_t Striper::object_truncate_size(CephContext *cct,
const file_layout_t *layout,
uint64_t objectno, uint64_t trunc_size)
{
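  // Translate a file-level truncate size into the truncate threshold for one
  // object: objects in object sets entirely beyond the truncation point get
  // 0, object sets entirely before it keep the full object_size, and the
  // object set containing the truncation point gets a per-object value
  // derived from where trunc_size falls within the striping pattern.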
uint64_t obj_trunc_size;
if (trunc_size == 0 || trunc_size == (uint64_t)-1) {
obj_trunc_size = trunc_size;
} else {
__u32 object_size = layout->object_size;
__u32 su = layout->stripe_unit;
__u32 stripe_count = layout->stripe_count;
ceph_assert(object_size >= su);
uint64_t stripes_per_object = object_size / su;
uint64_t objectsetno = objectno / stripe_count;
uint64_t trunc_objectsetno = trunc_size / object_size / stripe_count;
if (objectsetno > trunc_objectsetno)
obj_trunc_size = 0;
else if (objectsetno < trunc_objectsetno)
obj_trunc_size = object_size;
else {
uint64_t trunc_blockno = trunc_size / su;
uint64_t trunc_stripeno = trunc_blockno / stripe_count;
uint64_t trunc_stripepos = trunc_blockno % stripe_count;
uint64_t trunc_objectno = trunc_objectsetno * stripe_count
+ trunc_stripepos;
if (objectno < trunc_objectno)
obj_trunc_size = ((trunc_stripeno % stripes_per_object) + 1) * su;
else if (objectno > trunc_objectno)
obj_trunc_size = (trunc_stripeno % stripes_per_object) * su;
else
obj_trunc_size = (trunc_stripeno % stripes_per_object) * su
+ (trunc_size % su);
}
}
ldout(cct, 20) << "object_truncate_size " << objectno << " "
<< trunc_size << "->" << obj_trunc_size << dendl;
return obj_trunc_size;
}
uint64_t Striper::get_num_objects(const file_layout_t& layout,
uint64_t size)
{
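  // Example (illustrative): su = 4096, stripe_count = 4, object_size = 16384
  // gives a period (stripe_count * object_size) of 65536.  A 70000-byte file
  // spans 2 periods with 4464 remainder bytes, which touch only 2 of the 4
  // objects in the last object set, so the result is 2*4 - 2 = 6.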
__u32 stripe_unit = layout.stripe_unit;
__u32 stripe_count = layout.stripe_count;
uint64_t period = layout.get_period();
uint64_t num_periods = (size + period - 1) / period;
uint64_t remainder_bytes = size % period;
uint64_t remainder_objs = 0;
if ((remainder_bytes > 0) && (remainder_bytes < (uint64_t)stripe_count
* stripe_unit))
remainder_objs = stripe_count - ((remainder_bytes + stripe_unit - 1)
/ stripe_unit);
return num_periods * stripe_count - remainder_objs;
}
uint64_t Striper::get_file_offset(CephContext *cct,
const file_layout_t *layout, uint64_t objectno, uint64_t off) {
ldout(cct, 10) << "get_file_offset " << objectno << " " << off << dendl;
__u32 object_size = layout->object_size;
__u32 su = layout->stripe_unit;
__u32 stripe_count = layout->stripe_count;
ceph_assert(object_size >= su);
uint64_t stripes_per_object = object_size / su;
ldout(cct, 20) << " stripes_per_object " << stripes_per_object << dendl;
uint64_t off_in_block = off % su;
uint64_t stripepos = objectno % stripe_count;
uint64_t objectsetno = objectno / stripe_count;
uint64_t stripeno = off / su + objectsetno * stripes_per_object;
uint64_t blockno = stripeno * stripe_count + stripepos;
return blockno * su + off_in_block;
}
// StripedReadResult
void Striper::StripedReadResult::add_partial_result(
CephContext *cct, bufferlist& bl,
const std::vector<pair<uint64_t,uint64_t> >& buffer_extents)
{
ldout(cct, 10) << "add_partial_result(" << this << ") " << bl.length()
<< " to " << buffer_extents << dendl;
for (auto p = buffer_extents.cbegin(); p != buffer_extents.cend(); ++p) {
pair<bufferlist, uint64_t>& r = partial[p->first];
size_t actual = std::min<uint64_t>(bl.length(), p->second);
bl.splice(0, actual, &r.first);
r.second = p->second;
total_intended_len += r.second;
}
}
void Striper::StripedReadResult::add_partial_result(
CephContext *cct, bufferlist&& bl,
const striper::LightweightBufferExtents& buffer_extents)
{
ldout(cct, 10) << "add_partial_result(" << this << ") " << bl.length()
<< " to " << buffer_extents << dendl;
for (auto& be : buffer_extents) {
auto& r = partial[be.first];
size_t actual = std::min<uint64_t>(bl.length(), be.second);
if (buffer_extents.size() == 1) {
r.first = std::move(bl);
} else {
bl.splice(0, actual, &r.first);
}
r.second = be.second;
total_intended_len += r.second;
}
}
void Striper::StripedReadResult::add_partial_sparse_result(
CephContext *cct, bufferlist& bl, const map<uint64_t, uint64_t>& bl_map,
uint64_t bl_off, const std::vector<pair<uint64_t,uint64_t> >& buffer_extents)
{
ldout(cct, 10) << "add_partial_sparse_result(" << this << ") " << bl.length()
<< " covering " << bl_map << " (offset " << bl_off << ")"
<< " to " << buffer_extents << dendl;
if (bl_map.empty()) {
add_partial_result(cct, bl, buffer_extents);
return;
}
auto s = bl_map.cbegin();
for (auto& be : buffer_extents) {
::add_partial_sparse_result(cct, &partial, &total_intended_len, bl, &s,
bl_map.end(), &bl_off, be.first, be.second);
}
}
void Striper::StripedReadResult::add_partial_sparse_result(
CephContext *cct, ceph::buffer::list&& bl,
const std::vector<std::pair<uint64_t, uint64_t>>& bl_map, uint64_t bl_off,
const striper::LightweightBufferExtents& buffer_extents) {
ldout(cct, 10) << "add_partial_sparse_result(" << this << ") " << bl.length()
<< " covering " << bl_map << " (offset " << bl_off << ")"
<< " to " << buffer_extents << dendl;
if (bl_map.empty()) {
add_partial_result(cct, std::move(bl), buffer_extents);
return;
}
auto s = bl_map.cbegin();
for (auto& be : buffer_extents) {
::add_partial_sparse_result(cct, &partial, &total_intended_len, bl, &s,
bl_map.cend(), &bl_off, be.first, be.second);
}
}
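// Note: assemble_result() below concatenates the partial results in offset
// order; extents that returned less data than requested (sparse reads or
// short reads at EOF) are zero-filled before the next present chunk, and
// trailing zeros are appended only when zero_tail is true.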
void Striper::StripedReadResult::assemble_result(CephContext *cct,
bufferlist& bl,
bool zero_tail)
{
ldout(cct, 10) << "assemble_result(" << this << ") zero_tail=" << zero_tail
<< dendl;
size_t zeros = 0; // zeros preceding current position
for (auto& p : partial) {
size_t got = p.second.first.length();
size_t expect = p.second.second;
if (got) {
if (zeros) {
bl.append_zero(zeros);
zeros = 0;
}
bl.claim_append(p.second.first);
}
zeros += expect - got;
}
if (zero_tail && zeros) {
bl.append_zero(zeros);
}
partial.clear();
}
void Striper::StripedReadResult::assemble_result(CephContext *cct, char *buffer, size_t length)
{
ceph_assert(buffer && length == total_intended_len);
map<uint64_t,pair<bufferlist,uint64_t> >::reverse_iterator p = partial.rbegin();
if (p == partial.rend())
return;
uint64_t curr = length;
uint64_t end = p->first + p->second.second;
while (p != partial.rend()) {
// sanity check
ldout(cct, 20) << "assemble_result(" << this << ") " << p->first << "~" << p->second.second
<< " " << p->second.first.length() << " bytes"
<< dendl;
ceph_assert(p->first == end - p->second.second);
end = p->first;
size_t len = p->second.first.length();
ceph_assert(curr >= p->second.second);
curr -= p->second.second;
if (len < p->second.second) {
if (len)
p->second.first.begin().copy(len, buffer + curr);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(buffer + curr + len, 0, p->second.second - len);
} else {
p->second.first.begin().copy(len, buffer + curr);
}
++p;
}
partial.clear();
ceph_assert(curr == 0);
}
uint64_t Striper::StripedReadResult::assemble_result(
CephContext *cct, std::map<uint64_t, uint64_t> *extent_map,
bufferlist *bl)
{
ldout(cct, 10) << "assemble_result(" << this << ")" << dendl;
for (auto& p : partial) {
uint64_t off = p.first;
uint64_t len = p.second.first.length();
if (len > 0) {
(*extent_map)[off] = len;
bl->claim_append(p.second.first);
}
}
partial.clear();
return total_intended_len;
}
| 18,104 | 32.652416 | 95 | cc |
null | ceph-main/src/osdc/Striper.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_STRIPER_H
#define CEPH_STRIPER_H
#include "include/common_fwd.h"
#include "include/types.h"
#include "osd/osd_types.h"
#include "osdc/StriperTypes.h"
//namespace ceph {
class Striper {
public:
static void file_to_extents(
CephContext *cct, const file_layout_t *layout, uint64_t offset,
uint64_t len, uint64_t trunc_size, uint64_t buffer_offset,
striper::LightweightObjectExtents* object_extents);
/*
   * map (ino, layout, offset, len) to a (list of) ObjectExtents (byte
* ranges in objects on (primary) osds)
*/
static void file_to_extents(CephContext *cct, const char *object_format,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::map<object_t, std::vector<ObjectExtent> >& extents,
uint64_t buffer_offset=0);
static void file_to_extents(CephContext *cct, const char *object_format,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::vector<ObjectExtent>& extents,
uint64_t buffer_offset=0);
static void file_to_extents(CephContext *cct, inodeno_t ino,
const file_layout_t *layout,
uint64_t offset, uint64_t len,
uint64_t trunc_size,
std::vector<ObjectExtent>& extents) {
// generate prefix/format
char buf[32];
snprintf(buf, sizeof(buf), "%llx.%%08llx", (long long unsigned)ino);
file_to_extents(cct, buf, layout, offset, len, trunc_size, extents);
}
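  // Usage sketch (illustrative; the layout values and log call are assumed
  // by this example, not taken from the surrounding code): map the first
  // 8 MiB of an inode to object extents and inspect which objects are hit.
  //
  //   std::vector<ObjectExtent> extents;
  //   Striper::file_to_extents(cct, ino, &layout, 0, 8 << 20, 0, extents);
  //   for (const auto& ex : extents)
  //     ldout(cct, 20) << ex.oid << " " << ex.offset << "~" << ex.length
  //                    << dendl;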
/**
   * reverse map an object extent to file extents
*/
static void extent_to_file(CephContext *cct, file_layout_t *layout,
uint64_t objectno, uint64_t off, uint64_t len,
std::vector<std::pair<uint64_t, uint64_t> >& extents);
static uint64_t object_truncate_size(
CephContext *cct, const file_layout_t *layout,
uint64_t objectno, uint64_t trunc_size);
static uint64_t get_num_objects(const file_layout_t& layout,
uint64_t size);
static uint64_t get_file_offset(CephContext *cct,
const file_layout_t *layout, uint64_t objectno, uint64_t off);
/*
* helper to assemble a striped result
*/
class StripedReadResult {
// offset -> (data, intended length)
std::map<uint64_t, std::pair<ceph::buffer::list, uint64_t> > partial;
uint64_t total_intended_len = 0; //sum of partial.second.second
public:
void add_partial_result(
CephContext *cct, ceph::buffer::list& bl,
const std::vector<std::pair<uint64_t,uint64_t> >& buffer_extents);
void add_partial_result(
CephContext *cct, ceph::buffer::list&& bl,
const striper::LightweightBufferExtents& buffer_extents);
/**
* add sparse read into results
*
* @param bl buffer
* @param bl_map std::map of which logical source extents this covers
* @param bl_off logical buffer offset (e.g., first bl_map key
* if the buffer is not sparse)
* @param buffer_extents output buffer extents the data maps to
*/
void add_partial_sparse_result(
CephContext *cct, ceph::buffer::list& bl,
const std::map<uint64_t, uint64_t>& bl_map, uint64_t bl_off,
const std::vector<std::pair<uint64_t,uint64_t> >& buffer_extents);
void add_partial_sparse_result(
CephContext *cct, ceph::buffer::list&& bl,
const std::vector<std::pair<uint64_t, uint64_t>>& bl_map,
uint64_t bl_off,
const striper::LightweightBufferExtents& buffer_extents);
void assemble_result(CephContext *cct, ceph::buffer::list& bl,
bool zero_tail);
/**
* @buffer copy read data into buffer
* @len the length of buffer
*/
void assemble_result(CephContext *cct, char *buffer, size_t len);
uint64_t assemble_result(CephContext *cct,
std::map<uint64_t, uint64_t> *extent_map,
ceph::buffer::list *bl);
};
};
//};
#endif
| 4,429 | 32.308271 | 77 | h |
null | ceph-main/src/osdc/StriperTypes.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSDC_STRIPER_TYPES_H
#define CEPH_OSDC_STRIPER_TYPES_H
#include "include/types.h"
#include <boost/container/small_vector.hpp>
#include <ios>
#include <utility>
namespace striper {
// off -> len extents in (striped) buffer being mapped
typedef std::pair<uint64_t,uint64_t> BufferExtent;
typedef boost::container::small_vector<
BufferExtent, 4> LightweightBufferExtents;
struct LightweightObjectExtent {
LightweightObjectExtent() = delete;
LightweightObjectExtent(uint64_t object_no, uint64_t offset,
uint64_t length, uint64_t truncate_size)
: object_no(object_no), offset(offset), length(length),
truncate_size(truncate_size) {
}
uint64_t object_no;
uint64_t offset; // in-object
uint64_t length; // in-object
uint64_t truncate_size; // in-object
LightweightBufferExtents buffer_extents;
};
typedef boost::container::small_vector<
LightweightObjectExtent, 4> LightweightObjectExtents;
inline std::ostream& operator<<(std::ostream& os,
const LightweightObjectExtent& ex) {
return os << "extent("
<< ex.object_no << " "
<< ex.offset << "~" << ex.length
<< " -> " << ex.buffer_extents
<< ")";
}
} // namespace striper
#endif // CEPH_OSDC_STRIPER_TYPES_H
| 1,427 | 28.142857 | 70 | h |
null | ceph-main/src/osdc/WritebackHandler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_OSDC_WRITEBACKHANDLER_H
#define CEPH_OSDC_WRITEBACKHANDLER_H
#include "include/Context.h"
#include "include/types.h"
#include "common/zipkin_trace.h"
#include "osd/osd_types.h"
class WritebackHandler {
public:
WritebackHandler() {}
virtual ~WritebackHandler() {}
virtual void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, ceph::buffer::list *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace, Context *onfinish) = 0;
/**
* check if a given extent read result may change due to a write
*
* Check if the content we see at the given read offset may change
* due to a write to this object.
*
* @param oid object
* @param read_off read offset
* @param read_len read length
* @param snapid read snapid
*/
virtual bool may_copy_on_write(const object_t& oid, uint64_t read_off,
uint64_t read_len, snapid_t snapid) = 0;
virtual ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc,
const ceph::buffer::list &bl, ceph::real_time mtime,
uint64_t trunc_size, __u32 trunc_seq,
ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) = 0;
virtual void overwrite_extent(const object_t& oid, uint64_t off, uint64_t len,
ceph_tid_t original_journal_tid,
ceph_tid_t new_journal_tid) {}
virtual bool can_scattered_write() { return false; }
virtual ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
std::vector<std::pair<uint64_t, ceph::buffer::list> >& io_vec,
const SnapContext& snapc, ceph::real_time mtime,
uint64_t trunc_size, __u32 trunc_seq,
Context *oncommit) {
return 0;
}
};
#endif
| 2,106 | 35.327586 | 80 | h |
null | ceph-main/src/osdc/error_code.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <string>
#include "common/error_code.h"
#include "error_code.h"
namespace bs = boost::system;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wnon-virtual-dtor"
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
class osdc_error_category : public ceph::converting_category {
public:
osdc_error_category(){}
const char* name() const noexcept override;
const char* message(int ev, char*, std::size_t) const noexcept override;
std::string message(int ev) const override;
bs::error_condition default_error_condition(int ev) const noexcept
override;
bool equivalent(int ev, const bs::error_condition& c) const
noexcept override;
using ceph::converting_category::equivalent;
int from_code(int ev) const noexcept override;
};
#pragma GCC diagnostic pop
#pragma clang diagnostic pop
const char* osdc_error_category::name() const noexcept {
return "osdc";
}
const char* osdc_error_category::message(int ev, char*,
std::size_t) const noexcept {
if (ev == 0)
return "No error";
switch (static_cast<osdc_errc>(ev)) {
case osdc_errc::pool_dne:
return "Pool does not exist";
case osdc_errc::pool_exists:
return "Pool already exists";
case osdc_errc::precondition_violated:
return "Precondition for operation not satisfied";
case osdc_errc::not_supported:
return "Operation not supported";
case osdc_errc::snapshot_exists:
return "Snapshot already exists";
case osdc_errc::snapshot_dne:
return "Snapshot does not exist";
case osdc_errc::timed_out:
return "Operation timed out";
case osdc_errc::pool_eio:
return "Pool EIO flag set";
}
return "Unknown error";
}
std::string osdc_error_category::message(int ev) const {
return message(ev, nullptr, 0);
}
bs::error_condition
osdc_error_category::default_error_condition(int ev) const noexcept {
switch (static_cast<osdc_errc>(ev)) {
case osdc_errc::pool_dne:
return ceph::errc::does_not_exist;
case osdc_errc::pool_exists:
return ceph::errc::exists;
case osdc_errc::precondition_violated:
return bs::errc::invalid_argument;
case osdc_errc::not_supported:
return bs::errc::operation_not_supported;
case osdc_errc::snapshot_exists:
return ceph::errc::exists;
case osdc_errc::snapshot_dne:
return ceph::errc::does_not_exist;
case osdc_errc::timed_out:
return bs::errc::timed_out;
case osdc_errc::pool_eio:
return bs::errc::io_error;
}
return { ev, *this };
}
bool osdc_error_category::equivalent(int ev,
const bs::error_condition& c) const noexcept {
if (static_cast<osdc_errc>(ev) == osdc_errc::pool_dne) {
if (c == bs::errc::no_such_file_or_directory) {
return true;
}
if (c == ceph::errc::not_in_map) {
return true;
}
}
if (static_cast<osdc_errc>(ev) == osdc_errc::pool_exists) {
if (c == bs::errc::file_exists) {
return true;
}
}
if (static_cast<osdc_errc>(ev) == osdc_errc::snapshot_exists) {
if (c == bs::errc::file_exists) {
return true;
}
}
if (static_cast<osdc_errc>(ev) == osdc_errc::snapshot_dne) {
if (c == bs::errc::no_such_file_or_directory) {
return true;
}
if (c == ceph::errc::not_in_map) {
return true;
}
}
return default_error_condition(ev) == c;
}
int osdc_error_category::from_code(int ev) const noexcept {
switch (static_cast<osdc_errc>(ev)) {
case osdc_errc::pool_dne:
return -ENOENT;
case osdc_errc::pool_exists:
return -EEXIST;
case osdc_errc::precondition_violated:
return -EINVAL;
case osdc_errc::not_supported:
return -EOPNOTSUPP;
case osdc_errc::snapshot_exists:
return -EEXIST;
case osdc_errc::snapshot_dne:
return -ENOENT;
case osdc_errc::timed_out:
return -ETIMEDOUT;
case osdc_errc::pool_eio:
return -EIO;
}
return -EDOM;
}
const bs::error_category& osdc_category() noexcept {
static const osdc_error_category c;
return c;
}
| 4,476 | 25.808383 | 83 | cc |
null | ceph-main/src/osdc/error_code.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <boost/system/error_code.hpp>
#include "include/rados.h"
const boost::system::error_category& osdc_category() noexcept;
enum class osdc_errc {
pool_dne = 1,
pool_exists,
// Come the revolution, we'll just kill your program. Maybe.
precondition_violated,
not_supported,
snapshot_exists,
snapshot_dne,
timed_out,
pool_eio
};
namespace boost::system {
template<>
struct is_error_code_enum<::osdc_errc> {
static const bool value = true;
};
template<>
struct is_error_condition_enum<::osdc_errc> {
static const bool value = false;
};
}
// implicit conversion:
inline boost::system::error_code make_error_code(osdc_errc e) noexcept {
return { static_cast<int>(e), osdc_category() };
}
// explicit conversion:
inline boost::system::error_condition make_error_condition(osdc_errc e) noexcept {
return { static_cast<int>(e), osdc_category() };
}
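
// Usage sketch (illustrative only): because is_error_code_enum is
// specialized above, an osdc_errc converts implicitly to a
// boost::system::error_code, e.g.
//
//   boost::system::error_code ec = osdc_errc::pool_dne;
//   // compares equal via osdc_error_category's equivalence rules
//   assert(ec == boost::system::errc::no_such_file_or_directory);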
| 1,363 | 22.929825 | 82 | h |
null | ceph-main/src/perfglue/cpu_profiler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "acconfig.h"
#include <gperftools/profiler.h>
#include "common/LogClient.h"
#include "perfglue/cpu_profiler.h"
void cpu_profiler_handle_command(const std::vector<std::string> &cmd,
std::ostream& out)
{
if (cmd.size() == 1 && cmd[0] == "status") {
ProfilerState st;
ProfilerGetCurrentState(&st);
out << "cpu_profiler " << (st.enabled ? "enabled":"not enabled")
<< " start_time " << st.start_time
<< " profile_name " << st.profile_name
<< " samples " << st.samples_gathered;
}
else if (cmd.size() == 1 && cmd[0] == "flush") {
ProfilerFlush();
out << "cpu_profiler: flushed";
}
else {
out << "cpu_profiler: unrecognized command " << cmd
<< "; expected one of status, flush.";
}
}
| 1,143 | 26.238095 | 70 | cc |
null | ceph-main/src/perfglue/cpu_profiler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_PERFGLUE_CPU_PROFILER
#define CEPH_PERFGLUE_CPU_PROFILER
/*
* Ceph glue for the Google Perftools CPU profiler
*/
#include <string>
#include <vector>
void cpu_profiler_handle_command(const std::vector<std::string> &cmd,
std::ostream& out);
#endif
| 642 | 23.730769 | 70 | h |
null | ceph-main/src/perfglue/disabled_heap_profiler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network/Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "heap_profiler.h"
bool ceph_using_tcmalloc() { return false; }
void ceph_heap_profiler_init() { return; }
void ceph_heap_profiler_stats(char *buf, int length) { return; }
void ceph_heap_release_free_memory() { return; }
double ceph_heap_get_release_rate() { return 0; }
void ceph_heap_set_release_rate(double value) { return; }
bool ceph_heap_profiler_running() { return false; }
void ceph_heap_profiler_start() { return; }
void ceph_heap_profiler_stop() { return; }
void ceph_heap_profiler_dump(const char *reason) { return; }
bool ceph_heap_get_numeric_property(const char *property, size_t *value)
{
return false;
}
bool ceph_heap_set_numeric_property(const char *property, size_t value)
{
return false;
}
void ceph_heap_profiler_handle_command(const std::vector<std::string>& cmd,
std::ostream& out) { return; }
| 1,322 | 26.5625 | 75 | cc |
null | ceph-main/src/perfglue/disabled_stubs.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/LogClient.h"
#include "perfglue/cpu_profiler.h"
#include <vector>
#include <string>
void cpu_profiler_handle_command(const std::vector<std::string> &cmd,
std::ostream& out)
{
out << "cpu_profiler support not linked in";
}
| 657 | 24.307692 | 70 | cc |
null | ceph-main/src/perfglue/heap_profiler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network/Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "acconfig.h"
// Use the newer gperftools header locations if available.
// If not, fall back to the old (gperftools < 2.0) locations.
#include <gperftools/heap-profiler.h>
#include <gperftools/malloc_extension.h>
#include "heap_profiler.h"
#include "common/environment.h"
#include "common/LogClient.h"
#include "global/global_context.h"
#include "common/debug.h"
#define dout_context g_ceph_context
bool ceph_using_tcmalloc()
{
return true;
}
void ceph_heap_profiler_init()
{
// Two other interesting environment variables to set are:
// HEAP_PROFILE_ALLOCATION_INTERVAL, HEAP_PROFILE_INUSE_INTERVAL
if (get_env_bool("CEPH_HEAP_PROFILER_INIT")) {
ceph_heap_profiler_start();
}
}
void ceph_heap_profiler_stats(char *buf, int length)
{
MallocExtension::instance()->GetStats(buf, length);
}
void ceph_heap_release_free_memory()
{
MallocExtension::instance()->ReleaseFreeMemory();
}
double ceph_heap_get_release_rate()
{
return MallocExtension::instance()->GetMemoryReleaseRate();
}
void ceph_heap_set_release_rate(double val)
{
MallocExtension::instance()->SetMemoryReleaseRate(val);
}
bool ceph_heap_get_numeric_property(
const char *property, size_t *value)
{
return MallocExtension::instance()->GetNumericProperty(
property,
value);
}
bool ceph_heap_set_numeric_property(
const char *property, size_t value)
{
return MallocExtension::instance()->SetNumericProperty(
property,
value);
}
bool ceph_heap_profiler_running()
{
#ifdef HAVE_LIBTCMALLOC
return IsHeapProfilerRunning();
#else
return false;
#endif
}
static void get_profile_name(char *profile_name, int profile_name_len)
{
#if __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic push
// Don't care, it doesn't matter, and we can't do anything about it.
#pragma GCC diagnostic ignored "-Wformat-truncation"
#endif
char path[PATH_MAX];
snprintf(path, sizeof(path), "%s", g_conf()->log_file.c_str());
char *last_slash = rindex(path, '/');
if (last_slash == NULL) {
snprintf(profile_name, profile_name_len, "./%s.profile",
g_conf()->name.to_cstr());
}
else {
last_slash[1] = '\0';
snprintf(profile_name, profile_name_len, "%s/%s.profile",
path, g_conf()->name.to_cstr());
}
#if __GNUC__ && __GNUC__ >= 8
#pragma GCC diagnostic pop
#endif
}
void ceph_heap_profiler_start()
{
#ifdef HAVE_LIBTCMALLOC
char profile_name[PATH_MAX];
get_profile_name(profile_name, sizeof(profile_name));
generic_dout(0) << "turning on heap profiler with prefix "
<< profile_name << dendl;
HeapProfilerStart(profile_name);
#endif
}
void ceph_heap_profiler_stop()
{
#ifdef HAVE_LIBTCMALLOC
HeapProfilerStop();
#endif
}
void ceph_heap_profiler_dump(const char *reason)
{
#ifdef HAVE_LIBTCMALLOC
HeapProfilerDump(reason);
#endif
}
#define HEAP_PROFILER_STATS_SIZE 2048
void ceph_heap_profiler_handle_command(const std::vector<std::string>& cmd,
std::ostream& out)
{
#ifdef HAVE_LIBTCMALLOC
if (cmd.size() == 1 && cmd[0] == "dump") {
if (!ceph_heap_profiler_running()) {
out << "heap profiler not running; can't dump";
return;
}
char heap_stats[HEAP_PROFILER_STATS_SIZE];
ceph_heap_profiler_stats(heap_stats, sizeof(heap_stats));
out << g_conf()->name << " dumping heap profile now.\n"
<< heap_stats;
ceph_heap_profiler_dump("admin request");
} else if (cmd.size() == 1 && cmd[0] == "start_profiler") {
ceph_heap_profiler_start();
out << g_conf()->name << " started profiler";
} else if (cmd.size() == 1 && cmd[0] == "stop_profiler") {
ceph_heap_profiler_stop();
out << g_conf()->name << " stopped profiler";
} else if (cmd.size() == 1 && cmd[0] == "release") {
ceph_heap_release_free_memory();
out << g_conf()->name << " releasing free RAM back to system.";
} else if (cmd.size() == 1 && cmd[0] == "get_release_rate") {
out << g_conf()->name << " release rate: "
<< std::setprecision(4) << ceph_heap_get_release_rate() << "\n";
} else if (cmd.size() == 2 && cmd[0] == "set_release_rate") {
try {
double val = std::stod(cmd[1]);
ceph_heap_set_release_rate(val);
out << g_conf()->name << " release rate changed to: "
<< std::setprecision(4) << ceph_heap_get_release_rate() << "\n";
} catch (...) {
      out << g_conf()->name << " *** need a numerical value. ";
}
} else
#endif
if (cmd.size() == 1 && cmd[0] == "stats") {
char heap_stats[HEAP_PROFILER_STATS_SIZE];
ceph_heap_profiler_stats(heap_stats, sizeof(heap_stats));
out << g_conf()->name << " tcmalloc heap stats:"
<< heap_stats;
} else {
out << "unknown command " << cmd;
}
}
| 5,121 | 26.100529 | 75 | cc |
null | ceph-main/src/perfglue/heap_profiler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network/Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef HEAP_PROFILER_H_
#define HEAP_PROFILER_H_
#include <string>
#include <vector>
#include "common/config.h"
class LogClient;
/*
* Ceph glue for the Google perftools heap profiler, included
* as part of tcmalloc. This replaces ugly function pointers
* and #ifdef hacks!
*/
bool ceph_using_tcmalloc();
/*
* Configure the heap profiler
*/
void ceph_heap_profiler_init();
void ceph_heap_profiler_stats(char *buf, int length);
void ceph_heap_release_free_memory();
double ceph_heap_get_release_rate();
void ceph_heap_set_release_rate(double value);
bool ceph_heap_profiler_running();
void ceph_heap_profiler_start();
void ceph_heap_profiler_stop();
void ceph_heap_profiler_dump(const char *reason);
bool ceph_heap_get_numeric_property(const char *property, size_t *value);
bool ceph_heap_set_numeric_property(const char *property, size_t value);
void ceph_heap_profiler_handle_command(const std::vector<std::string> &cmd,
std::ostream& out);
#endif /* HEAP_PROFILER_H_ */
| 1,468 | 24.327586 | 75 | h |
null | ceph-main/src/pybind/mgr/dashboard/run-backend-api-request.sh | #!/bin/bash
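# Issues a single authenticated request against the dashboard REST API of a
# running vstart cluster. Example invocation (endpoint chosen for
# illustration only):
#   ./run-backend-api-request.sh GET /api/health/minimal ''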
CURR_DIR=`pwd`
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
cd ../../../../${BUILD_DIR}
API_URL=`./bin/ceph mgr services 2>/dev/null | jq .dashboard | sed -e 's/"//g' -e 's!/$!!g'`
if [ "$API_URL" = "null" ]; then
echo "Couldn't retrieve API URL, exiting..." >&2
exit 1
fi
cd $CURR_DIR
TOKEN=`curl --insecure -s -H "Content-Type: application/json" -X POST \
-d '{"username":"admin","password":"admin"}' $API_URL/api/auth \
| jq .token | sed -e 's/"//g'`
echo "METHOD: $1"
echo "URL: ${API_URL}${2}"
echo "DATA: $3"
echo ""
curl --insecure -s -b /tmp/cd-cookie.txt -H "Authorization: Bearer $TOKEN " \
-H "Content-Type: application/json" -X $1 -d "$3" ${API_URL}$2 | jq
| 700 | 27.04 | 92 | sh |
null | ceph-main/src/pybind/mgr/dashboard/run-backend-api-tests.sh | #!/usr/bin/env bash
# SHELL_TRACE=true ./run-backend-api-tests.sh to enable debugging
[ -v SHELL_TRACE ] && set -x
# cross shell: Are we sourced?
# Source: https://stackoverflow.com/a/28776166/3185053
([[ -n $ZSH_EVAL_CONTEXT && $ZSH_EVAL_CONTEXT =~ :file$ ]] ||
[[ -n $KSH_VERSION && $(cd "$(dirname -- "$0")" &&
printf '%s' "${PWD%/}/")$(basename -- "$0") != "${.sh.file}" ]] ||
[[ -n $BASH_VERSION ]] && (return 0 2>/dev/null)) && sourced=1 || sourced=0
if [ "$sourced" -eq 0 ] ; then
set -eo pipefail
fi
if [[ "$1" = "-h" || "$1" = "--help" ]]; then
echo "Usage (run from ./):"
echo -e "\t./run-backend-api-tests.sh"
echo -e "\t./run-backend-api-tests.sh [tests]..."
echo
echo "Example:"
echo -e "\t./run-backend-api-tests.sh tasks.mgr.dashboard.test_pool.DashboardTest"
echo
echo "Or source this script. This allows to re-run tests faster:"
echo -e "\tsource run-backend-api-tests.sh"
echo -e "\trun_teuthology_tests [tests]..."
echo -e "\tcleanup_teuthology"
echo
exit 0
fi
get_cmake_variable() {
local variable=$1
grep "$variable" CMakeCache.txt | cut -d "=" -f 2
}
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
CURR_DIR=`pwd`
LOCAL_BUILD_DIR=$(cd "$CURR_DIR/../../../../$BUILD_DIR"; pwd)
setup_teuthology() {
TEMP_DIR=`mktemp -d`
cd $TEMP_DIR
${TEUTHOLOGY_PYTHON_BIN:-/usr/bin/python3} -m venv venv
source venv/bin/activate
pip install -U pip 'setuptools>=12,<60'
pip install "git+https://github.com/ceph/teuthology@9e4bf63#egg=teuthology[test]"
pushd $CURR_DIR
pip install -r requirements.txt -c constraints.txt
popd
deactivate
}
setup_coverage() {
# In CI environment we cannot install coverage in system, so we install it in a dedicated venv
# so only coverage is available when adding this path.
cd $TEMP_DIR
/usr/bin/python3 -m venv coverage-venv
source coverage-venv/bin/activate
cd $CURR_DIR
pip install coverage==4.5.2
COVERAGE_PATH=$(python -c "import sysconfig; print(sysconfig.get_paths()['platlib'])")
deactivate
}
display_log() {
local daemon=$1
shift
local lines=$1
shift
local log_files=$(find "$CEPH_OUT_DIR" -iname "${daemon}.*.log" | tr '\n' ' ')
for log_file in ${log_files[@]}; do
printf "\n\nDisplaying last ${lines} lines of: ${log_file}\n\n"
tail -n ${lines} $log_file
printf "\n\nEnd of: ${log_file}\n\n"
done
printf "\n\nTEST FAILED.\n\n"
}
on_tests_error() {
local ret=$?
if [[ -n "$JENKINS_HOME" && -z "$ON_TESTS_ERROR_RUN" ]]; then
CEPH_OUT_DIR=${CEPH_OUT_DIR:-"$LOCAL_BUILD_DIR"/out}
display_log "mgr" 1500
display_log "osd" 1000
ON_TESTS_ERROR_RUN=1
fi
return $ret
}
run_teuthology_tests() {
trap on_tests_error ERR
cd "$LOCAL_BUILD_DIR"
find ../src/pybind/mgr/dashboard/ -name '*.pyc' -exec rm -f {} \;
OPTIONS=''
TEST_CASES=''
if [[ "$@" == '' || "$@" == '--create-cluster-only' ]]; then
TEST_CASES=`for i in \`ls $LOCAL_BUILD_DIR/../qa/tasks/mgr/dashboard/test_*\`; do F=$(basename $i); M="${F%.*}"; echo -n " tasks.mgr.dashboard.$M"; done`
# Mgr selftest module tests have to be run at the end as they stress the mgr daemon.
TEST_CASES="tasks.mgr.test_dashboard $TEST_CASES tasks.mgr.test_module_selftest"
if [[ "$@" == '--create-cluster-only' ]]; then
OPTIONS="$@"
fi
else
for t in "$@"; do
TEST_CASES="$TEST_CASES $t"
done
fi
export PATH=$LOCAL_BUILD_DIR/bin:$PATH
source $TEMP_DIR/venv/bin/activate # Run after setting PATH as it does the last PATH export.
export LD_LIBRARY_PATH=$LOCAL_BUILD_DIR/lib/cython_modules/lib.3/:$LOCAL_BUILD_DIR/lib
local source_dir=$(dirname "$LOCAL_BUILD_DIR")
local pybind_dir=$source_dir/src/pybind
local python_common_dir=$source_dir/src/python-common
# In CI environment we set python paths inside build (where you find the required frontend build: "dist" dir).
if [[ -n "$JENKINS_HOME" ]]; then
pybind_dir+=":$LOCAL_BUILD_DIR/src/pybind"
fi
export PYTHONPATH=$source_dir/qa:$LOCAL_BUILD_DIR/lib/cython_modules/lib.3/:$pybind_dir:$python_common_dir:${COVERAGE_PATH}
export DASHBOARD_SSL=1
export NFS=0
export RGW=1
export COVERAGE_ENABLED=true
export COVERAGE_FILE=.coverage.mgr.dashboard
export CEPH_OUT_CLIENT_DIR=${LOCAL_BUILD_DIR}/out/client
find . -iname "*${COVERAGE_FILE}*" -type f -delete
python ../qa/tasks/vstart_runner.py --ignore-missing-binaries --no-verbose $OPTIONS $(echo $TEST_CASES) ||
on_tests_error
deactivate
cd $CURR_DIR
}
cleanup_teuthology() {
cd "$LOCAL_BUILD_DIR"
killall ceph-mgr
sleep 10
if [[ "$COVERAGE_ENABLED" == 'true' ]]; then
source $TEMP_DIR/coverage-venv/bin/activate
(coverage combine && coverage report) || true
deactivate
fi
../src/stop.sh
sleep 5
cd $CURR_DIR
rm -rf $TEMP_DIR
unset TEMP_DIR
unset CURR_DIR
unset LOCAL_BUILD_DIR
unset COVERAGE_PATH
unset setup_teuthology
unset setup_coverage
unset on_tests_error
unset run_teuthology_tests
unset cleanup_teuthology
}
export LC_ALL=en_US.UTF-8
setup_teuthology
setup_coverage
run_teuthology_tests --create-cluster-only
# End sourced section. Do not exit shell when the script has been sourced.
if [ "$sourced" -eq 1 ] ; then
return
fi
run_teuthology_tests "$@"
cleanup_teuthology
| 5,466 | 28.874317 | 159 | sh |
null | ceph-main/src/pybind/mgr/dashboard/run-backend-rook-api-request.sh | #!/bin/bash
#
# Query k8s to determine where the mgr is running and how to reach the
# dashboard from the local machine. This assumes that the dashboard is being
# exposed via a nodePort service
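#
# Example invocation (endpoint chosen for illustration only):
#   ./run-backend-rook-api-request.sh GET /api/summary ''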
CURR_DIR=`pwd`
K8S_NAMESPACE='rook-ceph'
HOST=$(kubectl get pods -n $K8S_NAMESPACE -l "app=rook-ceph-mgr" -o json | jq .items[0].spec.nodeName | sed s/\"//g)
if [ "$HOST" = "minikube" ]; then
HOST=$(minikube ip)
fi
PORT=$(kubectl get service -n $K8S_NAMESPACE rook-ceph-mgr-dashboard -o yaml | grep nodePort: | awk '{print $2}')
API_URL="https://${HOST}:${PORT}"
#
# Rook automagically sets up an "admin" account with a random PW and stuffs
# that into a k8s secret. This fetches it.
#
PASSWD=$(kubectl -n $K8S_NAMESPACE get secret rook-ceph-dashboard-password -o yaml | grep "password:" | awk '{print $2}' | base64 --decode)
if [ "$API_URL" = "null" ]; then
echo "Couldn't retrieve API URL, exiting..." >&2
exit 1
fi
cd $CURR_DIR
TOKEN=`curl --insecure -s -H "Content-Type: application/json" -X POST \
-d "{\"username\":\"admin\",\"password\":\"${PASSWD}\"}" $API_URL/api/auth \
| jq .token | sed -e 's/"//g'`
echo "METHOD: $1"
echo "URL: ${API_URL}${2}"
echo "DATA: $3"
echo ""
curl --insecure -s -b /tmp/cd-cookie.txt -H "Authorization: Bearer $TOKEN " \
-H "Content-Type: application/json" -X $1 -d "$3" ${API_URL}$2 | jq
| 1,350 | 31.95122 | 139 | sh |
null | ceph-main/src/pybind/mgr/dashboard/run-frontend-e2e-tests.sh | #!/usr/bin/env bash
set -e
CLUSTERS=("1" "2")
ceph() {
${FULL_PATH_BUILD_DIR}/../src/mrun 1 ceph $@
}
ceph2() {
${FULL_PATH_BUILD_DIR}/../src/mrun 2 ceph $@
}
ceph_all() {
ceph $@
ceph2 $@
}
start_ceph() {
cd $FULL_PATH_BUILD_DIR
for cluster in ${CLUSTERS[@]}; do
export CEPH_OUT_CLIENT_DIR=${FULL_PATH_BUILD_DIR}/run/${cluster}/out/client
MGR=2 RGW=1 ../src/mstart.sh $cluster -n -d
done
set -x
# Create an Object Gateway User
ceph_all dashboard set-rgw-credentials
# Set SSL verify to False
ceph_all dashboard set-rgw-api-ssl-verify False
CYPRESS_BASE_URL=$(ceph mgr services | jq -r .dashboard)
CYPRESS_CEPH2_URL=$(ceph2 mgr services | jq -r .dashboard)
# start rbd-mirror daemon in the cluster
KEY=$(ceph auth get client.admin --format=json | jq -r .[0].key)
MON_CLUSTER_1=$(grep "mon host" ${FULL_PATH_BUILD_DIR}/run/1/ceph.conf | awk '{print $4}')
${FULL_PATH_BUILD_DIR}/bin/rbd-mirror --mon_host $MON_CLUSTER_1 --key $KEY -c ${FULL_PATH_BUILD_DIR}/run/1/ceph.conf &
set +x
}
stop() {
if [ "$REMOTE" == "false" ]; then
cd ${FULL_PATH_BUILD_DIR}
for cluster in ${CLUSTERS[@]}; do
../src/mstop.sh $cluster
done
fi
exit $1
}
check_device_available() {
failed=false
if [ "$DEVICE" == "docker" ]; then
[ -x "$(command -v docker)" ] || failed=true
else
cd $DASH_DIR/frontend
npx cypress verify
case "$DEVICE" in
chrome)
[ -x "$(command -v chrome)" ] || [ -x "$(command -v google-chrome)" ] ||
[ -x "$(command -v google-chrome-stable)" ] || failed=true
;;
chromium)
[ -x "$(command -v chromium)" ] || [ -x "$(command -v chromium-browser)" ] || failed=true
;;
esac
fi
if [ "$failed" = "true" ]; then
echo "ERROR: $DEVICE not found. You need to install $DEVICE or \
use a different device. Supported devices: chrome (default), chromium, electron or docker."
stop 1
fi
}
: ${CYPRESS_BASE_URL:=''}
: ${CYPRESS_CEPH2_URL:=''}
: ${CYPRESS_LOGIN_PWD:=''}
: ${CYPRESS_LOGIN_USER:=''}
: ${DEVICE:="chrome"}
: ${NO_COLOR:=1}
: ${CYPRESS_ARGS:=''}
: ${REMOTE:='false'}
while getopts 'd:p:r:u:' flag; do
case "${flag}" in
d) DEVICE=$OPTARG;;
p) CYPRESS_LOGIN_PWD=$OPTARG;;
r) REMOTE='true'
CYPRESS_BASE_URL=$OPTARG;;
u) CYPRESS_LOGIN_USER=$OPTARG;;
esac
done
DASH_DIR=`pwd`
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
cd ../../../../${BUILD_DIR}
FULL_PATH_BUILD_DIR=`pwd`
[[ "$(command -v npm)" == '' ]] && . ${FULL_PATH_BUILD_DIR}/src/pybind/mgr/dashboard/frontend/node-env/bin/activate
: ${CYPRESS_CACHE_FOLDER:="${FULL_PATH_BUILD_DIR}/src/pybind/mgr/dashboard/cypress"}
export CYPRESS_BASE_URL CYPRESS_CACHE_FOLDER CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD NO_COLOR CYPRESS_CEPH2_URL
check_device_available
if [ "$CYPRESS_BASE_URL" == "" ]; then
start_ceph
fi
cd $DASH_DIR/frontend
# Remove existing XML results
rm -f cypress/reports/results-*.xml || true
case "$DEVICE" in
docker)
failed=0
CYPRESS_VERSION=$(cat package.json | grep '"cypress"' | grep -o "[0-9]\.[0-9]\.[0-9]")
docker run \
-v $(pwd):/e2e \
-w /e2e \
--env CYPRESS_BASE_URL \
--env CYPRESS_LOGIN_USER \
--env CYPRESS_LOGIN_PWD \
--env CYPRESS_CEPH2_URL \
--name=e2e \
--network=host \
cypress/included:${CYPRESS_VERSION} || failed=1
stop $failed
;;
*)
npx cypress run $CYPRESS_ARGS --browser $DEVICE --headless || stop 1
;;
esac
stop 0
| 3,748 | 24.503401 | 122 | sh |
null | ceph-main/src/pybind/mgr/dashboard/run-frontend-unittests.sh | #!/usr/bin/env bash
failed=false
SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )"
: ${CEPH_ROOT:=$SCRIPTPATH/../../../../}
cd $CEPH_ROOT/src/pybind/mgr/dashboard/frontend
[ -z "$BUILD_DIR" ] && BUILD_DIR=build
if [ `uname` != "FreeBSD" ]; then
. $CEPH_ROOT/${BUILD_DIR}/src/pybind/mgr/dashboard/frontend/node-env/bin/activate
fi
# Build
npm run build -- --configuration=production --progress=false || failed=true
# Unit Tests
npm run test:ci || failed=true
# Linting
npm run lint --silent
if [ $? -gt 0 ]; then
failed=true
echo -e "\nTry running 'npm run fix' to fix some linting errors. \
Some errors might need a manual fix."
fi
# I18N
npm run i18n:extract
if [ $? -gt 0 ]; then
failed=true
echo -e "\nTranslations extraction has failed."
else
i18n_lint=`awk '/<source> |<source>$| <\/source>/,/<\/context-group>/ {printf "%-4s ", NR; print}' src/locale/messages.xlf`
# Excluding the node_modules/ folder errors from the lint error
if [[ -n "$i18n_lint" && $i18n_lint != *"node_modules/"* ]]; then
echo -e "\nThe following source translations in 'messages.xlf' need to be \
fixed, please check the I18N suggestions on https://docs.ceph.com/en/latest/dev/developer_guide/dash-devel/#i18n:\n"
echo "${i18n_lint}"
failed=true
fi
fi
if [ `uname` != "FreeBSD" ]; then
deactivate
fi
if [ "$failed" = "true" ]; then
exit 1
fi
| 1,367 | 25.823529 | 125 | sh |
null | ceph-main/src/pybind/mgr/dashboard/ci/cephadm/bootstrap-cluster.sh | #!/usr/bin/env bash
set -x
export PATH=/root/bin:$PATH
mkdir /root/bin
export CEPHADM_IMAGE='quay.ceph.io/ceph-ci/ceph:main'
CEPHADM="/root/bin/cephadm"
/mnt/{{ ceph_dev_folder }}/src/cephadm/build.sh $CEPHADM
mkdir -p /etc/ceph
mon_ip=$(ifconfig eth0 | grep 'inet ' | awk '{ print $2}')
bootstrap_extra_options='--allow-fqdn-hostname --dashboard-password-noupdate'
# The lines below are intentionally commented out. Uncomment them when any
# extra options are needed for the bootstrap.
# bootstrap_extra_options_not_expanded=''
# {% if expanded_cluster is not defined %}
# bootstrap_extra_options+=" ${bootstrap_extra_options_not_expanded}"
# {% endif %}
$CEPHADM bootstrap --mon-ip $mon_ip --initial-dashboard-password {{ admin_password }} --shared_ceph_folder /mnt/{{ ceph_dev_folder }} ${bootstrap_extra_options}
fsid=$(cat /etc/ceph/ceph.conf | grep fsid | awk '{ print $3}')
cephadm_shell="$CEPHADM shell --fsid ${fsid} -c /etc/ceph/ceph.conf -k /etc/ceph/ceph.client.admin.keyring"
{% for number in range(1, nodes) %}
ssh-copy-id -f -i /etc/ceph/ceph.pub -o StrictHostKeyChecking=no [email protected]{{ number }}
{% if expanded_cluster is defined %}
${cephadm_shell} ceph orch host add {{ prefix }}-node-0{{ number }}
{% endif %}
{% endfor %}
{% if expanded_cluster is defined %}
${cephadm_shell} ceph orch apply osd --all-available-devices
{% endif %}
| 1,366 | 33.175 | 160 | sh |
null | ceph-main/src/pybind/mgr/dashboard/ci/cephadm/run-cephadm-e2e-tests.sh | #!/usr/bin/env bash
set -ex
: ${CYPRESS_BASE_URL:=''}
: ${CYPRESS_LOGIN_USER:='admin'}
: ${CYPRESS_LOGIN_PWD:='password'}
: ${CYPRESS_ARGS:=''}
: ${DASHBOARD_PORT:='8443'}
get_vm_ip () {
local ip=$(kcli info vm "$1" -f ip -v | grep -Eo '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')
echo -n $ip
}
if [[ -n "${JENKINS_HOME}" || (-z "${CYPRESS_BASE_URL}" && -z "$(get_vm_ip ceph-node-00)") ]]; then
. "$(dirname $0)"/start-cluster.sh
CYPRESS_BASE_URL="https://$(get_vm_ip ceph-node-00):${DASHBOARD_PORT}"
fi
export CYPRESS_BASE_URL CYPRESS_LOGIN_USER CYPRESS_LOGIN_PWD
cypress_run () {
local specs="$1"
local timeout="$2"
local override_config="excludeSpecPattern=*.po.ts,retries=0,specPattern=${specs},chromeWebSecurity=false"
if [[ -n "$timeout" ]]; then
override_config="${override_config},defaultCommandTimeout=${timeout}"
fi
rm -f cypress/reports/results-*.xml || true
npx --no-install cypress run ${CYPRESS_ARGS} --browser chrome --headless --config "$override_config"
}
: ${CEPH_DEV_FOLDER:=${PWD}}
cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
# check if the prometheus daemon is running
# before starting the e2e tests
PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running')
while [[ $PROMETHEUS_RUNNING_COUNT -lt 1 ]]; do
PROMETHEUS_RUNNING_COUNT=$(kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch ls --service_name=prometheus --format=json"' | jq -r '.[] | .status.running')
done
# grafana ip address is set to the fqdn by default.
# kcli is not working with that, so setting the IP manually.
kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-alertmanager-api-host http://192.168.100.100:9093"'
kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-prometheus-api-host http://192.168.100.100:9095"'
kcli ssh -u root ceph-node-00 'cephadm shell "ceph dashboard set-grafana-api-url https://192.168.100.100:3000"'
kcli ssh -u root ceph-node-00 'cephadm shell "ceph orch apply node-exporter --placement 'count:2'"'
kcli ssh -u root ceph-node-00 'cephadm shell "ceph config set mgr mgr/prometheus/exclude_perf_counters false"'
cypress_run ["cypress/e2e/orchestrator/workflow/*.feature","cypress/e2e/orchestrator/workflow/*-spec.ts"]
cypress_run "cypress/e2e/orchestrator/grafana/*.feature"
| 2,417 | 39.3 | 164 | sh |
null | ceph-main/src/pybind/mgr/dashboard/ci/cephadm/start-cluster.sh | #!/usr/bin/env bash
set -eEx
on_error() {
set +x
if [ "$1" != "0" ]; then
echo "ERROR $1 thrown on line $2"
echo
echo "Collecting info..."
echo
echo "Saving MGR logs:"
echo
mkdir -p ${CEPH_DEV_FOLDER}/logs
kcli ssh -u root -- ceph-node-00 'cephadm logs -n \$(cephadm ls | grep -Eo "mgr\.ceph[0-9a-z.-]+" | head -n 1) -- --no-tail --no-pager' > ${CEPH_DEV_FOLDER}/logs/mgr.cephadm.log
for vm_id in {0..3}
do
local vm="ceph-node-0${vm_id}"
echo "Saving journalctl from VM ${vm}:"
echo
kcli ssh -u root -- ${vm} 'journalctl --no-tail --no-pager -t cloud-init' > ${CEPH_DEV_FOLDER}/logs/journal.ceph-node-0${vm_id}.log || true
echo "Saving container logs:"
echo
kcli ssh -u root -- ${vm} 'podman logs --names --since 30s \$(podman ps -aq)' > ${CEPH_DEV_FOLDER}/logs/container.ceph-node-0${vm_id}.log || true
done
echo "TEST FAILED."
fi
}
trap 'on_error $? $LINENO' ERR
sed -i '/ceph-node-/d' $HOME/.ssh/known_hosts || true
: ${CEPH_DEV_FOLDER:=${PWD}}
EXTRA_PARAMS=''
DEV_MODE=''
# Check script args/options.
for arg in "$@"; do
shift
case "$arg" in
"--dev-mode") DEV_MODE='true'; EXTRA_PARAMS+=" -P dev_mode=${DEV_MODE}" ;;
"--expanded") EXTRA_PARAMS+=" -P expanded_cluster=true" ;;
esac
done
kcli delete plan -y ceph || true
# Build dashboard frontend (required to start the module).
cd ${CEPH_DEV_FOLDER}/src/pybind/mgr/dashboard/frontend
export NG_CLI_ANALYTICS=false
if [[ -n "$JENKINS_HOME" ]]; then
npm cache clean --force
fi
npm ci
FRONTEND_BUILD_OPTS='--configuration=production'
if [[ -n "${DEV_MODE}" ]]; then
FRONTEND_BUILD_OPTS+=' --deleteOutputPath=false --watch'
fi
npm run build ${FRONTEND_BUILD_OPTS} &
cd ${CEPH_DEV_FOLDER}
: ${VM_IMAGE:='fedora36'}
: ${VM_IMAGE_URL:='https://download.fedoraproject.org/pub/fedora/linux/releases/36/Cloud/x86_64/images/Fedora-Cloud-Base-36-1.5.x86_64.qcow2'}
kcli download image -p ceph-dashboard -u ${VM_IMAGE_URL} ${VM_IMAGE}
kcli delete plan -y ceph || true
kcli create plan -f src/pybind/mgr/dashboard/ci/cephadm/ceph_cluster.yml \
-P ceph_dev_folder=${CEPH_DEV_FOLDER} \
${EXTRA_PARAMS} ceph
: ${CLUSTER_DEBUG:=0}
: ${DASHBOARD_CHECK_INTERVAL:=10}
while [[ -z $(kcli ssh -u root -- ceph-node-00 'journalctl --no-tail --no-pager -t cloud-init' | grep "kcli boot finished") ]]; do
sleep ${DASHBOARD_CHECK_INTERVAL}
kcli list vm
if [[ ${CLUSTER_DEBUG} != 0 ]]; then
kcli ssh -u root -- ceph-node-00 'podman ps -a'
kcli ssh -u root -- ceph-node-00 'podman logs --names --since 30s \$(podman ps -aq)'
fi
kcli ssh -u root -- ceph-node-00 'journalctl -n 100 --no-pager -t cloud-init'
done
| 2,802 | 33.604938 | 185 | sh |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/index.html | <!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Ceph</title>
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link rel="icon" type="image/x-icon" id="cdFavicon" href="favicon.ico">
</head>
<body>
<noscript>
<div class="noscript container"
ng-if="false">
<div class="jumbotron alert alert-danger">
<h2 i18n>JavaScript required!</h2>
<p i18n>A browser with JavaScript enabled is required in order to use this service.</p>
<p i18n>When using Internet Explorer, please check your security settings and add this address to your trusted sites.</p>
</div>
</div>
</noscript>
<cd-root></cd-root>
</body>
</html>
| 734 | 28.4 | 129 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/app.component.html | <router-outlet></router-outlet>
| 32 | 15.5 | 31 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-setting/iscsi-setting.component.html | <div class="form-group"
[formGroup]="settingsForm">
<label class="col-form-label"
for="{{ setting }}">{{ setting }}</label>
<select id="{{ setting }}"
name="{{ setting }}"
*ngIf="limits['type'] === 'enum'"
class="form-control"
[formControlName]="setting">
<option [ngValue]="null"></option>
<option *ngFor="let opt of limits['values']"
[ngValue]="opt">{{ opt }}</option>
</select>
<span *ngIf="limits['type'] !== 'enum'">
<input type="number"
*ngIf="limits['type'] === 'int'"
class="form-control"
[formControlName]="setting">
<input type="text"
*ngIf="limits['type'] === 'str'"
class="form-control"
[formControlName]="setting">
<ng-container *ngIf="limits['type'] === 'bool'">
<br>
<div class="custom-control custom-radio custom-control-inline">
<input type="radio"
[id]="setting + 'True'"
[value]="true"
[formControlName]="setting"
class="custom-control-input">
<label class="custom-control-label"
[for]="setting + 'True'">Yes</label>
</div>
<div class="custom-control custom-radio custom-control-inline">
<input type="radio"
[id]="setting + 'False'"
[value]="false"
class="custom-control-input"
[formControlName]="setting">
<label class="custom-control-label"
[for]="setting + 'False'">No</label>
</div>
</ng-container>
</span>
<span class="invalid-feedback"
*ngIf="settingsForm.showError(setting, formDir, 'min')">
<ng-container i18n>Must be greater than or equal to {{ limits['min'] }}.</ng-container>
</span>
<span class="invalid-feedback"
*ngIf="settingsForm.showError(setting, formDir, 'max')">
<ng-container i18n>Must be less than or equal to {{ limits['max'] }}.</ng-container>
</span>
</div>
| 2,016 | 33.775862 | 91 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-tabs/iscsi-tabs.component.html | <ul class="nav nav-tabs">
<li class="nav-item">
<a class="nav-link"
routerLink="/block/iscsi/overview"
routerLinkActive="active"
ariaCurrentWhenActive="page"
i18n>Overview</a>
</li>
<li class="nav-item">
<a class="nav-link"
routerLink="/block/iscsi/targets"
routerLinkActive="active"
ariaCurrentWhenActive="page"
i18n>Targets</a>
</li>
</ul>
| 414 | 23.411765 | 41 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-details/iscsi-target-details.component.html | <div class="row">
<div class="col-6">
<legend i18n>iSCSI Topology</legend>
<tree-root #tree
[nodes]="nodes"
[options]="treeOptions"
(updateData)="onUpdateData()">
<ng-template #treeNodeTemplate
let-node
let-index="index">
<i [class]="node.data.cdIcon"></i>
<span>{{ node.data.name }}</span>
<span class="badge"
[ngClass]="{'badge-success': ['logged_in'].includes(node.data.status), 'badge-danger': ['logged_out'].includes(node.data.status)}">
{{ node.data.status }}
</span>
</ng-template>
</tree-root>
</div>
<div class="col-6 metadata"
*ngIf="data">
<legend>{{ title }}</legend>
<cd-table #detailTable
[data]="data"
columnMode="flex"
[columns]="columns"
[limit]="0">
</cd-table>
</div>
</div>
<ng-template #highlightTpl
let-row="row"
let-value="value">
<span *ngIf="row.default === undefined || row.default === row.current">{{ value }}</span>
<strong *ngIf="row.default !== undefined && row.default !== row.current">{{ value }}</strong>
</ng-template>
| 1,248 | 28.738095 | 145 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-discovery-modal/iscsi-target-discovery-modal.component.html | <cd-modal [modalRef]="activeModal">
<ng-container class="modal-title"
i18n>Discovery Authentication</ng-container>
<ng-container class="modal-content">
<form name="discoveryForm"
#formDir="ngForm"
[formGroup]="discoveryForm"
novalidate>
<div class="modal-body">
<!-- User -->
<div class="form-group row">
<label class="cd-col-form-label"
for="user"
i18n>User</label>
<div class="cd-col-form-input">
<input id="user"
class="form-control"
formControlName="user"
type="text"
autocomplete="off">
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('user', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('user', formDir, 'pattern')"
i18n>User names must have a length of 8 to 64 characters and can contain
alphanumeric characters, '.', '@', '-', '_' or ':'.</span>
</div>
</div>
<!-- Password -->
<div class="form-group row">
<label class="cd-col-form-label"
for="password"
i18n>Password</label>
<div class="cd-col-form-input">
<div class="input-group">
<input id="password"
class="form-control"
formControlName="password"
type="password"
autocomplete="new-password">
<button type="button"
class="btn btn-light"
cdPasswordButton="password">
</button>
<cd-copy-2-clipboard-button source="password">
</cd-copy-2-clipboard-button>
</div>
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('password', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('password', formDir, 'pattern')"
i18n>Passwords must have a length of 12 to 16 characters and can contain
alphanumeric characters, '@', '-', '_' or '/'.</span>
</div>
</div>
<!-- mutual_user -->
<div class="form-group row">
<label class="cd-col-form-label"
for="mutual_user">
<ng-container i18n>Mutual User</ng-container>
</label>
<div class="cd-col-form-input">
<input id="mutual_user"
class="form-control"
formControlName="mutual_user"
type="text"
autocomplete="off">
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('mutual_user', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('mutual_user', formDir, 'pattern')"
i18n>User names must have a length of 8 to 64 characters and can contain
alphanumeric characters, '.', '@', '-', '_' or ':'.</span>
</div>
</div>
<!-- mutual_password -->
<div class="form-group row">
<label class="cd-col-form-label"
for="mutual_password"
i18n>Mutual Password</label>
<div class="cd-col-form-input">
<div class="input-group">
<input id="mutual_password"
class="form-control"
formControlName="mutual_password"
type="password"
autocomplete="new-password">
<button type="button"
class="btn btn-light"
cdPasswordButton="mutual_password">
</button>
<cd-copy-2-clipboard-button source="mutual_password">
</cd-copy-2-clipboard-button>
</div>
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('mutual_password', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="discoveryForm.showError('mutual_password', formDir, 'pattern')"
i18n>Passwords must have a length of 12 to 16 characters and can contain
alphanumeric characters, '@', '-', '_' or '/'.</span>
</div>
</div>
</div>
<div class="modal-footer">
<cd-form-button-panel (submitActionEvent)="submitAction()"
[form]="discoveryForm"
[showSubmit]="hasPermission"
[submitText]="actionLabels.SUBMIT"></cd-form-button-panel>
</div>
</form>
</ng-container>
</cd-modal>
| 5,101 | 38.550388 | 90 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-form/iscsi-target-form.component.html | <div class="cd-col-form"
*cdFormLoading="loading">
<form name="targetForm"
#formDir="ngForm"
[formGroup]="targetForm"
novalidate>
<div class="card">
<div i18n="form title"
class="card-header">{{ action | titlecase }} {{ resource | upperFirst }}</div>
<div class="card-body">
<!-- Target IQN -->
<div class="form-group row">
<label class="cd-col-form-label required"
for="target_iqn"
i18n>Target IQN</label>
<div class="cd-col-form-input">
<div class="input-group">
<input class="form-control"
type="text"
id="target_iqn"
name="target_iqn"
formControlName="target_iqn"
cdTrim />
<button class="btn btn-light"
id="ecp-info-button"
type="button"
(click)="targetSettingsModal()">
<i [ngClass]="[icons.deepCheck]"
aria-hidden="true"></i>
</button>
</div>
<span class="invalid-feedback"
*ngIf="targetForm.showError('target_iqn', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('target_iqn', formDir, 'pattern')"
i18n>IQN has wrong pattern.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('target_iqn', formDir, 'iqn')">
<ng-container i18n>An IQN has the following notation
'iqn.$year-$month.$reversedAddress:$definedName'</ng-container>
<br>
<ng-container i18n>For example: iqn.2016-06.org.dashboard:storage:disk.sn-a8675309</ng-container>
<br>
<a target="_blank"
href="https://en.wikipedia.org/wiki/ISCSI#Addressing"
i18n>More information</a>
</span>
<span class="form-text text-muted"
*ngIf="hasAdvancedSettings(targetForm.getValue('target_controls'))"
i18n>This target has modified advanced settings.</span>
<hr />
</div>
</div>
<!-- Portals -->
<div class="form-group row">
<label class="cd-col-form-label required"
for="portals"
i18n>Portals</label>
<div class="cd-col-form-input">
<ng-container *ngFor="let portal of portals.value; let i = index">
<div class="input-group cd-mb">
<input class="cd-form-control"
type="text"
[value]="portal"
disabled />
<button class="btn btn-light"
type="button"
(click)="removePortal(i, portal)">
<i [ngClass]="[icons.destroy]"
aria-hidden="true"></i>
</button>
</div>
</ng-container>
<div class="row">
<div class="col-md-12">
<cd-select [data]="portals.value"
[options]="portalsSelections"
[messages]="messages.portals"
(selection)="onPortalSelection($event)"
elemClass="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add portal</ng-container>
</cd-select>
</div>
</div>
<input class="form-control"
type="hidden"
id="portals"
name="portals"
formControlName="portals" />
<span class="invalid-feedback"
*ngIf="targetForm.showError('portals', formDir, 'minGateways')"
i18n>At least {{ minimum_gateways }} gateways are required.</span>
<hr />
</div>
</div>
<!-- Images -->
<div class="form-group row">
<label class="cd-col-form-label"
for="disks"
i18n>Images</label>
<div class="cd-col-form-input">
<ng-container *ngFor="let image of targetForm.getValue('disks'); let i = index">
<div class="input-group cd-mb">
<input class="cd-form-control"
type="text"
[value]="image"
disabled />
<div class="input-group-text"
*ngIf="api_version >= 1">lun: {{ imagesSettings[image]['lun'] }}</div>
<button class="btn btn-light"
type="button"
(click)="imageSettingsModal(image)">
<i [ngClass]="[icons.deepCheck]"
aria-hidden="true"></i>
</button>
<button class="btn btn-light"
type="button"
(click)="removeImage(i, image)">
<i [ngClass]="[icons.destroy]"
aria-hidden="true"></i>
</button>
</div>
<span class="form-text text-muted">
<ng-container *ngIf="backstores.length > 1"
i18n>Backstore: {{ imagesSettings[image].backstore | iscsiBackstore }}. </ng-container>
<ng-container *ngIf="hasAdvancedSettings(imagesSettings[image][imagesSettings[image].backstore])"
i18n>This image has modified settings.</ng-container>
</span>
</ng-container>
<input class="form-control"
type="hidden"
id="disks"
name="disks"
formControlName="disks" />
<span class="invalid-feedback"
*ngIf="targetForm.showError('disks', formDir, 'dupLunId')"
i18n>Duplicated LUN numbers.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('disks', formDir, 'dupWwn')"
i18n>Duplicated WWN.</span>
<div class="row">
<div class="col-md-12">
<cd-select [data]="disks.value"
[options]="imagesSelections"
[messages]="messages.images"
(selection)="onImageSelection($event)"
elemClass="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add image</ng-container>
</cd-select>
</div>
</div>
<hr />
</div>
</div>
<!-- acl_enabled -->
<div class="form-group row">
<div class="cd-col-form-offset">
<div class="custom-control custom-checkbox">
<input type="checkbox"
class="custom-control-input"
formControlName="acl_enabled"
name="acl_enabled"
id="acl_enabled">
<label for="acl_enabled"
class="custom-control-label"
i18n>ACL authentication</label>
</div>
<hr />
</div>
</div>
<!-- Target level authentication was introduced in ceph-iscsi config v11 -->
<div formGroupName="auth"
*ngIf="cephIscsiConfigVersion > 10 && !targetForm.getValue('acl_enabled')">
<!-- Target user -->
<div class="form-group row">
<label class="cd-col-form-label"
for="target_user">
<ng-container i18n>User</ng-container>
</label>
<div class="cd-col-form-input">
<input class="form-control"
type="text"
autocomplete="off"
id="target_user"
name="target_user"
formControlName="user" />
<span class="invalid-feedback"
*ngIf="targetForm.showError('user', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('user', formDir, 'pattern')"
i18n>User names must have a length of 8 to 64 characters and can contain
alphanumeric characters, '.', '@', '-', '_' or ':'.</span>
</div>
</div>
<!-- Target password -->
<div class="form-group row">
<label class="cd-col-form-label"
for="target_password">
<ng-container i18n>Password</ng-container>
</label>
<div class="cd-col-form-input">
<div class="input-group">
<input class="form-control"
type="password"
autocomplete="new-password"
id="target_password"
name="target_password"
formControlName="password" />
<button type="button"
class="btn btn-light"
cdPasswordButton="target_password">
</button>
<cd-copy-2-clipboard-button source="target_password">
</cd-copy-2-clipboard-button>
</div>
<span class="invalid-feedback"
*ngIf="targetForm.showError('password', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('password', formDir, 'pattern')"
i18n>Passwords must have a length of 12 to 16 characters and can contain
alphanumeric characters, '@', '-', '_' or '/'.</span>
</div>
</div>
<!-- Target mutual_user -->
<div class="form-group row">
<label class="cd-col-form-label"
for="target_mutual_user">
<ng-container i18n>Mutual User</ng-container>
</label>
<div class="cd-col-form-input">
<input class="form-control"
type="text"
autocomplete="off"
id="target_mutual_user"
name="target_mutual_user"
formControlName="mutual_user" />
<span class="invalid-feedback"
*ngIf="targetForm.showError('mutual_user', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('mutual_user', formDir, 'pattern')"
i18n>User names must have a length of 8 to 64 characters and can contain
alphanumeric characters, '.', '@', '-', '_' or ':'.</span>
</div>
</div>
<!-- Target mutual_password -->
<div class="form-group row">
<label class="cd-col-form-label"
for="target_mutual_password">
<ng-container i18n>Mutual Password</ng-container>
</label>
<div class="cd-col-form-input">
<div class="input-group">
<input class="form-control"
type="password"
autocomplete="new-password"
id="target_mutual_password"
name="target_mutual_password"
formControlName="mutual_password" />
<button type="button"
class="btn btn-light"
cdPasswordButton="target_mutual_password">
</button>
<cd-copy-2-clipboard-button source="target_mutual_password">
</cd-copy-2-clipboard-button>
</div>
<span class="invalid-feedback"
*ngIf="targetForm.showError('mutual_password', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="targetForm.showError('mutual_password', formDir, 'pattern')"
i18n>Passwords must have a length of 12 to 16 characters and can contain
alphanumeric characters, '@', '-', '_' or '/'.</span>
</div>
</div>
</div>
<!-- Initiators -->
<div class="form-group row"
*ngIf="targetForm.getValue('acl_enabled')">
<label class="cd-col-form-label"
for="initiators"
i18n>Initiators</label>
<div class="cd-col-form-input"
formArrayName="initiators">
<div class="card mb-2"
*ngFor="let initiator of initiators.controls; let ii = index"
[formGroup]="initiator">
<div class="card-header">
<ng-container i18n>Initiator</ng-container>: {{ initiator.getValue('client_iqn') }}
<button type="button"
class="btn-close float-end"
(click)="removeInitiator(ii)">
</button>
</div>
<div class="card-body">
<!-- Initiator: Name -->
<div class="form-group row">
<label class="cd-col-form-label required"
for="client_iqn"
i18n>Client IQN</label>
<div class="cd-col-form-input">
<input class="form-control"
type="text"
formControlName="client_iqn"
cdTrim
(blur)="updatedInitiatorSelector()">
<span class="invalid-feedback"
*ngIf="initiator.showError('client_iqn', formDir, 'notUnique')"
i18n>Initiator IQN needs to be unique.</span>
<span class="invalid-feedback"
*ngIf="initiator.showError('client_iqn', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="initiator.showError('client_iqn', formDir, 'pattern')"
                      i18n>The IQN does not match the required pattern.</span>
</div>
</div>
<ng-container formGroupName="auth">
<!-- Initiator: User -->
<div class="form-group row">
<label class="cd-col-form-label"
for="user"
i18n>User</label>
<div class="cd-col-form-input">
<input [id]="'user' + ii"
class="form-control"
formControlName="user"
autocomplete="off"
type="text">
<span class="invalid-feedback"
*ngIf="initiator.showError('user', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="initiator.showError('user', formDir, 'pattern')"
i18n>User names must have a length of 8 to 64 characters and can contain
alphanumeric characters, '.', '@', '-', '_' or ':'.</span>
</div>
</div>
<!-- Initiator: Password -->
<div class="form-group row">
<label class="cd-col-form-label"
for="password"
i18n>Password</label>
<div class="cd-col-form-input">
<div class="input-group">
<input [id]="'password' + ii"
class="form-control"
formControlName="password"
autocomplete="new-password"
type="password">
<button type="button"
class="btn btn-light"
[cdPasswordButton]="'password' + ii">
</button>
<cd-copy-2-clipboard-button [source]="'password' + ii">
</cd-copy-2-clipboard-button>
</div>
<span class="invalid-feedback"
*ngIf="initiator.showError('password', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="initiator.showError('password', formDir, 'pattern')"
i18n>Passwords must have a length of 12 to 16 characters and can contain
alphanumeric characters, '@', '-', '_' or '/'.</span>
</div>
</div>
<!-- Initiator: mutual_user -->
<div class="form-group row">
<label class="cd-col-form-label"
for="mutual_user">
<ng-container i18n>Mutual User</ng-container>
</label>
<div class="cd-col-form-input">
<input [id]="'mutual_user' + ii"
class="form-control"
formControlName="mutual_user"
autocomplete="off"
type="text">
<span class="invalid-feedback"
*ngIf="initiator.showError('mutual_user', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="initiator.showError('mutual_user', formDir, 'pattern')"
i18n>User names must have a length of 8 to 64 characters and can contain
alphanumeric characters, '.', '@', '-', '_' or ':'.</span>
</div>
</div>
<!-- Initiator: mutual_password -->
<div class="form-group row">
<label class="cd-col-form-label"
for="mutual_password"
i18n>Mutual Password</label>
<div class="cd-col-form-input">
<div class="input-group">
<input [id]="'mutual_password' + ii"
class="form-control"
formControlName="mutual_password"
autocomplete="new-password"
type="password">
<button type="button"
class="btn btn-light"
[cdPasswordButton]="'mutual_password' + ii">
</button>
<cd-copy-2-clipboard-button [source]="'mutual_password' + ii">
</cd-copy-2-clipboard-button>
</div>
<span class="invalid-feedback"
*ngIf="initiator.showError('mutual_password', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="initiator.showError('mutual_password', formDir, 'pattern')"
i18n>Passwords must have a length of 12 to 16 characters and can contain
alphanumeric characters, '@', '-', '_' or '/'.</span>
</div>
</div>
</ng-container>
<!-- Initiator: Images -->
<div class="form-group row">
<label class="cd-col-form-label"
for="luns"
i18n>Images</label>
<div class="cd-col-form-input">
<ng-container *ngFor="let image of initiator.getValue('luns'); let li = index">
<div class="input-group cd-mb">
<input class="cd-form-control"
type="text"
[value]="image"
disabled />
<button class="btn btn-light"
type="button"
(click)="removeInitiatorImage(initiator, li, ii, image)">
<i [ngClass]="[icons.destroy]"
aria-hidden="true"></i>
</button>
</div>
</ng-container>
<span *ngIf="initiator.getValue('cdIsInGroup')"
                    i18n>Initiator belongs to a group. Images will be configured in the group.</span>
<div class="row"
*ngIf="!initiator.getValue('cdIsInGroup')">
<div class="col-md-12">
<cd-select [data]="initiator.getValue('luns')"
[options]="imagesInitiatorSelections[ii]"
[messages]="messages.initiatorImage"
elemClass="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add image</ng-container>
</cd-select>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-12">
<span class="form-text text-muted"
*ngIf="initiators.controls.length === 0"
i18n>No items added.</span>
<button (click)="addInitiator(); false"
class="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add initiator</ng-container>
</button>
</div>
</div>
<hr />
</div>
</div>
<!-- Groups -->
<div class="form-group row"
*ngIf="targetForm.getValue('acl_enabled')">
<label class="cd-col-form-label"
for="initiators"
i18n>Groups</label>
<div class="cd-col-form-input"
formArrayName="groups">
<div class="card mb-2"
*ngFor="let group of groups.controls; let gi = index"
[formGroup]="group">
<div class="card-header">
<ng-container i18n>Group</ng-container>: {{ group.getValue('group_id') }}
<button type="button"
class="btn-close float-end"
(click)="removeGroup(gi)">
</button>
</div>
<div class="card-body">
<!-- Group: group_id -->
<div class="form-group row">
<label class="cd-col-form-label required"
for="group_id"
i18n>Name</label>
<div class="cd-col-form-input">
<input class="form-control"
type="text"
formControlName="group_id">
</div>
</div>
<!-- Group: members -->
<div class="form-group row">
<label class="cd-col-form-label"
for="members">
<ng-container i18n>Initiators</ng-container>
</label>
<div class="cd-col-form-input">
<ng-container *ngFor="let member of group.getValue('members'); let i = index">
<div class="input-group cd-mb">
<input class="cd-form-control"
type="text"
[value]="member"
disabled />
<button class="btn btn-light"
type="button"
(click)="removeGroupInitiator(group, i, gi)">
<i [ngClass]="[icons.destroy]"
aria-hidden="true"></i>
</button>
</div>
</ng-container>
<div class="row">
<div class="col-md-12">
<cd-select [data]="group.getValue('members')"
[options]="groupMembersSelections[gi]"
[messages]="messages.groupInitiator"
(selection)="onGroupMemberSelection($event, gi)"
elemClass="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add initiator</ng-container>
</cd-select>
</div>
</div>
<hr />
</div>
</div>
<!-- Group: disks -->
<div class="form-group row">
<label class="cd-col-form-label"
for="disks">
<ng-container i18n>Images</ng-container>
</label>
<div class="cd-col-form-input">
<ng-container *ngFor="let disk of group.getValue('disks'); let i = index">
<div class="input-group cd-mb">
<input class="cd-form-control"
type="text"
[value]="disk"
disabled />
<button class="btn btn-light"
type="button"
(click)="removeGroupDisk(group, i, gi)">
<i [ngClass]="[icons.destroy]"
aria-hidden="true"></i>
</button>
</div>
</ng-container>
<div class="row">
<div class="col-md-12">
<cd-select [data]="group.getValue('disks')"
[options]="groupDiskSelections[gi]"
[messages]="messages.initiatorImage"
elemClass="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add image</ng-container>
</cd-select>
</div>
</div>
<hr />
</div>
</div>
</div>
</div>
<div class="row">
<div class="col-md-12">
<span class="form-text text-muted"
*ngIf="groups.controls.length === 0"
i18n>No items added.</span>
<button (click)="addGroup(); false"
class="btn btn-light float-end">
<i [ngClass]="[icons.add]"></i>
<ng-container i18n>Add group</ng-container>
</button>
</div>
</div>
</div>
</div>
</div>
<div class="card-footer">
<cd-form-button-panel (submitActionEvent)="submit()"
[form]="targetForm"
[submitText]="(action | titlecase) + ' ' + (resource | upperFirst)"
wrappingClass="text-right"></cd-form-button-panel>
</div>
</div>
</form>
</div>
| 28,650 | 41.698957 | 122 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-image-settings-modal/iscsi-target-image-settings-modal.component.html | <cd-modal [modalRef]="activeModal">
<ng-container class="modal-title">
<ng-container i18n>Configure</ng-container>
<small>{{ image }}</small>
</ng-container>
<ng-container class="modal-content">
<form name="settingsForm"
class="form"
#formDir="ngForm"
[formGroup]="settingsForm"
novalidate>
<div class="modal-body">
<p class="alert-warning"
i18n>Changing these parameters from their default values is usually not necessary.</p>
<span *ngIf="api_version >= 1">
<legend class="cd-header"
i18n>Identifier</legend>
<!-- LUN -->
<div class="form-group row">
<div class="col-sm-12">
<label class="col-form-label required"
for="lun"
i18n>lun</label>
<input type="number"
class="form-control"
id="lun"
name="lun"
formControlName="lun">
<span class="invalid-feedback"
*ngIf="settingsForm.showError('lun', formDir, 'required')"
i18n>This field is required.</span>
</div>
</div>
<!-- WWN -->
<div class="form-group row">
<div class="col-sm-12">
<label class="col-form-label"
for="wwn"
i18n>wwn</label>
<input type="text"
class="form-control"
id="wwn"
name="wwn"
formControlName="wwn">
</div>
</div>
</span>
<legend class="cd-header"
i18n>Settings</legend>
<!-- BACKSTORE -->
<div class="form-group row">
<div class="col-sm-12">
<label class="col-form-label"
i18n>Backstore</label>
<select id="backstore"
name="backstore"
class="form-select"
formControlName="backstore">
<option *ngFor="let bs of backstores"
[value]="bs">{{ bs | iscsiBackstore }}</option>
</select>
</div>
</div>
<!-- CONTROLS -->
<ng-container *ngFor="let bs of backstores">
<ng-container *ngIf="settingsForm.value['backstore'] === bs">
<div class="form-group row"
*ngFor="let setting of disk_default_controls[bs] | keyvalue">
<div class="col-sm-12">
<cd-iscsi-setting [settingsForm]="settingsForm"
[formDir]="formDir"
[setting]="setting.key"
[limits]="getDiskControlLimits(bs, setting.key)"></cd-iscsi-setting>
</div>
</div>
</ng-container>
</ng-container>
</div>
<div class="modal-footer">
<cd-form-button-panel (submitActionEvent)="save()"
[form]="settingsForm"
[submitText]="actionLabels.UPDATE"></cd-form-button-panel>
</div>
</form>
</ng-container>
</cd-modal>
| 3,275 | 34.225806 | 102 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-iqn-settings-modal/iscsi-target-iqn-settings-modal.component.html | <cd-modal [modalRef]="activeModal">
<ng-container class="modal-title"
i18n>Advanced Settings</ng-container>
<ng-container class="modal-content">
<form name="settingsForm"
#formDir="ngForm"
[formGroup]="settingsForm"
novalidate>
<div class="modal-body">
<p class="alert-warning"
i18n>Changing these parameters from their default values is usually not necessary.</p>
<div class="form-group row"
*ngFor="let setting of settingsForm.controls | keyvalue">
<div class="col-sm-12">
<cd-iscsi-setting [settingsForm]="settingsForm"
[formDir]="formDir"
[setting]="setting.key"
[limits]="getTargetControlLimits(setting.key)"></cd-iscsi-setting>
</div>
</div>
</div>
<div class="modal-footer">
<cd-form-button-panel (submitActionEvent)="save()"
[form]="settingsForm"
[submitText]="actionLabels.UPDATE"></cd-form-button-panel>
</div>
</form>
</ng-container>
</cd-modal>
| 1,182 | 34.848485 | 97 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi-target-list/iscsi-target-list.component.html | <cd-iscsi-tabs></cd-iscsi-tabs>
<cd-alert-panel type="info"
*ngIf="available === false"
title="iSCSI Targets not available"
i18n-title>
<ng-container i18n>Please consult the <cd-doc section="iscsi"></cd-doc> on
how to configure and enable the iSCSI Targets management functionality.</ng-container>
<ng-container *ngIf="status">
<br>
<span i18n>Available information:</span>
<pre>{{ status }}</pre>
</ng-container>
</cd-alert-panel>
<cd-table #table
*ngIf="available === true"
[data]="targets"
columnMode="flex"
[columns]="columns"
identifier="target_iqn"
forceIdentifier="true"
selectionType="single"
[hasDetails]="true"
[autoReload]="false"
[status]="tableStatus"
(fetchData)="getTargets()"
(setExpandedRow)="setExpandedRow($event)"
(updateSelection)="updateSelection($event)">
<div class="table-actions btn-toolbar">
<cd-table-actions class="btn-group"
[permission]="permission"
[selection]="selection"
[tableActions]="tableActions">
</cd-table-actions>
<button class="btn btn-light"
type="button"
(click)="configureDiscoveryAuth()">
<i [ngClass]="[icons.key]"
aria-hidden="true">
</i>
<ng-container i18n>Discovery authentication</ng-container>
</button>
</div>
<cd-iscsi-target-details cdTableDetail
*ngIf="expandedRow"
[cephIscsiConfigVersion]="cephIscsiConfigVersion"
[selection]="expandedRow"
[settings]="settings"></cd-iscsi-target-details>
</cd-table>
| 1,812 | 32.574074 | 90 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/iscsi/iscsi.component.html | <cd-iscsi-tabs></cd-iscsi-tabs>
<legend i18n>Gateways</legend>
<div>
<cd-table [data]="gateways"
(fetchData)="refresh()"
[columns]="gatewaysColumns">
</cd-table>
</div>
<legend i18n>Images</legend>
<div>
<cd-table [data]="images"
[columns]="imagesColumns">
</cd-table>
</div>
<ng-template #iscsiSparklineTpl
let-row="row"
let-value="value">
<span *ngIf="row.backstore === 'user:rbd'">
<cd-sparkline [data]="value"
[isBinary]="row.cdIsBinary"></cd-sparkline>
</span>
<span *ngIf="row.backstore !== 'user:rbd'"
class="text-muted">
n/a
</span>
</ng-template>
<ng-template #iscsiPerSecondTpl
let-row="row"
let-value="value">
<span *ngIf="row.backstore === 'user:rbd'">
{{ value }} /s
</span>
<span *ngIf="row.backstore !== 'user:rbd'"
class="text-muted">
n/a
</span>
</ng-template>
<ng-template #iscsiRelativeDateTpl
let-row="row"
let-value="value">
<span *ngIf="row.backstore === 'user:rbd'">
{{ value | relativeDate | notAvailable }}
</span>
<span *ngIf="row.backstore !== 'user:rbd'"
class="text-muted">
n/a
</span>
</ng-template>
| 1,247 | 22.111111 | 61 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-create-modal/bootstrap-create-modal.component.html | <cd-modal [modalRef]="activeModal">
<ng-container i18n
class="modal-title">Create Bootstrap Token</ng-container>
<ng-container class="modal-content">
<form name="createBootstrapForm"
class="form"
#formDir="ngForm"
[formGroup]="createBootstrapForm"
novalidate>
<div class="modal-body">
<p>
<ng-container i18n>To create a bootstrap token which can be imported
by a peer site cluster, provide the local site's name, select
which pools will have mirroring enabled, and click
<kbd>Generate</kbd>.</ng-container>
</p>
<div class="form-group">
<label class="col-form-label required"
for="siteName"
i18n>Site Name</label>
<input class="form-control"
type="text"
placeholder="Name..."
i18n-placeholder
id="siteName"
name="siteName"
formControlName="siteName"
autofocus>
<span *ngIf="createBootstrapForm.showError('siteName', formDir, 'required')"
class="invalid-feedback"
i18n>This field is required.</span>
</div>
<div class="form-group"
formGroupName="pools">
<label class="col-form-label required"
for="pools"
i18n>Pools</label>
<div class="custom-control custom-checkbox"
*ngFor="let pool of pools">
<input type="checkbox"
class="custom-control-input"
id="{{ pool.name }}"
name="{{ pool.name }}"
formControlName="{{ pool.name }}">
<label class="custom-control-label"
for="{{ pool.name }}">{{ pool.name }}</label>
</div>
<span *ngIf="createBootstrapForm.showError('pools', formDir, 'requirePool')"
class="invalid-feedback"
i18n>At least one pool is required.</span>
</div>
<cd-submit-button class="mb-4 float-end"
i18n
[form]="createBootstrapForm"
(submitAction)="generate()">Generate</cd-submit-button>
<div class="form-group">
<label class="col-form-label"
for="token">
<span i18n>Token</span>
</label>
<textarea class="form-control resize-vertical"
placeholder="Generated token..."
i18n-placeholder
id="token"
formControlName="token"
readonly>
</textarea>
</div>
<cd-copy-2-clipboard-button class="float-end"
source="token">
</cd-copy-2-clipboard-button>
</div>
<div class="modal-footer">
<cd-back-button (backAction)="activeModal.close()"
name="Close"
i18n-name>
</cd-back-button>
</div>
</form>
</ng-container>
</cd-modal>
| 3,171 | 35.045455 | 86 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/bootstrap-import-modal/bootstrap-import-modal.component.html | <cd-modal [modalRef]="activeModal">
<ng-container i18n
class="modal-title">Import Bootstrap Token</ng-container>
<ng-container class="modal-content">
<form name="importBootstrapForm"
class="form"
#formDir="ngForm"
[formGroup]="importBootstrapForm"
novalidate>
<div class="modal-body">
<p>
<ng-container i18n>To import a bootstrap token which was created
by a peer site cluster, provide the local site's name, select
which pools will have mirroring enabled, provide the generated
token, and click <kbd>Import</kbd>.</ng-container>
</p>
<div class="form-group">
<label class="col-form-label required"
for="siteName"
i18n>Site Name</label>
<input class="form-control"
type="text"
placeholder="Name..."
i18n-placeholder
id="siteName"
name="siteName"
formControlName="siteName"
autofocus>
<span *ngIf="importBootstrapForm.showError('siteName', formDir, 'required')"
class="invalid-feedback"
i18n>This field is required.</span>
</div>
<div class="form-group">
<label class="col-form-label"
for="direction">
<span i18n>Direction</span>
</label>
<select id="direction"
name="direction"
class="form-control"
formControlName="direction">
<option *ngFor="let direction of directions"
[value]="direction.key">{{ direction.desc }}</option>
</select>
</div>
<div class="form-group"
formGroupName="pools">
<label class="col-form-label required"
for="pools"
i18n>Pools</label>
<div class="custom-control custom-checkbox"
*ngFor="let pool of pools">
<input type="checkbox"
class="custom-control-input"
id="{{ pool.name }}"
name="{{ pool.name }}"
formControlName="{{ pool.name }}">
<label class="custom-control-label"
for="{{ pool.name }}">{{ pool.name }}</label>
</div>
<span *ngIf="importBootstrapForm.showError('pools', formDir, 'requirePool')"
class="invalid-feedback"
i18n>At least one pool is required.</span>
</div>
<div class="form-group">
<label class="col-form-label required"
for="token"
i18n>Token</label>
<textarea class="form-control resize-vertical"
placeholder="Generated token..."
i18n-placeholder
id="token"
formControlName="token">
</textarea>
<span *ngIf="importBootstrapForm.showError('token', formDir, 'required')"
class="invalid-feedback"
i18n>This field is required.</span>
<span *ngIf="importBootstrapForm.showError('token', formDir, 'invalidToken')"
class="invalid-feedback"
i18n>The token is invalid.</span>
</div>
</div>
<div class="modal-footer">
<cd-form-button-panel (submitActionEvent)="import()"
[form]="importBootstrapForm"
[submitText]="actionLabels.SUBMIT"></cd-form-button-panel>
</div>
</form>
</ng-container>
</cd-modal>
| 3,686 | 37.010309 | 88 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/daemon-list/daemon-list.component.html | <cd-table [data]="data"
columnMode="flex"
[columns]="columns"
[autoReload]="-1"
(fetchData)="refresh()"
[status]="tableStatus">
</cd-table>
<ng-template #healthTmpl
let-row="row"
let-value="value">
<span [ngClass]="row.health_color | mirrorHealthColor">{{ value }}</span>
</ng-template>
| 366 | 25.214286 | 75 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/image-list/image-list.component.html | <nav ngbNav
#nav="ngbNav"
class="nav-tabs"
cdStatefulTab="image-list">
<ng-container ngbNavItem="issues">
<a ngbNavLink
i18n>Issues ({{ image_error.data.length }})</a>
<ng-template ngbNavContent>
<cd-table [data]="image_error.data"
columnMode="flex"
[columns]="image_error.columns"
[autoReload]="-1"
(fetchData)="refresh()"
[status]="tableStatus">
</cd-table>
</ng-template>
</ng-container>
<ng-container ngbNavItem="syncing">
<a ngbNavLink
i18n>Syncing ({{ image_syncing.data.length }})</a>
<ng-template ngbNavContent>
<cd-table [data]="image_syncing.data"
columnMode="flex"
[columns]="image_syncing.columns"
[autoReload]="-1"
(fetchData)="refresh()"
[status]="tableStatus">
</cd-table>
</ng-template>
</ng-container>
<ng-container ngbNavItem="ready">
<a ngbNavLink
i18n>Ready ({{ image_ready.data.length }})</a>
<ng-template ngbNavContent>
<cd-table [data]="image_ready.data"
columnMode="flex"
[columns]="image_ready.columns"
[autoReload]="-1"
(fetchData)="refresh()"
[status]="tableStatus">
</cd-table>
</ng-template>
</ng-container>
</nav>
<div [ngbNavOutlet]="nav"></div>
<ng-template #stateTmpl
let-row="row"
let-value="value">
<span [ngClass]="row.state_color | mirrorHealthColor">{{ value }}</span>
</ng-template>
<ng-template #progressTmpl
let-row="row"
let-value="value">
<div *ngIf="row.state === 'Replaying'">
</div>
<div class="w-100 h-100 d-flex justify-content-center align-items-center">
<ngb-progressbar *ngIf="row.state === 'Replaying'"
type="info"
class="w-100"
[value]="value"
[showValue]="true"></ngb-progressbar>
</div>
</ng-template>
| 2,070 | 29.910448 | 76 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/overview/overview.component.html | <form name="rbdmirroringForm"
#formDir="ngForm"
[formGroup]="rbdmirroringForm"
novalidate>
<div class="row mb-3">
<div class="col-md-auto">
<label class="col-form-label"
for="siteName"
i18n>Site Name</label></div>
<div class="col-sm-4 d-flex">
<input type="text"
class="form-control"
id="siteName"
name="siteName"
formControlName="siteName"
[attr.disabled]="!editing ? true : null">
<button class="btn btn-light"
id="editSiteName"
(click)="updateSiteName()"
[attr.title]="editing ? 'Save' : 'Edit'">
<i [ngClass]="icons.edit"
*ngIf="!editing"></i>
<i [ngClass]="icons.check"
*ngIf="editing"></i>
</button>
<cd-copy-2-clipboard-button [source]="siteName"
[byId]="false">
</cd-copy-2-clipboard-button>
</div>
<div class="col">
<cd-table-actions class="table-actions float-end"
[permission]="permission"
[selection]="selection"
[tableActions]="tableActions">
</cd-table-actions>
</div>
</div>
</form>
<div class="row">
<div class="col-sm-6">
<legend i18n>Daemons</legend>
<div>
<cd-mirroring-daemons>
</cd-mirroring-daemons>
</div>
</div>
<div class="col-sm-6">
<legend i18n>Pools</legend>
<div>
<cd-mirroring-pools>
</cd-mirroring-pools>
</div>
</div>
</div>
<div class="row">
<div class="col-md-12">
<legend i18n>Images</legend>
<div>
<cd-mirroring-images>
</cd-mirroring-images>
</div>
</div>
</div>
| 1,756 | 24.463768 | 55 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-mode-modal/pool-edit-mode-modal.component.html | <cd-modal [modalRef]="activeModal"
pageURL="mirroring">
<ng-container i18n
class="modal-title">Edit pool mirror mode</ng-container>
<ng-container class="modal-content">
<form name="editModeForm"
class="form"
#formDir="ngForm"
[formGroup]="editModeForm"
novalidate>
<div class="modal-body">
<p>
<ng-container i18n>To edit the mirror mode for pool
<kbd>{{ poolName }}</kbd>, select a new mode from the list and click
<kbd>Update</kbd>.</ng-container>
</p>
<div class="form-group">
<label class="col-form-label"
for="mirrorMode">
<span i18n>Mode</span>
</label>
<select id="mirrorMode"
name="mirrorMode"
class="form-select"
formControlName="mirrorMode">
<option *ngFor="let mirrorMode of mirrorModes"
[value]="mirrorMode.id">{{ mirrorMode.name }}</option>
</select>
<span class="invalid-feedback"
*ngIf="editModeForm.showError('mirrorMode', formDir, 'cannotDisable')"
                i18n>Peer clusters must be removed prior to disabling mirroring.</span>
</div>
</div>
<div class="modal-footer">
<cd-form-button-panel (submitActionEvent)="update()"
[form]="editModeForm"
[submitText]="actionLabels.UPDATE"></cd-form-button-panel>
</div>
</form>
</ng-container>
</cd-modal>
| 1,598 | 34.533333 | 88 | html |
null | ceph-main/src/pybind/mgr/dashboard/frontend/src/app/ceph/block/mirroring/pool-edit-peer-modal/pool-edit-peer-modal.component.html | <cd-modal [modalRef]="activeModal">
<span class="modal-title"
i18n>{mode, select, edit {Edit} other {Add}} pool mirror peer</span>
<ng-container class="modal-content">
<form name="editPeerForm"
class="form"
#formDir="ngForm"
[formGroup]="editPeerForm"
novalidate>
<div class="modal-body">
<p>
<span i18n>{mode, select, edit {Edit} other {Add}} the pool
mirror peer attributes for pool <kbd>{{ poolName }}</kbd> and click
<kbd>Submit</kbd>.</span>
</p>
<div class="form-group">
<label class="col-form-label required"
for="clusterName"
i18n>Cluster Name</label>
<input class="form-control"
type="text"
placeholder="Name..."
i18n-placeholder
id="clusterName"
name="clusterName"
formControlName="clusterName"
autofocus>
<span class="invalid-feedback"
*ngIf="editPeerForm.showError('clusterName', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="editPeerForm.showError('clusterName', formDir, 'invalidClusterName')"
i18n>The cluster name is not valid.</span>
</div>
<div class="form-group">
<label class="col-form-label required"
for="clientID"
i18n>CephX ID</label>
<input class="form-control"
type="text"
placeholder="CephX ID..."
i18n-placeholder
id="clientID"
name="clientID"
formControlName="clientID">
<span class="invalid-feedback"
*ngIf="editPeerForm.showError('clientID', formDir, 'required')"
i18n>This field is required.</span>
<span class="invalid-feedback"
*ngIf="editPeerForm.showError('clientID', formDir, 'invalidClientID')"
i18n>The CephX ID is not valid.</span>
</div>
<div class="form-group">
<label class="col-form-label"
for="monAddr">
<span i18n>Monitor Addresses</span>
</label>
<input class="form-control"
type="text"
placeholder="Comma-delimited addresses..."
i18n-placeholder
id="monAddr"
name="monAddr"
formControlName="monAddr">
<span class="invalid-feedback"
*ngIf="editPeerForm.showError('monAddr', formDir, 'invalidMonAddr')"
              i18n>The monitor address is not valid.</span>
</div>
<div class="form-group">
<label class="col-form-label"
for="key">
<span i18n>CephX Key</span>
</label>
<input class="form-control"
type="text"
placeholder="Base64-encoded key..."
i18n-placeholder
id="key"
name="key"
formControlName="key">
<span class="invalid-feedback"
*ngIf="editPeerForm.showError('key', formDir, 'invalidKey')"
i18n>CephX key must be base64 encoded.</span>
</div>
</div>
<div class="modal-footer">
<cd-form-button-panel (submitActionEvent)="update()"
[form]="editPeerForm"
[submitText]="actionLabels.SUBMIT"></cd-form-button-panel>
</div>
</form>
</ng-container>
</cd-modal>
| 3,722 | 35.861386 | 92 | html |