// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <atomic>
#include <iostream>
#include <list>
#include <memory>
#include <set>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <boost/random/binomial_distribution.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <gtest/gtest.h>
#define MSG_POLICY_UNIT_TESTING
#include "common/ceph_argparse.h"
#include "common/ceph_mutex.h"
#include "global/global_init.h"
#include "messages/MCommand.h"
#include "messages/MPing.h"
#include "msg/Connection.h"
#include "msg/Dispatcher.h"
#include "msg/Message.h"
#include "msg/Messenger.h"
#include "msg/msg_types.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "auth/DummyAuth.h"
typedef boost::mt11213b gen_type;
#define dout_subsys ceph_subsys_ms
#undef dout_prefix
#define dout_prefix *_dout << " ceph_test_msgr "
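// Busy-poll `expr` up to 1000 times with 1ms sleeps (~1s total); used to
// wait for asynchronous connection state changes without a condition variable.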
#define CHECK_AND_WAIT_TRUE(expr) do { \
int n = 1000; \
while (--n) { \
if (expr) \
break; \
usleep(1000); \
} \
} while (0)
using namespace std;
class MessengerTest : public ::testing::TestWithParam<const char*> {
public:
DummyAuthClientServer dummy_auth;
Messenger *server_msgr;
Messenger *client_msgr;
MessengerTest() : dummy_auth(g_ceph_context),
server_msgr(NULL), client_msgr(NULL) {
dummy_auth.auth_registry.refresh_config();
}
void SetUp() override {
lderr(g_ceph_context) << __func__ << " start set up " << GetParam() << dendl;
server_msgr = Messenger::create(g_ceph_context, string(GetParam()), entity_name_t::OSD(0), "server", getpid());
client_msgr = Messenger::create(g_ceph_context, string(GetParam()), entity_name_t::CLIENT(-1), "client", getpid());
server_msgr->set_default_policy(Messenger::Policy::stateless_server(0));
client_msgr->set_default_policy(Messenger::Policy::lossy_client(0));
server_msgr->set_auth_client(&dummy_auth);
server_msgr->set_auth_server(&dummy_auth);
client_msgr->set_auth_client(&dummy_auth);
client_msgr->set_auth_server(&dummy_auth);
server_msgr->set_require_authorizer(false);
}
void TearDown() override {
ASSERT_EQ(server_msgr->get_dispatch_queue_len(), 0);
ASSERT_EQ(client_msgr->get_dispatch_queue_len(), 0);
delete server_msgr;
delete client_msgr;
}
};
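// FakeDispatcher counts messages per connection in a refcounted Session
// stored in the Connection's priv slot, and records connect/accept/reset
// events under `lock`. With is_server set, every incoming ping is answered
// with a new MPing.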
class FakeDispatcher : public Dispatcher {
public:
struct Session : public RefCountedObject {
atomic<uint64_t> count;
ConnectionRef con;
explicit Session(ConnectionRef c): RefCountedObject(g_ceph_context), count(0), con(c) {
}
uint64_t get_count() { return count; }
};
ceph::mutex lock = ceph::make_mutex("FakeDispatcher::lock");
ceph::condition_variable cond;
bool is_server;
bool got_new;
bool got_remote_reset;
bool got_connect;
bool loopback;
entity_addrvec_t last_accept;
ConnectionRef *last_accept_con_ptr = nullptr;
explicit FakeDispatcher(bool s): Dispatcher(g_ceph_context),
is_server(s), got_new(false), got_remote_reset(false),
got_connect(false), loopback(false) {
}
bool ms_can_fast_dispatch_any() const override { return true; }
bool ms_can_fast_dispatch(const Message *m) const override {
switch (m->get_type()) {
case CEPH_MSG_PING:
return true;
default:
return false;
}
}
void ms_handle_fast_connect(Connection *con) override {
std::scoped_lock l{lock};
lderr(g_ceph_context) << __func__ << " " << con << dendl;
auto s = con->get_priv();
if (!s) {
auto session = new Session(con);
con->set_priv(RefCountedPtr{session, false});
lderr(g_ceph_context) << __func__ << " con: " << con
<< " count: " << session->count << dendl;
}
got_connect = true;
cond.notify_all();
}
void ms_handle_fast_accept(Connection *con) override {
last_accept = con->get_peer_addrs();
if (last_accept_con_ptr) {
*last_accept_con_ptr = con;
}
if (!con->get_priv()) {
con->set_priv(RefCountedPtr{new Session(con), false});
}
}
bool ms_dispatch(Message *m) override {
auto priv = m->get_connection()->get_priv();
auto s = static_cast<Session*>(priv.get());
if (!s) {
s = new Session(m->get_connection());
priv.reset(s, false);
m->get_connection()->set_priv(priv);
}
s->count++;
lderr(g_ceph_context) << __func__ << " conn: " << m->get_connection() << " session " << s << " count: " << s->count << dendl;
if (is_server) {
reply_message(m);
}
std::lock_guard l{lock};
got_new = true;
cond.notify_all();
m->put();
return true;
}
bool ms_handle_reset(Connection *con) override {
std::lock_guard l{lock};
lderr(g_ceph_context) << __func__ << " " << con << dendl;
auto priv = con->get_priv();
if (auto s = static_cast<Session*>(priv.get()); s) {
s->con.reset(); // break con <-> session ref cycle
con->set_priv(nullptr); // break ref <-> session cycle, if any
}
return true;
}
void ms_handle_remote_reset(Connection *con) override {
std::lock_guard l{lock};
lderr(g_ceph_context) << __func__ << " " << con << dendl;
auto priv = con->get_priv();
if (auto s = static_cast<Session*>(priv.get()); s) {
s->con.reset(); // break con <-> session ref cycle
con->set_priv(nullptr); // break ref <-> session cycle, if any
}
got_remote_reset = true;
cond.notify_all();
}
bool ms_handle_refused(Connection *con) override {
return false;
}
void ms_fast_dispatch(Message *m) override {
auto priv = m->get_connection()->get_priv();
auto s = static_cast<Session*>(priv.get());
if (!s) {
s = new Session(m->get_connection());
priv.reset(s, false);
m->get_connection()->set_priv(priv);
}
s->count++;
lderr(g_ceph_context) << __func__ << " conn: " << m->get_connection() << " session " << s << " count: " << s->count << dendl;
if (is_server) {
if (loopback)
ceph_assert(m->get_source().is_osd());
else
reply_message(m);
} else if (loopback) {
ceph_assert(m->get_source().is_client());
}
m->put();
std::lock_guard l{lock};
got_new = true;
cond.notify_all();
}
int ms_handle_authentication(Connection *con) override {
return 1;
}
void reply_message(Message *m) {
MPing *rm = new MPing();
m->get_connection()->send_message(rm);
}
};
typedef FakeDispatcher::Session Session;
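// TestInterceptor pauses the msgr2 protocol state machine at chosen steps:
// the test thread sets a breakpoint(step), the connection thread blocks in
// intercept() when it reaches that step, and the test thread inspects state
// via wait() and resumes it with proceed(step, CONTINUE/FAIL).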
struct TestInterceptor : public Interceptor {
bool step_waiting = false;
bool waiting = true;
std::map<Connection *, uint32_t> current_step;
std::map<Connection *, std::list<uint32_t>> step_history;
std::map<uint32_t, std::optional<ACTION>> decisions;
std::set<uint32_t> breakpoints;
uint32_t count_step(Connection *conn, uint32_t step) {
uint32_t count = 0;
for (auto s : step_history[conn]) {
if (s == step) {
count++;
}
}
return count;
}
void breakpoint(uint32_t step) {
breakpoints.insert(step);
}
void remove_bp(uint32_t step) {
breakpoints.erase(step);
}
Connection *wait(uint32_t step, Connection *conn=nullptr) {
std::unique_lock<std::mutex> l(lock);
while(true) {
if (conn) {
auto it = current_step.find(conn);
if (it != current_step.end()) {
if (it->second == step) {
break;
}
}
} else {
for (auto it : current_step) {
if (it.second == step) {
conn = it.first;
break;
}
}
if (conn) {
break;
}
}
step_waiting = true;
cond_var.wait(l);
}
step_waiting = false;
return conn;
}
ACTION wait_for_decision(uint32_t step, std::unique_lock<std::mutex> &l) {
if (decisions[step]) {
return *(decisions[step]);
}
waiting = true;
cond_var.wait(l, [this] { return !waiting; });
return *(decisions[step]);
}
void proceed(uint32_t step, ACTION decision) {
std::unique_lock<std::mutex> l(lock);
decisions[step] = decision;
if (waiting) {
waiting = false;
cond_var.notify_one();
}
}
ACTION intercept(Connection *conn, uint32_t step) override {
lderr(g_ceph_context) << __func__ << " conn(" << conn
<< ") intercept called on step=" << step << dendl;
{
std::unique_lock<std::mutex> l(lock);
step_history[conn].push_back(step);
current_step[conn] = step;
if (step_waiting) {
cond_var.notify_one();
}
}
std::unique_lock<std::mutex> l(lock);
ACTION decision = ACTION::CONTINUE;
if (breakpoints.find(step) != breakpoints.end()) {
lderr(g_ceph_context) << __func__ << " conn(" << conn
<< ") pausing on step=" << step << dendl;
decision = wait_for_decision(step, l);
} else {
if (decisions[step]) {
decision = *(decisions[step]);
}
}
lderr(g_ceph_context) << __func__ << " conn(" << conn
<< ") resuming step=" << step << " with decision="
<< decision << dendl;
decisions[step].reset();
return decision;
}
};
/**
* Scenario: A connects to B, and B connects to A at the same time.
*/
TEST_P(MessengerTest, ConnectionRaceTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(false);
TestInterceptor *cli_interceptor = new TestInterceptor();
TestInterceptor *srv_interceptor = new TestInterceptor();
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer_reuse(0));
server_msgr->interceptor = srv_interceptor;
client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer_reuse(0));
client_msgr->interceptor = cli_interceptor;
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:3300");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:3301");
client_msgr->bind(bind_addr);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// pause before sending client_ident message
cli_interceptor->breakpoint(Interceptor::STEP::SEND_CLIENT_IDENTITY);
// pause before sending client_ident message
srv_interceptor->breakpoint(Interceptor::STEP::SEND_CLIENT_IDENTITY);
ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
MPing *m1 = new MPing();
ASSERT_EQ(c2s->send_message(m1), 0);
ConnectionRef s2c = server_msgr->connect_to(client_msgr->get_mytype(),
client_msgr->get_myaddrs());
MPing *m2 = new MPing();
ASSERT_EQ(s2c->send_message(m2), 0);
cli_interceptor->wait(Interceptor::STEP::SEND_CLIENT_IDENTITY, c2s.get());
srv_interceptor->wait(Interceptor::STEP::SEND_CLIENT_IDENTITY, s2c.get());
// at this point both connections (A->B, B->A) are paused just before sending
// the client_ident message.
cli_interceptor->remove_bp(Interceptor::STEP::SEND_CLIENT_IDENTITY);
srv_interceptor->remove_bp(Interceptor::STEP::SEND_CLIENT_IDENTITY);
cli_interceptor->proceed(Interceptor::STEP::SEND_CLIENT_IDENTITY, Interceptor::ACTION::CONTINUE);
srv_interceptor->proceed(Interceptor::STEP::SEND_CLIENT_IDENTITY, Interceptor::ACTION::CONTINUE);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
{
std::unique_lock l{srv_dispatcher.lock};
srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; });
srv_dispatcher.got_new = false;
}
ASSERT_TRUE(s2c->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(s2c->get_priv().get())->get_count());
ASSERT_TRUE(s2c->peer_is_client());
ASSERT_TRUE(c2s->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count());
ASSERT_TRUE(c2s->peer_is_osd());
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
delete cli_interceptor;
delete srv_interceptor;
}
/**
* Scenario: A connects to B, and B connects to A at the same time.
* The first (A -> B) connection gets to message flow handshake, the
* second (B -> A) connection is stuck waiting for a banner from A.
* After A sends client_ident to B, the first connection wins and B
* calls reuse_connection() to replace the second connection's socket
* while the second connection is still in BANNER_CONNECTING.
*/
TEST_P(MessengerTest, ConnectionRaceReuseBannerTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(false);
auto cli_interceptor = std::make_unique<TestInterceptor>();
auto srv_interceptor = std::make_unique<TestInterceptor>();
server_msgr->set_policy(entity_name_t::TYPE_CLIENT,
Messenger::Policy::lossless_peer_reuse(0));
server_msgr->interceptor = srv_interceptor.get();
client_msgr->set_policy(entity_name_t::TYPE_OSD,
Messenger::Policy::lossless_peer_reuse(0));
client_msgr->interceptor = cli_interceptor.get();
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:3300");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:3301");
client_msgr->bind(bind_addr);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// pause before sending client_ident message
srv_interceptor->breakpoint(Interceptor::STEP::SEND_CLIENT_IDENTITY);
ConnectionRef s2c = server_msgr->connect_to(client_msgr->get_mytype(),
client_msgr->get_myaddrs());
MPing *m1 = new MPing();
ASSERT_EQ(s2c->send_message(m1), 0);
srv_interceptor->wait(Interceptor::STEP::SEND_CLIENT_IDENTITY);
srv_interceptor->remove_bp(Interceptor::STEP::SEND_CLIENT_IDENTITY);
// pause before sending banner
cli_interceptor->breakpoint(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING);
ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
MPing *m2 = new MPing();
ASSERT_EQ(c2s->send_message(m2), 0);
cli_interceptor->wait(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING);
cli_interceptor->remove_bp(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING);
// second connection is in BANNER_CONNECTING, ensure it stays so
// and send client_ident
srv_interceptor->breakpoint(Interceptor::STEP::BANNER_EXCHANGE);
srv_interceptor->proceed(Interceptor::STEP::SEND_CLIENT_IDENTITY, Interceptor::ACTION::CONTINUE);
// handle client_ident -- triggers reuse_connection() with exproto
// in BANNER_CONNECTING
cli_interceptor->breakpoint(Interceptor::STEP::READY);
cli_interceptor->proceed(Interceptor::STEP::BANNER_EXCHANGE_BANNER_CONNECTING, Interceptor::ACTION::CONTINUE);
cli_interceptor->wait(Interceptor::STEP::READY);
cli_interceptor->remove_bp(Interceptor::STEP::READY);
// first connection is in READY
Connection *s2c_accepter = srv_interceptor->wait(Interceptor::STEP::BANNER_EXCHANGE);
srv_interceptor->remove_bp(Interceptor::STEP::BANNER_EXCHANGE);
srv_interceptor->proceed(Interceptor::STEP::BANNER_EXCHANGE, Interceptor::ACTION::CONTINUE);
cli_interceptor->proceed(Interceptor::STEP::READY, Interceptor::ACTION::CONTINUE);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
{
std::unique_lock l{srv_dispatcher.lock};
srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; });
srv_dispatcher.got_new = false;
}
EXPECT_TRUE(s2c->is_connected());
EXPECT_EQ(1u, static_cast<Session*>(s2c->get_priv().get())->get_count());
EXPECT_TRUE(s2c->peer_is_client());
EXPECT_TRUE(c2s->is_connected());
EXPECT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count());
EXPECT_TRUE(c2s->peer_is_osd());
// closed in reuse_connection() -- EPIPE when writing banner/hello
EXPECT_FALSE(s2c_accepter->is_connected());
// established exactly once, never faulted and reconnected
EXPECT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::START_CLIENT_BANNER_EXCHANGE), 1u);
EXPECT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT), 0u);
EXPECT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::READY), 1u);
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
}
/**
* Scenario:
* - A connects to B
* - A sends client_ident to B
* - B fails before sending server_ident to A
* - A reconnects
*/
TEST_P(MessengerTest, MissingServerIdenTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(false);
TestInterceptor *cli_interceptor = new TestInterceptor();
TestInterceptor *srv_interceptor = new TestInterceptor();
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::stateful_server(0));
server_msgr->interceptor = srv_interceptor;
client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossy_client(0));
client_msgr->interceptor = cli_interceptor;
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:3300");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:3301");
client_msgr->bind(bind_addr);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// pause before sending server_ident message
srv_interceptor->breakpoint(Interceptor::STEP::SEND_SERVER_IDENTITY);
ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
MPing *m1 = new MPing();
ASSERT_EQ(c2s->send_message(m1), 0);
Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::SEND_SERVER_IDENTITY);
srv_interceptor->remove_bp(Interceptor::STEP::SEND_SERVER_IDENTITY);
// We inject a message from this side of the connection to force it to be
// in standby when we inject the failure below
MPing *m2 = new MPing();
ASSERT_EQ(c2s_accepter->send_message(m2), 0);
srv_interceptor->proceed(Interceptor::STEP::SEND_SERVER_IDENTITY, Interceptor::ACTION::FAIL);
{
std::unique_lock l{srv_dispatcher.lock};
srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; });
srv_dispatcher.got_new = false;
}
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(c2s->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count());
ASSERT_TRUE(c2s->peer_is_osd());
ASSERT_TRUE(c2s_accepter->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(c2s_accepter->get_priv().get())->get_count());
ASSERT_TRUE(c2s_accepter->peer_is_client());
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
delete cli_interceptor;
delete srv_interceptor;
}
/**
* Scenario:
* - A connects to B
* - A sends client_ident to B
* - B fails before sending server_ident to A
* - A goes to standby
* - B reconnects to A
*/
TEST_P(MessengerTest, MissingServerIdenTest2) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(false);
TestInterceptor *cli_interceptor = new TestInterceptor();
TestInterceptor *srv_interceptor = new TestInterceptor();
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer(0));
server_msgr->interceptor = srv_interceptor;
client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer(0));
client_msgr->interceptor = cli_interceptor;
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:3300");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:3301");
client_msgr->bind(bind_addr);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// pause before sending server_ident message
srv_interceptor->breakpoint(Interceptor::STEP::SEND_SERVER_IDENTITY);
ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::SEND_SERVER_IDENTITY);
srv_interceptor->remove_bp(Interceptor::STEP::SEND_SERVER_IDENTITY);
// We inject a message from this side of the connection to force it to be
// in standby when we inject the failure below
MPing *m2 = new MPing();
ASSERT_EQ(c2s_accepter->send_message(m2), 0);
srv_interceptor->proceed(Interceptor::STEP::SEND_SERVER_IDENTITY, Interceptor::ACTION::FAIL);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(c2s->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count());
ASSERT_TRUE(c2s->peer_is_osd());
ASSERT_TRUE(c2s_accepter->is_connected());
ASSERT_EQ(0u, static_cast<Session*>(c2s_accepter->get_priv().get())->get_count());
ASSERT_TRUE(c2s_accepter->peer_is_client());
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
delete cli_interceptor;
delete srv_interceptor;
}
/**
* Scenario:
* - A connects to B
* - A and B exchange messages
* - A fails
* - B goes into standby
* - A reconnects
*/
TEST_P(MessengerTest, ReconnectTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
TestInterceptor *cli_interceptor = new TestInterceptor();
TestInterceptor *srv_interceptor = new TestInterceptor();
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::stateful_server(0));
server_msgr->interceptor = srv_interceptor;
client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer(0));
client_msgr->interceptor = cli_interceptor;
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:3300");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:3301");
client_msgr->bind(bind_addr);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
MPing *m1 = new MPing();
ASSERT_EQ(c2s->send_message(m1), 0);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(c2s->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count());
ASSERT_TRUE(c2s->peer_is_osd());
cli_interceptor->breakpoint(Interceptor::STEP::HANDLE_MESSAGE);
MPing *m2 = new MPing();
ASSERT_EQ(c2s->send_message(m2), 0);
cli_interceptor->wait(Interceptor::STEP::HANDLE_MESSAGE, c2s.get());
cli_interceptor->remove_bp(Interceptor::STEP::HANDLE_MESSAGE);
// at this point client and server are connected together
srv_interceptor->breakpoint(Interceptor::STEP::READY);
// failing client
cli_interceptor->proceed(Interceptor::STEP::HANDLE_MESSAGE, Interceptor::ACTION::FAIL);
MPing *m3 = new MPing();
ASSERT_EQ(c2s->send_message(m3), 0);
Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::READY);
// the srv end of the connection is now paused at ready,
// which means that the reconnect was successful
srv_interceptor->remove_bp(Interceptor::STEP::READY);
ASSERT_TRUE(c2s_accepter->peer_is_client());
// c2s_accepter sent 0 reconnect messages
ASSERT_EQ(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT), 0u);
// c2s_accepter sent 1 reconnect_ok message
ASSERT_EQ(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT_OK), 1u);
// c2s sent 1 reconnect message
ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT), 1u);
// c2s sent 0 reconnect_ok messages
ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT_OK), 0u);
srv_interceptor->proceed(Interceptor::STEP::READY, Interceptor::ACTION::CONTINUE);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
delete cli_interceptor;
delete srv_interceptor;
}
/**
* Scenario:
* - A connects to B
* - A and B exchange messages
* - A fails
* - A reconnects // B reconnects
*/
TEST_P(MessengerTest, ReconnectRaceTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
TestInterceptor *cli_interceptor = new TestInterceptor();
TestInterceptor *srv_interceptor = new TestInterceptor();
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, Messenger::Policy::lossless_peer(0));
server_msgr->interceptor = srv_interceptor;
client_msgr->set_policy(entity_name_t::TYPE_OSD, Messenger::Policy::lossless_peer(0));
client_msgr->interceptor = cli_interceptor;
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:3300");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:3301");
client_msgr->bind(bind_addr);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
ConnectionRef c2s = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
MPing *m1 = new MPing();
ASSERT_EQ(c2s->send_message(m1), 0);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(c2s->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(c2s->get_priv().get())->get_count());
ASSERT_TRUE(c2s->peer_is_osd());
cli_interceptor->breakpoint(Interceptor::STEP::HANDLE_MESSAGE);
MPing *m2 = new MPing();
ASSERT_EQ(c2s->send_message(m2), 0);
cli_interceptor->wait(Interceptor::STEP::HANDLE_MESSAGE, c2s.get());
cli_interceptor->remove_bp(Interceptor::STEP::HANDLE_MESSAGE);
// at this point client and server are connected together
// force both client and server to race on reconnect
cli_interceptor->breakpoint(Interceptor::STEP::SEND_RECONNECT);
srv_interceptor->breakpoint(Interceptor::STEP::SEND_RECONNECT);
// failing client
// this will cause both client and server to reconnect at the same time
cli_interceptor->proceed(Interceptor::STEP::HANDLE_MESSAGE, Interceptor::ACTION::FAIL);
MPing *m3 = new MPing();
ASSERT_EQ(c2s->send_message(m3), 0);
cli_interceptor->wait(Interceptor::STEP::SEND_RECONNECT, c2s.get());
srv_interceptor->wait(Interceptor::STEP::SEND_RECONNECT);
cli_interceptor->remove_bp(Interceptor::STEP::SEND_RECONNECT);
srv_interceptor->remove_bp(Interceptor::STEP::SEND_RECONNECT);
// pause on "ready"
srv_interceptor->breakpoint(Interceptor::STEP::READY);
cli_interceptor->proceed(Interceptor::STEP::SEND_RECONNECT, Interceptor::ACTION::CONTINUE);
srv_interceptor->proceed(Interceptor::STEP::SEND_RECONNECT, Interceptor::ACTION::CONTINUE);
Connection *c2s_accepter = srv_interceptor->wait(Interceptor::STEP::READY);
// the server has reconnected and is "ready"
srv_interceptor->remove_bp(Interceptor::STEP::READY);
ASSERT_TRUE(c2s_accepter->peer_is_client());
ASSERT_TRUE(c2s->peer_is_osd());
// the server should win the reconnect race
// c2s_accepter sent 1 or 2 reconnect messages
ASSERT_LT(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT), 3u);
ASSERT_GT(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT), 0u);
// c2s_accepter sent 0 reconnect_ok messages
ASSERT_EQ(srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT_OK), 0u);
// c2s sent 1 reconnect message
ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT), 1u);
// c2s sent 1 reconnect_ok message
ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SEND_RECONNECT_OK), 1u);
if (srv_interceptor->count_step(c2s_accepter, Interceptor::STEP::SEND_RECONNECT) == 2) {
// if the server sent the reconnect message twice, then
// the client must have sent a session retry message to the server
ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SESSION_RETRY), 1u);
} else {
ASSERT_EQ(cli_interceptor->count_step(c2s.get(), Interceptor::STEP::SESSION_RETRY), 0u);
}
srv_interceptor->proceed(Interceptor::STEP::READY, Interceptor::ACTION::CONTINUE);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
delete cli_interceptor;
delete srv_interceptor;
}
TEST_P(MessengerTest, SimpleTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. simple round trip
MPing *m = new MPing();
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(conn->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(conn->get_priv().get())->get_count());
ASSERT_TRUE(conn->peer_is_osd());
// 2. test rebind port
set<int> avoid_ports;
for (int i = 0; i < 10 ; i++) {
for (auto a : server_msgr->get_myaddrs().v) {
avoid_ports.insert(a.get_port() + i);
}
}
server_msgr->rebind(avoid_ports);
for (auto a : server_msgr->get_myaddrs().v) {
ASSERT_TRUE(avoid_ports.count(a.get_port()) == 0);
}
conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
// 3. test markdown connection
conn->mark_down();
ASSERT_FALSE(conn->is_connected());
// 4. test failed connection
server_msgr->shutdown();
server_msgr->wait();
m = new MPing();
conn->send_message(m);
CHECK_AND_WAIT_TRUE(!conn->is_connected());
ASSERT_FALSE(conn->is_connected());
// 5. loopback connection
srv_dispatcher.loopback = true;
conn = client_msgr->get_loopback_connection();
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
srv_dispatcher.loopback = false;
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
}
TEST_P(MessengerTest, SimpleMsgr2Test) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t legacy_addr;
legacy_addr.parse("v1:127.0.0.1");
entity_addr_t msgr2_addr;
msgr2_addr.parse("v2:127.0.0.1");
entity_addrvec_t bind_addrs;
bind_addrs.v.push_back(legacy_addr);
bind_addrs.v.push_back(msgr2_addr);
server_msgr->bindv(bind_addrs);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. simple round trip
MPing *m = new MPing();
ConnectionRef conn = client_msgr->connect_to(
server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(conn->is_connected());
ASSERT_EQ(1u, static_cast<Session*>(conn->get_priv().get())->get_count());
ASSERT_TRUE(conn->peer_is_osd());
// 2. test rebind port
set<int> avoid_ports;
for (int i = 0; i < 10 ; i++) {
for (auto a : server_msgr->get_myaddrs().v) {
avoid_ports.insert(a.get_port() + i);
}
}
server_msgr->rebind(avoid_ports);
for (auto a : server_msgr->get_myaddrs().v) {
ASSERT_TRUE(avoid_ports.count(a.get_port()) == 0);
}
conn = client_msgr->connect_to(
server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
// 3. test markdown connection
conn->mark_down();
ASSERT_FALSE(conn->is_connected());
// 4. test failed connection
server_msgr->shutdown();
server_msgr->wait();
m = new MPing();
conn->send_message(m);
CHECK_AND_WAIT_TRUE(!conn->is_connected());
ASSERT_FALSE(conn->is_connected());
// 5. loopback connection
srv_dispatcher.loopback = true;
conn = client_msgr->get_loopback_connection();
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
srv_dispatcher.loopback = false;
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
client_msgr->shutdown();
client_msgr->wait();
server_msgr->shutdown();
server_msgr->wait();
}
TEST_P(MessengerTest, FeatureTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
uint64_t all_feature_supported, feature_required, feature_supported = 0;
for (int i = 0; i < 10; i++)
feature_supported |= 1ULL << i;
feature_supported |= CEPH_FEATUREMASK_MSG_ADDR2;
feature_supported |= CEPH_FEATUREMASK_SERVER_NAUTILUS;
feature_required = feature_supported | 1ULL << 13;
all_feature_supported = feature_required | 1ULL << 14;
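// the server requires bit 13 on top of what the client initially supports;
// step 1 below therefore fails, step 2 (all_feature_supported) succeeds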
Messenger::Policy p = server_msgr->get_policy(entity_name_t::TYPE_CLIENT);
p.features_required = feature_required;
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p);
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
// 1. the client supports fewer features than the server requires
p = client_msgr->get_policy(entity_name_t::TYPE_OSD);
p.features_supported = feature_supported;
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
MPing *m = new MPing();
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
conn->send_message(m);
CHECK_AND_WAIT_TRUE(!conn->is_connected());
// the connection should fail to establish
ASSERT_FALSE(conn->is_connected());
client_msgr->shutdown();
client_msgr->wait();
// 2. the supported features now meet the requirement
p = client_msgr->get_policy(entity_name_t::TYPE_OSD);
p.features_supported = all_feature_supported;
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
client_msgr->start();
conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
TEST_P(MessengerTest, TimeoutTest) {
g_ceph_context->_conf.set_val("ms_connection_idle_timeout", "1");
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. build the connection
MPing *m = new MPing();
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(conn->is_connected());
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
ASSERT_TRUE(conn->peer_is_osd());
// 2. wait for idle
usleep(2500*1000);
ASSERT_FALSE(conn->is_connected());
server_msgr->shutdown();
server_msgr->wait();
client_msgr->shutdown();
client_msgr->wait();
g_ceph_context->_conf.set_val("ms_connection_idle_timeout", "900");
}
TEST_P(MessengerTest, StatefulTest) {
Message *m;
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
Messenger::Policy p = Messenger::Policy::stateful_server(0);
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p);
p = Messenger::Policy::lossless_client(0);
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. test for server standby
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
conn->mark_down();
ASSERT_FALSE(conn->is_connected());
ConnectionRef server_conn = server_msgr->connect_to(
client_msgr->get_mytype(), srv_dispatcher.last_accept);
// don't lose state
ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count());
srv_dispatcher.got_new = false;
conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
server_conn = server_msgr->connect_to(client_msgr->get_mytype(),
srv_dispatcher.last_accept);
{
std::unique_lock l{srv_dispatcher.lock};
srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_remote_reset; });
}
// 2. test for client reconnect
ASSERT_FALSE(cli_dispatcher.got_remote_reset);
cli_dispatcher.got_connect = false;
cli_dispatcher.got_new = false;
cli_dispatcher.got_remote_reset = false;
server_conn->mark_down();
ASSERT_FALSE(server_conn->is_connected());
// ensure the client detects that the server socket closed
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_remote_reset; });
cli_dispatcher.got_remote_reset = false;
}
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_connect; });
cli_dispatcher.got_connect = false;
}
CHECK_AND_WAIT_TRUE(conn->is_connected());
ASSERT_TRUE(conn->is_connected());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
ASSERT_TRUE(conn->is_connected());
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
// resetcheck happened, so the session count restarts at 1
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
server_conn = server_msgr->connect_to(client_msgr->get_mytype(),
srv_dispatcher.last_accept);
ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count());
cli_dispatcher.got_remote_reset = false;
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
TEST_P(MessengerTest, StatelessTest) {
Message *m;
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
Messenger::Policy p = Messenger::Policy::stateless_server(0);
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p);
p = Messenger::Policy::lossy_client(0);
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. test that the server loses state
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
conn->mark_down();
ASSERT_FALSE(conn->is_connected());
srv_dispatcher.got_new = false;
ConnectionRef server_conn;
srv_dispatcher.last_accept_con_ptr = &server_conn;
conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
ASSERT_TRUE(server_conn);
// the server has lost its state
{
std::unique_lock l{srv_dispatcher.lock};
srv_dispatcher.cond.wait(l, [&] { return srv_dispatcher.got_new; });
}
ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count());
// 2. test for client lossy
server_conn->mark_down();
ASSERT_FALSE(server_conn->is_connected());
conn->send_keepalive();
CHECK_AND_WAIT_TRUE(!conn->is_connected());
ASSERT_FALSE(conn->is_connected());
conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
TEST_P(MessengerTest, AnonTest) {
Message *m;
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
Messenger::Policy p = Messenger::Policy::stateless_server(0);
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p);
p = Messenger::Policy::lossy_client(0);
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
ConnectionRef server_con_a, server_con_b;
// a
srv_dispatcher.last_accept_con_ptr = &server_con_a;
ConnectionRef con_a = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs(),
true);
{
m = new MPing();
ASSERT_EQ(con_a->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(con_a->get_priv().get())->get_count());
// b
srv_dispatcher.last_accept_con_ptr = &server_con_b;
ConnectionRef con_b = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs(),
true);
{
m = new MPing();
ASSERT_EQ(con_b->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(con_b->get_priv().get())->get_count());
// these should be distinct
ASSERT_NE(con_a, con_b);
ASSERT_NE(server_con_a, server_con_b);
// and both connected
{
m = new MPing();
ASSERT_EQ(con_a->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
{
m = new MPing();
ASSERT_EQ(con_b->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
// clean up
con_a->mark_down();
ASSERT_FALSE(con_a->is_connected());
con_b->mark_down();
ASSERT_FALSE(con_b->is_connected());
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
TEST_P(MessengerTest, ClientStandbyTest) {
Message *m;
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
Messenger::Policy p = Messenger::Policy::stateful_server(0);
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p);
p = Messenger::Policy::lossless_peer(0);
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. test for client standby, resetcheck
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
ConnectionRef server_conn = server_msgr->connect_to(
client_msgr->get_mytype(),
srv_dispatcher.last_accept);
ASSERT_FALSE(cli_dispatcher.got_remote_reset);
cli_dispatcher.got_connect = false;
server_conn->mark_down();
ASSERT_FALSE(server_conn->is_connected());
// wait a bit; the client should now be in standby
usleep(300*1000);
// the client is in standby, so we reuse the original connection
{
// Try sending a message to verify the remote reset callback fires
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
{
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_remote_reset; });
cli_dispatcher.got_remote_reset = false;
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_connect; });
cli_dispatcher.got_connect = false;
}
CHECK_AND_WAIT_TRUE(conn->is_connected());
ASSERT_TRUE(conn->is_connected());
m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
server_conn = server_msgr->connect_to(client_msgr->get_mytype(),
srv_dispatcher.last_accept);
ASSERT_EQ(1U, static_cast<Session*>(server_conn->get_priv().get())->get_count());
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
TEST_P(MessengerTest, AuthTest) {
g_ceph_context->_conf.set_val("auth_cluster_required", "cephx");
g_ceph_context->_conf.set_val("auth_service_required", "cephx");
g_ceph_context->_conf.set_val("auth_client_required", "cephx");
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. simple auth round trip
MPing *m = new MPing();
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(conn->is_connected());
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
// 2. mix auth
g_ceph_context->_conf.set_val("auth_cluster_required", "none");
g_ceph_context->_conf.set_val("auth_service_required", "none");
g_ceph_context->_conf.set_val("auth_client_required", "none");
conn->mark_down();
ASSERT_FALSE(conn->is_connected());
conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
MPing *m = new MPing();
ASSERT_EQ(conn->send_message(m), 0);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait(l, [&] { return cli_dispatcher.got_new; });
cli_dispatcher.got_new = false;
}
ASSERT_TRUE(conn->is_connected());
ASSERT_EQ(1U, static_cast<Session*>(conn->get_priv().get())->get_count());
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
TEST_P(MessengerTest, MessageTest) {
FakeDispatcher cli_dispatcher(false), srv_dispatcher(true);
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1");
Messenger::Policy p = Messenger::Policy::stateful_server(0);
server_msgr->set_policy(entity_name_t::TYPE_CLIENT, p);
p = Messenger::Policy::lossless_peer(0);
client_msgr->set_policy(entity_name_t::TYPE_OSD, p);
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->start();
// 1. A very large "front" (as well as "payload").
// Because an external message type would need to hook into
// Messenger::decode_message, we reuse an existing message class (MCommand).
ConnectionRef conn = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
{
uuid_d uuid;
uuid.generate_random();
vector<string> cmds;
string s("abcdefghijklmnopqrstuvwxyz");
for (int i = 0; i < 1024*30; i++)
cmds.push_back(s);
MCommand *m = new MCommand(uuid);
m->cmd = cmds;
conn->send_message(m);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait_for(l, 500s, [&] { return cli_dispatcher.got_new; });
ASSERT_TRUE(cli_dispatcher.got_new);
cli_dispatcher.got_new = false;
}
// 2. A very large "data"
{
bufferlist bl;
string s("abcdefghijklmnopqrstuvwxyz");
for (int i = 0; i < 1024*30; i++)
bl.append(s);
MPing *m = new MPing();
m->set_data(bl);
conn->send_message(m);
std::unique_lock l{cli_dispatcher.lock};
cli_dispatcher.cond.wait_for(l, 500s, [&] { return cli_dispatcher.got_new; });
ASSERT_TRUE(cli_dispatcher.got_new);
cli_dispatcher.got_new = false;
}
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr->wait();
client_msgr->wait();
}
class SyntheticWorkload;
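// Payload is the unit of synthetic traffic: a PING/PONG tag, a sequence
// number, and a random data blob, encoded via the DENC infrastructure.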
struct Payload {
enum Who : uint8_t {
PING = 0,
PONG = 1,
};
uint8_t who = 0;
uint64_t seq = 0;
bufferlist data;
Payload(Who who, uint64_t seq, const bufferlist& data)
: who(who), seq(seq), data(data)
{}
Payload() = default;
DENC(Payload, v, p) {
DENC_START(1, 1, p);
denc(v.who, p);
denc(v.seq, p);
denc(v.data, p);
DENC_FINISH(p);
}
};
WRITE_CLASS_DENC(Payload)
ostream& operator<<(ostream& out, const Payload &pl)
{
return out << "reply=" << pl.who << " i = " << pl.seq;
}
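// SyntheticDispatcher tracks messages in flight for non-lossy policies:
// send_message_wrap records seq -> data in `sent` plus a per-connection FIFO
// in `conn_sent`; the PONG path asserts ordering and payload integrity
// before clearing the entries.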
class SyntheticDispatcher : public Dispatcher {
public:
ceph::mutex lock = ceph::make_mutex("SyntheticDispatcher::lock");
ceph::condition_variable cond;
bool is_server;
bool got_new;
bool got_remote_reset;
bool got_connect;
map<ConnectionRef, list<uint64_t> > conn_sent;
map<uint64_t, bufferlist> sent;
atomic<uint64_t> index;
SyntheticWorkload *workload;
SyntheticDispatcher(bool s, SyntheticWorkload *wl):
Dispatcher(g_ceph_context), is_server(s), got_new(false),
got_remote_reset(false), got_connect(false), index(0), workload(wl) {
}
bool ms_can_fast_dispatch_any() const override { return true; }
bool ms_can_fast_dispatch(const Message *m) const override {
switch (m->get_type()) {
case CEPH_MSG_PING:
case MSG_COMMAND:
return true;
default:
return false;
}
}
void ms_handle_fast_connect(Connection *con) override {
std::lock_guard l{lock};
list<uint64_t> c = conn_sent[con];
for (list<uint64_t>::iterator it = c.begin();
it != c.end(); ++it)
sent.erase(*it);
conn_sent.erase(con);
got_connect = true;
cond.notify_all();
}
void ms_handle_fast_accept(Connection *con) override {
std::lock_guard l{lock};
list<uint64_t> c = conn_sent[con];
for (list<uint64_t>::iterator it = c.begin();
it != c.end(); ++it)
sent.erase(*it);
conn_sent.erase(con);
cond.notify_all();
}
bool ms_dispatch(Message *m) override {
ceph_abort();
}
bool ms_handle_reset(Connection *con) override;
void ms_handle_remote_reset(Connection *con) override {
std::lock_guard l{lock};
list<uint64_t> c = conn_sent[con];
for (list<uint64_t>::iterator it = c.begin();
it != c.end(); ++it)
sent.erase(*it);
conn_sent.erase(con);
got_remote_reset = true;
}
bool ms_handle_refused(Connection *con) override {
return false;
}
void ms_fast_dispatch(Message *m) override {
// MSG_COMMAND is used to disrupt the regular message flow
if (m->get_type() == MSG_COMMAND) {
m->put();
return;
}
Payload pl;
auto p = m->get_data().cbegin();
decode(pl, p);
if (pl.who == Payload::PING) {
lderr(g_ceph_context) << __func__ << " conn=" << m->get_connection() << pl << dendl;
reply_message(m, pl);
m->put();
std::lock_guard l{lock};
got_new = true;
cond.notify_all();
} else {
std::lock_guard l{lock};
if (sent.count(pl.seq)) {
lderr(g_ceph_context) << __func__ << " conn=" << m->get_connection() << pl << dendl;
ASSERT_EQ(conn_sent[m->get_connection()].front(), pl.seq);
ASSERT_TRUE(pl.data.contents_equal(sent[pl.seq]));
conn_sent[m->get_connection()].pop_front();
sent.erase(pl.seq);
}
m->put();
got_new = true;
cond.notify_all();
}
}
int ms_handle_authentication(Connection *con) override {
return 1;
}
void reply_message(const Message *m, Payload& pl) {
pl.who = Payload::PONG;
bufferlist bl;
encode(pl, bl);
MPing *rm = new MPing();
rm->set_data(bl);
m->get_connection()->send_message(rm);
lderr(g_ceph_context) << __func__ << " conn=" << m->get_connection() << " reply m=" << m << " i=" << pl.seq << dendl;
}
void send_message_wrap(ConnectionRef con, const bufferlist& data) {
Message *m = new MPing();
Payload pl{Payload::PING, index++, data};
bufferlist bl;
encode(pl, bl);
m->set_data(bl);
if (!con->get_messenger()->get_default_policy().lossy) {
std::lock_guard l{lock};
sent[pl.seq] = pl.data;
conn_sent[con].push_back(pl.seq);
}
lderr(g_ceph_context) << __func__ << " conn=" << con.get() << " send m=" << m << " i=" << pl.seq << dendl;
ASSERT_EQ(0, con->send_message(m));
}
uint64_t get_pending() {
std::lock_guard l{lock};
return sent.size();
}
void clear_pending(ConnectionRef con) {
std::lock_guard l{lock};
for (list<uint64_t>::iterator it = conn_sent[con].begin();
it != conn_sent[con].end(); ++it)
sent.erase(*it);
conn_sent.erase(con);
}
void print() {
for (auto && p : conn_sent) {
if (!p.second.empty()) {
lderr(g_ceph_context) << __func__ << " " << p.first << " wait " << p.second.size() << dendl;
}
}
}
};
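// SyntheticWorkload drives random traffic across a pool of server and client
// messengers: it randomly creates connections, sends small and large
// messages, and drops connections while SyntheticDispatcher verifies delivery.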
class SyntheticWorkload {
ceph::mutex lock = ceph::make_mutex("SyntheticWorkload::lock");
ceph::condition_variable cond;
set<Messenger*> available_servers;
set<Messenger*> available_clients;
Messenger::Policy client_policy;
map<ConnectionRef, pair<Messenger*, Messenger*> > available_connections;
SyntheticDispatcher dispatcher;
gen_type rng;
vector<bufferlist> rand_data;
DummyAuthClientServer dummy_auth;
public:
const unsigned max_in_flight = 0;
const unsigned max_connections = 0;
static const unsigned max_message_len = 1024 * 1024 * 4;
SyntheticWorkload(int servers, int clients, string type, int random_num,
Messenger::Policy srv_policy, Messenger::Policy cli_policy,
int _max_in_flight = 64, int _max_connections = 128)
: client_policy(cli_policy),
dispatcher(false, this),
rng(time(NULL)),
dummy_auth(g_ceph_context),
max_in_flight(_max_in_flight),
max_connections(_max_connections) {
dummy_auth.auth_registry.refresh_config();
Messenger *msgr;
int base_port = 16800;
entity_addr_t bind_addr;
char addr[64];
for (int i = 0; i < servers; ++i) {
msgr = Messenger::create(g_ceph_context, type, entity_name_t::OSD(0),
"server", getpid()+i);
snprintf(addr, sizeof(addr), "v2:127.0.0.1:%d",
base_port+i);
bind_addr.parse(addr);
msgr->bind(bind_addr);
msgr->add_dispatcher_head(&dispatcher);
msgr->set_auth_client(&dummy_auth);
msgr->set_auth_server(&dummy_auth);
ceph_assert(msgr);
msgr->set_default_policy(srv_policy);
available_servers.insert(msgr);
msgr->start();
}
for (int i = 0; i < clients; ++i) {
msgr = Messenger::create(g_ceph_context, type, entity_name_t::CLIENT(-1),
"client", getpid()+i+servers);
if (cli_policy.standby) {
snprintf(addr, sizeof(addr), "v2:127.0.0.1:%d",
base_port+i+servers);
bind_addr.parse(addr);
msgr->bind(bind_addr);
}
msgr->add_dispatcher_head(&dispatcher);
msgr->set_auth_client(&dummy_auth);
msgr->set_auth_server(&dummy_auth);
ceph_assert(msgr);
msgr->set_default_policy(cli_policy);
available_clients.insert(msgr);
msgr->start();
}
for (int i = 0; i < random_num; i++) {
bufferlist bl;
boost::uniform_int<> u(32, max_message_len);
uint64_t value_len = u(rng);
bufferptr bp(value_len);
bp.zero();
for (uint64_t j = 0; j < value_len-sizeof(i); ) {
memcpy(bp.c_str()+j, &i, sizeof(i));
j += 4096;
}
bl.append(bp);
rand_data.push_back(bl);
}
}
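// Caller must hold `lock`. Throttles by busy-waiting (briefly dropping the
// lock) until in-flight messages drop below max_in_flight, then returns a
// uniformly random live connection.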
ConnectionRef _get_random_connection() {
while (dispatcher.get_pending() > max_in_flight) {
lock.unlock();
usleep(500);
lock.lock();
}
ceph_assert(ceph_mutex_is_locked(lock));
boost::uniform_int<> choose(0, available_connections.size() - 1);
int index = choose(rng);
map<ConnectionRef, pair<Messenger*, Messenger*> >::iterator i = available_connections.begin();
for (; index > 0; --index, ++i) ;
return i->first;
}
bool can_create_connection() {
return available_connections.size() < max_connections;
}
void generate_connection() {
std::lock_guard l{lock};
if (!can_create_connection())
return;
Messenger *server, *client;
{
boost::uniform_int<> choose(0, available_servers.size() - 1);
int index = choose(rng);
set<Messenger*>::iterator i = available_servers.begin();
for (; index > 0; --index, ++i) ;
server = *i;
}
{
boost::uniform_int<> choose(0, available_clients.size() - 1);
int index = choose(rng);
set<Messenger*>::iterator i = available_clients.begin();
for (; index > 0; --index, ++i) ;
client = *i;
}
pair<Messenger*, Messenger*> p = make_pair(client, server);
ConnectionRef conn = client->connect_to(server->get_mytype(),
server->get_myaddrs());
available_connections[conn] = p;
}
void send_message() {
std::lock_guard l{lock};
ConnectionRef conn = _get_random_connection();
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val >= 95) {
uuid_d uuid;
uuid.generate_random();
MCommand *m = new MCommand(uuid);
vector<string> cmds;
cmds.push_back("command");
m->cmd = cmds;
m->set_priority(200);
conn->send_message(m);
} else {
boost::uniform_int<> u(0, rand_data.size()-1);
dispatcher.send_message_wrap(conn, rand_data[u(rng)]);
}
}
void send_large_message(bool inject_network_congestion=false) {
std::lock_guard l{lock};
ConnectionRef conn = _get_random_connection();
uuid_d uuid;
uuid.generate_random();
MCommand *m = new MCommand(uuid);
vector<string> cmds;
cmds.push_back("command");
    // fill bl with repeated data to build the large message
    bufferlist bl;
    string s("abcdefghijklmnopqrstuvwxyz");
    for (int i = 0; i < 1024*256; i++)
      bl.append(s);
    // bl is ~6.5M (26 bytes * 256K appends)
m->set_data(bl);
m->cmd = cmds;
m->set_priority(200);
    // only inject congestion once the connection is established
if (inject_network_congestion && conn->is_connected()) {
g_ceph_context->_conf.set_val("ms_inject_network_congestion", "100");
} else {
g_ceph_context->_conf.set_val("ms_inject_network_congestion", "0");
}
conn->send_message(m);
}
void drop_connection() {
std::lock_guard l{lock};
if (available_connections.size() < 10)
return;
ConnectionRef conn = _get_random_connection();
dispatcher.clear_pending(conn);
conn->mark_down();
if (!client_policy.server &&
!client_policy.lossy &&
client_policy.standby) {
      // lossless policies reconnect automatically, so both sides must be marked down
pair<Messenger*, Messenger*> &p = available_connections[conn];
if (!p.first->get_default_policy().server && !p.second->get_default_policy().server) {
ASSERT_EQ(conn->get_messenger(), p.first);
ConnectionRef peer = p.second->connect_to(p.first->get_mytype(),
p.first->get_myaddrs());
peer->mark_down();
dispatcher.clear_pending(peer);
available_connections.erase(peer);
}
}
ASSERT_EQ(available_connections.erase(conn), 1U);
}
void print_internal_state(bool detail=false) {
std::lock_guard l{lock};
lderr(g_ceph_context) << "available_connections: " << available_connections.size()
<< " inflight messages: " << dispatcher.get_pending() << dendl;
if (detail && !available_connections.empty()) {
dispatcher.print();
}
}
void wait_for_done() {
int64_t tick_us = 1000 * 100; // 100ms
int64_t timeout_us = 5 * 60 * 1000 * 1000; // 5 mins
int i = 0;
while (dispatcher.get_pending()) {
usleep(tick_us);
timeout_us -= tick_us;
if (i++ % 50 == 0)
print_internal_state(true);
if (timeout_us < 0)
ceph_abort_msg(" loop time exceed 5 mins, it looks we stuck into some problems!");
}
for (set<Messenger*>::iterator it = available_servers.begin();
it != available_servers.end(); ++it) {
(*it)->shutdown();
(*it)->wait();
ASSERT_EQ((*it)->get_dispatch_queue_len(), 0);
delete (*it);
}
available_servers.clear();
for (set<Messenger*>::iterator it = available_clients.begin();
it != available_clients.end(); ++it) {
(*it)->shutdown();
(*it)->wait();
ASSERT_EQ((*it)->get_dispatch_queue_len(), 0);
delete (*it);
}
available_clients.clear();
}
void handle_reset(Connection *con) {
std::lock_guard l{lock};
available_connections.erase(con);
dispatcher.clear_pending(con);
}
};
bool SyntheticDispatcher::ms_handle_reset(Connection *con) {
workload->handle_reset(con);
return true;
}
TEST_P(MessengerTest, SyntheticStressTest) {
SyntheticWorkload test_msg(8, 32, GetParam(), 100,
Messenger::Policy::stateful_server(0),
Messenger::Policy::lossless_client(0));
for (int i = 0; i < 100; ++i) {
if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl;
test_msg.generate_connection();
}
gen_type rng(time(NULL));
for (int i = 0; i < 5000; ++i) {
if (!(i % 10)) {
lderr(g_ceph_context) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
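    // Weighted op mix: ~9% new connections, 10% drops, 70% sends,
    // 11% short sleeps.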
if (val > 90) {
test_msg.generate_connection();
} else if (val > 80) {
test_msg.drop_connection();
} else if (val > 10) {
test_msg.send_message();
} else {
usleep(rand() % 1000 + 500);
}
}
test_msg.wait_for_done();
}
TEST_P(MessengerTest, SyntheticStressTest1) {
SyntheticWorkload test_msg(16, 32, GetParam(), 100,
Messenger::Policy::lossless_peer_reuse(0),
Messenger::Policy::lossless_peer_reuse(0));
for (int i = 0; i < 10; ++i) {
if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl;
test_msg.generate_connection();
}
gen_type rng(time(NULL));
for (int i = 0; i < 10000; ++i) {
if (!(i % 10)) {
lderr(g_ceph_context) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val > 80) {
test_msg.generate_connection();
} else if (val > 60) {
test_msg.drop_connection();
} else if (val > 10) {
test_msg.send_message();
} else {
usleep(rand() % 1000 + 500);
}
}
test_msg.wait_for_done();
}
TEST_P(MessengerTest, SyntheticInjectTest) {
uint64_t dispatch_throttle_bytes = g_ceph_context->_conf->ms_dispatch_throttle_bytes;
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "30");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1");
g_ceph_context->_conf.set_val("ms_dispatch_throttle_bytes", "16777216");
SyntheticWorkload test_msg(8, 32, GetParam(), 100,
Messenger::Policy::stateful_server(0),
Messenger::Policy::lossless_client(0));
for (int i = 0; i < 100; ++i) {
if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl;
test_msg.generate_connection();
}
gen_type rng(time(NULL));
for (int i = 0; i < 1000; ++i) {
if (!(i % 10)) {
lderr(g_ceph_context) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val > 90) {
test_msg.generate_connection();
} else if (val > 80) {
test_msg.drop_connection();
} else if (val > 10) {
test_msg.send_message();
} else {
usleep(rand() % 500 + 100);
}
}
test_msg.wait_for_done();
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0");
g_ceph_context->_conf.set_val(
"ms_dispatch_throttle_bytes", std::to_string(dispatch_throttle_bytes));
}
TEST_P(MessengerTest, SyntheticInjectTest2) {
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "30");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1");
SyntheticWorkload test_msg(8, 16, GetParam(), 100,
Messenger::Policy::lossless_peer_reuse(0),
Messenger::Policy::lossless_peer_reuse(0));
for (int i = 0; i < 100; ++i) {
if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl;
test_msg.generate_connection();
}
gen_type rng(time(NULL));
for (int i = 0; i < 1000; ++i) {
if (!(i % 10)) {
lderr(g_ceph_context) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val > 90) {
test_msg.generate_connection();
} else if (val > 80) {
test_msg.drop_connection();
} else if (val > 10) {
test_msg.send_message();
} else {
usleep(rand() % 500 + 100);
}
}
test_msg.wait_for_done();
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0");
}
TEST_P(MessengerTest, SyntheticInjectTest3) {
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "600");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1");
SyntheticWorkload test_msg(8, 16, GetParam(), 100,
Messenger::Policy::stateless_server(0),
Messenger::Policy::lossy_client(0));
for (int i = 0; i < 100; ++i) {
if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl;
test_msg.generate_connection();
}
gen_type rng(time(NULL));
for (int i = 0; i < 1000; ++i) {
if (!(i % 10)) {
lderr(g_ceph_context) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val > 90) {
test_msg.generate_connection();
} else if (val > 80) {
test_msg.drop_connection();
} else if (val > 10) {
test_msg.send_message();
} else {
usleep(rand() % 500 + 100);
}
}
test_msg.wait_for_done();
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0");
}
TEST_P(MessengerTest, SyntheticInjectTest4) {
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "30");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0.1");
g_ceph_context->_conf.set_val("ms_inject_delay_probability", "1");
g_ceph_context->_conf.set_val("ms_inject_delay_type", "client osd");
g_ceph_context->_conf.set_val("ms_inject_delay_max", "5");
SyntheticWorkload test_msg(16, 32, GetParam(), 100,
Messenger::Policy::lossless_peer(0),
Messenger::Policy::lossless_peer(0));
for (int i = 0; i < 100; ++i) {
if (!(i % 10)) lderr(g_ceph_context) << "seeding connection " << i << dendl;
test_msg.generate_connection();
}
gen_type rng(time(NULL));
for (int i = 0; i < 1000; ++i) {
if (!(i % 10)) {
lderr(g_ceph_context) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val > 95) {
test_msg.generate_connection();
} else if (val > 80) {
// test_msg.drop_connection();
} else if (val > 10) {
test_msg.send_message();
} else {
usleep(rand() % 500 + 100);
}
}
test_msg.wait_for_done();
g_ceph_context->_conf.set_val("ms_inject_socket_failures", "0");
g_ceph_context->_conf.set_val("ms_inject_internal_delays", "0");
g_ceph_context->_conf.set_val("ms_inject_delay_probability", "0");
g_ceph_context->_conf.set_val("ms_inject_delay_type", "");
g_ceph_context->_conf.set_val("ms_inject_delay_max", "0");
}
// This tests a blocked network, i.e. ::send() returning EAGAIN
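// (Presumably ms_inject_network_congestion makes the send path behave as
// if ::send() kept returning EAGAIN, so outgoing messages back up in the
// connection's out queue until the injection is cleared.)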
TEST_P(MessengerTest, SyntheticInjectTest5) {
SyntheticWorkload test_msg(1, 8, GetParam(), 100,
Messenger::Policy::stateful_server(0),
Messenger::Policy::lossless_client(0),
64, 2);
bool simulate_network_congestion = true;
for (int i = 0; i < 2; ++i)
test_msg.generate_connection();
for (int i = 0; i < 5000; ++i) {
if (!(i % 10)) {
ldout(g_ceph_context, 0) << "Op " << i << ": " << dendl;
test_msg.print_internal_state();
}
if (i < 1600) {
      // keep roughly 1600 * 6M (~9.6G) queued across the 2 connections
test_msg.send_large_message(simulate_network_congestion);
} else {
simulate_network_congestion = false;
test_msg.send_large_message(simulate_network_congestion);
}
}
test_msg.wait_for_done();
}
class MarkdownDispatcher : public Dispatcher {
ceph::mutex lock = ceph::make_mutex("MarkdownDispatcher::lock");
set<ConnectionRef> conns;
bool last_mark;
public:
std::atomic<uint64_t> count = { 0 };
explicit MarkdownDispatcher(bool s): Dispatcher(g_ceph_context),
last_mark(false) {
}
bool ms_can_fast_dispatch_any() const override { return false; }
bool ms_can_fast_dispatch(const Message *m) const override {
switch (m->get_type()) {
case CEPH_MSG_PING:
return true;
default:
return false;
}
}
void ms_handle_fast_connect(Connection *con) override {
lderr(g_ceph_context) << __func__ << " " << con << dendl;
std::lock_guard l{lock};
conns.insert(con);
}
void ms_handle_fast_accept(Connection *con) override {
std::lock_guard l{lock};
conns.insert(con);
}
bool ms_dispatch(Message *m) override {
lderr(g_ceph_context) << __func__ << " conn: " << m->get_connection() << dendl;
std::lock_guard l{lock};
count++;
conns.insert(m->get_connection());
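    // Do nothing until at least two connections have been seen; after
    // that, mark down one *other* connection per dispatched message,
    // and reset once every tracked connection is gone.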
if (conns.size() < 2 && !last_mark) {
m->put();
return true;
}
last_mark = true;
usleep(rand() % 500);
for (set<ConnectionRef>::iterator it = conns.begin(); it != conns.end(); ++it) {
if ((*it) != m->get_connection().get()) {
(*it)->mark_down();
conns.erase(it);
break;
}
}
if (conns.empty())
last_mark = false;
m->put();
return true;
}
bool ms_handle_reset(Connection *con) override {
lderr(g_ceph_context) << __func__ << " " << con << dendl;
std::lock_guard l{lock};
conns.erase(con);
usleep(rand() % 500);
return true;
}
void ms_handle_remote_reset(Connection *con) override {
std::lock_guard l{lock};
conns.erase(con);
lderr(g_ceph_context) << __func__ << " " << con << dendl;
}
bool ms_handle_refused(Connection *con) override {
return false;
}
void ms_fast_dispatch(Message *m) override {
ceph_abort();
}
int ms_handle_authentication(Connection *con) override {
return 1;
}
};
// Test mark_down() called while the dispatcher holds its own (external) lock
TEST_P(MessengerTest, MarkdownTest) {
Messenger *server_msgr2 = Messenger::create(g_ceph_context, string(GetParam()), entity_name_t::OSD(0), "server", getpid());
MarkdownDispatcher cli_dispatcher(false), srv_dispatcher(true);
DummyAuthClientServer dummy_auth(g_ceph_context);
dummy_auth.auth_registry.refresh_config();
entity_addr_t bind_addr;
bind_addr.parse("v2:127.0.0.1:16800");
server_msgr->bind(bind_addr);
server_msgr->add_dispatcher_head(&srv_dispatcher);
server_msgr->set_auth_client(&dummy_auth);
server_msgr->set_auth_server(&dummy_auth);
server_msgr->start();
bind_addr.parse("v2:127.0.0.1:16801");
server_msgr2->bind(bind_addr);
server_msgr2->add_dispatcher_head(&srv_dispatcher);
server_msgr2->set_auth_client(&dummy_auth);
server_msgr2->set_auth_server(&dummy_auth);
server_msgr2->start();
client_msgr->add_dispatcher_head(&cli_dispatcher);
client_msgr->set_auth_client(&dummy_auth);
client_msgr->set_auth_server(&dummy_auth);
client_msgr->start();
int i = 1000;
uint64_t last = 0;
bool equal = false;
uint64_t equal_count = 0;
while (i--) {
ConnectionRef conn1 = client_msgr->connect_to(server_msgr->get_mytype(),
server_msgr->get_myaddrs());
ConnectionRef conn2 = client_msgr->connect_to(server_msgr2->get_mytype(),
server_msgr2->get_myaddrs());
MPing *m = new MPing();
ASSERT_EQ(conn1->send_message(m), 0);
m = new MPing();
ASSERT_EQ(conn2->send_message(m), 0);
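    // The server must keep making progress despite the constant
    // mark_downs; if its dispatch count stalls for several consecutive
    // rounds we consider the messenger stuck (asserted below).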
CHECK_AND_WAIT_TRUE(srv_dispatcher.count > last + 1);
if (srv_dispatcher.count == last) {
lderr(g_ceph_context) << __func__ << " last is " << last << dendl;
equal = true;
equal_count++;
} else {
equal = false;
equal_count = 0;
}
last = srv_dispatcher.count;
if (equal_count)
usleep(1000*500);
ASSERT_FALSE(equal && equal_count > 3);
}
server_msgr->shutdown();
client_msgr->shutdown();
server_msgr2->shutdown();
server_msgr->wait();
client_msgr->wait();
server_msgr2->wait();
delete server_msgr2;
}
INSTANTIATE_TEST_SUITE_P(
Messenger,
MessengerTest,
::testing::Values(
"async+posix"
)
);
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
g_ceph_context->_conf.set_val("auth_cluster_required", "none");
g_ceph_context->_conf.set_val("auth_service_required", "none");
g_ceph_context->_conf.set_val("auth_client_required", "none");
g_ceph_context->_conf.set_val("keyring", "/dev/null");
g_ceph_context->_conf.set_val("enable_experimental_unrecoverable_data_corrupting_features", "ms-type-async");
g_ceph_context->_conf.set_val("ms_die_on_bad_msg", "true");
g_ceph_context->_conf.set_val("ms_die_on_old_message", "true");
g_ceph_context->_conf.set_val("ms_max_backoff", "1");
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make -j4 ceph_test_msgr && valgrind --tool=memcheck ./ceph_test_msgr"
* End:
*/
| 79,934 | 31.962887 | 129 |
cc
|
null |
ceph-main/src/test/msgr/test_userspace_event.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 XSky <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <map>
#include <random>
#include <gtest/gtest.h>
#include "msg/async/dpdk/UserspaceEvent.h"
#include "global/global_context.h"
class UserspaceManagerTest : public ::testing::Test {
public:
UserspaceEventManager *manager;
UserspaceManagerTest() {}
virtual void SetUp() {
manager = new UserspaceEventManager(g_ceph_context);
}
virtual void TearDown() {
delete manager;
}
};
TEST_F(UserspaceManagerTest, BasicTest) {
int events[10];
int masks[10];
int fd = manager->get_eventfd();
ASSERT_EQ(fd, 1);
ASSERT_EQ(0, manager->listen(fd, 1));
ASSERT_EQ(0, manager->notify(fd, 1));
ASSERT_EQ(1, manager->poll(events, masks, 10, nullptr));
ASSERT_EQ(fd, events[0]);
ASSERT_EQ(1, masks[0]);
ASSERT_EQ(0, manager->notify(fd, 2));
ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr));
ASSERT_EQ(0, manager->unlisten(fd, 1));
ASSERT_EQ(0, manager->notify(fd, 1));
ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr));
manager->close(fd);
fd = manager->get_eventfd();
ASSERT_EQ(fd, 1);
ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr));
}
TEST_F(UserspaceManagerTest, FailTest) {
int events[10];
int masks[10];
int fd = manager->get_eventfd();
ASSERT_EQ(fd, 1);
ASSERT_EQ(-ENOENT, manager->listen(fd+1, 1));
ASSERT_EQ(-ENOENT, manager->notify(fd+1, 1));
ASSERT_EQ(0, manager->poll(events, masks, 10, nullptr));
ASSERT_EQ(-ENOENT, manager->unlisten(fd+1, 1));
manager->close(fd);
}
TEST_F(UserspaceManagerTest, StressTest) {
std::vector<std::pair<int, int> > mappings;
int events[10];
int masks[10];
std::random_device rd;
std::default_random_engine rng(rd());
std::uniform_int_distribution<> dist(0, 100);
mappings.resize(1001);
mappings[0] = std::make_pair(-1, -1);
for (int i = 0; i < 1000; ++i) {
int fd = manager->get_eventfd();
ASSERT_TRUE(fd > 0);
mappings[fd] = std::make_pair(0, 0);
}
int r = 0;
int fd = manager->get_eventfd();
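  // mappings[fd] models the manager's state: .first is the registered
  // listen mask, .second the pending notify mask, and (-1, -1) marks a
  // closed fd. An fd is expected to poll as active when the two masks
  // intersect, which get_activate_count() below computes.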
auto get_activate_count = [](std::vector<std::pair<int, int> > &m) {
std::vector<int> fds;
int mask = 0;
size_t idx = 0;
for (auto &&p : m) {
mask = p.first & p.second;
if (p.first != -1 && mask) {
p.second &= (~mask);
fds.push_back(idx);
std::cerr << " activate " << idx << " mask " << mask << std::endl;
}
++idx;
}
return fds;
};
for (int i = 0; i < 10000; ++i) {
int value = dist(rng);
fd = dist(rng) % mappings.size();
auto &p = mappings[fd];
int mask = dist(rng) % 2 + 1;
if (value > 55) {
r = manager->notify(fd, mask);
if (p.first == -1) {
ASSERT_EQ(p.second, -1);
ASSERT_EQ(r, -ENOENT);
} else {
p.second |= mask;
ASSERT_EQ(r, 0);
}
std::cerr << " notify fd " << fd << " mask " << mask << " r " << r << std::endl;
} else if (value > 45) {
r = manager->listen(fd, mask);
std::cerr << " listen fd " << fd << " mask " << mask << " r " << r << std::endl;
if (p.first == -1) {
ASSERT_EQ(p.second, -1);
ASSERT_EQ(r, -ENOENT);
} else {
p.first |= mask;
ASSERT_EQ(r, 0);
}
} else if (value > 35) {
r = manager->unlisten(fd, mask);
std::cerr << " unlisten fd " << fd << " mask " << mask << " r " << r << std::endl;
if (p.first == -1) {
ASSERT_EQ(p.second, -1);
ASSERT_EQ(r, -ENOENT);
} else {
p.first &= ~mask;
ASSERT_EQ(r, 0);
}
} else if (value > 20) {
std::set<int> actual, expected;
do {
r = manager->poll(events, masks, 3, nullptr);
std::cerr << " poll " << r;
for (int k = 0; k < r; ++k) {
std::cerr << events[k] << " ";
actual.insert(events[k]);
}
} while (r == 3);
std::cerr << std::endl;
auto fds = get_activate_count(mappings);
for (auto &&d : fds)
expected.insert(d);
ASSERT_EQ(expected, actual);
} else if (value > 10) {
r = manager->get_eventfd();
std::cerr << " open fd " << r << std::endl;
ASSERT_TRUE(r > 0);
if ((size_t)r >= mappings.size())
mappings.resize(r+1);
mappings[r] = std::make_pair(0, 0);
} else {
manager->close(fd);
std::cerr << " close fd " << fd << std::endl;
mappings[fd] = std::make_pair(-1, -1);
}
ASSERT_TRUE(manager->check());
}
}
/*
 * Local Variables:
 * compile-command: "cd ../.. ; make ceph_test_userspace_event && ./ceph_test_userspace_event"
 * End:
 */
| 5,005 | 27.605714 | 88 |
cc
|
null |
ceph-main/src/test/neorados/common_tests.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <cstring>
#include <string>
#include <string_view>
#include <boost/asio/ip/host_name.hpp>
#include <fmt/format.h>
#include "common_tests.h"
#include "include/neorados/RADOS.hpp"
namespace ba = boost::asio;
namespace R = neorados;
std::string get_temp_pool_name(std::string_view prefix)
{
static auto hostname = ba::ip::host_name();
static auto num = 1ull;
return fmt::format("{}{}-{}-{}", prefix, hostname, getpid(), num++);
}
| 898 | 24.685714 | 70 |
cc
|
null |
ceph-main/src/test/neorados/common_tests.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <string>
#include <string_view>
#include "include/neorados/RADOS.hpp"
std::string get_temp_pool_name(std::string_view prefix = {});
template<typename CompletionToken>
auto create_pool(neorados::RADOS& r, std::string_view pname,
CompletionToken&& token)
{
boost::asio::async_completion<CompletionToken,
void(boost::system::error_code,
std::int64_t)> init(token);
r.create_pool(pname, std::nullopt,
[&r, pname = std::string(pname),
h = std::move(init.completion_handler)]
(boost::system::error_code ec) mutable {
r.lookup_pool(
pname,
[h = std::move(h)]
(boost::system::error_code ec, std::int64_t pool) mutable {
std::move(h)(ec, pool);
});
});
return init.result.get();
}
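// A minimal usage sketch (illustrative only): assuming a connected
// neorados::RADOS instance `rados` and a blocking completion token such
// as ceph::async::use_blocked, the pool id comes back directly:
//
//   std::int64_t pool = create_pool(rados, get_temp_pool_name("test-"),
//                                   ceph::async::use_blocked);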
| 1,202 | 27.642857 | 70 |
h
|
null |
ceph-main/src/test/neorados/completions.cc
|
#include <cassert>
#include <boost/asio.hpp>
#include <boost/system/system_error.hpp>
constexpr int max_completions = 10'000'000;
int completed = 0;
boost::asio::io_context c;
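// Completion-dispatch micro-benchmark: re-post a single handler ten
// million times through the io_context and assert that every post ran.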
void nested_cb() {
if (++completed < max_completions)
c.post(&nested_cb);
}
int main(void) {
c.post(&nested_cb);
c.run();
assert(completed == max_completions);
return 0;
}
| 366 | 16.47619 | 43 |
cc
|
null |
ceph-main/src/test/neorados/list_pool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <cstring>
#include <iostream>
#include <initializer_list>
#include <optional>
#include <thread>
#include <tuple>
#include <string_view>
#include <vector>
#include <sys/param.h>
#include <unistd.h>
#include <boost/system/system_error.hpp>
#include <fmt/format.h>
#include "include/neorados/RADOS.hpp"
#include "include/scope_guard.h"
#include "common/async/context_pool.h"
#include "common/ceph_time.h"
#include "common/ceph_argparse.h"
#include "common/async/blocked_completion.h"
#include "global/global_init.h"
#include "test/neorados/common_tests.h"
namespace ba = boost::asio;
namespace bs = boost::system;
namespace ca = ceph::async;
namespace R = neorados;
std::string_view hostname() {
static char hostname[MAXHOSTNAMELEN] = { 0 };
static size_t len = 0;
if (!len) {
auto r = gethostname(hostname, sizeof(hostname));
if (r != 0) {
throw bs::system_error(
errno, bs::system_category());
}
len = std::strlen(hostname);
}
return {hostname, len};
}
std::string temp_pool_name(const std::string_view prefix)
{
using namespace std::chrono;
static std::uint64_t num = 1;
return fmt::format(
"{}-{}-{}-{}-{}",
prefix,
hostname(),
getpid(),
duration_cast<milliseconds>(ceph::coarse_real_clock::now()
.time_since_epoch()).count(),
num++);
}
bs::error_code noisy_list(R::RADOS& r, int64_t p)
{
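  // Enumerate up to 1000 objects across all namespaces, printing the
  // begin/end/next cursors and the returned object set.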
auto b = R::Cursor::begin();
auto e = R::Cursor::end();
std::cout << "begin = " << b.to_str() << std::endl;
std::cout << "end = " << e.to_str() << std::endl;
try {
auto [v, next] = r.enumerate_objects(p, b, e, 1000, {}, ca::use_blocked,
R::all_nspaces);
std::cout << "Got " << v.size() << " entries." << std::endl;
std::cout << "next cursor = " << next.to_str() << std::endl;
std::cout << "next == end: " << (next == e) << std::endl;
std::cout << "Returned Objects: ";
std::cout << "[";
auto o = v.cbegin();
while (o != v.cend()) {
std::cout << *o;
if (++o != v.cend())
std::cout << " ";
}
std::cout << "]" << std::endl;
} catch (const bs::system_error& e) {
std::cerr << "RADOS::enumerate_objects: " << e.what() << std::endl;
return e.code();
}
return {};
}
bs::error_code create_several(R::RADOS& r, const R::IOContext& i,
std::initializer_list<std::string> l)
{
for (const auto& o : l) try {
R::WriteOp op;
std::cout << "Creating " << o << std::endl;
ceph::bufferlist bl;
bl.append("My bologna has no name.");
op.write_full(std::move(bl));
r.execute(o, i, std::move(op), ca::use_blocked);
} catch (const bs::system_error& e) {
std::cerr << "RADOS::execute: " << e.what() << std::endl;
return e.code();
}
return {};
}
int main(int argc, char** argv)
{
using namespace std::literals;
auto args = argv_to_vec(argc, argv);
env_to_vec(args);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY, 0);
common_init_finish(cct.get());
try {
ca::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p, ca::use_blocked);
auto pool_name = get_temp_pool_name("ceph_test_RADOS_list_pool"sv);
r.create_pool(pool_name, std::nullopt, ca::use_blocked);
auto pd = make_scope_guard(
[&pool_name, &r]() {
r.delete_pool(pool_name, ca::use_blocked);
});
auto pool = r.lookup_pool(pool_name, ca::use_blocked);
R::IOContext i(pool);
if (noisy_list(r, pool)) {
return 1;
}
if (create_several(r, i, {"meow", "woof", "squeak"})) {
return 1;
}
if (noisy_list(r, pool)) {
return 1;
}
} catch (const bs::system_error& e) {
std::cerr << "Error: " << e.what() << std::endl;
return 1;
}
return 0;
}
| 4,276 | 24.610778 | 76 |
cc
|
null |
ceph-main/src/test/neorados/op_speed.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/neorados/RADOS.hpp"
constexpr int to_create = 10'000'000;
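// Build and destroy ten million ReadOps, each carrying a read, stat,
// get_xattrs and get_omap_vals call, to exercise op construction and
// teardown cost; nothing is ever submitted to a cluster.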
int main() {
for (int i = 0; i < to_create; ++i) {
neorados::ReadOp op;
bufferlist bl;
std::uint64_t sz;
ceph::real_time tm;
boost::container::flat_map<std::string, ceph::buffer::list> xattrs;
boost::container::flat_map<std::string, ceph::buffer::list> omap;
bool trunc;
op.read(0, 0, &bl);
op.stat(&sz, &tm);
op.get_xattrs(&xattrs);
op.get_omap_vals(std::nullopt, std::nullopt, 1000, &omap, &trunc);
}
}
| 1,000 | 27.6 | 71 |
cc
|
null |
ceph-main/src/test/neorados/start_stop.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat <[email protected]>
* Author: Adam C. Emerson <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <thread>
#include <vector>
#include "include/neorados/RADOS.hpp"
#include "common/async/context_pool.h"
#include "common/ceph_argparse.h"
#include "global/global_init.h"
namespace R = neorados;
int main(int argc, char** argv)
{
using namespace std::literals;
auto args = argv_to_vec(argc, argv);
env_to_vec(args);
auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY, 0);
common_init_finish(cct.get());
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(30s);
}
std::this_thread::sleep_for(30s);
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(30s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(1s);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(500ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(500ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(50ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(50ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(50ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5ms);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5us);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5us);
}
{
ceph::async::io_context_pool p(1);
auto r = R::RADOS::make_with_cct(cct.get(), p,
boost::asio::use_future).get();
std::this_thread::sleep_for(5us);
}
return 0;
}
| 4,838 | 26.651429 | 70 |
cc
|
null |
ceph-main/src/test/neorados/test_neorados.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "include/neorados/RADOS.hpp"
#include "common/async/blocked_completion.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
#include <iostream>
#include <limits>
namespace neorados {
class TestNeoRADOS : public ::testing::Test {
public:
TestNeoRADOS() {
}
};
TEST_F(TestNeoRADOS, MakeWithLibRADOS) {
librados::Rados paleo_rados;
auto result = connect_cluster_pp(paleo_rados);
ASSERT_EQ("", result);
auto rados = RADOS::make_with_librados(paleo_rados);
ReadOp op;
bufferlist bl;
op.read(0, 0, &bl);
  // provide a pool that doesn't exist -- just testing the round-trip
ASSERT_THROW(
rados.execute({"dummy-obj"}, std::numeric_limits<int64_t>::max(),
std::move(op), nullptr, ceph::async::use_blocked),
boost::system::system_error);
}
} // namespace neorados
int main(int argc, char **argv) {
::testing::InitGoogleTest(&argc, argv);
int seed = getpid();
std::cout << "seed " << seed << std::endl;
srand(seed);
return RUN_ALL_TESTS();
}
| 1,132 | 22.604167 | 70 |
cc
|
null |
ceph-main/src/test/objectstore/Allocator_aging_fragmentation.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Bitmap allocator fragmentation benchmarks.
* Author: Adam Kupczyk, [email protected]
*/
#include <bit>
#include <iostream>
#include <boost/scoped_ptr.hpp>
#include <gtest/gtest.h>
#include <boost/random/triangle_distribution.hpp>
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "include/stringify.h"
#include "include/Context.h"
#include "os/bluestore/Allocator.h"
#include <boost/random/uniform_int.hpp>
typedef boost::mt11213b gen_type;
#include "common/debug.h"
#define dout_context cct
#define dout_subsys ceph_subsys_
struct Scenario {
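  // capacity is in GiB (scaled by _1G by the tests); high_mark/low_mark
  // are fill fractions of capacity; leakness is the per-fragment
  // probability of freeing a random extent during a fill.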
uint64_t capacity;
uint64_t alloc_unit;
double high_mark;
double low_mark;
double leakness;
uint32_t repeats;
};
std::vector<Scenario> scenarios{
Scenario{512, 65536, 0.8, 0.6, 0.1, 3},
Scenario{512, 65536, 0.9, 0.7, 0.0, 3},
Scenario{512, 65536, 0.9, 0.7, 0.1, 3},
Scenario{512, 65536, 0.8, 0.6, 0.5, 3},
Scenario{512, 65536, 0.9, 0.7, 0.5, 3},
Scenario{1024, 65536, 0.8, 0.6, 0.1, 3},
Scenario{1024, 65536, 0.9, 0.7, 0.0, 3},
Scenario{1024, 65536, 0.9, 0.7, 0.1, 3},
Scenario{1024*2, 65536, 0.8, 0.6, 0.3, 3},
Scenario{1024*2, 65536, 0.9, 0.7, 0.0, 3},
Scenario{1024*2, 65536, 0.9, 0.7, 0.3, 3},
Scenario{512, 65536/16, 0.8, 0.6, 0.1, 3},
Scenario{512, 65536/16, 0.9, 0.7, 0.0, 3},
Scenario{512, 65536/16, 0.9, 0.7, 0.1, 3},
Scenario{512, 65536/16, 0.8, 0.6, 0.5, 3},
Scenario{512, 65536/16, 0.9, 0.7, 0.5, 3},
Scenario{1024, 65536/16, 0.8, 0.6, 0.1, 3},
Scenario{1024, 65536/16, 0.9, 0.7, 0.0, 3},
Scenario{1024, 65536/16, 0.9, 0.7, 0.1, 3},
Scenario{1024*2, 65536/16, 0.8, 0.6, 0.3, 3},
Scenario{1024*2, 65536/16, 0.9, 0.7, 0.0, 3},
Scenario{1024*2, 65536/16, 0.9, 0.7, 0.3, 3}
};
void PrintTo(const Scenario& s, ::std::ostream* os)
{
*os << "(capacity=" << s.capacity;
*os << "G, alloc_unit=" << s.alloc_unit;
*os << ", high_mark=" << s.high_mark;
*os << ", low_mark=" << s.low_mark;
*os << ", leakness=" << s.leakness;
*os << ", repeats=" << s.repeats << ")";
}
bool verbose = getenv("VERBOSE") != nullptr;
class AllocTracker;
class AllocTest : public ::testing::TestWithParam<std::string> {
protected:
boost::scoped_ptr<AllocTracker> at;
gen_type rng;
static boost::intrusive_ptr<CephContext> cct;
public:
boost::scoped_ptr<Allocator> alloc;
AllocTest(): alloc(nullptr) {}
void init_alloc(const std::string& alloc_name, int64_t size, uint64_t min_alloc_size);
void init_close();
void doAgingTest(std::function<uint32_t()> size_generator,
const std::string& alloc_name, uint64_t capacity, uint32_t alloc_unit,
uint64_t high_mark, uint64_t low_mark, uint32_t iterations, double leak_factor = 0);
uint64_t capacity;
uint32_t alloc_unit;
uint64_t level = 0;
uint64_t allocs = 0;
uint64_t fragmented = 0;
uint64_t fragments = 0;
uint64_t total_fragments = 0;
void do_fill(uint64_t high_mark, std::function<uint32_t()> size_generator, double leak_factor = 0);
void do_free(uint64_t low_mark);
uint32_t free_random();
void TearDown() final;
static void SetUpTestSuite();
static void TearDownTestSuite();
};
struct test_result {
uint64_t tests_cnt = 0;
double fragmented_percent = 0;
double fragments_count = 0;
double time = 0;
double frag_score = 0;
};
std::map<std::string, test_result> results_per_allocator;
const uint64_t _1m = 1024 * 1024;
const uint64_t _1G = 1024 * 1024 * 1024;
const uint64_t _2m = 2 * 1024 * 1024;
class AllocTracker
{
std::vector<bluestore_pextent_t> allocations;
uint64_t size = 0;
public:
bool push(uint64_t offs, uint32_t len)
{
assert(len != 0);
if (size + 1 > allocations.size())
allocations.resize(size + 100);
allocations[size++] = bluestore_pextent_t(offs, len);
return true;
}
bool pop_random(gen_type& rng, uint64_t* offs, uint32_t* len,
uint32_t max_len = 0)
{
if (size == 0)
return false;
uint64_t pos = rng() % size;
*len = allocations[pos].length;
*offs = allocations[pos].offset;
if (max_len && *len > max_len) {
allocations[pos].length = *len - max_len;
allocations[pos].offset = *offs + max_len;
*len = max_len;
} else {
allocations[pos] = allocations[size-1];
--size;
}
return true;
}
};
boost::intrusive_ptr<CephContext> AllocTest::cct;
void AllocTest::init_alloc(const std::string& allocator_name, int64_t size, uint64_t min_alloc_size) {
this->capacity = size;
this->alloc_unit = min_alloc_size;
rng.seed(0);
alloc.reset(Allocator::create(cct.get(), allocator_name, size,
min_alloc_size));
at.reset(new AllocTracker());
}
void AllocTest::init_close() {
alloc.reset(0);
at.reset(nullptr);
}
uint32_t AllocTest::free_random() {
uint64_t o = 0;
uint32_t l = 0;
interval_set<uint64_t> release_set;
if (!at->pop_random(rng, &o, &l)) {
//empty?
return 0;
}
release_set.insert(o, l);
alloc->release(release_set);
level -= l;
return l;
}
void AllocTest::do_fill(uint64_t high_mark, std::function<uint32_t()> size_generator, double leak_factor) {
assert (leak_factor >= 0);
assert (leak_factor < 1);
uint32_t leak_level = leak_factor * std::numeric_limits<uint32_t>::max();
PExtentVector tmp;
while (level < high_mark)
{
uint32_t want = size_generator();
tmp.clear();
auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp);
if (r < want) {
break;
}
level += r;
for(auto a : tmp) {
bool full = !at->push(a.offset, a.length);
EXPECT_EQ(full, false);
}
allocs++;
if (tmp.size() > 1) {
fragmented ++;
total_fragments += r;
fragments += tmp.size();
}
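    // "Leak": with probability leak_factor per just-allocated fragment,
    // free one random tracked extent so frees interleave with fills.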
if (leak_level > 0) {
for (size_t i=0; i<tmp.size(); i++) {
if (uint32_t(rng()) < leak_level) {
free_random();
}
}
}
}
}
void AllocTest::do_free(uint64_t low_mark) {
while (level > low_mark)
{
if (free_random() == 0)
break;
}
}
void AllocTest::doAgingTest(
std::function<uint32_t()> size_generator,
const std::string& allocator_name,
uint64_t capacity, uint32_t alloc_unit,
uint64_t high_mark, uint64_t low_mark, uint32_t iterations, double leak_factor)
{
assert(std::has_single_bit(alloc_unit));
cct->_conf->bdev_block_size = alloc_unit;
PExtentVector allocated, tmp;
init_alloc(allocator_name, capacity, alloc_unit);
alloc->init_add_free(0, capacity);
utime_t start = ceph_clock_now();
level = 0;
allocs = 0;
fragmented = 0;
fragments = 0;
total_fragments = 0;
if (verbose) std::cout << "INITIAL FILL" << std::endl;
do_fill(high_mark, size_generator, leak_factor); //initial fill with data
if (verbose) std::cout << " fragmented allocs=" << 100.0 * fragmented / allocs << "%" <<
" #frags=" << ( fragmented != 0 ? double(fragments) / fragmented : 0 )<<
" time=" << (ceph_clock_now() - start) * 1000 << "ms" << std::endl;
for (uint32_t i=0; i < iterations; i++)
{
allocs = 0;
fragmented = 0;
fragments = 0;
total_fragments = 0;
uint64_t level_previous = level;
start = ceph_clock_now();
if (verbose) std::cout << "ADDING CAPACITY " << i + 1 << std::endl;
do_free(low_mark); //simulates adding new capacity to cluster
if (verbose) std::cout << " level change: " <<
double(level_previous) / capacity * 100 << "% -> " <<
double(level) / capacity * 100 << "% time=" <<
(ceph_clock_now() - start) * 1000 << "ms" << std::endl;
start = ceph_clock_now();
if (verbose) std::cout << "APPENDING " << i + 1 << std::endl;
do_fill(high_mark, size_generator, leak_factor); //only creating elements
if (verbose) std::cout << " fragmented allocs=" << 100.0 * fragmented / allocs << "%" <<
" #frags=" << ( fragmented != 0 ? double(fragments) / fragmented : 0 ) <<
" time=" << (ceph_clock_now() - start) * 1000 << "ms" << std::endl;
}
double frag_score = alloc->get_fragmentation_score();
do_free(0);
double free_frag_score = alloc->get_fragmentation_score();
ASSERT_EQ(alloc->get_free(), capacity);
std::cout << " fragmented allocs=" << 100.0 * fragmented / allocs << "%" <<
" #frags=" << ( fragmented != 0 ? double(fragments) / fragmented : 0 ) <<
" time=" << (ceph_clock_now() - start) * 1000 << "ms" <<
" frag.score=" << frag_score << " after free frag.score=" << free_frag_score << std::endl;
uint64_t sum = 0;
uint64_t cnt = 0;
auto list_free = [&](size_t off, size_t len) {
cnt++;
sum+=len;
};
alloc->dump(list_free);
ASSERT_EQ(sum, capacity);
if (verbose)
std::cout << "free chunks sum=" << sum << " free chunks count=" << cnt << std::endl;
//adding to totals
test_result &r = results_per_allocator[allocator_name];
r.tests_cnt ++;
r.fragmented_percent += 100.0 * fragmented / allocs;
r.fragments_count += ( fragmented != 0 ? double(fragments) / fragmented : 2 );
r.time += ceph_clock_now() - start;
r.frag_score += frag_score;
}
void AllocTest::SetUpTestSuite()
{
vector<const char*> args;
cct = global_init(NULL, args,
CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(cct.get());
}
void AllocTest::TearDown()
{
at.reset();
alloc.reset();
}
void AllocTest::TearDownTestSuite()
{
cct.reset();
std::cout << "Summary: " << std::endl;
for (auto& r: results_per_allocator) {
std::cout << r.first <<
" fragmented allocs=" << r.second.fragmented_percent / r.second.tests_cnt << "%" <<
" #frags=" << r.second.fragments_count / r.second.tests_cnt <<
" free_score=" << r.second.frag_score / r.second.tests_cnt <<
" time=" << r.second.time * 1000 << "ms" << std::endl;
}
}
TEST_P(AllocTest, test_alloc_triangle_0_8M_16M)
{
std::string allocator_name = GetParam();
boost::triangle_distribution<double> D(1, (8 * 1024 * 1024) , (16 * 1024 * 1024) );
for (auto& s:scenarios) {
std::cout << "Allocator: " << allocator_name << ", ";
PrintTo(s, &std::cout);
std::cout << std::endl;
auto size_generator = [&]() -> uint32_t {
return (uint32_t(D(rng)) + s.alloc_unit) & ~(s.alloc_unit - 1);
};
doAgingTest(size_generator, allocator_name, s.capacity * _1G, s.alloc_unit,
s.high_mark * s.capacity * _1G,
s.low_mark * s.capacity * _1G,
s.repeats, s.leakness);
}
}
TEST_P(AllocTest, test_alloc_8M_and_64K)
{
std::string allocator_name = GetParam();
constexpr uint32_t max_chunk_size = 8*1024*1024;
constexpr uint32_t min_chunk_size = 64*1024;
for (auto& s:scenarios) {
std::cout << "Allocator: " << allocator_name << ", ";
PrintTo(s, &std::cout);
std::cout << std::endl;
boost::uniform_int<> D(0, 1);
auto size_generator = [&]() -> uint32_t {
if (D(rng) == 0)
return max_chunk_size;
else
return min_chunk_size;
};
doAgingTest(size_generator, allocator_name, s.capacity * _1G, s.alloc_unit,
s.high_mark * s.capacity * _1G,
s.low_mark * s.capacity * _1G,
s.repeats, s.leakness);
}
}
TEST_P(AllocTest, test_alloc_fragmentation_max_chunk_8M)
{
std::string allocator_name = GetParam();
constexpr uint32_t max_object_size = 150*1000*1000;
constexpr uint32_t max_chunk_size = 8*1024*1024;
for (auto& s:scenarios) {
std::cout << "Allocator: " << allocator_name << ", ";
PrintTo(s, &std::cout);
std::cout << std::endl;
boost::uniform_int<> D(1, max_object_size / s.alloc_unit);
uint32_t object_size = 0;
auto size_generator = [&]() -> uint32_t {
uint32_t c;
if (object_size == 0)
object_size = (uint32_t(D(rng))* s.alloc_unit);
if (object_size > max_chunk_size)
c = max_chunk_size;
else
c = object_size;
object_size -= c;
return c;
};
doAgingTest(size_generator, allocator_name, s.capacity * _1G, s.alloc_unit,
s.high_mark * s.capacity * _1G,
s.low_mark * s.capacity * _1G,
s.repeats, s.leakness);
}
}
TEST_P(AllocTest, test_bonus_empty_fragmented)
{
uint64_t capacity = uint64_t(512) * 1024 * 1024 * 1024; //512 G
uint64_t alloc_unit = 64 * 1024;
std::string allocator_name = GetParam();
std::cout << "Allocator: " << allocator_name << std::endl;
init_alloc(allocator_name, capacity, alloc_unit);
alloc->init_add_free(0, capacity);
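  // Allocate 1M chunks and return them immediately, but release each
  // single-extent allocation in three out-of-order pieces; the allocator
  // ends up completely free, and the final score shows how well it
  // coalesced the scattered releases.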
PExtentVector tmp;
for (size_t i = 0; i < capacity / (1024 * 1024); i++) {
tmp.clear();
uint32_t want = 1024 * 1024;
int r = alloc->allocate(want, alloc_unit, 0, 0, &tmp);
ASSERT_EQ(r, want);
if (tmp.size() > 1) {
interval_set<uint64_t> release_set;
for (auto& t: tmp) {
release_set.insert(t.offset, t.length);
}
alloc->release(release_set);
} else {
interval_set<uint64_t> release_set;
uint64_t offset = tmp[0].offset;
uint64_t length = tmp[0].length;
release_set.insert(offset + alloc_unit, length - 3 * alloc_unit);
alloc->release(release_set);
release_set.clear();
release_set.insert(offset , alloc_unit);
alloc->release(release_set);
release_set.clear();
release_set.insert(offset + length - 2 * alloc_unit, 2 * alloc_unit);
alloc->release(release_set);
release_set.clear();
}
}
double frag_score = alloc->get_fragmentation_score();
ASSERT_EQ(alloc->get_free(), capacity);
std::cout << " empty storage frag.score=" << frag_score << std::endl;
}
INSTANTIATE_TEST_SUITE_P(
Allocator,
AllocTest,
::testing::Values("stupid", "bitmap", "avl", "btree"));
| 13,776 | 28.69181 | 107 |
cc
|
null |
ceph-main/src/test/objectstore/Allocator_bench.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* In memory space allocator benchmarks.
* Author: Igor Fedotov, [email protected]
*/
#include <iostream>
#include <boost/scoped_ptr.hpp>
#include <gtest/gtest.h>
#include "common/Cond.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "include/Context.h"
#include "os/bluestore/Allocator.h"
#include <boost/random/uniform_int.hpp>
typedef boost::mt11213b gen_type;
#include "common/debug.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_
using namespace std;
class AllocTest : public ::testing::TestWithParam<const char*> {
public:
boost::scoped_ptr<Allocator> alloc;
AllocTest(): alloc(0) { }
void init_alloc(int64_t size, uint64_t min_alloc_size) {
std::cout << "Creating alloc type " << string(GetParam()) << " \n";
alloc.reset(Allocator::create(g_ceph_context, GetParam(), size,
min_alloc_size));
}
void init_close() {
alloc.reset(0);
}
void doOverwriteTest(uint64_t capacity, uint64_t prefill,
uint64_t overwrite);
};
const uint64_t _1m = 1024 * 1024;
void dump_mempools()
{
ostringstream ostr;
Formatter* f = Formatter::create("json-pretty", "json-pretty", "json-pretty");
ostr << "Mempools: ";
f->open_object_section("mempools");
mempool::dump(f);
f->close_section();
f->flush(ostr);
delete f;
ldout(g_ceph_context, 0) << ostr.str() << dendl;
}
class AllocTracker
{
std::vector<uint64_t> allocations;
uint64_t head = 0;
uint64_t tail = 0;
uint64_t size = 0;
boost::uniform_int<> u1;
public:
AllocTracker(uint64_t capacity, uint64_t alloc_unit)
: u1(0, capacity)
{
ceph_assert(alloc_unit >= 0x100);
    // we use 5 octets (bytes 1-5) of each entry to store the offset,
    // saving space; this supports capacities up to 281 TB
    ceph_assert(capacity <= (uint64_t(1) << 48));
allocations.resize(capacity / alloc_unit);
}
inline uint64_t get_head() const
{
return head;
}
inline uint64_t get_tail() const
{
return tail;
}
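  // Each entry packs (offset, length) into one uint64_t as
  // (offs << 16) | (len >> 8); both values are 256-byte aligned, so the
  // 24-bit length field in the low bits never collides with the shifted
  // offset.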
bool push(uint64_t offs, uint32_t len)
{
ceph_assert((len & 0xff) == 0);
ceph_assert((offs & 0xff) == 0);
ceph_assert((offs & 0xffff000000000000) == 0);
if (head + 1 == tail)
return false;
uint64_t val = (offs << 16) | (len >> 8);
allocations[head++] = val;
head %= allocations.size();
++size;
return true;
}
bool pop(uint64_t* offs, uint32_t* len)
{
if (size == 0)
return false;
uint64_t val = allocations[tail++];
*len = uint64_t((val & 0xffffff) << 8);
*offs = (val >> 16) & ~uint64_t(0xff);
tail %= allocations.size();
--size;
return true;
}
bool pop_random(gen_type& rng, uint64_t* offs, uint32_t* len,
uint32_t max_len = 0)
{
if (size == 0)
return false;
uint64_t pos = (u1(rng) % size) + tail;
pos %= allocations.size();
uint64_t val = allocations[pos];
*len = uint64_t((val & 0xffffff) << 8);
*offs = (val >> 16) & ~uint64_t(0xff);
if (max_len && *len > max_len) {
val = ((*offs + max_len) << 16) | ((*len - max_len) >> 8);
allocations[pos] = val;
*len = max_len;
} else {
allocations[pos] = allocations[tail++];
tail %= allocations.size();
--size;
}
return true;
}
};
TEST_P(AllocTest, test_alloc_bench_seq)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
uint64_t alloc_unit = 4096;
uint64_t want_size = alloc_unit;
PExtentVector allocated, tmp;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
utime_t start = ceph_clock_now();
for (uint64_t i = 0; i < capacity; i += want_size)
{
tmp.clear();
EXPECT_EQ(static_cast<int64_t>(want_size),
alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
std::cout << "releasing..." << std::endl;
for (size_t i = 0; i < capacity; i += want_size)
{
interval_set<uint64_t> release_set;
release_set.insert(i, want_size);
alloc->release(release_set);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "release " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl;
dump_mempools();
}
TEST_P(AllocTest, test_alloc_bench)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
uint64_t alloc_unit = 4096;
PExtentVector allocated, tmp;
AllocTracker at(capacity, alloc_unit);
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
gen_type rng(time(NULL));
boost::uniform_int<> u1(0, 9); // 4K-2M
boost::uniform_int<> u2(0, 7); // 4K-512K
utime_t start = ceph_clock_now();
for (uint64_t i = 0; i < capacity * 2; )
{
uint32_t want = alloc_unit << u1(rng);
tmp.clear();
auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp);
if (r < want) {
break;
}
i += r;
for(auto a : tmp) {
bool full = !at.push(a.offset, a.length);
EXPECT_EQ(full, false);
}
uint64_t want_release = alloc_unit << u2(rng);
uint64_t released = 0;
do {
uint64_t o = 0;
uint32_t l = 0;
interval_set<uint64_t> release_set;
if (!at.pop_random(rng, &o, &l, want_release - released)) {
break;
}
release_set.insert(o, l);
alloc->release(release_set);
released += l;
} while (released < want_release);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl;
std::cout<<"Avail "<< alloc->get_free() / _1m << " MB" << std::endl;
dump_mempools();
}
void AllocTest::doOverwriteTest(uint64_t capacity, uint64_t prefill,
uint64_t overwrite)
{
uint64_t alloc_unit = 4096;
PExtentVector allocated, tmp;
AllocTracker at(capacity, alloc_unit);
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
gen_type rng(time(NULL));
boost::uniform_int<> u1(0, 9); // 4K-2M
  boost::uniform_int<> u2(0, 9); // 4K-2M
utime_t start = ceph_clock_now();
  // prefill the requested fraction of the capacity
auto cap = prefill;
for (uint64_t i = 0; i < cap; )
{
uint32_t want = alloc_unit << u1(rng);
tmp.clear();
auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp);
if (r < want) {
break;
}
i += r;
for(auto a : tmp) {
bool full = !at.push(a.offset, a.length);
EXPECT_EQ(full, false);
}
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc " << i / 1024 / 1024 << " mb of "
<< cap / 1024 / 1024 << std::endl;
}
}
cap = overwrite;
for (uint64_t i = 0; i < cap; )
{
uint64_t want_release = alloc_unit << u2(rng);
uint64_t released = 0;
do {
uint64_t o = 0;
uint32_t l = 0;
interval_set<uint64_t> release_set;
if (!at.pop_random(rng, &o, &l, want_release - released)) {
break;
}
release_set.insert(o, l);
alloc->release(release_set);
released += l;
} while (released < want_release);
uint32_t want = alloc_unit << u1(rng);
tmp.clear();
auto r = alloc->allocate(want, alloc_unit, 0, 0, &tmp);
if (r != want) {
std::cout<<"Can't allocate more space, stopping."<< std::endl;
break;
}
i += r;
for(auto a : tmp) {
bool full = !at.push(a.offset, a.length);
EXPECT_EQ(full, false);
}
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "reuse " << i / 1024 / 1024 << " mb of "
<< cap / 1024 / 1024 << std::endl;
}
}
std::cout<<"Executed in "<< ceph_clock_now() - start << std::endl;
std::cout<<"Avail "<< alloc->get_free() / _1m << " MB" << std::endl;
dump_mempools();
}
TEST_P(AllocTest, test_alloc_bench_90_300)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
auto prefill = capacity - capacity / 10;
auto overwrite = capacity * 3;
doOverwriteTest(capacity, prefill, overwrite);
}
TEST_P(AllocTest, test_alloc_bench_50_300)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
auto prefill = capacity / 2;
auto overwrite = capacity * 3;
doOverwriteTest(capacity, prefill, overwrite);
}
TEST_P(AllocTest, test_alloc_bench_10_300)
{
uint64_t capacity = uint64_t(1024) * 1024 * 1024 * 1024;
auto prefill = capacity / 10;
auto overwrite = capacity * 3;
doOverwriteTest(capacity, prefill, overwrite);
}
TEST_P(AllocTest, mempoolAccounting)
{
uint64_t bytes = mempool::bluestore_alloc::allocated_bytes();
uint64_t items = mempool::bluestore_alloc::allocated_items();
uint64_t alloc_size = 4 * 1024;
uint64_t capacity = 512ll * 1024 * 1024 * 1024;
Allocator* alloc = Allocator::create(g_ceph_context, GetParam(),
capacity, alloc_size);
ASSERT_NE(alloc, nullptr);
alloc->init_add_free(0, capacity);
std::map<uint32_t, PExtentVector> all_allocs;
for (size_t i = 0; i < 10000; i++) {
PExtentVector tmp;
alloc->allocate(alloc_size, alloc_size, 0, 0, &tmp);
all_allocs[rand()] = tmp;
tmp.clear();
alloc->allocate(alloc_size, alloc_size, 0, 0, &tmp);
all_allocs[rand()] = tmp;
tmp.clear();
auto it = all_allocs.upper_bound(rand());
if (it != all_allocs.end()) {
alloc->release(it->second);
all_allocs.erase(it);
}
}
delete(alloc);
ASSERT_EQ(mempool::bluestore_alloc::allocated_bytes(), bytes);
ASSERT_EQ(mempool::bluestore_alloc::allocated_items(), items);
}
INSTANTIATE_TEST_SUITE_P(
Allocator,
AllocTest,
::testing::Values("stupid", "bitmap", "avl", "btree", "hybrid"));
| 9,803 | 25.569106 | 91 |
cc
|
null |
ceph-main/src/test/objectstore/Allocator_test.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* In memory space allocator test cases.
* Author: Ramesh Chander, [email protected]
*/
#include <iostream>
#include <boost/scoped_ptr.hpp>
#include <gtest/gtest.h>
#include "common/Cond.h"
#include "common/errno.h"
#include "include/stringify.h"
#include "include/Context.h"
#include "os/bluestore/Allocator.h"
using namespace std;
typedef boost::mt11213b gen_type;
class AllocTest : public ::testing::TestWithParam<const char*> {
public:
boost::scoped_ptr<Allocator> alloc;
AllocTest(): alloc(0) { }
void init_alloc(int64_t size, uint64_t min_alloc_size) {
std::cout << "Creating alloc type " << string(GetParam()) << " \n";
alloc.reset(Allocator::create(g_ceph_context, GetParam(), size,
min_alloc_size,
256*1048576, 100*256*1048576ull));
}
void init_close() {
alloc.reset(0);
}
};
TEST_P(AllocTest, test_alloc_init)
{
int64_t blocks = 64;
init_alloc(blocks, 1);
ASSERT_EQ(0U, alloc->get_free());
alloc->shutdown();
blocks = 1024 * 2 + 16;
init_alloc(blocks, 1);
ASSERT_EQ(0U, alloc->get_free());
alloc->shutdown();
blocks = 1024 * 2;
init_alloc(blocks, 1);
ASSERT_EQ(alloc->get_free(), (uint64_t) 0);
}
TEST_P(AllocTest, test_init_add_free)
{
int64_t block_size = 1024;
int64_t capacity = 4 * 1024 * block_size;
{
init_alloc(capacity, block_size);
auto free = alloc->get_free();
alloc->init_add_free(block_size, 0);
ASSERT_EQ(free, alloc->get_free());
alloc->init_rm_free(block_size, 0);
ASSERT_EQ(free, alloc->get_free());
}
}
TEST_P(AllocTest, test_alloc_min_alloc)
{
int64_t block_size = 1024;
int64_t capacity = 4 * 1024 * block_size;
{
init_alloc(capacity, block_size);
alloc->init_add_free(block_size, block_size);
PExtentVector extents;
EXPECT_EQ(block_size, alloc->allocate(block_size, block_size,
0, (int64_t) 0, &extents));
}
  /*
   * Allocate an extent and make sure it all comes back as a single extent.
   */
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 4);
PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
0, (int64_t) 0, &extents));
EXPECT_EQ(1u, extents.size());
EXPECT_EQ(extents[0].length, 4 * block_size);
}
  /*
   * Allocate an extent and make sure we get two separate extents.
   */
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 2);
alloc->init_add_free(3 * block_size, block_size * 2);
PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
0, (int64_t) 0, &extents));
EXPECT_EQ(2u, extents.size());
EXPECT_EQ(extents[0].length, 2 * block_size);
EXPECT_EQ(extents[1].length, 2 * block_size);
}
alloc->shutdown();
}
TEST_P(AllocTest, test_alloc_min_max_alloc)
{
int64_t block_size = 1024;
int64_t capacity = 4 * 1024 * block_size;
init_alloc(capacity, block_size);
/*
* Make sure we get all extents different when
* min_alloc_size == max_alloc_size
*/
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 4);
PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
block_size, (int64_t) 0, &extents));
for (auto e : extents) {
EXPECT_EQ(e.length, block_size);
}
EXPECT_EQ(4u, extents.size());
}
/*
* Make sure we get extents of length max_alloc size
* when max alloc size > min_alloc size
*/
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 4);
PExtentVector extents;
EXPECT_EQ(4*block_size,
alloc->allocate(4 * (uint64_t)block_size, (uint64_t) block_size,
2 * block_size, (int64_t) 0, &extents));
EXPECT_EQ(2u, extents.size());
for (auto& e : extents) {
EXPECT_EQ(e.length, block_size * 2);
}
}
/*
* Make sure allocations are of min_alloc_size when min_alloc_size > block_size.
*/
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 1024);
PExtentVector extents;
EXPECT_EQ(1024 * block_size,
alloc->allocate(1024 * (uint64_t)block_size,
(uint64_t) block_size * 4,
block_size * 4, (int64_t) 0, &extents));
for (auto& e : extents) {
EXPECT_EQ(e.length, block_size * 4);
}
EXPECT_EQ(1024u/4, extents.size());
}
/*
* Allocate and free.
*/
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 16);
PExtentVector extents;
EXPECT_EQ(16 * block_size,
alloc->allocate(16 * (uint64_t)block_size, (uint64_t) block_size,
2 * block_size, (int64_t) 0, &extents));
EXPECT_EQ(extents.size(), 8u);
for (auto& e : extents) {
EXPECT_EQ(e.length, 2 * block_size);
}
}
}
TEST_P(AllocTest, test_alloc_failure)
{
int64_t block_size = 1024;
int64_t capacity = 4 * 1024 * block_size;
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, block_size * 256);
alloc->init_add_free(block_size * 512, block_size * 256);
PExtentVector extents;
EXPECT_EQ(512 * block_size,
alloc->allocate(512 * (uint64_t)block_size,
(uint64_t) block_size * 256,
block_size * 256, (int64_t) 0, &extents));
alloc->init_add_free(0, block_size * 256);
alloc->init_add_free(block_size * 512, block_size * 256);
extents.clear();
EXPECT_EQ(-ENOSPC,
alloc->allocate(512 * (uint64_t)block_size,
(uint64_t) block_size * 512,
block_size * 512, (int64_t) 0, &extents));
}
}
TEST_P(AllocTest, test_alloc_big)
{
int64_t block_size = 4096;
int64_t blocks = 104857600;
int64_t mas = 4096;
init_alloc(blocks*block_size, block_size);
alloc->init_add_free(2*block_size, (blocks-2)*block_size);
for (int64_t big = mas; big < 1048576*128; big*=2) {
cout << big << std::endl;
PExtentVector extents;
EXPECT_EQ(big,
alloc->allocate(big, mas, 0, &extents));
}
}
TEST_P(AllocTest, test_alloc_non_aligned_len)
{
int64_t block_size = 1 << 12;
int64_t blocks = (1 << 20) * 100;
int64_t want_size = 1 << 22;
int64_t alloc_unit = 1 << 20;
init_alloc(blocks*block_size, block_size);
alloc->init_add_free(0, 2097152);
alloc->init_add_free(2097152, 1064960);
alloc->init_add_free(3670016, 2097152);
PExtentVector extents;
EXPECT_EQ(want_size, alloc->allocate(want_size, alloc_unit, 0, &extents));
}
TEST_P(AllocTest, test_alloc_39334)
{
uint64_t block = 0x4000;
uint64_t size = 0x5d00000000;
init_alloc(size, block);
alloc->init_add_free(0x4000, 0x5cffffc000);
EXPECT_EQ(size - block, alloc->get_free());
}
TEST_P(AllocTest, test_alloc_fragmentation)
{
uint64_t capacity = 4 * 1024 * 1024;
uint64_t alloc_unit = 4096;
uint64_t want_size = alloc_unit;
PExtentVector allocated, tmp;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
bool bitmap_alloc = GetParam() == std::string("bitmap");
EXPECT_EQ(0.0, alloc->get_fragmentation());
for (size_t i = 0; i < capacity / alloc_unit; ++i)
{
tmp.clear();
EXPECT_EQ(static_cast<int64_t>(want_size),
alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));
allocated.insert(allocated.end(), tmp.begin(), tmp.end());
// bitmap fragmentation calculation doesn't provide such constant
// estimate
if (!bitmap_alloc) {
EXPECT_EQ(0.0, alloc->get_fragmentation());
}
}
tmp.clear();
EXPECT_EQ(-ENOSPC, alloc->allocate(want_size, alloc_unit, 0, 0, &tmp));
if (GetParam() == string("avl")) {
// AVL allocator uses a different allocating strategy
GTEST_SKIP() << "skipping for AVL allocator";
} else if (GetParam() == string("hybrid")) {
    // Hybrid allocator uses a different allocating strategy
GTEST_SKIP() << "skipping for Hybrid allocator";
}
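  // Free every second allocated extent, producing a checkerboard layout in
  // which no two free extents are adjacent - the maximally fragmented case.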
for (size_t i = 0; i < allocated.size(); i += 2)
{
interval_set<uint64_t> release_set;
release_set.insert(allocated[i].offset, allocated[i].length);
alloc->release(release_set);
}
EXPECT_EQ(1.0, alloc->get_fragmentation());
for (size_t i = 1; i < allocated.size() / 2; i += 2)
{
interval_set<uint64_t> release_set;
release_set.insert(allocated[i].offset, allocated[i].length);
alloc->release(release_set);
}
if (bitmap_alloc) {
// fragmentation = one l1 slot is free + one l1 slot is partial
EXPECT_EQ(50U, uint64_t(alloc->get_fragmentation() * 100));
} else {
// fragmentation approx = 257 intervals / 768 max intervals
EXPECT_EQ(33u, uint64_t(alloc->get_fragmentation() * 100));
}
for (size_t i = allocated.size() / 2 + 1; i < allocated.size(); i += 2)
{
interval_set<uint64_t> release_set;
release_set.insert(allocated[i].offset, allocated[i].length);
alloc->release(release_set);
}
  // The stupid allocator does not merge all adjacent extents (minor bug or
  // by-design behavior?), which leaves some residual fragmentation, so
  // compare only the first two decimal digits instead of expecting exactly 0.
EXPECT_EQ(0u, uint64_t(alloc->get_fragmentation() * 100));
}
TEST_P(AllocTest, test_fragmentation_score_0)
{
uint64_t capacity = 16LL * 1024 * 1024 * 1024; //16 GB, very small
uint64_t alloc_unit = 4096;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
EXPECT_EQ(0, alloc->get_fragmentation_score());
// alloc every 100M, should get very small score
for (uint64_t pos = 0; pos < capacity; pos += 100 * 1024 * 1024) {
alloc->init_rm_free(pos, alloc_unit);
}
EXPECT_LT(alloc->get_fragmentation_score(), 0.0001); // frag < 0.01%
for (uint64_t pos = 0; pos < capacity; pos += 100 * 1024 * 1024) {
// put back
alloc->init_add_free(pos, alloc_unit);
}
// 10% space is trashed, rest is free, small score
for (uint64_t pos = 0; pos < capacity / 10; pos += 3 * alloc_unit) {
alloc->init_rm_free(pos, alloc_unit);
}
EXPECT_LT(0.01, alloc->get_fragmentation_score()); // 1% < frag < 10%
EXPECT_LT(alloc->get_fragmentation_score(), 0.1);
}
TEST_P(AllocTest, test_fragmentation_score_some)
{
uint64_t capacity = 1024 * 1024 * 1024; //1 GB, very small
uint64_t alloc_unit = 4096;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
// half (in 16 chunks) is completely free,
// other half completely fragmented, expect less than 50% fragmentation score
for (uint64_t chunk = 0; chunk < capacity; chunk += capacity / 16) {
for (uint64_t pos = 0; pos < capacity / 32; pos += alloc_unit * 3) {
alloc->init_rm_free(chunk + pos, alloc_unit);
}
}
EXPECT_LT(alloc->get_fragmentation_score(), 0.5); // f < 50%
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
// half (in 16 chunks) is completely full,
// other half completely fragmented, expect really high fragmentation score
for (uint64_t chunk = 0; chunk < capacity; chunk += capacity / 16) {
alloc->init_rm_free(chunk + capacity / 32, capacity / 32);
for (uint64_t pos = 0; pos < capacity / 32; pos += alloc_unit * 3) {
alloc->init_rm_free(chunk + pos, alloc_unit);
}
}
  EXPECT_LT(0.9, alloc->get_fragmentation_score()); // 90% < f
}
TEST_P(AllocTest, test_fragmentation_score_1)
{
uint64_t capacity = 1024 * 1024 * 1024; //1 GB, very small
uint64_t alloc_unit = 4096;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
// alloc every second AU, max fragmentation
for (uint64_t pos = 0; pos < capacity; pos += alloc_unit * 2) {
alloc->init_rm_free(pos, alloc_unit);
}
EXPECT_LT(0.99, alloc->get_fragmentation_score()); // 99% < f
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
// 1 allocated, 4 empty; expect very high score
for (uint64_t pos = 0; pos < capacity; pos += alloc_unit * 5) {
alloc->init_rm_free(pos, alloc_unit);
}
EXPECT_LT(0.90, alloc->get_fragmentation_score()); // 90% < f
}
TEST_P(AllocTest, test_dump_fragmentation_score)
{
uint64_t capacity = 1024 * 1024 * 1024;
uint64_t one_alloc_max = 2 * 1024 * 1024;
uint64_t alloc_unit = 4096;
uint64_t want_size = alloc_unit;
uint64_t rounds = 10;
uint64_t actions_per_round = 1000;
PExtentVector allocated, tmp;
gen_type rng;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0, capacity);
EXPECT_EQ(0.0, alloc->get_fragmentation());
EXPECT_EQ(0.0, alloc->get_fragmentation_score());
uint64_t allocated_cnt = 0;
for (size_t round = 0; round < rounds ; round++) {
for (size_t j = 0; j < actions_per_round ; j++) {
      // free or allocate?
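      // allocation becomes less likely as utilization grows (probability
      // roughly 1 - allocated_cnt / capacity), so usage self-balances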
if ( rng() % capacity >= allocated_cnt ) {
//allocate
want_size = ( rng() % one_alloc_max ) / alloc_unit * alloc_unit + alloc_unit;
tmp.clear();
int64_t r = alloc->allocate(want_size, alloc_unit, 0, 0, &tmp);
if (r > 0) {
for (auto& t: tmp) {
if (t.length > 0)
allocated.push_back(t);
}
allocated_cnt += r;
}
} else {
//free
ceph_assert(allocated.size() > 0);
size_t item = rng() % allocated.size();
ceph_assert(allocated[item].length > 0);
allocated_cnt -= allocated[item].length;
interval_set<uint64_t> release_set;
release_set.insert(allocated[item].offset, allocated[item].length);
alloc->release(release_set);
std::swap(allocated[item], allocated[allocated.size() - 1]);
allocated.resize(allocated.size() - 1);
}
}
size_t free_sum = 0;
auto iterated_allocation = [&](size_t off, size_t len) {
ceph_assert(len > 0);
free_sum += len;
};
alloc->foreach(iterated_allocation);
EXPECT_GT(1, alloc->get_fragmentation_score());
EXPECT_EQ(capacity, free_sum + allocated_cnt);
}
for (size_t i = 0; i < allocated.size(); i ++)
{
interval_set<uint64_t> release_set;
release_set.insert(allocated[i].offset, allocated[i].length);
alloc->release(release_set);
}
}
TEST_P(AllocTest, test_alloc_bug_24598)
{
  if (string(GetParam()) != "bitmap")
    GTEST_SKIP() << "regression test specific to the bitmap allocator";
uint64_t capacity = 0x2625a0000ull;
uint64_t alloc_unit = 0x4000;
uint64_t want_size = 0x200000;
PExtentVector allocated, tmp;
init_alloc(capacity, alloc_unit);
alloc->init_add_free(0x4800000, 0x100000);
alloc->init_add_free(0x4a00000, 0x100000);
alloc->init_rm_free(0x4800000, 0x100000);
alloc->init_rm_free(0x4a00000, 0x100000);
alloc->init_add_free(0x3f00000, 0x500000);
alloc->init_add_free(0x4500000, 0x100000);
alloc->init_add_free(0x4700000, 0x100000);
alloc->init_add_free(0x4900000, 0x100000);
alloc->init_add_free(0x4b00000, 0x200000);
EXPECT_EQ(static_cast<int64_t>(want_size),
alloc->allocate(want_size, 0x100000, 0, 0, &tmp));
EXPECT_EQ(1u, tmp.size());
EXPECT_EQ(0x4b00000u, tmp[0].offset);
EXPECT_EQ(0x200000u, tmp[0].length);
}
// Verifies issue from
// http://tracker.ceph.com/issues/40703
TEST_P(AllocTest, test_alloc_big2)
{
int64_t block_size = 4096;
int64_t blocks = 1048576 * 2;
int64_t mas = 1024*1024;
init_alloc(blocks*block_size, block_size);
alloc->init_add_free(0, blocks * block_size);
PExtentVector extents;
uint64_t need = block_size * blocks / 4; // 2GB
EXPECT_EQ(need,
alloc->allocate(need, mas, 0, &extents));
need = block_size * blocks / 4; // 2GB
extents.clear();
EXPECT_EQ(need,
alloc->allocate(need, mas, 0, &extents));
EXPECT_TRUE(extents[0].length > 0);
}
// Verifies stuck 4GB chunk allocation
// in StupidAllocator
TEST_P(AllocTest, test_alloc_big3)
{
int64_t block_size = 4096;
int64_t blocks = 1048576 * 2;
int64_t mas = 1024*1024;
init_alloc(blocks*block_size, block_size);
alloc->init_add_free(0, blocks * block_size);
PExtentVector extents;
uint64_t need = block_size * blocks / 2; // 4GB
EXPECT_EQ(need,
alloc->allocate(need, mas, 0, &extents));
EXPECT_TRUE(extents[0].length > 0);
}
TEST_P(AllocTest, test_alloc_contiguous)
{
int64_t block_size = 0x1000;
int64_t capacity = block_size * 1024 * 1024;
{
init_alloc(capacity, block_size);
alloc->init_add_free(0, capacity);
PExtentVector extents;
uint64_t need = 4 * block_size;
EXPECT_EQ(need,
alloc->allocate(need, need,
0, (int64_t)0, &extents));
EXPECT_EQ(1u, extents.size());
EXPECT_EQ(extents[0].offset, 0);
EXPECT_EQ(extents[0].length, 4 * block_size);
extents.clear();
EXPECT_EQ(need,
alloc->allocate(need, need,
0, (int64_t)0, &extents));
EXPECT_EQ(1u, extents.size());
EXPECT_EQ(extents[0].offset, 4 * block_size);
EXPECT_EQ(extents[0].length, 4 * block_size);
}
alloc->shutdown();
}
TEST_P(AllocTest, test_alloc_47883)
{
uint64_t block = 0x1000;
uint64_t size = 1599858540544ul;
init_alloc(size, block);
alloc->init_add_free(0x1b970000, 0x26000);
alloc->init_add_free(0x1747e9d5000, 0x493000);
alloc->init_add_free(0x1747ee6a000, 0x196000);
PExtentVector extents;
auto need = 0x3f980000;
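  // far more is requested than is free; with the 0x10000 allocation unit only
  // 0x630000 of the three free chunks is usable, so expect a short (partial)
  // allocation rather than a failure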
auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents);
EXPECT_GT(got, 0);
EXPECT_EQ(got, 0x630000);
}
TEST_P(AllocTest, test_alloc_50656_best_fit)
{
uint64_t block = 0x1000;
uint64_t size = 0x3b9e400000;
init_alloc(size, block);
// too few free extents - causes best fit mode for avls
for (size_t i = 0; i < 0x10; i++) {
alloc->init_add_free(i * 2 * 0x100000, 0x100000);
}
alloc->init_add_free(0x1e1bd13000, 0x404000);
PExtentVector extents;
auto need = 0x400000;
auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents);
EXPECT_GT(got, 0);
EXPECT_EQ(got, 0x400000);
}
TEST_P(AllocTest, test_alloc_50656_first_fit)
{
uint64_t block = 0x1000;
uint64_t size = 0x3b9e400000;
init_alloc(size, block);
for (size_t i = 0; i < 0x10000; i += 2) {
alloc->init_add_free(i * 0x100000, 0x100000);
}
alloc->init_add_free(0x1e1bd13000, 0x404000);
PExtentVector extents;
auto need = 0x400000;
auto got = alloc->allocate(need, 0x10000, 0, (int64_t)0, &extents);
EXPECT_GT(got, 0);
EXPECT_EQ(got, 0x400000);
}
INSTANTIATE_TEST_SUITE_P(
Allocator,
AllocTest,
::testing::Values("stupid", "bitmap", "avl", "hybrid"));
| 18,320 | 27.671362 | 85 |
cc
|
null |
ceph-main/src/test/objectstore/Fragmentation_simulator.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Fragmentation Simulator
* Author: Tri Dao, [email protected]
*/
#include "common/ceph_argparse.h"
#include "common/common_init.h"
#include "common/hobject.h"
#include "global/global_init.h"
#include "include/buffer_fwd.h"
#include "os/ObjectStore.h"
#include "test/objectstore/ObjectStoreImitator.h"
#include <fmt/core.h>
#include <gtest/gtest.h>
#include <iostream>
constexpr uint64_t _1Kb = 1024;
constexpr uint64_t _1Mb = 1024 * _1Kb;
constexpr uint64_t _1Gb = 1024 * _1Mb;
static bufferlist make_bl(size_t len, char c) {
bufferlist bl;
if (len > 0) {
bl.reserve(len);
bl.append(std::string(len, c));
}
return bl;
}
// --------- FragmentationSimulator ----------
class FragmentationSimulator : public ::testing::TestWithParam<std::string> {
public:
static boost::intrusive_ptr<CephContext> cct;
struct WorkloadGenerator {
virtual int generate_txns(ObjectStore::CollectionHandle &ch,
ObjectStoreImitator *os) = 0;
virtual std::string name() = 0;
WorkloadGenerator() {}
virtual ~WorkloadGenerator() {}
};
using WorkloadGeneratorRef = std::shared_ptr<WorkloadGenerator>;
void add_generator(WorkloadGeneratorRef gen);
void clear_generators() { generators.clear(); }
int begin_simulation_with_generators();
void init(const std::string &alloc_type, uint64_t size,
uint64_t min_alloc_size = 4096);
static void TearDownTestSuite() { cct.reset(); }
static void SetUpTestSuite() {}
  void TearDown() final { delete os; os = nullptr; }
FragmentationSimulator() = default;
~FragmentationSimulator() = default;
private:
  ObjectStoreImitator *os = nullptr;
std::vector<WorkloadGeneratorRef> generators;
};
void FragmentationSimulator::init(const std::string &alloc_type, uint64_t size,
uint64_t min_alloc_size) {
std::cout << std::endl;
std::cout << "Initializing ObjectStoreImitator" << std::endl;
os = new ObjectStoreImitator(g_ceph_context, "", min_alloc_size);
std::cout << "Initializing allocator: " << alloc_type << " size: 0x"
<< std::hex << size << std::dec << "\n"
<< std::endl;
os->init_alloc(alloc_type, size);
}
void FragmentationSimulator::add_generator(WorkloadGeneratorRef gen) {
std::cout << "Generator: " << gen->name() << " added\n";
generators.push_back(gen);
}
int FragmentationSimulator::begin_simulation_with_generators() {
for (auto &g : generators) {
ObjectStore::CollectionHandle ch =
os->create_new_collection(coll_t::meta());
ObjectStore::Transaction t;
t.create_collection(ch->cid, 0);
os->queue_transaction(ch, std::move(t));
int r = g->generate_txns(ch, os);
if (r < 0)
return r;
}
os->print_status();
return 0;
}
// --------- Generators ----------
struct SimpleCWGenerator : public FragmentationSimulator::WorkloadGenerator {
std::string name() override { return "SimpleCW"; }
int generate_txns(ObjectStore::CollectionHandle &ch,
ObjectStoreImitator *os) override {
std::vector<ghobject_t> objs;
for (unsigned i{0}; i < 100; ++i) {
hobject_t h;
h.oid = fmt::format("obj_{}", i);
h.set_hash(1);
h.pool = 1;
objs.emplace_back(h);
}
std::vector<ObjectStore::Transaction> tls;
for (unsigned i{0}; i < 100; ++i) {
ObjectStore::Transaction t1;
t1.create(ch->get_cid(), objs[i]);
tls.emplace_back(std::move(t1));
ObjectStore::Transaction t2;
t2.write(ch->get_cid(), objs[i], 0, _1Mb, make_bl(_1Mb, 'c'));
tls.emplace_back(std::move(t2));
}
os->queue_transactions(ch, tls);
os->verify_objects(ch);
// reapply
os->queue_transactions(ch, tls);
os->verify_objects(ch);
tls.clear();
// Overwrite on object
for (unsigned i{0}; i < 100; ++i) {
ObjectStore::Transaction t;
t.write(ch->get_cid(), objs[i], _1Kb * i, _1Mb * 3,
make_bl(_1Mb * 3, 'x'));
tls.emplace_back(std::move(t));
}
os->queue_transactions(ch, tls);
os->verify_objects(ch);
tls.clear();
for (unsigned i{0}; i < 50; ++i) {
ObjectStore::Transaction t1, t2;
t1.clone(ch->get_cid(), objs[i], objs[i + 50]);
tls.emplace_back(std::move(t1));
t2.clone(ch->get_cid(), objs[i + 50], objs[i]);
tls.emplace_back(std::move(t2));
}
os->queue_transactions(ch, tls);
os->verify_objects(ch);
return 0;
}
};
// ----------- Tests -----------
TEST_P(FragmentationSimulator, SimpleCWGenerator) {
init(GetParam(), _1Gb);
add_generator(std::make_shared<SimpleCWGenerator>());
  ASSERT_EQ(0, begin_simulation_with_generators());
}
// ----------- main -----------
INSTANTIATE_TEST_SUITE_P(Allocator, FragmentationSimulator,
::testing::Values("stupid", "bitmap", "avl", "btree"));
boost::intrusive_ptr<CephContext> FragmentationSimulator::cct;
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
FragmentationSimulator::cct =
global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(FragmentationSimulator::cct->get());
FragmentationSimulator::cct->_conf->bluestore_clone_cow = false;
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 5,420 | 27.68254 | 80 |
cc
|
null |
ceph-main/src/test/objectstore/ObjectStoreImitator.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Fragmentation Simulator
* Author: Tri Dao, [email protected]
*/
#include "test/objectstore/ObjectStoreImitator.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#include "include/intarith.h"
#define dout_context cct
#define OBJECT_MAX_SIZE 0xffffffff // 32 bits
void ObjectStoreImitator::init_alloc(const std::string &alloc_type,
uint64_t size) {
alloc.reset(Allocator::create(cct, alloc_type, size, min_alloc_size));
alloc->init_add_free(0, size);
ceph_assert(alloc->get_free() == size);
}
void ObjectStoreImitator::print_status() {
std::cout << std::hex
<< "Fragmentation score: " << alloc->get_fragmentation_score()
<< " , fragmentation: " << alloc->get_fragmentation()
<< ", allocator type " << alloc->get_type() << ", capacity 0x"
<< alloc->get_capacity() << ", block size 0x"
<< alloc->get_block_size() << ", free 0x" << alloc->get_free()
<< std::dec << std::endl;
}
void ObjectStoreImitator::verify_objects(CollectionHandle &ch) {
Collection *c = static_cast<Collection *>(ch.get());
c->verify_objects();
}
// ------- Transactions -------
int ObjectStoreImitator::queue_transactions(CollectionHandle &ch,
std::vector<Transaction> &tls,
TrackedOpRef op,
ThreadPool::TPHandle *handle) {
for (std::vector<Transaction>::iterator p = tls.begin(); p != tls.end();
++p) {
_add_transaction(&(*p));
}
  if (handle) {
    handle->suspend_tp_timeout();
    handle->reset_tp_timeout();
  }
return 0;
}
ObjectStoreImitator::CollectionRef
ObjectStoreImitator::_get_collection(const coll_t &cid) {
std::shared_lock l(coll_lock);
ceph::unordered_map<coll_t, CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
}
void ObjectStoreImitator::_add_transaction(Transaction *t) {
Transaction::iterator i = t->begin();
std::vector<CollectionRef> cvec(i.colls.size());
unsigned j = 0;
for (auto p = i.colls.begin(); p != i.colls.end(); ++p, ++j) {
cvec[j] = _get_collection(*p);
}
std::vector<ObjectRef> ovec(i.objects.size());
for (int pos = 0; i.have_op(); ++pos) {
Transaction::Op *op = i.decode_op();
int r = 0;
// no coll or obj
if (op->op == Transaction::OP_NOP)
continue;
// collection operations
CollectionRef &c = cvec[op->cid];
switch (op->op) {
case Transaction::OP_RMCOLL: {
const coll_t &cid = i.get_cid(op->cid);
r = _remove_collection(cid, &c);
if (!r)
continue;
} break;
case Transaction::OP_MKCOLL: {
ceph_assert(!c);
const coll_t &cid = i.get_cid(op->cid);
r = _create_collection(cid, op->split_bits, &c);
if (!r)
continue;
} break;
case Transaction::OP_SPLIT_COLLECTION:
ceph_abort_msg("deprecated");
break;
case Transaction::OP_SPLIT_COLLECTION2: {
uint32_t bits = op->split_bits;
uint32_t rem = op->split_rem;
r = _split_collection(c, cvec[op->dest_cid], bits, rem);
if (!r)
continue;
} break;
case Transaction::OP_MERGE_COLLECTION: {
uint32_t bits = op->split_bits;
r = _merge_collection(&c, cvec[op->dest_cid], bits);
if (!r)
continue;
} break;
case Transaction::OP_COLL_HINT: {
uint32_t type = op->hint;
bufferlist hint;
i.decode_bl(hint);
auto hiter = hint.cbegin();
if (type == Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS) {
uint32_t pg_num;
uint64_t num_objs;
decode(pg_num, hiter);
decode(num_objs, hiter);
}
continue;
} break;
case Transaction::OP_COLL_SETATTR:
r = -EOPNOTSUPP;
break;
case Transaction::OP_COLL_RMATTR:
r = -EOPNOTSUPP;
break;
case Transaction::OP_COLL_RENAME:
ceph_abort_msg("not implemented");
break;
}
  // these operations implicitly create the object
bool create =
(op->op == Transaction::OP_TOUCH || op->op == Transaction::OP_CREATE ||
op->op == Transaction::OP_WRITE || op->op == Transaction::OP_ZERO);
// object operations
std::unique_lock l(c->lock);
ObjectRef &o = ovec[op->oid];
if (!o) {
ghobject_t oid = i.get_oid(op->oid);
o = c->get_obj(oid, create);
}
if (!create && (!o || !o->exists)) {
r = -ENOENT;
goto endop;
}
switch (op->op) {
case Transaction::OP_CREATE:
case Transaction::OP_TOUCH: {
_assign_nid(o);
r = 0;
} break;
case Transaction::OP_WRITE: {
uint64_t off = op->off;
uint64_t len = op->len;
uint32_t fadvise_flags = i.get_fadvise_flags();
bufferlist bl;
i.decode_bl(bl);
r = _write(c, o, off, len, bl, fadvise_flags);
} break;
case Transaction::OP_ZERO: {
if (op->off + op->len > OBJECT_MAX_SIZE) {
r = -E2BIG;
} else {
_assign_nid(o);
r = _do_zero(c, o, op->off, op->len);
}
} break;
case Transaction::OP_TRIMCACHE: {
// deprecated, no-op
} break;
case Transaction::OP_TRUNCATE: {
_do_truncate(c, o, op->off);
} break;
case Transaction::OP_REMOVE: {
_do_truncate(c, o, 0);
} break;
case Transaction::OP_SETATTR:
case Transaction::OP_SETATTRS:
case Transaction::OP_RMATTR:
case Transaction::OP_RMATTRS:
break;
case Transaction::OP_CLONE: {
ObjectRef &no = ovec[op->dest_oid];
if (!no) {
const ghobject_t &noid = i.get_oid(op->dest_oid);
no = c->get_obj(noid, true);
}
r = _clone(c, o, no);
} break;
case Transaction::OP_CLONERANGE:
ceph_abort_msg("deprecated");
break;
case Transaction::OP_CLONERANGE2: {
ObjectRef &no = ovec[op->dest_oid];
if (!no) {
const ghobject_t &noid = i.get_oid(op->dest_oid);
no = c->get_obj(noid, true);
}
uint64_t srcoff = op->off;
uint64_t len = op->len;
uint64_t dstoff = op->dest_off;
r = _clone_range(c, o, no, srcoff, len, dstoff);
} break;
case Transaction::OP_COLL_ADD:
case Transaction::OP_COLL_REMOVE:
ceph_abort_msg("not implemented");
break;
case Transaction::OP_COLL_MOVE:
ceph_abort_msg("deprecated");
break;
case Transaction::OP_COLL_MOVE_RENAME:
case Transaction::OP_TRY_RENAME: {
ceph_assert(op->cid == op->dest_cid);
const ghobject_t &noid = i.get_oid(op->dest_oid);
ObjectRef &no = ovec[op->dest_oid];
if (!no) {
no = c->get_obj(noid, false);
}
r = _rename(c, o, no, noid);
} break;
case Transaction::OP_OMAP_CLEAR:
case Transaction::OP_OMAP_SETKEYS:
case Transaction::OP_OMAP_RMKEYS:
case Transaction::OP_OMAP_RMKEYRANGE:
case Transaction::OP_OMAP_SETHEADER:
break;
case Transaction::OP_SETALLOCHINT: {
r = _set_alloc_hint(c, o, op->expected_object_size,
op->expected_write_size, op->hint);
} break;
default:
derr << __func__ << " bad op " << op->op << dendl;
ceph_abort();
}
endop:
if (r < 0) {
derr << __func__ << " error " << cpp_strerror(r)
<< " not handled on operation " << op->op << " (op " << pos
<< ", counting from 0)" << dendl;
ceph_abort_msg("unexpected error");
}
}
}
int ObjectStoreImitator::read(CollectionHandle &c_, const ghobject_t &oid,
uint64_t offset, size_t length, bufferlist &bl,
uint32_t op_flags) {
Collection *c = static_cast<Collection *>(c_.get());
if (!c->exists)
return -ENOENT;
bl.clear();
int r;
{
std::shared_lock l(c->lock);
ObjectRef o = c->get_obj(oid, false);
if (!o || !o->exists) {
r = -ENOENT;
goto out;
}
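    // when both offset and length are zero, read the whole object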
    if (offset == 0 && length == 0)
length = o->size;
r = _do_read(c, o, offset, length, bl, op_flags);
}
out:
return r;
}
// ------- Helpers -------
void ObjectStoreImitator::_assign_nid(ObjectRef &o) {
  if (o->nid) {
    ceph_assert(o->exists);
    return; // nid already assigned; do not renumber the object
  }
  o->nid = ++nid_last;
  o->exists = true;
}
int ObjectStoreImitator::_do_zero(CollectionRef &c, ObjectRef &o,
uint64_t offset, size_t length) {
PExtentVector old_extents;
o->punch_hole(offset, length, old_extents);
alloc->release(old_extents);
return 0;
}
int ObjectStoreImitator::_do_read(Collection *c, ObjectRef &o, uint64_t offset,
size_t len, ceph::buffer::list &bl,
uint32_t op_flags, uint64_t retry_count) {
auto data = std::string(len, 'a');
bl.append(data);
return bl.length();
}
int ObjectStoreImitator::_do_write(CollectionRef &c, ObjectRef &o,
uint64_t offset, uint64_t length,
bufferlist &bl, uint32_t fadvise_flags) {
ceph_assert(length == bl.length());
int r = 0;
uint64_t end = length + offset;
if (length == 0) {
return 0;
}
PExtentVector punched;
o->punch_hole(offset, length, punched);
alloc->release(punched);
// all writes will trigger an allocation
r = _do_alloc_write(c, o, bl);
if (r < 0) {
derr << __func__ << " _do_alloc_write failed with " << cpp_strerror(r)
<< dendl;
goto out;
}
if (end > o->size) {
o->size = end;
}
r = 0;
out:
return r;
}
int ObjectStoreImitator::_do_clone_range(CollectionRef &c, ObjectRef &oldo,
ObjectRef &newo, uint64_t srcoff,
uint64_t length, uint64_t dstoff) {
if (dstoff + length > newo->size)
newo->size = dstoff + length;
return 0;
}
// ------- Operations -------
int ObjectStoreImitator::_write(CollectionRef &c, ObjectRef &o, uint64_t offset,
size_t length, bufferlist &bl,
uint32_t fadvise_flags) {
int r = 0;
if (offset + length >= OBJECT_MAX_SIZE) {
r = -E2BIG;
} else {
_assign_nid(o);
r = _do_write(c, o, offset, length, bl, fadvise_flags);
}
return r;
}
int ObjectStoreImitator::_do_alloc_write(CollectionRef coll, ObjectRef &o,
bufferlist &bl) {
// No compression for now
uint64_t need = p2roundup(static_cast<uint64_t>(bl.length()), min_alloc_size);
PExtentVector prealloc;
int64_t prealloc_left =
alloc->allocate(need, min_alloc_size, need, 0, &prealloc);
if (prealloc_left < 0 || prealloc_left < (int64_t)need) {
derr << __func__ << " failed to allocate 0x" << std::hex << need
<< " allocated 0x " << (prealloc_left < 0 ? 0 : prealloc_left)
<< " min_alloc_size 0x" << min_alloc_size << " available 0x "
<< alloc->get_free() << std::dec << dendl;
if (prealloc.size())
alloc->release(prealloc);
return -ENOSPC;
}
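  // Carve exactly `need` bytes out of the preallocated extents; if the last
  // extent consumed is larger than what is still needed, split it and hand
  // the tail (together with any untouched extents) back to the allocator below.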
auto prealloc_pos = prealloc.begin();
ceph_assert(prealloc_pos != prealloc.end());
PExtentVector extents;
int64_t left = need;
while (left > 0) {
ceph_assert(prealloc_left > 0);
if (prealloc_pos->length <= left) {
prealloc_left -= prealloc_pos->length;
left -= prealloc_pos->length;
extents.push_back(*prealloc_pos);
++prealloc_pos;
} else {
extents.emplace_back(prealloc_pos->offset, left);
prealloc_pos->offset += left;
prealloc_pos->length -= left;
prealloc_left -= left;
left = 0;
break;
}
}
o->append(extents);
if (prealloc_left > 0) {
PExtentVector old_extents;
while (prealloc_pos != prealloc.end()) {
old_extents.push_back(*prealloc_pos);
prealloc_left -= prealloc_pos->length;
++prealloc_pos;
}
alloc->release(old_extents);
}
ceph_assert(prealloc_pos == prealloc.end());
ceph_assert(prealloc_left == 0);
return 0;
}
void ObjectStoreImitator::_do_truncate(CollectionRef &c, ObjectRef &o,
uint64_t offset) {
if (offset == o->size)
return;
PExtentVector old_extents;
o->punch_hole(offset, o->size - offset, old_extents);
o->size = offset;
alloc->release(old_extents);
}
int ObjectStoreImitator::_rename(CollectionRef &c, ObjectRef &oldo,
ObjectRef &newo, const ghobject_t &new_oid) {
int r;
ghobject_t old_oid = oldo->oid;
if (newo) {
if (newo->exists) {
r = -EEXIST;
goto out;
}
}
newo = oldo;
c->rename_obj(oldo, old_oid, new_oid);
r = 0;
out:
return r;
}
int ObjectStoreImitator::_set_alloc_hint(CollectionRef &c, ObjectRef &o,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags) {
o->expected_object_size = expected_object_size;
o->expected_write_size = expected_write_size;
o->alloc_hint_flags = flags;
return 0;
}
int ObjectStoreImitator::_clone(CollectionRef &c, ObjectRef &oldo,
ObjectRef &newo) {
int r = 0;
if (oldo->oid.hobj.get_hash() != newo->oid.hobj.get_hash()) {
return -EINVAL;
}
_assign_nid(newo);
_do_truncate(c, newo, 0);
if (cct->_conf->bluestore_clone_cow) {
_do_clone_range(c, oldo, newo, 0, oldo->size, 0);
} else {
bufferlist bl;
r = _do_read(c.get(), oldo, 0, oldo->size, bl, 0);
if (r < 0)
goto out;
r = _do_write(c, newo, 0, oldo->size, bl, 0);
if (r < 0)
goto out;
}
r = 0;
out:
return r;
}
int ObjectStoreImitator::_clone_range(CollectionRef &c, ObjectRef &oldo,
ObjectRef &newo, uint64_t srcoff,
uint64_t length, uint64_t dstoff) {
int r = 0;
if (srcoff + length >= OBJECT_MAX_SIZE ||
dstoff + length >= OBJECT_MAX_SIZE) {
r = -E2BIG;
goto out;
}
if (srcoff + length > oldo->size) {
r = -EINVAL;
goto out;
}
_assign_nid(newo);
if (length > 0) {
if (cct->_conf->bluestore_clone_cow) {
_do_zero(c, newo, dstoff, length);
_do_clone_range(c, oldo, newo, srcoff, length, dstoff);
} else {
bufferlist bl;
r = _do_read(c.get(), oldo, srcoff, length, bl, 0);
if (r < 0)
goto out;
r = _do_write(c, newo, dstoff, bl.length(), bl, 0);
if (r < 0)
goto out;
}
}
r = 0;
out:
return r;
}
// ------- Collections -------
int ObjectStoreImitator::_merge_collection(CollectionRef *c, CollectionRef &d,
unsigned bits) {
std::unique_lock l((*c)->lock);
std::unique_lock l2(d->lock);
coll_t cid = (*c)->cid;
spg_t pgid, dest_pgid;
bool is_pg = cid.is_pg(&pgid);
ceph_assert(is_pg);
is_pg = d->cid.is_pg(&dest_pgid);
ceph_assert(is_pg);
// adjust bits. note that this will be redundant for all but the first
// merge call for the parent/target.
d->cnode.bits = bits;
// remove source collection
{
std::unique_lock l3(coll_lock);
_do_remove_collection(c);
}
return 0;
}
int ObjectStoreImitator::_split_collection(CollectionRef &c, CollectionRef &d,
unsigned bits, int rem) {
std::unique_lock l(c->lock);
std::unique_lock l2(d->lock);
  // BlueStore would move cached items (onodes and referenced shared blobs)
  // that belong to the child collection post-split; the imitator keeps no
  // such caches, so a split only validates the pgids and adjusts the bits.
spg_t pgid, dest_pgid;
bool is_pg = c->cid.is_pg(&pgid);
ceph_assert(is_pg);
is_pg = d->cid.is_pg(&dest_pgid);
ceph_assert(is_pg);
ceph_assert(d->cnode.bits == bits);
// adjust bits. note that this will be redundant for all but the first
// split call for this parent (first child).
c->cnode.bits = bits;
ceph_assert(d->cnode.bits == bits);
return 0;
}
ObjectStore::CollectionHandle
ObjectStoreImitator::open_collection(const coll_t &cid) {
std::shared_lock l(coll_lock);
ceph::unordered_map<coll_t, CollectionRef>::iterator cp = coll_map.find(cid);
if (cp == coll_map.end())
return CollectionRef();
return cp->second;
}
ObjectStore::CollectionHandle
ObjectStoreImitator::create_new_collection(const coll_t &cid) {
std::unique_lock l{coll_lock};
auto c = ceph::make_ref<Collection>(this, cid);
new_coll_map[cid] = c;
return c;
}
void ObjectStoreImitator::set_collection_commit_queue(
const coll_t &cid, ContextQueue *commit_queue) {
if (commit_queue) {
std::shared_lock l(coll_lock);
if (coll_map.count(cid)) {
coll_map[cid]->commit_queue = commit_queue;
} else if (new_coll_map.count(cid)) {
new_coll_map[cid]->commit_queue = commit_queue;
}
}
}
bool ObjectStoreImitator::exists(CollectionHandle &c_, const ghobject_t &oid) {
Collection *c = static_cast<Collection *>(c_.get());
if (!c->exists)
return false;
bool r = true;
{
std::shared_lock l(c->lock);
ObjectRef o = c->get_obj(oid, false);
if (!o || !o->exists)
r = false;
}
return r;
}
int ObjectStoreImitator::set_collection_opts(CollectionHandle &ch,
const pool_opts_t &opts) {
Collection *c = static_cast<Collection *>(ch.get());
if (!c->exists)
return -ENOENT;
std::unique_lock l{c->lock};
c->pool_opts = opts;
return 0;
}
int ObjectStoreImitator::list_collections(std::vector<coll_t> &ls) {
std::shared_lock l(coll_lock);
ls.reserve(coll_map.size());
for (ceph::unordered_map<coll_t, CollectionRef>::iterator p =
coll_map.begin();
p != coll_map.end(); ++p)
ls.push_back(p->first);
return 0;
}
bool ObjectStoreImitator::collection_exists(const coll_t &c) {
std::shared_lock l(coll_lock);
return coll_map.count(c);
}
int ObjectStoreImitator::collection_empty(CollectionHandle &ch, bool *empty) {
std::vector<ghobject_t> ls;
ghobject_t next;
int r =
collection_list(ch, ghobject_t(), ghobject_t::get_max(), 1, &ls, &next);
if (r < 0) {
derr << __func__ << " collection_list returned: " << cpp_strerror(r)
<< dendl;
return r;
}
*empty = ls.empty();
return 0;
}
int ObjectStoreImitator::collection_bits(CollectionHandle &ch) {
Collection *c = static_cast<Collection *>(ch.get());
std::shared_lock l(c->lock);
return c->cnode.bits;
}
int ObjectStoreImitator::collection_list(CollectionHandle &c_,
const ghobject_t &start,
const ghobject_t &end, int max,
std::vector<ghobject_t> *ls,
ghobject_t *pnext) {
Collection *c = static_cast<Collection *>(c_.get());
c->flush();
int r;
{
std::shared_lock l(c->lock);
r = _collection_list(c, start, end, max, false, ls, pnext);
}
return r;
}
int ObjectStoreImitator::_collection_list(
Collection *c, const ghobject_t &start, const ghobject_t &end, int max,
bool legacy, std::vector<ghobject_t> *ls, ghobject_t *next) {
if (!c->exists)
return -ENOENT;
if (start.is_max() || start.hobj.is_max()) {
*next = ghobject_t::get_max();
return 0;
}
  // `start` is a lower bound, not necessarily an existing object
  auto it = c->objects.lower_bound(start);
  if (it == c->objects.end()) {
    *next = ghobject_t::get_max();
    return 0;
  }
  do {
    ls->push_back((it++)->first);
    if (ls->size() >= (unsigned)max) {
      // guard against dereferencing the end iterator
      *next = (it == c->objects.end()) ? ghobject_t::get_max() : it->first;
      return 0;
    }
  } while (it != c->objects.end() && it->first != end);
if (it != c->objects.end())
*next = it->first;
else
*next = ghobject_t::get_max();
return 0;
}
int ObjectStoreImitator::_remove_collection(const coll_t &cid,
CollectionRef *c) {
int r;
{
std::unique_lock l(coll_lock);
if (!*c) {
r = -ENOENT;
goto out;
}
ceph_assert((*c)->exists);
    for (auto& o : (*c)->objects) {
if (o.second->exists) {
r = -ENOTEMPTY;
goto out;
}
}
_do_remove_collection(c);
r = 0;
}
out:
return r;
}
void ObjectStoreImitator::_do_remove_collection(CollectionRef *c) {
coll_map.erase((*c)->cid);
(*c)->exists = false;
c->reset();
}
int ObjectStoreImitator::_create_collection(const coll_t &cid, unsigned bits,
CollectionRef *c) {
int r;
bufferlist bl;
{
std::unique_lock l(coll_lock);
if (*c) {
r = -EEXIST;
goto out;
}
auto p = new_coll_map.find(cid);
ceph_assert(p != new_coll_map.end());
*c = p->second;
(*c)->cnode.bits = bits;
coll_map[cid] = *c;
new_coll_map.erase(p);
}
encode((*c)->cnode, bl);
r = 0;
out:
return r;
}
| 21,195 | 24.785888 | 80 |
cc
|
null |
ceph-main/src/test/objectstore/ObjectStoreImitator.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Fragmentation Simulator
* Author: Tri Dao, [email protected]
*/
#pragma once
#include "include/common_fwd.h"
#include "os/ObjectStore.h"
#include "os/bluestore/Allocator.h"
#include "os/bluestore/bluestore_types.h"
#include <algorithm>
#include <boost/smart_ptr/intrusive_ptr.hpp>
/**
* ObjectStoreImitator will simulate how BlueStore does IO (as of the time
* the simulator is written) and assess the defragmentation levels of different
* allocators. As the main concern of the simulator is allocators, it mainly
* focuses on operations that triggers IOs and tries to simplify the rest as
* much as possible(caches, memory buffers).
*
* The simulator inherited from ObjectStore and tries to simulate BlueStore as
* much as possible.
*
* #Note: This is an allocation simulator not a data consistency simulator so
* any object data is not stored.
*/
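//
// Typical usage (a sketch; see Fragmentation_simulator.cc for a real driver):
//
//   ObjectStoreImitator os(cct, "", 4096);
//   os.init_alloc("bitmap", size);
//   auto ch = os.create_new_collection(cid);
//   ObjectStore::Transaction t;
//   t.create_collection(cid, 0);
//   os.queue_transaction(ch, std::move(t));
//   ... queue more transactions ...
//   os.print_status(); // dumps the allocator's fragmentation score/level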
class ObjectStoreImitator : public ObjectStore {
private:
class Collection;
typedef boost::intrusive_ptr<Collection> CollectionRef;
struct Object : public RefCountedObject {
Collection *c;
ghobject_t oid;
bool exists;
uint64_t nid;
uint64_t size;
uint32_t alloc_hint_flags = 0;
uint32_t expected_object_size = 0;
uint32_t expected_write_size = 0;
    // We assume these extents are sorted in "logical" order.
PExtentVector extents;
Object(Collection *c_, const ghobject_t &oid_, bool exists_ = false,
uint64_t nid_ = 0, uint64_t size_ = 0)
: c(c_), oid(oid_), exists(exists_), nid(nid_), size(size_) {}
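    // Remove the extents backing [offset, offset+length) from this object
    // and hand them back via old_extents so the caller can release them.
    // Punching starts at the first extent whose logical start is >= offset,
    // so a sub-extent hole at the front of the range stays allocated.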
void punch_hole(uint64_t offset, uint64_t length,
PExtentVector &old_extents) {
if (offset >= size || length == 0)
return;
if (offset + length >= size) {
length = size - offset;
}
uint64_t l_offset{0}, punched_length{0};
PExtentVector to_be_punched, remains;
for (auto e : extents) {
        // stop once we are past the end of the punched range; compute
        // (l_offset - offset) to avoid unsigned underflow when length > l_offset
        if (l_offset > offset && l_offset - offset >= length)
          break;
// Found where we need to punch
if (l_offset >= offset) {
// We only punched a portion of the extent
if (e.length + punched_length > length) {
uint64_t left = e.length + punched_length - length;
e.length = length - punched_length;
remains.emplace_back(e.offset + e.length, left);
}
to_be_punched.push_back(e);
punched_length += e.length;
} else { // else the extent will remain
remains.push_back(e);
}
l_offset += e.length;
}
size -= punched_length;
extents = remains;
old_extents = to_be_punched;
}
void append(PExtentVector &ext) {
for (auto &e : ext) {
extents.push_back(e);
size += e.length;
}
std::sort(extents.begin(), extents.end(),
[](bluestore_pextent_t &a, bluestore_pextent_t &b) {
return a.offset < b.offset;
});
}
void verify_extents() {
uint64_t total{0};
for (auto &e : extents) {
ceph_assert(total <= e.offset);
ceph_assert(e.length > 0);
total += e.length;
}
ceph_assert(total == size);
}
};
typedef boost::intrusive_ptr<Object> ObjectRef;
struct Collection : public CollectionImpl {
bluestore_cnode_t cnode;
std::map<ghobject_t, ObjectRef> objects;
ceph::shared_mutex lock = ceph::make_shared_mutex(
"FragmentationSimulator::Collection::lock", true, false);
// Lock for 'objects'
ceph::recursive_mutex obj_lock = ceph::make_recursive_mutex(
"FragmentationSimulator::Collection::obj_lock");
bool exists;
// pool options
pool_opts_t pool_opts;
ContextQueue *commit_queue;
bool contains(const ghobject_t &oid) {
if (cid.is_meta())
return oid.hobj.pool == -1;
spg_t spgid;
if (cid.is_pg(&spgid))
return spgid.pgid.contains(cnode.bits, oid) &&
oid.shard_id == spgid.shard;
return false;
}
int64_t pool() const { return cid.pool(); }
ObjectRef get_obj(const ghobject_t &oid, bool create) {
ceph_assert(create ? ceph_mutex_is_wlocked(lock)
: ceph_mutex_is_locked(lock));
spg_t pgid;
if (cid.is_pg(&pgid) && !oid.match(cnode.bits, pgid.ps())) {
ceph_abort();
}
auto o = objects.find(oid);
if (o != objects.end())
return o->second;
if (!create)
return nullptr;
return objects[oid] = new Object(this, oid);
}
bool flush_commit(Context *c) override { return false; }
void flush() override {}
void rename_obj(ObjectRef &oldo, const ghobject_t &old_oid,
const ghobject_t &new_oid) {
std::lock_guard l(obj_lock);
auto po = objects.find(old_oid);
auto pn = objects.find(new_oid);
ceph_assert(po != pn);
ceph_assert(po != objects.end());
if (pn != objects.end()) {
objects.erase(pn);
}
ObjectRef o = po->second;
oldo.reset(new Object(o->c, old_oid));
po->second = oldo;
objects.insert(std::make_pair(new_oid, o));
o->oid = new_oid;
}
void verify_objects() {
for (auto &[_, obj] : objects) {
obj->verify_extents();
}
}
Collection(ObjectStoreImitator *sim_, coll_t cid_)
: CollectionImpl(sim_->cct, cid_), exists(true), commit_queue(nullptr) {
}
};
CollectionRef _get_collection(const coll_t &cid);
int _split_collection(CollectionRef &c, CollectionRef &d, unsigned bits,
int rem);
int _merge_collection(CollectionRef *c, CollectionRef &d, unsigned bits);
int _collection_list(Collection *c, const ghobject_t &start,
const ghobject_t &end, int max, bool legacy,
std::vector<ghobject_t> *ls, ghobject_t *next);
int _remove_collection(const coll_t &cid, CollectionRef *c);
void _do_remove_collection(CollectionRef *c);
int _create_collection(const coll_t &cid, unsigned bits, CollectionRef *c);
// Transactions
void _add_transaction(Transaction *t);
// Object ops
int _write(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length,
bufferlist &bl, uint32_t fadvise_flags);
int _set_alloc_hint(CollectionRef &c, ObjectRef &o,
uint64_t expected_object_size,
uint64_t expected_write_size, uint32_t flags);
int _rename(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo,
const ghobject_t &new_oid);
int _clone(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo);
int _clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo,
uint64_t srcoff, uint64_t length, uint64_t dstoff);
int read(CollectionHandle &c, const ghobject_t &oid, uint64_t offset,
size_t len, ceph::buffer::list &bl, uint32_t op_flags = 0) override;
// Helpers
void _assign_nid(ObjectRef &o);
int _do_write(CollectionRef &c, ObjectRef &o, uint64_t offset,
uint64_t length, ceph::buffer::list &bl,
uint32_t fadvise_flags);
int _do_alloc_write(CollectionRef c, ObjectRef &o, bufferlist &bl);
void _do_truncate(CollectionRef &c, ObjectRef &o, uint64_t offset);
int _do_zero(CollectionRef &c, ObjectRef &o, uint64_t offset, size_t length);
int _do_clone_range(CollectionRef &c, ObjectRef &oldo, ObjectRef &newo,
uint64_t srcoff, uint64_t length, uint64_t dstoff);
int _do_read(Collection *c, ObjectRef &o, uint64_t offset, size_t len,
ceph::buffer::list &bl, uint32_t op_flags = 0,
uint64_t retry_count = 0);
// Members
boost::scoped_ptr<Allocator> alloc;
std::atomic<uint64_t> nid_last = {0};
uint64_t min_alloc_size; ///< minimum allocation unit (power of 2)
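  // the log2 of min_alloc_size (at most 64 for a 64-bit type) must fit in a
  // uint8_t, mirroring the constraint BlueStore places on min_alloc_size_order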
static_assert(std::numeric_limits<uint8_t>::max() >
std::numeric_limits<decltype(min_alloc_size)>::digits,
"not enough bits for min_alloc_size");
///< rwlock to protect coll_map/new_coll_map
ceph::shared_mutex coll_lock =
ceph::make_shared_mutex("FragmentationSimulator::coll_lock");
std::unordered_map<coll_t, CollectionRef> coll_map;
std::unordered_map<coll_t, CollectionRef>
      new_coll_map; // collections opened via create_new_collection whose
                    // create transaction has not yet been executed
public:
ObjectStoreImitator(CephContext *cct, const std::string &path_,
uint64_t min_alloc_size_)
: ObjectStore(cct, path_), alloc(nullptr),
min_alloc_size(min_alloc_size_) {}
~ObjectStoreImitator() = default;
void init_alloc(const std::string &alloc_type, uint64_t size);
void print_status();
void verify_objects(CollectionHandle &ch);
// Overrides
// This is often not called directly but through queue_transaction
int queue_transactions(CollectionHandle &ch, std::vector<Transaction> &tls,
TrackedOpRef op = TrackedOpRef(),
ThreadPool::TPHandle *handle = NULL) override;
CollectionHandle open_collection(const coll_t &cid) override;
CollectionHandle create_new_collection(const coll_t &cid) override;
void set_collection_commit_queue(const coll_t &cid,
ContextQueue *commit_queue) override;
  bool exists(CollectionHandle &c, const ghobject_t &oid) override;
int set_collection_opts(CollectionHandle &c,
const pool_opts_t &opts) override;
int list_collections(std::vector<coll_t> &ls) override;
bool collection_exists(const coll_t &c) override;
int collection_empty(CollectionHandle &c, bool *empty) override;
int collection_bits(CollectionHandle &c) override;
int collection_list(CollectionHandle &c, const ghobject_t &start,
const ghobject_t &end, int max,
std::vector<ghobject_t> *ls, ghobject_t *next) override;
// Not used but implemented so it compiles
std::string get_type() override { return "ObjectStoreImitator"; }
bool test_mount_in_use() override { return false; }
int mount() override { return 0; }
int umount() override { return 0; }
int validate_hobject_key(const hobject_t &obj) const override { return 0; }
unsigned get_max_attr_name_length() override { return 256; }
int mkfs() override { return 0; }
int mkjournal() override { return 0; }
bool needs_journal() override { return false; }
bool wants_journal() override { return false; }
bool allows_journal() override { return false; }
int statfs(struct store_statfs_t *buf,
osd_alert_list_t *alerts = nullptr) override {
return 0;
}
int pool_statfs(uint64_t pool_id, struct store_statfs_t *buf,
bool *per_pool_omap) override {
return 0;
}
int stat(CollectionHandle &c, const ghobject_t &oid, struct stat *st,
bool allow_eio = false) override {
return 0;
}
int fiemap(CollectionHandle &c, const ghobject_t &oid, uint64_t offset,
size_t len, ceph::buffer::list &bl) override {
return 0;
}
int fiemap(CollectionHandle &c, const ghobject_t &oid, uint64_t offset,
size_t len, std::map<uint64_t, uint64_t> &destmap) override {
return 0;
}
int getattr(CollectionHandle &c, const ghobject_t &oid, const char *name,
ceph::buffer::ptr &value) override {
return 0;
}
int getattrs(
CollectionHandle &c, const ghobject_t &oid,
std::map<std::string, ceph::buffer::ptr, std::less<>> &aset) override {
return 0;
}
int omap_get(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
std::map<std::string, ceph::buffer::list>
               *out ///< [out] Key to value map
) override {
return 0;
}
int omap_get_header(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
ceph::buffer::list *header, ///< [out] omap header
bool allow_eio = false ///< [in] don't assert on eio
) override {
return 0;
}
int omap_get_keys(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
std::set<std::string> *keys ///< [out] Keys defined on oid
) override {
return 0;
}
int omap_get_values(CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to get
std::map<std::string, ceph::buffer::list>
*out ///< [out] Returned keys and values
) override {
return 0;
}
int omap_check_keys(
CollectionHandle &c, ///< [in] Collection containing oid
const ghobject_t &oid, ///< [in] Object containing omap
const std::set<std::string> &keys, ///< [in] Keys to check
std::set<std::string> *out ///< [out] Subset of keys defined on oid
) override {
return 0;
}
ObjectMap::ObjectMapIterator
get_omap_iterator(CollectionHandle &c, ///< [in] collection
const ghobject_t &oid ///< [in] object
) override {
return {};
}
void set_fsid(uuid_d u) override {}
uuid_d get_fsid() override { return {}; }
uint64_t estimate_objects_overhead(uint64_t num_objects) override {
return num_objects * 300;
}
objectstore_perf_stat_t get_cur_stats() override { return {}; }
const PerfCounters *get_perf_counters() const override { return nullptr; };
};
| 14,067 | 35.54026 | 80 |
h
|
null |
ceph-main/src/test/objectstore/ObjectStoreTransactionBenchmark.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 UnitedStack <[email protected]>
*
* Author: Haomai Wang <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdlib.h>
#include <stdint.h>
#include <string>
#include <iostream>
using namespace std;
#include "common/ceph_argparse.h"
#include "common/debug.h"
#include "common/Cycles.h"
#include "global/global_init.h"
#include "os/ObjectStore.h"
class Transaction {
private:
ObjectStore::Transaction t;
public:
struct Tick {
uint64_t ticks;
uint64_t count;
Tick(): ticks(0), count(0) {}
void add(uint64_t a) {
ticks += a;
count++;
}
};
static Tick write_ticks, setattr_ticks, omap_setkeys_ticks, omap_rmkey_ticks;
static Tick encode_ticks, decode_ticks, iterate_ticks;
void write(coll_t cid, const ghobject_t& oid, uint64_t off, uint64_t len,
const bufferlist& data) {
uint64_t start_time = Cycles::rdtsc();
t.write(cid, oid, off, len, data);
write_ticks.add(Cycles::rdtsc() - start_time);
}
void setattr(coll_t cid, const ghobject_t& oid, const string &name,
bufferlist& val) {
uint64_t start_time = Cycles::rdtsc();
t.setattr(cid, oid, name, val);
setattr_ticks.add(Cycles::rdtsc() - start_time);
}
void omap_setkeys(coll_t cid, const ghobject_t &oid,
const map<string, bufferlist> &attrset) {
uint64_t start_time = Cycles::rdtsc();
t.omap_setkeys(cid, oid, attrset);
omap_setkeys_ticks.add(Cycles::rdtsc() - start_time);
}
void omap_rmkey(coll_t cid, const ghobject_t &oid,
const string &key) {
uint64_t start_time = Cycles::rdtsc();
t.omap_rmkey(cid, oid, key);
omap_rmkey_ticks.add(Cycles::rdtsc() - start_time);
}
void apply_encode_decode() {
bufferlist bl;
ObjectStore::Transaction d;
uint64_t start_time = Cycles::rdtsc();
t.encode(bl);
encode_ticks.add(Cycles::rdtsc() - start_time);
auto bliter = bl.cbegin();
start_time = Cycles::rdtsc();
d.decode(bliter);
decode_ticks.add(Cycles::rdtsc() - start_time);
}
void apply_iterate() {
uint64_t start_time = Cycles::rdtsc();
ObjectStore::Transaction::iterator i = t.begin();
while (i.have_op()) {
ObjectStore::Transaction::Op *op = i.decode_op();
switch (op->op) {
case ObjectStore::Transaction::OP_WRITE:
{
ghobject_t oid = i.get_oid(op->oid);
bufferlist bl;
i.decode_bl(bl);
}
break;
case ObjectStore::Transaction::OP_SETATTR:
{
ghobject_t oid = i.get_oid(op->oid);
string name = i.decode_string();
bufferlist bl;
i.decode_bl(bl);
map<string, bufferptr> to_set;
to_set[name] = bufferptr(bl.c_str(), bl.length());
}
break;
case ObjectStore::Transaction::OP_OMAP_SETKEYS:
{
ghobject_t oid = i.get_oid(op->oid);
map<string, bufferptr> aset;
i.decode_attrset(aset);
}
break;
case ObjectStore::Transaction::OP_OMAP_RMKEYS:
{
ghobject_t oid = i.get_oid(op->oid);
set<string> keys;
i.decode_keyset(keys);
}
break;
}
}
iterate_ticks.add(Cycles::rdtsc() - start_time);
}
static void dump_stat() {
cerr << " write op: " << Cycles::to_microseconds(write_ticks.ticks) << "us count: " << write_ticks.count << std::endl;
cerr << " setattr op: " << Cycles::to_microseconds(setattr_ticks.ticks) << "us count: " << setattr_ticks.count << std::endl;
cerr << " omap_setkeys op: " << Cycles::to_microseconds(Transaction::omap_setkeys_ticks.ticks) << "us count: " << Transaction::omap_setkeys_ticks.count << std::endl;
cerr << " omap_rmkey op: " << Cycles::to_microseconds(Transaction::omap_rmkey_ticks.ticks) << "us count: " << Transaction::omap_rmkey_ticks.count << std::endl;
cerr << " encode op: " << Cycles::to_microseconds(Transaction::encode_ticks.ticks) << "us count: " << Transaction::encode_ticks.count << std::endl;
cerr << " decode op: " << Cycles::to_microseconds(Transaction::decode_ticks.ticks) << "us count: " << Transaction::decode_ticks.count << std::endl;
cerr << " iterate op: " << Cycles::to_microseconds(Transaction::iterate_ticks.ticks) << "us count: " << Transaction::iterate_ticks.count << std::endl;
}
};
class PerfCase {
static const uint64_t Kib = 1024;
static const uint64_t Mib = 1024 * 1024;
static const string info_epoch_attr;
static const string info_info_attr;
static const string attr;
static const string snapset_attr;
static const string pglog_attr;
static const coll_t meta_cid;
static const coll_t cid;
static const ghobject_t pglog_oid;
static const ghobject_t info_oid;
map<string, bufferlist> data;
ghobject_t create_object() {
bufferlist bl = generate_random(100, 1);
return ghobject_t(hobject_t(string("obj_")+string(bl.c_str()), string(), rand() & 2 ? CEPH_NOSNAP : rand(), rand() & 0xFF, 0, ""));
}
bufferlist generate_random(uint64_t len, int frag) {
static const char alphanum[] = "0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
uint64_t per_frag = len / frag;
bufferlist bl;
for (int i = 0; i < frag; i++ ) {
bufferptr bp(per_frag);
      // fill only this fragment; iterating up to len would overflow bp
      for (unsigned int j = 0; j < per_frag; j++) {
bp[j] = alphanum[rand() % (sizeof(alphanum) - 1)];
}
bl.append(bp);
}
return bl;
}
public:
PerfCase() {
uint64_t four_kb = Kib * 4;
uint64_t one_mb = Mib * 1;
uint64_t four_mb = Mib * 4;
data["4k"] = generate_random(four_kb, 1);
data["1m"] = generate_random(one_mb, 1);
data["4m"] = generate_random(four_mb, 1);
data[attr] = generate_random(256, 1);
data[snapset_attr] = generate_random(32, 1);
data[pglog_attr] = generate_random(128, 1);
data[info_epoch_attr] = generate_random(4, 1);
data[info_info_attr] = generate_random(560, 1);
}
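  // Builds the two transactions an OSD would issue for a single 4k RADOS
  // write: one carrying the object data and xattrs, one updating the pglog
  // and info omaps. Only transaction construction, encode/decode and
  // iteration are timed; no objectstore backend is involved.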
uint64_t rados_write_4k(int times) {
uint64_t ticks = 0;
uint64_t len = Kib *4;
for (int i = 0; i < times; i++) {
uint64_t start_time = 0;
{
Transaction t;
ghobject_t oid = create_object();
start_time = Cycles::rdtsc();
t.write(cid, oid, 0, len, data["4k"]);
t.setattr(cid, oid, attr, data[attr]);
t.setattr(cid, oid, snapset_attr, data[snapset_attr]);
t.apply_encode_decode();
t.apply_iterate();
ticks += Cycles::rdtsc() - start_time;
}
{
Transaction t;
map<string, bufferlist> pglog_attrset;
map<string, bufferlist> info_attrset;
pglog_attrset[pglog_attr] = data[pglog_attr];
info_attrset[info_epoch_attr] = data[info_epoch_attr];
info_attrset[info_info_attr] = data[info_info_attr];
start_time = Cycles::rdtsc();
t.omap_setkeys(meta_cid, pglog_oid, pglog_attrset);
t.omap_setkeys(meta_cid, info_oid, info_attrset);
t.omap_rmkey(meta_cid, pglog_oid, pglog_attr);
t.apply_encode_decode();
t.apply_iterate();
ticks += Cycles::rdtsc() - start_time;
}
}
return ticks;
}
};
const string PerfCase::info_epoch_attr("11.40_epoch");
const string PerfCase::info_info_attr("11.40_info");
const string PerfCase::attr("_");
const string PerfCase::snapset_attr("snapset");
const string PerfCase::pglog_attr("pglog_attr");
const coll_t PerfCase::meta_cid;
const coll_t PerfCase::cid;
const ghobject_t PerfCase::pglog_oid(hobject_t(sobject_t(object_t("cid_pglog"), 0)));
const ghobject_t PerfCase::info_oid(hobject_t(sobject_t(object_t("infos"), 0)));
Transaction::Tick Transaction::write_ticks, Transaction::setattr_ticks, Transaction::omap_setkeys_ticks, Transaction::omap_rmkey_ticks;
Transaction::Tick Transaction::encode_ticks, Transaction::decode_ticks, Transaction::iterate_ticks;
void usage(const string &name) {
cerr << "Usage: " << name << " [times] "
<< std::endl;
}
int main(int argc, char **argv)
{
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
g_ceph_context->_conf.apply_changes(nullptr);
Cycles::init();
cerr << "args: " << args << std::endl;
if (args.size() < 1) {
usage(argv[0]);
return 1;
}
uint64_t times = atoi(args[0]);
PerfCase c;
uint64_t ticks = c.rados_write_4k(times);
Transaction::dump_stat();
cerr << " Total rados op " << times << " run time " << Cycles::to_microseconds(ticks) << "us." << std::endl;
return 0;
}
| 9,053 | 32.910112 | 169 |
cc
|
null |
ceph-main/src/test/objectstore/TestObjectStoreState.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <time.h>
#include <stdlib.h>
#include <signal.h>
#include "os/ObjectStore.h"
#include "common/ceph_argparse.h"
#include "global/global_init.h"
#include "common/debug.h"
#include <boost/scoped_ptr.hpp>
#include <boost/lexical_cast.hpp>
#include "TestObjectStoreState.h"
#include "include/ceph_assert.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_filestore
#undef dout_prefix
#define dout_prefix *_dout << "ceph_test_objectstore_state "
using namespace std;
void TestObjectStoreState::init(int colls, int objs)
{
dout(5) << "init " << colls << " colls " << objs << " objs" << dendl;
ObjectStore::Transaction t;
auto meta_ch = m_store->create_new_collection(coll_t::meta());
t.create_collection(coll_t::meta(), 0);
m_store->queue_transaction(meta_ch, std::move(t));
wait_for_ready();
int baseid = 0;
for (int i = 0; i < colls; i++) {
spg_t pgid(pg_t(i, 1), shard_id_t::NO_SHARD);
coll_t cid(pgid);
auto ch = m_store->create_new_collection(cid);
coll_entry_t *entry = coll_create(pgid, ch);
dout(5) << "init create collection " << entry->m_cid
<< " meta " << entry->m_meta_obj << dendl;
ObjectStore::Transaction *t = new ObjectStore::Transaction;
t->create_collection(entry->m_cid, 32);
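    // COLL_HINT_EXPECTED_NUM_OBJECTS payload: two encoded values,
    // pg_num followed by the expected per-collection object count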
bufferlist hint;
uint32_t pg_num = colls;
uint64_t num_objs = uint64_t(objs / colls);
encode(pg_num, hint);
encode(num_objs, hint);
t->collection_hint(entry->m_cid, ObjectStore::Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS, hint);
dout(5) << "give collection hint, number of objects per collection: " << num_objs << dendl;
t->touch(cid, entry->m_meta_obj);
for (int i = 0; i < objs; i++) {
hobject_t *obj = entry->touch_obj(i + baseid);
t->touch(entry->m_cid, ghobject_t(*obj));
ceph_assert(i + baseid == m_num_objects);
m_num_objects++;
}
baseid += objs;
t->register_on_commit(new C_OnFinished(this));
m_store->queue_transaction(entry->m_ch, std::move(*t), nullptr);
delete t;
inc_in_flight();
m_collections.insert(make_pair(cid, entry));
rebuild_id_vec();
m_next_coll_nr++;
}
dout(5) << "init has " << m_in_flight.load() << "in-flight transactions" << dendl;
wait_for_done();
dout(5) << "init finished" << dendl;
}
TestObjectStoreState::coll_entry_t *TestObjectStoreState::coll_create(
spg_t pgid, ObjectStore::CollectionHandle ch)
{
char meta_buf[100];
memset(meta_buf, 0, 100);
snprintf(meta_buf, 100, "pglog_0_head");
return (new coll_entry_t(pgid, ch, meta_buf));
}
TestObjectStoreState::coll_entry_t*
TestObjectStoreState::get_coll(coll_t cid, bool erase)
{
dout(5) << "get_coll id " << cid << dendl;
coll_entry_t *entry = NULL;
auto it = m_collections.find(cid);
if (it != m_collections.end()) {
entry = it->second;
if (erase) {
m_collections.erase(it);
rebuild_id_vec();
}
}
dout(5) << "get_coll id " << cid;
if (!entry)
*_dout << " non-existent";
else
*_dout << " name " << entry->m_cid;
*_dout << dendl;
return entry;
}
TestObjectStoreState::coll_entry_t*
TestObjectStoreState::get_coll_at(int pos, bool erase)
{
dout(5) << "get_coll_at pos " << pos << dendl;
if (m_collections.empty())
return NULL;
ceph_assert((size_t) pos < m_collections_ids.size());
coll_t cid = m_collections_ids[pos];
coll_entry_t *entry = m_collections[cid];
if (entry == NULL) {
dout(5) << "get_coll_at pos " << pos << " non-existent" << dendl;
return NULL;
}
if (erase) {
m_collections.erase(cid);
rebuild_id_vec();
}
dout(5) << "get_coll_at pos " << pos << ": "
<< entry->m_cid << "(removed: " << erase << ")" << dendl;
return entry;
}
TestObjectStoreState::coll_entry_t::~coll_entry_t()
{
if (m_objects.size() > 0) {
map<int, hobject_t*>::iterator it = m_objects.begin();
for (; it != m_objects.end(); ++it) {
hobject_t *obj = it->second;
if (obj) {
delete obj;
}
}
m_objects.clear();
}
}
bool TestObjectStoreState::coll_entry_t::check_for_obj(int id)
{
if (m_objects.count(id))
return true;
return false;
}
hobject_t *TestObjectStoreState::coll_entry_t::touch_obj(int id)
{
map<int, hobject_t*>::iterator it = m_objects.find(id);
if (it != m_objects.end()) {
dout(5) << "touch_obj coll id " << m_cid
<< " name " << it->second->oid.name << dendl;
return it->second;
}
char buf[100];
memset(buf, 0, 100);
snprintf(buf, 100, "obj%d", id);
hobject_t *obj = new hobject_t(sobject_t(object_t(buf), CEPH_NOSNAP));
obj->set_hash(m_pgid.ps());
obj->pool = m_pgid.pool();
m_objects.insert(make_pair(id, obj));
dout(5) << "touch_obj coll id " << m_cid << " name " << buf << dendl;
return obj;
}
hobject_t *TestObjectStoreState::coll_entry_t::get_obj(int id)
{
return get_obj(id, false);
}
/**
* remove_obj - Removes object without freeing it.
* @param id Object's id in the map.
* @return The object or NULL in case of error.
*/
hobject_t *TestObjectStoreState::coll_entry_t::remove_obj(int id)
{
return get_obj(id, true);
}
hobject_t *TestObjectStoreState::coll_entry_t::get_obj(int id, bool remove)
{
map<int, hobject_t*>::iterator it = m_objects.find(id);
if (it == m_objects.end()) {
dout(5) << "get_obj coll " << m_cid
<< " obj #" << id << " non-existent" << dendl;
return NULL;
}
hobject_t *obj = it->second;
if (remove)
m_objects.erase(it);
dout(5) << "get_obj coll " << m_cid << " id " << id
<< ": " << obj->oid.name << "(removed: " << remove << ")" << dendl;
return obj;
}
hobject_t *TestObjectStoreState::coll_entry_t::get_obj_at(int pos, int *key)
{
return get_obj_at(pos, false, key);
}
/**
* remove_obj_at - Removes object without freeing it.
* @param pos The map's position in which the object lies.
* @return The object or NULL in case of error.
*/
hobject_t *TestObjectStoreState::coll_entry_t::remove_obj_at(int pos, int *key)
{
return get_obj_at(pos, true, key);
}
hobject_t *TestObjectStoreState::coll_entry_t::get_obj_at(int pos,
bool remove, int *key)
{
if (m_objects.empty()) {
dout(5) << "get_obj_at coll " << m_cid << " pos " << pos
<< " in an empty collection" << dendl;
return NULL;
}
hobject_t *ret = NULL;
map<int, hobject_t*>::iterator it = m_objects.begin();
for (int i = 0; it != m_objects.end(); ++it, i++) {
if (i == pos) {
ret = it->second;
break;
}
}
if (ret == NULL) {
dout(5) << "get_obj_at coll " << m_cid << " pos " << pos
<< " non-existent" << dendl;
return NULL;
}
if (key != NULL)
*key = it->first;
if (remove)
m_objects.erase(it);
dout(5) << "get_obj_at coll id " << m_cid << " pos " << pos
<< ": " << ret->oid.name << "(removed: " << remove << ")" << dendl;
return ret;
}
hobject_t*
TestObjectStoreState::coll_entry_t::replace_obj(int id, hobject_t *obj) {
hobject_t *old_obj = remove_obj(id);
m_objects.insert(make_pair(id, obj));
return old_obj;
}
int TestObjectStoreState::coll_entry_t::get_random_obj_id(rngen_t& gen)
{
ceph_assert(!m_objects.empty());
boost::uniform_int<> orig_obj_rng(0, m_objects.size()-1);
int pos = orig_obj_rng(gen);
map<int, hobject_t*>::iterator it = m_objects.begin();
for (int i = 0; it != m_objects.end(); ++it, i++) {
if (i == pos) {
return it->first;
}
}
ceph_abort_msg("INTERNAL ERROR");
}
| 7,909 | 25.366667 | 101 |
cc
|
null |
ceph-main/src/test/objectstore/TestObjectStoreState.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef TEST_OBJECTSTORE_STATE_H_
#define TEST_OBJECTSTORE_STATE_H_
#include <boost/scoped_ptr.hpp>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <map>
#include <vector>
#include "os/ObjectStore.h"
#include "common/Cond.h"
typedef boost::mt11213b rngen_t;
class TestObjectStoreState {
public:
struct coll_entry_t {
spg_t m_pgid;
coll_t m_cid;
ghobject_t m_meta_obj;
ObjectStore::CollectionHandle m_ch;
std::map<int, hobject_t*> m_objects;
int m_next_object_id;
coll_entry_t(spg_t pgid, ObjectStore::CollectionHandle& ch,
char *meta_obj_buf)
: m_pgid(pgid),
m_cid(m_pgid),
m_meta_obj(hobject_t(sobject_t(object_t(meta_obj_buf), CEPH_NOSNAP))),
m_ch(ch),
m_next_object_id(0) {
m_meta_obj.hobj.pool = m_pgid.pool();
m_meta_obj.hobj.set_hash(m_pgid.ps());
}
~coll_entry_t();
hobject_t *touch_obj(int id);
bool check_for_obj(int id);
hobject_t *get_obj(int id);
hobject_t *remove_obj(int id);
hobject_t *get_obj_at(int pos, int *key = NULL);
hobject_t *remove_obj_at(int pos, int *key = NULL);
hobject_t *replace_obj(int id, hobject_t *obj);
int get_random_obj_id(rngen_t& gen);
private:
hobject_t *get_obj(int id, bool remove);
hobject_t *get_obj_at(int pos, bool remove, int *key = NULL);
};
protected:
boost::shared_ptr<ObjectStore> m_store;
std::map<coll_t, coll_entry_t*> m_collections;
std::vector<coll_t> m_collections_ids;
int m_next_coll_nr;
int m_num_objs_per_coll;
int m_num_objects;
int m_max_in_flight;
std::atomic<int> m_in_flight = { 0 };
ceph::mutex m_finished_lock = ceph::make_mutex("Finished Lock");
ceph::condition_variable m_finished_cond;
void rebuild_id_vec() {
m_collections_ids.clear();
m_collections_ids.reserve(m_collections.size());
for (auto& i : m_collections) {
m_collections_ids.push_back(i.first);
}
}
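  // Throttling helpers: wait_for_ready() blocks while m_max_in_flight
  // transactions are already queued (m_max_in_flight == 0 disables the
  // throttle); wait_for_done() blocks until every queued transaction has
  // finished.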
void wait_for_ready() {
std::unique_lock locker{m_finished_lock};
m_finished_cond.wait(locker, [this] {
return m_max_in_flight <= 0 || m_in_flight < m_max_in_flight;
});
}
void wait_for_done() {
std::unique_lock locker{m_finished_lock};
m_finished_cond.wait(locker, [this] { return m_in_flight == 0; });
}
void set_max_in_flight(int max) {
m_max_in_flight = max;
}
void set_num_objs_per_coll(int val) {
m_num_objs_per_coll = val;
}
coll_entry_t *get_coll(coll_t cid, bool erase = false);
coll_entry_t *get_coll_at(int pos, bool erase = false);
int get_next_pool_id() { return m_next_pool++; }
private:
static const int m_default_num_colls = 30;
  // The pool ID used for collection creation; ID 0 is preserved for other tests
int m_next_pool;
public:
explicit TestObjectStoreState(ObjectStore *store) :
m_next_coll_nr(0), m_num_objs_per_coll(10), m_num_objects(0),
m_max_in_flight(0), m_next_pool(2) {
m_store.reset(store);
}
~TestObjectStoreState() {
auto it = m_collections.begin();
while (it != m_collections.end()) {
if (it->second)
delete it->second;
m_collections.erase(it++);
}
}
void init(int colls, int objs);
void init() {
init(m_default_num_colls, 0);
}
int inc_in_flight() {
return ++m_in_flight;
}
int dec_in_flight() {
return --m_in_flight;
}
coll_entry_t *coll_create(spg_t pgid, ObjectStore::CollectionHandle ch);
class C_OnFinished: public Context {
protected:
TestObjectStoreState *m_state;
public:
explicit C_OnFinished(TestObjectStoreState *state) : m_state(state) { }
void finish(int r) override {
std::lock_guard locker{m_state->m_finished_lock};
m_state->dec_in_flight();
m_state->m_finished_cond.notify_all();
}
};
};
#endif /* TEST_OBJECTSTORE_STATE_H_ */
| 4,213 | 25.503145 | 79 |
h
|
null |
ceph-main/src/test/objectstore/TestRocksdbOptionParse.cc
|
#include <gtest/gtest.h>
#include "include/Context.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/thread_status.h"
#include "kv/RocksDBStore.h"
#include <iostream>
using namespace std;
const string dir("rocksdb.test_temp_dir");
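// Exercises RocksDBStore::ParseOptionsFromString(): a semicolon-separated
// option string is mapped onto the corresponding rocksdb::Options fields,
// including the nested compaction_options_universal block.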
TEST(RocksDBOption, simple) {
rocksdb::Options options;
rocksdb::Status status;
map<string,string> kvoptions;
RocksDBStore *db = new RocksDBStore(g_ceph_context, dir, kvoptions, NULL);
string options_string = ""
"write_buffer_size=536870912;"
"create_if_missing=true;"
"max_write_buffer_number=4;"
"max_background_compactions=4;"
"stats_dump_period_sec = 5;"
"min_write_buffer_number_to_merge = 2;"
"level0_file_num_compaction_trigger = 4;"
"max_bytes_for_level_base = 104857600;"
"target_file_size_base = 10485760;"
"num_levels = 3;"
"compression = kNoCompression;"
"compaction_options_universal = {min_merge_width=4;size_ratio=2;max_size_amplification_percent=500}";
int r = db->ParseOptionsFromString(options_string, options);
ASSERT_EQ(0, r);
ASSERT_EQ(536870912u, options.write_buffer_size);
ASSERT_EQ(4, options.max_write_buffer_number);
ASSERT_EQ(4, options.max_background_compactions);
ASSERT_EQ(5u, options.stats_dump_period_sec);
ASSERT_EQ(2, options.min_write_buffer_number_to_merge);
ASSERT_EQ(4, options.level0_file_num_compaction_trigger);
ASSERT_EQ(104857600u, options.max_bytes_for_level_base);
ASSERT_EQ(10485760u, options.target_file_size_base);
ASSERT_EQ(3, options.num_levels);
ASSERT_EQ(rocksdb::kNoCompression, options.compression);
ASSERT_EQ(2, options.compaction_options_universal.size_ratio);
ASSERT_EQ(4, options.compaction_options_universal.min_merge_width);
ASSERT_EQ(500, options.compaction_options_universal.max_size_amplification_percent);
}
TEST(RocksDBOption, interpret) {
rocksdb::Options options;
rocksdb::Status status;
map<string,string> kvoptions;
RocksDBStore *db = new RocksDBStore(g_ceph_context, dir, kvoptions, NULL);
string options_string = "compact_on_mount = true; compaction_threads=10;flusher_threads=5;";
int r = db->ParseOptionsFromString(options_string, options);
ASSERT_EQ(0, r);
ASSERT_TRUE(db->compact_on_mount);
//check thread pool setting
options.env->SleepForMicroseconds(100000);
std::vector<rocksdb::ThreadStatus> thread_list;
status = options.env->GetThreadList(&thread_list);
ASSERT_TRUE(status.ok());
int num_high_pri_threads = 0;
int num_low_pri_threads = 0;
for (vector<rocksdb::ThreadStatus>::iterator it = thread_list.begin();
it!= thread_list.end();
++it) {
if (it->thread_type == rocksdb::ThreadStatus::HIGH_PRIORITY)
num_high_pri_threads++;
if (it->thread_type == rocksdb::ThreadStatus::LOW_PRIORITY)
num_low_pri_threads++;
}
ASSERT_EQ(15u, thread_list.size());
//low pri threads is compaction_threads
ASSERT_EQ(10, num_low_pri_threads);
//high pri threads is flusher_threads
ASSERT_EQ(5, num_high_pri_threads);
}
| 3,007 | 37.075949 | 106 |
cc
|
null |
ceph-main/src/test/objectstore/allocator_replay_test.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Allocator replay tool.
* Author: Igor Fedotov, [email protected]
*/
#include <iostream>
#include <vector>
#include "common/ceph_argparse.h"
#include "common/debug.h"
#include "common/Cycles.h"
#include "common/errno.h"
#include "common/ceph_json.h"
#include "common/admin_socket.h"
#include "include/denc.h"
#include "global/global_init.h"
#include "os/bluestore/Allocator.h"
using namespace std;
void usage(const string &name) {
cerr << "Usage: " << name << " <log_to_replay> <raw_duplicates|duplicates|free_dump|try_alloc count want alloc_unit|replay_alloc alloc_list_file|export_binary out_file>" << std::endl;
}
void usage_replay_alloc(const string &name) {
cerr << "Detailed replay_alloc usage: " << name << " <allocator_dump_JSON> replay_alloc <alloc_list_file> [number of replays]" << std::endl;
cerr << "The number of replays defaults to 1." << std::endl;
cerr << "The \"alloc_list_file\" parameter should be a file with allocation requests, one per line." << std::endl;
cerr << "Allocation request format (space separated, optional parameters are 0 if not given): want unit [max] [hint]" << std::endl;
}
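/*
 * Example alloc_list_file contents (hypothetical values, following the
 * "want unit [max] [hint]" format described above; the %ji conversion
 * used by the parser accepts decimal or 0x-prefixed hex):
 *
 * 0x100000 0x10000
 * 0x400000 0x10000 0x100000 0
 */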
struct binary_alloc_map_t {
std::vector<std::pair<uint64_t, uint64_t>> free_extents;
DENC(binary_alloc_map_t, v, p) {
DENC_START(1, 1, p);
denc(v.free_extents, p);
DENC_FINISH(p);
}
};
WRITE_CLASS_DENC(binary_alloc_map_t)
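// Binary counterpart of the JSON "extents" array: export_as_binary() below
// writes this denc-encoded map, and replay_free_dump_and_apply_raw() reads
// it back when a JSON dump references it via "extents_file".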
int replay_and_check_for_duplicate(char* fname)
{
unique_ptr<Allocator> alloc;
FILE* f = fopen(fname, "r");
if (!f) {
std::cerr << "error: unable to open " << fname << std::endl;
return -1;
}
PExtentVector tmp;
bool init_done = false;
char s[4096];
char* sp, *token;
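  // Extents currently owned by the application rather than the allocator;
  // every replayed transition is validated against this set to catch
  // double-frees and overlapping allocations.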
interval_set<uint64_t> owned_by_app;
while (true) {
if (fgets(s, sizeof(s), f) == nullptr) {
break;
}
sp = strstr(s, "init_add_free");
if (!sp) {
sp = strstr(s, "release");
}
if (sp) {
//2019-05-30 03:23:46.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_add_free 0x100000~680000000
// or
//2019-05-30 03:23:46.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_add_free done
// or
      // 2019-10-08T16:19:32.257+0300 7f5679f3fe80 10 fbmap_alloc 0x564fab96f100 release 0x450000~10000
// or
      // 2019-10-08T16:19:32.257+0300 7f5679f3fe80 10 fbmap_alloc 0x564fab96f100 release done
if (strstr(sp, "done") != nullptr) {
continue;
}
std::cout << s << std::endl;
if (!init_done) {
std::cerr << "error: no allocator init before: " << s << std::endl;
return -1;
}
uint64_t offs, len;
strtok(sp, " ~");
token = strtok(nullptr, " ~");
ceph_assert(token);
offs = strtoul(token, nullptr, 16);
token = strtok(nullptr, " ~");
ceph_assert(token);
len = strtoul(token, nullptr, 16);
if (len == 0) {
std::cerr << "error: " << sp <<": " << s << std::endl;
return -1;
}
if (!owned_by_app.contains(offs, len)) {
std::cerr << "error: unexpected return to allocator, not owned by app: "
<< s << std::endl;
return -1;
}
owned_by_app.erase(offs, len);
if (strstr(sp, "init_add_free") != nullptr) {
alloc->init_add_free(offs, len);
} else {
PExtentVector release_set;
release_set.emplace_back(offs, len);
alloc->release(release_set);
}
continue;
}
sp = strstr(s, "init_rm_free");
if (sp) {
//2019-05-30 03:23:46.912 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_rm_free 0x100000~680000000
// or
// 2019-05-30 03:23:46.916 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 init_rm_free done
if (strstr(sp, "done") != nullptr) {
continue;
}
std::cout << s << std::endl;
if (!init_done) {
std::cerr << "error: no allocator init before: " << s << std::endl;
return -1;
}
uint64_t offs, len;
strtok(sp, " ~");
token = strtok(nullptr, " ~");
ceph_assert(token);
offs = strtoul(token, nullptr, 16);
token = strtok(nullptr, " ~");
ceph_assert(token);
len = strtoul(token, nullptr, 16);
if (len == 0) {
std::cerr << "error: " << sp <<": " << s << std::endl;
return -1;
}
alloc->init_rm_free(offs, len);
if (owned_by_app.intersects(offs, len)) {
std::cerr
<< "error: unexpected takeover from allocator, already owned by app: "
<< s << std::endl;
return -1;
} else {
owned_by_app.insert(offs, len);
}
continue;
}
sp = strstr(s, "allocate");
if (sp) {
//2019-05-30 03:23:48.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 allocate 0x80000000/100000,0,0
// and need to bypass
// 2019-05-30 03:23:48.780 7f889a5edf00 10 fbmap_alloc 0x5642ed370600 allocate 0x69d400000~200000/100000,0,0
// Very simple and stupid check to bypass actual allocations
if (strstr(sp, "~") != nullptr) {
continue;
}
std::cout << s << std::endl;
if (!init_done) {
std::cerr << "error: no allocator init before: " << s << std::endl;
return -1;
}
uint64_t want, alloc_unit;
strtok(sp, " /");
token = strtok(nullptr, " /");
ceph_assert(token);
want = strtoul(token, nullptr, 16);
token = strtok(nullptr, " ~");
ceph_assert(token);
alloc_unit = strtoul(token, nullptr, 16);
if (want == 0 || alloc_unit == 0) {
std::cerr << "error: allocate: " << s << std::endl;
return -1;
}
tmp.clear();
auto allocated = alloc->allocate(want, alloc_unit, 0, 0, &tmp);
std::cout << "allocated TOTAL: " << allocated << std::endl;
for (auto& ee : tmp) {
std::cerr << "dump extent: " << std::hex
<< ee.offset << "~" << ee.length
<< std::dec << std::endl;
}
std::cerr << "dump completed." << std::endl;
for (auto& e : tmp) {
if (owned_by_app.intersects(e.offset, e.length)) {
std::cerr << "error: unexpected allocated extent: " << std::hex
<< e.offset << "~" << e.length
<< " dumping all allocations:" << std::dec << std::endl;
for (auto& ee : tmp) {
std::cerr <<"dump extent: " << std::hex
<< ee.offset << "~" << ee.length
<< std::dec << std::endl;
}
std::cerr <<"dump completed." << std::endl;
return -1;
} else {
owned_by_app.insert(e.offset, e.length);
}
}
continue;
}
string alloc_type = "bitmap";
sp = strstr(s, "BitmapAllocator");
if (!sp) {
alloc_type = "avl";
sp = strstr(s, "AvlAllocator");
}
if (!sp) {
alloc_type = "hybrid";
sp = strstr(s, "HybridAllocator");
}
if (!sp) {
alloc_type = "stupid";
sp = strstr(s, "StupidAllocator");
}
if (sp) {
// 2019-05-30 03:23:43.460 7f889a5edf00 10 fbmap_alloc 0x5642ed36e900 BitmapAllocator 0x15940000000/100000
std::cout << s << std::endl;
if (init_done) {
std::cerr << "error: duplicate init: " << s << std::endl;
return -1;
}
uint64_t total, alloc_unit;
strtok(sp, " /");
token = strtok(nullptr, " /");
ceph_assert(token);
total = strtoul(token, nullptr, 16);
token = strtok(nullptr, " /");
ceph_assert(token);
alloc_unit = strtoul(token, nullptr, 16);
if (total == 0 || alloc_unit == 0) {
std::cerr << "error: invalid init: " << s << std::endl;
return -1;
}
alloc.reset(Allocator::create(g_ceph_context, alloc_type, total,
alloc_unit));
owned_by_app.insert(0, total);
init_done = true;
continue;
}
}
fclose(f);
return 0;
}
int replay_free_dump_and_apply_raw(
char* fname,
std::function<void (
std::string_view,
int64_t,
int64_t,
std::string_view)> create,
std::function<void (uint64_t, uint64_t)> add_ext)
{
string alloc_type;
string alloc_name;
uint64_t capacity = 0;
uint64_t alloc_unit = 0;
JSONParser p;
std::cout << "parsing..." << std::endl;
bool b = p.parse(fname);
if (!b) {
std::cerr << "Failed to parse json: " << fname << std::endl;
return -1;
}
JSONObj::data_val v;
ceph_assert(p.is_object());
auto *o = p.find_obj("alloc_type");
ceph_assert(o);
alloc_type = o->get_data_val().str;
o = p.find_obj("alloc_name");
ceph_assert(o);
alloc_name = o->get_data_val().str;
o = p.find_obj("capacity");
ceph_assert(o);
decode_json_obj(capacity, o);
o = p.find_obj("alloc_unit");
ceph_assert(o);
decode_json_obj(alloc_unit, o);
int fd = -1;
o = p.find_obj("extents_file");
if (o) {
string filename = o->get_data_val().str;
fd = open(filename.c_str(), O_RDONLY);
if (fd < 0) {
std::cerr << "error: unable to open extents file: " << filename
<< ", " << cpp_strerror(-errno)
<< std::endl;
return -1;
}
} else {
o = p.find_obj("extents");
ceph_assert(o);
ceph_assert(o->is_array());
}
std::cout << "parsing completed!" << std::endl;
create(alloc_type, capacity, alloc_unit, alloc_name);
int r = 0;
if (fd < 0) {
auto it = o->find_first();
while (!it.end()) {
auto *item_obj = *it;
uint64_t offset = 0;
uint64_t length = 0;
string offset_str, length_str;
bool b = JSONDecoder::decode_json("offset", offset_str, item_obj);
ceph_assert(b);
b = JSONDecoder::decode_json("length", length_str, item_obj);
ceph_assert(b);
char* p;
      offset = strtoull(offset_str.c_str(), &p, 16);
      length = strtoull(length_str.c_str(), &p, 16);
// intentionally skip/trim entries that are above the capacity,
// just to be able to "shrink" allocator by editing that field
if (offset < capacity) {
if (offset + length > capacity) {
          length = capacity - offset;
}
add_ext(offset, length);
}
++it;
}
} else {
bufferlist bl;
char buf[4096];
do {
r = read(fd, buf, sizeof(buf));
if (r > 0) {
bl.append(buf, r);
}
} while(r > 0);
if (r < 0) {
std::cerr << "error: error reading from extents file: "
<< cpp_strerror(-errno)
<< std::endl;
} else {
auto p = bl.cbegin();
binary_alloc_map_t amap;
try {
decode(amap, p);
        for (auto& e : amap.free_extents) {
          add_ext(e.first, e.second);
        }
} catch (ceph::buffer::error& e) {
std::cerr << __func__ << " unable to decode extents "
<< ": " << e.what()
<< std::endl;
r = -1;
}
}
close(fd);
}
return r;
}
/*
 * This replays an allocator dump (in JSON) reported by the
 * "ceph daemon <osd> bluestore allocator dump <name>"
 * command and applies a custom method to it.
 */
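/*
 * A minimal sketch of the expected dump shape, inferred from the parsing
 * code above (all field values here are hypothetical):
 *
 * {
 *   "alloc_type": "hybrid",
 *   "alloc_name": "block",
 *   "capacity": 107374182400,
 *   "alloc_unit": 4096,
 *   "extents": [
 *     { "offset": "0x1000", "length": "0x4000" },
 *     ...
 *   ]
 * }
 *
 * Alternatively an "extents_file" string may replace "extents", pointing
 * at a denc-encoded binary_alloc_map_t dump.
 */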
int replay_free_dump_and_apply(char* fname,
std::function<int (Allocator*, const string& aname)> fn)
{
unique_ptr<Allocator> alloc;
auto create_fn = [&](std::string_view alloc_type,
int64_t capacity,
int64_t alloc_unit,
std::string_view alloc_name) {
alloc.reset(
Allocator::create(
g_ceph_context, alloc_type, capacity, alloc_unit, 0, 0, alloc_name));
};
auto add_fn = [&](uint64_t offset,
uint64_t len) {
alloc->init_add_free(offset, len);
};
int r = replay_free_dump_and_apply_raw(
fname,
create_fn,
add_fn);
if (r == 0) {
r = fn(alloc.get(), alloc->get_name());
}
return r;
}
void dump_alloc(Allocator* alloc, const string& aname)
{
AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
ceph_assert(admin_socket);
ceph::bufferlist in, out;
ostringstream err;
string cmd = "{\"prefix\": \"bluestore allocator dump " + aname + "\"}";
auto r = admin_socket->execute_command(
{ cmd },
in, err, &out);
if (r != 0) {
cerr << "failure querying: " << cpp_strerror(r) << std::endl;
}
else {
std::cout << std::string(out.c_str(), out.length()) << std::endl;
}
}
int export_as_binary(char* fname, char* target_fname)
{
  int fd = creat(target_fname, 0644);
if (fd < 0) {
std::cerr << "error: unable to open target file: " << target_fname
<< ", " << cpp_strerror(-errno)
<< std::endl;
return -1;
}
binary_alloc_map_t amap;
auto dummy_create_fn =
[&](std::string_view alloc_type,
int64_t capacity,
int64_t alloc_unit,
std::string_view alloc_name) {
};
auto add_fn = [&](uint64_t offset,
uint64_t len) {
amap.free_extents.emplace_back(offset, len);
};
int r = replay_free_dump_and_apply_raw(
fname,
dummy_create_fn,
add_fn);
if (r == 0) {
bufferlist out;
ceph::encode(amap, out);
auto w = write(fd, out.c_str(), out.length());
    if (w != (ssize_t)out.length()) {
      std::cerr << "error: unable to write to target file: " << target_fname
                << ", " << cpp_strerror(-errno)
                << std::endl;
      r = -1;
    }
}
close(fd);
return r;
}
int check_duplicates(char* fname)
{
interval_set<uint64_t> free_extents;
interval_set<uint64_t> invalid_extentsA;
interval_set<uint64_t> invalid_extentsB;
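  // Two-pass overlap detection: the first replay records extents colliding
  // with already-seen free space (invalid_extentsB); the second replay
  // collects the earlier extents they collided with (invalid_extentsA) so
  // both sides of every overlap can be reported.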
auto dummy_create_fn =
[&](std::string_view alloc_type,
int64_t capacity,
int64_t alloc_unit,
std::string_view alloc_name) {
};
size_t errors = 0;
size_t pos = 0;
size_t first_err_pos = 0;
auto add_fn = [&](uint64_t offset,
uint64_t len) {
++pos;
if (free_extents.intersects(offset, len)) {
invalid_extentsB.insert(offset, len);
++errors;
if (first_err_pos == 0) {
first_err_pos = pos;
}
} else {
free_extents.insert(offset, len);
}
};
int r = replay_free_dump_and_apply_raw(
fname,
dummy_create_fn,
add_fn);
if (r < 0) {
return r;
}
pos = 0;
auto add_fn2 = [&](uint64_t offset,
uint64_t len) {
++pos;
if (pos < first_err_pos) {
if (invalid_extentsB.intersects(offset, len)) {
invalid_extentsA.insert(offset, len);
}
}
};
r = replay_free_dump_and_apply_raw(
fname,
dummy_create_fn,
add_fn2);
ceph_assert(r >= 0);
auto itA = invalid_extentsA.begin();
auto itB = invalid_extentsB.begin();
while (itA != invalid_extentsA.end()) {
std::cerr << "error: overlapping extents: " << std::hex
<< itA.get_start() << "~" << itA.get_end() - itA.get_start()
<< " vs.";
while (itB != invalid_extentsB.end() &&
itB.get_start() >= itA.get_start() &&
itB.get_end() <= itA.get_end()) {
std::cerr << " " << itB.get_start() << "~" << itB.get_end() - itB.get_start();
++itB;
}
std::cerr << std::dec << std::endl;
++itA;
}
return r >= 0 ? errors != 0 : r;
}
int main(int argc, char **argv)
{
vector<const char*> args;
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
g_ceph_context->_conf.apply_changes(nullptr);
if (argc < 3) {
usage(argv[0]);
return 1;
}
if (strcmp(argv[2], "raw_duplicates") == 0) {
return replay_and_check_for_duplicate(argv[1]);
} else if (strcmp(argv[2], "free_dump") == 0) {
return replay_free_dump_and_apply(argv[1],
[&](Allocator* a, const string& aname) {
ceph_assert(a);
std::cout << "Fragmentation:" << a->get_fragmentation()
<< std::endl;
std::cout << "Fragmentation score:" << a->get_fragmentation_score()
<< std::endl;
std::cout << "Free:" << std::hex << a->get_free() << std::dec
<< std::endl;
{
// stub to implement various testing stuff on properly initialized allocator
// e.g. one can dump allocator back via dump_alloc(a, aname);
}
return 0;
});
} else if (strcmp(argv[2], "try_alloc") == 0) {
if (argc < 6) {
std::cerr << "Error: insufficient arguments for \"try_alloc\" operation."
<< std::endl;
usage(argv[0]);
return 1;
}
auto count = strtoul(argv[3], nullptr, 10);
auto want = strtoul(argv[4], nullptr, 10);
auto alloc_unit = strtoul(argv[5], nullptr, 10);
return replay_free_dump_and_apply(argv[1],
[&](Allocator* a, const string& aname) {
ceph_assert(a);
std::cout << "Fragmentation:" << a->get_fragmentation()
<< std::endl;
std::cout << "Fragmentation score:" << a->get_fragmentation_score()
<< std::endl;
std::cout << "Free:" << std::hex << a->get_free() << std::dec
<< std::endl;
{
PExtentVector extents;
for(size_t i = 0; i < count; i++) {
extents.clear();
auto r = a->allocate(want, alloc_unit, 0, &extents);
if (r < 0) {
std::cerr << "Error: allocation failure at step:" << i + 1
<< ", ret = " << r << std::endl;
return -1;
}
}
}
std::cout << "Successfully allocated: " << count << " * " << want
<< ", unit:" << alloc_unit << std::endl;
return 0;
});
} else if (strcmp(argv[2], "replay_alloc") == 0) {
if (argc < 4) {
std::cerr << "Error: insufficient arguments for \"replay_alloc\" option."
<< std::endl;
usage_replay_alloc(argv[0]);
return 1;
}
return replay_free_dump_and_apply(argv[1],
[&](Allocator *a, const string &aname) {
ceph_assert(a);
std::cout << "Fragmentation:" << a->get_fragmentation()
<< std::endl;
std::cout << "Fragmentation score:" << a->get_fragmentation_score()
<< std::endl;
std::cout << "Free:" << std::hex << a->get_free() << std::dec
<< std::endl;
{
/* replay a set of allocation requests */
char s[4096];
FILE *f_alloc_list = fopen(argv[3], "r");
if (!f_alloc_list) {
std::cerr << "error: unable to open " << argv[3] << std::endl;
return -1;
}
/* Replay user specified number of times to simulate extended activity
* Defaults to 1 replay.
*/
auto replay_count = 1;
if (argc == 5) {
replay_count = atoi(argv[4]);
}
for (auto i = 0; i < replay_count; ++i) {
while (fgets(s, sizeof(s), f_alloc_list) != nullptr) {
/* parse allocation request */
uint64_t want = 0, unit = 0, max = 0, hint = 0;
if (std::sscanf(s, "%ji %ji %ji %ji", &want, &unit, &max, &hint) < 2)
{
cerr << "Error: malformed allocation request:" << std::endl;
cerr << s << std::endl;
/* do not attempt to allocate a malformed request */
continue;
}
/* timestamp for allocation start */
auto t0 = ceph::mono_clock::now();
/* allocate */
PExtentVector extents;
auto r = a->allocate(want, unit, max, hint, &extents);
if (r < 0) {
/* blind replays of allocations may run out of space, provide info for easy confirmation */
std::cerr << "Error: allocation failure code: " << r
<< " requested want/unit/max/hint (hex): " << std::hex
<< want << "/" << unit << "/" << max << "/" << hint
<< std::dec << std::endl;
std::cerr << "Fragmentation:" << a->get_fragmentation()
<< std::endl;
std::cerr << "Fragmentation score:" << a->get_fragmentation_score()
<< std::endl;
std::cerr << "Free:" << std::hex << a->get_free() << std::dec
<< std::endl;
/* return 0 if the allocator ran out of space */
if (r == -ENOSPC) {
return 0;
}
return -1;
}
/* Outputs the allocation's duration in nanoseconds and the allocation request parameters */
std::cout << "Duration (ns): " << (ceph::mono_clock::now() - t0).count()
<< " want/unit/max/hint (hex): " << std::hex
<< want << "/" << unit << "/" << max << "/" << hint
<< std::dec << std::endl;
/* Do not release. */
//alloc->release(extents);
extents.clear();
}
fseek(f_alloc_list, 0, SEEK_SET);
}
fclose(f_alloc_list);
std::cout << "Fragmentation:" << a->get_fragmentation()
<< std::endl;
std::cout << "Fragmentation score:" << a->get_fragmentation_score()
<< std::endl;
std::cout << "Free:" << std::hex << a->get_free() << std::dec
<< std::endl;
}
return 0;
});
} else if (strcmp(argv[2], "export_binary") == 0) {
return export_as_binary(argv[1], argv[3]);
} else if (strcmp(argv[2], "duplicates") == 0) {
return check_duplicates(argv[1]);
}
}
| 21,521 | 29.966906 | 185 |
cc
|
null |
ceph-main/src/test/objectstore/fastbmap_allocator_test.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <gtest/gtest.h>
#include "os/bluestore/fastbmap_allocator_impl.h"
class TestAllocatorLevel01 : public AllocatorLevel01Loose
{
public:
void init(uint64_t capacity, uint64_t alloc_unit)
{
_init(capacity, alloc_unit);
}
interval_t allocate_l1_cont(uint64_t length, uint64_t min_length,
uint64_t pos_start, uint64_t pos_end)
{
return _allocate_l1_contiguous(length, min_length, 0, pos_start, pos_end);
}
void free_l1(const interval_t& r)
{
_free_l1(r.offset, r.length);
}
};
class TestAllocatorLevel02 : public AllocatorLevel02<AllocatorLevel01Loose>
{
public:
void init(uint64_t capacity, uint64_t alloc_unit)
{
_init(capacity, alloc_unit);
}
void allocate_l2(uint64_t length, uint64_t min_length,
uint64_t* allocated0,
interval_vector_t* res)
{
uint64_t allocated = 0;
uint64_t hint = 0; // trigger internal l2 hint support
_allocate_l2(length, min_length, 0, hint, &allocated, res);
*allocated0 += allocated;
}
void free_l2(const interval_vector_t& r)
{
_free_l2(r);
}
void mark_free(uint64_t o, uint64_t len)
{
_mark_free(o, len);
}
void mark_allocated(uint64_t o, uint64_t len)
{
_mark_allocated(o, len);
}
};
const uint64_t _1m = 1024 * 1024;
const uint64_t _2m = 2 * 1024 * 1024;
TEST(TestAllocatorLevel01, test_l1)
{
TestAllocatorLevel01 al1;
uint64_t num_l1_entries = 3 * 256;
uint64_t capacity = num_l1_entries * 512 * 4096;
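  // each L1 entry spans 512 * 4K = 2M, so 768 entries give 1.5G of capacity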
al1.init(capacity, 0x1000);
ASSERT_EQ(capacity, al1.debug_get_free());
auto i1 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i1.offset, 0u);
ASSERT_EQ(i1.length, 0x1000u);
ASSERT_EQ(capacity - 0x1000, al1.debug_get_free());
auto i2 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 0x1000u);
ASSERT_EQ(i2.length, 0x1000u);
al1.free_l1(i2);
al1.free_l1(i1);
i1 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i1.offset, 0u);
ASSERT_EQ(i1.length, 0x1000u);
i2 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 0x1000u);
ASSERT_EQ(i2.length, 0x1000u);
al1.free_l1(i1);
al1.free_l1(i2);
i1 = al1.allocate_l1_cont(0x2000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i1.offset, 0u);
ASSERT_EQ(i1.length, 0x2000u);
i2 = al1.allocate_l1_cont(0x3000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 0x2000u);
ASSERT_EQ(i2.length, 0x3000u);
al1.free_l1(i1);
al1.free_l1(i2);
i1 = al1.allocate_l1_cont(0x2000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i1.offset, 0u);
ASSERT_EQ(i1.length, 0x2000u);
i2 = al1.allocate_l1_cont(2 * 1024 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 2u * 1024u * 1024u);
ASSERT_EQ(i2.length, 2u * 1024u * 1024u);
al1.free_l1(i1);
i1 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i1.offset, 0u);
ASSERT_EQ(i1.length, 1024u * 1024u);
auto i3 = al1.allocate_l1_cont(1024 * 1024 + 0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i3.offset, 2u * 2u * 1024u * 1024u);
ASSERT_EQ(i3.length, 1024u * 1024u + 0x1000u);
// here we have the following layout:
// Alloc: 0~1M, 2M~2M, 4M~1M+4K
// Free: 1M~1M, 4M+4K ~ 2M-4K, 6M ~...
//
auto i4 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(1 * 1024 * 1024u, i4.offset);
ASSERT_EQ(1024 * 1024u, i4.length);
al1.free_l1(i4);
i4 = al1.allocate_l1_cont(1024 * 1024 - 0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i4.offset, 5u * 1024u * 1024u + 0x1000u);
ASSERT_EQ(i4.length, 1024u * 1024u - 0x1000u);
al1.free_l1(i4);
i4 = al1.allocate_l1_cont(1024 * 1024 + 0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i4.offset, 6u * 1024u * 1024u);
//ASSERT_EQ(i4.offset, 5 * 1024 * 1024 + 0x1000);
ASSERT_EQ(i4.length, 1024u * 1024u + 0x1000u);
al1.free_l1(i1);
al1.free_l1(i2);
al1.free_l1(i3);
al1.free_l1(i4);
i1 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i1.offset, 0u);
ASSERT_EQ(i1.length, 1024u * 1024u);
i2 = al1.allocate_l1_cont(1024 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 1u * 1024u * 1024u);
ASSERT_EQ(i2.length, 1024u * 1024u);
i3 = al1.allocate_l1_cont(512 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i3.offset, 2u * 1024u * 1024u);
ASSERT_EQ(i3.length, 512u * 1024u);
i4 = al1.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i4.offset, (2u * 1024u + 512u) * 1024u);
ASSERT_EQ(i4.length, 1536u * 1024u);
// making a hole 1.5 Mb length
al1.free_l1(i2);
al1.free_l1(i3);
// and trying to fill it
i2 = al1.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 1024u * 1024u);
ASSERT_EQ(i2.length, 1536u * 1024u);
al1.free_l1(i2);
// and trying to fill it partially
i2 = al1.allocate_l1_cont(1528 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 1024u * 1024u);
ASSERT_EQ(i2.length, 1528u * 1024u);
i3 = al1.allocate_l1_cont(8 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i3.offset, 2552u * 1024u);
ASSERT_EQ(i3.length, 8u * 1024u);
al1.free_l1(i2);
// here we have the following layout:
  // Alloc: 0~1M, 2552K~8K, 2560K~1.5M
// Free: 1M~1528K, 4M ~...
//
i2 = al1.allocate_l1_cont(1536 * 1024, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.offset, 4u * 1024u * 1024u);
ASSERT_EQ(i2.length, 1536u * 1024u);
al1.free_l1(i1);
al1.free_l1(i2);
al1.free_l1(i3);
al1.free_l1(i4);
ASSERT_EQ(capacity, al1.debug_get_free());
for (uint64_t i = 0; i < capacity; i += _2m) {
i1 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries);
ASSERT_EQ(i1.offset, i);
ASSERT_EQ(i1.length, _2m);
}
ASSERT_EQ(0u, al1.debug_get_free());
i2 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries);
ASSERT_EQ(i2.length, 0u);
ASSERT_EQ(0u, al1.debug_get_free());
al1.free_l1(i1);
i2 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries);
ASSERT_EQ(i2, i1);
al1.free_l1(i2);
i2 = al1.allocate_l1_cont(_1m, _1m, 0, num_l1_entries);
ASSERT_EQ(i2.offset, i1.offset);
ASSERT_EQ(i2.length, _1m);
i3 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries);
ASSERT_EQ(i3.length, 0u);
i3 = al1.allocate_l1_cont(_2m, _1m, 0, num_l1_entries);
ASSERT_EQ(i3.length, _1m);
i4 = al1.allocate_l1_cont(_2m, _1m, 0, num_l1_entries);
ASSERT_EQ(i4.length, 0u);
al1.free_l1(i2);
i2 = al1.allocate_l1_cont(_2m, _2m, 0, num_l1_entries);
ASSERT_EQ(i2.length, 0u);
i2 = al1.allocate_l1_cont(_2m, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.length, _1m);
al1.free_l1(i2);
al1.free_l1(i3);
ASSERT_EQ(_2m, al1.debug_get_free());
i1 = al1.allocate_l1_cont(_2m - 3 * 0x1000, 0x1000, 0, num_l1_entries);
i2 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
i3 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
i4 = al1.allocate_l1_cont(0x1000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(0u, al1.debug_get_free());
al1.free_l1(i2);
al1.free_l1(i4);
i2 = al1.allocate_l1_cont(0x4000, 0x2000, 0, num_l1_entries);
ASSERT_EQ(i2.length, 0u);
i2 = al1.allocate_l1_cont(0x4000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i2.length, 0x1000u);
al1.free_l1(i3);
i3 = al1.allocate_l1_cont(0x6000, 0x3000, 0, num_l1_entries);
ASSERT_EQ(i3.length, 0u);
i3 = al1.allocate_l1_cont(0x6000, 0x1000, 0, num_l1_entries);
ASSERT_EQ(i3.length, 0x2000u);
ASSERT_EQ(0u, al1.debug_get_free());
std::cout << "Done L1" << std::endl;
}
TEST(TestAllocatorLevel01, test_l2)
{
TestAllocatorLevel02 al2;
uint64_t num_l2_entries = 64;// *512;
uint64_t capacity = num_l2_entries * 256 * 512 * 4096;
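  // each L2 entry spans 256 * 512 * 4K = 512M, so 64 entries give 32G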
al2.init(capacity, 0x1000);
std::cout << "Init L2" << std::endl;
uint64_t allocated1 = 0;
interval_vector_t a1;
al2.allocate_l2(0x2000, 0x2000, &allocated1, &a1);
ASSERT_EQ(allocated1, 0x2000u);
ASSERT_EQ(a1[0].offset, 0u);
ASSERT_EQ(a1[0].length, 0x2000u);
// limit query range in debug_get_free for the sake of performance
ASSERT_EQ(0x2000u, al2.debug_get_allocated(0, 1));
ASSERT_EQ(0u, al2.debug_get_allocated(1, 2));
uint64_t allocated2 = 0;
interval_vector_t a2;
al2.allocate_l2(0x2000, 0x2000, &allocated2, &a2);
ASSERT_EQ(allocated2, 0x2000u);
ASSERT_EQ(a2[0].offset, 0x2000u);
ASSERT_EQ(a2[0].length, 0x2000u);
// limit query range in debug_get_free for the sake of performance
ASSERT_EQ(0x4000u, al2.debug_get_allocated(0, 1));
ASSERT_EQ(0u, al2.debug_get_allocated(1, 2));
al2.free_l2(a1);
allocated2 = 0;
a2.clear();
al2.allocate_l2(0x1000, 0x1000, &allocated2, &a2);
ASSERT_EQ(allocated2, 0x1000u);
ASSERT_EQ(a2[0].offset, 0x0000u);
ASSERT_EQ(a2[0].length, 0x1000u);
// limit query range in debug_get_free for the sake of performance
ASSERT_EQ(0x3000u, al2.debug_get_allocated(0, 1));
ASSERT_EQ(0u, al2.debug_get_allocated(1, 2));
uint64_t allocated3 = 0;
interval_vector_t a3;
al2.allocate_l2(0x2000, 0x1000, &allocated3, &a3);
ASSERT_EQ(allocated3, 0x2000u);
ASSERT_EQ(a3.size(), 2u);
ASSERT_EQ(a3[0].offset, 0x1000u);
ASSERT_EQ(a3[0].length, 0x1000u);
ASSERT_EQ(a3[1].offset, 0x4000u);
ASSERT_EQ(a3[1].length, 0x1000u);
// limit query range in debug_get_free for the sake of performance
ASSERT_EQ(0x5000u, al2.debug_get_allocated(0, 1));
ASSERT_EQ(0u, al2.debug_get_allocated(1, 2));
{
interval_vector_t r;
r.emplace_back(0x0, 0x5000);
al2.free_l2(r);
}
a3.clear();
allocated3 = 0;
al2.allocate_l2(_1m, _1m, &allocated3, &a3);
ASSERT_EQ(a3.size(), 1u);
ASSERT_EQ(a3[0].offset, 0u);
ASSERT_EQ(a3[0].length, _1m);
al2.free_l2(a3);
a3.clear();
allocated3 = 0;
al2.allocate_l2(4 * _1m, _1m, &allocated3, &a3);
ASSERT_EQ(a3.size(), 1u);
ASSERT_EQ(a3[0].offset, 0u);
ASSERT_EQ(a3[0].length, 4 * _1m);
al2.free_l2(a3);
#ifndef _DEBUG
for (uint64_t i = 0; i < capacity; i += 0x1000) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, 0x1000u);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc1 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
#else
for (uint64_t i = 0; i < capacity; i += _2m) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_2m, _2m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, _2m);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc1 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
#endif
ASSERT_EQ(0u, al2.debug_get_free());
for (uint64_t i = 0; i < capacity; i += _1m) {
interval_vector_t r;
r.emplace_back(i, _1m);
al2.free_l2(r);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "free1 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
ASSERT_EQ(capacity, al2.debug_get_free());
for (uint64_t i = 0; i < capacity; i += _1m) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, _1m);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc2 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
ASSERT_EQ(0u, al2.debug_get_free());
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
for (uint64_t i = 0; i < capacity; i += 0x2000) {
interval_vector_t r;
r.emplace_back(i, 0x1000);
al2.free_l2(r);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "free2 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
ASSERT_EQ(capacity / 2, al2.debug_get_free());
// unable to allocate due to fragmentation
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
for (uint64_t i = 0; i < capacity; i += 2 * _1m) {
a4.clear();
allocated4 = 0;
al2.allocate_l2(_1m, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), _1m / 0x1000);
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, 0x1000u);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "alloc3 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
ASSERT_EQ(0u, al2.debug_get_free());
std::cout << "Done L2" << std::endl;
}
TEST(TestAllocatorLevel01, test_l2_huge)
{
TestAllocatorLevel02 al2;
uint64_t num_l2_entries = 4 * 512;
uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 1 TB
al2.init(capacity, 0x1000);
std::cout << "Init L2 Huge" << std::endl;
for (uint64_t i = 0; i < capacity; i += _1m) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, 0x1000u);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, 0x1000u);
allocated4 = 0;
a4.clear();
al2.allocate_l2(_1m - 0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m - 0x1000);
ASSERT_EQ(a4[0].offset, i + 0x1000);
ASSERT_EQ(a4[0].length, _1m - 0x1000);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "allocH " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
for (uint64_t i = 0; i < capacity; i += _1m) {
interval_vector_t a4;
a4.emplace_back(i, 0x1000);
al2.free_l2(a4);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "freeH1 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
{
std::cout << "Try" << std::endl;
time_t t = time(NULL);
for (int i = 0; i < 10; ++i) {
uint64_t allocated = 0;
interval_vector_t a;
al2.allocate_l2(0x2000, 0x2000, &allocated, &a);
ASSERT_EQ(a.size(), 0u);
}
std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl;
}
{
std::cout << "Try" << std::endl;
time_t t = time(NULL);
for (int i = 0; i < 10; ++i) {
uint64_t allocated = 0;
interval_vector_t a;
al2.allocate_l2(_2m, _2m, &allocated, &a);
ASSERT_EQ(a.size(), 0u);
}
std::cout << "End try in " << time(NULL) - t << " seconds" << std::endl;
}
ASSERT_EQ((capacity / _1m) * 0x1000, al2.debug_get_free());
std::cout << "Done L2 Huge" << std::endl;
}
TEST(TestAllocatorLevel01, test_l2_unaligned)
{
{
TestAllocatorLevel02 al2;
uint64_t num_l2_entries = 3;
uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB
al2.init(capacity, 0x1000);
std::cout << "Init L2 Unaligned" << std::endl;
for (uint64_t i = 0; i < capacity; i += _1m / 2) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m / 2, _1m / 2, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m / 2);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, _1m / 2);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "allocU " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
ASSERT_EQ(0u, al2.debug_get_free());
{
// no space to allocate
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
}
}
{
TestAllocatorLevel02 al2;
uint64_t capacity = 500 * 512 * 4096; // 500x2 MB
al2.init(capacity, 0x1000);
std::cout << ("Init L2 Unaligned2\n");
for (uint64_t i = 0; i < capacity; i += _1m / 2) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m / 2, _1m / 2, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m / 2);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, _1m / 2);
if (0 == (i % (1 * 1024 * _1m))) {
std::cout << "allocU2 " << i / 1024 / 1024 << " mb of "
<< capacity / 1024 / 1024 << std::endl;
}
}
ASSERT_EQ(0u, al2.debug_get_free());
{
// no space to allocate
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
}
}
{
TestAllocatorLevel02 al2;
uint64_t capacity = 100 * 512 * 4096 + 127 * 4096;
al2.init(capacity, 0x1000);
std::cout << "Init L2 Unaligned2" << std::endl;
for (uint64_t i = 0; i < capacity; i += 0x1000) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, 0x1000u);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, 0x1000u);
}
ASSERT_EQ(0u, al2.debug_get_free());
{
// no space to allocate
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
}
}
{
TestAllocatorLevel02 al2;
uint64_t capacity = 3 * 4096;
al2.init(capacity, 0x1000);
std::cout << "Init L2 Unaligned2" << std::endl;
for (uint64_t i = 0; i < capacity; i += 0x1000) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, 0x1000u);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, 0x1000u);
}
ASSERT_EQ(0u, al2.debug_get_free());
{
// no space to allocate
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 0u);
}
}
std::cout << "Done L2 Unaligned" << std::endl;
}
TEST(TestAllocatorLevel01, test_l2_contiguous_alignment)
{
{
TestAllocatorLevel02 al2;
uint64_t num_l2_entries = 3;
uint64_t capacity = num_l2_entries * 256 * 512 * 4096; // 3x512 MB
uint64_t num_chunks = capacity / 4096;
al2.init(capacity, 4096);
std::cout << "Init L2 cont aligned" << std::endl;
std::map<size_t, size_t> bins_overall;
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 1u);
// std::cout<<bins_overall.begin()->first << std::endl;
ASSERT_EQ(bins_overall[cbits(num_chunks) - 1], 1u);
for (uint64_t i = 0; i < capacity / 2; i += _1m) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, _1m);
}
ASSERT_EQ(capacity / 2, al2.debug_get_free());
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
{
size_t to_release = 2 * _1m + 0x1000;
// release 2M + 4K at the beginning
interval_vector_t r;
r.emplace_back(0, to_release);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 2u);
ASSERT_EQ(bins_overall[cbits(to_release / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// allocate 4K within the deallocated range
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x1000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, 0x1000u);
ASSERT_EQ(a4[0].offset, 0u);
ASSERT_EQ(a4[0].length, 0x1000u);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 2u);
ASSERT_EQ(bins_overall[cbits(2 * _1m / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// allocate 1M - should go to the second 1M chunk
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, _1m);
ASSERT_EQ(a4[0].length, _1m);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[0], 1u);
ASSERT_EQ(bins_overall[cbits((_1m - 0x1000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// and allocate yet another 8K within the deallocated range
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x2000, 0x1000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, 0x2000u);
ASSERT_EQ(a4[0].offset, 0x1000u);
ASSERT_EQ(a4[0].length, 0x2000u);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall[0], 1u);
ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// release just allocated 1M
interval_vector_t r;
r.emplace_back(_1m, _1m);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 2u);
ASSERT_EQ(bins_overall[cbits((2 * _1m - 0x3000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// allocate 3M - should go to the second 1M chunk and @capacity/2
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(3 * _1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u);
ASSERT_EQ(allocated4, 3 * _1m);
ASSERT_EQ(a4[0].offset, _1m);
ASSERT_EQ(a4[0].length, _1m);
ASSERT_EQ(a4[1].offset, capacity / 2);
ASSERT_EQ(a4[1].length, 2 * _1m);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[0], 1u);
ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
}
{
// release allocated 1M in the second meg chunk except
// the first 4K chunk
interval_vector_t r;
r.emplace_back(_1m + 0x1000, _1m);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 512) / 2) - 1], 1u);
}
{
// release 2M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 2 * _1m);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[cbits(_1m / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((_1m - 0x3000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks) / 2) - 1], 1u);
}
{
// allocate 4x512K - should go to the second halves of
// the first and second 1M chunks and @(capacity / 2)
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(2 * _1m, _1m / 2, &allocated4, &a4);
ASSERT_EQ(a4.size(), 3u);
ASSERT_EQ(allocated4, 2 * _1m);
ASSERT_EQ(a4[0].offset, _1m / 2);
ASSERT_EQ(a4[0].length, _1m / 2);
ASSERT_EQ(a4[1].offset, _1m + _1m / 2);
ASSERT_EQ(a4[1].length, _1m / 2);
ASSERT_EQ(a4[2].offset, capacity / 2);
ASSERT_EQ(a4[2].length, _1m);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[0], 1u);
// below we have 512K - 4K & 512K - 12K chunks which both fit into
// the same bin = 6
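      // ((512K - 4K) / 4K = 127 and (512K - 12K) / 4K = 125 chunks;
      // cbits() is 7 for both, hence bin index 7 - 1 = 6)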
ASSERT_EQ(bins_overall[6], 2u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
}
{
      // cleanup the first 2M except the last 4K chunk
interval_vector_t r;
r.emplace_back(0, 2 * _1m - 0x1000);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[0], 1u);
ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits((num_chunks - 256) / 2) - 1], 1u);
}
{
// release 2M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 2 * _1m);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
ASSERT_EQ(bins_overall[0], 1u);
ASSERT_EQ(bins_overall[cbits((_2m - 0x1000) / 0x1000) - 1], 1u);
ASSERT_EQ(bins_overall[cbits(num_chunks / 2) - 1], 1u);
}
{
// allocate 132M using 4M granularity should go to (capacity / 2)
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(132 * _1m, 4 * _1m , &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(a4[0].offset, capacity / 2);
ASSERT_EQ(a4[0].length, 132 * _1m);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 3u);
}
{
// cleanup left 4K chunk in the first 2M
interval_vector_t r;
r.emplace_back(2 * _1m - 0x1000, 0x1000);
al2.free_l2(r);
bins_overall.clear();
al2.collect_stats(bins_overall);
ASSERT_EQ(bins_overall.size(), 2u);
}
{
// release 132M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 132 * _1m);
al2.free_l2(r);
}
{
// allocate 132M using 2M granularity should go to the first chunk and to
// (capacity / 2)
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(132 * _1m, 2 * _1m , &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u);
ASSERT_EQ(a4[0].offset, 0u);
ASSERT_EQ(a4[0].length, 2 * _1m);
ASSERT_EQ(a4[1].offset, capacity / 2);
ASSERT_EQ(a4[1].length, 130 * _1m);
}
{
// release 130M @(capacity / 2)
interval_vector_t r;
r.emplace_back(capacity / 2, 132 * _1m);
al2.free_l2(r);
}
{
// release 4K~16K
// release 28K~32K
// release 68K~24K
interval_vector_t r;
r.emplace_back(0x1000, 0x4000);
r.emplace_back(0x7000, 0x8000);
r.emplace_back(0x11000, 0x6000);
al2.free_l2(r);
}
{
// allocate 32K using 16K granularity - should bypass the first
// unaligned extent, use the second free extent partially given
// the 16K alignment and then fallback to capacity / 2
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x8000, 0x4000, &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u);
ASSERT_EQ(a4[0].offset, 0x8000u);
ASSERT_EQ(a4[0].length, 0x4000u);
ASSERT_EQ(a4[1].offset, capacity / 2);
ASSERT_EQ(a4[1].length, 0x4000u);
}
}
std::cout << "Done L2 cont aligned" << std::endl;
}
TEST(TestAllocatorLevel01, test_4G_alloc_bug)
{
{
TestAllocatorLevel02 al2;
uint64_t capacity = 0x8000 * _1m; // = 32GB
al2.init(capacity, 0x10000);
std::cout << "Init L2 cont aligned" << std::endl;
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u); // the bug caused no allocations here
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, 0u);
ASSERT_EQ(a4[0].length, _1m);
}
}
TEST(TestAllocatorLevel01, test_4G_alloc_bug2)
{
{
TestAllocatorLevel02 al2;
uint64_t capacity = 0x8000 * _1m; // = 32GB
al2.init(capacity, 0x10000);
for (uint64_t i = 0; i < capacity; i += _1m) {
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(_1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 1u);
ASSERT_EQ(allocated4, _1m);
ASSERT_EQ(a4[0].offset, i);
ASSERT_EQ(a4[0].length, _1m);
}
ASSERT_EQ(0u , al2.debug_get_free());
interval_vector_t r;
r.emplace_back(0x5fec30000, 0x13d0000);
r.emplace_back(0x628000000, 0x80000000);
r.emplace_back(0x6a8000000, 0x80000000);
r.emplace_back(0x728100000, 0x70000);
al2.free_l2(r);
std::map<size_t, size_t> bins_overall;
al2.collect_stats(bins_overall);
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(0x3e000000, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u);
ASSERT_EQ(allocated4, 0x3e000000u);
ASSERT_EQ(a4[0].offset, 0x5fed00000u);
ASSERT_EQ(a4[0].length, 0x1300000u);
ASSERT_EQ(a4[1].offset, 0x628000000u);
ASSERT_EQ(a4[1].length, 0x3cd00000u);
}
}
TEST(TestAllocatorLevel01, test_4G_alloc_bug3)
{
{
TestAllocatorLevel02 al2;
uint64_t capacity = 0x8000 * _1m; // = 32GB
al2.init(capacity, 0x10000);
std::cout << "Init L2 cont aligned" << std::endl;
uint64_t allocated4 = 0;
interval_vector_t a4;
al2.allocate_l2(4096ull * _1m, _1m, &allocated4, &a4);
ASSERT_EQ(a4.size(), 2u); // allocator has to split into 2 allocations
ASSERT_EQ(allocated4, 4096ull * _1m);
ASSERT_EQ(a4[0].offset, 0u);
ASSERT_EQ(a4[0].length, 2048ull * _1m);
ASSERT_EQ(a4[1].offset, 2048ull * _1m);
ASSERT_EQ(a4[1].length, 2048ull * _1m);
}
}
TEST(TestAllocatorLevel01, test_claim_free_l2)
{
TestAllocatorLevel02 al2;
uint64_t num_l2_entries = 64;// *512;
uint64_t capacity = num_l2_entries * 256 * 512 * 4096;
al2.init(capacity, 0x1000);
std::cout << "Init L2" << std::endl;
uint64_t max_available = 0x20000;
al2.mark_allocated(max_available, capacity - max_available);
uint64_t allocated1 = 0;
interval_vector_t a1;
al2.allocate_l2(0x2000, 0x2000, &allocated1, &a1);
ASSERT_EQ(allocated1, 0x2000u);
ASSERT_EQ(a1[0].offset, 0u);
ASSERT_EQ(a1[0].length, 0x2000u);
uint64_t allocated2 = 0;
interval_vector_t a2;
al2.allocate_l2(0x2000, 0x2000, &allocated2, &a2);
ASSERT_EQ(allocated2, 0x2000u);
ASSERT_EQ(a2[0].offset, 0x2000u);
ASSERT_EQ(a2[0].length, 0x2000u);
uint64_t allocated3 = 0;
interval_vector_t a3;
al2.allocate_l2(0x3000, 0x3000, &allocated3, &a3);
ASSERT_EQ(allocated3, 0x3000u);
ASSERT_EQ(a3[0].offset, 0x4000u);
ASSERT_EQ(a3[0].length, 0x3000u);
al2.free_l2(a1);
al2.free_l2(a3);
ASSERT_EQ(max_available - 0x2000, al2.debug_get_free());
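  // As exercised below, claim_free_to_right(pos) is expected to grab (mark
  // allocated and return the size of) the contiguous free space starting at
  // pos and extending to the right; claim_free_to_left(pos) does the mirror
  // image for the free space ending at pos.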
auto claimed = al2.claim_free_to_right(0x4000);
ASSERT_EQ(max_available - 0x4000u, claimed);
ASSERT_EQ(0x2000, al2.debug_get_free());
claimed = al2.claim_free_to_right(0x4000);
ASSERT_EQ(0, claimed);
ASSERT_EQ(0x2000, al2.debug_get_free());
claimed = al2.claim_free_to_left(0x2000);
ASSERT_EQ(0x2000u, claimed);
ASSERT_EQ(0, al2.debug_get_free());
claimed = al2.claim_free_to_left(0x2000);
ASSERT_EQ(0, claimed);
ASSERT_EQ(0, al2.debug_get_free());
al2.mark_free(0x3000, 0x4000);
ASSERT_EQ(0x4000, al2.debug_get_free());
claimed = al2.claim_free_to_right(0x7000);
ASSERT_EQ(0, claimed);
ASSERT_EQ(0x4000, al2.debug_get_free());
claimed = al2.claim_free_to_right(0x6000);
ASSERT_EQ(0x1000, claimed);
ASSERT_EQ(0x3000, al2.debug_get_free());
claimed = al2.claim_free_to_right(0x6000);
ASSERT_EQ(0, claimed);
ASSERT_EQ(0x3000, al2.debug_get_free());
claimed = al2.claim_free_to_left(0x3000);
ASSERT_EQ(0u, claimed);
ASSERT_EQ(0x3000, al2.debug_get_free());
claimed = al2.claim_free_to_left(0x4000);
ASSERT_EQ(0x1000, claimed);
ASSERT_EQ(0x2000, al2.debug_get_free());
// claiming on the right boundary
claimed = al2.claim_free_to_right(capacity);
ASSERT_EQ(0x0, claimed);
ASSERT_EQ(0x2000, al2.debug_get_free());
// extend allocator space up to 64M
auto max_available2 = 64 * 1024 * 1024;
al2.mark_free(max_available, max_available2 - max_available);
ASSERT_EQ(max_available2 - max_available + 0x2000, al2.debug_get_free());
// pin some allocations
al2.mark_allocated(0x400000 + 0x2000, 1000);
al2.mark_allocated(0x400000 + 0x5000, 1000);
al2.mark_allocated(0x400000 + 0x20000, 1000);
ASSERT_EQ(max_available2 - max_available - 0x1000, al2.debug_get_free());
claimed = al2.claim_free_to_left(0x403000);
ASSERT_EQ(0x0, claimed);
claimed = al2.claim_free_to_left(0x404000);
ASSERT_EQ(0x1000, claimed);
ASSERT_EQ(max_available2 - max_available - 0x2000, al2.debug_get_free());
claimed = al2.claim_free_to_left(max_available);
ASSERT_EQ(0, claimed);
claimed = al2.claim_free_to_left(0x400000);
ASSERT_EQ(0x3e0000, claimed);
ASSERT_EQ(max_available2 - max_available - 0x3e2000, al2.get_available());
ASSERT_EQ(max_available2 - max_available - 0x3e2000, al2.debug_get_free());
claimed = al2.claim_free_to_right(0x407000);
ASSERT_EQ(0x19000, claimed);
ASSERT_EQ(max_available2 - max_available - 0x3e2000 - 0x19000,
al2.get_available());
ASSERT_EQ(max_available2 - max_available - 0x3e2000 - 0x19000,
al2.debug_get_free());
claimed = al2.claim_free_to_right(0x407000);
ASSERT_EQ(0, claimed);
claimed = al2.claim_free_to_right(0x430000);
ASSERT_EQ(max_available2 - 0x430000, claimed);
ASSERT_EQ(0x15000,
al2.get_available());
ASSERT_EQ(0x15000,
al2.debug_get_free());
}
ceph-main/src/test/objectstore/hybrid_allocator_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <iostream>
#include <gtest/gtest.h>
#include "os/bluestore/HybridAllocator.h"
class TestHybridAllocator : public HybridAllocator {
public:
TestHybridAllocator(CephContext* cct,
int64_t device_size,
int64_t _block_size,
uint64_t max_entries,
const std::string& name) :
HybridAllocator(cct, device_size, _block_size,
max_entries,
name) {
}
uint64_t get_bmap_free() {
return get_bmap() ? get_bmap()->get_free() : 0;
}
uint64_t get_avl_free() {
return AvlAllocator::get_free();
}
};
const uint64_t _1m = 1024 * 1024;
const uint64_t _4m = 4 * 1024 * 1024;
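
// A rough reading of the expectations below: HybridAllocator tracks free
// extents in its AVL (range tree) part until the memory cap is reached --
// here 4 * sizeof(range_seg_t), i.e. room for 4 extents -- after which
// additional free extents spill over into the bitmap part.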
TEST(HybridAllocator, basic)
{
{
uint64_t block_size = 0x1000;
uint64_t capacity = 0x10000 * _1m; // = 64GB
TestHybridAllocator ha(g_ceph_context, capacity, block_size,
4 * sizeof(range_seg_t), "test_hybrid_allocator");
ASSERT_EQ(0, ha.get_free());
ASSERT_EQ(0, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
ha.init_add_free(0, _4m);
ASSERT_EQ(_4m, ha.get_free());
ASSERT_EQ(_4m, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
ha.init_add_free(2 * _4m, _4m);
ASSERT_EQ(_4m * 2, ha.get_free());
ASSERT_EQ(_4m * 2, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
ha.init_add_free(100 * _4m, _4m);
ha.init_add_free(102 * _4m, _4m);
ASSERT_EQ(_4m * 4, ha.get_free());
ASSERT_EQ(_4m * 4, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
// next allocs will go to bitmap
ha.init_add_free(4 * _4m, _4m);
ASSERT_EQ(_4m * 5, ha.get_free());
ASSERT_EQ(_4m * 4, ha.get_avl_free());
ASSERT_EQ(_4m * 1, ha.get_bmap_free());
ha.init_add_free(6 * _4m, _4m);
ASSERT_EQ(_4m * 6, ha.get_free());
ASSERT_EQ(_4m * 4, ha.get_avl_free());
ASSERT_EQ(_4m * 2, ha.get_bmap_free());
// so we have 6x4M chunks, 4 chunks at AVL and 2 at bitmap
ha.init_rm_free(_1m, _1m); // take 1M from AVL
ASSERT_EQ(_1m * 23, ha.get_free());
ASSERT_EQ(_1m * 14, ha.get_avl_free());
ASSERT_EQ(_1m * 9, ha.get_bmap_free());
ha.init_rm_free(6 * _4m + _1m, _1m); // take 1M from bmap
ASSERT_EQ(_1m * 22, ha.get_free());
ASSERT_EQ(_1m * 14, ha.get_avl_free());
ASSERT_EQ(_1m * 8, ha.get_bmap_free());
    // so we have at avl: 2M~2M, 8M~4M, 400M~4M, 408M~4M
    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M
PExtentVector extents;
// allocate 4K, to be served from bitmap
EXPECT_EQ(block_size, ha.allocate(block_size, block_size,
0, (int64_t)0, &extents));
ASSERT_EQ(1, extents.size());
ASSERT_EQ(0, extents[0].offset);
ASSERT_EQ(_1m * 14, ha.get_avl_free());
ASSERT_EQ(_1m * 8 - block_size, ha.get_bmap_free());
interval_set<uint64_t> release_set;
// release 4K, to be returned to bitmap
release_set.insert(extents[0].offset, extents[0].length);
ha.release(release_set);
ASSERT_EQ(_1m * 14, ha.get_avl_free());
ASSERT_EQ(_1m * 8, ha.get_bmap_free());
extents.clear();
release_set.clear();
    // again we have at avl: 2M~2M, 8M~4M, 400M~4M, 408M~4M
    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M
// add 12M~3M which will go to avl
ha.init_add_free(3 * _4m, 3 * _1m);
ASSERT_EQ(_1m * 17, ha.get_avl_free());
ASSERT_EQ(_1m * 8, ha.get_bmap_free());
// add 15M~4K which will be appended to existing slot
ha.init_add_free(15 * _1m, 0x1000);
ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
ASSERT_EQ(_1m * 8, ha.get_bmap_free());
    // again we have at avl: 2M~2M, 8M~(7M+4K), 400M~4M, 408M~4M
    // and at bmap: 0~1M, 16M~1M, 18M~2M, 24M~4M
    // some removals from bmap
ha.init_rm_free(28 * _1m - 0x1000, 0x1000);
ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
ASSERT_EQ(_1m * 8 - 0x1000, ha.get_bmap_free());
ha.init_rm_free(24 * _1m + 0x1000, 0x1000);
ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
ASSERT_EQ(_1m * 8 - 0x2000, ha.get_bmap_free());
ha.init_rm_free(24 * _1m + 0x1000, _4m - 0x2000);
ASSERT_EQ(_1m * 17 + 0x1000, ha.get_avl_free());
ASSERT_EQ(_1m * 4, ha.get_bmap_free());
//4K removal from avl
ha.init_rm_free(15 * _1m, 0x1000);
ASSERT_EQ(_1m * 17, ha.get_avl_free());
ASSERT_EQ(_1m * 4, ha.get_bmap_free());
//remove highest 4Ms from avl
ha.init_rm_free(_1m * 400, _4m);
ha.init_rm_free(_1m * 408, _4m);
ASSERT_EQ(_1m * 9, ha.get_avl_free());
ASSERT_EQ(_1m * 4, ha.get_bmap_free());
// we have at avl: 2M~2M, 8M~7M
// and at bmap: 0~1M, 16M~1M, 18M~2M
// this will be merged with neighbors from bmap and go to avl
ha.init_add_free(17 * _1m, _1m);
ASSERT_EQ(_1m * 1, ha.get_bmap_free());
ASSERT_EQ(_1m * 13, ha.get_avl_free());
// we have at avl: 2M~2M, 8M~7M, 16M~4M
// and at bmap: 0~1M
// and now do some cutoffs from 0~1M span
//cut off 4K from bmap
ha.init_rm_free(0 * _1m, 0x1000);
ASSERT_EQ(_1m * 13, ha.get_avl_free());
ASSERT_EQ(_1m * 1 - 0x1000, ha.get_bmap_free());
//cut off 1M-4K from bmap
ha.init_rm_free(0 * _1m + 0x1000, _1m - 0x1000);
ASSERT_EQ(_1m * 13, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
//cut off 512K avl
ha.init_rm_free(17 * _1m + 0x1000, _1m / 2);
ASSERT_EQ(_1m * 13 - _1m / 2, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
//cut off the rest from avl
ha.init_rm_free(17 * _1m + 0x1000 + _1m / 2, _1m / 2);
ASSERT_EQ(_1m * 12, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
}
{
uint64_t block_size = 0x1000;
uint64_t capacity = 0x10000 * _1m; // = 64GB
TestHybridAllocator ha(g_ceph_context, capacity, block_size,
4 * sizeof(range_seg_t), "test_hybrid_allocator");
ha.init_add_free(_1m, _1m);
ha.init_add_free(_1m * 3, _1m);
ha.init_add_free(_1m * 5, _1m);
ha.init_add_free(0x4000, 0x1000);
ASSERT_EQ(_1m * 3 + 0x1000, ha.get_free());
ASSERT_EQ(_1m * 3 + 0x1000, ha.get_avl_free());
ASSERT_EQ(0, ha.get_bmap_free());
// This will substitute chunk 0x4000~1000.
    // Since the new chunk is inserted into AvlAllocator::range_tree
    // immediately before the 0x4000~1000 chunk, care should be taken to
    // order operations properly and not to use an already disposed iterator.
ha.init_add_free(0, 0x2000);
ASSERT_EQ(_1m * 3 + 0x3000, ha.get_free());
ASSERT_EQ(_1m * 3 + 0x2000, ha.get_avl_free());
ASSERT_EQ(0x1000, ha.get_bmap_free());
}
}
TEST(HybridAllocator, fragmentation)
{
{
uint64_t block_size = 0x1000;
uint64_t capacity = 0x1000 * 0x1000; // = 16M
TestHybridAllocator ha(g_ceph_context, capacity, block_size,
4 * sizeof(range_seg_t), "test_hybrid_allocator");
ha.init_add_free(0, 0x2000);
ha.init_add_free(0x4000, 0x2000);
ha.init_add_free(0x8000, 0x2000);
ha.init_add_free(0xc000, 0x1000);
ASSERT_EQ(0.5, ha.get_fragmentation());
    // this will go to bmap with fragmentation = 1
ha.init_add_free(0x10000, 0x1000);
// which results in the following total fragmentation
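    // (free-space-weighted average: 7 of the 8 free blocks sit in the avl
    // part at fragmentation 0.5, 1 of 8 sits in the bitmap part at 1.0)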
ASSERT_EQ(0.5 * 7 / 8 + 1.0 / 8, ha.get_fragmentation());
}
}
ceph-main/src/test/objectstore/run_seed_to.sh
#!/usr/bin/env bash
# vim: ts=8 sw=2 smarttab
#
# run_seed_to.sh - Run ceph_test_filestore_idempotent_sequence up until an
# injection point, generating a sequence of operations based on a
# provided seed.
#
# We also perform three additional tests, focused on assessing whether
# replaying a larger chunk of the journal affects the expected store
# behavior. These tests will be performed by increasing the store's
# journal sync interval to a very large value, allowing the store to
# finish execution before the first sync (unless the store runs for
# over 10 hours, in which case the interval variables must be changed
# to an appropriate value). Unless the '--no-journal-test' option is
# specified, we will run the following 3 scenarios:
#
# 1) journal sync'ing for both stores is as good as disabled
# (we call it '00', for store naming purposes)
# 2) journal sync'ing for store A is as good as disabled
# (we call it '01', for store naming purposes)
# 3) journal sync'ing for store B is as good as disabled
# (we call it '10', for store naming purposes)
#
# All log files are also named accordingly (e.g., a.00.fail,
# a.10.recover, or b.01.clean).
#
# By default, the test will not exit on error, although it will show the
# fail message. This behavior is defined so that we run the whole battery of
# tests and obtain as many mismatches as possible in one go. We may force
# the test to exit on error by specifying the '--exit-on-error' option.
#
#
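# Example invocation (hypothetical values): generate the op sequence from
# seed 12345 with 10 collections and 100 objects, injecting a failure at
# operation 42:
#
#   ./run_seed_to.sh -c 10 -o 100 12345 42
#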
set -e
test_opts=""
usage() {
echo "usage: $1 [options..] <seed> <kill-at>"
echo
echo "options:"
echo " -c, --colls <VAL> # of collections"
echo " -o, --objs <VAL> # of objects"
echo " -b, --btrfs <VAL> seq number for btrfs stores"
echo " --no-journal-test don't perform journal replay tests"
echo " -e, --exit-on-error exit with 1 on error"
echo " -v, --valgrind run commands through valgrind"
echo
echo "env vars:"
echo " OPTS_STORE additional opts for both stores"
echo " OPTS_STORE_A additional opts for store A"
echo " OPTS_STORE_B additional opts for store B"
echo
}
echo $0 $*
die_on_missing_arg() {
if [[ "$2" == "" ]]; then
echo "$1: missing required parameter"
exit 1
fi
}
required_args=2
obtained_args=0
seed=""
killat=""
on_btrfs=0
on_btrfs_seq=0
journal_test=1
min_sync_interval="36000" # ten hours, yes.
max_sync_interval="36001"
exit_on_error=0
v=""
do_rm() {
if [[ $on_btrfs -eq 0 ]]; then
rm -fr $*
fi
}
set_arg() {
if [[ $1 -eq 1 ]]; then
seed=$2
elif [[ $1 -eq 2 ]]; then
killat=$2
else
echo "error: unknown purpose for '$2'"
usage $0
exit 1
fi
}
while [[ $# -gt 0 ]];
do
case "$1" in
-c | --colls)
die_on_missing_arg "$1" "$2"
test_opts="$test_opts --test-num-colls $2"
shift 2
;;
-o | --objs)
die_on_missing_arg "$1" "$2"
test_opts="$test_opts --test-num-objs $2"
shift 2
;;
-h | --help)
usage $0 ;
exit 0
;;
-b | --btrfs)
die_on_missing_arg "$1" "$2"
on_btrfs=1
on_btrfs_seq=$2
shift 2
;;
--no-journal-test)
journal_test=0
shift
;;
-e | --exit-on-error)
exit_on_error=1
shift
;;
-v | --valgrind)
v="valgrind --leak-check=full"
shift
;;
--)
shift
break
;;
-*)
echo "$1: unknown option" >&2
usage $0
exit 1
;;
*)
obtained_args=$(($obtained_args+1))
set_arg $obtained_args $1
shift
;;
esac
done
if [[ $obtained_args -ne $required_args ]]; then
echo "error: missing argument"
usage $0 ;
exit 1
fi
if [[ "$OPTS_STORE" != "" ]]; then
test_opts="$test_opts $OPTS_STORE"
fi
test_opts_a="$test_opts"
test_opts_b="$test_opts"
if [[ "$OPTS_STORE_A" != "" ]]; then
test_opts_a="$test_opts_a $OPTS_STORE_A"
fi
if [[ "$OPTS_STORE_B" != "" ]]; then
test_opts_b="$test_opts_b $OPTS_STORE_B"
fi
echo seed $seed
echo kill at $killat
# run forever, until $killat...
to=1000000000
#
# store names
#
# We need these for two reasons:
# 1) if we are running the tests on a btrfs volume, then we need to use
# a seq number for each run. Being on btrfs means we will fail when
# removing the store's directories and it's far simpler to just
# specify different store names such as 'a.$seq' or 'b.$seq'.
#
# 2) unless the '--no-journal-test' option is specified, we will run
# three additional tests for each store, and we will reuse the same
# command for each one of the runs, but varying the store's name and
# arguments.
#
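# For instance, with '--btrfs 3' the base stores become 'a.3' and 'b.3',
# and each journal-test run appends its scenario suffix, yielding names
# like 'a.3.00', 'b.3.01' or 'a.3.10'.
#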
store_a="a"
store_b="b"
if [[ $on_btrfs -eq 1 ]]; then
store_a="$store_a.$on_btrfs_seq"
store_b="$store_b.$on_btrfs_seq"
fi
total_runs=1
if [[ $journal_test -eq 1 ]]; then
total_runs=$(($total_runs + 3))
fi
num_runs=0
opt_min_sync="--filestore-min-sync-interval $min_sync_interval"
opt_max_sync="--filestore-max-sync-interval $max_sync_interval"
ret=0
while [[ $num_runs -lt $total_runs ]];
do
tmp_name_a=$store_a
tmp_name_b=$store_b
tmp_opts_a=$test_opts_a
tmp_opts_b=$test_opts_b
#
# We have already tested whether there are diffs when both journals
  # are properly working. Now let's try three other scenarios:
  # 1) journal sync'ing for both stores is as good as disabled
# (we call it '00')
# 2) journal sync'ing for store A is as good as disabled
# (we call it '01')
# 3) journal sync'ing for store B is as good as disabled
# (we call it '10')
#
if [[ $num_runs -gt 0 && $journal_test -eq 1 ]]; then
echo "run #$num_runs"
case $num_runs in
1)
tmp_name_a="$tmp_name_a.00"
tmp_name_b="$tmp_name_b.00"
tmp_opts_a="$tmp_opts_a $opt_min_sync $opt_max_sync"
tmp_opts_b="$tmp_opts_b $opt_min_sync $opt_max_sync"
;;
2)
tmp_name_a="$tmp_name_a.01"
tmp_name_b="$tmp_name_b.01"
tmp_opts_a="$tmp_opts_a $opt_min_sync $opt_max_sync"
;;
3)
tmp_name_a="$tmp_name_a.10"
tmp_name_b="$tmp_name_b.10"
tmp_opts_b="$tmp_opts_b $opt_min_sync $opt_max_sync"
;;
esac
fi
do_rm $tmp_name_a $tmp_name_a.fail $tmp_name_a.recover
$v ceph_test_filestore_idempotent_sequence run-sequence-to $to \
$tmp_name_a $tmp_name_a/journal \
--test-seed $seed --osd-journal-size 100 \
--filestore-kill-at $killat $tmp_opts_a \
--log-file $tmp_name_a.fail --debug-filestore 20 --no-log-to-stderr || true
stop_at=`ceph_test_filestore_idempotent_sequence get-last-op \
$tmp_name_a $tmp_name_a/journal \
--log-file $tmp_name_a.recover \
--debug-filestore 20 --debug-journal 20 --no-log-to-stderr`
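  # sanity check that $stop_at is numeric: 'expr N - N' prints 0 only for
  # a valid number N, so anything else trips the error branch below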
if [[ "`expr $stop_at - $stop_at 2>/dev/null`" != "0" ]]; then
echo "error: get-last-op returned '$stop_at'"
exit 1
fi
echo stopped at $stop_at
do_rm $tmp_name_b $tmp_name_b.clean
$v ceph_test_filestore_idempotent_sequence run-sequence-to \
$stop_at $tmp_name_b $tmp_name_b/journal \
--test-seed $seed --osd-journal-size 100 \
--log-file $tmp_name_b.clean --debug-filestore 20 --no-log-to-stderr \
$tmp_opts_b
if $v ceph_test_filestore_idempotent_sequence diff \
$tmp_name_a $tmp_name_a/journal $tmp_name_b $tmp_name_b/journal --no-log-to-stderr --log-file $tmp_name_a.diff.log --debug-filestore 20 ; then
echo OK
else
echo "FAIL"
echo " see:"
echo " $tmp_name_a.fail -- leading up to failure"
echo " $tmp_name_a.recover -- journal replay"
echo " $tmp_name_b.clean -- the clean reference"
ret=1
if [[ $exit_on_error -eq 1 ]]; then
exit 1
fi
fi
num_runs=$(($num_runs+1))
done
exit $ret
ceph-main/src/test/objectstore/run_seed_to_range.sh
#!/bin/sh
set -x
set -e
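
# usage (inferred from the positional arguments below):
#   run_seed_to_range.sh <seed> <from-killat> <to-killat> [evidence-dir]
# runs run_seed_to.sh for every kill point in [from, to] and, on the first
# failure, copies the working directory to <evidence-dir> when one is given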
seed=$1
from=$2
to=$3
dir=$4
mydir=`dirname $0`
for f in `seq $from $to`
do
if ! $mydir/run_seed_to.sh -o 10 -e $seed $f; then
if [ -d "$dir" ]; then
echo copying evidence to $dir
cp -a . $dir
else
echo no dir provided for evidence disposal
fi
exit 1
fi
done
ceph-main/src/test/objectstore/run_smr_bluestore_test.sh
#!/bin/bash -ex
# 1) run_smr_bluestore_test.sh
# Setup smr device, run all tests
# 2) run_smr_bluestore_test.sh --smr
# Setup smr device but skip tests failing on smr
before_creation=$(mktemp)
lsscsi > $before_creation
echo "cd /backstores/user:zbc
create name=zbc0 size=20G cfgstring=model-HM/zsize-256/[email protected]
/loopback create
cd /loopback
create naa.50014055e5f25aa0
cd naa.50014055e5f25aa0/luns
create /backstores/user:zbc/zbc0 0
" | sudo targetcli
sleep 1 # if we go too fast the device does not show up
after_creation=$(mktemp)
lsscsi > $after_creation
if [[ $(diff $before_creation $after_creation | wc -l ) != 2 ]]
then
echo New zbc device not created
false
fi
function cleanup() {
echo "cd /loopback
delete naa.50014055e5f25aa0
cd /backstores/user:zbc
delete zbc0" | sudo targetcli
sudo rm -f zbc0.raw
rm -f $before_creation $after_creation
}
trap cleanup EXIT
DEV=$(diff $before_creation $after_creation |grep zbc |sed "s@.* /@/@")
sudo chmod 666 $DEV
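# Optionally, the zoned layout of the new device can be inspected with
# util-linux's blkzone (not required by the test):
#   sudo blkzone report $DEV | head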
# Need sudo
# https://patchwork.kernel.org/project/linux-block/patch/[email protected]/
sudo ceph_test_objectstore \
--bluestore-block-path $DEV \
--gtest_filter=*/2 \
$*
ceph-main/src/test/objectstore/run_test_deferred.sh
#!/bin/bash
if [[ ! (-x ./bin/unittest_deferred) || ! (-x ./bin/ceph-kvstore-tool) || ! (-x ./bin/ceph-bluestore-tool) ]]
then
echo Test must be run from ceph build directory
echo with unittest_deferred, ceph-kvstore-tool and ceph-bluestore-tool compiled
exit 1
fi
# Create BlueStore, only main block device, 4K AU, forced deferred 4K, 64K AU for BlueFS
# Create file zapchajdziura, that is 0xe000 in size.
# This adds to 0x0000 - 0x1000 of BlueStore superblock and 0x1000 - 0x2000 of BlueFS superblock,
# making 0x00000 - 0x10000 filled, nicely aligning for 64K BlueFS requirements
# Prefill 10 objects Object-0 .. Object-9, each 64K. Sync to disk.
# Do transactions like:
# - fill Object-x+1 16 times at offsets 0x0000, 0x1000, ... 0xf000 with 8 bytes, triggering deferred writes
# - fill Object-x with 64K data
# Repeat for Object-0 to Object-8.
# Right after getting notification on_complete for all 9 transactions, immediately exit(1).
./bin/unittest_deferred --log-to-stderr=false
# Now we should have a considerable amount of pending deferred writes.
# They refer to disk regions that do not belong to any object.
# Perform compaction on RocksDB
# This initializes BlueFS, but does not replay deferred writes.
# It jiggles RocksDB files around. CURRENT and MANIFEST are recreated, with some .sst files too.
# The hope here is that newly created RocksDB files will occupy space that is free,
# but targeted by pending deferred writes.
./bin/ceph-kvstore-tool bluestore-kv bluestore.test_temp_dir/ compact --log-to-stderr=false
# In this step we (hopefully) get RocksDB files overwritten.
# We initialize BlueFS and RocksDB; there should be no problem here.
# Then we apply deferred writes. Now some of the RocksDB files might get corrupted.
# It is very likely that this will not cause any problems, since CURRENT and MANIFEST are only read at bootup.
./bin/ceph-bluestore-tool --path bluestore.test_temp_dir/ --command fsck --deep 1 --debug-bluestore=30/30 --debug-bdev=30/30 --log-file=log-bs-corrupts.txt --log-to-file --log-to-stderr=false
# If we were lucky, this command now fails
./bin/ceph-bluestore-tool --path bluestore.test_temp_dir/ --command fsck --deep 1 --debug-bluestore=30/30 --debug-bdev=30/30 --log-file=log-bs-crash.txt --log-to-file --log-to-stderr=false
if [[ $? != 0 ]]
then
echo "Deferred writes corruption successfully created !"
else
echo "No deferred write problems detected."
fi
#cleanup
rm -rf bluestore.test_temp_dir/
ceph-main/src/test/objectstore/store_test.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <glob.h>
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <memory>
#include <time.h>
#include <sys/mount.h>
#include <boost/random/mersenne_twister.hpp>
#include <boost/random/uniform_int.hpp>
#include <boost/random/binomial_distribution.hpp>
#include <fmt/format.h>
#include <gtest/gtest.h>
#include "os/ObjectStore.h"
#if defined(WITH_BLUESTORE)
#include "os/bluestore/BlueStore.h"
#include "os/bluestore/BlueFS.h"
#endif
#include "include/Context.h"
#include "common/buffer_instrumentation.h"
#include "common/ceph_argparse.h"
#include "common/admin_socket.h"
#include "global/global_init.h"
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "common/errno.h"
#include "common/options.h" // for the size literals
#include "common/pretty_binary.h"
#include "include/stringify.h"
#include "include/coredumpctl.h"
#include "include/unordered_map.h"
#include "os/kv.h"
#include "store_test_fixture.h"
using namespace std;
using namespace std::placeholders;
typedef boost::mt11213b gen_type;
const uint64_t DEF_STORE_TEST_BLOCKDEV_SIZE = 10240000000;
#define dout_context g_ceph_context
bool smr = false;
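
// Compare two bufferlists; on mismatch, report the differing offset range
// and hexdump both buffers to ease debugging, then return false.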
static bool bl_eq(bufferlist& expected, bufferlist& actual)
{
if (expected.contents_equal(actual))
return true;
unsigned first = 0;
if(expected.length() != actual.length()) {
cout << "--- buffer lengths mismatch " << std::hex
<< "expected 0x" << expected.length() << " != actual 0x"
<< actual.length() << std::dec << std::endl;
derr << "--- buffer lengths mismatch " << std::hex
<< "expected 0x" << expected.length() << " != actual 0x"
<< actual.length() << std::dec << dendl;
}
auto len = std::min(expected.length(), actual.length());
while ( first<len && expected[first] == actual[first])
++first;
unsigned last = len;
while (last > 0 && expected[last-1] == actual[last-1])
--last;
if(len > 0) {
cout << "--- buffer mismatch between offset 0x" << std::hex << first
<< " and 0x" << last << ", total 0x" << len << std::dec
<< std::endl;
derr << "--- buffer mismatch between offset 0x" << std::hex << first
<< " and 0x" << last << ", total 0x" << len << std::dec
<< dendl;
cout << "--- expected:\n";
expected.hexdump(cout);
cout << "--- actual:\n";
actual.hexdump(cout);
}
return false;
}
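
// Queue a transaction, randomly round-tripping it through
// Transaction::append() first so that both code paths get exercised.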
template <typename T>
int queue_transaction(
T &store,
ObjectStore::CollectionHandle ch,
ObjectStore::Transaction &&t) {
if (rand() % 2) {
ObjectStore::Transaction t2;
t2.append(t);
return store->queue_transaction(ch, std::move(t2));
} else {
return store->queue_transaction(ch, std::move(t));
}
}
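
// List collection contents, randomly alternating between the current and
// the legacy listing implementation (unless the caller disables the latter).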
template <typename T>
int collection_list(T &store, ObjectStore::CollectionHandle &c,
const ghobject_t& start, const ghobject_t& end, int max,
vector<ghobject_t> *ls, ghobject_t *pnext,
bool disable_legacy = false) {
if (disable_legacy || rand() % 2) {
return store->collection_list(c, start, end, max, ls, pnext);
} else {
return store->collection_list_legacy(c, start, end, max, ls, pnext);
}
}
bool sorted(const vector<ghobject_t> &in) {
ghobject_t start;
for (vector<ghobject_t>::const_iterator i = in.begin();
i != in.end();
++i) {
if (start > *i) {
cout << start << " should follow " << *i << std::endl;
return false;
}
start = *i;
}
return true;
}
class StoreTest : public StoreTestFixture,
public ::testing::WithParamInterface<const char*> {
public:
StoreTest()
: StoreTestFixture(GetParam())
{}
void doCompressionTest();
void doSyntheticTest(
int num_ops,
uint64_t max_obj, uint64_t max_wr, uint64_t align);
};
class StoreTestDeferredSetup : public StoreTest {
void SetUp() override {
//do nothing
}
protected:
void DeferredSetup() {
StoreTest::SetUp();
}
public:
};
class StoreTestSpecificAUSize : public StoreTestDeferredSetup {
public:
typedef
std::function<void(
uint64_t num_ops,
uint64_t max_obj,
uint64_t max_wr,
uint64_t align)> MatrixTest;
void StartDeferred(size_t min_alloc_size) {
SetVal(g_conf(), "bluestore_min_alloc_size", stringify(min_alloc_size).c_str());
DeferredSetup();
}
private:
// bluestore matrix testing
uint64_t max_write = 40 * 1024;
uint64_t max_size = 400 * 1024;
uint64_t alignment = 0;
uint64_t num_ops = 10000;
protected:
string matrix_get(const char *k) {
if (string(k) == "max_write") {
return stringify(max_write);
} else if (string(k) == "max_size") {
return stringify(max_size);
} else if (string(k) == "alignment") {
return stringify(alignment);
} else if (string(k) == "num_ops") {
return stringify(num_ops);
} else {
char *buf;
g_conf().get_val(k, &buf, -1);
string v = buf;
free(buf);
return v;
}
}
void matrix_set(const char *k, const char *v) {
if (string(k) == "max_write") {
max_write = atoll(v);
} else if (string(k) == "max_size") {
max_size = atoll(v);
} else if (string(k) == "alignment") {
alignment = atoll(v);
} else if (string(k) == "num_ops") {
num_ops = atoll(v);
} else {
SetVal(g_conf(), k, v);
}
}
void do_matrix_choose(const char *matrix[][10],
int i, int pos, int num,
MatrixTest fn) {
if (matrix[i][0]) {
int count;
for (count = 0; matrix[i][count+1]; ++count) ;
for (int j = 1; matrix[i][j]; ++j) {
matrix_set(matrix[i][0], matrix[i][j]);
do_matrix_choose(matrix,
i + 1,
pos * count + j - 1,
num * count,
fn);
}
} else {
cout << "---------------------- " << (pos + 1) << " / " << num
<< " ----------------------" << std::endl;
for (unsigned k=0; matrix[k][0]; ++k) {
cout << " " << matrix[k][0] << " = " << matrix_get(matrix[k][0])
<< std::endl;
}
g_ceph_context->_conf.apply_changes(nullptr);
fn(num_ops, max_size, max_write, alignment);
}
}
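
  // do_matrix() runs fn once per combination of the settings matrix, where
  // each row is a NULL-terminated { key, value... } list. A hypothetical
  // example:
  //
  //   const char *matrix[][10] = {
  //     { "bluestore_min_alloc_size", "4096", "65536", 0 },
  //     { "max_write", "65536", "262144", 0 },
  //     { 0 },
  //   };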
void do_matrix(const char *matrix[][10],
MatrixTest fn) {
if (strcmp(matrix[0][0], "bluestore_min_alloc_size") == 0) {
int count;
for (count = 0; matrix[0][count+1]; ++count) ;
for (size_t j = 1; matrix[0][j]; ++j) {
if (j > 1) {
TearDown();
}
StartDeferred(strtoll(matrix[0][j], NULL, 10));
do_matrix_choose(matrix, 1, j - 1, count, fn);
}
} else {
StartDeferred(0);
do_matrix_choose(matrix, 0, 0, 1, fn);
}
}
};
class StoreTestOmapUpgrade : public StoreTestDeferredSetup {
protected:
void StartDeferred() {
DeferredSetup();
}
public:
struct generator {
double r = 3.6;
double x = 0.5;
double operator()(){
double v = x;
x = r * x * (1 - x);
return v;
}
};
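
  // generate_monotonic_name(SUM, i, ...) deterministically maps i in
  // [0, SUM) to a name such that increasing i yields lexicographically
  // increasing strings; the logistic-map generator above supplies the
  // reproducible pseudo-randomness shaping the names.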
std::string generate_monotonic_name(uint32_t SUM, uint32_t i, double r, double x)
{
generator gen{r, x};
//std::cout << "r=" << r << " x=" << x << std::endl;
std::string s;
while (SUM > 1) {
uint32_t lo = 0;
uint32_t hi = 1 + gen() * 10;
uint32_t start = ('z' - 'a' + 1 - hi) * gen();
while (hi - lo > 0) {
uint32_t mid = (lo + hi + 1 + (SUM&1)) / 2; // round up or down, depending on SUM
// std::cout << "SUM=" << SUM << " x=" << gen.x << std::endl;
uint32_t mid_val = gen() * (SUM - 1) + 1;
// LEFT = lo .. mid - 1
// RIGHT = mid .. hi
// std::cout << "lo=" << lo << " hi=" << hi << " mid=" << mid
// << " SUM=" << SUM << " i=" << i << " x=" << gen.x << " mid_val=" << mid_val << std::endl;
if (i < mid_val) {
hi = mid - 1;
SUM = mid_val;
} else {
lo = mid;
SUM = SUM - mid_val;
i = i - mid_val;
}
}
//std::cout << "lo=" << lo << " hi=" << hi
// << " SUM=" << SUM << " i=" << i << std::endl;
s.push_back('a' + lo + start); // to keep alphabetic order
uint32_t cnt = gen() * 8;
for (uint32_t j = 0; j < cnt; j++) {
s.push_back('a' + ('z' - 'a' + 1) * gen());
}
s.push_back('.');
}
return s;
}
std::string gen_string(size_t size, generator& gen) {
std::string s;
for (size_t i = 0; i < size; i++) {
s.push_back('a' + ('z' - 'a' + 1 ) * gen());
}
return s;
}
void make_omap_data(size_t object_count,
int64_t poolid,
coll_t cid) {
int r;
ObjectStore::CollectionHandle ch = store->open_collection(cid);
for (size_t o = 0; o < object_count; o++)
{
ObjectStore::Transaction t;
std::string oid = generate_monotonic_name(object_count, o, 3.71, 0.5);
ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 0, poolid, ""));
t.touch(cid, hoid);
generator gen{3.85 + 0.1 * o / object_count, 1 - double(o) / object_count};
map<string, bufferlist> start_set;
size_t omap_count = 1 + gen() * 20;
bool do_omap_header = gen() > 0.5;
if (do_omap_header) {
bufferlist header;
header.append(gen_string(50, gen));
t.omap_setheader(cid, hoid, header);
}
for (size_t i = 0; i < omap_count; i++) {
std::string name = generate_monotonic_name(omap_count, i, 3.66 + 0.22 * o / object_count, 0.5);
bufferlist val;
val.append(gen_string(100, gen));
start_set.emplace(name, val);
}
t.omap_setkeys(cid, hoid, start_set);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
void check_omap_data(size_t object_count,
int64_t poolid,
coll_t cid) {
int r;
ObjectStore::CollectionHandle ch = store->open_collection(cid);
for (size_t o = 0; o < object_count; o++)
{
ObjectStore::Transaction t;
std::string oid = generate_monotonic_name(object_count, o, 3.71, 0.5);
ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 0, poolid, ""));
generator gen{3.85 + 0.1 * o / object_count, 1 - double(o) / object_count};
bufferlist omap_header;
map<string, bufferlist> omap_set;
r = store->omap_get(ch, hoid, &omap_header, &omap_set);
ASSERT_EQ(r, 0);
size_t omap_count = 1 + gen() * 20;
bool do_omap_header = gen() > 0.5;
if (do_omap_header) {
std::string header_str = gen_string(50, gen);
ASSERT_EQ(header_str, omap_header.to_str());
}
auto it = omap_set.begin();
for (size_t i = 0; i < omap_count; i++) {
ASSERT_TRUE(it != omap_set.end());
std::string name = generate_monotonic_name(omap_count, i, 3.66 + 0.22 * o / object_count, 0.5);
std::string val_gen = gen_string(100, gen);
ASSERT_EQ(it->first, name);
ASSERT_EQ(it->second.to_str(), val_gen);
++it;
}
}
}
};
TEST_P(StoreTest, collect_metadata) {
map<string,string> pm;
store->collect_metadata(&pm);
if (GetParam() == string("filestore")) {
ASSERT_NE(pm.count("filestore_backend"), 0u);
ASSERT_NE(pm.count("filestore_f_type"), 0u);
ASSERT_NE(pm.count("backend_filestore_partition_path"), 0u);
ASSERT_NE(pm.count("backend_filestore_dev_node"), 0u);
}
}
TEST_P(StoreTest, Trivial) {
}
TEST_P(StoreTest, TrivialRemount) {
int r = store->umount();
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
}
TEST_P(StoreTest, TrivialRemountFsck) {
if(string(GetParam()) != "bluestore")
return;
int r = store->umount();
ASSERT_EQ(0, r);
r = store->fsck(false);
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
}
TEST_P(StoreTest, SimpleRemount) {
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP)));
bufferlist bl;
bl.append("1234512345");
int r;
auto ch = store->create_new_collection(cid);
{
cerr << "create collection + write" << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ch.reset();
r = store->umount();
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
t.write(cid, hoid2, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ch.reset();
r = store->umount();
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
}
{
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, IORemount) {
coll_t cid;
bufferlist bl;
bl.append("1234512345");
int r;
auto ch = store->create_new_collection(cid);
{
cerr << "create collection + objects" << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid, 0);
for (int n=1; n<=100; ++n) {
ghobject_t hoid(hobject_t(sobject_t("Object " + stringify(n), CEPH_NOSNAP)));
t.write(cid, hoid, 0, bl.length(), bl);
}
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// overwrites
{
cout << "overwrites" << std::endl;
for (int n=1; n<=100; ++n) {
ObjectStore::Transaction t;
ghobject_t hoid(hobject_t(sobject_t("Object " + stringify(n), CEPH_NOSNAP)));
t.write(cid, hoid, 1, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
ch.reset();
r = store->umount();
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
{
ObjectStore::Transaction t;
for (int n=1; n<=100; ++n) {
ghobject_t hoid(hobject_t(sobject_t("Object " + stringify(n), CEPH_NOSNAP)));
t.remove(cid, hoid);
}
t.remove_collection(cid);
auto ch = store->open_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, UnprintableCharsName) {
coll_t cid;
string name = "funnychars_";
for (unsigned i = 0; i < 256; ++i) {
name.push_back(i);
}
ghobject_t oid(hobject_t(sobject_t(name, CEPH_NOSNAP)));
int r;
auto ch = store->create_new_collection(cid);
{
cerr << "create collection + object" << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ch.reset();
r = store->umount();
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
{
cout << "removing" << std::endl;
ObjectStore::Transaction t;
t.remove(cid, oid);
t.remove_collection(cid);
auto ch = store->open_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, FiemapEmpty) {
coll_t cid;
int r = 0;
ghobject_t oid(hobject_t(sobject_t("fiemap_object", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oid);
t.truncate(cid, oid, 100000);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist bl;
store->fiemap(ch, oid, 0, 100000, bl);
map<uint64_t,uint64_t> m, e;
auto p = bl.cbegin();
decode(m, p);
cout << " got " << m << std::endl;
e[0] = 100000;
EXPECT_TRUE(m == e || m.empty());
}
{
ObjectStore::Transaction t;
t.remove(cid, oid);
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, FiemapHoles) {
const uint64_t MAX_EXTENTS = 4000;
const uint64_t SKIP_STEP = 65536;
coll_t cid;
int r = 0;
ghobject_t oid(hobject_t(sobject_t("fiemap_object", CEPH_NOSNAP)));
bufferlist bl;
bl.append("foo");
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oid);
for (uint64_t i = 0; i < MAX_EXTENTS; i++)
t.write(cid, oid, SKIP_STEP * i, 3, bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
//fiemap test from 0 to SKIP_STEP * (MAX_EXTENTS - 1) + 3
bufferlist bl;
store->fiemap(ch, oid, 0, SKIP_STEP * (MAX_EXTENTS - 1) + 3, bl);
map<uint64_t,uint64_t> m, e;
auto p = bl.cbegin();
decode(m, p);
cout << " got " << m << std::endl;
ASSERT_TRUE(!m.empty());
ASSERT_GE(m[0], 3u);
auto last = m.crbegin();
if (m.size() == 1) {
ASSERT_EQ(0u, last->first);
} else if (m.size() == MAX_EXTENTS) {
for (uint64_t i = 0; i < MAX_EXTENTS; i++) {
ASSERT_TRUE(m.count(SKIP_STEP * i));
}
}
ASSERT_GT(last->first + last->second, SKIP_STEP * (MAX_EXTENTS - 1));
}
{
// fiemap test from SKIP_STEP to SKIP_STEP * (MAX_EXTENTS - 2) + 3
bufferlist bl;
store->fiemap(ch, oid, SKIP_STEP, SKIP_STEP * (MAX_EXTENTS - 2) + 3, bl);
map<uint64_t,uint64_t> m, e;
auto p = bl.cbegin();
decode(m, p);
cout << " got " << m << std::endl;
ASSERT_TRUE(!m.empty());
// kstore always returns [0, object_size] regardless of offset and length
// FIXME: if fiemap logic in kstore is refined
if (string(GetParam()) != "kstore") {
ASSERT_GE(m[SKIP_STEP], 3u);
auto last = m.crbegin();
if (m.size() == 1) {
ASSERT_EQ(SKIP_STEP, last->first);
} else if (m.size() == MAX_EXTENTS - 2) {
for (uint64_t i = 1; i < MAX_EXTENTS - 1; i++) {
ASSERT_TRUE(m.count(SKIP_STEP*i));
}
}
ASSERT_GT(last->first + last->second, SKIP_STEP * (MAX_EXTENTS - 1));
}
}
{
ObjectStore::Transaction t;
t.remove(cid, oid);
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, SimpleMetaColTest) {
coll_t cid;
int r = 0;
{
auto ch = store->create_new_collection(cid);
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "create collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
auto ch = store->open_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
auto ch = store->create_new_collection(cid);
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "add collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
auto ch = store->open_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, SimplePGColTest) {
coll_t cid(spg_t(pg_t(1,2), shard_id_t::NO_SHARD));
int r = 0;
{
ObjectStore::Transaction t;
auto ch = store->create_new_collection(cid);
t.create_collection(cid, 4);
cerr << "create collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
auto ch = store->open_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.create_collection(cid, 4);
cerr << "add collection" << std::endl;
auto ch = store->create_new_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
auto ch = store->open_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, SimpleColPreHashTest) {
  // First we need to negate the merge threshold to make sure
  // the collection hint actually works
int merge_threshold = g_ceph_context->_conf->filestore_merge_threshold;
std::ostringstream oss;
if (merge_threshold > 0) {
oss << "-" << merge_threshold;
SetVal(g_conf(), "filestore_merge_threshold", oss.str().c_str());
}
uint32_t pg_num = 128;
boost::uniform_int<> pg_id_range(0, pg_num);
gen_type rng(time(NULL));
int pg_id = pg_id_range(rng);
int objs_per_folder = abs(merge_threshold) * 16 * g_ceph_context->_conf->filestore_split_multiple;
boost::uniform_int<> folders_range(5, 256);
uint64_t expected_num_objs = (uint64_t)objs_per_folder * (uint64_t)folders_range(rng);
coll_t cid(spg_t(pg_t(pg_id, 15), shard_id_t::NO_SHARD));
int r;
auto ch = store->create_new_collection(cid);
{
// Create a collection along with a hint
ObjectStore::Transaction t;
t.create_collection(cid, 5);
cerr << "create collection" << std::endl;
bufferlist hint;
encode(pg_num, hint);
encode(expected_num_objs, hint);
t.collection_hint(cid, ObjectStore::Transaction::COLL_HINT_EXPECTED_NUM_OBJECTS, hint);
cerr << "collection hint" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// Remove the collection
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "remove collection" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, SmallBlockWrites) {
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist a;
bufferptr ap(0x1000);
memset(ap.c_str(), 'a', 0x1000);
a.append(ap);
bufferlist b;
bufferptr bp(0x1000);
memset(bp.c_str(), 'b', 0x1000);
b.append(bp);
bufferlist c;
bufferptr cp(0x1000);
memset(cp.c_str(), 'c', 0x1000);
c.append(cp);
bufferptr zp(0x1000);
zp.zero();
bufferlist z;
z.append(zp);
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0, 0x1000, a);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in, exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(0x1000, r);
exp.append(a);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0x1000, 0x1000, b);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in, exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(0x2000, r);
exp.append(a);
exp.append(b);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0x3000, 0x1000, c);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in, exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(0x4000, r);
exp.append(a);
exp.append(b);
exp.append(z);
exp.append(c);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0x2000, 0x1000, a);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in, exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(0x4000, r);
exp.append(a);
exp.append(b);
exp.append(a);
exp.append(c);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0, 0x1000, c);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist in, exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(0x4000, r);
exp.append(c);
exp.append(b);
exp.append(a);
exp.append(c);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, BufferCacheReadTest) {
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
{
auto ch = store->open_collection(cid);
ASSERT_FALSE(ch);
}
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append("abcde");
t.write(cid, hoid, 0, 5, bl);
t.write(cid, hoid, 10, 5, bl);
cerr << "TwinWrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, 15, newdata);
ASSERT_EQ(r, 15);
{
bufferlist expected;
expected.append(bl);
expected.append_zero(5);
expected.append(bl);
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
  //overwrite the same extents
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append("edcba");
t.write(cid, hoid, 0, 5, bl);
t.write(cid, hoid, 10, 5, bl);
cerr << "TwinWrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, 15, newdata);
ASSERT_EQ(r, 15);
{
bufferlist expected;
expected.append(bl);
expected.append_zero(5);
expected.append(bl);
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
//additional write to an unused region of some blob
{
ObjectStore::Transaction t;
bufferlist bl2, newdata;
bl2.append("1234567890");
t.write(cid, hoid, 20, bl2.length(), bl2);
cerr << "Append" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, 30, newdata);
ASSERT_EQ(r, 30);
{
bufferlist expected;
expected.append("edcba");
expected.append_zero(5);
expected.append("edcba");
expected.append_zero(5);
expected.append(bl2);
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
  //additional write to an unused region of some blob and partial overwrite of existing extents
{
ObjectStore::Transaction t;
bufferlist bl, bl2, bl3, newdata;
bl.append("DCB");
bl2.append("1234567890");
bl3.append("BA");
t.write(cid, hoid, 30, bl2.length(), bl2);
t.write(cid, hoid, 1, bl.length(), bl);
t.write(cid, hoid, 13, bl3.length(), bl3);
cerr << "TripleWrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, 40, newdata);
ASSERT_EQ(r, 40);
{
bufferlist expected;
expected.append("eDCBa");
expected.append_zero(5);
expected.append("edcBA");
expected.append_zero(5);
expected.append(bl2);
expected.append(bl2);
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
}
void StoreTest::doCompressionTest()
{
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
{
auto ch = store->open_collection(cid);
ASSERT_FALSE(ch);
}
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
std::string data;
data.resize(0x10000 * 4);
for(size_t i = 0;i < data.size(); i++)
data[i] = i / 256;
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append(data);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "CompressibleData (4xAU) Write" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, data.size() , newdata);
ASSERT_EQ(r, (int)data.size());
{
bufferlist expected;
expected.append(data);
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 0, 711 , newdata);
ASSERT_EQ(r, 711);
{
bufferlist expected;
expected.append(data.substr(0,711));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 0xf00f, data.size(), newdata);
ASSERT_EQ(r, int(data.size() - 0xf00f) );
{
bufferlist expected;
expected.append(data.substr(0xf00f));
ASSERT_TRUE(bl_eq(expected, newdata));
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)data.size());
ASSERT_LE(statfs.data_compressed, (unsigned)data.size());
ASSERT_EQ(statfs.data_compressed_original, (unsigned)data.size());
ASSERT_LE(statfs.data_compressed_allocated, (unsigned)data.size());
}
}
std::string data2;
data2.resize(0x10000 * 4 - 0x9000);
for(size_t i = 0;i < data2.size(); i++)
data2[i] = (i+1) / 256;
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append(data2);
t.write(cid, hoid, 0x8000, bl.length(), bl);
cerr << "CompressibleData partial overwrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, 0x10000, newdata);
ASSERT_EQ(r, (int)0x10000);
{
bufferlist expected;
expected.append(data.substr(0, 0x8000));
expected.append(data2.substr(0, 0x8000));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 0x9000, 711 , newdata);
ASSERT_EQ(r, 711);
{
bufferlist expected;
expected.append(data2.substr(0x1000,711));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 0x0, 0x40000, newdata);
ASSERT_EQ(r, int(0x40000) );
{
bufferlist expected;
expected.append(data.substr(0, 0x8000));
expected.append(data2.substr(0, 0x37000));
expected.append(data.substr(0x3f000, 0x1000));
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
data2.resize(0x3f000);
for(size_t i = 0;i < data2.size(); i++)
data2[i] = (i+2) / 256;
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append(data2);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "CompressibleData partial overwrite, two extents overlapped, single one to be removed" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0, 0x3e000 - 1, newdata);
ASSERT_EQ(r, (int)0x3e000 - 1);
{
bufferlist expected;
expected.append(data2.substr(0, 0x3e000 - 1));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 0x3e000-1, 0x2001, newdata);
ASSERT_EQ(r, 0x2001);
{
bufferlist expected;
expected.append(data2.substr(0x3e000-1, 0x1001));
expected.append(data.substr(0x3f000, 0x1000));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 0x0, 0x40000, newdata);
ASSERT_EQ(r, int(0x40000) );
{
bufferlist expected;
expected.append(data2.substr(0, 0x3f000));
expected.append(data.substr(0x3f000, 0x1000));
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
data.resize(0x1001);
for(size_t i = 0;i < data.size(); i++)
data[i] = (i+3) / 256;
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append(data);
t.write(cid, hoid, 0x3f000-1, bl.length(), bl);
cerr << "Small chunk partial overwrite, two extents overlapped, single one to be removed" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 0x3e000, 0x2000, newdata);
ASSERT_EQ(r, (int)0x2000);
{
bufferlist expected;
expected.append(data2.substr(0x3e000, 0x1000 - 1));
expected.append(data.substr(0, 0x1001));
ASSERT_TRUE(bl_eq(expected, newdata));
}
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
cerr << "Cleaning object" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
auto settingsBookmark = BookmarkSettings();
SetVal(g_conf(), "bluestore_compression_min_blob_size", "262144");
g_ceph_context->_conf.apply_changes(nullptr);
{
data.resize(0x10000*6);
for(size_t i = 0;i < data.size(); i++)
data[i] = i / 256;
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append(data);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "CompressibleData large blob" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, CompressionTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "TODO: need to adjust statfs check for smr" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_compression_algorithm", "snappy");
SetVal(g_conf(), "bluestore_compression_mode", "force");
g_ceph_context->_conf.apply_changes(nullptr);
doCompressionTest();
SetVal(g_conf(), "bluestore_compression_algorithm", "zlib");
SetVal(g_conf(), "bluestore_compression_mode", "aggressive");
g_ceph_context->_conf.apply_changes(nullptr);
doCompressionTest();
}
TEST_P(StoreTest, SimpleObjectTest) {
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
{
auto ch = store->open_collection(cid);
ASSERT_FALSE(ch);
}
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.touch(cid, hoid);
cerr << "Remove then create" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
bl.append("abcde");
orig = bl;
t.remove(cid, hoid);
t.write(cid, hoid, 0, 5, bl);
cerr << "Remove then create" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in;
r = store->read(ch, hoid, 0, 5, in);
ASSERT_EQ(5, r);
ASSERT_TRUE(bl_eq(orig, in));
}
{
ObjectStore::Transaction t;
bufferlist bl, exp;
bl.append("abcde");
exp = bl;
exp.append(bl);
t.write(cid, hoid, 5, 5, bl);
cerr << "Append" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in;
r = store->read(ch, hoid, 0, 10, in);
ASSERT_EQ(10, r);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
bufferlist bl, exp;
bl.append("abcdeabcde");
exp = bl;
t.write(cid, hoid, 0, 10, bl);
cerr << "Full overwrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in;
r = store->read(ch, hoid, 0, 10, in);
ASSERT_EQ(10, r);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append("abcde");
t.write(cid, hoid, 3, 5, bl);
cerr << "Partial overwrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in, exp;
exp.append("abcabcdede");
r = store->read(ch, hoid, 0, 10, in);
ASSERT_EQ(10, r);
in.hexdump(cout);
ASSERT_TRUE(bl_eq(exp, in));
}
{
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append("fghij");
t.truncate(cid, hoid, 0);
t.write(cid, hoid, 5, 5, bl);
cerr << "Truncate + hole" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append("abcde");
t.write(cid, hoid, 0, 5, bl);
cerr << "Reverse fill-in" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist in, exp;
exp.append("abcdefghij");
r = store->read(ch, hoid, 0, 10, in);
ASSERT_EQ(10, r);
in.hexdump(cout);
ASSERT_TRUE(bl_eq(exp, in));
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append("abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234");
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "larger overwrite" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist in;
r = store->read(ch, hoid, 0, bl.length(), in);
ASSERT_EQ((int)bl.length(), r);
in.hexdump(cout);
ASSERT_TRUE(bl_eq(bl, in));
}
{
bufferlist bl;
bl.append("abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234abcde01234012340123401234");
    // test: offset=len=0 means read all the data
bufferlist in;
r = store->read(ch, hoid, 0, 0, in);
ASSERT_EQ((int)bl.length(), r);
in.hexdump(cout);
ASSERT_TRUE(bl_eq(bl, in));
}
{
//verifying unaligned csums
std::string s1("1"), s2(0x1000, '2'), s3("00");
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(s1);
bl.append(s2);
t.truncate(cid, hoid, 0);
t.write(cid, hoid, 0x1000-1, bl.length(), bl);
cerr << "Write unaligned csum, stage 1" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist in, exp1, exp2, exp3;
exp1.append(s1);
exp2.append(s2);
exp3.append(s3);
r = store->read(ch, hoid, 0x1000-1, 1, in);
ASSERT_EQ(1, r);
ASSERT_TRUE(bl_eq(exp1, in));
in.clear();
r = store->read(ch, hoid, 0x1000, 0x1000, in);
ASSERT_EQ(0x1000, r);
ASSERT_TRUE(bl_eq(exp2, in));
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(s3);
t.write(cid, hoid, 1, bl.length(), bl);
cerr << "Write unaligned csum, stage 2" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
in.clear();
r = store->read(ch, hoid, 1, 2, in);
ASSERT_EQ(2, r);
ASSERT_TRUE(bl_eq(exp3, in));
in.clear();
r = store->read(ch, hoid, 0x1000-1, 1, in);
ASSERT_EQ(1, r);
ASSERT_TRUE(bl_eq(exp1, in));
in.clear();
r = store->read(ch, hoid, 0x1000, 0x1000, in);
ASSERT_EQ(0x1000, r);
ASSERT_TRUE(bl_eq(exp2, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
#if defined(WITH_BLUESTORE)
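// Reproducer for bug 41901: small writes landing in the unused tail of an existing blob, verified via the bluestore write_small perf counters.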
TEST_P(StoreTestSpecificAUSize, ReproBug41901Test) {
  if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP (smr)" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_max_blob_size", "524288");
SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
g_conf().apply_changes(nullptr);
StartDeferred(65536);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(4096, 'a');
bl.append(s);
t.write(cid, hoid, 0x11000, bl.length(), bl);
cerr << "write1" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(4096 * 3, 'a');
bl.append(s);
t.write(cid, hoid, 0x15000, bl.length(), bl);
cerr << "write2" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_small), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_small_unused), 1u);
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(4096 * 2, 'a');
bl.append(s);
t.write(cid, hoid, 0xe000, bl.length(), bl);
cerr << "write3" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_small), 3u);
ASSERT_EQ(logger->get(l_bluestore_write_small_unused), 2u);
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(4096, 'a');
bl.append(s);
t.write(cid, hoid, 0xf000, bl.length(), bl);
t.write(cid, hoid, 0x10000, bl.length(), bl);
    cerr << "write4" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_small), 5u);
ASSERT_EQ(logger->get(l_bluestore_write_small_unused), 2u);
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
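// Verify global and per-pool statfs accounting across compressed writes, hole punches, overwrites, clones, and a temp-object rename, with an fsck after each step.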
TEST_P(StoreTestSpecificAUSize, BluestoreStatFSTest) {
  if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "TODO: fix this for smr" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_path", "");
StartDeferred(65536);
SetVal(g_conf(), "bluestore_compression_mode", "force");
SetVal(g_conf(), "bluestore_max_blob_size", "524288");
  // just a big number to disable gc
SetVal(g_conf(), "bluestore_gc_enable_total_threshold", "100000");
SetVal(g_conf(), "bluestore_fsck_on_umount", "true");
g_conf().apply_changes(nullptr);
int r;
int poolid = 4373;
coll_t cid = coll_t(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD));
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP),
string(),
0,
poolid,
string()));
ghobject_t hoid2 = hoid;
hoid2.hobj.snap = 1;
{
auto ch = store->open_collection(cid);
ASSERT_FALSE(ch);
}
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs.allocated);
ASSERT_EQ( 0u, statfs.data_stored);
ASSERT_EQ(g_conf()->bluestore_block_size, statfs.total);
ASSERT_TRUE(statfs.available > 0u && statfs.available < g_conf()->bluestore_block_size);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs_pool.allocated);
ASSERT_EQ( 0u, statfs_pool.data_stored);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append("abcde");
t.write(cid, hoid, 0, 5, bl);
cerr << "Append 5 bytes" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(5, statfs.data_stored);
ASSERT_EQ(0x10000, statfs.allocated);
ASSERT_EQ(0, statfs.data_compressed);
ASSERT_EQ(0, statfs.data_compressed_original);
ASSERT_EQ(0, statfs.data_compressed_allocated);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(5, statfs_pool.data_stored);
ASSERT_EQ(0x10000, statfs_pool.allocated);
ASSERT_EQ(0, statfs_pool.data_compressed);
ASSERT_EQ(0, statfs_pool.data_compressed_original);
ASSERT_EQ(0, statfs_pool.data_compressed_allocated);
// accessing unknown pool
r = store->pool_statfs(poolid + 1, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0, statfs_pool.data_stored);
ASSERT_EQ(0, statfs_pool.allocated);
ASSERT_EQ(0, statfs_pool.data_compressed);
ASSERT_EQ(0, statfs_pool.data_compressed_original);
ASSERT_EQ(0, statfs_pool.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
std::string s(0x30000, 'a');
bufferlist bl;
bl.append(s);
t.write(cid, hoid, 0x10000, bl.length(), bl);
cerr << "Append 0x30000 compressible bytes" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30005, statfs.data_stored);
ASSERT_EQ(0x30000, statfs.allocated);
ASSERT_LE(statfs.data_compressed, 0x10000);
ASSERT_EQ(0x20000, statfs.data_compressed_original);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30005, statfs_pool.data_stored);
ASSERT_EQ(0x30000, statfs_pool.allocated);
ASSERT_LE(statfs_pool.data_compressed, 0x10000);
ASSERT_EQ(0x20000, statfs_pool.data_compressed_original);
ASSERT_EQ(statfs_pool.data_compressed_allocated, 0x10000);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 1, 3);
t.zero(cid, hoid, 0x20000, 9);
cerr << "Punch hole at 1~3, 0x20000~9" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30005 - 3 - 9, statfs.data_stored);
ASSERT_EQ(0x30000, statfs.allocated);
ASSERT_LE(statfs.data_compressed, 0x10000);
ASSERT_EQ(0x20000 - 9, statfs.data_compressed_original);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30005 - 3 - 9, statfs_pool.data_stored);
ASSERT_EQ(0x30000, statfs_pool.allocated);
ASSERT_LE(statfs_pool.data_compressed, 0x10000);
ASSERT_EQ(0x20000 - 9, statfs_pool.data_compressed_original);
ASSERT_EQ(statfs_pool.data_compressed_allocated, 0x10000);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
std::string s(0x1000, 'b');
bufferlist bl;
bl.append(s);
t.write(cid, hoid, 1, bl.length(), bl);
t.write(cid, hoid, 0x10001, bl.length(), bl);
cerr << "Overwrite first and second(compressible) extents" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30001 - 9 + 0x1000, statfs.data_stored);
ASSERT_EQ(0x40000, statfs.allocated);
ASSERT_LE(statfs.data_compressed, 0x10000);
ASSERT_EQ(0x20000 - 9 - 0x1000, statfs.data_compressed_original);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30001 - 9 + 0x1000, statfs_pool.data_stored);
ASSERT_EQ(0x40000, statfs_pool.allocated);
ASSERT_LE(statfs_pool.data_compressed, 0x10000);
ASSERT_EQ(0x20000 - 9 - 0x1000, statfs_pool.data_compressed_original);
ASSERT_EQ(statfs_pool.data_compressed_allocated, 0x10000);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
std::string s(0x10000, 'c');
bufferlist bl;
bl.append(s);
t.write(cid, hoid, 0x10000, bl.length(), bl);
t.write(cid, hoid, 0x20000, bl.length(), bl);
t.write(cid, hoid, 0x30000, bl.length(), bl);
cerr << "Overwrite compressed extent with 3 uncompressible ones" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30000 + 0x1001, statfs.data_stored);
ASSERT_EQ(0x40000, statfs.allocated);
ASSERT_LE(statfs.data_compressed, 0);
ASSERT_EQ(0, statfs.data_compressed_original);
ASSERT_EQ(0, statfs.data_compressed_allocated);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30000 + 0x1001, statfs_pool.data_stored);
ASSERT_EQ(0x40000, statfs_pool.allocated);
ASSERT_LE(statfs_pool.data_compressed, 0);
ASSERT_EQ(0, statfs_pool.data_compressed_original);
ASSERT_EQ(0, statfs_pool.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 0, 0x40000);
cerr << "Zero object" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0u, statfs.allocated);
ASSERT_EQ(0u, statfs.data_stored);
ASSERT_EQ(0u, statfs.data_compressed_original);
ASSERT_EQ(0u, statfs.data_compressed);
ASSERT_EQ(0u, statfs.data_compressed_allocated);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0u, statfs_pool.allocated);
ASSERT_EQ(0u, statfs_pool.data_stored);
ASSERT_EQ(0u, statfs_pool.data_compressed_original);
ASSERT_EQ(0u, statfs_pool.data_compressed);
ASSERT_EQ(0u, statfs_pool.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
ObjectStore::Transaction t;
std::string s(0x10000, 'c');
bufferlist bl;
bl.append(s);
bl.append(s);
bl.append(s);
bl.append(s.substr(0, 0x10000-2));
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "Yet another compressible write" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x40000 - 2, statfs.data_stored);
ASSERT_EQ(0x30000, statfs.allocated);
ASSERT_LE(statfs.data_compressed, 0x10000);
ASSERT_EQ(0x20000, statfs.data_compressed_original);
ASSERT_EQ(0x10000, statfs.data_compressed_allocated);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x40000 - 2, statfs_pool.data_stored);
ASSERT_EQ(0x30000, statfs_pool.allocated);
ASSERT_LE(statfs_pool.data_compressed, 0x10000);
ASSERT_EQ(0x20000, statfs_pool.data_compressed_original);
ASSERT_EQ(0x10000, statfs_pool.data_compressed_allocated);
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
{
struct store_statfs_t statfs;
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ObjectStore::Transaction t;
t.clone(cid, hoid, hoid2);
cerr << "Clone compressed objecte" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs2;
r = store->statfs(&statfs2);
ASSERT_EQ(r, 0);
ASSERT_GT(statfs2.data_stored, statfs.data_stored);
ASSERT_EQ(statfs2.allocated, statfs.allocated);
ASSERT_GT(statfs2.data_compressed, statfs.data_compressed);
ASSERT_GT(statfs2.data_compressed_original, statfs.data_compressed_original);
ASSERT_EQ(statfs2.data_compressed_allocated, statfs.data_compressed_allocated);
struct store_statfs_t statfs2_pool;
r = store->pool_statfs(poolid, &statfs2_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_GT(statfs2_pool.data_stored, statfs_pool.data_stored);
ASSERT_EQ(statfs2_pool.allocated, statfs_pool.allocated);
ASSERT_GT(statfs2_pool.data_compressed, statfs_pool.data_compressed);
ASSERT_GT(statfs2_pool.data_compressed_original,
statfs_pool.data_compressed_original);
ASSERT_EQ(statfs2_pool.data_compressed_allocated,
statfs_pool.data_compressed_allocated);
}
{
    // verify that writes to a second pool do not disturb the first pool's stats
auto poolid2 = poolid + 1;
coll_t cid2 = coll_t(spg_t(pg_t(20, poolid2), shard_id_t::NO_SHARD));
ghobject_t hoid(hobject_t(sobject_t("Object 2", CEPH_NOSNAP),
string(),
0,
poolid2,
string()));
auto ch = store->create_new_collection(cid2);
{
struct store_statfs_t statfs1_pool;
bool per_pool_omap;
int r = store->pool_statfs(poolid, &statfs1_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
cerr << "Creating second collection " << cid2 << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid2, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
bufferlist bl;
bl.append("abcde");
t.write(cid2, hoid, 0, 5, bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs2_pool;
r = store->pool_statfs(poolid2, &statfs2_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(5, statfs2_pool.data_stored);
ASSERT_EQ(0x10000, statfs2_pool.allocated);
ASSERT_EQ(0, statfs2_pool.data_compressed);
ASSERT_EQ(0, statfs2_pool.data_compressed_original);
ASSERT_EQ(0, statfs2_pool.data_compressed_allocated);
struct store_statfs_t statfs1_pool_again;
r = store->pool_statfs(poolid, &statfs1_pool_again, &per_pool_omap);
ASSERT_EQ(r, 0);
// adjust 'available' since it has changed
statfs1_pool_again.available = statfs1_pool.available;
ASSERT_EQ(statfs1_pool_again, statfs1_pool);
t = ObjectStore::Transaction();
t.remove(cid2, hoid);
t.remove_collection(cid2);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
{
// verify ops on temporary object
auto poolid3 = poolid + 2;
coll_t cid3 = coll_t(spg_t(pg_t(20, poolid3), shard_id_t::NO_SHARD));
ghobject_t hoid3(hobject_t(sobject_t("Object 3", CEPH_NOSNAP),
string(),
0,
poolid3,
string()));
ghobject_t hoid3_temp;
hoid3_temp.hobj = hoid3.hobj.make_temp_hobject("Object 3 temp");
auto ch3 = store->create_new_collection(cid3);
{
struct store_statfs_t statfs1_pool;
bool per_pool_omap;
int r = store->pool_statfs(poolid, &statfs1_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
cerr << "Creating third collection " << cid3 << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid3, 0);
r = queue_transaction(store, ch3, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
bufferlist bl;
bl.append("abcde");
t.write(cid3, hoid3_temp, 0, 5, bl);
r = queue_transaction(store, ch3, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs3_pool;
r = store->pool_statfs(poolid3, &statfs3_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(5, statfs3_pool.data_stored);
ASSERT_EQ(0x10000, statfs3_pool.allocated);
ASSERT_EQ(0, statfs3_pool.data_compressed);
ASSERT_EQ(0, statfs3_pool.data_compressed_original);
ASSERT_EQ(0, statfs3_pool.data_compressed_allocated);
struct store_statfs_t statfs1_pool_again;
r = store->pool_statfs(poolid, &statfs1_pool_again, &per_pool_omap);
ASSERT_EQ(r, 0);
// adjust 'available' since it has changed
statfs1_pool_again.available = statfs1_pool.available;
ASSERT_EQ(statfs1_pool_again, statfs1_pool);
//force fsck
ch.reset();
ch3.reset();
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
ch3 = store->open_collection(cid3);
t = ObjectStore::Transaction();
t.collection_move_rename(
cid3, hoid3_temp,
cid3, hoid3);
r = queue_transaction(store, ch3, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs3_pool_again;
r = store->pool_statfs(poolid3, &statfs3_pool_again, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs3_pool_again, statfs3_pool);
//force fsck
ch.reset();
ch3.reset();
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
ch3 = store->open_collection(cid3);
t = ObjectStore::Transaction();
t.remove(cid3, hoid3);
t.remove_collection(cid3);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch3, std::move(t));
ASSERT_EQ(r, 0);
}
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs.allocated);
ASSERT_EQ( 0u, statfs.data_stored);
ASSERT_EQ( 0u, statfs.data_compressed_original);
ASSERT_EQ( 0u, statfs.data_compressed);
ASSERT_EQ( 0u, statfs.data_compressed_allocated);
struct store_statfs_t statfs_pool;
bool per_pool_omap;
r = store->pool_statfs(poolid, &statfs_pool, &per_pool_omap);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs_pool.allocated);
ASSERT_EQ( 0u, statfs_pool.data_stored);
ASSERT_EQ( 0u, statfs_pool.data_compressed_original);
ASSERT_EQ( 0u, statfs_pool.data_compressed);
ASSERT_EQ( 0u, statfs_pool.data_compressed_allocated);
}
}
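// Fragment a blob with interleaved writes and zeros, then verify reads across the holes and the resulting statfs numbers.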
TEST_P(StoreTestSpecificAUSize, BluestoreFragmentedBlobTest) {
  if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "TODO: fix this for smr" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_path", "");
StartDeferred(0x10000);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(g_conf()->bluestore_block_size, statfs.total);
ASSERT_EQ(0u, statfs.allocated);
ASSERT_EQ(0u, statfs.data_stored);
ASSERT_TRUE(statfs.available > 0u && statfs.available < g_conf()->bluestore_block_size);
}
std::string data;
data.resize(0x10000 * 3);
{
ObjectStore::Transaction t;
    for (size_t i = 0; i < data.size(); i++)
data[i] = i / 256 + 1;
bufferlist bl, newdata;
bl.append(data);
t.write(cid, hoid, 0, bl.length(), bl);
t.zero(cid, hoid, 0x10000, 0x10000);
cerr << "Append 3*0x10000 bytes and punch a hole 0x10000~10000" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x20000, statfs.data_stored);
ASSERT_EQ(0x20000, statfs.allocated);
r = store->read(ch, hoid, 0, data.size(), newdata);
ASSERT_EQ(r, (int)data.size());
{
bufferlist expected;
expected.append(data.substr(0, 0x10000));
expected.append(string(0x10000, 0));
expected.append(data.substr(0x20000, 0x10000));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
r = store->read(ch, hoid, 1, data.size()-2, newdata);
ASSERT_EQ(r, (int)data.size()-2);
{
bufferlist expected;
expected.append(data.substr(1, 0x10000-1));
expected.append(string(0x10000, 0));
expected.append(data.substr(0x20000, 0x10000 - 1));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
}
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
std::string data2(3, 'b');
bufferlist bl, newdata;
bl.append(data2);
t.write(cid, hoid, 0x20000, bl.length(), bl);
cerr << "Write 3 bytes after the hole" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x20000, statfs.allocated);
ASSERT_EQ(0x20000, statfs.data_stored);
r = store->read(ch, hoid, 0x20000-1, 21, newdata);
ASSERT_EQ(r, (int)21);
{
bufferlist expected;
expected.append(string(0x1, 0));
expected.append(string(data2));
expected.append(data.substr(0x20003, 21-4));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
}
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
std::string data2(3, 'a');
bufferlist bl, newdata;
bl.append(data2);
t.write(cid, hoid, 0x10000+1, bl.length(), bl);
cerr << "Write 3 bytes to the hole" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x30000, statfs.allocated);
ASSERT_EQ(0x20003, statfs.data_stored);
r = store->read(ch, hoid, 0x10000-1, 0x10000+22, newdata);
ASSERT_EQ(r, (int)0x10000+22);
{
bufferlist expected;
expected.append(data.substr(0x10000-1, 1));
expected.append(string(0x1, 0));
expected.append(data2);
expected.append(string(0x10000-4, 0));
expected.append(string(0x3, 'b'));
expected.append(data.substr(0x20004, 21-3));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
}
{
ObjectStore::Transaction t;
bufferlist bl, newdata;
bl.append(string(0x30000, 'c'));
t.write(cid, hoid, 0, 0x30000, bl);
t.zero(cid, hoid, 0, 0x10000);
t.zero(cid, hoid, 0x20000, 0x10000);
cerr << "Rewrite an object and create two holes at the beginning and the end" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(0x10000, statfs.allocated);
ASSERT_EQ(0x10000, statfs.data_stored);
r = store->read(ch, hoid, 0, 0x30000, newdata);
ASSERT_EQ(r, (int)0x30000);
{
bufferlist expected;
expected.append(string(0x10000, 0));
expected.append(string(0x10000, 'c'));
expected.append(string(0x10000, 0));
ASSERT_TRUE(bl_eq(expected, newdata));
}
newdata.clear();
}
//force fsck
ch.reset();
EXPECT_EQ(store->umount(), 0);
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct store_statfs_t statfs;
r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ( 0u, statfs.allocated);
ASSERT_EQ( 0u, statfs.data_stored);
ASSERT_EQ( 0u, statfs.data_compressed_original);
ASSERT_EQ( 0u, statfs.data_compressed);
ASSERT_EQ( 0u, statfs.data_compressed_allocated);
}
}
#endif
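// Many 4 KiB writes: sequential on one object, random offsets on another.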
TEST_P(StoreTest, ManySmallWrite) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
ghobject_t b(hobject_t(sobject_t("Object 2", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
bufferptr bp(4096);
bp.zero();
bl.append(bp);
for (int i=0; i<100; ++i) {
ObjectStore::Transaction t;
t.write(cid, a, i*4096, 4096, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (int i=0; i<100; ++i) {
ObjectStore::Transaction t;
t.write(cid, b, (rand() % 1024)*4096, 4096, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove(cid, b);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
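// Writes touching the same block must apply cleanly within one transaction, across a transaction list, and across pipelined submissions.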
TEST_P(StoreTest, MultiSmallWriteSameBlock) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
bl.append("short");
C_SaferCond c, d;
  // touch the same block within one transaction, across a transaction list, and across pipelined transactions
{
ObjectStore::Transaction t, u;
t.write(cid, a, 0, 5, bl, 0);
t.write(cid, a, 5, 5, bl, 0);
t.write(cid, a, 4094, 5, bl, 0);
t.write(cid, a, 9000, 5, bl, 0);
u.write(cid, a, 10, 5, bl, 0);
u.write(cid, a, 7000, 5, bl, 0);
t.register_on_commit(&c);
vector<ObjectStore::Transaction> v = {t, u};
store->queue_transactions(ch, v);
}
{
ObjectStore::Transaction t, u;
t.write(cid, a, 40, 5, bl, 0);
t.write(cid, a, 45, 5, bl, 0);
t.write(cid, a, 4094, 5, bl, 0);
t.write(cid, a, 6000, 5, bl, 0);
u.write(cid, a, 610, 5, bl, 0);
u.write(cid, a, 11000, 5, bl, 0);
t.register_on_commit(&d);
vector<ObjectStore::Transaction> v = {t, u};
store->queue_transactions(ch, v);
}
c.wait();
d.wait();
{
bufferlist bl2;
r = store->read(ch, a, 0, 16000, bl2);
ASSERT_GE(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
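// Truncate up, then write past the front; the skipped front must read back as zeros.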
TEST_P(StoreTest, SmallSkipFront) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.touch(cid, a);
t.truncate(cid, a, 3000);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist bl;
bufferptr bp(4096);
memset(bp.c_str(), 1, 4096);
bl.append(bp);
ObjectStore::Transaction t;
t.write(cid, a, 4096, 4096, bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist bl;
ASSERT_EQ(8192, store->read(ch, a, 0, 8192, bl));
for (unsigned i=0; i<4096; ++i)
ASSERT_EQ(0, bl[i]);
for (unsigned i=4096; i<8192; ++i)
ASSERT_EQ(1, bl[i]);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
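// Append in sub-min_alloc_size chunks with a remount in between to drop the cached tail; the pieces must still read back intact.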
TEST_P(StoreTest, AppendDeferredVsTailCache) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("fooo", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
unsigned min_alloc = g_conf()->bluestore_min_alloc_size;
unsigned size = min_alloc / 3;
bufferptr bpa(size);
memset(bpa.c_str(), 1, bpa.length());
bufferlist bla;
bla.append(bpa);
{
ObjectStore::Transaction t;
t.write(cid, a, 0, bla.length(), bla, 0);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
// force cached tail to clear ...
{
ch.reset();
int r = store->umount();
ASSERT_EQ(0, r);
r = store->mount();
ASSERT_EQ(0, r);
ch = store->open_collection(cid);
}
bufferptr bpb(size);
memset(bpb.c_str(), 2, bpb.length());
bufferlist blb;
blb.append(bpb);
{
ObjectStore::Transaction t;
t.write(cid, a, bla.length(), blb.length(), blb, 0);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferptr bpc(size);
memset(bpc.c_str(), 3, bpc.length());
bufferlist blc;
blc.append(bpc);
{
ObjectStore::Transaction t;
t.write(cid, a, bla.length() + blb.length(), blc.length(), blc, 0);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist final;
final.append(bla);
final.append(blb);
final.append(blc);
bufferlist actual;
{
ASSERT_EQ((int)final.length(),
store->read(ch, a, 0, final.length(), actual));
ASSERT_TRUE(bl_eq(final, actual));
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
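// Truncate away trailing gunk in a block shared via clone, then append far past EOF; the implicit gap must read back as zeros.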
TEST_P(StoreTest, AppendZeroTrailingSharedBlock) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("fooo", CEPH_NOSNAP)));
ghobject_t b = a;
b.hobj.snap = 1;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
unsigned min_alloc = g_conf()->bluestore_min_alloc_size;
unsigned size = min_alloc / 3;
bufferptr bpa(size);
memset(bpa.c_str(), 1, bpa.length());
bufferlist bla;
bla.append(bpa);
// make sure there is some trailing gunk in the last block
{
bufferlist bt;
bt.append(bla);
bt.append("BADBADBADBAD");
ObjectStore::Transaction t;
t.write(cid, a, 0, bt.length(), bt, 0);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.truncate(cid, a, size);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
// clone
{
ObjectStore::Transaction t;
t.clone(cid, a, b);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
// append with implicit zeroing
bufferptr bpb(size);
memset(bpb.c_str(), 2, bpb.length());
bufferlist blb;
blb.append(bpb);
{
ObjectStore::Transaction t;
t.write(cid, a, min_alloc * 3, blb.length(), blb, 0);
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist final;
final.append(bla);
bufferlist zeros;
zeros.append_zero(min_alloc * 3 - size);
final.append(zeros);
final.append(blb);
bufferlist actual;
{
ASSERT_EQ((int)final.length(),
store->read(ch, a, 0, final.length(), actual));
final.hexdump(cout);
actual.hexdump(cout);
ASSERT_TRUE(bl_eq(final, actual));
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove(cid, b);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = store->queue_transaction(ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
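// 1000 sequential 1000-byte writes, each unaligned to the block size.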
TEST_P(StoreTest, SmallSequentialUnaligned) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
int len = 1000;
bufferptr bp(len);
bp.zero();
bl.append(bp);
for (int i=0; i<1000; ++i) {
ObjectStore::Transaction t;
t.write(cid, a, i*len, len, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
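// 4 MiB writes: sequential, aligned random, unaligned random, then some large zeros.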
TEST_P(StoreTest, ManyBigWrite) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
ghobject_t b(hobject_t(sobject_t("Object 2", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
bufferptr bp(4 * 1048576);
bp.zero();
bl.append(bp);
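  // sequential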
for (int i=0; i<10; ++i) {
ObjectStore::Transaction t;
    t.write(cid, a, i*4*1048576, 4*1048576, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// aligned
for (int i=0; i<10; ++i) {
ObjectStore::Transaction t;
t.write(cid, b, (rand() % 256)*4*1048576, 4*1048576, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// unaligned
for (int i=0; i<10; ++i) {
ObjectStore::Transaction t;
t.write(cid, b, (rand() % (256*4096))*1024, 4*1048576, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// do some zeros
for (int i=0; i<10; ++i) {
ObjectStore::Transaction t;
t.zero(cid, b, (rand() % (256*4096))*1024, 16*1048576);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove(cid, b);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
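// Big write, zero out its middle half, then drop a small write into the zeroed range.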
TEST_P(StoreTest, BigWriteBigZero) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
bufferptr bp(1048576);
memset(bp.c_str(), 'b', bp.length());
bl.append(bp);
bufferlist s;
bufferptr sp(4096);
memset(sp.c_str(), 's', sp.length());
s.append(sp);
{
ObjectStore::Transaction t;
t.write(cid, a, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.zero(cid, a, bl.length() / 4, bl.length() / 2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.write(cid, a, bl.length() / 2, s.length(), s);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
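// Leave a gap between two 512 KiB extents, confirm the gap reads as zeros, then bridge it.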
TEST_P(StoreTest, MiscFragmentTests) {
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
bufferptr bp(524288);
bp.zero();
bl.append(bp);
{
ObjectStore::Transaction t;
t.write(cid, a, 0, 524288, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.write(cid, a, 1048576, 524288, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist inbl;
int r = store->read(ch, a, 524288 + 131072, 1024, inbl);
ASSERT_EQ(r, 1024);
ASSERT_EQ(inbl.length(), 1024u);
ASSERT_TRUE(inbl.is_zero());
}
{
ObjectStore::Transaction t;
t.write(cid, a, 1048576 - 4096, 524288, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
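// zero() within the object must not change st_size; zero() past EOF must extend it.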
TEST_P(StoreTest, ZeroVsObjectSize) {
int r;
coll_t cid;
struct stat stat;
ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist a;
a.append("stuff");
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0, 5, a);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(0, store->stat(ch, hoid, &stat));
ASSERT_EQ(5, stat.st_size);
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 1, 2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(0, store->stat(ch, hoid, &stat));
ASSERT_EQ(5, stat.st_size);
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 3, 200);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(0, store->stat(ch, hoid, &stat));
ASSERT_EQ(203, stat.st_size);
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 100000, 200);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(0, store->stat(ch, hoid, &stat));
ASSERT_EQ(100200, stat.st_size);
}
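// A zero-length write at a large offset must leave the object empty.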
TEST_P(StoreTest, ZeroLengthWrite) {
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist empty;
t.write(cid, hoid, 1048576, 0, empty);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
struct stat stat;
r = store->stat(ch, hoid, &stat);
ASSERT_EQ(0, r);
ASSERT_EQ(0, stat.st_size);
bufferlist newdata;
r = store->read(ch, hoid, 0, 1048576, newdata);
ASSERT_EQ(0, r);
}
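// A zero-length zero() at a large offset must leave the object empty.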
TEST_P(StoreTest, ZeroLengthZero) {
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 1048576, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
struct stat stat;
r = store->stat(ch, hoid, &stat);
ASSERT_EQ(0, r);
ASSERT_EQ(0, stat.st_size);
bufferlist newdata;
r = store->read(ch, hoid, 0, 1048576, newdata);
ASSERT_EQ(0, r);
}
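// getattr/getattrs on present and missing xattrs, plus collection_empty before and after the object exists.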
TEST_P(StoreTest, SimpleAttrTest) {
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("attr object 1", CEPH_NOSNAP)));
bufferlist val, val2;
val.append("value");
val.append("value2");
{
auto ch = store->open_collection(cid);
ASSERT_FALSE(ch);
}
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool empty;
int r = store->collection_empty(ch, &empty);
ASSERT_EQ(0, r);
ASSERT_TRUE(empty);
}
{
bufferptr bp;
r = store->getattr(ch, hoid, "nofoo", bp);
ASSERT_EQ(-ENOENT, r);
}
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.setattr(cid, hoid, "foo", val);
t.setattr(cid, hoid, "bar", val2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool empty;
int r = store->collection_empty(ch, &empty);
ASSERT_EQ(0, r);
ASSERT_TRUE(!empty);
}
{
bufferptr bp;
r = store->getattr(ch, hoid, "nofoo", bp);
ASSERT_EQ(-ENODATA, r);
r = store->getattr(ch, hoid, "foo", bp);
ASSERT_EQ(0, r);
bufferlist bl;
bl.append(bp);
ASSERT_TRUE(bl_eq(val, bl));
map<string,bufferptr,less<>> bm;
r = store->getattrs(ch, hoid, bm);
ASSERT_EQ(0, r);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
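// Create 200 objects and page through collection_list in batches of 50, checking order and completeness.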
TEST_P(StoreTest, SimpleListTest) {
int r;
coll_t cid(spg_t(pg_t(0, 1), shard_id_t(1)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
set<ghobject_t> all;
{
ObjectStore::Transaction t;
for (int i=0; i<200; ++i) {
string name("object_");
name += stringify(i);
ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP)),
ghobject_t::NO_GEN, shard_id_t(1));
hoid.hobj.pool = 1;
all.insert(hoid);
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
}
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
set<ghobject_t> saw;
vector<ghobject_t> objects;
ghobject_t next, current;
while (!next.is_max()) {
int r = collection_list(store, ch, current, ghobject_t::get_max(), 50,
&objects, &next);
ASSERT_EQ(r, 0);
ASSERT_TRUE(sorted(objects));
cout << " got " << objects.size() << " next " << next << std::endl;
for (vector<ghobject_t>::iterator p = objects.begin(); p != objects.end();
++p) {
if (saw.count(*p)) {
cout << "got DUP " << *p << std::endl;
} else {
//cout << "got new " << *p << std::endl;
}
saw.insert(*p);
}
objects.clear();
current = next;
}
ASSERT_EQ(saw.size(), all.size());
ASSERT_EQ(saw, all);
}
{
ObjectStore::Transaction t;
for (set<ghobject_t>::iterator p = all.begin(); p != all.end(); ++p)
t.remove(cid, *p);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
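// collection_list with an explicit end bound must not return the bound object itself.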
TEST_P(StoreTest, ListEndTest) {
int r;
coll_t cid(spg_t(pg_t(0, 1), shard_id_t(1)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
set<ghobject_t> all;
{
ObjectStore::Transaction t;
for (int i=0; i<200; ++i) {
string name("object_");
name += stringify(i);
ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP)),
ghobject_t::NO_GEN, shard_id_t(1));
hoid.hobj.pool = 1;
all.insert(hoid);
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
}
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ghobject_t end(hobject_t(sobject_t("object_100", CEPH_NOSNAP)),
ghobject_t::NO_GEN, shard_id_t(1));
end.hobj.pool = 1;
vector<ghobject_t> objects;
ghobject_t next;
int r = collection_list(store, ch, ghobject_t(), end, 500, &objects, &next);
ASSERT_EQ(r, 0);
for (auto &p : objects) {
ASSERT_NE(p, end);
}
}
{
ObjectStore::Transaction t;
for (set<ghobject_t>::iterator p = all.begin(); p != all.end(); ++p)
t.remove(cid, *p);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
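// An object with hash 0xffffffff must still be listable in the meta collection.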
TEST_P(StoreTest, List_0xfffffff_Hash_Test_in_meta) {
int r = 0;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
ghobject_t hoid(hobject_t(sobject_t("obj", CEPH_NOSNAP),
"", UINT32_C(0xffffffff), -1, "nspace"));
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, nullptr, true);
ASSERT_EQ(r, 0);
    ASSERT_EQ(objects.size(), 1u);
}
}
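// An object with hash 0xffffffff must still be listable in a PG collection.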
TEST_P(StoreTest, List_0xfffffff_Hash_Test_in_PG) {
int r = 0;
const int64_t poolid = 1;
coll_t cid(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
ghobject_t hoid(hobject_t(sobject_t("obj", CEPH_NOSNAP),
"", UINT32_C(0xffffffff), poolid, "nspace"));
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, nullptr, true);
ASSERT_EQ(r, 0);
    ASSERT_EQ(objects.size(), 1u);
}
}
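// hobject_t/ghobject_t ordering: pool compares before name, including negative pool ids.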
TEST_P(StoreTest, Sort) {
{
hobject_t a(sobject_t("a", CEPH_NOSNAP));
hobject_t b = a;
ASSERT_EQ(a, b);
b.oid.name = "b";
ASSERT_NE(a, b);
ASSERT_TRUE(a < b);
a.pool = 1;
b.pool = 2;
ASSERT_TRUE(a < b);
a.pool = 3;
ASSERT_TRUE(a > b);
}
{
ghobject_t a(hobject_t(sobject_t("a", CEPH_NOSNAP)));
ghobject_t b(hobject_t(sobject_t("b", CEPH_NOSNAP)));
a.hobj.pool = 1;
b.hobj.pool = 1;
ASSERT_TRUE(a < b);
a.hobj.pool = -3;
ASSERT_TRUE(a < b);
a.hobj.pool = 1;
b.hobj.pool = -3;
ASSERT_TRUE(a > b);
}
}
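// Listing a collection holding objects spread across two pool ids (poolid and -2-poolid) must return all of them.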
TEST_P(StoreTest, MultipoolListTest) {
int r;
int poolid = 4373;
coll_t cid = coll_t(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
set<ghobject_t> all, saw;
{
ObjectStore::Transaction t;
for (int i=0; i<200; ++i) {
string name("object_");
name += stringify(i);
ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP)));
if (rand() & 1)
hoid.hobj.pool = -2 - poolid;
else
hoid.hobj.pool = poolid;
all.insert(hoid);
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
}
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
vector<ghobject_t> objects;
ghobject_t next, current;
while (!next.is_max()) {
int r = collection_list(store, ch, current, ghobject_t::get_max(), 50,
&objects, &next);
ASSERT_EQ(r, 0);
cout << " got " << objects.size() << " next " << next << std::endl;
for (vector<ghobject_t>::iterator p = objects.begin(); p != objects.end();
++p) {
saw.insert(*p);
}
objects.clear();
current = next;
}
ASSERT_EQ(saw, all);
}
{
ObjectStore::Transaction t;
for (set<ghobject_t>::iterator p = all.begin(); p != all.end(); ++p)
t.remove(cid, *p);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
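// Clone objects carrying xattrs through a range of overwrite patterns; also verify that removing a non-empty collection aborts.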
TEST_P(StoreTest, SimpleCloneTest) {
int r;
coll_t cid;
SetDeathTestStyle("threadsafe");
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP),
"key", 123, -1, ""));
bufferlist small, large, xlarge, newdata, attr;
small.append("small");
large.append("large");
xlarge.append("xlarge");
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.setattr(cid, hoid, "attr1", small);
t.setattr(cid, hoid, "attr2", large);
t.setattr(cid, hoid, "attr3", xlarge);
t.write(cid, hoid, 0, small.length(), small);
t.write(cid, hoid, 10, small.length(), small);
cerr << "Creating object and set attr " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP),
"key", 123, -1, ""));
ghobject_t hoid3(hobject_t(sobject_t("Object 3", CEPH_NOSNAP)));
{
ObjectStore::Transaction t;
t.clone(cid, hoid, hoid2);
t.setattr(cid, hoid2, "attr2", small);
t.rmattr(cid, hoid2, "attr1");
t.write(cid, hoid, 10, large.length(), large);
t.setattr(cid, hoid, "attr1", large);
t.setattr(cid, hoid, "attr2", small);
cerr << "Clone object and rm attr" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid, 10, 5, newdata);
ASSERT_EQ(r, 5);
ASSERT_TRUE(bl_eq(large, newdata));
newdata.clear();
r = store->read(ch, hoid, 0, 5, newdata);
ASSERT_EQ(r, 5);
ASSERT_TRUE(bl_eq(small, newdata));
newdata.clear();
r = store->read(ch, hoid2, 10, 5, newdata);
ASSERT_EQ(r, 5);
ASSERT_TRUE(bl_eq(small, newdata));
r = store->getattr(ch, hoid2, "attr2", attr);
ASSERT_EQ(r, 0);
ASSERT_TRUE(bl_eq(small, attr));
attr.clear();
r = store->getattr(ch, hoid2, "attr3", attr);
ASSERT_EQ(r, 0);
ASSERT_TRUE(bl_eq(xlarge, attr));
attr.clear();
r = store->getattr(ch, hoid, "attr1", attr);
ASSERT_EQ(r, 0);
ASSERT_TRUE(bl_eq(large, attr));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
}
{
bufferlist final;
bufferptr p(16384);
memset(p.c_str(), 1, p.length());
bufferlist pl;
pl.append(p);
final.append(p);
ObjectStore::Transaction t;
t.write(cid, hoid, 0, pl.length(), pl);
t.clone(cid, hoid, hoid2);
bufferptr a(4096);
memset(a.c_str(), 2, a.length());
bufferlist al;
al.append(a);
final.append(a);
t.write(cid, hoid, pl.length(), a.length(), al);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist rl;
ASSERT_EQ((int)final.length(),
store->read(ch, hoid, 0, final.length(), rl));
ASSERT_TRUE(bl_eq(rl, final));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
}
{
bufferlist final;
bufferptr p(16384);
memset(p.c_str(), 111, p.length());
bufferlist pl;
pl.append(p);
final.append(p);
ObjectStore::Transaction t;
t.write(cid, hoid, 0, pl.length(), pl);
t.clone(cid, hoid, hoid2);
bufferptr z(4096);
z.zero();
final.append(z);
bufferptr a(4096);
memset(a.c_str(), 112, a.length());
bufferlist al;
al.append(a);
final.append(a);
t.write(cid, hoid, pl.length() + z.length(), a.length(), al);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist rl;
ASSERT_EQ((int)final.length(),
store->read(ch, hoid, 0, final.length(), rl));
ASSERT_TRUE(bl_eq(rl, final));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
}
{
bufferlist final;
bufferptr p(16000);
memset(p.c_str(), 5, p.length());
bufferlist pl;
pl.append(p);
final.append(p);
ObjectStore::Transaction t;
t.write(cid, hoid, 0, pl.length(), pl);
t.clone(cid, hoid, hoid2);
bufferptr z(1000);
z.zero();
final.append(z);
bufferptr a(8000);
memset(a.c_str(), 6, a.length());
bufferlist al;
al.append(a);
final.append(a);
t.write(cid, hoid, 17000, a.length(), al);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
bufferlist rl;
ASSERT_EQ((int)final.length(),
store->read(ch, hoid, 0, final.length(), rl));
/*cout << "expected:\n";
final.hexdump(cout);
cout << "got:\n";
rl.hexdump(cout);*/
ASSERT_TRUE(bl_eq(rl, final));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
}
{
bufferptr p(1048576);
memset(p.c_str(), 3, p.length());
bufferlist pl;
pl.append(p);
ObjectStore::Transaction t;
t.write(cid, hoid, 0, pl.length(), pl);
t.clone(cid, hoid, hoid2);
bufferptr a(65536);
memset(a.c_str(), 4, a.length());
bufferlist al;
al.append(a);
t.write(cid, hoid, a.length(), a.length(), al);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
bufferlist rl;
bufferlist final;
final.substr_of(pl, 0, al.length());
final.append(al);
bufferlist end;
end.substr_of(pl, al.length()*2, pl.length() - al.length()*2);
final.append(end);
ASSERT_EQ((int)final.length(),
store->read(ch, hoid, 0, final.length(), rl));
/*cout << "expected:\n";
final.hexdump(cout);
cout << "got:\n";
rl.hexdump(cout);*/
ASSERT_TRUE(bl_eq(rl, final));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
}
{
bufferptr p(65536);
memset(p.c_str(), 7, p.length());
bufferlist pl;
pl.append(p);
ObjectStore::Transaction t;
t.write(cid, hoid, 0, pl.length(), pl);
t.clone(cid, hoid, hoid2);
bufferptr a(4096);
memset(a.c_str(), 8, a.length());
bufferlist al;
al.append(a);
t.write(cid, hoid, 32768, a.length(), al);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
bufferlist rl;
bufferlist final;
final.substr_of(pl, 0, 32768);
final.append(al);
bufferlist end;
end.substr_of(pl, final.length(), pl.length() - final.length());
final.append(end);
ASSERT_EQ((int)final.length(),
store->read(ch, hoid, 0, final.length(), rl));
/*cout << "expected:\n";
final.hexdump(cout);
cout << "got:\n";
rl.hexdump(cout);*/
ASSERT_TRUE(bl_eq(rl, final));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
}
{
bufferptr p(65536);
memset(p.c_str(), 9, p.length());
bufferlist pl;
pl.append(p);
ObjectStore::Transaction t;
t.write(cid, hoid, 0, pl.length(), pl);
t.clone(cid, hoid, hoid2);
bufferptr a(4096);
memset(a.c_str(), 10, a.length());
bufferlist al;
al.append(a);
t.write(cid, hoid, 33768, a.length(), al);
ASSERT_EQ(0, queue_transaction(store, ch, std::move(t)));
bufferlist rl;
bufferlist final;
final.substr_of(pl, 0, 33768);
final.append(al);
bufferlist end;
end.substr_of(pl, final.length(), pl.length() - final.length());
final.append(end);
ASSERT_EQ((int)final.length(),
store->read(ch, hoid, 0, final.length(), rl));
/*cout << "expected:\n";
final.hexdump(cout);
cout << "got:\n";
rl.hexdump(cout);*/
ASSERT_TRUE(bl_eq(rl, final));
}
{
    // verify that removing a non-empty collection fails after a store reload
ch.reset();
r = store->umount();
ASSERT_EQ(r, 0);
r = store->mount();
ASSERT_EQ(r, 0);
ch = store->open_collection(cid);
ObjectStore::Transaction t;
t.remove_collection(cid);
cerr << "Invalid rm coll" << std::endl;
PrCtl unset_dumpable;
EXPECT_DEATH(queue_transaction(store, ch, std::move(t)), "");
}
{
ObjectStore::Transaction t;
t.touch(cid, hoid3); //new record in db
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
    // verify that removing a non-empty collection fails when there are pending removes and live records in db
cerr << "Invalid rm coll again" << std::endl;
ch.reset();
r = store->umount();
ASSERT_EQ(r, 0);
r = store->mount();
ASSERT_EQ(r, 0);
ch = store->open_collection(cid);
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove_collection(cid);
PrCtl unset_dumpable;
EXPECT_DEATH(queue_transaction(store, ch, std::move(t)), "");
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove(cid, hoid3);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
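// Set an omap header and keys, then read them back via omap_get and both iterator styles.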
TEST_P(StoreTest, OmapSimple) {
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("omap_obj", CEPH_NOSNAP),
"key", 123, -1, ""));
bufferlist small;
small.append("small");
map<string,bufferlist> km;
km["foo"] = small;
km["bar"].append("asdfjkasdkjdfsjkafskjsfdj");
bufferlist header;
header.append("this is a header");
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.omap_setkeys(cid, hoid, km);
t.omap_setheader(cid, hoid, header);
cerr << "Creating object and set omap " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// get header, keys
{
bufferlist h;
map<string,bufferlist> r;
store->omap_get(ch, hoid, &h, &r);
ASSERT_TRUE(bl_eq(header, h));
ASSERT_EQ(r.size(), km.size());
cout << "r: " << r << std::endl;
}
// test iterator with seek_to_first
{
map<string,bufferlist> r;
ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, hoid);
for (iter->seek_to_first(); iter->valid(); iter->next()) {
r[iter->key()] = iter->value();
}
cout << "r: " << r << std::endl;
ASSERT_EQ(r.size(), km.size());
}
// test iterator with initial lower_bound
{
map<string,bufferlist> r;
ObjectMap::ObjectMapIterator iter = store->get_omap_iterator(ch, hoid);
for (iter->lower_bound(string()); iter->valid(); iter->next()) {
r[iter->key()] = iter->value();
}
cout << "r: " << r << std::endl;
ASSERT_EQ(r.size(), km.size());
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, OmapCloneTest) {
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP),
"key", 123, -1, ""));
bufferlist small;
small.append("small");
map<string,bufferlist> km;
km["foo"] = small;
km["bar"].append("asdfjkasdkjdfsjkafskjsfdj");
bufferlist header;
header.append("this is a header");
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.omap_setkeys(cid, hoid, km);
t.omap_setheader(cid, hoid, header);
cerr << "Creating object and set omap " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP),
"key", 123, -1, ""));
{
ObjectStore::Transaction t;
t.clone(cid, hoid, hoid2);
cerr << "Clone object" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
map<string,bufferlist> r;
bufferlist h;
store->omap_get(ch, hoid2, &h, &r);
ASSERT_TRUE(bl_eq(header, h));
ASSERT_EQ(r.size(), km.size());
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, SimpleCloneRangeTest) {
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
hoid.hobj.pool = -1;
bufferlist small, newdata;
small.append("small");
{
ObjectStore::Transaction t;
t.write(cid, hoid, 10, 5, small);
cerr << "Creating object and write bl " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid2(hobject_t(sobject_t("Object 2", CEPH_NOSNAP)));
hoid2.hobj.pool = -1;
{
ObjectStore::Transaction t;
t.clone_range(cid, hoid, hoid2, 10, 5, 10);
cerr << "Clone range object" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
r = store->read(ch, hoid2, 10, 5, newdata);
ASSERT_EQ(r, 5);
ASSERT_TRUE(bl_eq(small, newdata));
}
{
ObjectStore::Transaction t;
t.truncate(cid, hoid, 1024*1024);
t.clone_range(cid, hoid, hoid2, 0, 1024*1024, 0);
cerr << "Clone range object" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
struct stat stat, stat2;
r = store->stat(ch, hoid, &stat);
r = store->stat(ch, hoid2, &stat2);
ASSERT_EQ(stat.st_size, stat2.st_size);
ASSERT_EQ(1024*1024, stat2.st_size);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
#if defined(WITH_BLUESTORE)
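// Clone a range to create a shared blob, overwrite the source, delete the
// clone, and verify that no entries remain under the shared-blob prefix,
// i.e. that the blob was properly unshared.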
TEST_P(StoreTest, BlueStoreUnshareBlobTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: non-deterministic behavior with smr" << std::endl;
return;
}
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
hoid.hobj.pool = -1;
ghobject_t hoid2(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
hoid2.hobj.pool = -1;
hoid2.generation = 2;
{
// check if blob is unshared properly
bufferlist data, newdata;
data.append(string(8192, 'a'));
ObjectStore::Transaction t;
t.write(cid, hoid, 0, data.length(), data);
cerr << "Creating object and write 8K " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ObjectStore::Transaction t2;
t2.clone_range(cid, hoid, hoid2, 0, 4096, 0);
cerr << "Clone range object" << std::endl;
r = queue_transaction(store, ch, std::move(t2));
ASSERT_EQ(r, 0);
data.clear();
data.append(string(4096, 'b'));
ObjectStore::Transaction t3;
t3.write(cid, hoid, 0, data.length(), data);
cerr << "Writing 4k to source object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t3));
ASSERT_EQ(r, 0);
{
      // remount to evict hoid's onode from the cache
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
ObjectStore::Transaction t4;
t4.remove(cid, hoid2);
cerr << "Deleting dest object" << hoid2 << std::endl;
r = queue_transaction(store, ch, std::move(t4));
ASSERT_EQ(r, 0);
{
      // this ensures the remove operation has been submitted to the kv store
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
bufferlist resdata;
r = store->read(ch, hoid, 0, 0x2000, resdata);
ASSERT_EQ(r, 0x2000);
{
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
auto* kv = bstore->get_kv();
      // must match PREFIX_SHARED_BLOB in BlueStore.cc
const string PREFIX_SHARED_BLOB = "X";
size_t cnt = 0;
auto it = kv->get_iterator(PREFIX_SHARED_BLOB);
ceph_assert(it);
for (it->lower_bound(string()); it->valid(); it->next()) {
++cnt;
}
ASSERT_EQ(cnt, 0);
}
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, BlueStoreUnshareBlobBugTest) {
if (string(GetParam()) != "bluestore")
return;
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
hoid.hobj.pool = -1;
ghobject_t hoid2(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
hoid2.hobj.pool = -1;
hoid2.generation = 2;
{
    // exercise blob unsharing; the bug asserted below leaves the blob shared
bufferlist data, newdata;
data.append(string(8192, 'a'));
ObjectStore::Transaction t;
t.write(cid, hoid, 0, data.length(), data);
cerr << "Creating object and write 8K " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ObjectStore::Transaction t2;
t2.clone_range(cid, hoid, hoid2, 0, 4096, 0);
cerr << "Clone range object" << std::endl;
r = queue_transaction(store, ch, std::move(t2));
ASSERT_EQ(r, 0);
data.clear();
data.append(string(4096, 'b'));
ObjectStore::Transaction t3;
t3.write(cid, hoid, 0, data.length(), data);
cerr << "Writing 4k to source object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t3));
ASSERT_EQ(r, 0);
{
      // remount to evict hoid's onode from the cache
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
ObjectStore::Transaction t4;
t4.write(cid, hoid2, 0, data.length(), data);
cerr << "Writing 4k to second object " << hoid2 << std::endl;
r = queue_transaction(store, ch, std::move(t4));
ASSERT_EQ(r, 0);
bufferlist resdata;
r = store->read(ch, hoid, 0, 0x2000, resdata);
ASSERT_EQ(r, 0x2000);
{
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
auto* kv = bstore->get_kv();
      // must match PREFIX_SHARED_BLOB in BlueStore.cc
const string PREFIX_SHARED_BLOB = "X";
size_t cnt = 0;
auto it = kv->get_iterator(PREFIX_SHARED_BLOB);
ceph_assert(it);
for (it->lower_bound(string()); it->valid(); it->next()) {
++cnt;
}
      // This demonstrates a bug in blob unsharing: after writing 0x0~1000
      // to hoid2, the shared blob at hoid should become unshared, but the
      // current implementation leaves it shared.
ASSERT_EQ(cnt, 1);
}
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
#endif
TEST_P(StoreTest, SimpleObjectLongnameTest) {
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ghobject_t hoid(hobject_t(sobject_t("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaObjectaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa 1", CEPH_NOSNAP)));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
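// Build an object name padded with 500 'a' characters and confine its hash
// to two buckets (i % 2) so that many long-named objects collide and
// exercise split handling.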
ghobject_t generate_long_name(unsigned i)
{
stringstream name;
name << "object id " << i << " ";
for (unsigned j = 0; j < 500; ++j) name << 'a';
ghobject_t hoid(hobject_t(sobject_t(name.str(), CEPH_NOSNAP)));
hoid.hobj.set_hash(i % 2);
return hoid;
}
TEST_P(StoreTest, LongnameSplitTest) {
int r;
coll_t cid;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
for (unsigned i = 0; i < 320; ++i) {
ObjectStore::Transaction t;
ghobject_t hoid = generate_long_name(i);
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
ghobject_t test_obj = generate_long_name(319);
ghobject_t test_obj_2 = test_obj;
test_obj_2.generation = 0;
{
ObjectStore::Transaction t;
// should cause a split
t.collection_move_rename(
cid, test_obj,
cid, test_obj_2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
for (unsigned i = 0; i < 319; ++i) {
ObjectStore::Transaction t;
ghobject_t hoid = generate_long_name(i);
t.remove(cid, hoid);
cerr << "Removing object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
{
ObjectStore::Transaction t;
t.remove(cid, test_obj_2);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(0, r);
}
}
TEST_P(StoreTest, ManyObjectTest) {
int NUM_OBJS = 2000;
int r = 0;
coll_t cid;
string base = "";
for (int i = 0; i < 100; ++i) base.append("aaaaa");
set<ghobject_t> created;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (int i = 0; i < NUM_OBJS; ++i) {
if (!(i % 5)) {
cerr << "Object " << i << std::endl;
}
ObjectStore::Transaction t;
char buf[100];
snprintf(buf, sizeof(buf), "%d", i);
ghobject_t hoid(hobject_t(sobject_t(string(buf) + base, CEPH_NOSNAP)));
t.touch(cid, hoid);
created.insert(hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (set<ghobject_t>::iterator i = created.begin();
i != created.end();
++i) {
struct stat buf;
ASSERT_TRUE(!store->stat(ch, *i, &buf));
}
set<ghobject_t> listed, listed2;
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
cerr << "objects.size() is " << objects.size() << std::endl;
for (vector<ghobject_t> ::iterator i = objects.begin();
i != objects.end();
++i) {
listed.insert(*i);
ASSERT_TRUE(created.count(*i));
}
ASSERT_TRUE(listed.size() == created.size());
ghobject_t start, next;
objects.clear();
r = collection_list(
store,
ch,
ghobject_t::get_max(),
ghobject_t::get_max(),
50,
&objects,
&next
);
ASSERT_EQ(r, 0);
ASSERT_TRUE(objects.empty());
objects.clear();
listed.clear();
ghobject_t start2, next2;
while (1) {
r = collection_list(store, ch, start, ghobject_t::get_max(), 50, &objects,
&next);
ASSERT_TRUE(sorted(objects));
ASSERT_EQ(r, 0);
listed.insert(objects.begin(), objects.end());
if (objects.size() < 50) {
ASSERT_TRUE(next.is_max());
break;
}
objects.clear();
start = next;
}
cerr << "listed.size() is " << listed.size() << std::endl;
ASSERT_TRUE(listed.size() == created.size());
if (listed2.size()) {
ASSERT_EQ(listed.size(), listed2.size());
}
for (set<ghobject_t>::iterator i = listed.begin();
i != listed.end();
++i) {
ASSERT_TRUE(created.count(*i));
}
for (set<ghobject_t>::iterator i = created.begin();
i != created.end();
++i) {
ObjectStore::Transaction t;
t.remove(cid, *i);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
cerr << "cleaning up" << std::endl;
{
ObjectStore::Transaction t;
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
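// Interface for producing test objects; implementations control the naming
// scheme and hash distribution.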
class ObjectGenerator {
public:
virtual ghobject_t create_object(gen_type *gen) = 0;
virtual ~ObjectGenerator() {}
};
class MixedGenerator : public ObjectGenerator {
public:
unsigned seq;
int64_t poolid;
explicit MixedGenerator(int64_t p) : seq(0), poolid(p) {}
ghobject_t create_object(gen_type *gen) override {
char buf[100];
snprintf(buf, sizeof(buf), "OBJ_%u", seq);
string name(buf);
if (seq % 2) {
for (unsigned i = 0; i < 300; ++i) {
name.push_back('a');
}
}
++seq;
return ghobject_t(
hobject_t(
name, string(), rand() & 2 ? CEPH_NOSNAP : rand(),
(((seq / 1024) % 2) * 0xF00 ) +
(seq & 0xFF),
poolid, ""));
}
};
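// Randomized workload driver: applies mutations (write, clone, truncate,
// zero, xattrs, ...) to the store while maintaining an in-memory mirror in
// `contents`, so completion callbacks can verify on-disk state against the
// expected data.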
class SyntheticWorkloadState {
struct Object {
bufferlist data;
map<string, bufferlist> attrs;
};
public:
static const unsigned max_in_flight = 16;
static const unsigned max_objects = 3000;
static const unsigned max_attr_size = 5;
static const unsigned max_attr_name_len = 100;
static const unsigned max_attr_value_len = 1024 * 64;
coll_t cid;
unsigned write_alignment;
unsigned max_object_len, max_write_len;
unsigned in_flight;
map<ghobject_t, Object> contents;
set<ghobject_t> available_objects;
set<ghobject_t>::iterator next_available_object;
set<ghobject_t> in_flight_objects;
ObjectGenerator *object_gen;
gen_type *rng;
ObjectStore *store;
ObjectStore::CollectionHandle ch;
ceph::mutex lock = ceph::make_mutex("State lock");
ceph::condition_variable cond;
struct EnterExit {
const char *msg;
explicit EnterExit(const char *m) : msg(m) {
//cout << pthread_self() << " enter " << msg << std::endl;
}
~EnterExit() {
//cout << pthread_self() << " exit " << msg << std::endl;
}
};
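  // Completion context for simple mutations: once applied, re-read the
  // object, compare it against the in-memory mirror, and return it to the
  // available set.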
class C_SyntheticOnReadable : public Context {
public:
SyntheticWorkloadState *state;
ghobject_t hoid;
C_SyntheticOnReadable(SyntheticWorkloadState *state, ghobject_t hoid)
: state(state), hoid(hoid) {}
void finish(int r) override {
std::lock_guard locker{state->lock};
EnterExit ee("onreadable finish");
ASSERT_TRUE(state->in_flight_objects.count(hoid));
ASSERT_EQ(r, 0);
state->in_flight_objects.erase(hoid);
if (state->contents.count(hoid))
state->available_objects.insert(hoid);
--(state->in_flight);
state->cond.notify_all();
bufferlist r2;
r = state->store->read(state->ch, hoid, 0, state->contents[hoid].data.length(), r2);
ceph_assert(bl_eq(state->contents[hoid].data, r2));
state->cond.notify_all();
}
};
class C_SyntheticOnStash : public Context {
public:
SyntheticWorkloadState *state;
ghobject_t oid, noid;
C_SyntheticOnStash(SyntheticWorkloadState *state,
ghobject_t oid, ghobject_t noid)
: state(state), oid(oid), noid(noid) {}
void finish(int r) override {
std::lock_guard locker{state->lock};
EnterExit ee("stash finish");
ASSERT_TRUE(state->in_flight_objects.count(oid));
ASSERT_EQ(r, 0);
state->in_flight_objects.erase(oid);
if (state->contents.count(noid))
state->available_objects.insert(noid);
--(state->in_flight);
bufferlist r2;
r = state->store->read(
state->ch, noid, 0,
state->contents[noid].data.length(), r2);
ceph_assert(bl_eq(state->contents[noid].data, r2));
state->cond.notify_all();
}
};
class C_SyntheticOnClone : public Context {
public:
SyntheticWorkloadState *state;
ghobject_t oid, noid;
C_SyntheticOnClone(SyntheticWorkloadState *state,
ghobject_t oid, ghobject_t noid)
: state(state), oid(oid), noid(noid) {}
void finish(int r) override {
std::lock_guard locker{state->lock};
EnterExit ee("clone finish");
ASSERT_TRUE(state->in_flight_objects.count(oid));
ASSERT_EQ(r, 0);
state->in_flight_objects.erase(oid);
if (state->contents.count(oid))
state->available_objects.insert(oid);
if (state->contents.count(noid))
state->available_objects.insert(noid);
--(state->in_flight);
bufferlist r2;
r = state->store->read(state->ch, noid, 0, state->contents[noid].data.length(), r2);
ceph_assert(bl_eq(state->contents[noid].data, r2));
state->cond.notify_all();
}
};
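  // Fill bl with `size` bytes drawn from a 10-character alphabet
  // (NUL-terminated) to keep the payload highly compressible.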
static void filled_byte_array(bufferlist& bl, size_t size)
{
static const char alphanum[] = "0123456789"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"abcdefghijklmnopqrstuvwxyz";
if (!size) {
return;
}
bufferptr bp(size);
for (unsigned int i = 0; i < size - 1; i++) {
// severely limit entropy so we can compress...
bp[i] = alphanum[rand() % 10]; //(sizeof(alphanum) - 1)];
}
bp[size - 1] = '\0';
bl.append(bp);
}
SyntheticWorkloadState(ObjectStore *store,
ObjectGenerator *gen,
gen_type *rng,
coll_t cid,
unsigned max_size,
unsigned max_write,
unsigned alignment)
: cid(cid), write_alignment(alignment), max_object_len(max_size),
max_write_len(max_write), in_flight(0),
next_available_object(available_objects.end()),
object_gen(gen), rng(rng), store(store) {}
int init() {
ObjectStore::Transaction t;
ch = store->create_new_collection(cid);
t.create_collection(cid, 0);
return queue_transaction(store, ch, std::move(t));
}
void shutdown() {
ghobject_t next;
while (1) {
vector<ghobject_t> objects;
int r = collection_list(store, ch, next, ghobject_t::get_max(), 10,
&objects, &next);
ceph_assert(r >= 0);
if (objects.size() == 0)
break;
ObjectStore::Transaction t;
for (vector<ghobject_t>::iterator p = objects.begin();
p != objects.end(); ++p) {
t.remove(cid, *p);
}
queue_transaction(store, ch, std::move(t));
}
ObjectStore::Transaction t;
t.remove_collection(cid);
queue_transaction(store, ch, std::move(t));
}
void statfs(store_statfs_t& stat) {
store->statfs(&stat);
}
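  // Block until in_flight is below the cap and an object is available,
  // then pick one uniformly at random.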
ghobject_t get_uniform_random_object(std::unique_lock<ceph::mutex>& locker) {
cond.wait(locker, [this] {
return in_flight < max_in_flight && !available_objects.empty();
});
boost::uniform_int<> choose(0, available_objects.size() - 1);
int index = choose(*rng);
set<ghobject_t>::iterator i = available_objects.begin();
for ( ; index > 0; --index, ++i) ;
ghobject_t ret = *i;
return ret;
}
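  // Like get_uniform_random_object(), but cycles through available
  // objects round-robin.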
ghobject_t get_next_object(std::unique_lock<ceph::mutex>& locker) {
cond.wait(locker, [this] {
return in_flight < max_in_flight && !available_objects.empty();
});
if (next_available_object == available_objects.end()) {
next_available_object = available_objects.begin();
}
ghobject_t ret = *next_available_object;
++next_available_object;
return ret;
}
void wait_for_ready(std::unique_lock<ceph::mutex>& locker) {
cond.wait(locker, [this] { return in_flight < max_in_flight; });
}
void wait_for_done() {
std::unique_lock locker{lock};
cond.wait(locker, [this] { return in_flight == 0; });
}
bool can_create() {
return (available_objects.size() + in_flight_objects.size()) < max_objects;
}
bool can_unlink() {
return (available_objects.size() + in_flight_objects.size()) > 0;
}
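  // Compose a random combination of CEPH_OSD_ALLOC_HINT_FLAG_* bits
  // covering write/read patterns, lifetime, and compressibility.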
unsigned get_random_alloc_hints() {
unsigned f = 0;
{
boost::uniform_int<> u(0, 3);
switch (u(*rng)) {
case 1:
f |= CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_WRITE;
break;
case 2:
f |= CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_WRITE;
break;
}
}
{
boost::uniform_int<> u(0, 3);
switch (u(*rng)) {
case 1:
f |= CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ;
break;
case 2:
f |= CEPH_OSD_ALLOC_HINT_FLAG_RANDOM_READ;
break;
}
}
{
// append_only, immutable
boost::uniform_int<> u(0, 4);
f |= u(*rng) << 4;
}
{
boost::uniform_int<> u(0, 3);
switch (u(*rng)) {
case 1:
f |= CEPH_OSD_ALLOC_HINT_FLAG_SHORTLIVED;
break;
case 2:
f |= CEPH_OSD_ALLOC_HINT_FLAG_LONGLIVED;
break;
}
}
{
boost::uniform_int<> u(0, 3);
switch (u(*rng)) {
case 1:
f |= CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
break;
case 2:
f |= CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
break;
}
}
return f;
}
int touch() {
std::unique_lock locker{lock};
EnterExit ee("touch");
if (!can_create())
return -ENOSPC;
wait_for_ready(locker);
ghobject_t new_obj = object_gen->create_object(rng);
available_objects.erase(new_obj);
ObjectStore::Transaction t;
t.touch(cid, new_obj);
boost::uniform_int<> u(17, 22);
boost::uniform_int<> v(12, 17);
t.set_alloc_hint(cid, new_obj,
1ull << u(*rng),
1ull << v(*rng),
get_random_alloc_hints());
++in_flight;
in_flight_objects.insert(new_obj);
if (!contents.count(new_obj))
contents[new_obj] = Object();
t.register_on_applied(new C_SyntheticOnReadable(this, new_obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
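  // Move an object to a new generation of the same name via
  // collection_move_rename, carrying its expected contents along.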
int stash() {
std::unique_lock locker{lock};
EnterExit ee("stash");
if (!can_unlink())
return -ENOENT;
if (!can_create())
return -ENOSPC;
wait_for_ready(locker);
ghobject_t old_obj;
int max = 20;
do {
old_obj = get_uniform_random_object(locker);
} while (--max && !contents[old_obj].data.length());
available_objects.erase(old_obj);
ghobject_t new_obj = old_obj;
new_obj.generation++;
available_objects.erase(new_obj);
ObjectStore::Transaction t;
t.collection_move_rename(cid, old_obj, cid, new_obj);
++in_flight;
in_flight_objects.insert(old_obj);
contents[new_obj].attrs = contents[old_obj].attrs;
contents[new_obj].data = contents[old_obj].data;
contents.erase(old_obj);
t.register_on_applied(new C_SyntheticOnStash(this, old_obj, new_obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
int clone() {
std::unique_lock locker{lock};
EnterExit ee("clone");
if (!can_unlink())
return -ENOENT;
if (!can_create())
return -ENOSPC;
wait_for_ready(locker);
ghobject_t old_obj;
int max = 20;
do {
old_obj = get_uniform_random_object(locker);
} while (--max && !contents[old_obj].data.length());
available_objects.erase(old_obj);
ghobject_t new_obj = object_gen->create_object(rng);
// make the hash match
new_obj.hobj.set_hash(old_obj.hobj.get_hash());
available_objects.erase(new_obj);
ObjectStore::Transaction t;
t.clone(cid, old_obj, new_obj);
++in_flight;
in_flight_objects.insert(old_obj);
contents[new_obj].attrs = contents[old_obj].attrs;
contents[new_obj].data = contents[old_obj].data;
t.register_on_applied(new C_SyntheticOnClone(this, old_obj, new_obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
int clone_range() {
std::unique_lock locker{lock};
EnterExit ee("clone_range");
if (!can_unlink())
return -ENOENT;
if (!can_create())
return -ENOSPC;
wait_for_ready(locker);
ghobject_t old_obj;
int max = 20;
do {
old_obj = get_uniform_random_object(locker);
} while (--max && !contents[old_obj].data.length());
bufferlist &srcdata = contents[old_obj].data;
if (srcdata.length() == 0) {
return 0;
}
available_objects.erase(old_obj);
ghobject_t new_obj = get_uniform_random_object(locker);
available_objects.erase(new_obj);
boost::uniform_int<> u1(0, max_object_len - max_write_len);
boost::uniform_int<> u2(0, max_write_len);
uint64_t srcoff = u1(*rng);
// make src and dst offsets match, since that's what the osd does
uint64_t dstoff = srcoff; //u1(*rng);
uint64_t len = u2(*rng);
if (write_alignment) {
srcoff = round_up_to(srcoff, write_alignment);
dstoff = round_up_to(dstoff, write_alignment);
len = round_up_to(len, write_alignment);
}
if (srcoff > srcdata.length() - 1) {
srcoff = srcdata.length() - 1;
}
if (srcoff + len > srcdata.length()) {
len = srcdata.length() - srcoff;
}
if (0)
cout << __func__ << " from " << srcoff << "~" << len
<< " (size " << srcdata.length() << ") to "
<< dstoff << "~" << len << std::endl;
ObjectStore::Transaction t;
t.clone_range(cid, old_obj, new_obj, srcoff, len, dstoff);
++in_flight;
in_flight_objects.insert(old_obj);
bufferlist bl;
if (srcoff < srcdata.length()) {
if (srcoff + len > srcdata.length()) {
bl.substr_of(srcdata, srcoff, srcdata.length() - srcoff);
} else {
bl.substr_of(srcdata, srcoff, len);
}
}
bufferlist& dstdata = contents[new_obj].data;
if (dstdata.length() <= dstoff) {
if (bl.length() > 0) {
dstdata.append_zero(dstoff - dstdata.length());
dstdata.append(bl);
}
} else {
bufferlist value;
ceph_assert(dstdata.length() > dstoff);
dstdata.cbegin().copy(dstoff, value);
value.append(bl);
if (value.length() < dstdata.length())
dstdata.cbegin(value.length()).copy(
dstdata.length() - value.length(), value);
value.swap(dstdata);
}
t.register_on_applied(new C_SyntheticOnClone(this, old_obj, new_obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
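  // Overwrite a random (optionally aligned) extent of a random object and
  // splice the same bytes into the in-memory mirror.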
int write() {
std::unique_lock locker{lock};
EnterExit ee("write");
if (!can_unlink())
return -ENOENT;
wait_for_ready(locker);
ghobject_t new_obj = get_uniform_random_object(locker);
available_objects.erase(new_obj);
ObjectStore::Transaction t;
boost::uniform_int<> u1(0, max_object_len - max_write_len);
boost::uniform_int<> u2(0, max_write_len);
uint64_t offset = u1(*rng);
uint64_t len = u2(*rng);
bufferlist bl;
if (write_alignment) {
offset = round_up_to(offset, write_alignment);
len = round_up_to(len, write_alignment);
}
filled_byte_array(bl, len);
bufferlist& data = contents[new_obj].data;
if (data.length() <= offset) {
if (len > 0) {
data.append_zero(offset-data.length());
data.append(bl);
}
} else {
bufferlist value;
ceph_assert(data.length() > offset);
data.cbegin().copy(offset, value);
value.append(bl);
if (value.length() < data.length())
data.cbegin(value.length()).copy(
data.length()-value.length(), value);
value.swap(data);
}
t.write(cid, new_obj, offset, len, bl);
++in_flight;
in_flight_objects.insert(new_obj);
t.register_on_applied(new C_SyntheticOnReadable(this, new_obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
int truncate() {
std::unique_lock locker{lock};
EnterExit ee("truncate");
if (!can_unlink())
return -ENOENT;
wait_for_ready(locker);
ghobject_t obj = get_uniform_random_object(locker);
available_objects.erase(obj);
ObjectStore::Transaction t;
boost::uniform_int<> choose(0, max_object_len);
size_t len = choose(*rng);
if (write_alignment) {
len = round_up_to(len, write_alignment);
}
t.truncate(cid, obj, len);
++in_flight;
in_flight_objects.insert(obj);
bufferlist& data = contents[obj].data;
if (data.length() <= len) {
data.append_zero(len - data.length());
} else {
bufferlist bl;
data.cbegin().copy(len, bl);
bl.swap(data);
}
t.register_on_applied(new C_SyntheticOnReadable(this, obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
int zero() {
std::unique_lock locker{lock};
EnterExit ee("zero");
if (!can_unlink())
return -ENOENT;
wait_for_ready(locker);
ghobject_t new_obj = get_uniform_random_object(locker);
available_objects.erase(new_obj);
ObjectStore::Transaction t;
boost::uniform_int<> u1(0, max_object_len - max_write_len);
boost::uniform_int<> u2(0, max_write_len);
uint64_t offset = u1(*rng);
uint64_t len = u2(*rng);
if (write_alignment) {
offset = round_up_to(offset, write_alignment);
len = round_up_to(len, write_alignment);
}
if (len > 0) {
auto& data = contents[new_obj].data;
if (data.length() < offset + len) {
data.append_zero(offset+len-data.length());
}
bufferlist n;
n.substr_of(data, 0, offset);
n.append_zero(len);
if (data.length() > offset + len)
data.cbegin(offset + len).copy(data.length() - offset - len, n);
data.swap(n);
}
t.zero(cid, new_obj, offset, len);
++in_flight;
in_flight_objects.insert(new_obj);
t.register_on_applied(new C_SyntheticOnReadable(this, new_obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
void read() {
EnterExit ee("read");
boost::uniform_int<> u1(0, max_object_len/2);
boost::uniform_int<> u2(0, max_object_len);
uint64_t offset = u1(*rng);
uint64_t len = u2(*rng);
if (offset > len)
swap(offset, len);
ghobject_t obj;
bufferlist expected;
int r;
{
std::unique_lock locker{lock};
EnterExit ee("read locked");
if (!can_unlink())
return ;
wait_for_ready(locker);
obj = get_uniform_random_object(locker);
expected = contents[obj].data;
}
bufferlist bl, result;
if (0) cout << " obj " << obj
<< " size " << expected.length()
<< " offset " << offset
<< " len " << len << std::endl;
r = store->read(ch, obj, offset, len, result);
if (offset >= expected.length()) {
ASSERT_EQ(r, 0);
} else {
size_t max_len = expected.length() - offset;
if (len > max_len)
len = max_len;
ceph_assert(len == result.length());
ASSERT_EQ(len, result.length());
expected.cbegin(offset).copy(len, bl);
ASSERT_EQ(r, (int)len);
ASSERT_TRUE(bl_eq(bl, result));
}
}
int setattrs() {
std::unique_lock locker{lock};
EnterExit ee("setattrs");
if (!can_unlink())
return -ENOENT;
wait_for_ready(locker);
ghobject_t obj = get_uniform_random_object(locker);
available_objects.erase(obj);
ObjectStore::Transaction t;
boost::uniform_int<> u0(1, max_attr_size);
boost::uniform_int<> u1(4, max_attr_name_len);
boost::uniform_int<> u2(4, max_attr_value_len);
boost::uniform_int<> u3(0, 100);
uint64_t size = u0(*rng);
uint64_t name_len;
map<string, bufferlist, less<>> attrs;
set<string> keys;
for (map<string, bufferlist>::iterator it = contents[obj].attrs.begin();
it != contents[obj].attrs.end(); ++it)
keys.insert(it->first);
while (size--) {
bufferlist name, value;
uint64_t get_exist = u3(*rng);
uint64_t value_len = u2(*rng);
filled_byte_array(value, value_len);
if (get_exist < 50 && keys.size()) {
set<string>::iterator k = keys.begin();
attrs[*k] = value;
contents[obj].attrs[*k] = value;
keys.erase(k);
} else {
name_len = u1(*rng);
filled_byte_array(name, name_len);
attrs[name.c_str()] = value;
contents[obj].attrs[name.c_str()] = value;
}
}
t.setattrs(cid, obj, attrs);
++in_flight;
in_flight_objects.insert(obj);
t.register_on_applied(new C_SyntheticOnReadable(this, obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
int set_fixed_attrs(size_t entries, size_t key_size, size_t val_size) {
std::unique_lock locker{ lock };
EnterExit ee("setattrs");
if (!can_unlink())
return -ENOENT;
wait_for_ready(locker);
ghobject_t obj = get_next_object(locker);
available_objects.erase(obj);
ObjectStore::Transaction t;
map<string, bufferlist, less<>> attrs;
set<string> keys;
while (entries--) {
bufferlist name, value;
filled_byte_array(value, val_size);
filled_byte_array(name, key_size);
attrs[name.c_str()] = value;
contents[obj].attrs[name.c_str()] = value;
}
t.setattrs(cid, obj, attrs);
++in_flight;
in_flight_objects.insert(obj);
t.register_on_applied(new C_SyntheticOnReadable(this, obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
void getattrs() {
EnterExit ee("getattrs");
ghobject_t obj;
map<string, bufferlist> expected;
{
std::unique_lock locker{lock};
EnterExit ee("getattrs locked");
if (!can_unlink())
return ;
wait_for_ready(locker);
int retry = 10;
do {
obj = get_uniform_random_object(locker);
if (!--retry)
return ;
} while (contents[obj].attrs.empty());
expected = contents[obj].attrs;
}
map<string, bufferlist, less<>> attrs;
int r = store->getattrs(ch, obj, attrs);
ASSERT_TRUE(r == 0);
ASSERT_TRUE(attrs.size() == expected.size());
for (map<string, bufferlist>::iterator it = expected.begin();
it != expected.end(); ++it) {
ASSERT_TRUE(bl_eq(attrs[it->first], it->second));
}
}
void getattr() {
EnterExit ee("getattr");
ghobject_t obj;
int r;
int retry;
map<string, bufferlist> expected;
{
std::unique_lock locker{lock};
EnterExit ee("getattr locked");
if (!can_unlink())
return ;
wait_for_ready(locker);
retry = 10;
do {
obj = get_uniform_random_object(locker);
if (!--retry)
return ;
} while (contents[obj].attrs.empty());
expected = contents[obj].attrs;
}
boost::uniform_int<> u(0, expected.size()-1);
retry = u(*rng);
map<string, bufferlist>::iterator it = expected.begin();
while (retry) {
retry--;
++it;
}
bufferlist bl;
r = store->getattr(ch, obj, it->first, bl);
ASSERT_EQ(r, 0);
ASSERT_TRUE(bl_eq(it->second, bl));
}
int rmattr() {
std::unique_lock locker{lock};
EnterExit ee("rmattr");
if (!can_unlink())
return -ENOENT;
wait_for_ready(locker);
ghobject_t obj;
int retry = 10;
do {
obj = get_uniform_random_object(locker);
if (!--retry)
return 0;
} while (contents[obj].attrs.empty());
boost::uniform_int<> u(0, contents[obj].attrs.size()-1);
retry = u(*rng);
map<string, bufferlist>::iterator it = contents[obj].attrs.begin();
while (retry) {
retry--;
++it;
}
available_objects.erase(obj);
ObjectStore::Transaction t;
t.rmattr(cid, obj, it->first);
contents[obj].attrs.erase(it->first);
++in_flight;
in_flight_objects.insert(obj);
t.register_on_applied(new C_SyntheticOnReadable(this, obj));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
void fsck(bool deep) {
std::unique_lock locker{lock};
EnterExit ee("fsck");
cond.wait(locker, [this] { return in_flight == 0; });
ch.reset();
store->umount();
int r = store->fsck(deep);
ceph_assert(r == 0 || r == -EOPNOTSUPP);
store->mount();
ch = store->open_collection(cid);
}
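  // List the entire collection in pages of 100 and verify that it matches
  // the model's available set exactly.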
void scan() {
std::unique_lock locker{lock};
EnterExit ee("scan");
cond.wait(locker, [this] { return in_flight == 0; });
vector<ghobject_t> objects;
set<ghobject_t> objects_set, objects_set2;
ghobject_t next, current;
while (1) {
//cerr << "scanning..." << std::endl;
int r = collection_list(store, ch, current, ghobject_t::get_max(), 100,
&objects, &next);
ASSERT_EQ(r, 0);
ASSERT_TRUE(sorted(objects));
objects_set.insert(objects.begin(), objects.end());
objects.clear();
if (next.is_max()) break;
current = next;
}
if (objects_set.size() != available_objects.size()) {
for (set<ghobject_t>::iterator p = objects_set.begin();
p != objects_set.end();
++p)
if (available_objects.count(*p) == 0) {
cerr << "+ " << *p << std::endl;
ceph_abort();
}
for (set<ghobject_t>::iterator p = available_objects.begin();
p != available_objects.end();
++p)
if (objects_set.count(*p) == 0)
cerr << "- " << *p << std::endl;
//cerr << " objects_set: " << objects_set << std::endl;
//cerr << " available_set: " << available_objects << std::endl;
ceph_abort_msg("badness");
}
ASSERT_EQ(objects_set.size(), available_objects.size());
for (set<ghobject_t>::iterator i = objects_set.begin();
i != objects_set.end();
++i) {
ASSERT_GT(available_objects.count(*i), (unsigned)0);
}
int r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(),
INT_MAX, &objects, 0);
ASSERT_EQ(r, 0);
objects_set2.insert(objects.begin(), objects.end());
ASSERT_EQ(objects_set2.size(), available_objects.size());
for (set<ghobject_t>::iterator i = objects_set2.begin();
i != objects_set2.end();
++i) {
ASSERT_GT(available_objects.count(*i), (unsigned)0);
if (available_objects.count(*i) == 0) {
cerr << "+ " << *i << std::endl;
}
}
}
void stat() {
EnterExit ee("stat");
ghobject_t hoid;
uint64_t expected;
{
std::unique_lock locker{lock};
EnterExit ee("stat lock1");
if (!can_unlink())
return ;
hoid = get_uniform_random_object(locker);
in_flight_objects.insert(hoid);
available_objects.erase(hoid);
++in_flight;
expected = contents[hoid].data.length();
}
struct stat buf;
int r = store->stat(ch, hoid, &buf);
ASSERT_EQ(0, r);
ceph_assert((uint64_t)buf.st_size == expected);
ASSERT_TRUE((uint64_t)buf.st_size == expected);
{
std::lock_guard locker{lock};
EnterExit ee("stat lock2");
--in_flight;
cond.notify_all();
in_flight_objects.erase(hoid);
available_objects.insert(hoid);
}
}
int unlink() {
std::unique_lock locker{lock};
EnterExit ee("unlink");
if (!can_unlink())
return -ENOENT;
ghobject_t to_remove = get_uniform_random_object(locker);
ObjectStore::Transaction t;
t.remove(cid, to_remove);
++in_flight;
available_objects.erase(to_remove);
in_flight_objects.insert(to_remove);
contents.erase(to_remove);
t.register_on_applied(new C_SyntheticOnReadable(this, to_remove));
int status = store->queue_transaction(ch, std::move(t));
return status;
}
void print_internal_state() {
std::lock_guard locker{lock};
cerr << "available_objects: " << available_objects.size()
<< " in_flight_objects: " << in_flight_objects.size()
<< " total objects: " << in_flight_objects.size() + available_objects.size()
<< " in_flight " << in_flight << std::endl;
}
};
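// Seed ~num_ops/10 objects, then issue num_ops randomly chosen operations
// (roughly: write 25%, read 20%, stash 15%, zero and truncate 10% each,
// unlink/clone/clone_range 5% each, plus occasional stat/scan/fsck).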
void StoreTest::doSyntheticTest(
int num_ops,
uint64_t max_obj, uint64_t max_wr, uint64_t align)
{
MixedGenerator gen(555);
gen_type rng(time(NULL));
coll_t cid(spg_t(pg_t(0,555), shard_id_t::NO_SHARD));
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
g_ceph_context->_conf.apply_changes(nullptr);
SyntheticWorkloadState test_obj(store.get(), &gen, &rng, cid,
max_obj, max_wr, align);
test_obj.init();
for (int i = 0; i < num_ops/10; ++i) {
if (!(i % 500)) cerr << "seeding object " << i << std::endl;
test_obj.touch();
}
for (int i = 0; i < num_ops; ++i) {
if (!(i % 1000)) {
cerr << "Op " << i << std::endl;
test_obj.print_internal_state();
}
boost::uniform_int<> true_false(0, 999);
int val = true_false(rng);
if (val > 998) {
test_obj.fsck(true);
} else if (val > 997) {
test_obj.fsck(false);
} else if (val > 970) {
test_obj.scan();
} else if (val > 950) {
test_obj.stat();
} else if (val > 850) {
test_obj.zero();
} else if (val > 800) {
test_obj.unlink();
} else if (val > 550) {
test_obj.write();
} else if (val > 500) {
test_obj.clone();
} else if (val > 450) {
test_obj.clone_range();
} else if (val > 300) {
test_obj.stash();
} else if (val > 100) {
test_obj.read();
} else {
test_obj.truncate();
}
}
test_obj.wait_for_done();
test_obj.shutdown();
}
TEST_P(StoreTest, Synthetic) {
doSyntheticTest(10000, 400*1024, 40*1024, 0);
}
#if defined(WITH_BLUESTORE)
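// Matrix format: each row names a config option followed by its candidate
// values, terminated by 0; the table ends with a { 0 } row. do_matrix runs
// doSyntheticTest over every combination of values.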
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixSharding) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "4096", 0 }, // must be the first!
{ "num_ops", "50000", 0 },
{ "max_write", "65536", 0 },
{ "max_size", "262144", 0 },
{ "alignment", "4096", 0 },
{ "bluestore_max_blob_size", "65536", 0 },
{ "bluestore_extent_map_shard_min_size", "60", 0 },
{ "bluestore_extent_map_shard_max_size", "300", 0 },
{ "bluestore_extent_map_shard_target_size", "150", 0 },
{ "bluestore_default_buffered_read", "true", 0 },
{ "bluestore_default_buffered_write", "true", 0 },
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
TEST_P(StoreTestSpecificAUSize, ZipperPatternSharded) {
if(string(GetParam()) != "bluestore")
return;
StartDeferred(4096);
int r;
coll_t cid;
ghobject_t a(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
int len = 4096;
bufferptr bp(len);
bp.zero();
bl.append(bp);
for (int i=0; i<1000; ++i) {
ObjectStore::Transaction t;
t.write(cid, a, i*2*len, len, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (int i=0; i<1000; ++i) {
ObjectStore::Transaction t;
t.write(cid, a, i*2*len + 1, len, bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCsumAlgorithm) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "65536", 0 }, // must be the first!
{ "max_write", "65536", 0 },
{ "max_size", "1048576", 0 },
{ "alignment", "16", 0 },
{ "bluestore_csum_type", "crc32c", "crc32c_16", "crc32c_8", "xxhash32",
"xxhash64", "none", 0 },
{ "bluestore_default_buffered_write", "false", 0 },
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCsumVsCompression) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "4096", "16384", 0 }, //to be the first!
{ "max_write", "131072", 0 },
{ "max_size", "262144", 0 },
{ "alignment", "512", 0 },
{ "bluestore_compression_mode", "force", 0},
{ "bluestore_compression_algorithm", "snappy", "zlib", 0 },
{ "bluestore_csum_type", "crc32c", 0 },
{ "bluestore_default_buffered_read", "true", "false", 0 },
{ "bluestore_default_buffered_write", "true", "false", 0 },
{ "bluestore_sync_submit_transaction", "false", 0 },
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCompression) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first!
{ "max_write", "1048576", 0 },
{ "max_size", "4194304", 0 },
{ "alignment", "65536", 0 },
{ "bluestore_compression_mode", "force", "aggressive", "passive", "none", 0},
{ "bluestore_default_buffered_write", "false", 0 },
{ "bluestore_sync_submit_transaction", "true", 0 },
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixCompressionAlgorithm) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first!
{ "max_write", "1048576", 0 },
{ "max_size", "4194304", 0 },
{ "alignment", "65536", 0 },
{ "bluestore_compression_algorithm", "zlib", "snappy", 0 },
{ "bluestore_compression_mode", "force", 0 },
{ "bluestore_default_buffered_write", "false", 0 },
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixNoCsum) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first!
{ "max_write", "65536", 0 },
{ "max_size", "1048576", 0 },
{ "alignment", "512", 0 },
{ "bluestore_max_blob_size", "262144", 0 },
{ "bluestore_compression_mode", "force", "none", 0},
{ "bluestore_csum_type", "none", 0},
{ "bluestore_default_buffered_read", "true", "false", 0 },
{ "bluestore_default_buffered_write", "true", 0 },
{ "bluestore_sync_submit_transaction", "true", "false", 0 },
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
TEST_P(StoreTestSpecificAUSize, SyntheticMatrixPreferDeferred) {
if (string(GetParam()) != "bluestore")
return;
const char *m[][10] = {
{ "bluestore_min_alloc_size", "4096", "65536", 0 }, // to be the first!
{ "max_write", "65536", 0 },
{ "max_size", "1048576", 0 },
{ "alignment", "512", 0 },
{ "bluestore_max_blob_size", "262144", 0 },
{ "bluestore_compression_mode", "force", "none", 0},
{ "bluestore_prefer_deferred_size", "32768", "0", 0},
{ 0 },
};
do_matrix(m, std::bind(&StoreTest::doSyntheticTest, this, _1, _2, _3, _4));
}
#endif // WITH_BLUESTORE
TEST_P(StoreTest, AttrSynthetic) {
MixedGenerator gen(447);
gen_type rng(time(NULL));
coll_t cid(spg_t(pg_t(0,447),shard_id_t::NO_SHARD));
SyntheticWorkloadState test_obj(store.get(), &gen, &rng, cid, 40*1024, 4*1024, 0);
test_obj.init();
for (int i = 0; i < 500; ++i) {
if (!(i % 10)) cerr << "seeding object " << i << std::endl;
test_obj.touch();
}
for (int i = 0; i < 1000; ++i) {
if (!(i % 100)) {
cerr << "Op " << i << std::endl;
test_obj.print_internal_state();
}
boost::uniform_int<> true_false(0, 99);
int val = true_false(rng);
if (val > 97) {
test_obj.scan();
} else if (val > 93) {
test_obj.stat();
} else if (val > 75) {
test_obj.rmattr();
} else if (val > 47) {
test_obj.setattrs();
} else if (val > 45) {
test_obj.clone();
} else if (val > 37) {
test_obj.stash();
} else if (val > 30) {
test_obj.getattrs();
} else {
test_obj.getattr();
}
}
test_obj.wait_for_done();
test_obj.shutdown();
}
TEST_P(StoreTest, HashCollisionTest) {
int64_t poolid = 11;
coll_t cid(spg_t(pg_t(0,poolid),shard_id_t::NO_SHARD));
int r;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
string base = "";
for (int i = 0; i < 100; ++i) base.append("aaaaa");
set<ghobject_t> created;
for (int n = 0; n < 10; ++n) {
char nbuf[100];
    snprintf(nbuf, sizeof(nbuf), "n%d", n);
for (int i = 0; i < 1000; ++i) {
char buf[100];
      snprintf(buf, sizeof(buf), "%d", i);
if (!(i % 100)) {
cerr << "Object n" << n << " "<< i << std::endl;
}
ghobject_t hoid(hobject_t(string(buf) + base, string(), CEPH_NOSNAP, 0, poolid, string(nbuf)));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
created.insert(hoid);
}
}
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
set<ghobject_t> listed(objects.begin(), objects.end());
cerr << "listed.size() is " << listed.size() << " and created.size() is " << created.size() << std::endl;
ASSERT_TRUE(listed.size() == created.size());
objects.clear();
listed.clear();
ghobject_t current, next;
while (1) {
r = collection_list(store, ch, current, ghobject_t::get_max(), 60, &objects,
&next);
ASSERT_EQ(r, 0);
ASSERT_TRUE(sorted(objects));
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
if (listed.count(*i))
cerr << *i << " repeated" << std::endl;
listed.insert(*i);
}
if (objects.size() < 50) {
ASSERT_TRUE(next.is_max());
break;
}
objects.clear();
current = next;
}
cerr << "listed.size() is " << listed.size() << std::endl;
ASSERT_TRUE(listed.size() == created.size());
for (set<ghobject_t>::iterator i = listed.begin();
i != listed.end();
++i) {
ASSERT_TRUE(created.count(*i));
}
for (set<ghobject_t>::iterator i = created.begin();
i != created.end();
++i) {
ObjectStore::Transaction t;
t.remove(cid, *i);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ObjectStore::Transaction t;
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
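// The byte arrays below encode object names crafted so that each group
// hashes to the same value (121664318 or 121666222), exercising sort
// order and listing bounds for colliding hashes.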
TEST_P(StoreTest, HashCollisionSorting) {
bool disable_legacy = (string(GetParam()) == "bluestore");
char buf121664318_1[] = {18, -119, -121, -111, 0};
char buf121664318_2[] = {19, 127, -121, 32, 0};
char buf121664318_3[] = {19, -118, 15, 19, 0};
char buf121664318_4[] = {28, 27, -116, -113, 0};
char buf121664318_5[] = {28, 27, -115, -124, 0};
char buf121666222_1[] = {18, -119, -120, -111, 0};
char buf121666222_2[] = {19, 127, -120, 32, 0};
char buf121666222_3[] = {19, -118, 15, 30, 0};
char buf121666222_4[] = {29, 17, -126, -113, 0};
char buf121666222_5[] = {29, 17, -125, -124, 0};
std::map<uint32_t, std::vector<std::string>> object_names = {
{121664318, {{buf121664318_1},
{buf121664318_2},
{buf121664318_3},
{buf121664318_4},
{buf121664318_5}}},
{121666222, {{buf121666222_1},
{buf121666222_2},
{buf121666222_3},
{buf121666222_4},
{buf121666222_5}}}};
int64_t poolid = 111;
coll_t cid = coll_t(spg_t(pg_t(0, poolid), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
int r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::set<ghobject_t> created;
for (auto &[hash, names] : object_names) {
for (auto &name : names) {
ghobject_t hoid(hobject_t(sobject_t(name, CEPH_NOSNAP),
string(),
hash,
poolid,
string()));
ASSERT_EQ(hash, hoid.hobj.get_hash());
ObjectStore::Transaction t;
t.touch(cid, hoid);
int r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
created.insert(hoid);
}
}
vector<ghobject_t> objects;
int r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(),
INT_MAX, &objects, 0, disable_legacy);
ASSERT_EQ(r, 0);
ASSERT_EQ(created.size(), objects.size());
auto it = objects.begin();
for (auto &hoid : created) {
ASSERT_EQ(hoid, *it);
it++;
}
for (auto i = created.begin(); i != created.end(); i++) {
auto j = i;
for (j++; j != created.end(); j++) {
std::set<ghobject_t> created_sub(i, j);
objects.clear();
ghobject_t next;
r = collection_list(store, ch, *i, ghobject_t::get_max(),
created_sub.size(), &objects, &next, disable_legacy);
ASSERT_EQ(r, 0);
ASSERT_EQ(created_sub.size(), objects.size());
it = objects.begin();
for (auto &hoid : created_sub) {
ASSERT_EQ(hoid, *it);
it++;
}
if (j == created.end()) {
ASSERT_TRUE(next.is_max());
} else {
ASSERT_EQ(*j, next);
}
}
}
for (auto i = created.begin(); i != created.end(); i++) {
auto j = i;
for (j++; j != created.end(); j++) {
std::set<ghobject_t> created_sub(i, j);
objects.clear();
ghobject_t next;
r = collection_list(store, ch, *i, *j, INT_MAX, &objects, &next,
disable_legacy);
ASSERT_EQ(r, 0);
ASSERT_EQ(created_sub.size(), objects.size());
it = objects.begin();
for (auto &hoid : created_sub) {
ASSERT_EQ(hoid, *it);
it++;
}
if (j == created.end()) {
ASSERT_TRUE(next.is_max());
} else {
ASSERT_EQ(*j, next);
}
}
}
}
TEST_P(StoreTest, ScrubTest) {
int64_t poolid = 111;
coll_t cid(spg_t(pg_t(0, poolid),shard_id_t(1)));
int r;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
string base = "aaaaa";
set<ghobject_t> created;
for (int i = 0; i < 1000; ++i) {
char buf[100];
    snprintf(buf, sizeof(buf), "%d", i);
if (!(i % 5)) {
cerr << "Object " << i << std::endl;
}
ghobject_t hoid(hobject_t(string(buf) + base, string(), CEPH_NOSNAP, i,
poolid, ""),
ghobject_t::NO_GEN, shard_id_t(1));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
created.insert(hoid);
}
// Add same hobject_t but different generation
{
ghobject_t hoid1(hobject_t("same-object", string(), CEPH_NOSNAP, 0, poolid, ""),
ghobject_t::NO_GEN, shard_id_t(1));
ghobject_t hoid2(hobject_t("same-object", string(), CEPH_NOSNAP, 0, poolid, ""), (gen_t)1, shard_id_t(1));
ghobject_t hoid3(hobject_t("same-object", string(), CEPH_NOSNAP, 0, poolid, ""), (gen_t)2, shard_id_t(1));
ObjectStore::Transaction t;
t.touch(cid, hoid1);
t.touch(cid, hoid2);
t.touch(cid, hoid3);
r = queue_transaction(store, ch, std::move(t));
created.insert(hoid1);
created.insert(hoid2);
created.insert(hoid3);
ASSERT_EQ(r, 0);
}
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
set<ghobject_t> listed(objects.begin(), objects.end());
cerr << "listed.size() is " << listed.size() << " and created.size() is " << created.size() << std::endl;
ASSERT_TRUE(listed.size() == created.size());
objects.clear();
listed.clear();
ghobject_t current, next;
while (1) {
r = collection_list(store, ch, current, ghobject_t::get_max(), 60, &objects,
&next);
ASSERT_EQ(r, 0);
ASSERT_TRUE(sorted(objects));
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end(); ++i) {
if (listed.count(*i))
cerr << *i << " repeated" << std::endl;
listed.insert(*i);
}
if (objects.size() < 50) {
ASSERT_TRUE(next.is_max());
break;
}
objects.clear();
current = next.get_boundary();
}
cerr << "listed.size() is " << listed.size() << std::endl;
ASSERT_TRUE(listed.size() == created.size());
for (set<ghobject_t>::iterator i = listed.begin();
i != listed.end();
++i) {
ASSERT_TRUE(created.count(*i));
}
for (set<ghobject_t>::iterator i = created.begin();
i != created.end();
++i) {
ObjectStore::Transaction t;
t.remove(cid, *i);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ObjectStore::Transaction t;
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
TEST_P(StoreTest, OMapTest) {
coll_t cid;
ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, 0, ""));
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
map<string, bufferlist> attrs;
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.omap_clear(cid, hoid);
map<string, bufferlist> start_set;
t.omap_setkeys(cid, hoid, start_set);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (int i = 0; i < 100; i++) {
if (!(i%5)) {
std::cout << "On iteration " << i << std::endl;
}
ObjectStore::Transaction t;
bufferlist bl;
map<string, bufferlist> cur_attrs;
r = store->omap_get(ch, hoid, &bl, &cur_attrs);
ASSERT_EQ(r, 0);
for (map<string, bufferlist>::iterator j = attrs.begin();
j != attrs.end();
++j) {
bool correct = cur_attrs.count(j->first) && string(cur_attrs[j->first].c_str()) == string(j->second.c_str());
if (!correct) {
std::cout << j->first << " is present in cur_attrs " << cur_attrs.count(j->first) << " times " << std::endl;
if (cur_attrs.count(j->first) > 0) {
std::cout << j->second.c_str() << " : " << cur_attrs[j->first].c_str() << std::endl;
}
}
ASSERT_EQ(correct, true);
}
ASSERT_EQ(attrs.size(), cur_attrs.size());
char buf[100];
snprintf(buf, sizeof(buf), "%d", i);
bl.clear();
bufferptr bp(buf, strlen(buf) + 1);
bl.append(bp);
map<string, bufferlist> to_add;
to_add.insert(pair<string, bufferlist>("key-" + string(buf), bl));
attrs.insert(pair<string, bufferlist>("key-" + string(buf), bl));
t.omap_setkeys(cid, hoid, to_add);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
int i = 0;
while (attrs.size()) {
if (!(i%5)) {
std::cout << "removal: On iteration " << i << std::endl;
}
ObjectStore::Transaction t;
bufferlist bl;
map<string, bufferlist> cur_attrs;
r = store->omap_get(ch, hoid, &bl, &cur_attrs);
ASSERT_EQ(r, 0);
for (map<string, bufferlist>::iterator j = attrs.begin();
j != attrs.end();
++j) {
bool correct = cur_attrs.count(j->first) && string(cur_attrs[j->first].c_str()) == string(j->second.c_str());
if (!correct) {
std::cout << j->first << " is present in cur_attrs " << cur_attrs.count(j->first) << " times " << std::endl;
if (cur_attrs.count(j->first) > 0) {
std::cout << j->second.c_str() << " : " << cur_attrs[j->first].c_str() << std::endl;
}
}
ASSERT_EQ(correct, true);
}
string to_remove = attrs.begin()->first;
t.omap_rmkey(cid, hoid, to_remove);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
attrs.erase(to_remove);
++i;
}
{
bufferlist bl1;
bl1.append("omap_header");
ObjectStore::Transaction t;
t.omap_setheader(cid, hoid, bl1);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
bufferlist bl2;
bl2.append("value");
map<string, bufferlist> to_add;
to_add.insert(pair<string, bufferlist>("key", bl2));
t.omap_setkeys(cid, hoid, to_add);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bufferlist bl3;
map<string, bufferlist> cur_attrs;
r = store->omap_get(ch, hoid, &bl3, &cur_attrs);
ASSERT_EQ(r, 0);
ASSERT_EQ(cur_attrs.size(), size_t(1));
ASSERT_TRUE(bl_eq(bl1, bl3));
set<string> keys;
r = store->omap_get_keys(ch, hoid, &keys);
ASSERT_EQ(r, 0);
ASSERT_EQ(keys.size(), size_t(1));
}
// test omap_clear, omap_rmkey_range
{
{
map<string,bufferlist> to_set;
for (int n=0; n<10; ++n) {
to_set[stringify(n)].append("foo");
}
bufferlist h;
h.append("header");
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.touch(cid, hoid);
t.omap_setheader(cid, hoid, h);
t.omap_setkeys(cid, hoid, to_set);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.omap_rmkeyrange(cid, hoid, "3", "7");
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist hdr;
map<string,bufferlist> m;
store->omap_get(ch, hoid, &hdr, &m);
ASSERT_EQ(6u, hdr.length());
ASSERT_TRUE(m.count("2"));
ASSERT_TRUE(!m.count("3"));
ASSERT_TRUE(!m.count("6"));
ASSERT_TRUE(m.count("7"));
ASSERT_TRUE(m.count("8"));
//cout << m << std::endl;
ASSERT_EQ(6u, m.size());
}
{
ObjectStore::Transaction t;
t.omap_clear(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist hdr;
map<string,bufferlist> m;
store->omap_get(ch, hoid, &hdr, &m);
ASSERT_EQ(0u, hdr.length());
ASSERT_EQ(0u, m.size());
}
}
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
TEST_P(StoreTest, OMapIterator) {
coll_t cid;
ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, 0, ""));
int count = 0;
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
map<string, bufferlist> attrs;
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.omap_clear(cid, hoid);
map<string, bufferlist> start_set;
t.omap_setkeys(cid, hoid, start_set);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ObjectMap::ObjectMapIterator iter;
bool correct;
//basic iteration
for (int i = 0; i < 100; i++) {
if (!(i%5)) {
std::cout << "On iteration " << i << std::endl;
}
bufferlist bl;
// FileStore may deadlock two active iterators over the same data
iter = ObjectMap::ObjectMapIterator();
iter = store->get_omap_iterator(ch, hoid);
for (iter->seek_to_first(), count=0; iter->valid(); iter->next(), count++) {
string key = iter->key();
bufferlist value = iter->value();
correct = attrs.count(key) && (string(value.c_str()) == string(attrs[key].c_str()));
if (!correct) {
if (attrs.count(key) > 0) {
std::cout << "key " << key << "in omap , " << value.c_str() << " : " << attrs[key].c_str() << std::endl;
}
else
std::cout << "key " << key << "should not exists in omap" << std::endl;
}
ASSERT_EQ(correct, true);
}
ASSERT_EQ((int)attrs.size(), count);
// FileStore may deadlock an active iterator vs queue_transaction
iter = ObjectMap::ObjectMapIterator();
char buf[100];
snprintf(buf, sizeof(buf), "%d", i);
bl.clear();
bufferptr bp(buf, strlen(buf) + 1);
bl.append(bp);
map<string, bufferlist> to_add;
to_add.insert(pair<string, bufferlist>("key-" + string(buf), bl));
attrs.insert(pair<string, bufferlist>("key-" + string(buf), bl));
ObjectStore::Transaction t;
t.omap_setkeys(cid, hoid, to_add);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
iter = store->get_omap_iterator(ch, hoid);
//lower bound
string bound_key = "key-5";
iter->lower_bound(bound_key);
correct = bound_key <= iter->key();
if (!correct) {
std::cout << "lower bound, bound key is " << bound_key << " < iter key is " << iter->key() << std::endl;
}
ASSERT_EQ(correct, true);
//upper bound
iter->upper_bound(bound_key);
correct = iter->key() > bound_key;
if (!correct) {
std::cout << "upper bound, bound key is " << bound_key << " >= iter key is " << iter->key() << std::endl;
}
ASSERT_EQ(correct, true);
// FileStore may deadlock an active iterator vs queue_transaction
iter = ObjectMap::ObjectMapIterator();
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
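// Covers object xattrs: setattr (including overwrites with differently
// sized values), bulk getattrs(), rmattr, and the -ENODATA result for a
// getattr() on a removed key.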
TEST_P(StoreTest, XattrTest) {
coll_t cid;
ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, 0, ""));
bufferlist big;
for (unsigned i = 0; i < 10000; ++i) {
big.append('\0');
}
bufferlist small;
for (unsigned i = 0; i < 10; ++i) {
small.append('\0');
}
int r;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
map<string, bufferlist> attrs;
{
ObjectStore::Transaction t;
t.setattr(cid, hoid, "attr1", small);
attrs["attr1"] = small;
t.setattr(cid, hoid, "attr2", big);
attrs["attr2"] = big;
t.setattr(cid, hoid, "attr3", small);
attrs["attr3"] = small;
t.setattr(cid, hoid, "attr1", small);
attrs["attr1"] = small;
t.setattr(cid, hoid, "attr4", big);
attrs["attr4"] = big;
t.setattr(cid, hoid, "attr3", big);
attrs["attr3"] = big;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
map<string, bufferptr, less<>> aset;
store->getattrs(ch, hoid, aset);
ASSERT_EQ(aset.size(), attrs.size());
for (map<string, bufferptr>::iterator i = aset.begin();
i != aset.end();
++i) {
bufferlist bl;
bl.push_back(i->second);
ASSERT_TRUE(attrs[i->first] == bl);
}
{
ObjectStore::Transaction t;
t.rmattr(cid, hoid, "attr2");
attrs.erase("attr2");
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
aset.clear();
store->getattrs(ch, hoid, aset);
ASSERT_EQ(aset.size(), attrs.size());
for (map<string, bufferptr>::iterator i = aset.begin();
i != aset.end();
++i) {
bufferlist bl;
bl.push_back(i->second);
ASSERT_TRUE(attrs[i->first] == bl);
}
bufferptr bp;
r = store->getattr(ch, hoid, "attr2", bp);
ASSERT_EQ(r, -ENODATA);
r = store->getattr(ch, hoid, "attr3", bp);
ASSERT_EQ(r, 0);
bufferlist bl2;
bl2.push_back(bp);
ASSERT_TRUE(bl2 == attrs["attr3"]);
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
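// Helper shared by the ColSplitTest* cases below: populates a collection
// with objects (optionally with clones) whose hashes straddle the split
// bit, splits them into a child collection, verifies each half lists only
// the expected hashes, then merges them back and cleans up.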
void colsplittest(
ObjectStore *store,
unsigned num_objects,
unsigned common_suffix_size,
bool clones
) {
coll_t cid(spg_t(pg_t(0,52),shard_id_t::NO_SHARD));
coll_t tid(spg_t(pg_t(1<<common_suffix_size,52),shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
auto tch = store->create_new_collection(tid);
int r = 0;
{
ObjectStore::Transaction t;
t.create_collection(cid, common_suffix_size);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist small;
small.append("small");
{
ObjectStore::Transaction t;
for (uint32_t i = 0; i < (2 - (int)clones)*num_objects; ++i) {
stringstream objname;
objname << "obj" << i;
ghobject_t a(hobject_t(
objname.str(),
"",
CEPH_NOSNAP,
i<<common_suffix_size,
52, ""));
t.write(cid, a, 0, small.length(), small,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
if (clones) {
objname << "-clone";
ghobject_t b(hobject_t(
objname.str(),
"",
CEPH_NOSNAP,
i<<common_suffix_size,
52, ""));
t.clone(cid, a, b);
}
if (i % 100) {
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
}
}
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.create_collection(tid, common_suffix_size + 1);
t.split_collection(cid, common_suffix_size+1, 1<<common_suffix_size, tid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ch->flush();
// check
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
ASSERT_EQ(objects.size(), num_objects);
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
ASSERT_EQ(!!(i->hobj.get_hash() & (1<<common_suffix_size)), 0u);
}
objects.clear();
r = collection_list(store, tch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
ASSERT_EQ(objects.size(), num_objects);
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
ASSERT_EQ(!(i->hobj.get_hash() & (1<<common_suffix_size)), 0u);
}
// merge them again!
{
ObjectStore::Transaction t;
t.merge_collection(tid, cid, common_suffix_size);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// check and clean up
ObjectStore::Transaction t;
{
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
ASSERT_EQ(objects.size(), num_objects * 2); // both halves
unsigned size = 0;
for (vector<ghobject_t>::iterator i = objects.begin();
i != objects.end();
++i) {
t.remove(cid, *i);
if (++size > 100) {
size = 0;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
}
}
}
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ch->flush();
ASSERT_TRUE(!store->collection_exists(tid));
}
TEST_P(StoreTest, ColSplitTest0) {
colsplittest(store.get(), 10, 5, false);
}
TEST_P(StoreTest, ColSplitTest1) {
colsplittest(store.get(), 10000, 11, false);
}
TEST_P(StoreTest, ColSplitTest1Clones) {
colsplittest(store.get(), 10000, 11, true);
}
TEST_P(StoreTest, ColSplitTest2) {
colsplittest(store.get(), 100, 7, false);
}
TEST_P(StoreTest, ColSplitTest2Clones) {
colsplittest(store.get(), 100, 7, true);
}
#if 0
TEST_P(StoreTest, ColSplitTest3) {
  colsplittest(store.get(), 100000, 25, false);
}
#endif
void test_merge_skewed(ObjectStore *store,
unsigned base, unsigned bits,
unsigned anum, unsigned bnum)
{
cout << __func__ << " 0x" << std::hex << base << std::dec
<< " bits " << bits
<< " anum " << anum << " bnum " << bnum << std::endl;
/*
make merge source pgs have radically different # of objects in them,
which should trigger different splitting in filestore, and verify that
post-merge all objects are accessible.
*/
int r;
coll_t a(spg_t(pg_t(base, 0), shard_id_t::NO_SHARD));
coll_t b(spg_t(pg_t(base | (1<<bits), 0), shard_id_t::NO_SHARD));
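  // a and b differ only in the bit at position `bits`, which makes them
  // valid sources for the merge_collection(b, a, bits) call below.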
auto cha = store->create_new_collection(a);
auto chb = store->create_new_collection(b);
{
ObjectStore::Transaction t;
t.create_collection(a, bits + 1);
r = queue_transaction(store, cha, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.create_collection(b, bits + 1);
r = queue_transaction(store, chb, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist small;
small.append("small");
string suffix = "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooaaaaaaaaaa";
set<ghobject_t> aobjects, bobjects;
{
// fill a
ObjectStore::Transaction t;
for (unsigned i = 0; i < 1000; ++i) {
string objname = "a" + stringify(i) + suffix;
ghobject_t o(hobject_t(
objname,
"",
CEPH_NOSNAP,
i<<(bits+1) | base,
52, ""));
aobjects.insert(o);
t.write(a, o, 0, small.length(), small, 0);
if (i % 100) {
r = queue_transaction(store, cha, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
}
}
r = queue_transaction(store, cha, std::move(t));
ASSERT_EQ(r, 0);
}
{
// fill b
ObjectStore::Transaction t;
for (unsigned i = 0; i < 10; ++i) {
string objname = "b" + stringify(i) + suffix;
ghobject_t o(hobject_t(
objname,
"",
CEPH_NOSNAP,
                     (i<<(bits+1)) | base | (1<<bits),
52, ""));
bobjects.insert(o);
t.write(b, o, 0, small.length(), small, 0);
if (i % 100) {
r = queue_transaction(store, chb, std::move(t));
ASSERT_EQ(r, 0);
t = ObjectStore::Transaction();
}
}
r = queue_transaction(store, chb, std::move(t));
ASSERT_EQ(r, 0);
}
// merge b->a
{
ObjectStore::Transaction t;
t.merge_collection(b, a, bits);
r = queue_transaction(store, cha, std::move(t));
ASSERT_EQ(r, 0);
}
// verify
{
vector<ghobject_t> got;
collection_list(store, cha, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&got, 0);
set<ghobject_t> gotset;
for (auto& o : got) {
ASSERT_TRUE(aobjects.count(o) || bobjects.count(o));
gotset.insert(o);
}
// check both listing and stat-ability (different code paths!)
struct stat st;
for (auto& o : aobjects) {
ASSERT_TRUE(gotset.count(o));
int r = store->stat(cha, o, &st, false);
ASSERT_EQ(r, 0);
}
for (auto& o : bobjects) {
ASSERT_TRUE(gotset.count(o));
int r = store->stat(cha, o, &st, false);
ASSERT_EQ(r, 0);
}
}
// clean up
{
ObjectStore::Transaction t;
for (auto &o : aobjects) {
t.remove(a, o);
}
r = queue_transaction(store, cha, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
for (auto &o : bobjects) {
t.remove(a, o);
}
t.remove_collection(a);
r = queue_transaction(store, cha, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, MergeSkewed) {
if (string(GetParam()) != "filestore")
return;
// this is sufficient to exercise merges with different hashing levels
test_merge_skewed(store.get(), 0xf, 4, 10, 10000);
test_merge_skewed(store.get(), 0xf, 4, 10000, 10);
/*
// this covers a zillion variations that all boil down to the same thing
for (unsigned base = 3; base < 0x1000; base *= 5) {
unsigned bits;
unsigned t = base;
for (bits = 0; t; t >>= 1) {
++bits;
}
for (unsigned b = bits; b < bits + 10; b += 3) {
for (auto anum : { 10, 1000, 10000 }) {
for (auto bnum : { 10, 1000, 10000 }) {
if (anum == bnum) {
continue;
}
test_merge_skewed(store.get(), base, b, anum, bnum);
}
}
}
}
*/
}
/**
 * This test adds two different groups
* of objects, each with 1 common prefix and 1
* different prefix. We then remove half
* in order to verify that the merging correctly
* stops at the common prefix subdir. See bug
* #5273 */
TEST_P(StoreTest, TwoHash) {
coll_t cid;
int r;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::cout << "Making objects" << std::endl;
for (int i = 0; i < 360; ++i) {
ObjectStore::Transaction t;
ghobject_t o;
o.hobj.pool = -1;
if (i < 8) {
o.hobj.set_hash((i << 16) | 0xA1);
t.touch(cid, o);
}
o.hobj.set_hash((i << 16) | 0xB1);
t.touch(cid, o);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::cout << "Removing half" << std::endl;
for (int i = 1; i < 8; ++i) {
ObjectStore::Transaction t;
ghobject_t o;
o.hobj.pool = -1;
o.hobj.set_hash((i << 16) | 0xA1);
t.remove(cid, o);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::cout << "Checking" << std::endl;
for (int i = 1; i < 8; ++i) {
ObjectStore::Transaction t;
ghobject_t o;
o.hobj.set_hash((i << 16) | 0xA1);
o.hobj.pool = -1;
bool exists = store->exists(ch, o);
ASSERT_EQ(exists, false);
}
{
ghobject_t o;
o.hobj.set_hash(0xA1);
o.hobj.pool = -1;
bool exists = store->exists(ch, o);
ASSERT_EQ(exists, true);
}
std::cout << "Cleanup" << std::endl;
for (int i = 0; i < 360; ++i) {
ObjectStore::Transaction t;
ghobject_t o;
o.hobj.set_hash((i << 16) | 0xA1);
o.hobj.pool = -1;
t.remove(cid, o);
o.hobj.set_hash((i << 16) | 0xB1);
t.remove(cid, o);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ObjectStore::Transaction t;
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
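// collection_move_rename() within a transaction: subsequent ops in the
// same transaction that reference the source oid apply to a fresh object,
// so both source and destination exist afterwards with distinct contents.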
TEST_P(StoreTest, Rename) {
coll_t cid(spg_t(pg_t(0, 2122),shard_id_t::NO_SHARD));
ghobject_t srcoid(hobject_t("src_oid", "", CEPH_NOSNAP, 0, 0, ""));
ghobject_t dstoid(hobject_t("dest_oid", "", CEPH_NOSNAP, 0, 0, ""));
bufferlist a, b;
a.append("foo");
b.append("bar");
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.write(cid, srcoid, 0, a.length(), a);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_TRUE(store->exists(ch, srcoid));
{
ObjectStore::Transaction t;
t.collection_move_rename(cid, srcoid, cid, dstoid);
t.write(cid, srcoid, 0, b.length(), b);
t.setattr(cid, srcoid, "attr", b);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_TRUE(store->exists(ch, srcoid));
ASSERT_TRUE(store->exists(ch, dstoid));
{
bufferlist bl;
store->read(ch, srcoid, 0, 3, bl);
ASSERT_TRUE(bl_eq(b, bl));
store->read(ch, dstoid, 0, 3, bl);
ASSERT_TRUE(bl_eq(a, bl));
}
{
ObjectStore::Transaction t;
t.remove(cid, dstoid);
t.collection_move_rename(cid, srcoid, cid, dstoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_TRUE(store->exists(ch, dstoid));
ASSERT_FALSE(store->exists(ch, srcoid));
{
bufferlist bl;
store->read(ch, dstoid, 0, 3, bl);
ASSERT_TRUE(bl_eq(b, bl));
}
{
ObjectStore::Transaction t;
t.remove(cid, dstoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
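// Stages data, an xattr and an omap key on a temp object (akin to a
// write-to-temp-then-rename flow) and verifies that all three survive the
// collection_move_rename() onto the destination oid.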
TEST_P(StoreTest, MoveRename) {
coll_t cid(spg_t(pg_t(0, 212),shard_id_t::NO_SHARD));
ghobject_t temp_oid(hobject_t("tmp_oid", "", CEPH_NOSNAP, 0, 0, ""));
ghobject_t oid(hobject_t("dest_oid", "", CEPH_NOSNAP, 0, 0, ""));
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_TRUE(store->exists(ch, oid));
bufferlist data, attr;
map<string, bufferlist> omap;
data.append("data payload");
attr.append("attr value");
omap["omap_key"].append("omap value");
{
ObjectStore::Transaction t;
t.touch(cid, temp_oid);
t.write(cid, temp_oid, 0, data.length(), data);
t.setattr(cid, temp_oid, "attr", attr);
t.omap_setkeys(cid, temp_oid, omap);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_TRUE(store->exists(ch, temp_oid));
{
ObjectStore::Transaction t;
t.remove(cid, oid);
t.collection_move_rename(cid, temp_oid, cid, oid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_TRUE(store->exists(ch, oid));
ASSERT_FALSE(store->exists(ch, temp_oid));
{
bufferlist newdata;
r = store->read(ch, oid, 0, 1000, newdata);
ASSERT_GE(r, 0);
ASSERT_TRUE(bl_eq(data, newdata));
bufferlist newattr;
r = store->getattr(ch, oid, "attr", newattr);
ASSERT_EQ(r, 0);
ASSERT_TRUE(bl_eq(attr, newattr));
set<string> keys;
keys.insert("omap_key");
map<string, bufferlist> newomap;
r = store->omap_get_values(ch, oid, keys, &newomap);
ASSERT_GE(r, 0);
ASSERT_EQ(1u, newomap.size());
ASSERT_TRUE(newomap.count("omap_key"));
ASSERT_TRUE(bl_eq(omap["omap_key"], newomap["omap_key"]));
}
{
ObjectStore::Transaction t;
t.remove(cid, oid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, BigRGWObjectName) {
coll_t cid(spg_t(pg_t(0,12),shard_id_t::NO_SHARD));
ghobject_t oid(
hobject_t(
"default.4106.50_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"",
CEPH_NOSNAP,
0x81920472,
12,
""),
15,
shard_id_t::NO_SHARD);
ghobject_t oid2(oid);
oid2.generation = 17;
ghobject_t oidhead(oid);
oidhead.generation = ghobject_t::NO_GEN;
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oidhead);
t.collection_move_rename(cid, oidhead, cid, oid);
t.touch(cid, oidhead);
t.collection_move_rename(cid, oidhead, cid, oid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, oid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
vector<ghobject_t> objects;
r = collection_list(store, ch, ghobject_t(), ghobject_t::get_max(), INT_MAX,
&objects, 0);
ASSERT_EQ(r, 0);
ASSERT_EQ(objects.size(), 1u);
ASSERT_EQ(objects[0], oid2);
}
ASSERT_FALSE(store->exists(ch, oid));
{
ObjectStore::Transaction t;
t.remove(cid, oid2);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, SetAllocHint) {
coll_t cid;
ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, 0, ""));
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*4, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*4, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTest, TryMoveRename) {
coll_t cid;
ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));
ghobject_t hoid2(hobject_t("test_hint2", "", CEPH_NOSNAP, 0, -1, ""));
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.try_rename(cid, hoid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.try_rename(cid, hoid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
struct stat st;
ASSERT_EQ(store->stat(ch, hoid, &st), -ENOENT);
ASSERT_EQ(store->stat(ch, hoid2, &st), 0);
}
#if defined(WITH_BLUESTORE)
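// Toggles bluestore_csum_type between crc32c and none across writes and
// reads; data written under either setting must stay readable after the
// setting changes, including blobs with mixed csum state.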
TEST_P(StoreTest, BluestoreOnOffCSumTest) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_csum_type", "crc32c");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
{
auto ch = store->open_collection(cid);
ASSERT_FALSE(ch);
}
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
//write with csum enabled followed by read with csum disabled
size_t block_size = 64*1024;
ObjectStore::Transaction t;
bufferlist bl, orig;
bl.append(std::string(block_size, 'a'));
orig = bl;
t.remove(cid, hoid);
t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*8, 0);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "Remove then create" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
SetVal(g_conf(), "bluestore_csum_type", "none");
g_conf().apply_changes(nullptr);
bufferlist in;
r = store->read(ch, hoid, 0, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
}
{
//write with csum disabled followed by read with csum enabled
size_t block_size = 64*1024;
ObjectStore::Transaction t;
bufferlist bl, orig;
bl.append(std::string(block_size, 'a'));
orig = bl;
t.remove(cid, hoid);
t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*8, 0);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "Remove then create" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
SetVal(g_conf(), "bluestore_csum_type", "crc32c");
g_conf().apply_changes(nullptr);
bufferlist in;
r = store->read(ch, hoid, 0, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
}
{
//'mixed' non-overlapping writes to the same blob
ObjectStore::Transaction t;
bufferlist bl, orig;
size_t block_size = 8000;
bl.append(std::string(block_size, 'a'));
orig = bl;
t.remove(cid, hoid);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "Remove then create" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
SetVal(g_conf(), "bluestore_csum_type", "none");
g_conf().apply_changes(nullptr);
ObjectStore::Transaction t2;
t2.write(cid, hoid, block_size*2, bl.length(), bl);
cerr << "Append 'unprotected'" << std::endl;
r = queue_transaction(store, ch, std::move(t2));
ASSERT_EQ(r, 0);
bufferlist in;
r = store->read(ch, hoid, 0, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
in.clear();
r = store->read(ch, hoid, block_size*2, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
SetVal(g_conf(), "bluestore_csum_type", "crc32c");
g_conf().apply_changes(nullptr);
in.clear();
r = store->read(ch, hoid, 0, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
in.clear();
r = store->read(ch, hoid, block_size*2, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
}
{
    //partial blob overwrite under a different csum enablement mode
ObjectStore::Transaction t;
bufferlist bl, orig, orig2;
size_t block_size0 = 0x10000;
size_t block_size = 9000;
size_t block_size2 = 5000;
bl.append(std::string(block_size0, 'a'));
t.remove(cid, hoid);
t.set_alloc_hint(cid, hoid, 4*1024*1024, 1024*8, 0);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "Remove then create" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
SetVal(g_conf(), "bluestore_csum_type", "none");
g_conf().apply_changes(nullptr);
ObjectStore::Transaction t2;
bl.clear();
bl.append(std::string(block_size, 'b'));
t2.write(cid, hoid, 0, bl.length(), bl);
t2.write(cid, hoid, block_size0, bl.length(), bl);
cerr << "Overwrite with unprotected data" << std::endl;
r = queue_transaction(store, ch, std::move(t2));
ASSERT_EQ(r, 0);
orig = bl;
orig2 = bl;
orig.append( std::string(block_size0 - block_size, 'a'));
bufferlist in;
r = store->read(ch, hoid, 0, block_size0, in);
ASSERT_EQ((int)block_size0, r);
ASSERT_TRUE(bl_eq(orig, in));
r = store->read(ch, hoid, block_size0, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig2, in));
SetVal(g_conf(), "bluestore_csum_type", "crc32c");
g_conf().apply_changes(nullptr);
ObjectStore::Transaction t3;
bl.clear();
bl.append(std::string(block_size2, 'c'));
t3.write(cid, hoid, block_size0, bl.length(), bl);
cerr << "Overwrite with protected data" << std::endl;
r = queue_transaction(store, ch, std::move(t3));
ASSERT_EQ(r, 0);
in.clear();
orig = bl;
orig.append( std::string(block_size - block_size2, 'b'));
r = store->read(ch, hoid, block_size0, block_size, in);
ASSERT_EQ((int)block_size, r);
ASSERT_TRUE(bl_eq(orig, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
#endif
INSTANTIATE_TEST_SUITE_P(
ObjectStore,
StoreTest,
::testing::Values(
"memstore",
#if defined(WITH_BLUESTORE)
"bluestore",
#endif
"kstore"));
// Note: all stores are instantiated only to preserve store numbering order
INSTANTIATE_TEST_SUITE_P(
ObjectStore,
StoreTestSpecificAUSize,
::testing::Values(
"memstore",
#if defined(WITH_BLUESTORE)
"bluestore",
#endif
"kstore"));
// Note: all stores are instantiated only to preserve store numbering order
INSTANTIATE_TEST_SUITE_P(
ObjectStore,
StoreTestOmapUpgrade,
::testing::Values(
"memstore",
#if defined(WITH_BLUESTORE)
"bluestore",
#endif
"kstore"));
#if defined(WITH_BLUESTORE)
INSTANTIATE_TEST_SUITE_P(
ObjectStore,
StoreTestDeferredSetup,
::testing::Values(
"bluestore"));
#endif
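// Knobs driving bluestore's deferred-write decision: device block size,
// allocation unit, max blob size and bluestore_prefer_deferred_size
// (writes smaller than the latter are expected to go deferred, per the
// assertions in DeferredWriteTest::NewData below).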
struct deferred_test_t {
uint32_t bdev_block_size;
uint32_t min_alloc_size;
uint32_t max_blob_size;
uint32_t prefer_deferred_size;
};
void PrintTo(const deferred_test_t& t, ::std::ostream* os)
{
*os << t.bdev_block_size << "/" << t.min_alloc_size << "/"
<< t.max_blob_size << "/" << t.prefer_deferred_size;
}
class DeferredWriteTest : public StoreTestFixture,
public ::testing::WithParamInterface<deferred_test_t> {
public:
DeferredWriteTest()
: StoreTestFixture("bluestore")
{}
void SetUp() override {
//do nothing
}
protected:
void DeferredSetup() {
StoreTestFixture::SetUp();
}
public:
std::vector<uint32_t> offsets = {0, 3000, 4096, 20000, 32768, 65000, 65536, 80000, 128 * 1024};
std::vector<uint32_t> lengths = {1, 1000, 4096, 12000, 32768, 30000, 80000, 128 * 1024};
};
TEST_P(DeferredWriteTest, NewData) {
const bool print = false;
deferred_test_t t = GetParam();
SetVal(g_conf(), "bdev_block_size", stringify(t.bdev_block_size).c_str());
SetVal(g_conf(), "bluestore_min_alloc_size", stringify(t.min_alloc_size).c_str());
SetVal(g_conf(), "bluestore_max_blob_size", stringify(t.max_blob_size).c_str());
SetVal(g_conf(), "bluestore_prefer_deferred_size", stringify(t.prefer_deferred_size).c_str());
g_conf().apply_changes(nullptr);
DeferredSetup();
int r;
coll_t cid;
const PerfCounters* logger = store->get_perf_counters();
ObjectStore::CollectionHandle ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
for (auto offset:offsets) {
for (auto length:lengths) {
std::string hname = fmt::format("test-{}-{}", offset, length);
ghobject_t hoid(hobject_t(hname, "", CEPH_NOSNAP, 0, -1, ""));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
if (print)
std::cout << hname << std::endl;
auto w_new = logger->get(l_bluestore_write_new);
auto w_big_deferred = logger->get(l_bluestore_write_big_deferred);
auto i_deferred_w = logger->get(l_bluestore_issued_deferred_writes);
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(length, 'x'));
t.write(cid, hoid, offset, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
uint32_t first_db = offset / t.bdev_block_size;
uint32_t last_db = (offset + length - 1) / t.bdev_block_size;
uint32_t write_size = (last_db - first_db + 1) * t.bdev_block_size;
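        // Worked example: with a 4K bdev block, offset=3000 and length=1000
        // touch only device block 0, so first_db == last_db == 0 and
        // write_size == 4096; with prefer_deferred_size=32K such a write
        // should go deferred, i.e. l_bluestore_write_new must not grow.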
if (write_size < t.prefer_deferred_size) {
// expect no direct writes
ASSERT_EQ(w_new , logger->get(l_bluestore_write_new));
} else {
// expect no deferred
ASSERT_EQ(w_big_deferred , logger->get(l_bluestore_write_big_deferred));
ASSERT_EQ(i_deferred_w , logger->get(l_bluestore_issued_deferred_writes));
}
}
}
}
}
#if defined(WITH_BLUESTORE)
INSTANTIATE_TEST_SUITE_P(
BlueStore,
DeferredWriteTest,
::testing::Values(
// bdev alloc blob deferred
deferred_test_t{4 * 1024, 4 * 1024, 16 * 1024, 32 * 1024},
deferred_test_t{4 * 1024, 16 * 1024, 64 * 1024, 64 * 1024},
deferred_test_t{4 * 1024, 64 * 1024, 64 * 1024, 4 * 1024},
deferred_test_t{4 * 1024, 4 * 1024, 64 * 1024, 0 * 1024},
deferred_test_t{4 * 1024, 16 * 1024, 32 * 1024, 32 * 1024},
deferred_test_t{4 * 1024, 16 * 1024, 64 * 1024, 128 * 1024}
));
#endif
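// Synthetic small-write workload: seeds up to max_objects objects, issues
// max_ops randomized writes through SyntheticWorkloadState, then asserts
// via statfs that neither stored nor allocated bytes exceed
// max_object_size.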
void doMany4KWritesTest(ObjectStore* store,
unsigned max_objects,
unsigned max_ops,
unsigned max_object_size,
unsigned max_write_size,
unsigned write_alignment)
{
MixedGenerator gen(555);
gen_type rng(time(NULL));
coll_t cid(spg_t(pg_t(0,555), shard_id_t::NO_SHARD));
store_statfs_t res_stat;
SyntheticWorkloadState test_obj(store,
&gen,
&rng,
cid,
max_object_size,
max_write_size,
write_alignment);
test_obj.init();
for (unsigned i = 0; i < max_objects; ++i) {
if (!(i % 500)) cerr << "seeding object " << i << std::endl;
test_obj.touch();
}
for (unsigned i = 0; i < max_ops; ++i) {
if (!(i % 200)) {
cerr << "Op " << i << std::endl;
test_obj.print_internal_state();
}
test_obj.write();
}
test_obj.wait_for_done();
test_obj.statfs(res_stat);
if (!(res_stat.data_stored <= max_object_size) ||
!(res_stat.allocated <= max_object_size)) {
    // this provides more insight into the mismatch and helps to avoid
    // any races during stats collection
    test_obj.fsck(false);
    // retrieve the stats once again and assert if still broken
test_obj.statfs(res_stat);
ASSERT_LE(res_stat.data_stored, max_object_size);
ASSERT_LE(res_stat.allocated, max_object_size);
}
test_obj.shutdown();
}
TEST_P(StoreTestSpecificAUSize, Many4KWritesTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred; assertions around res_stat.allocated don't apply"
<< std::endl;
return;
}
StartDeferred(0x10000);
const unsigned max_object = 4*1024*1024;
doMany4KWritesTest(store.get(), 1, 1000, max_object, 4*1024, 0);
}
TEST_P(StoreTestSpecificAUSize, Many4KWritesNoCSumTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred; assertions around res_stat.allocated don't apply"
<< std::endl;
return;
}
StartDeferred(0x10000);
SetVal(g_conf(), "bluestore_csum_type", "none");
g_ceph_context->_conf.apply_changes(nullptr);
const unsigned max_object = 4*1024*1024;
doMany4KWritesTest(store.get(), 1, 1000, max_object, 4*1024, 0 );
}
TEST_P(StoreTestSpecificAUSize, TooManyBlobsTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred; assertions around res_stat.allocated don't apply"
<< std::endl;
return;
}
StartDeferred(0x10000);
const unsigned max_object = 4*1024*1024;
doMany4KWritesTest(store.get(), 1, 1000, max_object, 4*1024, 0);
}
#if defined(WITH_BLUESTORE)
void get_mempool_stats(uint64_t* total_bytes, uint64_t* total_items)
{
uint64_t meta_allocated = mempool::bluestore_cache_meta::allocated_bytes();
uint64_t onode_allocated = mempool::bluestore_cache_onode::allocated_bytes();
uint64_t other_allocated = mempool::bluestore_cache_other::allocated_bytes();
uint64_t meta_items = mempool::bluestore_cache_meta::allocated_items();
uint64_t onode_items = mempool::bluestore_cache_onode::allocated_items();
uint64_t other_items = mempool::bluestore_cache_other::allocated_items();
cout << "meta(" << meta_allocated << "/" << meta_items
<< ") onode(" << onode_allocated << "/" << onode_items
<< ") other(" << other_allocated << "/" << other_items
<< ")" << std::endl;
*total_bytes = meta_allocated + onode_allocated + other_allocated;
*total_items = onode_items;
}
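// Tracks bluestore cache mempool usage while an object is written,
// truncated, rewritten and read back; the onode count must stay constant
// (5 onode_cache_shards entries plus the single test onode).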
TEST_P(StoreTestSpecificAUSize, OnodeSizeTracking) {
if (string(GetParam()) != "bluestore")
return;
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_compression_mode", "none");
SetVal(g_conf(), "bluestore_csum_type", "none");
SetVal(g_conf(), "bluestore_cache_size_hdd", "400000000");
SetVal(g_conf(), "bluestore_cache_size_ssd", "400000000");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));
size_t obj_size = 4 * 1024 * 1024;
uint64_t total_bytes_prev;
uint64_t total_bytes, total_bytes2;
uint64_t total_onodes;
get_mempool_stats(&total_bytes, &total_onodes);
total_bytes_prev = total_bytes;
// 5u for onode_cache_shards vector
ASSERT_EQ(total_onodes, 5u);
ASSERT_EQ(total_bytes, 40u);
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig, orig2;
bl.append(std::string(obj_size, 'a'));
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
get_mempool_stats(&total_bytes, &total_onodes);
ASSERT_GT(total_bytes - total_bytes_prev, 0u);
ASSERT_EQ(total_onodes, 6u);
{
ObjectStore::Transaction t;
t.truncate(cid, hoid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for(size_t i = 0; i < 1; ++i) {
bufferlist bl;
bl.append(std::string(block_size * (i+1), 'a'));
for( size_t j = 0; j < obj_size; j+= bl.length()) {
ObjectStore::Transaction t;
t.write(cid, hoid, j, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
get_mempool_stats(&total_bytes2, &total_onodes);
ASSERT_NE(total_bytes2, 0u);
ASSERT_EQ(total_onodes, 6u);
}
{
cout <<" mempool dump:\n";
JSONFormatter f(true);
f.open_object_section("transaction");
mempool::dump(&f);
f.close_section();
f.flush(cout);
cout << std::endl;
}
{
bufferlist bl;
for (size_t i = 0; i < obj_size; i += 0x1000) {
store->read(ch, hoid, i, 0x1000, bl);
}
}
get_mempool_stats(&total_bytes, &total_onodes);
ASSERT_NE(total_bytes, 0u);
ASSERT_EQ(total_onodes, 6u);
{
cout <<" mempool dump:\n";
JSONFormatter f(true);
f.open_object_section("transaction");
mempool::dump(&f);
f.close_section();
f.flush(cout);
cout << std::endl;
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, BlobReuseOnOverwrite) {
if (string(GetParam()) != "bluestore")
return;
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_max_blob_size", "65536");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'a'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// overwrite at the beginning
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'b'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// append
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'c'));
t.write(cid, hoid, block_size * 2, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// append with a gap
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'd'));
t.write(cid, hoid, block_size * 5, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that
    // refreshes the perf counters. Additionally we need to wait some time
    // for the mempool thread to update its stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 2u);
}
{
// overwrite at end
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'e'));
    // Currently we are unable to reuse a blob when overwriting it in a single step
t.write(cid, hoid, block_size * 6, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that
    // refreshes the perf counters. Additionally we need to wait some time
    // for the mempool thread to update its stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 2u);
}
{
// fill the gap
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'f'));
t.write(cid, hoid, block_size * 4, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to wait some time for the mempool thread to update its stats
    // before blob/extent numbers can be checked via perf counters.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
bl.clear();
expected.clear();
r = store->read(ch, hoid, block_size, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
bl.clear();
expected.clear();
r = store->read(ch, hoid, block_size * 2, block_size * 2, bl);
ASSERT_EQ(r, (int)block_size * 2);
expected.append(string(block_size * 2, 'c'));
ASSERT_TRUE(bl_eq(expected, bl));
bl.clear();
expected.clear();
r = store->read(ch, hoid, block_size * 4, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'f'));
ASSERT_TRUE(bl_eq(expected, bl));
bl.clear();
expected.clear();
r = store->read(ch, hoid, block_size * 5, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'd'));
ASSERT_TRUE(bl_eq(expected, bl));
bl.clear();
expected.clear();
r = store->read(ch, hoid, block_size * 5, block_size * 3, bl);
ASSERT_EQ(r, (int)block_size * 3);
expected.append(string(block_size, 'd'));
expected.append(string(block_size * 2, 'e'));
ASSERT_TRUE(bl_eq(expected, bl));
}
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 1u);
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
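// The ZeroBlockDetection* tests verify that, with
// bluestore_zero_block_detection enabled, all-zero writes are skipped
// (tracked by the *_skipped perf counters) yet still read back as zeros,
// for both the small- and big-write paths.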
TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionSmallAppend) {
CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) {
GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping";
}
size_t block_size = 65536;
StartDeferred(block_size);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// [1] append zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append_zero(4096);
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_small), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 4096u);
bufferlist in;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(4096, r);
ASSERT_TRUE(in.is_zero());
}
{
// [2] append non-zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(4096, 'c'));
t.write(cid, hoid, 4096, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_small), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u*2);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 4096u);
bufferlist in, _exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(4096 * 2, r);
_exp.append_zero(4096);
_exp.append(bl);
ASSERT_TRUE(bl_eq(_exp, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionSmallOverwrite) {
CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) {
GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping";
}
if (smr) {
GTEST_SKIP() << "smr, skipping";
}
size_t block_size = 65536;
StartDeferred(block_size);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// {setting up the scenario} append non-zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(4096, 'c'));
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_small), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 0u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 0u);
bufferlist in, _exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(4096, r);
_exp.append(bl);
ASSERT_TRUE(bl_eq(_exp, in));
}
{
// [1] overwrite non-zeros with zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append_zero(4096);
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_small), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u*2);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 0u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 0u);
bufferlist in;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(4096, r);
ASSERT_TRUE(in.is_zero());
}
{
// [2] overwrite zeros with non-zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(4096, 'c'));
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_small), 3u);
ASSERT_EQ(logger->get(l_bluestore_write_small_bytes), 4096u*3);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped), 0u);
ASSERT_EQ(logger->get(l_bluestore_write_small_skipped_bytes), 0u);
bufferlist in, _exp;
r = store->read(ch, hoid, 0, 0x4000, in);
ASSERT_EQ(4096, r);
_exp.append(bl);
ASSERT_TRUE(bl_eq(_exp, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionBigAppend) {
CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) {
GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping";
}
size_t block_size = 4096;
StartDeferred(block_size);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// [1] append zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append_zero(block_size * 2);
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*2);
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 0u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2);
bufferlist in;
r = store->read(ch, hoid, 0, block_size * 8, in);
ASSERT_EQ(block_size * 2, r);
ASSERT_TRUE(in.is_zero());
}
{
// [2] append non-zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'c'));
t.write(cid, hoid, block_size * 2, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*4);
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2);
bufferlist in, _exp;
r = store->read(ch, hoid, 0, block_size * 8, in);
ASSERT_EQ(block_size * 4, r);
_exp.append_zero(block_size * 2);
_exp.append(bl);
ASSERT_TRUE(bl_eq(_exp, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, ZeroBlockDetectionBigOverwrite) {
CephContext *cct = (new CephContext(CEPH_ENTITY_TYPE_CLIENT))->get();
if (string(GetParam()) != "bluestore" || !cct->_conf->bluestore_zero_block_detection) {
GTEST_SKIP() << "not bluestore or bluestore_zero_block_detection=false, skipping";
}
if (smr) {
GTEST_SKIP() << "smr, skipping";
}
size_t block_size = 4096;
StartDeferred(block_size);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// {setting up the scenario} append non-zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'c'));
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*2);
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 0u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 0u);
bufferlist in, _exp;
r = store->read(ch, hoid, 0, block_size * 8, in);
ASSERT_EQ(block_size * 2, r);
_exp.append(bl);
ASSERT_TRUE(bl_eq(_exp, in));
}
{
// [1] overwrite non-zeros with zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append_zero(block_size * 2);
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*4);
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2);
bufferlist in;
r = store->read(ch, hoid, 0, block_size * 8, in);
ASSERT_EQ(block_size * 2, r);
ASSERT_TRUE(in.is_zero());
}
{
// [2] overwrite zeros with non-zeros
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'c'));
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 3u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), 4096u*6);
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_skipped_bytes), 4096u*2);
bufferlist in, _exp;
r = store->read(ch, hoid, 0, block_size * 8, in);
ASSERT_EQ(block_size * 2, r);
_exp.append(bl);
ASSERT_TRUE(bl_eq(_exp, in));
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
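// Big overwrites below bluestore_prefer_deferred_size (64K here) should
// take the deferred path, while full-blob overwrites bypass it; blob and
// extent perf counters confirm that blobs are reused in place.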
TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred" << std::endl;
return;
}
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_max_blob_size", "131072");
SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
ghobject_t hoid2(hobject_t("test2", "", CEPH_NOSNAP, 0, -1, ""));
PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, bl2;
bl.append(std::string(block_size * 2, 'c'));
bl2.append(std::string(block_size * 3, 'd'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
t.set_alloc_hint(cid, hoid2, block_size * 4, block_size * 4,
CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ);
t.write(cid, hoid2, 0, bl2.length(), bl2, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 2u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 5);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 5);
}
// overwrite at the beginning, 4K alignment
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'b'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 3u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid, block_size, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'c'));
ASSERT_TRUE(bl_eq(expected, bl));
}
// overwrite at the end, 4K alignment
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'g'));
t.write(cid, hoid, block_size, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 4u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 2u);
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid, block_size, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'g'));
ASSERT_TRUE(bl_eq(expected, bl));
}
// overwrite at 4K, 12K alignment
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'e'));
t.write(cid, hoid2, block_size , bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 5u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 3u);
  // make sure the deferred writes have been submitted,
  // then run all the checks again
sleep(g_conf().get_val<double>("bluestore_max_defer_interval") + 2);
ASSERT_EQ(logger->get(l_bluestore_write_big), 5u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 3u);
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid, block_size, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'g'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid2, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'd'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid2, block_size, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'e'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid2, block_size * 2, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'd'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 5);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 5);
}
ASSERT_EQ(logger->get(l_bluestore_blobs), 2u);
ASSERT_EQ(logger->get(l_bluestore_extents), 2u);
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'f'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 6u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 3u);
{
ObjectStore::Transaction t;
t.zero(cid, hoid, 0, 100);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, 100, bl);
ASSERT_EQ(r, (int)100);
expected.append(string(100, 0));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid, 100, block_size * 2 - 100, bl);
ASSERT_EQ(r, (int)block_size * 2 - 100);
expected.append(string(block_size * 2 - 100, 'f'));
ASSERT_TRUE(bl_eq(expected, bl));
}
sleep(2);
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 2 - 100);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 2);
}
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 1u);
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'g'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 7u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 4u);
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'g'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
bufferlist bl, expected;
r = store->read(ch, hoid, block_size, block_size, bl);
ASSERT_EQ(r, (int)block_size);
expected.append(string(block_size, 'f'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 2);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 2);
}
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 1u);
  // check that a full overwrite bypasses the deferred path
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'h'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 8u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 4u);
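  // The full-object rewrite above took the direct big-write path to a fresh
  // allocation: l_bluestore_write_big advanced while the deferred counter
  // did not, confirming that full overwrites bypass deferred IO.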
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size * 2, bl);
ASSERT_EQ(r, (int)block_size * 2);
expected.append(string(block_size * 2, 'h'));
ASSERT_TRUE(bl_eq(expected, bl));
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 2);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 2);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 32, 'a'));
// this will create two 128K aligned blobs
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
t.write(cid, hoid, bl.length(), bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 10u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 4u);
  // check whether an overwrite (smaller than prefer_deferred_size) partially
  // overlapping two adjacent blobs goes deferred
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 3, 'b'));
t.write(cid, hoid, 0x20000 - block_size, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), 11u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 6u);
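  // The 12K overwrite straddles the 128K blob boundary and is split into two
  // chunks, each small enough to defer, hence the deferred counter grew by 2
  // (one deferred op per overlapped blob).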
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, 0x20000 - block_size, bl);
ASSERT_EQ(r, 0x20000 - block_size);
expected.append(string(r, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
expected.clear();
r = store->read(ch, hoid, 0x20000 - block_size, block_size * 3, bl);
ASSERT_EQ(r, 3 * block_size);
expected.append(string(r, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
expected.clear();
r = store->read(ch, hoid, 0x20000 + 2 * block_size, block_size * 30, bl);
ASSERT_EQ(r, 30 * block_size);
expected.append(string(r, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
expected.clear();
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 64);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 64);
}
  // check whether an overwrite (larger than prefer_deferred_size) partially
  // overlapping two adjacent blobs goes deferred
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 30, 'c'));
t.write(cid, hoid, 0x10000 + block_size, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
sleep(2);
ASSERT_EQ(logger->get(l_bluestore_write_big), 12u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 8u);
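  // The 120K overwrite splits at the 0x20000 blob boundary into two ~60K
  // chunks; assuming prefer_deferred_size is 64K here (as the comment below
  // implies), each chunk qualifies, so both go deferred (+2) even though the
  // total exceeds the threshold.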
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, 0x11000, bl);
ASSERT_EQ(r, 0x11000);
expected.append(string(r, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
expected.clear();
r = store->read(ch, hoid, 0x11000, block_size * 30, bl);
ASSERT_EQ(r, block_size * 30);
expected.append(string(r, 'c'));
ASSERT_TRUE(bl_eq(expected, bl));
expected.clear();
r = store->read(ch, hoid, block_size * 47, 0x10000 + block_size, bl);
ASSERT_EQ(r, 0x10000 + block_size);
expected.append(string(r, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
expected.clear();
}
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 64);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 64);
}
logger->reset();
  // check whether an overwrite (prefer_deferred_size < 120K < 2 * prefer_deferred_size)
  // partially overlapping two adjacent blobs goes partly deferred
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 30, 'e'));
t.write(cid, hoid, 0x20000 - block_size, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
sleep(2);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 1u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), block_size);
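  // Only the head chunk [0x1f000, 0x20000) falls within the first blob and
  // is small enough to defer; the remaining ~116K lands in the second blob,
  // exceeds prefer_deferred_size and is written directly, hence exactly
  // block_size deferred bytes.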
{
struct store_statfs_t statfs;
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_stored, (unsigned)block_size * 64);
ASSERT_LE(statfs.allocated, (unsigned)block_size * 64);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid2);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite2) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred" << std::endl;
return;
}
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_max_blob_size", "65536");
SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(128 * 1024, 'c'));
t.write(cid, hoid, 0x1000, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 3u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 0u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 0);
}
logger->reset();
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(128 * 1024, 'c'));
t.write(cid, hoid, 0x2000, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 3u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 1u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 57344);
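    // A plausible reading of 57344 (0xe000): the first write was split into
    // blobs at 64K boundaries starting from offset 0x1000, so only the head
    // chunk [0x2000, 0x10000) of this overwrite is small enough to defer.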
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, DeferredOnBigOverwrite3) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred" << std::endl;
return;
}
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_max_blob_size", "65536");
SetVal(g_conf(), "bluestore_prefer_deferred_size", "65536");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
PerfCounters* logger = const_cast<PerfCounters*>(store->get_perf_counters());
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
logger->reset();
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(4096 * 1024, 'c'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 64u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 0u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 0u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 0u);
}
logger->reset();
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(4096 * 1024, 'c'));
t.write(cid, hoid, 0x1000, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
ASSERT_EQ(logger->get(l_bluestore_write_big), 1u);
ASSERT_EQ(logger->get(l_bluestore_write_big_bytes), bl.length());
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 65u);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), 1u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), 1u);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes), 61440);
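    // 61440 (0xf000) corresponds to the unaligned head [0x1000, 0x10000)
    // partially overwriting the first existing 64K blob; the other chunks
    // are full-blob overwrites or fresh allocations and go direct.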
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, DeferredDifferentChunks) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred" << std::endl;
return;
}
size_t alloc_size = 4096;
size_t large_object_size = 1 * 1024 * 1024;
size_t prefer_deferred_size = 65536;
StartDeferred(alloc_size);
SetVal(g_conf(), "bluestore_max_blob_size", "131072");
SetVal(g_conf(), "bluestore_prefer_deferred_size",
stringify(prefer_deferred_size).c_str());
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
const PerfCounters* logger = store->get_perf_counters();
size_t exp_bluestore_write_big = 0;
size_t exp_bluestore_write_big_deferred = 0;
ObjectStore::CollectionHandle ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (size_t expected_write_size = 1024; expected_write_size <= prefer_deferred_size; expected_write_size *= 2) {
//create object with hint
ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "", CEPH_NOSNAP, 0, -1, ""));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.set_alloc_hint(cid, hoid, large_object_size, expected_write_size,
CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ |
CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
//fill object
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(large_object_size, 'h'));
t.write(cid, hoid, 0, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
++exp_bluestore_write_big;
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), exp_bluestore_write_big);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), exp_bluestore_write_big_deferred);
// check whether write will properly use deferred
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(alloc_size + 2, 'z'));
t.write(cid, hoid, large_object_size - 2 * alloc_size - 1, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
++exp_bluestore_write_big;
if (expected_write_size < prefer_deferred_size)
++exp_bluestore_write_big_deferred;
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_write_big), exp_bluestore_write_big);
ASSERT_EQ(logger->get(l_bluestore_write_big_deferred), exp_bluestore_write_big_deferred);
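    // Note the strict comparison above: per the test's expectation, a hinted
    // chunk size equal to prefer_deferred_size is written directly, i.e. the
    // threshold acts as an exclusive upper bound for deferring.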
}
ch.reset(nullptr);
CloseAndReopen();
ch = store->open_collection(cid);
// check values
for (size_t expected_write_size = 1024; expected_write_size <= 65536; expected_write_size *= 2) {
ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "", CEPH_NOSNAP, 0, -1, ""));
{
bufferlist bl, expected;
r = store->read(ch, hoid, 0, large_object_size, bl);
ASSERT_EQ(r, large_object_size);
expected.append(string(large_object_size - 2 * alloc_size - 1, 'h'));
expected.append(string(alloc_size + 2, 'z'));
expected.append(string(alloc_size - 1, 'h'));
ASSERT_TRUE(bl_eq(expected, bl));
}
}
{
ObjectStore::Transaction t;
for (size_t expected_write_size = 1024; expected_write_size <= 65536; expected_write_size *= 2) {
ghobject_t hoid(hobject_t("test-"+to_string(expected_write_size), "", CEPH_NOSNAP, 0, -1, ""));
t.remove(cid, hoid);
}
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, BlobReuseOnOverwriteReverse) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no overwrite" << std::endl;
return;
}
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_max_blob_size", "65536");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));
auto ch = store->create_new_collection(cid);
const PerfCounters* logger = store->get_perf_counters();
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size * 2, 'a'));
t.write(cid, hoid, block_size * 10, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// prepend existing
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'b'));
t.write(cid, hoid, block_size * 9, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that refreshes
    // perf counters. Additionally we need to wait some time for the mempool
    // thread to update stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, block_size * 9, block_size * 2, bl);
ASSERT_EQ(r, (int)block_size * 2);
expected.append(string(block_size, 'b'));
expected.append(string(block_size, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 1u);
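    // Prepending a block immediately before the existing extent reuses the
    // same blob and merges into one contiguous extent, hence both counters
    // stay at 1.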
}
{
// prepend existing with a gap
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'c'));
t.write(cid, hoid, block_size * 7, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that refreshes
    // perf counters. Additionally we need to wait some time for the mempool
    // thread to update stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, block_size * 7, block_size * 3, bl);
ASSERT_EQ(r, (int)block_size * 3);
expected.append(string(block_size, 'c'));
expected.append(string(block_size, 0));
expected.append(string(block_size, 'b'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 2u);
}
{
// append after existing with a gap
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'd'));
t.write(cid, hoid, block_size * 13, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that refreshes
    // perf counters. Additionally we need to wait some time for the mempool
    // thread to update stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, block_size * 11, block_size * 3, bl);
ASSERT_EQ(r, (int)block_size * 3);
expected.append(string(block_size, 'a'));
expected.append(string(block_size, 0));
expected.append(string(block_size, 'd'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 3u);
}
{
// append twice to the next max_blob slot
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'e'));
t.write(cid, hoid, block_size * 17, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
t.write(cid, hoid, block_size * 19, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that refreshes
    // perf counters. Additionally we need to wait some time for the mempool
    // thread to update stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, block_size * 17, block_size * 3, bl);
ASSERT_EQ(r, (int)block_size * 3);
expected.append(string(block_size, 'e'));
expected.append(string(block_size, 0));
expected.append(string(block_size, 'e'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 2u);
ASSERT_EQ(logger->get(l_bluestore_extents), 5u);
}
{
// fill gaps at the second slot
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'f'));
t.write(cid, hoid, block_size * 16, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
t.write(cid, hoid, block_size * 18, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that refreshes
    // perf counters. Additionally we need to wait some time for the mempool
    // thread to update stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, block_size * 16, block_size * 4, bl);
ASSERT_EQ(r, (int)block_size * 4);
expected.append(string(block_size, 'f'));
expected.append(string(block_size, 'e'));
expected.append(string(block_size, 'f'));
expected.append(string(block_size, 'e'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 2u);
ASSERT_EQ(logger->get(l_bluestore_extents), 4u);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, BlobReuseOnSmallOverwrite) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no overwrite" << std::endl;
return;
}
size_t block_size = 4096;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_max_blob_size", "65536");
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid(hobject_t("test_hint", "", CEPH_NOSNAP, 0, -1, ""));
const PerfCounters* logger = store->get_perf_counters();
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(block_size, 'a'));
t.write(cid, hoid, 0, bl.length(), bl, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
t.write(cid, hoid, block_size * 2, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
// write small into the gap
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(3, 'b'));
t.write(cid, hoid, block_size + 1, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_WILLNEED);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // We need to issue a read to trigger the cache stat update that refreshes
    // perf counters. Additionally we need to wait some time for the mempool
    // thread to update stats.
sleep(1);
bufferlist bl, expected;
r = store->read(ch, hoid, 0, block_size * 3, bl);
ASSERT_EQ(r, (int)block_size * 3);
expected.append(string(block_size, 'a'));
expected.append(string(1, 0));
expected.append(string(3, 'b'));
expected.append(string(block_size - 4, 0));
expected.append(string(block_size, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
ASSERT_EQ(logger->get(l_bluestore_blobs), 1u);
ASSERT_EQ(logger->get(l_bluestore_extents), 3u);
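    // The tiny write was absorbed into the existing blob (blob count stays
    // at 1); it merely added a middle extent between the two original ones,
    // giving 3 extents in total.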
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
// The test case reproduces an issue where a write lands in the zero space
// between extents sharing the same spanning blob while the shard map is
// unloaded. The second extent might be filled with zeros due to a wrong
// result returned by the has_any_extents() call in do_write_small, which in
// turn is caused by an incompletely loaded extent map.
TEST_P(StoreTestSpecificAUSize, SmallWriteOnShardedExtents) {
if (string(GetParam()) != "bluestore")
return;
size_t block_size = 0x10000;
StartDeferred(block_size);
SetVal(g_conf(), "bluestore_csum_type", "xxhash64");
SetVal(g_conf(), "bluestore_max_blob_size", "524288"); // for sure
g_conf().apply_changes(nullptr);
int r;
coll_t cid;
ghobject_t hoid1(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
    // do some tricks to get sharded extent maps / spanning blobs
ObjectStore::Transaction t;
bufferlist bl, bl2;
bl.append(std::string(0x80000, 'a'));
t.write(cid, hoid1, 0, bl.length(), bl, 0);
    t.zero(cid, hoid1, 0x719e0, 0x75b0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
bl2.append(std::string(0x70000, 'b'));
t.write(cid, hoid1, 0, bl2.length(), bl2, 0);
t.zero(cid, hoid1, 0, 0x50000);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ch.reset();
store->umount();
store->mount();
ch = store->open_collection(cid);
{
// do a write to zero space in between some extents sharing the same blob
ObjectStore::Transaction t;
bufferlist bl, bl2;
bl.append(std::string(0x6520, 'c'));
t.write(cid, hoid1, 0x71c00, bl.length(), bl, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, expected;
r = store->read(ch, hoid1, 0x70000, 0x9c00, bl);
ASSERT_EQ(r, (int)0x9c00);
expected.append(string(0x19e0, 'a'));
expected.append(string(0x220, 0));
expected.append(string(0x6520, 'c'));
expected.append(string(0xe70, 0));
expected.append(string(0xc70, 'a'));
ASSERT_TRUE(bl_eq(expected, bl));
bl.clear();
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid1);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, ReproBug56488Test) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: no deferred" << std::endl;
return;
}
size_t alloc_size = 65536;
size_t write_size = 4096;
SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", stringify(1 << 30).c_str());
g_conf().apply_changes(nullptr);
StartDeferred(alloc_size);
int r;
coll_t cid;
const PerfCounters* logger = store->get_perf_counters();
ObjectStore::CollectionHandle ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
auto issued_dw = logger->get(l_bluestore_issued_deferred_writes);
auto issued_dw_bytes = logger->get(l_bluestore_issued_deferred_write_bytes);
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(write_size, 'x'));
t.write(cid, hoid, 0, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), issued_dw + 1);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes),
issued_dw_bytes + write_size);
}
{
ghobject_t hoid(hobject_t("test-a", "", CEPH_NOSNAP, 0, -1, ""));
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
auto issued_dw = logger->get(l_bluestore_issued_deferred_writes);
auto issued_dw_bytes = logger->get(l_bluestore_issued_deferred_write_bytes);
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append(std::string(write_size * 2, 'x'));
t.write(cid, hoid, alloc_size - write_size, bl.length(), bl,
CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_writes), issued_dw + 2);
ASSERT_EQ(logger->get(l_bluestore_issued_deferred_write_bytes),
issued_dw_bytes + write_size * 2);
}
{
ObjectStore::Transaction t;
ghobject_t hoid(hobject_t("test", "", CEPH_NOSNAP, 0, -1, ""));
t.remove(cid, hoid);
ghobject_t hoid_a(hobject_t("test-a", "", CEPH_NOSNAP, 0, -1, ""));
t.remove(cid, hoid_a);
t.remove_collection(cid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
#endif //#if defined(WITH_BLUESTORE)
TEST_P(StoreTest, KVDBHistogramTest) {
if (string(GetParam()) != "bluestore")
return;
int NUM_OBJS = 200;
int r = 0;
coll_t cid;
string base("testobj.");
bufferlist a;
bufferptr ap(0x1000);
memset(ap.c_str(), 'a', 0x1000);
a.append(ap);
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (int i = 0; i < NUM_OBJS; ++i) {
ObjectStore::Transaction t;
char buf[100];
snprintf(buf, sizeof(buf), "%d", i);
ghobject_t hoid(hobject_t(sobject_t(base + string(buf), CEPH_NOSNAP)));
t.write(cid, hoid, 0, 0x1000, a);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::unique_ptr<Formatter> f(Formatter::create("store_test", "json-pretty", "json-pretty"));
store->generate_db_histogram(f.get());
f->flush(cout);
cout << std::endl;
}
TEST_P(StoreTest, KVDBStatsTest) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "rocksdb_perf", "true");
SetVal(g_conf(), "rocksdb_collect_compaction_stats", "true");
SetVal(g_conf(), "rocksdb_collect_extended_stats","true");
SetVal(g_conf(), "rocksdb_collect_memory_stats","true");
g_ceph_context->_conf.apply_changes(nullptr);
int r = store->umount();
ASSERT_EQ(r, 0);
r = store->mount(); //to force rocksdb stats
ASSERT_EQ(r, 0);
int NUM_OBJS = 200;
coll_t cid;
string base("testobj.");
bufferlist a;
bufferptr ap(0x1000);
memset(ap.c_str(), 'a', 0x1000);
a.append(ap);
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
for (int i = 0; i < NUM_OBJS; ++i) {
ObjectStore::Transaction t;
char buf[100];
snprintf(buf, sizeof(buf), "%d", i);
ghobject_t hoid(hobject_t(sobject_t(base + string(buf), CEPH_NOSNAP)));
t.write(cid, hoid, 0, 0x1000, a);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::unique_ptr<Formatter> f(Formatter::create("store_test", "json-pretty", "json-pretty"));
store->get_db_statistics(f.get());
f->flush(cout);
cout << std::endl;
}
#if defined(WITH_BLUESTORE)
TEST_P(StoreTestSpecificAUSize, garbageCollection) {
int r;
coll_t cid;
int buf_len = 256 * 1024;
int overlap_offset = 64 * 1024;
int write_offset = buf_len;
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: assertions about allocations need to be adjusted" << std::endl;
return;
}
#define WRITE_AT(offset, _length) {\
ObjectStore::Transaction t;\
if ((uint64_t)_length != bl.length()) { \
buffer::ptr p(bl.c_str(), _length);\
bufferlist bl_tmp;\
bl_tmp.push_back(p);\
t.write(cid, hoid, offset, bl_tmp.length(), bl_tmp);\
} else {\
t.write(cid, hoid, offset, bl.length(), bl);\
}\
r = queue_transaction(store, ch, std::move(t));\
ASSERT_EQ(r, 0);\
}
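  // WRITE_AT writes the first _length bytes of the shared 256K pattern
  // buffer 'bl' at 'offset', trimming the bufferlist when _length is shorter
  // than bl.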
StartDeferred(65536);
SetVal(g_conf(), "bluestore_compression_max_blob_size", "524288");
SetVal(g_conf(), "bluestore_compression_min_blob_size", "262144");
SetVal(g_conf(), "bluestore_max_blob_size", "524288");
SetVal(g_conf(), "bluestore_compression_mode", "force");
g_conf().apply_changes(nullptr);
auto ch = store->create_new_collection(cid);
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
{
bufferlist in;
r = store->read(ch, hoid, 0, 5, in);
ASSERT_EQ(-ENOENT, r);
}
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
std::string data;
data.resize(buf_len);
{
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
bufferlist bl;
for(size_t i = 0; i < data.size(); i++)
data[i] = i % 256;
bl.append(data);
{
struct store_statfs_t statfs;
WRITE_AT(0, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
}
{
struct store_statfs_t statfs;
WRITE_AT(write_offset - 2 * overlap_offset, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0u);
}
{
struct store_statfs_t statfs;
WRITE_AT(write_offset - overlap_offset, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x10000u);
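      // the third overlapping write makes garbage collection worthwhile:
      // 0x10000 bytes of the earlier compressed blobs get collected and
      // rewritten, which is accounted in l_bluestore_gc_merged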
}
{
struct store_statfs_t statfs;
WRITE_AT(write_offset - 3 * overlap_offset, buf_len);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x20000u);
}
{
struct store_statfs_t statfs;
WRITE_AT(write_offset + 1, overlap_offset-1);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x20000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x20000u);
}
{
struct store_statfs_t statfs;
WRITE_AT(write_offset + 1, overlap_offset);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x3ffffu);
}
{
struct store_statfs_t statfs;
WRITE_AT(0, buf_len-1);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40001u);
}
SetVal(g_conf(), "bluestore_gc_enable_total_threshold", "1"); //forbid GC when saving = 0
{
struct store_statfs_t statfs;
WRITE_AT(1, overlap_offset-2);
WRITE_AT(overlap_offset * 2 + 1, overlap_offset-2);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x10000);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40001u);
}
{
struct store_statfs_t statfs;
WRITE_AT(overlap_offset + 1, overlap_offset-2);
int r = store->statfs(&statfs);
ASSERT_EQ(r, 0);
ASSERT_EQ(statfs.data_compressed_allocated, 0x0);
const PerfCounters* counters = store->get_perf_counters();
ASSERT_EQ(counters->get(l_bluestore_gc_merged), 0x40007u);
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
cerr << "Cleaning" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
}
TEST_P(StoreTestSpecificAUSize, fsckOnUnalignedDevice) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_block_size",
stringify(0x280005000).c_str()); //10 Gb + 4K
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
StartDeferred(0x4000);
store->umount();
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
store->mount();
}
TEST_P(StoreTestSpecificAUSize, fsckOnUnalignedDevice2) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_block_size",
stringify(0x280005000).c_str()); //10 Gb + 20K
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
StartDeferred(0x1000);
store->umount();
ASSERT_EQ(store->fsck(false), 0); // do fsck explicitly
store->mount();
}
namespace {
ghobject_t make_object(const char* name, int64_t pool) {
sobject_t soid{name, CEPH_NOSNAP};
uint32_t hash = std::hash<sobject_t>{}(soid);
return ghobject_t{hobject_t{soid, "", hash, pool, ""}};
}
}
TEST_P(StoreTestSpecificAUSize, BluestoreRepairTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "TODO: repair mismatched write pointer (+ dead bytes mismatch)" << std::endl;
return;
}
const size_t offs_base = 65536 / 2;
// Now we need standalone db to pass "false free fix" section below
// Due to new BlueFS allocation model (single allocator for main device)
// it might cause "false free" blob overwrite by BlueFS/DB stuff
// and hence fail the test case and corrupt data.
//
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", "4294967296");
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
SetVal(g_conf(), "bluestore_max_blob_size",
stringify(2 * offs_base).c_str());
SetVal(g_conf(), "bluestore_extent_map_shard_max_size", "12000");
StartDeferred(0x10000);
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
// fill the store with some data
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
ghobject_t hoid = make_object("Object 1", pool);
ghobject_t hoid_dup = make_object("Object 1(dup)", pool);
ghobject_t hoid2 = make_object("Object 2", pool);
ghobject_t hoid_cloned = hoid2;
hoid_cloned.hobj.snap = 1;
ghobject_t hoid3 = make_object("Object 3", pool);
ghobject_t hoid3_cloned = hoid3;
hoid3_cloned.hobj.snap = 1;
bufferlist bl;
bl.append("1234512345");
int r;
const size_t repeats = 16;
{
auto ch = store->create_new_collection(cid);
cerr << "create collection + write" << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid, 0);
for( auto i = 0ul; i < repeats; ++i ) {
t.write(cid, hoid, i * offs_base, bl.length(), bl);
t.write(cid, hoid_dup, i * offs_base, bl.length(), bl);
}
for( auto i = 0ul; i < repeats; ++i ) {
t.write(cid, hoid2, i * offs_base, bl.length(), bl);
}
t.clone(cid, hoid2, hoid_cloned);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bstore->umount();
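  // Each section below follows the same pattern: mount, inject a specific
  // corruption, umount, check that fsck reports the expected error count,
  // then repair and verify that fsck comes back clean.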
bool err_was_injected = false;
//////////// leaked pextent fix ////////////
cerr << "fix leaked pextents" << std::endl;
ASSERT_EQ(bstore->fsck(false), 0);
ASSERT_EQ(bstore->repair(false), 0);
bstore->mount();
if (!bstore->has_null_manager()) {
bstore->inject_leaked(0x30000);
err_was_injected = true;
}
bstore->umount();
if (err_was_injected) {
ASSERT_EQ(bstore->fsck(false), 1);
}
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
//////////// false free fix ////////////
cerr << "fix false free pextents" << std::endl;
bstore->mount();
if (!bstore->has_null_manager()) {
bstore->inject_false_free(cid, hoid);
err_was_injected = true;
}
bstore->umount();
if (err_was_injected) {
ASSERT_EQ(bstore->fsck(false), 2);
ASSERT_EQ(bstore->repair(false), 0);
}
ASSERT_EQ(bstore->fsck(false), 0);
///////// undecodable shared blob key / stray shared blob records ///////
bstore->mount();
cerr << "undecodable shared blob key" << std::endl;
bstore->inject_broken_shared_blob_key("undec1",
bufferlist());
bstore->inject_broken_shared_blob_key("undecodable key 2",
bufferlist());
bstore->inject_broken_shared_blob_key("undecodable key 3",
bufferlist());
bstore->umount();
ASSERT_EQ(bstore->fsck(false), 3);
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
cerr << "misreferencing" << std::endl;
bstore->mount();
bstore->inject_misreference(cid, hoid, cid, hoid_dup, 0);
bstore->inject_misreference(cid, hoid, cid, hoid_dup, (offs_base * repeats) / 2);
bstore->inject_misreference(cid, hoid, cid, hoid_dup, offs_base * (repeats -1) );
int expected_errors = bstore->has_null_manager() ? 3 : 6;
bstore->umount();
ASSERT_EQ(bstore->fsck(false), expected_errors);
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(true), 0);
// reproducing issues #21040 & 20983
SetVal(g_conf(), "bluestore_debug_inject_bug21040", "true");
g_ceph_context->_conf.apply_changes(nullptr);
bstore->mount();
cerr << "repro bug #21040" << std::endl;
{
auto ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
bl.append("0123456789012345");
t.write(cid, hoid3, offs_base, bl.length(), bl);
bl.clear();
bl.append('!');
t.write(cid, hoid3, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.clone(cid, hoid3, hoid3_cloned);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bstore->umount();
    // depending on statfs tracking we might or might not see the relevant
    // error, hence error count >= 3
ASSERT_GE(bstore->fsck(false), 3);
ASSERT_LE(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
}
cerr << "Zombie spanning blob" << std::endl;
{
bstore->mount();
ghobject_t hoid4 = make_object("Object 4", pool);
auto ch = store->open_collection(cid);
{
bufferlist bl;
string s(0x1000, 'a');
bl.append(s);
ObjectStore::Transaction t;
for(size_t i = 0; i < 0x10; i++) {
t.write(cid, hoid4, i * bl.length(), bl.length(), bl);
}
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
sleep(5);
{
bstore->inject_zombie_spanning_blob(cid, hoid4, 12345);
bstore->inject_zombie_spanning_blob(cid, hoid4, 23456);
bstore->inject_zombie_spanning_blob(cid, hoid4, 23457);
}
bstore->umount();
ASSERT_EQ(bstore->fsck(false), 1);
ASSERT_LE(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
}
//////////// verify invalid statfs ///////////
cerr << "fix invalid statfs" << std::endl;
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_stats", "true");
SetVal(g_conf(),
"bluestore_debug_inject_allocation_from_file_failure", "1");
store_statfs_t statfs0;
store_statfs_t statfs;
bstore->mount();
ASSERT_EQ(bstore->statfs(&statfs0), 0);
statfs = statfs0;
statfs.allocated += 0x10000;
statfs.data_stored += 0x10000;
ASSERT_FALSE(statfs0 == statfs);
// this enforces global stats usage
bstore->inject_statfs("bluestore_statfs", statfs);
bstore->umount();
  ASSERT_GE(bstore->fsck(false), 1); // global stats mismatch might be omitted
                                     // when NCB restore is applied, hence
                                     // using >= for the error count
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
ASSERT_EQ(bstore->mount(), 0);
ASSERT_EQ(bstore->statfs(&statfs), 0);
  // adjust free/internal meta space so the comparison succeeds
statfs0.available = statfs.available;
statfs0.internal_metadata = statfs.internal_metadata;
ASSERT_EQ(statfs0, statfs);
SetVal(g_conf(),
"bluestore_debug_inject_allocation_from_file_failure", "0");
cerr << "fix invalid statfs2" << std::endl;
ASSERT_EQ(bstore->statfs(&statfs0), 0);
statfs = statfs0;
statfs.allocated += 0x20000;
statfs.data_stored += 0x20000;
ASSERT_FALSE(statfs0 == statfs);
// this enforces global stats usage
bstore->inject_statfs("bluestore_statfs", statfs);
bstore->umount();
ASSERT_EQ(bstore->fsck(false), 2);
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
ASSERT_EQ(bstore->mount(), 0);
ASSERT_EQ(bstore->statfs(&statfs), 0);
  // adjust free/internal meta space so the comparison succeeds
statfs0.available = statfs.available;
statfs0.internal_metadata = statfs.internal_metadata;
ASSERT_EQ(statfs0, statfs);
cerr << "Completing" << std::endl;
}
TEST_P(StoreTestSpecificAUSize, BluestoreBrokenZombieRepairTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: smr repair is different" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
StartDeferred(0x10000);
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
int r;
cerr << "initializing" << std::endl;
{
const size_t col_count = 16;
const size_t obj_count = 1024;
ObjectStore::CollectionHandle ch[col_count];
ghobject_t hoid[col_count][obj_count];
unique_ptr<coll_t> cid[col_count];
for (size_t i = 0; i < col_count; i++) {
cid[i].reset(new coll_t(spg_t(pg_t(0, i), shard_id_t::NO_SHARD)));
ch[i] = store->create_new_collection(*cid[i]);
for (size_t j = 0; j < obj_count; j++) {
hoid[i][j] = make_object(stringify(j).c_str(), i);
}
}
for (size_t i = 0; i < col_count; i++) {
ObjectStore::Transaction t;
t.create_collection(*cid[i], 0);
r = queue_transaction(store, ch[i], std::move(t));
ASSERT_EQ(r, 0);
}
cerr << "onode preparing" << std::endl;
bufferlist bl;
string s(0x1000, 'a');
bl.append(s);
for (size_t i = 0; i < col_count; i++) {
for (size_t j = 0; j < obj_count; j++) {
ObjectStore::Transaction t;
t.write(*cid[i], hoid[i][j], bl.length(), bl.length(), bl);
r = queue_transaction(store, ch[i], std::move(t));
ASSERT_EQ(r, 0);
}
}
cerr << "Zombie spanning blob injection" << std::endl;
sleep(5);
for (size_t i = 0; i < col_count; i++) {
for (size_t j = 0; j < obj_count; j++) {
bstore->inject_zombie_spanning_blob(*cid[i], hoid[i][j], 12345);
}
}
cerr << "fscking/fixing" << std::endl;
bstore->umount();
ASSERT_EQ(bstore->fsck(false), col_count * obj_count);
ASSERT_LE(bstore->quick_fix(), 0);
ASSERT_EQ(bstore->fsck(false), 0);
}
cerr << "Completing" << std::endl;
bstore->mount();
}
TEST_P(StoreTestSpecificAUSize, BluestoreRepairSharedBlobTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "TODO: repair mismatched write pointer (+ dead bytes mismatch)" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
const size_t block_size = 0x1000;
StartDeferred(block_size);
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
// fill the store with some data
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
ghobject_t hoid = make_object("Object 1", pool);
ghobject_t hoid_cloned = hoid;
hoid_cloned.hobj.snap = 1;
ghobject_t hoid2 = make_object("Object 2", pool);
string s(block_size, 1);
bufferlist bl;
bl.append(s);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
  // check the scenario where a shared blob contains
  // references to extents from two objects which don't overlap
// o1 -> 0x2000~1K
// o2 -> 0x4000~1k
cerr << "introduce 2 non-overlapped extents in a shared blob"
<< std::endl;
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0, bl.length(), bl);
t.write(cid, hoid2, 0, bl.length(), bl); // to make a gap in allocations
t.write(cid, hoid, block_size * 2 , bl.length(), bl);
t.clone(cid, hoid, hoid_cloned);
t.zero(cid, hoid, 0, bl.length());
t.zero(cid, hoid_cloned, block_size * 2, bl.length());
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bstore->umount();
bstore->mount();
{
string key;
_key_encode_u64(1, &key);
bluestore_shared_blob_t sb(1);
sb.ref_map.get(0x2000, block_size);
sb.ref_map.get(0x4000, block_size);
sb.ref_map.get(0x4000, block_size);
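    // the duplicate get() on 0x4000 presumably takes an extra reference,
    // making the injected shared-blob record disagree with the actual
    // extent references and thus detectable by fsck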
bufferlist bl;
encode(sb, bl);
bstore->inject_broken_shared_blob_key(key, bl);
}
bstore->umount();
ASSERT_EQ(bstore->fsck(false), 2);
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
cerr << "Completing" << std::endl;
bstore->mount();
}
TEST_P(StoreTestSpecificAUSize, BluestoreBrokenNoSharedBlobRepairTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: smr repair is different" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_on_umount", "false");
StartDeferred(0x10000);
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
int r;
// initializing
cerr << "initializing" << std::endl;
{
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
ghobject_t hoid = make_object("Object", pool);
ghobject_t hoid_cloned = hoid;
hoid_cloned.hobj.snap = 1;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl;
bl.append("0123456789012345");
t.write(cid, hoid, 0, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.clone(cid, hoid, hoid_cloned);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
// injecting an error and checking
cerr << "injecting" << std::endl;
sleep(3); // need some time for the previous write to land
bstore->inject_no_shared_blob_key();
bstore->inject_stray_shared_blob_key(12345678);
{
cerr << "fscking/fixing" << std::endl;
// we need to check for null-manager before umount()
bool has_null_manager = bstore->has_null_manager();
bstore->umount();
    // depending on the allocation map's source we may or may not observe
    // an additional extent leak detection, hence the adjusted expected
    // value
size_t expected_error_count =
has_null_manager ?
4: // 4 sb ref mismatch errors [+ 1 optional statfs, hence ASSERT_GE]
7; // 4 sb ref mismatch errors + 1 statfs + 1 block leak + 1 non-free
ASSERT_GE(bstore->fsck(false), expected_error_count);
    // repair might report fewer errors than fsck above showed,
    // as some errors, e.g. a statfs mismatch, are implicitly fixed
    // by earlier repair steps before they would be detected...
ASSERT_LE(bstore->repair(false), expected_error_count);
ASSERT_EQ(bstore->fsck(false), 0);
}
cerr << "Completing" << std::endl;
bstore->mount();
}
TEST_P(StoreTest, BluestoreRepairGlobalStats) {
if (string(GetParam()) != "bluestore")
return;
const size_t offs_base = 65536 / 2;
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
// start with global stats
bstore->inject_global_statfs({});
bstore->umount();
SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
bstore->mount();
// fill the store with some data
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
ghobject_t hoid = make_object("Object 1", pool);
ghobject_t hoid_dup = make_object("Object 1(dup)", pool);
ghobject_t hoid2 = make_object("Object 2", pool);
ghobject_t hoid_cloned = hoid2;
hoid_cloned.hobj.snap = 1;
ghobject_t hoid3 = make_object("Object 3", pool);
ghobject_t hoid3_cloned = hoid3;
hoid3_cloned.hobj.snap = 1;
bufferlist bl;
bl.append("1234512345");
int r;
const size_t repeats = 16;
{
auto ch = store->create_new_collection(cid);
cerr << "create collection + write" << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid, 0);
for( auto i = 0ul; i < repeats; ++i ) {
t.write(cid, hoid, i * offs_base, bl.length(), bl);
t.write(cid, hoid_dup, i * offs_base, bl.length(), bl);
}
for( auto i = 0ul; i < repeats; ++i ) {
t.write(cid, hoid2, i * offs_base, bl.length(), bl);
}
t.clone(cid, hoid2, hoid_cloned);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bstore->umount();
  // enable per-pool stats collection, hence causing fsck to fail
cerr << "per-pool statfs" << std::endl;
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_stats", "true");
g_ceph_context->_conf.apply_changes(nullptr);
ASSERT_EQ(bstore->fsck(false), 1);
ASSERT_EQ(bstore->repair(false), 0);
ASSERT_EQ(bstore->fsck(false), 0);
bstore->mount();
}
TEST_P(StoreTest, BluestoreRepairGlobalStatsFixOnMount) {
if (string(GetParam()) != "bluestore")
return;
const size_t offs_base = 65536 / 2;
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
// start with global stats
bstore->inject_global_statfs({});
bstore->umount();
SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
bstore->mount();
// fill the store with some data
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
auto ch = store->create_new_collection(cid);
ghobject_t hoid = make_object("Object 1", pool);
ghobject_t hoid_dup = make_object("Object 1(dup)", pool);
ghobject_t hoid2 = make_object("Object 2", pool);
ghobject_t hoid_cloned = hoid2;
hoid_cloned.hobj.snap = 1;
ghobject_t hoid3 = make_object("Object 3", pool);
ghobject_t hoid3_cloned = hoid3;
hoid3_cloned.hobj.snap = 1;
bufferlist bl;
bl.append("1234512345");
int r;
const size_t repeats = 16;
{
auto ch = store->create_new_collection(cid);
cerr << "create collection + write" << std::endl;
ObjectStore::Transaction t;
t.create_collection(cid, 0);
for( auto i = 0ul; i < repeats; ++i ) {
t.write(cid, hoid, i * offs_base, bl.length(), bl);
t.write(cid, hoid_dup, i * offs_base, bl.length(), bl);
}
for( auto i = 0ul; i < repeats; ++i ) {
t.write(cid, hoid2, i * offs_base, bl.length(), bl);
}
t.clone(cid, hoid2, hoid_cloned);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bstore->umount();
  // enable per-pool stats collection, hence causing fsck to fail
cerr << "per-pool statfs" << std::endl;
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_stats", "true");
g_ceph_context->_conf.apply_changes(nullptr);
ASSERT_EQ(bstore->fsck(false), 1);
SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
bstore->mount();
bstore->umount();
ASSERT_EQ(bstore->fsck(false), 0);
bstore->mount();
}
TEST_P(StoreTest, BluestoreStatistics) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "rocksdb_perf", "true");
SetVal(g_conf(), "rocksdb_collect_compaction_stats", "true");
SetVal(g_conf(), "rocksdb_collect_extended_stats","true");
SetVal(g_conf(), "rocksdb_collect_memory_stats","true");
// disable cache
SetVal(g_conf(), "bluestore_cache_size_ssd", "0");
SetVal(g_conf(), "bluestore_cache_size_hdd", "0");
SetVal(g_conf(), "bluestore_cache_size", "0");
g_ceph_context->_conf.apply_changes(nullptr);
int r = store->umount();
ASSERT_EQ(r, 0);
r = store->mount();
ASSERT_EQ(r, 0);
BlueStore* bstore = NULL;
EXPECT_NO_THROW(bstore = dynamic_cast<BlueStore*> (store.get()));
coll_t cid;
ghobject_t hoid(hobject_t("test_db_statistics", "", CEPH_NOSNAP, 0, 0, ""));
auto ch = bstore->create_new_collection(cid);
bufferlist bl;
bl.append("0123456789abcdefghi");
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, hoid);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "Write object" << std::endl;
r = queue_transaction(bstore, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bufferlist readback;
r = store->read(ch, hoid, 0, bl.length(), readback);
ASSERT_EQ(static_cast<int>(bl.length()), r);
ASSERT_TRUE(bl_eq(bl, readback));
}
std::unique_ptr<Formatter> f(Formatter::create("store_test", "json-pretty", "json-pretty"));
EXPECT_NO_THROW(store->get_db_statistics(f.get()));
f->flush(cout);
cout << std::endl;
}
TEST_P(StoreTest, BluestoreStrayOmapDetection)
{
if (string(GetParam()) != "bluestore")
return;
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
ghobject_t oid = make_object("Object 1", pool);
ghobject_t oid2 = make_object("Object 2", pool);
// fill the store with some data
auto ch = store->create_new_collection(cid);
bufferlist h;
h.append("header");
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oid);
t.omap_setheader(cid, oid, h);
t.touch(cid, oid2);
t.omap_setheader(cid, oid2, h);
int r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// inject stray omap
bstore->inject_stray_omap(123456, "somename");
bstore->umount();
// check we detect injected stray omap..
ASSERT_EQ(bstore->fsck(false), 1);
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
bstore->mount();
}
TEST_P(StoreTest, BluestorePerPoolOmapFixOnMount)
{
if (string(GetParam()) != "bluestore")
return;
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
const uint64_t pool = 555;
coll_t cid(spg_t(pg_t(0, pool), shard_id_t::NO_SHARD));
ghobject_t oid = make_object("Object 1", pool);
ghobject_t oid2 = make_object("Object 2", pool);
// fill the store with some data
auto ch = store->create_new_collection(cid);
map<string, bufferlist> omap;
bufferlist h;
h.append("header");
{
omap["omap_key"].append("omap value");
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, oid);
t.omap_setheader(cid, oid, h);
t.touch(cid, oid2);
t.omap_setheader(cid, oid2, h);
int r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// inject legacy omaps
bstore->inject_legacy_omap();
bstore->inject_legacy_omap(cid, oid);
bstore->inject_legacy_omap(cid, oid2);
bstore->umount();
// check we injected an issue
SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "false");
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
g_ceph_context->_conf.apply_changes(nullptr);
ASSERT_EQ(bstore->fsck(false), 3);
// set autofix and mount
SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
g_ceph_context->_conf.apply_changes(nullptr);
bstore->mount();
bstore->umount();
// check we fixed it..
ASSERT_EQ(bstore->fsck(false), 0);
bstore->mount();
//
// Now repro https://tracker.ceph.com/issues/43824
//
// inject legacy omaps again
bstore->inject_legacy_omap();
bstore->inject_legacy_omap(cid, oid);
bstore->inject_legacy_omap(cid, oid2);
bstore->umount();
// check we injected an issue
SetVal(g_conf(), "bluestore_fsck_quick_fix_on_mount", "true");
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
g_ceph_context->_conf.apply_changes(nullptr);
bstore->mount();
ch = store->open_collection(cid);
{
    // write to an onode, which will partially revert the per-pool
    // omap repair done on mount due to #43824;
    // object removal will then leave stray per-pool omap recs
//
ObjectStore::Transaction t;
bufferlist bl;
bl.append("data");
    // this triggers an onode record update and hence a legacy omap
t.write(cid, oid, 0, bl.length(), bl);
t.remove(cid, oid2); // this will trigger stray per-pool omap
int r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bstore->umount();
// check omap's been fixed.
ASSERT_EQ(bstore->fsck(false), 0); // this will fail without fix for #43824
bstore->mount();
}
class hugepaged_raw;
static bool is_hugepaged(const bufferptr& bp)
{
const auto& ibp =
static_cast<const ceph::buffer_instrumentation::instrumented_bptr&>(bp);
return ibp.is_raw_marked<BlockDevice::hugepaged_raw_marker_t>();
}
// Disabled by default because of the dependency on huge pages, which some
// test environments might not offer without extra configuration.
TEST_P(StoreTestDeferredSetup, DISABLED_BluestoreHugeReads)
{
if (string(GetParam()) != "bluestore") {
return;
}
constexpr static size_t HUGE_BUFFER_SIZE{2_M};
cout << "Configuring huge page pools" << std::endl;
{
SetVal(g_conf(), "bdev_read_preallocated_huge_buffers",
fmt::format("{}=2", HUGE_BUFFER_SIZE).c_str());
SetVal(g_conf(), "bluestore_max_blob_size",
std::to_string(HUGE_BUFFER_SIZE).c_str());
// let's verify the per-IOContext no-cache override
SetVal(g_conf(), "bluestore_default_buffered_read", "true");
g_ceph_context->_conf.apply_changes(nullptr);
}
DeferredSetup();
coll_t cid;
ghobject_t hoid(hobject_t("test_huge_buffers", "", CEPH_NOSNAP, 0, 0, ""));
auto ch = store->create_new_collection(cid);
bufferlist bl;
{
bufferptr bp{HUGE_BUFFER_SIZE};
    // non-zero data! Otherwise deduplication would take place.
::memset(bp.c_str(), 0x42, HUGE_BUFFER_SIZE);
bl.push_back(std::move(bp));
ASSERT_EQ(bl.get_num_buffers(), 1);
ASSERT_EQ(bl.length(), HUGE_BUFFER_SIZE);
}
cout << "Write object" << std::endl;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
t.touch(cid, hoid);
t.write(cid, hoid, 0, bl.length(), bl);
const auto r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
// force cache clear
{
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
ch = store->open_collection(cid);
}
// we want to extend the life-time of all huge paged-backed
// bufferlists to validate the behaviour on pool exhaustion.
bufferlist bl_1_huge, bl_2_huge, bl_3_plain;
cout << "Read object 1st time" << std::endl;
{
const auto r = store->read(ch, hoid, 0, HUGE_BUFFER_SIZE, bl_1_huge);
ASSERT_EQ(static_cast<int>(HUGE_BUFFER_SIZE), r);
ASSERT_TRUE(bl_eq(bl, bl_1_huge));
ASSERT_EQ(bl_1_huge.get_num_buffers(), 1);
ASSERT_TRUE(is_hugepaged(bl_1_huge.front()));
}
cout << "Read object 2nd time" << std::endl;
{
const auto r = store->read(ch, hoid, 0, HUGE_BUFFER_SIZE, bl_2_huge);
ASSERT_EQ(static_cast<int>(HUGE_BUFFER_SIZE), r);
ASSERT_TRUE(bl_eq(bl, bl_2_huge));
ASSERT_EQ(bl_2_huge.get_num_buffers(), 1);
ASSERT_TRUE(is_hugepaged(bl_2_huge.front()));
}
cout << "Read object 3rd time" << std::endl;
{
const auto r = store->read(ch, hoid, 0, HUGE_BUFFER_SIZE, bl_3_plain);
ASSERT_EQ(static_cast<int>(HUGE_BUFFER_SIZE), r);
ASSERT_TRUE(bl_eq(bl, bl_3_plain));
ASSERT_EQ(bl_3_plain.get_num_buffers(), 1);
ASSERT_FALSE(is_hugepaged(bl_3_plain.front()));
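    // with only two preallocated huge buffers configured and bl_1/bl_2 still
    // alive, the pool is exhausted, so this read fell back to a regular
    // (non-hugepaged) allocation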
}
}
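
// A minimal sketch (not used by the test above) of how the
// "bdev_read_preallocated_huge_buffers" value is composed: a
// "<buffer_size>=<count>" pair, mirroring the fmt::format() call in
// DISABLED_BluestoreHugeReads; the helper name is hypothetical.
static inline std::string make_hugepage_pool_spec(size_t buffer_size,
                                                  unsigned count)
{
  return fmt::format("{}={}", buffer_size, count);
}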
TEST_P(StoreTest, SpuriousReadErrorTest) {
if (string(GetParam()) != "bluestore")
return;
int r;
auto logger = store->get_perf_counters();
coll_t cid;
auto ch = store->create_new_collection(cid);
ghobject_t hoid(hobject_t(sobject_t("foo", CEPH_NOSNAP)));
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist test_data;
bufferptr ap(0x2000);
memset(ap.c_str(), 'a', 0x2000);
test_data.append(ap);
{
ObjectStore::Transaction t;
t.write(cid, hoid, 0, 0x2000, test_data);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
// force cache clear
EXPECT_EQ(store->umount(), 0);
EXPECT_EQ(store->mount(), 0);
}
ch = store->open_collection(cid);
cerr << "Injecting CRC error with no retry, expecting EIO" << std::endl;
SetVal(g_conf(), "bluestore_retry_disk_reads", "0");
SetVal(g_conf(), "bluestore_debug_inject_csum_err_probability", "1");
g_ceph_context->_conf.apply_changes(nullptr);
{
bufferlist in;
r = store->read(ch, hoid, 0, 0x2000, in, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
ASSERT_EQ(-EIO, r);
ASSERT_EQ(logger->get(l_bluestore_read_eio), 1u);
ASSERT_EQ(logger->get(l_bluestore_reads_with_retries), 0u);
}
cerr << "Injecting CRC error with retries, expecting success after several retries" << std::endl;
SetVal(g_conf(), "bluestore_retry_disk_reads", "255");
SetVal(g_conf(), "bluestore_debug_inject_csum_err_probability", "0.8");
/**
   * Probabilistic test: 25 reads, each with an 80% chance of failing per attempt and 255 retries
* Probability of at least one retried read: 1 - (0.2 ** 25) = 100% - 3e-18
* Probability of a random test failure: 1 - ((1 - (0.8 ** 255)) ** 25) ~= 5e-24
*/
g_ceph_context->_conf.apply_changes(nullptr);
{
for (int i = 0; i < 25; ++i) {
bufferlist in;
r = store->read(ch, hoid, 0, 0x2000, in, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE);
ASSERT_EQ(0x2000, r);
ASSERT_TRUE(bl_eq(test_data, in));
}
ASSERT_GE(logger->get(l_bluestore_reads_with_retries), 1u);
}
}
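
// Worked arithmetic behind the probabilistic bound above (a sketch, not
// used by the test): with per-attempt failure probability p, a single
// read only returns EIO after failing every one of its attempts, and the
// test only fails if at least one of the `reads` reads does so.
static inline double spurious_test_failure_probability(double p,
                                                       int attempts,
                                                       int reads)
{
  double single = 1.0;
  for (int i = 0; i < attempts; ++i)
    single *= p;               // p^attempts: one read never succeeds
  double all_ok = 1.0;
  for (int i = 0; i < reads; ++i)
    all_ok *= (1.0 - single);  // every read eventually succeeds
  return 1.0 - all_ok;         // at least one read fails for good
}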
TEST_P(StoreTest, mergeRegionTest) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_fsck_on_mount", "true");
SetVal(g_conf(), "bluestore_fsck_on_umount", "true");
SetVal(g_conf(), "bdev_debug_inflight_ios", "true");
g_ceph_context->_conf.apply_changes(nullptr);
uint32_t chunk_size = g_ceph_context->_conf->bdev_block_size;
int r = -1;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl5;
bl5.append("abcde");
uint64_t offset = 0;
{ // 1. same region
ObjectStore::Transaction t;
t.write(cid, hoid, offset, 5, bl5);
t.write(cid, hoid, 0xa + offset, 5, bl5);
t.write(cid, hoid, 0x14 + offset, 5, bl5);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{ // 2. adjacent regions
ObjectStore::Transaction t;
offset = chunk_size;
t.write(cid, hoid, offset, 5, bl5);
t.write(cid, hoid, offset + chunk_size + 3, 5, bl5);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{ // 3. front merge
ObjectStore::Transaction t;
offset = chunk_size * 2;
t.write(cid, hoid, offset, 5, bl5);
t.write(cid, hoid, offset + chunk_size - 2, 5, bl5);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{ // 4. back merge
ObjectStore::Transaction t;
bufferlist blc2;
blc2.append_zero(chunk_size + 2);
offset = chunk_size * 3;
t.write(cid, hoid, offset, chunk_size + 2, blc2);
t.write(cid, hoid, offset + chunk_size + 3, 5, bl5);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{ // 5. overlapping
ObjectStore::Transaction t;
uint64_t final_len = 0;
offset = chunk_size * 10;
bufferlist bl2c2;
bl2c2.append_zero(chunk_size * 2);
t.write(cid, hoid, offset + chunk_size * 3 - 3, chunk_size * 2, bl2c2);
bl2c2.append_zero(2);
t.write(cid, hoid, offset + chunk_size - 2, chunk_size * 2 + 2, bl2c2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
final_len = (offset + chunk_size * 3 - 3) + (chunk_size * 2);
bufferlist bl;
r = store->read(ch, hoid, 0, final_len, bl);
ASSERT_EQ(final_len, static_cast<uint64_t>(r));
}
}
TEST_P(StoreTest, FixSMRWritePointer) {
if(string(GetParam()) != "bluestore")
return;
if (!smr)
return;
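  // Scenario (inferred from the test body, not stated upstream): write to
  // the raw device behind BlueStore's back so a zone write pointer moves,
  // then remount; BlueStore is expected to reconcile its metadata with the
  // device-reported write pointers.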
int r = store->umount();
ASSERT_EQ(0, r);
// copied from StoreTestFixture
std::string path = GetParam() + ".test_temp_dir"s;
std::string p = path + "/block";
BlockDevice* bdev = BlockDevice::create(g_ceph_context, p, nullptr, nullptr, nullptr, nullptr);
r = bdev->open(p);
ASSERT_EQ(0, r);
ASSERT_EQ(true, bdev->is_smr());
std::vector<uint64_t> wp = bdev->get_zones();
uint64_t first_seq_zone = bdev->get_conventional_region_size() / bdev->get_zone_size();
IOContext ioc(g_ceph_context, NULL, true);
bufferlist bl;
bl.append(std::string(1024 * 1024, 'x'));
r = bdev->aio_write(wp[first_seq_zone], bl, &ioc, false);
ASSERT_EQ(0, r);
bdev->aio_submit(&ioc);
ioc.aio_wait();
bdev->close();
delete bdev;
r = store->mount();
ASSERT_EQ(0, r);
}
TEST_P(StoreTestSpecificAUSize, BluestoreEnforceHWSettingsHdd) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
StartDeferred(0x1000);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(g_ceph_context->_conf->bluestore_max_blob_size_hdd, '0');
bl.append(s);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "write" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
const PerfCounters* logger = store->get_perf_counters();
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 1u);
}
}
TEST_P(StoreTestSpecificAUSize, BluestoreEnforceHWSettingsSsd) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_debug_enforce_settings", "ssd");
StartDeferred(0x1000);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(g_ceph_context->_conf->bluestore_max_blob_size_ssd * 8, '0');
bl.append(s);
t.write(cid, hoid, 0, bl.length(), bl);
cerr << "write" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
const PerfCounters* logger = store->get_perf_counters();
ASSERT_EQ(logger->get(l_bluestore_write_big_blobs), 8u);
}
}
TEST_P(StoreTestSpecificAUSize, ReproNoBlobMultiTest) {
if(string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP (FIXME): bluestore gc does not seem to do the trick here" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", "4294967296");
SetVal(g_conf(), "bluestore_block_size", "12884901888");
SetVal(g_conf(), "bluestore_max_blob_size", "524288");
g_conf().apply_changes(nullptr);
StartDeferred(65536);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object 1", CEPH_NOSNAP)));
ghobject_t hoid2 = hoid;
hoid2.hobj.snap = 1;
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
bool exists = store->exists(ch, hoid);
ASSERT_TRUE(!exists);
ObjectStore::Transaction t;
t.touch(cid, hoid);
cerr << "Creating object " << hoid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
exists = store->exists(ch, hoid);
ASSERT_EQ(true, exists);
}
{
uint64_t offs = 0;
bufferlist bl;
const int size = 0x100;
bufferptr ap(size);
memset(ap.c_str(), 'a', size);
bl.append(ap);
int i = 0;
uint64_t blob_size = 524288;
uint64_t total = 0;
for (i = 0; i <= 512; i++) {
offs = 0 + i * size;
ObjectStore::Transaction t;
ghobject_t hoid2 = hoid;
hoid2.hobj.snap = i + 1;
while (offs < 128 * 1024 * 1024) {
t.write(cid, hoid, offs, ap.length(), bl);
offs += blob_size;
total += ap.length();
}
t.clone(cid, hoid, hoid2);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
cerr << "Total written = " << total << std::endl;
}
{
cerr << "Finalizing" << std::endl;
const PerfCounters* logger = store->get_perf_counters();
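    // the repeated overwrite+clone pattern above should trigger garbage
    // collection; the 1 GiB lower bound below is empirical for this
    // workload rather than a documented contract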
ASSERT_GE(logger->get(l_bluestore_gc_merged), 1024*1024*1024);
}
}
void doManySetAttr(ObjectStore* store,
std::function<void(ObjectStore*)> do_check_fn)
{
MixedGenerator gen(447);
gen_type rng(time(NULL));
coll_t cid(spg_t(pg_t(0, 447), shard_id_t::NO_SHARD));
SyntheticWorkloadState test_obj(store, &gen, &rng, cid, 0, 0, 0);
test_obj.init();
size_t object_count = 256;
for (size_t i = 0; i < object_count; ++i) {
if (!(i % 10)) cerr << "seeding object " << i << std::endl;
test_obj.touch();
}
for (size_t i = 0; i < object_count; ++i) {
if (!(i % 100)) {
cerr << "Op " << i << std::endl;
test_obj.print_internal_state();
}
    test_obj.set_fixed_attrs(1024, 64, 4096); // 1024 attributes with 64-byte names and 4K values
}
test_obj.wait_for_done();
std::cout << "done" << std::endl;
do_check_fn(store);
AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
ceph_assert(admin_socket);
ceph::bufferlist in, out;
ostringstream err;
auto r = admin_socket->execute_command(
{ "{\"prefix\": \"bluefs stats\"}" },
in, err, &out);
if (r != 0) {
cerr << "failure querying: " << cpp_strerror(r) << std::endl;
} else {
std::cout << std::string(out.c_str(), out.length()) << std::endl;
}
test_obj.shutdown();
}
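
// The "bluefs stats" admin-socket query in doManySetAttr() above is
// repeated verbatim in a couple of tests below; a sketch of the shared
// helper it could become (hypothetical, not wired into the tests):
static inline void dump_bluefs_stats_example()
{
  AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
  ceph_assert(admin_socket);
  ceph::bufferlist in, out;
  ostringstream err;
  int r = admin_socket->execute_command(
    { "{\"prefix\": \"bluefs stats\"}" },
    in, err, &out);
  if (r != 0) {
    cerr << "failure querying: " << cpp_strerror(r) << std::endl;
  } else {
    std::cout << std::string(out.c_str(), out.length()) << std::endl;
  }
}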
TEST_P(StoreTestSpecificAUSize, SpilloverTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
SetVal(g_conf(), "bluestore_volume_selection_policy", "rocksdb_original");
  // original RocksDB settings (used before https://github.com/ceph/ceph/pull/47221/)
  // that enable BlueFS spillover.
SetVal(g_conf(), "bluestore_rocksdb_options",
"compression=kNoCompression,max_write_buffer_number=4,"
"min_write_buffer_number_to_merge=1,recycle_log_file_num=4,"
"write_buffer_size=268435456,writable_file_max_buffer_size=0,"
"compaction_readahead_size=2097152,max_background_compactions=2,"
"max_total_wal_size=1073741824");
g_conf().apply_changes(nullptr);
StartDeferred(65536);
doManySetAttr(store.get(),
[&](ObjectStore* _store) {
BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
ceph_assert(bstore);
bstore->compact();
const PerfCounters* logger = bstore->get_bluefs_perf_counters();
      //experimentally it was discovered that this case results in 400+MB spillover
      //using a lower 300MB threshold just to be safe enough
std::cout << "DB used:" << logger->get(l_bluefs_db_used_bytes) << std::endl;
std::cout << "SLOW used:" << logger->get(l_bluefs_slow_used_bytes) << std::endl;
ASSERT_GE(logger->get(l_bluefs_slow_used_bytes), 16 * 1024 * 1024);
struct store_statfs_t statfs;
osd_alert_list_t alerts;
int r = store->statfs(&statfs, &alerts);
ASSERT_EQ(r, 0);
ASSERT_EQ(alerts.count("BLUEFS_SPILLOVER"), 1);
std::cout << "spillover_alert:" << alerts.find("BLUEFS_SPILLOVER")->second
<< std::endl;
}
);
}
TEST_P(StoreTestSpecificAUSize, SpilloverFixedTest) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra");
SetVal(g_conf(), "bluestore_volume_selection_reserved", "1"); // just use non-zero to enable
g_conf().apply_changes(nullptr);
StartDeferred(65536);
doManySetAttr(store.get(),
[&](ObjectStore* _store) {
BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
ceph_assert(bstore);
bstore->compact();
const PerfCounters* logger = bstore->get_bluefs_perf_counters();
ASSERT_EQ(0, logger->get(l_bluefs_slow_used_bytes));
}
);
}
TEST_P(StoreTestSpecificAUSize, SpilloverFixed2Test) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
SetVal(g_conf(), "bluestore_volume_selection_policy", "use_some_extra");
  //default 2.0 factor results in too high a threshold; using a lower value
  // that results in less, but still present, spillover.
SetVal(g_conf(), "bluestore_volume_selection_reserved_factor", "0.5");
g_conf().apply_changes(nullptr);
StartDeferred(65536);
doManySetAttr(store.get(),
[&](ObjectStore* _store) {
BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
ceph_assert(bstore);
bstore->compact();
const PerfCounters* logger = bstore->get_bluefs_perf_counters();
ASSERT_LE(logger->get(l_bluefs_slow_used_bytes), 300 * 1024 * 1024); // see SpilloverTest for 300MB choice rationale
}
);
}
TEST_P(StoreTestSpecificAUSize, SpilloverFixed3Test) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
cout << "SKIP: (FIXME?) adjust me for smr at some point?" << std::endl;
return;
}
SetVal(g_conf(), "bluestore_block_db_create", "true");
SetVal(g_conf(), "bluestore_block_db_size", "3221225472");
SetVal(g_conf(), "bluestore_volume_selection_policy", "fit_to_fast");
g_conf().apply_changes(nullptr);
StartDeferred(65536);
doManySetAttr(store.get(),
[&](ObjectStore* _store) {
BlueStore* bstore = dynamic_cast<BlueStore*> (_store);
ceph_assert(bstore);
bstore->compact();
const PerfCounters* logger = bstore->get_bluefs_perf_counters();
      ASSERT_EQ(logger->get(l_bluefs_slow_used_bytes), 0); // referring to SpilloverFixedTest
}
);
}
TEST_P(StoreTestSpecificAUSize, Ticket45195Repro) {
if (string(GetParam()) != "bluestore")
return;
if (smr) {
return;
}
SetVal(g_conf(), "bluestore_default_buffered_write", "true");
SetVal(g_conf(), "bluestore_max_blob_size", "65536");
SetVal(g_conf(), "bluestore_debug_enforce_settings", "hdd");
SetVal(g_conf(), "bluestore_fsck_on_mount", "false");
g_conf().apply_changes(nullptr);
StartDeferred(0x1000);
int r;
coll_t cid;
ghobject_t hoid(hobject_t(sobject_t("Object", CEPH_NOSNAP)));
auto ch = store->create_new_collection(cid);
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
cerr << "Creating collection " << cid << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
size_t large_object_size = 1 * 1024 * 1024;
size_t expected_write_size = 0x8000;
ObjectStore::Transaction t;
t.touch(cid, hoid);
t.set_alloc_hint(cid, hoid, large_object_size, expected_write_size,
CEPH_OSD_ALLOC_HINT_FLAG_SEQUENTIAL_READ |
CEPH_OSD_ALLOC_HINT_FLAG_APPEND_ONLY);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(0xc000, '0');
bl.append(s);
t.write(cid, hoid, 0xb000, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(0x10000, '1');
bl.append(s);
t.write(cid, hoid, 0x16000, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(0x4000, '1');
bl.append(s);
t.write(cid, hoid, 0x1b000, bl.length(), bl);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
bufferlist bl;
r = store->read(ch, hoid, 0xb000, 0xb000, bl);
ASSERT_EQ(r, 0xb000);
store->umount();
store->mount();
ch = store->open_collection(cid);
{
ObjectStore::Transaction t;
bufferlist bl, orig;
string s(0xf000, '3');
bl.append(s);
t.write(cid, hoid, 0xf000, bl.length(), bl);
cerr << "write4" << std::endl;
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
r = store->read(ch, hoid, 0xb000, 0x10000, bl);
ASSERT_EQ(r, 0x10000);
}
TEST_P(StoreTestOmapUpgrade, WithOmapHeader) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_debug_legacy_omap", "true");
g_conf().apply_changes(nullptr);
StartDeferred();
int64_t poolid = 11;
coll_t cid(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD));
ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, poolid, ""));
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
map<string, bufferlist> attrs;
bufferlist expected_header;
expected_header.append("this is a header");
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
bufferlist header;
header.append(expected_header);
t.omap_setheader(cid, hoid, header);
map<string, bufferlist> start_set;
bufferlist bl;
bl.append(string("value"));
start_set.emplace(string("key1"), bl);
t.omap_setkeys(cid, hoid, start_set);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
{
map<string,bufferlist> res;
bufferlist h;
r = store->omap_get(ch, hoid, &h, &res);
ASSERT_EQ(r, 0);
ASSERT_TRUE(bl_eq(h, expected_header));
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res.begin()->first, "key1");
}
store->umount();
ASSERT_EQ(store->fsck(false), 0);
SetVal(g_conf(), "bluestore_debug_legacy_omap", "false");
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
g_conf().apply_changes(nullptr);
ASSERT_EQ(store->fsck(false), 2);
ASSERT_EQ(store->quick_fix(), 0);
store->mount();
ch = store->open_collection(cid);
{
map<string,bufferlist> res;
bufferlist h;
r = store->omap_get(ch, hoid, &h, &res);
ASSERT_EQ(r, 0);
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res.begin()->first, "key1");
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestSpecificAUSize, BluefsWriteInSingleDiskEnvTest) {
if (string(GetParam()) != "bluestore")
return;
g_conf().apply_changes(nullptr);
StartDeferred(0x1000);
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
ceph_assert(bstore);
bstore->inject_bluefs_file("db.slow", "store_test_injection_slow", 1 << 20ul);
bstore->inject_bluefs_file("db.wal", "store_test_injection_wal", 1 << 20ul);
bstore->inject_bluefs_file("db", "store_test_injection_wal", 1 << 20ul);
AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
ceph_assert(admin_socket);
ceph::bufferlist in, out;
ostringstream err;
auto r = admin_socket->execute_command(
{ "{\"prefix\": \"bluefs stats\"}" },
in, err, &out);
if (r != 0) {
cerr << "failure querying: " << cpp_strerror(r) << std::endl;
} else {
std::cout << std::string(out.c_str(), out.length()) << std::endl;
}
}
TEST_P(StoreTestSpecificAUSize, BluefsWriteInNoWalDiskEnvTest) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_block_db_path", "db");
SetVal(g_conf(), "bluestore_block_db_size", stringify(1ull << 31).c_str());
SetVal(g_conf(), "bluestore_block_db_create", "true");
g_conf().apply_changes(nullptr);
StartDeferred(0x1000);
BlueStore* bstore = dynamic_cast<BlueStore*> (store.get());
ceph_assert(bstore);
bstore->inject_bluefs_file("db.slow", "store_test_injection_slow", 1 << 20ul);
bstore->inject_bluefs_file("db.wal", "store_test_injection_wal", 1 << 20ul);
bstore->inject_bluefs_file("db", "store_test_injection_wal", 1 << 20ul);
AdminSocket* admin_socket = g_ceph_context->get_admin_socket();
ceph_assert(admin_socket);
ceph::bufferlist in, out;
ostringstream err;
auto r = admin_socket->execute_command(
{ "{\"prefix\": \"bluefs stats\"}" },
in, err, &out);
if (r != 0) {
cerr << "failure querying: " << cpp_strerror(r) << std::endl;
  } else {
std::cout << std::string(out.c_str(), out.length()) << std::endl;
}
}
TEST_P(StoreTestOmapUpgrade, NoOmapHeader) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_debug_legacy_omap", "true");
g_conf().apply_changes(nullptr);
StartDeferred();
int64_t poolid = 11;
coll_t cid(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD));
ghobject_t hoid(hobject_t("tesomap", "", CEPH_NOSNAP, 0, poolid, ""));
auto ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
map<string, bufferlist> attrs;
{
ObjectStore::Transaction t;
t.touch(cid, hoid);
map<string, bufferlist> start_set;
bufferlist bl;
bl.append(string("value"));
start_set.emplace(string("key1"), bl);
t.omap_setkeys(cid, hoid, start_set);
    r = queue_transaction(store, ch, std::move(t));
    ASSERT_EQ(r, 0);
  }
{
map<string,bufferlist> res;
bufferlist h;
r = store->omap_get(ch, hoid, &h, &res);
ASSERT_EQ(r, 0);
ASSERT_EQ(h.length(), 0);
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res.begin()->first, "key1");
}
store->umount();
ASSERT_EQ(store->fsck(false), 0);
SetVal(g_conf(), "bluestore_debug_legacy_omap", "false");
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
g_conf().apply_changes(nullptr);
ASSERT_EQ(store->fsck(false), 2);
ASSERT_EQ(store->quick_fix(), 0);
store->mount();
ch = store->open_collection(cid);
{
map<string,bufferlist> res;
bufferlist h;
r = store->omap_get(ch, hoid, &h, &res);
ASSERT_EQ(r, 0);
ASSERT_EQ(res.size(), 1);
ASSERT_EQ(res.begin()->first, "key1");
}
{
ObjectStore::Transaction t;
t.remove(cid, hoid);
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
TEST_P(StoreTestOmapUpgrade, LargeLegacyToPG) {
if (string(GetParam()) != "bluestore")
return;
SetVal(g_conf(), "bluestore_debug_legacy_omap", "true");
g_conf().apply_changes(nullptr);
int64_t poolid;
coll_t cid;
ghobject_t hoid;
ObjectStore::CollectionHandle ch;
StartDeferred();
poolid = 11;
cid = coll_t(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD));
ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
//ASSERT_EQ(false, g_conf().get_val<bool>("bluestore_debug_inject_upgrade_bug53062"));
map<string, bufferlist> attrs;
bufferlist expected_header;
expected_header.append("this is a header");
size_t object_count = 1000;
make_omap_data(object_count, poolid, cid);
//checking just written data
check_omap_data(object_count, poolid, cid);
store->umount();
ASSERT_EQ(store->fsck(false), 0);
SetVal(g_conf(), "bluestore_debug_legacy_omap", "false");
SetVal(g_conf(), "bluestore_fsck_error_on_no_per_pool_omap", "true");
g_conf().apply_changes(nullptr);
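  // expecting one fsck error per legacy-omap object plus one store-wide
  // "no per-pool omap" error -- an inference from the single-object
  // upgrade tests above, which expect 2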
ASSERT_EQ(store->fsck(false), 1001);
ASSERT_EQ(store->quick_fix(), 0);
store->mount();
ch = store->open_collection(cid);
//checking quick_fix() data
check_omap_data(object_count, poolid, cid);
{
ObjectStore::Transaction t;
for (size_t o = 0; o < object_count; o++)
{
std::string oid = generate_monotonic_name(object_count, o, 3.71, 0.5);
ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 0, poolid, ""));
t.remove(cid, hoid);
}
t.remove_collection(cid);
r = queue_transaction(store, ch, std::move(t));
ASSERT_EQ(r, 0);
}
}
#endif // WITH_BLUESTORE
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
for (auto& i : args) {
if (i == "--smr"s) {
#if defined(HAVE_LIBZBD)
derr << "Adjusting tests for smr mode." << dendl;
smr = true;
#else
derr << "smr mode selected, but support not compiled in" << dendl;
return 1;
#endif
}
}
// make sure we can adjust any config settings
g_ceph_context->_conf._clear_safe_to_start_threads();
g_ceph_context->_conf.set_val_or_die("osd_journal_size", "400");
g_ceph_context->_conf.set_val_or_die("filestore_index_retry_probability", "0.5");
g_ceph_context->_conf.set_val_or_die("filestore_op_thread_timeout", "1000");
g_ceph_context->_conf.set_val_or_die("filestore_op_thread_suicide_timeout", "10000");
//g_ceph_context->_conf.set_val_or_die("filestore_fiemap", "true");
g_ceph_context->_conf.set_val_or_die("bluestore_fsck_on_mkfs", "false");
g_ceph_context->_conf.set_val_or_die("bluestore_fsck_on_mount", "false");
g_ceph_context->_conf.set_val_or_die("bluestore_fsck_on_umount", "false");
g_ceph_context->_conf.set_val_or_die("bluestore_debug_small_allocations", "4");
g_ceph_context->_conf.set_val_or_die("bluestore_debug_freelist", "true");
g_ceph_context->_conf.set_val_or_die("bluestore_clone_cow", "true");
g_ceph_context->_conf.set_val_or_die("bluestore_max_alloc_size", "196608");
// set small cache sizes so we see trimming during Synthetic tests
g_ceph_context->_conf.set_val_or_die("bluestore_cache_size_hdd", "4000000");
g_ceph_context->_conf.set_val_or_die("bluestore_cache_size_ssd", "4000000");
g_ceph_context->_conf.set_val_or_die(
"bluestore_debug_inject_allocation_from_file_failure", "0.66");
// very short *_max prealloc so that we fall back to async submits
g_ceph_context->_conf.set_val_or_die("bluestore_blobid_prealloc", "10");
g_ceph_context->_conf.set_val_or_die("bluestore_nid_prealloc", "10");
g_ceph_context->_conf.set_val_or_die("bluestore_debug_randomize_serial_transaction",
"10");
g_ceph_context->_conf.set_val_or_die("bdev_debug_aio", "true");
// specify device size
g_ceph_context->_conf.set_val_or_die("bluestore_block_size",
stringify(DEF_STORE_TEST_BLOCKDEV_SIZE));
g_ceph_context->_conf.set_val_or_die(
"enable_experimental_unrecoverable_data_corrupting_features", "*");
g_ceph_context->_conf.apply_changes(nullptr);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
/*
* Local Variables:
* compile-command: "cd ../.. ; make ceph_test_objectstore &&
* ./ceph_test_objectstore \
* --gtest_filter=*.collect_metadata* --log-to-stderr=true --debug-filestore=20
* "
* End:
*/
| 324,221 | 28.655355 | 1,049 |
cc
|
null |
ceph-main/src/test/objectstore/store_test_fixture.cc
|
#include <stdlib.h>
#include <string>
#include <iostream>
#include <assert.h>
#include <gtest/gtest.h>
#include "common/errno.h"
#include "common/config.h"
#include "os/ObjectStore.h"
#if defined(WITH_BLUESTORE)
#include "os/bluestore/BlueStore.h"
#endif
#include "store_test_fixture.h"
using namespace std;
static void rm_r(const string& path)
{
string cmd = string("rm -r ") + path;
cout << "==> " << cmd << std::endl;
int r = ::system(cmd.c_str());
if (r) {
if (r == -1) {
r = errno;
cerr << "system() failed to fork() " << cpp_strerror(r)
<< ", continuing anyway" << std::endl;
} else {
cerr << "failed with exit code " << r
<< ", continuing anyway" << std::endl;
}
}
}
void StoreTestFixture::SetUp()
{
int r = ::mkdir(data_dir.c_str(), 0777);
if (r < 0) {
r = -errno;
cerr << __func__ << ": unable to create " << data_dir << ": " << cpp_strerror(r) << std::endl;
}
ASSERT_EQ(0, r);
store = ObjectStore::create(g_ceph_context,
type,
data_dir,
"store_test_temp_journal");
if (!store) {
cerr << __func__ << ": objectstore type " << type << " doesn't exist yet!" << std::endl;
}
ASSERT_TRUE(store);
#if defined(WITH_BLUESTORE)
if (type == "bluestore") {
BlueStore *s = static_cast<BlueStore*>(store.get());
// better test coverage!
s->set_cache_shards(5);
}
#endif
ASSERT_EQ(0, store->mkfs());
ASSERT_EQ(0, store->mount());
// we keep this stuff 'unsafe' out of test case scope to be able to update ANY
// config settings. Hence setting it to 'safe' here to proceed with the test
// case
g_conf().set_safe_to_start_threads();
}
void StoreTestFixture::TearDown()
{
if (store) {
int r = store->umount();
EXPECT_EQ(0, r);
rm_r(data_dir);
}
// we keep this stuff 'unsafe' out of test case scope to be able to update ANY
// config settings. Hence setting it to 'unsafe' here as test case is closing.
g_conf()._clear_safe_to_start_threads();
PopSettings(0);
if (!orig_death_test_style.empty()) {
::testing::FLAGS_gtest_death_test_style = orig_death_test_style;
orig_death_test_style.clear();
}
}
void StoreTestFixture::SetVal(ConfigProxy& _conf, const char* key, const char* val)
{
ceph_assert(!conf || conf == &_conf);
conf = &_conf;
std::string skey(key);
std::string prev_val;
conf->get_val(skey, &prev_val);
conf->set_val_or_die(key, val);
saved_settings.emplace(skey, prev_val);
}
void StoreTestFixture::PopSettings(size_t pos)
{
if (conf) {
ceph_assert(pos == 0 || pos <= saved_settings.size()); // for sanity
while(pos < saved_settings.size())
{
auto& e = saved_settings.top();
conf->set_val_or_die(e.first, e.second);
saved_settings.pop();
}
conf->apply_changes(NULL);
}
}
void StoreTestFixture::CloseAndReopen() {
ceph_assert(store != nullptr);
g_conf()._clear_safe_to_start_threads();
int r = store->umount();
EXPECT_EQ(0, r);
ch.reset(nullptr);
store.reset(nullptr);
store = ObjectStore::create(g_ceph_context,
type,
data_dir,
"store_test_temp_journal");
if (!store) {
cerr << __func__ << ": objectstore type " << type << " failed to reopen!" << std::endl;
}
ASSERT_TRUE(store);
#if defined(WITH_BLUESTORE)
if (type == "bluestore") {
BlueStore *s = static_cast<BlueStore*>(store.get());
// better test coverage!
s->set_cache_shards(5);
}
#endif
ASSERT_EQ(0, store->mount());
g_conf().set_safe_to_start_threads();
}
| 3,667 | 25.970588 | 98 |
cc
|
null |
ceph-main/src/test/objectstore/store_test_fixture.h
|
#include <string>
#include <stack>
#include <memory>
#include <gtest/gtest.h>
#include "common/config_fwd.h"
class ObjectStore;
class StoreTestFixture : virtual public ::testing::Test {
const std::string type;
const std::string data_dir;
std::stack<std::pair<std::string, std::string>> saved_settings;
ConfigProxy* conf = nullptr;
std::string orig_death_test_style;
public:
std::unique_ptr<ObjectStore> store;
ObjectStore::CollectionHandle ch;
explicit StoreTestFixture(const std::string& type)
: type(type), data_dir(type + ".test_temp_dir")
{}
void SetUp() override;
void TearDown() override;
void SetDeathTestStyle(const char* new_style) {
if (orig_death_test_style.empty()) {
orig_death_test_style = ::testing::FLAGS_gtest_death_test_style;
}
::testing::FLAGS_gtest_death_test_style = new_style;
}
void SetVal(ConfigProxy& conf, const char* key, const char* val);
struct SettingsBookmark {
StoreTestFixture& s;
size_t pos;
SettingsBookmark(StoreTestFixture& _s, size_t p) : s(_s), pos(p)
{}
~SettingsBookmark() {
s.PopSettings(pos);
}
};
SettingsBookmark BookmarkSettings() {
return SettingsBookmark(*this, saved_settings.size());
}
void PopSettings(size_t);
void CloseAndReopen();
};
| 1,295 | 23.45283 | 70 |
h
|
null |
ceph-main/src/test/objectstore/test_bdev.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <gtest/gtest.h>
#include "global/global_init.h"
#include "global/global_context.h"
#include "common/ceph_context.h"
#include "common/ceph_argparse.h"
#include "include/stringify.h"
#include "common/errno.h"
#include "blk/BlockDevice.h"
using namespace std;
class TempBdev {
public:
TempBdev(uint64_t size)
: path{get_temp_bdev(size)}
{}
~TempBdev() {
rm_temp_bdev(path);
}
const std::string path;
private:
static string get_temp_bdev(uint64_t size)
{
static int n = 0;
string fn = "ceph_test_bluefs.tmp.block." + stringify(getpid())
+ "." + stringify(++n);
int fd = ::open(fn.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0644);
ceph_assert(fd >= 0);
int r = ::ftruncate(fd, size);
ceph_assert(r >= 0);
::close(fd);
return fn;
}
static void rm_temp_bdev(string f)
{
::unlink(f.c_str());
}
};
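
// Typical usage (a sketch): the temp file stands in for a raw block
// device and is unlinked when the guard goes out of scope, e.g.
//
//   TempBdev bdev{1048576ull * 128};
//   std::unique_ptr<BlockDevice> dev(
//     BlockDevice::create(g_ceph_context, bdev.path, NULL, NULL,
//                         [](void*, void*) {}, NULL));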
TEST(KernelDevice, Ticket45337) {
// Large (>=2 GB) writes are incomplete when bluefs_buffered_io = true
uint64_t size = 1048576ull * 8192;
TempBdev bdev{ size };
const bool buffered = true;
std::unique_ptr<BlockDevice> b(
BlockDevice::create(g_ceph_context, bdev.path, NULL, NULL,
[](void* handle, void* aio) {}, NULL));
bufferlist bl;
// writing a bit less than 4GB
for (auto i = 0; i < 4000; i++) {
string s(1048576, 'a' + (i % 28));
bl.append(s);
}
uint64_t magic_offs = bl.length();
string s(4086, 'z');
s += "0123456789";
bl.append(s);
{
int r = b->open(bdev.path);
if (r < 0) {
std::cerr << "open " << bdev.path << " failed" << std::endl;
return;
}
}
std::unique_ptr<IOContext> ioc(new IOContext(g_ceph_context, NULL));
auto r = b->aio_write(0, bl, ioc.get(), buffered);
ASSERT_EQ(r, 0);
if (ioc->has_pending_aios()) {
b->aio_submit(ioc.get());
ioc->aio_wait();
}
char outbuf[0x1000];
r = b->read_random(magic_offs, sizeof(outbuf), outbuf, buffered);
ASSERT_EQ(r, 0);
ASSERT_EQ(memcmp(s.c_str(), outbuf, sizeof(outbuf)), 0);
b->close();
}
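
// Note on the check above (an inference from the ticket reference in the
// test): the "magic" tail sits at the very end of the ~4 GB buffered
// write, so if a large write were silently truncated the read_random()
// + memcmp() on the tail would fail.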
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
map<string,string> defaults = {
{ "debug_bdev", "1/20" }
};
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
g_ceph_context->_conf.set_val(
"enable_experimental_unrecoverable_data_corrupting_features",
"*");
g_ceph_context->_conf.apply_changes(nullptr);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 2,725 | 23.339286 | 73 |
cc
|
null |
ceph-main/src/test/objectstore/test_bluefs.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <time.h>
#include <fcntl.h>
#include <unistd.h>
#include <random>
#include <thread>
#include <stack>
#include <gtest/gtest.h>
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "include/stringify.h"
#include "include/scope_guard.h"
#include "common/errno.h"
#include "os/bluestore/Allocator.h"
#include "os/bluestore/BlueFS.h"
using namespace std;
std::unique_ptr<char[]> gen_buffer(uint64_t size)
{
std::unique_ptr<char[]> buffer = std::make_unique<char[]>(size);
std::independent_bits_engine<std::default_random_engine, CHAR_BIT, unsigned char> e;
std::generate(buffer.get(), buffer.get()+size, std::ref(e));
return buffer;
}
class TempBdev {
public:
TempBdev(uint64_t size)
: path{get_temp_bdev(size)}
{}
~TempBdev() {
rm_temp_bdev(path);
}
const std::string path;
private:
static string get_temp_bdev(uint64_t size)
{
static int n = 0;
string fn = "ceph_test_bluefs.tmp.block." + stringify(getpid())
+ "." + stringify(++n);
int fd = ::open(fn.c_str(), O_CREAT|O_RDWR|O_TRUNC, 0644);
ceph_assert(fd >= 0);
int r = ::ftruncate(fd, size);
ceph_assert(r >= 0);
::close(fd);
return fn;
}
static void rm_temp_bdev(string f)
{
::unlink(f.c_str());
}
};
class ConfSaver {
std::stack<std::pair<std::string, std::string>> saved_settings;
ConfigProxy& conf;
public:
ConfSaver(ConfigProxy& conf) : conf(conf) {
conf._clear_safe_to_start_threads();
};
~ConfSaver() {
conf._clear_safe_to_start_threads();
while(saved_settings.size() > 0) {
auto& e = saved_settings.top();
conf.set_val_or_die(e.first, e.second);
saved_settings.pop();
}
conf.set_safe_to_start_threads();
conf.apply_changes(nullptr);
}
void SetVal(const char* key, const char* val) {
std::string skey(key);
std::string prev_val;
conf.get_val(skey, &prev_val);
conf.set_val_or_die(skey, val);
saved_settings.emplace(skey, prev_val);
}
void ApplyChanges() {
conf.set_safe_to_start_threads();
conf.apply_changes(nullptr);
}
};
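
// Typical ConfSaver usage (a sketch mirroring the tests below): the saved
// settings are rolled back automatically when the guard leaves scope.
//
//   ConfSaver conf(g_ceph_context->_conf);
//   conf.SetVal("bluefs_alloc_size", "4096");
//   conf.ApplyChanges();
//   // ... run the test; the destructor restores the previous values.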
TEST(BlueFS, mkfs) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
uuid_d fsid;
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
}
TEST(BlueFS, mkfs_mount) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(fs.get_total(BlueFS::BDEV_DB), size - 1048576);
ASSERT_LT(fs.get_free(BlueFS::BDEV_DB), size - 1048576);
fs.umount();
}
TEST(BlueFS, write_read) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.mkdir("dir"));
ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
h->append("foo", 3);
h->append("bar", 3);
h->append("baz", 3);
fs.fsync(h);
fs.close_writer(h);
}
{
BlueFS::FileReader *h;
ASSERT_EQ(0, fs.open_for_read("dir", "file", &h));
bufferlist bl;
ASSERT_EQ(9, fs.read(h, 0, 1024, &bl, NULL));
ASSERT_EQ(0, strncmp("foobarbaz", bl.c_str(), 9));
delete h;
}
fs.umount();
}
TEST(BlueFS, small_appends) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.mkdir("dir"));
ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
for (unsigned i = 0; i < 10000; ++i) {
h->append("abcdeabcdeabcdeabcdeabcdeabc", 23);
}
fs.fsync(h);
fs.close_writer(h);
}
{
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write("dir", "file_sync", &h, false));
for (unsigned i = 0; i < 1000; ++i) {
h->append("abcdeabcdeabcdeabcdeabcdeabc", 23);
ASSERT_EQ(0, fs.fsync(h));
}
fs.close_writer(h);
}
fs.umount();
}
TEST(BlueFS, very_large_write) {
// we'll write a ~5G file, so allocate more than that for the whole fs
uint64_t size = 1048576 * 1024 * 6ull;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
bool old = g_ceph_context->_conf.get_val<bool>("bluefs_buffered_io");
g_ceph_context->_conf.set_val("bluefs_buffered_io", "false");
uint64_t total_written = 0;
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
char buf[1048571]; // this is biggish, but intentionally not evenly aligned
for (unsigned i = 0; i < sizeof(buf); ++i) {
buf[i] = i;
}
{
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.mkdir("dir"));
ASSERT_EQ(0, fs.open_for_write("dir", "bigfile", &h, false));
for (unsigned i = 0; i < 3*1024*1048576ull / sizeof(buf); ++i) {
h->append(buf, sizeof(buf));
total_written += sizeof(buf);
}
fs.fsync(h);
for (unsigned i = 0; i < 2*1024*1048576ull / sizeof(buf); ++i) {
h->append(buf, sizeof(buf));
total_written += sizeof(buf);
}
fs.fsync(h);
fs.close_writer(h);
}
{
BlueFS::FileReader *h;
ASSERT_EQ(0, fs.open_for_read("dir", "bigfile", &h));
bufferlist bl;
ASSERT_EQ(h->file->fnode.size, total_written);
for (unsigned i = 0; i < 3*1024*1048576ull / sizeof(buf); ++i) {
bl.clear();
fs.read(h, i * sizeof(buf), sizeof(buf), &bl, NULL);
int r = memcmp(buf, bl.c_str(), sizeof(buf));
if (r) {
cerr << "read got mismatch at offset " << i*sizeof(buf) << " r " << r
<< std::endl;
}
ASSERT_EQ(0, r);
}
for (unsigned i = 0; i < 2*1024*1048576ull / sizeof(buf); ++i) {
bl.clear();
fs.read(h, i * sizeof(buf), sizeof(buf), &bl, NULL);
int r = memcmp(buf, bl.c_str(), sizeof(buf));
if (r) {
cerr << "read got mismatch at offset " << i*sizeof(buf) << " r " << r
<< std::endl;
}
ASSERT_EQ(0, r);
}
delete h;
ASSERT_EQ(0, fs.open_for_read("dir", "bigfile", &h));
ASSERT_EQ(h->file->fnode.size, total_written);
auto huge_buf = std::make_unique<char[]>(h->file->fnode.size);
auto l = h->file->fnode.size;
int64_t r = fs.read(h, 0, l, NULL, huge_buf.get());
ASSERT_EQ(r, l);
delete h;
}
fs.umount();
g_ceph_context->_conf.set_val("bluefs_buffered_io", stringify((int)old));
}
TEST(BlueFS, very_large_write2) {
// we'll write a ~5G file, so allocate more than that for the whole fs
uint64_t size_full = 1048576 * 1024 * 6ull;
uint64_t size = 1048576 * 1024 * 5ull;
TempBdev bdev{ size_full };
BlueFS fs(g_ceph_context);
bool old = g_ceph_context->_conf.get_val<bool>("bluefs_buffered_io");
g_ceph_context->_conf.set_val("bluefs_buffered_io", "false");
uint64_t total_written = 0;
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
char fill_arr[1 << 20]; // 1M
for (size_t i = 0; i < sizeof(fill_arr); ++i) {
fill_arr[i] = (char)i;
}
std::unique_ptr<char[]> buf;
buf.reset(new char[size]);
for (size_t i = 0; i < size; i += sizeof(fill_arr)) {
memcpy(buf.get() + i, fill_arr, sizeof(fill_arr));
}
{
BlueFS::FileWriter* h;
ASSERT_EQ(0, fs.mkdir("dir"));
ASSERT_EQ(0, fs.open_for_write("dir", "bigfile", &h, false));
fs.append_try_flush(h, buf.get(), size);
total_written = size;
fs.fsync(h);
fs.close_writer(h);
}
memset(buf.get(), 0, size);
{
BlueFS::FileReader* h;
ASSERT_EQ(0, fs.open_for_read("dir", "bigfile", &h));
ASSERT_EQ(h->file->fnode.size, total_written);
auto l = h->file->fnode.size;
int64_t r = fs.read(h, 0, l, NULL, buf.get());
ASSERT_EQ(r, l);
for (size_t i = 0; i < size; i += sizeof(fill_arr)) {
ceph_assert(memcmp(buf.get() + i, fill_arr, sizeof(fill_arr)) == 0);
}
delete h;
}
fs.umount();
g_ceph_context->_conf.set_val("bluefs_buffered_io", stringify((int)old));
}
#define ALLOC_SIZE 4096
void write_data(BlueFS &fs, uint64_t rationed_bytes)
{
int j=0, r=0;
uint64_t written_bytes = 0;
rationed_bytes -= ALLOC_SIZE;
stringstream ss;
string dir = "dir.";
ss << std::this_thread::get_id();
dir.append(ss.str());
dir.append(".");
dir.append(to_string(j));
ASSERT_EQ(0, fs.mkdir(dir));
while (1) {
string file = "file.";
file.append(to_string(j));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(ALLOC_SIZE);
bufferptr bp = buffer::claim_char(ALLOC_SIZE, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
r = fs.fsync(h);
if (r < 0) {
break;
}
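    // account in whole allocation units rather than appended bytes: each
    // small file still consumes at least one bluefs_alloc_size extent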
written_bytes += g_conf()->bluefs_alloc_size;
j++;
if ((rationed_bytes - written_bytes) <= g_conf()->bluefs_alloc_size) {
break;
}
}
}
void create_single_file(BlueFS &fs)
{
BlueFS::FileWriter *h;
stringstream ss;
string dir = "dir.test";
ASSERT_EQ(0, fs.mkdir(dir));
string file = "testfile";
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(ALLOC_SIZE);
bufferptr bp = buffer::claim_char(ALLOC_SIZE, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
fs.fsync(h);
fs.close_writer(h);
}
void write_single_file(BlueFS &fs, uint64_t rationed_bytes)
{
stringstream ss;
const string dir = "dir.test";
const string file = "testfile";
uint64_t written_bytes = 0;
rationed_bytes -= ALLOC_SIZE;
while (1) {
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(ALLOC_SIZE);
bufferptr bp = buffer::claim_char(ALLOC_SIZE, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
int r = fs.fsync(h);
if (r < 0) {
break;
}
written_bytes += g_conf()->bluefs_alloc_size;
if ((rationed_bytes - written_bytes) <= g_conf()->bluefs_alloc_size) {
break;
}
}
}
// shared with the sync threads below; atomic to avoid a data race
std::atomic<bool> writes_done{false};
void sync_fs(BlueFS &fs)
{
  while (!writes_done) {
    fs.sync_metadata(false);
    sleep(1);
  }
}
void do_join(std::thread& t)
{
t.join();
}
void join_all(std::vector<std::thread>& v)
{
std::for_each(v.begin(),v.end(),do_join);
}
#define NUM_WRITERS 3
#define NUM_SYNC_THREADS 1
#define NUM_SINGLE_FILE_WRITERS 1
#define NUM_MULTIPLE_FILE_WRITERS 2
TEST(BlueFS, test_flush_1) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
g_ceph_context->_conf.set_val(
"bluefs_alloc_size",
"65536");
g_ceph_context->_conf.apply_changes(nullptr);
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
std::vector<std::thread> write_thread_multiple;
uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
uint64_t per_thread_bytes = (effective_size/(NUM_MULTIPLE_FILE_WRITERS + NUM_SINGLE_FILE_WRITERS));
for (int i=0; i<NUM_MULTIPLE_FILE_WRITERS ; i++) {
write_thread_multiple.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
}
create_single_file(fs);
std::vector<std::thread> write_thread_single;
for (int i=0; i<NUM_SINGLE_FILE_WRITERS; i++) {
write_thread_single.push_back(std::thread(write_single_file, std::ref(fs), per_thread_bytes));
}
join_all(write_thread_single);
join_all(write_thread_multiple);
}
fs.umount();
}
TEST(BlueFS, test_flush_2) {
uint64_t size = 1048576 * 256;
TempBdev bdev{size};
g_ceph_context->_conf.set_val(
"bluefs_alloc_size",
"65536");
g_ceph_context->_conf.apply_changes(nullptr);
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
    uint64_t effective_size = size - (128 * 1048576); // leaving the last 128 MB for log compaction
uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
std::vector<std::thread> write_thread_multiple;
for (int i=0; i<NUM_WRITERS; i++) {
write_thread_multiple.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
}
join_all(write_thread_multiple);
}
fs.umount();
}
TEST(BlueFS, test_flush_3) {
uint64_t size = 1048576 * 256;
TempBdev bdev{size};
g_ceph_context->_conf.set_val(
"bluefs_alloc_size",
"65536");
g_ceph_context->_conf.apply_changes(nullptr);
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
std::vector<std::thread> write_threads;
    uint64_t effective_size = size - (64 * 1048576); // leaving the last 64 MB for log compaction
uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
for (int i=0; i<NUM_WRITERS; i++) {
write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
}
std::vector<std::thread> sync_threads;
for (int i=0; i<NUM_SYNC_THREADS; i++) {
sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
}
join_all(write_threads);
writes_done = true;
join_all(sync_threads);
}
fs.umount();
}
TEST(BlueFS, test_simple_compaction_sync) {
g_ceph_context->_conf.set_val(
"bluefs_compact_log_sync",
"true");
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
for (int i=0; i<10; i++) {
string dir = "dir.";
dir.append(to_string(i));
ASSERT_EQ(0, fs.mkdir(dir));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(4096);
bufferptr bp = buffer::claim_char(4096, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
fs.fsync(h);
}
}
}
{
for (int i=0; i<10; i+=2) {
string dir = "dir.";
dir.append(to_string(i));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
fs.unlink(dir, file);
fs.sync_metadata(false);
}
ASSERT_EQ(0, fs.rmdir(dir));
fs.sync_metadata(false);
}
}
fs.compact_log();
fs.umount();
}
TEST(BlueFS, test_simple_compaction_async) {
g_ceph_context->_conf.set_val(
"bluefs_compact_log_sync",
"false");
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
for (int i=0; i<10; i++) {
string dir = "dir.";
dir.append(to_string(i));
ASSERT_EQ(0, fs.mkdir(dir));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(4096);
bufferptr bp = buffer::claim_char(4096, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
fs.fsync(h);
}
}
}
{
for (int i=0; i<10; i+=2) {
string dir = "dir.";
dir.append(to_string(i));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
fs.unlink(dir, file);
fs.sync_metadata(false);
}
ASSERT_EQ(0, fs.rmdir(dir));
fs.sync_metadata(false);
}
}
fs.compact_log();
fs.umount();
}
TEST(BlueFS, test_compaction_sync) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
g_ceph_context->_conf.set_val(
"bluefs_alloc_size",
"65536");
g_ceph_context->_conf.set_val(
"bluefs_compact_log_sync",
"true");
const char* canary_dir = "dir.after_compact_test";
const char* canary_file = "file.after_compact_test";
const char* canary_data = "some random data";
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
std::vector<std::thread> write_threads;
uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
for (int i=0; i<NUM_WRITERS; i++) {
write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
}
std::vector<std::thread> sync_threads;
for (int i=0; i<NUM_SYNC_THREADS; i++) {
sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
}
join_all(write_threads);
writes_done = true;
join_all(sync_threads);
fs.compact_log();
{
ASSERT_EQ(0, fs.mkdir(canary_dir));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(canary_dir, canary_file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
h->append(canary_data, strlen(canary_data));
int r = fs.fsync(h);
ASSERT_EQ(r, 0);
}
}
fs.umount();
fs.mount();
{
BlueFS::FileReader *h;
ASSERT_EQ(0, fs.open_for_read(canary_dir, canary_file, &h));
ASSERT_NE(nullptr, h);
bufferlist bl;
ASSERT_EQ(strlen(canary_data), fs.read(h, 0, 1024, &bl, NULL));
std::cout << bl.c_str() << std::endl;
ASSERT_EQ(0, strncmp(canary_data, bl.c_str(), strlen(canary_data)));
delete h;
}
fs.umount();
}
TEST(BlueFS, test_compaction_async) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
g_ceph_context->_conf.set_val(
"bluefs_alloc_size",
"65536");
g_ceph_context->_conf.set_val(
"bluefs_compact_log_sync",
"false");
const char* canary_dir = "dir.after_compact_test";
const char* canary_file = "file.after_compact_test";
const char* canary_data = "some random data";
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
std::vector<std::thread> write_threads;
uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
for (int i=0; i<NUM_WRITERS; i++) {
write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
}
std::vector<std::thread> sync_threads;
for (int i=0; i<NUM_SYNC_THREADS; i++) {
sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
}
join_all(write_threads);
writes_done = true;
join_all(sync_threads);
fs.compact_log();
{
ASSERT_EQ(0, fs.mkdir(canary_dir));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(canary_dir, canary_file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
h->append(canary_data, strlen(canary_data));
int r = fs.fsync(h);
ASSERT_EQ(r, 0);
}
}
fs.umount();
fs.mount();
{
BlueFS::FileReader *h;
ASSERT_EQ(0, fs.open_for_read(canary_dir, canary_file, &h));
ASSERT_NE(nullptr, h);
bufferlist bl;
ASSERT_EQ(strlen(canary_data), fs.read(h, 0, 1024, &bl, NULL));
std::cout << bl.c_str() << std::endl;
ASSERT_EQ(0, strncmp(canary_data, bl.c_str(), strlen(canary_data)));
delete h;
}
fs.umount();
}
TEST(BlueFS, test_replay) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
g_ceph_context->_conf.set_val(
"bluefs_alloc_size",
"65536");
g_ceph_context->_conf.set_val(
"bluefs_compact_log_sync",
"false");
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
std::vector<std::thread> write_threads;
uint64_t effective_size = size - (32 * 1048576); // leaving the last 32 MB for log compaction
uint64_t per_thread_bytes = (effective_size/(NUM_WRITERS));
for (int i=0; i<NUM_WRITERS; i++) {
write_threads.push_back(std::thread(write_data, std::ref(fs), per_thread_bytes));
}
std::vector<std::thread> sync_threads;
for (int i=0; i<NUM_SYNC_THREADS; i++) {
sync_threads.push_back(std::thread(sync_fs, std::ref(fs)));
}
join_all(write_threads);
writes_done = true;
join_all(sync_threads);
fs.compact_log();
}
fs.umount();
  // remount and check that the log replays safely
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
fs.umount();
}
TEST(BlueFS, test_replay_growth) {
uint64_t size = 1048576LL * (2 * 1024 + 128);
TempBdev bdev{size};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_alloc_size", "4096");
conf.SetVal("bluefs_shared_alloc_size", "4096");
conf.SetVal("bluefs_compact_log_sync", "false");
conf.SetVal("bluefs_min_log_runway", "32768");
conf.SetVal("bluefs_max_log_runway", "65536");
conf.SetVal("bluefs_allocator", "stupid");
conf.SetVal("bluefs_sync_write", "true");
conf.ApplyChanges();
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mkdir("dir"));
char data[2000];
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
for (size_t i = 0; i < 10000; i++) {
h->append(data, 2000);
fs.fsync(h);
}
fs.close_writer(h);
fs.umount(true); //do not compact on exit!
  // remount and check that the log replays safely
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
fs.umount();
}
TEST(BlueFS, test_tracker_50965) {
uint64_t size_wal = 1048576 * 64;
TempBdev bdev_wal{size_wal};
uint64_t size_db = 1048576 * 128;
TempBdev bdev_db{size_db};
uint64_t size_slow = 1048576 * 256;
TempBdev bdev_slow{size_slow};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_min_flush_size", "65536");
conf.ApplyChanges();
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_WAL, bdev_wal.path, false, 0));
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_db.path, false, 0));
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_SLOW, bdev_slow.path, false, 0));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, true, true }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, true, true }));
string dir_slow = "dir.slow";
ASSERT_EQ(0, fs.mkdir(dir_slow));
string dir_db = "dir_db";
ASSERT_EQ(0, fs.mkdir(dir_db));
string file_slow = "file";
BlueFS::FileWriter *h_slow;
ASSERT_EQ(0, fs.open_for_write(dir_slow, file_slow, &h_slow, false));
ASSERT_NE(nullptr, h_slow);
string file_db = "file";
BlueFS::FileWriter *h_db;
ASSERT_EQ(0, fs.open_for_write(dir_db, file_db, &h_db, false));
ASSERT_NE(nullptr, h_db);
bufferlist bl1;
std::unique_ptr<char[]> buf1 = gen_buffer(70000);
bufferptr bp1 = buffer::claim_char(70000, buf1.get());
bl1.push_back(bp1);
h_slow->append(bl1.c_str(), bl1.length());
fs.flush(h_slow);
uint64_t h_slow_dirty_seq_1 = fs.debug_get_dirty_seq(h_slow);
bufferlist bl2;
std::unique_ptr<char[]> buf2 = gen_buffer(1000);
bufferptr bp2 = buffer::claim_char(1000, buf2.get());
bl2.push_back(bp2);
h_db->append(bl2.c_str(), bl2.length());
fs.fsync(h_db);
uint64_t h_slow_dirty_seq_2 = fs.debug_get_dirty_seq(h_slow);
bool h_slow_dev_dirty = fs.debug_get_is_dev_dirty(h_slow, BlueFS::BDEV_SLOW);
// problem if allocations are stable in the log but the slow device is not flushed yet
ASSERT_FALSE(h_slow_dirty_seq_1 != 0 &&
h_slow_dirty_seq_2 == 0 &&
h_slow_dev_dirty == true);
fs.close_writer(h_slow);
fs.close_writer(h_db);
fs.umount();
}
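// Regression test for https://tracker.ceph.com/issues/53129 (per the
// test name): a truncate followed by an unrelated fsync that flushes the
// log must leave the truncated size and its data stable on media.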
TEST(BlueFS, test_truncate_stable_53129) {
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_min_flush_size", "65536");
conf.ApplyChanges();
uint64_t size_wal = 1048576 * 64;
TempBdev bdev_wal{size_wal};
uint64_t size_db = 1048576 * 128;
TempBdev bdev_db{size_db};
uint64_t size_slow = 1048576 * 256;
TempBdev bdev_slow{size_slow};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_WAL, bdev_wal.path, false, 0));
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_db.path, false, 0));
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_SLOW, bdev_slow.path, false, 0));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, true, true }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, true, true }));
string dir_slow = "dir.slow";
ASSERT_EQ(0, fs.mkdir(dir_slow));
string dir_db = "dir_db";
ASSERT_EQ(0, fs.mkdir(dir_db));
string file_slow = "file";
BlueFS::FileWriter *h_slow;
ASSERT_EQ(0, fs.open_for_write(dir_slow, file_slow, &h_slow, false));
ASSERT_NE(nullptr, h_slow);
string file_db = "file";
BlueFS::FileWriter *h_db;
ASSERT_EQ(0, fs.open_for_write(dir_db, file_db, &h_db, false));
ASSERT_NE(nullptr, h_db);
bufferlist bl1;
std::unique_ptr<char[]> buf1 = gen_buffer(70000);
bufferptr bp1 = buffer::claim_char(70000, buf1.get());
bl1.push_back(bp1);
// add 70000 bytes
h_slow->append(bl1.c_str(), bl1.length());
fs.flush(h_slow);
// and truncate to 60000 bytes
fs.truncate(h_slow, 60000);
// write something to file on DB device
bufferlist bl2;
std::unique_ptr<char[]> buf2 = gen_buffer(1000);
bufferptr bp2 = buffer::claim_char(1000, buf2.get());
bl2.push_back(bp2);
h_db->append(bl2.c_str(), bl2.length());
// and force bluefs log to flush
fs.fsync(h_db);
// This is the actual test point.
// We completed truncate, and we expect
// - size to be 60000
// - data to be stable on slow device
// OR
// - size = 0 or file does not exist
// - dev_dirty is irrelevant
bool h_slow_dev_dirty = fs.debug_get_is_dev_dirty(h_slow, BlueFS::BDEV_SLOW);
// Imagine power goes down here.
fs.close_writer(h_slow);
fs.close_writer(h_db);
fs.umount();
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, true, true }));
uint64_t size;
utime_t mtime;
ASSERT_EQ(0, fs.stat("dir.slow", "file", &size, &mtime));
// check file size 60000
ASSERT_EQ(size, 60000);
// check that dev_dirty was false (data stable on media)
ASSERT_EQ(h_slow_dev_dirty, false);
fs.umount();
}
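// Write across two mounts with a tiny log runway so the log file's own
// fnode (ino 1) accumulates delta updates spanning a replay, then
// remount once more to verify those deltas were applied consistently.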
TEST(BlueFS, test_update_ino1_delta_after_replay) {
uint64_t size = 1048576LL * (2 * 1024 + 128);
TempBdev bdev{size};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_alloc_size", "4096");
conf.SetVal("bluefs_shared_alloc_size", "4096");
conf.SetVal("bluefs_compact_log_sync", "false");
conf.SetVal("bluefs_min_log_runway", "32768");
conf.SetVal("bluefs_max_log_runway", "65536");
conf.SetVal("bluefs_allocator", "stupid");
conf.ApplyChanges();
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mkdir("dir"));
char data[2000] = {0}; // contents don't matter here; zero-init avoids reading uninitialized memory
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
for (size_t i = 0; i < 100; i++) {
h->append(data, 2000);
fs.fsync(h);
}
fs.close_writer(h);
fs.umount(true); //do not compact on exit!
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.open_for_write("dir", "file2", &h, false));
for (size_t i = 0; i < 100; i++) {
h->append(data, 2000);
fs.fsync(h);
}
fs.close_writer(h);
fs.umount();
// remount and check that the log replays safely
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
fs.umount();
}
TEST(BlueFS, broken_unlink_fsync_seq) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
/*
* This reproduces a weird file op sequence (unlink+fsync) that Octopus
* RocksDB might issue to BlueFS when recycle_log_file_num setting is 0
* See https://tracker.ceph.com/issues/55636 for more details
*
*/
char buf[1048571]; // this is biggish, but intentionally not evenly aligned
for (unsigned i = 0; i < sizeof(buf); ++i) {
buf[i] = i;
}
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.mkdir("dir"));
ASSERT_EQ(0, fs.open_for_write("dir", "file", &h, false));
h->append(buf, sizeof(buf));
fs.flush(h);
h->append(buf, sizeof(buf));
fs.unlink("dir", "file");
fs.fsync(h);
fs.close_writer(h);
}
fs.umount();
// remount and check that the log replays safely
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
fs.umount();
}
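// For a range of content sizes: write and fsync a file, read it back,
// truncate it to zero with fsync, then remount and verify the zero size
// survived (broken previously, see https://tracker.ceph.com/issues/55307).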
TEST(BlueFS, truncate_fsync) {
uint64_t bdev_size = 128 * 1048576;
uint64_t block_size = 4096;
uint64_t reserved = 1048576;
TempBdev bdev{bdev_size};
uuid_d fsid;
const char* DIR_NAME="dir";
const char* FILE_NAME="file1";
size_t sizes[] = {3, 1024, 4096, 1024 * 4096};
for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
const size_t content_size= sizes[i];
const size_t read_size = p2roundup(content_size, size_t(block_size));
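// p2roundup rounds up to the next multiple of a power-of-two unit,
// e.g. p2roundup(size_t(3), size_t(4096)) == 4096; reads are issued in
// whole blocks, so we request read_size but expect content_size bytes back.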
const std::string content(content_size, 'x');
{
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, reserved));
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.mkdir("dir"));
ASSERT_EQ(0, fs.open_for_write(DIR_NAME, FILE_NAME, &h, false));
h->append(content.c_str(), content.length());
fs.fsync(h);
fs.close_writer(h);
}
{
BlueFS::FileReader *h;
ASSERT_EQ(0, fs.open_for_read(DIR_NAME, FILE_NAME, &h));
bufferlist bl;
ASSERT_EQ(content.length(), fs.read(h, 0, read_size, &bl, NULL));
ASSERT_EQ(0, strncmp(content.c_str(), bl.c_str(), content.length()));
delete h;
}
{
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(DIR_NAME, FILE_NAME, &h, true));
fs.truncate(h, 0);
fs.fsync(h);
fs.close_writer(h);
}
}
{
//this was broken due to https://tracker.ceph.com/issues/55307
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, reserved));
ASSERT_EQ(0, fs.mount());
BlueFS::FileReader *h;
ASSERT_EQ(0, fs.open_for_read(DIR_NAME, FILE_NAME, &h));
bufferlist bl;
ASSERT_EQ(0, fs.read(h, 0, read_size, &bl, NULL));
delete h;
fs.umount();
}
}
}
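// Run a create/delete workload on a shared 4 KiB-unit allocator with a
// nearly full DB device, forcing allocation fallbacks to the shared
// device; the fallback counter must fire and the file count must
// survive a remount.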
TEST(BlueFS, test_shared_alloc) {
uint64_t size = 1048576 * 128;
TempBdev bdev_slow{size};
uint64_t size_db = 1048576 * 8;
TempBdev bdev_db{size_db};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_shared_alloc_size", "1048576");
bluefs_shared_alloc_context_t shared_alloc;
uint64_t shared_alloc_unit = 4096;
shared_alloc.set(
Allocator::create(g_ceph_context, g_ceph_context->_conf->bluefs_allocator,
size, shared_alloc_unit, 0, 0, "test shared allocator"),
shared_alloc_unit);
shared_alloc.a->init_add_free(0, size);
BlueFS fs(g_ceph_context);
// DB device is fully utilized
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_db.path, false, size_db - 0x1000));
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_SLOW, bdev_slow.path, false, 0,
&shared_alloc));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
for (int i=0; i<10; i++) {
string dir = "dir.";
dir.append(to_string(i));
ASSERT_EQ(0, fs.mkdir(dir));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(4096);
bufferptr bp = buffer::claim_char(4096, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
fs.fsync(h);
}
}
}
{
for (int i=0; i<10; i+=2) {
string dir = "dir.";
dir.append(to_string(i));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
fs.unlink(dir, file);
fs.sync_metadata(false);
}
ASSERT_EQ(0, fs.rmdir(dir));
fs.sync_metadata(false);
}
}
fs.compact_log();
auto *logger = fs.get_perf_counters();
ASSERT_NE(logger->get(l_bluefs_alloc_shared_dev_fallbacks), 0);
auto num_files = logger->get(l_bluefs_num_files);
fs.umount();
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(num_files, logger->get(l_bluefs_num_files));
fs.umount();
}
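// Same workload over heavily fragmented shared free space, so
// allocations must fall back to units smaller than
// bluefs_shared_alloc_size; the size-fallback counter must fire.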
TEST(BlueFS, test_shared_alloc_sparse) {
uint64_t size = 1048576 * 128 * 2;
uint64_t main_unit = 4096;
uint64_t bluefs_alloc_unit = 1048576;
TempBdev bdev_slow{size};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_shared_alloc_size",
stringify(bluefs_alloc_unit).c_str());
bluefs_shared_alloc_context_t shared_alloc;
shared_alloc.set(
Allocator::create(g_ceph_context, g_ceph_context->_conf->bluefs_allocator,
size, main_unit, 0, 0, "test shared allocator"),
main_unit);
// prepare sparse free space, but keep a contiguous chunk at the
// beginning so the initial log's fnode fits into the superblock;
// we don't have any tricks to deal with sparse allocations
// (and hence a long fnode) at mkfs time
shared_alloc.a->init_add_free(bluefs_alloc_unit, 4 * bluefs_alloc_unit);
for(uint64_t i = 5 * bluefs_alloc_unit; i < size; i += 2 * main_unit) {
shared_alloc.a->init_add_free(i, main_unit);
}
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_slow.path, false, 0,
&shared_alloc));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
for (int i=0; i<10; i++) {
string dir = "dir.";
dir.append(to_string(i));
ASSERT_EQ(0, fs.mkdir(dir));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(4096);
bufferptr bp = buffer::claim_char(4096, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
fs.fsync(h);
}
}
}
{
for (int i=0; i<10; i+=2) {
string dir = "dir.";
dir.append(to_string(i));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
fs.unlink(dir, file);
fs.sync_metadata(false);
}
ASSERT_EQ(0, fs.rmdir(dir));
fs.sync_metadata(false);
}
}
fs.compact_log();
auto *logger = fs.get_perf_counters();
ASSERT_NE(logger->get(l_bluefs_alloc_shared_size_fallbacks), 0);
auto num_files = logger->get(l_bluefs_num_files);
fs.umount();
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(num_files, logger->get(l_bluefs_num_files));
fs.umount();
}
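// With bluefs_shared_alloc_size equal to the 4 KiB device unit there is
// nothing to fall back from, so both fallback counters must stay zero.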
TEST(BlueFS, test_4k_shared_alloc) {
uint64_t size = 1048576 * 128 * 2;
uint64_t main_unit = 4096;
uint64_t bluefs_alloc_unit = main_unit;
TempBdev bdev_slow{size};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_shared_alloc_size",
stringify(bluefs_alloc_unit).c_str());
bluefs_shared_alloc_context_t shared_alloc;
shared_alloc.set(
Allocator::create(g_ceph_context, g_ceph_context->_conf->bluefs_allocator,
size, main_unit, 0, 0, "test shared allocator"),
main_unit);
shared_alloc.a->init_add_free(bluefs_alloc_unit, size - bluefs_alloc_unit);
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev_slow.path, false, 0,
&shared_alloc));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
for (int i=0; i<10; i++) {
string dir = "dir.";
dir.append(to_string(i));
ASSERT_EQ(0, fs.mkdir(dir));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
auto sg = make_scope_guard([&fs, h] { fs.close_writer(h); });
bufferlist bl;
std::unique_ptr<char[]> buf = gen_buffer(4096);
bufferptr bp = buffer::claim_char(4096, buf.get());
bl.push_back(bp);
h->append(bl.c_str(), bl.length());
fs.fsync(h);
}
}
}
{
for (int i=0; i<10; i+=2) {
string dir = "dir.";
dir.append(to_string(i));
for (int j=0; j<10; j++) {
string file = "file.";
file.append(to_string(j));
fs.unlink(dir, file);
fs.sync_metadata(false);
}
ASSERT_EQ(0, fs.rmdir(dir));
fs.sync_metadata(false);
}
}
fs.compact_log();
auto *logger = fs.get_perf_counters();
ASSERT_EQ(logger->get(l_bluefs_alloc_shared_dev_fallbacks), 0);
ASSERT_EQ(logger->get(l_bluefs_alloc_shared_size_fallbacks), 0);
auto num_files = logger->get(l_bluefs_num_files);
fs.umount();
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(num_files, logger->get(l_bluefs_num_files));
fs.umount();
}
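// Worker for the test below: keep creating (and immediately closing)
// files in a per-thread directory until told to stop, so directory and
// file metadata churns while the main thread triggers log compaction.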
void create_files(BlueFS &fs,
atomic_bool& stop_creating,
atomic_bool& started_creating)
{
uint32_t i = 0;
stringstream ss;
string dir = "dir.";
ss << std::this_thread::get_id();
dir.append(ss.str());
dir.append(".");
dir.append(to_string(i));
ASSERT_EQ(0, fs.mkdir(dir));
while (!stop_creating.load()) {
string file = "file.";
file.append(to_string(i));
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.open_for_write(dir, file, &h, false));
ASSERT_NE(nullptr, h);
fs.close_writer(h);
i++;
started_creating = true;
}
}
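// Regression test for https://tracker.ceph.com/issues/56210 (per the
// test name): file creation racing with fsync-triggered async log
// compaction (forced by zeroing the compaction thresholds) must leave a
// log that still mounts.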
TEST(BlueFS, test_concurrent_dir_link_and_compact_log_56210) {
uint64_t size = 1048576 * 128;
TempBdev bdev{size};
ConfSaver conf(g_ceph_context->_conf);
conf.SetVal("bluefs_alloc_size", "65536");
conf.SetVal("bluefs_compact_log_sync", "false");
// make sure fsync always triggers log compaction
conf.SetVal("bluefs_log_compact_min_ratio", "0");
conf.SetVal("bluefs_log_compact_min_size", "0");
conf.ApplyChanges();
for (int i=0; i<10; ++i) {
BlueFS fs(g_ceph_context);
ASSERT_EQ(0, fs.add_block_device(BlueFS::BDEV_DB, bdev.path, false, 1048576));
uuid_d fsid;
ASSERT_EQ(0, fs.mkfs(fsid, { BlueFS::BDEV_DB, false, false }));
ASSERT_EQ(0, fs.mount());
ASSERT_EQ(0, fs.maybe_verify_layout({ BlueFS::BDEV_DB, false, false }));
{
atomic_bool stop_creating{false};
atomic_bool started_creating{false};
std::thread create_thread;
create_thread = std::thread(create_files,
std::ref(fs),
std::ref(stop_creating),
std::ref(started_creating));
while (!started_creating.load()) {
}
BlueFS::FileWriter *h;
ASSERT_EQ(0, fs.mkdir("foo"));
ASSERT_EQ(0, fs.open_for_write("foo", "bar", &h, false));
fs.fsync(h);
fs.close_writer(h);
stop_creating = true;
do_join(create_thread);
fs.umount(true); //do not compact on exit!
ASSERT_EQ(0, fs.mount());
fs.umount();
}
}
}
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
map<string,string> defaults = {
{ "debug_bluefs", "1/20" },
{ "debug_bdev", "1/20" }
};
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
g_ceph_context->_conf.set_val(
"enable_experimental_unrecoverable_data_corrupting_features",
"*");
g_ceph_context->_conf.apply_changes(nullptr);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
ceph-main/src/test/objectstore/test_bluestore_types.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/types.h"
#include "os/bluestore/bluestore_types.h"
#include "gtest/gtest.h"
#include "include/stringify.h"
#include "common/ceph_time.h"
#include "os/bluestore/BlueStore.h"
#include "os/bluestore/simple_bitmap.h"
#include "os/bluestore/AvlAllocator.h"
#include "common/ceph_argparse.h"
#include "global/global_init.h"
#include "global/global_context.h"
#include "perfglue/heap_profiler.h"
#include <sstream>
#define _STR(x) #x
#define STRINGIFY(x) _STR(x)
using namespace std;
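// Not a functional test: prints sizeof() for the core BlueStore
// in-memory types so size regressions are easy to spot in test output.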
TEST(bluestore, sizeof) {
#define P(t) cout << STRINGIFY(t) << "\t" << sizeof(t) << std::endl
P(BlueStore::Onode);
P(BlueStore::Extent);
P(BlueStore::Blob);
P(BlueStore::SharedBlob);
P(BlueStore::ExtentMap);
P(BlueStore::extent_map_t);
P(BlueStore::blob_map_t);
P(BlueStore::BufferSpace);
P(BlueStore::Buffer);
P(bluestore_onode_t);
P(bluestore_blob_t);
P(PExtentVector);
P(ghobject_t);
P(bluestore_shared_blob_t);
P(bluestore_extent_ref_map_t);
P(bluestore_extent_ref_map_t::record_t);
P(bluestore_blob_use_tracker_t);
P(std::atomic_int);
P(BlueStore::SharedBlobRef);
P(boost::intrusive::set_base_hook<>);
P(boost::intrusive::unordered_set_base_hook<>);
P(bufferlist);
P(bufferptr);
P(range_seg_t);
P(sb_info_t);
P(SimpleBitmap);
cout << "map<uint64_t,uint64_t>\t" << sizeof(map<uint64_t,uint64_t>) << std::endl;
cout << "map<char,char>\t" << sizeof(map<char,char>) << std::endl;
}
void dump_mempools()
{
ostringstream ostr;
auto f = Formatter::create_unique("json-pretty", "json-pretty", "json-pretty");
ostr << "Mempools: ";
f->open_object_section("mempools");
mempool::dump(f.get());
f->close_section();
f->flush(ostr);
cout << ostr.str() << std::endl;
}
/*void get_mempool_stats(uint64_t* total_bytes, uint64_t* total_items)
{
uint64_t meta_allocated = mempool::bluestore_cache_meta::allocated_bytes();
uint64_t onode_allocated = mempool::bluestore_cache_onode::allocated_bytes();
uint64_t other_allocated = mempool::bluestore_cache_other::allocated_bytes();
uint64_t meta_items = mempool::bluestore_cache_meta::allocated_items();
uint64_t onode_items = mempool::bluestore_cache_onode::allocated_items();
uint64_t other_items = mempool::bluestore_cache_other::allocated_items();
cout << "meta(" << meta_allocated << "/" << meta_items
<< ") onode(" << onode_allocated << "/" << onode_items
<< ") other(" << other_allocated << "/" << other_items
<< ")" << std::endl;
*total_bytes = meta_allocated + onode_allocated + other_allocated;
*total_items = onode_items;
}*/
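// Basic coverage for the space-efficient shared-blob info map: lookups
// plus in-order and out-of-order insertion via both add_or_adopt() and
// add_maybe_stray().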
TEST(sb_info_space_efficient_map_t, basic) {
sb_info_space_efficient_map_t sb_info;
const size_t num_shared = 1000;
for (size_t i = 0; i < num_shared; i += 2) {
auto& sbi = sb_info.add_maybe_stray(i);
sbi.pool_id = i;
}
ASSERT_TRUE(sb_info.find(0) != sb_info.end());
ASSERT_TRUE(sb_info.find(1) == sb_info.end());
ASSERT_TRUE(sb_info.find(2) != sb_info.end());
ASSERT_TRUE(sb_info.find(4)->pool_id == 4);
ASSERT_TRUE(sb_info.find(num_shared) == sb_info.end());
// ordered insertion
sb_info.add_or_adopt(num_shared).pool_id = num_shared;
ASSERT_TRUE(sb_info.find(num_shared) != sb_info.end());
ASSERT_TRUE(sb_info.find(num_shared)->pool_id == num_shared);
// out of order insertion
sb_info.add_or_adopt(1).pool_id = 1;
ASSERT_TRUE(sb_info.find(1) != sb_info.end());
ASSERT_TRUE(sb_info.find(1)->pool_id == 1);
// ordered insertion
sb_info.add_maybe_stray(num_shared + 1).pool_id = num_shared + 1;
ASSERT_TRUE(sb_info.find(num_shared + 1) != sb_info.end());
ASSERT_TRUE(sb_info.find(num_shared + 1)->pool_id == num_shared + 1);
// out of order insertion
sb_info.add_maybe_stray(105).pool_id = 105;
ASSERT_TRUE(sb_info.find(105) != sb_info.end());
ASSERT_TRUE(sb_info.find(105)->pool_id == 105);
}
TEST(sb_info_space_efficient_map_t, size) {
const size_t num_shared = 10000000;
sb_info_space_efficient_map_t sb_info;
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard* oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard* bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
for (size_t i = 0; i < num_shared; i++) {
auto& sbi = sb_info.add_or_adopt(i);
// primarily to silence the 'unused' warning
ceph_assert(sbi.pool_id == sb_info_t::INVALID_POOL_ID);
}
dump_mempools();
}
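// get() on adjacent same-refcount ranges should coalesce them into a
// single ref_map entry; disjoint ranges stay separate until the gap
// between them is filled.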
TEST(bluestore_extent_ref_map_t, add)
{
bluestore_extent_ref_map_t m;
m.get(10, 10);
ASSERT_EQ(1u, m.ref_map.size());
cout << m << std::endl;
m.get(20, 10);
cout << m << std::endl;
ASSERT_EQ(1u, m.ref_map.size());
ASSERT_EQ(20u, m.ref_map[10].length);
ASSERT_EQ(1u, m.ref_map[10].refs);
m.get(40, 10);
cout << m << std::endl;
ASSERT_EQ(2u, m.ref_map.size());
m.get(30, 10);
cout << m << std::endl;
ASSERT_EQ(1u, m.ref_map.size());
m.get(50, 10);
cout << m << std::endl;
ASSERT_EQ(1u, m.ref_map.size());
m.get(5, 5);
cout << m << std::endl;
ASSERT_EQ(1u, m.ref_map.size());
}
TEST(bluestore_extent_ref_map_t, get)
{
bluestore_extent_ref_map_t m;
m.get(00, 30);
cout << m << std::endl;
m.get(10, 10);
cout << m << std::endl;
ASSERT_EQ(3u, m.ref_map.size());
ASSERT_EQ(10u, m.ref_map[0].length);
ASSERT_EQ(1u, m.ref_map[0].refs);
ASSERT_EQ(10u, m.ref_map[10].length);
ASSERT_EQ(2u, m.ref_map[10].refs);
ASSERT_EQ(10u, m.ref_map[20].length);
ASSERT_EQ(1u, m.ref_map[20].refs);
m.get(20, 5);
cout << m << std::endl;
ASSERT_EQ(3u, m.ref_map.size());
ASSERT_EQ(15u, m.ref_map[10].length);
ASSERT_EQ(2u, m.ref_map[10].refs);
ASSERT_EQ(5u, m.ref_map[25].length);
ASSERT_EQ(1u, m.ref_map[25].refs);
m.get(5, 20);
cout << m << std::endl;
ASSERT_EQ(4u, m.ref_map.size());
ASSERT_EQ(5u, m.ref_map[0].length);
ASSERT_EQ(1u, m.ref_map[0].refs);
ASSERT_EQ(5u, m.ref_map[5].length);
ASSERT_EQ(2u, m.ref_map[5].refs);
ASSERT_EQ(15u, m.ref_map[10].length);
ASSERT_EQ(3u, m.ref_map[10].refs);
ASSERT_EQ(5u, m.ref_map[25].length);
ASSERT_EQ(1u, m.ref_map[25].refs);
m.get(25, 3);
cout << m << std::endl;
ASSERT_EQ(5u, m.ref_map.size());
ASSERT_EQ(5u, m.ref_map[0].length);
ASSERT_EQ(1u, m.ref_map[0].refs);
ASSERT_EQ(5u, m.ref_map[5].length);
ASSERT_EQ(2u, m.ref_map[5].refs);
ASSERT_EQ(15u, m.ref_map[10].length);
ASSERT_EQ(3u, m.ref_map[10].refs);
ASSERT_EQ(3u, m.ref_map[25].length);
ASSERT_EQ(2u, m.ref_map[25].refs);
ASSERT_EQ(2u, m.ref_map[28].length);
ASSERT_EQ(1u, m.ref_map[28].refs);
}
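// put() should split ranges as refcounts drop and report the newly
// unreferenced extents in r, along with whether the blob may now be
// unshared.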
TEST(bluestore_extent_ref_map_t, put)
{
bluestore_extent_ref_map_t m;
PExtentVector r;
bool maybe_unshared = false;
m.get(10, 30);
maybe_unshared = true;
m.put(10, 30, &r, &maybe_unshared);
cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
ASSERT_EQ(0u, m.ref_map.size());
ASSERT_EQ(1u, r.size());
ASSERT_EQ(10u, r[0].offset);
ASSERT_EQ(30u, r[0].length);
ASSERT_TRUE(maybe_unshared);
r.clear();
m.get(10, 30);
m.get(20, 10);
maybe_unshared = true;
m.put(10, 30, &r, &maybe_unshared);
cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
ASSERT_EQ(1u, m.ref_map.size());
ASSERT_EQ(10u, m.ref_map[20].length);
ASSERT_EQ(1u, m.ref_map[20].refs);
ASSERT_EQ(2u, r.size());
ASSERT_EQ(10u, r[0].offset);
ASSERT_EQ(10u, r[0].length);
ASSERT_EQ(30u, r[1].offset);
ASSERT_EQ(10u, r[1].length);
ASSERT_TRUE(maybe_unshared);
r.clear();
m.get(30, 10);
m.get(30, 10);
maybe_unshared = true;
m.put(20, 15, &r, &maybe_unshared);
cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
ASSERT_EQ(2u, m.ref_map.size());
ASSERT_EQ(5u, m.ref_map[30].length);
ASSERT_EQ(1u, m.ref_map[30].refs);
ASSERT_EQ(5u, m.ref_map[35].length);
ASSERT_EQ(2u, m.ref_map[35].refs);
ASSERT_EQ(1u, r.size());
ASSERT_EQ(20u, r[0].offset);
ASSERT_EQ(10u, r[0].length);
ASSERT_FALSE(maybe_unshared);
r.clear();
maybe_unshared = true;
m.put(33, 5, &r, &maybe_unshared);
cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
ASSERT_EQ(3u, m.ref_map.size());
ASSERT_EQ(3u, m.ref_map[30].length);
ASSERT_EQ(1u, m.ref_map[30].refs);
ASSERT_EQ(3u, m.ref_map[35].length);
ASSERT_EQ(1u, m.ref_map[35].refs);
ASSERT_EQ(2u, m.ref_map[38].length);
ASSERT_EQ(2u, m.ref_map[38].refs);
ASSERT_EQ(1u, r.size());
ASSERT_EQ(33u, r[0].offset);
ASSERT_EQ(2u, r[0].length);
ASSERT_FALSE(maybe_unshared);
r.clear();
maybe_unshared = true;
m.put(38, 2, &r, &maybe_unshared);
cout << m << " " << r << " " << (int)maybe_unshared << std::endl;
ASSERT_TRUE(maybe_unshared);
}
TEST(bluestore_extent_ref_map_t, contains)
{
bluestore_extent_ref_map_t m;
m.get(10, 30);
ASSERT_TRUE(m.contains(10, 30));
ASSERT_TRUE(m.contains(10, 10));
ASSERT_TRUE(m.contains(30, 10));
ASSERT_FALSE(m.contains(0, 10));
ASSERT_FALSE(m.contains(0, 20));
ASSERT_FALSE(m.contains(0, 100));
ASSERT_FALSE(m.contains(40, 10));
ASSERT_FALSE(m.contains(30, 11));
m.get(40, 10);
m.get(40, 10);
ASSERT_TRUE(m.contains(30, 11));
ASSERT_TRUE(m.contains(30, 20));
ASSERT_TRUE(m.contains(10, 40));
ASSERT_FALSE(m.contains(0, 50));
ASSERT_FALSE(m.contains(40, 20));
m.get(60, 100);
ASSERT_TRUE(m.contains(60, 10));
ASSERT_TRUE(m.contains(40, 10));
ASSERT_FALSE(m.contains(40, 11));
ASSERT_FALSE(m.contains(40, 20));
ASSERT_FALSE(m.contains(40, 30));
ASSERT_FALSE(m.contains(40, 3000));
ASSERT_FALSE(m.contains(4000, 30));
}
TEST(bluestore_extent_ref_map_t, intersects)
{
bluestore_extent_ref_map_t m;
m.get(10, 30);
ASSERT_TRUE(m.intersects(10, 30));
ASSERT_TRUE(m.intersects(0, 11));
ASSERT_TRUE(m.intersects(10, 40));
ASSERT_TRUE(m.intersects(15, 40));
ASSERT_FALSE(m.intersects(0, 10));
ASSERT_FALSE(m.intersects(0, 5));
ASSERT_FALSE(m.intersects(40, 20));
ASSERT_FALSE(m.intersects(41, 20));
m.get(40, 10);
m.get(40, 10);
ASSERT_TRUE(m.intersects(0, 100));
ASSERT_TRUE(m.intersects(10, 35));
ASSERT_TRUE(m.intersects(45, 10));
ASSERT_FALSE(m.intersects(50, 5));
m.get(60, 100);
ASSERT_TRUE(m.intersects(45, 10));
ASSERT_TRUE(m.intersects(55, 10));
ASSERT_TRUE(m.intersects(50, 11));
ASSERT_FALSE(m.intersects(50, 10));
ASSERT_FALSE(m.intersects(51, 9));
ASSERT_FALSE(m.intersects(55, 1));
}
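// For every checksum algorithm: checksum a blob's data, then verify that
// matching data passes while corrupted or misaligned data fails with the
// expected bad offset.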
TEST(bluestore_blob_t, calc_csum)
{
bufferlist bl;
bl.append("asdfghjkqwertyuizxcvbnm,");
bufferlist bl2;
bl2.append("xxxxXXXXyyyyYYYYzzzzZZZZ");
bufferlist f;
f.substr_of(bl, 0, 8);
bufferlist m;
m.substr_of(bl, 8, 8);
bufferlist e;
e.substr_of(bl, 16, 8);
bufferlist n;
n.append("12345678");
for (unsigned csum_type = Checksummer::CSUM_NONE + 1;
csum_type < Checksummer::CSUM_MAX;
++csum_type) {
cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
<< std::endl;
bluestore_blob_t b;
int bad_off;
uint64_t bad_csum;
ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
b.init_csum(csum_type, 3, 24);
cout << " value size " << b.get_csum_value_size() << std::endl;
b.calc_csum(0, bl);
ASSERT_EQ(0, b.verify_csum(0, bl, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
ASSERT_EQ(-1, b.verify_csum(0, bl2, &bad_off, &bad_csum));
ASSERT_EQ(0, bad_off);
ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
ASSERT_EQ(-1, b.verify_csum(8, f, &bad_off, &bad_csum));
ASSERT_EQ(8, bad_off);
ASSERT_EQ(-1, b.verify_csum(16, f, &bad_off, &bad_csum));
ASSERT_EQ(16, bad_off);
ASSERT_EQ(-1, b.verify_csum(0, m, &bad_off, &bad_csum));
ASSERT_EQ(0, bad_off);
ASSERT_EQ(0, b.verify_csum(8, m, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
ASSERT_EQ(-1, b.verify_csum(16, m, &bad_off, &bad_csum));
ASSERT_EQ(16, bad_off);
ASSERT_EQ(-1, b.verify_csum(0, e, &bad_off, &bad_csum));
ASSERT_EQ(0, bad_off);
ASSERT_EQ(-1, b.verify_csum(8, e, &bad_off, &bad_csum));
ASSERT_EQ(8, bad_off);
ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
b.calc_csum(8, n);
ASSERT_EQ(0, b.verify_csum(0, f, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
ASSERT_EQ(0, b.verify_csum(8, n, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
ASSERT_EQ(0, b.verify_csum(16, e, &bad_off, &bad_csum));
ASSERT_EQ(-1, bad_off);
ASSERT_EQ(-1, b.verify_csum(0, bl, &bad_off, &bad_csum));
ASSERT_EQ(8, bad_off);
}
}
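// Micro-benchmark, not a correctness test: prints calc_csum() throughput
// in MB/sec for each checksum type over a 10 MiB buffer.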
TEST(bluestore_blob_t, csum_bench)
{
bufferlist bl;
bufferptr bp(10485760);
for (char *a = bp.c_str(); a < bp.c_str() + bp.length(); ++a)
*a = (unsigned long)a & 0xff;
bl.append(bp);
int count = 256;
for (unsigned csum_type = 1;
csum_type < Checksummer::CSUM_MAX;
++csum_type) {
bluestore_blob_t b;
b.init_csum(csum_type, 12, bl.length());
ceph::mono_clock::time_point start = ceph::mono_clock::now();
for (int i = 0; i<count; ++i) {
b.calc_csum(0, bl);
}
ceph::mono_clock::time_point end = ceph::mono_clock::now();
auto dur = std::chrono::duration_cast<ceph::timespan>(end - start);
double mbsec = (double)count * (double)bl.length() / 1000000.0 / (double)dur.count() * 1000000000.0;
cout << "csum_type " << Checksummer::get_csum_type_string(csum_type)
<< ", " << dur << " seconds, "
<< mbsec << " MB/sec" << std::endl;
}
}
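// Exercise Blob::put_ref() across assorted extent layouts: physical
// extents are released only once whole min_alloc_size-aligned chunks
// become unreferenced, leaving invalid placeholder extents behind.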
TEST(Blob, put_ref)
{
{
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Blob b;
b.shared_blob = new BlueStore::SharedBlob(coll.get());
b.dirty_blob().allocated_test(bluestore_pextent_t(0x40715000, 0x2000));
b.dirty_blob().allocated_test(
bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x8000));
b.dirty_blob().allocated_test(bluestore_pextent_t(0x4071f000, 0x5000));
b.get_ref(coll.get(), 0, 0x1200);
b.get_ref(coll.get(), 0xae00, 0x4200);
ASSERT_EQ(0x5400u, b.get_referenced_bytes());
cout << b << std::endl;
PExtentVector r;
ASSERT_FALSE(b.put_ref(coll.get(), 0, 0x1200, &r));
ASSERT_EQ(0x4200u, b.get_referenced_bytes());
cout << " r " << r << std::endl;
cout << b << std::endl;
r.clear();
ASSERT_TRUE(b.put_ref(coll.get(), 0xae00, 0x4200, &r));
ASSERT_EQ(0u, b.get_referenced_bytes());
cout << " r " << r << std::endl;
cout << b << std::endl;
}
unsigned mas = 4096;
BlueStore store(g_ceph_context, "", 8192);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(0, mas * 2));
B.get_ref(coll.get(), 0, mas*2);
ASSERT_EQ(mas * 2, B.get_referenced_bytes());
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_TRUE(B.put_ref(coll.get(), 0, mas*2, &r));
ASSERT_EQ(0u, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_FALSE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(0, mas));
ASSERT_FALSE(b.is_allocated(mas, 0));
ASSERT_FALSE(b.get_extents()[0].is_valid());
ASSERT_EQ(mas*2, b.get_extents()[0].length);
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(123, mas * 2));
B.get_ref(coll.get(), 0, mas*2);
ASSERT_EQ(mas * 2, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
ASSERT_EQ(mas, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(0u, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(123u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_FALSE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.get_extents()[0].is_valid());
ASSERT_EQ(mas*2, b.get_extents()[0].length);
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas));
b.allocated_test(bluestore_pextent_t(2, mas));
b.allocated_test(bluestore_pextent_t(3, mas));
b.allocated_test(bluestore_pextent_t(4, mas));
B.get_ref(coll.get(), 0, mas*4);
ASSERT_EQ(mas * 4, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 3, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*4));
ASSERT_TRUE(b.is_allocated(mas, mas));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
ASSERT_EQ(mas * 2, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(mas*2, mas));
ASSERT_TRUE(b.is_allocated(0, mas*4));
ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
ASSERT_EQ(mas, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(2u, r.size());
ASSERT_EQ(3u, r[0].offset);
ASSERT_EQ(mas, r[0].length);
ASSERT_EQ(4u, r[1].offset);
ASSERT_EQ(mas, r[1].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_TRUE(b.get_extents()[1].is_valid());
ASSERT_FALSE(b.get_extents()[2].is_valid());
ASSERT_EQ(3u, b.get_extents().size());
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas));
b.allocated_test(bluestore_pextent_t(2, mas));
b.allocated_test(bluestore_pextent_t(3, mas));
b.allocated_test(bluestore_pextent_t(4, mas));
b.allocated_test(bluestore_pextent_t(5, mas));
b.allocated_test(bluestore_pextent_t(6, mas));
B.get_ref(coll.get(), 0, mas*6);
ASSERT_EQ(mas * 6, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 5, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*6));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
ASSERT_EQ(mas * 4, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*6));
ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
ASSERT_EQ(mas * 3, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(2u, r.size());
ASSERT_EQ(3u, r[0].offset);
ASSERT_EQ(mas, r[0].length);
ASSERT_EQ(4u, r[1].offset);
ASSERT_EQ(mas, r[1].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
ASSERT_EQ(5u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_TRUE(b.get_extents()[1].is_valid());
ASSERT_FALSE(b.get_extents()[2].is_valid());
ASSERT_TRUE(b.get_extents()[3].is_valid());
ASSERT_TRUE(b.get_extents()[4].is_valid());
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas * 6));
B.get_ref(coll.get(), 0, mas*6);
ASSERT_EQ(mas * 6, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 5, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*6));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
ASSERT_EQ(mas * 4, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*6));
ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas, &r));
ASSERT_EQ(mas * 3, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x2001u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*2));
ASSERT_TRUE(b.is_allocated(mas*4, mas*2));
ASSERT_EQ(3u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_FALSE(b.get_extents()[1].is_valid());
ASSERT_TRUE(b.get_extents()[2].is_valid());
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas * 4));
b.allocated_test(bluestore_pextent_t(2, mas * 4));
b.allocated_test(bluestore_pextent_t(3, mas * 4));
B.get_ref(coll.get(), 0, mas*12);
ASSERT_EQ(mas * 12, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 11, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*12));
ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
ASSERT_EQ(mas * 10, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*12));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
ASSERT_EQ(mas * 3, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(3u, r.size());
ASSERT_EQ(0x2001u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(0x2u, r[1].offset);
ASSERT_EQ(mas*4, r[1].length);
ASSERT_EQ(0x3u, r[2].offset);
ASSERT_EQ(mas*2, r[2].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
ASSERT_EQ(3u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_FALSE(b.get_extents()[1].is_valid());
ASSERT_TRUE(b.get_extents()[2].is_valid());
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas * 4));
b.allocated_test(bluestore_pextent_t(2, mas * 4));
b.allocated_test(bluestore_pextent_t(3, mas * 4));
B.get_ref(coll.get(), 0, mas*12);
ASSERT_EQ(mas * 12, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 11, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*12));
ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
ASSERT_EQ(mas * 10, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*12));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
ASSERT_EQ(mas * 3, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(3u, r.size());
ASSERT_EQ(0x2001u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(0x2u, r[1].offset);
ASSERT_EQ(mas*4, r[1].length);
ASSERT_EQ(0x3u, r[2].offset);
ASSERT_EQ(mas*2, r[2].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
ASSERT_EQ(3u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_FALSE(b.get_extents()[1].is_valid());
ASSERT_TRUE(b.get_extents()[2].is_valid());
ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
ASSERT_EQ(mas * 2, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x1u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(2u, b.get_extents().size());
ASSERT_FALSE(b.get_extents()[0].is_valid());
ASSERT_TRUE(b.get_extents()[1].is_valid());
ASSERT_TRUE(B.put_ref(coll.get(), mas*10, mas*2, &r));
ASSERT_EQ(mas * 0, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x2003u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(1u, b.get_extents().size());
ASSERT_FALSE(b.get_extents()[0].is_valid());
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas * 4));
b.allocated_test(bluestore_pextent_t(2, mas * 4));
b.allocated_test(bluestore_pextent_t(3, mas * 4));
B.get_ref(coll.get(), 0, mas*12);
ASSERT_EQ(mas * 12, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 11, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*12));
ASSERT_FALSE(B.put_ref(coll.get(), mas*9, mas, &r));
ASSERT_EQ(mas * 10, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*12));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas*7, &r));
ASSERT_EQ(mas * 3, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(3u, r.size());
ASSERT_EQ(0x2001u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(0x2u, r[1].offset);
ASSERT_EQ(mas*4, r[1].length);
ASSERT_EQ(0x3u, r[2].offset);
ASSERT_EQ(mas*2, r[2].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*8));
ASSERT_TRUE(b.is_allocated(mas*10, mas*2));
ASSERT_EQ(3u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_FALSE(b.get_extents()[1].is_valid());
ASSERT_TRUE(b.get_extents()[2].is_valid());
ASSERT_FALSE(B.put_ref(coll.get(), mas*10, mas*2, &r));
ASSERT_EQ(mas * 1, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x2003u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(2u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_FALSE(b.get_extents()[1].is_valid());
ASSERT_TRUE(B.put_ref(coll.get(), 0, mas, &r));
ASSERT_EQ(mas * 0, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x1u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(1u, b.get_extents().size());
ASSERT_FALSE(b.get_extents()[0].is_valid());
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(1, mas * 8));
B.get_ref(coll.get(), 0, mas*8);
ASSERT_EQ(mas * 8, B.get_referenced_bytes());
ASSERT_FALSE(B.put_ref(coll.get(), 0, mas, &r));
ASSERT_EQ(mas * 7, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*8));
ASSERT_FALSE(B.put_ref(coll.get(), mas*7, mas, &r));
ASSERT_EQ(mas * 6, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*8));
ASSERT_FALSE(B.put_ref(coll.get(), mas*2, mas, &r));
ASSERT_EQ(mas * 5, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*8));
ASSERT_FALSE(B.put_ref(coll.get(), mas*3, mas*4, &r));
ASSERT_EQ(mas * 1, B.get_referenced_bytes());
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x2001u, r[0].offset);
ASSERT_EQ(mas*6, r[0].length);
ASSERT_TRUE(b.is_allocated(0, mas*2));
ASSERT_FALSE(b.is_allocated(mas*2, mas*6));
ASSERT_EQ(2u, b.get_extents().size());
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_FALSE(b.get_extents()[1].is_valid());
ASSERT_TRUE(B.put_ref(coll.get(), mas, mas, &r));
ASSERT_EQ(mas * 0, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x1u, r[0].offset);
ASSERT_EQ(mas*2, r[0].length);
ASSERT_EQ(1u, b.get_extents().size());
ASSERT_FALSE(b.get_extents()[0].is_valid());
}
// verify csum chunk size is factored in properly
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
PExtentVector r;
b.allocated_test(bluestore_pextent_t(0, mas*4));
b.init_csum(Checksummer::CSUM_CRC32C, 14, mas * 4);
B.get_ref(coll.get(), 0, mas*4);
ASSERT_EQ(mas * 4, B.get_referenced_bytes());
ASSERT_TRUE(b.is_allocated(0, mas*4));
ASSERT_FALSE(B.put_ref(coll.get(), 0, mas*3, &r));
ASSERT_EQ(mas * 1, B.get_referenced_bytes());
cout << "r " << r << " " << b << std::endl;
ASSERT_EQ(0u, r.size());
ASSERT_TRUE(b.is_allocated(0, mas*4));
ASSERT_TRUE(b.get_extents()[0].is_valid());
ASSERT_EQ(mas*4, b.get_extents()[0].length);
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
b.allocated_test(bluestore_pextent_t(0x40101000, 0x4000));
b.allocated_test(bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET,
0x13000));
b.allocated_test(bluestore_pextent_t(0x40118000, 0x7000));
B.get_ref(coll.get(), 0x0, 0x3800);
B.get_ref(coll.get(), 0x17c00, 0x6400);
ASSERT_EQ(0x3800u + 0x6400u, B.get_referenced_bytes());
b.set_flag(bluestore_blob_t::FLAG_SHARED);
b.init_csum(Checksummer::CSUM_CRC32C, 12, 0x1e000);
cout << "before: " << B << std::endl;
PExtentVector r;
ASSERT_FALSE(B.put_ref(coll.get(), 0x1800, 0x2000, &r));
ASSERT_EQ(0x3800u + 0x6400u - 0x2000u, B.get_referenced_bytes());
cout << "after: " << B << std::endl;
cout << "r " << r << std::endl;
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
b.allocated_test(bluestore_pextent_t(1, 0x5000));
b.allocated_test(bluestore_pextent_t(2, 0x5000));
B.get_ref(coll.get(), 0x0, 0xa000);
ASSERT_EQ(0xa000u, B.get_referenced_bytes());
cout << "before: " << B << std::endl;
PExtentVector r;
ASSERT_FALSE(B.put_ref(coll.get(), 0x8000, 0x2000, &r));
cout << "after: " << B << std::endl;
cout << "r " << r << std::endl;
ASSERT_EQ(0x8000u, B.get_referenced_bytes());
ASSERT_EQ(1u, r.size());
ASSERT_EQ(0x3002u, r[0].offset);
ASSERT_EQ(0x2000u, r[0].length);
}
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
b.allocated_test(bluestore_pextent_t(1, 0x7000));
b.allocated_test(bluestore_pextent_t(2, 0x7000));
B.get_ref(coll.get(), 0x0, 0xe000);
ASSERT_EQ(0xe000u, B.get_referenced_bytes());
cout << "before: " << B << std::endl;
PExtentVector r;
ASSERT_FALSE(B.put_ref(coll.get(), 0, 0xb000, &r));
ASSERT_EQ(0x3000u, B.get_referenced_bytes());
cout << "after: " << B << std::endl;
cout << "r " << r << std::endl;
ASSERT_EQ(0x3000u, B.get_referenced_bytes());
ASSERT_EQ(2u, r.size());
ASSERT_EQ(1u, r[0].offset);
ASSERT_EQ(0x7000u, r[0].length);
ASSERT_EQ(2u, r[1].offset);
ASSERT_EQ(0x3000u, r[1].length); // we have 0x1000 bytes less due to
// alignment caused by min_alloc_size = 0x2000
}
{
BlueStore store(g_ceph_context, "", 0x4000);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
bluestore_blob_t& b = B.dirty_blob();
b.allocated_test(bluestore_pextent_t(1, 0x5000));
b.allocated_test(bluestore_pextent_t(2, 0x7000));
B.get_ref(coll.get(), 0x0, 0xc000);
ASSERT_EQ(0xc000u, B.get_referenced_bytes());
cout << "before: " << B << std::endl;
PExtentVector r;
ASSERT_FALSE(B.put_ref(coll.get(), 0x2000, 0xa000, &r));
cout << "after: " << B << std::endl;
cout << "r " << r << std::endl;
ASSERT_EQ(0x2000u, B.get_referenced_bytes());
ASSERT_EQ(2u, r.size());
ASSERT_EQ(0x4001u, r[0].offset);
ASSERT_EQ(0x1000u, r[0].length);
ASSERT_EQ(2u, r[1].offset);
ASSERT_EQ(0x7000u, r[1].length);
ASSERT_EQ(1u, b.get_extents()[0].offset);
ASSERT_EQ(0x4000u, b.get_extents()[0].length);
}
}
TEST(bluestore_blob_t, can_split)
{
bluestore_blob_t a;
ASSERT_TRUE(a.can_split());
a.flags = bluestore_blob_t::FLAG_SHARED;
ASSERT_FALSE(a.can_split());
a.flags = bluestore_blob_t::FLAG_COMPRESSED;
ASSERT_FALSE(a.can_split());
a.flags = bluestore_blob_t::FLAG_HAS_UNUSED;
ASSERT_FALSE(a.can_split());
}
TEST(bluestore_blob_t, can_split_at)
{
bluestore_blob_t a;
a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
ASSERT_TRUE(a.can_split_at(0x1000));
ASSERT_TRUE(a.can_split_at(0x1800));
a.init_csum(Checksummer::CSUM_CRC32C, 12, 0x4000);
ASSERT_TRUE(a.can_split_at(0x1000));
ASSERT_TRUE(a.can_split_at(0x2000));
ASSERT_TRUE(a.can_split_at(0x3000));
ASSERT_FALSE(a.can_split_at(0x2800));
}
TEST(bluestore_blob_t, prune_tail)
{
bluestore_blob_t a;
a.allocated_test(bluestore_pextent_t(0x10000, 0x2000));
a.allocated_test(bluestore_pextent_t(0x20000, 0x2000));
ASSERT_FALSE(a.can_prune_tail());
a.allocated_test(
bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
ASSERT_TRUE(a.can_prune_tail());
a.prune_tail();
ASSERT_FALSE(a.can_prune_tail());
ASSERT_EQ(2u, a.get_extents().size());
ASSERT_EQ(0x4000u, a.get_logical_length());
a.allocated_test(
bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
a.init_csum(Checksummer::CSUM_CRC32C_8, 12, 0x6000);
ASSERT_EQ(6u, a.csum_data.length());
ASSERT_TRUE(a.can_prune_tail());
a.prune_tail();
ASSERT_FALSE(a.can_prune_tail());
ASSERT_EQ(2u, a.get_extents().size());
ASSERT_EQ(0x4000u, a.get_logical_length());
ASSERT_EQ(4u, a.csum_data.length());
bluestore_blob_t b;
b.allocated_test(
bluestore_pextent_t(bluestore_pextent_t::INVALID_OFFSET, 0x2000));
ASSERT_FALSE(b.can_prune_tail()); // an all-invalid blob has nothing to prune
}
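// Splitting a blob should divide both its physical extents and its
// per-chunk checksum data between the left and right halves.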
TEST(Blob, split)
{
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
{
BlueStore::Blob L, R;
L.shared_blob = new BlueStore::SharedBlob(coll.get());
R.shared_blob = new BlueStore::SharedBlob(coll.get());
L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x2000));
L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
L.get_ref(coll.get(), 0, 0x2000);
L.split(coll.get(), 0x1000, &R);
ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
ASSERT_EQ(4u, L.get_blob().csum_data.length());
ASSERT_EQ(1u, L.get_blob().get_extents().size());
ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
ASSERT_EQ(0x1000u, L.get_referenced_bytes());
ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
ASSERT_EQ(4u, R.get_blob().csum_data.length());
ASSERT_EQ(1u, R.get_blob().get_extents().size());
ASSERT_EQ(0x3000u, R.get_blob().get_extents().front().offset);
ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
ASSERT_EQ(0x1000u, R.get_referenced_bytes());
}
{
BlueStore::Blob L, R;
L.shared_blob = new BlueStore::SharedBlob(coll.get());
R.shared_blob = new BlueStore::SharedBlob(coll.get());
L.dirty_blob().allocated_test(bluestore_pextent_t(0x2000, 0x1000));
L.dirty_blob().allocated_test(bluestore_pextent_t(0x12000, 0x1000));
L.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
L.get_ref(coll.get(), 0, 0x1000);
L.get_ref(coll.get(), 0x1000, 0x1000);
L.split(coll.get(), 0x1000, &R);
ASSERT_EQ(0x1000u, L.get_blob().get_logical_length());
ASSERT_EQ(4u, L.get_blob().csum_data.length());
ASSERT_EQ(1u, L.get_blob().get_extents().size());
ASSERT_EQ(0x2000u, L.get_blob().get_extents().front().offset);
ASSERT_EQ(0x1000u, L.get_blob().get_extents().front().length);
ASSERT_EQ(0x1000u, L.get_referenced_bytes());
ASSERT_EQ(0x1000u, R.get_blob().get_logical_length());
ASSERT_EQ(4u, R.get_blob().csum_data.length());
ASSERT_EQ(1u, R.get_blob().get_extents().size());
ASSERT_EQ(0x12000u, R.get_blob().get_extents().front().offset);
ASSERT_EQ(0x1000u, R.get_blob().get_extents().front().length);
ASSERT_EQ(0x1000u, R.get_referenced_bytes());
}
}
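// Encode a blob in the legacy v1 format (ref map stored separately) and
// the v2 format (use tracker inline), then decode both and check the
// resulting use trackers agree.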
TEST(Blob, legacy_decode)
{
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
bufferlist bl, bl2;
{
BlueStore::Blob B;
B.shared_blob = new BlueStore::SharedBlob(coll.get());
B.dirty_blob().allocated_test(bluestore_pextent_t(0x1, 0x2000));
B.dirty_blob().init_csum(Checksummer::CSUM_CRC32C, 12, 0x2000);
B.get_ref(coll.get(), 0, 0xff0);
B.get_ref(coll.get(), 0x1fff, 1);
bluestore_extent_ref_map_t fake_ref_map;
fake_ref_map.get(0, 0xff0);
fake_ref_map.get(0x1fff, 1);
size_t bound = 0, bound2 = 0;
B.bound_encode(
bound,
1, /*struct_v*/
0, /*sbid*/
false);
fake_ref_map.bound_encode(bound);
B.bound_encode(
bound2,
2, /*struct_v*/
0, /*sbid*/
true);
{
auto app = bl.get_contiguous_appender(bound);
auto app2 = bl2.get_contiguous_appender(bound2);
B.encode(
app,
1, /*struct_v*/
0, /*sbid*/
false);
fake_ref_map.encode(app);
B.encode(
app2,
2, /*struct_v*/
0, /*sbid*/
true);
}
auto p = bl.front().begin_deep();
auto p2 = bl2.front().begin_deep();
BlueStore::Blob Bres, Bres2;
Bres.shared_blob = new BlueStore::SharedBlob(coll.get());
Bres2.shared_blob = new BlueStore::SharedBlob(coll.get());
uint64_t sbid, sbid2;
Bres.decode(
p,
1, /*struct_v*/
&sbid,
true,
coll.get());
Bres2.decode(
p2,
2, /*struct_v*/
&sbid2,
true,
coll.get());
ASSERT_EQ(0xff0u + 1u, Bres.get_blob_use_tracker().get_referenced_bytes());
ASSERT_EQ(0xff0u + 1u, Bres2.get_blob_use_tracker().get_referenced_bytes());
ASSERT_TRUE(Bres.get_blob_use_tracker().equal(Bres2.get_blob_use_tracker()));
}
}
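// seek_lextent(offset) should return the first extent that ends past the
// given logical offset: the extent containing it, or else the next one.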
TEST(ExtentMap, seek_lextent)
{
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Onode onode(coll.get(), ghobject_t(), "");
BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
BlueStore::BlobRef br(new BlueStore::Blob);
br->shared_blob = new BlueStore::SharedBlob(coll.get());
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(0));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(100));
em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, br));
auto a = em.find(100);
ASSERT_EQ(a, em.seek_lextent(0));
ASSERT_EQ(a, em.seek_lextent(99));
ASSERT_EQ(a, em.seek_lextent(100));
ASSERT_EQ(a, em.seek_lextent(101));
ASSERT_EQ(a, em.seek_lextent(199));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(200));
em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, br));
auto b = em.find(200);
ASSERT_EQ(a, em.seek_lextent(0));
ASSERT_EQ(a, em.seek_lextent(99));
ASSERT_EQ(a, em.seek_lextent(100));
ASSERT_EQ(a, em.seek_lextent(101));
ASSERT_EQ(a, em.seek_lextent(199));
ASSERT_EQ(b, em.seek_lextent(200));
ASSERT_EQ(b, em.seek_lextent(299));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(300));
em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, br));
auto d = em.find(400);
ASSERT_EQ(a, em.seek_lextent(0));
ASSERT_EQ(a, em.seek_lextent(99));
ASSERT_EQ(a, em.seek_lextent(100));
ASSERT_EQ(a, em.seek_lextent(101));
ASSERT_EQ(a, em.seek_lextent(199));
ASSERT_EQ(b, em.seek_lextent(200));
ASSERT_EQ(b, em.seek_lextent(299));
ASSERT_EQ(d, em.seek_lextent(300));
ASSERT_EQ(d, em.seek_lextent(399));
ASSERT_EQ(d, em.seek_lextent(400));
ASSERT_EQ(d, em.seek_lextent(499));
ASSERT_EQ(em.extent_map.end(), em.seek_lextent(500));
}
TEST(ExtentMap, has_any_lextents)
{
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Onode onode(coll.get(), ghobject_t(), "");
BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
BlueStore::BlobRef b(new BlueStore::Blob);
b->shared_blob = new BlueStore::SharedBlob(coll.get());
ASSERT_FALSE(em.has_any_lextents(0, 0));
ASSERT_FALSE(em.has_any_lextents(0, 1000));
ASSERT_FALSE(em.has_any_lextents(1000, 1000));
em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b));
ASSERT_FALSE(em.has_any_lextents(0, 50));
ASSERT_FALSE(em.has_any_lextents(0, 100));
ASSERT_FALSE(em.has_any_lextents(50, 50));
ASSERT_TRUE(em.has_any_lextents(50, 51));
ASSERT_TRUE(em.has_any_lextents(50, 100051));
ASSERT_TRUE(em.has_any_lextents(100, 100));
ASSERT_TRUE(em.has_any_lextents(100, 1));
ASSERT_TRUE(em.has_any_lextents(199, 1));
ASSERT_TRUE(em.has_any_lextents(199, 2));
ASSERT_FALSE(em.has_any_lextents(200, 2));
em.extent_map.insert(*new BlueStore::Extent(200, 0, 100, b));
ASSERT_TRUE(em.has_any_lextents(199, 1));
ASSERT_TRUE(em.has_any_lextents(199, 2));
ASSERT_TRUE(em.has_any_lextents(200, 2));
ASSERT_TRUE(em.has_any_lextents(200, 200));
ASSERT_TRUE(em.has_any_lextents(299, 1));
ASSERT_FALSE(em.has_any_lextents(300, 1));
em.extent_map.insert(*new BlueStore::Extent(400, 0, 100, b));
ASSERT_TRUE(em.has_any_lextents(0, 10000));
ASSERT_TRUE(em.has_any_lextents(199, 1));
ASSERT_FALSE(em.has_any_lextents(300, 1));
ASSERT_FALSE(em.has_any_lextents(300, 100));
ASSERT_FALSE(em.has_any_lextents(399, 1));
ASSERT_TRUE(em.has_any_lextents(400, 1));
ASSERT_TRUE(em.has_any_lextents(400, 100));
ASSERT_TRUE(em.has_any_lextents(400, 1000));
ASSERT_TRUE(em.has_any_lextents(499, 1000));
ASSERT_FALSE(em.has_any_lextents(500, 1000));
}
void erase_and_delete(BlueStore::ExtentMap& em, size_t v)
{
auto d = em.find(v);
ASSERT_NE(d, em.extent_map.end());
em.extent_map.erase(d);
delete &*d;
}
TEST(ExtentMap, compress_extent_map)
{
BlueStore store(g_ceph_context, "", 4096);
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Onode onode(coll.get(), ghobject_t(), "");
BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
BlueStore::BlobRef b1(new BlueStore::Blob);
BlueStore::BlobRef b2(new BlueStore::Blob);
BlueStore::BlobRef b3(new BlueStore::Blob);
b1->shared_blob = new BlueStore::SharedBlob(coll.get());
b2->shared_blob = new BlueStore::SharedBlob(coll.get());
b3->shared_blob = new BlueStore::SharedBlob(coll.get());
em.extent_map.insert(*new BlueStore::Extent(0, 0, 100, b1));
em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
ASSERT_EQ(0, em.compress_extent_map(0, 10000));
ASSERT_EQ(2u, em.extent_map.size());
em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
ASSERT_EQ(0, em.compress_extent_map(0, 0));
ASSERT_EQ(0, em.compress_extent_map(100000, 1000));
ASSERT_EQ(2, em.compress_extent_map(0, 100000));
ASSERT_EQ(2u, em.extent_map.size());
erase_and_delete(em, 100);
em.extent_map.insert(*new BlueStore::Extent(100, 0, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(200, 100, 100, b3));
em.extent_map.insert(*new BlueStore::Extent(300, 200, 100, b2));
ASSERT_EQ(0, em.compress_extent_map(0, 1));
ASSERT_EQ(0, em.compress_extent_map(0, 100000));
ASSERT_EQ(4u, em.extent_map.size());
em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(500, 500, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(600, 600, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(700, 0, 100, b1));
em.extent_map.insert(*new BlueStore::Extent(800, 0, 100, b3));
ASSERT_EQ(0, em.compress_extent_map(0, 99));
ASSERT_EQ(0, em.compress_extent_map(800, 1000));
ASSERT_EQ(2, em.compress_extent_map(100, 500));
ASSERT_EQ(7u, em.extent_map.size());
erase_and_delete(em, 300);
erase_and_delete(em, 500);
erase_and_delete(em, 700);
em.extent_map.insert(*new BlueStore::Extent(400, 300, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(500, 400, 100, b2));
em.extent_map.insert(*new BlueStore::Extent(700, 500, 100, b2));
ASSERT_EQ(1, em.compress_extent_map(0, 1000));
ASSERT_EQ(6u, em.extent_map.size());
}
void clear_and_dispose(BlueStore::old_extent_map_t& old_em)
{
auto oep = old_em.begin();
while (oep != old_em.end()) {
auto &lo = *oep;
oep = old_em.erase(oep);
delete &lo;
}
}
TEST(GarbageCollector, BasicTest)
{
BlueStore::OnodeCacheShard *oc = BlueStore::OnodeCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore::BufferCacheShard *bc = BlueStore::BufferCacheShard::create(
g_ceph_context, "lru", NULL);
BlueStore store(g_ceph_context, "", 4096);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Onode onode(coll.get(), ghobject_t(), "");
BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
BlueStore::old_extent_map_t old_extents;
/*
min_alloc_size = 4096
original disposition
extent1 <loffs = 100, boffs = 100, len = 10>
-> blob1<compressed, len_on_disk=4096, logical_len=8192>
extent2 <loffs = 200, boffs = 200, len = 10>
-> blob2<raw, len_on_disk=4096, llen=4096>
extent3 <loffs = 300, boffs = 300, len = 10>
-> blob1<compressed, len_on_disk=4096, llen=8192>
extent4 <loffs = 4096, boffs = 0, len = 10>
-> blob3<raw, len_on_disk=4096, llen=4096>
on write(300~100) resulted in
extent1 <loffs = 100, boffs = 100, len = 10>
-> blob1<compressed, len_on_disk=4096, logical_len=8192>
extent2 <loffs = 200, boffs = 200, len = 10>
-> blob2<raw, len_on_disk=4096, llen=4096>
extent3 <loffs = 300, boffs = 300, len = 100>
-> blob4<raw, len_on_disk=4096, llen=4096>
extent4 <loffs = 4096, boffs = 0, len = 10>
-> blob3<raw, len_on_disk=4096, llen=4096>
*/
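  // Note: estimate(offset, length, extent_map, old_extents, min_alloc_size)
  // returns the projected saving in min_alloc_size-sized allocation units;
  // the assertions on 'saving' below verify it for each scenario.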
{
BlueStore::GarbageCollector gc(g_ceph_context);
int64_t saving;
BlueStore::BlobRef b1(new BlueStore::Blob);
BlueStore::BlobRef b2(new BlueStore::Blob);
BlueStore::BlobRef b3(new BlueStore::Blob);
BlueStore::BlobRef b4(new BlueStore::Blob);
b1->shared_blob = new BlueStore::SharedBlob(coll.get());
b2->shared_blob = new BlueStore::SharedBlob(coll.get());
b3->shared_blob = new BlueStore::SharedBlob(coll.get());
b4->shared_blob = new BlueStore::SharedBlob(coll.get());
b1->dirty_blob().set_compressed(0x2000, 0x1000);
b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x1000));
b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x1000));
b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x1000));
b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
em.extent_map.insert(*new BlueStore::Extent(100, 100, 10, b1));
b1->get_ref(coll.get(), 100, 10);
em.extent_map.insert(*new BlueStore::Extent(200, 200, 10, b2));
b2->get_ref(coll.get(), 200, 10);
em.extent_map.insert(*new BlueStore::Extent(300, 300, 100, b4));
b4->get_ref(coll.get(), 300, 100);
em.extent_map.insert(*new BlueStore::Extent(4096, 0, 10, b3));
b3->get_ref(coll.get(), 0, 10);
old_extents.push_back(*new BlueStore::OldExtent(300, 300, 10, b1));
saving = gc.estimate(300, 100, em, old_extents, 4096);
ASSERT_EQ(saving, 1);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.num_intervals(), 1u);
{
auto it = to_collect.begin();
using p = decltype(*it);
auto v = p{100ul, 10ul};
ASSERT_EQ(*it, v);
}
em.clear();
clear_and_dispose(old_extents);
}
/*
original disposition
min_alloc_size = 0x10000
extent1 <loffs = 0, boffs = 0, len = 0x40000>
-> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
Write 0x8000~37000 resulted in the following extent map prior to GC
for the last write_small(0x30000~0xf000):
extent1 <loffs = 0, boffs = 0, len = 0x8000>
-> blob1<compressed, len_on_disk=0x20000, logical_len=0x40000>
extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
-> blob2<raw, len_on_disk=0x10000, llen=0x10000>
extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
-> blob3<raw, len_on_disk=0x20000, llen=0x20000>
extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
-> blob4<raw, len_on_disk=0x10000, llen=0x10000>
extent5 <loffs = 0x3f000, boffs = 0x3f000, len = 0x1000>
-> blob1<compressed, len_on_disk=0x20000, llen=0x40000>
*/
{
BlueStore store(g_ceph_context, "", 0x10000);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Onode onode(coll.get(), ghobject_t(), "");
BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
BlueStore::old_extent_map_t old_extents;
BlueStore::GarbageCollector gc(g_ceph_context);
int64_t saving;
BlueStore::BlobRef b1(new BlueStore::Blob);
BlueStore::BlobRef b2(new BlueStore::Blob);
BlueStore::BlobRef b3(new BlueStore::Blob);
BlueStore::BlobRef b4(new BlueStore::Blob);
b1->shared_blob = new BlueStore::SharedBlob(coll.get());
b2->shared_blob = new BlueStore::SharedBlob(coll.get());
b3->shared_blob = new BlueStore::SharedBlob(coll.get());
b4->shared_blob = new BlueStore::SharedBlob(coll.get());
b1->dirty_blob().set_compressed(0x40000, 0x20000);
b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x20000));
b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x10000));
em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b1));
b1->get_ref(coll.get(), 0, 0x8000);
em.extent_map.insert(
*new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
b2->get_ref(coll.get(), 0x8000, 0x8000);
em.extent_map.insert(
*new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
b3->get_ref(coll.get(), 0, 0x20000);
em.extent_map.insert(
*new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
b4->get_ref(coll.get(), 0, 0xf000);
em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x3f000, 0x1000, b1));
b1->get_ref(coll.get(), 0x3f000, 0x1000);
old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b1));
old_extents.push_back(
*new BlueStore::OldExtent(0x10000, 0x10000, 0x20000, b1));
old_extents.push_back(*new BlueStore::OldExtent(0x30000, 0x30000, 0xf000, b1));
saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
ASSERT_EQ(saving, 2);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.num_intervals(), 2u);
{
auto it1 = to_collect.begin();
auto it2 = ++to_collect.begin();
using p = decltype(*it1);
{
        auto v1 = p{0x0ul, 0x8000ul};
auto v2 = p{0x0ul, 0x8000ul};
ASSERT_TRUE(*it1 == v1 || *it2 == v2);
}
{
auto v1 = p{0x3f000ul, 0x1000ul};
auto v2 = p{0x3f000ul, 0x1000ul};
ASSERT_TRUE(*it1 == v1 || *it2 == v2);
}
}
em.clear();
clear_and_dispose(old_extents);
}
/*
original disposition
min_alloc_size = 0x1000
extent1 <loffs = 0, boffs = 0, len = 0x4000>
-> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
write 0x3000~4000 resulted in the following extent map
(future feature - suppose we can compress incoming write prior to
GC invocation)
extent1 <loffs = 0, boffs = 0, len = 0x4000>
-> blob1<compressed, len_on_disk=0x2000, logical_len=0x4000>
extent2 <loffs = 0x3000, boffs = 0, len = 0x4000>
-> blob2<compressed, len_on_disk=0x2000, llen=0x4000>
*/
{
BlueStore::GarbageCollector gc(g_ceph_context);
int64_t saving;
BlueStore::BlobRef b1(new BlueStore::Blob);
BlueStore::BlobRef b2(new BlueStore::Blob);
b1->shared_blob = new BlueStore::SharedBlob(coll.get());
b2->shared_blob = new BlueStore::SharedBlob(coll.get());
b1->dirty_blob().set_compressed(0x4000, 0x2000);
b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
b2->dirty_blob().set_compressed(0x4000, 0x2000);
b2->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x2000));
em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x3000, b1));
b1->get_ref(coll.get(), 0, 0x3000);
em.extent_map.insert(
*new BlueStore::Extent(0x3000, 0, 0x4000, b2)); // new extent
b2->get_ref(coll.get(), 0, 0x4000);
old_extents.push_back(*new BlueStore::OldExtent(0x3000, 0x3000, 0x1000, b1));
saving = gc.estimate(0x3000, 0x4000, em, old_extents, 0x1000);
ASSERT_EQ(saving, 0);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.num_intervals(), 0u);
em.clear();
clear_and_dispose(old_extents);
}
/*
original disposition
min_alloc_size = 0x10000
extent0 <loffs = 0, boffs = 0, len = 0x20000>
-> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
extent1 <loffs = 0x20000, boffs = 0, len = 0x20000>
-> blob1<compressed, len_on_disk=0x10000, logical_len=0x20000>
write 0x8000~37000 resulted in the following extent map prior
to GC for the last write_small(0x30000~0xf000)
extent0 <loffs = 0, boffs = 0, len = 0x8000>
-> blob0<compressed, len_on_disk=0x10000, logical_len=0x20000>
extent2 <loffs = 0x8000, boffs = 0x8000, len = 0x8000>
-> blob2<raw, len_on_disk=0x10000, llen=0x10000>
extent3 <loffs = 0x10000, boffs = 0, len = 0x20000>
-> blob3<raw, len_on_disk=0x20000, llen=0x20000>
extent4 <loffs = 0x30000, boffs = 0, len = 0xf000>
-> blob4<raw, len_on_disk=0x1000, llen=0x1000>
extent5 <loffs = 0x3f000, boffs = 0x1f000, len = 0x1000>
-> blob1<compressed, len_on_disk=0x10000, llen=0x20000>
*/
{
BlueStore store(g_ceph_context, "", 0x10000);
auto coll = ceph::make_ref<BlueStore::Collection>(&store, oc, bc, coll_t());
BlueStore::Onode onode(coll.get(), ghobject_t(), "");
BlueStore::ExtentMap em(&onode,
g_ceph_context->_conf->bluestore_extent_map_inline_shard_prealloc_size);
BlueStore::old_extent_map_t old_extents;
BlueStore::GarbageCollector gc(g_ceph_context);
int64_t saving;
BlueStore::BlobRef b0(new BlueStore::Blob);
BlueStore::BlobRef b1(new BlueStore::Blob);
BlueStore::BlobRef b2(new BlueStore::Blob);
BlueStore::BlobRef b3(new BlueStore::Blob);
BlueStore::BlobRef b4(new BlueStore::Blob);
b0->shared_blob = new BlueStore::SharedBlob(coll.get());
b1->shared_blob = new BlueStore::SharedBlob(coll.get());
b2->shared_blob = new BlueStore::SharedBlob(coll.get());
b3->shared_blob = new BlueStore::SharedBlob(coll.get());
b4->shared_blob = new BlueStore::SharedBlob(coll.get());
b0->dirty_blob().set_compressed(0x2000, 0x1000);
b0->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
b1->dirty_blob().set_compressed(0x20000, 0x10000);
b1->dirty_blob().allocated_test(bluestore_pextent_t(0, 0x10000));
b2->dirty_blob().allocated_test(bluestore_pextent_t(1, 0x10000));
b3->dirty_blob().allocated_test(bluestore_pextent_t(2, 0x20000));
b4->dirty_blob().allocated_test(bluestore_pextent_t(3, 0x1000));
em.extent_map.insert(*new BlueStore::Extent(0, 0, 0x8000, b0));
b0->get_ref(coll.get(), 0, 0x8000);
em.extent_map.insert(
*new BlueStore::Extent(0x8000, 0x8000, 0x8000, b2)); // new extent
b2->get_ref(coll.get(), 0x8000, 0x8000);
em.extent_map.insert(
*new BlueStore::Extent(0x10000, 0, 0x20000, b3)); // new extent
b3->get_ref(coll.get(), 0, 0x20000);
em.extent_map.insert(
*new BlueStore::Extent(0x30000, 0, 0xf000, b4)); // new extent
b4->get_ref(coll.get(), 0, 0xf000);
em.extent_map.insert(*new BlueStore::Extent(0x3f000, 0x1f000, 0x1000, b1));
b1->get_ref(coll.get(), 0x1f000, 0x1000);
old_extents.push_back(*new BlueStore::OldExtent(0x8000, 0x8000, 0x8000, b0));
old_extents.push_back(
*new BlueStore::OldExtent(0x10000, 0x10000, 0x10000, b0));
old_extents.push_back(
*new BlueStore::OldExtent(0x20000, 0x00000, 0x1f000, b1));
saving = gc.estimate(0x30000, 0xf000, em, old_extents, 0x10000);
ASSERT_EQ(saving, 2);
auto& to_collect = gc.get_extents_to_collect();
ASSERT_EQ(to_collect.num_intervals(), 2u);
{
auto it1 = to_collect.begin();
auto it2 = ++to_collect.begin();
using p = decltype(*it1);
{
auto v1 = p{0x0ul, 0x8000ul};
auto v2 = p{0x0ul, 0x8000ul};
ASSERT_TRUE(*it1 == v1 || *it2 == v2);
}
{
auto v1 = p{0x3f000ul, 0x1000ul};
auto v2 = p{0x3f000ul, 0x1000ul};
ASSERT_TRUE(*it1 == v1 || *it2 == v2);
}
}
em.clear();
clear_and_dispose(old_extents);
}
}
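// StoreSpaceTracker tracks disk usage with per-collection and per-object
// bloom filters over coarse regions; the test below exercises granularity
// selection in init() and the is_used()/filter_out() queries.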
TEST(BlueStoreRepairer, StoreSpaceTracker)
{
BlueStoreRepairer::StoreSpaceTracker bmap0;
bmap0.init((uint64_t)4096 * 1024 * 1024 * 1024, 0x1000);
ASSERT_EQ(bmap0.granularity, 2 * 1024 * 1024U);
ASSERT_EQ(bmap0.collections_bfs.size(), 2048u * 1024u);
ASSERT_EQ(bmap0.objects_bfs.size(), 2048u * 1024u);
BlueStoreRepairer::StoreSpaceTracker bmap;
bmap.init(0x2000 * 0x1000 - 1, 0x1000, 512 * 1024);
ASSERT_EQ(bmap.granularity, 0x1000u);
ASSERT_EQ(bmap.collections_bfs.size(), 0x2000u);
ASSERT_EQ(bmap.objects_bfs.size(), 0x2000u);
coll_t cid;
ghobject_t hoid;
ASSERT_FALSE(bmap.is_used(cid, 0));
ASSERT_FALSE(bmap.is_used(hoid, 0));
bmap.set_used(0, 1, cid, hoid);
ASSERT_TRUE(bmap.is_used(cid, 0));
ASSERT_TRUE(bmap.is_used(hoid, 0));
ASSERT_FALSE(bmap.is_used(cid, 0x1023));
ASSERT_FALSE(bmap.is_used(hoid, 0x1023));
ASSERT_FALSE(bmap.is_used(cid, 0x2023));
ASSERT_FALSE(bmap.is_used(hoid, 0x2023));
ASSERT_FALSE(bmap.is_used(cid, 0x3023));
ASSERT_FALSE(bmap.is_used(hoid, 0x3023));
bmap.set_used(0x1023, 0x3000, cid, hoid);
ASSERT_TRUE(bmap.is_used(cid, 0x1023));
ASSERT_TRUE(bmap.is_used(hoid, 0x1023));
ASSERT_TRUE(bmap.is_used(cid, 0x2023));
ASSERT_TRUE(bmap.is_used(hoid, 0x2023));
ASSERT_TRUE(bmap.is_used(cid, 0x3023));
ASSERT_TRUE(bmap.is_used(hoid, 0x3023));
ASSERT_FALSE(bmap.is_used(cid, 0x9001));
ASSERT_FALSE(bmap.is_used(hoid, 0x9001));
ASSERT_FALSE(bmap.is_used(cid, 0xa001));
ASSERT_FALSE(bmap.is_used(hoid, 0xa001));
ASSERT_FALSE(bmap.is_used(cid, 0xb000));
ASSERT_FALSE(bmap.is_used(hoid, 0xb000));
ASSERT_FALSE(bmap.is_used(cid, 0xc000));
ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
bmap.set_used(0x9001, 0x2fff, cid, hoid);
ASSERT_TRUE(bmap.is_used(cid, 0x9001));
ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
ASSERT_TRUE(bmap.is_used(cid, 0xa001));
ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
ASSERT_TRUE(bmap.is_used(cid, 0xb001));
ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
ASSERT_FALSE(bmap.is_used(cid, 0xc000));
ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
bmap.set_used(0xa001, 0x2, cid, hoid);
ASSERT_TRUE(bmap.is_used(cid, 0x9001));
ASSERT_TRUE(bmap.is_used(hoid, 0x9001));
ASSERT_TRUE(bmap.is_used(cid, 0xa001));
ASSERT_TRUE(bmap.is_used(hoid, 0xa001));
ASSERT_TRUE(bmap.is_used(cid, 0xb001));
ASSERT_TRUE(bmap.is_used(hoid, 0xb001));
ASSERT_FALSE(bmap.is_used(cid, 0xc000));
ASSERT_FALSE(bmap.is_used(hoid, 0xc000));
ASSERT_FALSE(bmap.is_used(cid, 0xc0000));
ASSERT_FALSE(bmap.is_used(hoid, 0xc0000));
ASSERT_FALSE(bmap.is_used(cid, 0xc1000));
ASSERT_FALSE(bmap.is_used(hoid, 0xc1000));
bmap.set_used(0xc0000, 0x2000, cid, hoid);
ASSERT_TRUE(bmap.is_used(cid, 0xc0000));
ASSERT_TRUE(bmap.is_used(hoid, 0xc0000));
ASSERT_TRUE(bmap.is_used(cid, 0xc1000));
ASSERT_TRUE(bmap.is_used(hoid, 0xc1000));
interval_set<uint64_t> extents;
extents.insert(0,0x500);
extents.insert(0x800,0x100);
extents.insert(0x1000,0x1000);
extents.insert(0xa001,1);
extents.insert(0xa0000,0xff8);
ASSERT_EQ(3u, bmap.filter_out(extents));
ASSERT_TRUE(bmap.is_used(cid));
ASSERT_TRUE(bmap.is_used(hoid));
BlueStoreRepairer::StoreSpaceTracker bmap2;
bmap2.init((uint64_t)0x3223b1d1000, 0x10000);
ASSERT_EQ(0x1a0000u, bmap2.granularity);
ASSERT_EQ(0x1edae4u, bmap2.collections_bfs.size());
ASSERT_EQ(0x1edae4u, bmap2.objects_bfs.size());
bmap2.set_used(0x3223b190000, 0x10000, cid, hoid);
ASSERT_TRUE(bmap2.is_used(cid, 0x3223b190000));
ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b190000));
ASSERT_TRUE(bmap2.is_used(cid, 0x3223b19f000));
ASSERT_TRUE(bmap2.is_used(hoid, 0x3223b19ffff));
}
TEST(bluestore_blob_t, unused)
{
{
bluestore_blob_t b;
uint64_t min_alloc_size = 64 << 10; // 64 kB
// _do_write_small 0x0~1000
uint64_t offset = 0x0;
uint64_t length = 0x1000; // 4kB
uint64_t suggested_boff = 0;
PExtentVector extents;
extents.emplace_back(0x1a560000, min_alloc_size);
b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
b.mark_used(offset, length);
ASSERT_FALSE(b.is_unused(offset, length));
// _do_write_small 0x2000~1000
offset = 0x2000;
length = 0x1000;
b.add_unused(0, 0x10000);
ASSERT_TRUE(b.is_unused(offset, length));
b.mark_used(offset, length);
ASSERT_FALSE(b.is_unused(offset, length));
// _do_write_small 0xc000~2000
offset = 0xc000;
length = 0x2000;
ASSERT_TRUE(b.is_unused(offset, length));
b.mark_used(offset, length);
ASSERT_FALSE(b.is_unused(offset, length));
}
{
bluestore_blob_t b;
uint64_t min_alloc_size = 64 << 10; // 64 kB
// _do_write_small 0x11000~1000
uint64_t offset = 0x11000;
uint64_t length = 0x1000; // 4kB
uint64_t suggested_boff = 0x11000;
PExtentVector extents;
extents.emplace_back(0x1a560000, min_alloc_size);
b.allocated(p2align(suggested_boff, min_alloc_size), 0 /*no matter*/, extents);
b.add_unused(0, offset);
b.add_unused(offset + length, min_alloc_size * 2 - offset - length);
b.mark_used(offset, length);
ASSERT_FALSE(b.is_unused(offset, length));
// _do_write_small 0x15000~3000
offset = 0x15000;
length = 0x3000;
ASSERT_TRUE(b.is_unused(offset, length));
b.mark_used(offset, length);
ASSERT_FALSE(b.is_unused(offset, length));
}
{
// reuse blob
bluestore_blob_t b;
uint64_t min_alloc_size = 64 << 10; // 64 kB
// _do_write_small 0x2a000~1000
// and 0x1d000~1000
uint64_t unused_granularity = 0x3000;
    // offsets and lengths below are selected to
    // be aligned with unused_granularity
uint64_t offset0 = 0x2a000;
uint64_t offset = 0x1d000;
uint64_t length = 0x1000; // 4kB
PExtentVector extents;
extents.emplace_back(0x410000, min_alloc_size);
b.allocated(p2align(offset0, min_alloc_size), min_alloc_size, extents);
b.add_unused(0, min_alloc_size * 3);
b.mark_used(offset0, length);
ASSERT_FALSE(b.is_unused(offset0, length));
ASSERT_TRUE(b.is_unused(offset, length));
extents.clear();
extents.emplace_back(0x430000, min_alloc_size);
b.allocated(p2align(offset, min_alloc_size), min_alloc_size, extents);
b.mark_used(offset, length);
ASSERT_FALSE(b.is_unused(offset0, length));
ASSERT_FALSE(b.is_unused(offset, length));
ASSERT_FALSE(b.is_unused(offset, unused_granularity));
ASSERT_TRUE(b.is_unused(0, offset / unused_granularity * unused_granularity));
ASSERT_TRUE(b.is_unused(offset + length, offset0 - offset - length));
auto end0_aligned = round_up_to(offset0 + length, unused_granularity);
ASSERT_TRUE(b.is_unused(end0_aligned, min_alloc_size * 3 - end0_aligned));
}
}
// This UT is primarily intended to show how the repair procedure
// causes an erroneous write to INVALID_OFFSET, as reported in
// https://tracker.ceph.com/issues/51682
// Basic map_bl functionality is tested as well though.
//
TEST(bluestore_blob_t, wrong_map_bl_in_51682)
{
{
bluestore_blob_t b;
    uint64_t min_alloc_size = 4 << 10; // 4 KB
b.allocated_test(bluestore_pextent_t(0x17ba000, 4 * min_alloc_size));
b.allocated_test(bluestore_pextent_t(0x17bf000, 4 * min_alloc_size));
b.allocated_test(
bluestore_pextent_t(
bluestore_pextent_t::INVALID_OFFSET,
1 * min_alloc_size));
b.allocated_test(bluestore_pextent_t(0x153c44d000, 7 * min_alloc_size));
b.mark_used(0, 0x8000);
b.mark_used(0x9000, 0x7000);
string s(0x7000, 'a');
bufferlist bl;
bl.append(s);
const size_t num_expected_entries = 5;
uint64_t expected[num_expected_entries][2] = {
{0x17ba000, 0x4000},
{0x17bf000, 0x3000},
{0x17c0000, 0x3000},
{0xffffffffffffffff, 0x1000},
{0x153c44d000, 0x3000}};
size_t expected_pos = 0;
b.map_bl(0, bl,
[&](uint64_t o, bufferlist& bl) {
ASSERT_EQ(o, expected[expected_pos][0]);
ASSERT_EQ(bl.length(), expected[expected_pos][1]);
++expected_pos;
});
// 0x5000 is an improper offset presumably provided when doing a repair
b.map_bl(0x5000, bl,
[&](uint64_t o, bufferlist& bl) {
ASSERT_EQ(o, expected[expected_pos][0]);
ASSERT_EQ(bl.length(), expected[expected_pos][1]);
++expected_pos;
});
ASSERT_EQ(expected_pos, num_expected_entries);
}
}
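// A minimal usage sketch of map_bl for the common case with a single valid
// pextent and no INVALID_OFFSET segments, complementing the repair scenario
// above. The offsets and lengths are arbitrary illustrative values.
TEST(bluestore_blob_t, map_bl_single_extent_sketch)
{
  bluestore_blob_t b;
  b.allocated_test(bluestore_pextent_t(0x10000, 0x4000));
  bufferlist bl;
  bl.append(std::string(0x2000, 'x'));
  size_t calls = 0;
  b.map_bl(0x1000, bl,
    [&](uint64_t o, bufferlist& chunk) {
      // logical offset 0x1000 maps to pextent base + 0x1000
      ASSERT_EQ(o, 0x11000u);
      ASSERT_EQ(chunk.length(), 0x2000u);
      ++calls;
    });
  ASSERT_EQ(calls, 1u);
}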
//---------------------------------------------------------------------------------
static int verify_extent(const extent_t & ext, const extent_t *ext_arr, uint64_t ext_arr_size, uint64_t idx)
{
const extent_t & ext_ref = ext_arr[idx];
if (ext.offset == ext_ref.offset && ext.length == ext_ref.length) {
return 0;
} else {
std::cerr << "mismatch was found at index " << idx << std::endl;
if (ext.length == 0) {
std::cerr << "Null extent was returned at idx = " << idx << std::endl;
}
unsigned start = std::max(((int32_t)(idx)-3), 0);
unsigned end = std::min(idx+3, ext_arr_size);
for (unsigned j = start; j < end; j++) {
const extent_t & ext_ref = ext_arr[j];
std::cerr << j << ") ref_ext = [" << ext_ref.offset << ", " << ext_ref.length << "]" << std::endl;
}
std::cerr << idx << ") ext = [" << ext.offset << ", " << ext.length << "]" << std::endl;
return -1;
}
}
//---------------------------------------------------------------------------------
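// Randomly sets (or clears) extents in sbmap while recording them in
// ext_arr, then replays the bitmap via get_next_{set,clr}_extent() and
// verifies every returned extent against the recorded reference.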
static int test_extents(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
{
const uint64_t MAX_JUMP_BIG = 1523;
const uint64_t MAX_JUMP_SMALL = 19;
const uint64_t MAX_LEN_BIG = 523;
const uint64_t MAX_LEN_SMALL = 23;
uint64_t n = sbmap.get_size();
uint64_t offset = 0;
unsigned length, jump, i;
for (i = 0; i < ext_arr_size; i++) {
if (i & 3) {
jump = std::rand() % MAX_JUMP_BIG;
} else {
jump = std::rand() % MAX_JUMP_SMALL;
}
offset += jump;
if (i & 1) {
length = std::rand() % MAX_LEN_BIG;
} else {
length = std::rand() % MAX_LEN_SMALL;
}
// make sure no zero length will be used
length++;
if (offset + length >= n) {
break;
}
bool success;
if (set) {
success = sbmap.set(offset, length);
} else {
success = sbmap.clr(offset, length);
}
if (!success) {
std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
return -1;
}
// if this is not the first entry and no jump -> merge extents
if ( (i==0) || (jump > 0) ) {
ext_arr[i] = {offset, length};
} else {
// merge 2 extents
i --;
ext_arr[i].length += length;
}
offset += length;
}
unsigned arr_size = std::min((uint64_t)i, ext_arr_size);
std::cout << std::hex << std::right;
std::cout << "[" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
std::cout << std::dec << std::endl;
offset = 0;
extent_t ext;
for(unsigned i = 0; i < arr_size; i++) {
if (set) {
ext = sbmap.get_next_set_extent(offset);
} else {
ext = sbmap.get_next_clr_extent(offset);
}
if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
return -1;
}
offset = ext.offset + ext.length;
}
if (set) {
ext = sbmap.get_next_set_extent(offset);
} else {
ext = sbmap.get_next_clr_extent(offset);
}
if (ext.length == 0) {
return 0;
} else {
std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
return -1;
}
}
//---------------------------------------------------------------------------------
TEST(SimpleBitmap, basic)
{
const uint64_t MAX_EXTENTS_COUNT = 7131177;
std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
ASSERT_TRUE(ext_arr != nullptr);
const uint64_t BIT_COUNT = 4ULL << 30; // 4Gb = 512MB
SimpleBitmap sbmap(g_ceph_context, BIT_COUNT);
// use current time as seed for random generator
std::srand(std::time(nullptr));
for (unsigned i = 0; i < 3; i++ ) {
memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
sbmap.clear_all();
ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
sbmap.set_all();
ASSERT_TRUE(test_extents(i, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
}
}
//---------------------------------------------------------------------------------
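// Applies a random mix of set/clr operations to both sbmap and a plain
// byte-per-bit reference map, then cross-checks per-bit state and full
// extent iteration against that reference.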
static int test_intersections(unsigned test_idx, SimpleBitmap &sbmap, uint8_t map[], uint64_t map_size)
{
const uint64_t MAX_LEN_BIG = 523;
const uint64_t MAX_LEN_SMALL = 23;
bool success;
uint64_t set_op_count = 0, clr_op_count = 0;
unsigned length, i;
for (i = 0; i < map_size / (MAX_LEN_BIG*2); i++) {
uint64_t offset = (std::rand() % (map_size - 1));
if (i & 1) {
length = std::rand() % MAX_LEN_BIG;
} else {
length = std::rand() % MAX_LEN_SMALL;
}
// make sure no zero length will be used
length++;
if (offset + length >= map_size) {
continue;
}
// 2:1 set/clr
bool set = (std::rand() % 3);
if (set) {
success = sbmap.set(offset, length);
memset(map+offset, 0xFF, length);
set_op_count++;
} else {
success = sbmap.clr(offset, length);
memset(map+offset, 0x0, length);
clr_op_count++;
}
if (!success) {
std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
return -1;
}
}
uint64_t set_bit_count = 0;
uint64_t clr_bit_count = 0;
for(uint64_t idx = 0; idx < map_size; idx++) {
if (map[idx]) {
set_bit_count++;
success = sbmap.bit_is_set(idx);
} else {
clr_bit_count++;
success = sbmap.bit_is_clr(idx);
}
if (!success) {
std::cerr << "expected: sbmap.bit_is_" << (map[idx] ? "set(" : "clr(") << idx << ")"<< std::endl;
return -1;
}
}
std::cout << std::hex << std::right << __func__ ;
std::cout << " [" << test_idx << "] set_bit_count = 0x" << std::setfill('0') << std::setw(8) << set_bit_count
<< ", clr_bit_count = 0x" << std::setfill('0') << std::setw(8) << clr_bit_count
<< ", sum = 0x" << set_bit_count + clr_bit_count << std::endl;
std::cout << std::dec;
uint64_t offset = 0;
for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
extent_t ext = sbmap.get_next_set_extent(offset);
//std::cout << "set_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
if (map[idx] != 0xFF) {
std::cerr << "map[" << idx << "] is clear, but extent [" << ext.offset << ", " << ext.length << "] is set" << std::endl;
return -1;
}
}
offset = ext.offset + ext.length;
}
offset = 0;
for(uint64_t i = 0; i < (set_op_count + clr_op_count); i++) {
extent_t ext = sbmap.get_next_clr_extent(offset);
//std::cout << "clr_ext:: " << i << ") [" << ext.offset << ", " << ext.length << "]" << std::endl;
for (uint64_t idx = ext.offset; idx < ext.offset + ext.length; idx++) {
if (map[idx] ) {
std::cerr << "map[" << idx << "] is set, but extent [" << ext.offset << ", " << ext.length << "] is free" << std::endl;
return -1;
}
}
offset = ext.offset + ext.length;
}
return 0;
}
//---------------------------------------------------------------------------------
TEST(SimpleBitmap, intersection)
{
const uint64_t MAP_SIZE = 1ULL << 30; // 1G
SimpleBitmap sbmap(g_ceph_context, MAP_SIZE);
// use current time as seed for random generator
std::srand(std::time(nullptr));
std::unique_ptr<uint8_t[]> map = std::make_unique<uint8_t[]> (MAP_SIZE);
ASSERT_TRUE(map != nullptr);
for (unsigned i = 0; i < 1; i++ ) {
sbmap.clear_all();
memset(map.get(), 0, MAP_SIZE);
ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
sbmap.set_all();
memset(map.get(), 0xFF, MAP_SIZE);
ASSERT_TRUE(test_intersections(i, sbmap, map.get(), MAP_SIZE) == 0);
}
}
//---------------------------------------------------------------------------------
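// Places extents of every length 1..128 at slightly shifting offsets so
// that extent starts and ends land on and around 64-bit word boundaries.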
static int test_extents_boundaries(uint64_t index, extent_t *ext_arr, uint64_t ext_arr_size, SimpleBitmap& sbmap, bool set)
{
uint64_t n = sbmap.get_size();
uint64_t offset = 0, k = 0;
for(unsigned i = 0; i < 64; i++) {
offset += i;
if (offset >= n) {
break;
}
for(unsigned length = 1; length <= 128; length++) {
if (offset + length >= n) {
break;
}
if (k >= ext_arr_size) {
break;
}
bool success;
if (set) {
success = sbmap.set(offset, length);
} else {
success = sbmap.clr(offset, length);
}
if (!success) {
std::cerr << "Failed sbmap." << (set ? "set(" : "clr(") << offset << ", " << length << ")"<< std::endl;
return -1;
}
ext_arr[k++] = {offset, length};
if (length < 64) {
offset += 64;
} else {
offset += 128;
}
}
if (k >= ext_arr_size) {
break;
}
}
unsigned arr_size = std::min((uint64_t)k, ext_arr_size);
std::cout << std::hex << std::right << __func__ ;
std::cout << " [" << index << "] " << (set ? "Set::" : "Clr::") << " extents count = 0x" << arr_size;
std::cout << std::dec << std::endl;
offset = 0;
extent_t ext;
for(unsigned i = 0; i < arr_size; i++) {
if (set) {
ext = sbmap.get_next_set_extent(offset);
} else {
ext = sbmap.get_next_clr_extent(offset);
}
if (verify_extent(ext, ext_arr, ext_arr_size, i) != 0) {
return -1;
}
offset = ext.offset + ext.length;
}
if (set) {
ext = sbmap.get_next_set_extent(offset);
} else {
ext = sbmap.get_next_clr_extent(offset);
}
if (ext.length == 0) {
return 0;
} else {
std::cerr << "sbmap.get_next_" << (set ? "set" : "clr") << "_extent(" << offset << ") return length = " << ext.length << std::endl;
return -1;
}
}
//---------------------------------------------------------------------------------
TEST(SimpleBitmap, boundaries)
{
const uint64_t MAX_EXTENTS_COUNT = 64 << 10;
std::unique_ptr<extent_t[]> ext_arr = std::make_unique<extent_t[]>(MAX_EXTENTS_COUNT);
ASSERT_TRUE(ext_arr != nullptr);
// use current time as seed for random generator
std::srand(std::time(nullptr));
uint64_t bit_count = 32 << 20; // 32Mb = 4MB
unsigned count = 0;
for (unsigned i = 0; i < 64; i++) {
SimpleBitmap sbmap(g_ceph_context, bit_count+i);
memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
sbmap.clear_all();
ASSERT_TRUE(test_extents_boundaries(count, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, true) == 0);
memset(ext_arr.get(), 0, sizeof(extent_t)*MAX_EXTENTS_COUNT);
sbmap.set_all();
ASSERT_TRUE(test_extents_boundaries(count++, ext_arr.get(), MAX_EXTENTS_COUNT, sbmap, false) == 0);
}
}
//---------------------------------------------------------------------------------
TEST(SimpleBitmap, boundaries2)
{
  const uint64_t bit_count_base = 64 << 10; // 64Kb = 8KB
const extent_t null_extent = {0, 0};
for (unsigned i = 0; i < 64; i++) {
uint64_t bit_count = bit_count_base + i;
extent_t full_extent = {0, bit_count};
SimpleBitmap sbmap(g_ceph_context, bit_count);
sbmap.set(0, bit_count);
ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
for (uint64_t bit = 0; bit < bit_count; bit++) {
sbmap.clr(bit, 1);
}
ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
for (uint64_t bit = 0; bit < bit_count; bit++) {
sbmap.set(bit, 1);
}
ASSERT_TRUE(sbmap.get_next_set_extent(0) == full_extent);
ASSERT_TRUE(sbmap.get_next_clr_extent(0) == null_extent);
sbmap.clr(0, bit_count);
ASSERT_TRUE(sbmap.get_next_set_extent(0) == null_extent);
ASSERT_TRUE(sbmap.get_next_clr_extent(0) == full_extent);
}
}
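// A minimal usage sketch of the SimpleBitmap API exercised above; the bit
// count and offsets are arbitrary illustrative values.
TEST(SimpleBitmap, minimal_sketch)
{
  SimpleBitmap sbmap(g_ceph_context, 128);
  ASSERT_TRUE(sbmap.set(10, 5));
  ASSERT_TRUE(sbmap.bit_is_set(10));
  ASSERT_TRUE(sbmap.bit_is_clr(15));
  extent_t ext = sbmap.get_next_set_extent(0);
  ASSERT_EQ(ext.offset, 10u);
  ASSERT_EQ(ext.length, 5u);
  ASSERT_TRUE(sbmap.clr(10, 5));
  // after clearing, no set extent remains anywhere in the map
  ASSERT_EQ(sbmap.get_next_set_extent(0).length, 0u);
}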
TEST(shared_blob_2hash_tracker_t, basic_test)
{
shared_blob_2hash_tracker_t t1(1024 * 1024, 4096);
ASSERT_TRUE(t1.count_non_zero() == 0);
t1.inc(0, 0, 1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(0, 0, -1);
ASSERT_TRUE(t1.count_non_zero() == 0);
t1.inc(3, 0x1000, 2);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(3, 0x1000, -1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(3, 0x1000, -1);
ASSERT_TRUE(t1.count_non_zero() == 0);
t1.inc(2, 0x2000, 5);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(18, 0x2000, -5);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(18, 0x2000, 1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(2, 0x2000, -1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(18, 0x2000, 4);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(2, 0x2000, -4);
ASSERT_TRUE(t1.count_non_zero() == 0);
t1.inc(3, 0x3000, 2);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(4, 0x3000, -1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(4, 0x3000, -1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(3, 0x3000, -2);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(4, 0x3000, 1);
ASSERT_TRUE(t1.count_non_zero() != 0);
t1.inc(4, 0x3000, 1);
ASSERT_TRUE(t1.count_non_zero() == 0);
t1.inc(5, 0x1000, 1);
t1.inc(5, 0x2000, 3);
t1.inc(5, 0x3000, 2);
t1.inc(5, 0x8000, 1);
ASSERT_TRUE(t1.count_non_zero() != 0);
ASSERT_TRUE(!t1.test_all_zero(5,0x1000));
ASSERT_TRUE(!t1.test_all_zero(5, 0x2000));
ASSERT_TRUE(!t1.test_all_zero(5, 0x3000));
ASSERT_TRUE(t1.test_all_zero(5, 0x4000));
ASSERT_TRUE(!t1.test_all_zero(5, 0x8000));
ASSERT_TRUE(t1.test_all_zero_range(5, 0, 0x1000));
ASSERT_TRUE(t1.test_all_zero_range(5, 0x500, 0x500));
ASSERT_TRUE(!t1.test_all_zero_range(5, 0x500, 0x1500));
ASSERT_TRUE(!t1.test_all_zero_range(5, 0x1500, 0x3200));
ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x1500));
ASSERT_TRUE(t1.test_all_zero_range(5, 0x4500, 0x3b00));
ASSERT_TRUE(!t1.test_all_zero_range(5, 0, 0x9000));
}
TEST(bluestore_blob_use_tracker_t, mempool_stats_test)
{
using mempool::bluestore_cache_other::allocated_items;
using mempool::bluestore_cache_other::allocated_bytes;
uint64_t other_items0 = allocated_items();
uint64_t other_bytes0 = allocated_bytes();
{
bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
t1->init(1024 * 1024, 4096);
ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
delete t1;
ASSERT_EQ(allocated_items(), other_items0);
ASSERT_EQ(allocated_bytes(), other_bytes0);
}
{
bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
t1->init(1024 * 1024, 4096);
t1->add_tail(2048 * 1024, 4096);
// proper stats update after tail add
ASSERT_EQ(512, allocated_items() - other_items0); // = 2M / 4K
ASSERT_EQ(2048, allocated_bytes() - other_bytes0); // = 2M / 4K * 4
delete t1;
ASSERT_EQ(allocated_items(), other_items0);
ASSERT_EQ(allocated_bytes(), other_bytes0);
}
{
bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
t1->init(1024 * 1024, 4096);
t1->prune_tail(512 * 1024);
// no changes in stats after pruning
ASSERT_EQ(256, allocated_items() - other_items0); // = 1M / 4K
ASSERT_EQ(1024, allocated_bytes() - other_bytes0); // = 1M / 4K * 4
delete t1;
ASSERT_EQ(allocated_items(), other_items0);
ASSERT_EQ(allocated_bytes(), other_bytes0);
}
{
bluestore_blob_use_tracker_t* t1 = new bluestore_blob_use_tracker_t;
bluestore_blob_use_tracker_t* t2 = new bluestore_blob_use_tracker_t;
t1->init(1024 * 1024, 4096);
    // t1 keeps the same number of entries and t2 gets half as many
t1->split(512 * 1024, t2);
ASSERT_EQ(256 + 128, allocated_items() - other_items0); //= 1M / 4K*1.5
ASSERT_EQ(1024 + 512, allocated_bytes() - other_bytes0); //= 1M / 4K*4*1.5
// t1 & t2 release everything, then t2 get one less entry than t2 had had
// before
t1->split(4096, t2);
ASSERT_EQ(127, allocated_items() - other_items0); // = 512K / 4K - 1
    ASSERT_EQ(127 * 4, allocated_bytes() - other_bytes0); // = 512K / 4K * 4 - 4
delete t1;
delete t2;
ASSERT_EQ(allocated_items(), other_items0);
ASSERT_EQ(allocated_bytes(), other_bytes0);
}
}
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 85,116 | 35.266297 | 135 |
cc
|
null |
ceph-main/src/test/objectstore/test_deferred.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <memory>
#include <time.h>
#include "os/ObjectStore.h"
#include "os/bluestore/BlueStore.h"
#include "include/Context.h"
#include "common/ceph_argparse.h"
#include "global/global_init.h"
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "common/errno.h"
#include "common/options.h" // for the size literals
#include <semaphore.h>
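// Context adapter that runs an arbitrary closure on completion; used below
// to signal transaction commits from register_on_commit() callbacks.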
class C_do_action : public Context {
public:
std::function<void()> action;
C_do_action(std::function<void()> action)
: action(action) {}
void finish(int r) override {
action();
}
};
void create_deferred_and_terminate() {
std::unique_ptr<ObjectStore> store;
g_ceph_context->_conf._clear_safe_to_start_threads();
g_ceph_context->_conf.set_val_or_die("bluestore_prefer_deferred_size", "4096");
g_ceph_context->_conf.set_val_or_die("bluestore_allocator", "bitmap");
g_ceph_context->_conf.set_val_or_die("bluestore_block_size", "10240000000");
g_ceph_context->_conf.apply_changes(nullptr);
int64_t poolid;
coll_t cid;
ghobject_t hoid;
ObjectStore::CollectionHandle ch;
ceph_assert(::mkdir("bluestore.test_temp_dir", 0777) == 0);
store = ObjectStore::create(g_ceph_context,
"bluestore",
"bluestore.test_temp_dir",
"store_test_temp_journal");
ceph_assert(store->mkfs() == 0);
ceph_assert(store->mount() == 0);
poolid = 11;
cid = coll_t(spg_t(pg_t(1, poolid), shard_id_t::NO_SHARD));
ch = store->create_new_collection(cid);
int r;
{
ObjectStore::Transaction t;
t.create_collection(cid, 0);
r = store->queue_transaction(ch, std::move(t));
ceph_assert(r == 0);
}
{
ObjectStore::Transaction t;
std::string oid = "zapchajdziura";
ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 1, poolid, ""));
bufferlist bl;
bl.append(std::string(0xe000, '-'));
t.write(cid, hoid, 0, 0xe000, bl);
r = store->queue_transaction(ch, std::move(t));
ceph_assert(r == 0);
}
size_t object_count = 10;
// initial fill
bufferlist bl_64K;
bl_64K.append(std::string(64 * 1024, '-'));
std::atomic<size_t> prefill_counter{0};
sem_t prefill_mutex;
sem_init(&prefill_mutex, 0, 0);
for (size_t o = 0; o < object_count; o++) {
ObjectStore::Transaction t;
std::string oid = "object-" + std::to_string(o);
ghobject_t hoid(hobject_t(oid, "", CEPH_NOSNAP, 1, poolid, ""));
t.write(cid, hoid, 0, bl_64K.length(), bl_64K);
t.register_on_commit(new C_do_action([&] {
if (++prefill_counter == object_count) {
sem_post(&prefill_mutex);
}
}));
r = store->queue_transaction(ch, std::move(t));
ceph_assert(r == 0);
}
sem_wait(&prefill_mutex);
// small deferred writes over object
// and complete overwrite of previous one
bufferlist bl_8_bytes;
bl_8_bytes.append("abcdefgh");
std::atomic<size_t> deferred_counter{0};
for (size_t o = 0; o < object_count - 1; o++) {
ObjectStore::Transaction t;
// sprinkle deferred writes
std::string oid_d = "object-" + std::to_string(o + 1);
ghobject_t hoid_d(hobject_t(oid_d, "", CEPH_NOSNAP, 1, poolid, ""));
for(int i = 0; i < 16; i++) {
t.write(cid, hoid_d, 4096 * i, bl_8_bytes.length(), bl_8_bytes);
}
// overwrite previous object
std::string oid_m = "object-" + std::to_string(o);
ghobject_t hoid_m(hobject_t(oid_m, "", CEPH_NOSNAP, 1, poolid, ""));
t.write(cid, hoid_m, 0, bl_64K.length(), bl_64K);
t.register_on_commit(new C_do_action([&] {
if (++deferred_counter == object_count - 1) {
exit(0);
}
}));
r = store->queue_transaction(ch, std::move(t));
ceph_assert(r == 0);
}
sleep(10);
ceph_assert(0 && "should not reach here");
}
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
create_deferred_and_terminate();
return 0;
}
| 4,206 | 27.619048 | 81 |
cc
|
null |
ceph-main/src/test/objectstore/test_kv.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2006 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <time.h>
#include <sys/mount.h>
#include "kv/KeyValueDB.h"
#include "kv/RocksDBStore.h"
#include "include/Context.h"
#include "common/ceph_argparse.h"
#include "global/global_init.h"
#include "common/Cond.h"
#include "common/errno.h"
#include "include/stringify.h"
#include <gtest/gtest.h>
using namespace std;
class KVTest : public ::testing::TestWithParam<const char*> {
public:
boost::scoped_ptr<KeyValueDB> db;
KVTest() : db(0) {}
string _bl_to_str(bufferlist val) {
string str(val.c_str(), val.length());
return str;
}
void rm_r(string path) {
string cmd = string("rm -r ") + path;
cout << "==> " << cmd << std::endl;
int r = ::system(cmd.c_str());
if (r) {
cerr << "failed with exit code " << r
<< ", continuing anyway" << std::endl;
}
}
void init() {
cout << "Creating " << string(GetParam()) << "\n";
db.reset(KeyValueDB::create(g_ceph_context, string(GetParam()),
"kv_test_temp_dir"));
}
void fini() {
db.reset(NULL);
}
void SetUp() override {
int r = ::mkdir("kv_test_temp_dir", 0777);
if (r < 0 && errno != EEXIST) {
r = -errno;
cerr << __func__ << ": unable to create kv_test_temp_dir: "
<< cpp_strerror(r) << std::endl;
return;
}
init();
}
void TearDown() override {
fini();
rm_r("kv_test_temp_dir");
}
};
TEST_P(KVTest, OpenClose) {
ASSERT_EQ(0, db->create_and_open(cout));
db->close();
db->open(cout);
fini();
}
TEST_P(KVTest, OpenCloseReopenClose) {
ASSERT_EQ(0, db->create_and_open(cout));
fini();
init();
ASSERT_EQ(0, db->open(cout));
fini();
}
/*
* Basic write and read test case in same database session.
*/
TEST_P(KVTest, OpenWriteRead) {
ASSERT_EQ(0, db->create_and_open(cout));
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist value;
value.append("value");
t->set("prefix", "key", value);
value.clear();
value.append("value2");
t->set("prefix", "key2", value);
value.clear();
value.append("value3");
t->set("prefix", "key3", value);
db->submit_transaction_sync(t);
bufferlist v1, v2;
ASSERT_EQ(0, db->get("prefix", "key", &v1));
ASSERT_EQ(v1.length(), 5u);
    ASSERT_EQ(std::string(v1.c_str(), v1.length()), std::string("value"));
ASSERT_EQ(0, db->get("prefix", "key2", &v2));
ASSERT_EQ(v2.length(), 6u);
    ASSERT_EQ(std::string(v2.c_str(), v2.length()), std::string("value2"));
}
fini();
}
TEST_P(KVTest, PutReopen) {
ASSERT_EQ(0, db->create_and_open(cout));
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist value;
value.append("value");
t->set("prefix", "key", value);
t->set("prefix", "key2", value);
t->set("prefix", "key3", value);
db->submit_transaction_sync(t);
}
fini();
init();
ASSERT_EQ(0, db->open(cout));
{
bufferlist v1, v2;
ASSERT_EQ(0, db->get("prefix", "key", &v1));
ASSERT_EQ(v1.length(), 5u);
ASSERT_EQ(0, db->get("prefix", "key2", &v2));
ASSERT_EQ(v2.length(), 5u);
}
{
KeyValueDB::Transaction t = db->get_transaction();
t->rmkey("prefix", "key");
t->rmkey("prefix", "key3");
db->submit_transaction_sync(t);
}
fini();
init();
ASSERT_EQ(0, db->open(cout));
{
bufferlist v1, v2, v3;
ASSERT_EQ(-ENOENT, db->get("prefix", "key", &v1));
ASSERT_EQ(0, db->get("prefix", "key2", &v2));
ASSERT_EQ(v2.length(), 5u);
ASSERT_EQ(-ENOENT, db->get("prefix", "key3", &v3));
}
fini();
}
TEST_P(KVTest, BenchCommit) {
int n = 1024;
ASSERT_EQ(0, db->create_and_open(cout));
utime_t start = ceph_clock_now();
{
cout << "priming" << std::endl;
// prime
bufferlist big;
bufferptr bp(1048576);
bp.zero();
big.append(bp);
for (int i=0; i<30; ++i) {
KeyValueDB::Transaction t = db->get_transaction();
t->set("prefix", "big" + stringify(i), big);
db->submit_transaction_sync(t);
}
}
cout << "now doing small writes" << std::endl;
bufferlist data;
bufferptr bp(1024);
bp.zero();
data.append(bp);
for (int i=0; i<n; ++i) {
KeyValueDB::Transaction t = db->get_transaction();
t->set("prefix", "key" + stringify(i), data);
db->submit_transaction_sync(t);
}
utime_t end = ceph_clock_now();
utime_t dur = end - start;
cout << n << " commits in " << dur << ", avg latency " << (dur / (double)n)
<< std::endl;
fini();
}
struct AppendMOP : public KeyValueDB::MergeOperator {
void merge_nonexistent(
const char *rdata, size_t rlen, std::string *new_value) override {
*new_value = "?" + std::string(rdata, rlen);
}
void merge(
const char *ldata, size_t llen,
const char *rdata, size_t rlen,
std::string *new_value) override {
*new_value = std::string(ldata, llen) + std::string(rdata, rlen);
}
// We use each operator name and each prefix to construct the
// overall RocksDB operator name for consistency check at open time.
const char *name() const override {
return "Append";
}
};
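// Resulting semantics (see the Merge tests below): merging "3" into a
// missing key yields "?3"; a follow-up merge of "1" then yields "?31".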
string tostr(bufferlist& b) {
return string(b.c_str(),b.length());
}
TEST_P(KVTest, Merge) {
shared_ptr<KeyValueDB::MergeOperator> p(new AppendMOP);
int r = db->set_merge_operator("A",p);
if (r < 0)
return; // No merge operators for this database type
ASSERT_EQ(0, db->create_and_open(cout));
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist v1, v2, v3;
v1.append(string("1"));
v2.append(string("2"));
v3.append(string("3"));
t->set("P", "K1", v1);
t->set("A", "A1", v2);
t->rmkey("A", "A2");
t->merge("A", "A2", v3);
db->submit_transaction_sync(t);
}
{
bufferlist v1, v2, v3;
ASSERT_EQ(0, db->get("P", "K1", &v1));
ASSERT_EQ(tostr(v1), "1");
ASSERT_EQ(0, db->get("A", "A1", &v2));
ASSERT_EQ(tostr(v2), "2");
ASSERT_EQ(0, db->get("A", "A2", &v3));
ASSERT_EQ(tostr(v3), "?3");
}
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist v1;
v1.append(string("1"));
t->merge("A", "A2", v1);
db->submit_transaction_sync(t);
}
{
bufferlist v;
ASSERT_EQ(0, db->get("A", "A2", &v));
ASSERT_EQ(tostr(v), "?31");
}
fini();
}
TEST_P(KVTest, RMRange) {
ASSERT_EQ(0, db->create_and_open(cout));
bufferlist value;
value.append("value");
{
KeyValueDB::Transaction t = db->get_transaction();
t->set("prefix", "key1", value);
t->set("prefix", "key2", value);
t->set("prefix", "key3", value);
t->set("prefix", "key4", value);
t->set("prefix", "key45", value);
t->set("prefix", "key5", value);
t->set("prefix", "key6", value);
db->submit_transaction_sync(t);
}
{
KeyValueDB::Transaction t = db->get_transaction();
t->set("prefix", "key7", value);
t->set("prefix", "key8", value);
t->rm_range_keys("prefix", "key2", "key7");
db->submit_transaction_sync(t);
bufferlist v1, v2;
ASSERT_EQ(0, db->get("prefix", "key1", &v1));
v1.clear();
ASSERT_EQ(-ENOENT, db->get("prefix", "key45", &v1));
ASSERT_EQ(0, db->get("prefix", "key8", &v1));
v1.clear();
ASSERT_EQ(-ENOENT, db->get("prefix", "key2", &v1));
ASSERT_EQ(0, db->get("prefix", "key7", &v2));
}
{
KeyValueDB::Transaction t = db->get_transaction();
t->rm_range_keys("prefix", "key", "key");
db->submit_transaction_sync(t);
bufferlist v1, v2;
ASSERT_EQ(0, db->get("prefix", "key1", &v1));
ASSERT_EQ(0, db->get("prefix", "key8", &v2));
}
{
KeyValueDB::Transaction t = db->get_transaction();
t->rm_range_keys("prefix", "key-", "key~");
db->submit_transaction_sync(t);
bufferlist v1, v2;
ASSERT_EQ(-ENOENT, db->get("prefix", "key1", &v1));
ASSERT_EQ(-ENOENT, db->get("prefix", "key8", &v2));
}
fini();
}
TEST_P(KVTest, ShardingRMRange) {
if(string(GetParam()) != "rocksdb")
return;
std::string cfs("O(7)=");
ASSERT_EQ(0, db->create_and_open(cout, cfs));
{
KeyValueDB::Transaction t = db->get_transaction();
for (size_t i = 0; i < 1000; i++) {
bufferlist value;
char* a;
ASSERT_EQ(asprintf(&a, "key%3.3ld", i), 6);
value.append(a);
t->set("O", a, value);
free(a);
}
db->submit_transaction_sync(t);
}
{
KeyValueDB::Transaction t = db->get_transaction();
t->rm_range_keys("O", "key277", "key467");
db->submit_transaction_sync(t);
}
for (size_t i = 0; i < 1000; i++) {
char* key;
ASSERT_EQ(asprintf(&key, "key%3.3ld", i), 6);
bufferlist value;
int r = db->get("O", key, &value);
ASSERT_EQ(r, (i >= 277 && i < 467 ? -ENOENT : 0));
free(key);
}
fini();
}
TEST_P(KVTest, RocksDBColumnFamilyTest) {
if(string(GetParam()) != "rocksdb")
return;
std::string cfs("cf1 cf2");
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
cout << "creating two column families and opening them" << std::endl;
ASSERT_EQ(0, db->create_and_open(cout, cfs));
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist value;
value.append("value");
cout << "write a transaction includes three keys in different CFs" << std::endl;
t->set("prefix", "key", value);
t->set("cf1", "key", value);
t->set("cf2", "key2", value);
ASSERT_EQ(0, db->submit_transaction_sync(t));
}
fini();
init();
ASSERT_EQ(0, db->open(cout, cfs));
{
bufferlist v1, v2, v3;
cout << "reopen db and read those keys" << std::endl;
ASSERT_EQ(0, db->get("prefix", "key", &v1));
ASSERT_EQ(0, _bl_to_str(v1) != "value");
ASSERT_EQ(0, db->get("cf1", "key", &v2));
ASSERT_EQ(0, _bl_to_str(v2) != "value");
ASSERT_EQ(0, db->get("cf2", "key2", &v3));
    ASSERT_EQ(0, _bl_to_str(v3) != "value");
}
{
cout << "delete two keys in CFs" << std::endl;
KeyValueDB::Transaction t = db->get_transaction();
t->rmkey("prefix", "key");
t->rmkey("cf2", "key2");
ASSERT_EQ(0, db->submit_transaction_sync(t));
}
fini();
init();
ASSERT_EQ(0, db->open(cout, cfs));
{
cout << "reopen db and read keys again." << std::endl;
bufferlist v1, v2, v3;
ASSERT_EQ(-ENOENT, db->get("prefix", "key", &v1));
ASSERT_EQ(0, db->get("cf1", "key", &v2));
ASSERT_EQ(0, _bl_to_str(v2) != "value");
ASSERT_EQ(-ENOENT, db->get("cf2", "key2", &v3));
}
fini();
}
TEST_P(KVTest, RocksDBIteratorTest) {
if(string(GetParam()) != "rocksdb")
return;
std::string cfs("cf1");
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
cout << "creating one column family and opening it" << std::endl;
ASSERT_EQ(0, db->create_and_open(cout, cfs));
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist bl1;
bl1.append("hello");
bufferlist bl2;
bl2.append("world");
cout << "write some kv pairs into default and new CFs" << std::endl;
t->set("prefix", "key1", bl1);
t->set("prefix", "key2", bl2);
t->set("cf1", "key1", bl1);
t->set("cf1", "key2", bl2);
ASSERT_EQ(0, db->submit_transaction_sync(t));
}
{
cout << "iterating the default CF" << std::endl;
KeyValueDB::Iterator iter = db->get_iterator("prefix");
iter->seek_to_first();
ASSERT_EQ(1, iter->valid());
ASSERT_EQ("key1", iter->key());
ASSERT_EQ("hello", _bl_to_str(iter->value()));
ASSERT_EQ(0, iter->next());
ASSERT_EQ(1, iter->valid());
ASSERT_EQ("key2", iter->key());
ASSERT_EQ("world", _bl_to_str(iter->value()));
}
{
cout << "iterating the new CF" << std::endl;
KeyValueDB::Iterator iter = db->get_iterator("cf1");
iter->seek_to_first();
ASSERT_EQ(1, iter->valid());
ASSERT_EQ("key1", iter->key());
ASSERT_EQ("hello", _bl_to_str(iter->value()));
ASSERT_EQ(0, iter->next());
ASSERT_EQ(1, iter->valid());
ASSERT_EQ("key2", iter->key());
ASSERT_EQ("world", _bl_to_str(iter->value()));
}
fini();
}
TEST_P(KVTest, RocksDBShardingIteratorTest) {
if(string(GetParam()) != "rocksdb")
return;
std::string cfs("A(6)");
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
cout << "creating one column family and opening it" << std::endl;
ASSERT_EQ(0, db->create_and_open(cout, cfs));
{
KeyValueDB::Transaction t = db->get_transaction();
for (int v = 100; v <= 999; v++) {
std::string str = to_string(v);
bufferlist val;
val.append(str);
t->set("A", str, val);
}
ASSERT_EQ(0, db->submit_transaction_sync(t));
}
{
KeyValueDB::Iterator it = db->get_iterator("A");
int pos = 0;
ASSERT_EQ(it->lower_bound(to_string(pos)), 0);
for (pos = 100; pos <= 999; pos++) {
ASSERT_EQ(it->valid(), true);
ASSERT_EQ(it->key(), to_string(pos));
ASSERT_EQ(it->value().to_str(), to_string(pos));
it->next();
}
ASSERT_EQ(it->valid(), false);
pos = 999;
ASSERT_EQ(it->lower_bound(to_string(pos)), 0);
for (pos = 999; pos >= 100; pos--) {
ASSERT_EQ(it->valid(), true);
ASSERT_EQ(it->key(), to_string(pos));
ASSERT_EQ(it->value().to_str(), to_string(pos));
it->prev();
}
ASSERT_EQ(it->valid(), false);
}
fini();
}
TEST_P(KVTest, RocksDBCFMerge) {
if(string(GetParam()) != "rocksdb")
return;
shared_ptr<KeyValueDB::MergeOperator> p(new AppendMOP);
int r = db->set_merge_operator("cf1",p);
if (r < 0)
return; // No merge operators for this database type
std::string cfs("cf1");
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
cout << "creating one column family and opening it" << std::endl;
ASSERT_EQ(0, db->create_and_open(cout, cfs));
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist v1, v2, v3;
v1.append(string("1"));
v2.append(string("2"));
v3.append(string("3"));
t->set("P", "K1", v1);
t->set("cf1", "A1", v2);
t->rmkey("cf1", "A2");
t->merge("cf1", "A2", v3);
db->submit_transaction_sync(t);
}
{
bufferlist v1, v2, v3;
ASSERT_EQ(0, db->get("P", "K1", &v1));
ASSERT_EQ(tostr(v1), "1");
ASSERT_EQ(0, db->get("cf1", "A1", &v2));
ASSERT_EQ(tostr(v2), "2");
ASSERT_EQ(0, db->get("cf1", "A2", &v3));
ASSERT_EQ(tostr(v3), "?3");
}
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist v1;
v1.append(string("1"));
t->merge("cf1", "A2", v1);
db->submit_transaction_sync(t);
}
{
bufferlist v;
ASSERT_EQ(0, db->get("cf1", "A2", &v));
ASSERT_EQ(tostr(v), "?31");
}
fini();
}
TEST_P(KVTest, RocksDB_estimate_size) {
if(string(GetParam()) != "rocksdb")
GTEST_SKIP();
std::string cfs("cf1");
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
cout << "creating one column family and opening it" << std::endl;
ASSERT_EQ(0, db->create_and_open(cout));
for(int test = 0; test < 20; test++)
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist v1;
v1.append(string(1000, '1'));
for (int i = 0; i < 100; i++)
t->set("A", to_string(rand()%100000), v1);
db->submit_transaction_sync(t);
db->compact();
int64_t size_a = db->estimate_prefix_size("A","");
ASSERT_GT(size_a, (test + 1) * 1000 * 100 * 0.5);
ASSERT_LT(size_a, (test + 1) * 1000 * 100 * 1.5);
int64_t size_a1 = db->estimate_prefix_size("A","1");
ASSERT_GT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 0.5);
ASSERT_LT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 1.5);
int64_t size_b = db->estimate_prefix_size("B","");
ASSERT_EQ(size_b, 0);
}
fini();
}
TEST_P(KVTest, RocksDB_estimate_size_column_family) {
if(string(GetParam()) != "rocksdb")
GTEST_SKIP();
std::string cfs("cf1");
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
cout << "creating one column family and opening it" << std::endl;
ASSERT_EQ(0, db->create_and_open(cout, cfs));
for(int test = 0; test < 20; test++)
{
KeyValueDB::Transaction t = db->get_transaction();
bufferlist v1;
v1.append(string(1000, '1'));
for (int i = 0; i < 100; i++)
t->set("cf1", to_string(rand()%100000), v1);
db->submit_transaction_sync(t);
db->compact();
int64_t size_a = db->estimate_prefix_size("cf1","");
ASSERT_GT(size_a, (test + 1) * 1000 * 100 * 0.5);
ASSERT_LT(size_a, (test + 1) * 1000 * 100 * 1.5);
int64_t size_a1 = db->estimate_prefix_size("cf1","1");
ASSERT_GT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 0.5);
ASSERT_LT(size_a1, (test + 1) * 1000 * 100 * 0.1 * 1.5);
int64_t size_b = db->estimate_prefix_size("B","");
ASSERT_EQ(size_b, 0);
}
fini();
}
TEST_P(KVTest, RocksDB_parse_sharding_def) {
if(string(GetParam()) != "rocksdb")
GTEST_SKIP();
bool result;
std::vector<RocksDBStore::ColumnFamily> sharding_def;
char const* error_position = nullptr;
std::string error_msg;
std::string_view text_def = "A(10,0-30) B(6)=option1,option2=aaaa C";
result = RocksDBStore::parse_sharding_def(text_def,
sharding_def,
&error_position,
&error_msg);
ASSERT_EQ(result, true);
ASSERT_EQ(error_position, nullptr);
ASSERT_EQ(error_msg, "");
std::cout << text_def << std::endl;
if (error_position) std::cout << std::string(error_position - text_def.begin(), ' ') << "^" << error_msg << std::endl;
ASSERT_EQ(sharding_def.size(), 3);
ASSERT_EQ(sharding_def[0].name, "A");
ASSERT_EQ(sharding_def[0].shard_cnt, 10);
ASSERT_EQ(sharding_def[0].hash_l, 0);
ASSERT_EQ(sharding_def[0].hash_h, 30);
ASSERT_EQ(sharding_def[1].name, "B");
ASSERT_EQ(sharding_def[1].shard_cnt, 6);
ASSERT_EQ(sharding_def[1].options, "option1,option2=aaaa");
ASSERT_EQ(sharding_def[2].name, "C");
ASSERT_EQ(sharding_def[2].shard_cnt, 1);
text_def = "A(10 B(6)=option C";
result = RocksDBStore::parse_sharding_def(text_def,
sharding_def,
&error_position,
&error_msg);
std::cout << text_def << std::endl;
if (error_position)
std::cout << std::string(error_position - text_def.begin(), ' ') << "^" << error_msg << std::endl;
ASSERT_EQ(result, false);
ASSERT_NE(error_position, nullptr);
ASSERT_NE(error_msg, "");
text_def = "A(10,1) B(6)=option C";
result = RocksDBStore::parse_sharding_def(text_def,
sharding_def,
&error_position,
&error_msg);
std::cout << text_def << std::endl;
  if (error_position)
    std::cout << std::string(error_position - text_def.begin(), ' ') << "^" << error_msg << std::endl;
ASSERT_EQ(result, false);
ASSERT_NE(error_position, nullptr);
ASSERT_NE(error_msg, "");
}
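// Exercises wholespace iterators over every combination of 0/1/10 keys
// per prefix, for each sharding layout in sharding_defs.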
class RocksDBShardingTest : public ::testing::TestWithParam<const char*> {
public:
boost::scoped_ptr<KeyValueDB> db;
RocksDBShardingTest() : db(0) {}
string _bl_to_str(bufferlist val) {
string str(val.c_str(), val.length());
return str;
}
void rm_r(string path) {
string cmd = string("rm -r ") + path;
if (verbose)
cout << "==> " << cmd << std::endl;
int r = ::system(cmd.c_str());
if (r) {
cerr << "failed with exit code " << r
<< ", continuing anyway" << std::endl;
}
}
void SetUp() override {
verbose = getenv("VERBOSE") && strcmp(getenv("VERBOSE"), "1") == 0;
int r = ::mkdir("kv_test_temp_dir", 0777);
if (r < 0 && errno != EEXIST) {
r = -errno;
cerr << __func__ << ": unable to create kv_test_temp_dir: "
<< cpp_strerror(r) << std::endl;
return;
}
db.reset(KeyValueDB::create(g_ceph_context, "rocksdb",
"kv_test_temp_dir"));
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
if (verbose)
cout << "Creating database with sharding: " << GetParam() << std::endl;
ASSERT_EQ(0, db->create_and_open(cout, GetParam()));
}
void TearDown() override {
db.reset(nullptr);
rm_r("kv_test_temp_dir");
}
  /*
    prefix -> placement (see sharding_defs below):
    Ad, C, Evade  - default column family
    Betelgeuse, D - own column family, 1 or 3 shards
    each prefix receives 0, 1 or 10 keys per generated test case
  */
bool verbose;
std::vector<std::string> sharding_defs = {
"Betelgeuse D",
"Betelgeuse(3) D",
"Betelgeuse D(3)",
"Betelgeuse(3) D(3)"};
std::vector<std::string> prefixes = {"Ad", "Betelgeuse", "C", "D", "Evade"};
std::vector<std::string> randoms = {"0", "1", "2", "3", "4", "5",
"found", "brain", "fully", "pen", "worth", "race",
"stand", "nodded", "whenever", "surrounded", "industrial", "skin",
"this", "direction", "family", "beginning", "whenever", "held",
"metal", "year", "like", "valuable", "softly", "whistle",
"perfectly", "broken", "idea", "also", "coffee", "branch",
"tongue", "immediately", "bent", "partly", "burn", "include",
"certain", "burst", "final", "smoke", "positive", "perfectly"
};
int R = randoms.size();
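  // test_id is a 6-digit base-3 counter; digits 0-4 choose how many keys
  // (0, 1 or 10) each prefix gets, digit 5 turning non-zero ends the scan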
typedef int test_id[6];
void zero(test_id& x) {
k = 0;
v = 0;
for (auto& i:x)
i = 0;
}
bool end(const test_id& x) {
return x[5] != 0;
}
void next(test_id& x) {
x[0]++;
for (int i = 0; i < 5; i++) {
if (x[i] == 3) {
x[i] = 0;
++x[i + 1];
}
}
}
std::map<std::string, std::string> data;
int k = 0;
int v = 0;
void generate_data(const test_id& x) {
data.clear();
for (int i = 0; i < 5; i++) {
if (verbose)
std::cout << x[i] << "-";
switch (x[i]) {
case 0:
break;
case 1:
data[RocksDBStore::combine_strings(prefixes[i], randoms[k++ % R])] = randoms[v++ % R];
break;
case 2:
std::string base = randoms[k++ % R];
for (int j = 0; j < 10; j++) {
data[RocksDBStore::combine_strings(prefixes[i], base + "." + randoms[k++ % R])] = randoms[v++ % R];
}
break;
}
}
}
void data_to_db() {
KeyValueDB::Transaction t = db->get_transaction();
for (auto &d : data) {
bufferlist v1;
v1.append(d.second);
string prefix;
string key;
RocksDBStore::split_key(d.first, &prefix, &key);
t->set(prefix, key, v1);
if (verbose)
std::cout << "SET " << prefix << " " << key << std::endl;
}
ASSERT_EQ(db->submit_transaction_sync(t), 0);
}
void clear_db() {
KeyValueDB::Transaction t = db->get_transaction();
for (auto &d : data) {
string prefix;
string key;
RocksDBStore::split_key(d.first, &prefix, &key);
t->rmkey(prefix, key);
}
ASSERT_EQ(db->submit_transaction_sync(t), 0);
//paranoid, check if db empty
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
ASSERT_EQ(it->seek_to_first(), 0);
ASSERT_EQ(it->valid(), false);
}
};
TEST_P(RocksDBShardingTest, wholespace_next) {
test_id X;
zero(X);
do {
generate_data(X);
data_to_db();
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
//move forward
auto dit = data.begin();
int r = it->seek_to_first();
ASSERT_EQ(r, 0);
ASSERT_EQ(it->valid(), (dit != data.end()));
while (dit != data.end()) {
ASSERT_EQ(it->valid(), true);
string prefix;
string key;
RocksDBStore::split_key(dit->first, &prefix, &key);
auto raw_key = it->raw_key();
ASSERT_EQ(raw_key.first, prefix);
ASSERT_EQ(raw_key.second, key);
ASSERT_EQ(it->value().to_str(), dit->second);
if (verbose)
std::cout << "next " << prefix << " " << key << std::endl;
ASSERT_EQ(it->next(), 0);
++dit;
}
ASSERT_EQ(it->valid(), false);
clear_db();
next(X);
} while (!end(X));
}
TEST_P(RocksDBShardingTest, wholespace_prev) {
test_id X;
zero(X);
do {
generate_data(X);
data_to_db();
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
auto dit = data.rbegin();
int r = it->seek_to_last();
ASSERT_EQ(r, 0);
ASSERT_EQ(it->valid(), (dit != data.rend()));
while (dit != data.rend()) {
ASSERT_EQ(it->valid(), true);
string prefix;
string key;
RocksDBStore::split_key(dit->first, &prefix, &key);
auto raw_key = it->raw_key();
ASSERT_EQ(raw_key.first, prefix);
ASSERT_EQ(raw_key.second, key);
ASSERT_EQ(it->value().to_str(), dit->second);
if (verbose)
std::cout << "prev " << prefix << " " << key << std::endl;
ASSERT_EQ(it->prev(), 0);
++dit;
}
ASSERT_EQ(it->valid(), false);
clear_db();
next(X);
} while (!end(X));
}
TEST_P(RocksDBShardingTest, wholespace_lower_bound) {
test_id X;
zero(X);
do {
generate_data(X);
data_to_db();
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
auto dit = data.begin();
int r = it->seek_to_first();
ASSERT_EQ(r, 0);
ASSERT_EQ(it->valid(), (dit != data.end()));
while (dit != data.end()) {
ASSERT_EQ(it->valid(), true);
string prefix;
string key;
RocksDBStore::split_key(dit->first, &prefix, &key);
KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator();
ASSERT_EQ(it1->lower_bound(prefix, key), 0);
ASSERT_EQ(it1->valid(), true);
auto raw_key = it1->raw_key();
ASSERT_EQ(raw_key.first, prefix);
ASSERT_EQ(raw_key.second, key);
if (verbose)
std::cout << "lower_bound " << prefix << " " << key << std::endl;
ASSERT_EQ(it->next(), 0);
++dit;
}
ASSERT_EQ(it->valid(), false);
clear_db();
next(X);
} while (!end(X));
}
TEST_P(RocksDBShardingTest, wholespace_upper_bound) {
test_id X;
zero(X);
do {
generate_data(X);
data_to_db();
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
auto dit = data.begin();
int r = it->seek_to_first();
ASSERT_EQ(r, 0);
ASSERT_EQ(it->valid(), (dit != data.end()));
while (dit != data.end()) {
ASSERT_EQ(it->valid(), true);
string prefix;
string key;
string key_minus_1;
RocksDBStore::split_key(dit->first, &prefix, &key);
      // build a key strictly smaller than 'key' so upper_bound lands on 'key'
key_minus_1 = key.substr(0, key.length() - 1) + std::string(1, key[key.length() - 1] - 1);
KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator();
ASSERT_EQ(it1->upper_bound(prefix, key_minus_1), 0);
ASSERT_EQ(it1->valid(), true);
auto raw_key = it1->raw_key();
ASSERT_EQ(raw_key.first, prefix);
ASSERT_EQ(raw_key.second, key);
if (verbose)
std::cout << "upper_bound " << prefix << " " << key_minus_1 << std::endl;
ASSERT_EQ(it->next(), 0);
++dit;
}
ASSERT_EQ(it->valid(), false);
clear_db();
next(X);
} while (!end(X));
}
TEST_P(RocksDBShardingTest, wholespace_lookup_limits) {
test_id X;
zero(X);
do {
generate_data(X);
data_to_db();
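    // " " sorts before and "~" after every prefix used here, probing
    // iterator behavior just outside both ends of the keyspace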
//lookup before first
if (data.size() > 0) {
auto dit = data.begin();
string prefix;
string key;
RocksDBStore::split_key(dit->first, &prefix, &key);
KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator();
ASSERT_EQ(it1->lower_bound(" ", " "), 0);
ASSERT_EQ(it1->valid(), true);
auto raw_key = it1->raw_key();
ASSERT_EQ(raw_key.first, prefix);
ASSERT_EQ(raw_key.second, key);
}
//lookup after last
KeyValueDB::WholeSpaceIterator it1 = db->get_wholespace_iterator();
ASSERT_EQ(it1->lower_bound("~", "~"), 0);
ASSERT_EQ(it1->valid(), false);
clear_db();
next(X);
} while (!end(X));
}
class RocksDBResharding : public ::testing::Test {
public:
boost::scoped_ptr<RocksDBStore> db;
RocksDBResharding() : db(0) {}
string _bl_to_str(bufferlist val) {
string str(val.c_str(), val.length());
return str;
}
void rm_r(string path) {
string cmd = string("rm -r ") + path;
if (verbose)
cout << "==> " << cmd << std::endl;
int r = ::system(cmd.c_str());
if (r) {
cerr << "failed with exit code " << r
<< ", continuing anyway" << std::endl;
}
}
void SetUp() override {
verbose = getenv("VERBOSE") && strcmp(getenv("VERBOSE"), "1") == 0;
int r = ::mkdir("kv_test_temp_dir", 0777);
if (r < 0 && errno != EEXIST) {
r = -errno;
cerr << __func__ << ": unable to create kv_test_temp_dir: "
<< cpp_strerror(r) << std::endl;
return;
}
KeyValueDB* db_kv = KeyValueDB::create(g_ceph_context, "rocksdb",
"kv_test_temp_dir");
RocksDBStore* db_rocks = dynamic_cast<RocksDBStore*>(db_kv);
ceph_assert(db_rocks);
db.reset(db_rocks);
ASSERT_EQ(0, db->init(g_conf()->bluestore_rocksdb_options));
}
void TearDown() override {
db.reset(nullptr);
rm_r("kv_test_temp_dir");
}
bool verbose;
std::vector<std::string> prefixes = {"Ad", "Betelgeuse", "C", "D", "Evade"};
std::vector<std::string> randoms = {"0", "1", "2", "3", "4", "5",
"found", "brain", "fully", "pen", "worth", "race",
"stand", "nodded", "whenever", "surrounded", "industrial", "skin",
"this", "direction", "family", "beginning", "whenever", "held",
"metal", "year", "like", "valuable", "softly", "whistle",
"perfectly", "broken", "idea", "also", "coffee", "branch",
"tongue", "immediately", "bent", "partly", "burn", "include",
"certain", "burst", "final", "smoke", "positive", "perfectly"
};
int R = randoms.size();
int k = 0;
std::map<std::string, std::string> data;
void generate_data() {
data.clear();
for (size_t p = 0; p < prefixes.size(); p++) {
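      // successive prefixes get exponentially more keys: 8, 64, 512, 4096, 32768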
size_t elem_count = 1 << (( p * 3 ) + 3);
for (size_t i = 0; i < elem_count; i++) {
std::string key;
for (int x = 0; x < 5; x++) {
key = key + randoms[rand() % R];
}
std::string value;
for (int x = 0; x < 3; x++) {
value = value + randoms[rand() % R];
}
data[RocksDBStore::combine_strings(prefixes[p], key)] = value;
}
}
}
void data_to_db() {
KeyValueDB::Transaction t = db->get_transaction();
size_t i = 0;
for (auto& d: data) {
bufferlist v1;
v1.append(d.second);
string prefix;
string key;
RocksDBStore::split_key(d.first, &prefix, &key);
t->set(prefix, key, v1);
if (verbose)
std::cout << "SET " << prefix << " " << key << std::endl;
i++;
if ((i % 1000) == 0) {
ASSERT_EQ(db->submit_transaction_sync(t), 0);
t.reset();
if (verbose)
std::cout << "writing key to DB" << std::endl;
t = db->get_transaction();
}
}
if (verbose)
std::cout << "writing keys to DB" << std::endl;
ASSERT_EQ(db->submit_transaction_sync(t), 0);
}
void clear_db() {
KeyValueDB::Transaction t = db->get_transaction();
for (auto &d : data) {
string prefix;
string key;
RocksDBStore::split_key(d.first, &prefix, &key);
t->rmkey(prefix, key);
}
ASSERT_EQ(db->submit_transaction_sync(t), 0);
//paranoid, check if db empty
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
ASSERT_EQ(it->seek_to_first(), 0);
ASSERT_EQ(it->valid(), false);
}
void check_db() {
KeyValueDB::WholeSpaceIterator it = db->get_wholespace_iterator();
//move forward
auto dit = data.begin();
int r = it->seek_to_first();
ASSERT_EQ(r, 0);
ASSERT_EQ(it->valid(), (dit != data.end()));
while (dit != data.end()) {
ASSERT_EQ(it->valid(), true);
string prefix;
string key;
RocksDBStore::split_key(dit->first, &prefix, &key);
auto raw_key = it->raw_key();
ASSERT_EQ(raw_key.first, prefix);
ASSERT_EQ(raw_key.second, key);
ASSERT_EQ(it->value().to_str(), dit->second);
if (verbose)
std::cout << "next " << prefix << " " << key << std::endl;
ASSERT_EQ(it->next(), 0);
++dit;
}
ASSERT_EQ(it->valid(), false);
}
};
TEST_F(RocksDBResharding, basic) {
ASSERT_EQ(0, db->create_and_open(cout, ""));
generate_data();
data_to_db();
check_db();
db->close();
ASSERT_EQ(db->reshard("Evade(4)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
TEST_F(RocksDBResharding, all_to_shards) {
ASSERT_EQ(0, db->create_and_open(cout, ""));
generate_data();
data_to_db();
check_db();
db->close();
ASSERT_EQ(db->reshard("Ad(1) Betelgeuse(1) C(1) D(1) Evade(1)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
TEST_F(RocksDBResharding, all_to_shards_and_back_again) {
ASSERT_EQ(0, db->create_and_open(cout, ""));
generate_data();
data_to_db();
check_db();
db->close();
ASSERT_EQ(db->reshard("Ad(1) Betelgeuse(1) C(1) D(1) Evade(1)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
ASSERT_EQ(db->reshard(""), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
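// The resume_interrupted_* tests inject a failure at successive stages of
// resharding (-1000 after the first batch, -1001 after a column, -1002
// before the final commit) and verify the DB refuses to open until a
// subsequent reshard run completes.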
TEST_F(RocksDBResharding, resume_interrupted_at_batch) {
ASSERT_EQ(0, db->create_and_open(cout, ""));
generate_data();
data_to_db();
check_db();
db->close();
RocksDBStore::resharding_ctrl ctrl;
ctrl.unittest_fail_after_first_batch = true;
ASSERT_EQ(db->reshard("Evade(4)", &ctrl), -1000);
ASSERT_NE(db->open(cout), 0);
ASSERT_EQ(db->reshard("Evade(4)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
TEST_F(RocksDBResharding, resume_interrupted_at_column) {
ASSERT_EQ(0, db->create_and_open(cout, ""));
generate_data();
data_to_db();
check_db();
db->close();
RocksDBStore::resharding_ctrl ctrl;
ctrl.unittest_fail_after_processing_column = true;
ASSERT_EQ(db->reshard("Evade(4)", &ctrl), -1001);
ASSERT_NE(db->open(cout), 0);
ASSERT_EQ(db->reshard("Evade(4)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
TEST_F(RocksDBResharding, resume_interrupted_before_commit) {
ASSERT_EQ(0, db->create_and_open(cout, ""));
generate_data();
data_to_db();
check_db();
db->close();
RocksDBStore::resharding_ctrl ctrl;
ctrl.unittest_fail_after_successful_processing = true;
ASSERT_EQ(db->reshard("Evade(4)", &ctrl), -1002);
ASSERT_NE(db->open(cout), 0);
ASSERT_EQ(db->reshard("Evade(4)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
TEST_F(RocksDBResharding, prevent_incomplete_hash_change) {
ASSERT_EQ(0, db->create_and_open(cout, "Evade(4,0-3)"));
generate_data();
data_to_db();
check_db();
db->close();
RocksDBStore::resharding_ctrl ctrl;
ctrl.unittest_fail_after_successful_processing = true;
ASSERT_EQ(db->reshard("Evade(4,0-8)", &ctrl), -1002);
ASSERT_NE(db->open(cout), 0);
ASSERT_EQ(db->reshard("Evade(4,0-8)"), 0);
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
TEST_F(RocksDBResharding, change_reshard) {
ASSERT_EQ(0, db->create_and_open(cout, "Ad(4)"));
generate_data();
data_to_db();
check_db();
db->close();
{
RocksDBStore::resharding_ctrl ctrl;
ctrl.unittest_fail_after_first_batch = true;
ASSERT_EQ(db->reshard("C(5) D(3)", &ctrl), -1000);
}
{
RocksDBStore::resharding_ctrl ctrl;
ASSERT_NE(db->open(cout), 0);
ctrl.unittest_fail_after_first_batch = false;
ctrl.unittest_fail_after_processing_column = true;
ASSERT_EQ(db->reshard("C(5) Evade(2)", &ctrl), -1001);
}
{
RocksDBStore::resharding_ctrl ctrl;
ASSERT_NE(db->open(cout), 0);
ctrl.unittest_fail_after_processing_column = false;
ctrl.unittest_fail_after_successful_processing = true;
ASSERT_EQ(db->reshard("Evade(2) D(3)", &ctrl), -1002);
}
{
ASSERT_NE(db->open(cout), 0);
ASSERT_EQ(db->reshard("Ad(1) Evade(5)"), 0);
}
{
ASSERT_EQ(db->open(cout), 0);
check_db();
db->close();
}
}
INSTANTIATE_TEST_SUITE_P(
KeyValueDB,
KVTest,
::testing::Values("rocksdb"));
INSTANTIATE_TEST_SUITE_P(
KeyValueDB,
RocksDBShardingTest,
::testing::Values("Betelgeuse D",
"Betelgeuse(3) D",
"Betelgeuse D(3)",
"Betelgeuse(3) D(3)"));
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
g_ceph_context->_conf.set_val(
"enable_experimental_unrecoverable_data_corrupting_features",
"rocksdb");
g_ceph_context->_conf.apply_changes(nullptr);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 36,536 | 26.997701 | 120 |
cc
|
null |
ceph-main/src/test/objectstore/test_memstore_clone.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2015 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <boost/intrusive_ptr.hpp>
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "os/ObjectStore.h"
#include <gtest/gtest.h>
#include "include/ceph_assert.h"
#include "common/errno.h"
#include "store_test_fixture.h"
#define dout_context g_ceph_context
using namespace std;
namespace {
const coll_t cid;
ghobject_t make_ghobject(const char *oid)
{
return ghobject_t{hobject_t{oid, "", CEPH_NOSNAP, 0, 0, ""}};
}
} // anonymous namespace
class MemStoreClone : public StoreTestFixture {
public:
MemStoreClone()
: StoreTestFixture("memstore")
{}
void SetUp() override {
StoreTestFixture::SetUp();
if (HasFailure()) {
return;
}
ObjectStore::Transaction t;
ch = store->create_new_collection(cid);
t.create_collection(cid, 4);
    int r = store->queue_transaction(ch, std::move(t));
    if (r != 0) {
      derr << "failed to create collection with " << cpp_strerror(r) << dendl;
    }
    ASSERT_EQ(0, r);
}
void TearDown() override {
ch.reset();
StoreTestFixture::TearDown();
}
};
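// The diagrams below show 12-byte objects in 2-byte cells; [...] brackets
// the cloned source range. Every test clones 8 bytes from src offset 2 to
// dst offset 2.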
// src 11[11 11 11 11]11
// dst 22 22 22 22 22 22
// res 22 11 11 11 11 22
TEST_F(MemStoreClone, CloneRangeAllocated)
{
ASSERT_TRUE(store);
const auto src = make_ghobject("src1");
const auto dst = make_ghobject("dst1");
bufferlist srcbl, dstbl, result, expected;
srcbl.append("111111111111");
dstbl.append("222222222222");
expected.append("221111111122");
ObjectStore::Transaction t;
t.write(cid, src, 0, 12, srcbl);
t.write(cid, dst, 0, 12, dstbl);
t.clone_range(cid, src, dst, 2, 8, 2);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t)));
ASSERT_EQ(12, store->read(ch, dst, 0, 12, result));
ASSERT_EQ(expected, result);
}
// src __[__ __ __ __]__ 11 11
// dst 22 22 22 22 22 22
// res 22 00 00 00 00 22
TEST_F(MemStoreClone, CloneRangeHole)
{
ASSERT_TRUE(store);
const auto src = make_ghobject("src2");
const auto dst = make_ghobject("dst2");
bufferlist srcbl, dstbl, result, expected;
srcbl.append("1111");
dstbl.append("222222222222");
expected.append("22\000\000\000\000\000\000\000\00022", 12);
ObjectStore::Transaction t;
t.write(cid, src, 12, 4, srcbl);
t.write(cid, dst, 0, 12, dstbl);
t.clone_range(cid, src, dst, 2, 8, 2);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t)));
ASSERT_EQ(12, store->read(ch, dst, 0, 12, result));
ASSERT_EQ(expected, result);
}
// src __[__ __ __ 11]11
// dst 22 22 22 22 22 22
// res 22 00 00 00 11 22
TEST_F(MemStoreClone, CloneRangeHoleStart)
{
ASSERT_TRUE(store);
const auto src = make_ghobject("src3");
const auto dst = make_ghobject("dst3");
bufferlist srcbl, dstbl, result, expected;
srcbl.append("1111");
dstbl.append("222222222222");
expected.append("22\000\000\000\000\000\0001122", 12);
ObjectStore::Transaction t;
t.write(cid, src, 8, 4, srcbl);
t.write(cid, dst, 0, 12, dstbl);
t.clone_range(cid, src, dst, 2, 8, 2);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t)));
ASSERT_EQ(12, store->read(ch, dst, 0, 12, result));
ASSERT_EQ(expected, result);
}
// src 11[11 __ __ 11]11
// dst 22 22 22 22 22 22
// res 22 11 00 00 11 22
TEST_F(MemStoreClone, CloneRangeHoleMiddle)
{
ASSERT_TRUE(store);
const auto src = make_ghobject("src4");
const auto dst = make_ghobject("dst4");
bufferlist srcbl, dstbl, result, expected;
srcbl.append("1111");
dstbl.append("222222222222");
expected.append("2211\000\000\000\0001122", 12);
ObjectStore::Transaction t;
t.write(cid, src, 0, 4, srcbl);
t.write(cid, src, 8, 4, srcbl);
t.write(cid, dst, 0, 12, dstbl);
t.clone_range(cid, src, dst, 2, 8, 2);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t)));
ASSERT_EQ(12, store->read(ch, dst, 0, 12, result));
ASSERT_EQ(expected, result);
}
// src 11[11 __ __ __]__ 11 11
// dst 22 22 22 22 22 22
// res 22 11 00 00 00 22
TEST_F(MemStoreClone, CloneRangeHoleEnd)
{
ASSERT_TRUE(store);
const auto src = make_ghobject("src5");
const auto dst = make_ghobject("dst5");
bufferlist srcbl, dstbl, result, expected;
srcbl.append("1111");
dstbl.append("222222222222");
expected.append("2211\000\000\000\000\000\00022", 12);
ObjectStore::Transaction t;
t.write(cid, src, 0, 4, srcbl);
t.write(cid, src, 12, 4, srcbl);
t.write(cid, dst, 0, 12, dstbl);
t.clone_range(cid, src, dst, 2, 8, 2);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t)));
ASSERT_EQ(12, store->read(ch, dst, 0, 12, result));
ASSERT_EQ(expected, result);
}
int main(int argc, char** argv)
{
// default to memstore
map<string,string> defaults = {
{ "osd_objectstore", "memstore" },
{ "osd_data", "msc.test_temp_dir" },
{ "memstore_page_size", "4" }
};
auto args = argv_to_vec(argc, argv);
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 5,408 | 25.64532 | 78 |
cc
|
null |
ceph-main/src/test/objectstore/test_transaction.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Casey Bodley <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "os/ObjectStore.h"
#include <gtest/gtest.h>
#include "common/Clock.h"
#include "include/utime.h"
#include <boost/tuple/tuple.hpp>
using namespace std;
TEST(Transaction, MoveConstruct)
{
auto a = ObjectStore::Transaction{};
a.nop();
ASSERT_FALSE(a.empty());
// move-construct in b
auto b = std::move(a);
ASSERT_TRUE(a.empty());
ASSERT_FALSE(b.empty());
}
TEST(Transaction, MoveAssign)
{
auto a = ObjectStore::Transaction{};
a.nop();
ASSERT_FALSE(a.empty());
auto b = ObjectStore::Transaction{};
b = std::move(a); // move-assign to b
ASSERT_TRUE(a.empty());
ASSERT_FALSE(b.empty());
}
TEST(Transaction, CopyConstruct)
{
auto a = ObjectStore::Transaction{};
a.nop();
ASSERT_FALSE(a.empty());
auto b = a; // copy-construct in b
ASSERT_FALSE(a.empty());
ASSERT_FALSE(b.empty());
}
TEST(Transaction, CopyAssign)
{
auto a = ObjectStore::Transaction{};
a.nop();
ASSERT_FALSE(a.empty());
auto b = ObjectStore::Transaction{};
b = a; // copy-assign to b
ASSERT_FALSE(a.empty());
ASSERT_FALSE(b.empty());
}
TEST(Transaction, Swap)
{
auto a = ObjectStore::Transaction{};
a.nop();
ASSERT_FALSE(a.empty());
auto b = ObjectStore::Transaction{};
std::swap(a, b); // swap a and b
ASSERT_TRUE(a.empty());
ASSERT_FALSE(b.empty());
}
ObjectStore::Transaction generate_transaction()
{
auto a = ObjectStore::Transaction{};
a.nop();
coll_t cid;
object_t obj("test_name");
snapid_t snap(0);
hobject_t hoid(obj, "key", snap, 0, 0, "nspace");
ghobject_t oid(hoid);
  coll_t acid;
  object_t aobj("another_test_name");
  snapid_t asnap(0);
  hobject_t ahoid(aobj, "another_key", asnap, 0, 0, "another_nspace");
  ghobject_t aoid(ahoid);
std::set<string> keys;
keys.insert("any_1");
keys.insert("any_2");
keys.insert("any_3");
bufferlist bl;
bl.append_zero(4096);
a.write(cid, oid, 1, 4096, bl, 0);
a.omap_setkeys(acid, aoid, bl);
a.omap_rmkeys(cid, aoid, keys);
a.touch(acid, oid);
return a;
}
TEST(Transaction, MoveRangesDelSrcObj)
{
auto t = ObjectStore::Transaction{};
t.nop();
coll_t c(spg_t(pg_t(1,2), shard_id_t::NO_SHARD));
ghobject_t o1(hobject_t("obj", "", 123, 456, -1, ""));
ghobject_t o2(hobject_t("obj2", "", 123, 456, -1, ""));
vector<std::pair<uint64_t, uint64_t>> move_info = {
make_pair(1, 5),
make_pair(10, 5)
};
t.touch(c, o1);
bufferlist bl;
bl.append("some data");
t.write(c, o1, 1, bl.length(), bl);
t.write(c, o1, 10, bl.length(), bl);
t.clone(c, o1, o2);
bl.append("some other data");
t.write(c, o2, 1, bl.length(), bl);
}
TEST(Transaction, GetNumBytes)
{
auto a = ObjectStore::Transaction{};
a.nop();
ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test());
coll_t cid;
object_t obj("test_name");
snapid_t snap(0);
hobject_t hoid(obj, "key", snap, 0, 0, "nspace");
ghobject_t oid(hoid);
  coll_t acid;
  object_t aobj("another_test_name");
  snapid_t asnap(0);
  hobject_t ahoid(aobj, "another_key", asnap, 0, 0, "another_nspace");
  ghobject_t aoid(ahoid);
std::set<string> keys;
keys.insert("any_1");
keys.insert("any_2");
keys.insert("any_3");
bufferlist bl;
bl.append_zero(4096);
a.write(cid, oid, 1, 4096, bl, 0);
ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test());
a.omap_setkeys(acid, aoid, bl);
ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test());
a.omap_rmkeys(cid, aoid, keys);
ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test());
a.touch(acid, oid);
ASSERT_TRUE(a.get_encoded_bytes() == a.get_encoded_bytes_test());
}
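// micro-benchmark: compare the current get_encoded_bytes() path against
// the legacy get_encoded_bytes_test() over the same transaction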
void bench_num_bytes(bool legacy)
{
const int max = 2500000;
auto a = generate_transaction();
if (legacy) {
cout << "get_encoded_bytes_test: ";
} else {
cout << "get_encoded_bytes: ";
}
utime_t start = ceph_clock_now();
if (legacy) {
for (int i = 0; i < max; ++i) {
a.get_encoded_bytes_test();
}
} else {
for (int i = 0; i < max; ++i) {
a.get_encoded_bytes();
}
}
utime_t end = ceph_clock_now();
cout << max << " encodes in " << (end - start) << std::endl;
}
TEST(Transaction, GetNumBytesBenchLegacy)
{
bench_num_bytes(true);
}
TEST(Transaction, GetNumBytesBenchCurrent)
{
bench_num_bytes(false);
}
| 4,692 | 20.726852 | 70 |
cc
|
null |
ceph-main/src/test/old/test_disk_bw.cc
|
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <sys/uio.h>
#include "common/Clock.h"
#include "common/safe_io.h"
#include <iostream>
using namespace std;
int main(int argc, char **argv)
{
void *buf;
int fd, count, loop = 0;
  if (argc != 4) {
    fprintf(stderr, "Usage: %s device bsize count\n", argv[0]);
    exit (1);
  }
int bsize = atoi(argv[2]);
count = atoi(argv[3]);
  if (posix_memalign(&buf, sysconf(_SC_PAGESIZE), bsize)) {
    fprintf(stderr, "Can't allocate an aligned buffer of %d bytes\n", bsize);
    exit (2);
  }
//if ((fd = open(argv[1], O_SYNC|O_RDWR)) < 0) {
if ((fd = open(argv[1], O_DIRECT|O_RDWR)) < 0) {
fprintf(stderr, "Can't open device %s\n", argv[1]);
exit (4);
}
utime_t start = ceph_clock_now();
while (loop++ < count) {
int ret = safe_write(fd, buf, bsize);
if (ret)
ceph_abort();
//if ((loop % 100) == 0)
//fprintf(stderr, ".");
}
::fsync(fd);
::close(fd);
utime_t end = ceph_clock_now();
end -= start;
char hostname[80];
gethostname(hostname, 80);
  double mb = (double)bsize * count / 1024 / 1024;
cout << hostname << "\t" << mb << " MB\t" << end << " seconds\t" << (mb / (double)end) << " MB/sec" << std::endl;
}
| 1,280 | 19.333333 | 115 |
cc
|
null |
ceph-main/src/test/old/testfilepath.cc
|
#include "include/filepath.h"
#include <iostream>
using namespace std;
void print(const string &s) {
filepath fp = s;
cout << "s = " << s << " filepath = " << fp << endl;
cout << " depth " << fp.depth() << endl;
for (int i=0; i<fp.depth(); i++) {
cout << "\t" << i << " " << fp[i] << endl;
}
}
int main() {
filepath p;
print("/home/sage");
print("a/b/c");
print("/a/b/c");
print("/a/b/c/");
print("/a/b/../d");
}
| 444 | 18.347826 | 56 |
cc
|
null |
ceph-main/src/test/opensuse-13.2/install-deps.sh
|
../../../install-deps.sh
| 24 | 24 | 24 |
sh
|
null |
ceph-main/src/test/osd/Object.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "include/interval_set.h"
#include "include/buffer.h"
#include <list>
#include <map>
#include <set>
#include <iostream>
#include "Object.h"
void ContDesc::encode(bufferlist &bl) const
{
ENCODE_START(1, 1, bl);
encode(objnum, bl);
encode(cursnap, bl);
encode(seqnum, bl);
encode(prefix, bl);
encode(oid, bl);
ENCODE_FINISH(bl);
}
void ContDesc::decode(bufferlist::const_iterator &bl)
{
DECODE_START(1, bl);
decode(objnum, bl);
decode(cursnap, bl);
decode(seqnum, bl);
decode(prefix, bl);
decode(oid, bl);
DECODE_FINISH(bl);
}
std::ostream &operator<<(std::ostream &out, const ContDesc &rhs)
{
return out << "(ObjNum " << rhs.objnum
<< " snap " << rhs.cursnap
<< " seq_num " << rhs.seqnum
<< ")";
}
void AppendGenerator::get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) {
RandWrap rand(cont.seqnum);
uint64_t pos = off;
uint64_t limit = off + get_append_size(cont);
while (pos < limit) {
uint64_t segment_length = round_up(
rand() % (max_append_size - min_append_size),
alignment) + min_append_size;
ceph_assert(segment_length >= min_append_size);
if (segment_length + pos > limit) {
segment_length = limit - pos;
}
if (alignment)
ceph_assert(segment_length % alignment == 0);
out.insert(std::pair<uint64_t, uint64_t>(pos, segment_length));
pos += segment_length;
}
}
void VarLenGenerator::get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) {
RandWrap rand(cont.seqnum);
uint64_t pos = 0;
uint64_t limit = get_length(cont);
bool include = false;
while (pos < limit) {
uint64_t segment_length = (rand() % (max_stride_size - min_stride_size)) + min_stride_size;
ceph_assert(segment_length < max_stride_size);
ceph_assert(segment_length >= min_stride_size);
if (segment_length + pos > limit) {
segment_length = limit - pos;
}
if (include) {
out.insert(std::pair<uint64_t, uint64_t>(pos, segment_length));
include = false;
} else {
include = true;
}
pos += segment_length;
}
}
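// Re-resolve which content layer covers the current position: pop stack
// entries whose resume point has been reached, then walk the remaining
// layers (newest to oldest) until one covers pos, recording where each
// skipped layer would become relevant again.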
void ObjectDesc::iterator::adjust_stack() {
while (!stack.empty() && pos >= stack.top().second.next) {
ceph_assert(pos == stack.top().second.next);
size = stack.top().second.size;
current = stack.top().first;
stack.pop();
}
if (stack.empty()) {
cur_valid_till = std::numeric_limits<uint64_t>::max();
} else {
cur_valid_till = stack.top().second.next;
}
while (current != layers.end() && !current->covers(pos)) {
uint64_t next = current->next(pos);
if (next < cur_valid_till) {
stack.emplace(current, StackState{next, size});
cur_valid_till = next;
}
++current;
}
if (current == layers.end()) {
size = 0;
} else {
current->iter.seek(pos);
size = std::min(size, current->get_size());
cur_valid_till = std::min(
current->valid_till(pos),
cur_valid_till);
}
}
const ContDesc &ObjectDesc::most_recent() {
return layers.begin()->second;
}
void ObjectDesc::update(ContentsGenerator *gen, const ContDesc &next) {
layers.push_front(std::pair<std::shared_ptr<ContentsGenerator>, ContDesc>(std::shared_ptr<ContentsGenerator>(gen), next));
return;
}
bool ObjectDesc::check(bufferlist &to_check) {
iterator objiter = begin();
uint64_t error_at = 0;
if (!objiter.check_bl_advance(to_check, &error_at)) {
std::cout << "incorrect buffer at pos " << error_at << std::endl;
return false;
}
uint64_t size = layers.begin()->first->get_length(layers.begin()->second);
if (to_check.length() < size) {
std::cout << "only read " << to_check.length()
<< " out of size " << size << std::endl;
return false;
}
return true;
}
bool ObjectDesc::check_sparse(const std::map<uint64_t, uint64_t>& extents,
bufferlist &to_check)
{
uint64_t off = 0;
uint64_t pos = 0;
auto objiter = begin();
for (auto &&extiter : extents) {
// verify hole
{
bufferlist bl;
bl.append_zero(extiter.first - pos);
uint64_t error_at = 0;
if (!objiter.check_bl_advance(bl, &error_at)) {
std::cout << "sparse read omitted non-zero data at "
<< error_at << std::endl;
return false;
}
}
ceph_assert(off <= to_check.length());
pos = extiter.first;
objiter.seek(pos);
{
bufferlist bl;
bl.substr_of(
to_check,
off,
std::min(to_check.length() - off, extiter.second));
uint64_t error_at = 0;
if (!objiter.check_bl_advance(bl, &error_at)) {
std::cout << "incorrect buffer at pos " << error_at << std::endl;
return false;
}
off += extiter.second;
pos += extiter.second;
}
if (pos < extiter.first + extiter.second) {
std::cout << "reached end of iterator first" << std::endl;
return false;
}
}
// final hole
bufferlist bl;
uint64_t size = layers.begin()->first->get_length(layers.begin()->second);
bl.append_zero(size - pos);
uint64_t error_at;
if (!objiter.check_bl_advance(bl, &error_at)) {
std::cout << "sparse read omitted non-zero data at "
<< error_at << std::endl;
return false;
}
return true;
}
| 5,268 | 25.21393 | 124 |
cc
|
null |
ceph-main/src/test/osd/Object.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "include/interval_set.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include <list>
#include <map>
#include <set>
#include <stack>
#include <random>
#ifndef OBJECT_H
#define OBJECT_H
/// describes an object
class ContDesc {
public:
int objnum;
int cursnap;
unsigned seqnum;
std::string prefix;
std::string oid;
ContDesc() :
objnum(0), cursnap(0),
seqnum(0), prefix("") {}
ContDesc(int objnum,
int cursnap,
unsigned seqnum,
const std::string &prefix) :
objnum(objnum), cursnap(cursnap),
seqnum(seqnum), prefix(prefix) {}
bool operator==(const ContDesc &rhs) {
return (rhs.objnum == objnum &&
rhs.cursnap == cursnap &&
rhs.seqnum == seqnum &&
rhs.prefix == prefix &&
rhs.oid == oid);
}
bool operator<(const ContDesc &rhs) const {
return seqnum < rhs.seqnum;
}
bool operator!=(const ContDesc &rhs) {
return !((*this) == rhs);
}
void encode(bufferlist &bl) const;
void decode(bufferlist::const_iterator &bp);
};
WRITE_CLASS_ENCODER(ContDesc)
std::ostream &operator<<(std::ostream &out, const ContDesc &rhs);
class ChunkDesc {
public:
uint32_t offset;
uint32_t length;
std::string oid;
};
class ContentsGenerator {
public:
class iterator_impl {
public:
virtual char operator*() = 0;
virtual iterator_impl &operator++() = 0;
virtual void seek(uint64_t pos) = 0;
virtual bool end() = 0;
virtual ContDesc get_cont() const = 0;
virtual uint64_t get_pos() const = 0;
virtual bufferlist gen_bl_advance(uint64_t s) {
bufferptr ret = buffer::create(s);
for (uint64_t i = 0; i < s; ++i, ++(*this)) {
ret[i] = **this;
}
bufferlist _ret;
_ret.push_back(ret);
return _ret;
}
/// walk through given @c bl
///
/// @param[out] off the offset of the first byte which does not match
/// @returns true if @c bl matches with the content, false otherwise
virtual bool check_bl_advance(bufferlist &bl, uint64_t *off = nullptr) {
uint64_t _off = 0;
for (bufferlist::iterator i = bl.begin();
!i.end();
++i, ++_off, ++(*this)) {
if (*i != **this) {
if (off)
*off = _off;
return false;
}
}
return true;
}
virtual ~iterator_impl() {};
};
class iterator {
public:
ContentsGenerator *parent;
iterator_impl *impl;
char operator *() { return **impl; }
iterator &operator++() { ++(*impl); return *this; };
void seek(uint64_t pos) { impl->seek(pos); }
bool end() { return impl->end(); }
~iterator() { parent->put_iterator_impl(impl); }
iterator(const iterator &rhs) : parent(rhs.parent) {
impl = parent->dup_iterator_impl(rhs.impl);
}
iterator &operator=(const iterator &rhs) {
iterator new_iter(rhs);
swap(new_iter);
return *this;
}
void swap(iterator &other) {
ContentsGenerator *otherparent = other.parent;
other.parent = parent;
parent = otherparent;
iterator_impl *otherimpl = other.impl;
other.impl = impl;
impl = otherimpl;
}
bufferlist gen_bl_advance(uint64_t s) {
return impl->gen_bl_advance(s);
}
bool check_bl_advance(bufferlist &bl, uint64_t *off = nullptr) {
return impl->check_bl_advance(bl, off);
}
iterator(ContentsGenerator *parent, iterator_impl *impl) :
parent(parent), impl(impl) {}
};
virtual uint64_t get_length(const ContDesc &in) = 0;
virtual void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) = 0;
void get_ranges(const ContDesc &cont, interval_set<uint64_t> &out) {
std::map<uint64_t, uint64_t> ranges;
get_ranges_map(cont, ranges);
for (std::map<uint64_t, uint64_t>::iterator i = ranges.begin();
i != ranges.end();
++i) {
out.insert(i->first, i->second);
}
}
virtual iterator_impl *get_iterator_impl(const ContDesc &in) = 0;
virtual iterator_impl *dup_iterator_impl(const iterator_impl *in) = 0;
virtual void put_iterator_impl(iterator_impl *in) = 0;
virtual ~ContentsGenerator() {};
iterator get_iterator(const ContDesc &in) {
return iterator(this, get_iterator_impl(in));
}
};
class RandGenerator : public ContentsGenerator {
public:
typedef std::minstd_rand0 RandWrap;
class iterator_impl : public ContentsGenerator::iterator_impl {
public:
uint64_t pos;
ContDesc cont;
RandWrap rand;
RandGenerator *cont_gen;
char current;
iterator_impl(const ContDesc &cont, RandGenerator *cont_gen) :
pos(0), cont(cont), rand(cont.seqnum), cont_gen(cont_gen) {
current = rand();
}
ContDesc get_cont() const override { return cont; }
uint64_t get_pos() const override { return pos; }
iterator_impl &operator++() override {
pos++;
current = rand();
return *this;
}
char operator*() override {
return current;
}
void seek(uint64_t _pos) override {
if (_pos < pos) {
iterator_impl begin = iterator_impl(cont, cont_gen);
begin.seek(_pos);
*this = begin;
}
while (pos < _pos) {
++(*this);
}
}
bool end() override {
return pos >= cont_gen->get_length(cont);
}
};
ContentsGenerator::iterator_impl *get_iterator_impl(const ContDesc &in) override {
RandGenerator::iterator_impl *i = new iterator_impl(in, this);
return i;
}
void put_iterator_impl(ContentsGenerator::iterator_impl *in) override {
delete in;
}
ContentsGenerator::iterator_impl *dup_iterator_impl(
const ContentsGenerator::iterator_impl *in) override {
ContentsGenerator::iterator_impl *retval = get_iterator_impl(in->get_cont());
retval->seek(in->get_pos());
return retval;
}
};
class VarLenGenerator : public RandGenerator {
uint64_t max_length;
uint64_t min_stride_size;
uint64_t max_stride_size;
public:
VarLenGenerator(
uint64_t length, uint64_t min_stride_size, uint64_t max_stride_size) :
max_length(length),
min_stride_size(min_stride_size),
max_stride_size(max_stride_size) {}
void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override;
uint64_t get_length(const ContDesc &in) override {
RandWrap rand(in.seqnum);
if (max_length == 0)
return 0;
return (rand() % (max_length/2)) + ((max_length - 1)/2) + 1;
}
};
class AttrGenerator : public RandGenerator {
uint64_t max_len;
uint64_t big_max_len;
public:
AttrGenerator(uint64_t max_len, uint64_t big_max_len)
: max_len(max_len), big_max_len(big_max_len) {}
void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override {
out.insert(std::pair<uint64_t, uint64_t>(0, get_length(cont)));
}
uint64_t get_length(const ContDesc &in) override {
RandWrap rand(in.seqnum);
// make some attrs big
if (in.seqnum & 3)
return (rand() % max_len);
else
return (rand() % big_max_len);
}
bufferlist gen_bl(const ContDesc &in) {
bufferlist bl;
for (iterator i = get_iterator(in); !i.end(); ++i) {
bl.append(*i);
}
ceph_assert(bl.length() < big_max_len);
return bl;
}
};
class AppendGenerator : public RandGenerator {
uint64_t off;
uint64_t alignment;
uint64_t min_append_size;
uint64_t max_append_size;
uint64_t max_append_total;
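  // round 'in' up to a multiple of 'by'; note that an already-aligned
  // value is bumped to the next multiple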
uint64_t round_up(uint64_t in, uint64_t by) {
if (by)
in += (by - (in % by));
return in;
}
public:
AppendGenerator(
uint64_t off,
uint64_t alignment,
uint64_t min_append_size,
uint64_t _max_append_size,
uint64_t max_append_multiple) :
off(off), alignment(alignment),
min_append_size(round_up(min_append_size, alignment)),
max_append_size(round_up(_max_append_size, alignment)) {
if (_max_append_size == min_append_size)
max_append_size += alignment;
max_append_total = max_append_multiple * max_append_size;
}
uint64_t get_append_size(const ContDesc &in) {
RandWrap rand(in.seqnum);
return round_up(rand() % max_append_total, alignment);
}
uint64_t get_length(const ContDesc &in) override {
return off + get_append_size(in);
}
void get_ranges_map(
const ContDesc &cont, std::map<uint64_t, uint64_t> &out) override;
};
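// Tracks an object's expected contents as a list of generator layers,
// newest first; the nested iterator materializes the merged byte view.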
class ObjectDesc {
public:
ObjectDesc()
: exists(false), dirty(false),
version(0), flushed(false) {}
ObjectDesc(const ContDesc &init, ContentsGenerator *cont_gen)
: exists(false), dirty(false),
version(0), flushed(false) {
layers.push_front(std::pair<std::shared_ptr<ContentsGenerator>, ContDesc>(std::shared_ptr<ContentsGenerator>(cont_gen), init));
}
class iterator {
public:
uint64_t pos;
uint64_t size;
uint64_t cur_valid_till;
class ContState {
interval_set<uint64_t> ranges;
const uint64_t size;
public:
ContDesc cont;
std::shared_ptr<ContentsGenerator> gen;
ContentsGenerator::iterator iter;
ContState(
const ContDesc &_cont,
std::shared_ptr<ContentsGenerator> _gen,
ContentsGenerator::iterator _iter)
: size(_gen->get_length(_cont)), cont(_cont), gen(_gen), iter(_iter) {
gen->get_ranges(cont, ranges);
}
const interval_set<uint64_t> &get_ranges() {
return ranges;
}
uint64_t get_size() {
return gen->get_length(cont);
}
bool covers(uint64_t pos) {
return ranges.contains(pos) || (!ranges.starts_after(pos) && pos >= size);
}
uint64_t next(uint64_t pos) {
ceph_assert(!covers(pos));
return ranges.starts_after(pos) ? ranges.start_after(pos) : size;
}
uint64_t valid_till(uint64_t pos) {
ceph_assert(covers(pos));
return ranges.contains(pos) ?
ranges.end_after(pos) :
std::numeric_limits<uint64_t>::max();
}
};
// from latest to earliest
using layers_t = std::vector<ContState>;
layers_t layers;
struct StackState {
const uint64_t next;
const uint64_t size;
};
std::stack<std::pair<layers_t::iterator, StackState> > stack;
layers_t::iterator current;
explicit iterator(ObjectDesc &obj) :
pos(0),
size(obj.layers.begin()->first->get_length(obj.layers.begin()->second)),
cur_valid_till(0) {
for (auto &&i : obj.layers) {
layers.push_back({i.second, i.first, i.first->get_iterator(i.second)});
}
current = layers.begin();
adjust_stack();
}
void adjust_stack();
iterator &operator++() {
ceph_assert(cur_valid_till >= pos);
++pos;
if (pos >= cur_valid_till) {
adjust_stack();
}
return *this;
}
char operator*() {
if (current == layers.end()) {
return '\0';
} else {
return pos >= size ? '\0' : *(current->iter);
}
}
bool end() {
return pos >= size;
}
// advance @c pos to given position
void seek(uint64_t _pos) {
if (_pos < pos) {
ceph_abort();
}
while (pos < _pos) {
ceph_assert(cur_valid_till >= pos);
uint64_t next = std::min(_pos - pos, cur_valid_till - pos);
pos += next;
if (pos >= cur_valid_till) {
ceph_assert(pos == cur_valid_till);
adjust_stack();
}
}
ceph_assert(pos == _pos);
}
// grab the bytes in the range of [pos, pos+s), and advance @c pos
//
// @returns the bytes in the specified range
bufferlist gen_bl_advance(uint64_t s) {
bufferlist ret;
while (s > 0) {
ceph_assert(cur_valid_till >= pos);
uint64_t next = std::min(s, cur_valid_till - pos);
if (current != layers.end() && pos < size) {
ret.append(current->iter.gen_bl_advance(next));
} else {
ret.append_zero(next);
}
pos += next;
ceph_assert(next <= s);
s -= next;
if (pos >= cur_valid_till) {
ceph_assert(cur_valid_till == pos);
adjust_stack();
}
}
return ret;
}
// compare the range of [pos, pos+bl.length()) with given @c bl, and
// advance @pos if all bytes in the range match
//
// @param error_at the offset of the first byte which does not match
// @returns true if all bytes match, false otherwise
bool check_bl_advance(bufferlist &bl, uint64_t *error_at = nullptr) {
uint64_t off = 0;
while (off < bl.length()) {
ceph_assert(cur_valid_till >= pos);
uint64_t next = std::min(bl.length() - off, cur_valid_till - pos);
bufferlist to_check;
to_check.substr_of(bl, off, next);
if (current != layers.end() && pos < size) {
if (!current->iter.check_bl_advance(to_check, error_at)) {
if (error_at)
*error_at += off;
return false;
}
} else {
uint64_t at = pos;
for (auto i = to_check.begin(); !i.end(); ++i, ++at) {
if (*i) {
if (error_at)
*error_at = at;
return false;
}
}
}
pos += next;
off += next;
ceph_assert(off <= bl.length());
if (pos >= cur_valid_till) {
ceph_assert(cur_valid_till == pos);
adjust_stack();
}
}
ceph_assert(off == bl.length());
return true;
}
};
iterator begin() {
return iterator(*this);
}
bool deleted() {
return !exists;
}
bool has_contents() {
return layers.size();
}
// takes ownership of gen
void update(ContentsGenerator *gen, const ContDesc &next);
bool check(bufferlist &to_check);
bool check_sparse(const std::map<uint64_t, uint64_t>& extends,
bufferlist &to_check);
const ContDesc &most_recent();
ContentsGenerator *most_recent_gen() {
return layers.begin()->first.get();
}
std::map<std::string, ContDesc> attrs; // Both omap and xattrs
bufferlist header;
bool exists;
bool dirty;
uint64_t version;
std::string redirect_target;
std::map<uint64_t, ChunkDesc> chunk_info;
bool flushed;
private:
std::list<std::pair<std::shared_ptr<ContentsGenerator>, ContDesc> > layers;
};
#endif
| 13,817 | 24.54159 | 131 |
h
|
null |
ceph-main/src/test/osd/RadosModel.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/interval_set.h"
#include "include/buffer.h"
#include <list>
#include <map>
#include <set>
#include "include/rados/librados.h"
#include "RadosModel.h"
#include "TestOpStat.h"
void TestOp::begin()
{
_begin();
}
void TestOp::finish(TestOp::CallbackInfo *info)
{
_finish(info);
}
void read_callback(librados::completion_t comp, void *arg) {
TestOp* op = static_cast<TestOp*>(arg);
op->finish(NULL);
}
void write_callback(librados::completion_t comp, void *arg) {
std::pair<TestOp*, TestOp::CallbackInfo*> *args =
static_cast<std::pair<TestOp*, TestOp::CallbackInfo*> *>(arg);
TestOp* op = args->first;
TestOp::CallbackInfo *info = args->second;
op->finish(info);
delete args;
delete info;
}
| 833 | 21.540541 | 71 |
cc
|
null |
ceph-main/src/test/osd/RadosModel.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include "include/rados/librados.hpp"
#include <iostream>
#include <iterator>
#include <sstream>
#include <map>
#include <set>
#include <list>
#include <string>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <time.h>
#include "Object.h"
#include "TestOpStat.h"
#include "test/librados/test.h"
#include "common/sharedptr_registry.hpp"
#include "common/errno.h"
#include "osd/HitSet.h"
#include "common/ceph_crypto.h"
#include "cls/cas/cls_cas_client.h"
#include "cls/cas/cls_cas_internal.h"
#ifndef RADOSMODEL_H
#define RADOSMODEL_H
class RadosTestContext;
class TestOpStat;
template <typename T>
typename T::iterator rand_choose(T &cont) {
if (std::empty(cont)) {
return std::end(cont);
}
return std::next(std::begin(cont), rand() % cont.size());
}
enum TestOpType {
TEST_OP_READ,
TEST_OP_WRITE,
TEST_OP_WRITE_EXCL,
TEST_OP_WRITESAME,
TEST_OP_DELETE,
TEST_OP_SNAP_CREATE,
TEST_OP_SNAP_REMOVE,
TEST_OP_ROLLBACK,
TEST_OP_SETATTR,
TEST_OP_RMATTR,
TEST_OP_WATCH,
TEST_OP_COPY_FROM,
TEST_OP_HIT_SET_LIST,
TEST_OP_UNDIRTY,
TEST_OP_IS_DIRTY,
TEST_OP_CACHE_FLUSH,
TEST_OP_CACHE_TRY_FLUSH,
TEST_OP_CACHE_EVICT,
TEST_OP_APPEND,
TEST_OP_APPEND_EXCL,
TEST_OP_SET_REDIRECT,
TEST_OP_UNSET_REDIRECT,
TEST_OP_CHUNK_READ,
TEST_OP_TIER_PROMOTE,
TEST_OP_TIER_FLUSH,
TEST_OP_SET_CHUNK,
TEST_OP_TIER_EVICT
};
class TestWatchContext : public librados::WatchCtx2 {
TestWatchContext(const TestWatchContext&);
public:
ceph::condition_variable cond;
uint64_t handle = 0;
bool waiting = false;
ceph::mutex lock = ceph::make_mutex("watch lock");
TestWatchContext() = default;
void handle_notify(uint64_t notify_id, uint64_t cookie,
uint64_t notifier_id,
bufferlist &bl) override {
std::lock_guard l{lock};
waiting = false;
cond.notify_all();
}
void handle_error(uint64_t cookie, int err) override {
std::lock_guard l{lock};
std::cout << "watch handle_error " << err << std::endl;
}
void start() {
std::lock_guard l{lock};
waiting = true;
}
void wait() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !waiting; });
}
uint64_t &get_handle() {
return handle;
}
};
class TestOp {
public:
const int num;
RadosTestContext *context;
TestOpStat *stat;
bool done = false;
TestOp(int n, RadosTestContext *context,
TestOpStat *stat = 0)
: num(n),
context(context),
stat(stat)
{}
virtual ~TestOp() {};
/**
* This struct holds data to be passed by a callback
* to a TestOp::finish method.
*/
struct CallbackInfo {
uint64_t id;
explicit CallbackInfo(uint64_t id) : id(id) {}
virtual ~CallbackInfo() {};
};
virtual void _begin() = 0;
/**
* Called when the operation completes.
* This should be overridden by asynchronous operations.
*
* @param info information stored by a callback, or NULL -
* useful for multi-operation TestOps
*/
virtual void _finish(CallbackInfo *info)
{
return;
}
virtual std::string getType() = 0;
virtual bool finished()
{
return true;
}
void begin();
void finish(CallbackInfo *info);
virtual bool must_quiesce_other_ops() { return false; }
};
class TestOpGenerator {
public:
virtual ~TestOpGenerator() {};
virtual TestOp *next(RadosTestContext &context) = 0;
};
class RadosTestContext {
public:
ceph::mutex state_lock = ceph::make_mutex("Context Lock");
ceph::condition_variable wait_cond;
// snap => {oid => desc}
std::map<int, std::map<std::string,ObjectDesc> > pool_obj_cont;
std::set<std::string> oid_in_use;
std::set<std::string> oid_not_in_use;
std::set<std::string> oid_flushing;
std::set<std::string> oid_not_flushing;
std::set<std::string> oid_redirect_not_in_use;
std::set<std::string> oid_redirect_in_use;
std::set<std::string> oid_set_chunk_tgt_pool;
SharedPtrRegistry<int, int> snaps_in_use;
int current_snap;
std::string pool_name;
librados::IoCtx io_ctx;
librados::Rados rados;
int next_oid;
std::string prefix;
int errors;
int max_in_flight;
int seq_num;
std::map<int,uint64_t> snaps;
uint64_t seq;
const char *rados_id;
bool initialized;
std::map<std::string, TestWatchContext*> watches;
const uint64_t max_size;
const uint64_t min_stride_size;
const uint64_t max_stride_size;
AttrGenerator attr_gen;
const bool no_omap;
const bool no_sparse;
bool pool_snaps;
bool write_fadvise_dontneed;
std::string low_tier_pool_name;
librados::IoCtx low_tier_io_ctx;
int snapname_num;
std::map<std::string, std::string> redirect_objs;
bool enable_dedup;
std::string chunk_algo;
std::string chunk_size;
RadosTestContext(const std::string &pool_name,
int max_in_flight,
uint64_t max_size,
uint64_t min_stride_size,
uint64_t max_stride_size,
bool no_omap,
bool no_sparse,
bool pool_snaps,
bool write_fadvise_dontneed,
const std::string &low_tier_pool_name,
bool enable_dedup,
std::string chunk_algo,
std::string chunk_size,
const char *id = 0) :
pool_obj_cont(),
current_snap(0),
pool_name(pool_name),
next_oid(0),
errors(0),
max_in_flight(max_in_flight),
seq_num(0), seq(0),
rados_id(id), initialized(false),
max_size(max_size),
min_stride_size(min_stride_size), max_stride_size(max_stride_size),
attr_gen(2000, 20000),
no_omap(no_omap),
no_sparse(no_sparse),
pool_snaps(pool_snaps),
write_fadvise_dontneed(write_fadvise_dontneed),
low_tier_pool_name(low_tier_pool_name),
snapname_num(0),
enable_dedup(enable_dedup),
chunk_algo(chunk_algo),
chunk_size(chunk_size)
{
}
int init()
{
int r = rados.init(rados_id);
if (r < 0)
return r;
r = rados.conf_read_file(NULL);
if (r < 0)
return r;
r = rados.conf_parse_env(NULL);
if (r < 0)
return r;
r = rados.connect();
if (r < 0)
return r;
r = rados.ioctx_create(pool_name.c_str(), io_ctx);
if (r < 0) {
rados.shutdown();
return r;
}
if (!low_tier_pool_name.empty()) {
r = rados.ioctx_create(low_tier_pool_name.c_str(), low_tier_io_ctx);
if (r < 0) {
rados.shutdown();
return r;
}
}
bufferlist inbl;
r = rados.mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name +
"\", \"var\": \"write_fadvise_dontneed\", \"val\": \"" + (write_fadvise_dontneed ? "true" : "false") + "\"}",
inbl, NULL, NULL);
if (r < 0) {
rados.shutdown();
return r;
}
if (enable_dedup) {
r = rados.mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name +
"\", \"var\": \"fingerprint_algorithm\", \"val\": \"" + "sha256" + "\"}",
inbl, NULL, NULL);
if (r < 0) {
rados.shutdown();
return r;
}
r = rados.mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name +
"\", \"var\": \"dedup_tier\", \"val\": \"" + low_tier_pool_name + "\"}",
inbl, NULL, NULL);
if (r < 0) {
rados.shutdown();
return r;
}
r = rados.mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name +
"\", \"var\": \"dedup_chunk_algorithm\", \"val\": \"" + chunk_algo + "\"}",
inbl, NULL, NULL);
if (r < 0) {
rados.shutdown();
return r;
}
r = rados.mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" + pool_name +
"\", \"var\": \"dedup_cdc_chunk_size\", \"val\": \"" + chunk_size + "\"}",
inbl, NULL, NULL);
if (r < 0) {
rados.shutdown();
return r;
}
}
char hostname_cstr[100];
gethostname(hostname_cstr, 100);
std::stringstream hostpid;
hostpid << hostname_cstr << getpid() << "-";
prefix = hostpid.str();
ceph_assert(!initialized);
initialized = true;
return 0;
}
void shutdown()
{
if (initialized) {
rados.shutdown();
}
}
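  // Pump ops from gen with at most max_in_flight outstanding; an op that
  // must_quiesce_other_ops() first waits for all in-flight ops to drain.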
void loop(TestOpGenerator *gen)
{
ceph_assert(initialized);
std::list<TestOp*> inflight;
std::unique_lock state_locker{state_lock};
TestOp *next = gen->next(*this);
TestOp *waiting = NULL;
while (next || !inflight.empty()) {
if (next && next->must_quiesce_other_ops() && !inflight.empty()) {
waiting = next;
next = NULL; // Force to wait for inflight to drain
}
if (next) {
inflight.push_back(next);
}
state_lock.unlock();
if (next) {
(*inflight.rbegin())->begin();
}
state_lock.lock();
while (1) {
for (auto i = inflight.begin();
i != inflight.end();) {
if ((*i)->finished()) {
std::cout << (*i)->num << ": done (" << (inflight.size()-1) << " left)" << std::endl;
delete *i;
inflight.erase(i++);
} else {
++i;
}
}
if (inflight.size() >= (unsigned) max_in_flight || (!next && !inflight.empty())) {
std::cout << " waiting on " << inflight.size() << std::endl;
wait_cond.wait(state_locker);
} else {
break;
}
}
if (waiting) {
next = waiting;
waiting = NULL;
} else {
next = gen->next(*this);
}
}
}
void kick()
{
wait_cond.notify_all();
}
TestWatchContext *get_watch_context(const std::string &oid) {
return watches.count(oid) ? watches[oid] : 0;
}
TestWatchContext *watch(const std::string &oid) {
ceph_assert(!watches.count(oid));
return (watches[oid] = new TestWatchContext);
}
void unwatch(const std::string &oid) {
ceph_assert(watches.count(oid));
delete watches[oid];
watches.erase(oid);
}
ObjectDesc get_most_recent(const std::string &oid) {
ObjectDesc new_obj;
for (auto i = pool_obj_cont.rbegin();
i != pool_obj_cont.rend();
++i) {
std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid);
if (j != i->second.end()) {
new_obj = j->second;
break;
}
}
return new_obj;
}
void rm_object_attrs(const std::string &oid, const std::set<std::string> &attrs)
{
ObjectDesc new_obj = get_most_recent(oid);
for (std::set<std::string>::const_iterator i = attrs.begin();
i != attrs.end();
++i) {
new_obj.attrs.erase(*i);
}
new_obj.dirty = true;
new_obj.flushed = false;
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
void remove_object_header(const std::string &oid)
{
ObjectDesc new_obj = get_most_recent(oid);
new_obj.header = bufferlist();
new_obj.dirty = true;
new_obj.flushed = false;
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
void update_object_header(const std::string &oid, const bufferlist &bl)
{
ObjectDesc new_obj = get_most_recent(oid);
new_obj.header = bl;
new_obj.exists = true;
new_obj.dirty = true;
new_obj.flushed = false;
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
void update_object_attrs(const std::string &oid, const std::map<std::string, ContDesc> &attrs)
{
ObjectDesc new_obj = get_most_recent(oid);
for (auto i = attrs.cbegin();
i != attrs.cend();
++i) {
new_obj.attrs[i->first] = i->second;
}
new_obj.exists = true;
new_obj.dirty = true;
new_obj.flushed = false;
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
void update_object(ContentsGenerator *cont_gen,
const std::string &oid, const ContDesc &contents)
{
ObjectDesc new_obj = get_most_recent(oid);
new_obj.exists = true;
new_obj.dirty = true;
new_obj.flushed = false;
new_obj.update(cont_gen,
contents);
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
void update_object_full(const std::string &oid, const ObjectDesc &contents)
{
pool_obj_cont[current_snap].insert_or_assign(oid, contents);
pool_obj_cont[current_snap][oid].dirty = true;
}
void update_object_undirty(const std::string &oid)
{
ObjectDesc new_obj = get_most_recent(oid);
new_obj.dirty = false;
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
void update_object_version(const std::string &oid, uint64_t version,
int snap = -1)
{
for (auto i = pool_obj_cont.rbegin();
i != pool_obj_cont.rend();
++i) {
if (snap != -1 && snap < i->first)
continue;
std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid);
if (j != i->second.end()) {
if (version)
j->second.version = version;
std::cout << __func__ << " oid " << oid
<< " v " << version << " " << j->second.most_recent()
<< " " << (j->second.dirty ? "dirty" : "clean")
<< " " << (j->second.exists ? "exists" : "dne")
<< std::endl;
break;
}
}
}
void remove_object(const std::string &oid)
{
ceph_assert(!get_watch_context(oid));
ObjectDesc new_obj;
pool_obj_cont[current_snap].insert_or_assign(oid, new_obj);
}
bool find_object(const std::string &oid, ObjectDesc *contents, int snap = -1) const
{
for (auto i = pool_obj_cont.crbegin();
i != pool_obj_cont.crend();
++i) {
if (snap != -1 && snap < i->first) continue;
if (i->second.count(oid) != 0) {
*contents = i->second.find(oid)->second;
return true;
}
}
return false;
}
void update_object_redirect_target(const std::string &oid, const std::string &target)
{
redirect_objs[oid] = target;
}
void update_object_chunk_target(const std::string &oid, uint64_t offset, const ChunkDesc &info)
{
for (auto i = pool_obj_cont.crbegin();
i != pool_obj_cont.crend();
++i) {
if (i->second.count(oid) != 0) {
ObjectDesc obj_desc = i->second.find(oid)->second;
obj_desc.chunk_info[offset] = info;
update_object_full(oid, obj_desc);
	return;
}
}
return;
}
bool object_existed_at(const std::string &oid, int snap = -1) const
{
ObjectDesc contents;
bool found = find_object(oid, &contents, snap);
return found && contents.exists;
}
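  // drop the interval for the given snap, folding objects recorded only
  // in that interval forward into the next one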
void remove_snap(int snap)
{
std::map<int, std::map<std::string,ObjectDesc> >::iterator next_iter = pool_obj_cont.find(snap);
ceph_assert(next_iter != pool_obj_cont.end());
std::map<int, std::map<std::string,ObjectDesc> >::iterator current_iter = next_iter++;
ceph_assert(current_iter != pool_obj_cont.end());
    std::map<std::string,ObjectDesc> &current = current_iter->second;
std::map<std::string,ObjectDesc> &next = next_iter->second;
for (auto i = current.begin(); i != current.end(); ++i) {
if (next.count(i->first) == 0) {
next.insert(std::pair<std::string,ObjectDesc>(i->first, i->second));
}
}
pool_obj_cont.erase(current_iter);
snaps.erase(snap);
}
void add_snap(uint64_t snap)
{
snaps[current_snap] = snap;
current_snap++;
pool_obj_cont[current_snap];
seq = snap;
}
void roll_back(const std::string &oid, int snap)
{
ceph_assert(!get_watch_context(oid));
ObjectDesc contents;
find_object(oid, &contents, snap);
contents.dirty = true;
contents.flushed = false;
pool_obj_cont.rbegin()->second.insert_or_assign(oid, contents);
}
void update_object_tier_flushed(const std::string &oid, int snap)
{
for (auto i = pool_obj_cont.rbegin();
i != pool_obj_cont.rend();
++i) {
if (snap != -1 && snap < i->first)
continue;
std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid);
if (j != i->second.end()) {
j->second.flushed = true;
break;
}
}
}
bool check_oldest_snap_flushed(const std::string &oid, int snap)
{
for (auto i = pool_obj_cont.rbegin();
i != pool_obj_cont.rend();
++i) {
if (snap != -1 && snap < i->first)
continue;
std::map<std::string,ObjectDesc>::iterator j = i->second.find(oid);
if (j != i->second.end() && !j->second.flushed) {
std::cout << __func__ << " oid " << oid
<< " v " << j->second.version << " " << j->second.most_recent()
<< " " << (j->second.flushed ? "flushed" : "unflushed")
<< " " << i->first << std::endl;
return false;
}
}
return true;
}
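  // for every chunk object in the chunk pool, verify that the number of
  // references its manifest objects actually hold (per cls_cas) never
  // exceeds the refcount recorded on the chunk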
bool check_chunks_refcount(librados::IoCtx &chunk_pool_ctx, librados::IoCtx &manifest_pool_ctx)
{
librados::ObjectCursor shard_start;
librados::ObjectCursor shard_end;
librados::ObjectCursor begin;
librados::ObjectCursor end;
begin = chunk_pool_ctx.object_list_begin();
end = chunk_pool_ctx.object_list_end();
chunk_pool_ctx.object_list_slice(
begin,
end,
1,
1,
&shard_start,
&shard_end);
librados::ObjectCursor c(shard_start);
    while (c < shard_end)
{
std::vector<librados::ObjectItem> result;
int r = chunk_pool_ctx.object_list(c, shard_end, 12, {}, &result, &c);
if (r < 0) {
std::cerr << "error object_list : " << cpp_strerror(r) << std::endl;
return false;
}
for (const auto & i : result) {
auto oid = i.oid;
chunk_refs_t refs;
{
bufferlist t;
r = chunk_pool_ctx.getxattr(oid, CHUNK_REFCOUNT_ATTR, t);
if (r < 0) {
continue;
}
auto p = t.cbegin();
decode(refs, p);
}
ceph_assert(refs.get_type() == chunk_refs_t::TYPE_BY_OBJECT);
chunk_refs_by_object_t *byo =
static_cast<chunk_refs_by_object_t*>(refs.r.get());
for (auto& pp : byo->by_object) {
int src_refcount = 0;
int dst_refcount = byo->by_object.count(pp);
for (int tries = 0; tries < 10; tries++) {
r = cls_cas_references_chunk(manifest_pool_ctx, pp.oid.name, oid);
if (r == -ENOENT || r == -ENOLINK) {
src_refcount = 0;
} else if (r == -EBUSY) {
sleep(10);
continue;
} else {
src_refcount = r;
}
break;
}
if (src_refcount > dst_refcount) {
std::cerr << " src_object " << pp
<< ": src_refcount " << src_refcount
<< ", dst_object " << oid
<< ": dst_refcount " << dst_refcount
<< std::endl;
return false;
}
}
}
}
return true;
}
};
void read_callback(librados::completion_t comp, void *arg);
void write_callback(librados::completion_t comp, void *arg);
/// remove random xattrs from given object, and optionally remove omap
/// entries if @c no_omap is not specified in context
class RemoveAttrsOp : public TestOp {
public:
std::string oid;
librados::ObjectWriteOperation op;
librados::AioCompletion *comp;
RemoveAttrsOp(int n, RadosTestContext *context,
const std::string &oid,
TestOpStat *stat)
: TestOp(n, context, stat), oid(oid), comp(NULL)
{}
void _begin() override
{
ContDesc cont;
std::set<std::string> to_remove;
{
std::lock_guard l{context->state_lock};
ObjectDesc obj;
if (!context->find_object(oid, &obj)) {
context->kick();
done = true;
return;
}
cont = ContDesc(context->seq_num, context->current_snap,
context->seq_num, "");
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
if (rand() % 30) {
ContentsGenerator::iterator iter = context->attr_gen.get_iterator(cont);
for (auto i = obj.attrs.begin();
i != obj.attrs.end();
++i, ++iter) {
if (!(*iter % 3)) {
to_remove.insert(i->first);
op.rmxattr(i->first.c_str());
}
}
if (to_remove.empty()) {
context->kick();
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
done = true;
return;
}
if (!context->no_omap) {
op.omap_rm_keys(to_remove);
}
} else {
if (!context->no_omap) {
op.omap_clear();
}
for (auto i = obj.attrs.begin();
i != obj.attrs.end();
++i) {
op.rmxattr(i->first.c_str());
to_remove.insert(i->first);
}
context->remove_object_header(oid);
}
context->rm_object_attrs(oid, to_remove);
}
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp = context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
context->io_ctx.aio_operate(context->prefix+oid, comp, &op);
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
done = true;
context->update_object_version(oid, comp->get_version64());
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "RemoveAttrsOp";
}
};
/// add random xattrs to given object, and optionally add omap
/// entries if @c no_omap is not specified in context
class SetAttrsOp : public TestOp {
public:
std::string oid;
librados::ObjectWriteOperation op;
librados::AioCompletion *comp;
SetAttrsOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat)
: TestOp(n, context, stat),
oid(oid), comp(NULL)
{}
void _begin() override
{
ContDesc cont;
{
std::lock_guard l{context->state_lock};
cont = ContDesc(context->seq_num, context->current_snap,
context->seq_num, "");
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
}
std::map<std::string, bufferlist> omap_contents;
std::map<std::string, ContDesc> omap;
bufferlist header;
ContentsGenerator::iterator keygen = context->attr_gen.get_iterator(cont);
op.create(false);
while (!*keygen) ++keygen;
while (*keygen) {
if (*keygen != '_')
header.append(*keygen);
++keygen;
}
for (int i = 0; i < 20; ++i) {
std::string key;
while (!*keygen) ++keygen;
while (*keygen && key.size() < 40) {
key.push_back((*keygen % 20) + 'a');
++keygen;
}
ContDesc val(cont);
val.seqnum += (unsigned)(*keygen);
val.prefix = ("oid: " + oid);
omap[key] = val;
bufferlist val_buffer = context->attr_gen.gen_bl(val);
omap_contents[key] = val_buffer;
op.setxattr(key.c_str(), val_buffer);
}
if (!context->no_omap) {
op.omap_set_header(header);
op.omap_set(omap_contents);
}
{
std::lock_guard l{context->state_lock};
context->update_object_header(oid, header);
context->update_object_attrs(oid, omap);
}
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback);
context->io_ctx.aio_operate(context->prefix+oid, comp, &op);
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
int r;
if ((r = comp->get_return_value())) {
std::cerr << "err " << r << std::endl;
ceph_abort();
}
done = true;
context->update_object_version(oid, comp->get_version64());
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "SetAttrsOp";
}
};
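/// write (or append to) the given object in generated strides, update its
/// _header xattr, and race an ordered read against the writes to verify
/// that the returned object versions are consistent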
class WriteOp : public TestOp {
public:
const std::string oid;
ContDesc cont;
std::set<librados::AioCompletion *> waiting;
librados::AioCompletion *rcompletion = nullptr;
  // number of async ops submitted
uint64_t waiting_on = 0;
uint64_t last_acked_tid = 0;
librados::ObjectReadOperation read_op;
librados::ObjectWriteOperation write_op;
bufferlist rbuffer;
const bool do_append;
const bool do_excl;
WriteOp(int n,
RadosTestContext *context,
const std::string &oid,
bool do_append,
bool do_excl,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(oid),
do_append(do_append),
do_excl(do_excl)
{}
void _begin() override
{
assert(!done);
std::stringstream acc;
std::lock_guard state_locker{context->state_lock};
acc << context->prefix << "OID: " << oid << " snap " << context->current_snap << std::endl;
std::string prefix = acc.str();
cont = ContDesc(context->seq_num, context->current_snap, context->seq_num, prefix);
ContentsGenerator *cont_gen;
if (do_append) {
ObjectDesc old_value;
bool found = context->find_object(oid, &old_value);
uint64_t prev_length = found && old_value.has_contents() ?
old_value.most_recent_gen()->get_length(old_value.most_recent()) :
0;
bool requires_alignment;
int r = context->io_ctx.pool_requires_alignment2(&requires_alignment);
ceph_assert(r == 0);
uint64_t alignment = 0;
if (requires_alignment) {
r = context->io_ctx.pool_required_alignment2(&alignment);
ceph_assert(r == 0);
ceph_assert(alignment != 0);
}
cont_gen = new AppendGenerator(
prev_length,
alignment,
context->min_stride_size,
context->max_stride_size,
3);
} else {
cont_gen = new VarLenGenerator(
context->max_size, context->min_stride_size, context->max_stride_size);
}
context->update_object(cont_gen, oid, cont);
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
std::map<uint64_t, uint64_t> ranges;
cont_gen->get_ranges_map(cont, ranges);
std::cout << num << ": seq_num " << context->seq_num << " ranges " << ranges << std::endl;
context->seq_num++;
waiting_on = ranges.size();
ContentsGenerator::iterator gen_pos = cont_gen->get_iterator(cont);
    // ensure that tid is greater than last_acked_tid
uint64_t tid = last_acked_tid + 1;
for (auto [offset, len] : ranges) {
gen_pos.seek(offset);
bufferlist to_write = gen_pos.gen_bl_advance(len);
ceph_assert(to_write.length() == len);
ceph_assert(to_write.length() > 0);
std::cout << num << ": writing " << context->prefix+oid
<< " from " << offset
<< " to " << len + offset << " tid " << tid << std::endl;
auto cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(tid++));
librados::AioCompletion *completion =
context->rados.aio_create_completion((void*) cb_arg, &write_callback);
waiting.insert(completion);
librados::ObjectWriteOperation op;
if (do_append) {
op.append(to_write);
} else {
op.write(offset, to_write);
}
if (do_excl && cb_arg->second->id == last_acked_tid + 1)
op.assert_exists();
context->io_ctx.aio_operate(
context->prefix+oid, completion,
&op);
}
bufferlist contbl;
encode(cont, contbl);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(
this,
new TestOp::CallbackInfo(tid++));
librados::AioCompletion *completion = context->rados.aio_create_completion(
(void*) cb_arg, &write_callback);
waiting.insert(completion);
waiting_on++;
write_op.setxattr("_header", contbl);
if (!do_append) {
write_op.truncate(cont_gen->get_length(cont));
}
context->io_ctx.aio_operate(
context->prefix+oid, completion, &write_op);
cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(
this,
new TestOp::CallbackInfo(tid++));
rcompletion = context->rados.aio_create_completion(
(void*) cb_arg, &write_callback);
waiting_on++;
read_op.read(0, 1, &rbuffer, 0);
context->io_ctx.aio_operate(
context->prefix+oid, rcompletion,
&read_op,
librados::OPERATION_ORDER_READS_WRITES, // order wrt previous write/update
0);
}
void _finish(CallbackInfo *info) override
{
ceph_assert(info);
std::lock_guard state_locker{context->state_lock};
uint64_t tid = info->id;
std::cout << num << ": finishing write tid " << tid << " to " << context->prefix + oid << std::endl;
if (tid <= last_acked_tid) {
std::cerr << "Error: finished tid " << tid
<< " when last_acked_tid was " << last_acked_tid << std::endl;
ceph_abort();
}
last_acked_tid = tid;
ceph_assert(!done);
waiting_on--;
if (waiting_on == 0) {
uint64_t version = 0;
for (auto i = waiting.begin(); i != waiting.end();) {
ceph_assert((*i)->is_complete());
if (int err = (*i)->get_return_value()) {
std::cerr << "Error: oid " << oid << " write returned error code "
<< err << std::endl;
ceph_abort();
}
if ((*i)->get_version64() > version) {
std::cout << num << ": oid " << oid << " updating version " << version
<< " to " << (*i)->get_version64() << std::endl;
version = (*i)->get_version64();
} else {
std::cout << num << ": oid " << oid << " version " << version
<< " is already newer than " << (*i)->get_version64() << std::endl;
}
(*i)->release();
waiting.erase(i++);
}
context->update_object_version(oid, version);
ceph_assert(rcompletion->is_complete());
int r = rcompletion->get_return_value();
assertf(r >= 0, "r = %d", r);
if (rcompletion->get_version64() != version) {
std::cerr << "Error: racing read on " << oid << " returned version "
<< rcompletion->get_version64() << " rather than version "
<< version << std::endl;
ceph_abort_msg("racing read got wrong version");
}
rcompletion->release();
{
ObjectDesc old_value;
ceph_assert(context->find_object(oid, &old_value, -1));
if (old_value.deleted())
std::cout << num << ": left oid " << oid << " deleted" << std::endl;
else
std::cout << num << ": left oid " << oid << " "
<< old_value.most_recent() << std::endl;
}
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
done = true;
}
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "WriteOp";
}
};
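/// variant of WriteOp that fills each generated stride with writesame
/// instead of write/append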
class WriteSameOp : public TestOp {
public:
std::string oid;
ContDesc cont;
std::set<librados::AioCompletion *> waiting;
librados::AioCompletion *rcompletion;
uint64_t waiting_on;
uint64_t last_acked_tid;
librados::ObjectReadOperation read_op;
librados::ObjectWriteOperation write_op;
bufferlist rbuffer;
WriteSameOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(oid), rcompletion(NULL), waiting_on(0),
last_acked_tid(0)
{}
void _begin() override
{
std::lock_guard state_locker{context->state_lock};
done = 0;
std::stringstream acc;
acc << context->prefix << "OID: " << oid << " snap " << context->current_snap << std::endl;
std::string prefix = acc.str();
cont = ContDesc(context->seq_num, context->current_snap, context->seq_num, prefix);
ContentsGenerator *cont_gen;
cont_gen = new VarLenGenerator(
context->max_size, context->min_stride_size, context->max_stride_size);
context->update_object(cont_gen, oid, cont);
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
std::map<uint64_t, uint64_t> ranges;
cont_gen->get_ranges_map(cont, ranges);
std::cout << num << ": seq_num " << context->seq_num << " ranges " << ranges << std::endl;
context->seq_num++;
waiting_on = ranges.size();
ContentsGenerator::iterator gen_pos = cont_gen->get_iterator(cont);
    // ensure that tid is greater than last_acked_tid
uint64_t tid = last_acked_tid + 1;
for (auto [offset, len] : ranges) {
gen_pos.seek(offset);
bufferlist to_write = gen_pos.gen_bl_advance(len);
ceph_assert(to_write.length() == len);
ceph_assert(to_write.length() > 0);
std::cout << num << ": writing " << context->prefix+oid
<< " from " << offset
<< " to " << offset + len << " tid " << tid << std::endl;
auto cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(tid++));
librados::AioCompletion *completion =
context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
waiting.insert(completion);
librados::ObjectWriteOperation op;
/* no writesame multiplication factor for now */
op.writesame(offset, to_write.length(), to_write);
context->io_ctx.aio_operate(
context->prefix+oid, completion,
&op);
}
bufferlist contbl;
encode(cont, contbl);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(
this,
new TestOp::CallbackInfo(tid++));
librados::AioCompletion *completion = context->rados.aio_create_completion(
(void*) cb_arg, &write_callback);
waiting.insert(completion);
waiting_on++;
write_op.setxattr("_header", contbl);
write_op.truncate(cont_gen->get_length(cont));
context->io_ctx.aio_operate(
context->prefix+oid, completion, &write_op);
cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(
this,
new TestOp::CallbackInfo(tid++));
rcompletion = context->rados.aio_create_completion(
(void*) cb_arg, &write_callback);
waiting_on++;
read_op.read(0, 1, &rbuffer, 0);
context->io_ctx.aio_operate(
context->prefix+oid, rcompletion,
&read_op,
librados::OPERATION_ORDER_READS_WRITES, // order wrt previous write/update
0);
}
void _finish(CallbackInfo *info) override
{
ceph_assert(info);
std::lock_guard state_locker{context->state_lock};
uint64_t tid = info->id;
std::cout << num << ": finishing writesame tid " << tid << " to " << context->prefix + oid << std::endl;
if (tid <= last_acked_tid) {
std::cerr << "Error: finished tid " << tid
<< " when last_acked_tid was " << last_acked_tid << std::endl;
ceph_abort();
}
last_acked_tid = tid;
ceph_assert(!done);
waiting_on--;
if (waiting_on == 0) {
uint64_t version = 0;
for (auto i = waiting.begin(); i != waiting.end();) {
ceph_assert((*i)->is_complete());
if (int err = (*i)->get_return_value()) {
std::cerr << "Error: oid " << oid << " writesame returned error code "
<< err << std::endl;
ceph_abort();
}
if ((*i)->get_version64() > version) {
std::cout << "oid " << oid << "updating version " << version
<< "to " << (*i)->get_version64() << std::endl;
version = (*i)->get_version64();
} else {
std::cout << "oid " << oid << "version " << version
<< "is already newer than " << (*i)->get_version64() << std::endl;
}
(*i)->release();
waiting.erase(i++);
}
context->update_object_version(oid, version);
ceph_assert(rcompletion->is_complete());
int r = rcompletion->get_return_value();
assertf(r >= 0, "r = %d", r);
if (rcompletion->get_version64() != version) {
std::cerr << "Error: racing read on " << oid << " returned version "
<< rcompletion->get_version64() << " rather than version "
<< version << std::endl;
ceph_abort_msg("racing read got wrong version");
}
rcompletion->release();
{
ObjectDesc old_value;
ceph_assert(context->find_object(oid, &old_value, -1));
if (old_value.deleted())
std::cout << num << ": left oid " << oid << " deleted" << std::endl;
else
std::cout << num << ": left oid " << oid << " "
<< old_value.most_recent() << std::endl;
}
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
done = true;
}
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "WriteSameOp";
}
};
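/// remove the given object, randomly choosing a guarded (assert_exists)
/// or plain remove; -ENOENT is tolerated only if the object was absent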
class DeleteOp : public TestOp {
public:
std::string oid;
DeleteOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat), oid(oid)
{}
void _begin() override
{
std::unique_lock state_locker{context->state_lock};
if (context->get_watch_context(oid)) {
context->kick();
return;
}
ObjectDesc contents;
context->find_object(oid, &contents);
bool present = !contents.deleted();
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->seq_num++;
context->remove_object(oid);
interval_set<uint64_t> ranges;
state_locker.unlock();
int r = 0;
if (rand() % 2) {
librados::ObjectWriteOperation op;
op.assert_exists();
op.remove();
r = context->io_ctx.operate(context->prefix+oid, &op);
} else {
r = context->io_ctx.remove(context->prefix+oid);
}
if (r && !(r == -ENOENT && !present)) {
std::cerr << "r is " << r << " while deleting " << oid << " and present is " << present << std::endl;
ceph_abort();
}
state_locker.lock();
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
}
std::string getType() override
{
return "DeleteOp";
}
};
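/// read the given object (optionally at a random snap) and verify data,
/// xattrs, omap, and header against the model's expected state; issues two
/// extra pipelined reads of the same object to exercise OSD read handling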
class ReadOp : public TestOp {
public:
std::vector<librados::AioCompletion *> completions;
librados::ObjectReadOperation op;
std::string oid;
ObjectDesc old_value;
int snap;
bool balance_reads;
bool localize_reads;
std::shared_ptr<int> in_use;
std::vector<bufferlist> results;
std::vector<int> retvals;
std::vector<std::map<uint64_t, uint64_t>> extent_results;
std::vector<bool> is_sparse_read;
uint64_t waiting_on;
std::vector<bufferlist> checksums;
std::vector<int> checksum_retvals;
std::map<std::string, bufferlist> attrs;
int attrretval;
std::set<std::string> omap_requested_keys;
std::map<std::string, bufferlist> omap_returned_values;
std::set<std::string> omap_keys;
std::map<std::string, bufferlist> omap;
bufferlist header;
std::map<std::string, bufferlist> xattrs;
ReadOp(int n,
RadosTestContext *context,
const std::string &oid,
bool balance_reads,
bool localize_reads,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
completions(3),
oid(oid),
snap(0),
balance_reads(balance_reads),
localize_reads(localize_reads),
results(3),
retvals(3),
extent_results(3),
is_sparse_read(3, false),
waiting_on(0),
checksums(3),
checksum_retvals(3),
attrretval(0)
{}
void _do_read(librados::ObjectReadOperation& read_op, int index) {
uint64_t len = 0;
if (old_value.has_contents())
len = old_value.most_recent_gen()->get_length(old_value.most_recent());
if (context->no_sparse || rand() % 2) {
is_sparse_read[index] = false;
read_op.read(0,
len,
&results[index],
&retvals[index]);
bufferlist init_value_bl;
encode(static_cast<uint32_t>(-1), init_value_bl);
read_op.checksum(LIBRADOS_CHECKSUM_TYPE_CRC32C, init_value_bl, 0, len,
0, &checksums[index], &checksum_retvals[index]);
} else {
is_sparse_read[index] = true;
read_op.sparse_read(0,
len,
&extent_results[index],
&results[index],
&retvals[index]);
}
}
void _begin() override
{
std::unique_lock state_locker{context->state_lock};
if (!(rand() % 4) && !context->snaps.empty()) {
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
std::cout << num << ": read oid " << oid << " snap " << snap << std::endl;
done = 0;
for (uint32_t i = 0; i < 3; i++) {
completions[i] = context->rados.aio_create_completion((void *) this, &read_callback);
}
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
ceph_assert(context->find_object(oid, &old_value, snap));
if (old_value.deleted())
std::cout << num << ": expect deleted" << std::endl;
else
std::cout << num << ": expect " << old_value.most_recent() << std::endl;
TestWatchContext *ctx = context->get_watch_context(oid);
state_locker.unlock();
if (ctx) {
ceph_assert(old_value.exists);
TestAlarm alarm;
std::cerr << num << ": about to start" << std::endl;
ctx->start();
std::cerr << num << ": started" << std::endl;
bufferlist bl;
context->io_ctx.set_notify_timeout(600);
int r = context->io_ctx.notify2(context->prefix+oid, bl, 0, NULL);
if (r < 0) {
std::cerr << "r is " << r << std::endl;
ceph_abort();
}
std::cerr << num << ": notified, waiting" << std::endl;
ctx->wait();
}
state_locker.lock();
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
}
_do_read(op, 0);
for (auto i = old_value.attrs.begin(); i != old_value.attrs.end(); ++i) {
if (rand() % 2) {
std::string key = i->first;
if (rand() % 2)
key.push_back((rand() % 26) + 'a');
omap_requested_keys.insert(key);
}
}
if (!context->no_omap) {
op.omap_get_vals_by_keys(omap_requested_keys, &omap_returned_values, 0);
      // NOTE: we're ignoring pmore here, which assumes the OSD limit is
      // high enough for us.
op.omap_get_keys2("", -1, &omap_keys, nullptr, nullptr);
op.omap_get_vals2("", -1, &omap, nullptr, nullptr);
op.omap_get_header(&header, 0);
}
op.getxattrs(&xattrs, 0);
unsigned flags = 0;
if (balance_reads)
flags |= librados::OPERATION_BALANCE_READS;
if (localize_reads)
flags |= librados::OPERATION_LOCALIZE_READS;
ceph_assert(!context->io_ctx.aio_operate(context->prefix+oid, completions[0], &op,
flags, NULL));
waiting_on++;
    // send 2 pipelined reads on the same object/snap. This helps test the
    // OSD's read behavior in some scenarios
for (uint32_t i = 1; i < 3; ++i) {
librados::ObjectReadOperation pipeline_op;
_do_read(pipeline_op, i);
ceph_assert(!context->io_ctx.aio_operate(context->prefix+oid, completions[i], &pipeline_op, 0));
waiting_on++;
}
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
std::unique_lock state_locker{context->state_lock};
ceph_assert(!done);
ceph_assert(waiting_on > 0);
if (--waiting_on) {
return;
}
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
int retval = completions[0]->get_return_value();
for (auto it = completions.begin();
it != completions.end(); ++it) {
ceph_assert((*it)->is_complete());
uint64_t version = (*it)->get_version64();
int err = (*it)->get_return_value();
if (err != retval) {
std::cerr << num << ": Error: oid " << oid << " read returned different error codes: "
<< retval << " and " << err << std::endl;
ceph_abort();
}
if (err) {
if (!(err == -ENOENT && old_value.deleted())) {
std::cerr << num << ": Error: oid " << oid << " read returned error code "
<< err << std::endl;
ceph_abort();
}
} else if (version != old_value.version) {
std::cerr << num << ": oid " << oid << " version is " << version
<< " and expected " << old_value.version << std::endl;
ceph_assert(version == old_value.version);
}
}
if (!retval) {
std::map<std::string, bufferlist>::iterator iter = xattrs.find("_header");
bufferlist headerbl;
if (iter == xattrs.end()) {
if (old_value.has_contents()) {
std::cerr << num << ": Error: did not find header attr, has_contents: "
<< old_value.has_contents()
<< std::endl;
ceph_assert(!old_value.has_contents());
}
} else {
headerbl = iter->second;
xattrs.erase(iter);
}
if (old_value.deleted()) {
std::cout << num << ": expect deleted" << std::endl;
ceph_abort_msg("expected deleted");
} else {
std::cout << num << ": expect " << old_value.most_recent() << std::endl;
}
if (old_value.has_contents()) {
ContDesc to_check;
auto p = headerbl.cbegin();
decode(to_check, p);
if (to_check != old_value.most_recent()) {
std::cerr << num << ": oid " << oid << " found incorrect object contents " << to_check
<< ", expected " << old_value.most_recent() << std::endl;
context->errors++;
}
for (unsigned i = 0; i < results.size(); i++) {
if (is_sparse_read[i]) {
if (!old_value.check_sparse(extent_results[i], results[i])) {
std::cerr << num << ": oid " << oid << " contents " << to_check << " corrupt" << std::endl;
context->errors++;
}
} else {
if (!old_value.check(results[i])) {
std::cerr << num << ": oid " << oid << " contents " << to_check << " corrupt" << std::endl;
context->errors++;
}
uint32_t checksum = 0;
if (checksum_retvals[i] == 0) {
try {
auto bl_it = checksums[i].cbegin();
uint32_t csum_count;
decode(csum_count, bl_it);
decode(checksum, bl_it);
} catch (const buffer::error &err) {
checksum_retvals[i] = -EBADMSG;
}
}
if (checksum_retvals[i] != 0 || checksum != results[i].crc32c(-1)) {
std::cerr << num << ": oid " << oid << " checksum " << checksums[i]
<< " incorrect, expecting " << results[i].crc32c(-1)
<< std::endl;
context->errors++;
}
}
}
if (context->errors) ceph_abort();
}
// Attributes
if (!context->no_omap) {
if (!(old_value.header == header)) {
std::cerr << num << ": oid " << oid << " header does not match, old size: "
<< old_value.header.length() << " new size " << header.length()
<< std::endl;
ceph_assert(old_value.header == header);
}
if (omap.size() != old_value.attrs.size()) {
std::cerr << num << ": oid " << oid << " omap.size() is " << omap.size()
<< " and old is " << old_value.attrs.size() << std::endl;
ceph_assert(omap.size() == old_value.attrs.size());
}
if (omap_keys.size() != old_value.attrs.size()) {
std::cerr << num << ": oid " << oid << " omap.size() is " << omap_keys.size()
<< " and old is " << old_value.attrs.size() << std::endl;
ceph_assert(omap_keys.size() == old_value.attrs.size());
}
}
if (xattrs.size() != old_value.attrs.size()) {
std::cerr << num << ": oid " << oid << " xattrs.size() is " << xattrs.size()
<< " and old is " << old_value.attrs.size() << std::endl;
ceph_assert(xattrs.size() == old_value.attrs.size());
}
for (auto iter = old_value.attrs.begin();
iter != old_value.attrs.end();
++iter) {
bufferlist bl = context->attr_gen.gen_bl(
iter->second);
if (!context->no_omap) {
std::map<std::string, bufferlist>::iterator omap_iter = omap.find(iter->first);
ceph_assert(omap_iter != omap.end());
ceph_assert(bl.length() == omap_iter->second.length());
bufferlist::iterator k = bl.begin();
for(bufferlist::iterator l = omap_iter->second.begin();
!k.end() && !l.end();
++k, ++l) {
ceph_assert(*l == *k);
}
}
auto xattr_iter = xattrs.find(iter->first);
ceph_assert(xattr_iter != xattrs.end());
ceph_assert(bl.length() == xattr_iter->second.length());
bufferlist::iterator k = bl.begin();
for (bufferlist::iterator j = xattr_iter->second.begin();
!k.end() && !j.end();
++j, ++k) {
ceph_assert(*j == *k);
}
}
if (!context->no_omap) {
for (std::set<std::string>::iterator i = omap_requested_keys.begin();
i != omap_requested_keys.end();
++i) {
if (!omap_returned_values.count(*i))
ceph_assert(!old_value.attrs.count(*i));
if (!old_value.attrs.count(*i))
ceph_assert(!omap_returned_values.count(*i));
}
for (auto i = omap_returned_values.begin();
i != omap_returned_values.end();
++i) {
ceph_assert(omap_requested_keys.count(i->first));
ceph_assert(omap.count(i->first));
ceph_assert(old_value.attrs.count(i->first));
ceph_assert(i->second == omap[i->first]);
}
}
}
for (auto it = completions.begin(); it != completions.end(); ++it) {
(*it)->release();
}
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "ReadOp";
}
};
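/// create a pool or self-managed snapshot; for self-managed snaps, also
/// push the updated snap context to the io_ctx for subsequent writes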
class SnapCreateOp : public TestOp {
public:
SnapCreateOp(int n,
RadosTestContext *context,
TestOpStat *stat = 0)
: TestOp(n, context, stat)
{}
void _begin() override
{
uint64_t snap;
std::string snapname;
if (context->pool_snaps) {
std::stringstream ss;
ss << context->prefix << "snap" << ++context->snapname_num;
snapname = ss.str();
int ret = context->io_ctx.snap_create(snapname.c_str());
if (ret) {
std::cerr << "snap_create returned " << ret << std::endl;
ceph_abort();
}
ceph_assert(!context->io_ctx.snap_lookup(snapname.c_str(), &snap));
} else {
ceph_assert(!context->io_ctx.selfmanaged_snap_create(&snap));
}
std::unique_lock state_locker{context->state_lock};
context->add_snap(snap);
if (!context->pool_snaps) {
std::vector<uint64_t> snapset(context->snaps.size());
int j = 0;
for (auto i = context->snaps.rbegin();
i != context->snaps.rend();
++i, ++j) {
snapset[j] = i->second;
}
state_locker.unlock();
int r = context->io_ctx.selfmanaged_snap_set_write_ctx(context->seq, snapset);
if (r) {
std::cerr << "r is " << r << " snapset is " << snapset << " seq is " << context->seq << std::endl;
ceph_abort();
}
}
}
std::string getType() override
{
return "SnapCreateOp";
}
bool must_quiesce_other_ops() override { return context->pool_snaps; }
};
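/// remove the given snapshot; for self-managed snaps, also update the
/// write snap context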
class SnapRemoveOp : public TestOp {
public:
int to_remove;
SnapRemoveOp(int n, RadosTestContext *context,
int snap,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
to_remove(snap)
{}
void _begin() override
{
std::unique_lock state_locker{context->state_lock};
uint64_t snap = context->snaps[to_remove];
context->remove_snap(to_remove);
if (context->pool_snaps) {
std::string snapname;
ceph_assert(!context->io_ctx.snap_get_name(snap, &snapname));
ceph_assert(!context->io_ctx.snap_remove(snapname.c_str()));
} else {
ceph_assert(!context->io_ctx.selfmanaged_snap_remove(snap));
std::vector<uint64_t> snapset(context->snaps.size());
int j = 0;
for (auto i = context->snaps.rbegin();
i != context->snaps.rend();
++i, ++j) {
snapset[j] = i->second;
}
int r = context->io_ctx.selfmanaged_snap_set_write_ctx(context->seq, snapset);
if (r) {
std::cerr << "r is " << r << " snapset is " << snapset << " seq is " << context->seq << std::endl;
ceph_abort();
}
}
}
std::string getType() override
{
return "SnapRemoveOp";
}
};
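/// toggle a watch on the given object: establish one if none exists,
/// otherwise tear the existing watch down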
class WatchOp : public TestOp {
std::string oid;
public:
WatchOp(int n,
RadosTestContext *context,
const std::string &_oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(_oid)
{}
void _begin() override
{
std::unique_lock state_locker{context->state_lock};
ObjectDesc contents;
context->find_object(oid, &contents);
if (contents.deleted()) {
context->kick();
return;
}
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
TestWatchContext *ctx = context->get_watch_context(oid);
state_locker.unlock();
int r;
if (!ctx) {
{
std::lock_guard l{context->state_lock};
ctx = context->watch(oid);
}
r = context->io_ctx.watch2(context->prefix+oid,
&ctx->get_handle(),
ctx);
} else {
r = context->io_ctx.unwatch2(ctx->get_handle());
{
std::lock_guard l{context->state_lock};
context->unwatch(oid);
}
}
if (r) {
std::cerr << "r is " << r << std::endl;
ceph_abort();
}
{
std::lock_guard l{context->state_lock};
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
}
}
std::string getType() override
{
return "WatchOp";
}
};
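/// roll the given object back to a random existing snapshot, bracketed by
/// zero-length writes when the object exists before/after the rollback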
class RollbackOp : public TestOp {
public:
std::string oid;
int roll_back_to;
librados::ObjectWriteOperation zero_write_op1;
librados::ObjectWriteOperation zero_write_op2;
librados::ObjectWriteOperation op;
std::vector<librados::AioCompletion *> comps;
std::shared_ptr<int> in_use;
int last_finished;
int outstanding;
RollbackOp(int n,
RadosTestContext *context,
const std::string &_oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(_oid), roll_back_to(-1),
comps(3, NULL),
last_finished(-1), outstanding(3)
{}
void _begin() override
{
context->state_lock.lock();
if (context->get_watch_context(oid)) {
context->kick();
context->state_lock.unlock();
return;
}
if (context->snaps.empty()) {
context->kick();
context->state_lock.unlock();
done = true;
return;
}
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
roll_back_to = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(
roll_back_to,
roll_back_to);
std::cout << "rollback oid " << oid << " to " << roll_back_to << std::endl;
bool existed_before = context->object_existed_at(oid);
bool existed_after = context->object_existed_at(oid, roll_back_to);
context->roll_back(oid, roll_back_to);
uint64_t snap = context->snaps[roll_back_to];
outstanding -= (!existed_before) + (!existed_after);
context->state_lock.unlock();
bufferlist bl, bl2;
zero_write_op1.append(bl);
zero_write_op2.append(bl2);
if (context->pool_snaps) {
op.snap_rollback(snap);
} else {
op.selfmanaged_snap_rollback(snap);
}
if (existed_before) {
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comps[0] =
context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
context->io_ctx.aio_operate(
context->prefix+oid, comps[0], &zero_write_op1);
}
{
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(1));
comps[1] =
context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
context->io_ctx.aio_operate(
context->prefix+oid, comps[1], &op);
}
if (existed_after) {
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(2));
comps[2] =
context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
context->io_ctx.aio_operate(
context->prefix+oid, comps[2], &zero_write_op2);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
uint64_t tid = info->id;
std::cout << num << ": finishing rollback tid " << tid
<< " to " << context->prefix + oid << std::endl;
ceph_assert((int)(info->id) > last_finished);
last_finished = info->id;
int r;
if ((r = comps[last_finished]->get_return_value()) != 0) {
std::cerr << "err " << r << std::endl;
ceph_abort();
}
if (--outstanding == 0) {
done = true;
context->update_object_version(oid, comps[tid]->get_version64());
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
in_use = std::shared_ptr<int>();
context->kick();
}
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "RollBackOp";
}
};
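/// copy oid_src into oid via copy_from guarded by the source version, with
/// a racing ordered stat to verify the resulting object version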
class CopyFromOp : public TestOp {
public:
std::string oid, oid_src;
ObjectDesc src_value;
librados::ObjectWriteOperation op;
librados::ObjectReadOperation rd_op;
librados::AioCompletion *comp;
librados::AioCompletion *comp_racing_read = nullptr;
std::shared_ptr<int> in_use;
int snap;
int done;
uint64_t version;
int r;
CopyFromOp(int n,
RadosTestContext *context,
const std::string &oid,
const std::string &oid_src,
TestOpStat *stat)
: TestOp(n, context, stat),
oid(oid), oid_src(oid_src),
comp(NULL), snap(-1), done(0),
version(0), r(0)
{}
void _begin() override
{
ContDesc cont;
{
std::lock_guard l{context->state_lock};
cont = ContDesc(context->seq_num, context->current_snap,
context->seq_num, "");
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->oid_in_use.insert(oid_src);
context->oid_not_in_use.erase(oid_src);
// choose source snap
if (0 && !(rand() % 4) && !context->snaps.empty()) {
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
context->find_object(oid_src, &src_value, snap);
if (!src_value.deleted())
context->update_object_full(oid, src_value);
}
std::string src = context->prefix+oid_src;
op.copy_from(src.c_str(), context->io_ctx, src_value.version, 0);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp = context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
context->io_ctx.aio_operate(context->prefix+oid, comp, &op);
// queue up a racing read, too.
std::pair<TestOp*, TestOp::CallbackInfo*> *read_cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(1));
comp_racing_read = context->rados.aio_create_completion((void*) read_cb_arg, &write_callback);
rd_op.stat(NULL, NULL, NULL);
context->io_ctx.aio_operate(context->prefix+oid, comp_racing_read, &rd_op,
librados::OPERATION_ORDER_READS_WRITES, // order wrt previous write/update
NULL);
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
// note that the read can (and atm will) come back before the
// write reply, but will reflect the update and the versions will
// match.
if (info->id == 0) {
// copy_from
ceph_assert(comp->is_complete());
std::cout << num << ": finishing copy_from to " << context->prefix + oid << std::endl;
if ((r = comp->get_return_value())) {
if (r == -ENOENT && src_value.deleted()) {
std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
} else {
std::cerr << "Error: oid " << oid << " copy_from " << oid_src << " returned error code "
<< r << std::endl;
ceph_abort();
}
} else {
ceph_assert(!version || comp->get_version64() == version);
version = comp->get_version64();
context->update_object_version(oid, comp->get_version64());
}
} else if (info->id == 1) {
// racing read
ceph_assert(comp_racing_read->is_complete());
std::cout << num << ": finishing copy_from racing read to " << context->prefix + oid << std::endl;
if ((r = comp_racing_read->get_return_value())) {
if (!(r == -ENOENT && src_value.deleted())) {
std::cerr << "Error: oid " << oid << " copy_from " << oid_src << " returned error code "
<< r << std::endl;
}
} else {
ceph_assert(comp_racing_read->get_return_value() == 0);
ceph_assert(!version || comp_racing_read->get_version64() == version);
version = comp_racing_read->get_version64();
}
}
if (++done == 2) {
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->oid_in_use.erase(oid_src);
context->oid_not_in_use.insert(oid_src);
context->kick();
}
}
bool finished() override
{
return done == 2;
}
std::string getType() override
{
return "CopyFromOp";
}
};
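/// pick a random chunk of a manifest object and read the corresponding
/// range from both the source object and its target chunk, comparing
/// crc32c checksums of the two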
class ChunkReadOp : public TestOp {
public:
std::vector<librados::AioCompletion *> completions;
librados::ObjectReadOperation op;
std::string oid;
ObjectDesc old_value;
ObjectDesc tgt_value;
int snap;
bool balance_reads;
bool localize_reads;
std::shared_ptr<int> in_use;
std::vector<bufferlist> results;
std::vector<int> retvals;
std::vector<bool> is_sparse_read;
uint64_t waiting_on;
std::vector<bufferlist> checksums;
std::vector<int> checksum_retvals;
uint32_t offset = 0;
uint32_t length = 0;
std::string tgt_oid;
std::string tgt_pool_name;
uint32_t tgt_offset = 0;
ChunkReadOp(int n,
RadosTestContext *context,
const std::string &oid,
const std::string &tgt_pool_name,
bool balance_reads,
bool localize_reads,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
completions(2),
oid(oid),
snap(0),
balance_reads(balance_reads),
localize_reads(localize_reads),
results(2),
retvals(2),
waiting_on(0),
checksums(2),
checksum_retvals(2),
tgt_pool_name(tgt_pool_name)
{}
void _do_read(librados::ObjectReadOperation& read_op, uint32_t offset, uint32_t length, int index) {
read_op.read(offset,
length,
&results[index],
&retvals[index]);
if (index != 0) {
bufferlist init_value_bl;
encode(static_cast<uint32_t>(-1), init_value_bl);
read_op.checksum(LIBRADOS_CHECKSUM_TYPE_CRC32C, init_value_bl, offset, length,
0, &checksums[index], &checksum_retvals[index]);
}
}
void _begin() override
{
context->state_lock.lock();
std::cout << num << ": chunk read oid " << oid << " snap " << snap << std::endl;
done = 0;
for (uint32_t i = 0; i < 2; i++) {
completions[i] = context->rados.aio_create_completion((void *) this, &read_callback);
}
context->find_object(oid, &old_value);
if (old_value.chunk_info.size() == 0) {
std::cout << ": no chunks" << std::endl;
context->kick();
context->state_lock.unlock();
done = true;
return;
}
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
if (old_value.deleted()) {
std::cout << num << ": expect deleted" << std::endl;
} else {
std::cout << num << ": expect " << old_value.most_recent() << std::endl;
}
int rand_index = rand() % old_value.chunk_info.size();
auto iter = old_value.chunk_info.begin();
for (int i = 0; i < rand_index; i++) {
iter++;
}
offset = iter->first;
offset += (rand() % iter->second.length)/2;
uint32_t t_length = rand() % iter->second.length;
while (t_length + offset > iter->first + iter->second.length) {
t_length = rand() % iter->second.length;
}
length = t_length;
tgt_offset = iter->second.offset + offset - iter->first;
tgt_oid = iter->second.oid;
std::cout << num << ": ori offset " << iter->first << " req offset " << offset
<< " ori length " << iter->second.length << " req length " << length
<< " ori tgt_offset " << iter->second.offset << " req tgt_offset " << tgt_offset
<< " tgt_oid " << tgt_oid << std::endl;
TestWatchContext *ctx = context->get_watch_context(oid);
context->state_lock.unlock();
if (ctx) {
ceph_assert(old_value.exists);
TestAlarm alarm;
std::cerr << num << ": about to start" << std::endl;
ctx->start();
std::cerr << num << ": started" << std::endl;
bufferlist bl;
context->io_ctx.set_notify_timeout(600);
int r = context->io_ctx.notify2(context->prefix+oid, bl, 0, NULL);
if (r < 0) {
std::cerr << "r is " << r << std::endl;
ceph_abort();
}
std::cerr << num << ": notified, waiting" << std::endl;
ctx->wait();
}
std::lock_guard state_locker{context->state_lock};
_do_read(op, offset, length, 0);
unsigned flags = 0;
if (balance_reads)
flags |= librados::OPERATION_BALANCE_READS;
if (localize_reads)
flags |= librados::OPERATION_LOCALIZE_READS;
ceph_assert(!context->io_ctx.aio_operate(context->prefix+oid, completions[0], &op,
flags, NULL));
waiting_on++;
_do_read(op, tgt_offset, length, 1);
ceph_assert(!context->io_ctx.aio_operate(context->prefix+tgt_oid, completions[1], &op,
flags, NULL));
waiting_on++;
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
ceph_assert(!done);
ceph_assert(waiting_on > 0);
if (--waiting_on) {
return;
}
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
int retval = completions[0]->get_return_value();
std::cout << ": finish!! ret: " << retval << std::endl;
context->find_object(tgt_oid, &tgt_value);
for (int i = 0; i < 2; i++) {
ceph_assert(completions[i]->is_complete());
int err = completions[i]->get_return_value();
if (err != retval) {
std::cerr << num << ": Error: oid " << oid << " read returned different error codes: "
<< retval << " and " << err << std::endl;
ceph_abort();
}
if (err) {
if (!(err == -ENOENT && old_value.deleted())) {
std::cerr << num << ": Error: oid " << oid << " read returned error code "
<< err << std::endl;
ceph_abort();
}
}
}
if (!retval) {
if (old_value.deleted()) {
std::cout << num << ": expect deleted" << std::endl;
ceph_abort_msg("expected deleted");
} else {
std::cout << num << ": expect " << old_value.most_recent() << std::endl;
}
if (tgt_value.has_contents()) {
uint32_t checksum[2] = {0};
if (checksum_retvals[1] == 0) {
try {
auto bl_it = checksums[1].cbegin();
uint32_t csum_count;
decode(csum_count, bl_it);
decode(checksum[1], bl_it);
} catch (const buffer::error &err) {
checksum_retvals[1] = -EBADMSG;
}
}
if (checksum_retvals[1] != 0) {
std::cerr << num << ": oid " << oid << " checksum retvals " << checksums[0]
<< " error " << std::endl;
context->errors++;
}
checksum[0] = results[0].crc32c(-1);
if (checksum[0] != checksum[1]) {
std::cerr << num << ": oid " << oid << " checksum src " << checksum[0]
<< " chunksum tgt " << checksum[1] << " incorrect, expecting "
<< results[0].crc32c(-1)
<< std::endl;
context->errors++;
}
if (context->errors) ceph_abort();
}
}
for (auto it = completions.begin(); it != completions.end(); ++it) {
(*it)->release();
}
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "ChunkReadOp";
}
};
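/// copy oid_src to oid, targeting either the base pool or the low-tier
/// pool depending on tgt_pool_name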
class CopyOp : public TestOp {
public:
std::string oid, oid_src, tgt_pool_name;
librados::ObjectWriteOperation op;
librados::ObjectReadOperation rd_op;
librados::AioCompletion *comp;
ObjectDesc src_value, tgt_value;
int done;
int r;
CopyOp(int n,
RadosTestContext *context,
const std::string &oid_src,
const std::string &oid,
const std::string &tgt_pool_name,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(oid), oid_src(oid_src), tgt_pool_name(tgt_pool_name),
comp(NULL), done(0), r(0)
{}
void _begin() override
{
std::lock_guard l{context->state_lock};
context->oid_in_use.insert(oid_src);
context->oid_not_in_use.erase(oid_src);
std::string src = context->prefix+oid_src;
context->find_object(oid_src, &src_value);
op.copy_from(src.c_str(), context->io_ctx, src_value.version, 0);
std::cout << "copy op oid " << oid_src << " to " << oid << " tgt_pool_name " << tgt_pool_name << std::endl;
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback);
if (tgt_pool_name == context->low_tier_pool_name) {
context->low_tier_io_ctx.aio_operate(context->prefix+oid, comp, &op);
} else {
context->io_ctx.aio_operate(context->prefix+oid, comp, &op);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
if (info->id == 0) {
ceph_assert(comp->is_complete());
std::cout << num << ": finishing copy op to oid " << oid << std::endl;
if ((r = comp->get_return_value())) {
std::cerr << "Error: oid " << oid << " write returned error code "
<< r << std::endl;
ceph_abort();
}
}
if (++done == 1) {
context->oid_in_use.erase(oid_src);
context->oid_not_in_use.insert(oid_src);
context->kick();
}
}
bool finished() override
{
return done == 1;
}
std::string getType() override
{
return "CopyOp";
}
};
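/// turn a random 512-byte-aligned range of the given object into a
/// manifest chunk referencing a low-tier target (a SHA256 fingerprint
/// object when oid_tgt is empty)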
class SetChunkOp : public TestOp {
public:
std::string oid, oid_tgt;
ObjectDesc src_value, tgt_value;
librados::ObjectReadOperation op;
librados::AioCompletion *comp;
int done;
int r;
uint64_t offset;
uint32_t length;
uint32_t tgt_offset;
int snap;
std::shared_ptr<int> in_use;
SetChunkOp(int n,
RadosTestContext *context,
const std::string &oid,
const std::string &oid_tgt,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(oid), oid_tgt(oid_tgt),
comp(NULL), done(0),
r(0), offset(0), length(0),
tgt_offset(0),
snap(0)
{}
std::pair<uint64_t, uint64_t> get_rand_off_len(uint32_t max_len) {
std::pair<uint64_t, uint64_t> r (0, 0);
r.first = rand() % max_len;
r.second = rand() % max_len;
r.first = r.first - (r.first % 512);
r.second = r.second - (r.second % 512);
while (r.first + r.second > max_len || r.second == 0) {
r.first = rand() % max_len;
r.second = rand() % max_len;
r.first = r.first - (r.first % 512);
r.second = r.second - (r.second % 512);
}
return r;
}
void _begin() override
{
std::lock_guard l{context->state_lock};
if (!(rand() % 4) && !context->snaps.empty()) {
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->find_object(oid, &src_value, snap);
context->find_object(oid_tgt, &tgt_value);
uint32_t max_len = 0;
if (src_value.deleted()) {
/* just random length to check ENOENT */
max_len = context->max_size;
} else {
max_len = src_value.most_recent_gen()->get_length(src_value.most_recent());
}
std::pair<uint64_t, uint64_t> off_len; // first: offset, second: length
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
off_len = get_rand_off_len(max_len);
} else if (src_value.version != 0 && !src_value.deleted()) {
op.assert_version(src_value.version);
off_len = get_rand_off_len(max_len);
} else if (src_value.deleted()) {
off_len.first = 0;
off_len.second = max_len;
}
offset = off_len.first;
length = off_len.second;
tgt_offset = offset;
std::string target_oid;
if (!src_value.deleted() && oid_tgt.empty()) {
bufferlist bl;
int r = context->io_ctx.read(context->prefix+oid, bl, length, offset);
ceph_assert(r > 0);
std::string fp_oid = ceph::crypto::digest<ceph::crypto::SHA256>(bl).to_str();
r = context->low_tier_io_ctx.write(fp_oid, bl, bl.length(), 0);
ceph_assert(r == 0);
target_oid = fp_oid;
tgt_offset = 0;
} else {
target_oid = context->prefix+oid_tgt;
}
std::cout << num << ": " << "set_chunk oid " << oid << " offset: " << offset
<< " length: " << length << " target oid " << target_oid
<< " offset: " << tgt_offset << " snap " << snap << std::endl;
op.set_chunk(offset, length, context->low_tier_io_ctx,
target_oid, tgt_offset, CEPH_OSD_OP_FLAG_WITH_REFERENCE);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp = context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
context->io_ctx.aio_operate(context->prefix+oid, comp, &op,
librados::OPERATION_ORDER_READS_WRITES, NULL);
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
if (info->id == 0) {
ceph_assert(comp->is_complete());
std::cout << num << ": finishing set_chunk to oid " << oid << std::endl;
if ((r = comp->get_return_value())) {
if (r == -ENOENT && src_value.deleted()) {
std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
} else if (r == -ENOENT && context->oid_set_chunk_tgt_pool.find(oid_tgt) !=
context->oid_set_chunk_tgt_pool.end()) {
std::cout << num << ": get expected ENOENT tgt oid " << oid_tgt << std::endl;
} else if (r == -ERANGE && src_value.deleted()) {
std::cout << num << ": got expected ERANGE (src dne)" << std::endl;
} else if (r == -EOPNOTSUPP) {
std::cout << "Range is overlapped: oid " << oid << " set_chunk " << oid_tgt << " returned error code "
<< r << " offset: " << offset << " length: " << length << std::endl;
context->update_object_version(oid, comp->get_version64());
} else {
std::cerr << "Error: oid " << oid << " set_chunk " << oid_tgt << " returned error code "
<< r << std::endl;
ceph_abort();
}
} else {
if (snap == -1) {
ChunkDesc info {tgt_offset, length, oid_tgt};
context->update_object_chunk_target(oid, offset, info);
context->update_object_version(oid, comp->get_version64());
}
}
}
if (++done == 1) {
context->oid_set_chunk_tgt_pool.insert(oid_tgt);
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
}
}
bool finished() override
{
return done == 1;
}
std::string getType() override
{
return "SetChunkOp";
}
};
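/// point the given object at oid_tgt in the low-tier pool via
/// set_redirect; if a redirect already exists, first copy the contents to
/// the new target and unset the old manifest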
class SetRedirectOp : public TestOp {
public:
std::string oid, oid_tgt, tgt_pool_name;
ObjectDesc src_value, tgt_value;
librados::ObjectWriteOperation op;
librados::ObjectReadOperation rd_op;
librados::AioCompletion *comp;
std::shared_ptr<int> in_use;
int done;
int r;
SetRedirectOp(int n,
RadosTestContext *context,
const std::string &oid,
const std::string &oid_tgt,
const std::string &tgt_pool_name,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
oid(oid), oid_tgt(oid_tgt), tgt_pool_name(tgt_pool_name),
comp(NULL), done(0),
r(0)
{}
void _begin() override
{
std::lock_guard l{context->state_lock};
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->oid_redirect_in_use.insert(oid_tgt);
context->oid_redirect_not_in_use.erase(oid_tgt);
if (tgt_pool_name.empty()) ceph_abort();
context->find_object(oid, &src_value);
    if (!context->redirect_objs[oid].empty()) {
/* copy_from oid --> oid_tgt */
comp = context->rados.aio_create_completion();
std::string src = context->prefix+oid;
op.copy_from(src.c_str(), context->io_ctx, src_value.version, 0);
context->low_tier_io_ctx.aio_operate(context->prefix+oid_tgt, comp, &op,
librados::OPERATION_ORDER_READS_WRITES);
comp->wait_for_complete();
if ((r = comp->get_return_value())) {
std::cerr << "Error: oid " << oid << " copy_from " << oid_tgt << " returned error code "
<< r << std::endl;
ceph_abort();
}
comp->release();
/* unset redirect target */
comp = context->rados.aio_create_completion();
bool present = !src_value.deleted();
op.unset_manifest();
context->io_ctx.aio_operate(context->prefix+oid, comp, &op,
librados::OPERATION_ORDER_READS_WRITES |
librados::OPERATION_IGNORE_REDIRECT);
comp->wait_for_complete();
if ((r = comp->get_return_value())) {
if (!(r == -ENOENT && !present) && r != -EOPNOTSUPP) {
std::cerr << "r is " << r << " while deleting " << oid << " and present is " << present << std::endl;
ceph_abort();
}
}
comp->release();
context->oid_redirect_not_in_use.insert(context->redirect_objs[oid]);
context->oid_redirect_in_use.erase(context->redirect_objs[oid]);
}
comp = context->rados.aio_create_completion();
rd_op.stat(NULL, NULL, NULL);
context->io_ctx.aio_operate(context->prefix+oid, comp, &rd_op,
librados::OPERATION_ORDER_READS_WRITES |
librados::OPERATION_IGNORE_REDIRECT,
NULL);
comp->wait_for_complete();
if ((r = comp->get_return_value()) && !src_value.deleted()) {
std::cerr << "Error: oid " << oid << " stat returned error code "
<< r << std::endl;
ceph_abort();
}
context->update_object_version(oid, comp->get_version64());
comp->release();
comp = context->rados.aio_create_completion();
rd_op.stat(NULL, NULL, NULL);
context->low_tier_io_ctx.aio_operate(context->prefix+oid_tgt, comp, &rd_op,
librados::OPERATION_ORDER_READS_WRITES |
librados::OPERATION_IGNORE_REDIRECT,
NULL);
comp->wait_for_complete();
if ((r = comp->get_return_value())) {
std::cerr << "Error: oid " << oid_tgt << " stat returned error code "
<< r << std::endl;
ceph_abort();
}
uint64_t tgt_version = comp->get_version64();
comp->release();
context->find_object(oid, &src_value);
if (src_value.version != 0 && !src_value.deleted())
op.assert_version(src_value.version);
op.set_redirect(context->prefix+oid_tgt, context->low_tier_io_ctx, tgt_version);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp = context->rados.aio_create_completion((void*) cb_arg, &write_callback);
context->io_ctx.aio_operate(context->prefix+oid, comp, &op,
librados::OPERATION_ORDER_READS_WRITES);
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
if (info->id == 0) {
ceph_assert(comp->is_complete());
std::cout << num << ": finishing set_redirect to oid " << oid << std::endl;
if ((r = comp->get_return_value())) {
if (r == -ENOENT && src_value.deleted()) {
std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
} else {
std::cerr << "Error: oid " << oid << " set_redirect " << oid_tgt << " returned error code "
<< r << std::endl;
ceph_abort();
}
} else {
context->update_object_redirect_target(oid, oid_tgt);
context->update_object_version(oid, comp->get_version64());
}
}
if (++done == 1) {
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
}
}
bool finished() override
{
return done == 1;
}
std::string getType() override
{
return "SetRedirectOp";
}
};
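// UnsetRedirectOp removes a (possibly redirected) object with
// OPERATION_IGNORE_REDIRECT and clears the model's redirect bookkeeping,
// tolerating -ENOENT when the object was already gone.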
class UnsetRedirectOp : public TestOp {
public:
std::string oid;
librados::ObjectWriteOperation op;
librados::AioCompletion *comp = nullptr;
UnsetRedirectOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat), oid(oid)
{}
void _begin() override
{
std::unique_lock state_locker{context->state_lock};
if (context->get_watch_context(oid)) {
context->kick();
return;
}
ObjectDesc contents;
context->find_object(oid, &contents);
bool present = !contents.deleted();
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->seq_num++;
context->remove_object(oid);
state_locker.unlock();
comp = context->rados.aio_create_completion();
op.remove();
context->io_ctx.aio_operate(context->prefix+oid, comp, &op,
librados::OPERATION_ORDER_READS_WRITES |
librados::OPERATION_IGNORE_REDIRECT);
comp->wait_for_complete();
int r = comp->get_return_value();
if (r && !(r == -ENOENT && !present)) {
std::cerr << "r is " << r << " while deleting " << oid << " and present is " << present << std::endl;
ceph_abort();
}
state_locker.lock();
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
if(!context->redirect_objs[oid].empty()) {
context->oid_redirect_not_in_use.insert(context->redirect_objs[oid]);
context->oid_redirect_in_use.erase(context->redirect_objs[oid]);
context->update_object_redirect_target(oid, {});
}
context->kick();
}
std::string getType() override
{
return "UnsetRedirectOp";
}
};
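// TierPromoteOp issues tier_promote() on the object and accepts either
// success or an expected -ENOENT when the source object has been deleted.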
class TierPromoteOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectWriteOperation op;
std::string oid;
std::shared_ptr<int> in_use;
ObjectDesc src_value;
TierPromoteOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat)
: TestOp(n, context, stat),
completion(NULL),
oid(oid)
{}
void _begin() override
{
context->state_lock.lock();
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->find_object(oid, &src_value);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->state_lock.unlock();
op.tier_promote();
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op);
ceph_assert(!r);
}
void _finish(CallbackInfo *info) override
{
std::lock_guard l{context->state_lock};
ceph_assert(!done);
ceph_assert(completion->is_complete());
ObjectDesc oid_value;
context->find_object(oid, &oid_value);
int r = completion->get_return_value();
std::cout << num << ": got " << cpp_strerror(r) << std::endl;
if (r == 0) {
// success
} else if (r == -ENOENT && src_value.deleted()) {
std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
} else {
ceph_abort_msg("shouldn't happen");
}
context->update_object_version(oid, completion->get_version64());
context->find_object(oid, &oid_value);
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "TierPromoteOp";
}
};
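// TierFlushOp issues tier_flush() with OPERATION_IGNORE_CACHE and checks
// the result against the model: success updates the flushed state, -EBUSY
// is tolerated when the snap is not the oldest unflushed one, and -ENOENT
// is only accepted if the object was deleted.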
class TierFlushOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectReadOperation op;
std::string oid;
std::shared_ptr<int> in_use;
int snap;
ObjectDesc src_value;
TierFlushOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat)
: TestOp(n, context, stat),
completion(NULL),
oid(oid),
snap(-1)
{}
void _begin() override
{
context->state_lock.lock();
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
if (0 && !(rand() % 4) && !context->snaps.empty()) { // leading 0 disables the snap-read path
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
std::cout << num << ": tier_flush oid " << oid << " snap " << snap << std::endl;
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
}
context->find_object(oid, &src_value, snap);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->state_lock.unlock();
op.tier_flush();
unsigned flags = librados::OPERATION_IGNORE_CACHE;
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op, flags, NULL);
ceph_assert(!r);
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
context->state_lock.lock();
ceph_assert(!done);
ceph_assert(completion->is_complete());
int r = completion->get_return_value();
std::cout << num << ": got " << cpp_strerror(r) << std::endl;
if (r == 0) {
// success
context->update_object_tier_flushed(oid, snap);
context->update_object_version(oid, completion->get_version64(), snap);
} else if (r == -EBUSY) {
// could fail if snap is not oldest
ceph_assert(!context->check_oldest_snap_flushed(oid, snap));
} else if (r == -ENOENT) {
// could fail if object is removed
if (src_value.deleted()) {
std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
} else {
std::cerr << num << ": got unexpected ENOENT" << std::endl;
ceph_abort();
}
} else {
if (r != -ENOENT && src_value.deleted()) {
std::cerr << num << ": src dne, but r is not ENOENT" << std::endl;
}
ceph_abort_msg("shouldn't happen");
}
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
done = true;
context->state_lock.unlock();
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "TierFlushOp";
}
};
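// TierEvictOp issues cache_evict() with OPERATION_IGNORE_CACHE; -EINVAL is
// tolerated (a modified manifest object loses its chunk_map, so it is no
// longer a manifest object) and -ENOENT only if the object was deleted.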
class TierEvictOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectReadOperation op;
std::string oid;
std::shared_ptr<int> in_use;
int snap;
ObjectDesc src_value;
TierEvictOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat)
: TestOp(n, context, stat),
completion(NULL),
oid(oid),
snap(-1)
{}
void _begin() override
{
context->state_lock.lock();
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
if (0 && !(rand() % 4) && !context->snaps.empty()) { // leading 0 disables the snap-read path
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
std::cout << num << ": tier_evict oid " << oid << " snap " << snap << std::endl;
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
}
context->find_object(oid, &src_value, snap);
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->state_lock.unlock();
op.cache_evict();
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op, librados::OPERATION_IGNORE_CACHE,
NULL);
ceph_assert(!r);
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard state_locker{context->state_lock};
ceph_assert(!done);
ceph_assert(completion->is_complete());
int r = completion->get_return_value();
std::cout << num << ": got " << cpp_strerror(r) << std::endl;
if (r == 0) {
// ok
} else if (r == -EINVAL) {
// modifying a manifest object clears its existing chunk_map; the object
// is then no longer a manifest object, which causes cache_evict to
// return -EINVAL
} else if (r == -ENOENT) {
// could fail if object is removed
if (src_value.deleted()) {
std::cout << num << ": got expected ENOENT (src dne)" << std::endl;
} else {
std::cerr << num << ": got unexpected ENOENT" << std::endl;
ceph_abort();
}
} else {
if (r != -ENOENT && src_value.deleted()) {
std::cerr << num << ": src dne, but r is not ENOENT" << std::endl;
}
ceph_abort_msg("shouldn't happen");
}
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "TierEvictOp";
}
};
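// HitSetListOp lists the hitsets for a hash via hit_set_list() and, if any
// exist, fetches a random one with hit_set_get() and decodes it; -ENOENT on
// the get is tolerated since the hitset may have been trimmed in between.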
class HitSetListOp : public TestOp {
librados::AioCompletion *comp1, *comp2;
uint32_t hash;
std::list< std::pair<time_t, time_t> > ls;
bufferlist bl;
public:
HitSetListOp(int n,
RadosTestContext *context,
uint32_t hash,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
comp1(NULL), comp2(NULL),
hash(hash)
{}
void _begin() override
{
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
comp1 = context->rados.aio_create_completion((void*) cb_arg,
&write_callback);
int r = context->io_ctx.hit_set_list(hash, comp1, &ls);
ceph_assert(r == 0);
}
void _finish(CallbackInfo *info) override {
std::lock_guard l{context->state_lock};
if (!comp2) {
if (ls.empty()) {
std::cerr << num << ": no hitsets" << std::endl;
done = true;
} else {
std::cerr << num << ": hitsets are " << ls << std::endl;
int r = rand() % ls.size();
auto p = ls.begin();
while (r--)
++p;
auto cb_arg = new std::pair<TestOp*, TestOp::CallbackInfo*>(
this, new TestOp::CallbackInfo(0));
comp2 = context->rados.aio_create_completion((void*) cb_arg, &write_callback);
r = context->io_ctx.hit_set_get(hash, comp2, p->second, &bl);
ceph_assert(r == 0);
}
} else {
int r = comp2->get_return_value();
if (r == 0) {
HitSet hitset;
auto p = bl.cbegin();
decode(hitset, p);
std::cout << num << ": got hitset of type " << hitset.get_type_name()
<< " size " << bl.length()
<< std::endl;
} else {
// FIXME: we could verify that we did in fact race with a trim...
ceph_assert(r == -ENOENT);
}
done = true;
}
context->kick();
}
bool finished() override {
return done;
}
std::string getType() override {
return "HitSetListOp";
}
};
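// UndirtyOp clears the dirty flag on an object with undirty() and updates
// the model's version on completion.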
class UndirtyOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectWriteOperation op;
std::string oid;
UndirtyOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
completion(NULL),
oid(oid)
{}
void _begin() override
{
context->state_lock.lock();
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->update_object_undirty(oid);
context->state_lock.unlock();
op.undirty();
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op, 0);
ceph_assert(!r);
}
void _finish(CallbackInfo *info) override
{
std::lock_guard state_locker{context->state_lock};
ceph_assert(!done);
ceph_assert(completion->is_complete());
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
context->update_object_version(oid, completion->get_version64());
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "UndirtyOp";
}
};
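// IsDirtyOp reads the dirty flag (optionally through a random snap) and
// cross-checks it against the model: a live object must match the tracked
// dirty state, and -ENOENT is only accepted for deleted objects.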
class IsDirtyOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectReadOperation op;
std::string oid;
bool dirty;
ObjectDesc old_value;
int snap = 0;
std::shared_ptr<int> in_use;
IsDirtyOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat = 0)
: TestOp(n, context, stat),
completion(NULL),
oid(oid),
dirty(false)
{}
void _begin() override
{
context->state_lock.lock();
if (!(rand() % 4) && !context->snaps.empty()) {
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
std::cout << num << ": is_dirty oid " << oid << " snap " << snap
<< std::endl;
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->oid_in_use.insert(oid);
context->oid_not_in_use.erase(oid);
context->state_lock.unlock();
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
}
op.is_dirty(&dirty, NULL);
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op, 0);
ceph_assert(!r);
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard state_locker{context->state_lock};
ceph_assert(!done);
ceph_assert(completion->is_complete());
context->oid_in_use.erase(oid);
context->oid_not_in_use.insert(oid);
ceph_assert(context->find_object(oid, &old_value, snap));
int r = completion->get_return_value();
if (r == 0) {
std::cout << num << ": " << (dirty ? "dirty" : "clean") << std::endl;
ceph_assert(!old_value.deleted());
ceph_assert(dirty == old_value.dirty);
} else {
std::cout << num << ": got " << r << std::endl;
ceph_assert(r == -ENOENT);
ceph_assert(old_value.deleted());
}
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "IsDirtyOp";
}
};
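// CacheFlushOp exercises cache_flush() (blocking) or cache_try_flush()
// (non-blocking, with OPERATION_SKIPRWLOCKS); -EBUSY, -EINVAL and -ENOENT
// are tolerated since flushes can race with writers, snap trims or removes.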
class CacheFlushOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectReadOperation op;
std::string oid;
bool blocking;
int snap;
bool can_fail;
std::shared_ptr<int> in_use;
CacheFlushOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat,
bool b)
: TestOp(n, context, stat),
completion(NULL),
oid(oid),
blocking(b),
snap(0),
can_fail(false)
{}
void _begin() override
{
context->state_lock.lock();
if (!(rand() % 4) && !context->snaps.empty()) {
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
// not being particularly specific here about knowing which
// flushes are on the oldest clean snap and which ones are not.
can_fail = !blocking || !context->snaps.empty();
// FIXME: we could fail if we've ever removed a snap due to
// the async snap trimming.
can_fail = true;
std::cout << num << ": " << (blocking ? "cache_flush" : "cache_try_flush")
<< " oid " << oid << " snap " << snap << std::endl;
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
}
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->oid_flushing.insert(oid);
context->oid_not_flushing.erase(oid);
context->state_lock.unlock();
unsigned flags = librados::OPERATION_IGNORE_CACHE;
if (blocking) {
op.cache_flush();
} else {
op.cache_try_flush();
flags = librados::OPERATION_SKIPRWLOCKS;
}
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op, flags, NULL);
ceph_assert(!r);
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard state_locker{context->state_lock};
ceph_assert(!done);
ceph_assert(completion->is_complete());
context->oid_flushing.erase(oid);
context->oid_not_flushing.insert(oid);
int r = completion->get_return_value();
std::cout << num << ": got " << cpp_strerror(r) << std::endl;
if (r == 0) {
context->update_object_version(oid, 0, snap);
} else if (r == -EBUSY) {
ceph_assert(can_fail);
} else if (r == -EINVAL) {
// caching not enabled?
} else if (r == -ENOENT) {
// may have raced with a remove?
} else {
ceph_abort_msg("shouldn't happen");
}
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "CacheFlushOp";
}
};
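// CacheEvictOp exercises cache_evict() on a random snap or on head; -EBUSY
// (raced with a write), -EINVAL (caching not enabled) and -ENOENT (raced
// with a remove) are all tolerated outcomes.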
class CacheEvictOp : public TestOp {
public:
librados::AioCompletion *completion;
librados::ObjectReadOperation op;
std::string oid;
std::shared_ptr<int> in_use;
CacheEvictOp(int n,
RadosTestContext *context,
const std::string &oid,
TestOpStat *stat)
: TestOp(n, context, stat),
completion(NULL),
oid(oid)
{}
void _begin() override
{
context->state_lock.lock();
int snap;
if (!(rand() % 4) && !context->snaps.empty()) {
snap = rand_choose(context->snaps)->first;
in_use = context->snaps_in_use.lookup_or_create(snap, snap);
} else {
snap = -1;
}
std::cout << num << ": cache_evict oid " << oid << " snap " << snap << std::endl;
if (snap >= 0) {
context->io_ctx.snap_set_read(context->snaps[snap]);
}
std::pair<TestOp*, TestOp::CallbackInfo*> *cb_arg =
new std::pair<TestOp*, TestOp::CallbackInfo*>(this,
new TestOp::CallbackInfo(0));
completion = context->rados.aio_create_completion((void *) cb_arg,
&write_callback);
context->state_lock.unlock();
op.cache_evict();
int r = context->io_ctx.aio_operate(context->prefix+oid, completion,
&op, librados::OPERATION_IGNORE_CACHE,
NULL);
ceph_assert(!r);
if (snap >= 0) {
context->io_ctx.snap_set_read(0);
}
}
void _finish(CallbackInfo *info) override
{
std::lock_guard state_locker{context->state_lock};
ceph_assert(!done);
ceph_assert(completion->is_complete());
int r = completion->get_return_value();
std::cout << num << ": got " << cpp_strerror(r) << std::endl;
if (r == 0) {
// yay!
} else if (r == -EBUSY) {
// raced with something that dirtied the object
} else if (r == -EINVAL) {
// caching not enabled?
} else if (r == -ENOENT) {
// may have raced with a remove?
} else {
ceph_abort_msg("shouldn't happen");
}
context->kick();
done = true;
}
bool finished() override
{
return done;
}
std::string getType() override
{
return "CacheEvictOp";
}
};
#endif
ceph-main/src/test/osd/TestECBackend.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Inktank Storage, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include <sstream>
#include <errno.h>
#include <signal.h>
#include "osd/ECBackend.h"
#include "gtest/gtest.h"
using namespace std;
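// stripe_info_t maps logical object offsets to per-chunk offsets: with a
// stripe of 4 data chunks and a stripe width of 4096 bytes, each chunk is
// 4096/4 = 1024 bytes, which is what the assertions below rely on.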
TEST(ECUtil, stripe_info_t)
{
const uint64_t swidth = 4096;
const uint64_t ssize = 4;
ECUtil::stripe_info_t s(ssize, swidth);
ASSERT_EQ(s.get_stripe_width(), swidth);
ASSERT_EQ(s.logical_to_next_chunk_offset(0), 0u);
ASSERT_EQ(s.logical_to_next_chunk_offset(1), s.get_chunk_size());
ASSERT_EQ(s.logical_to_next_chunk_offset(swidth - 1),
s.get_chunk_size());
ASSERT_EQ(s.logical_to_prev_chunk_offset(0), 0u);
ASSERT_EQ(s.logical_to_prev_chunk_offset(swidth), s.get_chunk_size());
ASSERT_EQ(s.logical_to_prev_chunk_offset((swidth * 2) - 1),
s.get_chunk_size());
ASSERT_EQ(s.logical_to_next_stripe_offset(0), 0u);
ASSERT_EQ(s.logical_to_next_stripe_offset(swidth - 1),
s.get_stripe_width());
ASSERT_EQ(s.logical_to_prev_stripe_offset(swidth), s.get_stripe_width());
ASSERT_EQ(s.logical_to_prev_stripe_offset((swidth * 2) - 1),
s.get_stripe_width());
ASSERT_EQ(s.aligned_logical_offset_to_chunk_offset(2*swidth),
2*s.get_chunk_size());
ASSERT_EQ(s.aligned_chunk_offset_to_logical_offset(2*s.get_chunk_size()),
2*s.get_stripe_width());
ASSERT_EQ(s.aligned_offset_len_to_chunk(make_pair(swidth, 10*swidth)),
make_pair(s.get_chunk_size(), 10*s.get_chunk_size()));
ASSERT_EQ(s.offset_len_to_stripe_bounds(make_pair(swidth-10, (uint64_t)20)),
make_pair((uint64_t)0, 2*swidth));
}
ceph-main/src/test/osd/TestMClockScheduler.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include <chrono>
#include "gtest/gtest.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/common_init.h"
#include "osd/scheduler/mClockScheduler.h"
#include "osd/scheduler/OpSchedulerItem.h"
using namespace ceph::osd::scheduler;
int main(int argc, char **argv) {
std::vector<const char*> args(argv, argv+argc);
auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_OSD,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
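// Test fixture wrapping a single-shard mClockScheduler with three synthetic
// client ids; MockDmclockItem lets each enqueued op carry an arbitrary
// op_scheduler_class (client, immediate, background_best_effort, ...).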
class mClockSchedulerTest : public testing::Test {
public:
int whoami;
uint32_t num_shards;
int shard_id;
bool is_rotational;
MonClient *monc;
mClockScheduler q;
uint64_t client1;
uint64_t client2;
uint64_t client3;
mClockSchedulerTest() :
whoami(0),
num_shards(1),
shard_id(0),
is_rotational(false),
monc(nullptr),
q(g_ceph_context, whoami, num_shards, shard_id, is_rotational, monc),
client1(1001),
client2(9999),
client3(100000001)
{}
struct MockDmclockItem : public PGOpQueueable {
op_scheduler_class scheduler_class;
MockDmclockItem(op_scheduler_class _scheduler_class) :
PGOpQueueable(spg_t()),
scheduler_class(_scheduler_class) {}
MockDmclockItem()
: MockDmclockItem(op_scheduler_class::background_best_effort) {}
ostream &print(ostream &rhs) const final { return rhs; }
std::string print() const final {
return std::string();
}
std::optional<OpRequestRef> maybe_get_op() const final {
return std::nullopt;
}
op_scheduler_class get_scheduler_class() const final {
return scheduler_class;
}
void run(OSD *osd, OSDShard *sdata, PGRef& pg, ThreadPool::TPHandle &handle) final {}
};
};
template <typename... Args>
OpSchedulerItem create_item(
epoch_t e, uint64_t owner, Args&&... args)
{
return OpSchedulerItem(
std::make_unique<mClockSchedulerTest::MockDmclockItem>(
std::forward<Args>(args)...),
12, 12,
utime_t(), owner, e);
}
template <typename... Args>
OpSchedulerItem create_high_prio_item(
unsigned priority, epoch_t e, uint64_t owner, Args&&... args)
{
// Create high priority item for testing high prio queue
return OpSchedulerItem(
std::make_unique<mClockSchedulerTest::MockDmclockItem>(
std::forward<Args>(args)...),
12, priority,
utime_t(), owner, e);
}
OpSchedulerItem get_item(WorkItem item)
{
return std::move(std::get<OpSchedulerItem>(item));
}
TEST_F(mClockSchedulerTest, TestEmpty) {
ASSERT_TRUE(q.empty());
for (unsigned i = 100; i < 105; i+=2) {
q.enqueue(create_item(i, client1, op_scheduler_class::client));
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
ASSERT_FALSE(q.empty());
std::list<OpSchedulerItem> reqs;
reqs.push_back(get_item(q.dequeue()));
reqs.push_back(get_item(q.dequeue()));
ASSERT_EQ(2u, reqs.size());
ASSERT_FALSE(q.empty());
for (auto &&i : reqs) {
q.enqueue_front(std::move(i));
}
reqs.clear();
ASSERT_FALSE(q.empty());
for (int i = 0; i < 3; ++i) {
ASSERT_FALSE(q.empty());
q.dequeue();
}
ASSERT_TRUE(q.empty());
}
TEST_F(mClockSchedulerTest, TestSingleClientOrderedEnqueueDequeue) {
ASSERT_TRUE(q.empty());
for (unsigned i = 100; i < 105; ++i) {
q.enqueue(create_item(i, client1, op_scheduler_class::client));
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
auto r = get_item(q.dequeue());
ASSERT_EQ(100u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(101u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(102u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(103u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(104u, r.get_map_epoch());
}
TEST_F(mClockSchedulerTest, TestMultiClientOrderedEnqueueDequeue) {
const unsigned NUM = 1000;
for (unsigned i = 0; i < NUM; ++i) {
for (auto &&c: {client1, client2, client3}) {
q.enqueue(create_item(i, c));
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
}
std::map<uint64_t, epoch_t> next;
for (auto &&c: {client1, client2, client3}) {
next[c] = 0;
}
for (unsigned i = 0; i < NUM * 3; ++i) {
ASSERT_FALSE(q.empty());
auto r = get_item(q.dequeue());
auto owner = r.get_owner();
auto niter = next.find(owner);
ASSERT_FALSE(niter == next.end());
ASSERT_EQ(niter->second, r.get_map_epoch());
niter->second++;
}
ASSERT_TRUE(q.empty());
}
TEST_F(mClockSchedulerTest, TestHighPriorityQueueEnqueueDequeue) {
ASSERT_TRUE(q.empty());
for (unsigned i = 200; i < 205; ++i) {
q.enqueue(create_high_prio_item(i, i, client1, op_scheduler_class::client));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
ASSERT_FALSE(q.empty());
// Higher priority ops should be dequeued first
auto r = get_item(q.dequeue());
ASSERT_EQ(204u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(203u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(202u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(201u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(200u, r.get_map_epoch());
ASSERT_TRUE(q.empty());
}
TEST_F(mClockSchedulerTest, TestAllQueuesEnqueueDequeue) {
ASSERT_TRUE(q.empty());
// Insert ops into the mClock queue
for (unsigned i = 100; i < 102; ++i) {
q.enqueue(create_item(i, client1, op_scheduler_class::client));
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
// Insert Immediate ops
for (unsigned i = 103; i < 105; ++i) {
q.enqueue(create_item(i, client1, op_scheduler_class::immediate));
std::this_thread::sleep_for(std::chrono::microseconds(1));
}
// Insert ops into the high queue
for (unsigned i = 200; i < 202; ++i) {
q.enqueue(create_high_prio_item(i, i, client1, op_scheduler_class::client));
std::this_thread::sleep_for(std::chrono::milliseconds(1));
}
ASSERT_FALSE(q.empty());
auto r = get_item(q.dequeue());
// Ops classified as Immediate should be dequeued first
ASSERT_EQ(103u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(104u, r.get_map_epoch());
// High priority queue should be dequeued second
// higher priority operation first
r = get_item(q.dequeue());
ASSERT_EQ(201u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(200u, r.get_map_epoch());
// mClock queue will be dequeued last
r = get_item(q.dequeue());
ASSERT_EQ(100u, r.get_map_epoch());
r = get_item(q.dequeue());
ASSERT_EQ(101u, r.get_map_epoch());
ASSERT_TRUE(q.empty());
}
ceph-main/src/test/osd/TestOSDMap.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "gtest/gtest.h"
#include "osd/OSDMap.h"
#include "osd/OSDMapMapping.h"
#include "mon/OSDMonitor.h"
#include "mon/PGMap.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/common_init.h"
#include "common/ceph_argparse.h"
#include "common/ceph_json.h"
#include <iostream>
#include <cmath>
using namespace std;
int main(int argc, char **argv) {
map<string,string> defaults = {
// make sure we have 3 copies, or some tests won't work
{ "osd_pool_default_size", "3" },
// our map is flat, so just try and split across OSDs, not hosts or whatever
{ "osd_crush_chooseleaf_type", "0" },
};
std::vector<const char*> args(argv, argv+argc);
auto cct = global_init(&defaults, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
class OSDMapTest : public testing::Test,
public ::testing::WithParamInterface<std::pair<int, int>> {
int num_osds = 6;
public:
OSDMap osdmap;
OSDMapMapping mapping;
const uint64_t my_ec_pool = 1;
const uint64_t my_rep_pool = 2;
// Blacklist testing lists
// I pulled the first two ranges and their start/end points from
// https://en.wikipedia.org/wiki/Classless_Inter-Domain_Routing#CIDR_notation
static const string range_addrs[];
static const string ip_addrs[];
static const string unblocked_ip_addrs[];
const string EC_RULE_NAME = "erasure";
OSDMapTest() {}
void set_up_map(int new_num_osds = 6, bool no_default_pools = false) {
num_osds = new_num_osds;
uuid_d fsid;
osdmap.build_simple(g_ceph_context, 0, fsid, num_osds);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.fsid = osdmap.get_fsid();
entity_addrvec_t sample_addrs;
sample_addrs.v.push_back(entity_addr_t());
uuid_d sample_uuid;
for (int i = 0; i < num_osds; ++i) {
sample_uuid.generate_random();
sample_addrs.v[0].nonce = i;
pending_inc.new_state[i] = CEPH_OSD_EXISTS | CEPH_OSD_NEW;
pending_inc.new_up_client[i] = sample_addrs;
pending_inc.new_up_cluster[i] = sample_addrs;
pending_inc.new_hb_back_up[i] = sample_addrs;
pending_inc.new_hb_front_up[i] = sample_addrs;
pending_inc.new_weight[i] = CEPH_OSD_IN;
pending_inc.new_uuid[i] = sample_uuid;
}
osdmap.apply_incremental(pending_inc);
if (no_default_pools) // do not create any default pool(s)
return;
OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
new_pool_inc.new_pool_max = osdmap.get_pool_max();
new_pool_inc.fsid = osdmap.get_fsid();
// make an ec pool
set_ec_pool("ec", new_pool_inc);
// and a replicated pool
set_rep_pool("reppool",new_pool_inc);
osdmap.apply_incremental(new_pool_inc);
}
int get_ec_crush_rule() {
int r = osdmap.crush->get_rule_id(EC_RULE_NAME);
if (r < 0) {
r = osdmap.crush->add_simple_rule(
EC_RULE_NAME, "default", "osd", "",
"indep", pg_pool_t::TYPE_ERASURE,
&cerr);
}
return r;
}
uint64_t set_ec_pool(const string &name, OSDMap::Incremental &new_pool_inc,
bool assert_pool_id = true) {
pg_pool_t empty;
uint64_t pool_id = ++new_pool_inc.new_pool_max;
if (assert_pool_id)
ceph_assert(pool_id == my_ec_pool);
pg_pool_t *p = new_pool_inc.get_new_pool(pool_id, &empty);
p->size = 3;
p->set_pg_num(64);
p->set_pgp_num(64);
p->type = pg_pool_t::TYPE_ERASURE;
p->crush_rule = get_ec_crush_rule();
new_pool_inc.new_pool_names[pool_id] = name; // e.g. "ec"
return pool_id;
}
uint64_t set_rep_pool(const string name, OSDMap::Incremental &new_pool_inc,
bool assert_pool_id = true) {
pg_pool_t empty;
uint64_t pool_id = ++new_pool_inc.new_pool_max;
if (assert_pool_id)
ceph_assert(pool_id == my_rep_pool);
pg_pool_t *p = new_pool_inc.get_new_pool(pool_id, &empty);
p->size = 3;
p->set_pg_num(64);
p->set_pgp_num(64);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = 0;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[pool_id] = name; // e.g. "reppool"
return pool_id;
}
unsigned int get_num_osds() { return num_osds; }
void get_crush(const OSDMap& tmap, CrushWrapper& newcrush) {
bufferlist bl;
tmap.crush->encode(bl, CEPH_FEATURES_SUPPORTED_DEFAULT);
auto p = bl.cbegin();
newcrush.decode(p);
}
int crush_move(OSDMap& tmap, const string &name, const vector<string> &argvec) {
map<string,string> loc;
CrushWrapper::parse_loc_map(argvec, &loc);
CrushWrapper newcrush;
get_crush(tmap, newcrush);
if (!newcrush.name_exists(name)) {
return -ENOENT;
}
int id = newcrush.get_item_id(name);
int err;
if (!newcrush.check_item_loc(g_ceph_context, id, loc, (int *)NULL)) {
if (id >= 0) {
err = newcrush.create_or_move_item(g_ceph_context, id, 0, name, loc);
} else {
err = newcrush.move_bucket(g_ceph_context, id, loc);
}
if (err >= 0) {
OSDMap::Incremental pending_inc(tmap.get_epoch() + 1);
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
tmap.apply_incremental(pending_inc);
err = 0;
}
} else {
// already there
err = 0;
}
return err;
}
int crush_rule_create_replicated(const string &name,
const string &root,
const string &type) {
if (osdmap.crush->rule_exists(name)) {
return osdmap.crush->get_rule_id(name);
}
CrushWrapper newcrush;
get_crush(osdmap, newcrush);
string device_class;
stringstream ss;
int ruleno = newcrush.add_simple_rule(
name, root, type, device_class,
"firstn", pg_pool_t::TYPE_REPLICATED, &ss);
if (ruleno >= 0) {
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.crush.clear();
newcrush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
osdmap.apply_incremental(pending_inc);
}
return ruleno;
}
void test_mappings(int pool,
int num,
vector<int> *any,
vector<int> *first,
vector<int> *primary) {
mapping.update(osdmap);
for (int i=0; i<num; ++i) {
vector<int> up, acting;
int up_primary, acting_primary;
pg_t pgid(i, pool);
osdmap.pg_to_up_acting_osds(pgid,
&up, &up_primary, &acting, &acting_primary);
for (unsigned j=0; j<acting.size(); ++j)
(*any)[acting[j]]++;
if (!acting.empty())
(*first)[acting[0]]++;
if (acting_primary >= 0)
(*primary)[acting_primary]++;
// compare to precalc mapping
vector<int> up2, acting2;
int up_primary2, acting_primary2;
pgid = osdmap.raw_pg_to_pg(pgid);
mapping.get(pgid, &up2, &up_primary2, &acting2, &acting_primary2);
ASSERT_EQ(up, up2);
ASSERT_EQ(up_primary, up_primary2);
ASSERT_EQ(acting, acting2);
ASSERT_EQ(acting_primary, acting_primary2);
}
cout << "any: " << *any << std::endl;;
cout << "first: " << *first << std::endl;;
cout << "primary: " << *primary << std::endl;;
}
void clean_pg_upmaps(CephContext *cct,
const OSDMap& om,
OSDMap::Incremental& pending_inc) {
int cpu_num = 8;
int pgs_per_chunk = 256;
ThreadPool tp(cct, "BUG_40104::clean_upmap_tp", "clean_upmap_tp", cpu_num);
tp.start();
ParallelPGMapper mapper(cct, &tp);
vector<pg_t> pgs_to_check;
om.get_upmap_pgs(&pgs_to_check);
OSDMonitor::CleanUpmapJob job(cct, om, pending_inc);
mapper.queue(&job, pgs_per_chunk, pgs_to_check);
job.wait();
tp.stop();
}
void set_primary_affinity_all(float pa) {
for (uint i = 0 ; i < get_num_osds() ; i++) {
osdmap.set_primary_affinity(i, int(pa * CEPH_OSD_MAX_PRIMARY_AFFINITY));
}
}
bool score_in_range(float score, uint nosds = 0) {
if (nosds == 0) {
nosds = get_num_osds();
}
return score >= 1.0 && score <= float(nosds);
}
};
TEST_F(OSDMapTest, Create) {
set_up_map();
ASSERT_EQ(get_num_osds(), (unsigned)osdmap.get_max_osd());
ASSERT_EQ(get_num_osds(), osdmap.get_num_in_osds());
}
TEST_F(OSDMapTest, Features) {
// with EC pool
set_up_map();
uint64_t features = osdmap.get_features(CEPH_ENTITY_TYPE_OSD, NULL);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES2);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES3);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_V2);
ASSERT_TRUE(features & CEPH_FEATURE_OSDHASHPSPOOL);
ASSERT_TRUE(features & CEPH_FEATURE_OSD_PRIMARY_AFFINITY);
// clients have a slightly different view
features = osdmap.get_features(CEPH_ENTITY_TYPE_CLIENT, NULL);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES2);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES3);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_V2);
ASSERT_TRUE(features & CEPH_FEATURE_OSDHASHPSPOOL);
ASSERT_TRUE(features & CEPH_FEATURE_OSD_PRIMARY_AFFINITY);
// remove the EC pool, but leave the rule; add primary affinity.
{
OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
new_pool_inc.old_pools.insert(osdmap.lookup_pg_pool_name("ec"));
new_pool_inc.new_primary_affinity[0] = 0x8000;
osdmap.apply_incremental(new_pool_inc);
}
features = osdmap.get_features(CEPH_ENTITY_TYPE_MON, NULL);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES2);
ASSERT_TRUE(features & CEPH_FEATURE_CRUSH_TUNABLES3); // shared bit with primary affinity
ASSERT_FALSE(features & CEPH_FEATURE_CRUSH_V2);
ASSERT_TRUE(features & CEPH_FEATURE_OSDHASHPSPOOL);
ASSERT_TRUE(features & CEPH_FEATURE_OSD_PRIMARY_AFFINITY);
// FIXME: test tiering feature bits
}
TEST_F(OSDMapTest, MapPG) {
set_up_map();
std::cerr << " osdmap.pool_max==" << osdmap.get_pool_max() << std::endl;
pg_t rawpg(0, my_rep_pool);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
vector<int> old_up_osds, old_acting_osds;
osdmap.pg_to_up_acting_osds(pgid, old_up_osds, old_acting_osds);
ASSERT_EQ(old_up_osds, up_osds);
ASSERT_EQ(old_acting_osds, acting_osds);
ASSERT_EQ(osdmap.get_pg_pool(my_rep_pool)->get_size(), up_osds.size());
}
TEST_F(OSDMapTest, MapFunctionsMatch) {
// TODO: make sure pg_to_up_acting_osds and pg_to_acting_osds match
set_up_map();
pg_t rawpg(0, my_rep_pool);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
vector<int> up_osds_two, acting_osds_two;
osdmap.pg_to_up_acting_osds(pgid, up_osds_two, acting_osds_two);
ASSERT_EQ(up_osds, up_osds_two);
ASSERT_EQ(acting_osds, acting_osds_two);
int acting_primary_two;
osdmap.pg_to_acting_osds(pgid, &acting_osds_two, &acting_primary_two);
EXPECT_EQ(acting_osds, acting_osds_two);
EXPECT_EQ(acting_primary, acting_primary_two);
osdmap.pg_to_acting_osds(pgid, acting_osds_two);
EXPECT_EQ(acting_osds, acting_osds_two);
}
/** This test must be removed or modified appropriately when we allow
* other ways to specify a primary. */
TEST_F(OSDMapTest, PrimaryIsFirst) {
set_up_map();
pg_t rawpg(0, my_rep_pool);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
EXPECT_EQ(up_osds[0], up_primary);
EXPECT_EQ(acting_osds[0], acting_primary);
}
TEST_F(OSDMapTest, PGTempRespected) {
set_up_map();
pg_t rawpg(0, my_rep_pool);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
// copy and swap first and last element in acting_osds
vector<int> new_acting_osds(acting_osds);
int first = new_acting_osds[0];
new_acting_osds[0] = *new_acting_osds.rbegin();
*new_acting_osds.rbegin() = first;
// apply pg_temp to osdmap
OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1);
pgtemp_map.new_pg_temp[pgid] = mempool::osdmap::vector<int>(
new_acting_osds.begin(), new_acting_osds.end());
osdmap.apply_incremental(pgtemp_map);
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
EXPECT_EQ(new_acting_osds, acting_osds);
}
TEST_F(OSDMapTest, PrimaryTempRespected) {
set_up_map();
pg_t rawpg(0, my_rep_pool);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up_osds;
vector<int> acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
// make second OSD primary via incremental
OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1);
pgtemp_map.new_primary_temp[pgid] = acting_osds[1];
osdmap.apply_incremental(pgtemp_map);
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
EXPECT_EQ(acting_primary, acting_osds[1]);
}
TEST_F(OSDMapTest, CleanTemps) {
set_up_map();
OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 2);
pg_t pga = osdmap.raw_pg_to_pg(pg_t(0, my_rep_pool));
{
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pga, &up_osds, &up_primary,
&acting_osds, &acting_primary);
pgtemp_map.new_pg_temp[pga] = mempool::osdmap::vector<int>(
up_osds.begin(), up_osds.end());
pgtemp_map.new_primary_temp[pga] = up_primary;
}
pg_t pgb = osdmap.raw_pg_to_pg(pg_t(1, my_rep_pool));
{
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgb, &up_osds, &up_primary,
&acting_osds, &acting_primary);
pending_inc.new_pg_temp[pgb] = mempool::osdmap::vector<int>(
up_osds.begin(), up_osds.end());
pending_inc.new_primary_temp[pgb] = up_primary;
}
osdmap.apply_incremental(pgtemp_map);
OSDMap tmpmap;
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(pending_inc);
OSDMap::clean_temps(g_ceph_context, osdmap, tmpmap, &pending_inc);
EXPECT_TRUE(pending_inc.new_pg_temp.count(pga) &&
pending_inc.new_pg_temp[pga].size() == 0);
EXPECT_EQ(-1, pending_inc.new_primary_temp[pga]);
EXPECT_TRUE(!pending_inc.new_pg_temp.count(pgb) &&
!pending_inc.new_primary_temp.count(pgb));
}
TEST_F(OSDMapTest, KeepsNecessaryTemps) {
set_up_map();
pg_t rawpg(0, my_rep_pool);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up_osds, acting_osds;
int up_primary, acting_primary;
osdmap.pg_to_up_acting_osds(pgid, &up_osds, &up_primary,
&acting_osds, &acting_primary);
// find an unused OSD and put it in place of the second one
OSDMap::Incremental pgtemp_map(osdmap.get_epoch() + 1);
int i = 0;
for(; i != (int)get_num_osds(); ++i) {
bool in_use = false;
for (vector<int>::iterator osd_it = up_osds.begin();
osd_it != up_osds.end();
++osd_it) {
if (i == *osd_it) {
in_use = true;
break;
}
}
if (!in_use) {
up_osds[1] = i;
break;
}
}
if (i == (int)get_num_osds())
FAIL() << "did not find unused OSD for temp mapping";
pgtemp_map.new_pg_temp[pgid] = mempool::osdmap::vector<int>(
up_osds.begin(), up_osds.end());
pgtemp_map.new_primary_temp[pgid] = up_osds[1];
osdmap.apply_incremental(pgtemp_map);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
OSDMap tmpmap;
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(pending_inc);
OSDMap::clean_temps(g_ceph_context, osdmap, tmpmap, &pending_inc);
EXPECT_FALSE(pending_inc.new_pg_temp.count(pgid));
EXPECT_FALSE(pending_inc.new_primary_temp.count(pgid));
}
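// Primary affinity is expressed in units of CEPH_OSD_MAX_PRIMARY_AFFINITY
// (0x10000 == always eligible to be primary); 0 removes an OSD from
// primary selection and 0x8000 roughly halves its chance of being chosen.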
TEST_F(OSDMapTest, PrimaryAffinity) {
set_up_map();
int n = get_num_osds();
for (map<int64_t,pg_pool_t>::const_iterator p = osdmap.get_pools().begin();
p != osdmap.get_pools().end();
++p) {
int pool = p->first;
int expect_primary = 10000 / n;
cout << "pool " << pool << " size " << (int)p->second.size
<< " expect_primary " << expect_primary << std::endl;
{
vector<int> any(n, 0);
vector<int> first(n, 0);
vector<int> primary(n, 0);
test_mappings(pool, 10000, &any, &first, &primary);
for (int i=0; i<n; ++i) {
ASSERT_LT(0, any[i]);
ASSERT_LT(0, first[i]);
ASSERT_LT(0, primary[i]);
}
}
osdmap.set_primary_affinity(0, 0);
osdmap.set_primary_affinity(1, 0);
{
vector<int> any(n, 0);
vector<int> first(n, 0);
vector<int> primary(n, 0);
test_mappings(pool, 10000, &any, &first, &primary);
for (int i=0; i<n; ++i) {
ASSERT_LT(0, any[i]);
if (i >= 2) {
ASSERT_LT(0, first[i]);
ASSERT_LT(0, primary[i]);
} else {
if (p->second.is_replicated()) {
ASSERT_EQ(0, first[i]);
}
ASSERT_EQ(0, primary[i]);
}
}
}
osdmap.set_primary_affinity(0, 0x8000);
osdmap.set_primary_affinity(1, 0);
{
vector<int> any(n, 0);
vector<int> first(n, 0);
vector<int> primary(n, 0);
test_mappings(pool, 10000, &any, &first, &primary);
int expect = (10000 / (n-2)) / 2; // half weight
cout << "expect " << expect << std::endl;
for (int i=0; i<n; ++i) {
ASSERT_LT(0, any[i]);
if (i >= 2) {
ASSERT_LT(0, first[i]);
ASSERT_LT(0, primary[i]);
} else if (i == 1) {
if (p->second.is_replicated()) {
ASSERT_EQ(0, first[i]);
}
ASSERT_EQ(0, primary[i]);
} else {
ASSERT_LT(expect * 2 / 3, primary[0]);
ASSERT_GT(expect * 4 / 3, primary[0]);
}
}
}
osdmap.set_primary_affinity(0, 0x10000);
osdmap.set_primary_affinity(1, 0x10000);
}
}
TEST_F(OSDMapTest, get_osd_crush_node_flags) {
set_up_map();
for (unsigned i=0; i<get_num_osds(); ++i) {
ASSERT_EQ(0u, osdmap.get_osd_crush_node_flags(i));
}
OSDMap::Incremental inc(osdmap.get_epoch() + 1);
inc.new_crush_node_flags[-1] = 123u;
osdmap.apply_incremental(inc);
for (unsigned i=0; i<get_num_osds(); ++i) {
ASSERT_EQ(123u, osdmap.get_osd_crush_node_flags(i));
}
ASSERT_EQ(0u, osdmap.get_osd_crush_node_flags(1000));
OSDMap::Incremental inc3(osdmap.get_epoch() + 1);
inc3.new_crush_node_flags[-1] = 456u;
osdmap.apply_incremental(inc3);
for (unsigned i=0; i<get_num_osds(); ++i) {
ASSERT_EQ(456u, osdmap.get_osd_crush_node_flags(i));
}
ASSERT_EQ(0u, osdmap.get_osd_crush_node_flags(1000));
OSDMap::Incremental inc2(osdmap.get_epoch() + 1);
inc2.new_crush_node_flags[-1] = 0;
osdmap.apply_incremental(inc2);
for (unsigned i=0; i<get_num_osds(); ++i) {
ASSERT_EQ(0u, osdmap.get_crush_node_flags(i));
}
}
TEST_F(OSDMapTest, parse_osd_id_list) {
set_up_map();
set<int> out;
set<int> all;
osdmap.get_all_osds(all);
ASSERT_EQ(0, osdmap.parse_osd_id_list({"osd.0"}, &out, &cout));
ASSERT_EQ(1u, out.size());
ASSERT_EQ(0, *out.begin());
ASSERT_EQ(0, osdmap.parse_osd_id_list({"1"}, &out, &cout));
ASSERT_EQ(1u, out.size());
ASSERT_EQ(1, *out.begin());
ASSERT_EQ(0, osdmap.parse_osd_id_list({"osd.0","osd.1"}, &out, &cout));
ASSERT_EQ(2u, out.size());
ASSERT_EQ(0, *out.begin());
ASSERT_EQ(1, *out.rbegin());
ASSERT_EQ(0, osdmap.parse_osd_id_list({"osd.0","1"}, &out, &cout));
ASSERT_EQ(2u, out.size());
ASSERT_EQ(0, *out.begin());
ASSERT_EQ(1, *out.rbegin());
ASSERT_EQ(0, osdmap.parse_osd_id_list({"*"}, &out, &cout));
ASSERT_EQ(all.size(), out.size());
ASSERT_EQ(all, out);
ASSERT_EQ(0, osdmap.parse_osd_id_list({"all"}, &out, &cout));
ASSERT_EQ(all, out);
ASSERT_EQ(0, osdmap.parse_osd_id_list({"any"}, &out, &cout));
ASSERT_EQ(all, out);
ASSERT_EQ(-EINVAL, osdmap.parse_osd_id_list({"foo"}, &out, &cout));
ASSERT_EQ(-EINVAL, osdmap.parse_osd_id_list({"-12"}, &out, &cout));
}
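// CleanPGUpmaps builds a host-based crush topology and verifies that
// clean_pg_upmaps() cancels stale or invalid pg_upmap/pg_upmap_items
// entries while preserving ones that are still valid (tracker issues
// 37493, 37501 and 37968 are reproduced below).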
TEST_F(OSDMapTest, CleanPGUpmaps) {
set_up_map();
// build a crush rule of type host
const int expected_host_num = 3;
int osd_per_host = get_num_osds() / expected_host_num;
ASSERT_GE(2, osd_per_host);
int index = 0;
for (int i = 0; i < (int)get_num_osds(); i++) {
if (i && i % osd_per_host == 0) {
++index;
}
stringstream osd_name;
stringstream host_name;
vector<string> move_to;
osd_name << "osd." << i;
host_name << "host-" << index;
move_to.push_back("root=default");
string host_loc = "host=" + host_name.str();
move_to.push_back(host_loc);
int r = crush_move(osdmap, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
const string upmap_rule = "upmap";
int upmap_rule_no = crush_rule_create_replicated(
upmap_rule, "default", "host");
ASSERT_LT(0, upmap_rule_no);
// create a replicated pool which references the above rule
OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
new_pool_inc.new_pool_max = osdmap.get_pool_max();
new_pool_inc.fsid = osdmap.get_fsid();
pg_pool_t empty;
uint64_t upmap_pool_id = ++new_pool_inc.new_pool_max;
pg_pool_t *p = new_pool_inc.get_new_pool(upmap_pool_id, &empty);
p->size = 2;
p->set_pg_num(64);
p->set_pgp_num(64);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = upmap_rule_no;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[upmap_pool_id] = "upmap_pool";
osdmap.apply_incremental(new_pool_inc);
pg_t rawpg(0, upmap_pool_id);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up;
int up_primary;
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
ASSERT_LT(1U, up.size());
{
// validate we won't have two OSDs from a same host
int parent_0 = osdmap.crush->get_parent_of_type(up[0],
osdmap.crush->get_type_id("host"));
int parent_1 = osdmap.crush->get_parent_of_type(up[1],
osdmap.crush->get_type_id("host"));
ASSERT_TRUE(parent_0 != parent_1);
}
{
// cancel stale upmaps
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
int from = -1;
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(up.begin(), up.end(), i) == up.end()) {
from = i;
break;
}
}
ASSERT_TRUE(from >= 0);
int to = -1;
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(up.begin(), up.end(), i) == up.end() && i != from) {
to = i;
break;
}
}
ASSERT_TRUE(to >= 0);
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(from, to));
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
OSDMap nextmap;
nextmap.deepish_copy_from(osdmap);
nextmap.apply_incremental(pending_inc);
ASSERT_TRUE(nextmap.have_pg_upmaps(pgid));
OSDMap::Incremental new_pending_inc(nextmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, nextmap, new_pending_inc);
nextmap.apply_incremental(new_pending_inc);
ASSERT_TRUE(!nextmap.have_pg_upmaps(pgid));
}
{
// https://tracker.ceph.com/issues/37493
pg_t ec_pg(0, my_ec_pool);
pg_t ec_pgid = osdmap.raw_pg_to_pg(ec_pg);
OSDMap tmpmap; // use a tmpmap here, so we do not dirty origin map..
int from = -1;
int to = -1;
{
// insert a valid pg_upmap_item
vector<int> ec_up;
int ec_up_primary;
osdmap.pg_to_raw_up(ec_pgid, &ec_up, &ec_up_primary);
ASSERT_TRUE(!ec_up.empty());
from = *(ec_up.begin());
ASSERT_TRUE(from >= 0);
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(ec_up.begin(), ec_up.end(), i) == ec_up.end()) {
to = i;
break;
}
}
ASSERT_TRUE(to >= 0);
ASSERT_TRUE(from != to);
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(from, to));
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[ec_pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(pending_inc);
ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid));
}
{
// mark one of the target OSDs of the above pg_upmap_item as down
OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1);
pending_inc.new_state[to] = CEPH_OSD_UP;
tmpmap.apply_incremental(pending_inc);
ASSERT_TRUE(!tmpmap.is_up(to));
ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid));
}
{
// confirm *clean_pg_upmaps* won't do anything bad
OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmpmap, pending_inc);
tmpmap.apply_incremental(pending_inc);
ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid));
}
}
{
// http://tracker.ceph.com/issues/37501
pg_t ec_pg(0, my_ec_pool);
pg_t ec_pgid = osdmap.raw_pg_to_pg(ec_pg);
OSDMap tmpmap; // use a tmpmap here, so we do not dirty origin map..
int from = -1;
int to = -1;
{
// insert a valid pg_upmap_item
vector<int> ec_up;
int ec_up_primary;
osdmap.pg_to_raw_up(ec_pgid, &ec_up, &ec_up_primary);
ASSERT_TRUE(!ec_up.empty());
from = *(ec_up.begin());
ASSERT_TRUE(from >= 0);
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(ec_up.begin(), ec_up.end(), i) == ec_up.end()) {
to = i;
break;
}
}
ASSERT_TRUE(to >= 0);
ASSERT_TRUE(from != to);
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(from, to));
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[ec_pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(pending_inc);
ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid));
}
{
// mark one of the target OSDs of the above pg_upmap_item as out
OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1);
pending_inc.new_weight[to] = CEPH_OSD_OUT;
tmpmap.apply_incremental(pending_inc);
ASSERT_TRUE(tmpmap.is_out(to));
ASSERT_TRUE(tmpmap.have_pg_upmaps(ec_pgid));
}
{
// *clean_pg_upmaps* should be able to remove the above *bad* mapping
OSDMap::Incremental pending_inc(tmpmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmpmap, pending_inc);
tmpmap.apply_incremental(pending_inc);
ASSERT_TRUE(!tmpmap.have_pg_upmaps(ec_pgid));
}
}
{
// http://tracker.ceph.com/issues/37968
// build a temporary crush topology of 2 hosts, 3 osds per host
OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
tmp.deepish_copy_from(osdmap);
const int expected_host_num = 2;
int osd_per_host = get_num_osds() / expected_host_num;
ASSERT_GE(osd_per_host, 3);
int index = 0;
for (int i = 0; i < (int)get_num_osds(); i++) {
if (i && i % osd_per_host == 0) {
++index;
}
stringstream osd_name;
stringstream host_name;
vector<string> move_to;
osd_name << "osd." << i;
host_name << "host-" << index;
move_to.push_back("root=default");
string host_loc = "host=" + host_name.str();
move_to.push_back(host_loc);
auto r = crush_move(tmp, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
// build crush rule
CrushWrapper crush;
get_crush(tmp, crush);
string rule_name = "rule_37968";
int rule_type = pg_pool_t::TYPE_ERASURE;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
string root_name = "default";
int root = crush.get_item_id(root_name);
int steps = 6;
crush_rule *rule = crush_make_rule(steps, rule_type);
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSE_INDEP, 2, 1 /* host*/);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSE_INDEP, 2, 0 /* osd */);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
tmp.apply_incremental(pending_inc);
}
// create an erasure-coded pool referencing the above rule
int64_t pool_37968;
{
OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
new_pool_inc.new_pool_max = tmp.get_pool_max();
new_pool_inc.fsid = tmp.get_fsid();
pg_pool_t empty;
pool_37968 = ++new_pool_inc.new_pool_max;
pg_pool_t *p = new_pool_inc.get_new_pool(pool_37968, &empty);
p->size = 4;
p->set_pg_num(8);
p->set_pgp_num(8);
p->type = pg_pool_t::TYPE_ERASURE;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[pool_37968] = "pool_37968";
tmp.apply_incremental(new_pool_inc);
}
pg_t ec_pg(0, pool_37968);
pg_t ec_pgid = tmp.raw_pg_to_pg(ec_pg);
int from = -1;
int to = -1;
{
// insert a valid pg_upmap_item
vector<int> ec_up;
int ec_up_primary;
tmp.pg_to_raw_up(ec_pgid, &ec_up, &ec_up_primary);
ASSERT_TRUE(ec_up.size() == 4);
from = *(ec_up.begin());
ASSERT_TRUE(from >= 0);
auto parent = tmp.crush->get_parent_of_type(from, 1 /* host */, rno);
ASSERT_TRUE(parent < 0);
// pick an osd of the same parent with *from*
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(ec_up.begin(), ec_up.end(), i) == ec_up.end()) {
auto p = tmp.crush->get_parent_of_type(i, 1 /* host */, rno);
if (p == parent) {
to = i;
break;
}
}
}
ASSERT_TRUE(to >= 0);
ASSERT_TRUE(from != to);
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(from, to));
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.new_pg_upmap_items[ec_pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(ec_pgid));
}
{
// *clean_pg_upmaps* should not remove the above upmap_item
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmp, pending_inc);
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(ec_pgid));
}
}
{
// TEST pg_upmap
{
// STEP-1: enumerate all children of up[0]'s parent,
// replace up[1] with one of them (other than up[0])
int parent = osdmap.crush->get_parent_of_type(up[0],
osdmap.crush->get_type_id("host"));
set<int> candidates;
osdmap.crush->get_leaves(osdmap.crush->get_item_name(parent), &candidates);
ASSERT_LT(1U, candidates.size());
int replaced_by = -1;
for (auto c: candidates) {
if (c != up[0]) {
replaced_by = c;
break;
}
}
{
// Check we can handle a negative pg_upmap value
vector<int32_t> new_pg_upmap;
new_pg_upmap.push_back(up[0]);
new_pg_upmap.push_back(-823648512);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
new_pg_upmap.begin(), new_pg_upmap.end());
osdmap.apply_incremental(pending_inc);
vector<int> new_up;
int new_up_primary;
// crucial call - _apply_upmap should ignore the negative value
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
}
ASSERT_NE(-1, replaced_by);
// generate a new pg_upmap item and apply
vector<int32_t> new_pg_upmap;
new_pg_upmap.push_back(up[0]);
new_pg_upmap.push_back(replaced_by); // up[1] -> replaced_by
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
new_pg_upmap.begin(), new_pg_upmap.end());
osdmap.apply_incremental(pending_inc);
{
// validate pg_upmap is there
vector<int> new_up;
int new_up_primary;
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
ASSERT_EQ(new_up.size(), up.size());
ASSERT_EQ(new_up[0], new_pg_upmap[0]);
ASSERT_EQ(new_up[1], new_pg_upmap[1]);
      // and we should have two OSDs from the same host now..
int parent_0 = osdmap.crush->get_parent_of_type(new_up[0],
osdmap.crush->get_type_id("host"));
int parent_1 = osdmap.crush->get_parent_of_type(new_up[1],
osdmap.crush->get_type_id("host"));
ASSERT_EQ(parent_0, parent_1);
}
}
{
// STEP-2: apply cure
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
osdmap.apply_incremental(pending_inc);
{
// validate pg_upmap is gone (reverted)
vector<int> new_up;
int new_up_primary;
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
ASSERT_EQ(new_up, up);
ASSERT_EQ(new_up_primary, up_primary);
}
}
}
{
// TEST pg_upmap_items
// enumerate all used hosts first
set<int> parents;
for (auto u: up) {
int parent = osdmap.crush->get_parent_of_type(u,
osdmap.crush->get_type_id("host"));
ASSERT_GT(0, parent);
parents.insert(parent);
}
int candidate_parent = 0;
set<int> candidate_children;
vector<int> up_after_out;
{
// STEP-1: try mark out up[1] and all other OSDs from the same host
int parent = osdmap.crush->get_parent_of_type(up[1],
osdmap.crush->get_type_id("host"));
set<int> children;
osdmap.crush->get_leaves(osdmap.crush->get_item_name(parent),
&children);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
for (auto c: children) {
pending_inc.new_weight[c] = CEPH_OSD_OUT;
}
OSDMap tmpmap;
tmpmap.deepish_copy_from(osdmap);
tmpmap.apply_incremental(pending_inc);
vector<int> new_up;
int new_up_primary;
tmpmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
// verify that we'll have OSDs from a different host..
int will_choose = -1;
for (auto o: new_up) {
int parent = tmpmap.crush->get_parent_of_type(o,
osdmap.crush->get_type_id("host"));
if (!parents.count(parent)) {
will_choose = o;
candidate_parent = parent; // record
break;
}
}
ASSERT_LT(-1, will_choose); // it is an OSD!
ASSERT_NE(candidate_parent, 0);
osdmap.crush->get_leaves(osdmap.crush->get_item_name(candidate_parent),
&candidate_children);
ASSERT_TRUE(candidate_children.count(will_choose));
candidate_children.erase(will_choose);
ASSERT_FALSE(candidate_children.empty());
up_after_out = new_up; // needed for verification..
}
{
// Make sure we can handle a negative pg_upmap_item
int victim = up[0];
int replaced_by = -823648512;
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(victim, replaced_by));
// apply
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
osdmap.apply_incremental(pending_inc);
vector<int> new_up;
int new_up_primary;
// crucial call - _apply_upmap should ignore the negative value
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
}
{
// STEP-2: generating a new pg_upmap_items entry by
// replacing up[0] with one coming from candidate_children
int victim = up[0];
int replaced_by = *candidate_children.begin();
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(victim, replaced_by));
// apply
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
osdmap.apply_incremental(pending_inc);
{
// validate pg_upmap_items is there
vector<int> new_up;
int new_up_primary;
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
ASSERT_EQ(new_up.size(), up.size());
ASSERT_TRUE(std::find(new_up.begin(), new_up.end(), replaced_by) !=
new_up.end());
// and up[1] too
ASSERT_TRUE(std::find(new_up.begin(), new_up.end(), up[1]) !=
new_up.end());
}
}
{
// STEP-3: mark out up[1] and all other OSDs from the same host
int parent = osdmap.crush->get_parent_of_type(up[1],
osdmap.crush->get_type_id("host"));
set<int> children;
osdmap.crush->get_leaves(osdmap.crush->get_item_name(parent),
&children);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
for (auto c: children) {
pending_inc.new_weight[c] = CEPH_OSD_OUT;
}
osdmap.apply_incremental(pending_inc);
{
// validate we have two OSDs from the same host now..
vector<int> new_up;
int new_up_primary;
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
ASSERT_EQ(up.size(), new_up.size());
int parent_0 = osdmap.crush->get_parent_of_type(new_up[0],
osdmap.crush->get_type_id("host"));
int parent_1 = osdmap.crush->get_parent_of_type(new_up[1],
osdmap.crush->get_type_id("host"));
ASSERT_EQ(parent_0, parent_1);
}
}
{
// STEP-4: apply cure
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
osdmap.apply_incremental(pending_inc);
{
// validate pg_upmap_items is gone (reverted)
vector<int> new_up;
int new_up_primary;
osdmap.pg_to_raw_up(pgid, &new_up, &new_up_primary);
ASSERT_EQ(new_up, up_after_out);
}
}
}
}
TEST_F(OSDMapTest, BUG_38897) {
// http://tracker.ceph.com/issues/38897
// build a fresh map with 12 OSDs, without any default pools
set_up_map(12, true);
const string pool_1("pool1");
const string pool_2("pool2");
int64_t pool_1_id = -1;
{
// build customized crush rule for "pool1"
string host_name = "host_for_pool_1";
    // build a customized host to capture osd.1~4
for (int i = 1; i < 5; i++) {
stringstream osd_name;
vector<string> move_to;
osd_name << "osd." << i;
move_to.push_back("root=default");
string host_loc = "host=" + host_name;
move_to.push_back(host_loc);
auto r = crush_move(osdmap, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
CrushWrapper crush;
get_crush(osdmap, crush);
auto host_id = crush.get_item_id(host_name);
ASSERT_TRUE(host_id < 0);
string rule_name = "rule_for_pool1";
int rule_type = pg_pool_t::TYPE_REPLICATED;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
int steps = 7;
crush_rule *rule = crush_make_rule(steps, rule_type);
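    // crush_rule_set_step(rule, step, op, arg1, arg2): for TAKE, arg1 is
    // the item/bucket id to start from; for CHOOSELEAF_FIRSTN, arg1 is the
    // number of leaves to choose and arg2 the bucket type to descend to
    // (0 here means osd)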
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
// always choose osd.0
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    // then pick two other osds from the customized host
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, host_id, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
osdmap.apply_incremental(pending_inc);
}
// create "pool1"
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pool_max = osdmap.get_pool_max();
auto pool_id = ++pending_inc.new_pool_max;
pool_1_id = pool_id;
pg_pool_t empty;
auto p = pending_inc.get_new_pool(pool_id, &empty);
p->size = 3;
p->min_size = 1;
p->set_pg_num(3);
p->set_pgp_num(3);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
pending_inc.new_pool_names[pool_id] = pool_1;
osdmap.apply_incremental(pending_inc);
ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
ASSERT_TRUE(osdmap.get_pool_name(pool_id) == pool_1);
{
for (unsigned i = 0; i < 3; i++) {
// 1.x -> [1]
pg_t rawpg(i, pool_id);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up;
int up_primary;
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
ASSERT_TRUE(up.size() == 3);
ASSERT_TRUE(up[0] == 0);
// insert a new pg_upmap
vector<int32_t> new_up;
// and remap 1.x to osd.1 only
// this way osd.0 is deemed to be *underfull*
// and osd.1 is deemed to be *overfull*
new_up.push_back(1);
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
new_up.begin(), new_up.end());
osdmap.apply_incremental(pending_inc);
}
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
ASSERT_TRUE(up.size() == 1);
ASSERT_TRUE(up[0] == 1);
}
}
}
{
// build customized crush rule for "pool2"
string host_name = "host_for_pool_2";
// build a customized host to capture osd.6~11
for (int i = 6; i < (int)get_num_osds(); i++) {
stringstream osd_name;
vector<string> move_to;
osd_name << "osd." << i;
move_to.push_back("root=default");
string host_loc = "host=" + host_name;
move_to.push_back(host_loc);
auto r = crush_move(osdmap, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
CrushWrapper crush;
get_crush(osdmap, crush);
auto host_id = crush.get_item_id(host_name);
ASSERT_TRUE(host_id < 0);
string rule_name = "rule_for_pool2";
int rule_type = pg_pool_t::TYPE_REPLICATED;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
int steps = 7;
crush_rule *rule = crush_make_rule(steps, rule_type);
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
// always choose osd.0
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
    // then pick two other osds from the customized host
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, host_id, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
osdmap.apply_incremental(pending_inc);
}
// create "pool2"
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pool_max = osdmap.get_pool_max();
auto pool_id = ++pending_inc.new_pool_max;
pg_pool_t empty;
auto p = pending_inc.get_new_pool(pool_id, &empty);
p->size = 3;
// include a single PG
p->set_pg_num(1);
p->set_pgp_num(1);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
pending_inc.new_pool_names[pool_id] = pool_2;
osdmap.apply_incremental(pending_inc);
ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
ASSERT_TRUE(osdmap.get_pool_name(pool_id) == pool_2);
pg_t rawpg(0, pool_id);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
EXPECT_TRUE(!osdmap.have_pg_upmaps(pgid));
vector<int> up;
int up_primary;
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
ASSERT_TRUE(up.size() == 3);
ASSERT_TRUE(up[0] == 0);
{
// build a pg_upmap_item that will
// remap pg out from *underfull* osd.0
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(0, 10)); // osd.0 -> osd.10
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
osdmap.apply_incremental(pending_inc);
ASSERT_TRUE(osdmap.have_pg_upmaps(pgid));
vector<int> up;
int up_primary;
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
ASSERT_TRUE(up.size() == 3);
ASSERT_TRUE(up[0] == 10);
}
}
// ready to go
{
set<int64_t> only_pools;
ASSERT_TRUE(pool_1_id >= 0);
only_pools.insert(pool_1_id);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
// require perfect distribution! (max deviation 0)
osdmap.calc_pg_upmaps(g_ceph_context,
0, // so we can force optimizing
100,
only_pools,
&pending_inc);
osdmap.apply_incremental(pending_inc);
}
}
TEST_F(OSDMapTest, BUG_40104) {
// http://tracker.ceph.com/issues/40104
int big_osd_num = 5000;
int big_pg_num = 10000;
set_up_map(big_osd_num, true);
int pool_id;
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pool_max = osdmap.get_pool_max();
pool_id = ++pending_inc.new_pool_max;
pg_pool_t empty;
auto p = pending_inc.get_new_pool(pool_id, &empty);
p->size = 3;
p->min_size = 1;
p->set_pg_num(big_pg_num);
p->set_pgp_num(big_pg_num);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = 0;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
pending_inc.new_pool_names[pool_id] = "big_pool";
osdmap.apply_incremental(pending_inc);
ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
ASSERT_TRUE(osdmap.get_pool_name(pool_id) == "big_pool");
}
{
// generate pg_upmap_items for each pg
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
for (int i = 0; i < big_pg_num; i++) {
pg_t rawpg(i, pool_id);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
vector<int> up;
int up_primary;
osdmap.pg_to_raw_up(pgid, &up, &up_primary);
ASSERT_TRUE(up.size() == 3);
int victim = up[0];
int replaced_by = random() % big_osd_num;
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
// note that it might or might not be valid, we don't care
new_pg_upmap_items.push_back(make_pair(victim, replaced_by));
pending_inc.new_pg_upmap_items[pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
}
osdmap.apply_incremental(pending_inc);
}
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
auto start = mono_clock::now();
clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
auto latency = mono_clock::now() - start;
std::cout << "clean_pg_upmaps (~" << big_pg_num
<< " pg_upmap_items) latency:" << timespan_str(latency)
<< std::endl;
}
}
TEST_F(OSDMapTest, BUG_42052) {
// https://tracker.ceph.com/issues/42052
set_up_map(6, true);
const string pool_name("pool");
// build customized crush rule for "pool"
CrushWrapper crush;
get_crush(osdmap, crush);
string rule_name = "rule";
int rule_type = pg_pool_t::TYPE_REPLICATED;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
int steps = 8;
crush_rule *rule = crush_make_rule(steps, rule_type);
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
// always choose osd.0, osd.1, osd.2
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 0, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 1, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
  crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, 2, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
osdmap.apply_incremental(pending_inc);
}
// create "pool"
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pool_max = osdmap.get_pool_max();
auto pool_id = ++pending_inc.new_pool_max;
pg_pool_t empty;
auto p = pending_inc.get_new_pool(pool_id, &empty);
p->size = 3;
p->min_size = 1;
p->set_pg_num(1);
p->set_pgp_num(1);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
pending_inc.new_pool_names[pool_id] = pool_name;
osdmap.apply_incremental(pending_inc);
ASSERT_TRUE(osdmap.have_pg_pool(pool_id));
ASSERT_TRUE(osdmap.get_pool_name(pool_id) == pool_name);
pg_t rawpg(0, pool_id);
pg_t pgid = osdmap.raw_pg_to_pg(rawpg);
{
// pg_upmap 1.0 [2,3,5]
vector<int32_t> new_up{2,3,5};
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap[pgid] = mempool::osdmap::vector<int32_t>(
new_up.begin(), new_up.end());
osdmap.apply_incremental(pending_inc);
}
{
// pg_upmap_items 1.0 [0,3,4,5]
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(0, 3));
new_pg_upmap_items.push_back(make_pair(4, 5));
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.new_pg_upmap_items[pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
osdmap.apply_incremental(pending_inc);
}
{
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, osdmap, pending_inc);
osdmap.apply_incremental(pending_inc);
ASSERT_FALSE(osdmap.have_pg_upmaps(pgid));
}
}
TEST_F(OSDMapTest, BUG_42485) {
set_up_map(60);
{
    // build a temporary crush topology of 2 datacenters, 3 racks per dc,
    // 1 host per rack, 10 osds per host
OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
tmp.deepish_copy_from(osdmap);
const int expected_host_num = 6;
int osd_per_host = (int)get_num_osds() / expected_host_num;
ASSERT_GE(osd_per_host, 10);
int host_per_dc = 3;
int index = 0;
int dc_index = 0;
for (int i = 0; i < (int)get_num_osds(); i++) {
if (i && i % osd_per_host == 0) {
++index;
}
if (i && i % (host_per_dc * osd_per_host) == 0) {
++dc_index;
}
stringstream osd_name;
stringstream host_name;
stringstream rack_name;
stringstream dc_name;
vector<string> move_to;
osd_name << "osd." << i;
host_name << "host-" << index;
rack_name << "rack-" << index;
dc_name << "dc-" << dc_index;
move_to.push_back("root=default");
string dc_loc = "datacenter=" + dc_name.str();
move_to.push_back(dc_loc);
string rack_loc = "rack=" + rack_name.str();
move_to.push_back(rack_loc);
string host_loc = "host=" + host_name.str();
move_to.push_back(host_loc);
auto r = crush_move(tmp, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
// build crush rule
CrushWrapper crush;
get_crush(tmp, crush);
string rule_name = "rule_xeus_993_1";
int rule_type = pg_pool_t::TYPE_REPLICATED;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
string root_name = "default";
string dc_1 = "dc-0";
int dc1 = crush.get_item_id(dc_1);
string dc_2 = "dc-1";
int dc2 = crush.get_item_id(dc_2);
int steps = 8;
crush_rule *rule = crush_make_rule(steps, rule_type);
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, dc1, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 3 /* rack */);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, dc2, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, 2, 3 /* rack */);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
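    // the rule emits two chooseleaf picks per datacenter (2 + 2), matching
    // the pool size of 4 used below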
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
tmp.apply_incremental(pending_inc);
}
    // create a replicated pool referencing the above rule
int64_t pool_xeus_993;
{
OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
new_pool_inc.new_pool_max = tmp.get_pool_max();
new_pool_inc.fsid = tmp.get_fsid();
pg_pool_t empty;
pool_xeus_993 = ++new_pool_inc.new_pool_max;
pg_pool_t *p = new_pool_inc.get_new_pool(pool_xeus_993, &empty);
p->size = 4;
p->set_pg_num(4096);
p->set_pgp_num(4096);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[pool_xeus_993] = "pool_xeus_993";
tmp.apply_incremental(new_pool_inc);
}
pg_t rep_pg(0, pool_xeus_993);
pg_t rep_pgid = tmp.raw_pg_to_pg(rep_pg);
{
int from = -1;
int to = -1;
vector<int> rep_up;
int rep_up_primary;
tmp.pg_to_raw_up(rep_pgid, &rep_up, &rep_up_primary);
std::cout << "pgid " << rep_up << " up " << rep_up << std::endl;
ASSERT_TRUE(rep_up.size() == 4);
from = *(rep_up.begin());
ASSERT_TRUE(from >= 0);
auto dc_parent = tmp.crush->get_parent_of_type(from, 8 /* dc */, rno);
if (dc_parent == dc1)
dc_parent = dc2;
else
dc_parent = dc1;
auto rack_parent = tmp.crush->get_parent_of_type(from, 3 /* rack */, rno);
ASSERT_TRUE(dc_parent < 0);
ASSERT_TRUE(rack_parent < 0);
set<int> rack_parents;
for (auto &i: rep_up) {
if (i == from) continue;
auto rack_parent = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
rack_parents.insert(rack_parent);
}
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(rep_up.begin(), rep_up.end(), i) == rep_up.end()) {
auto dc_p = tmp.crush->get_parent_of_type(i, 8 /* dc */, rno);
auto rack_p = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
if (dc_p == dc_parent &&
rack_parents.find(rack_p) == rack_parents.end()) {
to = i;
break;
}
}
}
ASSERT_TRUE(to >= 0);
ASSERT_TRUE(from != to);
std::cout << "from " << from << " to " << to << std::endl;
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(from, to));
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.new_pg_upmap_items[rep_pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
}
pg_t rep_pg2(2, pool_xeus_993);
pg_t rep_pgid2 = tmp.raw_pg_to_pg(rep_pg2);
{
pg_t rep_pgid = rep_pgid2;
vector<int> from_osds{-1, -1};
vector<int> rep_up;
int rep_up_primary;
tmp.pg_to_raw_up(rep_pgid, &rep_up, &rep_up_primary);
ASSERT_TRUE(rep_up.size() == 4);
from_osds[0] = *(rep_up.begin());
from_osds[1] = *(rep_up.rbegin());
std::cout << "pgid " << rep_pgid2 << " up " << rep_up << std::endl;
ASSERT_TRUE(*(from_osds.begin()) >= 0);
ASSERT_TRUE(*(from_osds.rbegin()) >= 0);
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
for (auto &from: from_osds) {
int to = -1;
auto dc_parent = tmp.crush->get_parent_of_type(from, 8 /* dc */, rno);
if (dc_parent == dc1)
dc_parent = dc2;
else
dc_parent = dc1;
auto rack_parent = tmp.crush->get_parent_of_type(from, 3 /* rack */, rno);
ASSERT_TRUE(dc_parent < 0);
ASSERT_TRUE(rack_parent < 0);
set<int> rack_parents;
for (auto &i: rep_up) {
if (i == from) continue;
auto rack_parent = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
rack_parents.insert(rack_parent);
}
for (auto &i: new_pg_upmap_items) {
auto rack_from = tmp.crush->get_parent_of_type(i.first, 3, rno);
auto rack_to = tmp.crush->get_parent_of_type(i.second, 3, rno);
rack_parents.insert(rack_from);
rack_parents.insert(rack_to);
}
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(rep_up.begin(), rep_up.end(), i) == rep_up.end()) {
auto dc_p = tmp.crush->get_parent_of_type(i, 8 /* dc */, rno);
auto rack_p = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
if (dc_p == dc_parent &&
rack_parents.find(rack_p) == rack_parents.end()) {
to = i;
break;
}
}
}
ASSERT_TRUE(to >= 0);
ASSERT_TRUE(from != to);
std::cout << "from " << from << " to " << to << std::endl;
new_pg_upmap_items.push_back(make_pair(from, to));
}
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.new_pg_upmap_items[rep_pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
}
{
      // *clean_pg_upmaps* should remove the above upmap_items
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmp, pending_inc);
tmp.apply_incremental(pending_inc);
ASSERT_FALSE(tmp.have_pg_upmaps(rep_pgid));
ASSERT_FALSE(tmp.have_pg_upmaps(rep_pgid2));
}
}
}
TEST(PGTempMap, basic)
{
PGTempMap m;
pg_t a(1,1);
for (auto i=3; i<1000; ++i) {
pg_t x(i, 1);
m.set(x, {static_cast<int>(i)});
}
pg_t b(2,1);
m.set(a, {1, 2});
ASSERT_NE(m.find(a), m.end());
ASSERT_EQ(m.find(a), m.begin());
ASSERT_EQ(m.find(b), m.end());
ASSERT_EQ(998u, m.size());
}
TEST_F(OSDMapTest, BUG_43124) {
set_up_map(200);
{
// https://tracker.ceph.com/issues/43124
    // build a temporary crush topology of 5 racks,
    // 4 hosts per rack, 10 osds per host
OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
tmp.deepish_copy_from(osdmap);
const int expected_host_num = 20;
int osd_per_host = (int)get_num_osds() / expected_host_num;
ASSERT_GE(osd_per_host, 10);
int host_per_rack = 4;
int index = 0;
int rack_index = 0;
for (int i = 0; i < (int)get_num_osds(); i++) {
if (i && i % osd_per_host == 0) {
++index;
}
if (i && i % (host_per_rack * osd_per_host) == 0) {
++rack_index;
}
stringstream osd_name;
stringstream host_name;
stringstream rack_name;
vector<string> move_to;
osd_name << "osd." << i;
host_name << "host-" << index;
rack_name << "rack-" << rack_index;
move_to.push_back("root=default");
string rack_loc = "rack=" + rack_name.str();
move_to.push_back(rack_loc);
string host_loc = "host=" + host_name.str();
move_to.push_back(host_loc);
auto r = crush_move(tmp, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
// build crush rule
CrushWrapper crush;
get_crush(tmp, crush);
string rule_name = "rule_angel_1944";
int rule_type = pg_pool_t::TYPE_ERASURE;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
int steps = 6;
string root_name = "default";
int root = crush.get_item_id(root_name);
crush_rule *rule = crush_make_rule(steps, rule_type);
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSE_FIRSTN, 4, 3 /* rack */);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_INDEP, 3, 1 /* host */);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
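    // 4 racks x 3 hosts per rack = 12 osds per PG, matching the erasure
    // pool size of 12 created below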
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
tmp.apply_incremental(pending_inc);
}
{
stringstream oss;
crush.dump_tree(&oss, NULL);
std::cout << oss.str() << std::endl;
Formatter *f = Formatter::create("json-pretty");
f->open_object_section("crush_rules");
crush.dump_rules(f);
f->close_section();
f->flush(cout);
delete f;
}
    // create an erasure-coded pool referencing the above rule
int64_t pool_angel_1944;
{
OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
new_pool_inc.new_pool_max = tmp.get_pool_max();
new_pool_inc.fsid = tmp.get_fsid();
pg_pool_t empty;
pool_angel_1944 = ++new_pool_inc.new_pool_max;
pg_pool_t *p = new_pool_inc.get_new_pool(pool_angel_1944, &empty);
p->size = 12;
p->set_pg_num(4096);
p->set_pgp_num(4096);
p->type = pg_pool_t::TYPE_ERASURE;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[pool_angel_1944] = "pool_angel_1944";
tmp.apply_incremental(new_pool_inc);
}
pg_t rep_pg(0, pool_angel_1944);
pg_t rep_pgid = tmp.raw_pg_to_pg(rep_pg);
{
// insert a pg_upmap_item
int from = -1;
int to = -1;
vector<int> rep_up;
int rep_up_primary;
tmp.pg_to_raw_up(rep_pgid, &rep_up, &rep_up_primary);
std::cout << "pgid " << rep_pgid << " up " << rep_up << std::endl;
ASSERT_TRUE(rep_up.size() == 12);
from = *(rep_up.begin());
ASSERT_TRUE(from >= 0);
auto from_rack = tmp.crush->get_parent_of_type(from, 3 /* rack */, rno);
set<int> failure_domains;
for (auto &osd : rep_up) {
failure_domains.insert(tmp.crush->get_parent_of_type(osd, 1 /* host */, rno));
}
for (int i = 0; i < (int)get_num_osds(); i++) {
if (std::find(rep_up.begin(), rep_up.end(), i) == rep_up.end()) {
auto to_rack = tmp.crush->get_parent_of_type(i, 3 /* rack */, rno);
auto to_host = tmp.crush->get_parent_of_type(i, 1 /* host */, rno);
if (to_rack != from_rack && failure_domains.count(to_host) == 0) {
to = i;
break;
}
}
}
ASSERT_TRUE(to >= 0);
ASSERT_TRUE(from != to);
std::cout << "from " << from << " to " << to << std::endl;
vector<pair<int32_t,int32_t>> new_pg_upmap_items;
new_pg_upmap_items.push_back(make_pair(from, to));
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.new_pg_upmap_items[rep_pgid] =
mempool::osdmap::vector<pair<int32_t,int32_t>>(
new_pg_upmap_items.begin(), new_pg_upmap_items.end());
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
}
{
      // *clean_pg_upmaps* should not remove the above upmap_item
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmp, pending_inc);
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
}
}
}
TEST_F(OSDMapTest, BUG_48884)
{
set_up_map(12);
unsigned int host_index = 1;
for (unsigned int x=0; x < get_num_osds();) {
// Create three hosts with four osds each
for (unsigned int y=0; y < 4; y++) {
stringstream osd_name;
stringstream host_name;
vector<string> move_to;
osd_name << "osd." << x;
host_name << "host-" << host_index;
move_to.push_back("root=default");
move_to.push_back("rack=localrack");
string host_loc = "host=" + host_name.str();
move_to.push_back(host_loc);
int r = crush_move(osdmap, osd_name.str(), move_to);
ASSERT_EQ(0, r);
x++;
}
host_index++;
}
CrushWrapper crush;
get_crush(osdmap, crush);
auto host_id = crush.get_item_id("localhost");
crush.remove_item(g_ceph_context, host_id, false);
OSDMap::Incremental pending_inc(osdmap.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
osdmap.apply_incremental(pending_inc);
PGMap pgmap;
osd_stat_t stats, stats_null;
stats.statfs.total = 500000;
stats.statfs.available = 50000;
stats.statfs.omap_allocated = 50000;
stats.statfs.internal_metadata = 50000;
stats_null.statfs.total = 0;
stats_null.statfs.available = 0;
stats_null.statfs.omap_allocated = 0;
stats_null.statfs.internal_metadata = 0;
for (unsigned int x=0; x < get_num_osds(); x++) {
if (x > 3 && x < 8) {
pgmap.osd_stat.insert({x,stats_null});
} else {
pgmap.osd_stat.insert({x,stats});
}
}
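  // expected aggregates for the 8 osds that carry real stats, assuming
  // byte values are truncated to KiB (>>10): total = 8 * (500000 >> 10)
  // = 3904 kb, used = 8 * (450000 >> 10) = 3512 kb, and omap/meta/avail
  // are each 8 * (50000 >> 10) = 384 kb, as asserted below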
stringstream ss;
boost::scoped_ptr<Formatter> f(Formatter::create("json-pretty"));
print_osd_utilization(osdmap, pgmap, ss, f.get(), true, "root");
JSONParser parser;
parser.parse(ss.str().c_str(), static_cast<int>(ss.str().size()));
auto iter = parser.find_first();
for (const auto& bucket : (*iter)->get_array_elements()) {
JSONParser parser2;
parser2.parse(bucket.c_str(), static_cast<int>(bucket.size()));
auto* obj = parser2.find_obj("name");
if (obj->get_data().compare("localrack") == 0) {
obj = parser2.find_obj("kb");
ASSERT_EQ(obj->get_data(), "3904");
obj = parser2.find_obj("kb_used");
ASSERT_EQ(obj->get_data(), "3512");
obj = parser2.find_obj("kb_used_omap");
ASSERT_EQ(obj->get_data(), "384");
obj = parser2.find_obj("kb_used_meta");
ASSERT_EQ(obj->get_data(), "384");
obj = parser2.find_obj("kb_avail");
ASSERT_EQ(obj->get_data(), "384");
}
}
}
TEST_P(OSDMapTest, BUG_51842) {
set_up_map(3, true);
OSDMap tmp; // use a tmpmap here, so we do not dirty origin map..
tmp.deepish_copy_from(osdmap);
for (int i = 0; i < (int)get_num_osds(); i++) {
stringstream osd_name;
stringstream host_name;
vector<string> move_to;
osd_name << "osd." << i;
host_name << "host=host-" << i;
move_to.push_back("root=infra-1706");
move_to.push_back(host_name.str());
auto r = crush_move(tmp, osd_name.str(), move_to);
ASSERT_EQ(0, r);
}
// build crush rule
CrushWrapper crush;
get_crush(tmp, crush);
string rule_name = "infra-1706";
int rule_type = pg_pool_t::TYPE_REPLICATED;
ASSERT_TRUE(!crush.rule_exists(rule_name));
int rno;
for (rno = 0; rno < crush.get_max_rules(); rno++) {
if (!crush.rule_exists(rno))
break;
}
string root_bucket = "infra-1706";
int root = crush.get_item_id(root_bucket);
int steps = 5;
crush_rule *rule = crush_make_rule(steps, rule_type);
int step = 0;
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSELEAF_TRIES, 5, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_SET_CHOOSE_TRIES, 100, 0);
crush_rule_set_step(rule, step++, CRUSH_RULE_TAKE, root, 0);
  // note: it's ok to set a step like 'step chooseleaf_firstn 0 host'
std::pair<int, int> param = GetParam();
int rep_num = std::get<0>(param);
int domain = std::get<1>(param);
crush_rule_set_step(rule, step++, CRUSH_RULE_CHOOSELEAF_FIRSTN, rep_num, domain);
crush_rule_set_step(rule, step++, CRUSH_RULE_EMIT, 0, 0);
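  // rep_num/domain come from the parameterized values instantiated at the
  // bottom of this file: chooseleaf firstn {0,3} x {osd,host}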
ASSERT_TRUE(step == steps);
auto r = crush_add_rule(crush.get_crush_map(), rule, rno);
ASSERT_TRUE(r >= 0);
crush.set_rule_name(rno, rule_name);
{
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.crush.clear();
crush.encode(pending_inc.crush, CEPH_FEATURES_SUPPORTED_DEFAULT);
tmp.apply_incremental(pending_inc);
}
{
stringstream oss;
crush.dump_tree(&oss, NULL);
std::cout << oss.str() << std::endl;
Formatter *f = Formatter::create("json-pretty");
f->open_object_section("crush_rules");
crush.dump_rules(f);
f->close_section();
f->flush(cout);
delete f;
}
// create a replicated pool referencing the above rule
int64_t pool_infra_1706;
{
OSDMap::Incremental new_pool_inc(tmp.get_epoch() + 1);
new_pool_inc.new_pool_max = tmp.get_pool_max();
new_pool_inc.fsid = tmp.get_fsid();
pg_pool_t empty;
pool_infra_1706 = ++new_pool_inc.new_pool_max;
pg_pool_t *p = new_pool_inc.get_new_pool(pool_infra_1706, &empty);
p->size = 3;
p->min_size = 1;
p->set_pg_num(256);
p->set_pgp_num(256);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = rno;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[pool_infra_1706] = "pool_infra_1706";
tmp.apply_incremental(new_pool_inc);
}
// add upmaps
pg_t rep_pg(3, pool_infra_1706);
pg_t rep_pgid = tmp.raw_pg_to_pg(rep_pg);
pg_t rep_pg2(4, pool_infra_1706);
pg_t rep_pgid2 = tmp.raw_pg_to_pg(rep_pg2);
pg_t rep_pg3(6, pool_infra_1706);
pg_t rep_pgid3 = tmp.raw_pg_to_pg(rep_pg3);
{
OSDMap::Incremental pending_inc(tmp.get_epoch() + 1);
pending_inc.new_pg_upmap[rep_pgid] = mempool::osdmap::vector<int32_t>({1,0,2});
pending_inc.new_pg_upmap[rep_pgid2] = mempool::osdmap::vector<int32_t>({1,2,0});
pending_inc.new_pg_upmap[rep_pgid3] = mempool::osdmap::vector<int32_t>({1,2,0});
tmp.apply_incremental(pending_inc);
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid));
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid2));
ASSERT_TRUE(tmp.have_pg_upmaps(rep_pgid3));
}
{
// now, set pool size to 1
OSDMap tmpmap;
tmpmap.deepish_copy_from(tmp);
OSDMap::Incremental new_pool_inc(tmpmap.get_epoch() + 1);
pg_pool_t p = *tmpmap.get_pg_pool(pool_infra_1706);
p.size = 1;
p.last_change = new_pool_inc.epoch;
new_pool_inc.new_pools[pool_infra_1706] = p;
tmpmap.apply_incremental(new_pool_inc);
OSDMap::Incremental new_pending_inc(tmpmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmpmap, new_pending_inc);
tmpmap.apply_incremental(new_pending_inc);
// check pg upmaps
ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid));
ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid2));
ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid3));
}
{
// now, set pool size to 4
OSDMap tmpmap;
tmpmap.deepish_copy_from(tmp);
OSDMap::Incremental new_pool_inc(tmpmap.get_epoch() + 1);
pg_pool_t p = *tmpmap.get_pg_pool(pool_infra_1706);
p.size = 4;
p.last_change = new_pool_inc.epoch;
new_pool_inc.new_pools[pool_infra_1706] = p;
tmpmap.apply_incremental(new_pool_inc);
OSDMap::Incremental new_pending_inc(tmpmap.get_epoch() + 1);
clean_pg_upmaps(g_ceph_context, tmpmap, new_pending_inc);
tmpmap.apply_incremental(new_pending_inc);
// check pg upmaps
ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid));
ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid2));
ASSERT_TRUE(!tmpmap.have_pg_upmaps(rep_pgid3));
}
}
const string OSDMapTest::range_addrs[] = {"198.51.100.0/22", "10.2.5.102/32", "2001:db8::/48",
"3001:db8::/72", "4001:db8::/30", "5001:db8::/64", "6001:db8::/128", "7001:db8::/127"};
const string OSDMapTest::ip_addrs[] = {"198.51.100.14", "198.51.100.0", "198.51.103.255",
"10.2.5.102",
"2001:db8:0:0:0:0:0:0", "2001:db8:0:0:0:0001:ffff:ffff",
"2001:db8:0:ffff:ffff:ffff:ffff:ffff",
"3001:db8:0:0:0:0:0:0", "3001:db8:0:0:0:0001:ffff:ffff",
"3001:db8:0:0:00ff:ffff:ffff:ffff",
"4001:db8::", "4001:db8:0:0:0:0001:ffff:ffff",
"4001:dbb:ffff:ffff:ffff:ffff:ffff:ffff",
"5001:db8:0:0:0:0:0:0", "5001:db8:0:0:0:0:ffff:ffff",
"5001:db8:0:0:ffff:ffff:ffff:ffff",
"6001:db8:0:0:0:0:0:0",
"7001:db8:0:0:0:0:0:0", "7001:db8:0:0:0:0:0:0001"
};
const string OSDMapTest::unblocked_ip_addrs[] = { "0.0.0.0", "1.1.1.1", "192.168.1.1",
"198.51.99.255", "198.51.104.0",
"10.2.5.101", "10.2.5.103",
"2001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "2001:db8:0001::",
"3001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "3001:db8:0:0:0100::",
"4001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "4001:dbc::",
"5001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "5001:db8:0:0001:0:0:0:0",
"6001:db8:0:0:0:0:0:0001",
"7001:db7:ffff:ffff:ffff:ffff:ffff:ffff", "7001:db8:0:0:0:0:0:0002"
};
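// The three fixtures above are deliberately related: every entry of
// ip_addrs falls inside one of the CIDR blocks in range_addrs (e.g.
// 198.51.100.14 lies within 198.51.100.0/22), while every entry of
// unblocked_ip_addrs falls just outside (e.g. 198.51.99.255 and
// 198.51.104.0 border that same /22). The tests below rely on this
// pairing.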
TEST_F(OSDMapTest, blocklisting_ips) {
set_up_map(6); //whatever
OSDMap::Incremental new_blocklist_inc(osdmap.get_epoch() + 1);
for (const auto& a : ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
new_blocklist_inc.new_blocklist[addr] = ceph_clock_now();
}
osdmap.apply_incremental(new_blocklist_inc);
for (const auto& a: ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
ASSERT_TRUE(osdmap.is_blocklisted(addr, g_ceph_context));
}
for (const auto& a: unblocked_ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
ASSERT_FALSE(osdmap.is_blocklisted(addr, g_ceph_context));
}
OSDMap::Incremental rm_blocklist_inc(osdmap.get_epoch() + 1);
for (const auto& a : ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
rm_blocklist_inc.old_blocklist.push_back(addr);
}
osdmap.apply_incremental(rm_blocklist_inc);
for (const auto& a: ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
ASSERT_FALSE(osdmap.is_blocklisted(addr, g_ceph_context));
}
for (const auto& a: unblocked_ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (blocklisted) {
cout << "erroneously blocklisted " << addr << std::endl;
}
EXPECT_FALSE(blocklisted);
}
}
TEST_F(OSDMapTest, blocklisting_ranges) {
set_up_map(6); //whatever
OSDMap::Incremental range_blocklist_inc(osdmap.get_epoch() + 1);
for (const auto& a : range_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.type = entity_addr_t::TYPE_CIDR;
range_blocklist_inc.new_range_blocklist[addr] = ceph_clock_now();
}
osdmap.apply_incremental(range_blocklist_inc);
for (const auto& a: ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (!blocklisted) {
cout << "erroneously not blocklisted " << addr << std::endl;
}
ASSERT_TRUE(blocklisted);
}
for (const auto& a: unblocked_ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (blocklisted) {
cout << "erroneously blocklisted " << addr << std::endl;
}
EXPECT_FALSE(blocklisted);
}
OSDMap::Incremental rm_range_blocklist(osdmap.get_epoch() + 1);
for (const auto& a : range_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.type = entity_addr_t::TYPE_CIDR;
rm_range_blocklist.old_range_blocklist.push_back(addr);
}
osdmap.apply_incremental(rm_range_blocklist);
for (const auto& a: ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
ASSERT_FALSE(osdmap.is_blocklisted(addr, g_ceph_context));
}
for (const auto& a: unblocked_ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (blocklisted) {
cout << "erroneously blocklisted " << addr << std::endl;
}
EXPECT_FALSE(blocklisted);
}
}
TEST_F(OSDMapTest, blocklisting_everything) {
set_up_map(6); //whatever
OSDMap::Incremental range_blocklist_inc(osdmap.get_epoch() + 1);
entity_addr_t baddr;
baddr.parse("2001:db8::/0");
baddr.type = entity_addr_t::TYPE_CIDR;
range_blocklist_inc.new_range_blocklist[baddr] = ceph_clock_now();
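  // a /0 mask matches everything in its address family, so this single
  // range should blocklist every IPv6 address; the 1.1.1.1/0 swap below
  // does the same for IPv4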
osdmap.apply_incremental(range_blocklist_inc);
for (const auto& a: ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
if (addr.is_ipv4()) continue;
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (!blocklisted) {
cout << "erroneously not blocklisted " << addr << std::endl;
}
ASSERT_TRUE(blocklisted);
}
for (const auto& a: unblocked_ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
if (addr.is_ipv4()) continue;
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (!blocklisted) {
cout << "erroneously not blocklisted " << addr << std::endl;
}
ASSERT_TRUE(blocklisted);
}
OSDMap::Incremental swap_blocklist_inc(osdmap.get_epoch()+1);
swap_blocklist_inc.old_range_blocklist.push_back(baddr);
entity_addr_t caddr;
caddr.parse("1.1.1.1/0");
caddr.type = entity_addr_t::TYPE_CIDR;
swap_blocklist_inc.new_range_blocklist[caddr] = ceph_clock_now();
osdmap.apply_incremental(swap_blocklist_inc);
for (const auto& a: ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
if (!addr.is_ipv4()) continue;
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (!blocklisted) {
cout << "erroneously not blocklisted " << addr << std::endl;
}
ASSERT_TRUE(blocklisted);
}
for (const auto& a: unblocked_ip_addrs) {
entity_addr_t addr;
addr.parse(a);
addr.set_type(entity_addr_t::TYPE_LEGACY);
if (!addr.is_ipv4()) continue;
bool blocklisted = osdmap.is_blocklisted(addr, g_ceph_context);
if (!blocklisted) {
cout << "erroneously not blocklisted " << addr << std::endl;
}
ASSERT_TRUE(blocklisted);
}
}
TEST_F(OSDMapTest, ReadBalanceScore1) {
std::srand ( unsigned ( std::time(0) ) );
uint osd_rand = rand() % 13;
set_up_map(6 + osd_rand); //whatever
auto pools = osdmap.get_pools();
for (auto &[pid, pg_pool] : pools) {
const pg_pool_t *pi = osdmap.get_pg_pool(pid);
if (pi->is_replicated()) {
//cout << "pool " << pid << " " << pg_pool << std::endl;
auto replica_count = pi->get_size();
OSDMap::read_balance_info_t rbi;
auto rc = osdmap.calc_read_balance_score(g_ceph_context, pid, &rbi);
// "Normal" score is between 1 and num_osds
ASSERT_TRUE(rc == 0);
ASSERT_TRUE(score_in_range(rbi.adjusted_score));
ASSERT_TRUE(score_in_range(rbi.acting_adj_score));
ASSERT_TRUE(rbi.err_msg.empty());
// When all OSDs have primary_affinity 0, score should be 0
auto num_osds = get_num_osds();
set_primary_affinity_all(0.);
rc = osdmap.calc_read_balance_score(g_ceph_context, pid, &rbi);
ASSERT_TRUE(rc < 0);
ASSERT_TRUE(rbi.adjusted_score == 0.);
ASSERT_TRUE(rbi.acting_adj_score == 0.);
ASSERT_FALSE(rbi.err_msg.empty());
std::vector<uint> osds;
for (uint i = 0 ; i < num_osds ; i++) {
osds.push_back(i);
}
// Change primary_affinity of some OSDs to 1 others are 0
float fratio = 1. / (float)replica_count;
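      // if fewer than 1/replica_count of the osds are eligible primaries,
      // a valid primary placement is impossible, so the score computation
      // is expected to fail (rc < 0) until enough osds have affinity set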
for (int iter = 0 ; iter < 100 ; iter++) { // run the test 100 times
// Create random shuffle of OSDs
      std::mt19937 rng(rand());
      std::shuffle(osds.begin(), osds.end(), rng);
for (uint i = 0 ; i < num_osds ; i++) {
if ((float(i + 1) / float(num_osds)) < fratio) {
ASSERT_TRUE(osds[i] < num_osds);
osdmap.set_primary_affinity(osds[i], CEPH_OSD_MAX_PRIMARY_AFFINITY);
rc = osdmap.calc_read_balance_score(g_ceph_context, pid, &rbi);
ASSERT_TRUE(rc < 0);
ASSERT_TRUE(rbi.adjusted_score == 0.);
ASSERT_TRUE(rbi.acting_adj_score == 0.);
ASSERT_FALSE(rbi.err_msg.empty());
}
else {
if (rc < 0) {
ASSERT_TRUE(rbi.adjusted_score == 0.);
ASSERT_TRUE(rbi.acting_adj_score == 0.);
ASSERT_FALSE(rbi.err_msg.empty());
}
else {
ASSERT_TRUE(score_in_range(rbi.acting_adj_score, i + 1));
ASSERT_TRUE(rbi.err_msg.empty());
}
}
}
set_primary_affinity_all(0.);
}
}
}
}
TEST_F(OSDMapTest, ReadBalanceScore2) {
std::srand ( unsigned ( std::time(0) ) );
uint osd_num = 6 + rand() % 13;
set_up_map(osd_num, true);
for (int i = 0 ; i < 100 ; i++) { //running 100 random tests
uint num_pa_osds = 0;
float pa_sum = 0.;
OSDMap::read_balance_info_t rbi;
// set pa for all osds
for (uint j = 0 ; j < osd_num ; j++) {
uint pa = 1 + rand() % 100;
if (pa > 80)
pa = 100;
if (pa < 20)
pa = 0;
float fpa = (float)pa / 100.;
if (pa > 0) {
num_pa_osds++;
pa_sum += fpa;
}
osdmap.set_primary_affinity(j, int(fpa * CEPH_OSD_MAX_PRIMARY_AFFINITY));
}
float pa_ratio = pa_sum / (float) osd_num;
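    // if the average primary affinity is below 1/size, there is not enough
    // primary capacity to go around, so calc_read_balance_score is
    // expected to fail for the pool created below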
// create a pool with the current osdmap configuration
OSDMap::Incremental new_pool_inc(osdmap.get_epoch() + 1);
new_pool_inc.new_pool_max = osdmap.get_pool_max();
new_pool_inc.fsid = osdmap.get_fsid();
string pool_name = "rep_pool" + stringify(i);
uint64_t new_pid = set_rep_pool(pool_name, new_pool_inc, false);
ASSERT_TRUE(new_pid > 0);
osdmap.apply_incremental(new_pool_inc);
// now run the test on the pool.
const pg_pool_t *pi = osdmap.get_pg_pool(new_pid);
ASSERT_NE(pi, nullptr);
ASSERT_TRUE(pi->is_replicated());
float fratio = 1. / (float)pi->get_size();
auto rc = osdmap.calc_read_balance_score(g_ceph_context, new_pid, &rbi);
if (pa_ratio < fratio) {
ASSERT_TRUE(rc < 0);
ASSERT_FALSE(rbi.err_msg.empty());
ASSERT_TRUE(rbi.acting_adj_score == 0.);
ASSERT_TRUE(rbi.adjusted_score == 0.);
}
else {
if (rc < 0) {
ASSERT_TRUE(rbi.adjusted_score == 0.);
ASSERT_TRUE(rbi.acting_adj_score == 0.);
ASSERT_FALSE(rbi.err_msg.empty());
}
else {
if (rbi.err_msg.empty()) {
ASSERT_TRUE(score_in_range(rbi.acting_adj_score, num_pa_osds));
}
}
}
}
//TODO add ReadBalanceScore3 - with weighted osds.
}
TEST_F(OSDMapTest, read_balance_small_map) {
// Set up a map with 4 OSDs and default pools
set_up_map(4);
const vector<string> test_cases = {"basic", "prim_affinity"};
for (const auto & test : test_cases) {
if (test == "prim_affinity") {
// Make osd.0 off-limits for primaries by giving it prim affinity 0
OSDMap::Incremental pending_inc0(osdmap.get_epoch() + 1);
pending_inc0.new_primary_affinity[0] = 0;
osdmap.apply_incremental(pending_inc0);
// Ensure osd.0 has no primaries assigned to it
map<uint64_t,set<pg_t>> prim_pgs_by_osd, acting_prims_by_osd;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd, &acting_prims_by_osd);
ASSERT_TRUE(prim_pgs_by_osd[0].size() == 0);
ASSERT_TRUE(acting_prims_by_osd[0].size() == 0);
}
// Make sure capacity is balanced first
set<int64_t> only_pools;
only_pools.insert(my_rep_pool);
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
osdmap.calc_pg_upmaps(g_ceph_context,
0,
100,
only_pools,
&pending_inc);
osdmap.apply_incremental(pending_inc);
// Get read balance score before balancing
OSDMap::read_balance_info_t rb_info;
auto rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool, &rb_info);
ASSERT_TRUE(rc >= 0);
float read_balance_score_before = rb_info.adjusted_score;
// Calculate desired prim distributions to verify later
map<uint64_t,set<pg_t>> prim_pgs_by_osd_2, acting_prims_by_osd_2;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_2, &acting_prims_by_osd_2);
vector<uint64_t> osds_to_check;
for (const auto & [osd, pgs] : prim_pgs_by_osd_2) {
osds_to_check.push_back(osd);
}
map<uint64_t,float> desired_prim_dist;
rc = osdmap.calc_desired_primary_distribution(g_ceph_context, my_rep_pool,
osds_to_check, desired_prim_dist);
ASSERT_TRUE(rc >= 0);
// Balance reads
OSDMap::Incremental pending_inc_2(osdmap.get_epoch()+1);
int num_changes = osdmap.balance_primaries(g_ceph_context, my_rep_pool, &pending_inc_2, osdmap);
osdmap.apply_incremental(pending_inc_2);
if (test == "prim_affinity") {
// Ensure osd.0 still has no primaries assigned to it
map<uint64_t,set<pg_t>> prim_pgs_by_osd_3, acting_prims_by_osd_3;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_3, &acting_prims_by_osd_3);
ASSERT_TRUE(prim_pgs_by_osd_3[0].size() == 0);
ASSERT_TRUE(acting_prims_by_osd_3[0].size() == 0);
}
// Get read balance score after balancing
rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool, &rb_info);
ASSERT_TRUE(rc >= 0);
float read_balance_score_after = rb_info.adjusted_score;
// Ensure the score hasn't gotten worse
ASSERT_TRUE(read_balance_score_after <= read_balance_score_before);
// Check for improvements
if (num_changes > 0) {
ASSERT_TRUE(read_balance_score_after < read_balance_score_before);
// Check num primaries for each OSD is within range
map<uint64_t,set<pg_t>> prim_pgs_by_osd_4, acting_prims_by_osd_4;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_4, &acting_prims_by_osd_4);
for (const auto & [osd, primaries] : prim_pgs_by_osd_4) {
ASSERT_TRUE(primaries.size() >= floor(desired_prim_dist[osd] - 1));
ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
}
}
}
}
TEST_F(OSDMapTest, read_balance_large_map) {
// Set up a map with 60 OSDs and default pools
set_up_map(60);
const vector<string> test_cases = {"basic", "prim_affinity"};
for (const auto & test : test_cases) {
if (test == "prim_affinity") {
// Make osd.0 off-limits for primaries by giving it prim affinity 0
OSDMap::Incremental pending_inc0(osdmap.get_epoch() + 1);
pending_inc0.new_primary_affinity[0] = 0;
osdmap.apply_incremental(pending_inc0);
// Ensure osd.0 has no primaries assigned to it
map<uint64_t,set<pg_t>> prim_pgs_by_osd, acting_prims_by_osd;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd, &acting_prims_by_osd);
ASSERT_TRUE(prim_pgs_by_osd[0].size() == 0);
ASSERT_TRUE(acting_prims_by_osd[0].size() == 0);
}
// Make sure capacity is balanced first
set<int64_t> only_pools;
only_pools.insert(my_rep_pool);
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
osdmap.calc_pg_upmaps(g_ceph_context,
0,
100,
only_pools,
&pending_inc);
osdmap.apply_incremental(pending_inc);
// Get read balance score before balancing
OSDMap::read_balance_info_t rb_info;
auto rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool, &rb_info);
ASSERT_TRUE(rc >= 0);
float read_balance_score_before = rb_info.adjusted_score;
// Calculate desired prim distributions to verify later
map<uint64_t,set<pg_t>> prim_pgs_by_osd_2, acting_prims_by_osd_2;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_2, &acting_prims_by_osd_2);
vector<uint64_t> osds_to_check;
for (auto [osd, pgs] : prim_pgs_by_osd_2) {
osds_to_check.push_back(osd);
}
map<uint64_t,float> desired_prim_dist;
rc = osdmap.calc_desired_primary_distribution(g_ceph_context, my_rep_pool,
osds_to_check, desired_prim_dist);
ASSERT_TRUE(rc >= 0);
// Balance reads
OSDMap::Incremental pending_inc_2(osdmap.get_epoch()+1);
int num_changes = osdmap.balance_primaries(g_ceph_context, my_rep_pool, &pending_inc_2, osdmap);
osdmap.apply_incremental(pending_inc_2);
if (test == "prim_affinity") {
// Ensure osd.0 still has no primaries assigned to it
map<uint64_t,set<pg_t>> prim_pgs_by_osd_3, acting_prims_by_osd_3;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_3, &acting_prims_by_osd_3);
ASSERT_TRUE(prim_pgs_by_osd_3[0].size() == 0);
ASSERT_TRUE(acting_prims_by_osd_3[0].size() == 0);
}
// Get read balance score after balancing
rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool, &rb_info);
ASSERT_TRUE(rc >= 0);
float read_balance_score_after = rb_info.adjusted_score;
// Ensure the score hasn't gotten worse
ASSERT_TRUE(read_balance_score_after <= read_balance_score_before);
// Check for improvements
if (num_changes > 0) {
ASSERT_TRUE(read_balance_score_after < read_balance_score_before);
// Check num primaries for each OSD is within range
map<uint64_t,set<pg_t>> prim_pgs_by_osd_4, acting_prims_by_osd_4;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_4, &acting_prims_by_osd_4);
for (const auto & [osd, primaries] : prim_pgs_by_osd_4) {
ASSERT_TRUE(primaries.size() >= floor(desired_prim_dist[osd] - 1));
ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
}
}
}
}
TEST_F(OSDMapTest, read_balance_random_map) {
// Set up map with random number of OSDs
std::srand ( unsigned ( std::time(0) ) );
uint num_osds = 3 + (rand() % 10);
ASSERT_TRUE(num_osds >= 3);
set_up_map(num_osds);
const vector<string> test_cases = {"basic", "prim_affinity"};
for (const auto & test : test_cases) {
uint rand_osd = rand() % num_osds;
if (test == "prim_affinity") {
// Make a random OSD off-limits for primaries by giving it prim affinity 0
ASSERT_TRUE(rand_osd < num_osds);
OSDMap::Incremental pending_inc0(osdmap.get_epoch() + 1);
pending_inc0.new_primary_affinity[rand_osd] = 0;
osdmap.apply_incremental(pending_inc0);
// Ensure the random OSD has no primaries assigned to it
map<uint64_t,set<pg_t>> prim_pgs_by_osd, acting_prims_by_osd;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd, &acting_prims_by_osd);
ASSERT_TRUE(prim_pgs_by_osd[rand_osd].size() == 0);
ASSERT_TRUE(acting_prims_by_osd[rand_osd].size() == 0);
}
// Make sure capacity is balanced first
set<int64_t> only_pools;
only_pools.insert(my_rep_pool);
OSDMap::Incremental pending_inc(osdmap.get_epoch()+1);
osdmap.calc_pg_upmaps(g_ceph_context,
0,
100,
only_pools,
&pending_inc);
osdmap.apply_incremental(pending_inc);
// Get read balance score before balancing
OSDMap::read_balance_info_t rb_info;
auto rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool, &rb_info);
ASSERT_TRUE(rc >= 0);
float read_balance_score_before = rb_info.adjusted_score;
// Calculate desired prim distributions to verify later
map<uint64_t,set<pg_t>> prim_pgs_by_osd_2, acting_prims_by_osd_2;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_2, &acting_prims_by_osd_2);
vector<uint64_t> osds_to_check;
for (const auto & [osd, pgs] : prim_pgs_by_osd_2) {
osds_to_check.push_back(osd);
}
map<uint64_t,float> desired_prim_dist;
rc = osdmap.calc_desired_primary_distribution(g_ceph_context, my_rep_pool,
osds_to_check, desired_prim_dist);
ASSERT_TRUE(rc >= 0);
// Balance reads
OSDMap::Incremental pending_inc_2(osdmap.get_epoch()+1);
int num_changes = osdmap.balance_primaries(g_ceph_context, my_rep_pool, &pending_inc_2, osdmap);
osdmap.apply_incremental(pending_inc_2);
if (test == "prim_affinity") {
// Ensure the random OSD still has no primaries assigned to it
map<uint64_t,set<pg_t>> prim_pgs_by_osd_3, acting_prims_by_osd_3;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_3, &acting_prims_by_osd_3);
ASSERT_TRUE(prim_pgs_by_osd_3[rand_osd].size() == 0);
ASSERT_TRUE(acting_prims_by_osd_3[rand_osd].size() == 0);
}
// Get read balance score after balancing
rc = osdmap.calc_read_balance_score(g_ceph_context, my_rep_pool, &rb_info);
ASSERT_TRUE(rc >= 0);
float read_balance_score_after = rb_info.adjusted_score;
// Ensure the score hasn't gotten worse
ASSERT_TRUE(read_balance_score_after <= read_balance_score_before);
// Check for improvements
if (num_changes > 0) {
ASSERT_TRUE(read_balance_score_after < read_balance_score_before);
// Check num primaries for each OSD is within range
map<uint64_t,set<pg_t>> prim_pgs_by_osd_4, acting_prims_by_osd_4;
osdmap.get_pgs_by_osd(g_ceph_context, my_rep_pool, &prim_pgs_by_osd_4, &acting_prims_by_osd_4);
for (auto [osd, primaries] : prim_pgs_by_osd_4) {
ASSERT_TRUE(primaries.size() >= floor(desired_prim_dist[osd] - 1));
ASSERT_TRUE(primaries.size() <= ceil(desired_prim_dist[osd] + 1));
}
}
}
}
INSTANTIATE_TEST_SUITE_P(
OSDMap,
OSDMapTest,
::testing::Values(
std::make_pair<int, int>(0, 1), // chooseleaf firstn 0 host
std::make_pair<int, int>(3, 1), // chooseleaf firstn 3 host
std::make_pair<int, int>(0, 0), // chooseleaf firstn 0 osd
std::make_pair<int, int>(3, 0) // chooseleaf firstn 3 osd
)
);
| 98246 | 35.146799 | 101 |
cc
|
null |
ceph-main/src/test/osd/TestOSDScrub.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library Public License for more details.
*
*/
#include <stdio.h>
#include <signal.h>
#include <gtest/gtest.h>
#include "common/async/context_pool.h"
#include "osd/OSD.h"
#include "os/ObjectStore.h"
#include "mon/MonClient.h"
#include "common/ceph_argparse.h"
#include "msg/Messenger.h"
class TestOSDScrub: public OSD {
public:
TestOSDScrub(CephContext *cct_,
std::unique_ptr<ObjectStore> store_,
int id,
Messenger *internal,
Messenger *external,
Messenger *hb_front_client,
Messenger *hb_back_client,
Messenger *hb_front_server,
Messenger *hb_back_server,
Messenger *osdc_messenger,
MonClient *mc, const std::string &dev, const std::string &jdev,
ceph::async::io_context_pool& ictx) :
OSD(cct_, std::move(store_), id, internal, external,
hb_front_client, hb_back_client,
hb_front_server, hb_back_server,
osdc_messenger, mc, dev, jdev, ictx)
{
}
bool scrub_time_permit(utime_t now) {
return service.get_scrub_services().scrub_time_permit(now);
}
};
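// The assertions below exercise scrub_time_permit()'s window semantics.
// Judging from the expected results, both the hour and the week-day
// windows behave as half-open, wrap-around intervals: [begin, end) when
// begin < end, wrapping past midnight (or Saturday) when begin > end,
// and "always permitted" when begin == end. A minimal sketch of that
// predicate (an illustration inferred from this test, not the scrub
// scheduler's actual code):
static inline bool in_scrub_window(int v, int begin, int end)
{
if (begin == end)
return true; // degenerate bounds mean the whole day/week
if (begin < end)
return begin <= v && v < end; // plain interval
return v >= begin || v < end; // interval wraps around
}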
TEST(TestOSDScrub, scrub_time_permit) {
ceph::async::io_context_pool icp(1);
std::unique_ptr<ObjectStore> store = ObjectStore::create(g_ceph_context,
g_conf()->osd_objectstore,
g_conf()->osd_data,
g_conf()->osd_journal);
std::string cluster_msgr_type = g_conf()->ms_cluster_type.empty() ? g_conf().get_val<std::string>("ms_type") : g_conf()->ms_cluster_type;
Messenger *ms = Messenger::create(g_ceph_context, cluster_msgr_type,
entity_name_t::OSD(0), "make_checker",
getpid());
ms->set_cluster_protocol(CEPH_OSD_PROTOCOL);
ms->set_default_policy(Messenger::Policy::stateless_server(0));
ms->bind(g_conf()->public_addr);
MonClient mc(g_ceph_context, icp);
mc.build_initial_monmap();
TestOSDScrub* osd = new TestOSDScrub(g_ceph_context, std::move(store), 0, ms, ms, ms, ms, ms, ms, ms, &mc, "", "", icp);
// These values are now out of range and rejected: hours must be in
// [0,23] and week days in [0,6]
int err = g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "24");
ASSERT_TRUE(err < 0);
//GTEST_LOG_(INFO) << " osd_scrub_begin_hour = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_begin_hour");
err = g_ceph_context->_conf.set_val("osd_scrub_end_hour", "24");
ASSERT_TRUE(err < 0);
//GTEST_LOG_(INFO) << " osd_scrub_end_hour = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_end_hour");
err = g_ceph_context->_conf.set_val("osd_scrub_begin_week_day", "7");
ASSERT_TRUE(err < 0);
//GTEST_LOG_(INFO) << " osd_scrub_begin_week_day = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_begin_week_day");
err = g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "7");
ASSERT_TRUE(err < 0);
//GTEST_LOG_(INFO) << " osd_scrub_end_week_day = " << g_ceph_context->_conf.get_val<int64_t>("osd_scrub_end_week_day");
// Test all day
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "0");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "0");
g_ceph_context->_conf.apply_changes(nullptr);
tm tm;
tm.tm_isdst = -1;
strptime("2015-01-16 12:05:13", "%Y-%m-%d %H:%M:%S", &tm);
utime_t now = utime_t(mktime(&tm), 0);
bool ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "20");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07");
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 01:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "20");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07");
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 20:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "20");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07");
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 08:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_FALSE(ret);
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "01");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07");
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 20:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_FALSE(ret);
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "01");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07");
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 00:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_FALSE(ret);
g_ceph_context->_conf.set_val("osd_scrub_begin_hour", "01");
g_ceph_context->_conf.set_val("osd_scrub_end_hour", "07");
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
// Sun = 0, Mon = 1, Tue = 2, Wed = 3, Thu = 4, Fri = 5, Sat = 6
// Jan 16, 2015 is a Friday (5)
// every day
g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "0"); // inclusive
g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "0"); // not inclusive
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
// test Sun - Thu
g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "0"); // inclusive
g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "5"); // not inclusive
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_FALSE(ret);
// test Fri - Sat
g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "5"); // inclusive
g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "0"); // not inclusive
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-16 04:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
// Jan 14, 2015 is a Wednesday (3)
// test Tue - Fri
g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "2"); // inclusive
g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "6"); // not inclusive
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-14 04:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_TRUE(ret);
// Test Sat - Sun
g_ceph_context->_conf.set_val("osd_scrub_begin_week day", "6"); // inclusive
g_ceph_context->_conf.set_val("osd_scrub_end_week_day", "1"); // not inclusive
g_ceph_context->_conf.apply_changes(nullptr);
strptime("2015-01-14 04:05:13", "%Y-%m-%d %H:%M:%S", &tm);
now = utime_t(mktime(&tm), 0);
ret = osd->scrub_time_permit(now);
ASSERT_FALSE(ret);
}
// Local Variables:
// compile-command: "cd ../.. ; make unittest_osdscrub ; ./unittest_osdscrub --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* "
// End:
| 8,055 | 38.490196 | 139 |
cc
|
null |
ceph-main/src/test/osd/TestOpStat.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "include/interval_set.h"
#include "include/buffer.h"
#include <list>
#include <map>
#include <set>
#include "RadosModel.h"
#include "TestOpStat.h"
void TestOpStat::begin(TestOp *in) {
std::lock_guard l{stat_lock};
stats[in->getType()].begin(in);
}
void TestOpStat::end(TestOp *in) {
std::lock_guard l{stat_lock};
stats[in->getType()].end(in);
}
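// Minimal usage sketch (assuming `op` is some concrete TestOp):
//
// TestOpStat stat;
// stat.begin(&op); // stamps the op's start time under its type
// /* ... the op executes ... */
// stat.end(&op); // records latency = now - start, in microseconds
// std::cout << stat; // prints per-type 10/50/90/99th percentiles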
void TestOpStat::TypeStatus::export_latencies(std::map<double,uint64_t> &in) const
{
auto i = in.begin();
auto j = latencies.begin();
int count = 0;
while (j != latencies.end() && i != in.end()) {
count++;
if ((((double)count)/((double)latencies.size())) * 100 >= i->first) {
i->second = *j;
++i;
}
++j;
}
}
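// Note: export_latencies() expects the keys of `in` to be ascending
// percentile thresholds; each mapped value is overwritten with the first
// latency whose cumulative rank reaches that threshold. With an empty
// latency set the values keep their initial 0, which operator<< below
// relies on to stop printing.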
std::ostream & operator<<(std::ostream &out, const TestOpStat &rhs)
{
std::lock_guard l{rhs.stat_lock};
for (auto i = rhs.stats.begin();
i != rhs.stats.end();
++i) {
std::map<double,uint64_t> latency;
latency[10] = 0;
latency[50] = 0;
latency[90] = 0;
latency[99] = 0;
i->second.export_latencies(latency);
out << i->first << " latency: " << std::endl;
for (auto j = latency.begin();
j != latency.end();
++j) {
if (j->second == 0) break;
out << "\t" << j->first << "th percentile: "
<< j->second / 1000 << "ms" << std::endl;
}
}
return out;
}
| 1,405 | 22.830508 | 82 |
cc
|
null |
ceph-main/src/test/osd/TestOpStat.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#ifndef TESTOPSTAT_H
#define TESTOPSTAT_H
class TestOp;
class TestOpStat {
public:
mutable ceph::mutex stat_lock = ceph::make_mutex("TestOpStat lock");
TestOpStat() = default;
static uint64_t gettime()
{
timeval t;
gettimeofday(&t,0);
return (1000000*t.tv_sec) + t.tv_usec;
}
class TypeStatus {
public:
std::map<TestOp*,uint64_t> inflight;
std::multiset<uint64_t> latencies;
void begin(TestOp *in)
{
ceph_assert(!inflight.count(in));
inflight[in] = gettime();
}
void end(TestOp *in)
{
ceph_assert(inflight.count(in));
uint64_t curtime = gettime();
latencies.insert(curtime - inflight[in]);
inflight.erase(in);
}
void export_latencies(std::map<double,uint64_t> &in) const;
};
std::map<std::string,TypeStatus> stats;
void begin(TestOp *in);
void end(TestOp *in);
friend std::ostream & operator<<(std::ostream &, const TestOpStat &);
};
std::ostream & operator<<(std::ostream &out, const TestOpStat &rhs);
#endif
| 1,199 | 21.222222 | 71 |
h
|
null |
ceph-main/src/test/osd/TestPGLog.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Library Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Library Public License for more details.
*
*/
#include <stdio.h>
#include <signal.h>
#include "gtest/gtest.h"
#include "osd/PGLog.h"
#include "osd/OSDMap.h"
#include "include/coredumpctl.h"
#include "../objectstore/store_test_fixture.h"
using namespace std;
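// Key to the log-entry factories below: mk_ple_mod builds a MODIFY
// entry, mk_ple_dt a DELETE, mk_ple_ldt a LOST_DELETE, and mk_ple_err an
// ERROR entry; the _rb variants omit mark_unrollbackable(), leaving the
// entry rollbackable.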
struct PGLogTestBase {
static hobject_t mk_obj(unsigned id) {
hobject_t hoid;
stringstream ss;
ss << "obj_" << id;
hoid.oid = ss.str();
hoid.set_hash(id);
hoid.pool = 1;
return hoid;
}
static eversion_t mk_evt(unsigned ep, unsigned v) {
return eversion_t(ep, v);
}
static pg_log_entry_t mk_ple_mod(
const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) {
pg_log_entry_t e;
e.mark_unrollbackable();
e.op = pg_log_entry_t::MODIFY;
e.soid = hoid;
e.version = v;
e.prior_version = pv;
e.reqid = reqid;
return e;
}
static pg_log_entry_t mk_ple_dt(
const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) {
pg_log_entry_t e;
e.mark_unrollbackable();
e.op = pg_log_entry_t::DELETE;
e.soid = hoid;
e.version = v;
e.prior_version = pv;
e.reqid = reqid;
return e;
}
static pg_log_entry_t mk_ple_ldt(
const hobject_t &hoid, eversion_t v, eversion_t pv) {
pg_log_entry_t e;
e.mark_unrollbackable();
e.op = pg_log_entry_t::LOST_DELETE;
e.soid = hoid;
e.version = v;
e.prior_version = pv;
return e;
}
static pg_log_entry_t mk_ple_mod_rb(
const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) {
pg_log_entry_t e;
e.op = pg_log_entry_t::MODIFY;
e.soid = hoid;
e.version = v;
e.prior_version = pv;
e.reqid = reqid;
return e;
}
static pg_log_entry_t mk_ple_dt_rb(
const hobject_t &hoid, eversion_t v, eversion_t pv, osd_reqid_t reqid) {
pg_log_entry_t e;
e.op = pg_log_entry_t::DELETE;
e.soid = hoid;
e.version = v;
e.prior_version = pv;
e.reqid = reqid;
return e;
}
static pg_log_entry_t mk_ple_err(
const hobject_t &hoid, eversion_t v, osd_reqid_t reqid) {
pg_log_entry_t e;
e.op = pg_log_entry_t::ERROR;
e.soid = hoid;
e.version = v;
e.prior_version = eversion_t(0, 0);
e.reqid = reqid;
return e;
}
static pg_log_entry_t mk_ple_mod(
const hobject_t &hoid, eversion_t v, eversion_t pv) {
return mk_ple_mod(hoid, v, pv, osd_reqid_t());
}
static pg_log_entry_t mk_ple_dt(
const hobject_t &hoid, eversion_t v, eversion_t pv) {
return mk_ple_dt(hoid, v, pv, osd_reqid_t());
}
static pg_log_entry_t mk_ple_mod_rb(
const hobject_t &hoid, eversion_t v, eversion_t pv) {
return mk_ple_mod_rb(hoid, v, pv, osd_reqid_t());
}
static pg_log_entry_t mk_ple_dt_rb(
const hobject_t &hoid, eversion_t v, eversion_t pv) {
return mk_ple_dt_rb(hoid, v, pv, osd_reqid_t());
}
static pg_log_entry_t mk_ple_err(
const hobject_t &hoid, eversion_t v) {
return mk_ple_err(hoid, v, osd_reqid_t());
}
}; // PGLogTestBase
class PGLogTest : virtual public ::testing::Test, protected PGLog, public PGLogTestBase {
public:
PGLogTest() : PGLog(g_ceph_context) {}
void SetUp() override {
missing.may_include_deletes = true;
}
#include "common/ceph_context.h"
#include "common/config.h"
void TearDown() override {
clear();
}
struct TestCase {
list<pg_log_entry_t> base;
list<pg_log_entry_t> auth;
list<pg_log_entry_t> div;
pg_missing_t init;
pg_missing_t final;
set<hobject_t> toremove;
list<pg_log_entry_t> torollback;
bool deletes_during_peering;
private:
IndexedLog fullauth;
IndexedLog fulldiv;
pg_info_t authinfo;
pg_info_t divinfo;
public:
TestCase() : deletes_during_peering(false) {}
void setup() {
init.may_include_deletes = !deletes_during_peering;
final.may_include_deletes = !deletes_during_peering;
fullauth.log.insert(fullauth.log.end(), base.begin(), base.end());
fullauth.log.insert(fullauth.log.end(), auth.begin(), auth.end());
fulldiv.log.insert(fulldiv.log.end(), base.begin(), base.end());
fulldiv.log.insert(fulldiv.log.end(), div.begin(), div.end());
fullauth.head = authinfo.last_update = fullauth.log.rbegin()->version;
authinfo.last_complete = fullauth.log.rbegin()->version;
authinfo.log_tail = fullauth.log.begin()->version;
authinfo.log_tail.version--;
fullauth.tail = authinfo.log_tail;
authinfo.last_backfill = hobject_t::get_max();
fulldiv.head = divinfo.last_update = fulldiv.log.rbegin()->version;
divinfo.last_complete = eversion_t();
divinfo.log_tail = fulldiv.log.begin()->version;
divinfo.log_tail.version--;
fulldiv.tail = divinfo.log_tail;
divinfo.last_backfill = hobject_t::get_max();
if (init.get_items().empty()) {
divinfo.last_complete = divinfo.last_update;
} else {
eversion_t fmissing = init.get_items().at(init.get_rmissing().begin()->second).need;
for (list<pg_log_entry_t>::const_iterator i = fulldiv.log.begin();
i != fulldiv.log.end();
++i) {
if (i->version < fmissing)
divinfo.last_complete = i->version;
else
break;
}
}
fullauth.index();
fulldiv.index();
}
void set_div_bounds(eversion_t head, eversion_t tail) {
fulldiv.tail = divinfo.log_tail = tail;
fulldiv.head = divinfo.last_update = head;
}
void set_auth_bounds(eversion_t head, eversion_t tail) {
fullauth.tail = authinfo.log_tail = tail;
fullauth.head = authinfo.last_update = head;
}
const IndexedLog &get_fullauth() const { return fullauth; }
const IndexedLog &get_fulldiv() const { return fulldiv; }
const pg_info_t &get_authinfo() const { return authinfo; }
const pg_info_t &get_divinfo() const { return divinfo; }
}; // struct TestCase
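// A TestCase is driven by filling base/auth/div (plus the expected
// init/final missing sets and the toremove/torollback side effects) and
// then calling setup() to assemble the full authoritative and divergent
// logs; run_test_case() below replays it through both merge_log() and
// proc_replica_log().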
struct LogHandler : public PGLog::LogEntryHandler {
set<hobject_t> removed;
list<pg_log_entry_t> rolledback;
void rollback(
const pg_log_entry_t &entry) override {
rolledback.push_back(entry);
}
void rollforward(
const pg_log_entry_t &entry) override {}
void remove(
const hobject_t &hoid) override {
removed.insert(hoid);
}
void try_stash(const hobject_t &, version_t) override {
// lost/unfound cases are not tested yet
}
void trim(
const pg_log_entry_t &entry) override {}
};
template <typename missing_t>
void verify_missing(
const TestCase &tcase,
const missing_t &missing) {
ASSERT_EQ(tcase.final.get_items().size(), missing.get_items().size());
for (auto i = missing.get_items().begin();
i != missing.get_items().end();
++i) {
EXPECT_TRUE(tcase.final.get_items().count(i->first));
EXPECT_EQ(tcase.final.get_items().find(i->first)->second.need, i->second.need);
EXPECT_EQ(tcase.final.get_items().find(i->first)->second.have, i->second.have);
}
bool correct = missing.debug_verify_from_init(tcase.init, &(std::cout));
ASSERT_TRUE(correct);
}
void verify_sideeffects(
const TestCase &tcase,
const LogHandler &handler) {
ASSERT_EQ(tcase.toremove.size(), handler.removed.size());
ASSERT_EQ(tcase.torollback.size(), handler.rolledback.size());
{
list<pg_log_entry_t>::const_iterator titer = tcase.torollback.begin();
list<pg_log_entry_t>::const_iterator hiter = handler.rolledback.begin();
for (; titer != tcase.torollback.end(); ++titer, ++hiter) {
EXPECT_EQ(titer->version, hiter->version);
}
}
{
set<hobject_t>::const_iterator titer = tcase.toremove.begin();
set<hobject_t>::const_iterator hiter = handler.removed.begin();
for (; titer != tcase.toremove.end(); ++titer, ++hiter) {
EXPECT_EQ(*titer, *hiter);
}
}
}
void test_merge_log(const TestCase &tcase) {
clear();
log = tcase.get_fulldiv();
pg_info_t info = tcase.get_divinfo();
missing = tcase.init;
missing.flush();
IndexedLog olog;
olog = tcase.get_fullauth();
pg_info_t oinfo = tcase.get_authinfo();
LogHandler h;
bool dirty_info = false;
bool dirty_big_info = false;
merge_log(
oinfo, std::move(olog), pg_shard_t(1, shard_id_t(0)), info,
&h, dirty_info, dirty_big_info);
ASSERT_EQ(info.last_update, oinfo.last_update);
verify_missing(tcase, missing);
verify_sideeffects(tcase, h);
}
void test_proc_replica_log(const TestCase &tcase) {
clear();
log = tcase.get_fullauth();
pg_info_t info = tcase.get_authinfo();
pg_missing_t omissing = tcase.init;
IndexedLog olog;
olog = tcase.get_fulldiv();
pg_info_t oinfo = tcase.get_divinfo();
proc_replica_log(
oinfo, olog, omissing, pg_shard_t(1, shard_id_t(0)));
ceph_assert(oinfo.last_update >= log.tail);
if (!tcase.base.empty()) {
ASSERT_EQ(tcase.base.rbegin()->version, oinfo.last_update);
}
for (list<pg_log_entry_t>::const_iterator i = tcase.auth.begin();
i != tcase.auth.end();
++i) {
if (i->version > oinfo.last_update) {
if (i->is_delete() && tcase.deletes_during_peering) {
omissing.rm(i->soid, i->version);
} else {
omissing.add_next_event(*i);
}
}
}
verify_missing(tcase, omissing);
} // test_proc_replica_log
void run_test_case(const TestCase &tcase) {
test_merge_log(tcase);
test_proc_replica_log(tcase);
}
}; // class PGLogTest
struct TestHandler : public PGLog::LogEntryHandler {
list<hobject_t> &removed;
explicit TestHandler(list<hobject_t> &removed) : removed(removed) {}
void rollback(
const pg_log_entry_t &entry) override {}
void rollforward(
const pg_log_entry_t &entry) override {}
void remove(
const hobject_t &hoid) override {
removed.push_back(hoid);
}
void cant_rollback(const pg_log_entry_t &entry) {}
void try_stash(const hobject_t &, version_t) override {
// lost/unfound cases are not tested yet
}
void trim(
const pg_log_entry_t &entry) override {}
};
TEST_F(PGLogTest, rewind_divergent_log) {
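// rewind_divergent_log() drops local entries newer than `newhead` and
// repopulates `missing` for objects whose latest surviving state was
// undone, as the diagrams below illustrate.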
/* +----------------+
| log |
+--------+-------+
| |object |
|version | hash |
| | |
tail > (1,1) | x5 |
| | |
| | |
| (1,4) | x9 < newhead
| MODIFY | |
| | |
head > (1,5) | x9 |
| DELETE | |
| | |
+--------+-------+
*/
{
clear();
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
hobject_t divergent_object;
eversion_t divergent_version;
eversion_t newhead;
hobject_t divergent;
divergent.set_hash(0x9);
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
log.tail = e.version;
log.log.push_back(e);
e.version = newhead = eversion_t(1, 4);
e.soid = divergent;
e.op = pg_log_entry_t::MODIFY;
log.log.push_back(e);
e.version = divergent_version = eversion_t(1, 5);
e.prior_version = eversion_t(1, 4);
e.soid = divergent;
divergent_object = e.soid;
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
info.last_update = log.head;
info.last_complete = log.head;
}
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(3U, log.log.size());
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(log.head, info.last_update);
EXPECT_EQ(log.head, info.last_complete);
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
rewind_divergent_log(newhead, info, &h,
dirty_info, dirty_big_info);
EXPECT_TRUE(log.objects.count(divergent));
EXPECT_TRUE(missing.is_missing(divergent_object));
EXPECT_EQ(1U, log.objects.count(divergent_object));
EXPECT_EQ(2U, log.log.size());
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(newhead, info.last_update);
EXPECT_EQ(newhead, info.last_complete);
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(dirty_info);
EXPECT_TRUE(dirty_big_info);
}
/* +----------------+
| log |
+--------+-------+
| |object |
|version | hash |
| | |
tail > (1,1) | NULL |
| | |
| (1,4) | NULL < newhead
| | |
head > (1,5) | x9 |
| | |
+--------+-------+
*/
{
clear();
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
hobject_t divergent_object;
eversion_t divergent_version;
eversion_t prior_version;
eversion_t newhead;
{
pg_log_entry_t e;
e.mark_unrollbackable();
info.log_tail = log.tail = eversion_t(1, 1);
newhead = eversion_t(1, 3);
e.version = divergent_version = eversion_t(1, 5);
e.soid.set_hash(0x9);
divergent_object = e.soid;
e.op = pg_log_entry_t::DELETE;
e.prior_version = prior_version = eversion_t(0, 2);
log.log.push_back(e);
log.head = e.version;
}
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.log.size());
EXPECT_TRUE(remove_snap.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
rewind_divergent_log(newhead, info, &h,
dirty_info, dirty_big_info);
EXPECT_TRUE(missing.is_missing(divergent_object));
EXPECT_EQ(0U, log.objects.count(divergent_object));
EXPECT_TRUE(log.empty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(dirty_info);
EXPECT_TRUE(dirty_big_info);
}
// Regression test for tracker issue 13965
{
clear();
list<hobject_t> remove_snap;
pg_info_t info;
info.log_tail = log.tail = eversion_t(1, 5);
info.last_update = eversion_t(1, 6);
bool dirty_info = false;
bool dirty_big_info = false;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 5);
e.soid.set_hash(0x9);
add(e);
}
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 6);
e.soid.set_hash(0x10);
add(e);
}
TestHandler h(remove_snap);
roll_forward_to(eversion_t(1, 6), &h);
rewind_divergent_log(eversion_t(1, 5), info, &h,
dirty_info, dirty_big_info);
pg_log_t log;
reset_backfill_claim_log(log, &h);
}
}
TEST_F(PGLogTest, merge_old_entry) {
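// merge_old_entry() reconciles one stale local entry `oe` against the
// already-merged log; each block below isolates a single branch of its
// decision tree.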
// entries > last_backfill are silently ignored
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
info.last_backfill = hobject_t();
info.last_backfill.set_hash(100);
oe.soid.set_hash(2);
ASSERT_GT(oe.soid, info.last_backfill);
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_TRUE(log.empty());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_TRUE(log.empty());
}
// the new entry (from the logs) has a version that is higher than
// the old entry (from the log entry given in argument) : do
// nothing and return false
{
clear();
ObjectStore::Transaction t;
pg_info_t info;
list<hobject_t> remove_snap;
pg_log_entry_t ne;
ne.mark_unrollbackable();
ne.version = eversion_t(2,1);
log.add(ne);
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.log.size());
EXPECT_EQ(ne.version, log.log.front().version);
// the newer entry (from the logs) can be a DELETE
{
log.log.front().op = pg_log_entry_t::DELETE;
pg_log_entry_t oe;
oe.mark_unrollbackable();
oe.version = eversion_t(1,1);
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
}
// if the newer entry is not DELETE, the object must be in missing
{
pg_log_entry_t &ne = log.log.front();
ne.op = pg_log_entry_t::MODIFY;
missing.add_next_event(ne);
pg_log_entry_t oe;
oe.mark_unrollbackable();
oe.version = eversion_t(1,1);
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
missing.rm(ne.soid, ne.version);
}
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.log.size());
EXPECT_EQ(ne.version, log.log.front().version);
}
// the new entry (from the logs) has a version that is lower than
// the old entry (from the log entry given in argument) and
// old and new are delete : do nothing and return false
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
pg_log_entry_t ne;
ne.mark_unrollbackable();
ne.version = eversion_t(1,1);
ne.op = pg_log_entry_t::DELETE;
log.add(ne);
oe.version = eversion_t(2,1);
oe.op = pg_log_entry_t::DELETE;
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.log.size());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.log.size());
}
// the new entry (from the logs) has a version that is lower than
// the old entry (from the log entry given in argument) and
// old is update and new is DELETE :
// if the object is in missing, it is removed
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
pg_log_entry_t ne;
ne.mark_unrollbackable();
ne.version = eversion_t(1,1);
ne.op = pg_log_entry_t::DELETE;
log.add(ne);
oe.version = eversion_t(2,1);
oe.op = pg_log_entry_t::MODIFY;
missing.add_next_event(oe);
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_TRUE(missing.is_missing(oe.soid));
EXPECT_EQ(1U, log.log.size());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.size() > 0);
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.log.size());
}
// there is no new entry (from the logs) and
// the old entry (from the log entry given in argument) is not a CLONE and
// the old entry prior_version is greater than the tail of the log :
// do nothing and return false
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
info.log_tail = eversion_t(1,1);
oe.op = pg_log_entry_t::MODIFY;
oe.prior_version = eversion_t(2,1);
missing_add(oe.soid, oe.prior_version, eversion_t());
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_TRUE(log.empty());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_TRUE(log.empty());
}
// there is no new entry (from the logs) and
// the old entry (from the log entry given in argument) is not a CLONE and
// the old entry (from the log entry given in argument) is not a DELETE and
// the old entry prior_version is lower than the tail of the log :
// add the old object to the remove_snap list and
// add the old object to divergent priors and
// add or update the prior_version of the object to missing and
// return false
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
info.log_tail = eversion_t(2,1);
oe.soid.set_hash(1);
oe.op = pg_log_entry_t::MODIFY;
oe.prior_version = eversion_t(1,1);
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_TRUE(log.empty());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
EXPECT_TRUE(is_dirty());
EXPECT_EQ(oe.soid, remove_snap.front());
EXPECT_TRUE(t.empty());
EXPECT_TRUE(missing.is_missing(oe.soid));
EXPECT_TRUE(log.empty());
}
// there is no new entry (from the logs) and
// the old entry (from the log entry given in argument) is not a CLONE and
// the old entry (from the log entry given in argument) is a DELETE and
// the old entry prior_version is lower than the tail of the log :
// add the old object to divergent priors and
// add or update the prior_version of the object to missing and
// return false
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
info.log_tail = eversion_t(2,1);
oe.soid.set_hash(1);
oe.op = pg_log_entry_t::DELETE;
oe.prior_version = eversion_t(1,1);
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_TRUE(log.empty());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_TRUE(missing.is_missing(oe.soid));
EXPECT_TRUE(log.empty());
}
// there is no new entry (from the logs) and
// the old entry (from the log entry given in argument) is not a CLONE and
// the old entry (from the log entry given in argument) is not a DELETE and
// the old entry prior_version is eversion_t() :
// add the old object to the remove_snap list and
// remove the prior_version of the object from missing, if any and
// return false
{
clear();
ObjectStore::Transaction t;
pg_log_entry_t oe;
oe.mark_unrollbackable();
pg_info_t info;
list<hobject_t> remove_snap;
info.log_tail = eversion_t(10,1);
oe.soid.set_hash(1);
oe.op = pg_log_entry_t::MODIFY;
oe.prior_version = eversion_t();
missing.add(oe.soid, eversion_t(1,1), eversion_t(), false);
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(t.empty());
EXPECT_TRUE(missing.is_missing(oe.soid));
EXPECT_TRUE(log.empty());
TestHandler h(remove_snap);
merge_old_entry(t, oe, info, &h);
missing.flush();
EXPECT_FALSE(is_dirty());
EXPECT_EQ(oe.soid, remove_snap.front());
EXPECT_TRUE(t.empty());
EXPECT_FALSE(missing.have_missing());
EXPECT_TRUE(log.empty());
}
}
TEST_F(PGLogTest, merge_log) {
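// merge_log() folds an authoritative peer log (olog/oinfo) into the
// local one: it can extend the tail, append newer entries at the head,
// and rewind divergent local entries, updating `missing` and the
// handler side effects asserted below.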
// head and tail match, last_backfill is set:
// noop
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_shard_t fromosd;
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
hobject_t last_backfill(object_t("oname"), string("key"), 1, 234, 1, "");
info.last_backfill = last_backfill;
eversion_t stat_version(10, 1);
info.stats.version = stat_version;
log.tail = olog.tail = eversion_t(1, 1);
log.head = olog.head = eversion_t(2, 1);
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(0U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(last_backfill, info.last_backfill);
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
merge_log(oinfo, std::move(olog), fromosd, info, &h,
dirty_info, dirty_big_info);
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(0U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
}
// head and tail match, last_backfill is not set: info.stats is
// copied from oinfo.stats but info.stats.reported_* is guaranteed to
// never be replaced by a lower version
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_shard_t fromosd;
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
eversion_t stat_version(10, 1);
oinfo.stats.version = stat_version;
info.stats.reported_seq = 1;
info.stats.reported_epoch = 10;
oinfo.stats.reported_seq = 1;
oinfo.stats.reported_epoch = 1;
log.tail = olog.tail = eversion_t(1, 1);
log.head = olog.head = eversion_t(2, 1);
missing.may_include_deletes = false;
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(0U, log.log.size());
EXPECT_EQ(eversion_t(), info.stats.version);
EXPECT_EQ(1ull, info.stats.reported_seq);
EXPECT_EQ(10u, info.stats.reported_epoch);
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(info.last_backfill.is_max());
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
merge_log(oinfo, std::move(olog), fromosd, info, &h,
dirty_info, dirty_big_info);
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(0U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_EQ(1ull, info.stats.reported_seq);
EXPECT_EQ(10u, info.stats.reported_epoch);
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
}
/* Before
+--------------------------+
| log olog |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
| | x5 | (1,1) < tail
| | | |
| | | |
tail > (1,4) | x7 | |
| | | |
| | | |
head > (1,5) | x9 | (1,5) < head
| | | |
| | | |
+--------+-------+---------+
After
+-----------------
| log |
+--------+-------+
| |object |
|version | hash |
| | |
tail > (1,1) | x5 |
| | |
| | |
| (1,4) | x7 |
| | |
| | |
head > (1,5) | x9 |
| | |
| | |
+--------+-------+
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_shard_t fromosd;
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
missing.may_include_deletes = false;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 4);
e.soid.set_hash(0x5);
log.tail = e.version;
log.log.push_back(e);
e.version = eversion_t(1, 5);
e.soid.set_hash(0x9);
log.log.push_back(e);
log.head = e.version;
log.index();
info.last_update = log.head;
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
olog.tail = e.version;
olog.log.push_back(e);
e.version = eversion_t(1, 5);
e.soid.set_hash(0x9);
olog.log.push_back(e);
olog.head = e.version;
}
hobject_t last_backfill(object_t("oname"), string("key"), 1, 234, 1, "");
info.last_backfill = last_backfill;
eversion_t stat_version(10, 1);
info.stats.version = stat_version;
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(2U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(last_backfill, info.last_backfill);
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
merge_log(oinfo, std::move(olog), fromosd, info, &h,
dirty_info, dirty_big_info);
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(3U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_TRUE(remove_snap.empty());
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(dirty_info);
EXPECT_TRUE(dirty_big_info);
}
/* +--------------------------+
| log olog |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
tail > (1,1) | x5 | (1,1) < tail
| | | |
| | | |
| (1,2) | x3 | (1,2) < lower_bound
| | | |
| | | |
head > (1,3) | x9 | |
| DELETE | | |
| | | |
| | x9 | (2,3) |
| | | MODIFY |
| | | |
| | x7 | (2,4) < head
| | | DELETE |
+--------+-------+---------+
The log entry (1,3) deletes the object x9 but the olog entry (2,3) modifies
it and is authoritative : the log entry (1,3) is divergent.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_shard_t fromosd;
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
hobject_t divergent_object;
missing.may_include_deletes = true;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
log.tail = e.version;
log.log.push_back(e);
e.version = eversion_t(1, 2);
e.soid.set_hash(0x3);
log.log.push_back(e);
e.version = eversion_t(1,3);
e.soid.set_hash(0x9);
divergent_object = e.soid;
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
info.last_update = log.head;
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
olog.tail = e.version;
olog.log.push_back(e);
e.version = eversion_t(1, 2);
e.soid.set_hash(0x3);
olog.log.push_back(e);
e.version = eversion_t(2, 3);
e.soid.set_hash(0x9);
e.op = pg_log_entry_t::MODIFY;
olog.log.push_back(e);
e.version = eversion_t(2, 4);
e.soid.set_hash(0x7);
e.op = pg_log_entry_t::DELETE;
olog.log.push_back(e);
olog.head = e.version;
}
snapid_t purged_snap(1);
{
oinfo.last_update = olog.head;
oinfo.purged_snaps.insert(purged_snap);
}
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.objects.count(divergent_object));
EXPECT_EQ(3U, log.log.size());
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(log.head, info.last_update);
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
merge_log(oinfo, std::move(olog), fromosd, info, &h,
dirty_info, dirty_big_info);
/* When the divergent entry is a DELETE and the authoritative
entry is a MODIFY, the object will be added to missing : it is
a verifiable side effect proving the entry was identified
to be divergent.
*/
EXPECT_TRUE(missing.is_missing(divergent_object));
EXPECT_EQ(1U, log.objects.count(divergent_object));
EXPECT_EQ(4U, log.log.size());
/* Entries from olog past the divergence point are appended to the
head of the log, and the divergent version of the object is
removed (added to remove_snap).
*/
EXPECT_EQ(0x9U, remove_snap.front().get_hash());
EXPECT_EQ(log.head, info.last_update);
EXPECT_TRUE(info.purged_snaps.contains(purged_snap));
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(dirty_info);
EXPECT_TRUE(dirty_big_info);
}
/* +--------------------------+
| log olog |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
tail > (1,1) | x5 | (1,1) < tail
| | | |
| | | |
| (1,2) | x3 | (1,2) < lower_bound
| | | |
| | | |
head > (1,3) | x9 | |
| DELETE | | |
| | | |
| | x9 | (2,3) |
| | | MODIFY |
| | | |
| | x7 | (2,4) < head
| | | DELETE |
+--------+-------+---------+
The log entry (1,3) deletes the object x9 but the olog entry (2,3) modifies
it and is authoritative : the log entry (1,3) is divergent.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_shard_t fromosd;
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
hobject_t divergent_object;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
log.tail = e.version;
log.log.push_back(e);
e.version = eversion_t(1, 2);
e.soid.set_hash(0x3);
log.log.push_back(e);
e.version = eversion_t(1,3);
e.soid.set_hash(0x9);
divergent_object = e.soid;
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
info.last_update = log.head;
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
olog.tail = e.version;
olog.log.push_back(e);
e.version = eversion_t(1, 2);
e.soid.set_hash(0x3);
olog.log.push_back(e);
e.version = eversion_t(2, 3);
e.soid.set_hash(0x9);
e.op = pg_log_entry_t::MODIFY;
olog.log.push_back(e);
e.version = eversion_t(2, 4);
e.soid.set_hash(0x7);
e.op = pg_log_entry_t::DELETE;
olog.log.push_back(e);
olog.head = e.version;
}
snapid_t purged_snap(1);
{
oinfo.last_update = olog.head;
oinfo.purged_snaps.insert(purged_snap);
}
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(1U, log.objects.count(divergent_object));
EXPECT_EQ(3U, log.log.size());
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(log.head, info.last_update);
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
missing.may_include_deletes = false;
merge_log(oinfo, std::move(olog), fromosd, info, &h,
dirty_info, dirty_big_info);
/* When the divergent entry is a DELETE and the authoritative
entry is a MODIFY, the object will be added to missing : it is
a verifiable side effect proving the entry was identified
to be divergent.
*/
EXPECT_TRUE(missing.is_missing(divergent_object));
EXPECT_EQ(1U, log.objects.count(divergent_object));
EXPECT_EQ(4U, log.log.size());
/* Entries from olog past the divergence point are appended to the
head of the log, and the divergent version of the object is
removed (added to remove_snap). When peering handles deletes, it
is the earlier version that ends up in the removed list.
*/
EXPECT_EQ(0x7U, remove_snap.front().get_hash());
EXPECT_EQ(log.head, info.last_update);
EXPECT_TRUE(info.purged_snaps.contains(purged_snap));
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(dirty_info);
EXPECT_TRUE(dirty_big_info);
}
/* +--------------------------+
| log olog |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
tail > (1,1) | x5 | (1,1) < tail
| | | |
| | | |
| (1,4) | x7 | (1,4) < head
| | | |
| | | |
head > (1,5) | x9 | |
| | | |
| | | |
+--------+-------+---------+
The head of the log entry (1,5) is divergent because it is greater than the
head of olog.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_shard_t fromosd;
pg_info_t info;
list<hobject_t> remove_snap;
bool dirty_info = false;
bool dirty_big_info = false;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
log.tail = e.version;
log.log.push_back(e);
e.version = eversion_t(1, 4);
e.soid.set_hash(0x7);
log.log.push_back(e);
e.version = eversion_t(1, 5);
e.soid.set_hash(0x9);
log.log.push_back(e);
log.head = e.version;
log.index();
info.last_update = log.head;
e.version = eversion_t(1, 1);
e.soid.set_hash(0x5);
olog.tail = e.version;
olog.log.push_back(e);
e.version = eversion_t(1, 4);
e.soid.set_hash(0x7);
olog.log.push_back(e);
olog.head = e.version;
}
hobject_t last_backfill(object_t("oname"), string("key"), 1, 234, 1, "");
info.last_backfill = last_backfill;
eversion_t stat_version(10, 1);
info.stats.version = stat_version;
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(3U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_TRUE(remove_snap.empty());
EXPECT_EQ(last_backfill, info.last_backfill);
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_FALSE(is_dirty());
EXPECT_FALSE(dirty_info);
EXPECT_FALSE(dirty_big_info);
TestHandler h(remove_snap);
missing.may_include_deletes = false;
merge_log(oinfo, std::move(olog), fromosd, info, &h,
dirty_info, dirty_big_info);
EXPECT_FALSE(missing.have_missing());
EXPECT_EQ(2U, log.log.size());
EXPECT_EQ(stat_version, info.stats.version);
EXPECT_EQ(0x9U, remove_snap.front().get_hash());
EXPECT_TRUE(info.purged_snaps.empty());
EXPECT_TRUE(is_dirty());
EXPECT_TRUE(dirty_info);
EXPECT_TRUE(dirty_big_info);
}
}
TEST_F(PGLogTest, proc_replica_log) {
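// proc_replica_log() evaluates a replica's log (olog/oinfo) against the
// local authoritative one: it rewinds oinfo.last_update to the last
// point of agreement and fills omissing with what the replica must
// consequently recover.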
// empty log : no side effect
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_missing_t omissing;
pg_shard_t from;
eversion_t last_update(1, 1);
log.head = olog.head = oinfo.last_update = last_update;
eversion_t last_complete(1, 1);
oinfo.last_complete = last_complete;
EXPECT_FALSE(omissing.have_missing());
EXPECT_EQ(last_update, oinfo.last_update);
EXPECT_EQ(last_complete, oinfo.last_complete);
missing.may_include_deletes = false;
proc_replica_log(oinfo, olog, omissing, from);
EXPECT_FALSE(omissing.have_missing());
EXPECT_EQ(last_update, oinfo.last_update);
EXPECT_EQ(last_update, oinfo.last_complete);
}
/* +--------------------------+
| log olog |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
| | x3 | (1,1) < tail
| | | |
| | | |
tail > (1,2) | x5 | |
| | | |
| | | |
head > (1,3) | x9 | |
| DELETE | | |
| | | |
| | x9 | (2,3) < head
| | | DELETE |
| | | |
+--------+-------+---------+
The log entry (1,3) deletes the object x9 and the olog entry
(2,3) also deletes it : do nothing. The olog tail is ignored
because it is before the log tail.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_missing_t omissing;
pg_shard_t from;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 2);
e.soid.set_hash(0x5);
log.tail = e.version;
log.log.push_back(e);
e.version = eversion_t(1, 3);
e.soid.set_hash(0x9);
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
e.version = eversion_t(1, 1);
e.soid.set_hash(0x3);
olog.tail = e.version;
olog.log.push_back(e);
e.version = eversion_t(2, 3);
e.soid.set_hash(0x9);
e.op = pg_log_entry_t::DELETE;
olog.log.push_back(e);
olog.head = e.version;
oinfo.last_update = olog.head;
oinfo.last_complete = olog.head;
}
EXPECT_FALSE(omissing.have_missing());
EXPECT_EQ(olog.head, oinfo.last_update);
EXPECT_EQ(olog.head, oinfo.last_complete);
missing.may_include_deletes = false;
proc_replica_log(oinfo, olog, omissing, from);
EXPECT_FALSE(omissing.have_missing());
}
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_missing_t omissing;
pg_shard_t from;
hobject_t divergent_object;
{
pg_log_entry_t e;
e.mark_unrollbackable();
{
e.soid = divergent_object;
e.soid.set_hash(0x1);
e.version = eversion_t(1, 1);
log.tail = e.version;
log.log.push_back(e);
e.soid = divergent_object;
e.prior_version = eversion_t(1, 1);
e.version = eversion_t(1, 2);
log.tail = e.version;
log.log.push_back(e);
e.soid.set_hash(0x3);
e.version = eversion_t(1, 4);
log.log.push_back(e);
e.soid.set_hash(0x7);
e.version = eversion_t(1, 5);
log.log.push_back(e);
e.soid.set_hash(0x8);
e.version = eversion_t(1, 6);
log.log.push_back(e);
e.soid.set_hash(0x9);
e.op = pg_log_entry_t::DELETE;
e.version = eversion_t(2, 7);
log.log.push_back(e);
e.soid.set_hash(0xa);
e.version = eversion_t(2, 8);
log.head = e.version;
log.log.push_back(e);
}
log.index();
{
e.soid = divergent_object;
e.soid.set_hash(0x1);
e.version = eversion_t(1, 1);
olog.tail = e.version;
olog.log.push_back(e);
e.soid = divergent_object;
e.prior_version = eversion_t(1, 1);
e.version = eversion_t(1, 2);
olog.log.push_back(e);
e.prior_version = eversion_t(0, 0);
e.soid.set_hash(0x3);
e.version = eversion_t(1, 4);
olog.log.push_back(e);
e.soid.set_hash(0x7);
e.version = eversion_t(1, 5);
olog.log.push_back(e);
e.soid.set_hash(0x8);
e.version = eversion_t(1, 6);
olog.log.push_back(e);
e.soid.set_hash(0x9); // should not be added to missing, create
e.op = pg_log_entry_t::MODIFY;
e.version = eversion_t(1, 7);
olog.log.push_back(e);
e.soid = divergent_object; // should be added to missing at 1,2
e.op = pg_log_entry_t::MODIFY;
e.version = eversion_t(1, 8);
e.prior_version = eversion_t(1, 2);
olog.log.push_back(e);
olog.head = e.version;
}
oinfo.last_update = olog.head;
oinfo.last_complete = olog.head;
}
EXPECT_FALSE(omissing.have_missing());
EXPECT_EQ(olog.head, oinfo.last_update);
EXPECT_EQ(olog.head, oinfo.last_complete);
missing.may_include_deletes = false;
proc_replica_log(oinfo, olog, omissing, from);
EXPECT_TRUE(omissing.have_missing());
EXPECT_TRUE(omissing.is_missing(divergent_object));
EXPECT_EQ(eversion_t(1, 2), omissing.get_items().at(divergent_object).need);
EXPECT_EQ(eversion_t(1, 6), oinfo.last_update);
EXPECT_EQ(eversion_t(1, 1), oinfo.last_complete);
}
/* +--------------------------+
| olog log |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
tail > (1,1) | x9 | (1,1) < tail
| | | |
| | | |
| (1,2) | x3 | (1,2) |
| | | |
| | | |
head > (1,3) | x9 | |
| DELETE | | |
| | | |
| | x9 | (2,3) < head
| | | DELETE |
| | | |
+--------+-------+---------+
The log entry (1,3) deletes the object x9 and the olog entry
(2,3) also deletes it : do nothing.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_missing_t omissing;
pg_shard_t from;
eversion_t last_update(1, 2);
hobject_t divergent_object;
divergent_object.set_hash(0x9);
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid = divergent_object;
log.tail = e.version;
log.log.push_back(e);
e.version = last_update;
e.soid.set_hash(0x3);
log.log.push_back(e);
e.version = eversion_t(2, 3);
e.prior_version = eversion_t(1, 1);
e.soid = divergent_object;
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
e.version = eversion_t(1, 1);
e.soid = divergent_object;
olog.tail = e.version;
olog.log.push_back(e);
e.version = last_update;
e.soid.set_hash(0x3);
olog.log.push_back(e);
e.version = eversion_t(1, 3);
e.prior_version = eversion_t(1, 1);
e.soid = divergent_object;
e.op = pg_log_entry_t::DELETE;
olog.log.push_back(e);
olog.head = e.version;
oinfo.last_update = olog.head;
oinfo.last_complete = olog.head;
}
EXPECT_FALSE(omissing.have_missing());
EXPECT_EQ(olog.head, oinfo.last_update);
EXPECT_EQ(olog.head, oinfo.last_complete);
missing.may_include_deletes = false;
proc_replica_log(oinfo, olog, omissing, from);
EXPECT_TRUE(omissing.have_missing());
EXPECT_TRUE(omissing.is_missing(divergent_object));
EXPECT_EQ(omissing.get_items().at(divergent_object).have, eversion_t(0, 0));
EXPECT_EQ(omissing.get_items().at(divergent_object).need, eversion_t(1, 1));
EXPECT_EQ(last_update, oinfo.last_update);
}
/* +--------------------------+
| olog log |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
tail > (1,1) | x9 | (1,1) < tail
| | | |
| | | |
| (1,2) | x3 | (1,2) |
| | | |
| | | |
head > (1,3) | x9 | |
| MODIFY | | |
| | | |
| | x9 | (2,3) < head
| | | DELETE |
| | | |
+--------+-------+---------+
The log entry (2,3) deletes the object x9 but the olog entry
(1,3) modifies it : remove it from omissing.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_missing_t omissing;
pg_shard_t from;
eversion_t last_update(1, 2);
hobject_t divergent_object;
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid = divergent_object;
log.tail = e.version;
log.log.push_back(e);
e.version = last_update;
e.soid.set_hash(0x3);
log.log.push_back(e);
e.version = eversion_t(2, 3);
e.prior_version = eversion_t(1, 1);
e.soid = divergent_object;
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
e.version = eversion_t(1, 1);
e.soid = divergent_object;
olog.tail = e.version;
olog.log.push_back(e);
e.version = last_update;
e.soid.set_hash(0x3);
olog.log.push_back(e);
e.version = eversion_t(1, 3);
e.prior_version = eversion_t(1, 1);
e.soid = divergent_object;
divergent_object = e.soid;
omissing.add(divergent_object, e.version, eversion_t(), false);
e.op = pg_log_entry_t::MODIFY;
olog.log.push_back(e);
olog.head = e.version;
oinfo.last_update = olog.head;
oinfo.last_complete = olog.head;
}
EXPECT_TRUE(omissing.have_missing());
EXPECT_TRUE(omissing.is_missing(divergent_object));
EXPECT_EQ(eversion_t(1, 3), omissing.get_items().at(divergent_object).need);
EXPECT_EQ(olog.head, oinfo.last_update);
EXPECT_EQ(olog.head, oinfo.last_complete);
missing.may_include_deletes = false;
proc_replica_log(oinfo, olog, omissing, from);
EXPECT_TRUE(omissing.have_missing());
EXPECT_TRUE(omissing.is_missing(divergent_object));
EXPECT_EQ(omissing.get_items().at(divergent_object).have, eversion_t(0, 0));
EXPECT_EQ(omissing.get_items().at(divergent_object).need, eversion_t(1, 1));
EXPECT_EQ(last_update, oinfo.last_update);
}
/* +--------------------------+
| log olog |
+--------+-------+---------+
| |object | |
|version | hash | version |
| | | |
tail > (1,1) | x9 | (1,1) < tail
| | | |
| | | |
| (1,2) | x3 | (1,2) |
| | | |
| | | |
| | x9 | (1,3) < head
| | | MODIFY |
| | | |
head > (2,3) | x9 | |
| DELETE | | |
| | | |
+--------+-------+---------+
The log entry (2,3) deletes the object x9 but the olog entry
(1,3) modifies it : proc_replica_log should adjust missing to
1,1 for that object until add_next_event in PG::activate processes
the delete.
*/
{
clear();
pg_log_t olog;
pg_info_t oinfo;
pg_missing_t omissing;
pg_shard_t from;
eversion_t last_update(1, 2);
hobject_t divergent_object;
eversion_t new_version(2, 3);
eversion_t divergent_version(1, 3);
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.version = eversion_t(1, 1);
e.soid.set_hash(0x9);
log.tail = e.version;
log.log.push_back(e);
e.version = last_update;
e.soid.set_hash(0x3);
log.log.push_back(e);
e.version = new_version;
e.prior_version = eversion_t(1, 1);
e.soid.set_hash(0x9);
e.op = pg_log_entry_t::DELETE;
log.log.push_back(e);
log.head = e.version;
log.index();
e.op = pg_log_entry_t::MODIFY;
e.version = eversion_t(1, 1);
e.soid.set_hash(0x9);
olog.tail = e.version;
olog.log.push_back(e);
e.version = last_update;
e.soid.set_hash(0x3);
olog.log.push_back(e);
e.version = divergent_version;
e.prior_version = eversion_t(1, 1);
e.soid.set_hash(0x9);
divergent_object = e.soid;
omissing.add(divergent_object, e.version, eversion_t(), false);
e.op = pg_log_entry_t::MODIFY;
olog.log.push_back(e);
olog.head = e.version;
oinfo.last_update = olog.head;
oinfo.last_complete = olog.head;
}
EXPECT_TRUE(omissing.have_missing());
EXPECT_TRUE(omissing.is_missing(divergent_object));
EXPECT_EQ(divergent_version, omissing.get_items().at(divergent_object).need);
EXPECT_EQ(olog.head, oinfo.last_update);
EXPECT_EQ(olog.head, oinfo.last_complete);
missing.may_include_deletes = false;
proc_replica_log(oinfo, olog, omissing, from);
EXPECT_TRUE(omissing.have_missing());
EXPECT_TRUE(omissing.get_items().begin()->second.need == eversion_t(1, 1));
EXPECT_EQ(last_update, oinfo.last_update);
EXPECT_EQ(eversion_t(0, 0), oinfo.last_complete);
}
}
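// The numbered cases below use the compact TestCase notation: `base` is
// shared history, `div` the local divergent tail, `auth` the
// authoritative tail; `init`/`final` are the missing sets before and
// after, and `toremove`/`torollback` the expected handler side effects.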
TEST_F(PGLogTest, merge_log_1) {
TestCase t;
t.base.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
t.final.add(mk_obj(1), mk_evt(10, 100), mk_evt(0, 0), false);
t.toremove.insert(mk_obj(1));
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_2) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));
t.torollback.insert(
t.torollback.begin(), t.div.rbegin(), t.div.rend());
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_3) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));
t.final.add(mk_obj(1), mk_evt(10, 100), mk_evt(0, 0), false);
t.toremove.insert(mk_obj(1));
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_4) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));
t.init.add(mk_obj(1), mk_evt(10, 102), mk_evt(0, 0), false);
t.final.add(mk_obj(1), mk_evt(10, 100), mk_evt(0, 0), false);
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_5) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
t.div.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 102), mk_evt(10, 101)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));
t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(0, 0), false);
t.toremove.insert(mk_obj(1));
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_6) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));
t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100), false);
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_7) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));
t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(8, 80), false);
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_8) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.auth.push_back(mk_ple_dt(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));
t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(8, 80), true);
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_9) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.auth.push_back(mk_ple_dt(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));
t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
t.toremove.insert(mk_obj(1));
t.deletes_during_peering = true;
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_10) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.auth.push_back(mk_ple_ldt(mk_obj(1), mk_evt(11, 101), mk_evt(10, 100)));
t.init.add(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80), false);
t.final.add(mk_obj(1), mk_evt(11, 101), mk_evt(8, 80), true);
t.setup();
run_test_case(t);
}
TEST_F(PGLogTest, merge_log_prior_version_have) {
TestCase t;
t.base.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 80)));
t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100)));
t.init.add(mk_obj(1), mk_evt(10, 101), mk_evt(10, 100), false);
t.setup();
run_test_case(t);
}
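// The tests below model logs that were split: set_div_bounds() and
// set_auth_bounds() override the default head/tail so the divergent and
// authoritative logs no longer overlap cleanly.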
TEST_F(PGLogTest, merge_log_split_missing_entries_at_head) {
TestCase t;
t.auth.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
t.auth.push_back(mk_ple_mod_rb(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(8, 70), mk_evt(8, 65)));
t.setup();
t.set_div_bounds(mk_evt(9, 79), mk_evt(8, 69));
t.set_auth_bounds(mk_evt(15, 160), mk_evt(9, 77));
t.final.add(mk_obj(1), mk_evt(15, 150), mk_evt(8, 70), false);
run_test_case(t);
}
TEST_F(PGLogTest, olog_tail_gt_log_tail_split) {
TestCase t;
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 155), mk_evt(15, 150)));
t.setup();
t.set_div_bounds(mk_evt(15, 153), mk_evt(15, 151));
t.set_auth_bounds(mk_evt(15, 156), mk_evt(10, 99));
t.final.add(mk_obj(1), mk_evt(15, 155), mk_evt(15, 150), false);
run_test_case(t);
}
TEST_F(PGLogTest, olog_tail_gt_log_tail_split2) {
TestCase t;
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 150), mk_evt(10, 100)));
t.auth.push_back(mk_ple_mod(mk_obj(1), mk_evt(16, 155), mk_evt(15, 150)));
t.div.push_back(mk_ple_mod(mk_obj(1), mk_evt(15, 153), mk_evt(15, 150)));
t.setup();
t.set_div_bounds(mk_evt(15, 153), mk_evt(15, 151));
t.set_auth_bounds(mk_evt(16, 156), mk_evt(10, 99));
t.final.add(mk_obj(1), mk_evt(16, 155), mk_evt(0, 0), false);
t.toremove.insert(mk_obj(1));
run_test_case(t);
}
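// filter_log_1: build a log of num_objects entries spread across
// namespaces, run pg_log_t::filter_log() before and after a simulated
// pg_num increase, and verify that the hit-set ("internal") entries
// survive every pass.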
TEST_F(PGLogTest, filter_log_1) {
{
clear();
int osd_id = 1;
epoch_t epoch = 40;
int64_t pool_id = 1;
int bits = 2;
int max_osd = 4;
int pg_num = max_osd << bits;
int num_objects = 1000;
int num_internal = 10;
// Set up splitting map
std::unique_ptr<OSDMap> osdmap(new OSDMap);
uuid_d test_uuid;
test_uuid.generate_random();
osdmap->build_simple_with_pool(g_ceph_context, epoch, test_uuid, max_osd, bits, bits);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
const string hit_set_namespace("internal");
{
pg_log_entry_t e;
e.mark_unrollbackable();
e.op = pg_log_entry_t::MODIFY;
e.soid.pool = pool_id;
uuid_d uuid_name;
int i;
for (i = 1; i <= num_objects; ++i) {
e.version = eversion_t(epoch, i);
// Use this to generate random file names
uuid_name.generate_random();
ostringstream name;
name << uuid_name;
e.soid.oid.name = name.str();
// First has no namespace
if (i != 1) {
        // num_internal objects get the internal namespace
if (i <= num_internal + 1) {
e.soid.nspace = hit_set_namespace;
} else { // rest have different namespaces
ostringstream ns;
ns << "ns" << i;
e.soid.nspace = ns.str();
}
}
log.log.push_back(e);
if (i == 1)
log.tail = e.version;
}
log.head = e.version;
log.index();
}
spg_t pgid(pg_t(2, pool_id), shard_id_t::NO_SHARD);
// See if we created the right number of entries
int total = log.log.size();
ASSERT_EQ(total, num_objects);
// Some should be removed
{
pg_log_t filtered, reject;
pg_log_t::filter_log(
pgid, *osdmap, hit_set_namespace, log, filtered, reject);
log = IndexedLog(filtered);
}
EXPECT_LE(log.log.size(), (size_t)total);
// If we filter a second time, there should be the same total
total = log.log.size();
{
pg_log_t filtered, reject;
pg_log_t::filter_log(
pgid, *osdmap, hit_set_namespace, log, filtered, reject);
log = IndexedLog(filtered);
}
EXPECT_EQ(log.log.size(), (size_t)total);
// Increase pg_num as if there would be a split
int new_pg_num = pg_num * 16;
OSDMap::Incremental inc(epoch + 1);
inc.fsid = test_uuid;
const pg_pool_t *pool = osdmap->get_pg_pool(pool_id);
pg_pool_t newpool;
newpool = *pool;
newpool.set_pg_num(new_pg_num);
newpool.set_pgp_num(new_pg_num);
inc.new_pools[pool_id] = newpool;
int ret = osdmap->apply_incremental(inc);
ASSERT_EQ(ret, 0);
// We should have fewer entries after a filter
{
pg_log_t filtered, reject;
pg_log_t::filter_log(
pgid, *osdmap, hit_set_namespace, log, filtered, reject);
log = IndexedLog(filtered);
}
EXPECT_LE(log.log.size(), (size_t)total);
// Make sure all internal entries are retained
int count = 0;
for (list<pg_log_entry_t>::iterator i = log.log.begin();
i != log.log.end(); ++i) {
if (i->soid.nspace == hit_set_namespace) count++;
}
EXPECT_EQ(count, num_internal);
}
}
TEST_F(PGLogTest, get_request) {
clear();
// make sure writes, deletes, and errors are found
vector<pg_log_entry_t> entries;
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
entries.push_back(
pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(6,2), eversion_t(3,4),
1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 1),
utime_t(0,1), -ENOENT));
entries.push_back(
pg_log_entry_t(pg_log_entry_t::MODIFY, oid, eversion_t(6,3), eversion_t(3,4),
2, osd_reqid_t(entity_name_t::CLIENT(777), 8, 2),
utime_t(1,2), 0));
entries.push_back(
pg_log_entry_t(pg_log_entry_t::DELETE, oid, eversion_t(7,4), eversion_t(7,4),
3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 3),
utime_t(10,2), 0));
entries.push_back(
pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(7,5), eversion_t(7,4),
3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 4),
utime_t(20,1), -ENOENT));
for (auto &entry : entries) {
log.add(entry);
}
for (auto &entry : entries) {
eversion_t replay_version;
version_t user_version;
int return_code = 0;
vector<pg_log_op_return_item_t> op_returns;
bool got = log.get_request(
entry.reqid, &replay_version, &user_version, &return_code, &op_returns);
EXPECT_TRUE(got);
EXPECT_EQ(entry.return_code, return_code);
EXPECT_EQ(entry.version, replay_version);
EXPECT_EQ(entry.user_version, user_version);
}
}
TEST_F(PGLogTest, ErrorNotIndexedByObject) {
clear();
// make sure writes, deletes, and errors are found
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
log.add(
pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(6,2), eversion_t(3,4),
1, osd_reqid_t(entity_name_t::CLIENT(777), 8, 1),
utime_t(0,1), -ENOENT));
EXPECT_FALSE(log.logged_object(oid));
pg_log_entry_t modify(pg_log_entry_t::MODIFY, oid, eversion_t(6,3),
eversion_t(3,4), 2,
osd_reqid_t(entity_name_t::CLIENT(777), 8, 2),
utime_t(1,2), 0);
log.add(modify);
EXPECT_TRUE(log.logged_object(oid));
pg_log_entry_t *entry = log.objects[oid];
EXPECT_EQ(modify.op, entry->op);
EXPECT_EQ(modify.version, entry->version);
EXPECT_EQ(modify.prior_version, entry->prior_version);
EXPECT_EQ(modify.user_version, entry->user_version);
EXPECT_EQ(modify.reqid, entry->reqid);
pg_log_entry_t del(pg_log_entry_t::DELETE, oid, eversion_t(7,4),
eversion_t(7,4), 3,
osd_reqid_t(entity_name_t::CLIENT(777), 8, 3),
utime_t(10,2), 0);
log.add(del);
EXPECT_TRUE(log.logged_object(oid));
entry = log.objects[oid];
EXPECT_EQ(del.op, entry->op);
EXPECT_EQ(del.version, entry->version);
EXPECT_EQ(del.prior_version, entry->prior_version);
EXPECT_EQ(del.user_version, entry->user_version);
EXPECT_EQ(del.reqid, entry->reqid);
log.add(
pg_log_entry_t(pg_log_entry_t::ERROR, oid, eversion_t(7,5), eversion_t(7,4),
3, osd_reqid_t(entity_name_t::CLIENT(777), 8, 4),
utime_t(20,1), -ENOENT));
EXPECT_TRUE(log.logged_object(oid));
entry = log.objects[oid];
EXPECT_EQ(del.op, entry->op);
EXPECT_EQ(del.version, entry->version);
EXPECT_EQ(del.prior_version, entry->prior_version);
EXPECT_EQ(del.user_version, entry->user_version);
EXPECT_EQ(del.reqid, entry->reqid);
}
TEST_F(PGLogTest, split_into_preserves_may_include_deletes) {
clear();
{
may_include_deletes_in_missing_dirty = false;
missing.may_include_deletes = true;
PGLog child_log(cct);
pg_t child_pg;
split_into(child_pg, 6, &child_log);
ASSERT_TRUE(child_log.get_missing().may_include_deletes);
ASSERT_TRUE(child_log.get_may_include_deletes_in_missing_dirty());
}
{
may_include_deletes_in_missing_dirty = false;
missing.may_include_deletes = false;
PGLog child_log(cct);
pg_t child_pg;
split_into(child_pg, 6, &child_log);
ASSERT_FALSE(child_log.get_missing().may_include_deletes);
ASSERT_FALSE(child_log.get_may_include_deletes_in_missing_dirty());
}
}
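// Fixture backing PGLog tests with a real ObjectStore ("memstore"): one
// object exists on disk with a valid OI_ATTR, another does not, so
// rebuild_missing_set_with_deletes() can be exercised against both.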
class PGLogTestRebuildMissing : public PGLogTest, public StoreTestFixture {
public:
PGLogTestRebuildMissing() : PGLogTest(), StoreTestFixture("memstore") {}
void SetUp() override {
StoreTestFixture::SetUp();
ObjectStore::Transaction t;
test_coll = coll_t(spg_t(pg_t(1, 1)));
ch = store->create_new_collection(test_coll);
t.create_collection(test_coll, 0);
store->queue_transaction(ch, std::move(t));
existing_oid = mk_obj(0);
nonexistent_oid = mk_obj(1);
ghobject_t existing_ghobj(existing_oid);
object_info_t existing_info;
existing_info.version = eversion_t(6, 2);
bufferlist enc_oi;
encode(existing_info, enc_oi, 0);
ObjectStore::Transaction t2;
t2.touch(test_coll, ghobject_t(existing_oid));
t2.setattr(test_coll, ghobject_t(existing_oid), OI_ATTR, enc_oi);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t2)));
info.last_backfill = hobject_t::get_max();
info.last_complete = eversion_t();
}
void TearDown() override {
clear();
missing.may_include_deletes = false;
StoreTestFixture::TearDown();
}
pg_info_t info;
coll_t test_coll;
hobject_t existing_oid, nonexistent_oid;
void run_rebuild_missing_test(const map<hobject_t, pg_missing_item> &expected_missing_items) {
rebuild_missing_set_with_deletes(store.get(), ch, info);
ASSERT_EQ(expected_missing_items, missing.get_items());
}
};
TEST_F(PGLogTestRebuildMissing, EmptyLog) {
missing.add(existing_oid, mk_evt(6, 2), mk_evt(6, 3), false);
missing.add(nonexistent_oid, mk_evt(7, 4), mk_evt(0, 0), false);
map<hobject_t, pg_missing_item> orig_missing = missing.get_items();
run_rebuild_missing_test(orig_missing);
}
TEST_F(PGLogTestRebuildMissing, SameVersionMod) {
missing.add(existing_oid, mk_evt(6, 2), mk_evt(6, 1), false);
log.add(mk_ple_mod(existing_oid, mk_evt(6, 2), mk_evt(6, 1)));
map<hobject_t, pg_missing_item> empty_missing;
run_rebuild_missing_test(empty_missing);
}
TEST_F(PGLogTestRebuildMissing, DelExisting) {
missing.add(existing_oid, mk_evt(6, 3), mk_evt(6, 2), false);
log.add(mk_ple_dt(existing_oid, mk_evt(7, 5), mk_evt(7, 4)));
map<hobject_t, pg_missing_item> expected;
expected[existing_oid] = pg_missing_item(mk_evt(7, 5), mk_evt(6, 2), true);
run_rebuild_missing_test(expected);
}
TEST_F(PGLogTestRebuildMissing, DelNonexistent) {
log.add(mk_ple_dt(nonexistent_oid, mk_evt(7, 5), mk_evt(7, 4)));
map<hobject_t, pg_missing_item> expected;
expected[nonexistent_oid] = pg_missing_item(mk_evt(7, 5), mk_evt(0, 0), true);
run_rebuild_missing_test(expected);
}
TEST_F(PGLogTestRebuildMissing, MissingNotInLog) {
missing.add(mk_obj(10), mk_evt(8, 12), mk_evt(8, 10), false);
log.add(mk_ple_dt(nonexistent_oid, mk_evt(7, 5), mk_evt(7, 4)));
map<hobject_t, pg_missing_item> expected;
expected[nonexistent_oid] = pg_missing_item(mk_evt(7, 5), mk_evt(0, 0), true);
expected[mk_obj(10)] = pg_missing_item(mk_evt(8, 12), mk_evt(8, 10), false);
run_rebuild_missing_test(expected);
}
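// Fixture for merge_log_dups(): tests build dup lists, merge them, and
// on teardown round-trip the result through write_log_and_missing() /
// read_log_and_missing() to confirm the on-disk encoding is lossless.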
class PGLogMergeDupsTest : protected PGLog, public StoreTestFixture {
public:
PGLogMergeDupsTest() : PGLog(g_ceph_context), StoreTestFixture("memstore") { }
void SetUp() override {
StoreTestFixture::SetUp();
ObjectStore::Transaction t;
test_coll = coll_t(spg_t(pg_t(1, 1)));
auto ch = store->create_new_collection(test_coll);
t.create_collection(test_coll, 0);
store->queue_transaction(ch, std::move(t));
}
void TearDown() override {
test_disk_roundtrip();
clear();
StoreTestFixture::TearDown();
}
static pg_log_dup_t create_dup_entry(uint a, uint b) {
// make each dup_entry unique by using different client id's
static uint client_id = 777;
return pg_log_dup_t(eversion_t(a, b),
a,
osd_reqid_t(entity_name_t::CLIENT(client_id++), 8, 1),
0);
}
static std::vector<pg_log_dup_t> example_dups_1() {
std::vector<pg_log_dup_t> result = {
create_dup_entry(10, 11),
create_dup_entry(10, 12),
create_dup_entry(11, 1),
create_dup_entry(12, 3),
create_dup_entry(13, 99)
};
return result;
}
static std::vector<pg_log_dup_t> example_dups_2() {
std::vector<pg_log_dup_t> result = {
create_dup_entry(12, 3),
create_dup_entry(13, 99),
create_dup_entry(15, 11),
create_dup_entry(16, 14),
create_dup_entry(16, 32)
};
return result;
}
void add_dups(uint a, uint b) {
log.dups.push_back(create_dup_entry(a, b));
write_from_dups = std::min(write_from_dups, log.dups.back().version);
}
void add_dups(const std::vector<pg_log_dup_t>& l) {
for (auto& i : l) {
log.dups.push_back(i);
write_from_dups = std::min(write_from_dups, log.dups.back().version);
}
}
static void add_dups(IndexedLog& log, const std::vector<pg_log_dup_t>& dups) {
for (auto& i : dups) {
log.dups.push_back(i);
}
}
void check_order() {
eversion_t prev(0, 0);
for (auto& i : log.dups) {
EXPECT_LT(prev, i.version) << "verify versions monotonically increase";
prev = i.version;
}
}
void check_index() {
EXPECT_EQ(log.dups.size(), log.dup_index.size());
for (auto& i : log.dups) {
EXPECT_EQ(1u, log.dup_index.count(i.reqid));
}
}
void test_disk_roundtrip() {
ObjectStore::Transaction t;
hobject_t hoid;
hoid.pool = 1;
hoid.oid = "log";
ghobject_t log_oid(hoid);
map<string, bufferlist> km;
write_log_and_missing(t, &km, test_coll, log_oid, false);
if (!km.empty()) {
t.omap_setkeys(test_coll, log_oid, km);
}
auto ch = store->open_collection(test_coll);
ASSERT_EQ(0, store->queue_transaction(ch, std::move(t)));
auto orig_dups = log.dups;
clear();
ostringstream err;
read_log_and_missing(store.get(), ch, log_oid,
pg_info_t(), err, false);
ASSERT_EQ(orig_dups.size(), log.dups.size());
ASSERT_EQ(orig_dups, log.dups);
auto dups_it = log.dups.begin();
for (auto orig_dup : orig_dups) {
ASSERT_EQ(orig_dup, *dups_it);
++dups_it;
}
}
coll_t test_coll;
};
TEST_F(PGLogMergeDupsTest, OtherEmpty) {
log.tail = eversion_t(14, 5);
IndexedLog olog;
add_dups(example_dups_1());
index();
bool changed = merge_log_dups(olog);
EXPECT_FALSE(changed);
EXPECT_EQ(5u, log.dups.size());
if (5 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
EXPECT_EQ(13u, log.dups.back().version.epoch);
EXPECT_EQ(99u, log.dups.back().version.version);
}
check_order();
check_index();
}
TEST_F(PGLogMergeDupsTest, AmEmpty) {
log.tail = eversion_t(14, 5);
index();
IndexedLog olog;
add_dups(olog, example_dups_1());
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
EXPECT_EQ(5u, log.dups.size());
if (5 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
EXPECT_EQ(13u, log.dups.back().version.epoch);
EXPECT_EQ(99u, log.dups.back().version.version);
}
check_order();
check_index();
}
TEST_F(PGLogMergeDupsTest, AmEmptyOverlap) {
log.tail = eversion_t(12, 3);
index();
IndexedLog olog;
add_dups(olog, example_dups_1());
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
EXPECT_EQ(4u, log.dups.size());
if (4 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
EXPECT_EQ(12u, log.dups.back().version.epoch);
EXPECT_EQ(3u, log.dups.back().version.version);
}
check_order();
check_index();
}
TEST_F(PGLogMergeDupsTest, Same) {
log.tail = eversion_t(14, 1);
IndexedLog olog;
add_dups(example_dups_1());
index();
add_dups(olog, example_dups_1());
bool changed = merge_log_dups(olog);
EXPECT_FALSE(changed);
EXPECT_EQ(5u, log.dups.size());
if (5 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
EXPECT_EQ(13u, log.dups.back().version.epoch);
EXPECT_EQ(99u, log.dups.back().version.version);
}
check_order();
check_index();
}
TEST_F(PGLogMergeDupsTest, Later) {
log.tail = eversion_t(16, 14);
IndexedLog olog;
add_dups(example_dups_1());
index();
add_dups(olog, example_dups_2());
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
EXPECT_EQ(7u, log.dups.size());
if (7 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
EXPECT_EQ(16u, log.dups.back().version.epoch);
EXPECT_EQ(14u, log.dups.back().version.version);
}
check_order();
check_index();
}
TEST_F(PGLogMergeDupsTest, Earlier) {
log.tail = eversion_t(17, 2);
IndexedLog olog;
add_dups(example_dups_2());
index();
add_dups(olog, example_dups_1());
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
EXPECT_EQ(8u, log.dups.size());
  if (8 == log.dups.size()) {
EXPECT_EQ(10u, log.dups.front().version.epoch);
EXPECT_EQ(11u, log.dups.front().version.version);
EXPECT_EQ(16u, log.dups.back().version.epoch);
EXPECT_EQ(32u, log.dups.back().version.version);
}
check_order();
check_index();
}
TEST_F(PGLogMergeDupsTest, Superset) {
log.tail = eversion_t(17, 2);
IndexedLog olog;
add_dups(example_dups_1());
index();
olog.dups.push_back(create_dup_entry(9, 5));
olog.dups.push_back(create_dup_entry(15, 11));
bool changed = merge_log_dups(olog);
EXPECT_TRUE(changed);
EXPECT_EQ(7u, log.dups.size());
if (7 == log.dups.size()) {
EXPECT_EQ(9u, log.dups.front().version.epoch);
EXPECT_EQ(5u, log.dups.front().version.version);
EXPECT_EQ(15u, log.dups.back().version.epoch);
EXPECT_EQ(11u, log.dups.back().version.version);
}
check_order();
check_index();
}
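// The trim/copy tests below drive IndexedLog::trim(), copy_up_to() and
// copy_after(); SetUp(n) sets osd_pg_log_dups_tracked to n, so each test
// controls how many trimmed entries are retained as dups.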
struct PGLogTrimTest :
public ::testing::Test,
public PGLogTestBase,
public PGLog::IndexedLog
{
CephContext *cct = g_ceph_context;
using ::testing::Test::SetUp;
void SetUp(unsigned dup_track) {
constexpr size_t size = 10;
char dup_track_s[size];
snprintf(dup_track_s, size, "%u", dup_track);
cct->_conf.set_val_or_die("osd_pg_log_dups_tracked", dup_track_s);
}
}; // struct PGLogTrimTest
TEST_F(PGLogTrimTest, TestMakingCephContext)
{
SetUp(5);
EXPECT_EQ(5u, cct->_conf->osd_pg_log_dups_tracked);
}
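// Partial trim: the first trim (dups tracked = 20) drops three entries
// and keeps two of them as dups; the second trim (dups tracked = 15)
// moves one more entry into dups without discarding any existing dup
// (trimmed_dups stays empty both times).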
TEST_F(PGLogTrimTest, TestPartialTrim)
{
SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(24, 0);
log.skip_can_rollback_to_to_head();
log.head = mk_evt(9, 0);
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(19, 160), mk_evt(25, 152)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166)));
std::set<eversion_t> trimmed;
std::set<std::string> trimmed_dups;
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(19, 157), &trimmed, &trimmed_dups, &write_from_dups);
EXPECT_EQ(eversion_t(15, 150), write_from_dups);
EXPECT_EQ(3u, log.log.size());
EXPECT_EQ(3u, trimmed.size());
EXPECT_EQ(2u, log.dups.size());
EXPECT_EQ(0u, trimmed_dups.size());
SetUp(15);
std::set<eversion_t> trimmed2;
std::set<std::string> trimmed_dups2;
eversion_t write_from_dups2 = eversion_t::max();
log.trim(cct, mk_evt(20, 164), &trimmed2, &trimmed_dups2, &write_from_dups2);
EXPECT_EQ(eversion_t(19, 160), write_from_dups2);
EXPECT_EQ(2u, log.log.size());
EXPECT_EQ(1u, trimmed2.size());
EXPECT_EQ(3u, log.dups.size());
EXPECT_EQ(0u, trimmed_dups2.size());
}
TEST_F(PGLogTrimTest, TestTrimNoTrimmed) {
SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(20, 0);
log.skip_can_rollback_to_to_head();
log.head = mk_evt(9, 0);
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 160), mk_evt(25, 152)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166)));
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(19, 157), nullptr, nullptr, &write_from_dups);
EXPECT_EQ(eversion_t(15, 150), write_from_dups);
EXPECT_EQ(3u, log.log.size());
EXPECT_EQ(2u, log.dups.size());
}
TEST_F(PGLogTrimTest, TestTrimNoDups)
{
SetUp(10);
PGLog::IndexedLog log;
log.head = mk_evt(20, 0);
log.skip_can_rollback_to_to_head();
log.head = mk_evt(9, 0);
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 160), mk_evt(25, 152)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166)));
std::set<eversion_t> trimmed;
std::set<std::string> trimmed_dups;
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(19, 157), &trimmed, &trimmed_dups, &write_from_dups);
EXPECT_EQ(eversion_t::max(), write_from_dups);
EXPECT_EQ(3u, log.log.size());
EXPECT_EQ(3u, trimmed.size());
EXPECT_EQ(0u, log.dups.size());
EXPECT_EQ(0u, trimmed_dups.size());
}
TEST_F(PGLogTrimTest, TestNoTrim)
{
SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(24, 0);
log.skip_can_rollback_to_to_head();
log.head = mk_evt(9, 0);
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(19, 160), mk_evt(25, 152)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166)));
std::set<eversion_t> trimmed;
std::set<std::string> trimmed_dups;
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(9, 99), &trimmed, &trimmed_dups, &write_from_dups);
EXPECT_EQ(eversion_t::max(), write_from_dups);
EXPECT_EQ(6u, log.log.size());
EXPECT_EQ(0u, trimmed.size());
EXPECT_EQ(0u, log.dups.size());
EXPECT_EQ(0u, trimmed_dups.size());
}
TEST_F(PGLogTrimTest, TestTrimAll)
{
SetUp(20);
PGLog::IndexedLog log;
EXPECT_EQ(0u, log.dup_index.size()); // Sanity check
log.head = mk_evt(24, 0);
log.skip_can_rollback_to_to_head();
log.head = mk_evt(9, 0);
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(19, 160), mk_evt(25, 152)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166)));
std::set<eversion_t> trimmed;
std::set<std::string> trimmed_dups;
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(22, 180), &trimmed, &trimmed_dups, &write_from_dups);
EXPECT_EQ(eversion_t(15, 150), write_from_dups);
EXPECT_EQ(0u, log.log.size());
EXPECT_EQ(6u, trimmed.size());
EXPECT_EQ(5u, log.dups.size());
EXPECT_EQ(0u, trimmed_dups.size());
EXPECT_EQ(0u, log.dup_index.size()); // dup_index entry should be trimmed
}
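// get_request() must find a reqid that is still in the live log, find
// one that was trimmed into the dups, and fail for one trimmed past the
// dup window.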
TEST_F(PGLogTrimTest, TestGetRequest) {
SetUp(20);
PGLog::IndexedLog log;
log.head = mk_evt(20, 0);
log.skip_can_rollback_to_to_head();
log.head = mk_evt(9, 0);
entity_name_t client = entity_name_t::CLIENT(777);
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(8, 70),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 150), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 155), mk_evt(15, 150),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 160), mk_evt(25, 152),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 165), mk_evt(26, 160),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 167), mk_evt(31, 166),
osd_reqid_t(client, 8, 6)));
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(19, 157), nullptr, nullptr, &write_from_dups);
EXPECT_EQ(eversion_t(15, 150), write_from_dups);
EXPECT_EQ(3u, log.log.size());
EXPECT_EQ(2u, log.dups.size());
eversion_t version;
version_t user_version;
int return_code;
vector<pg_log_op_return_item_t> op_returns;
osd_reqid_t log_reqid = osd_reqid_t(client, 8, 5);
osd_reqid_t dup_reqid = osd_reqid_t(client, 8, 3);
osd_reqid_t bad_reqid = osd_reqid_t(client, 8, 1);
bool result;
result = log.get_request(log_reqid, &version, &user_version, &return_code,
&op_returns);
EXPECT_EQ(true, result);
EXPECT_EQ(mk_evt(21, 165), version);
result = log.get_request(dup_reqid, &version, &user_version, &return_code,
&op_returns);
EXPECT_EQ(true, result);
EXPECT_EQ(mk_evt(15, 155), version);
result = log.get_request(bad_reqid, &version, &user_version, &return_code,
&op_returns);
EXPECT_FALSE(result);
}
TEST_F(PGLogTest, _merge_object_divergent_entries) {
{
// Test for issue 20843
clear();
hobject_t hoid(object_t(/*name*/"notify.7"),
/*key*/string(""),
/*snap*/7,
/*hash*/77,
/*pool*/5,
/*nspace*/string(""));
mempool::osd_pglog::list<pg_log_entry_t> orig_entries;
orig_entries.push_back(mk_ple_mod(hoid, eversion_t(8336, 957), eversion_t(8336, 952)));
orig_entries.push_back(mk_ple_err(hoid, eversion_t(8336, 958)));
orig_entries.push_back(mk_ple_err(hoid, eversion_t(8336, 959)));
orig_entries.push_back(mk_ple_mod(hoid, eversion_t(8336, 960), eversion_t(8336, 957)));
log.add(mk_ple_mod(hoid, eversion_t(8973, 1075), eversion_t(8971, 1070)));
missing.add(hoid,
/*need*/eversion_t(8971, 1070),
/*have*/eversion_t(8336, 952),
false);
pg_info_t oinfo;
LogHandler rollbacker;
_merge_object_divergent_entries(log, hoid,
orig_entries, oinfo,
log.get_can_rollback_to(),
missing, &rollbacker,
this);
// No core dump
}
{
// skip leading error entries
clear();
hobject_t hoid(object_t(/*name*/"notify.7"),
/*key*/string(""),
/*snap*/7,
/*hash*/77,
/*pool*/5,
/*nspace*/string(""));
mempool::osd_pglog::list<pg_log_entry_t> orig_entries;
orig_entries.push_back(mk_ple_err(hoid, eversion_t(8336, 956)));
orig_entries.push_back(mk_ple_mod(hoid, eversion_t(8336, 957), eversion_t(8336, 952)));
log.add(mk_ple_mod(hoid, eversion_t(8973, 1075), eversion_t(8971, 1070)));
missing.add(hoid,
/*need*/eversion_t(8971, 1070),
/*have*/eversion_t(8336, 952),
false);
pg_info_t oinfo;
LogHandler rollbacker;
_merge_object_divergent_entries(log, hoid,
orig_entries, oinfo,
log.get_can_rollback_to(),
missing, &rollbacker,
this);
// No core dump
}
}
TEST(eversion_t, get_key_name) {
eversion_t a(1234, 5678);
std::string a_key_name = a.get_key_name();
EXPECT_EQ("0000001234.00000000000000005678", a_key_name);
}
TEST(pg_log_dup_t, get_key_name) {
pg_log_dup_t a(eversion_t(1234, 5678),
13,
osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
15);
std::string a_key_name = a.get_key_name();
EXPECT_EQ("dup_0000001234.00000000000000005678", a_key_name);
}
// This tests that trim() makes copies of
// 2 log entries (107, 106) and 3 additional for a total
// of 5 dups. Nothing from the original dups is copied.
TEST_F(PGLogTrimTest, TestTrimDups) {
SetUp(5);
PGLog::IndexedLog log;
log.head = mk_evt(21, 107);
log.skip_can_rollback_to_to_head();
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(21, 105), nullptr, nullptr, &write_from_dups);
EXPECT_EQ(eversion_t(20, 103), write_from_dups) << log;
EXPECT_EQ(2u, log.log.size()) << log;
EXPECT_EQ(4u, log.dups.size()) << log;
}
// This tests that trim() makes copies of
// 4 log entries (107, 106, 105, 104) and 5 additional for a total
// of 9 dups. Only 1 of the 2 existing dups is copied.
TEST_F(PGLogTrimTest, TestTrimDups2) {
SetUp(9);
PGLog::IndexedLog log;
log.head = mk_evt(21, 107);
log.skip_can_rollback_to_to_head();
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
eversion_t write_from_dups = eversion_t::max();
log.trim(cct, mk_evt(20, 103), nullptr, nullptr, &write_from_dups);
EXPECT_EQ(eversion_t(10, 100), write_from_dups) << log;
EXPECT_EQ(4u, log.log.size()) << log;
EXPECT_EQ(6u, log.dups.size()) << log;
}
// This tests that copy_up_to() makes copies of
// 2 log entries (107, 106) and 3 additional for a total
// of 5 dups. Nothing from the original dups is copied.
TEST_F(PGLogTrimTest, TestCopyUpTo) {
SetUp(5);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_up_to(cct, log, 2);
EXPECT_EQ(2u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(21, 105)) << copy;
// Tracking 5 means 3 additional as dups
EXPECT_EQ(3u, copy.dups.size()) << copy;
}
// This tests that copy_up_to() makes copies of
// 4 log entries (107, 106, 105, 104) and 5 additional for a total
// of 9 dups. Only 1 of the 2 existing dups is copied.
TEST_F(PGLogTrimTest, TestCopyUpTo2) {
SetUp(9);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_up_to(cct, log, 4);
EXPECT_EQ(4u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(20, 103)) << copy;
  // Tracking 9 means 5 additional as dups
EXPECT_EQ(5u, copy.dups.size()) << copy;
}
// This tests copy_after() by specifying a version that copies
// 2 log entries (107, 106) and 3 additional for a total
// of 5 dups. Nothing from the original dups is copied.
TEST_F(PGLogTrimTest, TestCopyAfter) {
SetUp(5);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_after(cct, log, mk_evt(21, 105));
EXPECT_EQ(2u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(21, 105)) << copy;
// Tracking 5 means 3 additional as dups
EXPECT_EQ(3u, copy.dups.size()) << copy;
}
// This copies everything, dups and log entries alike, because the dup
// tracking limit is large and copy_after() is given the log's tail.
TEST_F(PGLogTrimTest, TestCopyAfter2) {
SetUp(3000);
PGLog::IndexedLog log, copy;
log.tail = mk_evt(9, 99);
log.head = mk_evt(9, 99);
entity_name_t client = entity_name_t::CLIENT(777);
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 93), mk_evt(8, 92), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 94), mk_evt(8, 93), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 95), mk_evt(8, 94), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 96), mk_evt(8, 95), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 97), mk_evt(8, 96), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(8, 98), mk_evt(8, 97), osd_reqid_t(client, 8, 1))));
log.dups.push_back(pg_log_dup_t(mk_ple_mod(mk_obj(1),
mk_evt(9, 99), mk_evt(8, 98), osd_reqid_t(client, 8, 1))));
log.add(mk_ple_mod(mk_obj(1), mk_evt(10, 100), mk_evt(9, 99),
osd_reqid_t(client, 8, 1)));
log.add(mk_ple_dt(mk_obj(2), mk_evt(15, 101), mk_evt(10, 100),
osd_reqid_t(client, 8, 2)));
log.add(mk_ple_mod_rb(mk_obj(3), mk_evt(15, 102), mk_evt(15, 101),
osd_reqid_t(client, 8, 3)));
log.add(mk_ple_mod(mk_obj(1), mk_evt(20, 103), mk_evt(15, 102),
osd_reqid_t(client, 8, 4)));
log.add(mk_ple_mod(mk_obj(4), mk_evt(21, 104), mk_evt(20, 103),
osd_reqid_t(client, 8, 5)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 105), mk_evt(21, 104),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 106), mk_evt(21, 105),
osd_reqid_t(client, 8, 6)));
log.add(mk_ple_dt_rb(mk_obj(5), mk_evt(21, 107), mk_evt(21, 106),
osd_reqid_t(client, 8, 6)));
copy.copy_after(cct, log, mk_evt(9, 99));
EXPECT_EQ(8u, copy.log.size()) << copy;
EXPECT_EQ(copy.head, mk_evt(21, 107)) << copy;
EXPECT_EQ(copy.tail, mk_evt(9, 99)) << copy;
// Tracking 3000 is larger than all entries, so all dups copied
EXPECT_EQ(7u, copy.dups.size()) << copy;
}
// Local Variables:
// compile-command: "cd ../.. ; make unittest_pglog ; ./unittest_pglog --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* "
// End:
| 98,502 | 29.308615 | 130 |
cc
|
null |
ceph-main/src/test/osd/TestRados.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Cond.h"
#include "common/errno.h"
#include "common/version.h"
#include <iostream>
#include <sstream>
#include <map>
#include <numeric>
#include <string>
#include <vector>
#include <stdlib.h>
#include <unistd.h>
#include "test/osd/RadosModel.h"
using namespace std;
class WeightedTestGenerator : public TestOpGenerator
{
public:
WeightedTestGenerator(int ops,
int objects,
map<TestOpType, unsigned int> op_weights,
TestOpStat *stats,
int max_seconds,
bool ec_pool,
bool balance_reads,
bool localize_reads,
bool set_redirect,
bool set_chunk,
bool enable_dedup) :
m_nextop(NULL), m_op(0), m_ops(ops), m_seconds(max_seconds),
m_objects(objects), m_stats(stats),
m_total_weight(0),
m_ec_pool(ec_pool),
m_balance_reads(balance_reads),
m_localize_reads(localize_reads),
m_set_redirect(set_redirect),
m_set_chunk(set_chunk),
m_enable_dedup(enable_dedup)
{
m_start = time(0);
for (map<TestOpType, unsigned int>::const_iterator it = op_weights.begin();
it != op_weights.end();
++it) {
m_total_weight += it->second;
m_weight_sums.insert(pair<TestOpType, unsigned int>(it->first,
m_total_weight));
}
if (m_set_redirect || m_set_chunk) {
if (m_set_redirect) {
m_ops = ops+m_objects+m_objects;
} else {
        /* create 10 chunks per object */
m_ops = ops+m_objects+m_objects*10;
}
}
}
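  // Pick the next op: first write each object once, then draw ops at
  // random using the cumulative weights in m_weight_sums as a discrete
  // distribution over op types (stopping once m_ops or m_seconds is hit).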
TestOp *next(RadosTestContext &context) override
{
TestOp *retval = NULL;
++m_op;
if (m_op <= m_objects && !m_set_redirect && !m_set_chunk ) {
stringstream oid;
oid << m_op;
/*if (m_op % 2) {
// make it a long name
oid << " " << string(300, 'o');
}*/
cout << m_op << ": write initial oid " << oid.str() << std::endl;
context.oid_not_flushing.insert(oid.str());
if (m_ec_pool) {
return new WriteOp(m_op, &context, oid.str(), true, true);
} else {
return new WriteOp(m_op, &context, oid.str(), false, true);
}
} else if (m_op >= m_ops) {
return NULL;
}
if (m_set_redirect || m_set_chunk) {
if (init_extensible_tier(context, retval)) {
return retval;
}
}
if (m_nextop) {
retval = m_nextop;
m_nextop = NULL;
return retval;
}
while (retval == NULL) {
unsigned int rand_val = rand() % m_total_weight;
time_t now = time(0);
if (m_seconds && now - m_start > m_seconds)
break;
for (map<TestOpType, unsigned int>::const_iterator it = m_weight_sums.begin();
it != m_weight_sums.end();
++it) {
if (rand_val < it->second) {
retval = gen_op(context, it->first);
break;
}
}
}
return retval;
}
bool init_extensible_tier(RadosTestContext &context, TestOp *& op) {
/*
* set-redirect or set-chunk test (manifest test)
* 0. make default objects (using create op)
* 1. set-redirect or set-chunk
* 2. initialize target objects (using write op)
* 3. wait for set-* completion
*/
int copy_manifest_end = 0;
if (m_set_chunk) {
copy_manifest_end = m_objects*2;
} else {
copy_manifest_end = m_objects*3;
}
int make_manifest_end = copy_manifest_end;
if (m_set_chunk) {
      /* make 10 chunks per object */
make_manifest_end = make_manifest_end + m_objects * 10;
} else {
/* redirect */
make_manifest_end = make_manifest_end + m_objects;
}
if (m_op <= m_objects) {
stringstream oid;
oid << m_op;
/*if (m_op % 2) {
oid << " " << string(300, 'o');
}*/
cout << m_op << ": write initial oid " << oid.str() << std::endl;
context.oid_not_flushing.insert(oid.str());
if (m_ec_pool) {
op = new WriteOp(m_op, &context, oid.str(), true, true);
} else {
op = new WriteOp(m_op, &context, oid.str(), false, true);
}
return true;
} else if (m_op <= copy_manifest_end) {
stringstream oid, oid2;
//int _oid = m_op-m_objects;
int _oid = m_op % m_objects + 1;
oid << _oid;
/*if ((_oid) % 2) {
oid << " " << string(300, 'o');
}*/
if (context.oid_in_use.count(oid.str())) {
/* previous write is not finished */
op = NULL;
m_op--;
cout << m_op << " wait for completion of write op! " << std::endl;
return true;
}
int _oid2 = m_op - m_objects + 1;
if (_oid2 > copy_manifest_end - m_objects) {
_oid2 -= (copy_manifest_end - m_objects);
}
oid2 << _oid2 << " " << context.low_tier_pool_name;
if ((_oid2) % 2) {
oid2 << " " << string(300, 'm');
}
cout << m_op << ": " << "copy oid " << oid.str() << " target oid "
<< oid2.str() << std::endl;
op = new CopyOp(m_op, &context, oid.str(), oid2.str(), context.low_tier_pool_name);
return true;
} else if (m_op <= make_manifest_end) {
if (m_set_redirect) {
stringstream oid, oid2;
int _oid = m_op-copy_manifest_end;
oid << _oid;
/*if ((_oid) % 2) {
oid << " " << string(300, 'o');
}*/
oid2 << _oid << " " << context.low_tier_pool_name;
if ((_oid) % 2) {
oid2 << " " << string(300, 'm');
}
if (context.oid_in_use.count(oid.str())) {
/* previous copy is not finished */
op = NULL;
m_op--;
cout << m_op << " retry set_redirect !" << std::endl;
return true;
}
cout << m_op << ": " << "set_redirect oid " << oid.str() << " target oid "
<< oid2.str() << std::endl;
op = new SetRedirectOp(m_op, &context, oid.str(), oid2.str(), context.pool_name);
return true;
} else if (m_set_chunk) {
stringstream oid;
int _oid = m_op % m_objects +1;
oid << _oid;
/*if ((_oid) % 2) {
oid << " " << string(300, 'o');
}*/
if (context.oid_in_use.count(oid.str())) {
/* previous set-chunk is not finished */
op = NULL;
m_op--;
cout << m_op << " retry set_chunk !" << std::endl;
return true;
}
stringstream oid2;
oid2 << _oid << " " << context.low_tier_pool_name;
if ((_oid) % 2) {
oid2 << " " << string(300, 'm');
}
cout << m_op << ": " << "set_chunk oid " << oid.str()
<< " target oid " << oid2.str() << std::endl;
op = new SetChunkOp(m_op, &context, oid.str(), oid2.str(), m_stats);
return true;
}
} else if (m_op == make_manifest_end + 1) {
int set_size = context.oid_not_in_use.size();
int set_manifest_size = context.oid_redirect_not_in_use.size();
cout << m_op << " oid_not_in_use " << set_size << " oid_redirect_not_in_use " << set_manifest_size << std::endl;
/* wait for redirect or set_chunk initialization */
if (set_size != m_objects || set_manifest_size != 0) {
op = NULL;
m_op--;
cout << m_op << " wait for manifest initialization " << std::endl;
return true;
}
for (int t_op = m_objects+1; t_op <= m_objects*2; t_op++) {
stringstream oid;
oid << t_op << " " << context.low_tier_pool_name;
if (t_op % 2) {
oid << " " << string(300, 'm');
}
cout << " redirect_not_in_use: " << oid.str() << std::endl;
context.oid_redirect_not_in_use.insert(oid.str());
}
}
return false;
}
private:
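  // Map an op type to a concrete TestOp, choosing target oids from the
  // not-in-use sets so concurrent in-flight ops never race on an object.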
TestOp *gen_op(RadosTestContext &context, TestOpType type)
{
string oid, oid2;
ceph_assert(context.oid_not_in_use.size());
switch (type) {
case TEST_OP_READ:
oid = *(rand_choose(context.oid_not_in_use));
return new ReadOp(m_op, &context, oid, m_balance_reads, m_localize_reads,
m_stats);
case TEST_OP_WRITE:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "write oid " << oid << " current snap is "
<< context.current_snap << std::endl;
return new WriteOp(m_op, &context, oid, false, false, m_stats);
case TEST_OP_WRITE_EXCL:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "write (excl) oid "
<< oid << " current snap is "
<< context.current_snap << std::endl;
return new WriteOp(m_op, &context, oid, false, true, m_stats);
case TEST_OP_WRITESAME:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "writesame oid "
<< oid << " current snap is "
<< context.current_snap << std::endl;
return new WriteSameOp(m_op, &context, oid, m_stats);
case TEST_OP_DELETE:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "delete oid " << oid << " current snap is "
<< context.current_snap << std::endl;
return new DeleteOp(m_op, &context, oid, m_stats);
case TEST_OP_SNAP_CREATE:
cout << m_op << ": " << "snap_create" << std::endl;
return new SnapCreateOp(m_op, &context, m_stats);
case TEST_OP_SNAP_REMOVE:
if (context.snaps.size() <= context.snaps_in_use.size()) {
return NULL;
}
while (true) {
int snap = rand_choose(context.snaps)->first;
if (context.snaps_in_use.lookup(snap))
continue; // in use; try again!
cout << m_op << ": " << "snap_remove snap " << snap << std::endl;
return new SnapRemoveOp(m_op, &context, snap, m_stats);
}
case TEST_OP_ROLLBACK:
{
string oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "rollback oid " << oid << " current snap is "
<< context.current_snap << std::endl;
return new RollbackOp(m_op, &context, oid);
}
case TEST_OP_SETATTR:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "setattr oid " << oid
<< " current snap is " << context.current_snap << std::endl;
return new SetAttrsOp(m_op, &context, oid, m_stats);
case TEST_OP_RMATTR:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "rmattr oid " << oid
<< " current snap is " << context.current_snap << std::endl;
return new RemoveAttrsOp(m_op, &context, oid, m_stats);
case TEST_OP_WATCH:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "watch oid " << oid
<< " current snap is " << context.current_snap << std::endl;
return new WatchOp(m_op, &context, oid, m_stats);
case TEST_OP_COPY_FROM:
oid = *(rand_choose(context.oid_not_in_use));
do {
oid2 = *(rand_choose(context.oid_not_in_use));
} while (oid == oid2);
cout << m_op << ": " << "copy_from oid " << oid << " from oid " << oid2
<< " current snap is " << context.current_snap << std::endl;
return new CopyFromOp(m_op, &context, oid, oid2, m_stats);
case TEST_OP_HIT_SET_LIST:
{
uint32_t hash = rjhash32(rand());
cout << m_op << ": " << "hit_set_list " << hash << std::endl;
return new HitSetListOp(m_op, &context, hash, m_stats);
}
case TEST_OP_UNDIRTY:
{
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "undirty oid " << oid << std::endl;
return new UndirtyOp(m_op, &context, oid, m_stats);
}
case TEST_OP_IS_DIRTY:
{
oid = *(rand_choose(context.oid_not_flushing));
return new IsDirtyOp(m_op, &context, oid, m_stats);
}
case TEST_OP_CACHE_FLUSH:
{
oid = *(rand_choose(context.oid_not_in_use));
return new CacheFlushOp(m_op, &context, oid, m_stats, true);
}
case TEST_OP_CACHE_TRY_FLUSH:
{
oid = *(rand_choose(context.oid_not_in_use));
return new CacheFlushOp(m_op, &context, oid, m_stats, false);
}
case TEST_OP_CACHE_EVICT:
{
oid = *(rand_choose(context.oid_not_in_use));
return new CacheEvictOp(m_op, &context, oid, m_stats);
}
case TEST_OP_APPEND:
oid = *(rand_choose(context.oid_not_in_use));
cout << "append oid " << oid << " current snap is "
<< context.current_snap << std::endl;
return new WriteOp(m_op, &context, oid, true, false, m_stats);
case TEST_OP_APPEND_EXCL:
oid = *(rand_choose(context.oid_not_in_use));
cout << "append oid (excl) " << oid << " current snap is "
<< context.current_snap << std::endl;
return new WriteOp(m_op, &context, oid, true, true, m_stats);
case TEST_OP_CHUNK_READ:
oid = *(rand_choose(context.oid_not_in_use));
      cout << m_op << ": " << "chunk read oid " << oid << std::endl;
return new ChunkReadOp(m_op, &context, oid, context.pool_name, false, m_stats);
case TEST_OP_TIER_PROMOTE:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "tier_promote oid " << oid << std::endl;
return new TierPromoteOp(m_op, &context, oid, m_stats);
case TEST_OP_TIER_FLUSH:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "tier_flush oid " << oid << std::endl;
return new TierFlushOp(m_op, &context, oid, m_stats);
case TEST_OP_SET_REDIRECT:
oid = *(rand_choose(context.oid_not_in_use));
oid2 = *(rand_choose(context.oid_redirect_not_in_use));
cout << m_op << ": " << "set_redirect oid " << oid << " target oid " << oid2 << std::endl;
return new SetRedirectOp(m_op, &context, oid, oid2, context.pool_name, m_stats);
case TEST_OP_UNSET_REDIRECT:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "unset_redirect oid " << oid << std::endl;
return new UnsetRedirectOp(m_op, &context, oid, m_stats);
case TEST_OP_SET_CHUNK:
{
ceph_assert(m_enable_dedup);
oid = *(rand_choose(context.oid_not_in_use));
        cout << m_op << ": " << "set_chunk oid " << oid << std::endl;
return new SetChunkOp(m_op, &context, oid, "", m_stats);
}
case TEST_OP_TIER_EVICT:
oid = *(rand_choose(context.oid_not_in_use));
cout << m_op << ": " << "tier_evict oid " << oid << std::endl;
return new TierEvictOp(m_op, &context, oid, m_stats);
default:
cerr << m_op << ": Invalid op type " << type << std::endl;
ceph_abort();
return nullptr;
}
}
TestOp *m_nextop;
int m_op;
int m_ops;
int m_seconds;
int m_objects;
time_t m_start;
TestOpStat *m_stats;
map<TestOpType, unsigned int> m_weight_sums;
unsigned int m_total_weight;
bool m_ec_pool;
bool m_balance_reads;
bool m_localize_reads;
bool m_set_redirect;
bool m_set_chunk;
bool m_enable_dedup;
};
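// Weighted rados stress tool (built as ceph_test_rados). An illustrative
// invocation (flag names come from the parser below; the weights are
// arbitrary):
//
//   ceph_test_rados --pool rbd --max-ops 1000 --objects 50 \
//     --op read 100 --op write 100 --op delete 50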
int main(int argc, char **argv)
{
int ops = 1000;
int objects = 50;
int max_in_flight = 16;
int64_t size = 4000000; // 4 MB
int64_t min_stride_size = -1, max_stride_size = -1;
int max_seconds = 0;
bool pool_snaps = false;
bool write_fadvise_dontneed = false;
struct {
TestOpType op;
const char *name;
bool ec_pool_valid;
} op_types[] = {
{ TEST_OP_READ, "read", true },
{ TEST_OP_WRITE, "write", false },
{ TEST_OP_WRITE_EXCL, "write_excl", false },
{ TEST_OP_WRITESAME, "writesame", false },
{ TEST_OP_DELETE, "delete", true },
{ TEST_OP_SNAP_CREATE, "snap_create", true },
{ TEST_OP_SNAP_REMOVE, "snap_remove", true },
{ TEST_OP_ROLLBACK, "rollback", true },
{ TEST_OP_SETATTR, "setattr", true },
{ TEST_OP_RMATTR, "rmattr", true },
{ TEST_OP_WATCH, "watch", true },
{ TEST_OP_COPY_FROM, "copy_from", true },
{ TEST_OP_HIT_SET_LIST, "hit_set_list", true },
{ TEST_OP_IS_DIRTY, "is_dirty", true },
{ TEST_OP_UNDIRTY, "undirty", true },
{ TEST_OP_CACHE_FLUSH, "cache_flush", true },
{ TEST_OP_CACHE_TRY_FLUSH, "cache_try_flush", true },
{ TEST_OP_CACHE_EVICT, "cache_evict", true },
{ TEST_OP_APPEND, "append", true },
{ TEST_OP_APPEND_EXCL, "append_excl", true },
{ TEST_OP_SET_REDIRECT, "set_redirect", true },
{ TEST_OP_UNSET_REDIRECT, "unset_redirect", true },
{ TEST_OP_CHUNK_READ, "chunk_read", true },
{ TEST_OP_TIER_PROMOTE, "tier_promote", true },
{ TEST_OP_TIER_FLUSH, "tier_flush", true },
{ TEST_OP_SET_CHUNK, "set_chunk", true },
{ TEST_OP_TIER_EVICT, "tier_evict", true },
{ TEST_OP_READ /* grr */, NULL },
};
struct {
const char *name;
} chunk_algo_types[] = {
{ "fastcdc" },
{ "fixcdc" },
};
map<TestOpType, unsigned int> op_weights;
string pool_name = "rbd";
string low_tier_pool_name = "";
bool ec_pool = false;
bool no_omap = false;
bool no_sparse = false;
bool balance_reads = false;
bool localize_reads = false;
bool set_redirect = false;
bool set_chunk = false;
bool enable_dedup = false;
string chunk_algo = "";
string chunk_size = "";
for (int i = 1; i < argc; ++i) {
if (strcmp(argv[i], "--max-ops") == 0)
ops = atoi(argv[++i]);
else if (strcmp(argv[i], "--pool") == 0)
pool_name = argv[++i];
else if (strcmp(argv[i], "--max-seconds") == 0)
max_seconds = atoi(argv[++i]);
else if (strcmp(argv[i], "--objects") == 0)
objects = atoi(argv[++i]);
else if (strcmp(argv[i], "--max-in-flight") == 0)
max_in_flight = atoi(argv[++i]);
    else if (strcmp(argv[i], "--size") == 0)
      size = atoll(argv[++i]);
    else if (strcmp(argv[i], "--min-stride-size") == 0)
      min_stride_size = atoll(argv[++i]);
    else if (strcmp(argv[i], "--max-stride-size") == 0)
      max_stride_size = atoll(argv[++i]);
else if (strcmp(argv[i], "--no-omap") == 0)
no_omap = true;
else if (strcmp(argv[i], "--no-sparse") == 0)
no_sparse = true;
else if (strcmp(argv[i], "--balance-reads") == 0)
balance_reads = true;
else if (strcmp(argv[i], "--localize-reads") == 0)
localize_reads = true;
else if (strcmp(argv[i], "--pool-snaps") == 0)
pool_snaps = true;
else if (strcmp(argv[i], "--write-fadvise-dontneed") == 0)
write_fadvise_dontneed = true;
else if (strcmp(argv[i], "--ec-pool") == 0) {
if (!op_weights.empty()) {
cerr << "--ec-pool must be specified prior to any ops" << std::endl;
exit(1);
}
ec_pool = true;
no_omap = true;
no_sparse = true;
} else if (strcmp(argv[i], "--op") == 0) {
i++;
if (i == argc) {
cerr << "Missing op after --op" << std::endl;
return 1;
}
int j;
for (j = 0; op_types[j].name; ++j) {
if (strcmp(op_types[j].name, argv[i]) == 0) {
break;
}
}
if (!op_types[j].name) {
cerr << "unknown op " << argv[i] << std::endl;
exit(1);
}
i++;
if (i == argc) {
cerr << "Weight unspecified." << std::endl;
return 1;
}
int weight = atoi(argv[i]);
if (weight < 0) {
cerr << "Weights must be nonnegative." << std::endl;
return 1;
} else if (weight > 0) {
if (ec_pool && !op_types[j].ec_pool_valid) {
cerr << "Error: cannot use op type " << op_types[j].name
<< " with --ec-pool" << std::endl;
exit(1);
}
cout << "adding op weight " << op_types[j].name << " -> " << weight << std::endl;
op_weights.insert(pair<TestOpType, unsigned int>(op_types[j].op, weight));
}
} else if (strcmp(argv[i], "--set_redirect") == 0) {
set_redirect = true;
} else if (strcmp(argv[i], "--set_chunk") == 0) {
set_chunk = true;
} else if (strcmp(argv[i], "--low_tier_pool") == 0) {
/*
* disallow redirect or chunk object into the same pool
* to prevent the race. see https://github.com/ceph/ceph/pull/20096
*/
low_tier_pool_name = argv[++i];
} else if (strcmp(argv[i], "--enable_dedup") == 0) {
enable_dedup = true;
} else if (strcmp(argv[i], "--dedup_chunk_algo") == 0) {
i++;
if (i == argc) {
cerr << "Missing chunking algorithm after --dedup_chunk_algo" << std::endl;
return 1;
}
int j;
for (j = 0; chunk_algo_types[j].name; ++j) {
if (strcmp(chunk_algo_types[j].name, argv[i]) == 0) {
break;
}
}
if (!chunk_algo_types[j].name) {
        cerr << "unknown chunking algorithm " << argv[i] << std::endl;
exit(1);
}
chunk_algo = chunk_algo_types[j].name;
} else if (strcmp(argv[i], "--dedup_chunk_size") == 0) {
chunk_size = argv[++i];
} else {
cerr << "unknown arg " << argv[i] << std::endl;
exit(1);
}
}
if (set_redirect || set_chunk) {
if (low_tier_pool_name == "") {
      cerr << "--low_tier_pool is required with --set_redirect or --set_chunk" << std::endl;
exit(1);
}
}
if (enable_dedup) {
if (chunk_algo == "" || chunk_size == "") {
      cerr << "Missing chunking algorithm (--dedup_chunk_algo) or chunk size"
	   << " (--dedup_chunk_size)" << std::endl;
exit(1);
}
}
if (op_weights.empty()) {
cerr << "No operations specified" << std::endl;
exit(1);
}
if (min_stride_size < 0)
min_stride_size = size / 10;
if (max_stride_size < 0)
max_stride_size = size / 5;
cout << pretty_version_to_str() << std::endl;
cout << "Configuration:" << std::endl
<< "\tNumber of operations: " << ops << std::endl
<< "\tNumber of objects: " << objects << std::endl
<< "\tMax in flight operations: " << max_in_flight << std::endl
<< "\tObject size (in bytes): " << size << std::endl
<< "\tWrite stride min: " << min_stride_size << std::endl
<< "\tWrite stride max: " << max_stride_size << std::endl;
if (min_stride_size >= max_stride_size) {
cerr << "Error: max_stride_size must be more than min_stride_size"
<< std::endl;
return 1;
}
if (min_stride_size > size || max_stride_size > size) {
cerr << "Error: min_stride_size and max_stride_size must be "
<< "smaller than object size" << std::endl;
return 1;
}
if (max_in_flight * 2 > objects) {
    cerr << "Error: max_in_flight must be <= the number of objects / 2"
<< std::endl;
return 1;
}
char *id = getenv("CEPH_CLIENT_ID");
RadosTestContext context(
pool_name,
max_in_flight,
size,
min_stride_size,
max_stride_size,
no_omap,
no_sparse,
pool_snaps,
write_fadvise_dontneed,
low_tier_pool_name,
enable_dedup,
chunk_algo,
chunk_size,
id);
TestOpStat stats;
WeightedTestGenerator gen = WeightedTestGenerator(
ops, objects,
op_weights, &stats, max_seconds,
ec_pool, balance_reads, localize_reads,
set_redirect, set_chunk, enable_dedup);
int r = context.init();
if (r < 0) {
cerr << "Error initializing rados test context: "
<< cpp_strerror(r) << std::endl;
exit(1);
}
context.loop(&gen);
if (enable_dedup) {
if (!context.check_chunks_refcount(context.low_tier_io_ctx, context.io_ctx)) {
cerr << " Invalid refcount " << std::endl;
exit(1);
}
}
context.shutdown();
cerr << context.errors << " errors." << std::endl;
cerr << stats << std::endl;
return 0;
}
| 22,438 | 29.738356 | 119 |
cc
|
null |
ceph-main/src/test/osd/ceph_test_osd_stale_read.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "gtest/gtest.h"
#include "mds/mdstypes.h"
#include "include/buffer.h"
#include "include/rbd_types.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "include/types.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/ceph_argparse.h"
#include "common/common_init.h"
#include "common/Cond.h"
#include "json_spirit/json_spirit.h"
#include <errno.h>
#include <map>
#include <sstream>
#include <string>
using namespace std;
using namespace librados;
int get_primary_osd(Rados& rados, const string& pool_name,
const string& oid, int *pprimary)
{
bufferlist inbl;
string cmd = string("{\"prefix\": \"osd map\",\"pool\":\"")
+ pool_name
+ string("\",\"object\": \"")
+ oid
+ string("\",\"format\": \"json\"}");
bufferlist outbl;
if (int r = rados.mon_command(cmd, inbl, &outbl, nullptr);
r < 0) {
return r;
}
string outstr(outbl.c_str(), outbl.length());
json_spirit::Value v;
if (!json_spirit::read(outstr, v)) {
cerr <<" unable to parse json " << outstr << std::endl;
return -1;
}
json_spirit::Object& o = v.get_obj();
for (json_spirit::Object::size_type i=0; i<o.size(); i++) {
json_spirit::Pair& p = o[i];
if (p.name_ == "acting_primary") {
cout << "primary = " << p.value_.get_int() << std::endl;
*pprimary = p.value_.get_int();
return 0;
}
}
cerr << "didn't find primary in " << outstr << std::endl;
return -1;
}
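/*
 * Added note: the helper above asks the monitors for "osd map" in JSON
 * form and pulls out only the acting_primary field; json_spirit's
 * get_obj()/get_int() throw on an unexpected reply shape, which is an
 * acceptable failure mode in a test helper.
 */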
int fence_osd(Rados& rados, int osd)
{
bufferlist inbl, outbl;
string cmd("{\"prefix\": \"injectargs\",\"injected_args\":["
"\"--ms-blackhole-osd\", "
"\"--ms-blackhole-mon\"]}");
return rados.osd_command(osd, cmd, inbl, &outbl, NULL);
}
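/*
 * Added note: injecting the ms-blackhole flags makes the fenced OSD's
 * messenger silently drop its OSD- and mon-bound traffic, so the daemon
 * stays up but appears dead to the cluster; that is what lets the test
 * below keep a stale primary around to serve reads.
 */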
int mark_down_osd(Rados& rados, int osd)
{
bufferlist inbl, outbl;
string cmd("{\"prefix\": \"osd down\",\"ids\":[\"" +
stringify(osd) + "\"]}");
return rados.mon_command(cmd, inbl, &outbl, NULL);
}
TEST(OSD, StaleRead) {
// create two rados instances, one pool
Rados rados1, rados2;
IoCtx ioctx1, ioctx2;
int r;
r = rados1.init_with_context(g_ceph_context);
ASSERT_EQ(0, r);
r = rados1.connect();
ASSERT_EQ(0, r);
srand(time(0));
string pool_name = "read-hole-test-" + stringify(rand());
r = rados1.pool_create(pool_name.c_str());
ASSERT_EQ(0, r);
r = rados1.ioctx_create(pool_name.c_str(), ioctx1);
ASSERT_EQ(0, r);
r = rados2.init_with_context(g_ceph_context);
ASSERT_EQ(0, r);
r = rados2.connect();
ASSERT_EQ(0, r);
r = rados2.ioctx_create(pool_name.c_str(), ioctx2);
ASSERT_EQ(0, r);
string oid = "foo";
bufferlist one;
one.append("one");
{
cout << "client1: writing 'one'" << std::endl;
r = ioctx1.write_full(oid, one);
ASSERT_EQ(0, r);
}
// make sure 2 can read it
{
cout << "client2: reading 'one'" << std::endl;
bufferlist bl;
r = ioctx2.read(oid, bl, 3, 0);
ASSERT_EQ(3, r);
ASSERT_EQ('o', bl[0]);
ASSERT_EQ('n', bl[1]);
ASSERT_EQ('e', bl[2]);
}
// find the primary
int primary;
r = get_primary_osd(rados1, pool_name, oid, &primary);
ASSERT_EQ(0, r);
// fence it
cout << "client1: fencing primary" << std::endl;
fence_osd(rados1, primary);
mark_down_osd(rados1, primary);
rados1.wait_for_latest_osdmap();
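  /*
   * Added note: at this point rados1 holds an osdmap in which the old
   * primary is down, but rados2 has not refreshed its map, so its next
   * read still targets the fenced primary; that window is the stale
   * read this test exercises.
   */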
// should still be able to read the old value on 2
{
cout << "client2: reading 'one' again from old primary" << std::endl;
bufferlist bl;
r = ioctx2.read(oid, bl, 3, 0);
ASSERT_EQ(3, r);
ASSERT_EQ('o', bl[0]);
ASSERT_EQ('n', bl[1]);
ASSERT_EQ('e', bl[2]);
}
// update object on 1
bufferlist two;
two.append("two");
{
cout << "client1: writing 'two' to new acting set" << std::endl;
r = ioctx1.write_full(oid, two);
ASSERT_EQ(0, r);
}
  // make sure 2 no longer reads the old value
{
cout << "client2: reading again from old primary" << std::endl;
bufferlist bl;
r = ioctx2.read(oid, bl, 3, 0);
ASSERT_EQ(3, r);
ASSERT_EQ('t', bl[0]);
ASSERT_EQ('w', bl[1]);
ASSERT_EQ('o', bl[2]);
}
rados1.shutdown();
rados2.shutdown();
}
int main(int argc, char **argv) {
auto args = argv_to_vec(argc, argv);
auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY, 0);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| 4536 | 24.488764 | 73 |
cc
|
null |
ceph-main/src/test/osd/hitset.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
* Copyright 2013 Inktank
*/
#include "gtest/gtest.h"
#include "osd/HitSet.h"
#include <iostream>
class HitSetTestStrap {
public:
HitSet *hitset;
explicit HitSetTestStrap(HitSet *h) : hitset(h) {}
void fill(unsigned count) {
char buf[50];
for (unsigned i = 0; i < count; ++i) {
sprintf(buf, "hitsettest_%u", i);
hobject_t obj(object_t(buf), "", 0, i, 0, "");
hitset->insert(obj);
}
EXPECT_EQ(count, hitset->insert_count());
}
void verify_fill(unsigned count) {
char buf[50];
for (unsigned i = 0; i < count; ++i) {
sprintf(buf, "hitsettest_%u", i);
hobject_t obj(object_t(buf), "", 0, i, 0, "");
EXPECT_TRUE(hitset->contains(obj));
}
}
};
class BloomHitSetTest : public testing::Test, public HitSetTestStrap {
public:
BloomHitSetTest() : HitSetTestStrap(new HitSet(new BloomHitSet)) {}
void rebuild(double fp, uint64_t target, uint64_t seed) {
BloomHitSet::Params *bparams = new BloomHitSet::Params(fp, target, seed);
HitSet::Params param(bparams);
HitSet new_set(param);
*hitset = new_set;
}
BloomHitSet *get_hitset() { return static_cast<BloomHitSet*>(hitset->impl.get()); }
};
TEST_F(BloomHitSetTest, Params) {
BloomHitSet::Params params(0.01, 100, 5);
EXPECT_EQ(.01, params.get_fpp());
EXPECT_EQ((unsigned)100, params.target_size);
EXPECT_EQ((unsigned)5, params.seed);
params.set_fpp(0.1);
EXPECT_EQ(0.1, params.get_fpp());
bufferlist bl;
params.encode(bl);
BloomHitSet::Params p2;
auto iter = bl.cbegin();
p2.decode(iter);
EXPECT_EQ(0.1, p2.get_fpp());
EXPECT_EQ((unsigned)100, p2.target_size);
EXPECT_EQ((unsigned)5, p2.seed);
}
TEST_F(BloomHitSetTest, Construct) {
ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_BLOOM);
// success!
}
TEST_F(BloomHitSetTest, Rebuild) {
rebuild(0.1, 100, 1);
ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_BLOOM);
}
TEST_F(BloomHitSetTest, InsertsMatch) {
rebuild(0.1, 100, 1);
fill(50);
/*
* the approx unique count is atrocious on bloom filters. Empirical
* evidence suggests the current test will produce a value of 62
* regardless of hitset size
*/
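  /*
   * For context (added, not from the original test): the textbook
   * cardinality estimate for a Bloom filter with m bits, k hashes and
   * X set bits is n ~= -(m/k) * ln(1 - X/m); with the tiny filter
   * built here that estimate is coarse, hence the loose 50..62 bounds.
   */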
EXPECT_TRUE(hitset->approx_unique_insert_count() >= 50 &&
hitset->approx_unique_insert_count() <= 62);
verify_fill(50);
EXPECT_FALSE(hitset->is_full());
}
TEST_F(BloomHitSetTest, FillsUp) {
rebuild(0.1, 20, 1);
fill(20);
verify_fill(20);
EXPECT_TRUE(hitset->is_full());
}
TEST_F(BloomHitSetTest, RejectsNoMatch) {
rebuild(0.001, 100, 1);
fill(100);
verify_fill(100);
EXPECT_TRUE(hitset->is_full());
char buf[50];
int matches = 0;
for (int i = 100; i < 200; ++i) {
sprintf(buf, "hitsettest_%d", i);
hobject_t obj(object_t(buf), "", 0, i, 0, "");
if (hitset->contains(obj))
++matches;
}
  // we set a 1-in-1000 false-positive rate; allow at most one match in our 100 probes
EXPECT_LT(matches, 2);
}
class ExplicitHashHitSetTest : public testing::Test, public HitSetTestStrap {
public:
ExplicitHashHitSetTest() : HitSetTestStrap(new HitSet(new ExplicitHashHitSet)) {}
ExplicitHashHitSet *get_hitset() { return static_cast<ExplicitHashHitSet*>(hitset->impl.get()); }
};
TEST_F(ExplicitHashHitSetTest, Construct) {
ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_EXPLICIT_HASH);
// success!
}
TEST_F(ExplicitHashHitSetTest, InsertsMatch) {
fill(50);
verify_fill(50);
EXPECT_EQ((unsigned)50, hitset->approx_unique_insert_count());
EXPECT_FALSE(hitset->is_full());
}
TEST_F(ExplicitHashHitSetTest, RejectsNoMatch) {
fill(100);
verify_fill(100);
EXPECT_FALSE(hitset->is_full());
char buf[50];
int matches = 0;
for (int i = 100; i < 200; ++i) {
sprintf(buf, "hitsettest_%d", i);
hobject_t obj(object_t(buf), "", 0, i, 0, "");
if (hitset->contains(obj)) {
++matches;
}
}
EXPECT_EQ(matches, 0);
}
class ExplicitObjectHitSetTest : public testing::Test, public HitSetTestStrap {
public:
ExplicitObjectHitSetTest() : HitSetTestStrap(new HitSet(new ExplicitObjectHitSet)) {}
ExplicitObjectHitSet *get_hitset() { return static_cast<ExplicitObjectHitSet*>(hitset->impl.get()); }
};
TEST_F(ExplicitObjectHitSetTest, Construct) {
ASSERT_EQ(hitset->impl->get_type(), HitSet::TYPE_EXPLICIT_OBJECT);
// success!
}
TEST_F(ExplicitObjectHitSetTest, InsertsMatch) {
fill(50);
verify_fill(50);
EXPECT_EQ((unsigned)50, hitset->approx_unique_insert_count());
EXPECT_FALSE(hitset->is_full());
}
TEST_F(ExplicitObjectHitSetTest, RejectsNoMatch) {
fill(100);
verify_fill(100);
EXPECT_FALSE(hitset->is_full());
char buf[50];
int matches = 0;
for (int i = 100; i < 200; ++i) {
sprintf(buf, "hitsettest_%d", i);
hobject_t obj(object_t(buf), "", 0, i, 0, "");
if (hitset->contains(obj)) {
++matches;
}
}
EXPECT_EQ(matches, 0);
}
| 5183 | 25.181818 | 103 |
cc
|
null |
ceph-main/src/test/osd/osdcap.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2012 Inktank
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <iostream>
#include "include/stringify.h"
#include "osd/OSDCap.h"
#include "gtest/gtest.h"
using namespace std;
const char *parse_good[] = {
"allow *",
"allow r",
"allow rwx",
"allow r pool foo ",
"allow r pool=foo",
"allow wx pool taco",
"allow pool foo r",
"allow pool taco wx",
"allow wx pool taco object_prefix obj",
"allow wx pool taco object_prefix obj_with_underscores_and_no_quotes",
"allow pool taco object_prefix obj wx",
"allow pool taco object_prefix obj_with_underscores_and_no_quotes wx",
"allow rwx pool 'weird name'",
"allow rwx pool \"weird name with ''s\"",
"allow rwx pool foo, allow r pool bar",
"allow rwx pool foo ; allow r pool bar",
"allow rwx pool foo ;allow r pool bar",
"allow rwx pool foo; allow r pool bar",
"allow pool foo rwx, allow pool bar r",
"allow pool foo.froo.foo rwx, allow pool bar r",
"allow pool foo rwx ; allow pool bar r",
"allow pool foo rwx ;allow pool bar r",
"allow pool foo rwx; allow pool bar r",
"allow pool data rw, allow pool rbd rwx, allow pool images class rbd foo",
"allow class-read",
"allow class-write",
"allow class-read class-write",
"allow r class-read pool foo",
"allow rw class-read class-write pool foo",
"allow r class-read pool foo",
"allow pool bar rwx; allow pool baz r class-read",
"allow class foo",
"allow class clsname \"clsthingidon'tunderstand\"",
" allow rwx pool foo; allow r pool bar ",
" allow rwx pool foo; allow r pool bar ",
" allow pool foo rwx; allow pool bar r ",
" allow pool foo rwx; allow pool bar r ",
" allow wx pool taco",
"\tallow\nwx\tpool \n taco\t",
"allow class-read object_prefix rbd_children, allow pool libvirt-pool-test rwx",
"allow class-read object_prefix rbd-children, allow pool libvirt_pool_test rwx",
"allow pool foo namespace nfoo rwx, allow pool bar namespace=nbar r",
"allow pool foo namespace=nfoo rwx ; allow pool bar namespace=nbar r",
"allow pool foo namespace nfoo rwx ;allow pool bar namespace nbar r",
"allow pool foo namespace=nfoo rwx; allow pool bar namespace nbar object_prefix rbd r",
"allow rwx namespace=nfoo tag cephfs data=cephfs_a",
"allow rwx namespace foo tag cephfs data =cephfs_a",
"allow pool foo namespace=nfoo* rwx",
"allow pool foo namespace=\"\" rwx; allow pool bar namespace='' object_prefix rbd r",
"allow pool foo namespace \"\" rwx; allow pool bar namespace '' object_prefix rbd r",
"profile abc, profile abc pool=bar, profile abc pool=bar namespace=foo",
"allow rwx tag application key=value",
"allow rwx tag application key = value",
"allow rwx tag application key =value",
"allow rwx tag application key= value",
"allow rwx tag application key = value",
"allow all tag application all=all",
"allow rwx network 127.0.0.1/8",
"allow rwx network ::1/128",
"allow rwx network [ff::1]/128",
"profile foo network 127.0.0.1/8",
"allow rwx namespace foo tag cephfs data =cephfs_a network 127.0.0.1/8",
"allow pool foo rwx network 1.2.3.4/24",
0
};
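/*
 * Informal grammar summary, inferred from the examples above (added
 * commentary, not an authoritative spec):
 *
 *   grant := allow <caps> [pool[=]<pool>] [namespace[=]<ns>[*]]
 *            [object_prefix <prefix>] [tag <app> <key>=<value>]
 *            [network <cidr>]
 *   caps  := r | w | x | class-read | class-write | * | all
 *            | class <name> [<arg>] | profile <name>
 *
 * Multiple grants are separated by ',' or ';', and the cap string may
 * also appear after the pool/namespace qualifiers.
 */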
TEST(OSDCap, ParseGood) {
for (int i=0; parse_good[i]; i++) {
string str = parse_good[i];
OSDCap cap;
std::cout << "Testing good input: '" << str << "'" << std::endl;
ASSERT_TRUE(cap.parse(str, &cout));
}
}
const char *parse_bad[] = {
"allow r poolfoo",
"allow r w",
"ALLOW r",
"allow rwx,",
"allow rwx x",
"allow r pool foo r",
"allow wwx pool taco",
"allow wwx pool taco^funny&chars",
"allow rwx pool 'weird name''",
"allow rwx object_prefix \"beforepool\" pool weird",
"allow rwx auid 123 pool asdf",
"allow xrwx pool foo,, allow r pool bar",
";allow rwx pool foo rwx ; allow r pool bar",
"allow rwx pool foo ;allow r pool bar gibberish",
"allow rwx auid 123 pool asdf namespace=foo",
"allow rwx auid 123 namespace",
"allow rwx namespace",
"allow namespace",
"allow namespace=foo",
"allow namespace=f*oo",
"allow rwx auid 123 namespace asdf",
"allow wwx pool ''",
"allow rwx tag application key value",
"allow rwx auid 123",
"allow auid 123 rwx",
"allow r pool foo object_prefix blah ; allow w auid 5",
0
};
TEST(OSDCap, ParseBad) {
for (int i=0; parse_bad[i]; i++) {
string str = parse_bad[i];
OSDCap cap;
std::cout << "Testing bad input: '" << str << "'" << std::endl;
ASSERT_FALSE(cap.parse(str, &cout));
}
}
TEST(OSDCap, AllowAll) {
OSDCap cap;
entity_addr_t addr;
ASSERT_FALSE(cap.allow_all());
ASSERT_TRUE(cap.parse("allow r", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow w", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow x", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow rwx", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow rw", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow rx", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow wx", NULL));
ASSERT_FALSE(cap.allow_all());
cap.grants.clear();
ASSERT_TRUE(cap.parse("allow *", NULL));
ASSERT_TRUE(cap.allow_all());
ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, true}}, addr));
// 'allow *' overrides allow list
ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "anamespace", {}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "anamespace", {{"application", {{"key", "value"}}}}, "asdf", true, true, {{"cls", "", true, true, false}}, addr));
}
TEST(OSDCap, AllowPool) {
OSDCap cap;
entity_addr_t addr;
bool r = cap.parse("allow rwx pool foo", NULL);
ASSERT_TRUE(r);
ASSERT_TRUE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
// true->false for classes not on allow list
ASSERT_FALSE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, AllowPools) {
entity_addr_t addr;
OSDCap cap;
bool r = cap.parse("allow rwx pool foo, allow r pool bar", NULL);
ASSERT_TRUE(r);
ASSERT_TRUE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
  // true->false for classes not on allow list
ASSERT_FALSE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "ns", {}, "", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "ns", {{"application", {{"key", "value"}}}}, "", true, false, {}, addr));
}
TEST(OSDCap, AllowPools2) {
entity_addr_t addr;
OSDCap cap;
bool r = cap.parse("allow r, allow rwx pool foo", NULL);
ASSERT_TRUE(r);
ASSERT_TRUE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
  // true->false for classes not on allow list
ASSERT_FALSE(cap.is_capable("foo", "", {}, "", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "", true, false, {}, addr));
}
TEST(OSDCap, ObjectPrefix) {
entity_addr_t addr;
OSDCap cap;
bool r = cap.parse("allow rwx object_prefix foo", NULL);
ASSERT_TRUE(r);
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, true}}, addr));
  // true->false for classes not on allow list
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "_foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, " foo ", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "fo", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, ObjectPoolAndPrefix) {
entity_addr_t addr;
OSDCap cap;
bool r = cap.parse("allow rwx pool bar object_prefix foo", NULL);
ASSERT_TRUE(r);
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, true}}, addr));
  // true->false for classes not on allow list
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "food", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo_bar", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "food", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "fo", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, Namespace) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rw namespace=nfoo"));
ASSERT_TRUE(cap.is_capable("bar", "nfoo", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "nfoobar", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, NamespaceGlob) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rw namespace=nfoo*"));
ASSERT_TRUE(cap.is_capable("bar", "nfoo", {}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "nfoobar", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "nfo", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, BasicR) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow r", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, BasicW) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow w", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, BasicX) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow x", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
// true->false when class not on allow list
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, BasicRW) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rw", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, BasicRX) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rx", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, true, true}}, addr));
// true->false for class not on allow list
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, BasicWX) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow wx", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
// true->false for class not on allow list
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, BasicRWX) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false for class not on allow list
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
}
TEST(OSDCap, BasicRWClassRClassW) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rw class-read class-write", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
}
TEST(OSDCap, ClassR) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class-read", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, ClassW) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class-write", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, ClassRW) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class-read class-write", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
}
TEST(OSDCap, BasicRClassR) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow r class-read", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "any", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "any", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "any", {}, "foo", true, false, {}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "any", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
}
TEST(OSDCap, PoolClassR) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow pool bar r class-read, allow pool foo rwx", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "ns", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
}
TEST(OSDCap, PoolClassRNS) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow pool bar namespace='' r class-read, allow pool foo namespace=ns rwx", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "ns", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "other", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "other", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("baz", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
}
TEST(OSDCap, NSClassR) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow namespace '' rw class-read class-write, allow namespace test r", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, true}}, addr));
// true->false when class not allow listed
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {{"cls", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "test", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "test", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("bar", "test", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "test", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", true, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", true, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "test", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "test", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", false, false, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "bad", {}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "bad", {{"application", {{"key", "value"}}}}, "foo", false, false, {{"cls", "", false, true, true}}, addr));
}
TEST(OSDCap, PoolTagBasic) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx tag application key=value", NULL));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, false, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, false, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, false, true}}, addr));
  // these flip from true to false once the class is not on the allow list
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", false, true, {{"cls", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", false, true, {{"cls", "", false, false, true}}, addr));
}
TEST(OSDCap, PoolTagWildK)
{
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx tag application *=value", NULL));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
}
TEST(OSDCap, PoolTagWildV)
{
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx tag application key=*", NULL));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
}
TEST(OSDCap, PoolTagWildKV)
{
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx tag application *=*", NULL));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"app2", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {{"application", {{"foo", "bar"}, {"key2", "value"}}}, {"app2", {{"foo", "bar"}}}}, "foo", true, true, {}, addr));
}
TEST(OSDCap, NSPool)
{
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx namespace ns tag application key=value", NULL));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns2", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value2"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, NSPoolGlob)
{
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx namespace ns* tag application key=value", NULL));
ASSERT_TRUE(cap.is_capable("foo", "ns", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "ns2", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {{"application", {{"key", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key", "value2"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "ns", {{"application", {{"key2", "value"}}}}, "foo", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "foo", true, true, {}, addr));
}
TEST(OSDCap, OutputParsed)
{
entity_addr_t addr;
struct CapsTest {
const char *input;
const char *output;
};
CapsTest test_values[] = {
{"allow *",
"osdcap[grant(*)]"},
{"allow r",
"osdcap[grant(r)]"},
{"allow rx",
"osdcap[grant(rx)]"},
{"allow rwx",
"osdcap[grant(rwx)]"},
{"allow rw class-read class-write",
"osdcap[grant(rwx)]"},
{"allow rw class-read",
"osdcap[grant(rw class-read)]"},
{"allow rw class-write",
"osdcap[grant(rw class-write)]"},
{"allow rwx pool images",
"osdcap[grant(pool images rwx)]"},
{"allow r pool images",
"osdcap[grant(pool images r)]"},
{"allow pool images rwx",
"osdcap[grant(pool images rwx)]"},
{"allow pool images r",
"osdcap[grant(pool images r)]"},
{"allow pool images w",
"osdcap[grant(pool images w)]"},
{"allow pool images x",
"osdcap[grant(pool images x)]"},
{"allow r pool images namespace ''",
"osdcap[grant(pool images namespace \"\" r)]"},
{"allow r pool images namespace foo",
"osdcap[grant(pool images namespace foo r)]"},
{"allow r pool images namespace \"\"",
"osdcap[grant(pool images namespace \"\" r)]"},
{"allow r namespace foo",
"osdcap[grant(namespace foo r)]"},
{"allow pool images r; allow pool rbd rwx",
"osdcap[grant(pool images r),grant(pool rbd rwx)]"},
{"allow pool images r, allow pool rbd rwx",
"osdcap[grant(pool images r),grant(pool rbd rwx)]"},
{"allow class-read object_prefix rbd_children, allow pool libvirt-pool-test rwx",
"osdcap[grant(object_prefix rbd_children class-read),grant(pool libvirt-pool-test rwx)]"},
{"allow rwx tag application key=value",
"osdcap[grant(app application key key val value rwx)]"},
{"allow rwx namespace ns* tag application key=value",
"osdcap[grant(namespace ns* app application key key val value rwx)]"},
{"allow all",
"osdcap[grant(*)]"},
{"allow rwx tag application all=all",
"osdcap[grant(app application key * val * rwx)]"},
{"allow rwx network 1.2.3.4/24",
"osdcap[grant(rwx network 1.2.3.4/24)]"},
};
size_t num_tests = sizeof(test_values) / sizeof(*test_values);
for (size_t i = 0; i < num_tests; ++i) {
OSDCap cap;
std::cout << "Testing input '" << test_values[i].input << "'" << std::endl;
ASSERT_TRUE(cap.parse(test_values[i].input));
ASSERT_EQ(test_values[i].output, stringify(cap));
}
}
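// Illustrative sketch (not part of the upstream suite): parse() takes an
// optional error-stream second argument, so a failed parse can report what
// went wrong. Assumes <sstream> is available (stringify() above relies on
// it); the exact error text is an implementation detail.
TEST(OSDCap, ParseErrorStreamSketch) {
  OSDCap cap;
  std::ostringstream err;
  ASSERT_FALSE(cap.parse("allow rwx pool", &err)); // 'pool' without a name
  ASSERT_FALSE(err.str().empty());                 // the reason is reported
}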
TEST(OSDCap, AllowClass) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class foo", NULL));
// can call any method on class foo regardless of allow list status
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}}, addr));
// does not permit invoking class bar
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr));
}
TEST(OSDCap, AllowClassMethod) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class foo xyz", NULL));
// can call the xyz method on class foo regardless of allow list status
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", false, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "xyz", true, true, false}}, addr));
// does not permit invoking class bar
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "xyz", true, true, false}}, addr));
}
TEST(OSDCap, AllowClass2) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class foo, allow class bar", NULL));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr));
}
TEST(OSDCap, AllowClassRWX) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx, allow class foo", NULL));
// can call any method on class foo regardless of allow list status
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}}, addr));
// does not permit invoking class bar
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, false}}, addr));
// allows class bar if it is allow listed
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"bar", "", true, true, true}}, addr));
}
TEST(OSDCap, AllowClassMulti) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow class foo", NULL));
  // can call any method on foo, but not bar, so the entire op is rejected
  // even an allow-listed bar method is rejected here when it reads or
  // writes, since that still requires rwx/class-read/class-write
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, false}}, addr));
// these are OK because 'bar' is on the allow list BUT the calls don't read or write
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, true}}, addr));
// can call any method on foo or bar regardless of allow list status
OSDCap cap2;
ASSERT_TRUE(cap2.parse("allow class foo, allow class bar", NULL));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, false}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, true}}, addr));
ASSERT_TRUE(cap2.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, false}}, addr));
}
TEST(OSDCap, AllowClassMultiRWX) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("allow rwx, allow class foo", NULL));
// can call anything on foo, but only allow listed methods on bar
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, true}}, addr));
// fails because bar not allow listed
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, true, false}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", true, false, false}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, true, false}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, true}, {"bar", "", false, false, false}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", true, false, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, true, false}}, addr));
ASSERT_FALSE(cap.is_capable("bar", "", {}, "foo", false, false, {{"foo", "", false, false, false}, {"bar", "", false, false, false}}, addr));
}
TEST(OSDCap, AllowProfile) {
entity_addr_t addr;
OSDCap cap;
ASSERT_TRUE(cap.parse("profile read-only, profile read-write pool abc", NULL));
ASSERT_FALSE(cap.allow_all());
ASSERT_FALSE(cap.is_capable("foo", "", {}, "asdf", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("abc", "", {}, "asdf", false, true, {}, addr));
// RBD
cap.grants.clear();
ASSERT_TRUE(cap.parse("profile rbd pool abc", NULL));
ASSERT_FALSE(cap.allow_all());
ASSERT_FALSE(cap.is_capable("foo", "", {}, "asdf", true, true, {}, addr));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "rbd_children", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "rbd_children", false, false,
{{"rbd", "", true, false, true}}, addr));
ASSERT_TRUE(cap.is_capable("abc", "", {}, "asdf", true, true,
{{"rbd", "", true, true, true}}, addr));
cap.grants.clear();
ASSERT_TRUE(cap.parse("profile rbd-read-only pool abc", NULL));
ASSERT_FALSE(cap.allow_all());
ASSERT_FALSE(cap.is_capable("foo", "", {}, "rbd_children", true, false, {}, addr));
ASSERT_TRUE(cap.is_capable("abc", "", {}, "asdf", true, false,
{{"rbd", "", true, false, true}}, addr));
ASSERT_FALSE(cap.is_capable("abc", "", {}, "asdf", true, true, {}, addr));
ASSERT_TRUE(cap.is_capable("abc", "", {}, "rbd_header.ABC", false, false,
{{"rbd", "child_attach", true, true, true}}, addr));
ASSERT_TRUE(cap.is_capable("abc", "", {}, "rbd_header.ABC", false, false,
{{"rbd", "child_detach", true, true, true}}, addr));
ASSERT_FALSE(cap.is_capable("abc", "", {}, "rbd_header.ABC", false, false,
{{"rbd", "other function", true, true, true}}, addr));
cap.grants.clear();
ASSERT_TRUE(cap.parse("profile rbd pool pool1 namespace ns1", nullptr));
ASSERT_TRUE(cap.is_capable("pool1", "", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_TRUE(cap.is_capable("pool1", "ns1", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool1", "ns2", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool2", "", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool1", "", {}, "asdf", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool1", "", {}, "rbd_info", false, false,
{{"rbd", "other_method", true, false, true}},
addr));
cap.grants.clear();
ASSERT_TRUE(cap.parse("profile rbd-read-only pool pool1 namespace ns1",
nullptr));
ASSERT_TRUE(cap.is_capable("pool1", "", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_TRUE(cap.is_capable("pool1", "ns1", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool1", "ns2", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool2", "", {}, "rbd_info", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool1", "", {}, "asdf", false, false,
{{"rbd", "metadata_list", true, false, true}},
addr));
ASSERT_FALSE(cap.is_capable("pool1", "", {}, "rbd_info", false, false,
{{"rbd", "other_method", true, false, true}},
addr));
}
TEST(OSDCap, network) {
entity_addr_t a, b, c;
a.parse("10.1.2.3");
b.parse("192.168.2.3");
c.parse("192.167.2.3");
OSDCap cap;
ASSERT_TRUE(cap.parse("allow * network 192.168.0.0/16, allow * network 10.0.0.0/8", NULL));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, a));
ASSERT_TRUE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, b));
ASSERT_FALSE(cap.is_capable("foo", "", {}, "asdf", true, true, {{"cls", "", true, true, true}}, c));
}
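// Illustrative sketch (not part of the upstream suite): the entity_addr_t
// passed as the last argument to is_capable() is the client's source
// address; with a 'network' clause the same cap grants or denies purely on
// which network that address falls in.
TEST(OSDCap, NetworkSketch) {
  entity_addr_t in, out;
  in.parse("10.0.0.1");
  out.parse("172.16.0.1");
  OSDCap cap;
  ASSERT_TRUE(cap.parse("allow rwx network 10.0.0.0/8", NULL));
  ASSERT_TRUE(cap.is_capable("foo", "", {}, "obj", true, true, {}, in));
  ASSERT_FALSE(cap.is_capable("foo", "", {}, "obj", true, true, {}, out));
}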
| 98,791 | 69.616154 | 155 | cc |

| null | ceph-main/src/test/osd/safe-to-destroy.sh |
#!/usr/bin/env bash
source $CEPH_ROOT/qa/standalone/ceph-helpers.sh
set -e
function run() {
local dir=$1
shift
export CEPH_MON="127.0.0.1:$(get_unused_port)"
export CEPH_ARGS
CEPH_ARGS+="--fsid=$(uuidgen) --auth-supported=none "
CEPH_ARGS+="--mon-host=$CEPH_MON "
set -e
local funcs=${@:-$(set | sed -n -e 's/^\(TEST_[0-9a-z_]*\) .*/\1/p')}
for func in $funcs ; do
setup $dir || return 1
$func $dir || return 1
teardown $dir || return 1
done
}
function TEST_safe_to_destroy() {
local dir=$1
run_mon $dir a
run_mgr $dir x
run_osd $dir 0
run_osd $dir 1
run_osd $dir 2
run_osd $dir 3
flush_pg_stats
ceph osd safe-to-destroy 0
ceph osd safe-to-destroy 1
ceph osd safe-to-destroy 2
ceph osd safe-to-destroy 3
ceph osd pool create foo 128
sleep 2
flush_pg_stats
wait_for_clean
expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 0
expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 1
expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 2
expect_failure $dir 'pgs currently' ceph osd safe-to-destroy 3
ceph osd out 0
sleep 2
flush_pg_stats
wait_for_clean
ceph osd safe-to-destroy 0
# even osds without osd_stat are ok if all pgs are active+clean
id=`ceph osd create`
ceph osd safe-to-destroy $id
}
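# Illustrative sketch (not part of the upstream test): automation would
# normally poll safe-to-destroy until data has drained off the OSD instead
# of asserting immediately; a minimal wait loop could look like this.
function wait_safe_to_destroy() {
    local id=$1
    while ! ceph osd safe-to-destroy $id ; do
        sleep 5
    done
}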
function TEST_ok_to_stop() {
local dir=$1
run_mon $dir a
run_mgr $dir x
run_osd $dir 0
run_osd $dir 1
run_osd $dir 2
run_osd $dir 3
ceph osd pool create foo 128
ceph osd pool set foo size 3
ceph osd pool set foo min_size 2
sleep 1
flush_pg_stats
wait_for_clean
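    # size=3/min_size=2 on four OSDs: any single OSD may stop, but stopping
    # two at once can drop a PG below min_size, hence the expected
    # bad_become_inactive failure below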
ceph osd ok-to-stop 0
ceph osd ok-to-stop 1
ceph osd ok-to-stop 2
ceph osd ok-to-stop 3
expect_failure $dir bad_become_inactive ceph osd ok-to-stop 0 1
ceph osd pool set foo min_size 1
sleep 1
flush_pg_stats
wait_for_clean
ceph osd ok-to-stop 0 1
ceph osd ok-to-stop 1 2
ceph osd ok-to-stop 2 3
ceph osd ok-to-stop 3 4
expect_failure $dir bad_become_inactive ceph osd ok-to-stop 0 1 2
expect_failure $dir bad_become_inactive ceph osd ok-to-stop 0 1 2 3
}
main safe-to-destroy "$@"
| 2,262 | 21.63 | 73 | sh |

| null | ceph-main/src/test/osd/scrubber_generators.cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/osd/scrubber_generators.h"
#include <fmt/ranges.h>
using namespace ScrubGenerator;
// ref: PGLogTestRebuildMissing()
bufferptr create_object_info(const ScrubGenerator::RealObj& objver)
{
object_info_t oi{};
oi.soid = objver.ghobj.hobj;
oi.version = eversion_t(objver.ghobj.generation, 0);
oi.size = objver.data.size;
bufferlist bl;
oi.encode(bl,
0 /*get_osdmap()->get_features(CEPH_ENTITY_TYPE_OSD, nullptr)*/);
bufferptr bp(bl.c_str(), bl.length());
return bp;
}
std::pair<bufferptr, std::vector<snapid_t>> create_object_snapset(
const ScrubGenerator::RealObj& robj,
const SnapsetMockData* snapset_mock_data)
{
if (!snapset_mock_data) {
return {bufferptr(), {}};
}
/// \todo fill in missing version/osd details from the robj
auto sns = snapset_mock_data->make_snapset();
bufferlist bl;
encode(sns, bl);
bufferptr bp = bufferptr(bl.c_str(), bl.length());
// extract the set of object snaps
return {bp, sns.snaps};
}
RealObjsConfList ScrubGenerator::make_real_objs_conf(
int64_t pool_id,
const RealObjsConf& blueprint,
std::vector<int32_t> active_osds)
{
RealObjsConfList all_osds;
for (auto osd : active_osds) {
RealObjsConfRef this_osd_fakes = std::make_unique<RealObjsConf>(blueprint);
// now - fix & corrupt every "object" in the blueprint
for (RealObj& robj : this_osd_fakes->objs) {
robj.ghobj.hobj.pool = pool_id;
}
all_osds[osd] = std::move(this_osd_fakes);
}
return all_osds; // reconsider (maybe add a move ctor?)
}
///\todo dispose of the created buffer pointers
ScrubGenerator::SmapEntry ScrubGenerator::make_smobject(
const ScrubGenerator::RealObj& blueprint,
int osd_num)
{
ScrubGenerator::SmapEntry ret;
ret.ghobj = blueprint.ghobj;
ret.smobj.attrs[OI_ATTR] = create_object_info(blueprint);
if (blueprint.snapset_mock_data) {
auto [bp, snaps] =
create_object_snapset(blueprint, blueprint.snapset_mock_data);
ret.smobj.attrs[SS_ATTR] = bp;
std::cout << fmt::format("{}: ({}) osd:{} snaps:{}",
__func__,
ret.ghobj.hobj,
osd_num,
snaps)
<< std::endl;
}
for (const auto& [at_k, at_v] : blueprint.data.attrs) {
ret.smobj.attrs[at_k] = ceph::buffer::copy(at_v.c_str(), at_v.size());
{
// verifying (to be removed after dev phase)
auto bk = ret.smobj.attrs[at_k].begin_deep().get_ptr(
ret.smobj.attrs[at_k].length());
std::string bkstr{bk.raw_c_str(), bk.raw_length()};
std::cout << fmt::format("{}: verification: {}", __func__, bkstr)
<< std::endl;
}
}
ret.smobj.size = blueprint.data.size;
ret.smobj.digest = blueprint.data.hash;
  /// \todo handle 'present', etc.
ret.smobj.object_omap_keys = blueprint.data.omap.size();
ret.smobj.object_omap_bytes = blueprint.data.omap_bytes;
return ret;
}
all_clones_snaps_t ScrubGenerator::all_clones(
const ScrubGenerator::RealObj& head_obj)
{
std::cout << fmt::format("{}: head_obj.ghobj.hobj:{}",
__func__,
head_obj.ghobj.hobj)
<< std::endl;
std::map<hobject_t, std::vector<snapid_t>> ret;
for (const auto& clone : head_obj.snapset_mock_data->clones) {
auto clone_set_it = head_obj.snapset_mock_data->clone_snaps.find(clone);
if (clone_set_it == head_obj.snapset_mock_data->clone_snaps.end()) {
std::cout << "note: no clone_snaps for " << clone << std::endl;
continue;
}
auto clone_set = clone_set_it->second;
hobject_t clone_hobj{head_obj.ghobj.hobj};
clone_hobj.snap = clone;
ret[clone_hobj] = clone_set_it->second;
std::cout << fmt::format("{}: clone:{} clone_set:{}",
__func__,
clone_hobj,
clone_set)
<< std::endl;
}
return ret;
}
void ScrubGenerator::add_object(ScrubMap& map,
const ScrubGenerator::RealObj& real_obj,
int osd_num)
{
  // do we have a data-corruption recipe for this OSD?
  /// \todo c++20: use contains()
  CorruptFunc relevant_fix = crpt_do_nothing;
  auto p = real_obj.corrupt_funcs->find(osd_num);
  if (p != real_obj.corrupt_funcs->end()) {
    // yes, we have a corruption recipe for this OSD
    // \todo c++20: use at()
    relevant_fix = p->second;
  }
}
// create a possibly-corrupted copy of the "real object"
auto modified_obj = (relevant_fix)(real_obj, osd_num);
std::cout << fmt::format("{}: modified: osd:{} ho:{} key:{}",
__func__,
osd_num,
modified_obj.ghobj.hobj,
modified_obj.ghobj.hobj.get_key())
<< std::endl;
auto entry = make_smobject(modified_obj, osd_num);
std::cout << fmt::format("{}: osd:{} smap entry: {} {}",
__func__,
osd_num,
entry.smobj.size,
entry.smobj.attrs.size())
<< std::endl;
map.objects[entry.ghobj.hobj] = entry.smobj;
}
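// Illustrative sketch (not part of the upstream code): developing a
// blueprint into one ScrubMap per OSD, combining make_real_objs_conf() and
// add_object() as a test would. The blueprint and pool id are supplied by
// the caller (e.g. from scrubber_test_datasets).
[[maybe_unused]] static std::map<int, ScrubMap> build_scrub_maps(
  int64_t pool_id,
  const RealObjsConf& blueprint,
  const std::vector<int32_t>& active_osds)
{
  std::map<int, ScrubMap> maps;
  RealObjsConfList per_osd =
    make_real_objs_conf(pool_id, blueprint, active_osds);
  for (auto& [osd, conf] : per_osd) {
    for (const RealObj& obj : conf->objs) {
      add_object(maps[osd], obj, osd); // applies this OSD's corruption recipe
    }
  }
  return maps;
}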
| 4,867 | 27.804734 | 79 | cc |

| null | ceph-main/src/test/osd/scrubber_generators.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/// \file generating scrub-related maps & objects for unit tests
#include <functional>
#include <map>
#include <sstream>
#include <string>
#include <variant>
#include <vector>
#include "include/buffer.h"
#include "include/buffer_raw.h"
#include "include/object_fmt.h"
#include "osd/osd_types_fmt.h"
#include "osd/scrubber/pg_scrubber.h"
namespace ScrubGenerator {
/// \todo enhance the MockLog to capture the log messages
class MockLog : public LoggerSinkSet {
public:
void debug(std::stringstream& s) final
{
std::cout << "\n<<debug>> " << s.str() << std::endl;
}
void info(std::stringstream& s) final
{
std::cout << "\n<<info>> " << s.str() << std::endl;
}
void sec(std::stringstream& s) final
{
std::cout << "\n<<sec>> " << s.str() << std::endl;
}
void warn(std::stringstream& s) final
{
std::cout << "\n<<warn>> " << s.str() << std::endl;
}
void error(std::stringstream& s) final
{
err_count++;
std::cout << "\n<<error>> " << s.str() << std::endl;
}
OstreamTemp info() final { return OstreamTemp(CLOG_INFO, this); }
OstreamTemp warn() final { return OstreamTemp(CLOG_WARN, this); }
OstreamTemp error() final { return OstreamTemp(CLOG_ERROR, this); }
OstreamTemp sec() final { return OstreamTemp(CLOG_ERROR, this); }
OstreamTemp debug() final { return OstreamTemp(CLOG_DEBUG, this); }
void do_log(clog_type prio, std::stringstream& ss) final
{
switch (prio) {
case CLOG_DEBUG:
debug(ss);
break;
case CLOG_INFO:
info(ss);
break;
case CLOG_SEC:
sec(ss);
break;
case CLOG_WARN:
warn(ss);
break;
case CLOG_ERROR:
default:
error(ss);
break;
}
}
void do_log(clog_type prio, const std::string& ss) final
{
switch (prio) {
case CLOG_DEBUG:
debug() << ss;
break;
case CLOG_INFO:
info() << ss;
break;
case CLOG_SEC:
sec() << ss;
break;
case CLOG_WARN:
warn() << ss;
break;
case CLOG_ERROR:
default:
error() << ss;
break;
}
}
virtual ~MockLog() {}
int err_count{0};
int expected_err_count{0};
void set_expected_err_count(int c) { expected_err_count = c; }
};
// ///////////////////////////////////////////////////////////////////////// //
// ///////////////////////////////////////////////////////////////////////// //
struct pool_conf_t {
int pg_num{3};
int pgp_num{3};
int size{3};
int min_size{3};
std::string name{"rep_pool"};
};
using attr_t = std::map<std::string, std::string>;
using all_clones_snaps_t = std::map<hobject_t, std::vector<snapid_t>>;
struct RealObj;
// a function to manipulate (i.e. corrupt) an object in a specific OSD
using CorruptFunc =
std::function<RealObj(const RealObj& s, [[maybe_unused]] int osd_num)>;
using CorruptFuncList = std::map<int, CorruptFunc>; // per OSD
struct SnapsetMockData {
using CookedCloneSnaps =
std::tuple<std::map<snapid_t, uint64_t>,
std::map<snapid_t, std::vector<snapid_t>>,
std::map<snapid_t, interval_set<uint64_t>>>;
// an auxiliary function to cook the data for the SnapsetMockData
using clone_snaps_cooker = CookedCloneSnaps (*)();
snapid_t seq;
std::vector<snapid_t> snaps; // descending
std::vector<snapid_t> clones; // ascending
  std::map<snapid_t, interval_set<uint64_t>> clone_overlap; // overlap w/ next newest
std::map<snapid_t, uint64_t> clone_size;
std::map<snapid_t, std::vector<snapid_t>> clone_snaps; // descending
SnapsetMockData(snapid_t seq,
std::vector<snapid_t> snaps,
std::vector<snapid_t> clones,
std::map<snapid_t, interval_set<uint64_t>> clone_overlap,
std::map<snapid_t, uint64_t> clone_size,
std::map<snapid_t, std::vector<snapid_t>> clone_snaps)
: seq(seq)
, snaps(snaps)
, clones(clones)
, clone_overlap(clone_overlap)
, clone_size(clone_size)
, clone_snaps(clone_snaps)
{}
SnapsetMockData(snapid_t seq,
std::vector<snapid_t> snaps,
std::vector<snapid_t> clones,
clone_snaps_cooker func)
: seq{seq}
, snaps{snaps}
, clones(clones)
{
auto [clone_size_, clone_snaps_, clone_overlap_] = func();
clone_size = clone_size_;
clone_snaps = clone_snaps_;
clone_overlap = clone_overlap_;
}
SnapSet make_snapset() const
{
SnapSet ss;
ss.seq = seq;
ss.snaps = snaps;
ss.clones = clones;
ss.clone_overlap = clone_overlap;
ss.clone_size = clone_size;
ss.clone_snaps = clone_snaps;
return ss;
}
};
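// Construction sketch ('my_clone_snaps_fn' is a hypothetical function of type
// clone_snaps_cooker; see ms1_fn in scrubber_test_datasets.cc for a real one):
//
//   static SnapsetMockData mock_ss{/* seq */ 0x40,
//                                  /* snaps (descending) */ {0x30, 0x20},
//                                  /* clones (ascending) */ {0x20, 0x30},
//                                  my_clone_snaps_fn};
//   SnapSet ss = mock_ss.make_snapset();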
// an object in our "DB" - with its versioned snaps, "data" (size and hash),
// and "omap" (size and hash)
struct RealData {
// not needed at this level of "data falsification": std::byte data;
uint64_t size;
uint32_t hash;
uint32_t omap_digest;
uint32_t omap_bytes;
attr_t omap;
attr_t attrs;
};
struct RealObj {
// the ghobject - oid, version, snap, hash, pool
ghobject_t ghobj;
RealData data;
const CorruptFuncList* corrupt_funcs;
const SnapsetMockData* snapset_mock_data;
};
static inline RealObj crpt_do_nothing(const RealObj& s, [[maybe_unused]] int osdn)
{
return s;
}
struct SmapEntry {
ghobject_t ghobj;
ScrubMap::object smobj;
};
ScrubGenerator::SmapEntry make_smobject(
const ScrubGenerator::RealObj& blueprint, // the whole set of versions
int osd_num);
/**
 * generate the scrub-map entry for the object as seen by the given OSD
 * (i.e. after that OSD's corruption function was applied), and insert it
 * into the supplied map
 */
void add_object(ScrubMap& map, const RealObj& obj_versions, int osd_num);
struct RealObjsConf {
std::vector<RealObj> objs;
};
using RealObjsConfRef = std::unique_ptr<RealObjsConf>;
// a RealObjsConf blueprint is "developed" into the following map of per-OSD
// sets: each OSD gets its own copy, with the correct pool ID filled in and
// with that OSD's corrupting functions applied to the data
using RealObjsConfList = std::map<int, RealObjsConfRef>;
RealObjsConfList make_real_objs_conf(int64_t pool_id,
const RealObjsConf& blueprint,
std::vector<int32_t> active_osds);
/**
* create the snap-ids set for all clones appearing in the head
* object's snapset (those will be injected into the scrubber's mock,
* to be used as the 'snap_mapper')
*/
all_clones_snaps_t all_clones(const RealObj& head_obj);
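// Usage sketch: priming a fake snap-mapper with the result (the set_snaps()
// call here is an assumption - any SnapMapReaderI-style test double with a
// comparable setter, such as the TestScrubber mock in test_scrubber_be.cc,
// would do):
//
//   auto clones_snaps = all_clones(head_obj);
//   test_scrubber->set_snaps(clones_snaps);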
} // namespace ScrubGenerator
template <>
struct fmt::formatter<ScrubGenerator::RealObj> {
constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); }
template <typename FormatContext>
auto format(const ScrubGenerator::RealObj& rlo, FormatContext& ctx)
{
using namespace ScrubGenerator;
return fmt::format_to(ctx.out(),
"RealObj(gh:{}, dt:{}, snaps:{})",
rlo.ghobj,
rlo.data.size,
(rlo.snapset_mock_data ? rlo.snapset_mock_data->snaps
: std::vector<snapid_t>{}));
}
};
| 6,753 | 24.29588 | 79 |
h
|
null |
ceph-main/src/test/osd/scrubber_test_datasets.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/// \file data-sets used by the scrubber unit tests
#include "./scrubber_test_datasets.h"
using namespace ScrubGenerator;
using namespace std::string_literals;
namespace ScrubDatasets {
static RealObj corrupt_object_size(const RealObj& s, [[maybe_unused]] int osdn)
{
RealObj ret = s;
ret.data.size = s.data.size + 1;
return ret;
}
static RealObj corrupt_nothing(const RealObj& s, [[maybe_unused]] int osdn)
{
return s;
}
static CorruptFuncList crpt_funcs_set0 = {{0, &corrupt_nothing}};
CorruptFuncList crpt_funcs_set1 = {{0, &corrupt_object_size},
{1, &corrupt_nothing}};
// object with head & two snaps
static hobject_t hobj_ms1{object_t{"hobj_ms1"},
"keykey", // key
CEPH_NOSNAP, // snap_id
0, // hash
0, // pool
""s}; // nspace
SnapsetMockData::CookedCloneSnaps ms1_fn()
{
std::map<snapid_t, uint64_t> clnsz;
clnsz[0x20] = 222;
clnsz[0x30] = 333;
std::map<snapid_t, std::vector<snapid_t>> clnsn;
clnsn[0x20] = {0x20};
clnsn[0x30] = {0x30};
std::map<snapid_t, interval_set<uint64_t>> overlaps;
overlaps[0x20] = {};
overlaps[0x30] = {};
return {clnsz, clnsn, overlaps};
}
static SnapsetMockData hobj_ms1_snapset{/* seq */ 0x40,
/* snaps */ {0x30, 0x20},
/* clones */ {0x20, 0x30},
ms1_fn};
hobject_t hobj_ms1_snp30{object_t{"hobj_ms1"},
"keykey", // key
0x30, // snap_id
0, // hash
0, // pool
""s}; // nspace
static hobject_t hobj_ms1_snp20{object_t{"hobj_ms1"},
"keykey", // key
0x20, // snap_id
0, // hash
0, // pool
""s}; // nspace
ScrubGenerator::RealObjsConf minimal_snaps_configuration{
/* RealObjsConf::objs */ {
/* Clone 30 */ {
ghobject_t{hobj_ms1_snp30, 0, shard_id_t{0}},
RealData{
333,
0x17,
17,
21,
attr_t{/*{"_om1k", "om1v"}, {"om1k", "om1v"},*/ {"om3k", "om3v"}},
attr_t{{"_at1k", "_at1v"}, {"_at2k", "at2v"}, {"at3k", "at3v"}}},
&crpt_funcs_set0,
nullptr},
/* Clone 20 */
{ghobject_t{hobj_ms1_snp20, 0, shard_id_t{0}},
RealData{222,
0x17,
17,
21,
attr_t{/*{"_om1k", "om1v"}, {"om1k", "om1v"},*/ {"om3k", "om3v"}},
attr_t{{"_at1k", "_at1v"}, {"_at2k", "at2v"}, {"at3k", "at3v"}}},
&crpt_funcs_set0,
nullptr},
/* Head */
{ghobject_t{hobj_ms1, 0, shard_id_t{0}},
RealData{100,
0x17,
17,
21,
attr_t{{"_om1k", "om1v"}, {"om1k", "om1v"}, {"om3k", "om3v"}},
attr_t{{"_at1k", "_at1v"}, {"_at2k", "at2v"}, {"at3k", "at3v"}}
},
&crpt_funcs_set0,
&hobj_ms1_snapset}}
};
} // namespace ScrubDatasets
| 2,725 | 21.528926 | 79 |
cc
|
null |
ceph-main/src/test/osd/scrubber_test_datasets.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
/// \file data-sets used by the scrubber unit tests
#include "./scrubber_generators.h"
namespace ScrubDatasets {
/*
 * Two objects with some clones. No inconsistencies.
*/
extern ScrubGenerator::RealObjsConf minimal_snaps_configuration;
// and a part of this configuration, one that we will corrupt in a test:
extern hobject_t hobj_ms1_snp30;
// a manipulation set used in TestTScrubberBe_data_2:
extern ScrubGenerator::CorruptFuncList crpt_funcs_set1;
} // namespace ScrubDatasets
| 600 | 26.318182 | 72 |
h
|
null |
ceph-main/src/test/osd/test_ec_transaction.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <gtest/gtest.h>
#include "osd/PGTransaction.h"
#include "osd/ECTransaction.h"
#include "test/unit.cc"
struct mydpp : public DoutPrefixProvider {
std::ostream& gen_prefix(std::ostream& out) const override { return out << "foo"; }
CephContext *get_cct() const override { return g_ceph_context; }
unsigned get_subsys() const override { return ceph_subsys_osd; }
} dpp;
#define dout_context g_ceph_context
TEST(ectransaction, two_writes_separated)
{
hobject_t h;
PGTransactionUPtr t(new PGTransaction);
bufferlist a, b;
t->create(h);
a.append_zero(565760);
t->write(h, 0, a.length(), a, 0);
b.append_zero(2437120);
t->write(h, 669856, b.length(), b, 0);
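  // stripe geometry below: assuming the stripe_info_t(k, stripe_width)
  // constructor, this is 2 data chunks over an 8192-byte stripe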
ECUtil::stripe_info_t sinfo(2, 8192);
auto plan = ECTransaction::get_write_plan(
sinfo,
std::move(t),
[&](const hobject_t &i) {
ECUtil::HashInfoRef ref(new ECUtil::HashInfo(1));
return ref;
},
&dpp);
generic_derr << "to_read " << plan.to_read << dendl;
generic_derr << "will_write " << plan.will_write << dendl;
ASSERT_EQ(0u, plan.to_read.size());
ASSERT_EQ(1u, plan.will_write.size());
}
TEST(ectransaction, two_writes_nearby)
{
hobject_t h;
PGTransactionUPtr t(new PGTransaction);
bufferlist a, b;
t->create(h);
// two nearby writes, both partly touching the same 8192-byte stripe
ECUtil::stripe_info_t sinfo(2, 8192);
a.append_zero(565760);
t->write(h, 0, a.length(), a, 0);
b.append_zero(2437120);
t->write(h, 569856, b.length(), b, 0);
auto plan = ECTransaction::get_write_plan(
sinfo,
std::move(t),
[&](const hobject_t &i) {
ECUtil::HashInfoRef ref(new ECUtil::HashInfo(1));
return ref;
},
&dpp);
generic_derr << "to_read " << plan.to_read << dendl;
generic_derr << "will_write " << plan.will_write << dendl;
ASSERT_EQ(0u, plan.to_read.size());
ASSERT_EQ(1u, plan.will_write.size());
}
TEST(ectransaction, many_writes)
{
hobject_t h;
PGTransactionUPtr t(new PGTransaction);
bufferlist a, b;
a.append_zero(512);
b.append_zero(4096);
t->create(h);
ECUtil::stripe_info_t sinfo(2, 8192);
// write 2801664~512
// write 2802176~512
// write 2802688~512
// write 2803200~512
t->write(h, 2801664, a.length(), a, 0);
t->write(h, 2802176, a.length(), a, 0);
t->write(h, 2802688, a.length(), a, 0);
t->write(h, 2803200, a.length(), a, 0);
// write 2805760~4096
// write 2809856~4096
// write 2813952~4096
t->write(h, 2805760, b.length(), b, 0);
t->write(h, 2809856, b.length(), b, 0);
t->write(h, 2813952, b.length(), b, 0);
auto plan = ECTransaction::get_write_plan(
sinfo,
std::move(t),
[&](const hobject_t &i) {
ECUtil::HashInfoRef ref(new ECUtil::HashInfo(1));
return ref;
},
&dpp);
generic_derr << "to_read " << plan.to_read << dendl;
generic_derr << "will_write " << plan.will_write << dendl;
ASSERT_EQ(0u, plan.to_read.size());
ASSERT_EQ(1u, plan.will_write.size());
}
| 3,356 | 25.856 | 85 |
cc
|
null |
ceph-main/src/test/osd/test_extent_cache.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <gtest/gtest.h>
#include "osd/ExtentCache.h"
#include <iostream>
using namespace std;
extent_map imap_from_vector(vector<pair<uint64_t, uint64_t> > &&in)
{
extent_map out;
for (auto &&tup: in) {
bufferlist bl;
bl.append_zero(tup.second);
out.insert(tup.first, bl.length(), bl);
}
return out;
}
extent_map imap_from_iset(const extent_set &set)
{
extent_map out;
for (auto &&iter: set) {
bufferlist bl;
bl.append_zero(iter.second);
out.insert(iter.first, iter.second, bl);
}
return out;
}
extent_set iset_from_vector(vector<pair<uint64_t, uint64_t> > &&in)
{
extent_set out;
for (auto &&tup: in) {
out.insert(tup.first, tup.second);
}
return out;
}
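// The helpers above compose: a vector of {offset, length} pairs becomes an
// extent_set, and any extent_set becomes a zero-filled extent_map. A sketch:
//
//   auto s = iset_from_vector({{0, 2}, {8, 2}}); // extents [0,2) and [8,10)
//   auto m = imap_from_iset(s);                  // same extents, zeroed data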
TEST(extentcache, simple_write)
{
hobject_t oid;
ExtentCache c;
ExtentCache::write_pin pin;
c.open_write_pin(pin);
auto to_read = iset_from_vector(
{{0, 2}, {8, 2}, {20, 2}});
auto to_write = iset_from_vector(
{{0, 10}, {20, 4}});
auto must_read = c.reserve_extents_for_rmw(
oid, pin, to_write, to_read);
ASSERT_EQ(
must_read,
to_read);
c.print(std::cerr);
auto got = imap_from_iset(must_read);
auto pending_read = to_read;
pending_read.subtract(must_read);
auto pending = c.get_remaining_extents_for_rmw(
oid,
pin,
pending_read);
ASSERT_TRUE(pending.empty());
auto write_map = imap_from_iset(to_write);
c.present_rmw_update(
oid,
pin,
write_map);
c.release_write_pin(pin);
}
TEST(extentcache, write_write_overlap)
{
hobject_t oid;
ExtentCache c;
ExtentCache::write_pin pin;
c.open_write_pin(pin);
// start write 1
auto to_read = iset_from_vector(
{{0, 2}, {8, 2}, {20, 2}});
auto to_write = iset_from_vector(
{{0, 10}, {20, 4}});
auto must_read = c.reserve_extents_for_rmw(
oid, pin, to_write, to_read);
ASSERT_EQ(
must_read,
to_read);
c.print(std::cerr);
// start write 2
ExtentCache::write_pin pin2;
c.open_write_pin(pin2);
auto to_read2 = iset_from_vector(
{{2, 4}, {10, 4}, {18, 4}});
auto to_write2 = iset_from_vector(
{{2, 12}, {18, 12}});
auto must_read2 = c.reserve_extents_for_rmw(
oid, pin2, to_write2, to_read2);
ASSERT_EQ(
must_read2,
iset_from_vector({{10, 4}, {18, 2}}));
c.print(std::cerr);
// complete read for write 1 and start commit
auto got = imap_from_iset(must_read);
auto pending_read = to_read;
pending_read.subtract(must_read);
auto pending = c.get_remaining_extents_for_rmw(
oid,
pin,
pending_read);
ASSERT_TRUE(pending.empty());
auto write_map = imap_from_iset(to_write);
c.present_rmw_update(
oid,
pin,
write_map);
c.print(std::cerr);
// complete read for write 2 and start commit
auto pending_read2 = to_read2;
pending_read2.subtract(must_read2);
auto pending2 = c.get_remaining_extents_for_rmw(
oid,
pin2,
pending_read2);
ASSERT_EQ(
pending2,
imap_from_iset(pending_read2));
auto write_map2 = imap_from_iset(to_write2);
c.present_rmw_update(
oid,
pin2,
write_map2);
c.print(std::cerr);
c.release_write_pin(pin);
c.print(std::cerr);
c.release_write_pin(pin2);
}
TEST(extentcache, write_write_overlap2)
{
hobject_t oid;
ExtentCache c;
ExtentCache::write_pin pin;
c.open_write_pin(pin);
// start write 1
auto to_read = extent_set();
auto to_write = iset_from_vector(
{{659456, 4096}});
auto must_read = c.reserve_extents_for_rmw(
oid, pin, to_write, to_read);
ASSERT_EQ(
must_read,
to_read);
c.print(std::cerr);
// start write 2
ExtentCache::write_pin pin2;
c.open_write_pin(pin2);
auto to_read2 = extent_set();
auto to_write2 = iset_from_vector(
{{663552, 4096}});
auto must_read2 = c.reserve_extents_for_rmw(
oid, pin2, to_write2, to_read2);
ASSERT_EQ(
must_read2,
to_read2);
// start write 3
ExtentCache::write_pin pin3;
c.open_write_pin(pin3);
auto to_read3 = iset_from_vector({{659456, 8192}});
auto to_write3 = iset_from_vector({{659456, 8192}});
auto must_read3 = c.reserve_extents_for_rmw(
oid, pin3, to_write3, to_read3);
ASSERT_EQ(
must_read3,
extent_set());
c.print(std::cerr);
// complete read for write 1 and start commit
auto got = imap_from_iset(must_read);
auto pending_read = to_read;
pending_read.subtract(must_read);
auto pending = c.get_remaining_extents_for_rmw(
oid,
pin,
pending_read);
ASSERT_TRUE(pending.empty());
auto write_map = imap_from_iset(to_write);
c.present_rmw_update(
oid,
pin,
write_map);
c.print(std::cerr);
// complete read for write 2 and start commit
auto pending_read2 = to_read2;
pending_read2.subtract(must_read2);
auto pending2 = c.get_remaining_extents_for_rmw(
oid,
pin2,
pending_read2);
ASSERT_EQ(
pending2,
imap_from_iset(pending_read2));
auto write_map2 = imap_from_iset(to_write2);
c.present_rmw_update(
oid,
pin2,
write_map2);
  // complete read for write 3 and start commit
auto pending_read3 = to_read3;
pending_read3.subtract(must_read3);
auto pending3 = c.get_remaining_extents_for_rmw(
oid,
pin3,
pending_read3);
ASSERT_EQ(
pending3,
imap_from_iset(pending_read3));
auto write_map3 = imap_from_iset(to_write3);
c.present_rmw_update(
oid,
pin3,
write_map3);
c.print(std::cerr);
c.release_write_pin(pin);
c.print(std::cerr);
c.release_write_pin(pin2);
c.print(std::cerr);
c.release_write_pin(pin3);
}
| 5,929 | 19.954064 | 70 |
cc
|
null |
ceph-main/src/test/osd/test_pg_transaction.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 Red Hat
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <gtest/gtest.h>
#include "osd/PGTransaction.h"
using namespace std;
TEST(pgtransaction, simple)
{
hobject_t h;
PGTransaction t;
ASSERT_TRUE(t.empty());
t.nop(h);
ASSERT_FALSE(t.empty());
unsigned num = 0;
t.safe_create_traverse(
[&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) {
ASSERT_EQ(p.first, h);
using T = PGTransaction::ObjectOperation::Init;
ASSERT_TRUE(boost::get<T::None>(&p.second.init_type));
++num;
});
ASSERT_EQ(num, 1u);
}
TEST(pgtransaction, clone_safe_create_traverse)
{
hobject_t h, h2;
h2.snap = 1;
PGTransaction t;
ASSERT_TRUE(t.empty());
t.nop(h2);
ASSERT_FALSE(t.empty());
t.clone(h, h2);
unsigned num = 0;
t.safe_create_traverse(
[&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) {
using T = PGTransaction::ObjectOperation::Init;
if (num == 0) {
ASSERT_EQ(p.first, h);
ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type));
ASSERT_EQ(
boost::get<T::Clone>(&p.second.init_type)->source,
h2);
} else if (num == 1) {
ASSERT_EQ(p.first, h2);
ASSERT_TRUE(boost::get<T::None>(&p.second.init_type));
} else {
ASSERT_LT(num, 2u);
}
++num;
});
}
TEST(pgtransaction, clone_safe_create_traverse2)
{
hobject_t h, h2, h3;
h.snap = 10;
h2.snap = 5;
h3.snap = 3;
PGTransaction t;
ASSERT_TRUE(t.empty());
t.nop(h3);
ASSERT_FALSE(t.empty());
t.clone(h, h2);
t.remove(h2);
t.clone(h2, h3);
unsigned num = 0;
t.safe_create_traverse(
[&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) {
using T = PGTransaction::ObjectOperation::Init;
if (num == 0) {
ASSERT_EQ(p.first, h);
ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type));
ASSERT_EQ(
boost::get<T::Clone>(&p.second.init_type)->source,
h2);
} else if (num == 1) {
ASSERT_EQ(p.first, h2);
ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type));
ASSERT_EQ(
boost::get<T::Clone>(&p.second.init_type)->source,
h3);
} else if (num == 2) {
ASSERT_EQ(p.first, h3);
ASSERT_TRUE(boost::get<T::None>(&p.second.init_type));
} else {
ASSERT_LT(num, 3u);
}
++num;
});
}
TEST(pgtransaction, clone_safe_create_traverse3)
{
hobject_t h, h2, h3;
h.snap = 10;
h2.snap = 5;
h3.snap = 3;
PGTransaction t;
t.remove(h);
t.remove(h2);
t.clone(h2, h3);
unsigned num = 0;
t.safe_create_traverse(
[&](const pair<const hobject_t, PGTransaction::ObjectOperation> &p) {
using T = PGTransaction::ObjectOperation::Init;
if (p.first == h) {
ASSERT_TRUE(p.second.is_delete());
} else if (p.first == h2) {
ASSERT_TRUE(boost::get<T::Clone>(&p.second.init_type));
ASSERT_EQ(
boost::get<T::Clone>(&p.second.init_type)->source,
h3);
}
ASSERT_LT(num, 2u);
++num;
});
}
| 3,250 | 23.628788 | 73 |
cc
|
null |
ceph-main/src/test/osd/test_scrub_sched.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/// \file testing the scrub scheduling algorithm
#include <gtest/gtest.h>
#include <algorithm>
#include <map>
#include "common/async/context_pool.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "include/utime_fmt.h"
#include "mon/MonClient.h"
#include "msg/Messenger.h"
#include "os/ObjectStore.h"
#include "osd/PG.h"
#include "osd/osd_types.h"
#include "osd/osd_types_fmt.h"
#include "osd/scrubber/osd_scrub_sched.h"
#include "osd/scrubber_common.h"
int main(int argc, char** argv)
{
std::map<std::string, std::string> defaults = {
// make sure we have 3 copies, or some tests won't work
{"osd_pool_default_size", "3"},
// our map is flat, so just try and split across OSDs, not hosts or whatever
{"osd_crush_chooseleaf_type", "0"},
};
std::vector<const char*> args(argv, argv + argc);
auto cct = global_init(&defaults,
args,
CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
using schedule_result_t = Scrub::schedule_result_t;
using ScrubJobRef = ScrubQueue::ScrubJobRef;
using qu_state_t = ScrubQueue::qu_state_t;
/// enabling access into ScrubQueue internals
class ScrubSchedTestWrapper : public ScrubQueue {
public:
ScrubSchedTestWrapper(Scrub::ScrubSchedListener& osds)
: ScrubQueue(g_ceph_context, osds)
{}
void rm_unregistered_jobs()
{
ScrubQueue::rm_unregistered_jobs(to_scrub);
ScrubQueue::rm_unregistered_jobs(penalized);
}
ScrubQContainer collect_ripe_jobs()
{
return ScrubQueue::collect_ripe_jobs(to_scrub, time_now());
}
/**
   * unit-test support for faking the current time. When not explicitly
   * activated, the default is to use ceph_clock_now()
*/
void set_time_for_testing(long faked_now)
{
m_time_for_testing = utime_t{timeval{faked_now}};
}
void clear_time_for_testing() { m_time_for_testing.reset(); }
mutable std::optional<utime_t> m_time_for_testing;
utime_t time_now() const final
{
if (m_time_for_testing) {
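      // advance the fake clock by 1ms on every query, so that consecutive
      // reads observe strictly increasing timestamps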
m_time_for_testing->tv.tv_nsec += 1'000'000;
}
return m_time_for_testing.value_or(ceph_clock_now());
}
~ScrubSchedTestWrapper() override = default;
};
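// A typical drive sequence (a sketch, using only the members defined above):
//
//   ScrubSchedTestWrapper sched{fake_osd};
//   sched.set_time_for_testing(some_epoch);   // freeze "now"
//   /* register jobs via ScrubQueue::register_with_osd() ... */
//   auto ripe = sched.collect_ripe_jobs();    // jobs whose stamp <= "now"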
/**
* providing the small number of OSD services used when scheduling
* a scrub
*/
class FakeOsd : public Scrub::ScrubSchedListener {
public:
FakeOsd(int osd_num) : m_osd_num(osd_num) {}
int get_nodeid() const final { return m_osd_num; }
schedule_result_t initiate_a_scrub(spg_t pgid,
bool allow_requested_repair_only) final
{
std::ignore = allow_requested_repair_only;
auto res = m_next_response.find(pgid);
if (res == m_next_response.end()) {
return schedule_result_t::no_such_pg;
}
    return res->second;
}
void set_initiation_response(spg_t pgid, schedule_result_t result)
{
m_next_response[pgid] = result;
}
private:
int m_osd_num;
std::map<spg_t, schedule_result_t> m_next_response;
};
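// Scripting the fake OSD's answer to scrub initiation (a sketch; the
// 'scrub_initiated' enumerator name is an assumption about
// Scrub::schedule_result_t):
//
//   FakeOsd osd{1};
//   osd.set_initiation_response(pgid, schedule_result_t::scrub_initiated);
//   // PGs with no scripted response fall through to no_such_pg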
/// the static blueprint for creating a scrub job in the scrub queue
struct sjob_config_t {
spg_t spg;
bool are_stats_valid;
utime_t history_scrub_stamp;
std::optional<double> pool_conf_min;
std::optional<double> pool_conf_max;
bool is_must;
bool is_need_auto;
ScrubQueue::scrub_schedule_t initial_schedule;
};
/**
 * the runtime configuration for a scrub job. Created based on the blueprint
* above (sjob_config_t)
*/
struct sjob_dynamic_data_t {
sjob_config_t initial_config;
pg_info_t mocked_pg_info;
pool_opts_t mocked_pool_opts;
requested_scrub_t request_flags;
ScrubQueue::ScrubJobRef job;
};
class TestScrubSched : public ::testing::Test {
public:
TestScrubSched() = default;
protected:
int m_osd_num{1};
FakeOsd m_osds{m_osd_num};
std::unique_ptr<ScrubSchedTestWrapper> m_sched{
new ScrubSchedTestWrapper(m_osds)};
/// the pg-info is queried for stats validity and for the last-scrub-stamp
pg_info_t pg_info{};
/// the pool configuration holds some per-pool scrub timing settings
pool_opts_t pool_opts{};
/**
* the scrub-jobs created for the tests, along with their corresponding
* "pg info" and pool configuration. In real life - the scrub jobs
* are owned by the respective PGs.
*/
std::vector<sjob_dynamic_data_t> m_scrub_jobs;
protected:
sjob_dynamic_data_t create_scrub_job(const sjob_config_t& sjob_data)
{
sjob_dynamic_data_t dyn_data;
dyn_data.initial_config = sjob_data;
// populate the 'pool options' object with the scrub timing settings
if (sjob_data.pool_conf_min) {
dyn_data.mocked_pool_opts.set<double>(pool_opts_t::SCRUB_MIN_INTERVAL,
sjob_data.pool_conf_min.value());
}
if (sjob_data.pool_conf_max) {
dyn_data.mocked_pool_opts.set(pool_opts_t::SCRUB_MAX_INTERVAL,
sjob_data.pool_conf_max.value());
}
// create the 'pg info' object with the stats
dyn_data.mocked_pg_info = pg_info_t{sjob_data.spg};
dyn_data.mocked_pg_info.history.last_scrub_stamp =
sjob_data.history_scrub_stamp;
dyn_data.mocked_pg_info.stats.stats_invalid = !sjob_data.are_stats_valid;
    // fake just the required 'requested-scrub' flags
std::cout << "request_flags: sjob_data.is_must " << sjob_data.is_must
<< std::endl;
dyn_data.request_flags.must_scrub = sjob_data.is_must;
dyn_data.request_flags.need_auto = sjob_data.is_need_auto;
// create the scrub job
dyn_data.job = ceph::make_ref<ScrubQueue::ScrubJob>(g_ceph_context,
sjob_data.spg,
m_osd_num);
m_scrub_jobs.push_back(dyn_data);
return dyn_data;
}
void register_job_set(const std::vector<sjob_config_t>& job_configs)
{
std::for_each(job_configs.begin(),
job_configs.end(),
[this](const sjob_config_t& sj) {
auto dynjob = create_scrub_job(sj);
m_sched->register_with_osd(
dynjob.job,
m_sched->determine_scrub_time(dynjob.request_flags,
dynjob.mocked_pg_info,
dynjob.mocked_pool_opts));
});
}
/// count the scrub-jobs that are currently in a specific state
int count_scrub_jobs_in_state(qu_state_t state)
{
return std::count_if(m_scrub_jobs.begin(),
m_scrub_jobs.end(),
[state](const sjob_dynamic_data_t& sj) {
return sj.job->state == state;
});
}
void list_testers_jobs(std::string hdr)
{
std::cout << fmt::format("{}: {} jobs created for the test:",
hdr,
m_scrub_jobs.size())
<< std::endl;
for (const auto& job : m_scrub_jobs) {
std::cout << fmt::format("\t{}: job {}", hdr, *job.job) << std::endl;
}
}
void print_all_states(std::string hdr)
{
std::cout << fmt::format(
"{}: Created:{}. Per state: not-reg:{} reg:{} unreg:{}",
hdr,
m_scrub_jobs.size(),
count_scrub_jobs_in_state(qu_state_t::not_registered),
count_scrub_jobs_in_state(qu_state_t::registered),
count_scrub_jobs_in_state(qu_state_t::unregistering))
<< std::endl;
}
void debug_print_jobs(std::string hdr,
const ScrubQueue::ScrubQContainer& jobs)
{
std::cout << fmt::format("{}: time now {}", hdr, m_sched->time_now())
<< std::endl;
for (const auto& job : jobs) {
std::cout << fmt::format(
"\t{}: job {} ({}): scheduled {}",
hdr,
job->pgid,
job->scheduling_state(m_sched->time_now(), false),
job->get_sched_time())
<< std::endl;
}
}
};
// ///////////////////////////////////////////////////////////////////////////
// test data. Scrub-job creation requires a PG-id, and a set of 'scrub request'
// flags
namespace {
// the times used during the tests are offset to 1.1.2000, so that
// utime_t formatting will treat them as absolute (not as a relative time)
static const auto epoch_2000 = 946'684'800;
std::vector<sjob_config_t> sjob_configs = {
{
spg_t{pg_t{1, 1}},
true, // PG has valid stats
utime_t{std::time_t(epoch_2000 + 1'000'000), 0}, // last-scrub-stamp
100.0, // min scrub delay in pool config
std::nullopt, // max scrub delay in pool config
false, // must-scrub
false, // need-auto
ScrubQueue::scrub_schedule_t{} // initial schedule
},
{spg_t{pg_t{4, 1}},
true,
utime_t{epoch_2000 + 1'000'000, 0},
100.0,
std::nullopt,
true,
false,
ScrubQueue::scrub_schedule_t{}},
{spg_t{pg_t{7, 1}},
true,
utime_t{},
1.0,
std::nullopt,
false,
false,
ScrubQueue::scrub_schedule_t{}},
{spg_t{pg_t{5, 1}},
true,
utime_t{epoch_2000 + 1'900'000, 0},
1.0,
std::nullopt,
false,
false,
ScrubQueue::scrub_schedule_t{}}};
} // anonymous namespace
// //////////////////////////// tests ////////////////////////////////////////
/// basic test: scheduling simple jobs, validating their calculated schedule
TEST_F(TestScrubSched, populate_queue)
{
ASSERT_EQ(0, m_sched->list_registered_jobs().size());
auto dynjob_0 = create_scrub_job(sjob_configs[0]);
auto suggested = m_sched->determine_scrub_time(dynjob_0.request_flags,
dynjob_0.mocked_pg_info,
dynjob_0.mocked_pool_opts);
m_sched->register_with_osd(dynjob_0.job, suggested);
std::cout << fmt::format("scheduled at: {}", dynjob_0.job->get_sched_time())
<< std::endl;
auto dynjob_1 = create_scrub_job(sjob_configs[1]);
suggested = m_sched->determine_scrub_time(dynjob_1.request_flags,
dynjob_1.mocked_pg_info,
dynjob_1.mocked_pool_opts);
m_sched->register_with_osd(dynjob_1.job, suggested);
std::cout << fmt::format("scheduled at: {}", dynjob_1.job->get_sched_time())
<< std::endl;
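  // sjob_configs[1] is flagged 'must-scrub'; the expected utime_t(1, 1) below
  // is (by assumption) the queue's special "scrub as soon as possible" stamp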
EXPECT_EQ(dynjob_1.job->get_sched_time(), utime_t(1, 1));
EXPECT_EQ(2, m_sched->list_registered_jobs().size());
}
/// validate the states of the scrub-jobs (as set in the jobs themselves)
TEST_F(TestScrubSched, states)
{
m_sched->set_time_for_testing(epoch_2000);
register_job_set(sjob_configs);
list_testers_jobs("testing states");
EXPECT_EQ(sjob_configs.size(), m_sched->list_registered_jobs().size());
// check the initial state of the jobs
print_all_states("<initial state>");
m_sched->rm_unregistered_jobs();
EXPECT_EQ(0, count_scrub_jobs_in_state(qu_state_t::not_registered));
// now - remove a couple of them
m_sched->remove_from_osd_queue(m_scrub_jobs[2].job);
m_sched->remove_from_osd_queue(m_scrub_jobs[1].job);
m_sched->remove_from_osd_queue(m_scrub_jobs[2].job); // should have no effect
print_all_states("<w/ 2 jobs removed>");
EXPECT_EQ(2, count_scrub_jobs_in_state(qu_state_t::registered));
EXPECT_EQ(2, count_scrub_jobs_in_state(qu_state_t::unregistering));
m_sched->rm_unregistered_jobs();
EXPECT_EQ(2, count_scrub_jobs_in_state(qu_state_t::not_registered));
std::cout << fmt::format("inp size: {}. In list-registered: {}",
sjob_configs.size(),
m_sched->list_registered_jobs().size())
<< std::endl;
EXPECT_EQ(sjob_configs.size() - 2, m_sched->list_registered_jobs().size());
}
/// jobs that are ripe should be in the ready list, sorted by their scheduled
/// time
TEST_F(TestScrubSched, ready_list)
{
m_sched->set_time_for_testing(epoch_2000 + 900'000);
register_job_set(sjob_configs);
list_testers_jobs("testing states");
EXPECT_EQ(sjob_configs.size(), m_sched->list_registered_jobs().size());
m_sched->set_time_for_testing(epoch_2000 + 1'000'000);
auto all_reg_jobs = m_sched->list_registered_jobs();
debug_print_jobs("registered", all_reg_jobs);
auto ripe_jobs = m_sched->collect_ripe_jobs();
EXPECT_EQ(2, ripe_jobs.size());
debug_print_jobs("ready_list", ripe_jobs);
m_sched->set_time_for_testing(epoch_2000 + 3'000'000);
// all jobs should be in the ready list
ripe_jobs = m_sched->collect_ripe_jobs();
EXPECT_EQ(4, ripe_jobs.size());
debug_print_jobs("ready_list", ripe_jobs);
}
| 12,036 | 28.868486 | 80 |
cc
|
null |
ceph-main/src/test/osd/test_scrubber_be.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "./scrubber_generators.h"
#include "./scrubber_test_datasets.h"
#include <gtest/gtest.h>
#include <signal.h>
#include <stdio.h>
#include <fmt/ranges.h>
#include "common/async/context_pool.h"
#include "common/ceph_argparse.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "mon/MonClient.h"
#include "msg/Messenger.h"
#include "os/ObjectStore.h"
#include "osd/PG.h"
#include "osd/PGBackend.h"
#include "osd/PrimaryLogPG.h"
#include "osd/osd_types.h"
#include "osd/osd_types_fmt.h"
#include "osd/scrubber/pg_scrubber.h"
#include "osd/scrubber/scrub_backend.h"
/// \file testing isolated parts of the Scrubber backend
using namespace std::string_literals;
int main(int argc, char** argv)
{
std::map<std::string, std::string> defaults = {
// make sure we have 3 copies, or some tests won't work
{"osd_pool_default_size", "3"},
// our map is flat, so just try and split across OSDs, not hosts or whatever
{"osd_crush_chooseleaf_type", "0"},
};
std::vector<const char*> args(argv, argv + argc);
auto cct = global_init(&defaults,
args,
CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
common_init_finish(g_ceph_context);
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
class TestScrubBackend : public ScrubBackend {
public:
TestScrubBackend(ScrubBeListener& scrubber,
PgScrubBeListener& pg,
pg_shard_t i_am,
bool repair,
scrub_level_t shallow_or_deep,
const std::set<pg_shard_t>& acting)
: ScrubBackend(scrubber, pg, i_am, repair, shallow_or_deep, acting)
{}
bool get_m_repair() const { return m_repair; }
bool get_is_replicated() const { return m_is_replicated; }
auto get_omap_stats() const { return m_omap_stats; }
const std::vector<pg_shard_t>& all_but_me() const { return m_acting_but_me; }
/// populate the scrub-maps set for the 'chunk' being scrubbed
void insert_faked_smap(pg_shard_t shard, const ScrubMap& smap);
};
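// How the fixture below drives this mock (a sketch, using only members
// declared above plus the inherited scrub_compare_maps()):
//
//   TestScrubBackend sbe{scrubber, pg, i_am, false, scrub_level_t::deep, acting};
//   sbe.new_chunk();                          // allocate this_chunk
//   sbe.insert_faked_smap(shard, smap);       // one map per acting shard
//   auto [incons, fixes] = sbe.scrub_compare_maps(true, snap_reader);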
// mocking the PG
class TestPg : public PgScrubBeListener {
public:
~TestPg() = default;
TestPg(std::shared_ptr<PGPool> pool, pg_info_t& pginfo, pg_shard_t my_osd)
: m_pool{pool}
, m_info{pginfo}
, m_pshard{my_osd}
{}
const PGPool& get_pgpool() const final { return *(m_pool.get()); }
pg_shard_t get_primary() const final { return m_pshard; }
void force_object_missing(ScrubberPasskey,
const std::set<pg_shard_t>& peer,
const hobject_t& oid,
eversion_t version) final
{}
const pg_info_t& get_pg_info(ScrubberPasskey) const final { return m_info; }
uint64_t logical_to_ondisk_size(uint64_t logical_size) const final
{
return logical_size;
}
bool is_waiting_for_unreadable_object() const final { return false; }
std::shared_ptr<PGPool> m_pool;
pg_info_t& m_info;
pg_shard_t m_pshard;
};
// ///////////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////////////
// and the scrubber
class TestScrubber : public ScrubBeListener, public Scrub::SnapMapReaderI {
using result_t = Scrub::SnapMapReaderI::result_t;
public:
~TestScrubber() = default;
TestScrubber(spg_t spg, OSDMapRef osdmap, LoggerSinkSet& logger)
: m_spg{spg}
, m_logger{logger}
, m_osdmap{osdmap}
{}
std::ostream& gen_prefix(std::ostream& out) const final { return out; }
CephContext* get_pg_cct() const final { return g_ceph_context; }
LoggerSinkSet& get_logger() const final { return m_logger; }
bool is_primary() const final { return m_primary; }
spg_t get_pgid() const final { return m_info.pgid; }
const OSDMapRef& get_osdmap() const final { return m_osdmap; }
void add_to_stats(const object_stat_sum_t& stat) final { m_stats.add(stat); }
// submit_digest_fixes() mock can be set to expect a specific set of
// fixes to perform.
/// \todo implement the mock.
void submit_digest_fixes(const digests_fixes_t& fixes) final
{
std::cout << fmt::format("{} submit_digest_fixes({})",
__func__,
fmt::join(fixes, ","))
<< std::endl;
}
int get_snaps(const hobject_t& hoid,
std::set<snapid_t>* snaps_set) const;
tl::expected<std::set<snapid_t>, result_t> get_snaps(
const hobject_t& oid) const final;
tl::expected<std::set<snapid_t>, result_t> get_snaps_check_consistency(
const hobject_t& oid) const final
{
/// \todo for now
return get_snaps(oid);
}
void set_snaps(const hobject_t& hoid, const std::vector<snapid_t>& snaps)
{
std::cout
<< fmt::format("{}: ({}) -> #{} {}", __func__, hoid, snaps.size(), snaps)
<< std::endl;
std::set<snapid_t> snaps_set(snaps.begin(), snaps.end());
m_snaps[hoid] = snaps_set;
}
void set_snaps(const ScrubGenerator::all_clones_snaps_t& clones_snaps)
{
for (const auto& [clone, snaps] : clones_snaps) {
std::cout << fmt::format("{}: ({}) -> #{} {}",
__func__,
clone,
snaps.size(),
snaps)
<< std::endl;
std::set<snapid_t> snaps_set(snaps.begin(), snaps.end());
m_snaps[clone] = snaps_set;
}
}
bool m_primary{true};
spg_t m_spg;
LoggerSinkSet& m_logger;
OSDMapRef m_osdmap;
pg_info_t m_info;
object_stat_sum_t m_stats;
// the "snap-mapper" database (returned by get_snaps())
std::map<hobject_t, std::set<snapid_t>> m_snaps;
};
int TestScrubber::get_snaps(const hobject_t& hoid,
std::set<snapid_t>* snaps_set) const
{
auto it = m_snaps.find(hoid);
if (it == m_snaps.end()) {
std::cout << fmt::format("{}: ({}) no snaps", __func__, hoid) << std::endl;
return -ENOENT;
}
*snaps_set = it->second;
std::cout << fmt::format("{}: ({}) -> #{} {}",
__func__,
hoid,
snaps_set->size(),
*snaps_set)
<< std::endl;
return 0;
}
tl::expected<std::set<snapid_t>, Scrub::SnapMapReaderI::result_t>
TestScrubber::get_snaps(const hobject_t& oid) const
{
std::set<snapid_t> snapset;
auto r = get_snaps(oid, &snapset);
if (r >= 0) {
return snapset;
}
return tl::make_unexpected(Scrub::SnapMapReaderI::result_t{
Scrub::SnapMapReaderI::result_t::code_t::not_found,
r});
}
// ///////////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////////////
/// parameters for TestTScrubberBe construction
struct TestTScrubberBeParams {
ScrubGenerator::pool_conf_t pool_conf;
ScrubGenerator::RealObjsConf objs_conf;
int num_osds;
};
// ///////////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////////////
// the actual owner of the OSD "objects" that are used by
// the mockers
class TestTScrubberBe : public ::testing::Test {
public:
// the test data source
virtual TestTScrubberBeParams inject_params() = 0;
// initial test data
ScrubGenerator::MockLog logger;
ScrubGenerator::pool_conf_t pool_conf;
ScrubGenerator::RealObjsConf real_objs;
int num_osds{0};
// ctor & initialization
TestTScrubberBe() = default;
~TestTScrubberBe() = default;
void SetUp() override;
void TearDown() override;
/**
   * Create the set of scrub-maps supposedly sent by the replicas (or
   * generated by the Primary). Then create the snap-sets for all
* the objects in the set.
*/
void fake_a_scrub_set(ScrubGenerator::RealObjsConfList& all_sets);
std::unique_ptr<TestScrubBackend> sbe;
spg_t spg;
pg_shard_t i_am; // set to 'my osd and no shard'
std::set<pg_shard_t> acting_shards;
std::vector<int> acting_osds;
int acting_primary;
std::unique_ptr<TestScrubber> test_scrubber;
int64_t pool_id;
pg_pool_t pool_info;
OSDMapRef osdmap;
std::shared_ptr<PGPool> pool;
pg_info_t info;
std::unique_ptr<TestPg> test_pg;
// generated sets of "objects" for the active OSDs
ScrubGenerator::RealObjsConfList real_objs_list;
protected:
/**
* Create the OSDmap and populate it with one pool, based on
* the pool configuration.
* For now - only replicated pools are supported.
*/
OSDMapRef setup_map(int num_osds, const ScrubGenerator::pool_conf_t& pconf);
/**
* Create a PG in the one pool we have. Fake the PG info.
* Use the primary of the PG to determine "who we are".
*
* \returns the PG info
*/
pg_info_t setup_pg_in_map();
};
// ///////////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////////////
void TestTScrubberBe::SetUp()
{
std::cout << "TestTScrubberBe::SetUp()" << std::endl;
logger.err_count = 0;
// fetch test configuration
auto params = inject_params();
pool_conf = params.pool_conf;
real_objs = params.objs_conf;
num_osds = params.num_osds;
// create the OSDMap
osdmap = setup_map(num_osds, pool_conf);
std::cout << "osdmap: " << *osdmap << std::endl;
// extract the pool from the osdmap
pool_id = osdmap->lookup_pg_pool_name(pool_conf.name);
const pg_pool_t* ext_pool_info = osdmap->get_pg_pool(pool_id);
pool =
std::make_shared<PGPool>(osdmap, pool_id, *ext_pool_info, pool_conf.name);
std::cout << "pool: " << pool->info << std::endl;
// a PG in that pool?
info = setup_pg_in_map();
std::cout << fmt::format("PG info: {}", info) << std::endl;
real_objs_list =
ScrubGenerator::make_real_objs_conf(pool_id, real_objs, acting_osds);
// now we can create the main mockers
// the "PgScrubber"
test_scrubber = std::make_unique<TestScrubber>(spg, osdmap, logger);
// the "PG" (and its backend)
test_pg = std::make_unique<TestPg>(pool, info, i_am);
std::cout << fmt::format("{}: acting: {}", __func__, acting_shards)
<< std::endl;
sbe = std::make_unique<TestScrubBackend>(*test_scrubber,
*test_pg,
i_am,
/* repair? */ false,
scrub_level_t::deep,
acting_shards);
  // create an osd-num-only copy of the relevant OSDs
acting_osds.reserve(acting_shards.size());
for (const auto& shard : acting_shards) {
acting_osds.push_back(shard.osd);
}
sbe->new_chunk();
fake_a_scrub_set(real_objs_list);
}
// Note: based on TestOSDMap.cc.
OSDMapRef TestTScrubberBe::setup_map(int num_osds,
const ScrubGenerator::pool_conf_t& pconf)
{
auto osdmap = std::make_shared<OSDMap>();
uuid_d fsid;
osdmap->build_simple(g_ceph_context, 0, fsid, num_osds);
OSDMap::Incremental pending_inc(osdmap->get_epoch() + 1);
pending_inc.fsid = osdmap->get_fsid();
entity_addrvec_t sample_addrs;
sample_addrs.v.push_back(entity_addr_t());
uuid_d sample_uuid;
for (int i = 0; i < num_osds; ++i) {
sample_uuid.generate_random();
sample_addrs.v[0].nonce = i;
pending_inc.new_state[i] = CEPH_OSD_EXISTS | CEPH_OSD_NEW;
pending_inc.new_up_client[i] = sample_addrs;
pending_inc.new_up_cluster[i] = sample_addrs;
pending_inc.new_hb_back_up[i] = sample_addrs;
pending_inc.new_hb_front_up[i] = sample_addrs;
pending_inc.new_weight[i] = CEPH_OSD_IN;
pending_inc.new_uuid[i] = sample_uuid;
}
osdmap->apply_incremental(pending_inc);
// create a replicated pool
OSDMap::Incremental new_pool_inc(osdmap->get_epoch() + 1);
new_pool_inc.new_pool_max = osdmap->get_pool_max();
new_pool_inc.fsid = osdmap->get_fsid();
uint64_t pool_id = ++new_pool_inc.new_pool_max;
pg_pool_t empty;
auto p = new_pool_inc.get_new_pool(pool_id, &empty);
p->size = pconf.size;
p->set_pg_num(pconf.pg_num);
p->set_pgp_num(pconf.pgp_num);
p->type = pg_pool_t::TYPE_REPLICATED;
p->crush_rule = 0;
p->set_flag(pg_pool_t::FLAG_HASHPSPOOL);
new_pool_inc.new_pool_names[pool_id] = pconf.name;
osdmap->apply_incremental(new_pool_inc);
return osdmap;
}
pg_info_t TestTScrubberBe::setup_pg_in_map()
{
pg_t rawpg(0, pool_id);
pg_t pgid = osdmap->raw_pg_to_pg(rawpg);
std::vector<int> up_osds;
int up_primary;
osdmap->pg_to_up_acting_osds(pgid,
&up_osds,
&up_primary,
&acting_osds,
&acting_primary);
std::cout << fmt::format(
"{}: pg: {} up_osds: {} up_primary: {} acting_osds: {} "
"acting_primary: "
"{}",
__func__,
pgid,
up_osds,
up_primary,
acting_osds,
acting_primary)
<< std::endl;
spg = spg_t{pgid};
i_am = pg_shard_t{up_primary};
std::cout << fmt::format("{}: spg: {} and I am {}", __func__, spg, i_am)
<< std::endl;
// the 'acting shards' set - the one actually used by the scrubber
std::for_each(acting_osds.begin(), acting_osds.end(), [&](int osd) {
acting_shards.insert(pg_shard_t{osd});
});
std::cout << fmt::format("{}: acting_shards: {}", __func__, acting_shards)
<< std::endl;
pg_info_t info;
info.pgid = spg;
/// \todo: handle the epochs:
// info.last_update = osdmap->get_epoch();
// info.last_complete = osdmap->get_epoch();
// info.last_osdmap_epoch = osdmap->get_epoch();
// info.history.last_epoch_marked_removed = osdmap->get_epoch();
info.last_user_version = 1;
info.purged_snaps = {};
info.history.last_epoch_clean = osdmap->get_epoch();
info.history.last_epoch_split = osdmap->get_epoch();
info.history.last_epoch_marked_full = osdmap->get_epoch();
info.last_backfill = hobject_t::get_max();
return info;
}
void TestTScrubberBe::TearDown()
{
EXPECT_EQ(logger.err_count, logger.expected_err_count);
}
void TestTScrubberBe::fake_a_scrub_set(
ScrubGenerator::RealObjsConfList& all_sets)
{
for (int osd_num = 0; osd_num < pool_conf.size; ++osd_num) {
ScrubMap smap;
smap.valid_through = eversion_t{1, 1};
smap.incr_since = eversion_t{1, 1};
smap.has_omap_keys = true; // to force omap checks
// fill the map with the objects relevant to this OSD
for (auto& obj : all_sets[osd_num]->objs) {
std::cout << fmt::format("{}: object: {}", __func__, obj.ghobj.hobj)
<< std::endl;
ScrubGenerator::add_object(smap, obj, osd_num);
}
std::cout << fmt::format("{}: {} inserting smap {:D}",
__func__,
osd_num,
smap)
<< std::endl;
sbe->insert_faked_smap(pg_shard_t{osd_num}, smap);
}
// create the snap_mapper state
for (const auto& robj : all_sets[i_am.osd]->objs) {
std::cout << fmt::format("{}: object: {}", __func__, robj.ghobj.hobj)
<< std::endl;
if (robj.ghobj.hobj.snap == CEPH_NOSNAP) {
// head object
auto objects_snapset = ScrubGenerator::all_clones(robj);
test_scrubber->set_snaps(objects_snapset);
}
}
}
void TestScrubBackend::insert_faked_smap(pg_shard_t shard, const ScrubMap& smap)
{
ASSERT_TRUE(this_chunk.has_value());
std::cout << fmt::format("{}: inserting faked smap for osd {}",
__func__,
shard.osd)
<< std::endl;
this_chunk->received_maps[shard] = smap;
}
// ///////////////////////////////////////////////////////////////////////////
// ///////////////////////////////////////////////////////////////////////////
using namespace ScrubGenerator;
class TestTScrubberBe_data_1 : public TestTScrubberBe {
public:
TestTScrubberBe_data_1() : TestTScrubberBe() {}
// test configuration
pool_conf_t pl{3, 3, 3, 3, "rep_pool"};
TestTScrubberBeParams inject_params() override
{
std::cout << fmt::format("{}: injecting params (minimal snaps conf.)",
__func__)
<< std::endl;
return TestTScrubberBeParams{
/* pool_conf */ pl,
/* real_objs_conf */ ScrubDatasets::minimal_snaps_configuration,
/*num_osds */ 3};
}
};
// some basic sanity checks
// (mainly testing the constructor)
TEST_F(TestTScrubberBe_data_1, creation_1)
{
/// \todo copy some osdmap tests from TestOSDMap.cc
ASSERT_TRUE(sbe);
ASSERT_TRUE(sbe->get_is_replicated());
ASSERT_FALSE(sbe->get_m_repair());
sbe->update_repair_status(true);
ASSERT_TRUE(sbe->get_m_repair());
// make sure *I* do not appear in 'all_but_me' set of OSDs
auto others = sbe->all_but_me();
auto in_others = std::find(others.begin(), others.end(), i_am);
EXPECT_EQ(others.end(), in_others);
}
TEST_F(TestTScrubberBe_data_1, smaps_creation_1)
{
ASSERT_TRUE(sbe);
ASSERT_EQ(sbe->get_omap_stats().omap_bytes, 0);
// for test data 'minimal_snaps_configuration':
  // scrub_compare_maps() should not emit any error, nor
// return any snap-mapper fix
auto [incons, fix_list] = sbe->scrub_compare_maps(true, *test_scrubber);
EXPECT_EQ(fix_list.size(), 0); // snap-mapper fix should be empty
EXPECT_EQ(incons.size(), 0); // no inconsistency
// make sure the test did execute *something*
EXPECT_TRUE(sbe->get_omap_stats().omap_bytes != 0);
}
// whitebox testing (OK if failing after a change to the backend internals)
// blackbox testing - testing the published functionality
// (should not depend on internals of the backend)
/// corrupt the snap_mapper data
TEST_F(TestTScrubberBe_data_1, snapmapper_1)
{
using snap_mapper_op_t = Scrub::snap_mapper_op_t;
ASSERT_TRUE(sbe);
// a bogus version of hobj_ms1_snp30 (a clone) snap_ids
hobject_t hobj_ms1_snp30_inpool = hobject_t{ScrubDatasets::hobj_ms1_snp30};
hobj_ms1_snp30_inpool.pool = pool_id;
all_clones_snaps_t bogus_30;
bogus_30[hobj_ms1_snp30_inpool] = {0x333, 0x666};
test_scrubber->set_snaps(bogus_30);
auto [incons, fix_list] = sbe->scrub_compare_maps(true, *test_scrubber);
EXPECT_EQ(fix_list.size(), 1);
// debug - print the fix-list:
for (const auto& fix : fix_list) {
std::cout << fmt::format("snapmapper_1: fix {}: {} {}->{}",
fix.hoid,
(fix.op == snap_mapper_op_t::add ? "add" : "upd"),
fix.wrong_snaps,
fix.snaps)
<< std::endl;
}
EXPECT_EQ(fix_list[0].hoid, hobj_ms1_snp30_inpool);
EXPECT_EQ(fix_list[0].snaps, std::set<snapid_t>{0x30});
EXPECT_EQ(incons.size(), 0); // no inconsistency
}
// a dataset similar to 'minimal_snaps_configuration',
// but with the hobj_ms1_snp30 clone being modified by a corruption
// function
class TestTScrubberBe_data_2 : public TestTScrubberBe {
public:
TestTScrubberBe_data_2() : TestTScrubberBe() {}
// basic test configuration - 3 OSDs, all involved in the pool
pool_conf_t pl{3, 3, 3, 3, "rep_pool"};
TestTScrubberBeParams inject_params() override
{
std::cout << fmt::format(
"{}: injecting params (minimal-snaps + size change)",
__func__)
<< std::endl;
TestTScrubberBeParams params{
/* pool_conf */ pl,
/* real_objs_conf */ ScrubDatasets::minimal_snaps_configuration,
/*num_osds */ 3};
// inject a corruption function that will modify osd.0's version of
// the object
params.objs_conf.objs[0].corrupt_funcs = &ScrubDatasets::crpt_funcs_set1;
return params;
}
};
TEST_F(TestTScrubberBe_data_2, smaps_clone_size)
{
ASSERT_TRUE(sbe);
EXPECT_EQ(sbe->get_omap_stats().omap_bytes, 0);
logger.set_expected_err_count(1);
auto [incons, fix_list] = sbe->scrub_compare_maps(true, *test_scrubber);
EXPECT_EQ(fix_list.size(), 0); // snap-mapper fix should be empty
EXPECT_EQ(incons.size(), 1); // one inconsistency
}
// Local Variables:
// compile-command: "cd ../.. ; make unittest_osdscrub ; ./unittest_osdscrub
// --log-to-stderr=true --debug-osd=20 # --gtest_filter=*.* "
// End:
| 19,420 | 27.986567 | 80 |
cc
|
null |
ceph-main/src/test/osd/types.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
* Copyright (C) 2013 Cloudwatt <[email protected]>
*
* Author: Loic Dachary <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License version 2, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/types.h"
#include "osd/osd_types.h"
#include "osd/OSDMap.h"
#include "gtest/gtest.h"
#include "include/coredumpctl.h"
#include "common/Thread.h"
#include "include/stringify.h"
#include "osd/ReplicatedBackend.h"
#include <sstream>
using namespace std;
TEST(hobject, prefixes0)
{
uint32_t mask = 0xE947FA20;
uint32_t bits = 12;
int64_t pool = 0;
set<string> prefixes_correct;
prefixes_correct.insert(string("0000000000000000.02A"));
set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool));
ASSERT_EQ(prefixes_out, prefixes_correct);
}
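// What get_prefixes() computes, as the expected value above suggests: each
// prefix is "<pool as a 16-digit hex number>.<masked hash nibbles>", with the
// hash nibbles emitted lowest-first (reversed) and only the low 'bits' bits
// of 'mask' constraining the output. Worked example for this test: bits=12
// keeps the low 12 bits of 0xE947FA20, i.e. 0xA20; reversing the nibble
// order yields "02A", hence "0000000000000000.02A".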
TEST(hobject, prefixes1)
{
uint32_t mask = 0x0000000F;
uint32_t bits = 6;
int64_t pool = 20;
set<string> prefixes_correct;
prefixes_correct.insert(string("0000000000000014.F0"));
prefixes_correct.insert(string("0000000000000014.F4"));
prefixes_correct.insert(string("0000000000000014.F8"));
prefixes_correct.insert(string("0000000000000014.FC"));
set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool));
ASSERT_EQ(prefixes_out, prefixes_correct);
}
TEST(hobject, prefixes2)
{
uint32_t mask = 0xDEADBEAF;
uint32_t bits = 25;
int64_t pool = 0;
set<string> prefixes_correct;
prefixes_correct.insert(string("0000000000000000.FAEBDA0"));
prefixes_correct.insert(string("0000000000000000.FAEBDA2"));
prefixes_correct.insert(string("0000000000000000.FAEBDA4"));
prefixes_correct.insert(string("0000000000000000.FAEBDA6"));
prefixes_correct.insert(string("0000000000000000.FAEBDA8"));
prefixes_correct.insert(string("0000000000000000.FAEBDAA"));
prefixes_correct.insert(string("0000000000000000.FAEBDAC"));
prefixes_correct.insert(string("0000000000000000.FAEBDAE"));
set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool));
ASSERT_EQ(prefixes_out, prefixes_correct);
}
TEST(hobject, prefixes3)
{
uint32_t mask = 0xE947FA20;
uint32_t bits = 32;
int64_t pool = 0x23;
set<string> prefixes_correct;
prefixes_correct.insert(string("0000000000000023.02AF749E"));
set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool));
ASSERT_EQ(prefixes_out, prefixes_correct);
}
TEST(hobject, prefixes4)
{
uint32_t mask = 0xE947FA20;
uint32_t bits = 0;
int64_t pool = 0x23;
set<string> prefixes_correct;
prefixes_correct.insert(string("0000000000000023."));
set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool));
ASSERT_EQ(prefixes_out, prefixes_correct);
}
TEST(hobject, prefixes5)
{
uint32_t mask = 0xDEADBEAF;
uint32_t bits = 1;
int64_t pool = 0x34AC5D00;
set<string> prefixes_correct;
prefixes_correct.insert(string("0000000034AC5D00.1"));
prefixes_correct.insert(string("0000000034AC5D00.3"));
prefixes_correct.insert(string("0000000034AC5D00.5"));
prefixes_correct.insert(string("0000000034AC5D00.7"));
prefixes_correct.insert(string("0000000034AC5D00.9"));
prefixes_correct.insert(string("0000000034AC5D00.B"));
prefixes_correct.insert(string("0000000034AC5D00.D"));
prefixes_correct.insert(string("0000000034AC5D00.F"));
set<string> prefixes_out(hobject_t::get_prefixes(bits, mask, pool));
ASSERT_EQ(prefixes_out, prefixes_correct);
}
TEST(pg_interval_t, check_new_interval)
{
// iterate through all 4 combinations
for (unsigned i = 0; i < 4; ++i) {
//
// Create a situation where osdmaps are the same so that
// each test case can diverge from it using minimal code.
//
int osd_id = 1;
epoch_t epoch = 40;
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
std::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
epoch_t same_interval_since = epoch;
epoch_t last_epoch_clean = same_interval_since;
int64_t pool_id = 200;
int pg_num = 4;
__u8 min_size = 2;
boost::scoped_ptr<IsPGRecoverablePredicate> recoverable(new ReplicatedBackend::RPCRecPred());
{
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
inc.new_pools[pool_id].set_pg_num_pending(pg_num);
inc.new_up_thru[osd_id] = epoch + 1;
osdmap->apply_incremental(inc);
lastmap->apply_incremental(inc);
}
vector<int> new_acting;
new_acting.push_back(osd_id);
new_acting.push_back(osd_id + 1);
vector<int> old_acting = new_acting;
int old_primary = osd_id;
int new_primary = osd_id;
vector<int> new_up;
new_up.push_back(osd_id);
int old_up_primary = osd_id;
int new_up_primary = osd_id;
vector<int> old_up = new_up;
pg_t pgid;
pgid.set_pool(pool_id);
//
// Do nothing if there are no modifications in
// acting, up or pool size and that the pool is not
// being split
//
{
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_FALSE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals));
ASSERT_TRUE(past_intervals.empty());
}
//
// The acting set has changed
//
{
vector<int> new_acting;
int _new_primary = osd_id + 1;
new_acting.push_back(_new_primary);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals));
old_primary = new_primary;
}
//
// The up set has changed
//
{
vector<int> new_up;
int _new_primary = osd_id + 1;
new_up.push_back(_new_primary);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals));
}
//
// The up primary has changed
//
{
vector<int> new_up;
int _new_up_primary = osd_id + 1;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
_new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals));
}
//
// PG is splitting
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
int new_pg_num = pg_num ^ 2;
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(new_pg_num);
osdmap->apply_incremental(inc);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals));
}
//
// PG is pre-merge source
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1);
osdmap->apply_incremental(inc);
cout << "pg_num " << pg_num << std::endl;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pg_t(pg_num - 1, pool_id),
*recoverable,
&past_intervals));
}
//
// PG was pre-merge source
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1);
osdmap->apply_incremental(inc);
cout << "pg_num " << pg_num << std::endl;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
lastmap, // reverse order!
osdmap,
pg_t(pg_num - 1, pool_id),
*recoverable,
&past_intervals));
}
//
// PG is merge source
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num - 1);
osdmap->apply_incremental(inc);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pg_t(pg_num - 1, pool_id),
*recoverable,
&past_intervals));
}
//
// PG is pre-merge target
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1);
osdmap->apply_incremental(inc);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pg_t(pg_num / 2 - 1, pool_id),
*recoverable,
&past_intervals));
}
//
// PG was pre-merge target
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num_pending(pg_num - 1);
osdmap->apply_incremental(inc);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
lastmap, // reverse order!
osdmap,
pg_t(pg_num / 2 - 1, pool_id),
*recoverable,
&past_intervals));
}
//
// PG is merge target
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num - 1);
osdmap->apply_incremental(inc);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pg_t(pg_num / 2 - 1, pool_id),
*recoverable,
&past_intervals));
}
//
// PG size has changed
//
{
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
__u8 new_min_size = min_size + 1;
inc.new_pools[pool_id].min_size = new_min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
osdmap->apply_incremental(inc);
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals));
}
//
  // The old acting set was empty: the previous interval could not
  // have been rw
//
{
vector<int> old_acting;
PastIntervals past_intervals;
ostringstream out;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals,
&out));
ASSERT_NE(string::npos, out.str().find("acting set is too small"));
}
//
  // The old acting set did not have enough OSDs: it could
  // not have been rw
//
{
vector<int> old_acting;
old_acting.push_back(osd_id);
//
// see http://tracker.ceph.com/issues/5780
// the size of the old acting set should be compared
// with the min_size of the old osdmap
//
// The new osdmap is created so that it triggers the
// bug.
//
std::shared_ptr<OSDMap> osdmap(new OSDMap());
osdmap->set_max_osd(10);
osdmap->set_state(osd_id, CEPH_OSD_EXISTS);
osdmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
__u8 new_min_size = old_acting.size();
inc.new_pools[pool_id].min_size = new_min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
osdmap->apply_incremental(inc);
ostringstream out;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals,
&out));
ASSERT_NE(string::npos, out.str().find("acting set is too small"));
}
//
// The acting set changes. The old acting set primary was up during the
// previous interval and may have been rw.
//
{
vector<int> new_acting;
new_acting.push_back(osd_id + 4);
new_acting.push_back(osd_id + 5);
ostringstream out;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals,
&out));
ASSERT_NE(string::npos, out.str().find("includes interval"));
}
//
// The acting set changes. The old acting set primary was not up
// during the old interval but last_epoch_clean is in the
// old interval and it may have been rw.
//
{
vector<int> new_acting;
new_acting.push_back(osd_id + 4);
new_acting.push_back(osd_id + 5);
std::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
inc.new_up_thru[osd_id] = epoch - 10;
lastmap->apply_incremental(inc);
ostringstream out;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals,
&out));
ASSERT_NE(string::npos, out.str().find("presumed to have been rw"));
}
//
// The acting set changes. The old acting set primary was not up
// during the old interval and last_epoch_clean is before the
// old interval : the previous interval could not possibly have
// been rw.
//
{
vector<int> new_acting;
new_acting.push_back(osd_id + 4);
new_acting.push_back(osd_id + 5);
epoch_t last_epoch_clean = epoch - 10;
std::shared_ptr<OSDMap> lastmap(new OSDMap());
lastmap->set_max_osd(10);
lastmap->set_state(osd_id, CEPH_OSD_EXISTS);
lastmap->set_epoch(epoch);
OSDMap::Incremental inc(epoch + 1);
inc.new_pools[pool_id].min_size = min_size;
inc.new_pools[pool_id].set_pg_num(pg_num);
inc.new_up_thru[osd_id] = last_epoch_clean;
lastmap->apply_incremental(inc);
ostringstream out;
PastIntervals past_intervals;
ASSERT_TRUE(past_intervals.empty());
ASSERT_TRUE(PastIntervals::check_new_interval(old_primary,
new_primary,
old_acting,
new_acting,
old_up_primary,
new_up_primary,
old_up,
new_up,
same_interval_since,
last_epoch_clean,
osdmap,
lastmap,
pgid,
*recoverable,
&past_intervals,
&out));
ASSERT_NE(string::npos, out.str().find("does not include interval"));
}
} // end for, didn't want to reindent
}
TEST(pg_t, get_ancestor)
{
ASSERT_EQ(pg_t(0, 0), pg_t(16, 0).get_ancestor(16));
ASSERT_EQ(pg_t(1, 0), pg_t(17, 0).get_ancestor(16));
ASSERT_EQ(pg_t(0, 0), pg_t(16, 0).get_ancestor(8));
ASSERT_EQ(pg_t(16, 0), pg_t(16, 0).get_ancestor(80));
ASSERT_EQ(pg_t(16, 0), pg_t(16, 0).get_ancestor(83));
ASSERT_EQ(pg_t(1, 0), pg_t(1321, 0).get_ancestor(123).get_ancestor(8));
ASSERT_EQ(pg_t(3, 0), pg_t(1323, 0).get_ancestor(123).get_ancestor(8));
ASSERT_EQ(pg_t(3, 0), pg_t(1323, 0).get_ancestor(8));
}
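// Editor's sketch (not part of the upstream suite): the assertions above rely
// on ceph_stable_mod-style folding. The assumption encoded here (keep the
// seed's low bits if the result is below old_pg_num, otherwise drop one more
// bit) is illustrative only; pg_t::get_ancestor() is the authoritative code.
static int ancestor_seed_sketch(int ps, int old_pg_num)
{
  int mask = 1;
  while (mask < old_pg_num)
    mask <<= 1; // smallest power of two >= old_pg_num
  --mask; // low-bit mask covering old_pg_num's seed range
  int folded = ps & mask;
  return folded < old_pg_num ? folded : ps & (mask >> 1);
}
TEST(pg_t, get_ancestor_sketch)
{
  // mirrors a few of the cases above using the local folding sketch
  ASSERT_EQ(0, ancestor_seed_sketch(16, 16));
  ASSERT_EQ(1, ancestor_seed_sketch(17, 16));
  ASSERT_EQ(16, ancestor_seed_sketch(16, 80));
  ASSERT_EQ(1, ancestor_seed_sketch(ancestor_seed_sketch(1321, 123), 8));
}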
TEST(pg_t, split)
{
pg_t pgid(0, 0);
set<pg_t> s;
bool b;
s.clear();
b = pgid.is_split(1, 1, &s);
ASSERT_TRUE(!b);
s.clear();
b = pgid.is_split(2, 4, NULL);
ASSERT_TRUE(b);
b = pgid.is_split(2, 4, &s);
ASSERT_TRUE(b);
ASSERT_EQ(1u, s.size());
ASSERT_TRUE(s.count(pg_t(2, 0)));
s.clear();
b = pgid.is_split(2, 8, &s);
ASSERT_TRUE(b);
ASSERT_EQ(3u, s.size());
ASSERT_TRUE(s.count(pg_t(2, 0)));
ASSERT_TRUE(s.count(pg_t(4, 0)));
ASSERT_TRUE(s.count(pg_t(6, 0)));
s.clear();
b = pgid.is_split(3, 8, &s);
ASSERT_TRUE(b);
ASSERT_EQ(1u, s.size());
ASSERT_TRUE(s.count(pg_t(4, 0)));
s.clear();
b = pgid.is_split(6, 8, NULL);
ASSERT_TRUE(!b);
b = pgid.is_split(6, 8, &s);
ASSERT_TRUE(!b);
ASSERT_EQ(0u, s.size());
pgid = pg_t(1, 0);
s.clear();
b = pgid.is_split(2, 4, &s);
ASSERT_TRUE(b);
ASSERT_EQ(1u, s.size());
ASSERT_TRUE(s.count(pg_t(3, 0)));
s.clear();
b = pgid.is_split(2, 6, &s);
ASSERT_TRUE(b);
ASSERT_EQ(2u, s.size());
ASSERT_TRUE(s.count(pg_t(3, 0)));
ASSERT_TRUE(s.count(pg_t(5, 0)));
s.clear();
b = pgid.is_split(2, 8, &s);
ASSERT_TRUE(b);
ASSERT_EQ(3u, s.size());
ASSERT_TRUE(s.count(pg_t(3, 0)));
ASSERT_TRUE(s.count(pg_t(5, 0)));
ASSERT_TRUE(s.count(pg_t(7, 0)));
s.clear();
b = pgid.is_split(4, 8, &s);
ASSERT_TRUE(b);
ASSERT_EQ(1u, s.size());
ASSERT_TRUE(s.count(pg_t(5, 0)));
s.clear();
b = pgid.is_split(3, 8, &s);
ASSERT_TRUE(b);
ASSERT_EQ(3u, s.size());
ASSERT_TRUE(s.count(pg_t(3, 0)));
ASSERT_TRUE(s.count(pg_t(5, 0)));
ASSERT_TRUE(s.count(pg_t(7, 0)));
s.clear();
b = pgid.is_split(6, 8, &s);
ASSERT_TRUE(!b);
ASSERT_EQ(0u, s.size());
pgid = pg_t(3, 0);
s.clear();
b = pgid.is_split(7, 8, &s);
ASSERT_TRUE(b);
ASSERT_EQ(1u, s.size());
ASSERT_TRUE(s.count(pg_t(7, 0)));
s.clear();
b = pgid.is_split(7, 12, &s);
ASSERT_TRUE(b);
ASSERT_EQ(2u, s.size());
ASSERT_TRUE(s.count(pg_t(7, 0)));
ASSERT_TRUE(s.count(pg_t(11, 0)));
s.clear();
b = pgid.is_split(7, 11, &s);
ASSERT_TRUE(b);
ASSERT_EQ(1u, s.size());
ASSERT_TRUE(s.count(pg_t(7, 0)));
}
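// Editor's sketch (hedged): split children can be enumerated by folding each
// candidate seed in [old_pg_num, new_pg_num) back through the ancestor sketch
// above and keeping those that land on this PG. Assumed to mirror
// pg_t::is_split(); illustrative only.
static std::set<int> split_children_sketch(int ps, int old_pg_num, int new_pg_num)
{
  std::set<int> kids;
  for (int x = old_pg_num; x < new_pg_num; ++x)
    if (ancestor_seed_sketch(x, old_pg_num) == ps)
      kids.insert(x);
  return kids;
}
TEST(pg_t, split_sketch)
{
  ASSERT_EQ((std::set<int>{2, 4, 6}), split_children_sketch(0, 2, 8));
  ASSERT_EQ((std::set<int>{3, 5, 7}), split_children_sketch(1, 3, 8));
}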
TEST(pg_t, merge)
{
pg_t pgid, parent;
bool b;
pgid = pg_t(7, 0);
b = pgid.is_merge_source(8, 7, &parent);
ASSERT_TRUE(b);
ASSERT_EQ(parent, pg_t(3, 0));
ASSERT_TRUE(parent.is_merge_target(8, 7));
b = pgid.is_merge_source(8, 5, &parent);
ASSERT_TRUE(b);
ASSERT_EQ(parent, pg_t(3, 0));
ASSERT_TRUE(parent.is_merge_target(8, 5));
b = pgid.is_merge_source(8, 4, &parent);
ASSERT_TRUE(b);
ASSERT_EQ(parent, pg_t(3, 0));
ASSERT_TRUE(parent.is_merge_target(8, 4));
b = pgid.is_merge_source(8, 3, &parent);
ASSERT_TRUE(b);
ASSERT_EQ(parent, pg_t(1, 0));
ASSERT_TRUE(parent.is_merge_target(8, 4));
b = pgid.is_merge_source(9, 8, &parent);
ASSERT_FALSE(b);
ASSERT_FALSE(parent.is_merge_target(9, 8));
}
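// Editor's sketch (hedged): merging is the same fold in reverse: a source PG
// whose seed is at or above the new pg_num lands on its target via the
// ancestor arithmetic above. Assumed to mirror is_merge_source() and
// is_merge_target(); illustrative only.
TEST(pg_t, merge_sketch)
{
  ASSERT_EQ(3, ancestor_seed_sketch(7, 7)); // 8 -> 7: pg 7 merges into 3
  ASSERT_EQ(1, ancestor_seed_sketch(7, 3)); // 8 -> 3: pg 7 merges into 1
}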
TEST(ObjectCleanRegions, mark_data_region_dirty)
{
ObjectCleanRegions clean_regions;
uint64_t offset_1, len_1, offset_2, len_2;
offset_1 = 4096;
len_1 = 8192;
offset_2 = 40960;
len_2 = 4096;
interval_set<uint64_t> expect_dirty_region;
EXPECT_EQ(expect_dirty_region, clean_regions.get_dirty_regions());
expect_dirty_region.insert(offset_1, len_1);
expect_dirty_region.insert(offset_2, len_2);
clean_regions.mark_data_region_dirty(offset_1, len_1);
clean_regions.mark_data_region_dirty(offset_2, len_2);
EXPECT_EQ(expect_dirty_region, clean_regions.get_dirty_regions());
}
TEST(ObjectCleanRegions, mark_omap_dirty)
{
ObjectCleanRegions clean_regions;
EXPECT_FALSE(clean_regions.omap_is_dirty());
clean_regions.mark_omap_dirty();
EXPECT_TRUE(clean_regions.omap_is_dirty());
}
TEST(ObjectCleanRegions, merge)
{
ObjectCleanRegions cr1, cr2;
interval_set<uint64_t> cr1_expect;
interval_set<uint64_t> cr2_expect;
ASSERT_EQ(cr1_expect, cr1.get_dirty_regions());
ASSERT_EQ(cr2_expect, cr2.get_dirty_regions());
cr1.mark_data_region_dirty(4096, 4096);
cr1_expect.insert(4096, 4096);
ASSERT_EQ(cr1_expect, cr1.get_dirty_regions());
cr1.mark_data_region_dirty(12288, 8192);
cr1_expect.insert(12288, 8192);
ASSERT_TRUE(cr1_expect.subset_of(cr1.get_dirty_regions()));
cr1.mark_data_region_dirty(32768, 10240);
cr1_expect.insert(32768, 10240);
cr1_expect.erase(4096, 4096);
ASSERT_TRUE(cr1_expect.subset_of(cr1.get_dirty_regions()));
cr2.mark_data_region_dirty(20480, 12288);
cr2_expect.insert(20480, 12288);
ASSERT_EQ(cr2_expect, cr2.get_dirty_regions());
cr2.mark_data_region_dirty(102400, 4096);
cr2_expect.insert(102400, 4096);
cr2.mark_data_region_dirty(204800, 8192);
cr2_expect.insert(204800, 8192);
cr2.mark_data_region_dirty(409600, 4096);
cr2_expect.insert(409600, 4096);
ASSERT_TRUE(cr2_expect.subset_of(cr2.get_dirty_regions()));
ASSERT_FALSE(cr2.omap_is_dirty());
cr2.mark_omap_dirty();
ASSERT_FALSE(cr1.omap_is_dirty());
ASSERT_TRUE(cr2.omap_is_dirty());
cr1.merge(cr2);
cr1_expect.insert(204800, 8192);
ASSERT_TRUE(cr1_expect.subset_of(cr1.get_dirty_regions()));
ASSERT_TRUE(cr1.omap_is_dirty());
}
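// Editor's note (hedged): the subset_of() checks above, rather than strict
// equality, reflect that ObjectCleanRegions is lossy by design: it tracks a
// bounded number of intervals and may widen or coalesce dirty extents, so the
// recorded dirty set is a superset of exactly what was marked.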
TEST(pg_missing_t, constructor)
{
pg_missing_t missing;
EXPECT_EQ((unsigned int)0, missing.num_missing());
EXPECT_FALSE(missing.have_missing());
}
TEST(pg_missing_t, have_missing)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.have_missing());
missing.add(oid, eversion_t(), eversion_t(), false);
EXPECT_TRUE(missing.have_missing());
}
TEST(pg_missing_t, claim)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.have_missing());
missing.add(oid, eversion_t(), eversion_t(), false);
EXPECT_TRUE(missing.have_missing());
pg_missing_t other;
EXPECT_FALSE(other.have_missing());
other.claim(std::move(missing));
EXPECT_TRUE(other.have_missing());
}
TEST(pg_missing_t, is_missing)
{
// pg_missing_t::is_missing(const hobject_t& oid) const
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.is_missing(oid));
missing.add(oid, eversion_t(), eversion_t(), false);
EXPECT_TRUE(missing.is_missing(oid));
}
// bool pg_missing_t::is_missing(const hobject_t& oid, eversion_t v) const
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
eversion_t need(10,5);
EXPECT_FALSE(missing.is_missing(oid, eversion_t()));
missing.add(oid, need, eversion_t(), false);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_FALSE(missing.is_missing(oid, eversion_t()));
EXPECT_TRUE(missing.is_missing(oid, need));
}
}
TEST(pg_missing_t, add_next_event)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
hobject_t oid_other(object_t("other"), "key", 9123, 9456, 0, "");
eversion_t version(10,5);
eversion_t prior_version(3,4);
pg_log_entry_t sample_e(pg_log_entry_t::DELETE, oid, version, prior_version,
0, osd_reqid_t(entity_name_t::CLIENT(777), 8, 999),
utime_t(8,9), 0);
// new object (MODIFY)
{
pg_missing_t missing;
pg_log_entry_t e = sample_e;
e.op = pg_log_entry_t::MODIFY;
e.prior_version = eversion_t();
EXPECT_TRUE(e.is_update());
EXPECT_TRUE(e.object_is_indexed());
EXPECT_TRUE(e.reqid_is_indexed());
EXPECT_FALSE(missing.is_missing(oid));
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have);
EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
// adding the same object replaces the previous one
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
}
// new object (CLONE)
{
pg_missing_t missing;
pg_log_entry_t e = sample_e;
e.op = pg_log_entry_t::CLONE;
e.prior_version = eversion_t();
EXPECT_TRUE(e.is_clone());
EXPECT_TRUE(e.object_is_indexed());
EXPECT_FALSE(e.reqid_is_indexed());
EXPECT_FALSE(missing.is_missing(oid));
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have);
EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
// adding the same object replaces the previous one
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
}
// existing object (MODIFY)
{
pg_missing_t missing;
pg_log_entry_t e = sample_e;
e.op = pg_log_entry_t::MODIFY;
e.prior_version = eversion_t();
EXPECT_TRUE(e.is_update());
EXPECT_TRUE(e.object_is_indexed());
EXPECT_TRUE(e.reqid_is_indexed());
EXPECT_FALSE(missing.is_missing(oid));
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have);
EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
// adding the same object with a different version
e.prior_version = prior_version;
missing.add_next_event(e);
EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
}
// object with prior version (MODIFY)
{
pg_missing_t missing;
pg_log_entry_t e = sample_e;
e.op = pg_log_entry_t::MODIFY;
EXPECT_TRUE(e.is_update());
EXPECT_TRUE(e.object_is_indexed());
EXPECT_TRUE(e.reqid_is_indexed());
EXPECT_FALSE(missing.is_missing(oid));
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(prior_version, missing.get_items().at(oid).have);
EXPECT_EQ(version, missing.get_items().at(oid).need);
EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
}
// adding a DELETE matching an existing event
{
pg_missing_t missing;
pg_log_entry_t e = sample_e;
e.op = pg_log_entry_t::MODIFY;
EXPECT_TRUE(e.is_update());
EXPECT_TRUE(e.object_is_indexed());
EXPECT_TRUE(e.reqid_is_indexed());
EXPECT_FALSE(missing.is_missing(oid));
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
e.op = pg_log_entry_t::DELETE;
EXPECT_TRUE(e.is_delete());
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_TRUE(missing.get_items().at(oid).is_delete());
EXPECT_EQ(prior_version, missing.get_items().at(oid).have);
EXPECT_EQ(version, missing.get_items().at(oid).need);
EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
}
// adding a LOST_DELETE after an existing event
{
pg_missing_t missing;
pg_log_entry_t e = sample_e;
e.op = pg_log_entry_t::MODIFY;
EXPECT_TRUE(e.is_update());
EXPECT_TRUE(e.object_is_indexed());
EXPECT_TRUE(e.reqid_is_indexed());
EXPECT_FALSE(missing.is_missing(oid));
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_FALSE(missing.get_items().at(oid).is_delete());
e.op = pg_log_entry_t::LOST_DELETE;
e.version.version++;
EXPECT_TRUE(e.is_delete());
missing.add_next_event(e);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_TRUE(missing.get_items().at(oid).is_delete());
EXPECT_EQ(prior_version, missing.get_items().at(oid).have);
EXPECT_EQ(e.version, missing.get_items().at(oid).need);
EXPECT_EQ(oid, missing.get_rmissing().at(e.version.version));
EXPECT_EQ(1U, missing.num_missing());
EXPECT_EQ(1U, missing.get_rmissing().size());
}
}
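// Editor's note (hedged summary of the cases above): add_next_event() keeps
// "have" at the prior_version recorded when the object first went missing,
// advances "need" to the newest version, and on DELETE / LOST_DELETE marks
// the item is_delete() instead of dropping it from the missing set.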
TEST(pg_missing_t, revise_need)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
// create a new entry
EXPECT_FALSE(missing.is_missing(oid));
eversion_t need(10,10);
missing.revise_need(oid, need, false);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(eversion_t(), missing.get_items().at(oid).have);
EXPECT_EQ(need, missing.get_items().at(oid).need);
// update an existing entry and preserve have
eversion_t have(1,1);
missing.revise_have(oid, have);
eversion_t new_need(10,12);
EXPECT_EQ(have, missing.get_items().at(oid).have);
missing.revise_need(oid, new_need, false);
EXPECT_EQ(have, missing.get_items().at(oid).have);
EXPECT_EQ(new_need, missing.get_items().at(oid).need);
}
TEST(pg_missing_t, revise_have)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
  // a non-existing entry is a noop
EXPECT_FALSE(missing.is_missing(oid));
eversion_t have(1,1);
missing.revise_have(oid, have);
EXPECT_FALSE(missing.is_missing(oid));
// update an existing entry
eversion_t need(10,12);
missing.add(oid, need, have, false);
EXPECT_TRUE(missing.is_missing(oid));
eversion_t new_have(2,2);
EXPECT_EQ(have, missing.get_items().at(oid).have);
missing.revise_have(oid, new_have);
EXPECT_EQ(new_have, missing.get_items().at(oid).have);
EXPECT_EQ(need, missing.get_items().at(oid).need);
}
TEST(pg_missing_t, add)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.is_missing(oid));
eversion_t have(1,1);
eversion_t need(10,10);
missing.add(oid, need, have, false);
EXPECT_TRUE(missing.is_missing(oid));
EXPECT_EQ(have, missing.get_items().at(oid).have);
EXPECT_EQ(need, missing.get_items().at(oid).need);
}
TEST(pg_missing_t, rm)
{
// void pg_missing_t::rm(const hobject_t& oid, eversion_t v)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.is_missing(oid));
epoch_t epoch = 10;
eversion_t need(epoch,10);
missing.add(oid, need, eversion_t(), false);
EXPECT_TRUE(missing.is_missing(oid));
// rm of an older version is a noop
missing.rm(oid, eversion_t(epoch / 2,20));
EXPECT_TRUE(missing.is_missing(oid));
// rm of a later version removes the object
missing.rm(oid, eversion_t(epoch * 2,20));
EXPECT_FALSE(missing.is_missing(oid));
}
// void pg_missing_t::rm(const std::map<hobject_t, pg_missing_item>::iterator &m)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.is_missing(oid));
missing.add(oid, eversion_t(), eversion_t(), false);
EXPECT_TRUE(missing.is_missing(oid));
auto m = missing.get_items().find(oid);
missing.rm(m);
EXPECT_FALSE(missing.is_missing(oid));
}
}
TEST(pg_missing_t, got)
{
// void pg_missing_t::got(const hobject_t& oid, eversion_t v)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
// assert if the oid does not exist
{
PrCtl unset_dumpable;
EXPECT_DEATH(missing.got(oid, eversion_t()), "");
}
EXPECT_FALSE(missing.is_missing(oid));
epoch_t epoch = 10;
eversion_t need(epoch,10);
missing.add(oid, need, eversion_t(), false);
EXPECT_TRUE(missing.is_missing(oid));
    // asserts if the version to be removed is lower than the version of the object
{
PrCtl unset_dumpable;
EXPECT_DEATH(missing.got(oid, eversion_t(epoch / 2,20)), "");
}
    // got with a later version removes the object
missing.got(oid, eversion_t(epoch * 2,20));
EXPECT_FALSE(missing.is_missing(oid));
}
// void pg_missing_t::got(const std::map<hobject_t, pg_missing_item>::iterator &m)
{
hobject_t oid(object_t("objname"), "key", 123, 456, 0, "");
pg_missing_t missing;
EXPECT_FALSE(missing.is_missing(oid));
missing.add(oid, eversion_t(), eversion_t(), false);
EXPECT_TRUE(missing.is_missing(oid));
auto m = missing.get_items().find(oid);
missing.got(m);
EXPECT_FALSE(missing.is_missing(oid));
}
}
TEST(pg_missing_t, split_into)
{
uint32_t hash1 = 1;
hobject_t oid1(object_t("objname"), "key1", 123, hash1, 0, "");
uint32_t hash2 = 2;
hobject_t oid2(object_t("objname"), "key2", 123, hash2, 0, "");
pg_missing_t missing;
missing.add(oid1, eversion_t(), eversion_t(), false);
missing.add(oid2, eversion_t(), eversion_t(), false);
pg_t child_pgid;
child_pgid.m_seed = 1;
pg_missing_t child;
unsigned split_bits = 1;
missing.split_into(child_pgid, split_bits, &child);
EXPECT_TRUE(child.is_missing(oid1));
EXPECT_FALSE(child.is_missing(oid2));
EXPECT_FALSE(missing.is_missing(oid1));
EXPECT_TRUE(missing.is_missing(oid2));
}
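// Editor's sketch (hedged): split_into() above appears to route an object to
// the child exactly when the low split_bits of its hash equal the child's
// seed, which is why hash 1 moved to the seed-1 child and hash 2 stayed.
// The predicate below is an illustrative assumption, not the upstream code.
static bool goes_to_child_sketch(uint32_t hash, uint32_t child_seed,
                                 unsigned split_bits)
{
  return (hash & ((1u << split_bits) - 1)) == child_seed;
}
TEST(pg_missing_t, split_into_sketch)
{
  ASSERT_TRUE(goes_to_child_sketch(1, 1, 1)); // oid1 moves to the child
  ASSERT_FALSE(goes_to_child_sketch(2, 1, 1)); // oid2 stays in the parent
}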
TEST(pg_pool_t_test, get_pg_num_divisor) {
pg_pool_t p;
p.set_pg_num(16);
p.set_pgp_num(16);
for (int i = 0; i < 16; ++i)
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(i, 1)));
p.set_pg_num(12);
p.set_pgp_num(12);
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(0, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(1, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(2, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(3, 1)));
ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(4, 1)));
ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(5, 1)));
ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(6, 1)));
ASSERT_EQ(8u, p.get_pg_num_divisor(pg_t(7, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(8, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(9, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(10, 1)));
ASSERT_EQ(16u, p.get_pg_num_divisor(pg_t(11, 1)));
}
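// Editor's worked example (hedged): with pg_num = 12 the pool sits halfway
// between 8 and 16 PGs. Seeds 4..7 have absorbed their would-be children
// 12..15 and so own 1/8 of the hash space each, while seeds 0..3 and their
// split children 8..11 own 1/16 each; get_pg_num_divisor() is assumed to
// report exactly that per-seed share, matching the assertions above.
TEST(pg_pool_t_test, get_pg_num_divisor_sketch)
{
  pg_pool_t p;
  p.set_pg_num(16);
  p.set_pgp_num(16);
  p.set_pg_num(12);
  p.set_pgp_num(12);
  unsigned folded = 16 - 12; // seeds 12..15 folded back into 4..7
  for (unsigned ps = 0; ps < 12; ++ps) {
    unsigned expect = (ps >= folded && ps < 8) ? 8u : 16u;
    ASSERT_EQ(expect, p.get_pg_num_divisor(pg_t(ps, 1)));
  }
}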
TEST(pg_pool_t_test, get_random_pg_position) {
srand(getpid());
for (int i = 0; i < 100; ++i) {
pg_pool_t p;
p.set_pg_num(1 + (rand() % 1000));
p.set_pgp_num(p.get_pg_num());
pg_t pgid(rand() % p.get_pg_num(), 1);
uint32_t h = p.get_random_pg_position(pgid, rand());
uint32_t ps = p.raw_hash_to_pg(h);
cout << p.get_pg_num() << " " << pgid << ": "
<< h << " -> " << pg_t(ps, 1) << std::endl;
ASSERT_EQ(pgid.ps(), ps);
}
}
TEST(shard_id_t, iostream) {
set<shard_id_t> shards;
shards.insert(shard_id_t(0));
shards.insert(shard_id_t(1));
shards.insert(shard_id_t(2));
ostringstream out;
out << shards;
ASSERT_EQ(out.str(), "0,1,2");
shard_id_t noshard = shard_id_t::NO_SHARD;
shard_id_t zero(0);
ASSERT_GT(zero, noshard);
}
TEST(spg_t, parse) {
spg_t a(pg_t(1,2), shard_id_t::NO_SHARD);
spg_t aa, bb;
spg_t b(pg_t(3,2), shard_id_t(2));
std::string s = stringify(a);
ASSERT_TRUE(aa.parse(s.c_str()));
ASSERT_EQ(a, aa);
s = stringify(b);
ASSERT_TRUE(bb.parse(s.c_str()));
ASSERT_EQ(b, bb);
}
TEST(coll_t, parse) {
const char *ok[] = {
"meta",
"1.2_head",
"1.2_TEMP",
"1.2s3_head",
"1.3s2_TEMP",
"1.2s0_head",
0
};
const char *bad[] = {
"foo",
"1.2_food",
"1.2_head ",
//" 1.2_head", // hrm, this parses, which is not ideal.. pg_t's fault?
"1.2_temp",
"1.2_HEAD",
"1.xS3_HEAD",
"1.2s_HEAD",
"1.2sfoo_HEAD",
0
};
coll_t a;
for (int i = 0; ok[i]; ++i) {
cout << "check ok " << ok[i] << std::endl;
ASSERT_TRUE(a.parse(ok[i]));
ASSERT_EQ(string(ok[i]), a.to_str());
}
for (int i = 0; bad[i]; ++i) {
cout << "check bad " << bad[i] << std::endl;
ASSERT_FALSE(a.parse(bad[i]));
}
}
TEST(coll_t, temp) {
spg_t pgid;
coll_t foo(pgid);
ASSERT_EQ(foo.to_str(), string("0.0_head"));
coll_t temp = foo.get_temp();
ASSERT_EQ(temp.to_str(), string("0.0_TEMP"));
spg_t pgid2;
ASSERT_TRUE(temp.is_temp());
ASSERT_TRUE(temp.is_temp(&pgid2));
ASSERT_EQ(pgid, pgid2);
}
TEST(coll_t, assignment) {
spg_t pgid;
coll_t right(pgid);
ASSERT_EQ(right.to_str(), string("0.0_head"));
coll_t left, middle;
ASSERT_EQ(left.to_str(), string("meta"));
ASSERT_EQ(middle.to_str(), string("meta"));
left = middle = right;
ASSERT_EQ(left.to_str(), string("0.0_head"));
ASSERT_EQ(middle.to_str(), string("0.0_head"));
ASSERT_NE(middle.c_str(), right.c_str());
ASSERT_NE(left.c_str(), middle.c_str());
}
TEST(hobject_t, parse) {
const char *v[] = {
"MIN",
"MAX",
"-1:60c2fa6d:::inc_osdmap.1:0",
"-1:60c2fa6d:::inc_osdmap.1:333",
"0:00000000::::head",
"1:00000000:nspace:key:obj:head",
"-40:00000000:nspace::obj:head",
"20:00000000::key:obj:head",
"20:00000000:::o%fdj:head",
"20:00000000:::o%02fdj:head",
"20:00000000:::_zero_%00_:head",
NULL
};
for (unsigned i=0; v[i]; ++i) {
hobject_t o;
bool b = o.parse(v[i]);
if (!b) {
cout << "failed to parse " << v[i] << std::endl;
ASSERT_TRUE(false);
}
string s = stringify(o);
if (s != v[i]) {
cout << v[i] << " -> " << o << " -> " << s << std::endl;
ASSERT_EQ(s, string(v[i]));
}
}
}
TEST(ghobject_t, cmp) {
ghobject_t min;
ghobject_t sep;
sep.set_shard(shard_id_t(1));
sep.hobj.pool = -1;
cout << min << " < " << sep << std::endl;
ASSERT_TRUE(min < sep);
sep.set_shard(shard_id_t::NO_SHARD);
cout << "sep shard " << sep.shard_id << std::endl;
ghobject_t o(hobject_t(object_t(), string(), CEPH_NOSNAP, 0x42,
1, string()));
cout << "o " << o << std::endl;
ASSERT_TRUE(o > sep);
}
TEST(ghobject_t, parse) {
const char *v[] = {
"GHMIN",
"GHMAX",
"13#0:00000000::::head#",
"13#0:00000000::::head#deadbeef",
"#-1:60c2fa6d:::inc_osdmap.1:333#deadbeef",
"#-1:60c2fa6d:::inc%02osdmap.1:333#deadbeef",
"#-1:60c2fa6d:::inc_osdmap.1:333#",
"1#MIN#deadbeefff",
"1#MAX#",
"#MAX#123",
"#-40:00000000:nspace::obj:head#",
NULL
};
for (unsigned i=0; v[i]; ++i) {
ghobject_t o;
bool b = o.parse(v[i]);
if (!b) {
cout << "failed to parse " << v[i] << std::endl;
ASSERT_TRUE(false);
}
string s = stringify(o);
if (s != v[i]) {
cout << v[i] << " -> " << o << " -> " << s << std::endl;
ASSERT_EQ(s, string(v[i]));
}
}
}
TEST(pool_opts_t, invalid_opt) {
EXPECT_FALSE(pool_opts_t::is_opt_name("INVALID_OPT"));
PrCtl unset_dumpable;
EXPECT_DEATH(pool_opts_t::get_opt_desc("INVALID_OPT"), "");
}
TEST(pool_opts_t, scrub_min_interval) {
EXPECT_TRUE(pool_opts_t::is_opt_name("scrub_min_interval"));
EXPECT_EQ(pool_opts_t::get_opt_desc("scrub_min_interval"),
pool_opts_t::opt_desc_t(pool_opts_t::SCRUB_MIN_INTERVAL,
pool_opts_t::DOUBLE));
pool_opts_t opts;
EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MIN_INTERVAL));
{
PrCtl unset_dumpable;
EXPECT_DEATH(opts.get(pool_opts_t::SCRUB_MIN_INTERVAL), "");
}
double val;
EXPECT_FALSE(opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &val));
opts.set(pool_opts_t::SCRUB_MIN_INTERVAL, static_cast<double>(2015));
EXPECT_TRUE(opts.get(pool_opts_t::SCRUB_MIN_INTERVAL, &val));
EXPECT_EQ(val, 2015);
opts.unset(pool_opts_t::SCRUB_MIN_INTERVAL);
EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MIN_INTERVAL));
}
TEST(pool_opts_t, scrub_max_interval) {
EXPECT_TRUE(pool_opts_t::is_opt_name("scrub_max_interval"));
EXPECT_EQ(pool_opts_t::get_opt_desc("scrub_max_interval"),
pool_opts_t::opt_desc_t(pool_opts_t::SCRUB_MAX_INTERVAL,
pool_opts_t::DOUBLE));
pool_opts_t opts;
EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MAX_INTERVAL));
{
PrCtl unset_dumpable;
EXPECT_DEATH(opts.get(pool_opts_t::SCRUB_MAX_INTERVAL), "");
}
double val;
EXPECT_FALSE(opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &val));
opts.set(pool_opts_t::SCRUB_MAX_INTERVAL, static_cast<double>(2015));
EXPECT_TRUE(opts.get(pool_opts_t::SCRUB_MAX_INTERVAL, &val));
EXPECT_EQ(val, 2015);
opts.unset(pool_opts_t::SCRUB_MAX_INTERVAL);
EXPECT_FALSE(opts.is_set(pool_opts_t::SCRUB_MAX_INTERVAL));
}
TEST(pool_opts_t, deep_scrub_interval) {
EXPECT_TRUE(pool_opts_t::is_opt_name("deep_scrub_interval"));
EXPECT_EQ(pool_opts_t::get_opt_desc("deep_scrub_interval"),
pool_opts_t::opt_desc_t(pool_opts_t::DEEP_SCRUB_INTERVAL,
pool_opts_t::DOUBLE));
pool_opts_t opts;
EXPECT_FALSE(opts.is_set(pool_opts_t::DEEP_SCRUB_INTERVAL));
{
PrCtl unset_dumpable;
EXPECT_DEATH(opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL), "");
}
double val;
EXPECT_FALSE(opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL, &val));
opts.set(pool_opts_t::DEEP_SCRUB_INTERVAL, static_cast<double>(2015));
EXPECT_TRUE(opts.get(pool_opts_t::DEEP_SCRUB_INTERVAL, &val));
EXPECT_EQ(val, 2015);
opts.unset(pool_opts_t::DEEP_SCRUB_INTERVAL);
EXPECT_FALSE(opts.is_set(pool_opts_t::DEEP_SCRUB_INTERVAL));
}
struct RequiredPredicate : IsPGRecoverablePredicate {
unsigned required_size;
explicit RequiredPredicate(unsigned required_size) : required_size(required_size) {}
bool operator()(const set<pg_shard_t> &have) const override {
return have.size() >= required_size;
}
};
using namespace std;
struct MapPredicate {
map<int, pair<PastIntervals::osd_state_t, epoch_t>> states;
explicit MapPredicate(
const vector<pair<int, pair<PastIntervals::osd_state_t, epoch_t>>> &_states)
: states(_states.begin(), _states.end()) {}
PastIntervals::osd_state_t operator()(epoch_t start, int osd, epoch_t *lost_at) {
auto val = states.at(osd);
if (lost_at)
*lost_at = val.second;
return val.first;
}
};
using sit = shard_id_t;
using PI = PastIntervals;
using pst = pg_shard_t;
using ival = PastIntervals::pg_interval_t;
using ivallst = std::list<ival>;
const int N = 0x7fffffff /* CRUSH_ITEM_NONE, can't import crush.h here */;
struct PITest : ::testing::Test {
PITest() {}
void run(
bool ec_pool,
ivallst intervals,
epoch_t last_epoch_started,
unsigned min_to_peer,
vector<pair<int, pair<PastIntervals::osd_state_t, epoch_t>>> osd_states,
vector<int> up,
vector<int> acting,
set<pg_shard_t> probe,
set<int> down,
map<int, epoch_t> blocked_by,
bool pg_down) {
RequiredPredicate rec_pred(min_to_peer);
MapPredicate map_pred(osd_states);
PI::PriorSet correct(
ec_pool,
probe,
down,
blocked_by,
pg_down,
new RequiredPredicate(rec_pred));
PastIntervals compact;
for (auto &&i: intervals) {
compact.add_interval(ec_pool, i);
}
PI::PriorSet compact_ps = compact.get_prior_set(
ec_pool,
last_epoch_started,
new RequiredPredicate(rec_pred),
map_pred,
up,
acting,
nullptr);
ASSERT_EQ(correct, compact_ps);
}
};
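// Editor's note (hedged): run() cross-checks two routes to the same PriorSet:
// one constructed directly from the expected probe/down/blocked_by sets, and
// one derived by get_prior_set() from the compacted PastIntervals, so each
// TEST_F below is effectively asserting the interval-compaction logic.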
TEST_F(PITest, past_intervals_rep) {
run(
/* ec_pool */ false,
/* intervals */
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
, ival{{ 2}, { 2}, 31, 35, false, 2, 2}
, ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
},
/* les */ 5,
/* min_peer */ 1,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::UP , 0))
, make_pair(2, make_pair(PI::DOWN , 0))
},
    /* up */ {0, 1 },
    /* acting */ {0, 1 },
/* probe */ {pst(0), pst(1)},
/* down */ {2},
/* blocked_by */ {},
/* pg_down */ false);
}
TEST_F(PITest, past_intervals_ec) {
run(
/* ec_pool */ true,
/* intervals */
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1}
},
/* les */ 5,
/* min_peer */ 2,
/* osd states at end */
{ make_pair(0, make_pair(PI::DOWN , 0))
, make_pair(1, make_pair(PI::UP , 0))
, make_pair(2, make_pair(PI::UP , 0))
},
    /* up */ {N, 1, 2},
    /* acting */ {N, 1, 2},
/* probe */ {pst(1, sit(1)), pst(2, sit(2))},
/* down */ {0},
/* blocked_by */ {},
/* pg_down */ false);
}
TEST_F(PITest, past_intervals_rep_down) {
run(
/* ec_pool */ false,
/* intervals */
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
, ival{{ 2}, { 2}, 31, 35, true, 2, 2}
, ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
},
/* les */ 5,
/* min_peer */ 1,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::UP , 0))
, make_pair(2, make_pair(PI::DOWN , 0))
},
    /* up */ {0, 1 },
    /* acting */ {0, 1 },
/* probe */ {pst(0), pst(1)},
/* down */ {2},
/* blocked_by */ {{2, 0}},
/* pg_down */ true);
}
TEST_F(PITest, past_intervals_ec_down) {
run(
/* ec_pool */ true,
/* intervals */
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1}
, ival{{N, N, 2}, {N, N, 2}, 31, 35, false, 2, 2}
},
/* les */ 5,
/* min_peer */ 2,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::DOWN , 0))
, make_pair(2, make_pair(PI::UP , 0))
},
    /* up */ {0, N, 2},
    /* acting */ {0, N, 2},
/* probe */ {pst(0, sit(0)), pst(2, sit(2))},
/* down */ {1},
/* blocked_by */ {{1, 0}},
/* pg_down */ true);
}
TEST_F(PITest, past_intervals_rep_no_subsets) {
run(
/* ec_pool */ false,
/* intervals */
{ ival{{0, 2}, {0, 2}, 10, 20, true, 0, 0}
, ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
, ival{{0, 1 }, {0, 1 }, 31, 35, true, 0, 0}
},
/* les */ 5,
/* min_peer */ 1,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::UP , 0))
, make_pair(2, make_pair(PI::DOWN , 0))
},
    /* up */ {0, 1 },
    /* acting */ {0, 1 },
/* probe */ {pst(0), pst(1)},
/* down */ {2},
/* blocked_by */ {},
/* pg_down */ false);
}
TEST_F(PITest, past_intervals_ec_no_subsets) {
run(
/* ec_pool */ true,
/* intervals */
{ ival{{0, N, 2}, {0, N, 2}, 10, 20, true, 0, 0}
, ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1}
, ival{{0, 1, N}, {0, 1, N}, 31, 35, true, 0, 0}
},
/* les */ 5,
/* min_peer */ 2,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::DOWN , 0))
, make_pair(2, make_pair(PI::UP , 0))
},
    /* up */ {0, N, 2},
    /* acting */ {0, N, 2},
/* probe */ {pst(0, sit(0)), pst(2, sit(2))},
/* down */ {1},
/* blocked_by */ {{1, 0}},
/* pg_down */ true);
}
TEST_F(PITest, past_intervals_ec_no_subsets2) {
run(
/* ec_pool */ true,
/* intervals */
{ ival{{N, 1, 2}, {N, 1, 2}, 10, 20, true, 0, 0}
, ival{{0, N, 2}, {0, N, 2}, 21, 30, true, 1, 1}
, ival{{0, 3, N}, {0, 3, N}, 31, 35, true, 0, 0}
},
/* les */ 31,
/* min_peer */ 2,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::DOWN , 0))
, make_pair(2, make_pair(PI::UP , 0))
, make_pair(3, make_pair(PI::UP , 0))
},
    /* up */ {0, N, 2},
    /* acting */ {0, N, 2},
/* probe */ {pst(0, sit(0)), pst(2, sit(2)), pst(3, sit(1))},
/* down */ {1},
/* blocked_by */ {},
/* pg_down */ false);
}
TEST_F(PITest, past_intervals_rep_lost) {
run(
/* ec_pool */ false,
/* intervals */
{ ival{{0, 1, 2}, {0, 1, 2}, 10, 20, true, 0, 0}
, ival{{ 1, 2}, { 1, 2}, 21, 30, true, 1, 1}
, ival{{ 2}, { 2}, 31, 35, true, 2, 2}
, ival{{0, 2}, {0, 2}, 36, 50, true, 0, 0}
},
/* les */ 5,
/* min_peer */ 1,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::UP , 0))
, make_pair(2, make_pair(PI::LOST , 55))
},
    /* up */ {0, 1 },
    /* acting */ {0, 1 },
/* probe */ {pst(0), pst(1)},
/* down */ {2},
/* blocked_by */ {},
/* pg_down */ false);
}
TEST_F(PITest, past_intervals_ec_lost) {
run(
/* ec_pool */ true,
/* intervals */
{ ival{{0, N, 2}, {0, N, 2}, 10, 20, true, 0, 0}
, ival{{N, 1, 2}, {N, 1, 2}, 21, 30, true, 1, 1}
, ival{{0, 1, N}, {0, 1, N}, 31, 35, true, 0, 0}
},
/* les */ 5,
/* min_peer */ 2,
/* osd states at end */
{ make_pair(0, make_pair(PI::UP , 0))
, make_pair(1, make_pair(PI::LOST , 36))
, make_pair(2, make_pair(PI::UP , 0))
},
    /* up */ {0, N, 2},
    /* acting */ {0, N, 2},
/* probe */ {pst(0, sit(0)), pst(2, sit(2))},
/* down */ {1},
/* blocked_by */ {},
/* pg_down */ false);
}
void ci_ref_test(
object_manifest_t l,
object_manifest_t to_remove,
object_manifest_t g,
object_ref_delta_t expected_delta)
{
{
object_ref_delta_t delta;
to_remove.calc_refs_to_drop_on_removal(
&l,
&g,
delta);
ASSERT_EQ(
expected_delta,
delta);
}
// calc_refs_to_drop specifically handles nullptr identically to empty
// chunk_map
if (l.chunk_map.empty() || g.chunk_map.empty()) {
object_ref_delta_t delta;
to_remove.calc_refs_to_drop_on_removal(
l.chunk_map.empty() ? nullptr : &l,
g.chunk_map.empty() ? nullptr : &g,
delta);
ASSERT_EQ(
expected_delta,
delta);
}
}
void ci_ref_test_on_modify(
object_manifest_t l,
object_manifest_t to_remove,
ObjectCleanRegions clean_regions,
object_ref_delta_t expected_delta)
{
{
object_ref_delta_t delta;
to_remove.calc_refs_to_drop_on_modify(
&l,
clean_regions,
delta);
ASSERT_EQ(
expected_delta,
delta);
}
}
void ci_ref_test_inc_on_set(
object_manifest_t l,
object_manifest_t added_set,
object_manifest_t g,
object_ref_delta_t expected_delta)
{
{
object_ref_delta_t delta;
added_set.calc_refs_to_inc_on_set(
&l,
&g,
delta);
ASSERT_EQ(
expected_delta,
delta);
}
}
hobject_t mk_hobject(string name)
{
return hobject_t(
std::move(name),
string(),
CEPH_NOSNAP,
0x42,
1,
string());
}
object_manifest_t mk_manifest(
std::map<uint64_t, std::tuple<uint64_t, uint64_t, string>> m)
{
object_manifest_t ret;
ret.type = object_manifest_t::TYPE_CHUNKED;
for (auto &[offset, tgt] : m) {
auto &[tgt_off, length, name] = tgt;
auto &ci = ret.chunk_map[offset];
ci.offset = tgt_off;
ci.length = length;
ci.oid = mk_hobject(name);
}
return ret;
}
object_ref_delta_t mk_delta(std::map<string, int> _m) {
std::map<hobject_t, int> m;
for (auto &[name, delta] : _m) {
m.insert(
std::make_pair(
mk_hobject(name),
delta));
}
return object_ref_delta_t(std::move(m));
}
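// Editor's worked example (hedged, not upstream): with no head or tail clone
// to match against, every chunk in the removed clone drops one reference, so
// two "foo" chunks at distinct offsets yield a delta of -2, the same rule the
// offsets_dont_match case below exercises.
TEST(chunk_info_test, calc_refs_worked_example_sketch) {
  ci_ref_test(
    mk_manifest({}),
    mk_manifest({{0, {0, 1024, "foo"}}, {1024, {0, 1024, "foo"}}}),
    mk_manifest({}),
    mk_delta({{"foo", -2}}));
}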
TEST(chunk_info_test, calc_refs_to_drop) {
ci_ref_test(
mk_manifest({}),
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({}),
mk_delta({{"foo", -1}}));
}
TEST(chunk_info_test, calc_refs_to_drop_match) {
ci_ref_test(
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_delta({}));
}
TEST(chunk_info_test, calc_refs_to_drop_head_match) {
ci_ref_test(
mk_manifest({}),
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_delta({}));
}
TEST(chunk_info_test, calc_refs_to_drop_tail_match) {
ci_ref_test(
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({}),
mk_delta({}));
}
TEST(chunk_info_test, calc_refs_to_drop_second_reference) {
ci_ref_test(
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "foo"}}, {4<<10, {0, 1<<10, "foo"}}}),
mk_manifest({}),
mk_delta({{"foo", -1}}));
}
TEST(chunk_info_test, calc_refs_offsets_dont_match) {
ci_ref_test(
mk_manifest({{0, {0, 1024, "foo"}}}),
mk_manifest({{512, {0, 1024, "foo"}}, {(4<<10) + 512, {0, 1<<10, "foo"}}}),
mk_manifest({}),
mk_delta({{"foo", -2}}));
}
TEST(chunk_info_test, calc_refs_g_l_match) {
ci_ref_test(
mk_manifest({{4096, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "foo"}}, {4096, {0, 1024, "bar"}}}),
mk_manifest({{4096, {0, 1024, "foo"}}}),
mk_delta({{"foo", -2}, {"bar", -1}}));
}
TEST(chunk_info_test, calc_refs_g_l_match_no_this) {
ci_ref_test(
mk_manifest({{4096, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "bar"}}}),
mk_manifest({{4096, {0, 1024, "foo"}}}),
mk_delta({{"foo", -1}, {"bar", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_mismatch) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 1024);
clean_regions.mark_data_region_dirty(512, 1024);
ci_ref_test_on_modify(
mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{0, {0, 1024, "bar"}}, {512, {2048, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}, {"ttt", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_match) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 1024);
clean_regions.mark_data_region_dirty(512, 1024);
clean_regions.mark_data_region_dirty(4096, 1024);
ci_ref_test_on_modify(
mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
mk_manifest({{0, {0, 1024, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_match_dirty_overlap) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 256);
clean_regions.mark_data_region_dirty(256, 4096);
ci_ref_test_on_modify(
mk_manifest({}),
mk_manifest({{0, {0, 256, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}, {"foo", -1}, {"ttt", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_match_dirty_overlap2) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 256);
clean_regions.mark_data_region_dirty(256, 1024);
clean_regions.mark_data_region_dirty(3584, 1024);
ci_ref_test_on_modify(
mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
mk_manifest({{0, {0, 256, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_match_dirty_overlap3) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 256);
clean_regions.mark_data_region_dirty(256, 4096);
ci_ref_test_on_modify(
mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
mk_manifest({{0, {0, 256, "bar"}}, {512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_match_clone_overlap) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 256);
clean_regions.mark_data_region_dirty(256, 1024);
clean_regions.mark_data_region_dirty(3584, 1024);
ci_ref_test_on_modify(
mk_manifest({{512, {2048, 1024, "foo"}}, {4096, {0, 1024, "ttt"}}}),
mk_manifest({{0, {0, 256, "bar"}}, {256, {2048, 1024, "foo"}}, {3584, {0, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}, {"foo", -1}, {"ttt", -1}}));
}
TEST(chunk_info_test, calc_refs_modify_no_snap) {
ObjectCleanRegions clean_regions(0, 8192, false);
clean_regions.mark_data_region_dirty(0, 1024);
clean_regions.mark_data_region_dirty(512, 1024);
ci_ref_test_on_modify(
mk_manifest({}),
mk_manifest({{0, {0, 1024, "bar"}}, {512, {2048, 1024, "ttt"}}}),
clean_regions,
mk_delta({{"bar", -1}, {"ttt", -1}}));
}
TEST(chunk_info_test, calc_refs_inc) {
ci_ref_test_inc_on_set(
mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{1024, {0, 1024, "bar"}}}),
mk_manifest({{4096, {0, 1024, "foo"}}}),
mk_delta({{"bar", 1}}));
}
TEST(chunk_info_test, calc_refs_inc2) {
ci_ref_test_inc_on_set(
mk_manifest({{512, {0, 1024, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{1024, {0, 1024, "bar"}}, {4096, {0, 1024, "bbb"}}}),
mk_manifest({{512, {0, 1024, "foo"}}}),
mk_delta({{"bar", 1}, {"bbb", 1}}));
}
TEST(chunk_info_test, calc_refs_inc_no_l) {
ci_ref_test_inc_on_set(
mk_manifest({}),
mk_manifest({{1024, {0, 1024, "bar"}}, {4096, {0, 1024, "bbb"}}}),
mk_manifest({{512, {0, 1024, "foo"}}}),
mk_delta({{"bar", 1}, {"bbb", 1}}));
}
TEST(chunk_info_test, calc_refs_inc_no_g) {
ci_ref_test_inc_on_set(
mk_manifest({{512, {0, 1024, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{1024, {0, 1024, "bar"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({}),
mk_delta({{"bar", 1}}));
}
TEST(chunk_info_test, calc_refs_inc_match_g_l) {
ci_ref_test_inc_on_set(
mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_delta({{"aaa", -1}, {"foo", -1}}));
}
TEST(chunk_info_test, calc_refs_inc_match) {
ci_ref_test_inc_on_set(
mk_manifest({{256, {0, 256, "bbb"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "foo"}}}),
mk_manifest({{256, {0, 256, "aaa"}}, {4096, {0, 1024, "ccc"}}}),
mk_delta({}));
}
/*
* Local Variables:
* compile-command: "cd ../.. ;
* make unittest_osd_types ;
* ./unittest_osd_types # --gtest_filter=pg_missing_t.constructor
* "
* End:
*/
| 63,090 | 27.612698 | 95 |
cc
|
null |
ceph-main/src/test/osdc/FakeWriteback.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <time.h>
#include <thread>
#include "common/debug.h"
#include "common/Cond.h"
#include "common/Finisher.h"
#include "common/ceph_mutex.h"
#include "include/ceph_assert.h"
#include "common/ceph_time.h"
#include "FakeWriteback.h"
#define dout_subsys ceph_subsys_objectcacher
#undef dout_prefix
#define dout_prefix *_dout << "FakeWriteback(" << this << ") "
class C_Delay : public Context {
CephContext *m_cct;
Context *m_con;
ceph::timespan m_delay;
ceph::mutex *m_lock;
bufferlist *m_bl;
uint64_t m_off;
public:
C_Delay(CephContext *cct, Context *c, ceph::mutex *lock, uint64_t off,
bufferlist *pbl, uint64_t delay_ns=0)
: m_cct(cct), m_con(c), m_delay(delay_ns * std::chrono::nanoseconds(1)),
m_lock(lock), m_bl(pbl), m_off(off) {}
void finish(int r) override {
std::this_thread::sleep_for(m_delay);
if (m_bl) {
buffer::ptr bp(r);
bp.zero();
m_bl->append(bp);
ldout(m_cct, 20) << "finished read " << m_off << "~" << r << dendl;
}
std::lock_guard locker{*m_lock};
m_con->complete(r);
}
};
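// Editor's usage sketch (hedged, hypothetical): C_Delay is driven through a
// Finisher with the byte count as the completion code, which is how the
// methods below turn "r" into a zero-filled read of r bytes, e.g.:
//
//   Finisher f(cct);
//   f.start();
//   ceph::mutex l = ceph::make_mutex("sketch");
//   bufferlist bl;
//   f.queue(new C_Delay(cct, on_finish, &l, 0 /*off*/, &bl), 4096 /*len*/);
//   // ...after the simulated delay, on_finish->complete(4096) fires with
//   // bl holding 4096 zeroed bytes.
//   f.stop();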
FakeWriteback::FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns)
: m_cct(cct), m_lock(lock), m_delay_ns(delay_ns)
{
m_finisher = new Finisher(cct);
m_finisher->start();
}
FakeWriteback::~FakeWriteback()
{
m_finisher->stop();
delete m_finisher;
}
void FakeWriteback::read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snapid,
bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish)
{
C_Delay *wrapper = new C_Delay(m_cct, onfinish, m_lock, off, pbl,
m_delay_ns);
m_finisher->queue(wrapper, len);
}
ceph_tid_t FakeWriteback::write(const object_t& oid,
const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc,
const bufferlist &bl, ceph::real_time mtime,
uint64_t trunc_size, __u32 trunc_seq,
ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit)
{
C_Delay *wrapper = new C_Delay(m_cct, oncommit, m_lock, off, NULL,
m_delay_ns);
m_finisher->queue(wrapper, 0);
return ++m_tid;
}
bool FakeWriteback::may_copy_on_write(const object_t&, uint64_t, uint64_t,
snapid_t)
{
return false;
}
| 2,576 | 26.414894 | 84 | cc |
| null | ceph-main/src/test/osdc/FakeWriteback.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_OSDC_FAKEWRITEBACK_H
#define CEPH_TEST_OSDC_FAKEWRITEBACK_H
#include "include/Context.h"
#include "include/types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <atomic>
class Finisher;
class FakeWriteback : public WritebackHandler {
public:
FakeWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns);
~FakeWriteback() override;
void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish) override;
ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc, const bufferlist &bl,
ceph::real_time mtime, uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) override;
using WritebackHandler::write;
bool may_copy_on_write(const object_t&, uint64_t, uint64_t,
snapid_t) override;
private:
CephContext *m_cct;
ceph::mutex *m_lock;
uint64_t m_delay_ns;
std::atomic<unsigned> m_tid = { 0 };
Finisher *m_finisher;
};
#endif
| 1,424 | 28.6875 | 72 | h |
| null | ceph-main/src/test/osdc/MemWriteback.cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <time.h>
#include <thread>
#include "common/debug.h"
#include "common/Cond.h"
#include "common/Finisher.h"
#include "common/ceph_mutex.h"
#include "include/ceph_assert.h"
#include "common/ceph_time.h"
#include "MemWriteback.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_objectcacher
#undef dout_prefix
#define dout_prefix *_dout << "MemWriteback(" << this << ") "
class C_DelayRead : public Context {
MemWriteback *wb;
CephContext *m_cct;
Context *m_con;
ceph::timespan m_delay;
ceph::mutex *m_lock;
object_t m_oid;
uint64_t m_off;
uint64_t m_len;
bufferlist *m_bl;
public:
C_DelayRead(MemWriteback *mwb, CephContext *cct, Context *c, ceph::mutex *lock,
const object_t& oid, uint64_t off, uint64_t len, bufferlist *pbl,
uint64_t delay_ns=0)
: wb(mwb), m_cct(cct), m_con(c),
m_delay(delay_ns * std::chrono::nanoseconds(1)),
m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(pbl) {}
void finish(int r) override {
std::this_thread::sleep_for(m_delay);
std::lock_guard locker{*m_lock};
r = wb->read_object_data(m_oid, m_off, m_len, m_bl);
if (m_con)
m_con->complete(r);
}
};
class C_DelayWrite : public Context {
MemWriteback *wb;
CephContext *m_cct;
Context *m_con;
ceph::timespan m_delay;
ceph::mutex *m_lock;
object_t m_oid;
uint64_t m_off;
uint64_t m_len;
const bufferlist& m_bl;
public:
C_DelayWrite(MemWriteback *mwb, CephContext *cct, Context *c, ceph::mutex *lock,
const object_t& oid, uint64_t off, uint64_t len,
const bufferlist& bl, uint64_t delay_ns=0)
: wb(mwb), m_cct(cct), m_con(c),
m_delay(delay_ns * std::chrono::nanoseconds(1)),
m_lock(lock), m_oid(oid), m_off(off), m_len(len), m_bl(bl) {}
void finish(int r) override {
std::this_thread::sleep_for(m_delay);
std::lock_guard locker{*m_lock};
wb->write_object_data(m_oid, m_off, m_len, m_bl);
if (m_con)
m_con->complete(r);
}
};
MemWriteback::MemWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns)
: m_cct(cct), m_lock(lock), m_delay_ns(delay_ns)
{
m_finisher = new Finisher(cct);
m_finisher->start();
}
MemWriteback::~MemWriteback()
{
m_finisher->stop();
delete m_finisher;
}
void MemWriteback::read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snapid,
bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish)
{
ceph_assert(snapid == CEPH_NOSNAP);
C_DelayRead *wrapper = new C_DelayRead(this, m_cct, onfinish, m_lock, oid,
off, len, pbl, m_delay_ns);
m_finisher->queue(wrapper, len);
}
ceph_tid_t MemWriteback::write(const object_t& oid,
const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc,
const bufferlist &bl, ceph::real_time mtime,
uint64_t trunc_size, __u32 trunc_seq,
ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit)
{
ceph_assert(snapc.seq == 0);
C_DelayWrite *wrapper = new C_DelayWrite(this, m_cct, oncommit, m_lock, oid,
off, len, bl, m_delay_ns);
m_finisher->queue(wrapper, 0);
return ++m_tid;
}
void MemWriteback::write_object_data(const object_t& oid, uint64_t off, uint64_t len,
const bufferlist& data_bl)
{
dout(1) << "writing " << oid << " " << off << "~" << len << dendl;
ceph_assert(len == data_bl.length());
bufferlist& obj_bl = object_data[oid];
bufferlist new_obj_bl;
// ensure size, or set it if new object
if (off + len > obj_bl.length()) {
obj_bl.append_zero(off + len - obj_bl.length());
}
// beginning
new_obj_bl.substr_of(obj_bl, 0, off);
// overwritten bit
new_obj_bl.append(data_bl);
// tail bit
bufferlist tmp;
tmp.substr_of(obj_bl, off+len, obj_bl.length()-(off+len));
new_obj_bl.append(tmp);
obj_bl.swap(new_obj_bl);
dout(1) << oid << " final size " << obj_bl.length() << dendl;
}
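// Editor's note (hedged): write_object_data() above is a plain
// read-modify-write splice: zero-extend the stored object so [off, off+len)
// is in range, then rebuild it as old[0, off) + data_bl + old[off+len, end).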
int MemWriteback::read_object_data(const object_t& oid, uint64_t off, uint64_t len,
bufferlist *data_bl)
{
dout(1) << "reading " << oid << " " << off << "~" << len << dendl;
auto obj_i = object_data.find(oid);
if (obj_i == object_data.end()) {
    dout(1) << oid << " DNE!" << dendl;
return -ENOENT;
}
const bufferlist& obj_bl = obj_i->second;
dout(1) << "reading " << oid << " from total size " << obj_bl.length() << dendl;
uint64_t read_len = std::min(len, obj_bl.length()-off);
data_bl->substr_of(obj_bl, off, read_len);
return 0;
}
bool MemWriteback::may_copy_on_write(const object_t&, uint64_t, uint64_t,
snapid_t)
{
return false;
}
| 4,928 | 28.51497 | 85 | cc |
| null | ceph-main/src/test/osdc/MemWriteback.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_OSDC_MEMWRITEBACK_H
#define CEPH_TEST_OSDC_MEMWRITEBACK_H
#include "include/Context.h"
#include "include/types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <atomic>
class Finisher;
class MemWriteback : public WritebackHandler {
public:
MemWriteback(CephContext *cct, ceph::mutex *lock, uint64_t delay_ns);
~MemWriteback() override;
void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish) override;
ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc, const bufferlist &bl,
ceph::real_time mtime, uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) override;
using WritebackHandler::write;
bool may_copy_on_write(const object_t&, uint64_t, uint64_t,
snapid_t) override;
void write_object_data(const object_t& oid, uint64_t off, uint64_t len,
const bufferlist& data_bl);
int read_object_data(const object_t& oid, uint64_t off, uint64_t len,
bufferlist *data_bl);
private:
std::map<object_t, bufferlist> object_data;
CephContext *m_cct;
ceph::mutex *m_lock;
uint64_t m_delay_ns;
std::atomic<unsigned> m_tid = { 0 };
Finisher *m_finisher;
};
#endif
| 1,688 | 30.867925 | 73 |
h
|
null |
ceph-main/src/test/osdc/object_cacher_stress.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <cstdlib>
#include <ctime>
#include <sstream>
#include <string>
#include <vector>
#include <boost/scoped_ptr.hpp>
#include "common/ceph_argparse.h"
#include "common/ceph_mutex.h"
#include "common/common_init.h"
#include "common/config.h"
#include "common/snap_types.h"
#include "global/global_init.h"
#include "include/buffer.h"
#include "include/Context.h"
#include "include/stringify.h"
#include "osdc/ObjectCacher.h"
#include "FakeWriteback.h"
#include "MemWriteback.h"
#include <atomic>
using namespace std;
// XXX: Only tests default namespace
struct op_data {
op_data(const std::string &oid, uint64_t offset, uint64_t len, bool read)
: extent(oid, 0, offset, len, 0), is_read(read)
{
extent.oloc.pool = 0;
extent.buffer_extents.push_back(make_pair(0, len));
}
ObjectExtent extent;
bool is_read;
ceph::bufferlist result;
std::atomic<unsigned> done = { 0 };
};
class C_Count : public Context {
op_data *m_op;
std::atomic<unsigned> *m_outstanding = nullptr;
public:
C_Count(op_data *op, std::atomic<unsigned> *outstanding)
: m_op(op), m_outstanding(outstanding) {}
void finish(int r) override {
m_op->done++;
ceph_assert(*m_outstanding > 0);
(*m_outstanding)--;
}
};
int stress_test(uint64_t num_ops, uint64_t num_objs,
uint64_t max_obj_size, uint64_t delay_ns,
uint64_t max_op_len, float percent_reads)
{
ceph::mutex lock = ceph::make_mutex("object_cacher_stress::object_cacher");
FakeWriteback writeback(g_ceph_context, &lock, delay_ns);
ObjectCacher obc(g_ceph_context, "test", writeback, lock, NULL, NULL,
g_conf()->client_oc_size,
g_conf()->client_oc_max_objects,
g_conf()->client_oc_max_dirty,
g_conf()->client_oc_target_dirty,
g_conf()->client_oc_max_dirty_age,
true);
obc.start();
std::atomic<unsigned> outstanding_reads = { 0 };
vector<std::shared_ptr<op_data> > ops;
ObjectCacher::ObjectSet object_set(NULL, 0, 0);
SnapContext snapc;
ceph::buffer::ptr bp(max_op_len);
ceph::bufferlist bl;
uint64_t journal_tid = 0;
bp.zero();
bl.append(bp);
// schedule ops
std::cout << "Test configuration:\n\n"
<< setw(10) << "ops: " << num_ops << "\n"
<< setw(10) << "objects: " << num_objs << "\n"
<< setw(10) << "obj size: " << max_obj_size << "\n"
<< setw(10) << "delay: " << delay_ns << "\n"
<< setw(10) << "max op len: " << max_op_len << "\n"
<< setw(10) << "percent reads: " << percent_reads << "\n\n";
for (uint64_t i = 0; i < num_ops; ++i) {
uint64_t offset = random() % max_obj_size;
uint64_t max_len = std::min(max_obj_size - offset, max_op_len);
// no zero-length operations
uint64_t length = random() % (std::max<uint64_t>(max_len - 1, 1)) + 1;
std::string oid = "test" + stringify(random() % num_objs);
bool is_read = random() < percent_reads * float(RAND_MAX);
std::shared_ptr<op_data> op(new op_data(oid, offset, length, is_read));
ops.push_back(op);
std::cout << "op " << i << " " << (is_read ? "read" : "write")
<< " " << op->extent << "\n";
if (op->is_read) {
ObjectCacher::OSDRead *rd = obc.prepare_read(CEPH_NOSNAP, &op->result, 0);
rd->extents.push_back(op->extent);
outstanding_reads++;
Context *completion = new C_Count(op.get(), &outstanding_reads);
lock.lock();
int r = obc.readx(rd, &object_set, completion);
lock.unlock();
ceph_assert(r >= 0);
if ((uint64_t)r == length)
completion->complete(r);
else
ceph_assert(r == 0);
} else {
ObjectCacher::OSDWrite *wr = obc.prepare_write(snapc, bl,
ceph::real_time::min(), 0,
++journal_tid);
wr->extents.push_back(op->extent);
lock.lock();
obc.writex(wr, &object_set, NULL);
lock.unlock();
}
}
// check that all reads completed
for (uint64_t i = 0; i < num_ops; ++i) {
if (!ops[i]->is_read)
continue;
std::cout << "waiting for read " << i << ops[i]->extent << std::endl;
uint64_t done = 0;
while (done == 0) {
done = ops[i]->done;
if (!done) {
usleep(500);
}
}
if (done > 1) {
std::cout << "completion called more than once!\n" << std::endl;
return EXIT_FAILURE;
}
}
lock.lock();
obc.release_set(&object_set);
lock.unlock();
int r = 0;
ceph::mutex mylock = ceph::make_mutex("librbd::ImageCtx::flush_cache");
ceph::condition_variable cond;
  bool done = false;
Context *onfinish = new C_SafeCond(mylock, cond, &done, &r);
lock.lock();
bool already_flushed = obc.flush_set(&object_set, onfinish);
std::cout << "already flushed = " << already_flushed << std::endl;
lock.unlock();
{
std::unique_lock locker{mylock};
cond.wait(locker, [&done] { return done; });
}
lock.lock();
bool unclean = obc.release_set(&object_set);
lock.unlock();
if (unclean) {
std::cout << "unclean buffers left over!" << std::endl;
return EXIT_FAILURE;
}
obc.stop();
std::cout << "Test completed successfully." << std::endl;
return EXIT_SUCCESS;
}
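// To summarize the checks above: every read completion must fire exactly
// once, flush_set() must eventually drain all dirty buffers, and the final
// release_set() must report no unclean buffers.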
int correctness_test(uint64_t delay_ns)
{
std::cerr << "starting correctness test" << std::endl;
ceph::mutex lock = ceph::make_mutex("object_cacher_stress::object_cacher");
MemWriteback writeback(g_ceph_context, &lock, delay_ns);
ObjectCacher obc(g_ceph_context, "test", writeback, lock, NULL, NULL,
1<<21, // max cache size, 2MB
1, // max objects, just one
1<<18, // max dirty, 256KB
1<<17, // target dirty, 128KB
g_conf()->client_oc_max_dirty_age,
true);
obc.start();
std::cerr << "just start()ed ObjectCacher" << std::endl;
SnapContext snapc;
ceph_tid_t journal_tid = 0;
std::string oid("correctness_test_obj");
ObjectCacher::ObjectSet object_set(NULL, 0, 0);
ceph::bufferlist zeroes_bl;
zeroes_bl.append_zero(1<<20);
// set up a 4MB all-zero object
std::cerr << "writing 4x1MB object" << std::endl;
std::map<int, C_SaferCond> create_finishers;
for (int i = 0; i < 4; ++i) {
ObjectCacher::OSDWrite *wr = obc.prepare_write(snapc, zeroes_bl,
ceph::real_time::min(), 0,
++journal_tid);
ObjectExtent extent(oid, 0, zeroes_bl.length()*i, zeroes_bl.length(), 0);
extent.oloc.pool = 0;
extent.buffer_extents.push_back(make_pair(0, 1<<20));
wr->extents.push_back(extent);
lock.lock();
obc.writex(wr, &object_set, &create_finishers[i]);
lock.unlock();
}
  // write some 0xff bytes at 256KB intervals for checking consistency
std::cerr << "Writing some 0xff values" << std::endl;
ceph::buffer::ptr ones(1<<16);
memset(ones.c_str(), 0xff, ones.length());
ceph::bufferlist ones_bl;
ones_bl.append(ones);
for (int i = 1<<18; i < 1<<22; i+=1<<18) {
ObjectCacher::OSDWrite *wr = obc.prepare_write(snapc, ones_bl,
ceph::real_time::min(), 0,
++journal_tid);
ObjectExtent extent(oid, 0, i, ones_bl.length(), 0);
extent.oloc.pool = 0;
extent.buffer_extents.push_back(make_pair(0, 1<<16));
wr->extents.push_back(extent);
lock.lock();
obc.writex(wr, &object_set, &create_finishers[i]);
lock.unlock();
}
for (auto i = create_finishers.begin(); i != create_finishers.end(); ++i) {
i->second.wait();
}
std::cout << "Finished setting up object" << std::endl;
lock.lock();
C_SaferCond flushcond;
bool done = obc.flush_all(&flushcond);
if (!done) {
std::cout << "Waiting for flush" << std::endl;
lock.unlock();
flushcond.wait();
lock.lock();
}
lock.unlock();
  /* now read the back half of the object in and check consistency */
std::cout << "Reading back half of object (1<<21~1<<21)" << std::endl;
bufferlist readbl;
C_SaferCond backreadcond;
ObjectCacher::OSDRead *back_half_rd = obc.prepare_read(CEPH_NOSNAP, &readbl, 0);
ObjectExtent back_half_extent(oid, 0, 1<<21, 1<<21, 0);
back_half_extent.oloc.pool = 0;
back_half_extent.buffer_extents.push_back(make_pair(0, 1<<21));
back_half_rd->extents.push_back(back_half_extent);
lock.lock();
int r = obc.readx(back_half_rd, &object_set, &backreadcond);
lock.unlock();
ceph_assert(r >= 0);
if (r == 0) {
std::cout << "Waiting to read data into cache" << std::endl;
r = backreadcond.wait();
}
ceph_assert(r == 1<<21);
/* Read the whole object in,
* verify we have to wait for it to complete,
* overwrite a small piece, (http://tracker.ceph.com/issues/16002),
* and check consistency */
readbl.clear();
std::cout<< "Reading whole object (0~1<<22)" << std::endl;
C_SaferCond frontreadcond;
ObjectCacher::OSDRead *whole_rd = obc.prepare_read(CEPH_NOSNAP, &readbl, 0);
ObjectExtent whole_extent(oid, 0, 0, 1<<22, 0);
whole_extent.oloc.pool = 0;
whole_extent.buffer_extents.push_back(make_pair(0, 1<<22));
whole_rd->extents.push_back(whole_extent);
lock.lock();
r = obc.readx(whole_rd, &object_set, &frontreadcond);
  // the 2MB cache now holds only the back-half data, so this read cannot pass immediately!
ceph_assert(r == 0);
std::cout << "Data (correctly) not available without fetching" << std::endl;
ObjectCacher::OSDWrite *verify_wr = obc.prepare_write(snapc, ones_bl,
ceph::real_time::min(), 0,
++journal_tid);
ObjectExtent verify_extent(oid, 0, (1<<18)+(1<<16), ones_bl.length(), 0);
verify_extent.oloc.pool = 0;
verify_extent.buffer_extents.push_back(make_pair(0, 1<<16));
verify_wr->extents.push_back(verify_extent);
C_SaferCond verify_finisher;
obc.writex(verify_wr, &object_set, &verify_finisher);
lock.unlock();
std::cout << "wrote dirtying data" << std::endl;
std::cout << "Waiting to read data into cache" << std::endl;
frontreadcond.wait();
verify_finisher.wait();
std::cout << "Validating data" << std::endl;
for (int i = 1<<18; i < 1<<22; i+=1<<18) {
bufferlist ones_maybe;
ones_maybe.substr_of(readbl, i, ones_bl.length());
ceph_assert(0 == memcmp(ones_maybe.c_str(), ones_bl.c_str(), ones_bl.length()));
}
bufferlist ones_maybe;
ones_maybe.substr_of(readbl, (1<<18)+(1<<16), ones_bl.length());
ceph_assert(0 == memcmp(ones_maybe.c_str(), ones_bl.c_str(), ones_bl.length()));
std::cout << "validated that data is 0xff where it should be" << std::endl;
lock.lock();
C_SaferCond flushcond2;
done = obc.flush_all(&flushcond2);
if (!done) {
std::cout << "Waiting for final write flush" << std::endl;
lock.unlock();
flushcond2.wait();
lock.lock();
}
bool unclean = obc.release_set(&object_set);
if (unclean) {
std::cout << "unclean buffers left over!" << std::endl;
vector<ObjectExtent> discard_extents;
int i = 0;
for (auto oi = object_set.objects.begin(); !oi.end(); ++oi) {
discard_extents.emplace_back(oid, i++, 0, 1<<22, 0);
}
obc.discard_set(&object_set, discard_extents);
lock.unlock();
obc.stop();
goto fail;
}
lock.unlock();
obc.stop();
std::cout << "Testing ObjectCacher correctness complete" << std::endl;
return EXIT_SUCCESS;
fail:
return EXIT_FAILURE;
}
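// Example invocations (binary name assumed from this file's build target):
//   ceph_test_objectcacher_stress --stress-test --ops 1000 --objects 10 \
//       --obj-size 4194304 --max-op-size 131072 --percent-read 0.9
//   ceph_test_objectcacher_stress --correctness-test --delay-ns 100000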
int main(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_DEFAULT_CONFIG_FILE);
long long delay_ns = 0;
long long num_ops = 1000;
long long obj_bytes = 4 << 20;
long long max_len = 128 << 10;
long long num_objs = 10;
float percent_reads = 0.90;
int seed = time(0) % 100000;
bool stress = false;
bool correctness = false;
std::ostringstream err;
std::vector<const char*>::iterator i;
for (i = args.begin(); i != args.end();) {
if (ceph_argparse_witharg(args, i, &delay_ns, err, "--delay-ns", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &num_ops, err, "--ops", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &num_objs, err, "--objects", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &obj_bytes, err, "--obj-size", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &max_len, err, "--max-op-size", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &percent_reads, err, "--percent-read", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_witharg(args, i, &seed, err, "--seed", (char*)NULL)) {
if (!err.str().empty()) {
cerr << argv[0] << ": " << err.str() << std::endl;
return EXIT_FAILURE;
}
} else if (ceph_argparse_flag(args, i, "--stress-test", NULL)) {
stress = true;
} else if (ceph_argparse_flag(args, i, "--correctness-test", NULL)) {
correctness = true;
} else {
cerr << "unknown option " << *i << std::endl;
return EXIT_FAILURE;
}
}
if (stress) {
srandom(seed);
return stress_test(num_ops, num_objs, obj_bytes, delay_ns, max_len, percent_reads);
}
  if (correctness) {
    return correctness_test(delay_ns);
  }
  cerr << "no test selected; use --stress-test or --correctness-test"
       << std::endl;
  return EXIT_FAILURE;
}
| 13,613 | 30.957746 | 100 |
cc
|
null |
ceph-main/src/test/rbd_mirror/random_write.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/ceph_argparse.h"
#include "common/config.h"
#include "common/debug.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "global/global_init.h"
#include <algorithm>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rbd_mirror
#undef dout_prefix
#define dout_prefix *_dout << "random-write: "
namespace {
const uint32_t NUM_THREADS = 8;
const uint32_t MAX_IO_SIZE = 24576;
const uint32_t MIN_IO_SIZE = 4;
void usage() {
std::cout << "usage: ceph_test_rbd_mirror_random_write [options...] \\" << std::endl;
std::cout << " <pool> <image>" << std::endl;
std::cout << std::endl;
std::cout << " pool image pool" << std::endl;
std::cout << " image image to write" << std::endl;
std::cout << std::endl;
std::cout << "options:\n";
std::cout << " -m monaddress[:port] connect to specified monitor\n";
std::cout << " --keyring=<path> path to keyring for local cluster\n";
std::cout << " --log-file=<logfile> file to log debug output\n";
std::cout << " --debug-rbd-mirror=<log-level>/<memory-level> set rbd-mirror debug level\n";
generic_server_usage();
}
void rbd_bencher_completion(void *c, void *pc);
struct rbd_bencher {
librbd::Image *image;
ceph::mutex lock = ceph::make_mutex("rbd_bencher::lock");
ceph::condition_variable cond;
int in_flight;
explicit rbd_bencher(librbd::Image *i)
: image(i),
in_flight(0) {
}
bool start_write(int max, uint64_t off, uint64_t len, bufferlist& bl,
int op_flags) {
{
std::lock_guard l{lock};
if (in_flight >= max)
return false;
in_flight++;
}
librbd::RBD::AioCompletion *c =
new librbd::RBD::AioCompletion((void *)this, rbd_bencher_completion);
image->aio_write2(off, len, bl, c, op_flags);
//cout << "start " << c << " at " << off << "~" << len << std::endl;
return true;
}
void wait_for(int max) {
using namespace std::chrono_literals;
std::unique_lock l{lock};
while (in_flight > max) {
cond.wait_for(l, 200ms);
}
}
};
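// The write loop below uses rbd_bencher as a simple back-pressure valve.
// A compiled-out sketch of the pattern (image/bl assumed already set up):
#if 0
void bench_pattern(librbd::Image& image, bufferlist& bl) {
  rbd_bencher b(&image);
  for (uint64_t off = 0; off < 64 * bl.length(); off += bl.length()) {
    b.wait_for(NUM_THREADS - 1);  // block until an AIO slot frees up
    b.start_write(NUM_THREADS, off, bl.length(), bl, 0);
  }
  b.wait_for(0);                  // drain all in-flight writes
}
#endif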
void rbd_bencher_completion(void *vc, void *pc) {
librbd::RBD::AioCompletion *c = (librbd::RBD::AioCompletion *)vc;
rbd_bencher *b = static_cast<rbd_bencher *>(pc);
//cout << "complete " << c << std::endl;
int ret = c->get_return_value();
if (ret != 0) {
std::cout << "write error: " << cpp_strerror(ret) << std::endl;
exit(ret < 0 ? -ret : ret);
}
b->lock.lock();
b->in_flight--;
b->cond.notify_all();
b->lock.unlock();
c->release();
}
void write_image(librbd::Image &image) {
  srand(time(nullptr));
uint64_t max_io_bytes = MAX_IO_SIZE * 1024;
bufferptr bp(max_io_bytes);
memset(bp.c_str(), rand() & 0xff, bp.length());
bufferlist bl;
bl.push_back(bp);
uint64_t size = 0;
image.size(&size);
ceph_assert(size != 0);
std::vector<uint64_t> thread_offset;
uint64_t i;
uint64_t start_pos;
  // randomize each thread's starting offset (used for sequential writes)
for (i = 0; i < NUM_THREADS; i++) {
start_pos = (rand() % (size / max_io_bytes)) * max_io_bytes;
thread_offset.push_back(start_pos);
}
uint64_t total_ios = 0;
uint64_t total_bytes = 0;
rbd_bencher b(&image);
  // runs until the process is interrupted
  while (true) {
b.wait_for(NUM_THREADS - 1);
for (uint32_t i = 0; i < NUM_THREADS; ++i) {
// mostly small writes with a small chance of large writes
uint32_t io_modulo = MIN_IO_SIZE + 1;
if (rand() % 30 == 0) {
io_modulo += MAX_IO_SIZE;
}
      uint32_t io_size = (((rand() % io_modulo) + MIN_IO_SIZE) * 1024);
      // clamp so we never ask to write more bytes than bl holds
      io_size = std::min<uint64_t>(io_size, max_io_bytes);
thread_offset[i] = (rand() % (size / io_size)) * io_size;
if (!b.start_write(NUM_THREADS, thread_offset[i], io_size, bl,
LIBRADOS_OP_FLAG_FADVISE_RANDOM)) {
break;
}
++total_ios;
total_bytes += io_size;
if (total_ios % 100 == 0) {
std::cout << total_ios << " IOs, " << total_bytes << " bytes"
<< std::endl;
}
}
}
b.wait_for(0);
}
} // anonymous namespace
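// Example invocation (pool/image names are placeholders; the tool keeps
// writing until it is interrupted):
//   ceph_test_rbd_mirror_random_write --keyring=/etc/ceph/ceph.keyring \
//       --log-file=/tmp/random-write.log rbd test-image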
int main(int argc, const char **argv)
{
auto args = argv_to_vec(argc, argv);
if (args.empty()) {
std::cerr << argv[0] << ": -h or --help for usage" << std::endl;
exit(1);
}
if (ceph_argparse_need_usage(args)) {
usage();
exit(0);
}
auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
CODE_ENVIRONMENT_UTILITY,
CINIT_FLAG_NO_MON_CONFIG);
if (args.size() < 2) {
usage();
return EXIT_FAILURE;
}
std::string pool_name = args[0];
std::string image_name = args[1];
common_init_finish(g_ceph_context);
dout(5) << "connecting to cluster" << dendl;
librados::Rados rados;
librados::IoCtx io_ctx;
librbd::RBD rbd;
librbd::Image image;
int r = rados.init_with_context(g_ceph_context);
if (r < 0) {
derr << "could not initialize RADOS handle" << dendl;
return EXIT_FAILURE;
}
r = rados.connect();
if (r < 0) {
derr << "error connecting to local cluster" << dendl;
return EXIT_FAILURE;
}
r = rados.ioctx_create(pool_name.c_str(), io_ctx);
if (r < 0) {
derr << "error finding local pool " << pool_name << ": "
<< cpp_strerror(r) << dendl;
return EXIT_FAILURE;
}
r = rbd.open(io_ctx, image, image_name.c_str());
if (r < 0) {
derr << "error opening image " << image_name << ": "
<< cpp_strerror(r) << dendl;
return EXIT_FAILURE;
}
write_image(image);
return EXIT_SUCCESS;
}
| 5,727 | 26.146919 | 95 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_ClusterWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "common/Cond.h"
#include "common/errno.h"
#include "common/ceph_mutex.h"
#include "librbd/internal.h"
#include "librbd/api/Mirror.h"
#include "tools/rbd_mirror/ClusterWatcher.h"
#include "tools/rbd_mirror/ServiceDaemon.h"
#include "tools/rbd_mirror/Types.h"
#include "test/rbd_mirror/test_fixture.h"
#include "test/librados/test_cxx.h"
#include "test/librbd/test_support.h"
#include "gtest/gtest.h"
#include <boost/scope_exit.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <set>
using rbd::mirror::ClusterWatcher;
using rbd::mirror::PeerSpec;
using rbd::mirror::RadosRef;
using std::map;
using std::set;
using std::string;
void register_test_cluster_watcher() {
}
class TestClusterWatcher : public ::rbd::mirror::TestFixture {
public:
TestClusterWatcher() {
m_cluster = std::make_shared<librados::Rados>();
EXPECT_EQ("", connect_cluster_pp(*m_cluster));
}
~TestClusterWatcher() override {
m_cluster->wait_for_latest_osdmap();
for (auto& pool : m_pools) {
EXPECT_EQ(0, m_cluster->pool_delete(pool.c_str()));
}
}
void SetUp() override {
TestFixture::SetUp();
m_service_daemon.reset(new rbd::mirror::ServiceDaemon<>(g_ceph_context,
m_cluster,
m_threads));
m_cluster_watcher.reset(new ClusterWatcher(m_cluster, m_lock,
m_service_daemon.get()));
}
void TearDown() override {
m_service_daemon.reset();
m_cluster_watcher.reset();
TestFixture::TearDown();
}
void create_pool(bool enable_mirroring, const PeerSpec &peer,
string *uuid = nullptr, string *name=nullptr) {
string pool_name = get_temp_pool_name("test-rbd-mirror-");
ASSERT_EQ(0, m_cluster->pool_create(pool_name.c_str()));
int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str());
ASSERT_GE(pool_id, 0);
librados::IoCtx ioctx;
ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx));
ioctx.application_enable("rbd", true);
m_pools.insert(pool_name);
if (enable_mirroring) {
ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(ioctx,
RBD_MIRROR_MODE_POOL));
std::string gen_uuid;
ASSERT_EQ(0, librbd::api::Mirror<>::peer_site_add(
ioctx, uuid != nullptr ? uuid : &gen_uuid,
RBD_MIRROR_PEER_DIRECTION_RX_TX,
peer.cluster_name, peer.client_name));
m_pool_peers[pool_id].insert(peer);
}
if (name != nullptr) {
*name = pool_name;
}
}
void delete_pool(const string &name, const PeerSpec &peer) {
int64_t pool_id = m_cluster->pool_lookup(name.c_str());
ASSERT_GE(pool_id, 0);
if (m_pool_peers.find(pool_id) != m_pool_peers.end()) {
m_pool_peers[pool_id].erase(peer);
if (m_pool_peers[pool_id].empty()) {
m_pool_peers.erase(pool_id);
}
}
m_pools.erase(name);
ASSERT_EQ(0, m_cluster->pool_delete(name.c_str()));
}
void set_peer_config_key(const std::string& pool_name,
const PeerSpec &peer) {
int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str());
ASSERT_GE(pool_id, 0);
std::string json =
"{"
"\\\"mon_host\\\": \\\"" + peer.mon_host + "\\\", "
"\\\"key\\\": \\\"" + peer.key + "\\\""
"}";
bufferlist in_bl;
ASSERT_EQ(0, m_cluster->mon_command(
"{"
"\"prefix\": \"config-key set\","
"\"key\": \"" RBD_MIRROR_PEER_CONFIG_KEY_PREFIX + stringify(pool_id) +
"/" + peer.uuid + "\","
"\"val\": \"" + json + "\"" +
"}", in_bl, nullptr, nullptr));
}
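  // For reference, this is roughly equivalent to (assuming the prefix
  // macro expands to "rbd/mirror/peer/"):
  //   ceph config-key set rbd/mirror/peer/<pool-id>/<peer-uuid> \
  //       '{"mon_host": "<addr>", "key": "<secret>"}'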
void create_cache_pool(const string &base_pool, string *cache_pool_name) {
bufferlist inbl;
*cache_pool_name = get_temp_pool_name("test-rbd-mirror-");
ASSERT_EQ(0, m_cluster->pool_create(cache_pool_name->c_str()));
ASSERT_EQ(0, m_cluster->mon_command(
"{\"prefix\": \"osd tier add\", \"pool\": \"" + base_pool +
"\", \"tierpool\": \"" + *cache_pool_name +
"\", \"force_nonempty\": \"--force-nonempty\" }",
inbl, NULL, NULL));
ASSERT_EQ(0, m_cluster->mon_command(
"{\"prefix\": \"osd tier set-overlay\", \"pool\": \"" + base_pool +
"\", \"overlaypool\": \"" + *cache_pool_name + "\"}",
inbl, NULL, NULL));
ASSERT_EQ(0, m_cluster->mon_command(
"{\"prefix\": \"osd tier cache-mode\", \"pool\": \"" + *cache_pool_name +
"\", \"mode\": \"writeback\"}",
inbl, NULL, NULL));
m_cluster->wait_for_latest_osdmap();
}
void remove_cache_pool(const string &base_pool, const string &cache_pool) {
bufferlist inbl;
// tear down tiers
ASSERT_EQ(0, m_cluster->mon_command(
"{\"prefix\": \"osd tier remove-overlay\", \"pool\": \"" + base_pool +
"\"}",
inbl, NULL, NULL));
ASSERT_EQ(0, m_cluster->mon_command(
"{\"prefix\": \"osd tier remove\", \"pool\": \"" + base_pool +
"\", \"tierpool\": \"" + cache_pool + "\"}",
inbl, NULL, NULL));
m_cluster->wait_for_latest_osdmap();
m_cluster->pool_delete(cache_pool.c_str());
}
void check_peers() {
m_cluster_watcher->refresh_pools();
std::lock_guard l{m_lock};
ASSERT_EQ(m_pool_peers, m_cluster_watcher->get_pool_peers());
}
RadosRef m_cluster;
ceph::mutex m_lock = ceph::make_mutex("TestClusterWatcherLock");
std::unique_ptr<rbd::mirror::ServiceDaemon<>> m_service_daemon;
std::unique_ptr<ClusterWatcher> m_cluster_watcher;
set<string> m_pools;
ClusterWatcher::PoolPeers m_pool_peers;
};
TEST_F(TestClusterWatcher, NoPools) {
check_peers();
}
TEST_F(TestClusterWatcher, NoMirroredPools) {
check_peers();
create_pool(false, PeerSpec());
check_peers();
create_pool(false, PeerSpec());
check_peers();
create_pool(false, PeerSpec());
check_peers();
}
TEST_F(TestClusterWatcher, ReplicatedPools) {
PeerSpec site1("", "site1", "mirror1");
PeerSpec site2("", "site2", "mirror2");
string first_pool, last_pool;
check_peers();
create_pool(true, site1, &site1.uuid, &first_pool);
check_peers();
create_pool(false, PeerSpec());
check_peers();
create_pool(false, PeerSpec());
check_peers();
create_pool(false, PeerSpec());
check_peers();
create_pool(true, site2, &site2.uuid);
check_peers();
create_pool(true, site2, &site2.uuid);
check_peers();
create_pool(true, site2, &site2.uuid, &last_pool);
check_peers();
delete_pool(first_pool, site1);
check_peers();
delete_pool(last_pool, site2);
check_peers();
}
TEST_F(TestClusterWatcher, CachePools) {
PeerSpec site1("", "site1", "mirror1");
string base1, base2, cache1, cache2;
create_pool(true, site1, &site1.uuid, &base1);
check_peers();
create_cache_pool(base1, &cache1);
BOOST_SCOPE_EXIT( base1, cache1, this_ ) {
this_->remove_cache_pool(base1, cache1);
} BOOST_SCOPE_EXIT_END;
check_peers();
create_pool(false, PeerSpec(), nullptr, &base2);
create_cache_pool(base2, &cache2);
BOOST_SCOPE_EXIT( base2, cache2, this_ ) {
this_->remove_cache_pool(base2, cache2);
} BOOST_SCOPE_EXIT_END;
check_peers();
}
TEST_F(TestClusterWatcher, ConfigKey) {
REQUIRE(!is_librados_test_stub(*m_cluster));
std::string pool_name;
check_peers();
PeerSpec site1("", "site1", "mirror1");
create_pool(true, site1, &site1.uuid, &pool_name);
check_peers();
PeerSpec site2("", "site2", "mirror2");
site2.mon_host = "abc";
site2.key = "xyz";
create_pool(false, site2, &site2.uuid);
set_peer_config_key(pool_name, site2);
check_peers();
}
TEST_F(TestClusterWatcher, SiteName) {
REQUIRE(!is_librados_test_stub(*m_cluster));
std::string site_name;
librbd::RBD rbd;
ASSERT_EQ(0, rbd.mirror_site_name_get(*m_cluster, &site_name));
m_cluster_watcher->refresh_pools();
std::lock_guard l{m_lock};
ASSERT_EQ(site_name, m_cluster_watcher->get_site_name());
}
| 8,113 | 29.503759 | 79 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_ImageDeleter.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "include/stringify.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "tools/rbd_mirror/ImageDeleter.h"
#include "tools/rbd_mirror/ServiceDaemon.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/Throttler.h"
#include "tools/rbd_mirror/Types.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Journal.h"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#include "librbd/api/Image.h"
#include "librbd/api/Mirror.h"
#include "librbd/journal/DisabledPolicy.h"
#include "test/rbd_mirror/test_fixture.h"
#include "test/librados/test.h"
#include "gtest/gtest.h"
#define GLOBAL_IMAGE_ID "global_image_id"
#define GLOBAL_CLONE_IMAGE_ID "global_image_id_clone"
#define dout_subsys ceph_subsys_rbd_mirror
using rbd::mirror::RadosRef;
using rbd::mirror::TestFixture;
using namespace librbd;
using cls::rbd::MirrorImageMode;
using cls::rbd::MirrorImageState;
void register_test_rbd_mirror_image_deleter() {
}
class TestImageDeleter : public TestFixture {
public:
const std::string m_local_mirror_uuid = "local mirror uuid";
const std::string m_remote_mirror_uuid = "remote mirror uuid";
void SetUp() override {
TestFixture::SetUp();
m_image_deletion_throttler.reset(
new rbd::mirror::Throttler<>(g_ceph_context,
"rbd_mirror_concurrent_image_deletions"));
m_service_daemon.reset(new rbd::mirror::ServiceDaemon<>(g_ceph_context,
_rados, m_threads));
librbd::api::Mirror<>::mode_set(m_local_io_ctx, RBD_MIRROR_MODE_IMAGE);
m_deleter = new rbd::mirror::ImageDeleter<>(
m_local_io_ctx, m_threads, m_image_deletion_throttler.get(),
m_service_daemon.get());
m_local_image_id = librbd::util::generate_image_id(m_local_io_ctx);
librbd::ImageOptions image_opts;
image_opts.set(RBD_IMAGE_OPTION_FEATURES, RBD_FEATURES_ALL);
EXPECT_EQ(0, librbd::create(m_local_io_ctx, m_image_name, m_local_image_id,
1 << 20, image_opts, GLOBAL_IMAGE_ID,
m_remote_mirror_uuid, true));
cls::rbd::MirrorImage mirror_image(
MirrorImageMode::MIRROR_IMAGE_MODE_JOURNAL, GLOBAL_IMAGE_ID,
MirrorImageState::MIRROR_IMAGE_STATE_ENABLED);
EXPECT_EQ(0, cls_client::mirror_image_set(&m_local_io_ctx, m_local_image_id,
mirror_image));
}
void TearDown() override {
remove_image();
C_SaferCond ctx;
m_deleter->shut_down(&ctx);
ctx.wait();
delete m_deleter;
m_service_daemon.reset();
TestFixture::TearDown();
}
void init_image_deleter() {
C_SaferCond ctx;
m_deleter->init(&ctx);
ASSERT_EQ(0, ctx.wait());
}
void remove_image() {
cls::rbd::MirrorImage mirror_image;
int r = cls_client::mirror_image_get(&m_local_io_ctx, m_local_image_id,
&mirror_image);
    EXPECT_TRUE(r == 0 || r == -ENOENT);
if (r != -ENOENT) {
mirror_image.state = MirrorImageState::MIRROR_IMAGE_STATE_ENABLED;
EXPECT_EQ(0, cls_client::mirror_image_set(&m_local_io_ctx,
m_local_image_id,
mirror_image));
}
promote_image();
NoOpProgressContext ctx;
r = librbd::api::Image<>::remove(m_local_io_ctx, m_image_name, ctx);
    EXPECT_TRUE(r == 0 || r == -ENOENT);
}
void promote_image(ImageCtx *ictx=nullptr) {
bool close = false;
int r = 0;
if (!ictx) {
ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
false);
r = ictx->state->open(0);
close = (r == 0);
}
    EXPECT_TRUE(r == 0 || r == -ENOENT);
if (r == 0) {
int r2 = librbd::api::Mirror<>::image_promote(ictx, true);
      EXPECT_TRUE(r2 == 0 || r2 == -EINVAL);
}
if (close) {
EXPECT_EQ(0, ictx->state->close());
}
}
void demote_image(ImageCtx *ictx=nullptr) {
bool close = false;
if (!ictx) {
ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
false);
EXPECT_EQ(0, ictx->state->open(0));
close = true;
}
EXPECT_EQ(0, librbd::api::Mirror<>::image_demote(ictx));
if (close) {
EXPECT_EQ(0, ictx->state->close());
}
}
void create_snapshot(std::string snap_name="snap1", bool protect=false) {
ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
false);
EXPECT_EQ(0, ictx->state->open(0));
{
std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new librbd::journal::DisabledPolicy());
}
librbd::NoOpProgressContext prog_ctx;
EXPECT_EQ(0, ictx->operations->snap_create(
cls::rbd::UserSnapshotNamespace(), snap_name, 0, prog_ctx));
if (protect) {
EXPECT_EQ(0, ictx->operations->snap_protect(
cls::rbd::UserSnapshotNamespace(), snap_name));
}
EXPECT_EQ(0, ictx->state->close());
}
std::string create_clone() {
ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
false);
EXPECT_EQ(0, ictx->state->open(0));
{
std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new librbd::journal::DisabledPolicy());
}
librbd::NoOpProgressContext prog_ctx;
EXPECT_EQ(0, ictx->operations->snap_create(
cls::rbd::UserSnapshotNamespace(), "snap1", 0, prog_ctx));
EXPECT_EQ(0, ictx->operations->snap_protect(
cls::rbd::UserSnapshotNamespace(), "snap1"));
EXPECT_EQ(0, librbd::api::Image<>::snap_set(
ictx, cls::rbd::UserSnapshotNamespace(), "snap1"));
std::string clone_id = librbd::util::generate_image_id(m_local_io_ctx);
librbd::ImageOptions clone_opts;
clone_opts.set(RBD_IMAGE_OPTION_FEATURES, ictx->features);
EXPECT_EQ(0, librbd::clone(m_local_io_ctx, m_local_image_id.c_str(),
nullptr, "snap1", m_local_io_ctx,
clone_id.c_str(), "clone1", clone_opts,
GLOBAL_CLONE_IMAGE_ID, m_remote_mirror_uuid));
cls::rbd::MirrorImage mirror_image(
MirrorImageMode::MIRROR_IMAGE_MODE_JOURNAL, GLOBAL_CLONE_IMAGE_ID,
MirrorImageState::MIRROR_IMAGE_STATE_ENABLED);
EXPECT_EQ(0, cls_client::mirror_image_set(&m_local_io_ctx, clone_id,
mirror_image));
EXPECT_EQ(0, ictx->state->close());
return clone_id;
}
void check_image_deleted() {
ImageCtx *ictx = new ImageCtx("", m_local_image_id, "", m_local_io_ctx,
false);
EXPECT_EQ(-ENOENT, ictx->state->open(0));
cls::rbd::MirrorImage mirror_image;
EXPECT_EQ(-ENOENT, cls_client::mirror_image_get(&m_local_io_ctx,
m_local_image_id,
&mirror_image));
}
int trash_move(const std::string& global_image_id) {
C_SaferCond ctx;
rbd::mirror::ImageDeleter<>::trash_move(m_local_io_ctx, global_image_id,
true, m_threads->work_queue, &ctx);
return ctx.wait();
}
librbd::RBD rbd;
std::string m_local_image_id;
std::unique_ptr<rbd::mirror::Throttler<>> m_image_deletion_throttler;
std::unique_ptr<rbd::mirror::ServiceDaemon<>> m_service_daemon;
rbd::mirror::ImageDeleter<> *m_deleter;
};
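// The cases below cover both orderings: an image already in the trash when
// the deleter initializes (ExistingTrashMove) and one trashed while it is
// running (LiveTrashMove), plus snapshot- and clone-bearing images.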
TEST_F(TestImageDeleter, ExistingTrashMove) {
ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
C_SaferCond ctx;
m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
init_image_deleter();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestImageDeleter, LiveTrashMove) {
init_image_deleter();
C_SaferCond ctx;
m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestImageDeleter, Delete_Image_With_Snapshots) {
init_image_deleter();
create_snapshot("snap1");
create_snapshot("snap2");
C_SaferCond ctx;
m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
EXPECT_EQ(0, ctx.wait());
ASSERT_EQ(0u, m_deleter->get_delete_queue_items().size());
ASSERT_EQ(0u, m_deleter->get_failed_queue_items().size());
}
TEST_F(TestImageDeleter, Delete_Image_With_ProtectedSnapshots) {
init_image_deleter();
create_snapshot("snap1", true);
create_snapshot("snap2", true);
C_SaferCond ctx;
m_deleter->wait_for_deletion(m_local_image_id, false, &ctx);
ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
EXPECT_EQ(0, ctx.wait());
ASSERT_EQ(0u, m_deleter->get_delete_queue_items().size());
ASSERT_EQ(0u, m_deleter->get_failed_queue_items().size());
}
TEST_F(TestImageDeleter, Delete_Image_With_Clone) {
init_image_deleter();
std::string clone_id = create_clone();
C_SaferCond ctx1;
m_deleter->set_busy_timer_interval(0.1);
m_deleter->wait_for_deletion(m_local_image_id, false, &ctx1);
ASSERT_EQ(0, trash_move(GLOBAL_IMAGE_ID));
EXPECT_EQ(-EBUSY, ctx1.wait());
C_SaferCond ctx2;
m_deleter->wait_for_deletion(clone_id, false, &ctx2);
ASSERT_EQ(0, trash_move(GLOBAL_CLONE_IMAGE_ID));
EXPECT_EQ(0, ctx2.wait());
C_SaferCond ctx3;
m_deleter->wait_for_deletion(m_local_image_id, true, &ctx3);
EXPECT_EQ(0, ctx3.wait());
ASSERT_EQ(0u, m_deleter->get_delete_queue_items().size());
ASSERT_EQ(0u, m_deleter->get_failed_queue_items().size());
}
| 10,210 | 31.519108 | 80 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_ImageReplayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph distributed storage system
*
* Copyright (C) 2016 Mirantis Inc
*
* Author: Mykola Golub <[email protected]>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
*/
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "include/stringify.h"
#include "test/librbd/test_support.h"
#include "test/rbd_mirror/test_fixture.h"
#include "cls/journal/cls_journal_types.h"
#include "cls/journal/cls_journal_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "journal/Journaler.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/internal.h"
#include "librbd/api/Io.h"
#include "librbd/api/Mirror.h"
#include "librbd/api/Snapshot.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include "tools/rbd_mirror/ImageReplayer.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/MirrorStatusUpdater.h"
#include "tools/rbd_mirror/PoolMetaCache.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/Throttler.h"
#include "tools/rbd_mirror/Types.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
void register_test_rbd_mirror() {
}
#define TEST_IO_SIZE 512
#define TEST_IO_COUNT 11
namespace rbd {
namespace mirror {
template <typename T>
class TestImageReplayer : public TestFixture {
public:
static const cls::rbd::MirrorImageMode MIRROR_IMAGE_MODE =
T::MIRROR_IMAGE_MODE;
static const uint64_t FEATURES = T::FEATURES;
struct C_WatchCtx : public librados::WatchCtx2 {
TestImageReplayer *test;
std::string oid;
ceph::mutex lock = ceph::make_mutex("C_WatchCtx::lock");
ceph::condition_variable cond;
bool notified;
C_WatchCtx(TestImageReplayer *test, const std::string &oid)
: test(test), oid(oid), notified(false) {
}
void handle_notify(uint64_t notify_id, uint64_t cookie,
uint64_t notifier_id, bufferlist& bl_) override {
bufferlist bl;
test->m_remote_ioctx.notify_ack(oid, notify_id, cookie, bl);
std::lock_guard locker{lock};
notified = true;
cond.notify_all();
}
void handle_error(uint64_t cookie, int err) override {
ASSERT_EQ(0, err);
}
};
TestImageReplayer()
: m_local_cluster(new librados::Rados()), m_watch_handle(0)
{
EXPECT_EQ("", connect_cluster_pp(*m_local_cluster.get()));
EXPECT_EQ(0, m_local_cluster->conf_set("rbd_cache", "false"));
EXPECT_EQ(0, m_local_cluster->conf_set("rbd_mirror_journal_poll_age", "1"));
EXPECT_EQ(0, m_local_cluster->conf_set("rbd_mirror_journal_commit_age",
"0.1"));
m_local_pool_name = get_temp_pool_name();
EXPECT_EQ(0, m_local_cluster->pool_create(m_local_pool_name.c_str()));
EXPECT_EQ(0, m_local_cluster->ioctx_create(m_local_pool_name.c_str(),
m_local_ioctx));
m_local_ioctx.application_enable("rbd", true);
EXPECT_EQ("", connect_cluster_pp(m_remote_cluster));
EXPECT_EQ(0, m_remote_cluster.conf_set("rbd_cache", "false"));
m_remote_pool_name = get_temp_pool_name();
EXPECT_EQ(0, m_remote_cluster.pool_create(m_remote_pool_name.c_str()));
m_remote_pool_id = m_remote_cluster.pool_lookup(m_remote_pool_name.c_str());
EXPECT_GE(m_remote_pool_id, 0);
EXPECT_EQ(0, m_remote_cluster.ioctx_create(m_remote_pool_name.c_str(),
m_remote_ioctx));
m_remote_ioctx.application_enable("rbd", true);
// make snap id debugging easier when local/remote have different mappings
uint64_t snap_id;
EXPECT_EQ(0, m_remote_ioctx.selfmanaged_snap_create(&snap_id));
uint64_t features = FEATURES;
if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_remote_ioctx,
RBD_MIRROR_MODE_POOL));
EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_local_ioctx,
RBD_MIRROR_MODE_POOL));
} else {
EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_remote_ioctx,
RBD_MIRROR_MODE_IMAGE));
EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_local_ioctx,
RBD_MIRROR_MODE_IMAGE));
uuid_d uuid_gen;
uuid_gen.generate_random();
std::string remote_peer_uuid = uuid_gen.to_string();
EXPECT_EQ(0, librbd::cls_client::mirror_peer_add(
&m_remote_ioctx, {remote_peer_uuid,
cls::rbd::MIRROR_PEER_DIRECTION_RX_TX,
"siteA", "client", m_local_mirror_uuid}));
m_pool_meta_cache.set_remote_pool_meta(
m_remote_ioctx.get_id(), {m_remote_mirror_uuid, remote_peer_uuid});
}
EXPECT_EQ(0, librbd::api::Mirror<>::uuid_get(m_remote_ioctx,
&m_remote_mirror_uuid));
EXPECT_EQ(0, librbd::api::Mirror<>::uuid_get(m_local_ioctx,
&m_local_mirror_uuid));
m_image_name = get_temp_image_name();
int order = 0;
EXPECT_EQ(0, librbd::create(m_remote_ioctx, m_image_name.c_str(), 1 << 22,
false, features, &order, 0, 0));
if (MIRROR_IMAGE_MODE != cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
librbd::ImageCtx* remote_image_ctx;
open_remote_image(&remote_image_ctx);
EXPECT_EQ(0,
librbd::api::Mirror<>::image_enable(
remote_image_ctx,
static_cast<rbd_mirror_image_mode_t>(MIRROR_IMAGE_MODE),
false));
close_image(remote_image_ctx);
}
m_remote_image_id = get_image_id(m_remote_ioctx, m_image_name);
m_global_image_id = get_global_image_id(m_remote_ioctx, m_remote_image_id);
auto cct = reinterpret_cast<CephContext*>(m_local_ioctx.cct());
m_threads.reset(new Threads<>(m_local_cluster));
m_image_sync_throttler.reset(new Throttler<>(
cct, "rbd_mirror_concurrent_image_syncs"));
m_instance_watcher = InstanceWatcher<>::create(
m_local_ioctx, *m_threads->asio_engine, nullptr,
m_image_sync_throttler.get());
m_instance_watcher->handle_acquire_leader();
EXPECT_EQ(0, m_local_ioctx.create(RBD_MIRRORING, false));
m_local_status_updater = MirrorStatusUpdater<>::create(
m_local_ioctx, m_threads.get(), "");
C_SaferCond status_updater_ctx;
m_local_status_updater->init(&status_updater_ctx);
EXPECT_EQ(0, status_updater_ctx.wait());
}
~TestImageReplayer() override
{
unwatch();
m_instance_watcher->handle_release_leader();
delete m_replayer;
delete m_instance_watcher;
C_SaferCond status_updater_ctx;
m_local_status_updater->shut_down(&status_updater_ctx);
EXPECT_EQ(0, status_updater_ctx.wait());
delete m_local_status_updater;
EXPECT_EQ(0, m_remote_cluster.pool_delete(m_remote_pool_name.c_str()));
EXPECT_EQ(0, m_local_cluster->pool_delete(m_local_pool_name.c_str()));
}
void create_replayer() {
m_replayer = new ImageReplayer<>(m_local_ioctx, m_local_mirror_uuid,
m_global_image_id, m_threads.get(),
m_instance_watcher, m_local_status_updater,
nullptr, &m_pool_meta_cache);
m_replayer->add_peer({"peer uuid", m_remote_ioctx,
{m_remote_mirror_uuid, "remote mirror peer uuid"},
nullptr});
}
void start()
{
C_SaferCond cond;
m_replayer->start(&cond);
ASSERT_EQ(0, cond.wait());
create_watch_ctx();
}
void create_watch_ctx() {
std::string oid;
if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
oid = ::journal::Journaler::header_oid(m_remote_image_id);
} else {
oid = librbd::util::header_name(m_remote_image_id);
}
ASSERT_EQ(0U, m_watch_handle);
ASSERT_TRUE(m_watch_ctx == nullptr);
m_watch_ctx = new C_WatchCtx(this, oid);
ASSERT_EQ(0, m_remote_ioctx.watch2(oid, &m_watch_handle, m_watch_ctx));
}
void unwatch() {
if (m_watch_handle != 0) {
m_remote_ioctx.unwatch2(m_watch_handle);
delete m_watch_ctx;
m_watch_ctx = nullptr;
m_watch_handle = 0;
}
}
void stop()
{
unwatch();
C_SaferCond cond;
m_replayer->stop(&cond);
ASSERT_EQ(0, cond.wait());
}
void bootstrap()
{
create_replayer();
start();
wait_for_replay_complete();
stop();
}
std::string get_temp_image_name()
{
return "image" + stringify(++_image_number);
}
std::string get_image_id(librados::IoCtx &ioctx, const std::string &image_name)
{
std::string obj = librbd::util::id_obj_name(image_name);
std::string id;
EXPECT_EQ(0, librbd::cls_client::get_id(&ioctx, obj, &id));
return id;
}
std::string get_global_image_id(librados::IoCtx& io_ctx,
const std::string& image_id) {
cls::rbd::MirrorImage mirror_image;
EXPECT_EQ(0, librbd::cls_client::mirror_image_get(&io_ctx, image_id,
&mirror_image));
return mirror_image.global_image_id;
}
void open_image(librados::IoCtx &ioctx, const std::string &image_name,
bool readonly, librbd::ImageCtx **ictxp)
{
librbd::ImageCtx *ictx = new librbd::ImageCtx(image_name.c_str(),
"", "", ioctx, readonly);
EXPECT_EQ(0, ictx->state->open(0));
*ictxp = ictx;
}
void open_local_image(librbd::ImageCtx **ictxp)
{
open_image(m_local_ioctx, m_image_name, true, ictxp);
}
void open_remote_image(librbd::ImageCtx **ictxp)
{
open_image(m_remote_ioctx, m_image_name, false, ictxp);
}
void close_image(librbd::ImageCtx *ictx)
{
ictx->state->close();
}
void get_commit_positions(cls::journal::ObjectPosition *master_position,
cls::journal::ObjectPosition *mirror_position)
{
std::string master_client_id = "";
std::string mirror_client_id = m_local_mirror_uuid;
m_replayer->flush();
C_SaferCond cond;
uint64_t minimum_set;
uint64_t active_set;
std::set<cls::journal::Client> registered_clients;
std::string oid = ::journal::Journaler::header_oid(m_remote_image_id);
cls::journal::client::get_mutable_metadata(m_remote_ioctx, oid,
&minimum_set, &active_set,
®istered_clients, &cond);
ASSERT_EQ(0, cond.wait());
*master_position = cls::journal::ObjectPosition();
*mirror_position = cls::journal::ObjectPosition();
std::set<cls::journal::Client>::const_iterator c;
for (c = registered_clients.begin(); c != registered_clients.end(); ++c) {
std::cout << __func__ << ": client: " << *c << std::endl;
if (c->state != cls::journal::CLIENT_STATE_CONNECTED) {
continue;
}
cls::journal::ObjectPositions object_positions =
c->commit_position.object_positions;
cls::journal::ObjectPositions::const_iterator p =
object_positions.begin();
if (p != object_positions.end()) {
if (c->id == master_client_id) {
ASSERT_EQ(cls::journal::ObjectPosition(), *master_position);
*master_position = *p;
} else if (c->id == mirror_client_id) {
ASSERT_EQ(cls::journal::ObjectPosition(), *mirror_position);
*mirror_position = *p;
}
}
}
}
bool wait_for_watcher_notify(int seconds)
{
if (m_watch_handle == 0) {
return false;
}
std::unique_lock locker{m_watch_ctx->lock};
while (!m_watch_ctx->notified) {
if (m_watch_ctx->cond.wait_for(locker,
std::chrono::seconds(seconds)) ==
std::cv_status::timeout) {
return false;
}
}
m_watch_ctx->notified = false;
return true;
}
int get_last_mirror_snapshot(librados::IoCtx& io_ctx,
const std::string& image_id,
uint64_t* mirror_snap_id,
cls::rbd::MirrorSnapshotNamespace* mirror_ns) {
auto header_oid = librbd::util::header_name(image_id);
::SnapContext snapc;
int r = librbd::cls_client::get_snapcontext(&io_ctx, header_oid, &snapc);
if (r < 0) {
return r;
}
    // snapc.snaps is ordered newest-first, so the first mirror snapshot
    // found is the most recent one
for (auto snap_id : snapc.snaps) {
cls::rbd::SnapshotInfo snap_info;
r = librbd::cls_client::snapshot_get(&io_ctx, header_oid, snap_id,
&snap_info);
if (r < 0) {
return r;
}
auto ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&snap_info.snapshot_namespace);
if (ns != nullptr) {
*mirror_snap_id = snap_id;
*mirror_ns = *ns;
return 0;
}
}
return -ENOENT;
}
void wait_for_journal_synced() {
cls::journal::ObjectPosition master_position;
cls::journal::ObjectPosition mirror_position;
for (int i = 0; i < 100; i++) {
get_commit_positions(&master_position, &mirror_position);
if (master_position == mirror_position) {
break;
}
wait_for_watcher_notify(1);
}
ASSERT_EQ(master_position, mirror_position);
}
void wait_for_snapshot_synced() {
uint64_t remote_snap_id = CEPH_NOSNAP;
cls::rbd::MirrorSnapshotNamespace remote_mirror_ns;
ASSERT_EQ(0, get_last_mirror_snapshot(m_remote_ioctx, m_remote_image_id,
&remote_snap_id, &remote_mirror_ns));
std::cout << "remote_snap_id=" << remote_snap_id << std::endl;
std::string local_image_id;
ASSERT_EQ(0, librbd::cls_client::mirror_image_get_image_id(
&m_local_ioctx, m_global_image_id, &local_image_id));
uint64_t local_snap_id = CEPH_NOSNAP;
cls::rbd::MirrorSnapshotNamespace local_mirror_ns;
for (int i = 0; i < 100; i++) {
int r = get_last_mirror_snapshot(m_local_ioctx, local_image_id,
&local_snap_id, &local_mirror_ns);
if (r == 0 &&
((remote_mirror_ns.state ==
cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY &&
local_mirror_ns.state ==
cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY) ||
(remote_mirror_ns.state ==
cls::rbd::MIRROR_SNAPSHOT_STATE_PRIMARY_DEMOTED &&
local_mirror_ns.state ==
cls::rbd::MIRROR_SNAPSHOT_STATE_NON_PRIMARY_DEMOTED)) &&
local_mirror_ns.primary_mirror_uuid == m_remote_mirror_uuid &&
local_mirror_ns.primary_snap_id == remote_snap_id &&
local_mirror_ns.complete) {
std::cout << "local_snap_id=" << local_snap_id << ", "
<< "local_snap_ns=" << local_mirror_ns << std::endl;
return;
}
wait_for_watcher_notify(1);
}
ADD_FAILURE() << "failed to locate matching snapshot: "
<< "remote_snap_id=" << remote_snap_id << ", "
<< "remote_snap_ns=" << remote_mirror_ns << ", "
<< "local_snap_id=" << local_snap_id << ", "
<< "local_snap_ns=" << local_mirror_ns;
}
void wait_for_replay_complete()
{
if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
wait_for_journal_synced();
} else {
wait_for_snapshot_synced();
}
}
void wait_for_stopped() {
for (int i = 0; i < 100; i++) {
if (m_replayer->is_stopped()) {
break;
}
wait_for_watcher_notify(1);
}
ASSERT_TRUE(m_replayer->is_stopped());
}
void write_test_data(librbd::ImageCtx *ictx, const char *test_data, off_t off,
size_t len)
{
size_t written;
bufferlist bl;
bl.append(std::string(test_data, len));
written = librbd::api::Io<>::write(*ictx, off, len, std::move(bl), 0);
printf("wrote: %d\n", (int)written);
ASSERT_EQ(len, written);
}
void read_test_data(librbd::ImageCtx *ictx, const char *expected, off_t off,
size_t len)
{
ssize_t read;
char *result = (char *)malloc(len + 1);
ASSERT_NE(static_cast<char *>(NULL), result);
read = librbd::api::Io<>::read(
*ictx, off, len, librbd::io::ReadResult{result, len}, 0);
printf("read: %d\n", (int)read);
ASSERT_EQ(len, static_cast<size_t>(read));
result[len] = '\0';
if (memcmp(result, expected, len)) {
printf("read: %s\nexpected: %s\n", result, expected);
ASSERT_EQ(0, memcmp(result, expected, len));
}
free(result);
}
void generate_test_data() {
for (int i = 0; i < TEST_IO_SIZE; ++i) {
m_test_data[i] = (char) (rand() % (126 - 33) + 33);
}
m_test_data[TEST_IO_SIZE] = '\0';
}
void flush(librbd::ImageCtx *ictx)
{
C_SaferCond aio_flush_ctx;
auto c = librbd::io::AioCompletion::create(&aio_flush_ctx);
c->get();
librbd::api::Io<>::aio_flush(*ictx, c, true);
ASSERT_EQ(0, c->wait_for_complete());
c->put();
if (MIRROR_IMAGE_MODE == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
C_SaferCond journal_flush_ctx;
ictx->journal->flush_commit_position(&journal_flush_ctx);
ASSERT_EQ(0, journal_flush_ctx.wait());
} else {
uint64_t snap_id = CEPH_NOSNAP;
ASSERT_EQ(0, librbd::api::Mirror<>::image_snapshot_create(
ictx, 0, &snap_id));
}
printf("flushed\n");
}
static int _image_number;
PoolMetaCache m_pool_meta_cache{g_ceph_context};
std::shared_ptr<librados::Rados> m_local_cluster;
std::unique_ptr<Threads<>> m_threads;
std::unique_ptr<Throttler<>> m_image_sync_throttler;
librados::Rados m_remote_cluster;
InstanceWatcher<> *m_instance_watcher;
MirrorStatusUpdater<> *m_local_status_updater;
std::string m_local_mirror_uuid = "local mirror uuid";
std::string m_remote_mirror_uuid = "remote mirror uuid";
std::string m_local_pool_name, m_remote_pool_name;
librados::IoCtx m_local_ioctx, m_remote_ioctx;
std::string m_image_name;
int64_t m_remote_pool_id;
std::string m_remote_image_id;
std::string m_global_image_id;
ImageReplayer<> *m_replayer = nullptr;
C_WatchCtx *m_watch_ctx = nullptr;
uint64_t m_watch_handle = 0;
char m_test_data[TEST_IO_SIZE + 1];
std::string m_journal_commit_age;
};
template <typename T>
int TestImageReplayer<T>::_image_number;
template <cls::rbd::MirrorImageMode _mirror_image_mode, uint64_t _features>
class TestImageReplayerParams {
public:
static const cls::rbd::MirrorImageMode MIRROR_IMAGE_MODE = _mirror_image_mode;
static const uint64_t FEATURES = _features;
};
typedef ::testing::Types<TestImageReplayerParams<
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, 125>,
TestImageReplayerParams<
cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 1>,
TestImageReplayerParams<
cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 5>,
TestImageReplayerParams<
cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 61>,
TestImageReplayerParams<
cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT, 125>>
TestImageReplayerTypes;
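// The FEATURES masks above decode via the standard RBD feature bits
// (layering=1, striping-v2=2, exclusive-lock=4, object-map=8, fast-diff=16,
// deep-flatten=32, journaling=64):
//   125 = all of the above except striping-v2 (journaling enabled)
//    61 = as 125 but without journaling
//     5 = layering + exclusive-lock
//     1 = layering only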
TYPED_TEST_SUITE(TestImageReplayer, TestImageReplayerTypes);
TYPED_TEST(TestImageReplayer, Bootstrap)
{
this->bootstrap();
}
typedef TestImageReplayer<TestImageReplayerParams<
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, 125>> TestImageReplayerJournal;
TYPED_TEST(TestImageReplayer, BootstrapErrorLocalImageExists)
{
int order = 0;
EXPECT_EQ(0, librbd::create(this->m_local_ioctx, this->m_image_name.c_str(),
1 << 22, false, 0, &order, 0, 0));
this->create_replayer();
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(-EEXIST, cond.wait());
}
TEST_F(TestImageReplayerJournal, BootstrapErrorNoJournal)
{
ASSERT_EQ(0, librbd::Journal<>::remove(this->m_remote_ioctx,
this->m_remote_image_id));
this->create_replayer();
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(-ENOENT, cond.wait());
}
TYPED_TEST(TestImageReplayer, BootstrapErrorMirrorDisabled)
{
// disable remote image mirroring
ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(this->m_remote_ioctx,
RBD_MIRROR_MODE_IMAGE));
librbd::ImageCtx *ictx;
this->open_remote_image(&ictx);
ASSERT_EQ(0, librbd::api::Mirror<>::image_disable(ictx, true));
this->close_image(ictx);
this->create_replayer();
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(-ENOENT, cond.wait());
}
TYPED_TEST(TestImageReplayer, BootstrapMirrorDisabling)
{
// set remote image mirroring state to DISABLING
  if (TypeParam::MIRROR_IMAGE_MODE ==
      cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(this->m_remote_ioctx,
RBD_MIRROR_MODE_IMAGE));
librbd::ImageCtx *ictx;
this->open_remote_image(&ictx);
ASSERT_EQ(0, librbd::api::Mirror<>::image_enable(
ictx, RBD_MIRROR_IMAGE_MODE_JOURNAL, false));
this->close_image(ictx);
}
cls::rbd::MirrorImage mirror_image;
ASSERT_EQ(0, librbd::cls_client::mirror_image_get(&this->m_remote_ioctx,
this->m_remote_image_id,
&mirror_image));
mirror_image.state = cls::rbd::MirrorImageState::MIRROR_IMAGE_STATE_DISABLING;
ASSERT_EQ(0, librbd::cls_client::mirror_image_set(&this->m_remote_ioctx,
this->m_remote_image_id,
mirror_image));
this->create_replayer();
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(-ENOENT, cond.wait());
ASSERT_TRUE(this->m_replayer->is_stopped());
}
TYPED_TEST(TestImageReplayer, BootstrapDemoted)
{
// demote remote image
librbd::ImageCtx *ictx;
this->open_remote_image(&ictx);
ASSERT_EQ(0, librbd::api::Mirror<>::image_demote(ictx));
this->close_image(ictx);
this->create_replayer();
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(-EREMOTEIO, cond.wait());
ASSERT_TRUE(this->m_replayer->is_stopped());
}
TYPED_TEST(TestImageReplayer, StartInterrupted)
{
this->create_replayer();
C_SaferCond start_cond, stop_cond;
this->m_replayer->start(&start_cond);
this->m_replayer->stop(&stop_cond);
int r = start_cond.wait();
printf("start returned %d\n", r);
// TODO: improve the test to avoid this race
ASSERT_TRUE(r == -ECANCELED || r == 0);
ASSERT_EQ(0, stop_cond.wait());
}
TEST_F(TestImageReplayerJournal, JournalReset)
{
this->bootstrap();
delete this->m_replayer;
ASSERT_EQ(0, librbd::Journal<>::reset(this->m_remote_ioctx,
this->m_remote_image_id));
// try to recover
this->bootstrap();
}
TEST_F(TestImageReplayerJournal, ErrorNoJournal)
{
this->bootstrap();
// disable remote journal journaling
// (reset before disabling, so it does not fail with EBUSY)
ASSERT_EQ(0, librbd::Journal<>::reset(this->m_remote_ioctx,
this->m_remote_image_id));
librbd::ImageCtx *ictx;
this->open_remote_image(&ictx);
uint64_t features;
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(0, ictx->operations->update_features(RBD_FEATURE_JOURNALING,
false));
this->close_image(ictx);
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(0, cond.wait());
}
TYPED_TEST(TestImageReplayer, StartStop)
{
this->bootstrap();
this->start();
this->wait_for_replay_complete();
this->stop();
}
TYPED_TEST(TestImageReplayer, WriteAndStartReplay)
{
this->bootstrap();
// Write to remote image and start replay
librbd::ImageCtx *ictx;
this->generate_test_data();
this->open_remote_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->start();
this->wait_for_replay_complete();
this->stop();
this->open_local_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
}
TYPED_TEST(TestImageReplayer, StartReplayAndWrite)
{
this->bootstrap();
// Start replay and write to remote image
librbd::ImageCtx *ictx;
this->start();
this->generate_test_data();
this->open_remote_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->wait_for_replay_complete();
for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
this->stop();
}
TEST_F(TestImageReplayerJournal, NextTag)
{
this->bootstrap();
// write, reopen, and write again to test switch to the next tag
librbd::ImageCtx *ictx;
this->start();
this->generate_test_data();
const int N = 10;
for (int j = 0; j < N; j++) {
this->open_remote_image(&ictx);
for (int i = j * TEST_IO_COUNT; i < (j + 1) * TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
}
this->wait_for_replay_complete();
this->open_local_image(&ictx);
for (int i = 0; i < N * TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
this->stop();
}
TYPED_TEST(TestImageReplayer, Resync)
{
this->bootstrap();
librbd::ImageCtx *ictx;
this->start();
this->generate_test_data();
this->open_remote_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->wait_for_replay_complete();
for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->open_local_image(&ictx);
EXPECT_EQ(0, librbd::api::Mirror<>::image_resync(ictx));
this->close_image(ictx);
this->wait_for_stopped();
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(0, cond.wait());
ASSERT_TRUE(this->m_replayer->is_replaying());
this->wait_for_replay_complete();
this->open_local_image(&ictx);
for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
this->stop();
}
TYPED_TEST(TestImageReplayer, Resync_While_Stop)
{
this->bootstrap();
this->start();
this->generate_test_data();
librbd::ImageCtx *ictx;
this->open_remote_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->wait_for_replay_complete();
for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->wait_for_replay_complete();
C_SaferCond cond;
this->m_replayer->stop(&cond);
ASSERT_EQ(0, cond.wait());
this->open_local_image(&ictx);
EXPECT_EQ(0, librbd::api::Mirror<>::image_resync(ictx));
this->close_image(ictx);
C_SaferCond cond2;
this->m_replayer->start(&cond2);
ASSERT_EQ(0, cond2.wait());
ASSERT_TRUE(this->m_replayer->is_stopped());
C_SaferCond cond3;
this->m_replayer->start(&cond3);
ASSERT_EQ(0, cond3.wait());
ASSERT_TRUE(this->m_replayer->is_replaying());
this->wait_for_replay_complete();
this->open_local_image(&ictx);
for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
this->stop();
}
TYPED_TEST(TestImageReplayer, Resync_StartInterrupted)
{
this->bootstrap();
librbd::ImageCtx *ictx;
this->open_local_image(&ictx);
EXPECT_EQ(0, librbd::api::Mirror<>::image_resync(ictx));
this->close_image(ictx);
C_SaferCond cond;
this->m_replayer->start(&cond);
ASSERT_EQ(0, cond.wait());
ASSERT_TRUE(this->m_replayer->is_stopped());
C_SaferCond cond2;
this->m_replayer->start(&cond2);
ASSERT_EQ(0, cond2.wait());
this->create_watch_ctx();
ASSERT_TRUE(this->m_replayer->is_replaying());
this->generate_test_data();
this->open_remote_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->wait_for_replay_complete();
for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
for (int i = 0; i < 2 * TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
this->stop();
}
TYPED_TEST(TestImageReplayerJournal, MultipleReplayFailures_SingleEpoch) {
this->bootstrap();
// inject a snapshot that cannot be unprotected
librbd::ImageCtx *ictx;
this->open_image(this->m_local_ioctx, this->m_image_name, false, &ictx);
ictx->features &= ~RBD_FEATURE_JOURNALING;
librbd::NoOpProgressContext prog_ctx;
ASSERT_EQ(0, ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
"foo", 0, prog_ctx));
ASSERT_EQ(0, ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(),
"foo"));
ASSERT_EQ(0, librbd::cls_client::add_child(&ictx->md_ctx, RBD_CHILDREN,
{ictx->md_ctx.get_id(), "",
ictx->id,
ictx->snap_ids[{cls::rbd::UserSnapshotNamespace(), "foo"}]},
"dummy child id"));
this->close_image(ictx);
// race failed op shut down with new ops
this->open_remote_image(&ictx);
for (uint64_t i = 0; i < 10; ++i) {
std::shared_lock owner_locker{ictx->owner_lock};
C_SaferCond request_lock;
ictx->exclusive_lock->acquire_lock(&request_lock);
ASSERT_EQ(0, request_lock.wait());
C_SaferCond append_ctx;
ictx->journal->append_op_event(
i,
librbd::journal::EventEntry{
librbd::journal::SnapUnprotectEvent{i,
cls::rbd::UserSnapshotNamespace(),
"foo"}},
&append_ctx);
ASSERT_EQ(0, append_ctx.wait());
C_SaferCond commit_ctx;
ictx->journal->commit_op_event(i, 0, &commit_ctx);
ASSERT_EQ(0, commit_ctx.wait());
C_SaferCond release_ctx;
ictx->exclusive_lock->release_lock(&release_ctx);
ASSERT_EQ(0, release_ctx.wait());
}
for (uint64_t i = 0; i < 5; ++i) {
this->start();
this->wait_for_stopped();
this->unwatch();
}
this->close_image(ictx);
}
TYPED_TEST(TestImageReplayerJournal, MultipleReplayFailures_MultiEpoch) {
this->bootstrap();
// inject a snapshot that cannot be unprotected
librbd::ImageCtx *ictx;
this->open_image(this->m_local_ioctx, this->m_image_name, false, &ictx);
ictx->features &= ~RBD_FEATURE_JOURNALING;
librbd::NoOpProgressContext prog_ctx;
ASSERT_EQ(0, ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
"foo", 0, prog_ctx));
ASSERT_EQ(0, ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(),
"foo"));
ASSERT_EQ(0, librbd::cls_client::add_child(&ictx->md_ctx, RBD_CHILDREN,
{ictx->md_ctx.get_id(), "",
ictx->id,
ictx->snap_ids[{cls::rbd::UserSnapshotNamespace(),
"foo"}]},
"dummy child id"));
this->close_image(ictx);
// race failed op shut down with new tag flush
this->open_remote_image(&ictx);
{
std::shared_lock owner_locker{ictx->owner_lock};
C_SaferCond request_lock;
ictx->exclusive_lock->acquire_lock(&request_lock);
ASSERT_EQ(0, request_lock.wait());
C_SaferCond append_ctx;
ictx->journal->append_op_event(
1U,
librbd::journal::EventEntry{
librbd::journal::SnapUnprotectEvent{1U,
cls::rbd::UserSnapshotNamespace(),
"foo"}},
&append_ctx);
ASSERT_EQ(0, append_ctx.wait());
C_SaferCond commit_ctx;
ictx->journal->commit_op_event(1U, 0, &commit_ctx);
ASSERT_EQ(0, commit_ctx.wait());
C_SaferCond release_ctx;
ictx->exclusive_lock->release_lock(&release_ctx);
ASSERT_EQ(0, release_ctx.wait());
}
this->generate_test_data();
this->write_test_data(ictx, this->m_test_data, 0, TEST_IO_SIZE);
for (uint64_t i = 0; i < 5; ++i) {
this->start();
this->wait_for_stopped();
this->unwatch();
}
this->close_image(ictx);
}
TYPED_TEST(TestImageReplayerJournal, Disconnect)
{
this->bootstrap();
// Make sure rbd_mirroring_resync_after_disconnect is not set
EXPECT_EQ(0, this->m_local_cluster->conf_set("rbd_mirroring_resync_after_disconnect", "false"));
// Test start fails if disconnected
librbd::ImageCtx *ictx;
this->generate_test_data();
this->open_remote_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
std::string oid = ::journal::Journaler::header_oid(this->m_remote_image_id);
ASSERT_EQ(0,
cls::journal::client::client_update_state(this->m_remote_ioctx,
oid, this->m_local_mirror_uuid,
cls::journal::CLIENT_STATE_DISCONNECTED));
C_SaferCond cond1;
this->m_replayer->start(&cond1);
ASSERT_EQ(-ENOTCONN, cond1.wait());
// Test start succeeds after resync
this->open_local_image(&ictx);
librbd::Journal<>::request_resync(ictx);
this->close_image(ictx);
C_SaferCond cond2;
this->m_replayer->start(&cond2);
ASSERT_EQ(0, cond2.wait());
this->start();
this->wait_for_replay_complete();
// Test replay stopped after disconnect
this->open_remote_image(&ictx);
for (int i = TEST_IO_COUNT; i < 2 * TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
ASSERT_EQ(0,
cls::journal::client::client_update_state(this->m_remote_ioctx, oid,
this->m_local_mirror_uuid,
cls::journal::CLIENT_STATE_DISCONNECTED));
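  // poke the journal header watchers so the replayer notices the disconnected client state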
bufferlist bl;
ASSERT_EQ(0, this->m_remote_ioctx.notify2(oid, bl, 5000, NULL));
this->wait_for_stopped();
// Test start fails after disconnect
C_SaferCond cond3;
this->m_replayer->start(&cond3);
ASSERT_EQ(-ENOTCONN, cond3.wait());
C_SaferCond cond4;
this->m_replayer->start(&cond4);
ASSERT_EQ(-ENOTCONN, cond4.wait());
// Test automatic resync if rbd_mirroring_resync_after_disconnect is set
EXPECT_EQ(0, this->m_local_cluster->conf_set("rbd_mirroring_resync_after_disconnect", "true"));
// Resync is flagged on first start attempt
C_SaferCond cond5;
this->m_replayer->start(&cond5);
ASSERT_EQ(-ENOTCONN, cond5.wait());
C_SaferCond cond6;
this->m_replayer->start(&cond6);
ASSERT_EQ(0, cond6.wait());
this->wait_for_replay_complete();
this->stop();
}
TYPED_TEST(TestImageReplayerJournal, UpdateFeatures)
{
// TODO add support to snapshot-based mirroring
const uint64_t FEATURES_TO_UPDATE =
RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF;
uint64_t features;
librbd::ImageCtx *ictx;
// Make sure the features we will update are disabled initially
this->open_remote_image(&ictx);
ASSERT_EQ(0, librbd::get_features(ictx, &features));
features &= FEATURES_TO_UPDATE;
if (features) {
ASSERT_EQ(0, ictx->operations->update_features(FEATURES_TO_UPDATE,
false));
}
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
this->bootstrap();
this->open_remote_image(&ictx);
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
this->open_local_image(&ictx);
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
// Start replay and update features
this->start();
this->open_remote_image(&ictx);
ASSERT_EQ(0, ictx->operations->update_features(FEATURES_TO_UPDATE,
true));
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(FEATURES_TO_UPDATE, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(FEATURES_TO_UPDATE, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
this->open_remote_image(&ictx);
ASSERT_EQ(0, ictx->operations->update_features(FEATURES_TO_UPDATE,
false));
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_EQ(0U, features & FEATURES_TO_UPDATE);
this->close_image(ictx);
// Test update_features error does not stop replication
this->open_remote_image(&ictx);
ASSERT_EQ(0, librbd::get_features(ictx, &features));
ASSERT_NE(0U, features & RBD_FEATURE_EXCLUSIVE_LOCK);
ASSERT_EQ(-EINVAL, ictx->operations->update_features(RBD_FEATURE_EXCLUSIVE_LOCK,
false));
this->generate_test_data();
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->read_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
this->stop();
}
TYPED_TEST(TestImageReplayerJournal, MetadataSetRemove)
{
// TODO add support to snapshot-based mirroring
const std::string KEY = "test_key";
const std::string VALUE = "test_value";
librbd::ImageCtx *ictx;
std::string value;
this->bootstrap();
this->start();
// Test metadata_set replication
this->open_remote_image(&ictx);
ASSERT_EQ(0, ictx->operations->metadata_set(KEY, VALUE));
value.clear();
ASSERT_EQ(0, librbd::metadata_get(ictx, KEY, &value));
ASSERT_EQ(VALUE, value);
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
value.clear();
ASSERT_EQ(0, librbd::metadata_get(ictx, KEY, &value));
ASSERT_EQ(VALUE, value);
this->close_image(ictx);
// Test metadata_remove replication
this->open_remote_image(&ictx);
ASSERT_EQ(0, ictx->operations->metadata_remove(KEY));
ASSERT_EQ(-ENOENT, librbd::metadata_get(ictx, KEY, &value));
this->close_image(ictx);
this->wait_for_replay_complete();
this->open_local_image(&ictx);
ASSERT_EQ(-ENOENT, librbd::metadata_get(ictx, KEY, &value));
this->close_image(ictx);
this->stop();
}
TYPED_TEST(TestImageReplayerJournal, MirroringDelay)
{
// TODO add support to snapshot-based mirroring
const double DELAY = 10; // set less than wait_for_replay_complete timeout
librbd::ImageCtx *ictx;
utime_t start_time;
double delay;
this->bootstrap();
ASSERT_EQ(0, this->m_local_cluster->conf_set("rbd_mirroring_replay_delay",
stringify(DELAY).c_str()));
this->open_local_image(&ictx);
ASSERT_EQ(DELAY, ictx->mirroring_replay_delay);
this->close_image(ictx);
this->start();
// Test delay
this->generate_test_data();
this->open_remote_image(&ictx);
start_time = ceph_clock_now();
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->flush(ictx);
this->close_image(ictx);
this->wait_for_replay_complete();
delay = ceph_clock_now() - start_time;
ASSERT_GE(delay, DELAY);
// Test stop when delaying replay
this->open_remote_image(&ictx);
start_time = ceph_clock_now();
for (int i = 0; i < TEST_IO_COUNT; ++i) {
this->write_test_data(ictx, this->m_test_data, TEST_IO_SIZE * i,
TEST_IO_SIZE);
}
this->close_image(ictx);
sleep(DELAY / 2);
this->stop();
this->start();
this->wait_for_replay_complete();
delay = ceph_clock_now() - start_time;
ASSERT_GE(delay, DELAY);
this->stop();
}
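// a remote image rename should be propagated to the local image on replay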
TYPED_TEST(TestImageReplayer, ImageRename) {
this->create_replayer();
this->start();
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
auto image_name = this->get_temp_image_name();
ASSERT_EQ(0, remote_image_ctx->operations->rename(image_name.c_str()));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_image(this->m_local_ioctx, image_name, true, &local_image_ctx);
ASSERT_EQ(image_name, local_image_ctx->name);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
TYPED_TEST(TestImageReplayer, UpdateFeatures) {
const uint64_t FEATURES_TO_UPDATE =
RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF | RBD_FEATURE_DEEP_FLATTEN;
REQUIRE((this->FEATURES & FEATURES_TO_UPDATE) == FEATURES_TO_UPDATE);
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
ASSERT_EQ(0, remote_image_ctx->operations->update_features(
(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF), false));
this->flush(remote_image_ctx);
this->create_replayer();
this->start();
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_local_image(&local_image_ctx);
ASSERT_EQ(0U, local_image_ctx->features & (
RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF));
// enable object-map/fast-diff
ASSERT_EQ(0, remote_image_ctx->operations->update_features(
(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF), true));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, local_image_ctx->state->refresh());
ASSERT_EQ(RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF,
local_image_ctx->features & (
RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF));
// disable deep-flatten
ASSERT_EQ(0, remote_image_ctx->operations->update_features(
RBD_FEATURE_DEEP_FLATTEN, false));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, local_image_ctx->state->refresh());
  ASSERT_EQ(0U, local_image_ctx->features & RBD_FEATURE_DEEP_FLATTEN);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
TYPED_TEST(TestImageReplayer, SnapshotUnprotect) {
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
// create a protected snapshot
librbd::NoOpProgressContext prog_ctx;
ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
ASSERT_EQ(0, remote_image_ctx->operations->snap_protect(
cls::rbd::UserSnapshotNamespace{}, "snap1"));
this->flush(remote_image_ctx);
this->create_replayer();
this->start();
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_local_image(&local_image_ctx);
auto local_snap_id_it = local_image_ctx->snap_ids.find({
{cls::rbd::UserSnapshotNamespace{}}, "snap1"});
ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
auto local_snap_id = local_snap_id_it->second;
auto local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
ASSERT_EQ(RBD_PROTECTION_STATUS_PROTECTED,
local_snap_info_it->second.protection_status);
// unprotect the snapshot
ASSERT_EQ(0, remote_image_ctx->operations->snap_unprotect(
cls::rbd::UserSnapshotNamespace{}, "snap1"));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, local_image_ctx->state->refresh());
local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
ASSERT_EQ(RBD_PROTECTION_STATUS_UNPROTECTED,
local_snap_info_it->second.protection_status);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
TYPED_TEST(TestImageReplayer, SnapshotProtect) {
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
// create an unprotected snapshot
librbd::NoOpProgressContext prog_ctx;
ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
this->flush(remote_image_ctx);
this->create_replayer();
this->start();
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_local_image(&local_image_ctx);
auto local_snap_id_it = local_image_ctx->snap_ids.find({
{cls::rbd::UserSnapshotNamespace{}}, "snap1"});
ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
auto local_snap_id = local_snap_id_it->second;
auto local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
ASSERT_EQ(RBD_PROTECTION_STATUS_UNPROTECTED,
local_snap_info_it->second.protection_status);
// protect the snapshot
ASSERT_EQ(0, remote_image_ctx->operations->snap_protect(
cls::rbd::UserSnapshotNamespace{}, "snap1"));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, local_image_ctx->state->refresh());
local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
ASSERT_EQ(RBD_PROTECTION_STATUS_PROTECTED,
local_snap_info_it->second.protection_status);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
TYPED_TEST(TestImageReplayer, SnapshotRemove) {
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
// create a user snapshot
librbd::NoOpProgressContext prog_ctx;
ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
this->flush(remote_image_ctx);
this->create_replayer();
this->start();
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_local_image(&local_image_ctx);
auto local_snap_id_it = local_image_ctx->snap_ids.find({
{cls::rbd::UserSnapshotNamespace{}}, "snap1"});
ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
// remove the snapshot
ASSERT_EQ(0, remote_image_ctx->operations->snap_remove(
cls::rbd::UserSnapshotNamespace{}, "snap1"));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, local_image_ctx->state->refresh());
local_snap_id_it = local_image_ctx->snap_ids.find({
{cls::rbd::UserSnapshotNamespace{}}, "snap1"});
ASSERT_EQ(local_image_ctx->snap_ids.end(), local_snap_id_it);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
TYPED_TEST(TestImageReplayer, SnapshotRename) {
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
// create a user snapshot
librbd::NoOpProgressContext prog_ctx;
ASSERT_EQ(0, remote_image_ctx->operations->snap_create(
cls::rbd::UserSnapshotNamespace{}, "snap1", 0, prog_ctx));
this->flush(remote_image_ctx);
this->create_replayer();
this->start();
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_local_image(&local_image_ctx);
auto local_snap_id_it = local_image_ctx->snap_ids.find({
{cls::rbd::UserSnapshotNamespace{}}, "snap1"});
ASSERT_NE(local_image_ctx->snap_ids.end(), local_snap_id_it);
auto local_snap_id = local_snap_id_it->second;
auto local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
ASSERT_EQ(RBD_PROTECTION_STATUS_UNPROTECTED,
local_snap_info_it->second.protection_status);
// rename the snapshot
ASSERT_EQ(0, remote_image_ctx->operations->snap_rename(
"snap1", "snap1-renamed"));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, local_image_ctx->state->refresh());
local_snap_info_it = local_image_ctx->snap_info.find(local_snap_id);
ASSERT_NE(local_image_ctx->snap_info.end(), local_snap_info_it);
ASSERT_EQ("snap1-renamed", local_snap_info_it->second.name);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
TYPED_TEST(TestImageReplayer, SnapshotLimit) {
librbd::ImageCtx* remote_image_ctx = nullptr;
this->open_remote_image(&remote_image_ctx);
this->create_replayer();
this->start();
this->wait_for_replay_complete();
// update the snap limit
ASSERT_EQ(0, librbd::api::Snapshot<>::set_limit(remote_image_ctx, 123U));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
librbd::ImageCtx* local_image_ctx = nullptr;
this->open_local_image(&local_image_ctx);
uint64_t local_snap_limit;
ASSERT_EQ(0, librbd::api::Snapshot<>::get_limit(local_image_ctx,
&local_snap_limit));
ASSERT_EQ(123U, local_snap_limit);
// update the limit again
ASSERT_EQ(0, librbd::api::Snapshot<>::set_limit(
remote_image_ctx, std::numeric_limits<uint64_t>::max()));
this->flush(remote_image_ctx);
this->wait_for_replay_complete();
ASSERT_EQ(0, librbd::api::Snapshot<>::get_limit(local_image_ctx,
&local_snap_limit));
ASSERT_EQ(std::numeric_limits<uint64_t>::max(), local_snap_limit);
this->close_image(local_image_ctx);
this->close_image(remote_image_ctx);
this->stop();
}
} // namespace mirror
} // namespace rbd
| 52,219 | 30.363363 | 98 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_ImageSync.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_fixture.h"
#include "include/stringify.h"
#include "include/rbd/librbd.hpp"
#include "common/Cond.h"
#include "journal/Journaler.h"
#include "journal/Settings.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/Operations.h"
#include "librbd/api/Io.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ReadResult.h"
#include "librbd/journal/Types.h"
#include "tools/rbd_mirror/ImageSync.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/Throttler.h"
#include "tools/rbd_mirror/image_replayer/journal/StateBuilder.h"
void register_test_image_sync() {
}
namespace rbd {
namespace mirror {
namespace {
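// synchronously flush any in-flight I/O on the image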
int flush(librbd::ImageCtx *image_ctx) {
C_SaferCond ctx;
auto aio_comp = librbd::io::AioCompletion::create_and_start(
&ctx, image_ctx, librbd::io::AIO_TYPE_FLUSH);
auto req = librbd::io::ImageDispatchSpec::create_flush(
*image_ctx, librbd::io::IMAGE_DISPATCH_LAYER_INTERNAL_START, aio_comp,
librbd::io::FLUSH_SOURCE_INTERNAL, {});
req->send();
return ctx.wait();
}
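// dirty the image with a series of random writes and discards, then flush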
void scribble(librbd::ImageCtx *image_ctx, int num_ops, uint64_t max_size)
{
max_size = std::min<uint64_t>(image_ctx->size, max_size);
for (int i=0; i<num_ops; i++) {
uint64_t off = rand() % (image_ctx->size - max_size + 1);
uint64_t len = 1 + rand() % max_size;
if (rand() % 4 == 0) {
ASSERT_EQ((int)len,
librbd::api::Io<>::discard(
*image_ctx, off, len, image_ctx->discard_granularity_bytes));
} else {
bufferlist bl;
bl.append(std::string(len, '1'));
ASSERT_EQ((int)len, librbd::api::Io<>::write(
*image_ctx, off, len, std::move(bl), 0));
}
}
std::shared_lock owner_locker{image_ctx->owner_lock};
ASSERT_EQ(0, flush(image_ctx));
}
} // anonymous namespace
class TestImageSync : public TestFixture {
public:
void SetUp() override {
TestFixture::SetUp();
create_and_open(m_local_io_ctx, &m_local_image_ctx);
create_and_open(m_remote_io_ctx, &m_remote_image_ctx);
auto cct = reinterpret_cast<CephContext*>(m_local_io_ctx.cct());
m_image_sync_throttler = rbd::mirror::Throttler<>::create(
cct, "rbd_mirror_concurrent_image_syncs");
m_instance_watcher = rbd::mirror::InstanceWatcher<>::create(
m_local_io_ctx, *m_threads->asio_engine, nullptr, m_image_sync_throttler);
m_instance_watcher->handle_acquire_leader();
ContextWQ* context_wq;
librbd::Journal<>::get_work_queue(cct, &context_wq);
m_remote_journaler = new ::journal::Journaler(
context_wq, m_threads->timer, &m_threads->timer_lock,
m_remote_io_ctx, m_remote_image_ctx->id, "mirror-uuid", {}, nullptr);
m_client_meta = {"image-id"};
librbd::journal::ClientData client_data(m_client_meta);
bufferlist client_data_bl;
encode(client_data, client_data_bl);
ASSERT_EQ(0, m_remote_journaler->register_client(client_data_bl));
m_state_builder = rbd::mirror::image_replayer::journal::StateBuilder<
librbd::ImageCtx>::create("global image id");
m_state_builder->remote_journaler = m_remote_journaler;
m_state_builder->remote_client_meta = m_client_meta;
m_sync_point_handler = m_state_builder->create_sync_point_handler();
}
void TearDown() override {
m_instance_watcher->handle_release_leader();
m_state_builder->remote_journaler = nullptr;
m_state_builder->destroy_sync_point_handler();
m_state_builder->destroy();
delete m_remote_journaler;
delete m_instance_watcher;
delete m_image_sync_throttler;
TestFixture::TearDown();
}
void create_and_open(librados::IoCtx &io_ctx, librbd::ImageCtx **image_ctx) {
librbd::RBD rbd;
ASSERT_EQ(0, create_image(rbd, io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(io_ctx, m_image_name, image_ctx));
C_SaferCond ctx;
{
std::shared_lock owner_locker{(*image_ctx)->owner_lock};
(*image_ctx)->exclusive_lock->try_acquire_lock(&ctx);
}
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE((*image_ctx)->exclusive_lock->is_lock_owner());
}
ImageSync<> *create_request(Context *ctx) {
return new ImageSync<>(m_threads, m_local_image_ctx, m_remote_image_ctx,
"mirror-uuid", m_sync_point_handler,
m_instance_watcher, nullptr, ctx);
}
librbd::ImageCtx *m_remote_image_ctx;
librbd::ImageCtx *m_local_image_ctx;
rbd::mirror::Throttler<> *m_image_sync_throttler;
rbd::mirror::InstanceWatcher<> *m_instance_watcher;
::journal::Journaler *m_remote_journaler;
librbd::journal::MirrorPeerClientMeta m_client_meta;
rbd::mirror::image_replayer::journal::StateBuilder<librbd::ImageCtx>* m_state_builder = nullptr;
rbd::mirror::image_sync::SyncPointHandler* m_sync_point_handler = nullptr;
};
TEST_F(TestImageSync, Empty) {
C_SaferCond ctx;
ImageSync<> *request = create_request(&ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
ASSERT_EQ(0U, m_client_meta.sync_points.size());
ASSERT_EQ(0, m_remote_image_ctx->state->refresh());
ASSERT_EQ(0U, m_remote_image_ctx->snap_ids.size());
ASSERT_EQ(0, m_local_image_ctx->state->refresh());
ASSERT_EQ(1U, m_local_image_ctx->snap_ids.size()); // deleted on journal replay
}
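// sync a dirtied image and verify the local contents match the remote, object by object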
TEST_F(TestImageSync, Simple) {
scribble(m_remote_image_ctx, 10, 102400);
C_SaferCond ctx;
ImageSync<> *request = create_request(&ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
int64_t object_size = std::min<int64_t>(
m_remote_image_ctx->size, 1 << m_remote_image_ctx->order);
bufferlist read_remote_bl;
read_remote_bl.append(std::string(object_size, '1'));
bufferlist read_local_bl;
read_local_bl.append(std::string(object_size, '1'));
for (uint64_t offset = 0; offset < m_remote_image_ctx->size;
offset += object_size) {
ASSERT_LE(0, librbd::api::Io<>::read(
*m_remote_image_ctx, offset, object_size,
librbd::io::ReadResult{&read_remote_bl}, 0));
ASSERT_LE(0, librbd::api::Io<>::read(
*m_local_image_ctx, offset, object_size,
librbd::io::ReadResult{&read_local_bl}, 0));
ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl));
}
}
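// shrink the remote image after taking a snapshot and verify the sync handles the resize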
TEST_F(TestImageSync, Resize) {
int64_t object_size = std::min<int64_t>(
m_remote_image_ctx->size, 1 << m_remote_image_ctx->order);
uint64_t off = 0;
uint64_t len = object_size / 10;
bufferlist bl;
bl.append(std::string(len, '1'));
ASSERT_EQ((int)len, librbd::api::Io<>::write(
*m_remote_image_ctx, off, len, std::move(bl), 0));
{
std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
ASSERT_EQ(0, flush(m_remote_image_ctx));
}
ASSERT_EQ(0, create_snap(m_remote_image_ctx, "snap", nullptr));
uint64_t size = object_size - 1;
librbd::NoOpProgressContext no_op_progress_ctx;
ASSERT_EQ(0, m_remote_image_ctx->operations->resize(size, true,
no_op_progress_ctx));
C_SaferCond ctx;
ImageSync<> *request = create_request(&ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
bufferlist read_remote_bl;
read_remote_bl.append(std::string(len, '\0'));
bufferlist read_local_bl;
read_local_bl.append(std::string(len, '\0'));
ASSERT_LE(0, librbd::api::Io<>::read(
*m_remote_image_ctx, off, len,
librbd::io::ReadResult{&read_remote_bl}, 0));
ASSERT_LE(0, librbd::api::Io<>::read(
*m_local_image_ctx, off, len,
librbd::io::ReadResult{&read_local_bl}, 0));
ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl));
}
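// discard part of the remote image after taking a snapshot and verify the hole is synced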
TEST_F(TestImageSync, Discard) {
int64_t object_size = std::min<int64_t>(
m_remote_image_ctx->size, 1 << m_remote_image_ctx->order);
uint64_t off = 0;
uint64_t len = object_size / 10;
bufferlist bl;
bl.append(std::string(len, '1'));
ASSERT_EQ((int)len, librbd::api::Io<>::write(
*m_remote_image_ctx, off, len, std::move(bl), 0));
{
std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
ASSERT_EQ(0, flush(m_remote_image_ctx));
}
ASSERT_EQ(0, create_snap(m_remote_image_ctx, "snap", nullptr));
ASSERT_EQ((int)len - 2,
librbd::api::Io<>::discard(
*m_remote_image_ctx, off + 1, len - 2,
m_remote_image_ctx->discard_granularity_bytes));
{
std::shared_lock owner_locker{m_remote_image_ctx->owner_lock};
ASSERT_EQ(0, flush(m_remote_image_ctx));
}
C_SaferCond ctx;
ImageSync<> *request = create_request(&ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
bufferlist read_remote_bl;
read_remote_bl.append(std::string(object_size, '\0'));
bufferlist read_local_bl;
read_local_bl.append(std::string(object_size, '\0'));
ASSERT_LE(0, librbd::api::Io<>::read(
*m_remote_image_ctx, off, len,
librbd::io::ReadResult{&read_remote_bl}, 0));
ASSERT_LE(0, librbd::api::Io<>::read(
*m_local_image_ctx, off, len,
librbd::io::ReadResult{&read_local_bl}, 0));
ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl));
}
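// exercise sync across several snapshots with interleaved writes and resizes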
TEST_F(TestImageSync, SnapshotStress) {
std::list<std::string> snap_names;
const int num_snaps = 4;
for (int idx = 0; idx <= num_snaps; ++idx) {
scribble(m_remote_image_ctx, 10, 102400);
librbd::NoOpProgressContext no_op_progress_ctx;
uint64_t size = 1 + rand() % m_image_size;
ASSERT_EQ(0, m_remote_image_ctx->operations->resize(size, true,
no_op_progress_ctx));
ASSERT_EQ(0, m_remote_image_ctx->state->refresh());
if (idx < num_snaps) {
snap_names.push_back("snap" + stringify(idx + 1));
ASSERT_EQ(0, create_snap(m_remote_image_ctx, snap_names.back().c_str(),
nullptr));
} else {
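      // an empty name selects the image HEAD during verification below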
snap_names.push_back("");
}
}
C_SaferCond ctx;
ImageSync<> *request = create_request(&ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
int64_t object_size = std::min<int64_t>(
m_remote_image_ctx->size, 1 << m_remote_image_ctx->order);
bufferlist read_remote_bl;
read_remote_bl.append(std::string(object_size, '1'));
bufferlist read_local_bl;
read_local_bl.append(std::string(object_size, '1'));
for (auto &snap_name : snap_names) {
uint64_t remote_snap_id;
{
std::shared_lock remote_image_locker{m_remote_image_ctx->image_lock};
remote_snap_id = m_remote_image_ctx->get_snap_id(
cls::rbd::UserSnapshotNamespace{}, snap_name);
}
uint64_t remote_size;
{
C_SaferCond ctx;
m_remote_image_ctx->state->snap_set(remote_snap_id, &ctx);
ASSERT_EQ(0, ctx.wait());
std::shared_lock remote_image_locker{m_remote_image_ctx->image_lock};
remote_size = m_remote_image_ctx->get_image_size(
m_remote_image_ctx->snap_id);
}
uint64_t local_snap_id;
{
std::shared_lock image_locker{m_local_image_ctx->image_lock};
local_snap_id = m_local_image_ctx->get_snap_id(
cls::rbd::UserSnapshotNamespace{}, snap_name);
}
uint64_t local_size;
{
C_SaferCond ctx;
m_local_image_ctx->state->snap_set(local_snap_id, &ctx);
ASSERT_EQ(0, ctx.wait());
std::shared_lock image_locker{m_local_image_ctx->image_lock};
local_size = m_local_image_ctx->get_image_size(
m_local_image_ctx->snap_id);
bool flags_set;
ASSERT_EQ(0, m_local_image_ctx->test_flags(m_local_image_ctx->snap_id,
RBD_FLAG_OBJECT_MAP_INVALID,
m_local_image_ctx->image_lock,
&flags_set));
ASSERT_FALSE(flags_set);
}
ASSERT_EQ(remote_size, local_size);
for (uint64_t offset = 0; offset < remote_size; offset += object_size) {
ASSERT_LE(0, librbd::api::Io<>::read(
*m_remote_image_ctx, offset, object_size,
librbd::io::ReadResult{&read_remote_bl}, 0));
ASSERT_LE(0, librbd::api::Io<>::read(
*m_local_image_ctx, offset, object_size,
librbd::io::ReadResult{&read_local_bl}, 0));
ASSERT_TRUE(read_remote_bl.contents_equal(read_local_bl));
}
}
}
} // namespace mirror
} // namespace rbd
| 12,634 | 32.693333 | 98 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_InstanceWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/Utils.h"
#include "librbd/internal.h"
#include "test/rbd_mirror/test_fixture.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "common/Cond.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
using rbd::mirror::InstanceWatcher;
void register_test_instance_watcher() {
}
class TestInstanceWatcher : public ::rbd::mirror::TestFixture {
public:
std::string m_instance_id;
std::string m_oid;
void SetUp() override {
TestFixture::SetUp();
m_local_io_ctx.remove(RBD_MIRROR_LEADER);
EXPECT_EQ(0, m_local_io_ctx.create(RBD_MIRROR_LEADER, true));
m_instance_id = stringify(m_local_io_ctx.get_instance_id());
m_oid = RBD_MIRROR_INSTANCE_PREFIX + m_instance_id;
}
void get_instances(std::vector<std::string> *instance_ids) {
instance_ids->clear();
C_SaferCond on_get;
InstanceWatcher<>::get_instances(m_local_io_ctx, instance_ids, &on_get);
EXPECT_EQ(0, on_get.wait());
}
};
TEST_F(TestInstanceWatcher, InitShutdown)
{
InstanceWatcher<> instance_watcher(m_local_io_ctx, *m_threads->asio_engine,
nullptr, nullptr, m_instance_id);
std::vector<std::string> instance_ids;
get_instances(&instance_ids);
ASSERT_EQ(0U, instance_ids.size());
uint64_t size;
ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(m_oid, &size, nullptr));
// Init
ASSERT_EQ(0, instance_watcher.init());
get_instances(&instance_ids);
ASSERT_EQ(1U, instance_ids.size());
ASSERT_EQ(m_instance_id, instance_ids[0]);
ASSERT_EQ(0, m_local_io_ctx.stat(m_oid, &size, nullptr));
std::list<obj_watch_t> watchers;
ASSERT_EQ(0, m_local_io_ctx.list_watchers(m_oid, &watchers));
ASSERT_EQ(1U, watchers.size());
ASSERT_EQ(m_instance_id, stringify(watchers.begin()->watcher_id));
get_instances(&instance_ids);
ASSERT_EQ(1U, instance_ids.size());
// Shutdown
instance_watcher.shut_down();
ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(m_oid, &size, nullptr));
get_instances(&instance_ids);
ASSERT_EQ(0U, instance_ids.size());
}
TEST_F(TestInstanceWatcher, Remove)
{
std::string instance_id = "instance_id";
std::string oid = RBD_MIRROR_INSTANCE_PREFIX + instance_id;
std::vector<std::string> instance_ids;
get_instances(&instance_ids);
ASSERT_EQ(0U, instance_ids.size());
uint64_t size;
ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(oid, &size, nullptr));
librados::Rados cluster;
librados::IoCtx io_ctx;
ASSERT_EQ("", connect_cluster_pp(cluster));
ASSERT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx));
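  // watch from a separate connection: removing an instance breaks its lock and blocklists the owner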
  InstanceWatcher<> instance_watcher(io_ctx, *m_threads->asio_engine,
                                     nullptr, nullptr, instance_id);
// Init
ASSERT_EQ(0, instance_watcher.init());
get_instances(&instance_ids);
ASSERT_EQ(1U, instance_ids.size());
ASSERT_EQ(instance_id, instance_ids[0]);
ASSERT_EQ(0, m_local_io_ctx.stat(oid, &size, nullptr));
std::list<obj_watch_t> watchers;
ASSERT_EQ(0, m_local_io_ctx.list_watchers(oid, &watchers));
ASSERT_EQ(1U, watchers.size());
// Remove
C_SaferCond on_remove;
InstanceWatcher<>::remove_instance(m_local_io_ctx, *m_threads->asio_engine,
"instance_id", &on_remove);
ASSERT_EQ(0, on_remove.wait());
ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(oid, &size, nullptr));
get_instances(&instance_ids);
ASSERT_EQ(0U, instance_ids.size());
// Shutdown
instance_watcher.shut_down();
ASSERT_EQ(-ENOENT, m_local_io_ctx.stat(m_oid, &size, nullptr));
get_instances(&instance_ids);
ASSERT_EQ(0U, instance_ids.size());
// Remove NOENT
C_SaferCond on_remove_noent;
InstanceWatcher<>::remove_instance(m_local_io_ctx, *m_threads->asio_engine,
instance_id, &on_remove_noent);
ASSERT_EQ(0, on_remove_noent.wait());
}
| 4,097 | 29.81203 | 77 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_Instances.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "cls/rbd/cls_rbd_client.h"
#include "test/rbd_mirror/test_fixture.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/Instances.h"
#include "tools/rbd_mirror/Threads.h"
#include "common/Cond.h"
#include "test/librados/test.h"
#include "gtest/gtest.h"
#include <vector>
using rbd::mirror::InstanceWatcher;
using rbd::mirror::Instances;
void register_test_instances() {
}
class TestInstances : public ::rbd::mirror::TestFixture {
public:
struct Listener : public rbd::mirror::instances::Listener {
std::mutex lock;
struct Instance {
uint32_t count = 0;
std::set<std::string> ids;
C_SaferCond ctx;
};
Instance add;
Instance remove;
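    // count down the expected notifications and fire ctx once all ids have arrived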
void handle(const InstanceIds& instance_ids, Instance* instance) {
std::unique_lock<std::mutex> locker(lock);
for (auto& instance_id : instance_ids) {
ceph_assert(instance->count > 0);
--instance->count;
instance->ids.insert(instance_id);
if (instance->count == 0) {
instance->ctx.complete(0);
}
}
}
void handle_added(const InstanceIds& instance_ids) override {
handle(instance_ids, &add);
}
void handle_removed(const InstanceIds& instance_ids) override {
handle(instance_ids, &remove);
}
};
virtual void SetUp() {
TestFixture::SetUp();
m_local_io_ctx.remove(RBD_MIRROR_LEADER);
EXPECT_EQ(0, m_local_io_ctx.create(RBD_MIRROR_LEADER, true));
m_instance_id = stringify(m_local_io_ctx.get_instance_id());
}
Listener m_listener;
std::string m_instance_id;
};
TEST_F(TestInstances, InitShutdown)
{
m_listener.add.count = 1;
Instances<> instances(m_threads, m_local_io_ctx, m_instance_id, m_listener);
std::string instance_id = "instance_id";
ASSERT_EQ(0, librbd::cls_client::mirror_instances_add(&m_local_io_ctx,
instance_id));
C_SaferCond on_init;
instances.init(&on_init);
ASSERT_EQ(0, on_init.wait());
ASSERT_LT(0U, m_listener.add.count);
instances.unblock_listener();
ASSERT_EQ(0, m_listener.add.ctx.wait());
ASSERT_EQ(std::set<std::string>({instance_id}), m_listener.add.ids);
C_SaferCond on_shut_down;
instances.shut_down(&on_shut_down);
ASSERT_EQ(0, on_shut_down.wait());
}
TEST_F(TestInstances, InitEnoent)
{
Instances<> instances(m_threads, m_local_io_ctx, m_instance_id, m_listener);
m_local_io_ctx.remove(RBD_MIRROR_LEADER);
C_SaferCond on_init;
instances.init(&on_init);
ASSERT_EQ(0, on_init.wait());
C_SaferCond on_shut_down;
instances.shut_down(&on_shut_down);
ASSERT_EQ(0, on_shut_down.wait());
}
TEST_F(TestInstances, NotifyRemove)
{
// speed testing up a little
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_heartbeat_interval", "1"));
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_max_missed_heartbeats",
"2"));
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_max_acquire_attempts_before_break",
"0"));
m_listener.add.count = 2;
m_listener.remove.count = 1;
Instances<> instances(m_threads, m_local_io_ctx, m_instance_id, m_listener);
std::string instance_id1 = "instance_id1";
std::string instance_id2 = "instance_id2";
ASSERT_EQ(0, librbd::cls_client::mirror_instances_add(&m_local_io_ctx,
instance_id1));
C_SaferCond on_init;
instances.init(&on_init);
ASSERT_EQ(0, on_init.wait());
instances.acked({instance_id2});
ASSERT_LT(0U, m_listener.add.count);
instances.unblock_listener();
ASSERT_EQ(0, m_listener.add.ctx.wait());
ASSERT_EQ(std::set<std::string>({instance_id1, instance_id2}),
m_listener.add.ids);
std::vector<std::string> instance_ids;
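  // keep acking instance_id1 while instance_id2 is reaped for missed heartbeats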
for (int i = 0; i < 100; i++) {
instances.acked({instance_id1});
if (m_listener.remove.count > 0) {
usleep(250000);
}
}
instances.acked({instance_id1});
ASSERT_EQ(0, m_listener.remove.ctx.wait());
ASSERT_EQ(std::set<std::string>({instance_id2}),
m_listener.remove.ids);
C_SaferCond on_get;
instances.acked({instance_id1});
InstanceWatcher<>::get_instances(m_local_io_ctx, &instance_ids, &on_get);
EXPECT_EQ(0, on_get.wait());
EXPECT_EQ(1U, instance_ids.size());
ASSERT_EQ(instance_ids[0], instance_id1);
C_SaferCond on_shut_down;
instances.shut_down(&on_shut_down);
ASSERT_EQ(0, on_shut_down.wait());
}
| 4,603 | 26.90303 | 86 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_LeaderWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#include "librbd/api/Mirror.h"
#include "test/librbd/test_support.h"
#include "test/rbd_mirror/test_fixture.h"
#include "tools/rbd_mirror/LeaderWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "common/Cond.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
using librbd::util::unique_lock_name;
using rbd::mirror::LeaderWatcher;
void register_test_leader_watcher() {
}
class TestLeaderWatcher : public ::rbd::mirror::TestFixture {
public:
class Listener : public rbd::mirror::leader_watcher::Listener {
public:
Listener()
: m_test_lock(ceph::make_mutex(
unique_lock_name("LeaderWatcher::m_test_lock", this))) {
}
void on_acquire(int r, Context *ctx) {
std::lock_guard locker{m_test_lock};
m_on_acquire_r = r;
m_on_acquire = ctx;
}
void on_release(int r, Context *ctx) {
std::lock_guard locker{m_test_lock};
m_on_release_r = r;
m_on_release = ctx;
}
int acquire_count() const {
std::lock_guard locker{m_test_lock};
return m_acquire_count;
}
int release_count() const {
std::lock_guard locker{m_test_lock};
return m_release_count;
}
void post_acquire_handler(Context *on_finish) override {
std::lock_guard locker{m_test_lock};
m_acquire_count++;
on_finish->complete(m_on_acquire_r);
m_on_acquire_r = 0;
if (m_on_acquire != nullptr) {
m_on_acquire->complete(0);
m_on_acquire = nullptr;
}
}
void pre_release_handler(Context *on_finish) override {
std::lock_guard locker{m_test_lock};
m_release_count++;
on_finish->complete(m_on_release_r);
m_on_release_r = 0;
if (m_on_release != nullptr) {
m_on_release->complete(0);
m_on_release = nullptr;
}
}
void update_leader_handler(const std::string &leader_instance_id) override {
}
void handle_instances_added(const InstanceIds& instance_ids) override {
}
void handle_instances_removed(const InstanceIds& instance_ids) override {
}
private:
mutable ceph::mutex m_test_lock;
int m_acquire_count = 0;
int m_release_count = 0;
int m_on_acquire_r = 0;
int m_on_release_r = 0;
Context *m_on_acquire = nullptr;
Context *m_on_release = nullptr;
};
struct Connection {
librados::Rados cluster;
librados::IoCtx io_ctx;
};
std::list<std::unique_ptr<Connection> > m_connections;
void SetUp() override {
TestFixture::SetUp();
EXPECT_EQ(0, librbd::api::Mirror<>::mode_set(m_local_io_ctx,
RBD_MIRROR_MODE_POOL));
if (is_librados_test_stub(*_rados)) {
// speed testing up a little
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_heartbeat_interval",
"1"));
}
}
librados::IoCtx &create_connection(bool no_heartbeats = false) {
m_connections.push_back(std::unique_ptr<Connection>(new Connection()));
Connection *c = m_connections.back().get();
EXPECT_EQ("", connect_cluster_pp(c->cluster));
if (no_heartbeats) {
EXPECT_EQ(0, c->cluster.conf_set("rbd_mirror_leader_heartbeat_interval",
"3600"));
} else if (is_librados_test_stub(*_rados)) {
EXPECT_EQ(0, c->cluster.conf_set("rbd_mirror_leader_heartbeat_interval",
"1"));
}
EXPECT_EQ(0, c->cluster.ioctx_create(_local_pool_name.c_str(), c->io_ctx));
return c->io_ctx;
}
};
TEST_F(TestLeaderWatcher, InitShutdown)
{
Listener listener;
LeaderWatcher<> leader_watcher(m_threads, m_local_io_ctx, &listener);
C_SaferCond on_init_acquire;
listener.on_acquire(0, &on_init_acquire);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_init_acquire.wait());
ASSERT_TRUE(leader_watcher.is_leader());
leader_watcher.shut_down();
ASSERT_EQ(1, listener.acquire_count());
ASSERT_EQ(1, listener.release_count());
ASSERT_FALSE(leader_watcher.is_leader());
}
TEST_F(TestLeaderWatcher, Release)
{
Listener listener;
LeaderWatcher<> leader_watcher(m_threads, m_local_io_ctx, &listener);
C_SaferCond on_init_acquire;
listener.on_acquire(0, &on_init_acquire);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_init_acquire.wait());
ASSERT_TRUE(leader_watcher.is_leader());
C_SaferCond on_release;
C_SaferCond on_acquire;
listener.on_release(0, &on_release);
listener.on_acquire(0, &on_acquire);
leader_watcher.release_leader();
ASSERT_EQ(0, on_release.wait());
ASSERT_FALSE(leader_watcher.is_leader());
  // wait for the lock to be re-acquired since there is no other locker
ASSERT_EQ(0, on_acquire.wait());
ASSERT_TRUE(leader_watcher.is_leader());
C_SaferCond on_release2;
listener.on_release(0, &on_release2);
leader_watcher.release_leader();
ASSERT_EQ(0, on_release2.wait());
leader_watcher.shut_down();
ASSERT_EQ(2, listener.acquire_count());
ASSERT_EQ(2, listener.release_count());
}
TEST_F(TestLeaderWatcher, ListenerError)
{
Listener listener;
LeaderWatcher<> leader_watcher(m_threads, m_local_io_ctx, &listener);
// make listener return error on acquire
C_SaferCond on_init_acquire, on_init_release;
listener.on_acquire(-EINVAL, &on_init_acquire);
listener.on_release(0, &on_init_release);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_init_acquire.wait());
ASSERT_EQ(0, on_init_release.wait());
ASSERT_FALSE(leader_watcher.is_leader());
  // wait for the lock to be re-acquired since there is no other locker
C_SaferCond on_acquire;
listener.on_acquire(0, &on_acquire);
ASSERT_EQ(0, on_acquire.wait());
ASSERT_TRUE(leader_watcher.is_leader());
// make listener return error on release
C_SaferCond on_release;
listener.on_release(-EINVAL, &on_release);
leader_watcher.release_leader();
ASSERT_EQ(0, on_release.wait());
ASSERT_FALSE(leader_watcher.is_leader());
leader_watcher.shut_down();
ASSERT_EQ(2, listener.acquire_count());
ASSERT_EQ(2, listener.release_count());
ASSERT_FALSE(leader_watcher.is_leader());
}
TEST_F(TestLeaderWatcher, Two)
{
Listener listener1;
LeaderWatcher<> leader_watcher1(m_threads, create_connection(), &listener1);
C_SaferCond on_init_acquire;
listener1.on_acquire(0, &on_init_acquire);
ASSERT_EQ(0, leader_watcher1.init());
ASSERT_EQ(0, on_init_acquire.wait());
Listener listener2;
LeaderWatcher<> leader_watcher2(m_threads, create_connection(), &listener2);
ASSERT_EQ(0, leader_watcher2.init());
ASSERT_TRUE(leader_watcher1.is_leader());
ASSERT_FALSE(leader_watcher2.is_leader());
C_SaferCond on_release;
C_SaferCond on_acquire;
listener1.on_release(0, &on_release);
listener2.on_acquire(0, &on_acquire);
leader_watcher1.release_leader();
ASSERT_EQ(0, on_release.wait());
ASSERT_FALSE(leader_watcher1.is_leader());
  // wait for the lock to be acquired by the other watcher
ASSERT_EQ(0, on_acquire.wait());
ASSERT_TRUE(leader_watcher2.is_leader());
leader_watcher1.shut_down();
leader_watcher2.shut_down();
ASSERT_EQ(1, listener1.acquire_count());
ASSERT_EQ(1, listener1.release_count());
ASSERT_EQ(1, listener2.acquire_count());
ASSERT_EQ(1, listener2.release_count());
}
TEST_F(TestLeaderWatcher, Break)
{
Listener listener1, listener2;
LeaderWatcher<> leader_watcher1(m_threads,
create_connection(true /* no heartbeats */),
&listener1);
LeaderWatcher<> leader_watcher2(m_threads, create_connection(), &listener2);
C_SaferCond on_init_acquire;
listener1.on_acquire(0, &on_init_acquire);
ASSERT_EQ(0, leader_watcher1.init());
ASSERT_EQ(0, on_init_acquire.wait());
C_SaferCond on_acquire;
listener2.on_acquire(0, &on_acquire);
ASSERT_EQ(0, leader_watcher2.init());
ASSERT_FALSE(leader_watcher2.is_leader());
  // wait for the lock to be broken due to missed heartbeats and re-acquired
ASSERT_EQ(0, on_acquire.wait());
ASSERT_TRUE(leader_watcher2.is_leader());
leader_watcher1.shut_down();
leader_watcher2.shut_down();
}
TEST_F(TestLeaderWatcher, Stress)
{
const int WATCHERS_COUNT = 20;
std::list<LeaderWatcher<> *> leader_watchers;
Listener listener;
for (int i = 0; i < WATCHERS_COUNT; i++) {
auto leader_watcher =
new LeaderWatcher<>(m_threads, create_connection(), &listener);
leader_watchers.push_back(leader_watcher);
}
C_SaferCond on_init_acquire;
listener.on_acquire(0, &on_init_acquire);
for (auto &leader_watcher : leader_watchers) {
ASSERT_EQ(0, leader_watcher->init());
}
ASSERT_EQ(0, on_init_acquire.wait());
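  // repeatedly shut down the current leader and wait for another watcher to take over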
while (true) {
C_SaferCond on_acquire;
listener.on_acquire(0, &on_acquire);
std::unique_ptr<LeaderWatcher<> > leader_watcher;
for (auto it = leader_watchers.begin(); it != leader_watchers.end(); ) {
if ((*it)->is_leader()) {
ASSERT_FALSE(leader_watcher);
leader_watcher.reset(*it);
it = leader_watchers.erase(it);
} else {
it++;
}
}
ASSERT_TRUE(leader_watcher);
leader_watcher->shut_down();
if (leader_watchers.empty()) {
break;
}
ASSERT_EQ(0, on_acquire.wait());
}
}
| 9,354 | 28.326019 | 80 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_PoolWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "include/stringify.h"
#include "test/rbd_mirror/test_fixture.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "include/rbd_types.h"
#include "librbd/internal.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Mirror.h"
#include "common/Cond.h"
#include "common/errno.h"
#include "common/ceph_mutex.h"
#include "tools/rbd_mirror/PoolWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/Types.h"
#include "tools/rbd_mirror/pool_watcher/Types.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
#include <boost/scope_exit.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <set>
#include <vector>
using namespace std::chrono_literals;
using rbd::mirror::ImageId;
using rbd::mirror::ImageIds;
using rbd::mirror::PoolWatcher;
using rbd::mirror::PeerSpec;
using rbd::mirror::RadosRef;
using std::map;
using std::set;
using std::string;
void register_test_pool_watcher() {
}
class TestPoolWatcher : public ::rbd::mirror::TestFixture {
public:
TestPoolWatcher()
: m_pool_watcher_listener(this),
m_image_number(0), m_snap_number(0)
{
m_cluster = std::make_shared<librados::Rados>();
EXPECT_EQ("", connect_cluster_pp(*m_cluster));
}
void TearDown() override {
if (m_pool_watcher) {
C_SaferCond ctx;
m_pool_watcher->shut_down(&ctx);
EXPECT_EQ(0, ctx.wait());
}
m_cluster->wait_for_latest_osdmap();
for (auto& pool : m_pools) {
EXPECT_EQ(0, m_cluster->pool_delete(pool.c_str()));
}
TestFixture::TearDown();
}
struct PoolWatcherListener : public rbd::mirror::pool_watcher::Listener {
TestPoolWatcher *test;
ceph::condition_variable cond;
ImageIds image_ids;
explicit PoolWatcherListener(TestPoolWatcher *test) : test(test) {
}
void handle_update(const std::string &mirror_uuid,
ImageIds &&added_image_ids,
ImageIds &&removed_image_ids) override {
std::lock_guard locker{test->m_lock};
for (auto &image_id : removed_image_ids) {
image_ids.erase(image_id);
}
image_ids.insert(added_image_ids.begin(), added_image_ids.end());
cond.notify_all();
}
};
void create_pool(bool enable_mirroring, const PeerSpec &peer, string *name=nullptr) {
string pool_name = get_temp_pool_name("test-rbd-mirror-");
ASSERT_EQ(0, m_cluster->pool_create(pool_name.c_str()));
int64_t pool_id = m_cluster->pool_lookup(pool_name.c_str());
ASSERT_GE(pool_id, 0);
m_pools.insert(pool_name);
librados::IoCtx ioctx;
ASSERT_EQ(0, m_cluster->ioctx_create2(pool_id, ioctx));
ioctx.application_enable("rbd", true);
m_pool_watcher.reset(new PoolWatcher<>(m_threads, ioctx, "mirror uuid",
m_pool_watcher_listener));
if (enable_mirroring) {
ASSERT_EQ(0, librbd::api::Mirror<>::mode_set(ioctx,
RBD_MIRROR_MODE_POOL));
std::string uuid;
ASSERT_EQ(0, librbd::api::Mirror<>::peer_site_add(
ioctx, &uuid, RBD_MIRROR_PEER_DIRECTION_RX_TX, peer.cluster_name,
peer.client_name));
}
if (name != nullptr) {
*name = pool_name;
}
m_pool_watcher->init();
}
string get_image_id(librados::IoCtx *ioctx, const string &image_name) {
string obj = librbd::util::id_obj_name(image_name);
string id;
EXPECT_EQ(0, librbd::cls_client::get_id(ioctx, obj, &id));
return id;
}
void create_image(const string &pool_name, bool mirrored=true,
string *image_name=nullptr) {
uint64_t features = librbd::util::get_rbd_default_features(g_ceph_context);
string name = "image" + stringify(++m_image_number);
if (mirrored) {
features |= RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING;
}
librados::IoCtx ioctx;
ASSERT_EQ(0, m_cluster->ioctx_create(pool_name.c_str(), ioctx));
int order = 0;
ASSERT_EQ(0, librbd::create(ioctx, name.c_str(), 1 << 22, false,
features, &order, 0, 0));
if (mirrored) {
librbd::Image image;
librbd::RBD rbd;
    EXPECT_EQ(0, rbd.open(ioctx, image, name.c_str()));
    EXPECT_EQ(0, image.mirror_image_enable2(RBD_MIRROR_IMAGE_MODE_JOURNAL));
librbd::mirror_image_info_t mirror_image_info;
ASSERT_EQ(0, image.mirror_image_get_info(&mirror_image_info,
sizeof(mirror_image_info)));
image.close();
m_mirrored_images.insert(ImageId(
mirror_image_info.global_id, get_image_id(&ioctx, name)));
}
if (image_name != nullptr)
*image_name = name;
}
void clone_image(const string &parent_pool_name,
const string &parent_image_name,
const string &clone_pool_name,
bool mirrored=true,
string *image_name=nullptr) {
librados::IoCtx pioctx, cioctx;
ASSERT_EQ(0, m_cluster->ioctx_create(parent_pool_name.c_str(), pioctx));
ASSERT_EQ(0, m_cluster->ioctx_create(clone_pool_name.c_str(), cioctx));
string snap_name = "snap" + stringify(++m_snap_number);
{
librbd::ImageCtx *ictx = new librbd::ImageCtx(parent_image_name.c_str(),
"", "", pioctx, false);
    EXPECT_EQ(0, ictx->state->open(0));
librbd::NoOpProgressContext prog_ctx;
EXPECT_EQ(0, ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
snap_name, 0, prog_ctx));
EXPECT_EQ(0, ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(),
snap_name));
ictx->state->close();
}
uint64_t features = librbd::util::get_rbd_default_features(g_ceph_context);
string name = "clone" + stringify(++m_image_number);
if (mirrored) {
features |= RBD_FEATURE_EXCLUSIVE_LOCK | RBD_FEATURE_JOURNALING;
}
int order = 0;
  EXPECT_EQ(0, librbd::clone(pioctx, parent_image_name.c_str(),
                             snap_name.c_str(), cioctx, name.c_str(),
                             features, &order, 0, 0));
if (mirrored) {
librbd::Image image;
librbd::RBD rbd;
    EXPECT_EQ(0, rbd.open(cioctx, image, name.c_str()));
    EXPECT_EQ(0, image.mirror_image_enable2(RBD_MIRROR_IMAGE_MODE_JOURNAL));
librbd::mirror_image_info_t mirror_image_info;
ASSERT_EQ(0, image.mirror_image_get_info(&mirror_image_info,
sizeof(mirror_image_info)));
image.close();
m_mirrored_images.insert(ImageId(
mirror_image_info.global_id, get_image_id(&cioctx, name)));
}
if (image_name != nullptr)
*image_name = name;
}
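  // wait (up to 10s per update) for the listener to converge on the expected image set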
void check_images() {
std::unique_lock l{m_lock};
while (m_mirrored_images != m_pool_watcher_listener.image_ids) {
if (m_pool_watcher_listener.cond.wait_for(l, 10s) == std::cv_status::timeout) {
break;
}
}
ASSERT_EQ(m_mirrored_images, m_pool_watcher_listener.image_ids);
}
ceph::mutex m_lock = ceph::make_mutex("TestPoolWatcherLock");
RadosRef m_cluster;
PoolWatcherListener m_pool_watcher_listener;
std::unique_ptr<PoolWatcher<> > m_pool_watcher;
set<string> m_pools;
ImageIds m_mirrored_images;
uint64_t m_image_number;
uint64_t m_snap_number;
};
TEST_F(TestPoolWatcher, EmptyPool) {
string uuid1 = "00000000-0000-0000-0000-000000000001";
PeerSpec site1(uuid1, "site1", "mirror1");
create_pool(true, site1);
check_images();
}
TEST_F(TestPoolWatcher, ReplicatedPools) {
string uuid1 = "00000000-0000-0000-0000-000000000001";
PeerSpec site1(uuid1, "site1", "mirror1");
string first_pool, local_pool, last_pool;
create_pool(true, site1, &first_pool);
check_images();
create_image(first_pool);
check_images();
string parent_image, parent_image2;
create_image(first_pool, true, &parent_image);
check_images();
clone_image(first_pool, parent_image, first_pool);
check_images();
clone_image(first_pool, parent_image, first_pool, true, &parent_image2);
check_images();
create_image(first_pool, false);
check_images();
}
| 8,152 | 30.723735 | 87 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_fixture.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_types.h"
#include "test/rbd_mirror/test_fixture.h"
#include "include/stringify.h"
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/internal.h"
#include "test/librados/test_cxx.h"
#include "tools/rbd_mirror/Threads.h"
namespace rbd {
namespace mirror {
std::string TestFixture::_local_pool_name;
std::string TestFixture::_remote_pool_name;
std::shared_ptr<librados::Rados> TestFixture::_rados;
uint64_t TestFixture::_image_number = 0;
std::string TestFixture::_data_pool;
TestFixture::TestFixture() {
}
void TestFixture::SetUpTestCase() {
_rados = std::shared_ptr<librados::Rados>(new librados::Rados());
ASSERT_EQ("", connect_cluster_pp(*_rados.get()));
ASSERT_EQ(0, _rados->conf_set("rbd_cache", "false"));
_local_pool_name = get_temp_pool_name("test-rbd-mirror-");
ASSERT_EQ(0, _rados->pool_create(_local_pool_name.c_str()));
librados::IoCtx local_ioctx;
ASSERT_EQ(0, _rados->ioctx_create(_local_pool_name.c_str(), local_ioctx));
local_ioctx.application_enable("rbd", true);
_remote_pool_name = get_temp_pool_name("test-rbd-mirror-");
ASSERT_EQ(0, _rados->pool_create(_remote_pool_name.c_str()));
librados::IoCtx remote_ioctx;
ASSERT_EQ(0, _rados->ioctx_create(_remote_pool_name.c_str(), remote_ioctx));
remote_ioctx.application_enable("rbd", true);
ASSERT_EQ(0, create_image_data_pool(_data_pool));
if (!_data_pool.empty()) {
printf("using image data pool: %s\n", _data_pool.c_str());
}
}
void TestFixture::TearDownTestCase() {
if (!_data_pool.empty()) {
ASSERT_EQ(0, _rados->pool_delete(_data_pool.c_str()));
}
ASSERT_EQ(0, _rados->pool_delete(_remote_pool_name.c_str()));
ASSERT_EQ(0, _rados->pool_delete(_local_pool_name.c_str()));
_rados->shutdown();
}
void TestFixture::SetUp() {
static bool seeded = false;
if (!seeded) {
seeded = true;
int seed = getpid();
std::cout << "seed " << seed << std::endl;
srand(seed);
}
ASSERT_EQ(0, _rados->ioctx_create(_local_pool_name.c_str(), m_local_io_ctx));
ASSERT_EQ(0, _rados->ioctx_create(_remote_pool_name.c_str(), m_remote_io_ctx));
m_image_name = get_temp_image_name();
m_threads = new rbd::mirror::Threads<>(_rados);
}
void TestFixture::TearDown() {
for (auto image_ctx : m_image_ctxs) {
image_ctx->state->close();
}
m_remote_io_ctx.close();
m_local_io_ctx.close();
delete m_threads;
}
int TestFixture::create_image(librbd::RBD &rbd, librados::IoCtx &ioctx,
const std::string &name, uint64_t size) {
int order = 18;
return rbd.create2(ioctx, name.c_str(), size, RBD_FEATURES_ALL, &order);
}
int TestFixture::open_image(librados::IoCtx &io_ctx,
const std::string &image_name,
librbd::ImageCtx **image_ctx) {
*image_ctx = new librbd::ImageCtx(image_name.c_str(), "", nullptr, io_ctx,
false);
m_image_ctxs.insert(*image_ctx);
return (*image_ctx)->state->open(0);
}
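// Creates a user snapshot, refreshes the image so the new snap becomes
// visible in snap_ids, verifies that it exists, and optionally hands the
// snap id back to the caller.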
int TestFixture::create_snap(librbd::ImageCtx *image_ctx, const char* snap_name,
librados::snap_t *snap_id) {
librbd::NoOpProgressContext prog_ctx;
int r = image_ctx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
snap_name, 0, prog_ctx);
if (r < 0) {
return r;
}
r = image_ctx->state->refresh();
if (r < 0) {
return r;
}
if (image_ctx->snap_ids.count({cls::rbd::UserSnapshotNamespace(),
snap_name}) == 0) {
return -ENOENT;
}
if (snap_id != nullptr) {
*snap_id = image_ctx->snap_ids[{cls::rbd::UserSnapshotNamespace(),
snap_name}];
}
return 0;
}
std::string TestFixture::get_temp_image_name() {
++_image_number;
return "image" + stringify(_image_number);
}
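// If rbd_default_data_pool is configured, create that pool and enable the
// "rbd" application on it so test images can place their data there; a
// blank setting leaves data_pool empty and is not treated as an error.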
int TestFixture::create_image_data_pool(std::string &data_pool) {
std::string pool;
int r = _rados->conf_get("rbd_default_data_pool", pool);
if (r != 0) {
return r;
} else if (pool.empty()) {
return 0;
}
r = _rados->pool_create(pool.c_str());
if (r < 0) {
return r;
}
librados::IoCtx data_ioctx;
r = _rados->ioctx_create(pool.c_str(), data_ioctx);
if (r < 0) {
return r;
}
data_ioctx.application_enable("rbd", true);
data_pool = pool;
return 0;
}
} // namespace mirror
} // namespace rbd
| 4,486 | 26.697531 | 81 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_fixture.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
#define CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include <gtest/gtest.h>
#include <memory>
#include <set>
namespace librbd {
class ImageCtx;
class RBD;
}
namespace rbd {
namespace mirror {
template <typename> class Threads;
class TestFixture : public ::testing::Test {
public:
TestFixture();
static void SetUpTestCase();
static void TearDownTestCase();
void SetUp() override;
void TearDown() override;
librados::IoCtx m_local_io_ctx;
librados::IoCtx m_remote_io_ctx;
std::string m_image_name;
uint64_t m_image_size = 1 << 24;
std::set<librbd::ImageCtx *> m_image_ctxs;
Threads<librbd::ImageCtx> *m_threads = nullptr;
int create_image(librbd::RBD &rbd, librados::IoCtx &ioctx,
const std::string &name, uint64_t size);
int open_image(librados::IoCtx &io_ctx, const std::string &image_name,
librbd::ImageCtx **image_ctx);
int create_snap(librbd::ImageCtx *image_ctx, const char* snap_name,
librados::snap_t *snap_id = nullptr);
static std::string get_temp_image_name();
static int create_image_data_pool(std::string &data_pool);
static std::string _local_pool_name;
static std::string _remote_pool_name;
static std::shared_ptr<librados::Rados> _rados;
static uint64_t _image_number;
static std::string _data_pool;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_TEST_RBD_MIRROR_TEST_FIXTURE_H
| 1,623 | 23.606061 | 72 |
h
|
null |
ceph-main/src/test/rbd_mirror/test_main.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/perf_counters.h"
#include "include/rados/librados.hpp"
#include "global/global_context.h"
#include "test/librados/test_cxx.h"
#include "gtest/gtest.h"
#include <iostream>
#include <string>
PerfCounters *g_journal_perf_counters = nullptr;
PerfCounters *g_snapshot_perf_counters = nullptr;
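// The register_test_*() hooks below are defined in the individual test
// translation units; presumably they exist only so that calling them here
// forces the linker to keep those objects (and hence their gtest
// auto-registrations) in the test binary.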
extern void register_test_cluster_watcher();
extern void register_test_image_policy();
extern void register_test_image_sync();
extern void register_test_instance_watcher();
extern void register_test_instances();
extern void register_test_leader_watcher();
extern void register_test_pool_watcher();
extern void register_test_rbd_mirror();
extern void register_test_rbd_mirror_image_deleter();
int main(int argc, char **argv)
{
register_test_cluster_watcher();
register_test_image_policy();
register_test_image_sync();
register_test_instance_watcher();
register_test_instances();
register_test_leader_watcher();
register_test_pool_watcher();
register_test_rbd_mirror();
register_test_rbd_mirror_image_deleter();
::testing::InitGoogleTest(&argc, argv);
librados::Rados rados;
std::string result = connect_cluster_pp(rados);
  if (result != "") {
std::cerr << result << std::endl;
return 1;
}
g_ceph_context = reinterpret_cast<CephContext*>(rados.cct());
int r = rados.conf_set("lockdep", "true");
if (r < 0) {
std::cerr << "warning: failed to enable lockdep" << std::endl;
}
return RUN_ALL_TESTS();
}
| 1,558 | 27.87037 | 70 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_ImageMap.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include "librbd/MirroringWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/ImageMap.h"
#include "tools/rbd_mirror/image_map/LoadRequest.h"
#include "tools/rbd_mirror/image_map/UpdateRequest.h"
#include "tools/rbd_mirror/image_map/Types.h"
#include "include/stringify.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
namespace image_map {
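// These request mocks use a static s_instance pointer: the templated
// production code calls the static create() factory, which hands back the
// mock constructed by the test while capturing the output pointers and the
// on_finish completion, so the test can finish each request with any result.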
template <>
struct LoadRequest<librbd::MockTestImageCtx> {
std::map<std::string, cls::rbd::MirrorImageMap> *image_map;
Context *on_finish = nullptr;
static LoadRequest *s_instance;
static LoadRequest *create(librados::IoCtx &ioctx,
std::map<std::string, cls::rbd::MirrorImageMap> *image_map,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->image_map = image_map;
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD0(send, void());
LoadRequest() {
s_instance = this;
}
};
template <>
struct UpdateRequest<librbd::MockTestImageCtx> {
Context *on_finish = nullptr;
static UpdateRequest *s_instance;
static UpdateRequest *create(librados::IoCtx &ioctx,
std::map<std::string, cls::rbd::MirrorImageMap> &&update_mapping,
std::set<std::string> &&global_image_ids,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD0(send, void());
UpdateRequest() {
s_instance = this;
}
};
LoadRequest<librbd::MockTestImageCtx> *
LoadRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
UpdateRequest<librbd::MockTestImageCtx> *
UpdateRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace image_map
} // namespace mirror
} // namespace rbd
// pull in the template definitions so they are instantiated against the
// MockTestImageCtx specializations declared above
#include "tools/rbd_mirror/ImageMap.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::DoAll;
using ::testing::WithArg;
using ::testing::AtLeast;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::ReturnArg;
using ::testing::StrEq;
using image_map::Listener;
using image_map::LoadRequest;
using image_map::UpdateRequest;
using ::rbd::mirror::Threads;
class TestMockImageMap : public TestMockFixture {
public:
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef ImageMap<librbd::MockTestImageCtx> MockImageMap;
typedef LoadRequest<librbd::MockTestImageCtx> MockLoadRequest;
typedef UpdateRequest<librbd::MockTestImageCtx> MockUpdateRequest;
struct MockListener : Listener {
TestMockImageMap *test_mock_image_map;
MockListener(TestMockImageMap *test_mock_image_map)
: test_mock_image_map(test_mock_image_map) {
}
MOCK_METHOD2(mock_acquire_image, void(const std::string &, Context*));
MOCK_METHOD2(mock_release_image, void(const std::string &, Context*));
MOCK_METHOD3(mock_remove_image, void(const std::string &,
const std::string &, Context*));
    void acquire_image(const std::string &global_image_id,
                       const std::string &instance_id,
                       Context* on_finish) override {
      mock_acquire_image(global_image_id, on_finish);
    }
    void release_image(const std::string &global_image_id,
                       const std::string &instance_id,
                       Context* on_finish) override {
      mock_release_image(global_image_id, on_finish);
    }
    void remove_image(const std::string &mirror_uuid,
                      const std::string &global_image_id,
                      const std::string &instance_id,
                      Context* on_finish) override {
      mock_remove_image(mirror_uuid, global_image_id, on_finish);
    }
};
TestMockImageMap() = default;
void SetUp() override {
TestFixture::SetUp();
m_local_instance_id = stringify(m_local_io_ctx.get_instance_id());
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_migration_throttle",
"0"));
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_type", "simple"));
}
void TearDown() override {
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_type", "none"));
TestFixture::TearDown();
}
void expect_work_queue(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillRepeatedly(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
}
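  // Simulate timer expiry: when the code under test arms the mock timer,
  // run the armed callback straight away on the real work queue, taking the
  // timer lock first just as SafeTimer would when dispatching it.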
void expect_add_event(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.timer, add_event_after(_,_))
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
auto wrapped_ctx = new LambdaContext([this, ctx](int r) {
std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
})), ReturnArg<1>()));
}
void expect_rebalance_event(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.timer, add_event_after(_,_))
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
        // disable rebalancing so that it does not get rescheduled
CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct());
cct->_conf.set_val("rbd_mirror_image_policy_rebalance_timeout", "0");
auto wrapped_ctx = new LambdaContext([this, ctx](int r) {
std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
})), ReturnArg<1>()));
}
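  // The request helpers below complete the mocked request's on_finish
  // context with the supplied result; a successful map update additionally
  // bumps m_map_update_count so tests can synchronize via
  // wait_for_map_update().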
void expect_load_request(MockLoadRequest &request, int r) {
EXPECT_CALL(request, send())
.WillOnce(Invoke([&request, r]() {
request.on_finish->complete(r);
}));
}
void expect_update_request(MockUpdateRequest &request, int r) {
EXPECT_CALL(request, send())
.WillOnce(Invoke([this, &request, r]() {
request.on_finish->complete(r);
if (r == 0) {
std::lock_guard locker{m_lock};
++m_map_update_count;
m_cond.notify_all();
}
}));
}
void expect_listener_acquire_image(MockListener &mock_listener,
const std::string &global_image_id,
std::map<std::string, Context*> *peer_ack_ctxs) {
EXPECT_CALL(mock_listener, mock_acquire_image(global_image_id, _))
.WillOnce(WithArg<1>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) {
std::lock_guard locker{m_lock};
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
m_cond.notify_all();
})));
}
void expect_listener_release_image(MockListener &mock_listener,
const std::string &global_image_id,
std::map<std::string, Context*> *peer_ack_ctxs) {
EXPECT_CALL(mock_listener, mock_release_image(global_image_id, _))
.WillOnce(WithArg<1>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) {
std::lock_guard locker{m_lock};
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
m_cond.notify_all();
})));
}
void expect_listener_remove_image(MockListener &mock_listener,
const std::string &mirror_uuid,
const std::string &global_image_id,
std::map<std::string, Context*> *peer_ack_ctxs) {
EXPECT_CALL(mock_listener,
mock_remove_image(mirror_uuid, global_image_id, _))
.WillOnce(WithArg<2>(Invoke([this, global_image_id, peer_ack_ctxs](Context* ctx) {
std::lock_guard locker{m_lock};
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
m_cond.notify_all();
})));
}
void expect_listener_images_unmapped(MockListener &mock_listener, size_t count,
std::set<std::string> *global_image_ids,
std::map<std::string, Context*> *peer_ack_ctxs) {
EXPECT_CALL(mock_listener, mock_release_image(_, _))
.Times(count)
.WillRepeatedly(Invoke([this, global_image_ids, peer_ack_ctxs](std::string global_image_id, Context* ctx) {
std::lock_guard locker{m_lock};
global_image_ids->emplace(global_image_id);
peer_ack_ctxs->insert({global_image_id, ctx});
++m_notify_update_count;
m_cond.notify_all();
}));
}
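  // Three flavours of peer ACK: _nowait just completes the pending ack
  // contexts and drains the work queue, _wait additionally blocks until the
  // resulting map update is committed, and _listener_wait blocks for both
  // the map update and the follow-up listener notification per ack.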
void remote_peer_ack_nowait(MockImageMap *image_map,
const std::set<std::string> &global_image_ids,
int ret,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto& global_image_id : global_image_ids) {
auto it = peer_ack_ctxs->find(global_image_id);
ASSERT_TRUE(it != peer_ack_ctxs->end());
auto ack_ctx = it->second;
peer_ack_ctxs->erase(it);
ack_ctx->complete(ret);
wait_for_scheduled_task();
}
}
void remote_peer_ack_wait(MockImageMap *image_map,
const std::set<std::string> &global_image_ids,
int ret,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto& global_image_id : global_image_ids) {
auto it = peer_ack_ctxs->find(global_image_id);
ASSERT_TRUE(it != peer_ack_ctxs->end());
auto ack_ctx = it->second;
peer_ack_ctxs->erase(it);
ack_ctx->complete(ret);
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_map_update(1));
}
}
void remote_peer_ack_listener_wait(MockImageMap *image_map,
const std::set<std::string> &global_image_ids,
int ret,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto& global_image_id : global_image_ids) {
auto it = peer_ack_ctxs->find(global_image_id);
ASSERT_TRUE(it != peer_ack_ctxs->end());
auto ack_ctx = it->second;
peer_ack_ctxs->erase(it);
ack_ctx->complete(ret);
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(1));
}
}
void update_map_and_acquire(MockThreads &mock_threads,
MockUpdateRequest &mock_update_request,
MockListener &mock_listener,
const std::set<std::string> &global_image_ids,
int ret,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto const &global_image_id : global_image_ids) {
expect_add_event(mock_threads);
expect_update_request(mock_update_request, ret);
expect_add_event(mock_threads);
expect_listener_acquire_image(mock_listener, global_image_id,
peer_ack_ctxs);
}
}
void update_map_request(MockThreads &mock_threads,
MockUpdateRequest &mock_update_request,
const std::set<std::string> &global_image_ids, int ret) {
for (uint32_t i = 0; i < global_image_ids.size(); ++i) {
expect_add_event(mock_threads);
expect_update_request(mock_update_request, ret);
}
}
void wait_for_scheduled_task() {
m_threads->work_queue->drain();
}
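  // Counting waiters: the expect_* helpers above increment these counters
  // under m_lock, and the tests block here (up to 10s per step) until the
  // expected number of notifications/updates has been observed.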
bool wait_for_listener_notify(uint32_t count) {
std::unique_lock locker{m_lock};
while (m_notify_update_count < count) {
if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) {
break;
}
}
if (m_notify_update_count < count) {
return false;
}
m_notify_update_count -= count;
return true;
}
bool wait_for_map_update(uint32_t count) {
std::unique_lock locker{m_lock};
while (m_map_update_count < count) {
if (m_cond.wait_for(locker, 10s) == std::cv_status::timeout) {
break;
}
}
if (m_map_update_count < count) {
return false;
}
m_map_update_count -= count;
return true;
}
int when_shut_down(MockImageMap *image_map) {
C_SaferCond ctx;
image_map->shut_down(&ctx);
return ctx.wait();
}
void listener_acquire_images(MockListener &mock_listener,
const std::set<std::string> &global_image_ids,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto const &global_image_id : global_image_ids) {
expect_listener_acquire_image(mock_listener, global_image_id,
peer_ack_ctxs);
}
}
void listener_release_images(MockListener &mock_listener,
const std::set<std::string> &global_image_ids,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto const &global_image_id : global_image_ids) {
expect_listener_release_image(mock_listener, global_image_id,
peer_ack_ctxs);
}
}
void listener_remove_images(MockListener &mock_listener,
const std::string &mirror_uuid,
std::set<std::string> &global_image_ids,
std::map<std::string, Context*> *peer_ack_ctxs) {
for (auto const &global_image_id : global_image_ids) {
expect_listener_remove_image(mock_listener, mirror_uuid, global_image_id,
peer_ack_ctxs);
}
}
ceph::mutex m_lock = ceph::make_mutex("TestMockImageMap::m_lock");
ceph::condition_variable m_cond;
uint32_t m_notify_update_count = 0;
uint32_t m_map_update_count = 0;
std::string m_local_instance_id;
};
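// All of the tests below follow the same rough skeleton:
//
//   MockThreads mock_threads(m_threads);
//   expect_work_queue(mock_threads); // forward queued contexts
//   InSequence seq; // expectations are verified in strict order
//   MockLoadRequest mock_load_request; // image map load issued by init()
//   expect_load_request(mock_load_request, 0);
//   mock_image_map->init(&cond);
//   // ... then drive update_images()/update_instances_*() and ACK the
//   // resulting acquire/release/remove notifications.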
TEST_F(TestMockImageMap, SetLocalImages) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> global_image_ids_ack(global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids, &peer_ack_ctxs);
// initial image list
mock_image_map->update_images("", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AddRemoveLocalImage) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> initial_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids);
std::set<std::string> remove_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, initial_global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("", std::move(initial_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0,
&peer_ack_ctxs);
// RELEASE+REMOVE_MAPPING
expect_add_event(mock_threads);
listener_release_images(mock_listener, remove_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, remove_global_image_ids,
0);
// remove images
mock_image_map->update_images("", {}, std::move(remove_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(remove_global_image_ids_ack.size()));
remote_peer_ack_wait(mock_image_map.get(), remove_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AddRemoveRemoteImage) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> initial_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids);
std::set<std::string> remove_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, initial_global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("uuid1", std::move(initial_global_image_ids),
{});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0,
&peer_ack_ctxs);
// RELEASE+REMOVE_MAPPING
std::map<std::string, Context*> peer_remove_ack_ctxs;
listener_remove_images(mock_listener, "uuid1", remove_global_image_ids,
&peer_remove_ack_ctxs);
expect_add_event(mock_threads);
listener_release_images(mock_listener, remove_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, remove_global_image_ids,
0);
// remove images
mock_image_map->update_images("uuid1", {}, std::move(remove_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(remove_global_image_ids_ack.size() * 2));
remote_peer_ack_nowait(mock_image_map.get(), remove_global_image_ids_ack, 0,
&peer_remove_ack_ctxs);
remote_peer_ack_wait(mock_image_map.get(), remove_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AddRemoveRemoteImageDuplicateNotification) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> initial_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> initial_global_image_ids_dup(initial_global_image_ids);
std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids);
std::set<std::string> remove_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> remove_global_image_ids_dup(remove_global_image_ids);
std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, initial_global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("uuid1", std::move(initial_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size()));
// trigger duplicate "add" event
wait_for_scheduled_task();
mock_image_map->update_images("uuid1", std::move(initial_global_image_ids_dup), {});
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0,
&peer_ack_ctxs);
// RELEASE+REMOVE_MAPPING
std::map<std::string, Context*> peer_remove_ack_ctxs;
listener_remove_images(mock_listener, "uuid1", remove_global_image_ids,
&peer_remove_ack_ctxs);
expect_add_event(mock_threads);
listener_release_images(mock_listener, remove_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, remove_global_image_ids, 0);
// remove images
mock_image_map->update_images("uuid1", {}, std::move(remove_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(remove_global_image_ids_ack.size() * 2));
remote_peer_ack_nowait(mock_image_map.get(), remove_global_image_ids_ack, 0,
&peer_remove_ack_ctxs);
remote_peer_ack_wait(mock_image_map.get(), remove_global_image_ids_ack, 0,
&peer_ack_ctxs);
// trigger duplicate "remove" notification
mock_image_map->update_images("uuid1", {}, std::move(remove_global_image_ids_dup));
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AcquireImageErrorRetry) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> initial_global_image_ids{
"global id 1", "global id 2"
};
std::set<std::string> initial_global_image_ids_ack(initial_global_image_ids);
// UPDATE_MAPPING failure
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, -EIO);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, initial_global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("uuid1", std::move(initial_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(initial_global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), initial_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, RemoveRemoteAndLocalImage) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
// remote image set
std::set<std::string> initial_remote_global_image_ids{
"global id 1"
};
std::set<std::string> initial_remote_global_image_ids_ack(initial_remote_global_image_ids);
// local image set
std::set<std::string> initial_local_global_image_ids{
"global id 1"
};
// remote/local images to remove
std::set<std::string> remote_remove_global_image_ids{
"global id 1"
};
std::set<std::string> remote_remove_global_image_ids_ack(remote_remove_global_image_ids);
std::set<std::string> local_remove_global_image_ids{
"global id 1"
};
std::set<std::string> local_remove_global_image_ids_ack(local_remove_global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, initial_remote_global_image_ids,
&peer_ack_ctxs);
// initial remote image list
mock_image_map->update_images("uuid1", std::move(initial_remote_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(initial_remote_global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(),
initial_remote_global_image_ids_ack, 0,
&peer_ack_ctxs);
// set initial local image list -- this is a no-op from policy pov
mock_image_map->update_images("", std::move(initial_local_global_image_ids), {});
// remove remote images -- this should be a no-op from policy pov
// except the listener notification
std::map<std::string, Context*> peer_ack_remove_ctxs;
listener_remove_images(mock_listener, "uuid1", remote_remove_global_image_ids,
&peer_ack_remove_ctxs);
mock_image_map->update_images("uuid1", {}, std::move(remote_remove_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(remote_remove_global_image_ids_ack.size()));
// RELEASE+REMOVE_MAPPING
expect_add_event(mock_threads);
listener_release_images(mock_listener, local_remove_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, local_remove_global_image_ids, 0);
// remove local images
mock_image_map->update_images("", {}, std::move(local_remove_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(local_remove_global_image_ids_ack.size()));
remote_peer_ack_nowait(mock_image_map.get(), local_remove_global_image_ids_ack,
0, &peer_ack_remove_ctxs);
remote_peer_ack_wait(mock_image_map.get(), local_remove_global_image_ids_ack,
0, &peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AddInstance) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> global_image_ids{
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5"
};
std::set<std::string> global_image_ids_ack(global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("uuid1", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
mock_image_map->update_instances_added({m_local_instance_id});
std::set<std::string> shuffled_global_image_ids;
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 3, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_added({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, RemoveInstance) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> global_image_ids{
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5"
};
std::set<std::string> global_image_ids_ack(global_image_ids);
expect_add_event(mock_threads);
// UPDATE_MAPPING+ACQUIRE
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
// set initial image list
mock_image_map->update_images("uuid1", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
// remote peer ACKs image acquire request -- completing action
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
mock_image_map->update_instances_added({m_local_instance_id});
std::set<std::string> shuffled_global_image_ids;
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 3, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_added({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
shuffled_global_image_ids.clear();
// remove added instance
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_removed({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AddInstancePingPongImageTest) {
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_migration_throttle", "600"));
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
std::set<std::string> global_image_ids{
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5",
"global id 6", "global id 7", "global id 8", "global id 9", "global id 10",
"global id 11", "global id 12", "global id 13", "global id 14"
};
std::map<std::string, cls::rbd::MirrorImageMap> image_mapping;
for (auto& global_image_id : global_image_ids) {
image_mapping[global_image_id] = {m_local_instance_id, {}, {}};
}
// ACQUIRE
MockLoadRequest mock_load_request;
EXPECT_CALL(mock_load_request, send()).WillOnce(
Invoke([&mock_load_request, &image_mapping]() {
*mock_load_request.image_map = image_mapping;
mock_load_request.on_finish->complete(0);
}));
expect_add_event(mock_threads);
MockListener mock_listener(this);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
mock_image_map->update_instances_added({m_local_instance_id});
std::set<std::string> global_image_ids_ack(global_image_ids);
// remote peer ACKs image acquire request -- completing action
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
// set initial image list
mock_image_map->update_images("uuid1", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
// remote peer ACKs image acquire request -- completing action
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
std::set<std::string> shuffled_global_image_ids;
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 7, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_added({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
std::set<std::string> migrated_global_image_ids(shuffled_global_image_ids);
shuffled_global_image_ids.clear();
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 3, &shuffled_global_image_ids,
&peer_ack_ctxs);
// add another instance
mock_image_map->update_instances_added({"5432"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
  // the two shuffle sets should be disjoint -- no image should move twice
std::set<std::string> reshuffled;
std::set_intersection(migrated_global_image_ids.begin(), migrated_global_image_ids.end(),
shuffled_global_image_ids.begin(), shuffled_global_image_ids.end(),
std::inserter(reshuffled, reshuffled.begin()));
ASSERT_TRUE(reshuffled.empty());
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, RemoveInstanceWithRemoveImage) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> global_image_ids{
"global id 1", "global id 2", "global id 3", "remote id 4",
};
std::set<std::string> global_image_ids_ack(global_image_ids);
std::set<std::string> remove_global_image_ids{
"global id 1"
};
std::set<std::string> remove_global_image_ids_ack(remove_global_image_ids);
expect_add_event(mock_threads);
// UPDATE_MAPPING+ACQUIRE
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("uuid1", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
mock_image_map->update_instances_added({m_local_instance_id});
std::set<std::string> shuffled_global_image_ids;
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_added({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
std::set<std::string> shuffled_global_image_ids_ack(shuffled_global_image_ids);
// RELEASE
std::map<std::string, Context*> peer_ack_remove_ctxs;
listener_remove_images(mock_listener, "uuid1", shuffled_global_image_ids,
&peer_ack_remove_ctxs);
expect_add_event(mock_threads);
listener_release_images(mock_listener, shuffled_global_image_ids,
&peer_ack_ctxs);
expect_add_event(mock_threads);
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
expect_update_request(mock_update_request, 0);
mock_image_map->update_images("uuid1", {}, std::move(shuffled_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids_ack.size() * 2));
// instance failed -- update policy for instance removal
mock_image_map->update_instances_removed({"9876"});
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids,
-ENOENT, &peer_ack_remove_ctxs);
remote_peer_ack_wait(mock_image_map.get(), shuffled_global_image_ids,
-EBLOCKLISTED, &peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, AddErrorAndRemoveImage) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
mock_image_map->update_instances_added({m_local_instance_id});
std::set<std::string> global_image_ids{
"global id 1", "global id 2", "global id 3", "remote id 4",
};
std::set<std::string> global_image_ids_ack(global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("uuid1", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
std::set<std::string> shuffled_global_image_ids;
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_added({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
wait_for_scheduled_task();
mock_image_map->update_instances_removed({"9876"});
std::set<std::string> released_global_image_ids;
std::map<std::string, Context*> release_peer_ack_ctxs;
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 1, &released_global_image_ids,
&release_peer_ack_ctxs);
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 1, &released_global_image_ids,
&release_peer_ack_ctxs);
// instance blocklisted -- ACQUIRE request fails
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids,
-EBLOCKLISTED, &peer_ack_ctxs);
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
std::map<std::string, Context*> remap_peer_ack_ctxs;
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&remap_peer_ack_ctxs);
// instance blocklisted -- RELEASE request fails
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
-ENOENT, &release_peer_ack_ctxs);
wait_for_scheduled_task();
// new peer acks acquire request
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&remap_peer_ack_ctxs);
wait_for_scheduled_task();
std::set<std::string> shuffled_global_image_ids_ack(shuffled_global_image_ids);
// remove image
std::map<std::string, Context*> peer_ack_remove_ctxs;
listener_remove_images(mock_listener, "uuid1", shuffled_global_image_ids,
&peer_ack_remove_ctxs);
expect_add_event(mock_threads);
listener_release_images(mock_listener, shuffled_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, shuffled_global_image_ids, 0);
mock_image_map->update_images("uuid1", {}, std::move(shuffled_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids_ack.size() * 2));
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids_ack, 0,
&peer_ack_remove_ctxs);
remote_peer_ack_wait(mock_image_map.get(), shuffled_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, MirrorUUIDUpdated) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
// remote image set
std::set<std::string> initial_remote_global_image_ids{
"global id 1", "global id 2", "global id 3"
};
std::set<std::string> initial_remote_global_image_ids_ack(initial_remote_global_image_ids);
// remote/local images to remove
std::set<std::string> remote_removed_global_image_ids{
"global id 1", "global id 2", "global id 3"
};
std::set<std::string> remote_removed_global_image_ids_ack(remote_removed_global_image_ids);
std::set<std::string> remote_added_global_image_ids{
"global id 1", "global id 2", "global id 3"
};
std::set<std::string> remote_added_global_image_ids_ack(remote_added_global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, initial_remote_global_image_ids,
&peer_ack_ctxs);
// initial remote image list
mock_image_map->update_images("uuid1", std::move(initial_remote_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(initial_remote_global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(),
initial_remote_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
// RELEASE+REMOVE_MAPPING
std::map<std::string, Context*> peer_remove_ack_ctxs;
listener_remove_images(mock_listener, "uuid1", remote_removed_global_image_ids,
&peer_remove_ack_ctxs);
expect_add_event(mock_threads);
listener_release_images(mock_listener, remote_removed_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, remote_removed_global_image_ids, 0);
mock_image_map->update_images("uuid1", {}, std::move(remote_removed_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(remote_removed_global_image_ids_ack.size() * 2));
remote_peer_ack_nowait(mock_image_map.get(),
remote_removed_global_image_ids_ack, 0,
&peer_remove_ack_ctxs);
remote_peer_ack_wait(mock_image_map.get(),
remote_removed_global_image_ids_ack, 0,
&peer_ack_ctxs);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
listener_acquire_images(mock_listener, remote_added_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_images("uuid2", std::move(remote_added_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(remote_added_global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(),
remote_added_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
TEST_F(TestMockImageMap, RebalanceImageMap) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockLoadRequest mock_load_request;
expect_load_request(mock_load_request, 0);
MockListener mock_listener(this);
std::unique_ptr<MockImageMap> mock_image_map{
MockImageMap::create(m_local_io_ctx, &mock_threads, m_local_instance_id,
mock_listener)};
C_SaferCond cond;
mock_image_map->init(&cond);
ASSERT_EQ(0, cond.wait());
std::set<std::string> global_image_ids{
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5",
"global id 6", "global id 7", "global id 8", "global id 9", "global id 10",
};
std::set<std::string> global_image_ids_ack(global_image_ids);
// UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
MockUpdateRequest mock_update_request;
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
std::map<std::string, Context*> peer_ack_ctxs;
listener_acquire_images(mock_listener, global_image_ids,
&peer_ack_ctxs);
// initial image list
mock_image_map->update_images("", std::move(global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(global_image_ids_ack.size()));
// remote peer ACKs image acquire request
remote_peer_ack_nowait(mock_image_map.get(), global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
mock_image_map->update_instances_added({m_local_instance_id});
std::set<std::string> shuffled_global_image_ids;
// RELEASE+UPDATE_MAPPING+ACQUIRE
expect_add_event(mock_threads);
expect_listener_images_unmapped(mock_listener, 5, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_instances_added({"9876"});
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
// remove all shuffled images -- make way for rebalance
std::set<std::string> shuffled_global_image_ids_ack(shuffled_global_image_ids);
// RELEASE+REMOVE_MAPPING
expect_add_event(mock_threads);
listener_release_images(mock_listener, shuffled_global_image_ids,
&peer_ack_ctxs);
update_map_request(mock_threads, mock_update_request, shuffled_global_image_ids,
0);
mock_image_map->update_images("", {}, std::move(shuffled_global_image_ids));
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids_ack.size()));
remote_peer_ack_wait(mock_image_map.get(), shuffled_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
shuffled_global_image_ids.clear();
shuffled_global_image_ids_ack.clear();
std::set<std::string> new_global_image_ids = {
"global id 11"
};
std::set<std::string> new_global_image_ids_ack(new_global_image_ids);
expect_add_event(mock_threads);
expect_update_request(mock_update_request, 0);
expect_add_event(mock_threads);
listener_acquire_images(mock_listener, new_global_image_ids, &peer_ack_ctxs);
expect_rebalance_event(mock_threads); // rebalance task
expect_add_event(mock_threads); // update task scheduled by
// rebalance task
expect_listener_images_unmapped(mock_listener, 2, &shuffled_global_image_ids,
&peer_ack_ctxs);
mock_image_map->update_images("", std::move(new_global_image_ids), {});
ASSERT_TRUE(wait_for_map_update(1));
ASSERT_TRUE(wait_for_listener_notify(new_global_image_ids_ack.size()));
// set rebalance interval
CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct());
cct->_conf.set_val("rbd_mirror_image_policy_rebalance_timeout", "5");
remote_peer_ack_nowait(mock_image_map.get(), new_global_image_ids_ack, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_TRUE(wait_for_listener_notify(shuffled_global_image_ids.size()));
update_map_and_acquire(mock_threads, mock_update_request,
mock_listener, shuffled_global_image_ids, 0,
&peer_ack_ctxs);
remote_peer_ack_listener_wait(mock_image_map.get(), shuffled_global_image_ids,
0, &peer_ack_ctxs);
  // complete the shuffle action for the now (re)mapped images
remote_peer_ack_nowait(mock_image_map.get(), shuffled_global_image_ids, 0,
&peer_ack_ctxs);
wait_for_scheduled_task();
ASSERT_EQ(0, when_shut_down(mock_image_map.get()));
}
} // namespace mirror
} // namespace rbd
| 57,432 | 35.166877 | 113 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_ImageReplayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/journal/cls_journal_types.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
#include "tools/rbd_mirror/ImageDeleter.h"
#include "tools/rbd_mirror/ImageReplayer.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/MirrorStatusUpdater.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_replayer/BootstrapRequest.h"
#include "tools/rbd_mirror/image_replayer/Replayer.h"
#include "tools/rbd_mirror/image_replayer/ReplayerListener.h"
#include "tools/rbd_mirror/image_replayer/StateBuilder.h"
#include "tools/rbd_mirror/image_replayer/Utils.h"
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct ImageDeleter<librbd::MockTestImageCtx> {
static ImageDeleter* s_instance;
static void trash_move(librados::IoCtx& local_io_ctx,
const std::string& global_image_id, bool resync,
MockContextWQ* work_queue, Context* on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->trash_move(global_image_id, resync, on_finish);
}
MOCK_METHOD3(trash_move, void(const std::string&, bool, Context*));
ImageDeleter() {
s_instance = this;
}
};
ImageDeleter<librbd::MockTestImageCtx>* ImageDeleter<librbd::MockTestImageCtx>::s_instance = nullptr;
template <>
struct MirrorStatusUpdater<librbd::MockTestImageCtx> {
MOCK_METHOD1(exists, bool(const std::string&));
MOCK_METHOD3(set_mirror_image_status,
void(const std::string&, const cls::rbd::MirrorImageSiteStatus&,
bool));
MOCK_METHOD2(remove_refresh_mirror_image_status, void(const std::string&,
Context*));
MOCK_METHOD3(remove_mirror_image_status, void(const std::string&, bool,
Context*));
};
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
template<>
class InstanceWatcher<librbd::MockTestImageCtx> {
};
namespace image_replayer {
template<>
struct BootstrapRequest<librbd::MockTestImageCtx> {
static BootstrapRequest* s_instance;
StateBuilder<librbd::MockTestImageCtx>** state_builder = nullptr;
bool *do_resync = nullptr;
Context *on_finish = nullptr;
static BootstrapRequest* create(
Threads<librbd::MockTestImageCtx>* threads,
librados::IoCtx &local_io_ctx,
librados::IoCtx& remote_io_ctx,
rbd::mirror::InstanceWatcher<librbd::MockTestImageCtx> *instance_watcher,
const std::string &global_image_id,
const std::string &local_mirror_uuid,
const RemotePoolMeta& remote_pool_meta,
::journal::CacheManagerHandler *cache_manager_handler,
PoolMetaCache* pool_meta_cache,
rbd::mirror::ProgressContext *progress_ctx,
StateBuilder<librbd::MockTestImageCtx>** state_builder,
bool *do_resync, Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->state_builder = state_builder;
s_instance->do_resync = do_resync;
s_instance->on_finish = on_finish;
return s_instance;
}
BootstrapRequest() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
~BootstrapRequest() {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
void put() {
}
void get() {
}
std::string get_local_image_name() const {
return "local image name";
}
inline bool is_syncing() const {
return false;
}
MOCK_METHOD0(send, void());
MOCK_METHOD0(cancel, void());
};
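// MockReplayer records the ReplayerListener that ImageReplayer registers
// (see expect_create_replayer() below), letting tests invoke
// handle_notification() directly to drive the resync, interrupt and
// rename paths.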
struct MockReplayer : public Replayer {
image_replayer::ReplayerListener* replayer_listener;
MOCK_METHOD0(destroy, void());
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(flush, void(Context*));
MOCK_METHOD2(get_replay_status, bool(std::string*, Context*));
MOCK_CONST_METHOD0(is_replaying, bool());
MOCK_CONST_METHOD0(is_resync_requested, bool());
MOCK_CONST_METHOD0(get_error_code, int());
MOCK_CONST_METHOD0(get_error_description, std::string());
};
template <>
struct StateBuilder<librbd::MockTestImageCtx> {
static StateBuilder* s_instance;
librbd::MockTestImageCtx* local_image_ctx = nullptr;
std::string local_image_id;
std::string remote_image_id;
void destroy() {
}
MOCK_METHOD1(close, void(Context*));
MOCK_METHOD5(create_replayer, Replayer*(Threads<librbd::MockTestImageCtx>*,
InstanceWatcher<librbd::MockTestImageCtx>*,
const std::string&, PoolMetaCache*,
ReplayerListener*));
StateBuilder() {
s_instance = this;
}
};
BootstrapRequest<librbd::MockTestImageCtx>* BootstrapRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
StateBuilder<librbd::MockTestImageCtx>* StateBuilder<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace image_replayer
} // namespace mirror
} // namespace rbd
// template definitions
#include "tools/rbd_mirror/ImageReplayer.cc"
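// Including the template definitions here instantiates
// ImageReplayer<librbd::MockTestImageCtx> against the mock
// specializations declared above.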
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::MatcherCast;
using ::testing::Return;
using ::testing::ReturnArg;
using ::testing::SetArgPointee;
using ::testing::WithArg;
class TestMockImageReplayer : public TestMockFixture {
public:
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef ImageDeleter<librbd::MockTestImageCtx> MockImageDeleter;
typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater;
typedef image_replayer::BootstrapRequest<librbd::MockTestImageCtx> MockBootstrapRequest;
typedef image_replayer::StateBuilder<librbd::MockTestImageCtx> MockStateBuilder;
typedef image_replayer::MockReplayer MockReplayer;
typedef ImageReplayer<librbd::MockTestImageCtx> MockImageReplayer;
typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher;
void SetUp() override {
TestMockFixture::SetUp();
librbd::RBD rbd;
ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx));
}
void TearDown() override {
delete m_image_replayer;
TestMockFixture::TearDown();
}
void create_local_image() {
librbd::RBD rbd;
ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx));
}
void expect_work_queue_repeatedly(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillRepeatedly(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_add_event_after_repeatedly(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
.WillRepeatedly(
DoAll(Invoke([this](double seconds, Context *ctx) {
m_threads->timer->add_event_after(seconds, ctx);
}),
ReturnArg<1>()));
EXPECT_CALL(*mock_threads.timer, cancel_event(_))
.WillRepeatedly(
Invoke([this](Context *ctx) {
return m_threads->timer->cancel_event(ctx);
}));
}
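  // The timer expectations above forward add_event_after()/cancel_event()
  // to the fixture's real SafeTimer, so delayed contexts still fire while
  // the mock records the calls.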
void expect_trash_move(MockImageDeleter& mock_image_deleter,
const std::string& global_image_id,
bool ignore_orphan, int r) {
EXPECT_CALL(mock_image_deleter,
trash_move(global_image_id, ignore_orphan, _))
.WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
})));
}
bufferlist encode_tag_data(const librbd::journal::TagData &tag_data) {
bufferlist bl;
encode(tag_data, bl);
return bl;
}
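  // expect_send() emulates the BootstrapRequest outcomes exercised below:
  // r == 0 opens the local image and reports do_resync, r == -ENOLINK
  // hands back a state builder but clears the remote image id (remote
  // image deleted), and other errors propagate to ImageReplayer for its
  // usual handling (e.g. -ENOMSG, local image primary, stops cleanly).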
void expect_send(MockBootstrapRequest& mock_bootstrap_request,
MockStateBuilder& mock_state_builder,
librbd::MockTestImageCtx& mock_local_image_ctx,
bool do_resync, bool set_local_image, int r) {
EXPECT_CALL(mock_bootstrap_request, send())
.WillOnce(Invoke([this, &mock_bootstrap_request, &mock_state_builder,
&mock_local_image_ctx, set_local_image, do_resync,
r]() {
if (r == 0 || r == -ENOLINK) {
mock_state_builder.local_image_id = mock_local_image_ctx.id;
mock_state_builder.remote_image_id = m_remote_image_ctx->id;
*mock_bootstrap_request.state_builder = &mock_state_builder;
}
if (r == 0) {
mock_state_builder.local_image_ctx = &mock_local_image_ctx;
*mock_bootstrap_request.do_resync = do_resync;
}
if (r < 0 && r != -ENOENT) {
mock_state_builder.remote_image_id = "";
}
if (r == -ENOENT) {
*mock_bootstrap_request.state_builder = &mock_state_builder;
}
if (set_local_image) {
mock_state_builder.local_image_id = mock_local_image_ctx.id;
}
mock_bootstrap_request.on_finish->complete(r);
}));
}
void expect_create_replayer(MockStateBuilder& mock_state_builder,
MockReplayer& mock_replayer) {
EXPECT_CALL(mock_state_builder, create_replayer(_, _, _, _, _))
.WillOnce(WithArg<4>(
Invoke([&mock_replayer]
(image_replayer::ReplayerListener* replayer_listener) {
mock_replayer.replayer_listener = replayer_listener;
return &mock_replayer;
})));
}
void expect_close(MockStateBuilder& mock_state_builder, int r) {
EXPECT_CALL(mock_state_builder, close(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_init(MockReplayer& mock_replayer, int r) {
EXPECT_CALL(mock_replayer, init(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_shut_down(MockReplayer& mock_replayer, int r) {
EXPECT_CALL(mock_replayer, shut_down(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
EXPECT_CALL(mock_replayer, destroy());
}
void expect_get_replay_status(MockReplayer& mock_replayer) {
EXPECT_CALL(mock_replayer, get_replay_status(_, _))
.WillRepeatedly(DoAll(WithArg<1>(CompleteContext(-EEXIST)),
Return(true)));
}
void expect_set_mirror_image_status_repeatedly() {
EXPECT_CALL(m_local_status_updater, set_mirror_image_status(_, _, _))
.WillRepeatedly(Invoke([](auto, auto, auto){}));
EXPECT_CALL(m_remote_status_updater, set_mirror_image_status(_, _, _))
.WillRepeatedly(Invoke([](auto, auto, auto){}));
}
void expect_mirror_image_status_exists(bool exists) {
EXPECT_CALL(m_local_status_updater, exists(_))
.WillOnce(Return(exists));
EXPECT_CALL(m_remote_status_updater, exists(_))
.WillOnce(Return(exists));
}
void create_image_replayer(MockThreads &mock_threads) {
m_image_replayer = new MockImageReplayer(
m_local_io_ctx, "local_mirror_uuid", "global image id",
&mock_threads, &m_instance_watcher, &m_local_status_updater, nullptr,
nullptr);
m_image_replayer->add_peer({"peer_uuid", m_remote_io_ctx,
{"remote mirror uuid",
"remote mirror peer uuid"},
&m_remote_status_updater});
}
void wait_for_stopped() {
for (int i = 0; i < 10000; i++) {
if (m_image_replayer->is_stopped()) {
break;
}
usleep(1000);
}
ASSERT_TRUE(m_image_replayer->is_stopped());
}
librbd::ImageCtx *m_remote_image_ctx;
librbd::ImageCtx *m_local_image_ctx = nullptr;
MockInstanceWatcher m_instance_watcher;
MockMirrorStatusUpdater m_local_status_updater;
MockMirrorStatusUpdater m_remote_status_updater;
MockImageReplayer *m_image_replayer = nullptr;
};
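// Happy path: bootstrap succeeds, the replayer is created and
// initialized (health OK), then stop() shuts the replayer down and
// closes the state builder.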
TEST_F(TestMockImageReplayer, StartStop) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockBootstrapRequest mock_bootstrap_request;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
ASSERT_EQ(image_replayer::HEALTH_STATE_OK,
m_image_replayer->get_health_state());
// STOP
expect_shut_down(mock_replayer, 0);
expect_close(mock_state_builder, 0);
expect_mirror_image_status_exists(false);
C_SaferCond stop_ctx;
m_image_replayer->stop(&stop_ctx);
ASSERT_EQ(0, stop_ctx.wait());
ASSERT_EQ(image_replayer::HEALTH_STATE_OK,
m_image_replayer->get_health_state());
}
TEST_F(TestMockImageReplayer, LocalImagePrimary) {
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, -ENOMSG);
expect_mirror_image_status_exists(false);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
}
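// -ENOLINK from bootstrap with a valid local image means the remote
// image no longer exists: the state builder is closed and the local
// image is moved to the trash.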
TEST_F(TestMockImageReplayer, MetadataCleanup) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, true, -ENOLINK);
expect_close(mock_state_builder, 0);
expect_trash_move(mock_image_deleter, "global image id", false, 0);
expect_mirror_image_status_exists(false);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
}
TEST_F(TestMockImageReplayer, BootstrapRemoteDeleted) {
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockBootstrapRequest mock_bootstrap_request;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, -ENOLINK);
expect_close(mock_state_builder, 0);
expect_trash_move(mock_image_deleter, "global image id", false, 0);
expect_mirror_image_status_exists(false);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
}
TEST_F(TestMockImageReplayer, BootstrapResyncRequested) {
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockBootstrapRequest mock_bootstrap_request;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
true, false, 0);
expect_close(mock_state_builder, 0);
expect_trash_move(mock_image_deleter, "global image id", true, 0);
expect_mirror_image_status_exists(false);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
}
TEST_F(TestMockImageReplayer, BootstrapError) {
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, -EINVAL);
expect_mirror_image_status_exists(false);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(-EINVAL, start_ctx.wait());
}
TEST_F(TestMockImageReplayer, BootstrapCancel) {
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
expect_set_mirror_image_status_repeatedly();
InSequence seq;
create_image_replayer(mock_threads);
MockBootstrapRequest mock_bootstrap_request;
MockStateBuilder mock_state_builder;
EXPECT_CALL(mock_bootstrap_request, send())
.WillOnce(Invoke([this, &mock_bootstrap_request]() {
m_image_replayer->stop(nullptr);
mock_bootstrap_request.on_finish->complete(-ECANCELED);
}));
EXPECT_CALL(mock_bootstrap_request, cancel());
expect_mirror_image_status_exists(false);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(-ECANCELED, start_ctx.wait());
}
TEST_F(TestMockImageReplayer, StopError) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
// STOP (errors are ignored)
expect_shut_down(mock_replayer, -EINVAL);
expect_close(mock_state_builder, -EINVAL);
expect_mirror_image_status_exists(false);
C_SaferCond stop_ctx;
m_image_replayer->stop(&stop_ctx);
ASSERT_EQ(0, stop_ctx.wait());
}
TEST_F(TestMockImageReplayer, ReplayerError) {
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
MockReplayer mock_replayer;
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, -EINVAL);
EXPECT_CALL(mock_replayer, get_error_description())
.WillOnce(Return("FAIL"));
EXPECT_CALL(mock_replayer, destroy());
expect_close(mock_state_builder, -EINVAL);
expect_mirror_image_status_exists(false);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(-EINVAL, start_ctx.wait());
}
TEST_F(TestMockImageReplayer, ReplayerResync) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
// NOTIFY
EXPECT_CALL(mock_replayer, is_resync_requested())
.WillOnce(Return(true));
expect_shut_down(mock_replayer, 0);
expect_close(mock_state_builder, 0);
expect_trash_move(mock_image_deleter, "global image id", true, 0);
expect_mirror_image_status_exists(false);
mock_replayer.replayer_listener->handle_notification();
ASSERT_FALSE(m_image_replayer->is_running());
wait_for_stopped();
}
TEST_F(TestMockImageReplayer, ReplayerInterrupted) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
// NOTIFY
EXPECT_CALL(mock_replayer, is_resync_requested())
.WillOnce(Return(false));
EXPECT_CALL(mock_replayer, is_replaying())
.WillOnce(Return(false));
EXPECT_CALL(mock_replayer, get_error_code())
.WillOnce(Return(-EINVAL));
EXPECT_CALL(mock_replayer, get_error_description())
.WillOnce(Return("INVALID"));
expect_shut_down(mock_replayer, 0);
expect_close(mock_state_builder, 0);
expect_mirror_image_status_exists(false);
mock_replayer.replayer_listener->handle_notification();
ASSERT_FALSE(m_image_replayer->is_running());
wait_for_stopped();
}
TEST_F(TestMockImageReplayer, ReplayerRenamed) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockImageDeleter mock_image_deleter;
MockBootstrapRequest mock_bootstrap_request;
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
// NOTIFY
EXPECT_CALL(mock_replayer, is_resync_requested())
.WillOnce(Return(false));
EXPECT_CALL(mock_replayer, is_replaying())
.WillOnce(Return(true));
mock_local_image_ctx.name = "NEW NAME";
mock_replayer.replayer_listener->handle_notification();
// STOP
expect_shut_down(mock_replayer, 0);
expect_close(mock_state_builder, 0);
expect_mirror_image_status_exists(false);
C_SaferCond stop_ctx;
m_image_replayer->stop(&stop_ctx);
ASSERT_EQ(0, stop_ctx.wait());
auto image_spec = image_replayer::util::compute_image_spec(
m_local_io_ctx, "NEW NAME");
ASSERT_EQ(image_spec, m_image_replayer->get_name());
}
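// The two StopJoin tests delay shut_down() on the timer to verify that
// a stop() issued while a shutdown is already in flight joins the
// in-progress teardown instead of racing it.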
TEST_F(TestMockImageReplayer, StopJoinInterruptedReplayer) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockBootstrapRequest mock_bootstrap_request;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
// NOTIFY
EXPECT_CALL(mock_replayer, is_resync_requested())
.WillOnce(Return(false));
EXPECT_CALL(mock_replayer, is_replaying())
.WillOnce(Return(false));
EXPECT_CALL(mock_replayer, get_error_code())
.WillOnce(Return(-EINVAL));
EXPECT_CALL(mock_replayer, get_error_description())
.WillOnce(Return("INVALID"));
const double DELAY = 10;
EXPECT_CALL(mock_replayer, shut_down(_))
.WillOnce(Invoke([this, DELAY](Context* ctx) {
std::lock_guard l(m_threads->timer_lock);
m_threads->timer->add_event_after(DELAY, ctx);
}));
EXPECT_CALL(mock_replayer, destroy());
expect_close(mock_state_builder, 0);
expect_mirror_image_status_exists(false);
mock_replayer.replayer_listener->handle_notification();
ASSERT_FALSE(m_image_replayer->is_running());
C_SaferCond stop_ctx;
m_image_replayer->stop(&stop_ctx);
ASSERT_EQ(ETIMEDOUT, stop_ctx.wait_for(DELAY * 3 / 4));
ASSERT_EQ(0, stop_ctx.wait_for(DELAY));
}
TEST_F(TestMockImageReplayer, StopJoinRequestedStop) {
// START
create_local_image();
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockThreads mock_threads(m_threads);
expect_work_queue_repeatedly(mock_threads);
expect_add_event_after_repeatedly(mock_threads);
MockReplayer mock_replayer;
expect_get_replay_status(mock_replayer);
expect_set_mirror_image_status_repeatedly();
InSequence seq;
MockBootstrapRequest mock_bootstrap_request;
MockStateBuilder mock_state_builder;
expect_send(mock_bootstrap_request, mock_state_builder, mock_local_image_ctx,
false, false, 0);
expect_create_replayer(mock_state_builder, mock_replayer);
expect_init(mock_replayer, 0);
create_image_replayer(mock_threads);
C_SaferCond start_ctx;
m_image_replayer->start(&start_ctx);
ASSERT_EQ(0, start_ctx.wait());
// STOP
const double DELAY = 10;
EXPECT_CALL(mock_replayer, shut_down(_))
.WillOnce(Invoke([this, DELAY](Context* ctx) {
std::lock_guard l(m_threads->timer_lock);
m_threads->timer->add_event_after(DELAY, ctx);
}));
EXPECT_CALL(mock_replayer, destroy());
expect_close(mock_state_builder, 0);
expect_mirror_image_status_exists(false);
C_SaferCond stop_ctx1;
m_image_replayer->stop(&stop_ctx1);
C_SaferCond stop_ctx2;
m_image_replayer->stop(&stop_ctx2);
ASSERT_EQ(ETIMEDOUT, stop_ctx2.wait_for(DELAY * 3 / 4));
ASSERT_EQ(0, stop_ctx2.wait_for(DELAY));
ASSERT_EQ(0, stop_ctx1.wait_for(0));
}
} // namespace mirror
} // namespace rbd
| 29,542 | 30.065195 | 109 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_ImageSync.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "include/rbd/librbd.hpp"
#include "librbd/DeepCopyRequest.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/mock/image_sync/MockSyncPointHandler.h"
#include "tools/rbd_mirror/ImageSync.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_sync/SyncPointCreateRequest.h"
#include "tools/rbd_mirror/image_sync/SyncPointPruneRequest.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
explicit MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
template <>
class DeepCopyRequest<librbd::MockTestImageCtx> {
public:
static DeepCopyRequest* s_instance;
Context *on_finish;
static DeepCopyRequest* create(
librbd::MockTestImageCtx *src_image_ctx,
librbd::MockTestImageCtx *dst_image_ctx,
librados::snap_t src_snap_id_start, librados::snap_t src_snap_id_end,
librados::snap_t dst_snap_id_start, bool flatten,
const librbd::deep_copy::ObjectNumber &object_number,
librbd::asio::ContextWQ *work_queue, SnapSeqs *snap_seqs,
deep_copy::Handler *handler, Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->on_finish = on_finish;
return s_instance;
}
DeepCopyRequest() {
s_instance = this;
}
void put() {
}
void get() {
}
MOCK_METHOD0(cancel, void());
MOCK_METHOD0(send, void());
};
DeepCopyRequest<librbd::MockTestImageCtx>* DeepCopyRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace librbd
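// DeepCopyRequest is mocked with the same s_instance trampoline as the
// rbd-mirror request classes: create() inside ImageSync returns the
// test's mock, so the image-copy step can be completed or cancelled on
// demand.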
// template definitions
#include "tools/rbd_mirror/ImageSync.cc"
namespace rbd {
namespace mirror {
template <>
struct Threads<librbd::MockTestImageCtx> {
ceph::mutex &timer_lock;
SafeTimer *timer;
librbd::asio::ContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer_lock(threads->timer_lock), timer(threads->timer),
work_queue(threads->work_queue) {
}
};
template<>
struct InstanceWatcher<librbd::MockTestImageCtx> {
MOCK_METHOD2(notify_sync_request, void(const std::string, Context *));
MOCK_METHOD1(cancel_sync_request, bool(const std::string &));
MOCK_METHOD1(notify_sync_complete, void(const std::string &));
};
namespace image_sync {
template <>
class SyncPointCreateRequest<librbd::MockTestImageCtx> {
public:
static SyncPointCreateRequest *s_instance;
Context *on_finish;
static SyncPointCreateRequest* create(librbd::MockTestImageCtx *remote_image_ctx,
const std::string &mirror_uuid,
image_sync::SyncPointHandler* sync_point_handler,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->on_finish = on_finish;
return s_instance;
}
SyncPointCreateRequest() {
s_instance = this;
}
MOCK_METHOD0(send, void());
};
template <>
class SyncPointPruneRequest<librbd::MockTestImageCtx> {
public:
static SyncPointPruneRequest *s_instance;
Context *on_finish;
bool sync_complete;
static SyncPointPruneRequest* create(librbd::MockTestImageCtx *remote_image_ctx,
bool sync_complete,
image_sync::SyncPointHandler* sync_point_handler,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->on_finish = on_finish;
s_instance->sync_complete = sync_complete;
return s_instance;
}
SyncPointPruneRequest() {
s_instance = this;
}
MOCK_METHOD0(send, void());
};
SyncPointCreateRequest<librbd::MockTestImageCtx>* SyncPointCreateRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
SyncPointPruneRequest<librbd::MockTestImageCtx>* SyncPointPruneRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace image_sync
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::WithArg;
using ::testing::InvokeWithoutArgs;
class TestMockImageSync : public TestMockFixture {
public:
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef ImageSync<librbd::MockTestImageCtx> MockImageSync;
typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher;
typedef image_sync::SyncPointCreateRequest<librbd::MockTestImageCtx> MockSyncPointCreateRequest;
typedef image_sync::SyncPointPruneRequest<librbd::MockTestImageCtx> MockSyncPointPruneRequest;
typedef image_sync::MockSyncPointHandler MockSyncPointHandler;
typedef librbd::DeepCopyRequest<librbd::MockTestImageCtx> MockImageCopyRequest;
void SetUp() override {
TestMockFixture::SetUp();
librbd::RBD rbd;
ASSERT_EQ(0, create_image(rbd, m_remote_io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(m_remote_io_ctx, m_image_name, &m_remote_image_ctx));
ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx));
}
void expect_get_snap_id(librbd::MockTestImageCtx &mock_image_ctx) {
EXPECT_CALL(mock_image_ctx, get_snap_id(_, _))
.WillOnce(Return(123));
}
void expect_notify_sync_request(MockInstanceWatcher &mock_instance_watcher,
const std::string &sync_id, int r) {
EXPECT_CALL(mock_instance_watcher, notify_sync_request(sync_id, _))
.WillOnce(Invoke([this, r](const std::string &, Context *on_sync_start) {
m_threads->work_queue->queue(on_sync_start, r);
}));
}
void expect_cancel_sync_request(MockInstanceWatcher &mock_instance_watcher,
const std::string &sync_id, bool canceled) {
EXPECT_CALL(mock_instance_watcher, cancel_sync_request(sync_id))
.WillOnce(Return(canceled));
}
void expect_notify_sync_complete(MockInstanceWatcher &mock_instance_watcher,
const std::string &sync_id) {
EXPECT_CALL(mock_instance_watcher, notify_sync_complete(sync_id));
}
void expect_create_sync_point(librbd::MockTestImageCtx &mock_local_image_ctx,
MockSyncPointCreateRequest &mock_sync_point_create_request,
int r) {
EXPECT_CALL(mock_sync_point_create_request, send())
.WillOnce(Invoke([this, &mock_local_image_ctx, &mock_sync_point_create_request, r]() {
if (r == 0) {
mock_local_image_ctx.snap_ids[{cls::rbd::UserSnapshotNamespace(),
"snap1"}] = 123;
m_sync_points.emplace_back(cls::rbd::UserSnapshotNamespace(),
"snap1", "", boost::none);
}
m_threads->work_queue->queue(mock_sync_point_create_request.on_finish, r);
}));
}
void expect_copy_image(MockImageCopyRequest &mock_image_copy_request, int r) {
EXPECT_CALL(mock_image_copy_request, send())
.WillOnce(Invoke([this, &mock_image_copy_request, r]() {
m_threads->work_queue->queue(mock_image_copy_request.on_finish, r);
}));
}
void expect_flush_sync_point(MockSyncPointHandler& mock_sync_point_handler,
int r) {
EXPECT_CALL(mock_sync_point_handler, update_sync_points(_, _, false, _))
.WillOnce(WithArg<3>(CompleteContext(r)));
}
void expect_prune_sync_point(MockSyncPointPruneRequest &mock_sync_point_prune_request,
bool sync_complete, int r) {
EXPECT_CALL(mock_sync_point_prune_request, send())
.WillOnce(Invoke([this, &mock_sync_point_prune_request, sync_complete, r]() {
ASSERT_EQ(sync_complete, mock_sync_point_prune_request.sync_complete);
if (r == 0 && !m_sync_points.empty()) {
if (sync_complete) {
m_sync_points.pop_front();
} else {
while (m_sync_points.size() > 1) {
m_sync_points.pop_back();
}
}
}
m_threads->work_queue->queue(mock_sync_point_prune_request.on_finish, r);
}));
}
void expect_get_snap_seqs(MockSyncPointHandler& mock_sync_point_handler) {
EXPECT_CALL(mock_sync_point_handler, get_snap_seqs())
.WillRepeatedly(Return(librbd::SnapSeqs{}));
}
void expect_get_sync_points(MockSyncPointHandler& mock_sync_point_handler) {
EXPECT_CALL(mock_sync_point_handler, get_sync_points())
.WillRepeatedly(Invoke([this]() {
return m_sync_points;
}));
}
MockImageSync *create_request(MockThreads& mock_threads,
librbd::MockTestImageCtx &mock_remote_image_ctx,
librbd::MockTestImageCtx &mock_local_image_ctx,
MockSyncPointHandler& mock_sync_point_handler,
MockInstanceWatcher &mock_instance_watcher,
Context *ctx) {
return new MockImageSync(&mock_threads, &mock_local_image_ctx,
&mock_remote_image_ctx,
"mirror-uuid", &mock_sync_point_handler,
&mock_instance_watcher, nullptr, ctx);
}
librbd::ImageCtx *m_remote_image_ctx;
librbd::ImageCtx *m_local_image_ctx;
image_sync::SyncPoints m_sync_points;
};
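// A full sync pass: throttle through notify_sync_request(), create a
// sync point, deep-copy the image, flush and prune the sync point, then
// notify_sync_complete().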
TEST_F(TestMockImageSync, SimpleSync) {
MockThreads mock_threads(m_threads);
librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx);
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockSyncPointHandler mock_sync_point_handler;
MockInstanceWatcher mock_instance_watcher;
MockImageCopyRequest mock_image_copy_request;
MockSyncPointCreateRequest mock_sync_point_create_request;
MockSyncPointPruneRequest mock_sync_point_prune_request;
expect_get_snap_seqs(mock_sync_point_handler);
expect_get_sync_points(mock_sync_point_handler);
InSequence seq;
expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0);
expect_create_sync_point(mock_local_image_ctx, mock_sync_point_create_request, 0);
expect_get_snap_id(mock_remote_image_ctx);
expect_copy_image(mock_image_copy_request, 0);
expect_flush_sync_point(mock_sync_point_handler, 0);
expect_prune_sync_point(mock_sync_point_prune_request, true, 0);
expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);
C_SaferCond ctx;
MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx,
mock_local_image_ctx,
mock_sync_point_handler,
mock_instance_watcher, &ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageSync, RestartSync) {
MockThreads mock_threads(m_threads);
librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx);
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockSyncPointHandler mock_sync_point_handler;
MockInstanceWatcher mock_instance_watcher;
MockImageCopyRequest mock_image_copy_request;
MockSyncPointCreateRequest mock_sync_point_create_request;
MockSyncPointPruneRequest mock_sync_point_prune_request;
m_sync_points = {{cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none},
{cls::rbd::UserSnapshotNamespace(), "snap2", "snap1", boost::none}};
mock_local_image_ctx.snap_ids[{cls::rbd::UserSnapshotNamespace(), "snap1"}] = 123;
mock_local_image_ctx.snap_ids[{cls::rbd::UserSnapshotNamespace(), "snap2"}] = 234;
expect_test_features(mock_local_image_ctx);
expect_get_snap_seqs(mock_sync_point_handler);
expect_get_sync_points(mock_sync_point_handler);
InSequence seq;
expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0);
expect_prune_sync_point(mock_sync_point_prune_request, false, 0);
expect_get_snap_id(mock_remote_image_ctx);
expect_copy_image(mock_image_copy_request, 0);
expect_flush_sync_point(mock_sync_point_handler, 0);
expect_prune_sync_point(mock_sync_point_prune_request, true, 0);
expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);
C_SaferCond ctx;
MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx,
mock_local_image_ctx,
mock_sync_point_handler,
mock_instance_watcher, &ctx);
request->send();
ASSERT_EQ(0, ctx.wait());
}
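// The remaining tests cancel the sync at different stages: while queued
// behind the instance throttler, in the middle of the image copy, and
// after the copy has already completed.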
TEST_F(TestMockImageSync, CancelNotifySyncRequest) {
MockThreads mock_threads(m_threads);
librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx);
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockSyncPointHandler mock_sync_point_handler;
MockInstanceWatcher mock_instance_watcher;
expect_get_snap_seqs(mock_sync_point_handler);
expect_get_sync_points(mock_sync_point_handler);
InSequence seq;
Context *on_sync_start = nullptr;
C_SaferCond notify_sync_ctx;
EXPECT_CALL(mock_instance_watcher,
notify_sync_request(mock_local_image_ctx.id, _))
.WillOnce(Invoke([&on_sync_start, ¬ify_sync_ctx](
const std::string &, Context *ctx) {
on_sync_start = ctx;
notify_sync_ctx.complete(0);
}));
EXPECT_CALL(mock_instance_watcher,
cancel_sync_request(mock_local_image_ctx.id))
.WillOnce(Invoke([&on_sync_start](const std::string &) {
EXPECT_NE(nullptr, on_sync_start);
on_sync_start->complete(-ECANCELED);
return true;
}));
C_SaferCond ctx;
MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx,
mock_local_image_ctx,
mock_sync_point_handler,
mock_instance_watcher, &ctx);
request->get();
request->send();
// cancel the notify sync request once it starts
ASSERT_EQ(0, notify_sync_ctx.wait());
request->cancel();
request->put();
ASSERT_EQ(-ECANCELED, ctx.wait());
}
TEST_F(TestMockImageSync, CancelImageCopy) {
MockThreads mock_threads(m_threads);
librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx);
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockSyncPointHandler mock_sync_point_handler;
MockInstanceWatcher mock_instance_watcher;
MockImageCopyRequest mock_image_copy_request;
MockSyncPointCreateRequest mock_sync_point_create_request;
MockSyncPointPruneRequest mock_sync_point_prune_request;
m_sync_points = {{cls::rbd::UserSnapshotNamespace(), "snap1", "", boost::none}};
expect_get_snap_seqs(mock_sync_point_handler);
expect_get_sync_points(mock_sync_point_handler);
InSequence seq;
expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0);
expect_prune_sync_point(mock_sync_point_prune_request, false, 0);
expect_get_snap_id(mock_remote_image_ctx);
C_SaferCond image_copy_ctx;
EXPECT_CALL(mock_image_copy_request, send())
.WillOnce(Invoke([&image_copy_ctx]() {
image_copy_ctx.complete(0);
}));
expect_cancel_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
false);
EXPECT_CALL(mock_image_copy_request, cancel());
expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);
C_SaferCond ctx;
MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx,
mock_local_image_ctx,
mock_sync_point_handler,
mock_instance_watcher, &ctx);
request->get();
request->send();
// cancel the image copy once it starts
ASSERT_EQ(0, image_copy_ctx.wait());
request->cancel();
request->put();
m_threads->work_queue->queue(mock_image_copy_request.on_finish, 0);
ASSERT_EQ(-ECANCELED, ctx.wait());
}
TEST_F(TestMockImageSync, CancelAfterCopyImage) {
MockThreads mock_threads(m_threads);
librbd::MockTestImageCtx mock_remote_image_ctx(*m_remote_image_ctx);
librbd::MockTestImageCtx mock_local_image_ctx(*m_local_image_ctx);
MockSyncPointHandler mock_sync_point_handler;
MockInstanceWatcher mock_instance_watcher;
MockImageCopyRequest mock_image_copy_request;
MockSyncPointCreateRequest mock_sync_point_create_request;
MockSyncPointPruneRequest mock_sync_point_prune_request;
C_SaferCond ctx;
MockImageSync *request = create_request(mock_threads, mock_remote_image_ctx,
mock_local_image_ctx,
mock_sync_point_handler,
mock_instance_watcher, &ctx);
expect_get_snap_seqs(mock_sync_point_handler);
expect_get_sync_points(mock_sync_point_handler);
InSequence seq;
expect_notify_sync_request(mock_instance_watcher, mock_local_image_ctx.id, 0);
expect_create_sync_point(mock_local_image_ctx, mock_sync_point_create_request, 0);
expect_get_snap_id(mock_remote_image_ctx);
EXPECT_CALL(mock_image_copy_request, send())
.WillOnce((DoAll(InvokeWithoutArgs([request]() {
request->cancel();
}),
Invoke([this, &mock_image_copy_request]() {
m_threads->work_queue->queue(mock_image_copy_request.on_finish, 0);
}))));
expect_cancel_sync_request(mock_instance_watcher, mock_local_image_ctx.id,
false);
EXPECT_CALL(mock_image_copy_request, cancel());
expect_notify_sync_complete(mock_instance_watcher, mock_local_image_ctx.id);
request->send();
ASSERT_EQ(-ECANCELED, ctx.wait());
}
} // namespace mirror
} // namespace rbd
| 18,060 | 37.509595 | 121 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_InstanceReplayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include "tools/rbd_mirror/ImageReplayer.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/InstanceReplayer.h"
#include "tools/rbd_mirror/ServiceDaemon.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_replayer/Types.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
ceph::condition_variable timer_cond;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
template<>
struct ServiceDaemon<librbd::MockTestImageCtx> {
MOCK_METHOD4(add_or_update_namespace_attribute,
void(int64_t, const std::string&, const std::string&,
const service_daemon::AttributeValue&));
};
template<>
struct InstanceWatcher<librbd::MockTestImageCtx> {
};
template<>
struct ImageReplayer<librbd::MockTestImageCtx> {
static ImageReplayer* s_instance;
std::string global_image_id;
static ImageReplayer *create(
librados::IoCtx &local_io_ctx, const std::string &local_mirror_uuid,
const std::string &global_image_id,
Threads<librbd::MockTestImageCtx> *threads,
InstanceWatcher<librbd::MockTestImageCtx> *instance_watcher,
MirrorStatusUpdater<librbd::MockTestImageCtx>* local_status_updater,
journal::CacheManagerHandler *cache_manager_handler,
PoolMetaCache* pool_meta_cache) {
ceph_assert(s_instance != nullptr);
s_instance->global_image_id = global_image_id;
return s_instance;
}
ImageReplayer() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
virtual ~ImageReplayer() {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
MOCK_METHOD0(destroy, void());
MOCK_METHOD2(start, void(Context *, bool));
MOCK_METHOD2(stop, void(Context *, bool));
MOCK_METHOD1(restart, void(Context*));
MOCK_METHOD0(flush, void());
MOCK_METHOD1(print_status, void(Formatter *));
MOCK_METHOD1(add_peer, void(const Peer<librbd::MockTestImageCtx>& peer));
MOCK_METHOD0(get_global_image_id, const std::string &());
MOCK_METHOD0(get_local_image_id, const std::string &());
MOCK_METHOD0(is_running, bool());
MOCK_METHOD0(is_stopped, bool());
MOCK_METHOD0(is_blocklisted, bool());
MOCK_CONST_METHOD0(is_finished, bool());
MOCK_METHOD1(set_finished, void(bool));
MOCK_CONST_METHOD0(get_health_state, image_replayer::HealthState());
};
ImageReplayer<librbd::MockTestImageCtx>* ImageReplayer<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct MirrorStatusUpdater<librbd::MockTestImageCtx> {
};
} // namespace mirror
} // namespace rbd
// template definitions
#include "tools/rbd_mirror/InstanceReplayer.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::ReturnArg;
using ::testing::ReturnRef;
using ::testing::WithArg;
class TestMockInstanceReplayer : public TestMockFixture {
public:
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef ImageReplayer<librbd::MockTestImageCtx> MockImageReplayer;
typedef InstanceReplayer<librbd::MockTestImageCtx> MockInstanceReplayer;
typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher;
typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater;
typedef ServiceDaemon<librbd::MockTestImageCtx> MockServiceDaemon;
void expect_work_queue(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillOnce(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_add_event_after(MockThreads &mock_threads,
Context** timer_ctx = nullptr) {
EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
.WillOnce(DoAll(
WithArg<1>(Invoke([this, &mock_threads, timer_ctx](Context *ctx) {
ceph_assert(ceph_mutex_is_locked(mock_threads.timer_lock));
if (timer_ctx != nullptr) {
*timer_ctx = ctx;
mock_threads.timer_cond.notify_one();
} else {
m_threads->work_queue->queue(
new LambdaContext([&mock_threads, ctx](int) {
std::lock_guard timer_lock{mock_threads.timer_lock};
ctx->complete(0);
}), 0);
}
})),
ReturnArg<1>()));
}
void expect_cancel_event(MockThreads &mock_threads, bool canceled) {
EXPECT_CALL(*mock_threads.timer, cancel_event(_))
.WillOnce(Return(canceled));
}
};
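// acquire_image() starts a stopped, unfinished replayer; acquiring the
// same global image id again is treated as a re-acquire and restarts
// the existing replayer instead of creating a new one.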
TEST_F(TestMockInstanceReplayer, AcquireReleaseImage) {
MockThreads mock_threads(m_threads);
MockServiceDaemon mock_service_daemon;
MockMirrorStatusUpdater mock_status_updater;
MockInstanceWatcher mock_instance_watcher;
MockImageReplayer mock_image_replayer;
MockInstanceReplayer instance_replayer(
m_local_io_ctx, "local_mirror_uuid",
&mock_threads, &mock_service_daemon, &mock_status_updater, nullptr,
nullptr);
std::string global_image_id("global_image_id");
EXPECT_CALL(mock_image_replayer, get_global_image_id())
.WillRepeatedly(ReturnRef(global_image_id));
InSequence seq;
expect_work_queue(mock_threads);
Context *timer_ctx = nullptr;
expect_add_event_after(mock_threads, &timer_ctx);
instance_replayer.init();
instance_replayer.add_peer({"peer_uuid", m_remote_io_ctx, {}, nullptr});
// Acquire
C_SaferCond on_acquire;
EXPECT_CALL(mock_image_replayer, add_peer(_));
EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, start(_, false))
.WillOnce(CompleteContext(0));
expect_work_queue(mock_threads);
instance_replayer.acquire_image(&mock_instance_watcher, global_image_id,
&on_acquire);
ASSERT_EQ(0, on_acquire.wait());
// Release
C_SaferCond on_release;
EXPECT_CALL(mock_image_replayer, is_stopped())
.WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, is_running())
.WillOnce(Return(false));
expect_work_queue(mock_threads);
expect_add_event_after(mock_threads);
expect_work_queue(mock_threads);
EXPECT_CALL(mock_image_replayer, is_stopped())
.WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, is_running())
.WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, stop(_, false))
.WillOnce(CompleteContext(0));
expect_work_queue(mock_threads);
EXPECT_CALL(mock_image_replayer, is_stopped())
.WillOnce(Return(true));
expect_work_queue(mock_threads);
EXPECT_CALL(mock_image_replayer, destroy());
instance_replayer.release_image("global_image_id", &on_release);
ASSERT_EQ(0, on_release.wait());
expect_work_queue(mock_threads);
expect_cancel_event(mock_threads, true);
expect_work_queue(mock_threads);
instance_replayer.shut_down();
ASSERT_TRUE(timer_ctx != nullptr);
delete timer_ctx;
}
TEST_F(TestMockInstanceReplayer, RemoveFinishedImage) {
MockThreads mock_threads(m_threads);
MockServiceDaemon mock_service_daemon;
MockMirrorStatusUpdater mock_status_updater;
MockInstanceWatcher mock_instance_watcher;
MockImageReplayer mock_image_replayer;
MockInstanceReplayer instance_replayer(
m_local_io_ctx, "local_mirror_uuid",
&mock_threads, &mock_service_daemon, &mock_status_updater, nullptr,
nullptr);
std::string global_image_id("global_image_id");
EXPECT_CALL(mock_image_replayer, get_global_image_id())
.WillRepeatedly(ReturnRef(global_image_id));
InSequence seq;
expect_work_queue(mock_threads);
Context *timer_ctx1 = nullptr;
expect_add_event_after(mock_threads, &timer_ctx1);
instance_replayer.init();
instance_replayer.add_peer({"peer_uuid", m_remote_io_ctx, {}, nullptr});
// Acquire
C_SaferCond on_acquire;
EXPECT_CALL(mock_image_replayer, add_peer(_));
EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, start(_, false))
.WillOnce(CompleteContext(0));
expect_work_queue(mock_threads);
instance_replayer.acquire_image(&mock_instance_watcher, global_image_id,
&on_acquire);
ASSERT_EQ(0, on_acquire.wait());
// periodic start timer
Context *timer_ctx2 = nullptr;
expect_add_event_after(mock_threads, &timer_ctx2);
Context *start_image_replayers_ctx = nullptr;
EXPECT_CALL(*mock_threads.work_queue, queue(_, 0))
.WillOnce(Invoke([&start_image_replayers_ctx](Context *ctx, int r) {
start_image_replayers_ctx = ctx;
}));
ASSERT_TRUE(timer_ctx1 != nullptr);
{
std::lock_guard timer_locker{mock_threads.timer_lock};
timer_ctx1->complete(0);
}
// remove finished image replayer
EXPECT_CALL(mock_image_replayer, get_health_state()).WillOnce(
Return(image_replayer::HEALTH_STATE_OK));
EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, destroy());
EXPECT_CALL(mock_service_daemon,
add_or_update_namespace_attribute(_, _, _, _)).Times(3);
ASSERT_TRUE(start_image_replayers_ctx != nullptr);
start_image_replayers_ctx->complete(0);
// shut down
expect_work_queue(mock_threads);
expect_cancel_event(mock_threads, true);
expect_work_queue(mock_threads);
instance_replayer.shut_down();
ASSERT_TRUE(timer_ctx2 != nullptr);
delete timer_ctx2;
}
TEST_F(TestMockInstanceReplayer, Reacquire) {
MockThreads mock_threads(m_threads);
MockServiceDaemon mock_service_daemon;
MockMirrorStatusUpdater mock_status_updater;
MockInstanceWatcher mock_instance_watcher;
MockImageReplayer mock_image_replayer;
MockInstanceReplayer instance_replayer(
m_local_io_ctx, "local_mirror_uuid",
&mock_threads, &mock_service_daemon, &mock_status_updater, nullptr,
nullptr);
std::string global_image_id("global_image_id");
EXPECT_CALL(mock_image_replayer, get_global_image_id())
.WillRepeatedly(ReturnRef(global_image_id));
InSequence seq;
expect_work_queue(mock_threads);
Context *timer_ctx = nullptr;
expect_add_event_after(mock_threads, &timer_ctx);
instance_replayer.init();
instance_replayer.add_peer({"peer_uuid", m_remote_io_ctx, {}, nullptr});
// Acquire
EXPECT_CALL(mock_image_replayer, add_peer(_));
EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, is_blocklisted()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, is_finished()).WillOnce(Return(false));
EXPECT_CALL(mock_image_replayer, start(_, false))
.WillOnce(CompleteContext(0));
expect_work_queue(mock_threads);
C_SaferCond on_acquire1;
instance_replayer.acquire_image(&mock_instance_watcher, global_image_id,
&on_acquire1);
ASSERT_EQ(0, on_acquire1.wait());
// Re-acquire
EXPECT_CALL(mock_image_replayer, set_finished(false));
EXPECT_CALL(mock_image_replayer, restart(_))
.WillOnce(CompleteContext(0));
expect_work_queue(mock_threads);
C_SaferCond on_acquire2;
instance_replayer.acquire_image(&mock_instance_watcher, global_image_id,
&on_acquire2);
ASSERT_EQ(0, on_acquire2.wait());
expect_work_queue(mock_threads);
expect_cancel_event(mock_threads, true);
EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true));
expect_work_queue(mock_threads);
expect_work_queue(mock_threads);
EXPECT_CALL(mock_image_replayer, is_stopped()).WillOnce(Return(true));
EXPECT_CALL(mock_image_replayer, destroy());
instance_replayer.shut_down();
ASSERT_TRUE(timer_ctx != nullptr);
delete timer_ctx;
}
} // namespace mirror
} // namespace rbd
| 12,869 | 32.603133 | 103 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_InstanceWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librados/AioCompletionImpl.h"
#include "librbd/ManagedLock.h"
#include "test/librados/test_cxx.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/test_mock_fixture.h"
#include "tools/rbd_mirror/InstanceReplayer.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/Threads.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
template <>
struct ManagedLock<MockTestImageCtx> {
static ManagedLock* s_instance;
static ManagedLock *create(librados::IoCtx& ioctx,
librbd::AsioEngine& asio_engine,
const std::string& oid, librbd::Watcher *watcher,
managed_lock::Mode mode,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
ManagedLock() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
~ManagedLock() {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
MOCK_METHOD0(destroy, void());
MOCK_METHOD1(shut_down, void(Context *));
MOCK_METHOD1(acquire_lock, void(Context *));
MOCK_METHOD2(get_locker, void(managed_lock::Locker *, Context *));
MOCK_METHOD3(break_lock, void(const managed_lock::Locker &, bool, Context *));
};
ManagedLock<MockTestImageCtx> *ManagedLock<MockTestImageCtx>::s_instance = nullptr;
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct Threads<librbd::MockTestImageCtx> {
ceph::mutex &timer_lock;
SafeTimer *timer;
librbd::asio::ContextWQ *work_queue;
librbd::AsioEngine* asio_engine;
Threads(Threads<librbd::ImageCtx> *threads)
: timer_lock(threads->timer_lock), timer(threads->timer),
work_queue(threads->work_queue), asio_engine(threads->asio_engine) {
}
};
template <>
struct InstanceReplayer<librbd::MockTestImageCtx> {
MOCK_METHOD3(acquire_image, void(InstanceWatcher<librbd::MockTestImageCtx> *,
const std::string &, Context *));
MOCK_METHOD2(release_image, void(const std::string &, Context *));
MOCK_METHOD3(remove_peer_image, void(const std::string&, const std::string&,
Context *));
};
template <>
struct Throttler<librbd::MockTestImageCtx> {
static Throttler* s_instance;
Throttler() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
virtual ~Throttler() {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
MOCK_METHOD3(start_op, void(const std::string &, const std::string &,
Context *));
MOCK_METHOD2(finish_op, void(const std::string &, const std::string &));
MOCK_METHOD2(drain, void(const std::string &, int));
};
Throttler<librbd::MockTestImageCtx>* Throttler<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace mirror
} // namespace rbd
// template definitions
#include "tools/rbd_mirror/InstanceWatcher.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::WithArg;
class TestMockInstanceWatcher : public TestMockFixture {
public:
typedef librbd::ManagedLock<librbd::MockTestImageCtx> MockManagedLock;
typedef InstanceReplayer<librbd::MockTestImageCtx> MockInstanceReplayer;
typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
std::string m_instance_id;
std::string m_oid;
MockThreads *m_mock_threads;
void SetUp() override {
TestFixture::SetUp();
m_local_io_ctx.remove(RBD_MIRROR_LEADER);
EXPECT_EQ(0, m_local_io_ctx.create(RBD_MIRROR_LEADER, true));
m_instance_id = stringify(m_local_io_ctx.get_instance_id());
m_oid = RBD_MIRROR_INSTANCE_PREFIX + m_instance_id;
m_mock_threads = new MockThreads(m_threads);
}
void TearDown() override {
delete m_mock_threads;
TestMockFixture::TearDown();
}
void expect_register_watch(librados::MockTestMemIoCtxImpl &mock_io_ctx) {
EXPECT_CALL(mock_io_ctx, aio_watch(m_oid, _, _, _));
}
void expect_register_watch(librados::MockTestMemIoCtxImpl &mock_io_ctx,
const std::string &instance_id) {
std::string oid = RBD_MIRROR_INSTANCE_PREFIX + instance_id;
EXPECT_CALL(mock_io_ctx, aio_watch(oid, _, _, _));
}
void expect_unregister_watch(librados::MockTestMemIoCtxImpl &mock_io_ctx) {
EXPECT_CALL(mock_io_ctx, aio_unwatch(_, _));
}
void expect_register_instance(librados::MockTestMemIoCtxImpl &mock_io_ctx,
int r) {
EXPECT_CALL(mock_io_ctx, exec(RBD_MIRROR_LEADER, _, StrEq("rbd"),
StrEq("mirror_instances_add"), _, _, _, _))
.WillOnce(Return(r));
}
void expect_unregister_instance(librados::MockTestMemIoCtxImpl &mock_io_ctx,
int r) {
EXPECT_CALL(mock_io_ctx, exec(RBD_MIRROR_LEADER, _, StrEq("rbd"),
StrEq("mirror_instances_remove"), _, _, _, _))
.WillOnce(Return(r));
}
void expect_acquire_lock(MockManagedLock &mock_managed_lock, int r) {
EXPECT_CALL(mock_managed_lock, acquire_lock(_))
.WillOnce(CompleteContext(r));
}
void expect_release_lock(MockManagedLock &mock_managed_lock, int r) {
EXPECT_CALL(mock_managed_lock, shut_down(_)).WillOnce(CompleteContext(r));
}
void expect_destroy_lock(MockManagedLock &mock_managed_lock,
Context *ctx = nullptr) {
EXPECT_CALL(mock_managed_lock, destroy())
.WillOnce(Invoke([ctx]() {
if (ctx != nullptr) {
ctx->complete(0);
}
}));
}
void expect_get_locker(MockManagedLock &mock_managed_lock,
const librbd::managed_lock::Locker &locker, int r) {
EXPECT_CALL(mock_managed_lock, get_locker(_, _))
.WillOnce(Invoke([r, locker](librbd::managed_lock::Locker *out,
Context *ctx) {
if (r == 0) {
*out = locker;
}
ctx->complete(r);
}));
}
void expect_break_lock(MockManagedLock &mock_managed_lock,
const librbd::managed_lock::Locker &locker, int r) {
EXPECT_CALL(mock_managed_lock, break_lock(locker, true, _))
.WillOnce(WithArg<2>(CompleteContext(r)));
}
};
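// The tests below use strictly ordered expectations (InSequence) that mirror
// the watcher's lifecycle: register instance -> register watch -> acquire
// lock on init, and the reverse sequence on shut_down.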
TEST_F(TestMockInstanceWatcher, InitShutdown) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr,
m_instance_id);
InSequence seq;
// Init
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher->init());
// Shutdown
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
instance_watcher->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
TEST_F(TestMockInstanceWatcher, InitError) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr,
m_instance_id);
InSequence seq;
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, -EINVAL);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
ASSERT_EQ(-EINVAL, instance_watcher->init());
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
TEST_F(TestMockInstanceWatcher, ShutdownError) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr,
m_instance_id);
InSequence seq;
// Init
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher->init());
// Shutdown
expect_release_lock(mock_managed_lock, -EINVAL);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
instance_watcher->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
TEST_F(TestMockInstanceWatcher, Remove) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
librbd::managed_lock::Locker
locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
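  // remove_instance is expected to look up the stale lock owner, break the
  // lock, and only then unregister the instance and destroy the lock.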
InSequence seq;
expect_get_locker(mock_managed_lock, locker, 0);
expect_break_lock(mock_managed_lock, locker, 0);
expect_unregister_instance(mock_io_ctx, 0);
C_SaferCond on_destroy;
expect_destroy_lock(mock_managed_lock, &on_destroy);
C_SaferCond on_remove;
MockInstanceWatcher::remove_instance(m_local_io_ctx,
*m_mock_threads->asio_engine,
"instance_id", &on_remove);
ASSERT_EQ(0, on_remove.wait());
ASSERT_EQ(0, on_destroy.wait());
}
TEST_F(TestMockInstanceWatcher, RemoveNoent) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
InSequence seq;
expect_get_locker(mock_managed_lock, librbd::managed_lock::Locker(), -ENOENT);
expect_unregister_instance(mock_io_ctx, 0);
C_SaferCond on_destroy;
expect_destroy_lock(mock_managed_lock, &on_destroy);
C_SaferCond on_remove;
MockInstanceWatcher::remove_instance(m_local_io_ctx,
*m_mock_threads->asio_engine,
"instance_id", &on_remove);
ASSERT_EQ(0, on_remove.wait());
ASSERT_EQ(0, on_destroy.wait());
}
TEST_F(TestMockInstanceWatcher, ImageAcquireRelease) {
MockManagedLock mock_managed_lock;
librados::IoCtx& io_ctx1 = m_local_io_ctx;
std::string instance_id1 = m_instance_id;
librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1));
MockInstanceReplayer mock_instance_replayer1;
auto instance_watcher1 = MockInstanceWatcher::create(
io_ctx1, *m_mock_threads->asio_engine, &mock_instance_replayer1, nullptr);
librados::Rados cluster;
librados::IoCtx io_ctx2;
EXPECT_EQ("", connect_cluster_pp(cluster));
EXPECT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx2));
std::string instance_id2 = stringify(io_ctx2.get_instance_id());
librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2));
MockInstanceReplayer mock_instance_replayer2;
auto instance_watcher2 = MockInstanceWatcher::create(
io_ctx2, *m_mock_threads->asio_engine, &mock_instance_replayer2, nullptr);
InSequence seq;
// Init instance watcher 1
expect_register_instance(mock_io_ctx1, 0);
expect_register_watch(mock_io_ctx1, instance_id1);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher1->init());
// Init instance watcher 2
expect_register_instance(mock_io_ctx2, 0);
expect_register_watch(mock_io_ctx2, instance_id2);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher2->init());
// Acquire Image on the same instance
EXPECT_CALL(mock_instance_replayer1, acquire_image(instance_watcher1, "gid",
_))
.WillOnce(WithArg<2>(CompleteContext(0)));
C_SaferCond on_acquire1;
instance_watcher1->notify_image_acquire(instance_id1, "gid", &on_acquire1);
ASSERT_EQ(0, on_acquire1.wait());
// Acquire Image on the other instance
EXPECT_CALL(mock_instance_replayer2, acquire_image(instance_watcher2, "gid",
_))
.WillOnce(WithArg<2>(CompleteContext(0)));
C_SaferCond on_acquire2;
instance_watcher1->notify_image_acquire(instance_id2, "gid", &on_acquire2);
ASSERT_EQ(0, on_acquire2.wait());
// Release Image on the same instance
EXPECT_CALL(mock_instance_replayer1, release_image("gid", _))
.WillOnce(WithArg<1>(CompleteContext(0)));
C_SaferCond on_release1;
instance_watcher1->notify_image_release(instance_id1, "gid", &on_release1);
ASSERT_EQ(0, on_release1.wait());
// Release Image on the other instance
EXPECT_CALL(mock_instance_replayer2, release_image("gid", _))
.WillOnce(WithArg<1>(CompleteContext(0)));
C_SaferCond on_release2;
instance_watcher1->notify_image_release(instance_id2, "gid", &on_release2);
ASSERT_EQ(0, on_release2.wait());
// Shutdown instance watcher 1
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx1);
expect_unregister_instance(mock_io_ctx1, 0);
instance_watcher1->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher1;
// Shutdown instance watcher 2
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx2);
expect_unregister_instance(mock_io_ctx2, 0);
instance_watcher2->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher2;
}
TEST_F(TestMockInstanceWatcher, PeerImageRemoved) {
MockManagedLock mock_managed_lock;
librados::IoCtx& io_ctx1 = m_local_io_ctx;
std::string instance_id1 = m_instance_id;
librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1));
MockInstanceReplayer mock_instance_replayer1;
auto instance_watcher1 = MockInstanceWatcher::create(
io_ctx1, *m_mock_threads->asio_engine, &mock_instance_replayer1, nullptr);
librados::Rados cluster;
librados::IoCtx io_ctx2;
EXPECT_EQ("", connect_cluster_pp(cluster));
EXPECT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx2));
std::string instance_id2 = stringify(io_ctx2.get_instance_id());
librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2));
MockInstanceReplayer mock_instance_replayer2;
auto instance_watcher2 = MockInstanceWatcher::create(
io_ctx2, *m_mock_threads->asio_engine, &mock_instance_replayer2, nullptr);
InSequence seq;
// Init instance watcher 1
expect_register_instance(mock_io_ctx1, 0);
expect_register_watch(mock_io_ctx1, instance_id1);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher1->init());
// Init instance watcher 2
expect_register_instance(mock_io_ctx2, 0);
expect_register_watch(mock_io_ctx2, instance_id2);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher2->init());
// Peer Image Removed on the same instance
EXPECT_CALL(mock_instance_replayer1, remove_peer_image("gid", "uuid", _))
.WillOnce(WithArg<2>(CompleteContext(0)));
C_SaferCond on_removed1;
instance_watcher1->notify_peer_image_removed(instance_id1, "gid", "uuid",
&on_removed1);
ASSERT_EQ(0, on_removed1.wait());
// Peer Image Removed on the other instance
EXPECT_CALL(mock_instance_replayer2, remove_peer_image("gid", "uuid", _))
.WillOnce(WithArg<2>(CompleteContext(0)));
C_SaferCond on_removed2;
instance_watcher1->notify_peer_image_removed(instance_id2, "gid", "uuid",
&on_removed2);
ASSERT_EQ(0, on_removed2.wait());
// Shutdown instance watcher 1
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx1);
expect_unregister_instance(mock_io_ctx1, 0);
instance_watcher1->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher1;
// Shutdown instance watcher 2
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx2);
expect_unregister_instance(mock_io_ctx2, 0);
instance_watcher2->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher2;
}
TEST_F(TestMockInstanceWatcher, ImageAcquireReleaseCancel) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr,
m_instance_id);
InSequence seq;
// Init
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher->init());
// Send Acquire Image and cancel
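  // The mocked aio_notify defers its completion to a queued context that
  // first cancels the in-flight notify request and then finishes the AIO
  // with -ETIMEDOUT; the watcher should surface -ECANCELED to the caller.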
EXPECT_CALL(mock_io_ctx, aio_notify(_, _, _, _, _))
.WillOnce(Invoke(
[this, instance_watcher, &mock_io_ctx](
const std::string& o, librados::AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) {
c->get();
auto ctx = new LambdaContext(
[instance_watcher, &mock_io_ctx, c, pbl](int r) {
instance_watcher->cancel_notify_requests("other");
encode(librbd::watcher::NotifyResponse(), *pbl);
mock_io_ctx.get_mock_rados_client()->
finish_aio_completion(c, -ETIMEDOUT);
});
m_threads->work_queue->queue(ctx, 0);
}));
C_SaferCond on_acquire;
instance_watcher->notify_image_acquire("other", "gid", &on_acquire);
ASSERT_EQ(-ECANCELED, on_acquire.wait());
// Send Release Image and cancel
EXPECT_CALL(mock_io_ctx, aio_notify(_, _, _, _, _))
.WillOnce(Invoke(
[this, instance_watcher, &mock_io_ctx](
const std::string& o, librados::AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) {
c->get();
auto ctx = new LambdaContext(
[instance_watcher, &mock_io_ctx, c, pbl](int r) {
instance_watcher->cancel_notify_requests("other");
encode(librbd::watcher::NotifyResponse(), *pbl);
mock_io_ctx.get_mock_rados_client()->
finish_aio_completion(c, -ETIMEDOUT);
});
m_threads->work_queue->queue(ctx, 0);
}));
C_SaferCond on_release;
instance_watcher->notify_image_release("other", "gid", &on_release);
ASSERT_EQ(-ECANCELED, on_release.wait());
// Shutdown
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
instance_watcher->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
TEST_F(TestMockInstanceWatcher, PeerImageAcquireWatchDNE) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
MockInstanceReplayer mock_instance_replayer;
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, &mock_instance_replayer,
nullptr, m_instance_id);
InSequence seq;
// Init
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher->init());
// Acquire image on dead (blocklisted) instance
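  // No watch exists for "dead instance", so the notify is expected to fail
  // fast with -ENOENT instead of blocking.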
C_SaferCond on_acquire;
instance_watcher->notify_image_acquire("dead instance", "global image id",
&on_acquire);
ASSERT_EQ(-ENOENT, on_acquire.wait());
// Shutdown
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
instance_watcher->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
TEST_F(TestMockInstanceWatcher, PeerImageReleaseWatchDNE) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
MockInstanceReplayer mock_instance_replayer;
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, &mock_instance_replayer,
nullptr, m_instance_id);
InSequence seq;
// Init
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher->init());
// Release image on dead (blocklisted) instance
  C_SaferCond on_release;
  instance_watcher->notify_image_release("dead instance", "global image id",
                                         &on_release);
  ASSERT_EQ(-ENOENT, on_release.wait());
// Shutdown
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
instance_watcher->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
TEST_F(TestMockInstanceWatcher, PeerImageRemovedCancel) {
MockManagedLock mock_managed_lock;
librados::MockTestMemIoCtxImpl &mock_io_ctx(get_mock_io_ctx(m_local_io_ctx));
auto instance_watcher = new MockInstanceWatcher(
m_local_io_ctx, *m_mock_threads->asio_engine, nullptr, nullptr,
m_instance_id);
InSequence seq;
// Init
expect_register_instance(mock_io_ctx, 0);
expect_register_watch(mock_io_ctx);
expect_acquire_lock(mock_managed_lock, 0);
ASSERT_EQ(0, instance_watcher->init());
  // Send Peer Image Removed and cancel
EXPECT_CALL(mock_io_ctx, aio_notify(_, _, _, _, _))
.WillOnce(Invoke(
[this, instance_watcher, &mock_io_ctx](
const std::string& o, librados::AioCompletionImpl *c,
bufferlist& bl, uint64_t timeout_ms, bufferlist *pbl) {
c->get();
auto ctx = new LambdaContext(
[instance_watcher, &mock_io_ctx, c, pbl](int r) {
instance_watcher->cancel_notify_requests("other");
encode(librbd::watcher::NotifyResponse(), *pbl);
mock_io_ctx.get_mock_rados_client()->
finish_aio_completion(c, -ETIMEDOUT);
});
m_threads->work_queue->queue(ctx, 0);
}));
  C_SaferCond on_removed;
  instance_watcher->notify_peer_image_removed("other", "gid", "uuid",
                                              &on_removed);
  ASSERT_EQ(-ECANCELED, on_removed.wait());
// Shutdown
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx);
expect_unregister_instance(mock_io_ctx, 0);
instance_watcher->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher;
}
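// Fixture wiring two instance watchers to one throttler mock: watcher 1
// starts out as leader (handle_acquire_leader) and watcher 2 is told about
// it via handle_update_leader, so sync requests issued on watcher 2 are
// proxied to the leader over watch/notify.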
class TestMockInstanceWatcher_NotifySync : public TestMockInstanceWatcher {
public:
typedef Throttler<librbd::MockTestImageCtx> MockThrottler;
MockManagedLock mock_managed_lock;
MockThrottler mock_image_sync_throttler;
std::string instance_id1;
std::string instance_id2;
librados::Rados cluster;
librados::IoCtx io_ctx2;
MockInstanceWatcher *instance_watcher1;
MockInstanceWatcher *instance_watcher2;
void SetUp() override {
TestMockInstanceWatcher::SetUp();
instance_id1 = m_instance_id;
librados::IoCtx& io_ctx1 = m_local_io_ctx;
librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1));
instance_watcher1 = MockInstanceWatcher::create(io_ctx1,
*m_mock_threads->asio_engine,
nullptr,
&mock_image_sync_throttler);
EXPECT_EQ("", connect_cluster_pp(cluster));
EXPECT_EQ(0, cluster.ioctx_create(_local_pool_name.c_str(), io_ctx2));
instance_id2 = stringify(io_ctx2.get_instance_id());
librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2));
instance_watcher2 = MockInstanceWatcher::create(io_ctx2,
*m_mock_threads->asio_engine,
nullptr,
&mock_image_sync_throttler);
InSequence seq;
// Init instance watcher 1 (leader)
expect_register_instance(mock_io_ctx1, 0);
expect_register_watch(mock_io_ctx1, instance_id1);
expect_acquire_lock(mock_managed_lock, 0);
EXPECT_EQ(0, instance_watcher1->init());
instance_watcher1->handle_acquire_leader();
// Init instance watcher 2
expect_register_instance(mock_io_ctx2, 0);
expect_register_watch(mock_io_ctx2, instance_id2);
expect_acquire_lock(mock_managed_lock, 0);
EXPECT_EQ(0, instance_watcher2->init());
instance_watcher2->handle_update_leader(instance_id1);
}
void TearDown() override {
librados::IoCtx& io_ctx1 = m_local_io_ctx;
librados::MockTestMemIoCtxImpl &mock_io_ctx1(get_mock_io_ctx(io_ctx1));
librados::MockTestMemIoCtxImpl &mock_io_ctx2(get_mock_io_ctx(io_ctx2));
InSequence seq;
expect_throttler_drain();
instance_watcher1->handle_release_leader();
// Shutdown instance watcher 1
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx1);
expect_unregister_instance(mock_io_ctx1, 0);
instance_watcher1->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher1;
// Shutdown instance watcher 2
expect_release_lock(mock_managed_lock, 0);
expect_unregister_watch(mock_io_ctx2);
expect_unregister_instance(mock_io_ctx2, 0);
instance_watcher2->shut_down();
expect_destroy_lock(mock_managed_lock);
delete instance_watcher2;
TestMockInstanceWatcher::TearDown();
}
void expect_throttler_start_op(const std::string &sync_id,
Context *on_call = nullptr,
Context **on_start_ctx = nullptr) {
EXPECT_CALL(mock_image_sync_throttler, start_op("", sync_id, _))
.WillOnce(Invoke([on_call, on_start_ctx] (const std::string &,
const std::string &,
Context *ctx) {
if (on_start_ctx != nullptr) {
*on_start_ctx = ctx;
} else {
ctx->complete(0);
}
if (on_call != nullptr) {
on_call->complete(0);
}
}));
}
void expect_throttler_finish_op(const std::string &sync_id,
Context *on_finish) {
EXPECT_CALL(mock_image_sync_throttler, finish_op("", "sync_id"))
.WillOnce(Invoke([on_finish](const std::string &, const std::string &) {
on_finish->complete(0);
}));
}
void expect_throttler_drain() {
EXPECT_CALL(mock_image_sync_throttler, drain("", -ESTALE));
}
};
TEST_F(TestMockInstanceWatcher_NotifySync, StartStopOnLeader) {
InSequence seq;
expect_throttler_start_op("sync_id");
C_SaferCond on_start;
instance_watcher1->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start.wait());
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher1->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
}
TEST_F(TestMockInstanceWatcher_NotifySync, CancelStartedOnLeader) {
InSequence seq;
expect_throttler_start_op("sync_id");
C_SaferCond on_start;
instance_watcher1->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start.wait());
ASSERT_FALSE(instance_watcher1->cancel_sync_request("sync_id"));
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher1->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
}
TEST_F(TestMockInstanceWatcher_NotifySync, StartStopOnNonLeader) {
InSequence seq;
expect_throttler_start_op("sync_id");
C_SaferCond on_start;
instance_watcher2->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start.wait());
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher2->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
}
TEST_F(TestMockInstanceWatcher_NotifySync, CancelStartedOnNonLeader) {
InSequence seq;
expect_throttler_start_op("sync_id");
C_SaferCond on_start;
instance_watcher2->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start.wait());
ASSERT_FALSE(instance_watcher2->cancel_sync_request("sync_id"));
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher2->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
}
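// When expect_throttler_start_op captures an on_start_ctx, the throttled
// start is left pending so the test can complete it by hand and exercise
// the timeout/cancel paths.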
TEST_F(TestMockInstanceWatcher_NotifySync, CancelWaitingOnNonLeader) {
InSequence seq;
C_SaferCond on_start_op_called;
Context *on_start_ctx;
expect_throttler_start_op("sync_id", &on_start_op_called,
&on_start_ctx);
C_SaferCond on_start;
instance_watcher2->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start_op_called.wait());
ASSERT_TRUE(instance_watcher2->cancel_sync_request("sync_id"));
// emulate watcher timeout
on_start_ctx->complete(-ETIMEDOUT);
ASSERT_EQ(-ECANCELED, on_start.wait());
}
TEST_F(TestMockInstanceWatcher_NotifySync, InFlightPrevNotification) {
// start sync when previous notification is still in flight
InSequence seq;
expect_throttler_start_op("sync_id");
C_SaferCond on_start1;
instance_watcher2->notify_sync_request("sync_id", &on_start1);
ASSERT_EQ(0, on_start1.wait());
C_SaferCond on_start2;
EXPECT_CALL(mock_image_sync_throttler, finish_op("", "sync_id"))
.WillOnce(Invoke([this, &on_start2](const std::string &,
const std::string &) {
instance_watcher2->notify_sync_request("sync_id", &on_start2);
}));
expect_throttler_start_op("sync_id");
instance_watcher2->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_start2.wait());
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher2->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
}
TEST_F(TestMockInstanceWatcher_NotifySync, NoInFlightReleaseAcquireLeader) {
InSequence seq;
expect_throttler_drain();
instance_watcher1->handle_release_leader();
instance_watcher1->handle_acquire_leader();
}
TEST_F(TestMockInstanceWatcher_NotifySync, StartedOnLeaderReleaseLeader) {
InSequence seq;
expect_throttler_drain();
instance_watcher1->handle_release_leader();
instance_watcher2->handle_acquire_leader();
expect_throttler_start_op("sync_id");
C_SaferCond on_start;
instance_watcher2->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start.wait());
expect_throttler_drain();
instance_watcher2->handle_release_leader();
instance_watcher2->notify_sync_complete("sync_id");
instance_watcher1->handle_acquire_leader();
}
TEST_F(TestMockInstanceWatcher_NotifySync, WaitingOnLeaderReleaseLeader) {
InSequence seq;
C_SaferCond on_start_op_called;
Context *on_start_ctx;
expect_throttler_start_op("sync_id", &on_start_op_called, &on_start_ctx);
C_SaferCond on_start;
instance_watcher1->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start_op_called.wait());
expect_throttler_drain();
instance_watcher1->handle_release_leader();
// emulate throttler queue drain on leader release
on_start_ctx->complete(-ESTALE);
expect_throttler_start_op("sync_id");
instance_watcher2->handle_acquire_leader();
instance_watcher1->handle_update_leader(instance_id2);
ASSERT_EQ(0, on_start.wait());
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher1->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
expect_throttler_drain();
instance_watcher2->handle_release_leader();
instance_watcher1->handle_acquire_leader();
}
TEST_F(TestMockInstanceWatcher_NotifySync, StartedOnNonLeaderAcquireLeader) {
InSequence seq;
expect_throttler_drain();
instance_watcher1->handle_release_leader();
instance_watcher2->handle_acquire_leader();
instance_watcher1->handle_update_leader(instance_id2);
expect_throttler_start_op("sync_id");
C_SaferCond on_start;
instance_watcher1->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start.wait());
expect_throttler_drain();
instance_watcher2->handle_release_leader();
instance_watcher1->handle_acquire_leader();
instance_watcher2->handle_update_leader(instance_id1);
instance_watcher1->notify_sync_complete("sync_id");
}
TEST_F(TestMockInstanceWatcher_NotifySync, WaitingOnNonLeaderAcquireLeader) {
InSequence seq;
C_SaferCond on_start_op_called;
Context *on_start_ctx;
expect_throttler_start_op("sync_id", &on_start_op_called,
&on_start_ctx);
C_SaferCond on_start;
instance_watcher2->notify_sync_request("sync_id", &on_start);
ASSERT_EQ(0, on_start_op_called.wait());
expect_throttler_drain();
instance_watcher1->handle_release_leader();
// emulate throttler queue drain on leader release
on_start_ctx->complete(-ESTALE);
EXPECT_CALL(mock_image_sync_throttler, start_op("", "sync_id", _))
.WillOnce(WithArg<2>(CompleteContext(0)));
instance_watcher2->handle_acquire_leader();
instance_watcher1->handle_update_leader(instance_id2);
ASSERT_EQ(0, on_start.wait());
C_SaferCond on_finish;
expect_throttler_finish_op("sync_id", &on_finish);
instance_watcher2->notify_sync_complete("sync_id");
ASSERT_EQ(0, on_finish.wait());
expect_throttler_drain();
instance_watcher2->handle_release_leader();
instance_watcher1->handle_acquire_leader();
}
} // namespace mirror
} // namespace rbd
| 34,516 | 33.936235 | 95 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_LeaderWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsioEngine.h"
#include "librbd/Utils.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/test_mock_fixture.h"
#include "tools/rbd_mirror/LeaderWatcher.h"
#include "tools/rbd_mirror/Threads.h"
using librbd::util::create_async_context_callback;
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
struct MockManagedLock {
static MockManagedLock *s_instance;
static MockManagedLock &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
MockManagedLock() {
s_instance = this;
}
bool m_release_lock_on_shutdown = false;
Context *m_on_released = nullptr;
MOCK_METHOD0(construct, void());
MOCK_METHOD0(destroy, void());
MOCK_CONST_METHOD0(is_lock_owner, bool());
MOCK_METHOD1(shut_down, void(Context *));
MOCK_METHOD1(try_acquire_lock, void(Context *));
MOCK_METHOD1(release_lock, void(Context *));
MOCK_METHOD0(reacquire_lock, void());
MOCK_METHOD3(break_lock, void(const managed_lock::Locker &, bool, Context *));
MOCK_METHOD2(get_locker, void(managed_lock::Locker *, Context *));
MOCK_METHOD0(set_state_post_acquiring, void());
MOCK_CONST_METHOD0(is_shutdown, bool());
MOCK_CONST_METHOD0(is_state_post_acquiring, bool());
MOCK_CONST_METHOD0(is_state_pre_releasing, bool());
MOCK_CONST_METHOD0(is_state_locked, bool());
};
MockManagedLock *MockManagedLock::s_instance = nullptr;
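// Shim specialization: forwards every call to the MockManagedLock singleton
// while re-creating the real lock's post-acquire/pre-release/post-release
// handler plumbing that LeaderWatcher relies on.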
template <>
struct ManagedLock<MockTestImageCtx> {
ManagedLock(librados::IoCtx& ioctx, librbd::AsioEngine& asio_engine,
const std::string& oid, librbd::Watcher *watcher,
managed_lock::Mode mode, bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds)
: m_work_queue(asio_engine.get_work_queue()) {
MockManagedLock::get_instance().construct();
}
virtual ~ManagedLock() {
MockManagedLock::get_instance().destroy();
}
librbd::asio::ContextWQ *m_work_queue;
mutable ceph::mutex m_lock = ceph::make_mutex("ManagedLock::m_lock");
bool is_lock_owner() const {
return MockManagedLock::get_instance().is_lock_owner();
}
void shut_down(Context *on_shutdown) {
if (MockManagedLock::get_instance().m_release_lock_on_shutdown) {
on_shutdown = new LambdaContext(
[this, on_shutdown](int r) {
MockManagedLock::get_instance().m_release_lock_on_shutdown = false;
shut_down(on_shutdown);
});
release_lock(on_shutdown);
return;
}
MockManagedLock::get_instance().shut_down(on_shutdown);
}
void try_acquire_lock(Context *on_acquired) {
Context *post_acquire_ctx = create_async_context_callback(
m_work_queue, new LambdaContext(
[this, on_acquired](int r) {
post_acquire_lock_handler(r, on_acquired);
}));
MockManagedLock::get_instance().try_acquire_lock(post_acquire_ctx);
}
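  // release_lock chains pre-release -> release -> post-release contexts to
  // mimic the real lock's sequencing; m_on_released stashes the caller's
  // context so tests can splice in an observer (see expect_release_lock).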
void release_lock(Context *on_released) {
ceph_assert(MockManagedLock::get_instance().m_on_released == nullptr);
MockManagedLock::get_instance().m_on_released = on_released;
Context *post_release_ctx = new LambdaContext(
[this](int r) {
ceph_assert(MockManagedLock::get_instance().m_on_released != nullptr);
post_release_lock_handler(false, r,
MockManagedLock::get_instance().m_on_released);
MockManagedLock::get_instance().m_on_released = nullptr;
});
Context *release_ctx = new LambdaContext(
[post_release_ctx](int r) {
if (r < 0) {
MockManagedLock::get_instance().m_on_released->complete(r);
} else {
MockManagedLock::get_instance().release_lock(post_release_ctx);
}
});
Context *pre_release_ctx = new LambdaContext(
[this, release_ctx](int r) {
bool shutting_down =
MockManagedLock::get_instance().m_release_lock_on_shutdown;
pre_release_lock_handler(shutting_down, release_ctx);
});
m_work_queue->queue(pre_release_ctx, 0);
}
void reacquire_lock(Context* on_finish) {
MockManagedLock::get_instance().reacquire_lock();
}
void get_locker(managed_lock::Locker *locker, Context *on_finish) {
MockManagedLock::get_instance().get_locker(locker, on_finish);
}
void break_lock(const managed_lock::Locker &locker, bool force_break_lock,
Context *on_finish) {
MockManagedLock::get_instance().break_lock(locker, force_break_lock,
on_finish);
}
void set_state_post_acquiring() {
MockManagedLock::get_instance().set_state_post_acquiring();
}
bool is_shutdown() const {
return MockManagedLock::get_instance().is_shutdown();
}
bool is_state_post_acquiring() const {
return MockManagedLock::get_instance().is_state_post_acquiring();
}
bool is_state_pre_releasing() const {
return MockManagedLock::get_instance().is_state_pre_releasing();
}
bool is_state_locked() const {
return MockManagedLock::get_instance().is_state_locked();
}
virtual void post_acquire_lock_handler(int r, Context *on_finish) = 0;
virtual void pre_release_lock_handler(bool shutting_down,
Context *on_finish) = 0;
virtual void post_release_lock_handler(bool shutting_down, int r,
Context *on_finish) = 0;
};
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct Threads<librbd::MockTestImageCtx> {
ceph::mutex &timer_lock;
SafeTimer *timer;
librbd::asio::ContextWQ *work_queue;
librbd::AsioEngine* asio_engine;
Threads(Threads<librbd::ImageCtx> *threads)
: timer_lock(threads->timer_lock), timer(threads->timer),
work_queue(threads->work_queue), asio_engine(threads->asio_engine) {
}
};
template <>
struct Instances<librbd::MockTestImageCtx> {
static Instances* s_instance;
static Instances *create(Threads<librbd::MockTestImageCtx> *threads,
librados::IoCtx &ioctx,
const std::string& instance_id,
instances::Listener&) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
Instances() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
~Instances() {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
MOCK_METHOD0(destroy, void());
MOCK_METHOD1(init, void(Context *));
MOCK_METHOD1(shut_down, void(Context *));
MOCK_METHOD1(acked, void(const std::vector<std::string> &));
MOCK_METHOD0(unblock_listener, void());
};
Instances<librbd::MockTestImageCtx> *Instances<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace mirror
} // namespace rbd
// template definitions
#include "tools/rbd_mirror/LeaderWatcher.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using librbd::MockManagedLock;
struct MockListener : public leader_watcher::Listener {
static MockListener* s_instance;
MockListener() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
~MockListener() override {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
MOCK_METHOD1(post_acquire_handler, void(Context *));
MOCK_METHOD1(pre_release_handler, void(Context *));
MOCK_METHOD1(update_leader_handler, void(const std::string &));
MOCK_METHOD1(handle_instances_added, void(const InstanceIds&));
MOCK_METHOD1(handle_instances_removed, void(const InstanceIds&));
};
MockListener *MockListener::s_instance = nullptr;
class TestMockLeaderWatcher : public TestMockFixture {
public:
typedef Instances<librbd::MockTestImageCtx> MockInstances;
typedef LeaderWatcher<librbd::MockTestImageCtx> MockLeaderWatcher;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
void SetUp() override {
TestMockFixture::SetUp();
m_mock_threads = new MockThreads(m_threads);
}
void TearDown() override {
delete m_mock_threads;
TestMockFixture::TearDown();
}
void expect_construct(MockManagedLock &mock_managed_lock) {
EXPECT_CALL(mock_managed_lock, construct());
}
void expect_destroy(MockManagedLock &mock_managed_lock) {
EXPECT_CALL(mock_managed_lock, destroy());
}
void expect_is_lock_owner(MockManagedLock &mock_managed_lock, bool owner) {
EXPECT_CALL(mock_managed_lock, is_lock_owner())
.WillOnce(Return(owner));
}
void expect_shut_down(MockManagedLock &mock_managed_lock,
bool release_lock_on_shutdown, int r) {
mock_managed_lock.m_release_lock_on_shutdown = release_lock_on_shutdown;
EXPECT_CALL(mock_managed_lock, shut_down(_))
.WillOnce(CompleteContext(r));
}
void expect_try_acquire_lock(MockManagedLock &mock_managed_lock, int r) {
EXPECT_CALL(mock_managed_lock, try_acquire_lock(_))
.WillOnce(CompleteContext(r));
if (r == 0) {
expect_set_state_post_acquiring(mock_managed_lock);
}
}
void expect_release_lock(MockManagedLock &mock_managed_lock, int r,
Context *on_finish = nullptr) {
EXPECT_CALL(mock_managed_lock, release_lock(_))
.WillOnce(Invoke([on_finish, &mock_managed_lock, r](Context *ctx) {
if (on_finish != nullptr) {
auto on_released = mock_managed_lock.m_on_released;
ceph_assert(on_released != nullptr);
mock_managed_lock.m_on_released = new LambdaContext(
[on_released, on_finish](int r) {
on_released->complete(r);
on_finish->complete(r);
});
}
ctx->complete(r);
}));
}
void expect_get_locker(MockManagedLock &mock_managed_lock,
const librbd::managed_lock::Locker &locker, int r) {
EXPECT_CALL(mock_managed_lock, get_locker(_, _))
.WillOnce(Invoke([r, locker](librbd::managed_lock::Locker *out,
Context *ctx) {
if (r == 0) {
*out = locker;
}
ctx->complete(r);
}));
}
void expect_break_lock(MockManagedLock &mock_managed_lock,
const librbd::managed_lock::Locker &locker, int r,
Context *on_finish) {
EXPECT_CALL(mock_managed_lock, break_lock(locker, true, _))
.WillOnce(Invoke([on_finish, r](const librbd::managed_lock::Locker &,
bool, Context *ctx) {
ctx->complete(r);
on_finish->complete(0);
}));
}
void expect_set_state_post_acquiring(MockManagedLock &mock_managed_lock) {
EXPECT_CALL(mock_managed_lock, set_state_post_acquiring());
}
void expect_is_shutdown(MockManagedLock &mock_managed_lock) {
EXPECT_CALL(mock_managed_lock, is_shutdown())
.Times(AtLeast(0)).WillRepeatedly(Return(false));
}
void expect_is_leader(MockManagedLock &mock_managed_lock, bool post_acquiring,
bool locked) {
EXPECT_CALL(mock_managed_lock, is_state_post_acquiring())
.WillOnce(Return(post_acquiring));
if (!post_acquiring) {
EXPECT_CALL(mock_managed_lock, is_state_locked())
.WillOnce(Return(locked));
}
}
void expect_is_leader(MockManagedLock &mock_managed_lock) {
EXPECT_CALL(mock_managed_lock, is_state_post_acquiring())
.Times(AtLeast(0)).WillRepeatedly(Return(false));
EXPECT_CALL(mock_managed_lock, is_state_locked())
.Times(AtLeast(0)).WillRepeatedly(Return(false));
EXPECT_CALL(mock_managed_lock, is_state_pre_releasing())
.Times(AtLeast(0)).WillRepeatedly(Return(false));
}
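  // The second is_state_locked probe (reached from handle_notify_heartbeat)
  // doubles as the signal that one heartbeat round trip has completed.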
void expect_notify_heartbeat(MockManagedLock &mock_managed_lock,
Context *on_finish) {
// is_leader in notify_heartbeat
EXPECT_CALL(mock_managed_lock, is_state_post_acquiring())
.WillOnce(Return(false));
EXPECT_CALL(mock_managed_lock, is_state_locked())
.WillOnce(Return(true));
// is_leader in handle_notify_heartbeat
EXPECT_CALL(mock_managed_lock, is_state_post_acquiring())
.WillOnce(Return(false));
EXPECT_CALL(mock_managed_lock, is_state_locked())
.WillOnce(DoAll(Invoke([on_finish]() {
on_finish->complete(0);
}),
Return(true)));
}
void expect_destroy(MockInstances &mock_instances) {
EXPECT_CALL(mock_instances, destroy());
}
void expect_init(MockInstances &mock_instances, int r) {
EXPECT_CALL(mock_instances, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_shut_down(MockInstances &mock_instances, int r) {
EXPECT_CALL(mock_instances, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
expect_destroy(mock_instances);
}
void expect_acquire_notify(MockManagedLock &mock_managed_lock,
MockListener &mock_listener, int r) {
expect_is_leader(mock_managed_lock, true, false);
EXPECT_CALL(mock_listener, post_acquire_handler(_))
.WillOnce(CompleteContext(r));
expect_is_leader(mock_managed_lock, true, false);
}
void expect_release_notify(MockManagedLock &mock_managed_lock,
MockListener &mock_listener, int r) {
expect_is_leader(mock_managed_lock, false, false);
EXPECT_CALL(mock_listener, pre_release_handler(_))
.WillOnce(CompleteContext(r));
expect_is_leader(mock_managed_lock, false, false);
}
void expect_unblock_listener(MockInstances& mock_instances) {
EXPECT_CALL(mock_instances, unblock_listener());
}
void expect_instances_acked(MockInstances& mock_instances) {
EXPECT_CALL(mock_instances, acked(_));
}
MockThreads *m_mock_threads;
};
TEST_F(TestMockLeaderWatcher, InitShutdown) {
MockManagedLock mock_managed_lock;
MockInstances mock_instances;
MockListener listener;
expect_is_shutdown(mock_managed_lock);
expect_destroy(mock_managed_lock);
InSequence seq;
expect_construct(mock_managed_lock);
MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener);
// Init
C_SaferCond on_heartbeat_finish;
expect_is_leader(mock_managed_lock, false, false);
expect_try_acquire_lock(mock_managed_lock, 0);
expect_init(mock_instances, 0);
expect_acquire_notify(mock_managed_lock, listener, 0);
expect_unblock_listener(mock_instances);
expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish);
expect_instances_acked(mock_instances);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_heartbeat_finish.wait());
// Shutdown
expect_release_notify(mock_managed_lock, listener, 0);
expect_shut_down(mock_instances, 0);
expect_release_lock(mock_managed_lock, 0);
expect_shut_down(mock_managed_lock, true, 0);
expect_is_leader(mock_managed_lock, false, false);
leader_watcher.shut_down();
}
TEST_F(TestMockLeaderWatcher, InitReleaseShutdown) {
MockManagedLock mock_managed_lock;
MockInstances mock_instances;
MockListener listener;
expect_is_shutdown(mock_managed_lock);
expect_destroy(mock_managed_lock);
InSequence seq;
expect_construct(mock_managed_lock);
MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener);
// Init
C_SaferCond on_heartbeat_finish;
expect_is_leader(mock_managed_lock, false, false);
expect_try_acquire_lock(mock_managed_lock, 0);
expect_init(mock_instances, 0);
expect_acquire_notify(mock_managed_lock, listener, 0);
expect_unblock_listener(mock_instances);
expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish);
expect_instances_acked(mock_instances);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_heartbeat_finish.wait());
// Release
expect_is_leader(mock_managed_lock, false, true);
expect_release_notify(mock_managed_lock, listener, 0);
expect_shut_down(mock_instances, 0);
C_SaferCond on_release;
expect_release_lock(mock_managed_lock, 0, &on_release);
leader_watcher.release_leader();
ASSERT_EQ(0, on_release.wait());
// Shutdown
expect_shut_down(mock_managed_lock, false, 0);
expect_is_leader(mock_managed_lock, false, false);
leader_watcher.shut_down();
}
TEST_F(TestMockLeaderWatcher, AcquireError) {
MockManagedLock mock_managed_lock;
MockInstances mock_instances;
MockListener listener;
expect_is_shutdown(mock_managed_lock);
expect_is_leader(mock_managed_lock);
expect_destroy(mock_managed_lock);
InSequence seq;
expect_construct(mock_managed_lock);
MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener);
// Init
C_SaferCond on_heartbeat_finish;
expect_is_leader(mock_managed_lock, false, false);
expect_try_acquire_lock(mock_managed_lock, -EAGAIN);
expect_get_locker(mock_managed_lock, librbd::managed_lock::Locker(), -ENOENT);
expect_try_acquire_lock(mock_managed_lock, 0);
expect_init(mock_instances, 0);
expect_acquire_notify(mock_managed_lock, listener, 0);
expect_unblock_listener(mock_instances);
expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish);
expect_instances_acked(mock_instances);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_heartbeat_finish.wait());
// Shutdown
expect_release_notify(mock_managed_lock, listener, 0);
expect_shut_down(mock_instances, 0);
expect_release_lock(mock_managed_lock, 0);
expect_shut_down(mock_managed_lock, true, 0);
expect_is_leader(mock_managed_lock, false, false);
leader_watcher.shut_down();
}
TEST_F(TestMockLeaderWatcher, Break) {
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_heartbeat_interval", "1"));
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_leader_max_missed_heartbeats",
"1"));
CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct());
int max_acquire_attempts = cct->_conf.get_val<uint64_t>(
"rbd_mirror_leader_max_acquire_attempts_before_break");
MockManagedLock mock_managed_lock;
MockInstances mock_instances;
MockListener listener;
librbd::managed_lock::Locker
locker{entity_name_t::CLIENT(1), "auto 123", "1.2.3.4:0/0", 123};
expect_is_shutdown(mock_managed_lock);
expect_is_leader(mock_managed_lock);
expect_destroy(mock_managed_lock);
EXPECT_CALL(listener, update_leader_handler(_));
InSequence seq;
expect_construct(mock_managed_lock);
MockLeaderWatcher leader_watcher(m_mock_threads, m_local_io_ctx, &listener);
// Init
expect_is_leader(mock_managed_lock, false, false);
for (int i = 0; i < max_acquire_attempts; i++) {
expect_try_acquire_lock(mock_managed_lock, -EAGAIN);
expect_get_locker(mock_managed_lock, locker, 0);
}
C_SaferCond on_break;
expect_break_lock(mock_managed_lock, locker, 0, &on_break);
C_SaferCond on_heartbeat_finish;
expect_try_acquire_lock(mock_managed_lock, 0);
expect_init(mock_instances, 0);
expect_acquire_notify(mock_managed_lock, listener, 0);
expect_unblock_listener(mock_instances);
expect_notify_heartbeat(mock_managed_lock, &on_heartbeat_finish);
expect_instances_acked(mock_instances);
ASSERT_EQ(0, leader_watcher.init());
ASSERT_EQ(0, on_heartbeat_finish.wait());
// Shutdown
expect_release_notify(mock_managed_lock, listener, 0);
expect_shut_down(mock_instances, 0);
expect_release_lock(mock_managed_lock, 0);
expect_shut_down(mock_managed_lock, true, 0);
expect_is_leader(mock_managed_lock, false, false);
leader_watcher.shut_down();
}
} // namespace mirror
} // namespace rbd
| 20,057 | 31.614634 | 95 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_MirrorStatusUpdater.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "include/stringify.h"
#include "tools/rbd_mirror/MirrorStatusUpdater.h"
#include "tools/rbd_mirror/MirrorStatusWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include <map>
#include <string>
#include <utility>
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct MirrorStatusWatcher<librbd::MockTestImageCtx> {
static MirrorStatusWatcher* s_instance;
static MirrorStatusWatcher* create(librados::IoCtx& io_ctx,
MockContextWQ* mock_context_wq) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
MirrorStatusWatcher() {
s_instance = this;
}
};
MirrorStatusWatcher<librbd::MockTestImageCtx>* MirrorStatusWatcher<librbd::MockTestImageCtx>::s_instance = nullptr;
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
} // namespace mirror
} // namespace rbd
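// template definitions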
#include "tools/rbd_mirror/MirrorStatusUpdater.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::DoDefault;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::StrEq;
using ::testing::Return;
using ::testing::WithArg;
class TestMockMirrorStatusUpdater : public TestMockFixture {
public:
typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater;
typedef MirrorStatusWatcher<librbd::MockTestImageCtx> MockMirrorStatusWatcher;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef std::map<std::string, cls::rbd::MirrorImageSiteStatus>
MirrorImageSiteStatuses;
void SetUp() override {
TestMockFixture::SetUp();
m_mock_local_io_ctx = &get_mock_io_ctx(m_local_io_ctx);
m_mock_threads = new MockThreads(m_threads);
}
void TearDown() override {
delete m_mock_threads;
TestMockFixture::TearDown();
}
void expect_timer_add_event(Context** timer_event) {
EXPECT_CALL(*m_mock_threads->timer, add_event_after(_, _))
.WillOnce(WithArg<1>(Invoke([timer_event](Context *ctx) {
*timer_event = ctx;
return ctx;
})));
}
void expect_timer_cancel_event() {
EXPECT_CALL(*m_mock_threads->timer, cancel_event(_))
.WillOnce(Invoke([](Context* ctx) {
delete ctx;
return false;
}));
}
void expect_work_queue(bool async) {
EXPECT_CALL(*m_mock_threads->work_queue, queue(_, _))
.WillOnce(Invoke([this, async](Context *ctx, int r) {
if (async) {
m_threads->work_queue->queue(ctx, r);
} else {
ctx->complete(r);
}
}));
}
void expect_mirror_status_watcher_init(
MockMirrorStatusWatcher& mock_mirror_status_watcher, int r) {
EXPECT_CALL(*mock_mirror_status_watcher.s_instance, init(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_mirror_status_watcher_shut_down(
MockMirrorStatusWatcher& mock_mirror_status_watcher, int r) {
EXPECT_CALL(*mock_mirror_status_watcher.s_instance, shut_down(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_mirror_status_update(
const std::string& global_image_id,
const cls::rbd::MirrorImageSiteStatus& mirror_image_status, int r) {
EXPECT_CALL(*m_mock_local_io_ctx,
exec(RBD_MIRRORING, _, StrEq("rbd"),
StrEq("mirror_image_status_set"), _, _, _, _))
.WillOnce(WithArg<4>(Invoke(
[r, global_image_id, mirror_image_status](bufferlist& in_bl) {
auto bl_it = in_bl.cbegin();
std::string decode_global_image_id;
decode(decode_global_image_id, bl_it);
EXPECT_EQ(global_image_id, decode_global_image_id);
cls::rbd::MirrorImageSiteStatus decode_mirror_image_status;
decode(decode_mirror_image_status, bl_it);
EXPECT_EQ(mirror_image_status, decode_mirror_image_status);
return r;
})));
}
void expect_mirror_status_update(
const MirrorImageSiteStatuses& mirror_image_site_statuses,
const std::string& mirror_uuid, int r) {
EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _))
.WillOnce(Invoke([this](auto&&... args) {
int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...);
m_mock_local_io_ctx->aio_flush();
return r;
}));
for (auto [global_image_id, mirror_image_status] :
mirror_image_site_statuses) {
mirror_image_status.mirror_uuid = mirror_uuid;
expect_mirror_status_update(global_image_id, mirror_image_status, r);
if (r < 0) {
break;
}
}
}
void expect_mirror_status_remove(const std::string& global_image_id, int r) {
EXPECT_CALL(*m_mock_local_io_ctx,
exec(RBD_MIRRORING, _, StrEq("rbd"),
StrEq("mirror_image_status_remove"), _, _, _, _))
.WillOnce(WithArg<4>(Invoke(
[r, global_image_id](bufferlist& in_bl) {
auto bl_it = in_bl.cbegin();
std::string decode_global_image_id;
decode(decode_global_image_id, bl_it);
EXPECT_EQ(global_image_id, decode_global_image_id);
return r;
})));
}
void expect_mirror_status_removes(const std::set<std::string>& mirror_images,
int r) {
EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _))
.WillOnce(Invoke([this](auto&&... args) {
int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...);
m_mock_local_io_ctx->aio_flush();
return r;
}));
for (auto global_image_id : mirror_images) {
expect_mirror_status_remove(global_image_id, r);
if (r < 0) {
break;
}
}
}
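  // Fires the captured timer event and intercepts the update task it queues,
  // handing the task back so the test controls exactly when the batched
  // status update runs (the timer is also re-armed for the next interval).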
void fire_timer_event(Context** timer_event,
Context** update_task) {
expect_timer_add_event(timer_event);
// timer queues the update task
EXPECT_CALL(*m_mock_threads->work_queue, queue(_, _))
.WillOnce(WithArg<0>(Invoke([update_task](Context* ctx) mutable {
*update_task = ctx;
})));
// fire the timer task
{
std::lock_guard timer_locker{m_mock_threads->timer_lock};
ceph_assert(*timer_event != nullptr);
(*timer_event)->complete(0);
}
}
void init_mirror_status_updater(
MockMirrorStatusUpdater& mock_mirror_status_updater,
MockMirrorStatusWatcher& mock_mirror_status_watcher,
Context** timer_event) {
expect_timer_add_event(timer_event);
expect_mirror_status_watcher_init(mock_mirror_status_watcher, 0);
expect_work_queue(true);
C_SaferCond ctx;
mock_mirror_status_updater.init(&ctx);
ASSERT_EQ(0, ctx.wait());
}
void shut_down_mirror_status_updater(
MockMirrorStatusUpdater& mock_mirror_status_updater,
MockMirrorStatusWatcher& mock_mirror_status_watcher) {
expect_timer_cancel_event();
expect_mirror_status_watcher_shut_down(mock_mirror_status_watcher, 0);
expect_work_queue(true);
C_SaferCond ctx;
mock_mirror_status_updater.shut_down(&ctx);
ASSERT_EQ(0, ctx.wait());
}
librados::MockTestMemIoCtxImpl* m_mock_local_io_ctx = nullptr;
MockThreads* m_mock_threads = nullptr;
};
TEST_F(TestMockMirrorStatusUpdater, InitShutDown) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, InitStatusWatcherError) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
Context* timer_event = nullptr;
expect_timer_add_event(&timer_event);
expect_mirror_status_watcher_init(*mock_mirror_status_watcher, -EINVAL);
expect_timer_cancel_event();
expect_work_queue(true);
C_SaferCond ctx;
mock_mirror_status_updater.init(&ctx);
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockMirrorStatusUpdater, ShutDownStatusWatcherError) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
C_SaferCond on_shutdown;
expect_timer_cancel_event();
expect_mirror_status_watcher_shut_down(*mock_mirror_status_watcher, -EINVAL);
expect_work_queue(true);
mock_mirror_status_updater.shut_down(&on_shutdown);
ASSERT_EQ(-EINVAL, on_shutdown.wait());
}
TEST_F(TestMockMirrorStatusUpdater, SmallBatch) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
MirrorImageSiteStatuses mirror_image_site_statuses;
for (auto i = 0; i < 100; ++i) {
auto pair = mirror_image_site_statuses.emplace(
stringify(i), cls::rbd::MirrorImageSiteStatus{});
mock_mirror_status_updater.set_mirror_image_status(pair.first->first,
pair.first->second,
false);
}
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
expect_mirror_status_update(mirror_image_site_statuses, "", 0);
update_task->complete(0);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, LargeBatch) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
MirrorImageSiteStatuses mirror_image_site_statuses;
for (auto i = 0; i < 200; ++i) {
auto pair = mirror_image_site_statuses.emplace(
stringify(i), cls::rbd::MirrorImageSiteStatus{});
mock_mirror_status_updater.set_mirror_image_status(pair.first->first,
pair.first->second,
false);
}
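  // The updater appears to cap each aio_operate at 100 statuses, so the 200
  // queued updates above should be flushed as the two batches expected below.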
auto it_1 = mirror_image_site_statuses.begin();
auto it_2 = mirror_image_site_statuses.begin();
std::advance(it_2, 100);
MirrorImageSiteStatuses mirror_image_site_statuses_1{it_1, it_2};
it_1 = it_2;
std::advance(it_2, 100);
MirrorImageSiteStatuses mirror_image_site_statuses_2{it_1, it_2};
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
expect_mirror_status_update(mirror_image_site_statuses_1, "", 0);
expect_mirror_status_update(mirror_image_site_statuses_2, "", 0);
update_task->complete(0);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, OverwriteStatus) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
mock_mirror_status_updater.set_mirror_image_status(
"1", {"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"},
false);
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
expect_mirror_status_update(
{{"1", cls::rbd::MirrorImageSiteStatus{
"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}}},
"", 0);
update_task->complete(0);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, RemoveStatus) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
C_SaferCond ctx;
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
expect_work_queue(false);
mock_mirror_status_updater.remove_mirror_image_status("1", false, &ctx);
ASSERT_EQ(0, ctx.wait());
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
C_SaferCond remove_flush_ctx;
EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _))
.WillOnce(Invoke([this, &remove_flush_ctx](auto&&... args) {
int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...);
m_mock_local_io_ctx->aio_flush();
remove_flush_ctx.complete(r);
return r;
}));
expect_mirror_status_remove("1", 0);
update_task->complete(0);
ASSERT_EQ(0, remove_flush_ctx.wait());
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, OverwriteRemoveStatus) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
C_SaferCond ctx;
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
expect_work_queue(false);
mock_mirror_status_updater.remove_mirror_image_status("1", false, &ctx);
ASSERT_EQ(0, ctx.wait());
mock_mirror_status_updater.set_mirror_image_status(
"1", {"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"},
false);
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
expect_mirror_status_update(
{{"1", cls::rbd::MirrorImageSiteStatus{
"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}}},
"", 0);
update_task->complete(0);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, OverwriteStatusInFlight) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _))
.WillOnce(Invoke([this, &mock_mirror_status_updater](auto&&... args) {
mock_mirror_status_updater.set_mirror_image_status(
"1", {"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING,
"description"},
true);
int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...);
m_mock_local_io_ctx->aio_flush();
return r;
}));
expect_mirror_status_update("1", cls::rbd::MirrorImageSiteStatus{}, 0);
expect_work_queue(false);
expect_mirror_status_update(
{{"1", cls::rbd::MirrorImageSiteStatus{
"", cls::rbd::MIRROR_IMAGE_STATUS_STATE_REPLAYING, "description"}}},
"", 0);
update_task->complete(0);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
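// ImmediateUpdate: with the immediate flag set, the status bypasses the
// periodic timer batch and is pushed straight through the work queue.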
TEST_F(TestMockMirrorStatusUpdater, ImmediateUpdate) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
expect_work_queue(false);
expect_mirror_status_update({{"1", cls::rbd::MirrorImageSiteStatus{}}},
"", 0);
mock_mirror_status_updater.set_mirror_image_status("1", {}, true);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, RemoveImmediateUpdate) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
C_SaferCond ctx;
expect_work_queue(false);
expect_mirror_status_removes({"1"}, 0);
expect_work_queue(false);
mock_mirror_status_updater.remove_mirror_image_status("1", true, &ctx);
ASSERT_EQ(0, ctx.wait());
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, RemoveRefreshIdleStatus) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
C_SaferCond ctx;
expect_work_queue(true);
mock_mirror_status_updater.remove_refresh_mirror_image_status("1", &ctx);
ASSERT_EQ(0, ctx.wait());
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, RemoveRefreshInFlightStatus) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
C_SaferCond on_removed;
EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _))
.WillOnce(Invoke(
[this, &mock_mirror_status_updater, &on_removed](auto&&... args) {
mock_mirror_status_updater.remove_refresh_mirror_image_status(
"1", &on_removed);
int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...);
m_mock_local_io_ctx->aio_flush();
return r;
}));
update_task->complete(0);
ASSERT_EQ(0, on_removed.wait());
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
TEST_F(TestMockMirrorStatusUpdater, ShutDownWhileUpdating) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads, "");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
mock_mirror_status_updater.set_mirror_image_status("1", {}, false);
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
C_SaferCond on_shutdown;
EXPECT_CALL(*m_mock_local_io_ctx, aio_operate(_, _, _, _, _, _))
.WillOnce(Invoke(
[this, &mock_mirror_status_updater, &on_shutdown](auto&&... args) {
mock_mirror_status_updater.shut_down(&on_shutdown);
m_threads->work_queue->drain();
int r = m_mock_local_io_ctx->do_aio_operate(decltype(args)(args)...);
m_mock_local_io_ctx->aio_flush();
return r;
}));
expect_timer_cancel_event();
expect_mirror_status_watcher_shut_down(*mock_mirror_status_watcher, 0);
update_task->complete(0);
ASSERT_EQ(0, on_shutdown.wait());
}
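// MirrorPeerSitePing: constructed with a non-empty mirror uuid the updater
// acts as a peer-site (remote) updater; all 100 queued image statuses are
// batched into a single update tagged with that uuid.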
TEST_F(TestMockMirrorStatusUpdater, MirrorPeerSitePing) {
MockMirrorStatusUpdater mock_mirror_status_updater(m_local_io_ctx,
m_mock_threads,
"mirror uuid");
MockMirrorStatusWatcher* mock_mirror_status_watcher =
new MockMirrorStatusWatcher();
InSequence seq;
Context* timer_event = nullptr;
init_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher, &timer_event);
MirrorImageSiteStatuses mirror_image_site_statuses;
for (auto i = 0; i < 100; ++i) {
auto pair = mirror_image_site_statuses.emplace(
stringify(i), cls::rbd::MirrorImageSiteStatus{});
mock_mirror_status_updater.set_mirror_image_status(pair.first->first,
pair.first->second,
false);
}
Context* update_task = nullptr;
fire_timer_event(&timer_event, &update_task);
expect_mirror_status_update(mirror_image_site_statuses, "mirror uuid", 0);
update_task->complete(0);
shut_down_mirror_status_updater(mock_mirror_status_updater,
*mock_mirror_status_watcher);
}
} // namespace mirror
} // namespace rbd
| 24494 | 33.646393 | 115 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_NamespaceReplayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Config.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include "tools/rbd_mirror/NamespaceReplayer.h"
#include "tools/rbd_mirror/ImageDeleter.h"
#include "tools/rbd_mirror/ImageMap.h"
#include "tools/rbd_mirror/InstanceWatcher.h"
#include "tools/rbd_mirror/InstanceReplayer.h"
#include "tools/rbd_mirror/MirrorStatusUpdater.h"
#include "tools/rbd_mirror/PoolWatcher.h"
#include "tools/rbd_mirror/ServiceDaemon.h"
#include "tools/rbd_mirror/Threads.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
} // namespace librbd
namespace rbd {
namespace mirror {
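// The mocks below follow the usual rbd-mirror test convention: each template
// specialization keeps a static s_instance pointer that is registered by the
// mock's constructor, and the static create() factory hands that
// pre-registered instance back to the code under test, e.g.:
//
//   auto mock = new MockImageDeleter(); // constructor sets s_instance
//   // ... NamespaceReplayer later calls ImageDeleter<I>::create() -> mock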
template <>
struct ImageDeleter<librbd::MockTestImageCtx> {
static ImageDeleter* s_instance;
static ImageDeleter* create(
librados::IoCtx &ioctx, Threads<librbd::MockTestImageCtx> *threads,
Throttler<librbd::MockTestImageCtx> *image_deletion_throttler,
ServiceDaemon<librbd::MockTestImageCtx> *service_daemon) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD2(print_status, void(Formatter*, std::stringstream*));
ImageDeleter() {
s_instance = this;
}
};
ImageDeleter<librbd::MockTestImageCtx>* ImageDeleter<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct ImageMap<librbd::MockTestImageCtx> {
static ImageMap* s_instance;
static ImageMap *create(librados::IoCtx &ioctx,
Threads<librbd::MockTestImageCtx> *threads,
const std::string& instance_id,
image_map::Listener &listener) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(update_instances_added, void(const std::vector<std::string>&));
MOCK_METHOD1(update_instances_removed, void(const std::vector<std::string>&));
MOCK_METHOD3(update_images_mock, void(const std::string&,
const std::set<std::string>&,
const std::set<std::string>&));
void update_images(const std::string& mirror_uuid,
std::set<std::string>&& added,
std::set<std::string>&& removed) {
update_images_mock(mirror_uuid, added, removed);
}
ImageMap() {
s_instance = this;
}
};
ImageMap<librbd::MockTestImageCtx>* ImageMap<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct InstanceReplayer<librbd::MockTestImageCtx> {
static InstanceReplayer* s_instance;
static InstanceReplayer* create(
librados::IoCtx &local_io_ctx, const std::string &local_mirror_uuid,
Threads<librbd::MockTestImageCtx> *threads,
ServiceDaemon<librbd::MockTestImageCtx> *service_daemon,
MirrorStatusUpdater<librbd::MockTestImageCtx>* local_status_updater,
journal::CacheManagerHandler *cache_manager_handler,
PoolMetaCache* pool_meta_cache) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MOCK_METHOD0(start, void());
MOCK_METHOD0(stop, void());
MOCK_METHOD0(restart, void());
MOCK_METHOD0(flush, void());
MOCK_METHOD1(stop, void(Context *));
MOCK_METHOD2(print_status, void(Formatter*, std::stringstream*));
MOCK_METHOD1(add_peer, void(const Peer<librbd::MockTestImageCtx>&));
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(release_all, void(Context*));
InstanceReplayer() {
s_instance = this;
}
};
InstanceReplayer<librbd::MockTestImageCtx>* InstanceReplayer<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct InstanceWatcher<librbd::MockTestImageCtx> {
static InstanceWatcher* s_instance;
static InstanceWatcher* create(
librados::IoCtx &ioctx, librbd::AsioEngine& asio_engine,
InstanceReplayer<librbd::MockTestImageCtx>* instance_replayer,
Throttler<librbd::MockTestImageCtx> *image_sync_throttler) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MOCK_METHOD0(handle_acquire_leader, void());
MOCK_METHOD0(handle_release_leader, void());
MOCK_METHOD0(get_instance_id, std::string());
MOCK_METHOD2(print_sync_status, void(Formatter*, std::stringstream*));
MOCK_METHOD1(init, void(Context *));
MOCK_METHOD1(shut_down, void(Context *));
MOCK_METHOD3(notify_image_acquire, void(const std::string&,
const std::string&,
Context*));
MOCK_METHOD3(notify_image_release, void(const std::string&,
const std::string&,
Context*));
MOCK_METHOD4(notify_peer_image_removed, void(const std::string&,
const std::string&,
const std::string&,
Context*));
MOCK_METHOD1(handle_update_leader, void(const std::string&));
InstanceWatcher() {
s_instance = this;
}
};
InstanceWatcher<librbd::MockTestImageCtx>* InstanceWatcher<librbd::MockTestImageCtx>::s_instance = nullptr;
template <>
struct MirrorStatusUpdater<librbd::MockTestImageCtx> {
std::string local_mirror_uuid;
static std::map<std::string, MirrorStatusUpdater*> s_instance;
static MirrorStatusUpdater *create(librados::IoCtx &io_ctx,
Threads<librbd::MockTestImageCtx> *threads,
const std::string& local_mirror_uuid) {
ceph_assert(s_instance[local_mirror_uuid] != nullptr);
return s_instance[local_mirror_uuid];
}
MirrorStatusUpdater(const std::string_view& local_mirror_uuid)
: local_mirror_uuid(local_mirror_uuid) {
s_instance[std::string{local_mirror_uuid}] = this;
}
~MirrorStatusUpdater() {
s_instance.erase(local_mirror_uuid);
}
MOCK_METHOD1(init, void(Context *));
MOCK_METHOD1(shut_down, void(Context *));
};
std::map<std::string, MirrorStatusUpdater<librbd::MockTestImageCtx> *>
MirrorStatusUpdater<librbd::MockTestImageCtx>::s_instance;
template<>
struct PoolWatcher<librbd::MockTestImageCtx> {
int64_t pool_id = -1;
static std::map<int64_t, PoolWatcher *> s_instances;
static PoolWatcher *create(Threads<librbd::MockTestImageCtx> *threads,
librados::IoCtx &ioctx,
const std::string& mirror_uuid,
pool_watcher::Listener& listener) {
auto pool_id = ioctx.get_id();
ceph_assert(s_instances.count(pool_id));
return s_instances[pool_id];
}
MOCK_METHOD0(is_blocklisted, bool());
MOCK_METHOD0(get_image_count, uint64_t());
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
PoolWatcher(int64_t pool_id) : pool_id(pool_id) {
ceph_assert(!s_instances.count(pool_id));
s_instances[pool_id] = this;
}
~PoolWatcher() {
s_instances.erase(pool_id);
}
};
std::map<int64_t, PoolWatcher<librbd::MockTestImageCtx> *> PoolWatcher<librbd::MockTestImageCtx>::s_instances;
template<>
struct ServiceDaemon<librbd::MockTestImageCtx> {
MOCK_METHOD4(add_or_update_namespace_attribute,
void(int64_t, const std::string&, const std::string&,
const service_daemon::AttributeValue&));
MOCK_METHOD2(remove_attribute,
void(int64_t, const std::string&));
MOCK_METHOD4(add_or_update_callout, uint64_t(int64_t, uint64_t,
service_daemon::CalloutLevel,
const std::string&));
MOCK_METHOD2(remove_callout, void(int64_t, uint64_t));
};
template <>
struct Threads<librbd::MockTestImageCtx> {
ceph::mutex &timer_lock;
SafeTimer *timer;
librbd::asio::ContextWQ *work_queue;
librbd::AsioEngine* asio_engine;
Threads(Threads<librbd::ImageCtx> *threads)
: timer_lock(threads->timer_lock), timer(threads->timer),
work_queue(threads->work_queue), asio_engine(threads->asio_engine) {
}
};
} // namespace mirror
} // namespace rbd
// template definitions
#include "tools/rbd_mirror/NamespaceReplayer.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::WithArg;
class TestMockNamespaceReplayer : public TestMockFixture {
public:
typedef NamespaceReplayer<librbd::MockTestImageCtx> MockNamespaceReplayer;
typedef ImageDeleter<librbd::MockTestImageCtx> MockImageDeleter;
typedef ImageMap<librbd::MockTestImageCtx> MockImageMap;
typedef InstanceReplayer<librbd::MockTestImageCtx> MockInstanceReplayer;
typedef InstanceWatcher<librbd::MockTestImageCtx> MockInstanceWatcher;
typedef MirrorStatusUpdater<librbd::MockTestImageCtx> MockMirrorStatusUpdater;
typedef PoolWatcher<librbd::MockTestImageCtx> MockPoolWatcher;
typedef ServiceDaemon<librbd::MockTestImageCtx> MockServiceDaemon;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
void SetUp() override {
TestMockFixture::SetUp();
m_mock_threads = new MockThreads(m_threads);
}
void TearDown() override {
delete m_mock_threads;
TestMockFixture::TearDown();
}
void expect_mirror_status_updater_init(
MockMirrorStatusUpdater &mock_mirror_status_updater, int r) {
EXPECT_CALL(mock_mirror_status_updater, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_mirror_status_updater_shut_down(
MockMirrorStatusUpdater &mock_mirror_status_updater) {
EXPECT_CALL(mock_mirror_status_updater, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_instance_replayer_init(
MockInstanceReplayer& mock_instance_replayer, int r) {
EXPECT_CALL(mock_instance_replayer, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_instance_replayer_shut_down(
MockInstanceReplayer& mock_instance_replayer) {
EXPECT_CALL(mock_instance_replayer, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_instance_replayer_stop(
MockInstanceReplayer& mock_instance_replayer) {
EXPECT_CALL(mock_instance_replayer, stop(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_instance_replayer_add_peer(
MockInstanceReplayer& mock_instance_replayer) {
EXPECT_CALL(mock_instance_replayer, add_peer(_));
}
void expect_instance_replayer_release_all(
MockInstanceReplayer& mock_instance_replayer) {
EXPECT_CALL(mock_instance_replayer, release_all(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_instance_watcher_get_instance_id(
MockInstanceWatcher& mock_instance_watcher,
const std::string &instance_id) {
EXPECT_CALL(mock_instance_watcher, get_instance_id())
.WillOnce(Return(instance_id));
}
void expect_instance_watcher_init(
MockInstanceWatcher& mock_instance_watcher, int r) {
EXPECT_CALL(mock_instance_watcher, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_instance_watcher_shut_down(
MockInstanceWatcher& mock_instance_watcher) {
EXPECT_CALL(mock_instance_watcher, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_instance_watcher_handle_acquire_leader(
MockInstanceWatcher& mock_instance_watcher) {
EXPECT_CALL(mock_instance_watcher, handle_acquire_leader());
}
void expect_instance_watcher_handle_release_leader(
MockInstanceWatcher& mock_instance_watcher) {
EXPECT_CALL(mock_instance_watcher, handle_release_leader());
}
void expect_image_map_init(MockInstanceWatcher &mock_instance_watcher,
MockImageMap& mock_image_map, int r) {
expect_instance_watcher_get_instance_id(mock_instance_watcher, "1234");
EXPECT_CALL(mock_image_map, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_image_map_shut_down(MockImageMap& mock_image_map) {
EXPECT_CALL(mock_image_map, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_pool_watcher_init(MockPoolWatcher& mock_pool_watcher, int r) {
EXPECT_CALL(mock_pool_watcher, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_pool_watcher_shut_down(MockPoolWatcher& mock_pool_watcher) {
EXPECT_CALL(mock_pool_watcher, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
void expect_image_deleter_init(MockImageDeleter& mock_image_deleter, int r) {
EXPECT_CALL(mock_image_deleter, init(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, r));
}
void expect_image_deleter_shut_down(MockImageDeleter& mock_image_deleter) {
EXPECT_CALL(mock_image_deleter, shut_down(_))
.WillOnce(CompleteContext(m_mock_threads->work_queue, 0));
}
MockThreads *m_mock_threads;
};
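// The Init_* error-path tests verify that every component brought up before
// the failing step is shut down again, in reverse initialization order,
// when NamespaceReplayer::init() fails part-way through.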
TEST_F(TestMockNamespaceReplayer, Init_LocalMirrorStatusUpdaterError) {
InSequence seq;
auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""};
expect_mirror_status_updater_init(*mock_local_mirror_status_updater, -EINVAL);
MockNamespaceReplayer namespace_replayer(
{}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid",
"local peer uuid", {"remote mirror uuid", ""}, m_mock_threads,
nullptr, nullptr, nullptr, nullptr, nullptr);
C_SaferCond on_init;
namespace_replayer.init(&on_init);
ASSERT_EQ(-EINVAL, on_init.wait());
}
TEST_F(TestMockNamespaceReplayer, Init_RemoteMirrorStatusUpdaterError) {
InSequence seq;
auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""};
expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0);
auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{
"local mirror uuid"};
expect_mirror_status_updater_init(*mock_remote_mirror_status_updater,
-EINVAL);
expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater);
MockNamespaceReplayer namespace_replayer(
{}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid",
"local peer uuid", {"remote mirror uuid", ""}, m_mock_threads,
nullptr, nullptr, nullptr, nullptr, nullptr);
C_SaferCond on_init;
namespace_replayer.init(&on_init);
ASSERT_EQ(-EINVAL, on_init.wait());
}
TEST_F(TestMockNamespaceReplayer, Init_InstanceReplayerError) {
InSequence seq;
auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""};
expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0);
auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{
"local mirror uuid"};
expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0);
auto mock_instance_replayer = new MockInstanceReplayer();
expect_instance_replayer_init(*mock_instance_replayer, -EINVAL);
expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater);
expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater);
MockNamespaceReplayer namespace_replayer(
{}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid",
"local peer uuid", {"remote mirror uuid", ""}, m_mock_threads,
nullptr, nullptr, nullptr, nullptr, nullptr);
C_SaferCond on_init;
namespace_replayer.init(&on_init);
ASSERT_EQ(-EINVAL, on_init.wait());
}
TEST_F(TestMockNamespaceReplayer, Init_InstanceWatcherError) {
InSequence seq;
auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""};
expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0);
auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{
"local mirror uuid"};
expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0);
auto mock_instance_replayer = new MockInstanceReplayer();
expect_instance_replayer_init(*mock_instance_replayer, 0);
expect_instance_replayer_add_peer(*mock_instance_replayer);
auto mock_instance_watcher = new MockInstanceWatcher();
expect_instance_watcher_init(*mock_instance_watcher, -EINVAL);
expect_instance_replayer_shut_down(*mock_instance_replayer);
expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater);
expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater);
MockNamespaceReplayer namespace_replayer(
{}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid",
"local peer uuid", {"remote mirror uuid", ""}, m_mock_threads,
nullptr, nullptr, nullptr, nullptr, nullptr);
C_SaferCond on_init;
namespace_replayer.init(&on_init);
ASSERT_EQ(-EINVAL, on_init.wait());
}
TEST_F(TestMockNamespaceReplayer, Init) {
InSequence seq;
auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""};
expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0);
auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{
"local mirror uuid"};
expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0);
auto mock_instance_replayer = new MockInstanceReplayer();
expect_instance_replayer_init(*mock_instance_replayer, 0);
expect_instance_replayer_add_peer(*mock_instance_replayer);
auto mock_instance_watcher = new MockInstanceWatcher();
expect_instance_watcher_init(*mock_instance_watcher, 0);
MockServiceDaemon mock_service_daemon;
MockNamespaceReplayer namespace_replayer(
{}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid",
"local peer uuid", {"remote mirror uuid", ""}, m_mock_threads,
nullptr, nullptr, &mock_service_daemon, nullptr, nullptr);
C_SaferCond on_init;
namespace_replayer.init(&on_init);
ASSERT_EQ(0, on_init.wait());
expect_instance_replayer_stop(*mock_instance_replayer);
expect_instance_watcher_shut_down(*mock_instance_watcher);
expect_instance_replayer_shut_down(*mock_instance_replayer);
expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater);
expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater);
C_SaferCond on_shut_down;
namespace_replayer.shut_down(&on_shut_down);
ASSERT_EQ(0, on_shut_down.wait());
}
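// AcquireLeader: gaining the leader lock spins up the pool-scoped
// components (image map, local/remote pool watchers, image deleter);
// releasing it tears them down again and releases all locally owned images.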
TEST_F(TestMockNamespaceReplayer, AcquireLeader) {
InSequence seq;
// init
auto mock_local_mirror_status_updater = new MockMirrorStatusUpdater{""};
expect_mirror_status_updater_init(*mock_local_mirror_status_updater, 0);
auto mock_remote_mirror_status_updater = new MockMirrorStatusUpdater{
"local mirror uuid"};
expect_mirror_status_updater_init(*mock_remote_mirror_status_updater, 0);
auto mock_instance_replayer = new MockInstanceReplayer();
expect_instance_replayer_init(*mock_instance_replayer, 0);
expect_instance_replayer_add_peer(*mock_instance_replayer);
auto mock_instance_watcher = new MockInstanceWatcher();
expect_instance_watcher_init(*mock_instance_watcher, 0);
MockServiceDaemon mock_service_daemon;
MockNamespaceReplayer namespace_replayer(
{}, m_local_io_ctx, m_remote_io_ctx, "local mirror uuid",
"local peer uuid", {"remote mirror uuid", ""}, m_mock_threads,
nullptr, nullptr, &mock_service_daemon, nullptr, nullptr);
C_SaferCond on_init;
namespace_replayer.init(&on_init);
ASSERT_EQ(0, on_init.wait());
// acquire leader
expect_instance_watcher_handle_acquire_leader(*mock_instance_watcher);
auto mock_image_map = new MockImageMap();
expect_image_map_init(*mock_instance_watcher, *mock_image_map, 0);
auto mock_local_pool_watcher = new MockPoolWatcher(m_local_io_ctx.get_id());
expect_pool_watcher_init(*mock_local_pool_watcher, 0);
auto mock_remote_pool_watcher = new MockPoolWatcher(m_remote_io_ctx.get_id());
expect_pool_watcher_init(*mock_remote_pool_watcher, 0);
auto mock_image_deleter = new MockImageDeleter();
expect_image_deleter_init(*mock_image_deleter, 0);
C_SaferCond on_acquire;
namespace_replayer.handle_acquire_leader(&on_acquire);
ASSERT_EQ(0, on_acquire.wait());
// release leader
expect_instance_watcher_handle_release_leader(*mock_instance_watcher);
expect_image_deleter_shut_down(*mock_image_deleter);
expect_pool_watcher_shut_down(*mock_local_pool_watcher);
expect_pool_watcher_shut_down(*mock_remote_pool_watcher);
expect_image_map_shut_down(*mock_image_map);
expect_instance_replayer_release_all(*mock_instance_replayer);
// shut down
expect_instance_replayer_stop(*mock_instance_replayer);
expect_instance_watcher_shut_down(*mock_instance_watcher);
expect_instance_replayer_shut_down(*mock_instance_replayer);
expect_mirror_status_updater_shut_down(*mock_remote_mirror_status_updater);
expect_mirror_status_updater_shut_down(*mock_local_mirror_status_updater);
C_SaferCond on_shut_down;
namespace_replayer.shut_down(&on_shut_down);
ASSERT_EQ(0, on_shut_down.wait());
}
} // namespace mirror
} // namespace rbd
| 21510 | 34.148693 | 110 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_PoolReplayer.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Config.h"
#include "librbd/api/Namespace.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librados_test_stub/MockTestMemCluster.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include "tools/rbd_mirror/Throttler.h"
#include "tools/rbd_mirror/LeaderWatcher.h"
#include "tools/rbd_mirror/NamespaceReplayer.h"
#include "tools/rbd_mirror/PoolMetaCache.h"
#include "tools/rbd_mirror/PoolReplayer.h"
#include "tools/rbd_mirror/RemotePoolPoller.h"
#include "tools/rbd_mirror/ServiceDaemon.h"
#include "tools/rbd_mirror/Threads.h"
#include "common/Formatter.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
namespace api {
template <>
class Config<MockTestImageCtx> {
public:
static void apply_pool_overrides(librados::IoCtx& io_ctx,
ConfigProxy* config_proxy) {
}
};
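// Mocked namespace listing: tests drive PoolReplayer's periodic namespace
// refresh by adding/removing names in this thread-safe set, which the
// static list() hook reads back.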
template <>
class Namespace<MockTestImageCtx> {
public:
static Namespace* s_instance;
static int list(librados::IoCtx& io_ctx, std::vector<std::string> *names) {
if (s_instance) {
return s_instance->list(names);
}
return 0;
}
Namespace() {
s_instance = this;
}
void add(const std::string &name) {
std::lock_guard locker{m_lock};
m_names.insert(name);
}
void remove(const std::string &name) {
std::lock_guard locker{m_lock};
m_names.erase(name);
}
void clear() {
std::lock_guard locker{m_lock};
m_names.clear();
}
private:
ceph::mutex m_lock = ceph::make_mutex("Namespace");
std::set<std::string> m_names;
int list(std::vector<std::string> *names) {
std::lock_guard locker{m_lock};
names->clear();
names->insert(names->begin(), m_names.begin(), m_names.end());
return 0;
}
};
Namespace<librbd::MockTestImageCtx>* Namespace<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace api
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct Throttler<librbd::MockTestImageCtx> {
static Throttler* s_instance;
static Throttler *create(
CephContext *cct,
const std::string &max_concurrent_ops_config_param_name) {
return s_instance;
}
Throttler() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
virtual ~Throttler() {
ceph_assert(s_instance == this);
s_instance = nullptr;
}
MOCK_METHOD1(print_status, void(Formatter*));
};
Throttler<librbd::MockTestImageCtx>* Throttler<librbd::MockTestImageCtx>::s_instance = nullptr;
template <>
struct NamespaceReplayer<librbd::MockTestImageCtx> {
static std::map<std::string, NamespaceReplayer *> s_instances;
static NamespaceReplayer *create(
const std::string &name,
librados::IoCtx &local_ioctx,
librados::IoCtx &remote_ioctx,
const std::string &local_mirror_uuid,
const std::string& local_mirror_peer_uuid,
const RemotePoolMeta& remote_pool_meta,
Threads<librbd::MockTestImageCtx> *threads,
Throttler<librbd::MockTestImageCtx> *image_sync_throttler,
Throttler<librbd::MockTestImageCtx> *image_deletion_throttler,
ServiceDaemon<librbd::MockTestImageCtx> *service_daemon,
journal::CacheManagerHandler *cache_manager_handler,
PoolMetaCache* pool_meta_cache) {
ceph_assert(s_instances.count(name));
auto namespace_replayer = s_instances[name];
s_instances.erase(name);
return namespace_replayer;
}
MOCK_METHOD0(is_blocklisted, bool());
MOCK_METHOD0(get_instance_id, std::string());
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
MOCK_METHOD1(handle_acquire_leader, void(Context *));
MOCK_METHOD1(handle_release_leader, void(Context *));
MOCK_METHOD1(handle_update_leader, void(const std::string &));
MOCK_METHOD1(handle_instances_added, void(const std::vector<std::string> &));
MOCK_METHOD1(handle_instances_removed, void(const std::vector<std::string> &));
MOCK_METHOD1(print_status, void(Formatter*));
MOCK_METHOD0(start, void());
MOCK_METHOD0(stop, void());
MOCK_METHOD0(restart, void());
MOCK_METHOD0(flush, void());
NamespaceReplayer(const std::string &name = "") {
ceph_assert(!s_instances.count(name));
s_instances[name] = this;
}
};
std::map<std::string, NamespaceReplayer<librbd::MockTestImageCtx> *> NamespaceReplayer<librbd::MockTestImageCtx>::s_instances;
template<>
struct LeaderWatcher<librbd::MockTestImageCtx> {
static LeaderWatcher* s_instance;
leader_watcher::Listener* listener = nullptr;
static LeaderWatcher *create(Threads<librbd::MockTestImageCtx> *threads,
librados::IoCtx &ioctx,
leader_watcher::Listener* listener) {
ceph_assert(s_instance != nullptr);
s_instance->listener = listener;
return s_instance;
}
MOCK_METHOD0(is_blocklisted, bool());
MOCK_METHOD0(is_leader, bool());
MOCK_METHOD0(release_leader, void());
MOCK_METHOD1(get_leader_instance_id, bool(std::string*));
MOCK_METHOD1(list_instances, void(std::vector<std::string>*));
MOCK_METHOD0(init, int());
MOCK_METHOD0(shut_down, int());
LeaderWatcher() {
s_instance = this;
}
};
LeaderWatcher<librbd::MockTestImageCtx>* LeaderWatcher<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct RemotePoolPoller<librbd::MockTestImageCtx> {
static RemotePoolPoller* s_instance;
remote_pool_poller::Listener* listener = nullptr;
static RemotePoolPoller* create(
Threads<librbd::MockTestImageCtx>* threads,
librados::IoCtx& remote_io_ctx,
const std::string& local_site_name,
const std::string& local_mirror_uuid,
remote_pool_poller::Listener& listener) {
ceph_assert(s_instance != nullptr);
s_instance->listener = &listener;
return s_instance;
}
MOCK_METHOD1(init, void(Context*));
MOCK_METHOD1(shut_down, void(Context*));
RemotePoolPoller() {
s_instance = this;
}
};
RemotePoolPoller<librbd::MockTestImageCtx>* RemotePoolPoller<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct ServiceDaemon<librbd::MockTestImageCtx> {
MOCK_METHOD2(add_namespace, void(int64_t, const std::string &));
MOCK_METHOD2(remove_namespace, void(int64_t, const std::string &));
MOCK_METHOD3(add_or_update_attribute,
void(int64_t, const std::string&,
const service_daemon::AttributeValue&));
MOCK_METHOD2(remove_attribute,
void(int64_t, const std::string&));
MOCK_METHOD4(add_or_update_callout, uint64_t(int64_t, uint64_t,
service_daemon::CalloutLevel,
const std::string&));
MOCK_METHOD2(remove_callout, void(int64_t, uint64_t));
};
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
ceph::condition_variable timer_cond;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
} // namespace mirror
} // namespace rbd
// template definitions
#include "tools/rbd_mirror/PoolReplayer.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::AtLeast;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::WithArg;
class TestMockPoolReplayer : public TestMockFixture {
public:
typedef librbd::api::Namespace<librbd::MockTestImageCtx> MockNamespace;
typedef PoolReplayer<librbd::MockTestImageCtx> MockPoolReplayer;
typedef Throttler<librbd::MockTestImageCtx> MockThrottler;
typedef NamespaceReplayer<librbd::MockTestImageCtx> MockNamespaceReplayer;
typedef RemotePoolPoller<librbd::MockTestImageCtx> MockRemotePoolPoller;
typedef LeaderWatcher<librbd::MockTestImageCtx> MockLeaderWatcher;
typedef ServiceDaemon<librbd::MockTestImageCtx> MockServiceDaemon;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
void expect_work_queue(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillRepeatedly(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
}
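// create_rados_client() is intercepted so the test can assert which cluster
// name the PoolReplayer dials; a non-null cct_ref additionally takes a
// reference on the remote CephContext so config overrides can be inspected
// (see ConfigKeyOverride below).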
void expect_connect(librados::MockTestMemCluster& mock_cluster,
librados::MockTestMemRadosClient* mock_rados_client,
const std::string& cluster_name, CephContext** cct_ref) {
EXPECT_CALL(mock_cluster, create_rados_client(_))
.WillOnce(Invoke([cluster_name, mock_rados_client, cct_ref](CephContext* cct) {
EXPECT_EQ(cluster_name, cct->_conf->cluster);
if (cct_ref != nullptr) {
cct->get();
*cct_ref = cct;
}
return mock_rados_client;
}));
}
void expect_create_ioctx(librados::MockTestMemRadosClient* mock_rados_client,
librados::MockTestMemIoCtxImpl* mock_io_ctx_impl) {
EXPECT_CALL(*mock_rados_client, create_ioctx(_, _))
.WillOnce(Invoke([mock_io_ctx_impl](int64_t id, const std::string& name) {
return mock_io_ctx_impl;
}));
}
void expect_mirror_uuid_get(librados::MockTestMemIoCtxImpl *io_ctx_impl,
const std::string &uuid, int r) {
bufferlist out_bl;
encode(uuid, out_bl);
EXPECT_CALL(*io_ctx_impl,
exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_uuid_get"),
_, _, _, _))
.WillOnce(DoAll(WithArg<5>(Invoke([out_bl](bufferlist *bl) {
*bl = out_bl;
})),
Return(r)));
}
void expect_mirror_mode_get(librados::MockTestMemIoCtxImpl *io_ctx_impl,
cls::rbd::MirrorMode mirror_mode, int r) {
bufferlist out_bl;
encode(mirror_mode, out_bl);
EXPECT_CALL(*io_ctx_impl,
exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_mode_get"),
_, _, _, _))
.WillOnce(DoAll(WithArg<5>(Invoke([out_bl](bufferlist *bl) {
*bl = out_bl;
})),
Return(r)));
}
void expect_mirror_mode_get(librados::MockTestMemIoCtxImpl *io_ctx_impl) {
EXPECT_CALL(*io_ctx_impl,
exec(RBD_MIRRORING, _, StrEq("rbd"), StrEq("mirror_mode_get"),
_, _, _, _))
.WillRepeatedly(DoAll(WithArg<5>(Invoke([](bufferlist *bl) {
encode(cls::rbd::MIRROR_MODE_POOL, *bl);
})),
Return(0)));
}
void expect_leader_watcher_init(MockLeaderWatcher& mock_leader_watcher,
int r) {
EXPECT_CALL(mock_leader_watcher, init())
.WillOnce(Return(r));
}
void expect_leader_watcher_shut_down(MockLeaderWatcher& mock_leader_watcher) {
EXPECT_CALL(mock_leader_watcher, shut_down());
}
void expect_leader_watcher_get_leader_instance_id(
MockLeaderWatcher& mock_leader_watcher) {
EXPECT_CALL(mock_leader_watcher, get_leader_instance_id(_))
.WillRepeatedly(Return(true));
}
void expect_leader_watcher_list_instances(
MockLeaderWatcher& mock_leader_watcher) {
EXPECT_CALL(mock_leader_watcher, list_instances(_))
.Times(AtLeast(0));
}
void expect_remote_pool_poller_init(
MockRemotePoolPoller& mock_remote_pool_poller,
const RemotePoolMeta& remote_pool_meta, int r) {
EXPECT_CALL(mock_remote_pool_poller, init(_))
.WillOnce(Invoke(
[this, &mock_remote_pool_poller, remote_pool_meta, r]
(Context* ctx) {
if (r >= 0) {
mock_remote_pool_poller.listener->handle_updated(
remote_pool_meta);
}
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_remote_pool_poller_shut_down(
MockRemotePoolPoller& mock_remote_pool_poller, int r) {
EXPECT_CALL(mock_remote_pool_poller, shut_down(_))
.WillOnce(Invoke(
[this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_leader_watcher_is_blocklisted(
MockLeaderWatcher &mock_leader_watcher, bool blocklisted) {
EXPECT_CALL(mock_leader_watcher, is_blocklisted())
.WillRepeatedly(Return(blocklisted));
}
void expect_namespace_replayer_is_blocklisted(
MockNamespaceReplayer &mock_namespace_replayer,
bool blocklisted) {
EXPECT_CALL(mock_namespace_replayer, is_blocklisted())
.WillRepeatedly(Return(blocklisted));
}
void expect_namespace_replayer_get_instance_id(
MockNamespaceReplayer &mock_namespace_replayer,
const std::string &instance_id) {
EXPECT_CALL(mock_namespace_replayer, get_instance_id())
.WillOnce(Return(instance_id));
}
void expect_namespace_replayer_init(
MockNamespaceReplayer &mock_namespace_replayer, int r,
Context *on_init = nullptr) {
EXPECT_CALL(mock_namespace_replayer, init(_))
.WillOnce(Invoke([this, r, on_init](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
if (on_init != nullptr) {
m_threads->work_queue->queue(on_init, r);
}
}));
}
void expect_namespace_replayer_shut_down(
MockNamespaceReplayer &mock_namespace_replayer,
Context *on_shut_down = nullptr) {
EXPECT_CALL(mock_namespace_replayer, shut_down(_))
.WillOnce(Invoke([this, on_shut_down](Context* ctx) {
m_threads->work_queue->queue(ctx);
if (on_shut_down != nullptr) {
m_threads->work_queue->queue(on_shut_down);
}
}));
}
void expect_namespace_replayer_handle_acquire_leader(
MockNamespaceReplayer &mock_namespace_replayer, int r,
Context *on_acquire = nullptr) {
EXPECT_CALL(mock_namespace_replayer, handle_acquire_leader(_))
.WillOnce(Invoke([this, r, on_acquire](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
if (on_acquire != nullptr) {
m_threads->work_queue->queue(on_acquire, r);
}
}));
}
void expect_namespace_replayer_handle_release_leader(
MockNamespaceReplayer &mock_namespace_replayer, int r,
Context *on_release = nullptr) {
EXPECT_CALL(mock_namespace_replayer, handle_release_leader(_))
.WillOnce(Invoke([this, r, on_release](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
if (on_release != nullptr) {
m_threads->work_queue->queue(on_release, r);
}
}));
}
void expect_namespace_replayer_handle_update_leader(
MockNamespaceReplayer &mock_namespace_replayer,
const std::string &leader_instance_id,
Context *on_update = nullptr) {
EXPECT_CALL(mock_namespace_replayer,
handle_update_leader(leader_instance_id))
.WillOnce(Invoke([on_update](const std::string &) {
if (on_update != nullptr) {
on_update->complete(0);
}
}));
}
void expect_namespace_replayer_handle_instances_added(
MockNamespaceReplayer &mock_namespace_replayer) {
EXPECT_CALL(mock_namespace_replayer, handle_instances_added(_));
}
void expect_namespace_replayer_handle_instances_removed(
MockNamespaceReplayer &mock_namespace_replayer) {
EXPECT_CALL(mock_namespace_replayer, handle_instances_removed(_));
}
void expect_service_daemon_add_namespace(
MockServiceDaemon &mock_service_daemon,
const std::string& namespace_name) {
EXPECT_CALL(mock_service_daemon,
add_namespace(m_local_io_ctx.get_id(), namespace_name));
}
void expect_service_daemon_remove_namespace(
MockServiceDaemon &mock_service_daemon,
const std::string& namespace_name) {
EXPECT_CALL(mock_service_daemon,
remove_namespace(m_local_io_ctx.get_id(), namespace_name));
}
void expect_service_daemon_add_or_update_attribute(
MockServiceDaemon &mock_service_daemon, const std::string& key,
const service_daemon::AttributeValue& value) {
EXPECT_CALL(mock_service_daemon, add_or_update_attribute(_, key, value));
}
void expect_service_daemon_remove_attribute(
MockServiceDaemon &mock_service_daemon, const std::string& key) {
EXPECT_CALL(mock_service_daemon, remove_attribute(_, key));
}
void expect_service_daemon_add_or_update_instance_id_attribute(
MockServiceDaemon &mock_service_daemon, const std::string &instance_id) {
expect_service_daemon_add_or_update_attribute(
mock_service_daemon, "instance_id", {instance_id});
}
PoolMetaCache m_pool_meta_cache{g_ceph_context};
};
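// ConfigKeyOverride: mon_host/key supplied in the PeerSpec must be applied
// to the remote cluster's CephContext instead of being inherited from the
// local configuration.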
TEST_F(TestMockPoolReplayer, ConfigKeyOverride) {
PeerSpec peer_spec{"uuid", "cluster name", "client.name"};
peer_spec.mon_host = "123";
peer_spec.key = "234";
auto mock_default_namespace_replayer = new MockNamespaceReplayer();
expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer,
false);
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
auto mock_leader_watcher = new MockLeaderWatcher();
expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher);
expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false);
InSequence seq;
auto& mock_cluster = get_mock_cluster();
auto mock_local_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr);
auto mock_remote_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
CephContext* remote_cct = nullptr;
expect_connect(mock_cluster, mock_remote_rados_client, "cluster name",
&remote_cct);
auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx(
m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name());
expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx);
expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0);
auto mock_remote_pool_poller = new MockRemotePoolPoller();
expect_remote_pool_poller_init(*mock_remote_pool_poller,
{"remote mirror uuid", ""}, 0);
expect_namespace_replayer_init(*mock_default_namespace_replayer, 0);
expect_leader_watcher_init(*mock_leader_watcher, 0);
MockServiceDaemon mock_service_daemon;
std::string instance_id = stringify(mock_local_io_ctx->get_instance_id());
expect_service_daemon_add_or_update_instance_id_attribute(
mock_service_daemon, instance_id);
MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr,
&m_pool_meta_cache,
m_local_io_ctx.get_id(), peer_spec, {});
pool_replayer.init("siteA");
ASSERT_TRUE(remote_cct != nullptr);
ASSERT_EQ("123", remote_cct->_conf.get_val<std::string>("mon_host"));
ASSERT_EQ("234", remote_cct->_conf.get_val<std::string>("key"));
remote_cct->put();
expect_leader_watcher_shut_down(*mock_leader_watcher);
expect_namespace_replayer_shut_down(*mock_default_namespace_replayer);
expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0);
pool_replayer.shut_down();
}
TEST_F(TestMockPoolReplayer, AcquireReleaseLeader) {
PeerSpec peer_spec{"uuid", "cluster name", "client.name"};
peer_spec.mon_host = "123";
peer_spec.key = "234";
auto mock_default_namespace_replayer = new MockNamespaceReplayer();
expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer,
false);
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
auto mock_leader_watcher = new MockLeaderWatcher();
expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher);
expect_leader_watcher_list_instances(*mock_leader_watcher);
expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false);
InSequence seq;
auto& mock_cluster = get_mock_cluster();
auto mock_local_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr);
auto mock_remote_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
expect_connect(mock_cluster, mock_remote_rados_client, "cluster name",
nullptr);
auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx(
m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name());
expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx);
expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0);
auto mock_remote_pool_poller = new MockRemotePoolPoller();
expect_remote_pool_poller_init(*mock_remote_pool_poller,
{"remote mirror uuid", ""}, 0);
expect_namespace_replayer_init(*mock_default_namespace_replayer, 0);
expect_leader_watcher_init(*mock_leader_watcher, 0);
MockServiceDaemon mock_service_daemon;
std::string instance_id = stringify(mock_local_io_ctx->get_instance_id());
expect_service_daemon_add_or_update_instance_id_attribute(
mock_service_daemon, instance_id);
MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr,
&m_pool_meta_cache,
m_local_io_ctx.get_id(), peer_spec, {});
pool_replayer.init("siteA");
expect_service_daemon_add_or_update_attribute(
mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true);
expect_namespace_replayer_handle_acquire_leader(
*mock_default_namespace_replayer, 0);
C_SaferCond on_acquire;
mock_leader_watcher->listener->post_acquire_handler(&on_acquire);
ASSERT_EQ(0, on_acquire.wait());
expect_service_daemon_remove_attribute(mock_service_daemon,
SERVICE_DAEMON_LEADER_KEY);
expect_namespace_replayer_handle_release_leader(
*mock_default_namespace_replayer, 0);
C_SaferCond on_release;
mock_leader_watcher->listener->pre_release_handler(&on_release);
ASSERT_EQ(0, on_release.wait());
expect_leader_watcher_shut_down(*mock_leader_watcher);
expect_namespace_replayer_shut_down(*mock_default_namespace_replayer);
expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0);
pool_replayer.shut_down();
}
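// Namespaces: a namespace added while this instance is a follower only
// receives an update_leader notification; one added while leader
// immediately acquires the per-namespace leader role, and removal shuts
// the corresponding NamespaceReplayer down.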
TEST_F(TestMockPoolReplayer, Namespaces) {
PeerSpec peer_spec{"uuid", "cluster name", "client.name"};
peer_spec.mon_host = "123";
peer_spec.key = "234";
g_ceph_context->_conf.set_val(
"rbd_mirror_pool_replayers_refresh_interval", "1");
MockNamespace mock_namespace;
auto mock_default_namespace_replayer = new MockNamespaceReplayer();
expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer,
false);
auto mock_ns1_namespace_replayer = new MockNamespaceReplayer("ns1");
expect_namespace_replayer_is_blocklisted(*mock_ns1_namespace_replayer,
false);
auto mock_ns2_namespace_replayer = new MockNamespaceReplayer("ns2");
expect_namespace_replayer_is_blocklisted(*mock_ns2_namespace_replayer,
false);
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
auto mock_leader_watcher = new MockLeaderWatcher();
expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher);
expect_leader_watcher_list_instances(*mock_leader_watcher);
expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false);
auto& mock_cluster = get_mock_cluster();
auto mock_local_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx(
m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name());
auto mock_remote_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
expect_mirror_mode_get(mock_local_io_ctx);
InSequence seq;
expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr);
expect_connect(mock_cluster, mock_remote_rados_client, "cluster name",
nullptr);
expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx);
expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0);
auto mock_remote_pool_poller = new MockRemotePoolPoller();
expect_remote_pool_poller_init(*mock_remote_pool_poller,
{"remote mirror uuid", ""}, 0);
expect_namespace_replayer_init(*mock_default_namespace_replayer, 0);
expect_leader_watcher_init(*mock_leader_watcher, 0);
MockServiceDaemon mock_service_daemon;
std::string instance_id = stringify(mock_local_io_ctx->get_instance_id());
expect_service_daemon_add_or_update_instance_id_attribute(
mock_service_daemon, instance_id);
MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr,
&m_pool_meta_cache,
m_local_io_ctx.get_id(), peer_spec, {});
pool_replayer.init("siteA");
C_SaferCond on_ns1_init;
expect_namespace_replayer_init(*mock_ns1_namespace_replayer, 0);
expect_service_daemon_add_namespace(mock_service_daemon, "ns1");
expect_namespace_replayer_handle_update_leader(*mock_ns1_namespace_replayer,
"", &on_ns1_init);
mock_namespace.add("ns1");
ASSERT_EQ(0, on_ns1_init.wait());
expect_service_daemon_add_or_update_attribute(
mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true);
expect_namespace_replayer_handle_acquire_leader(
*mock_default_namespace_replayer, 0);
expect_namespace_replayer_handle_acquire_leader(
*mock_ns1_namespace_replayer, 0);
C_SaferCond on_acquire;
mock_leader_watcher->listener->post_acquire_handler(&on_acquire);
ASSERT_EQ(0, on_acquire.wait());
expect_namespace_replayer_init(*mock_ns2_namespace_replayer, 0);
expect_service_daemon_add_namespace(mock_service_daemon, "ns2");
C_SaferCond on_ns2_acquire;
expect_namespace_replayer_handle_acquire_leader(
*mock_ns2_namespace_replayer, 0, &on_ns2_acquire);
expect_namespace_replayer_handle_instances_added(
*mock_ns2_namespace_replayer);
mock_namespace.add("ns2");
ASSERT_EQ(0, on_ns2_acquire.wait());
C_SaferCond on_ns2_shut_down;
expect_service_daemon_remove_namespace(mock_service_daemon, "ns2");
expect_namespace_replayer_shut_down(*mock_ns2_namespace_replayer,
&on_ns2_shut_down);
mock_namespace.remove("ns2");
ASSERT_EQ(0, on_ns2_shut_down.wait());
expect_service_daemon_remove_attribute(mock_service_daemon,
SERVICE_DAEMON_LEADER_KEY);
expect_namespace_replayer_handle_release_leader(
*mock_default_namespace_replayer, 0);
expect_namespace_replayer_handle_release_leader(
*mock_ns1_namespace_replayer, 0);
C_SaferCond on_release;
mock_leader_watcher->listener->pre_release_handler(&on_release);
ASSERT_EQ(0, on_release.wait());
expect_service_daemon_remove_namespace(mock_service_daemon, "ns1");
expect_namespace_replayer_shut_down(*mock_ns1_namespace_replayer);
expect_leader_watcher_shut_down(*mock_leader_watcher);
expect_namespace_replayer_shut_down(*mock_default_namespace_replayer);
expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0);
pool_replayer.shut_down();
}
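// NamespacesError: a failed init or acquire tears down only the affected
// NamespaceReplayer; only a failure in the default namespace replayer
// aborts leader acquisition itself.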
TEST_F(TestMockPoolReplayer, NamespacesError) {
PeerSpec peer_spec{"uuid", "cluster name", "client.name"};
peer_spec.mon_host = "123";
peer_spec.key = "234";
g_ceph_context->_conf.set_val(
"rbd_mirror_pool_replayers_refresh_interval", "1");
MockNamespace mock_namespace;
auto mock_default_namespace_replayer = new MockNamespaceReplayer();
expect_namespace_replayer_is_blocklisted(*mock_default_namespace_replayer,
false);
auto mock_ns1_namespace_replayer = new MockNamespaceReplayer("ns1");
auto mock_ns2_namespace_replayer = new MockNamespaceReplayer("ns2");
expect_namespace_replayer_is_blocklisted(*mock_ns2_namespace_replayer,
false);
auto mock_ns3_namespace_replayer = new MockNamespaceReplayer("ns3");
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
auto mock_leader_watcher = new MockLeaderWatcher();
expect_leader_watcher_get_leader_instance_id(*mock_leader_watcher);
expect_leader_watcher_list_instances(*mock_leader_watcher);
expect_leader_watcher_is_blocklisted(*mock_leader_watcher, false);
auto& mock_cluster = get_mock_cluster();
auto mock_local_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
auto mock_local_io_ctx = mock_local_rados_client->do_create_ioctx(
m_local_io_ctx.get_id(), m_local_io_ctx.get_pool_name());
auto mock_remote_rados_client = mock_cluster.do_create_rados_client(
g_ceph_context);
expect_mirror_mode_get(mock_local_io_ctx);
InSequence seq;
expect_connect(mock_cluster, mock_local_rados_client, "ceph", nullptr);
expect_connect(mock_cluster, mock_remote_rados_client, "cluster name",
nullptr);
expect_create_ioctx(mock_local_rados_client, mock_local_io_ctx);
expect_mirror_uuid_get(mock_local_io_ctx, "uuid", 0);
auto mock_remote_pool_poller = new MockRemotePoolPoller();
expect_remote_pool_poller_init(*mock_remote_pool_poller,
{"remote mirror uuid", ""}, 0);
expect_namespace_replayer_init(*mock_default_namespace_replayer, 0);
expect_leader_watcher_init(*mock_leader_watcher, 0);
MockServiceDaemon mock_service_daemon;
std::string instance_id = stringify(mock_local_io_ctx->get_instance_id());
expect_service_daemon_add_or_update_instance_id_attribute(
mock_service_daemon, instance_id);
MockPoolReplayer pool_replayer(&mock_threads, &mock_service_daemon, nullptr,
&m_pool_meta_cache,
m_local_io_ctx.get_id(), peer_spec, {});
pool_replayer.init("siteA");
// test namespace replayer init fails for non leader
C_SaferCond on_ns1_init;
Context* ctx = new LambdaContext(
[&mock_namespace, &on_ns1_init](int r) {
mock_namespace.remove("ns1");
on_ns1_init.complete(r);
});
expect_namespace_replayer_init(*mock_ns1_namespace_replayer, -EINVAL, ctx);
mock_namespace.add("ns1");
ASSERT_EQ(-EINVAL, on_ns1_init.wait());
// test acquire leader fails when default namespace replayer fails
expect_service_daemon_add_or_update_attribute(
mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true);
expect_namespace_replayer_handle_acquire_leader(
*mock_default_namespace_replayer, -EINVAL);
C_SaferCond on_acquire1;
mock_leader_watcher->listener->post_acquire_handler(&on_acquire1);
ASSERT_EQ(-EINVAL, on_acquire1.wait());
// test acquire leader succeeds when non-default namespace replayer fails
C_SaferCond on_ns2_init;
expect_namespace_replayer_init(*mock_ns2_namespace_replayer, 0);
expect_service_daemon_add_namespace(mock_service_daemon, "ns2");
expect_namespace_replayer_handle_update_leader(*mock_ns2_namespace_replayer,
"", &on_ns2_init);
mock_namespace.add("ns2");
ASSERT_EQ(0, on_ns2_init.wait());
expect_service_daemon_add_or_update_attribute(
mock_service_daemon, SERVICE_DAEMON_LEADER_KEY, true);
expect_namespace_replayer_handle_acquire_leader(
*mock_default_namespace_replayer, 0);
expect_namespace_replayer_handle_acquire_leader(*mock_ns2_namespace_replayer,
-EINVAL);
ctx = new LambdaContext(
[&mock_namespace](int) {
mock_namespace.remove("ns2");
});
expect_service_daemon_remove_namespace(mock_service_daemon, "ns2");
expect_namespace_replayer_shut_down(*mock_ns2_namespace_replayer, ctx);
mock_namespace.add("ns2");
C_SaferCond on_acquire2;
mock_leader_watcher->listener->post_acquire_handler(&on_acquire2);
ASSERT_EQ(0, on_acquire2.wait());
  // test namespace replayer shut down when acquire leader fails after a
  // successful init
C_SaferCond on_ns3_shut_down;
ctx = new LambdaContext(
[&mock_namespace, &on_ns3_shut_down](int) {
mock_namespace.remove("ns3");
on_ns3_shut_down.complete(0);
});
expect_namespace_replayer_init(*mock_ns3_namespace_replayer, 0);
expect_service_daemon_add_namespace(mock_service_daemon, "ns3");
expect_namespace_replayer_handle_acquire_leader(*mock_ns3_namespace_replayer,
-EINVAL);
expect_service_daemon_remove_namespace(mock_service_daemon, "ns3");
expect_namespace_replayer_shut_down(*mock_ns3_namespace_replayer, ctx);
mock_namespace.add("ns3");
ASSERT_EQ(0, on_ns3_shut_down.wait());
expect_leader_watcher_shut_down(*mock_leader_watcher);
expect_namespace_replayer_shut_down(*mock_default_namespace_replayer);
expect_remote_pool_poller_shut_down(*mock_remote_pool_poller, 0);
pool_replayer.shut_down();
}
} // namespace mirror
} // namespace rbd
| 34094 | 35.465241 | 126 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_PoolWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include "librbd/MirroringWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/PoolWatcher.h"
#include "tools/rbd_mirror/pool_watcher/RefreshImagesRequest.h"
#include "include/stringify.h"
using namespace std::chrono_literals;
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
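// Two-layer watcher mock: MockMirroringWatcher (a plain singleton) holds
// the gmock expectations, while the MirroringWatcher<MockTestImageCtx>
// specialization below forwards the calls PoolWatcher makes into it.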
struct MockMirroringWatcher {
static MockMirroringWatcher *s_instance;
static MockMirroringWatcher &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
MockMirroringWatcher() {
s_instance = this;
}
MOCK_CONST_METHOD0(is_unregistered, bool());
MOCK_METHOD1(register_watch, void(Context*));
MOCK_METHOD1(unregister_watch, void(Context*));
MOCK_CONST_METHOD0(get_oid, std::string());
};
template <>
struct MirroringWatcher<MockTestImageCtx> {
static MirroringWatcher *s_instance;
MirroringWatcher(librados::IoCtx &io_ctx, ::MockContextWQ *work_queue) {
s_instance = this;
}
virtual ~MirroringWatcher() {
}
static MirroringWatcher<MockTestImageCtx> &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
virtual void handle_rewatch_complete(int r) = 0;
virtual void handle_mode_updated(cls::rbd::MirrorMode mirror_mode) = 0;
virtual void handle_image_updated(cls::rbd::MirrorImageState state,
const std::string &remote_image_id,
const std::string &global_image_id) = 0;
bool is_unregistered() const {
return MockMirroringWatcher::get_instance().is_unregistered();
}
void register_watch(Context *ctx) {
MockMirroringWatcher::get_instance().register_watch(ctx);
}
void unregister_watch(Context *ctx) {
MockMirroringWatcher::get_instance().unregister_watch(ctx);
}
std::string get_oid() const {
return MockMirroringWatcher::get_instance().get_oid();
}
};
MockMirroringWatcher *MockMirroringWatcher::s_instance = nullptr;
MirroringWatcher<MockTestImageCtx> *MirroringWatcher<MockTestImageCtx>::s_instance = nullptr;
} // namespace librbd
namespace rbd {
namespace mirror {
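// Specialized Threads substitutes a mock timer and work queue while sharing
// the real timer lock, letting tests intercept scheduled events safely.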
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
namespace pool_watcher {
template <>
struct RefreshImagesRequest<librbd::MockTestImageCtx> {
ImageIds *image_ids = nullptr;
Context *on_finish = nullptr;
static RefreshImagesRequest *s_instance;
static RefreshImagesRequest *create(librados::IoCtx &io_ctx,
ImageIds *image_ids,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->image_ids = image_ids;
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD0(send, void());
RefreshImagesRequest() {
s_instance = this;
}
};
RefreshImagesRequest<librbd::MockTestImageCtx> *RefreshImagesRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace pool_watcher
} // namespace mirror
} // namespace rbd
// template definitions
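// (including the .cc below instantiates the PoolWatcher template against the
// MockTestImageCtx and the mock specializations defined above)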
#include "tools/rbd_mirror/PoolWatcher.cc"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::ReturnArg;
using ::testing::StrEq;
using ::testing::WithArg;
using ::testing::WithoutArgs;
class TestMockPoolWatcher : public TestMockFixture {
public:
typedef PoolWatcher<librbd::MockTestImageCtx> MockPoolWatcher;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef pool_watcher::RefreshImagesRequest<librbd::MockTestImageCtx> MockRefreshImagesRequest;
typedef librbd::MockMirroringWatcher MockMirroringWatcher;
typedef librbd::MirroringWatcher<librbd::MockTestImageCtx> MirroringWatcher;
struct MockListener : pool_watcher::Listener {
TestMockPoolWatcher *test;
MockListener(TestMockPoolWatcher *test) : test(test) {
}
MOCK_METHOD3(mock_handle_update, void(const std::string &, const ImageIds &,
const ImageIds &));
void handle_update(const std::string &mirror_uuid,
ImageIds &&added_image_ids,
ImageIds &&removed_image_ids) override {
mock_handle_update(mirror_uuid, added_image_ids, removed_image_ids);
}
};
TestMockPoolWatcher() = default;
void expect_work_queue(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillRepeatedly(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_mirroring_watcher_is_unregistered(MockMirroringWatcher &mock_mirroring_watcher,
bool unregistered) {
EXPECT_CALL(mock_mirroring_watcher, is_unregistered())
.WillOnce(Return(unregistered));
}
void expect_mirroring_watcher_register(MockMirroringWatcher &mock_mirroring_watcher,
int r) {
EXPECT_CALL(mock_mirroring_watcher, register_watch(_))
.WillOnce(CompleteContext(r));
}
void expect_mirroring_watcher_unregister(MockMirroringWatcher &mock_mirroring_watcher,
int r) {
EXPECT_CALL(mock_mirroring_watcher, unregister_watch(_))
.WillOnce(CompleteContext(r));
}
void expect_refresh_images(MockRefreshImagesRequest &request,
const ImageIds &image_ids, int r) {
EXPECT_CALL(request, send())
.WillOnce(Invoke([&request, image_ids, r]() {
*request.image_ids = image_ids;
request.on_finish->complete(r);
}));
}
void expect_listener_handle_update(MockListener &mock_listener,
const std::string &mirror_uuid,
const ImageIds &added_image_ids,
const ImageIds &removed_image_ids) {
EXPECT_CALL(mock_listener, mock_handle_update(mirror_uuid, added_image_ids,
removed_image_ids))
.WillOnce(WithoutArgs(Invoke([this]() {
std::lock_guard locker{m_lock};
++m_update_count;
m_cond.notify_all();
})));
}
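  // Wrap the timer callback so it is completed under the timer lock on the
  // real work queue, approximating SafeTimer's dispatch semantics.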
void expect_timer_add_event(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
auto wrapped_ctx =
new LambdaContext([this, ctx](int r) {
std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
})),
ReturnArg<1>()));
}
int when_shut_down(MockPoolWatcher &mock_pool_watcher) {
C_SaferCond ctx;
mock_pool_watcher.shut_down(&ctx);
return ctx.wait();
}
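  // Blocks (up to 10s) until the listener has observed `count` more updates;
  // the counter is decremented so consecutive waits track consecutive updates.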
bool wait_for_update(uint32_t count) {
std::unique_lock locker{m_lock};
if (m_cond.wait_for(locker, 10s,
[count, this] { return m_update_count >= count; })) {
m_update_count -= count;
return true;
} else {
return false;
}
}
ceph::mutex m_lock = ceph::make_mutex("TestMockPoolWatcher::m_lock");
ceph::condition_variable m_cond;
uint32_t m_update_count = 0;
};
TEST_F(TestMockPoolWatcher, EmptyPool) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, NonEmptyPool) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
ImageIds image_ids{
{"global id 1", "remote id 1"},
{"global id 2", "remote id 2"}};
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, image_ids, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", image_ids, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
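// Watch notifications that race with the in-flight refresh must be replayed
// on top of the refreshed image list once the refresh completes.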
TEST_F(TestMockPoolWatcher, NotifyDuringRefresh) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
ImageIds image_ids{
{"global id 1", "remote id 1"},
{"global id 2", "remote id 2"}};
MockRefreshImagesRequest mock_refresh_images_request;
bool refresh_sent = false;
EXPECT_CALL(mock_refresh_images_request, send())
.WillOnce(Invoke([this, &mock_refresh_images_request, &image_ids,
&refresh_sent]() {
*mock_refresh_images_request.image_ids = image_ids;
std::lock_guard locker{m_lock};
refresh_sent = true;
m_cond.notify_all();
}));
MockListener mock_listener(this);
image_ids = {
{"global id 1", "remote id 1a"},
{"global id 3", "remote id 3"}};
expect_listener_handle_update(mock_listener, "remote uuid", image_ids, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
mock_pool_watcher.init(nullptr);
{
std::unique_lock locker{m_lock};
m_cond.wait(locker, [&] { return refresh_sent; });
}
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_DISABLING, "remote id 2", "global id 2");
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 1a", "global id 1");
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 3", "global id 3");
mock_refresh_images_request.on_finish->complete(0);
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
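// The second queued context is captured rather than dispatched so that the
// test can batch several watch notifications into one listener update.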
TEST_F(TestMockPoolWatcher, Notify) {
MockThreads mock_threads(m_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
ImageIds image_ids{
{"global id 1", "remote id 1"},
{"global id 2", "remote id 2"}};
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, image_ids, 0);
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillOnce(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", image_ids, {});
Context *notify_ctx = nullptr;
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
    .WillOnce(Invoke([this, &notify_ctx](Context *ctx, int r) {
std::lock_guard locker{m_lock};
ASSERT_EQ(nullptr, notify_ctx);
notify_ctx = ctx;
m_cond.notify_all();
}));
expect_listener_handle_update(
mock_listener, "remote uuid",
{{"global id 1", "remote id 1a"}, {"global id 3", "remote id 3"}},
{{"global id 1", "remote id 1"}, {"global id 2", "remote id 2"}});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
C_SaferCond flush_ctx;
m_threads->work_queue->queue(&flush_ctx, 0);
ASSERT_EQ(0, flush_ctx.wait());
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_DISABLING, "remote id 2", "global id 2");
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_DISABLED, "remote id 2", "global id 2");
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 1a", "global id 1");
MirroringWatcher::get_instance().handle_image_updated(
cls::rbd::MIRROR_IMAGE_STATE_ENABLED, "remote id 3", "global id 3");
notify_ctx->complete(0);
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RegisterWatcherBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, -EBLOCKLISTED);
MockListener mock_listener(this);
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(-EBLOCKLISTED, ctx.wait());
ASSERT_TRUE(mock_pool_watcher.is_blocklisted());
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RegisterWatcherMissing) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, -ENOENT);
expect_timer_add_event(mock_threads);
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(-ENOENT, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RegisterWatcherError) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, -EINVAL);
expect_timer_add_event(mock_threads);
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RefreshBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, -EBLOCKLISTED);
MockListener mock_listener(this);
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(-EBLOCKLISTED, ctx.wait());
ASSERT_TRUE(mock_pool_watcher.is_blocklisted());
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RefreshMissing) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, -ENOENT);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RefreshError) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, -EINVAL);
expect_timer_add_event(mock_threads);
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false);
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
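// A successful rewatch schedules a deferred refresh, which should surface any
// images that changed while the watch was interrupted.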
TEST_F(TestMockPoolWatcher, Rewatch) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
expect_timer_add_event(mock_threads);
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false);
expect_refresh_images(mock_refresh_images_request, {{"global id", "image id"}}, 0);
expect_listener_handle_update(mock_listener, "remote uuid",
{{"global id", "image id"}}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
MirroringWatcher::get_instance().handle_rewatch_complete(0);
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RewatchBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
MirroringWatcher::get_instance().handle_rewatch_complete(-EBLOCKLISTED);
ASSERT_TRUE(mock_pool_watcher.is_blocklisted());
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
TEST_F(TestMockPoolWatcher, RewatchError) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
expect_timer_add_event(mock_threads);
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false);
expect_refresh_images(mock_refresh_images_request, {{"global id", "image id"}}, 0);
expect_listener_handle_update(mock_listener, "remote uuid",
{{"global id", "image id"}}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
MirroringWatcher::get_instance().handle_rewatch_complete(-EINVAL);
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
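// A rewatch that completes while a refresh is in flight must trigger a
// second, deferred refresh instead of being dropped.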
TEST_F(TestMockPoolWatcher, DeferredRefresh) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
MockMirroringWatcher mock_mirroring_watcher;
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, true);
expect_mirroring_watcher_register(mock_mirroring_watcher, 0);
MockRefreshImagesRequest mock_refresh_images_request;
EXPECT_CALL(mock_refresh_images_request, send())
.WillOnce(Invoke([&mock_refresh_images_request]() {
*mock_refresh_images_request.image_ids = {};
MirroringWatcher::get_instance().handle_rewatch_complete(0);
mock_refresh_images_request.on_finish->complete(0);
}));
expect_timer_add_event(mock_threads);
expect_mirroring_watcher_is_unregistered(mock_mirroring_watcher, false);
expect_refresh_images(mock_refresh_images_request, {}, 0);
MockListener mock_listener(this);
expect_listener_handle_update(mock_listener, "remote uuid", {}, {});
MockPoolWatcher mock_pool_watcher(&mock_threads, m_remote_io_ctx,
"remote uuid", mock_listener);
C_SaferCond ctx;
mock_pool_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
ASSERT_TRUE(wait_for_update(1));
expect_mirroring_watcher_unregister(mock_mirroring_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_pool_watcher));
}
} // namespace mirror
} // namespace rbd
| 25382 | 33.723666 | 117 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_Throttler.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/librbd/mock/MockImageCtx.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
} // namespace librbd
// template definitions
#include "tools/rbd_mirror/Throttler.cc"
namespace rbd {
namespace mirror {
class TestMockThrottler : public TestMockFixture {
public:
typedef Throttler<librbd::MockTestImageCtx> MockThrottler;
};
TEST_F(TestMockThrottler, Single_Sync) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
C_SaferCond on_start;
throttler.start_op("ns", "id", &on_start);
ASSERT_EQ(0, on_start.wait());
throttler.finish_op("ns", "id");
}
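// With max_concurrent_ops=2, id1/id2 start immediately while id3/id4 queue;
// each finish_op releases the next queued op in FIFO order.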
TEST_F(TestMockThrottler, Multiple_Syncs) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(2);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
C_SaferCond on_start3;
throttler.start_op("ns", "id3", &on_start3);
C_SaferCond on_start4;
throttler.start_op("ns", "id4", &on_start4);
ASSERT_EQ(0, on_start2.wait());
throttler.finish_op("ns", "id2");
ASSERT_EQ(0, on_start3.wait());
throttler.finish_op("ns", "id3");
ASSERT_EQ(0, on_start1.wait());
throttler.finish_op("ns", "id1");
ASSERT_EQ(0, on_start4.wait());
throttler.finish_op("ns", "id4");
}
TEST_F(TestMockThrottler, Cancel_Running_Sync) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
C_SaferCond on_start;
throttler.start_op("ns", "id", &on_start);
ASSERT_EQ(0, on_start.wait());
ASSERT_FALSE(throttler.cancel_op("ns", "id"));
throttler.finish_op("ns", "id");
}
TEST_F(TestMockThrottler, Cancel_Waiting_Sync) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(1);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
ASSERT_EQ(0, on_start1.wait());
ASSERT_TRUE(throttler.cancel_op("ns", "id2"));
ASSERT_EQ(-ECANCELED, on_start2.wait());
throttler.finish_op("ns", "id1");
}
TEST_F(TestMockThrottler, Cancel_Running_Sync_Start_Waiting) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(1);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
ASSERT_EQ(0, on_start1.wait());
ASSERT_FALSE(throttler.cancel_op("ns", "id1"));
throttler.finish_op("ns", "id1");
ASSERT_EQ(0, on_start2.wait());
throttler.finish_op("ns", "id2");
}
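// Duplicate semantics, as exercised here: re-issuing start_op for an id that
// is already running completes immediately, while re-issuing it for an id
// that is still queued supersedes the earlier waiter, failing it with -ENOENT.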
TEST_F(TestMockThrottler, Duplicate) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(1);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
ASSERT_EQ(0, on_start1.wait());
C_SaferCond on_start2;
throttler.start_op("ns", "id1", &on_start2);
ASSERT_EQ(0, on_start2.wait());
C_SaferCond on_start3;
throttler.start_op("ns", "id2", &on_start3);
C_SaferCond on_start4;
throttler.start_op("ns", "id2", &on_start4);
ASSERT_EQ(-ENOENT, on_start3.wait());
throttler.finish_op("ns", "id1");
ASSERT_EQ(0, on_start4.wait());
throttler.finish_op("ns", "id2");
}
TEST_F(TestMockThrottler, Duplicate2) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(2);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
ASSERT_EQ(0, on_start1.wait());
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
ASSERT_EQ(0, on_start2.wait());
C_SaferCond on_start3;
throttler.start_op("ns", "id3", &on_start3);
C_SaferCond on_start4;
throttler.start_op("ns", "id3", &on_start4); // dup
ASSERT_EQ(-ENOENT, on_start3.wait());
C_SaferCond on_start5;
throttler.start_op("ns", "id4", &on_start5);
throttler.finish_op("ns", "id1");
ASSERT_EQ(0, on_start4.wait());
throttler.finish_op("ns", "id2");
ASSERT_EQ(0, on_start5.wait());
C_SaferCond on_start6;
throttler.start_op("ns", "id5", &on_start6);
throttler.finish_op("ns", "id3");
ASSERT_EQ(0, on_start6.wait());
throttler.finish_op("ns", "id4");
throttler.finish_op("ns", "id5");
}
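// Raising the limit immediately releases queued ops up to the new maximum.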
TEST_F(TestMockThrottler, Increase_Max_Concurrent_Syncs) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(2);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
C_SaferCond on_start3;
throttler.start_op("ns", "id3", &on_start3);
C_SaferCond on_start4;
throttler.start_op("ns", "id4", &on_start4);
C_SaferCond on_start5;
throttler.start_op("ns", "id5", &on_start5);
ASSERT_EQ(0, on_start1.wait());
ASSERT_EQ(0, on_start2.wait());
throttler.set_max_concurrent_ops(4);
ASSERT_EQ(0, on_start3.wait());
ASSERT_EQ(0, on_start4.wait());
throttler.finish_op("ns", "id4");
ASSERT_EQ(0, on_start5.wait());
throttler.finish_op("ns", "id1");
throttler.finish_op("ns", "id2");
throttler.finish_op("ns", "id3");
throttler.finish_op("ns", "id5");
}
TEST_F(TestMockThrottler, Decrease_Max_Concurrent_Syncs) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(4);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
C_SaferCond on_start3;
throttler.start_op("ns", "id3", &on_start3);
C_SaferCond on_start4;
throttler.start_op("ns", "id4", &on_start4);
C_SaferCond on_start5;
throttler.start_op("ns", "id5", &on_start5);
ASSERT_EQ(0, on_start1.wait());
ASSERT_EQ(0, on_start2.wait());
ASSERT_EQ(0, on_start3.wait());
ASSERT_EQ(0, on_start4.wait());
throttler.set_max_concurrent_ops(2);
throttler.finish_op("ns", "id1");
throttler.finish_op("ns", "id2");
throttler.finish_op("ns", "id3");
ASSERT_EQ(0, on_start5.wait());
throttler.finish_op("ns", "id4");
throttler.finish_op("ns", "id5");
}
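// As exercised here, drain() fails the still-queued op with the supplied
// error code.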
TEST_F(TestMockThrottler, Drain) {
MockThrottler throttler(g_ceph_context, "rbd_mirror_concurrent_image_syncs");
throttler.set_max_concurrent_ops(1);
C_SaferCond on_start1;
throttler.start_op("ns", "id1", &on_start1);
C_SaferCond on_start2;
throttler.start_op("ns", "id2", &on_start2);
ASSERT_EQ(0, on_start1.wait());
throttler.drain("ns", -ESTALE);
ASSERT_EQ(-ESTALE, on_start2.wait());
}
} // namespace mirror
} // namespace rbd
| 7264 | 27.602362 | 79 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_fixture.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "include/rbd/librbd.hpp"
#include "test/librados_test_stub/LibradosTestStub.h"
#include "test/librados_test_stub/MockTestMemCluster.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/librbd/mock/MockImageCtx.h"
namespace rbd {
namespace mirror {
using ::testing::_;
using ::testing::Invoke;
using ::testing::WithArg;
TestMockFixture::TestClusterRef TestMockFixture::s_test_cluster;
void TestMockFixture::SetUpTestCase() {
s_test_cluster = librados_test_stub::get_cluster();
// use a mock version of the in-memory rados client
librados_test_stub::set_cluster(boost::shared_ptr<librados::TestCluster>(
new ::testing::NiceMock<librados::MockTestMemCluster>()));
TestFixture::SetUpTestCase();
}
void TestMockFixture::TearDownTestCase() {
TestFixture::TearDownTestCase();
librados_test_stub::set_cluster(s_test_cluster);
}
void TestMockFixture::TearDown() {
// Mock rados client lives across tests -- reset it to initial state
librados::MockTestMemRadosClient *mock_rados_client =
get_mock_io_ctx(m_local_io_ctx).get_mock_rados_client();
ASSERT_TRUE(mock_rados_client != nullptr);
::testing::Mock::VerifyAndClear(mock_rados_client);
mock_rados_client->default_to_dispatch();
dynamic_cast<librados::MockTestMemCluster*>(
librados_test_stub::get_cluster().get())->default_to_dispatch();
TestFixture::TearDown();
}
void TestMockFixture::expect_test_features(librbd::MockImageCtx &mock_image_ctx) {
EXPECT_CALL(mock_image_ctx, test_features(_, _))
.WillRepeatedly(WithArg<0>(Invoke([&mock_image_ctx](uint64_t features) {
return (mock_image_ctx.features & features) != 0;
})));
}
librados::MockTestMemCluster& TestMockFixture::get_mock_cluster() {
librados::MockTestMemCluster* mock_cluster = dynamic_cast<
librados::MockTestMemCluster*>(librados_test_stub::get_cluster().get());
ceph_assert(mock_cluster != nullptr);
return *mock_cluster;
}
} // namespace mirror
} // namespace rbd
| 2136 | 31.876923 | 82 |
cc
|
null |
ceph-main/src/test/rbd_mirror/test_mock_fixture.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
#define CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
#include "test/rbd_mirror/test_fixture.h"
#include "test/librados_test_stub/LibradosTestStub.h"
#include "common/WorkQueue.h"
#include "librbd/asio/ContextWQ.h"
#include <boost/shared_ptr.hpp>
#include <gmock/gmock.h>
#include "include/ceph_assert.h"
namespace librados {
class TestRadosClient;
class MockTestMemCluster;
class MockTestMemIoCtxImpl;
class MockTestMemRadosClient;
}
namespace librbd {
class MockImageCtx;
}
ACTION_P(CopyInBufferlist, str) {
arg0->append(str);
}
ACTION_P(CompleteContext, r) {
arg0->complete(r);
}
ACTION_P2(CompleteContext, wq, r) {
auto context_wq = reinterpret_cast<librbd::asio::ContextWQ *>(wq);
context_wq->queue(arg0, r);
}
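// Typical gmock usage: EXPECT_CALL(mock, method(_)).WillOnce(CompleteContext(0));
// The two-argument form queues the completion on the given ContextWQ instead
// of invoking the context inline.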
ACTION_P(GetReference, ref_object) {
ref_object->get();
}
MATCHER_P(ContentsEqual, bl, "") {
// TODO fix const-correctness of bufferlist
return const_cast<bufferlist &>(arg).contents_equal(
const_cast<bufferlist &>(bl));
}
namespace rbd {
namespace mirror {
class TestMockFixture : public TestFixture {
public:
typedef boost::shared_ptr<librados::TestCluster> TestClusterRef;
static void SetUpTestCase();
static void TearDownTestCase();
void TearDown() override;
void expect_test_features(librbd::MockImageCtx &mock_image_ctx);
librados::MockTestMemCluster& get_mock_cluster();
private:
static TestClusterRef s_test_cluster;
};
} // namespace mirror
} // namespace rbd
#endif // CEPH_TEST_RBD_MIRROR_TEST_MOCK_FIXTURE_H
| 1628 | 21.315068 | 70 |
h
|
null |
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_SnapshotPurgeRequest.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librbd/mock/MockExclusiveLock.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librbd/mock/MockImageState.h"
#include "test/librbd/mock/MockOperations.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
static MockTestImageCtx *s_instance;
static MockTestImageCtx *create(const std::string &image_name,
const std::string &image_id,
const char *snap, librados::IoCtx& p,
bool read_only) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
s_instance = this;
}
};
MockTestImageCtx *MockTestImageCtx::s_instance = nullptr;
} // anonymous namespace
} // namespace librbd
#include "tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.cc"
namespace rbd {
namespace mirror {
namespace image_deleter {
using ::testing::_;
using ::testing::Invoke;
using ::testing::InSequence;
using ::testing::WithArg;
class TestMockImageDeleterSnapshotPurgeRequest : public TestMockFixture {
public:
typedef SnapshotPurgeRequest<librbd::MockTestImageCtx> MockSnapshotPurgeRequest;
void SetUp() override {
TestMockFixture::SetUp();
librbd::RBD rbd;
ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx));
}
void expect_set_journal_policy(librbd::MockTestImageCtx &mock_image_ctx) {
EXPECT_CALL(mock_image_ctx, set_journal_policy(_))
.WillOnce(Invoke([](librbd::journal::Policy* policy) {
delete policy;
}));
}
void expect_open(librbd::MockTestImageCtx &mock_image_ctx, int r) {
EXPECT_CALL(*mock_image_ctx.state, open(true, _))
.WillOnce(WithArg<1>(Invoke([this, &mock_image_ctx, r](Context* ctx) {
EXPECT_EQ(0U, mock_image_ctx.read_only_mask &
librbd::IMAGE_READ_ONLY_FLAG_NON_PRIMARY);
m_threads->work_queue->queue(ctx, r);
})));
}
void expect_close(librbd::MockTestImageCtx &mock_image_ctx, int r) {
EXPECT_CALL(*mock_image_ctx.state, close(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_acquire_lock(librbd::MockTestImageCtx &mock_image_ctx, int r) {
EXPECT_CALL(*mock_image_ctx.exclusive_lock, acquire_lock(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_get_snap_namespace(librbd::MockTestImageCtx &mock_image_ctx,
uint64_t snap_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
int r) {
EXPECT_CALL(mock_image_ctx, get_snap_namespace(snap_id, _))
.WillOnce(WithArg<1>(Invoke([snap_namespace, r](cls::rbd::SnapshotNamespace *ns) {
*ns = snap_namespace;
return r;
})));
}
void expect_get_snap_name(librbd::MockTestImageCtx &mock_image_ctx,
uint64_t snap_id, const std::string& name,
int r) {
EXPECT_CALL(mock_image_ctx, get_snap_name(snap_id, _))
.WillOnce(WithArg<1>(Invoke([name, r](std::string *n) {
*n = name;
return r;
})));
}
void expect_is_snap_protected(librbd::MockTestImageCtx &mock_image_ctx,
uint64_t snap_id, bool is_protected, int r) {
EXPECT_CALL(mock_image_ctx, is_snap_protected(snap_id, _))
.WillOnce(WithArg<1>(Invoke([is_protected, r](bool *prot) {
*prot = is_protected;
return r;
})));
}
void expect_snap_unprotect(librbd::MockTestImageCtx &mock_image_ctx,
const cls::rbd::SnapshotNamespace& ns,
const std::string& name, int r) {
EXPECT_CALL(*mock_image_ctx.operations, execute_snap_unprotect(ns, name, _))
.WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
})));
}
void expect_snap_remove(librbd::MockTestImageCtx &mock_image_ctx,
const cls::rbd::SnapshotNamespace& ns,
const std::string& name, int r) {
EXPECT_CALL(*mock_image_ctx.operations, execute_snap_remove(ns, name, _))
.WillOnce(WithArg<2>(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
})));
}
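  // The exclusive lock's start_op() yields a completion context on success;
  // on failure the mock sets *r (-EROFS) and returns nullptr.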
void expect_start_op(librbd::MockTestImageCtx &mock_image_ctx, bool success) {
EXPECT_CALL(*mock_image_ctx.exclusive_lock, start_op(_))
.WillOnce(Invoke([success](int* r) {
auto f = [](int r) {};
if (!success) {
*r = -EROFS;
return static_cast<LambdaContext<decltype(f)>*>(nullptr);
}
return new LambdaContext(std::move(f));
}));
}
librbd::ImageCtx *m_local_image_ctx;
};
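// Journal-based images hold the exclusive lock, so each snapshot operation is
// bracketed by start_op(); the snapshot-based variant below omits the lock.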
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SuccessJournal) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap2", 2,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_acquire_lock(mock_image_ctx, 0);
expect_get_snap_namespace(mock_image_ctx, 2,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 2, "snap2", 0);
expect_is_snap_protected(mock_image_ctx, 2, false, 0);
expect_start_op(mock_image_ctx, true);
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap2",
0);
expect_get_snap_namespace(mock_image_ctx, 1,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 1, "snap1", 0);
expect_is_snap_protected(mock_image_ctx, 1, true, 0);
expect_start_op(mock_image_ctx, true);
expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{},
"snap1", 0);
expect_start_op(mock_image_ctx, true);
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1",
0);
expect_close(mock_image_ctx, 0);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SuccessSnapshot) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap2", 2,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_get_snap_namespace(mock_image_ctx, 2,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 2, "snap2", 0);
expect_is_snap_protected(mock_image_ctx, 2, false, 0);
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap2",
0);
expect_get_snap_namespace(mock_image_ctx, 1,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 1, "snap1", 0);
expect_is_snap_protected(mock_image_ctx, 1, true, 0);
expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{},
"snap1", 0);
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1",
0);
expect_close(mock_image_ctx, 0);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, OpenError) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, -EPERM);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, AcquireLockError) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_acquire_lock(mock_image_ctx, -EPERM);
expect_close(mock_image_ctx, -EINVAL);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapUnprotectBusy) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_acquire_lock(mock_image_ctx, 0);
expect_get_snap_namespace(mock_image_ctx, 1,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 1, "snap1", 0);
expect_is_snap_protected(mock_image_ctx, 1, true, 0);
expect_start_op(mock_image_ctx, true);
expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{},
"snap1", -EBUSY);
expect_close(mock_image_ctx, -EINVAL);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(-EBUSY, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapUnprotectError) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_PROTECTED, 0, {});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_acquire_lock(mock_image_ctx, 0);
expect_get_snap_namespace(mock_image_ctx, 1,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 1, "snap1", 0);
expect_is_snap_protected(mock_image_ctx, 1, true, 0);
expect_start_op(mock_image_ctx, true);
expect_snap_unprotect(mock_image_ctx, cls::rbd::UserSnapshotNamespace{},
"snap1", -EPERM);
expect_close(mock_image_ctx, -EINVAL);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, SnapRemoveError) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_acquire_lock(mock_image_ctx, 0);
expect_get_snap_namespace(mock_image_ctx, 1,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 1, "snap1", 0);
expect_is_snap_protected(mock_image_ctx, 1, false, 0);
expect_start_op(mock_image_ctx, true);
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1",
-EINVAL);
expect_close(mock_image_ctx, -EPERM);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterSnapshotPurgeRequest, CloseError) {
{
std::unique_lock image_locker{m_local_image_ctx->image_lock};
m_local_image_ctx->add_snap(cls::rbd::UserSnapshotNamespace{}, "snap1", 1,
0, {}, RBD_PROTECTION_STATUS_UNPROTECTED, 0,
{});
}
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
expect_acquire_lock(mock_image_ctx, 0);
expect_get_snap_namespace(mock_image_ctx, 1,
cls::rbd::UserSnapshotNamespace{}, 0);
expect_get_snap_name(mock_image_ctx, 1, "snap1", 0);
expect_is_snap_protected(mock_image_ctx, 1, false, 0);
expect_start_op(mock_image_ctx, true);
expect_snap_remove(mock_image_ctx, cls::rbd::UserSnapshotNamespace{}, "snap1",
0);
expect_close(mock_image_ctx, -EINVAL);
C_SaferCond ctx;
auto req = MockSnapshotPurgeRequest::create(m_local_io_ctx, mock_image_ctx.id,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
| 16334 | 36.900232 | 88 |
cc
|
null |
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_TrashMoveRequest.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/TrashWatcher.h"
#include "librbd/journal/ResetRequest.h"
#include "librbd/mirror/GetInfoRequest.h"
#include "librbd/mirror/ImageRemoveRequest.h"
#include "librbd/trash/MoveRequest.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_deleter/TrashMoveRequest.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librbd/mock/MockExclusiveLock.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/librbd/mock/MockImageState.h"
#include "test/librbd/mock/MockOperations.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
static MockTestImageCtx *s_instance;
static MockTestImageCtx *create(const std::string &image_name,
const std::string &image_id,
const char *snap, librados::IoCtx& p,
bool read_only) {
ceph_assert(s_instance != nullptr);
return s_instance;
}
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
s_instance = this;
}
};
MockTestImageCtx *MockTestImageCtx::s_instance = nullptr;
} // anonymous namespace
template<>
struct TrashWatcher<MockTestImageCtx> {
static TrashWatcher* s_instance;
static void notify_image_added(librados::IoCtx&, const std::string& image_id,
const cls::rbd::TrashImageSpec& spec,
Context *ctx) {
ceph_assert(s_instance != nullptr);
s_instance->notify_image_added(image_id, spec, ctx);
}
MOCK_METHOD3(notify_image_added, void(const std::string&,
const cls::rbd::TrashImageSpec&,
Context*));
TrashWatcher() {
s_instance = this;
}
};
TrashWatcher<MockTestImageCtx>* TrashWatcher<MockTestImageCtx>::s_instance = nullptr;
namespace journal {
template <>
struct ResetRequest<MockTestImageCtx> {
static ResetRequest* s_instance;
Context* on_finish = nullptr;
static ResetRequest* create(librados::IoCtx &io_ctx,
const std::string &image_id,
const std::string &client_id,
const std::string &mirror_uuid,
ContextWQ *op_work_queue,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
EXPECT_EQ(librbd::Journal<>::LOCAL_MIRROR_UUID, mirror_uuid);
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD0(send, void());
ResetRequest() {
s_instance = this;
}
};
ResetRequest<MockTestImageCtx>* ResetRequest<MockTestImageCtx>::s_instance = nullptr;
} // namespace journal
namespace mirror {
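// These request mocks capture the caller's output pointers and completion
// context so that expectations can populate results and complete the request
// asynchronously on the work queue.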
template<>
struct GetInfoRequest<librbd::MockTestImageCtx> {
static GetInfoRequest* s_instance;
cls::rbd::MirrorImage *mirror_image;
PromotionState *promotion_state;
std::string *primary_mirror_uuid;
Context *on_finish = nullptr;
static GetInfoRequest* create(librados::IoCtx& io_ctx,
librbd::asio::ContextWQ* context_wq,
const std::string& image_id,
cls::rbd::MirrorImage *mirror_image,
PromotionState *promotion_state,
std::string* primary_mirror_uuid,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->mirror_image = mirror_image;
s_instance->promotion_state = promotion_state;
s_instance->primary_mirror_uuid = primary_mirror_uuid;
s_instance->on_finish = on_finish;
return s_instance;
}
GetInfoRequest() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
~GetInfoRequest() {
s_instance = nullptr;
}
MOCK_METHOD0(send, void());
};
GetInfoRequest<librbd::MockTestImageCtx>* GetInfoRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
template<>
struct ImageRemoveRequest<librbd::MockTestImageCtx> {
static ImageRemoveRequest* s_instance;
std::string global_image_id;
std::string image_id;
Context* on_finish;
static ImageRemoveRequest *create(librados::IoCtx& io_ctx,
const std::string& global_image_id,
const std::string& image_id,
Context* on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->global_image_id = global_image_id;
s_instance->image_id = image_id;
s_instance->on_finish = on_finish;
return s_instance;
}
ImageRemoveRequest() {
ceph_assert(s_instance == nullptr);
s_instance = this;
}
~ImageRemoveRequest() {
s_instance = nullptr;
}
MOCK_METHOD0(send, void());
};
ImageRemoveRequest<librbd::MockTestImageCtx>* ImageRemoveRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace mirror
namespace trash {
template <>
struct MoveRequest<MockTestImageCtx> {
static MoveRequest* s_instance;
Context* on_finish = nullptr;
typedef boost::optional<utime_t> DefermentEndTime;
static MoveRequest* create(librados::IoCtx& io_ctx,
const std::string& image_id,
const cls::rbd::TrashImageSpec& trash_image_spec,
Context* on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->construct(image_id, trash_image_spec);
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD2(construct, void(const std::string&,
const cls::rbd::TrashImageSpec&));
MOCK_METHOD0(send, void());
MoveRequest() {
s_instance = this;
}
};
MoveRequest<MockTestImageCtx>* MoveRequest<MockTestImageCtx>::s_instance = nullptr;
} // namespace trash
} // namespace librbd
#include "tools/rbd_mirror/image_deleter/TrashMoveRequest.cc"
namespace rbd {
namespace mirror {
namespace image_deleter {
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::WithArg;
using ::testing::WithArgs;
class TestMockImageDeleterTrashMoveRequest : public TestMockFixture {
public:
typedef TrashMoveRequest<librbd::MockTestImageCtx> MockTrashMoveRequest;
typedef librbd::journal::ResetRequest<librbd::MockTestImageCtx> MockJournalResetRequest;
typedef librbd::mirror::GetInfoRequest<librbd::MockTestImageCtx> MockGetMirrorInfoRequest;
typedef librbd::mirror::ImageRemoveRequest<librbd::MockTestImageCtx> MockImageRemoveRequest;
typedef librbd::trash::MoveRequest<librbd::MockTestImageCtx> MockLibrbdTrashMoveRequest;
typedef librbd::TrashWatcher<librbd::MockTestImageCtx> MockTrashWatcher;
void SetUp() override {
TestMockFixture::SetUp();
librbd::RBD rbd;
ASSERT_EQ(0, create_image(rbd, m_local_io_ctx, m_image_name, m_image_size));
ASSERT_EQ(0, open_image(m_local_io_ctx, m_image_name, &m_local_image_ctx));
}
void expect_mirror_image_get_image_id(const std::string& image_id, int r) {
bufferlist bl;
encode(image_id, bl);
EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx),
exec(RBD_MIRRORING, _, StrEq("rbd"),
StrEq("mirror_image_get_image_id"), _, _, _, _))
.WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) {
*out_bl = bl;
})),
Return(r)));
}
void expect_get_mirror_info(
MockGetMirrorInfoRequest &mock_get_mirror_info_request,
const cls::rbd::MirrorImage &mirror_image,
librbd::mirror::PromotionState promotion_state,
const std::string& primary_mirror_uuid, int r) {
EXPECT_CALL(mock_get_mirror_info_request, send())
.WillOnce(Invoke([this, &mock_get_mirror_info_request, mirror_image,
promotion_state, primary_mirror_uuid, r]() {
*mock_get_mirror_info_request.mirror_image = mirror_image;
*mock_get_mirror_info_request.promotion_state = promotion_state;
*mock_get_mirror_info_request.primary_mirror_uuid =
primary_mirror_uuid;
m_threads->work_queue->queue(
mock_get_mirror_info_request.on_finish, r);
}));
}
void expect_set_journal_policy(librbd::MockTestImageCtx &mock_image_ctx) {
EXPECT_CALL(mock_image_ctx, set_journal_policy(_))
.WillOnce(Invoke([](librbd::journal::Policy* policy) {
delete policy;
}));
}
void expect_open(librbd::MockTestImageCtx &mock_image_ctx, int r) {
EXPECT_CALL(*mock_image_ctx.state, open(true, _))
.WillOnce(WithArg<1>(Invoke([this, &mock_image_ctx, r](Context* ctx) {
EXPECT_EQ(0U, mock_image_ctx.read_only_mask &
librbd::IMAGE_READ_ONLY_FLAG_NON_PRIMARY);
m_threads->work_queue->queue(ctx, r);
})));
}
void expect_close(librbd::MockTestImageCtx &mock_image_ctx, int r) {
EXPECT_CALL(*mock_image_ctx.state, close(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_block_requests(librbd::MockTestImageCtx &mock_image_ctx) {
EXPECT_CALL(*mock_image_ctx.exclusive_lock, block_requests(0)).Times(1);
}
void expect_acquire_lock(librbd::MockTestImageCtx &mock_image_ctx, int r) {
EXPECT_CALL(*mock_image_ctx.exclusive_lock, acquire_lock(_))
.WillOnce(Invoke([this, r](Context* ctx) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_mirror_image_set(const std::string& image_id,
const cls::rbd::MirrorImage& mirror_image,
int r) {
bufferlist bl;
encode(image_id, bl);
encode(mirror_image, bl);
EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx),
exec(RBD_MIRRORING, _, StrEq("rbd"),
StrEq("mirror_image_set"), ContentsEqual(bl), _, _, _))
.WillOnce(Return(r));
}
void expect_mirror_image_remove_request(
MockImageRemoveRequest& mock_image_remove_request, int r) {
EXPECT_CALL(mock_image_remove_request, send())
.WillOnce(Invoke([this, &mock_image_remove_request, r]() {
m_threads->work_queue->queue(mock_image_remove_request.on_finish, r);
}));
}
void expect_journal_reset(MockJournalResetRequest& mock_journal_reset_request,
int r) {
EXPECT_CALL(mock_journal_reset_request, send())
.WillOnce(Invoke([this, &mock_journal_reset_request, r]() {
m_threads->work_queue->queue(mock_journal_reset_request.on_finish, r);
}));
}
void expect_trash_move(MockLibrbdTrashMoveRequest& mock_trash_move_request,
const std::string& image_name,
const std::string& image_id,
const boost::optional<uint32_t>& delay, int r) {
EXPECT_CALL(mock_trash_move_request, construct(image_id, _))
.WillOnce(WithArg<1>(Invoke([image_name, delay](const cls::rbd::TrashImageSpec& spec) {
ASSERT_EQ(cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING,
spec.source);
ASSERT_EQ(image_name, spec.name);
if (delay) {
utime_t time{spec.deletion_time};
time += *delay;
ASSERT_TRUE(time == spec.deferment_end_time);
} else {
ASSERT_EQ(spec.deletion_time, spec.deferment_end_time);
}
})));
EXPECT_CALL(mock_trash_move_request, send())
.WillOnce(Invoke([this, &mock_trash_move_request, r]() {
m_threads->work_queue->queue(mock_trash_move_request.on_finish, r);
}));
}
void expect_notify_image_added(MockTrashWatcher& mock_trash_watcher,
const std::string& image_id) {
EXPECT_CALL(mock_trash_watcher, notify_image_added(image_id, _, _))
.WillOnce(WithArg<2>(Invoke([this](Context *ctx) {
m_threads->work_queue->queue(ctx, 0);
})));
}
librbd::ImageCtx *m_local_image_ctx;
};
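// Journal-based trash moves reset the journal and take the exclusive lock
// before moving the image; the snapshot-based variant below skips both steps.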
TEST_F(TestMockImageDeleterTrashMoveRequest, SuccessJournal) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, 0);
expect_block_requests(mock_image_ctx);
expect_acquire_lock(mock_image_ctx, 0);
MockLibrbdTrashMoveRequest mock_librbd_trash_move_request;
expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id",
{}, 0);
MockImageRemoveRequest mock_image_remove_request;
expect_mirror_image_remove_request(mock_image_remove_request, 0);
expect_close(mock_image_ctx, 0);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_added(mock_trash_watcher, "image id");
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, SuccessSnapshot) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_NON_PRIMARY,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockLibrbdTrashMoveRequest mock_librbd_trash_move_request;
expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id",
{}, 0);
MockImageRemoveRequest mock_image_remove_request;
expect_mirror_image_remove_request(mock_image_remove_request, 0);
expect_close(mock_image_ctx, 0);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_added(mock_trash_watcher, "image id");
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
false,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, GetImageIdDNE) {
InSequence seq;
expect_mirror_image_get_image_id("image id", -ENOENT);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-ENOENT, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, GetImageIdError) {
InSequence seq;
expect_mirror_image_get_image_id("image id", -EINVAL);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
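// A locally promoted (primary) image must never be trashed by the mirror
// daemon, so the request bails out with -EPERM.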
TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoLocalPrimary) {
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_PRIMARY,
"remote mirror uuid", 0);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
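// An orphaned image is only trashed when the boolean create() argument
// (presumably a resync request) is true, as in SuccessJournal above; here
// it is false, so the request is rejected with -EPERM.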
TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoOrphan) {
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
false,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoDNE) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", -ENOENT);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-ENOENT, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, GetMirrorInfoError) {
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", -EINVAL);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, DisableMirrorImageError) {
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, -EINVAL);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, OpenImageError) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, -EINVAL);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, ResetJournalError) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, -EINVAL);
expect_close(mock_image_ctx, 0);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, AcquireLockError) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, 0);
expect_block_requests(mock_image_ctx);
expect_acquire_lock(mock_image_ctx, -EINVAL);
expect_close(mock_image_ctx, 0);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, TrashMoveError) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, 0);
expect_block_requests(mock_image_ctx);
expect_acquire_lock(mock_image_ctx, 0);
MockLibrbdTrashMoveRequest mock_librbd_trash_move_request;
expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id",
{}, -EINVAL);
expect_close(mock_image_ctx, 0);
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashMoveRequest, RemoveMirrorImageError) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, 0);
expect_block_requests(mock_image_ctx);
expect_acquire_lock(mock_image_ctx, 0);
MockLibrbdTrashMoveRequest mock_librbd_trash_move_request;
expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id",
{}, 0);
MockImageRemoveRequest mock_image_remove_request;
expect_mirror_image_remove_request(mock_image_remove_request, -EINVAL);
expect_close(mock_image_ctx, 0);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_added(mock_trash_watcher, "image id");
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
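// Closing the image fails with -EINVAL here, yet the request completes with
// 0: the trash move has already succeeded, so close errors are non-fatal.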
TEST_F(TestMockImageDeleterTrashMoveRequest, CloseImageError) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_ORPHAN,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, 0);
expect_block_requests(mock_image_ctx);
expect_acquire_lock(mock_image_ctx, 0);
MockLibrbdTrashMoveRequest mock_librbd_trash_move_request;
expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id",
{}, 0);
MockImageRemoveRequest mock_image_remove_request;
expect_mirror_image_remove_request(mock_image_remove_request, 0);
expect_close(mock_image_ctx, -EINVAL);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_added(mock_trash_watcher, "image id");
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
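// With rbd_mirroring_delete_delay set to 600 seconds, the trash spec's
// deferment_end_time is expected to trail its deletion_time by 600 seconds
// (see the delay check in expect_trash_move()).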
TEST_F(TestMockImageDeleterTrashMoveRequest, DelayedDeletion) {
librbd::MockTestImageCtx mock_image_ctx(*m_local_image_ctx);
librbd::MockExclusiveLock mock_exclusive_lock;
mock_image_ctx.config.set_val("rbd_mirroring_delete_delay", "600");
mock_image_ctx.exclusive_lock = &mock_exclusive_lock;
InSequence seq;
expect_mirror_image_get_image_id("image id", 0);
MockGetMirrorInfoRequest mock_get_mirror_info_request;
expect_get_mirror_info(mock_get_mirror_info_request,
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_ENABLED},
librbd::mirror::PROMOTION_STATE_NON_PRIMARY,
"remote mirror uuid", 0);
expect_mirror_image_set("image id",
{cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"global image id",
cls::rbd::MIRROR_IMAGE_STATE_DISABLING}, 0);
expect_set_journal_policy(mock_image_ctx);
expect_open(mock_image_ctx, 0);
MockJournalResetRequest mock_journal_reset_request;
expect_journal_reset(mock_journal_reset_request, 0);
expect_block_requests(mock_image_ctx);
expect_acquire_lock(mock_image_ctx, 0);
MockLibrbdTrashMoveRequest mock_librbd_trash_move_request;
expect_trash_move(mock_librbd_trash_move_request, m_image_name, "image id",
600, 0);
MockImageRemoveRequest mock_image_remove_request;
expect_mirror_image_remove_request(mock_image_remove_request, 0);
expect_close(mock_image_ctx, 0);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_added(mock_trash_watcher, "image id");
C_SaferCond ctx;
auto req = MockTrashMoveRequest::create(m_local_io_ctx, "global image id",
true,
m_local_image_ctx->op_work_queue,
&ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
| 34,172 | 36.885809 | 113 |
cc
|
null |
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_TrashRemoveRequest.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/TrashWatcher.h"
#include "librbd/Utils.h"
#include "librbd/trash/RemoveRequest.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_deleter/SnapshotPurgeRequest.h"
#include "tools/rbd_mirror/image_deleter/TrashRemoveRequest.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librbd/mock/MockImageCtx.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
template<>
struct TrashWatcher<MockTestImageCtx> {
static TrashWatcher* s_instance;
static void notify_image_removed(librados::IoCtx&,
const std::string& image_id, Context *ctx) {
ceph_assert(s_instance != nullptr);
s_instance->notify_image_removed(image_id, ctx);
}
MOCK_METHOD2(notify_image_removed, void(const std::string&, Context*));
TrashWatcher() {
s_instance = this;
}
};
TrashWatcher<MockTestImageCtx>* TrashWatcher<MockTestImageCtx>::s_instance = nullptr;
namespace trash {
template <>
struct RemoveRequest<librbd::MockTestImageCtx> {
static RemoveRequest *s_instance;
Context *on_finish = nullptr;
static RemoveRequest *create(librados::IoCtx &io_ctx,
const std::string &image_id,
librbd::asio::ContextWQ *work_queue,
bool force,
librbd::ProgressContext &progress_ctx,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
EXPECT_TRUE(force);
s_instance->construct(image_id);
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD1(construct, void(const std::string&));
MOCK_METHOD0(send, void());
RemoveRequest() {
s_instance = this;
}
};
RemoveRequest<librbd::MockTestImageCtx>* RemoveRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace trash
} // namespace librbd
namespace rbd {
namespace mirror {
namespace image_deleter {
template <>
struct SnapshotPurgeRequest<librbd::MockTestImageCtx> {
static SnapshotPurgeRequest *s_instance;
Context *on_finish = nullptr;
static SnapshotPurgeRequest *create(librados::IoCtx &io_ctx,
const std::string &image_id,
Context *on_finish) {
ceph_assert(s_instance != nullptr);
s_instance->construct(image_id);
s_instance->on_finish = on_finish;
return s_instance;
}
MOCK_METHOD1(construct, void(const std::string&));
MOCK_METHOD0(send, void());
SnapshotPurgeRequest() {
s_instance = this;
}
};
SnapshotPurgeRequest<librbd::MockTestImageCtx>* SnapshotPurgeRequest<librbd::MockTestImageCtx>::s_instance = nullptr;
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
#include "tools/rbd_mirror/image_deleter/TrashRemoveRequest.cc"
namespace rbd {
namespace mirror {
namespace image_deleter {
using ::testing::_;
using ::testing::DoAll;
using ::testing::Invoke;
using ::testing::InSequence;
using ::testing::Return;
using ::testing::StrEq;
using ::testing::WithArg;
using ::testing::WithArgs;
class TestMockImageDeleterTrashRemoveRequest : public TestMockFixture {
public:
typedef TrashRemoveRequest<librbd::MockTestImageCtx> MockTrashRemoveRequest;
typedef SnapshotPurgeRequest<librbd::MockTestImageCtx> MockSnapshotPurgeRequest;
typedef librbd::TrashWatcher<librbd::MockTestImageCtx> MockTrashWatcher;
typedef librbd::trash::RemoveRequest<librbd::MockTestImageCtx> MockLibrbdTrashRemoveRequest;
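  // Stubs the cls_rbd "trash_get" call on the RBD_TRASH object, encoding
  // the supplied spec into the output bufferlist and returning r.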
void expect_trash_get(const cls::rbd::TrashImageSpec& trash_spec, int r) {
using ceph::encode;
EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx),
exec(StrEq(RBD_TRASH), _, StrEq("rbd"),
StrEq("trash_get"), _, _, _, _))
.WillOnce(WithArg<5>(Invoke([trash_spec, r](bufferlist* bl) {
encode(trash_spec, *bl);
return r;
})));
}
void expect_trash_state_set(const std::string& image_id, int r) {
bufferlist in_bl;
encode(image_id, in_bl);
encode(cls::rbd::TRASH_IMAGE_STATE_REMOVING, in_bl);
encode(cls::rbd::TRASH_IMAGE_STATE_NORMAL, in_bl);
EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx),
exec(StrEq(RBD_TRASH), _, StrEq("rbd"),
StrEq("trash_state_set"),
ContentsEqual(in_bl), _, _, _))
.WillOnce(Return(r));
}
void expect_get_snapcontext(const std::string& image_id,
const ::SnapContext &snapc, int r) {
bufferlist bl;
encode(snapc, bl);
EXPECT_CALL(get_mock_io_ctx(m_local_io_ctx),
exec(librbd::util::header_name(image_id), _, StrEq("rbd"),
StrEq("get_snapcontext"), _, _, _, _))
.WillOnce(DoAll(WithArg<5>(Invoke([bl](bufferlist *out_bl) {
*out_bl = bl;
})),
Return(r)));
}
void expect_snapshot_purge(MockSnapshotPurgeRequest &snapshot_purge_request,
const std::string &image_id, int r) {
EXPECT_CALL(snapshot_purge_request, construct(image_id));
EXPECT_CALL(snapshot_purge_request, send())
.WillOnce(Invoke([this, &snapshot_purge_request, r]() {
m_threads->work_queue->queue(
snapshot_purge_request.on_finish, r);
}));
}
void expect_image_remove(MockLibrbdTrashRemoveRequest &image_remove_request,
const std::string &image_id, int r) {
EXPECT_CALL(image_remove_request, construct(image_id));
EXPECT_CALL(image_remove_request, send())
.WillOnce(Invoke([this, &image_remove_request, r]() {
m_threads->work_queue->queue(
image_remove_request.on_finish, r);
}));
}
void expect_notify_image_removed(MockTrashWatcher& mock_trash_watcher,
const std::string& image_id) {
EXPECT_CALL(mock_trash_watcher, notify_image_removed(image_id, _))
.WillOnce(WithArg<1>(Invoke([this](Context *ctx) {
m_threads->work_queue->queue(ctx, 0);
})));
}
};
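// Full removal flow: fetch the trash record, flip its state from NORMAL to
// REMOVING, read the snap context, purge the snapshots, remove the image,
// and notify the trash watcher that the image is gone.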
TEST_F(TestMockImageDeleterTrashRemoveRequest, Success) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", 0);
expect_get_snapcontext("image id", {1, {1}}, 0);
MockSnapshotPurgeRequest mock_snapshot_purge_request;
expect_snapshot_purge(mock_snapshot_purge_request, "image id", 0);
MockLibrbdTrashRemoveRequest mock_image_remove_request;
expect_image_remove(mock_image_remove_request, "image id", 0);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_removed(mock_trash_watcher, "image id");
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashDNE) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, -ENOENT);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashError) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, -EPERM);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashSourceIncorrect) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_USER, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
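// An image still in the RESTORING state cannot be removed: the request is
// expected to fail with -EBUSY and flag the error for immediate retry.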
TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashStateIncorrect) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
trash_image_spec.state = cls::rbd::TRASH_IMAGE_STATE_RESTORING;
expect_trash_get(trash_image_spec, 0);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EBUSY, ctx.wait());
ASSERT_EQ(ERROR_RESULT_RETRY_IMMEDIATELY, error_result);
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashSetStateDNE) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", -ENOENT);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, TrashSetStateError) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", -EPERM);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EPERM, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, GetSnapContextDNE) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", 0);
expect_get_snapcontext("image id", {1, {1}}, -ENOENT);
MockLibrbdTrashRemoveRequest mock_image_remove_request;
expect_image_remove(mock_image_remove_request, "image id", 0);
MockTrashWatcher mock_trash_watcher;
expect_notify_image_removed(mock_trash_watcher, "image id");
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(0, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, GetSnapContextError) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", 0);
expect_get_snapcontext("image id", {1, {1}}, -EINVAL);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, PurgeSnapshotBusy) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", 0);
expect_get_snapcontext("image id", {1, {1}}, 0);
MockSnapshotPurgeRequest mock_snapshot_purge_request;
expect_snapshot_purge(mock_snapshot_purge_request, "image id", -EBUSY);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EBUSY, ctx.wait());
ASSERT_EQ(ERROR_RESULT_RETRY_IMMEDIATELY, error_result);
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, PurgeSnapshotError) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", 0);
expect_get_snapcontext("image id", {1, {1}}, 0);
MockSnapshotPurgeRequest mock_snapshot_purge_request;
expect_snapshot_purge(mock_snapshot_purge_request, "image id", -EINVAL);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
TEST_F(TestMockImageDeleterTrashRemoveRequest, RemoveError) {
InSequence seq;
cls::rbd::TrashImageSpec trash_image_spec{
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "image name", {}, {}};
expect_trash_get(trash_image_spec, 0);
expect_trash_state_set("image id", 0);
expect_get_snapcontext("image id", {1, {1}}, 0);
MockSnapshotPurgeRequest mock_snapshot_purge_request;
expect_snapshot_purge(mock_snapshot_purge_request, "image id", 0);
MockLibrbdTrashRemoveRequest mock_image_remove_request;
expect_image_remove(mock_image_remove_request, "image id", -EINVAL);
C_SaferCond ctx;
ErrorResult error_result;
auto req = MockTrashRemoveRequest::create(m_local_io_ctx, "image id",
&error_result,
m_threads->work_queue, &ctx);
req->send();
ASSERT_EQ(-EINVAL, ctx.wait());
}
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
| 15,149 | 32.370044 | 117 |
cc
|
null |
ceph-main/src/test/rbd_mirror/image_deleter/test_mock_TrashWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "test/rbd_mirror/test_mock_fixture.h"
#include "test/librados_test_stub/MockTestMemIoCtxImpl.h"
#include "test/librados_test_stub/MockTestMemRadosClient.h"
#include "test/librbd/mock/MockImageCtx.h"
#include "test/rbd_mirror/mock/MockContextWQ.h"
#include "test/rbd_mirror/mock/MockSafeTimer.h"
#include "librbd/TrashWatcher.h"
#include "tools/rbd_mirror/Threads.h"
#include "tools/rbd_mirror/image_deleter/TrashWatcher.h"
namespace librbd {
namespace {
struct MockTestImageCtx : public librbd::MockImageCtx {
MockTestImageCtx(librbd::ImageCtx &image_ctx)
: librbd::MockImageCtx(image_ctx) {
}
};
} // anonymous namespace
struct MockTrashWatcher {
static MockTrashWatcher *s_instance;
static MockTrashWatcher &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
MockTrashWatcher() {
s_instance = this;
}
MOCK_CONST_METHOD0(is_unregistered, bool());
MOCK_METHOD1(register_watch, void(Context*));
MOCK_METHOD1(unregister_watch, void(Context*));
};
template <>
struct TrashWatcher<MockTestImageCtx> {
static TrashWatcher *s_instance;
TrashWatcher(librados::IoCtx &io_ctx, ::MockContextWQ *work_queue) {
s_instance = this;
}
virtual ~TrashWatcher() {
}
static TrashWatcher<MockTestImageCtx> &get_instance() {
ceph_assert(s_instance != nullptr);
return *s_instance;
}
virtual void handle_rewatch_complete(int r) = 0;
virtual void handle_image_added(const std::string &image_id,
const cls::rbd::TrashImageSpec& spec) = 0;
virtual void handle_image_removed(const std::string &image_id) = 0;
bool is_unregistered() const {
return MockTrashWatcher::get_instance().is_unregistered();
}
void register_watch(Context *ctx) {
MockTrashWatcher::get_instance().register_watch(ctx);
}
void unregister_watch(Context *ctx) {
MockTrashWatcher::get_instance().unregister_watch(ctx);
}
};
MockTrashWatcher *MockTrashWatcher::s_instance = nullptr;
TrashWatcher<MockTestImageCtx> *TrashWatcher<MockTestImageCtx>::s_instance = nullptr;
} // namespace librbd
namespace rbd {
namespace mirror {
template <>
struct Threads<librbd::MockTestImageCtx> {
MockSafeTimer *timer;
ceph::mutex &timer_lock;
MockContextWQ *work_queue;
Threads(Threads<librbd::ImageCtx> *threads)
: timer(new MockSafeTimer()),
timer_lock(threads->timer_lock),
work_queue(new MockContextWQ()) {
}
~Threads() {
delete timer;
delete work_queue;
}
};
} // namespace mirror
} // namespace rbd
#include "tools/rbd_mirror/image_deleter/TrashWatcher.cc"
namespace rbd {
namespace mirror {
namespace image_deleter {
using ::testing::_;
using ::testing::DoAll;
using ::testing::InSequence;
using ::testing::Invoke;
using ::testing::Return;
using ::testing::ReturnArg;
using ::testing::StrEq;
using ::testing::WithArg;
class TestMockImageDeleterTrashWatcher : public TestMockFixture {
public:
typedef TrashWatcher<librbd::MockTestImageCtx> MockTrashWatcher;
typedef Threads<librbd::MockTestImageCtx> MockThreads;
typedef librbd::MockTrashWatcher MockLibrbdTrashWatcher;
typedef librbd::TrashWatcher<librbd::MockTestImageCtx> LibrbdTrashWatcher;
struct MockListener : TrashListener {
MOCK_METHOD2(handle_trash_image, void(const std::string&,
const ceph::real_clock::time_point&));
};
void expect_work_queue(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.work_queue, queue(_, _))
.WillRepeatedly(Invoke([this](Context *ctx, int r) {
m_threads->work_queue->queue(ctx, r);
}));
}
void expect_trash_watcher_is_unregistered(MockLibrbdTrashWatcher &mock_trash_watcher,
bool unregistered) {
EXPECT_CALL(mock_trash_watcher, is_unregistered())
.WillOnce(Return(unregistered));
}
void expect_trash_watcher_register(MockLibrbdTrashWatcher &mock_trash_watcher,
int r) {
EXPECT_CALL(mock_trash_watcher, register_watch(_))
.WillOnce(CompleteContext(r));
}
void expect_trash_watcher_unregister(MockLibrbdTrashWatcher &mock_trash_watcher,
int r) {
EXPECT_CALL(mock_trash_watcher, unregister_watch(_))
.WillOnce(CompleteContext(r));
}
void expect_create_trash(librados::IoCtx &io_ctx, int r) {
EXPECT_CALL(get_mock_io_ctx(io_ctx), create(RBD_TRASH, false, _))
.WillOnce(Return(r));
}
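  // Matches a paginated "trash_list" call starting after last_image_id with
  // a page size of 1024, filling the reply with the provided image map.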
void expect_trash_list(librados::IoCtx &io_ctx,
const std::string& last_image_id,
std::map<std::string, cls::rbd::TrashImageSpec>&& images,
int r) {
bufferlist bl;
encode(last_image_id, bl);
encode(static_cast<size_t>(1024), bl);
bufferlist out_bl;
encode(images, out_bl);
EXPECT_CALL(get_mock_io_ctx(io_ctx),
exec(RBD_TRASH, _, StrEq("rbd"), StrEq("trash_list"),
ContentsEqual(bl), _, _, _))
.WillOnce(DoAll(WithArg<5>(Invoke([out_bl](bufferlist *bl) {
*bl = out_bl;
})),
Return(r)));
}
void expect_timer_add_event(MockThreads &mock_threads) {
EXPECT_CALL(*mock_threads.timer, add_event_after(_, _))
.WillOnce(DoAll(WithArg<1>(Invoke([this](Context *ctx) {
auto wrapped_ctx =
new LambdaContext([this, ctx](int r) {
std::lock_guard timer_locker{m_threads->timer_lock};
ctx->complete(r);
});
m_threads->work_queue->queue(wrapped_ctx, 0);
})),
ReturnArg<1>()));
}
void expect_handle_trash_image(MockListener& mock_listener,
const std::string& global_image_id) {
EXPECT_CALL(mock_listener, handle_trash_image(global_image_id, _));
}
int when_shut_down(MockTrashWatcher &mock_trash_watcher) {
C_SaferCond ctx;
mock_trash_watcher.shut_down(&ctx);
return ctx.wait();
}
};
TEST_F(TestMockImageDeleterTrashWatcher, EmptyPool) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
expect_trash_list(m_local_io_ctx, "", {}, 0);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
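// 2000 trash entries force two paginated trash_list calls of up to 1024
// entries each; std::map's lexicographic ordering makes "image999" the last
// key of the first page. Only image0, the lone MIRRORING-source entry, is
// expected to reach the listener.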
TEST_F(TestMockImageDeleterTrashWatcher, NonEmptyPool) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
MockListener mock_listener;
expect_handle_trash_image(mock_listener, "image0");
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
std::map<std::string, cls::rbd::TrashImageSpec> images;
images["image0"] = {cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "name", {}, {}};
for (auto idx = 1; idx < 1024; ++idx) {
images["image" + stringify(idx)] = {};
}
expect_trash_list(m_local_io_ctx, "", std::move(images), 0);
images.clear();
for (auto idx = 1024; idx < 2000; ++idx) {
images["image" + stringify(idx)] = {};
}
expect_trash_list(m_local_io_ctx, "image999", std::move(images), 0);
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
m_threads->work_queue->drain();
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, Notify) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
MockListener mock_listener;
expect_handle_trash_image(mock_listener, "image1");
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
expect_trash_list(m_local_io_ctx, "", {}, 0);
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
LibrbdTrashWatcher::get_instance().handle_image_added(
"image1", {cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING, "name", {}, {}});
m_threads->work_queue->drain();
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, CreateBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, -EBLOCKLISTED);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(-EBLOCKLISTED, ctx.wait());
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, CreateDNE) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, -ENOENT);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(-ENOENT, ctx.wait());
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
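// A transient create error (-EINVAL) is retried: a timer event is scheduled
// and the second create attempt succeeds, after which the watcher registers
// normally.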
TEST_F(TestMockImageDeleterTrashWatcher, CreateError) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, -EINVAL);
expect_timer_add_event(mock_threads);
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, RegisterWatcherBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, -EBLOCKLISTED);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(-EBLOCKLISTED, ctx.wait());
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, RegisterWatcherError) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, -EINVAL);
expect_timer_add_event(mock_threads);
expect_create_trash(m_local_io_ctx, 0);
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, TrashListBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
expect_trash_list(m_local_io_ctx, "", {}, -EBLOCKLISTED);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(-EBLOCKLISTED, ctx.wait());
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, TrashListError) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
expect_trash_list(m_local_io_ctx, "", {}, -EINVAL);
expect_timer_add_event(mock_threads);
expect_create_trash(m_local_io_ctx, 0);
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, false);
expect_trash_list(m_local_io_ctx, "", {}, 0);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
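// A completed rewatch triggers a full refresh: the trash object is
// re-created and the trash list is re-scanned.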
TEST_F(TestMockImageDeleterTrashWatcher, Rewatch) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
expect_trash_list(m_local_io_ctx, "", {}, 0);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
expect_timer_add_event(mock_threads);
expect_create_trash(m_local_io_ctx, 0);
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, false);
expect_trash_list(m_local_io_ctx, "", {}, 0);
LibrbdTrashWatcher::get_instance().handle_rewatch_complete(0);
m_threads->work_queue->drain();
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
TEST_F(TestMockImageDeleterTrashWatcher, RewatchBlocklist) {
MockThreads mock_threads(m_threads);
expect_work_queue(mock_threads);
InSequence seq;
expect_create_trash(m_local_io_ctx, 0);
MockLibrbdTrashWatcher mock_librbd_trash_watcher;
expect_trash_watcher_is_unregistered(mock_librbd_trash_watcher, true);
expect_trash_watcher_register(mock_librbd_trash_watcher, 0);
expect_trash_list(m_local_io_ctx, "", {}, 0);
MockListener mock_listener;
MockTrashWatcher mock_trash_watcher(m_local_io_ctx, &mock_threads,
mock_listener);
C_SaferCond ctx;
mock_trash_watcher.init(&ctx);
ASSERT_EQ(0, ctx.wait());
LibrbdTrashWatcher::get_instance().handle_rewatch_complete(-EBLOCKLISTED);
m_threads->work_queue->drain();
expect_trash_watcher_unregister(mock_librbd_trash_watcher, 0);
ASSERT_EQ(0, when_shut_down(mock_trash_watcher));
}
} // namespace image_deleter
} // namespace mirror
} // namespace rbd
| 16,868 | 31.440385 | 87 |
cc
|
null |
ceph-main/src/test/rbd_mirror/image_map/test_Policy.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/Context.h"
#include "test/rbd_mirror/test_fixture.h"
#include "tools/rbd_mirror/image_map/Types.h"
#include "tools/rbd_mirror/image_map/SimplePolicy.h"
#include "include/stringify.h"
#include "common/Thread.h"
void register_test_image_policy() {
}
namespace rbd {
namespace mirror {
namespace image_map {
class TestImageMapPolicy : public TestFixture {
public:
void SetUp() override {
TestFixture::SetUp();
EXPECT_EQ(0, _rados->conf_set("rbd_mirror_image_policy_migration_throttle",
"0"));
CephContext *cct = reinterpret_cast<CephContext *>(m_local_io_ctx.cct());
std::string policy_type = cct->_conf.get_val<std::string>("rbd_mirror_image_policy_type");
if (policy_type == "none" || policy_type == "simple") {
m_policy = image_map::SimplePolicy::create(m_local_io_ctx);
} else {
ceph_abort();
}
m_policy->init({});
}
void TearDown() override {
TestFixture::TearDown();
delete m_policy;
}
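  // Mapping an image walks the MAP_UPDATE -> ACQUIRE action sequence;
  // finish_action() returns true while further actions remain queued and
  // false once the transaction is complete.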
void map_image(const std::string &global_image_id) {
ASSERT_TRUE(m_policy->add_image(global_image_id));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
void unmap_image(const std::string &global_image_id) {
ASSERT_TRUE(m_policy->remove_image(global_image_id));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_REMOVE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
void shuffle_image(const std::string &global_image_id) {
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
Policy *m_policy;
};
TEST_F(TestImageMapPolicy, NegativeLookup) {
const std::string global_image_id = "global id 1";
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id == UNMAPPED_INSTANCE_ID);
}
TEST_F(TestImageMapPolicy, Init) {
const std::string global_image_id = "global id 1";
m_policy->init({{global_image_id, {"9876", {}, {}}}});
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
TEST_F(TestImageMapPolicy, MapImage) {
const std::string global_image_id = "global id 1";
map_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
TEST_F(TestImageMapPolicy, UnmapImage) {
const std::string global_image_id = "global id 1";
// map image
map_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
// unmap image
unmap_image(global_image_id);
info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id == UNMAPPED_INSTANCE_ID);
}
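// Adding a new instance returns, via shuffle_global_image_ids, the images
// the policy wants rebalanced onto it; each is then re-mapped through the
// RELEASE -> MAP_UPDATE -> ACQUIRE shuffle sequence.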
TEST_F(TestImageMapPolicy, ShuffleImageAddInstance) {
std::set<std::string> global_image_ids {
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5", "global id 6"
};
for (auto const &global_image_id : global_image_ids) {
// map image
map_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
std::set<std::string> shuffle_global_image_ids;
m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
for (auto const &global_image_id : shuffle_global_image_ids) {
shuffle_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
}
TEST_F(TestImageMapPolicy, ShuffleImageRemoveInstance) {
std::set<std::string> global_image_ids {
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5"
};
std::set<std::string> shuffle_global_image_ids;
m_policy->add_instances({stringify(m_local_io_ctx.get_instance_id())},
&shuffle_global_image_ids);
for (auto const &global_image_id : global_image_ids) {
// map image
map_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
for (auto const &global_image_id : shuffle_global_image_ids) {
shuffle_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
// record which of the images got migrated to the new instance
std::set<std::string> remapped_global_image_ids;
for (auto const &global_image_id: shuffle_global_image_ids) {
LookupInfo info = m_policy->lookup(global_image_id);
if (info.instance_id == "9876") {
remapped_global_image_ids.emplace(global_image_id);
}
}
shuffle_global_image_ids.clear();
m_policy->remove_instances({"9876"}, &shuffle_global_image_ids);
ASSERT_TRUE(shuffle_global_image_ids == remapped_global_image_ids);
for (auto const &global_image_id : shuffle_global_image_ids) {
shuffle_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
}
TEST_F(TestImageMapPolicy, RetryMapUpdate) {
const std::string global_image_id = "global id 1";
ASSERT_TRUE(m_policy->add_image(global_image_id));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
// on-disk map update failed
ASSERT_TRUE(m_policy->finish_action(global_image_id, -EIO));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
TEST_F(TestImageMapPolicy, MapFailureAndUnmap) {
const std::string global_image_id = "global id 1";
ASSERT_TRUE(m_policy->add_image(global_image_id));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
std::set<std::string> shuffle_global_image_ids;
m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
ASSERT_TRUE(shuffle_global_image_ids.empty());
m_policy->remove_instances({stringify(m_local_io_ctx.get_instance_id())},
&shuffle_global_image_ids);
ASSERT_TRUE(shuffle_global_image_ids.empty());
ASSERT_TRUE(m_policy->finish_action(global_image_id, -EBLOCKLISTED));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, -ENOENT));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
ASSERT_TRUE(m_policy->remove_image(global_image_id));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_REMOVE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
TEST_F(TestImageMapPolicy, ReshuffleWithMapFailure) {
std::set<std::string> global_image_ids {
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5",
"global id 6"
};
std::set<std::string> shuffle_global_image_ids;
m_policy->add_instances({stringify(m_local_io_ctx.get_instance_id())},
&shuffle_global_image_ids);
for (auto const &global_image_id : global_image_ids) {
// map image
map_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
ASSERT_FALSE(shuffle_global_image_ids.empty());
const std::string global_image_id = *(shuffle_global_image_ids.begin());
shuffle_global_image_ids.clear();
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
// peer unavailable
m_policy->remove_instances({"9876"}, &shuffle_global_image_ids);
ASSERT_TRUE(shuffle_global_image_ids.empty());
ASSERT_TRUE(m_policy->finish_action(global_image_id, -EBLOCKLISTED));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
TEST_F(TestImageMapPolicy, ShuffleFailureAndRemove) {
std::set<std::string> global_image_ids {
"global id 1", "global id 2", "global id 3", "global id 4", "global id 5",
"global id 6"
};
std::set<std::string> shuffle_global_image_ids;
m_policy->add_instances({stringify(m_local_io_ctx.get_instance_id())},
&shuffle_global_image_ids);
for (auto const &global_image_id : global_image_ids) {
// map image
map_image(global_image_id);
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id != UNMAPPED_INSTANCE_ID);
}
m_policy->add_instances({"9876"}, &shuffle_global_image_ids);
ASSERT_FALSE(shuffle_global_image_ids.empty());
std::string global_image_id = *(shuffle_global_image_ids.begin());
shuffle_global_image_ids.clear();
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
// peer unavailable
m_policy->remove_instances({"9876"}, &shuffle_global_image_ids);
ASSERT_TRUE(shuffle_global_image_ids.empty());
ASSERT_TRUE(m_policy->finish_action(global_image_id, -EBLOCKLISTED));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
ASSERT_TRUE(m_policy->remove_image(global_image_id));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_REMOVE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
LookupInfo info = m_policy->lookup(global_image_id);
ASSERT_TRUE(info.instance_id == UNMAPPED_INSTANCE_ID);
}
TEST_F(TestImageMapPolicy, InitialInstanceUpdate) {
const std::string global_image_id = "global id 1";
m_policy->init({{global_image_id, {"9876", {}, {}}}});
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
auto instance_id = stringify(m_local_io_ctx.get_instance_id());
std::set<std::string> shuffle_global_image_ids;
m_policy->add_instances({instance_id}, &shuffle_global_image_ids);
ASSERT_EQ(0U, shuffle_global_image_ids.size());
ASSERT_TRUE(m_policy->finish_action(global_image_id, -ENOENT));
ASSERT_EQ(ACTION_TYPE_RELEASE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_MAP_UPDATE, m_policy->start_action(global_image_id));
ASSERT_TRUE(m_policy->finish_action(global_image_id, 0));
ASSERT_EQ(ACTION_TYPE_ACQUIRE, m_policy->start_action(global_image_id));
ASSERT_FALSE(m_policy->finish_action(global_image_id, 0));
}
} // namespace image_map
} // namespace mirror
} // namespace rbd
| 13,298 | 34.18254 | 94 |
cc
|