// ceph-main/src/librados/librados_c.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <limits.h>
#include "acconfig.h"
#include "common/config.h"
#include "common/errno.h"
#include "common/ceph_argparse.h"
#include "common/ceph_json.h"
#include "common/common_init.h"
#include "common/TracepointProvider.h"
#include "common/hobject.h"
#include "common/async/waiter.h"
#include "include/rados/librados.h"
#include "include/types.h"
#include <include/stringify.h>
#include "librados/librados_c.h"
#include "librados/AioCompletionImpl.h"
#include "librados/IoCtxImpl.h"
#include "librados/ObjectOperationImpl.h"
#include "librados/PoolAsyncCompletionImpl.h"
#include "librados/RadosClient.h"
#include "librados/RadosXattrIter.h"
#include "librados/ListObjectImpl.h"
#include "librados/librados_util.h"
#include <cls/lock/cls_lock_client.h>
#include <string>
#include <map>
#include <set>
#include <vector>
#include <list>
#include <stdexcept>
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
#if defined(HAVE_ASM_SYMVER) || defined(HAVE_ATTR_SYMVER)
// Prefer __attribute__((symver)) over global asm(".symver"), because the
// latter is not parsed by the compiler and gets partitioned away by GCC
// when LTO partitioning is enabled; in other words, those asm() statements
// are dropped by -flto by default. Using __attribute__ instead lets the
// compiler process the version information itself, so it is preserved
// after LTO partitions the code.
#ifdef HAVE_ATTR_SYMVER
#define LIBRADOS_C_API_BASE(fn) \
extern __typeof (_##fn##_base) _##fn##_base __attribute__((__symver__ (#fn "@")))
#define LIBRADOS_C_API_BASE_DEFAULT(fn) \
extern __typeof (_##fn) _##fn __attribute__((__symver__ (#fn "@@")))
#define LIBRADOS_C_API_DEFAULT(fn, ver) \
extern __typeof (_##fn) _##fn __attribute__((__symver__ (#fn "@@LIBRADOS_" #ver)))
#else
#define LIBRADOS_C_API_BASE(fn) \
asm(".symver _" #fn "_base, " #fn "@")
#define LIBRADOS_C_API_BASE_DEFAULT(fn) \
asm(".symver _" #fn ", " #fn "@@")
#define LIBRADOS_C_API_DEFAULT(fn, ver) \
asm(".symver _" #fn ", " #fn "@@LIBRADOS_" #ver)
#endif
#define LIBRADOS_C_API_BASE_F(fn) _ ## fn ## _base
#define LIBRADOS_C_API_DEFAULT_F(fn) _ ## fn
#else
#define LIBRADOS_C_API_BASE(fn)
#define LIBRADOS_C_API_BASE_DEFAULT(fn)
#define LIBRADOS_C_API_DEFAULT(fn, ver)
#define LIBRADOS_C_API_BASE_F(fn) _ ## fn ## _base
// There shouldn't be multiple default versions of the same
// function.
#define LIBRADOS_C_API_DEFAULT_F(fn) fn
#endif
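// For example, LIBRADOS_C_API_BASE_DEFAULT(rados_connect) below attaches
// the default-version tag "rados_connect@@" to the _rados_connect
// implementation; LIBRADOS_C_API_BASE(fn) binds a legacy _fn_base
// implementation to the unversioned compatibility symbol "fn@"; and
// LIBRADOS_C_API_DEFAULT(fn, ver) tags a versioned default such as
// "fn@@LIBRADOS_14.2.0" (see rados_ioctx_pool_stat further down).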
using std::ostringstream;
using std::pair;
using std::string;
using std::map;
using std::set;
using std::vector;
using std::list;
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
#define RADOS_LIST_MAX_ENTRIES 1024
static TracepointProvider::Traits tracepoint_traits("librados_tp.so", "rados_tracing");
/*
* Structure of this file
*
* RadosClient and the related classes are the internal implementation of librados.
* Above that layer sits the C API, found in include/rados/librados.h, and
 * the C++ API, found in include/rados/librados.hpp.
*
* The C++ API sometimes implements things in terms of the C API.
* Both the C++ and C API rely on RadosClient.
*
* Visually:
* +--------------------------------------+
* | C++ API |
* +--------------------+ |
* | C API | |
* +--------------------+-----------------+
* | RadosClient |
* +--------------------------------------+
*/
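/*
 * A minimal usage sketch of the C API implemented below (illustrative
 * only; the client id, pool name, and object name are placeholders):
 *
 *   rados_t cluster;
 *   rados_create(&cluster, "admin");        // connect as client.admin
 *   rados_conf_read_file(cluster, NULL);    // default config search path
 *   rados_connect(cluster);
 *   rados_ioctx_t io;
 *   rados_ioctx_create(cluster, "mypool", &io);
 *   rados_write(io, "greeting", "hello", 5, 0);
 *   rados_ioctx_destroy(io);
 *   rados_shutdown(cluster);
 */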
///////////////////////////// C API //////////////////////////////
static CephContext *rados_create_cct(
const char * const clustername,
CephInitParameters *iparams)
{
// missing things compared to global_init:
// g_ceph_context, g_conf, g_lockdep, signal handlers
CephContext *cct = common_preinit(*iparams, CODE_ENVIRONMENT_LIBRARY, 0);
if (clustername)
cct->_conf->cluster = clustername;
cct->_conf.parse_env(cct->get_module_type()); // environment variables override
cct->_conf.apply_changes(nullptr);
TracepointProvider::initialize<tracepoint_traits>(cct);
return cct;
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_create)(
rados_t *pcluster,
const char * const id)
{
CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
if (id) {
iparams.name.set(CEPH_ENTITY_TYPE_CLIENT, id);
}
CephContext *cct = rados_create_cct("", &iparams);
tracepoint(librados, rados_create_enter, id);
*pcluster = reinterpret_cast<rados_t>(new librados::RadosClient(cct));
tracepoint(librados, rados_create_exit, 0, *pcluster);
cct->put();
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create);
// as above, but
// 1) don't assume 'client.'; name is a full type.id namestr
// 2) allow setting clustername
// 3) flags is for future expansion (maybe some of the global_init()
// behavior is appropriate for some consumers of librados, for instance)
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_create2)(
rados_t *pcluster,
const char *const clustername,
const char * const name,
uint64_t flags)
{
// client is assumed, but from_str will override
int retval = 0;
CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
if (!name || !iparams.name.from_str(name)) {
retval = -EINVAL;
}
CephContext *cct = rados_create_cct(clustername, &iparams);
tracepoint(librados, rados_create2_enter, clustername, name, flags);
if (retval == 0) {
*pcluster = reinterpret_cast<rados_t>(new librados::RadosClient(cct));
}
tracepoint(librados, rados_create2_exit, retval, *pcluster);
cct->put();
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create2);
/* This function is intended for use by Ceph daemons. These daemons have
* already called global_init and want to use that particular configuration for
* their cluster.
*/
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_create_with_context)(
rados_t *pcluster,
rados_config_t cct_)
{
CephContext *cct = (CephContext *)cct_;
TracepointProvider::initialize<tracepoint_traits>(cct);
tracepoint(librados, rados_create_with_context_enter, cct_);
librados::RadosClient *radosp = new librados::RadosClient(cct);
*pcluster = (void *)radosp;
tracepoint(librados, rados_create_with_context_exit, 0, *pcluster);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create_with_context);
extern "C" rados_config_t LIBRADOS_C_API_DEFAULT_F(rados_cct)(rados_t cluster)
{
tracepoint(librados, rados_cct_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
rados_config_t retval = (rados_config_t)client->cct;
tracepoint(librados, rados_cct_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cct);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_connect)(rados_t cluster)
{
tracepoint(librados, rados_connect_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->connect();
tracepoint(librados, rados_connect_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_connect);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_shutdown)(rados_t cluster)
{
tracepoint(librados, rados_shutdown_enter, cluster);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
radosp->shutdown();
delete radosp;
tracepoint(librados, rados_shutdown_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_shutdown);
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_get_instance_id)(
rados_t cluster)
{
tracepoint(librados, rados_get_instance_id_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
uint64_t retval = client->get_instance_id();
tracepoint(librados, rados_get_instance_id_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_instance_id);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_get_min_compatible_osd)(
rados_t cluster,
int8_t* require_osd_release)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
return client->get_min_compatible_osd(require_osd_release);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_min_compatible_osd);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_get_min_compatible_client)(
rados_t cluster,
int8_t* min_compat_client,
int8_t* require_min_compat_client)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
return client->get_min_compatible_client(min_compat_client,
require_min_compat_client);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_min_compatible_client);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_version)(
int *major, int *minor, int *extra)
{
tracepoint(librados, rados_version_enter, major, minor, extra);
if (major)
*major = LIBRADOS_VER_MAJOR;
if (minor)
*minor = LIBRADOS_VER_MINOR;
if (extra)
*extra = LIBRADOS_VER_EXTRA;
tracepoint(librados, rados_version_exit, LIBRADOS_VER_MAJOR, LIBRADOS_VER_MINOR, LIBRADOS_VER_EXTRA);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_version);
// -- config --
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_read_file)(
rados_t cluster,
const char *path_list)
{
tracepoint(librados, rados_conf_read_file_enter, cluster, path_list);
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
ostringstream warnings;
int ret = conf.parse_config_files(path_list, &warnings, 0);
if (ret) {
if (warnings.tellp() > 0)
lderr(client->cct) << warnings.str() << dendl;
client->cct->_conf.complain_about_parse_error(client->cct);
tracepoint(librados, rados_conf_read_file_exit, ret);
return ret;
}
conf.parse_env(client->cct->get_module_type()); // environment variables override
conf.apply_changes(nullptr);
client->cct->_conf.complain_about_parse_error(client->cct);
tracepoint(librados, rados_conf_read_file_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_read_file);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_parse_argv)(
rados_t cluster,
int argc,
const char **argv)
{
tracepoint(librados, rados_conf_parse_argv_enter, cluster, argc);
int i;
for(i = 0; i < argc; i++) {
tracepoint(librados, rados_conf_parse_argv_arg, argv[i]);
}
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
auto args = argv_to_vec(argc, argv);
int ret = conf.parse_argv(args);
if (ret) {
tracepoint(librados, rados_conf_parse_argv_exit, ret);
return ret;
}
conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_parse_argv_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_parse_argv);
// Like the above, but returns the remaining unparsed args in remargv,
// which the caller must allocate with room for at least argc entries.
// remargv will contain n <= argc pointers into the original argv[];
// entries past the remainder are set to NULL.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_parse_argv_remainder)(
rados_t cluster, int argc,
const char **argv,
const char **remargv)
{
tracepoint(librados, rados_conf_parse_argv_remainder_enter, cluster, argc);
unsigned int i;
for(i = 0; i < (unsigned int) argc; i++) {
tracepoint(librados, rados_conf_parse_argv_remainder_arg, argv[i]);
}
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
  vector<const char*> args;
  args.reserve(argc);
  for (int j = 0; j < argc; j++)
    args.push_back(argv[j]);
int ret = conf.parse_argv(args);
if (ret) {
tracepoint(librados, rados_conf_parse_argv_remainder_exit, ret);
return ret;
}
conf.apply_changes(NULL);
ceph_assert(args.size() <= (unsigned int)argc);
for (i = 0; i < (unsigned int)argc; ++i) {
if (i < args.size())
remargv[i] = args[i];
else
remargv[i] = (const char *)NULL;
tracepoint(librados, rados_conf_parse_argv_remainder_remarg, remargv[i]);
}
tracepoint(librados, rados_conf_parse_argv_remainder_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_parse_argv_remainder);
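/*
 * Illustrative use of the remainder variant; remargv entries point into
 * the caller's argv:
 *
 *   const char *argv[] = { "prog", "--conf", "/etc/ceph/ceph.conf", "-x" };
 *   const char *rem[4];
 *   rados_conf_parse_argv_remainder(cluster, 4, argv, rem);
 *   // rem == { "prog", "-x", NULL, NULL }: "--conf <path>" was consumed
 */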
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_parse_env)(
rados_t cluster, const char *env)
{
tracepoint(librados, rados_conf_parse_env_enter, cluster, env);
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
conf.parse_env(client->cct->get_module_type(), env);
conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_parse_env_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_parse_env);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_set)(
rados_t cluster,
const char *option,
const char *value)
{
tracepoint(librados, rados_conf_set_enter, cluster, option, value);
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
int ret = conf.set_val(option, value);
if (ret) {
tracepoint(librados, rados_conf_set_exit, ret);
return ret;
}
conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_set_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_set);
/* cluster info */
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cluster_stat)(
rados_t cluster,
rados_cluster_stat_t *result)
{
tracepoint(librados, rados_cluster_stat_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
ceph_statfs stats;
int r = client->get_fs_stats(stats);
result->kb = stats.kb;
result->kb_used = stats.kb_used;
result->kb_avail = stats.kb_avail;
result->num_objects = stats.num_objects;
tracepoint(librados, rados_cluster_stat_exit, r, result->kb, result->kb_used, result->kb_avail, result->num_objects);
return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cluster_stat);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_get)(
rados_t cluster,
const char *option,
char *buf, size_t len)
{
tracepoint(librados, rados_conf_get_enter, cluster, option, len);
char *tmp = buf;
librados::RadosClient *client = (librados::RadosClient *)cluster;
const auto& conf = client->cct->_conf;
int retval = conf.get_val(option, &tmp, len);
tracepoint(librados, rados_conf_get_exit, retval, retval ? "" : option);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_get);
extern "C" int64_t LIBRADOS_C_API_DEFAULT_F(rados_pool_lookup)(
rados_t cluster,
const char *name)
{
tracepoint(librados, rados_pool_lookup_enter, cluster, name);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
int64_t retval = radosp->lookup_pool(name);
tracepoint(librados, rados_pool_lookup_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_lookup);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_reverse_lookup)(
rados_t cluster,
int64_t id,
char *buf,
size_t maxlen)
{
tracepoint(librados, rados_pool_reverse_lookup_enter, cluster, id, maxlen);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
std::string name;
int r = radosp->pool_get_name(id, &name, true);
if (r < 0) {
tracepoint(librados, rados_pool_reverse_lookup_exit, r, "");
return r;
}
if (name.length() >= maxlen) {
tracepoint(librados, rados_pool_reverse_lookup_exit, -ERANGE, "");
return -ERANGE;
}
strcpy(buf, name.c_str());
int retval = name.length();
tracepoint(librados, rados_pool_reverse_lookup_exit, retval, buf);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_reverse_lookup);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cluster_fsid)(
rados_t cluster,
char *buf,
size_t maxlen)
{
tracepoint(librados, rados_cluster_fsid_enter, cluster, maxlen);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
std::string fsid;
radosp->get_fsid(&fsid);
if (fsid.length() >= maxlen) {
tracepoint(librados, rados_cluster_fsid_exit, -ERANGE, "");
return -ERANGE;
}
strcpy(buf, fsid.c_str());
int retval = fsid.length();
tracepoint(librados, rados_cluster_fsid_exit, retval, buf);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cluster_fsid);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_wait_for_latest_osdmap)(
rados_t cluster)
{
tracepoint(librados, rados_wait_for_latest_osdmap_enter, cluster);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
int retval = radosp->wait_for_latest_osdmap();
tracepoint(librados, rados_wait_for_latest_osdmap_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_wait_for_latest_osdmap);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_blocklist_add)(
rados_t cluster,
char *client_address,
uint32_t expire_seconds)
{
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
return radosp->blocklist_add(client_address, expire_seconds);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_blocklist_add);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_blacklist_add)(
rados_t cluster,
char *client_address,
uint32_t expire_seconds)
{
return LIBRADOS_C_API_DEFAULT_F(rados_blocklist_add)(
cluster, client_address, expire_seconds);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_blacklist_add);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getaddrs)(
rados_t cluster,
char** addrs)
{
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
auto s = radosp->get_addrs();
*addrs = strdup(s.c_str());
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getaddrs);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_set_osdmap_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_osdmap_full_try);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_unset_osdmap_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unset_osdmap_full_try);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_set_pool_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_pool_full_try);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_unset_pool_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unset_pool_full_try);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_enable)(
rados_ioctx_t io,
const char *app_name,
int force)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->application_enable(app_name, force != 0);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_enable);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_list)(
rados_ioctx_t io,
char *values,
size_t *values_len)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::set<std::string> app_names;
int r = ctx->application_list(&app_names);
if (r < 0) {
return r;
}
  size_t total_len = 0;
  for (const auto& app_name : app_names) {
    total_len += app_name.size() + 1;
  }
  total_len += 1; // trailing empty string that terminates the list
  if (*values_len < total_len) {
    *values_len = total_len;
    return -ERANGE;
  }
  char *values_p = values;
  for (const auto& app_name : app_names) {
    size_t len = app_name.size() + 1;
    strncpy(values_p, app_name.c_str(), len);
    values_p += len;
  }
  *values_p = '\0'; // now within the checked bounds
  *values_len = total_len;
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_list);
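/*
 * The sized-output calls here follow a common two-call pattern
 * (illustrative):
 *
 *   size_t len = 0;
 *   if (rados_application_list(io, NULL, &len) == -ERANGE) {
 *     char *buf = (char *)malloc(len);
 *     if (rados_application_list(io, buf, &len) == 0) {
 *       // buf holds NUL-separated names, ending with an empty string
 *     }
 *     free(buf);
 *   }
 */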
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_get)(
rados_ioctx_t io,
const char *app_name,
const char *key,
char *value,
size_t *value_len)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::string value_str;
int r = ctx->application_metadata_get(app_name, key, &value_str);
if (r < 0) {
return r;
}
size_t len = value_str.size() + 1;
if (*value_len < len) {
*value_len = len;
return -ERANGE;
}
strncpy(value, value_str.c_str(), len);
*value_len = len;
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_get);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_set)(
rados_ioctx_t io,
const char *app_name,
const char *key,
const char *value)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->application_metadata_set(app_name, key, value);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_set);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_remove)(
rados_ioctx_t io,
const char *app_name,
const char *key)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->application_metadata_remove(app_name, key);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_list)(
rados_ioctx_t io,
const char *app_name,
char *keys, size_t *keys_len,
char *values, size_t *vals_len)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::map<std::string, std::string> metadata;
int r = ctx->application_metadata_list(app_name, &metadata);
if (r < 0) {
return r;
}
  size_t total_key_len = 0;
  size_t total_val_len = 0;
  for (const auto& pair : metadata) {
    total_key_len += pair.first.size() + 1;
    total_val_len += pair.second.size() + 1;
  }
  total_key_len += 1; // trailing empty string that terminates the key list
  total_val_len += 1; // ...and the value list
  if (*keys_len < total_key_len || *vals_len < total_val_len) {
    *keys_len = total_key_len;
    *vals_len = total_val_len;
    return -ERANGE;
  }
  char *keys_p = keys;
  char *vals_p = values;
  for (const auto& pair : metadata) {
    size_t key_len = pair.first.size() + 1;
    strncpy(keys_p, pair.first.c_str(), key_len);
    keys_p += key_len;
    size_t val_len = pair.second.size() + 1;
    strncpy(vals_p, pair.second.c_str(), val_len);
    vals_p += val_len;
  }
  *keys_p = '\0'; // now within the checked bounds
  *keys_len = total_key_len;
  *vals_p = '\0';
  *vals_len = total_val_len;
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_list);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_list)(
rados_t cluster,
char *buf,
size_t len)
{
tracepoint(librados, rados_pool_list_enter, cluster, len);
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::list<std::pair<int64_t, std::string> > pools;
int r = client->pool_list(pools);
if (r < 0) {
tracepoint(librados, rados_pool_list_exit, r);
return r;
}
if (len > 0 && !buf) {
tracepoint(librados, rados_pool_list_exit, -EINVAL);
return -EINVAL;
}
char *b = buf;
if (b) {
// FIPS zeroization audit 20191116: this memset is not security related.
memset(b, 0, len);
}
int needed = 0;
std::list<std::pair<int64_t, std::string> >::const_iterator i = pools.begin();
std::list<std::pair<int64_t, std::string> >::const_iterator p_end =
pools.end();
for (; i != p_end; ++i) {
int rl = i->second.length() + 1;
if (len < (unsigned)rl)
break;
const char* pool = i->second.c_str();
tracepoint(librados, rados_pool_list_pool, pool);
if (b) {
strncat(b, pool, rl);
b += rl;
}
needed += rl;
len -= rl;
}
for (; i != p_end; ++i) {
int rl = i->second.length() + 1;
needed += rl;
}
int retval = needed + 1;
tracepoint(librados, rados_pool_list_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_list);
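/*
 * rados_pool_list() always returns the byte count required (plus one byte
 * for a terminating empty name), so a two-call sizing idiom works here too
 * (illustrative):
 *
 *   int needed = rados_pool_list(cluster, NULL, 0);
 *   char *buf = (char *)malloc(needed);
 *   rados_pool_list(cluster, buf, needed);
 *   // buf: "rbd\0cephfs_data\0...\0\0" -- NUL-separated pool names
 *   free(buf);
 */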
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_inconsistent_pg_list)(
rados_t cluster,
int64_t pool_id,
char *buf,
size_t len)
{
tracepoint(librados, rados_inconsistent_pg_list_enter, cluster, pool_id, len);
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::vector<std::string> pgs;
if (int r = client->get_inconsistent_pgs(pool_id, &pgs); r < 0) {
tracepoint(librados, rados_inconsistent_pg_list_exit, r);
return r;
}
if (len > 0 && !buf) {
tracepoint(librados, rados_inconsistent_pg_list_exit, -EINVAL);
return -EINVAL;
}
char *b = buf;
if (b) {
// FIPS zeroization audit 20191116: this memset is not security related.
memset(b, 0, len);
}
int needed = 0;
for (const auto& s : pgs) {
unsigned rl = s.length() + 1;
if (b && len >= rl) {
tracepoint(librados, rados_inconsistent_pg_list_pg, s.c_str());
strncat(b, s.c_str(), rl);
b += rl;
len -= rl;
}
needed += rl;
}
int retval = needed + 1;
tracepoint(librados, rados_inconsistent_pg_list_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_inconsistent_pg_list);
static void dict_to_map(const char *dict,
std::map<std::string, std::string>* dict_map)
{
while (*dict != '\0') {
const char* key = dict;
dict += strlen(key) + 1;
const char* value = dict;
dict += strlen(value) + 1;
(*dict_map)[key] = value;
}
}
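// The dict format consumed by dict_to_map() is a run of NUL-terminated
// key/value pairs ending with an empty key. As a C literal (whose implicit
// trailing NUL supplies the empty key), e.g.:
//   "instance\0rgw0\0zone\0default\0"
// parses to {"instance": "rgw0", "zone": "default"}.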
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_service_register)(
rados_t cluster,
const char *service,
const char *daemon,
const char *metadata_dict)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::map<std::string, std::string> metadata;
dict_to_map(metadata_dict, &metadata);
return client->service_daemon_register(service, daemon, metadata);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_service_register);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_service_update_status)(
rados_t cluster,
const char *status_dict)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::map<std::string, std::string> status;
dict_to_map(status_dict, &status);
return client->service_daemon_update_status(std::move(status));
}
LIBRADOS_C_API_BASE_DEFAULT(rados_service_update_status);
static void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen)
{
if (outbuf) {
if (outbl.length() > 0) {
*outbuf = (char *)malloc(outbl.length());
memcpy(*outbuf, outbl.c_str(), outbl.length());
} else {
*outbuf = NULL;
}
}
if (outbuflen)
*outbuflen = outbl.length();
}
static void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen)
{
if (outbuf) {
if (outbl.length() > 0) {
*outbuf = (char *)malloc(outbl.length());
memcpy(*outbuf, outbl.c_str(), outbl.length());
} else {
*outbuf = NULL;
}
}
if (outbuflen)
*outbuflen = outbl.length();
}
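// Note: do_out_buffer() hands ownership of a malloc()ed buffer to the
// caller of the command APIs below; release it with rados_buffer_free()
// rather than a bare free() so allocation and release stay on the same
// side of the library boundary.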
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ping_monitor)(
rados_t cluster,
const char *mon_id,
char **outstr,
size_t *outstrlen)
{
tracepoint(librados, rados_ping_monitor_enter, cluster, mon_id);
librados::RadosClient *client = (librados::RadosClient *)cluster;
string str;
if (!mon_id) {
tracepoint(librados, rados_ping_monitor_exit, -EINVAL, NULL, NULL);
return -EINVAL;
}
int ret = client->ping_monitor(mon_id, &str);
if (ret == 0) {
do_out_buffer(str, outstr, outstrlen);
}
tracepoint(librados, rados_ping_monitor_exit, ret, ret < 0 ? NULL : outstr, ret < 0 ? NULL : outstrlen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ping_monitor);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mon_command)(
rados_t cluster,
const char **cmd, size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mon_command_enter, cluster, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mon_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->mon_command(cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mon_command_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mon_command);
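/*
 * Illustrative monitor command round trip; commands are JSON fragments:
 *
 *   const char *cmd[] = { "{\"prefix\": \"status\", \"format\": \"json\"}" };
 *   char *out = NULL, *outs = NULL;
 *   size_t outlen = 0, outslen = 0;
 *   int r = rados_mon_command(cluster, cmd, 1, "", 0,
 *                             &out, &outlen, &outs, &outslen);
 *   // on success, out[0..outlen) holds the JSON reply
 *   rados_buffer_free(out);
 *   rados_buffer_free(outs);
 */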
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mon_command_target)(
rados_t cluster,
const char *name,
const char **cmd, size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mon_command_target_enter, cluster, name, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
// is this a numeric id?
char *endptr;
errno = 0;
long rank = strtol(name, &endptr, 10);
if ((errno == ERANGE && (rank == LONG_MAX || rank == LONG_MIN)) ||
(errno != 0 && rank == 0) ||
endptr == name || // no digits
*endptr != '\0') { // extra characters
rank = -1;
}
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mon_command_target_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret;
if (rank >= 0)
ret = client->mon_command(rank, cmdvec, inbl, &outbl, &outstring);
else
ret = client->mon_command(name, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mon_command_target_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mon_command_target);
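// Note: a purely numeric target name (per the strtol() check above) is
// treated as a monitor rank, e.g. "0"; anything else, e.g. "a", is looked
// up as a monitor name.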
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_osd_command)(
rados_t cluster, int osdid, const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_osd_command_enter, cluster, osdid, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_osd_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->osd_command(osdid, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_osd_command_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_osd_command);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mgr_command)(
rados_t cluster, const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mgr_command_enter, cluster, cmdlen, inbuf,
inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mgr_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->mgr_command(cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mgr_command_exit, ret, outbuf, outbuflen, outs,
outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mgr_command);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mgr_command_target)(
rados_t cluster,
const char *name,
const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mgr_command_target_enter, cluster, name, cmdlen,
inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mgr_command_target_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->mgr_command(name, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mgr_command_target_exit, ret, outbuf, outbuflen,
outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mgr_command_target);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pg_command)(
rados_t cluster, const char *pgstr,
const char **cmd, size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_pg_command_enter, cluster, pgstr, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
pg_t pgid;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_pg_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
if (!pgid.parse(pgstr))
return -EINVAL;
int ret = client->pg_command(pgid, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_pg_command_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pg_command);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(char *buf)
{
tracepoint(librados, rados_buffer_free_enter, buf);
if (buf)
free(buf);
tracepoint(librados, rados_buffer_free_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_buffer_free);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_monitor_log)(
rados_t cluster,
const char *level,
rados_log_callback_t cb,
void *arg)
{
tracepoint(librados, rados_monitor_log_enter, cluster, level, cb, arg);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->monitor_log(level, cb, nullptr, arg);
tracepoint(librados, rados_monitor_log_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_monitor_log);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_monitor_log2)(
rados_t cluster,
const char *level,
rados_log_callback2_t cb,
void *arg)
{
tracepoint(librados, rados_monitor_log2_enter, cluster, level, cb, arg);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->monitor_log(level, nullptr, cb, arg);
tracepoint(librados, rados_monitor_log2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_monitor_log2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_create)(
rados_t cluster,
const char *name,
rados_ioctx_t *io)
{
tracepoint(librados, rados_ioctx_create_enter, cluster, name);
librados::RadosClient *client = (librados::RadosClient *)cluster;
librados::IoCtxImpl *ctx;
int r = client->create_ioctx(name, &ctx);
if (r < 0) {
tracepoint(librados, rados_ioctx_create_exit, r, NULL);
return r;
}
*io = ctx;
ctx->get();
tracepoint(librados, rados_ioctx_create_exit, 0, ctx);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_create);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_create2)(
rados_t cluster,
int64_t pool_id,
rados_ioctx_t *io)
{
tracepoint(librados, rados_ioctx_create2_enter, cluster, pool_id);
librados::RadosClient *client = (librados::RadosClient *)cluster;
librados::IoCtxImpl *ctx;
int r = client->create_ioctx(pool_id, &ctx);
if (r < 0) {
tracepoint(librados, rados_ioctx_create2_exit, r, NULL);
return r;
}
*io = ctx;
ctx->get();
tracepoint(librados, rados_ioctx_create2_exit, 0, ctx);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_create2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_destroy)(rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_destroy_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (ctx) {
ctx->put();
}
tracepoint(librados, rados_ioctx_destroy_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_destroy);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_stat)(
rados_ioctx_t io,
struct rados_pool_stat_t *stats)
{
tracepoint(librados, rados_ioctx_pool_stat_enter, io);
librados::IoCtxImpl *io_ctx_impl = (librados::IoCtxImpl *)io;
list<string> ls;
std::string pool_name;
int err = io_ctx_impl->client->pool_get_name(io_ctx_impl->get_id(), &pool_name);
if (err) {
tracepoint(librados, rados_ioctx_pool_stat_exit, err, stats);
return err;
}
ls.push_back(pool_name);
map<string, ::pool_stat_t> rawresult;
bool per_pool = false;
err = io_ctx_impl->client->get_pool_stats(ls, &rawresult, &per_pool);
if (err) {
tracepoint(librados, rados_ioctx_pool_stat_exit, err, stats);
return err;
}
::pool_stat_t& r = rawresult[pool_name];
uint64_t allocated_bytes = r.get_allocated_data_bytes(per_pool) +
r.get_allocated_omap_bytes(per_pool);
  // FIXME: raw_used_rate is unknown here, so we use 1.0, i.e. we report the
  // net amount aggregated over all replicas. Not a big deal so far since
  // this field isn't exposed.
uint64_t user_bytes = r.get_user_data_bytes(1.0, per_pool) +
r.get_user_omap_bytes(1.0, per_pool);
stats->num_kb = shift_round_up(allocated_bytes, 10);
stats->num_bytes = allocated_bytes;
stats->num_objects = r.stats.sum.num_objects;
stats->num_object_clones = r.stats.sum.num_object_clones;
stats->num_object_copies = r.stats.sum.num_object_copies;
stats->num_objects_missing_on_primary = r.stats.sum.num_objects_missing_on_primary;
stats->num_objects_unfound = r.stats.sum.num_objects_unfound;
stats->num_objects_degraded =
r.stats.sum.num_objects_degraded +
r.stats.sum.num_objects_misplaced; // FIXME: this is imprecise
stats->num_rd = r.stats.sum.num_rd;
stats->num_rd_kb = r.stats.sum.num_rd_kb;
stats->num_wr = r.stats.sum.num_wr;
stats->num_wr_kb = r.stats.sum.num_wr_kb;
stats->num_user_bytes = user_bytes;
stats->compressed_bytes_orig = r.store_stats.data_compressed_original;
stats->compressed_bytes = r.store_stats.data_compressed;
stats->compressed_bytes_alloc = r.store_stats.data_compressed_allocated;
tracepoint(librados, rados_ioctx_pool_stat_exit, 0, stats);
return 0;
}
LIBRADOS_C_API_DEFAULT(rados_ioctx_pool_stat, 14.2.0);
extern "C" int LIBRADOS_C_API_BASE_F(rados_ioctx_pool_stat)(
rados_ioctx_t io, struct __librados_base::rados_pool_stat_t *stats)
{
struct rados_pool_stat_t new_stats;
int r = LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_stat)(io, &new_stats);
if (r < 0) {
return r;
}
stats->num_bytes = new_stats.num_bytes;
stats->num_kb = new_stats.num_kb;
stats->num_objects = new_stats.num_objects;
stats->num_object_clones = new_stats.num_object_clones;
stats->num_object_copies = new_stats.num_object_copies;
stats->num_objects_missing_on_primary = new_stats.num_objects_missing_on_primary;
stats->num_objects_unfound = new_stats.num_objects_unfound;
stats->num_objects_degraded = new_stats.num_objects_degraded;
stats->num_rd = new_stats.num_rd;
stats->num_rd_kb = new_stats.num_rd_kb;
stats->num_wr = new_stats.num_wr;
stats->num_wr_kb = new_stats.num_wr_kb;
return 0;
}
LIBRADOS_C_API_BASE(rados_ioctx_pool_stat);
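// The pair above shows the versioning macros at work: the expanded
// rados_pool_stat_t is exported as the default rados_ioctx_pool_stat
// (tagged @@LIBRADOS_14.2.0), while binaries built against the old ABI
// keep resolving the base symbol, which converts through the legacy
// __librados_base layout.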
extern "C" rados_config_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_cct)(
rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_cct_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
rados_config_t retval = (rados_config_t)ctx->client->cct;
tracepoint(librados, rados_ioctx_cct_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_cct);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_set_read)(
rados_ioctx_t io,
rados_snap_t seq)
{
tracepoint(librados, rados_ioctx_snap_set_read_enter, io, seq);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->set_snap_read((snapid_t)seq);
tracepoint(librados, rados_ioctx_snap_set_read_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_set_read);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_set_write_ctx)(
rados_ioctx_t io,
rados_snap_t seq,
rados_snap_t *snaps,
int num_snaps)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_set_write_ctx_enter, io, seq, snaps, num_snaps);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
vector<snapid_t> snv;
snv.resize(num_snaps);
for (int i=0; i<num_snaps; i++) {
snv[i] = (snapid_t)snaps[i];
}
int retval = ctx->set_snap_write_context((snapid_t)seq, snv);
tracepoint(librados, rados_ioctx_selfmanaged_snap_set_write_ctx_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_set_write_ctx);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t len,
uint64_t off)
{
tracepoint(librados, rados_write_enter, io, o, buf, len, off);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->write(oid, bl, len, off);
tracepoint(librados, rados_write_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_append)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t len)
{
tracepoint(librados, rados_append_enter, io, o, buf, len);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->append(oid, bl, len);
tracepoint(librados, rados_append_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_append);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write_full)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t len)
{
tracepoint(librados, rados_write_full_enter, io, o, buf, len);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->write_full(oid, bl);
tracepoint(librados, rados_write_full_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_full);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_writesame)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t data_len,
size_t write_len,
uint64_t off)
{
tracepoint(librados, rados_writesame_enter, io, o, buf, data_len, write_len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, data_len);
int retval = ctx->writesame(oid, bl, write_len, off);
tracepoint(librados, rados_writesame_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_writesame);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_trunc)(
rados_ioctx_t io,
const char *o,
uint64_t size)
{
tracepoint(librados, rados_trunc_enter, io, o, size);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->trunc(oid, size);
tracepoint(librados, rados_trunc_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_trunc);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_remove)(
rados_ioctx_t io,
const char *o)
{
tracepoint(librados, rados_remove_enter, io, o);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->remove(oid);
tracepoint(librados, rados_remove_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_read)(
rados_ioctx_t io,
const char *o,
char *buf,
size_t len,
uint64_t off)
{
tracepoint(librados, rados_read_enter, io, o, buf, len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int ret;
object_t oid(o);
bufferlist bl;
bufferptr bp = buffer::create_static(len, buf);
bl.push_back(bp);
ret = ctx->read(oid, bl, len, off);
if (ret >= 0) {
if (bl.length() > len) {
tracepoint(librados, rados_read_exit, -ERANGE, NULL);
return -ERANGE;
}
if (!bl.is_provided_buffer(buf))
bl.begin().copy(bl.length(), buf);
    ret = bl.length(); // report the number of bytes actually read
}
tracepoint(librados, rados_read_exit, ret, buf);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read);
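// Note: rados_read() wraps the caller's buffer with buffer::create_static()
// so the reply can land in it directly; the begin().copy() above only runs
// when the data arrived in a different buffer (the is_provided_buffer()
// check).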
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_checksum)(
rados_ioctx_t io, const char *o,
rados_checksum_type_t type,
const char *init_value, size_t init_value_len,
size_t len, uint64_t off, size_t chunk_size,
char *pchecksum, size_t checksum_len)
{
tracepoint(librados, rados_checksum_enter, io, o, type, init_value,
init_value_len, len, off, chunk_size);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist init_value_bl;
init_value_bl.append(init_value, init_value_len);
bufferlist checksum_bl;
int retval = ctx->checksum(oid, get_checksum_op_type(type), init_value_bl,
len, off, chunk_size, &checksum_bl);
if (retval >= 0) {
if (checksum_bl.length() > checksum_len) {
tracepoint(librados, rados_checksum_exit, -ERANGE, NULL, 0);
return -ERANGE;
}
checksum_bl.begin().copy(checksum_bl.length(), pchecksum);
}
tracepoint(librados, rados_checksum_exit, retval, pchecksum, checksum_len);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_checksum);
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_get_last_version)(
rados_ioctx_t io)
{
tracepoint(librados, rados_get_last_version_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
uint64_t retval = ctx->last_version();
tracepoint(librados, rados_get_last_version_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_last_version);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create)(
rados_t cluster,
const char *name)
{
tracepoint(librados, rados_pool_create_enter, cluster, name);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
string sname(name);
int retval = radosp->pool_create(sname);
tracepoint(librados, rados_pool_create_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create_with_auid)(
rados_t cluster,
const char *name,
uint64_t auid)
{
tracepoint(librados, rados_pool_create_with_auid_enter, cluster, name, auid);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
string sname(name);
int retval = 0;
if (auid != CEPH_AUTH_UID_DEFAULT) {
retval = -EINVAL;
} else {
retval = radosp->pool_create(sname);
}
tracepoint(librados, rados_pool_create_with_auid_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create_with_auid);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create_with_crush_rule)(
rados_t cluster,
const char *name,
__u8 crush_rule_num)
{
tracepoint(librados, rados_pool_create_with_crush_rule_enter, cluster, name, crush_rule_num);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
string sname(name);
int retval = radosp->pool_create(sname, crush_rule_num);
tracepoint(librados, rados_pool_create_with_crush_rule_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create_with_crush_rule);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create_with_all)(
rados_t cluster,
const char *name,
uint64_t auid,
__u8 crush_rule_num)
{
tracepoint(librados, rados_pool_create_with_all_enter, cluster, name, auid, crush_rule_num);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
string sname(name);
int retval = 0;
if (auid != CEPH_AUTH_UID_DEFAULT) {
retval = -EINVAL;
} else {
retval = radosp->pool_create(sname, crush_rule_num);
}
tracepoint(librados, rados_pool_create_with_all_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create_with_all);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_get_base_tier)(
rados_t cluster,
int64_t pool_id,
int64_t* base_tier)
{
tracepoint(librados, rados_pool_get_base_tier_enter, cluster, pool_id);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->pool_get_base_tier(pool_id, base_tier);
tracepoint(librados, rados_pool_get_base_tier_exit, retval, *base_tier);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_get_base_tier);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_delete)(
rados_t cluster,
const char *pool_name)
{
tracepoint(librados, rados_pool_delete_enter, cluster, pool_name);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->pool_delete(pool_name);
tracepoint(librados, rados_pool_delete_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_delete);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_set_auid)(
rados_ioctx_t io,
uint64_t auid)
{
tracepoint(librados, rados_ioctx_pool_set_auid_enter, io, auid);
int retval = -EOPNOTSUPP;
tracepoint(librados, rados_ioctx_pool_set_auid_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_set_auid);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_get_auid)(
rados_ioctx_t io,
uint64_t *auid)
{
tracepoint(librados, rados_ioctx_pool_get_auid_enter, io);
int retval = -EOPNOTSUPP;
tracepoint(librados, rados_ioctx_pool_get_auid_exit, retval, *auid);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_get_auid);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_requires_alignment)(
rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_pool_requires_alignment_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->client->pool_requires_alignment(ctx->get_id());
tracepoint(librados, rados_ioctx_pool_requires_alignment_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_requires_alignment);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_requires_alignment2)(
rados_ioctx_t io,
int *req)
{
tracepoint(librados, rados_ioctx_pool_requires_alignment_enter2, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
bool requires_alignment;
int retval = ctx->client->pool_requires_alignment2(ctx->get_id(),
&requires_alignment);
tracepoint(librados, rados_ioctx_pool_requires_alignment_exit2, retval,
requires_alignment);
if (req)
*req = requires_alignment;
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_requires_alignment2);
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_required_alignment)(
rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_pool_required_alignment_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
uint64_t retval = ctx->client->pool_required_alignment(ctx->get_id());
tracepoint(librados, rados_ioctx_pool_required_alignment_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_required_alignment);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_required_alignment2)(
rados_ioctx_t io,
uint64_t *alignment)
{
tracepoint(librados, rados_ioctx_pool_required_alignment_enter2, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->client->pool_required_alignment2(ctx->get_id(),
alignment);
tracepoint(librados, rados_ioctx_pool_required_alignment_exit2, retval,
*alignment);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_required_alignment2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_locator_set_key)(
rados_ioctx_t io,
const char *key)
{
tracepoint(librados, rados_ioctx_locator_set_key_enter, io, key);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (key)
ctx->oloc.key = key;
else
ctx->oloc.key = "";
tracepoint(librados, rados_ioctx_locator_set_key_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_locator_set_key);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_set_namespace)(
rados_ioctx_t io,
const char *nspace)
{
tracepoint(librados, rados_ioctx_set_namespace_enter, io, nspace);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (nspace)
ctx->oloc.nspace = nspace;
else
ctx->oloc.nspace = "";
tracepoint(librados, rados_ioctx_set_namespace_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_set_namespace);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_namespace)(
rados_ioctx_t io,
char *s,
unsigned maxlen)
{
tracepoint(librados, rados_ioctx_get_namespace_enter, io, maxlen);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
auto length = ctx->oloc.nspace.length();
if (length >= maxlen) {
tracepoint(librados, rados_ioctx_get_namespace_exit, -ERANGE, "");
return -ERANGE;
}
strcpy(s, ctx->oloc.nspace.c_str());
int retval = (int)length;
tracepoint(librados, rados_ioctx_get_namespace_exit, retval, s);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_namespace);
extern "C" rados_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_cluster)(
rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_get_cluster_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
rados_t retval = (rados_t)ctx->client;
tracepoint(librados, rados_ioctx_get_cluster_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_cluster);
extern "C" int64_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_id)(
rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_get_id_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int64_t retval = ctx->get_id();
tracepoint(librados, rados_ioctx_get_id_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_id);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_pool_name)(
rados_ioctx_t io,
char *s,
unsigned maxlen)
{
tracepoint(librados, rados_ioctx_get_pool_name_enter, io, maxlen);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::string pool_name;
int err = ctx->client->pool_get_name(ctx->get_id(), &pool_name);
if (err) {
tracepoint(librados, rados_ioctx_get_pool_name_exit, err, "");
return err;
}
if (pool_name.length() >= maxlen) {
tracepoint(librados, rados_ioctx_get_pool_name_exit, -ERANGE, "");
return -ERANGE;
}
strcpy(s, pool_name.c_str());
int retval = pool_name.length();
tracepoint(librados, rados_ioctx_get_pool_name_exit, retval, s);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_pool_name);
// snaps
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_create)(
rados_ioctx_t io,
const char *snapname)
{
tracepoint(librados, rados_ioctx_snap_create_enter, io, snapname);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->snap_create(snapname);
tracepoint(librados, rados_ioctx_snap_create_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_create);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_remove)(
rados_ioctx_t io,
const char *snapname)
{
tracepoint(librados, rados_ioctx_snap_remove_enter, io, snapname);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->snap_remove(snapname);
tracepoint(librados, rados_ioctx_snap_remove_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_rollback)(
rados_ioctx_t io,
const char *oid,
const char *snapname)
{
tracepoint(librados, rados_ioctx_snap_rollback_enter, io, oid, snapname);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->rollback(oid, snapname);
tracepoint(librados, rados_ioctx_snap_rollback_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_rollback);
// Deprecated name kept for backward compatibility
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_rollback)(
rados_ioctx_t io,
const char *oid,
const char *snapname)
{
return LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_rollback)(io, oid, snapname);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_rollback);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_create)(
rados_ioctx_t io,
uint64_t *snapid)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_create_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->selfmanaged_snap_create(snapid);
tracepoint(librados, rados_ioctx_selfmanaged_snap_create_exit, retval, *snapid);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_create);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_aio_ioctx_selfmanaged_snap_create)(
rados_ioctx_t io,
rados_snap_t *snapid,
rados_completion_t completion)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_create_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
ctx->aio_selfmanaged_snap_create(snapid, c);
tracepoint(librados, rados_ioctx_selfmanaged_snap_create_exit, 0, 0);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_ioctx_selfmanaged_snap_create);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_remove)(
rados_ioctx_t io,
uint64_t snapid)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_enter, io, snapid);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->selfmanaged_snap_remove(snapid);
tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_remove);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_aio_ioctx_selfmanaged_snap_remove)(
rados_ioctx_t io,
rados_snap_t snapid,
rados_completion_t completion)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_enter, io, snapid);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
ctx->aio_selfmanaged_snap_remove(snapid, c);
tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_exit, 0);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_ioctx_selfmanaged_snap_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_rollback)(
rados_ioctx_t io,
const char *oid,
uint64_t snapid)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_rollback_enter, io, oid, snapid);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->selfmanaged_snap_rollback_object(oid, ctx->snapc, snapid);
tracepoint(librados, rados_ioctx_selfmanaged_snap_rollback_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_rollback);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_list)(
rados_ioctx_t io,
rados_snap_t *snaps,
int maxlen)
{
tracepoint(librados, rados_ioctx_snap_list_enter, io, maxlen);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
vector<uint64_t> snapvec;
int r = ctx->snap_list(&snapvec);
if (r < 0) {
tracepoint(librados, rados_ioctx_snap_list_exit, r, snaps, 0);
return r;
}
if ((int)snapvec.size() <= maxlen) {
for (unsigned i=0; i<snapvec.size(); i++) {
snaps[i] = snapvec[i];
}
int retval = snapvec.size();
tracepoint(librados, rados_ioctx_snap_list_exit, retval, snaps, retval);
return retval;
}
int retval = -ERANGE;
tracepoint(librados, rados_ioctx_snap_list_exit, retval, snaps, 0);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_list);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_lookup)(
rados_ioctx_t io,
const char *name,
rados_snap_t *id)
{
tracepoint(librados, rados_ioctx_snap_lookup_enter, io, name);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->snap_lookup(name, (uint64_t *)id);
tracepoint(librados, rados_ioctx_snap_lookup_exit, retval, *id);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_lookup);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_get_name)(
rados_ioctx_t io,
rados_snap_t id,
char *name,
int maxlen)
{
tracepoint(librados, rados_ioctx_snap_get_name_enter, io, id, maxlen);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::string sname;
int r = ctx->snap_get_name(id, &sname);
if (r < 0) {
tracepoint(librados, rados_ioctx_snap_get_name_exit, r, "");
return r;
}
if ((int)sname.length() >= maxlen) {
int retval = -ERANGE;
tracepoint(librados, rados_ioctx_snap_get_name_exit, retval, "");
return retval;
}
strncpy(name, sname.c_str(), maxlen);
tracepoint(librados, rados_ioctx_snap_get_name_exit, 0, name);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_get_name);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_get_stamp)(
rados_ioctx_t io,
rados_snap_t id,
time_t *t)
{
tracepoint(librados, rados_ioctx_snap_get_stamp_enter, io, id);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->snap_get_stamp(id, t);
tracepoint(librados, rados_ioctx_snap_get_stamp_exit, retval, *t);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_get_stamp);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cmpext)(
rados_ioctx_t io,
const char *o,
const char *cmp_buf,
size_t cmp_len,
uint64_t off)
{
tracepoint(librados, rados_cmpext_enter, io, o, cmp_buf, cmp_len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int ret;
object_t oid(o);
bufferlist cmp_bl;
cmp_bl.append(cmp_buf, cmp_len);
ret = ctx->cmpext(oid, off, cmp_bl);
tracepoint(librados, rados_cmpext_exit, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cmpext);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getxattr)(
rados_ioctx_t io,
const char *o,
const char *name,
char *buf,
size_t len)
{
tracepoint(librados, rados_getxattr_enter, io, o, name, len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int ret;
object_t oid(o);
bufferlist bl;
bl.push_back(buffer::create_static(len, buf));
ret = ctx->getxattr(oid, name, bl);
if (ret >= 0) {
if (bl.length() > len) {
tracepoint(librados, rados_getxattr_exit, -ERANGE, buf, 0);
return -ERANGE;
}
if (!bl.is_provided_buffer(buf))
bl.begin().copy(bl.length(), buf);
ret = bl.length();
}
tracepoint(librados, rados_getxattr_exit, ret, buf, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getxattrs)(
rados_ioctx_t io,
const char *oid,
rados_xattrs_iter_t *iter)
{
tracepoint(librados, rados_getxattrs_enter, io, oid);
  // use nothrow so the -ENOMEM check below is actually reachable
  librados::RadosXattrsIter *it = new (std::nothrow) librados::RadosXattrsIter();
if (!it) {
tracepoint(librados, rados_getxattrs_exit, -ENOMEM, NULL);
return -ENOMEM;
}
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t obj(oid);
int ret = ctx->getxattrs(obj, it->attrset);
if (ret) {
delete it;
tracepoint(librados, rados_getxattrs_exit, ret, NULL);
return ret;
}
it->i = it->attrset.begin();
*iter = it;
tracepoint(librados, rados_getxattrs_exit, 0, *iter);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattrs);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getxattrs_next)(
rados_xattrs_iter_t iter,
const char **name,
const char **val,
size_t *len)
{
tracepoint(librados, rados_getxattrs_next_enter, iter);
librados::RadosXattrsIter *it = static_cast<librados::RadosXattrsIter*>(iter);
if (it->val) {
free(it->val);
it->val = NULL;
}
if (it->i == it->attrset.end()) {
*name = NULL;
*val = NULL;
*len = 0;
tracepoint(librados, rados_getxattrs_next_exit, 0, NULL, NULL, 0);
return 0;
}
const std::string &s(it->i->first);
*name = s.c_str();
bufferlist &bl(it->i->second);
size_t bl_len = bl.length();
if (!bl_len) {
// malloc(0) is not guaranteed to return a valid pointer
*val = (char *)NULL;
} else {
it->val = (char*)malloc(bl_len);
if (!it->val) {
tracepoint(librados, rados_getxattrs_next_exit, -ENOMEM, *name, NULL, 0);
return -ENOMEM;
}
memcpy(it->val, bl.c_str(), bl_len);
*val = it->val;
}
*len = bl_len;
++it->i;
tracepoint(librados, rados_getxattrs_next_exit, 0, *name, *val, *len);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattrs_next);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_getxattrs_end)(
rados_xattrs_iter_t iter)
{
tracepoint(librados, rados_getxattrs_end_enter, iter);
librados::RadosXattrsIter *it = static_cast<librados::RadosXattrsIter*>(iter);
delete it;
tracepoint(librados, rados_getxattrs_end_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattrs_end);
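/*
 * Illustrative iteration sketch (not part of librados; "io" and "oid"
 * are assumed to exist): the iterator yields (name, value) pairs until
 * name comes back NULL, and must always be released with
 * rados_getxattrs_end().
 *
 *   rados_xattrs_iter_t iter;
 *   if (rados_getxattrs(io, oid, &iter) == 0) {
 *     const char *name, *val;
 *     size_t len;
 *     while (rados_getxattrs_next(iter, &name, &val, &len) == 0 && name) {
 *       // use name and val[0..len)
 *     }
 *     rados_getxattrs_end(iter);
 *   }
 */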
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_setxattr)(
rados_ioctx_t io,
const char *o,
const char *name,
const char *buf,
size_t len)
{
tracepoint(librados, rados_setxattr_enter, io, o, name, buf, len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->setxattr(oid, name, bl);
tracepoint(librados, rados_setxattr_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_setxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_rmxattr)(
rados_ioctx_t io,
const char *o,
const char *name)
{
tracepoint(librados, rados_rmxattr_enter, io, o, name);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->rmxattr(oid, name);
tracepoint(librados, rados_rmxattr_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_rmxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_stat)(
rados_ioctx_t io,
const char *o,
uint64_t *psize,
time_t *pmtime)
{
tracepoint(librados, rados_stat_enter, io, o);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->stat(oid, psize, pmtime);
tracepoint(librados, rados_stat_exit, retval, psize, pmtime);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_stat);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_stat2)(
rados_ioctx_t io,
const char *o,
uint64_t *psize,
struct timespec *pmtime)
{
tracepoint(librados, rados_stat2_enter, io, o);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->stat2(oid, psize, pmtime);
tracepoint(librados, rados_stat2_exit, retval, psize, pmtime);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_stat2);
extern "C" int LIBRADOS_C_API_BASE_F(rados_tmap_update)(
rados_ioctx_t io,
const char *o,
const char *cmdbuf,
size_t cmdbuflen)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist cmdbl;
cmdbl.append(cmdbuf, cmdbuflen);
return ctx->tmap_update(oid, cmdbl);
}
LIBRADOS_C_API_BASE(rados_tmap_update);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_tmap_update)(
rados_ioctx_t io,
const char *o,
const char *cmdbuf,
size_t cmdbuflen)
{
return -ENOTSUP;
}
LIBRADOS_C_API_DEFAULT(rados_tmap_update, 14.2.0);
extern "C" int LIBRADOS_C_API_BASE_F(rados_tmap_put)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t buflen)
{
bufferlist bl;
bl.append(buf, buflen);
bufferlist header;
std::map<std::string, bufferlist> m;
  bufferlist::const_iterator bl_it = bl.begin();
  try {
    decode(header, bl_it);
    decode(m, bl_it);
  } catch (const buffer::error &) {
    return -EINVAL; // malformed tmap buffer; don't throw across the C ABI
  }
bufferlist out_bl;
encode(header, out_bl);
encode(m, out_bl);
return LIBRADOS_C_API_DEFAULT_F(rados_write_full)(
io, o, out_bl.c_str(), out_bl.length());
}
LIBRADOS_C_API_BASE(rados_tmap_put);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_tmap_put)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t buflen)
{
  return -ENOTSUP;
}
LIBRADOS_C_API_DEFAULT(rados_tmap_put, 14.2.0);
extern "C" int LIBRADOS_C_API_BASE_F(rados_tmap_get)(
rados_ioctx_t io,
const char *o,
char *buf,
size_t buflen)
{
return LIBRADOS_C_API_DEFAULT_F(rados_read)(io, o, buf, buflen, 0);
}
LIBRADOS_C_API_BASE(rados_tmap_get);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_tmap_get)(
rados_ioctx_t io,
const char *o,
char *buf,
size_t buflen)
{
  return -ENOTSUP;
}
LIBRADOS_C_API_DEFAULT(rados_tmap_get, 14.2.0);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_exec)(
rados_ioctx_t io,
const char *o,
const char *cls,
const char *method,
const char *inbuf,
size_t in_len,
char *buf,
size_t out_len)
{
tracepoint(librados, rados_exec_enter, io, o, cls, method, inbuf, in_len, out_len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist inbl, outbl;
int ret;
inbl.append(inbuf, in_len);
ret = ctx->exec(oid, cls, method, inbl, outbl);
if (ret >= 0) {
if (outbl.length()) {
if (outbl.length() > out_len) {
tracepoint(librados, rados_exec_exit, -ERANGE, buf, 0);
return -ERANGE;
}
outbl.begin().copy(outbl.length(), buf);
ret = outbl.length(); // hrm :/
}
}
tracepoint(librados, rados_exec_exit, ret, buf, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_exec);
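/*
 * Illustrative call sketch (not part of librados; the object, class and
 * method names are placeholders): rados_exec() invokes an object-class
 * method and, like rados_getxattr() above, reports -ERANGE when the
 * reply does not fit in the output buffer.
 *
 *   char out[128];
 *   int r = rados_exec(io, "myobj", "myclass", "mymethod",
 *                      in_buf, in_len, out, sizeof(out));
 *   if (r >= 0) {
 *     // out[0..r) holds the method's output
 *   }
 */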
extern "C" rados_object_list_cursor LIBRADOS_C_API_DEFAULT_F(rados_object_list_begin)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
hobject_t *result = new hobject_t(ctx->objecter->enumerate_objects_begin());
return (rados_object_list_cursor)result;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_begin);
extern "C" rados_object_list_cursor LIBRADOS_C_API_DEFAULT_F(rados_object_list_end)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
hobject_t *result = new hobject_t(ctx->objecter->enumerate_objects_end());
return (rados_object_list_cursor)result;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_end);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_object_list_is_end)(
rados_ioctx_t io,
rados_object_list_cursor cur)
{
hobject_t *hobj = (hobject_t*)cur;
return hobj->is_max();
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_is_end);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_object_list_cursor_free)(
rados_ioctx_t io,
rados_object_list_cursor cur)
{
hobject_t *hobj = (hobject_t*)cur;
delete hobj;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_cursor_free);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_object_list_cursor_cmp)(
rados_ioctx_t io,
rados_object_list_cursor lhs_cur,
rados_object_list_cursor rhs_cur)
{
hobject_t *lhs = (hobject_t*)lhs_cur;
hobject_t *rhs = (hobject_t*)rhs_cur;
return cmp(*lhs, *rhs);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_cursor_cmp);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_object_list)(rados_ioctx_t io,
const rados_object_list_cursor start,
const rados_object_list_cursor finish,
const size_t result_item_count,
const char *filter_buf,
const size_t filter_buf_len,
rados_object_list_item *result_items,
rados_object_list_cursor *next)
{
ceph_assert(next);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
// Zero out items so that they will be safe to free later
// FIPS zeroization audit 20191116: this memset is not security related.
memset(result_items, 0, sizeof(rados_object_list_item) * result_item_count);
bufferlist filter_bl;
if (filter_buf != nullptr) {
filter_bl.append(filter_buf, filter_buf_len);
}
ceph::async::waiter<boost::system::error_code,
std::vector<librados::ListObjectImpl>,
hobject_t> w;
ctx->objecter->enumerate_objects<librados::ListObjectImpl>(
ctx->poolid,
ctx->oloc.nspace,
*((hobject_t*)start),
*((hobject_t*)finish),
result_item_count,
filter_bl,
w);
hobject_t *next_hobj = (hobject_t*)(*next);
ceph_assert(next_hobj);
auto [ec, result, next_hash] = w.wait();
if (ec) {
*next_hobj = hobject_t::get_max();
return ceph::from_error_code(ec);
}
ceph_assert(result.size() <= result_item_count); // Don't overflow!
int k = 0;
for (auto i = result.begin(); i != result.end(); ++i) {
rados_object_list_item &item = result_items[k++];
do_out_buffer(i->oid, &item.oid, &item.oid_length);
do_out_buffer(i->nspace, &item.nspace, &item.nspace_length);
do_out_buffer(i->locator, &item.locator, &item.locator_length);
}
*next_hobj = next_hash;
return result.size();
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_object_list_free)(
const size_t result_size,
rados_object_list_item *results)
{
ceph_assert(results);
for (unsigned int i = 0; i < result_size; ++i) {
LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(results[i].oid);
LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(results[i].locator);
LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(results[i].nspace);
}
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_free);
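/*
 * Illustrative enumeration sketch (not part of librados; "io" is an
 * open ioctx): the begin/end cursors bound the listing,
 * rados_object_list() fills a batch and advances the next cursor, and
 * both the items and the cursors must be freed by the caller.
 *
 *   rados_object_list_cursor cur = rados_object_list_begin(io);
 *   rados_object_list_cursor fin = rados_object_list_end(io);
 *   rados_object_list_item items[32];
 *   while (!rados_object_list_is_end(io, cur)) {
 *     int n = rados_object_list(io, cur, fin, 32, NULL, 0, items, &cur);
 *     if (n < 0)
 *       break;
 *     // consume items[0..n)
 *     rados_object_list_free(n, items);
 *   }
 *   rados_object_list_cursor_free(io, cur);
 *   rados_object_list_cursor_free(io, fin);
 */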
/* list objects */
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_open)(
rados_ioctx_t io,
rados_list_ctx_t *listh)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
tracepoint(librados, rados_nobjects_list_open_enter, io);
Objecter::NListContext *h = new Objecter::NListContext;
h->pool_id = ctx->poolid;
h->pool_snap_seq = ctx->snap_seq;
  h->nspace = ctx->oloc.nspace; // nspace must be set now that the legacy listing API is gone
*listh = (void *)new librados::ObjListCtx(ctx, h);
tracepoint(librados, rados_nobjects_list_open_exit, 0, *listh);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_open);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_close)(
rados_list_ctx_t h)
{
tracepoint(librados, rados_nobjects_list_close_enter, h);
librados::ObjListCtx *lh = (librados::ObjListCtx *)h;
delete lh;
tracepoint(librados, rados_nobjects_list_close_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_close);
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_seek)(
rados_list_ctx_t listctx,
uint32_t pos)
{
librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
tracepoint(librados, rados_nobjects_list_seek_enter, listctx, pos);
uint32_t r = lh->ctx->nlist_seek(lh->nlc, pos);
tracepoint(librados, rados_nobjects_list_seek_exit, r);
return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_seek);
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_seek_cursor)(
rados_list_ctx_t listctx,
rados_object_list_cursor cursor)
{
librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
tracepoint(librados, rados_nobjects_list_seek_cursor_enter, listctx);
uint32_t r = lh->ctx->nlist_seek(lh->nlc, cursor);
tracepoint(librados, rados_nobjects_list_seek_cursor_exit, r);
return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_seek_cursor);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_get_cursor)(
rados_list_ctx_t listctx,
rados_object_list_cursor *cursor)
{
librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
tracepoint(librados, rados_nobjects_list_get_cursor_enter, listctx);
*cursor = lh->ctx->nlist_get_cursor(lh->nlc);
tracepoint(librados, rados_nobjects_list_get_cursor_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_get_cursor);
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_get_pg_hash_position)(
rados_list_ctx_t listctx)
{
librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
tracepoint(librados, rados_nobjects_list_get_pg_hash_position_enter, listctx);
uint32_t retval = lh->nlc->get_pg_hash_position();
tracepoint(librados, rados_nobjects_list_get_pg_hash_position_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_get_pg_hash_position);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_next)(
rados_list_ctx_t listctx,
const char **entry,
const char **key,
const char **nspace)
{
tracepoint(librados, rados_nobjects_list_next_enter, listctx);
  // an int, not uint32_t: next2 returns negative error codes (e.g. -ENOENT)
  int retval = LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_next2)(
      listctx, entry, key, nspace, NULL, NULL, NULL);
  tracepoint(librados, rados_nobjects_list_next_exit, retval,
             retval == 0 ? *entry : NULL, key, nspace);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_next);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_next2)(
rados_list_ctx_t listctx,
const char **entry,
const char **key,
const char **nspace,
size_t *entry_size,
size_t *key_size,
size_t *nspace_size)
{
tracepoint(librados, rados_nobjects_list_next2_enter, listctx);
librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
Objecter::NListContext *h = lh->nlc;
// if the list is non-empty, this method has been called before
if (!h->list.empty())
// so let's kill the previously-returned object
h->list.pop_front();
if (h->list.empty()) {
int ret = lh->ctx->nlist(lh->nlc, RADOS_LIST_MAX_ENTRIES);
if (ret < 0) {
tracepoint(librados, rados_nobjects_list_next2_exit, ret, NULL, NULL, NULL, NULL, NULL, NULL);
return ret;
}
if (h->list.empty()) {
tracepoint(librados, rados_nobjects_list_next2_exit, -ENOENT, NULL, NULL, NULL, NULL, NULL, NULL);
return -ENOENT;
}
}
*entry = h->list.front().oid.c_str();
if (key) {
if (h->list.front().locator.size())
*key = h->list.front().locator.c_str();
else
*key = NULL;
}
if (nspace)
*nspace = h->list.front().nspace.c_str();
if (entry_size)
*entry_size = h->list.front().oid.size();
if (key_size)
*key_size = h->list.front().locator.size();
if (nspace_size)
*nspace_size = h->list.front().nspace.size();
tracepoint(librados, rados_nobjects_list_next2_exit, 0, entry, key, nspace,
entry_size, key_size, nspace_size);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_next2);
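/*
 * Illustrative listing sketch (not part of librados; "io" is an open
 * ioctx): the returned entry/key/nspace pointers refer to storage owned
 * by the list handle and are only valid until the next call; -ENOENT
 * marks the end of the listing.
 *
 *   rados_list_ctx_t lctx;
 *   if (rados_nobjects_list_open(io, &lctx) == 0) {
 *     const char *entry, *key, *nspace;
 *     while (rados_nobjects_list_next(lctx, &entry, &key, &nspace) == 0) {
 *       // entry is the oid; key is NULL unless a locator is set
 *     }
 *     rados_nobjects_list_close(lctx);
 *   }
 */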
/*
 * removed legacy v2 list objects stubs
 *
 * these return -ENOTSUP where possible.
 */
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_objects_list_open)(
rados_ioctx_t io,
rados_list_ctx_t *ctx)
{
return -ENOTSUP;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_open);
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_objects_list_get_pg_hash_position)(
rados_list_ctx_t ctx)
{
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_get_pg_hash_position);
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_objects_list_seek)(
rados_list_ctx_t ctx,
uint32_t pos)
{
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_seek);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_objects_list_next)(
rados_list_ctx_t ctx,
const char **entry,
const char **key)
{
return -ENOTSUP;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_next);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_objects_list_close)(
rados_list_ctx_t ctx)
{
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_close);
// -------------------------
// aio
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_create_completion)(
void *cb_arg,
rados_callback_t cb_complete,
rados_callback_t cb_safe,
rados_completion_t *pc)
{
tracepoint(librados, rados_aio_create_completion_enter, cb_arg, cb_complete, cb_safe);
librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
if (cb_complete)
c->set_complete_callback(cb_arg, cb_complete);
if (cb_safe)
c->set_safe_callback(cb_arg, cb_safe);
*pc = c;
tracepoint(librados, rados_aio_create_completion_exit, 0, *pc);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_create_completion);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_create_completion2)(
void *cb_arg,
rados_callback_t cb_complete,
rados_completion_t *pc)
{
tracepoint(librados, rados_aio_create_completion2_enter, cb_arg, cb_complete);
librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
if (cb_complete)
c->set_complete_callback(cb_arg, cb_complete);
*pc = c;
tracepoint(librados, rados_aio_create_completion2_exit, 0, *pc);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_create_completion2);
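/*
 * Illustrative lifecycle sketch (not part of librados; "io", "buf" and
 * "len" are assumed caller state): a completion is created, handed to a
 * single aio call (rados_aio_write() below), waited on, inspected, and
 * released exactly once.
 *
 *   rados_completion_t comp;
 *   rados_aio_create_completion2(NULL, NULL, &comp);
 *   rados_aio_write(io, "myobj", comp, buf, len, 0);
 *   rados_aio_wait_for_complete(comp);
 *   int r = rados_aio_get_return_value(comp);
 *   rados_aio_release(comp);
 */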
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_complete)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_complete_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_complete();
tracepoint(librados, rados_aio_wait_for_complete_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_complete);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_safe)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_safe_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_complete();
tracepoint(librados, rados_aio_wait_for_safe_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_safe);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_complete)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_complete_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_complete();
tracepoint(librados, rados_aio_is_complete_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_complete);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_safe)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_safe_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_safe();
tracepoint(librados, rados_aio_is_safe_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_safe);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_complete_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_complete_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_complete_and_cb();
tracepoint(librados, rados_aio_wait_for_complete_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_complete_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_safe_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_safe_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_safe_and_cb();
tracepoint(librados, rados_aio_wait_for_safe_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_safe_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_complete_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_complete_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_complete_and_cb();
tracepoint(librados, rados_aio_is_complete_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_complete_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_safe_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_safe_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_safe_and_cb();
tracepoint(librados, rados_aio_is_safe_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_safe_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_get_return_value)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_get_return_value_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->get_return_value();
tracepoint(librados, rados_aio_get_return_value_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_get_return_value);
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_aio_get_version)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_get_version_enter, c);
uint64_t retval = ((librados::AioCompletionImpl*)c)->get_version();
tracepoint(librados, rados_aio_get_version_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_get_version);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_aio_release)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_release_enter, c);
((librados::AioCompletionImpl*)c)->put();
tracepoint(librados, rados_aio_release_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_release);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_read)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
char *buf, size_t len, uint64_t off)
{
tracepoint(librados, rados_aio_read_enter, io, o, completion, len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_read(oid, (librados::AioCompletionImpl*)completion,
buf, len, off, ctx->snap_seq);
tracepoint(librados, rados_aio_read_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_read);
#ifdef WITH_BLKIN
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_read_traced)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
char *buf, size_t len, uint64_t off,
struct blkin_trace_info *info)
{
tracepoint(librados, rados_aio_read_enter, io, o, completion, len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_read(oid, (librados::AioCompletionImpl*)completion,
buf, len, off, ctx->snap_seq, info);
tracepoint(librados, rados_aio_read_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_read_traced);
#endif
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len, uint64_t off)
{
tracepoint(librados, rados_aio_write_enter, io, o, completion, buf, len, off);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_write(oid, (librados::AioCompletionImpl*)completion,
bl, len, off);
tracepoint(librados, rados_aio_write_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write);
#ifdef WITH_BLKIN
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_traced)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len, uint64_t off,
struct blkin_trace_info *info)
{
tracepoint(librados, rados_aio_write_enter, io, o, completion, buf, len, off);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_write(oid, (librados::AioCompletionImpl*)completion,
bl, len, off, info);
tracepoint(librados, rados_aio_write_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_traced);
#endif
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_append)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len)
{
tracepoint(librados, rados_aio_append_enter, io, o, completion, buf, len);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_append(oid, (librados::AioCompletionImpl*)completion,
bl, len);
tracepoint(librados, rados_aio_append_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_append);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_full)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len)
{
tracepoint(librados, rados_aio_write_full_enter, io, o, completion, buf, len);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_write_full(oid, (librados::AioCompletionImpl*)completion, bl);
tracepoint(librados, rados_aio_write_full_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_full);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_writesame)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t data_len,
size_t write_len, uint64_t off)
{
tracepoint(librados, rados_aio_writesame_enter, io, o, completion, buf,
data_len, write_len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, data_len);
  int retval = ctx->aio_writesame(oid, (librados::AioCompletionImpl*)completion,
                                  bl, write_len, off);
tracepoint(librados, rados_aio_writesame_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_writesame);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_remove)(
rados_ioctx_t io, const char *o,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_remove_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_remove(oid, (librados::AioCompletionImpl*)completion);
tracepoint(librados, rados_aio_remove_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_flush_async)(
rados_ioctx_t io,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_flush_async_enter, io, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->flush_aio_writes_async((librados::AioCompletionImpl*)completion);
tracepoint(librados, rados_aio_flush_async_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_flush_async);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_flush)(rados_ioctx_t io)
{
tracepoint(librados, rados_aio_flush_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->flush_aio_writes();
tracepoint(librados, rados_aio_flush_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_flush);
struct AioGetxattrData {
AioGetxattrData(char* buf, rados_completion_t c, size_t l) :
user_buf(buf), len(l), user_completion((librados::AioCompletionImpl*)c) {}
bufferlist bl;
char* user_buf;
size_t len;
struct librados::CB_AioCompleteAndSafe user_completion;
};
static void rados_aio_getxattr_complete(rados_completion_t c, void *arg) {
AioGetxattrData *cdata = reinterpret_cast<AioGetxattrData*>(arg);
int rc = LIBRADOS_C_API_DEFAULT_F(rados_aio_get_return_value)(c);
if (rc >= 0) {
if (cdata->bl.length() > cdata->len) {
rc = -ERANGE;
} else {
if (!cdata->bl.is_provided_buffer(cdata->user_buf))
cdata->bl.begin().copy(cdata->bl.length(), cdata->user_buf);
rc = cdata->bl.length();
}
}
cdata->user_completion(rc);
reinterpret_cast<librados::AioCompletionImpl*>(c)->put();
delete cdata;
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_getxattr)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *name, char *buf, size_t len)
{
tracepoint(librados, rados_aio_getxattr_enter, io, o, completion, name, len);
// create data object to be passed to async callback
  AioGetxattrData *cdata = new (std::nothrow) AioGetxattrData(buf, completion, len);
if (!cdata) {
tracepoint(librados, rados_aio_getxattr_exit, -ENOMEM, NULL, 0);
return -ENOMEM;
}
cdata->bl.push_back(buffer::create_static(len, buf));
// create completion callback
librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
c->set_complete_callback(cdata, rados_aio_getxattr_complete);
// call async getxattr of IoCtx
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int ret = ctx->aio_getxattr(oid, c, name, cdata->bl);
tracepoint(librados, rados_aio_getxattr_exit, ret, buf, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_getxattr);
namespace {
struct AioGetxattrsData {
AioGetxattrsData(rados_completion_t c, rados_xattrs_iter_t *_iter) :
iter(_iter), user_completion((librados::AioCompletionImpl*)c) {
it = new librados::RadosXattrsIter();
}
~AioGetxattrsData() {
if (it) delete it;
}
librados::RadosXattrsIter *it;
rados_xattrs_iter_t *iter;
struct librados::CB_AioCompleteAndSafe user_completion;
};
}
static void rados_aio_getxattrs_complete(rados_completion_t c, void *arg) {
AioGetxattrsData *cdata = reinterpret_cast<AioGetxattrsData*>(arg);
int rc = LIBRADOS_C_API_DEFAULT_F(rados_aio_get_return_value)(c);
if (rc) {
cdata->user_completion(rc);
} else {
cdata->it->i = cdata->it->attrset.begin();
*cdata->iter = cdata->it;
    cdata->it = nullptr;
cdata->user_completion(0);
}
reinterpret_cast<librados::AioCompletionImpl*>(c)->put();
delete cdata;
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_getxattrs)(
rados_ioctx_t io, const char *oid,
rados_completion_t completion,
rados_xattrs_iter_t *iter)
{
tracepoint(librados, rados_aio_getxattrs_enter, io, oid, completion);
// create data object to be passed to async callback
  AioGetxattrsData *cdata = new (std::nothrow) AioGetxattrsData(completion, iter);
  if (!cdata) {
    tracepoint(librados, rados_aio_getxattrs_exit, -ENOMEM, NULL);
    return -ENOMEM;
  }
}
// create completion callback
librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
c->set_complete_callback(cdata, rados_aio_getxattrs_complete);
// call async getxattrs of IoCtx
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t obj(oid);
int ret = ctx->aio_getxattrs(obj, c, cdata->it->attrset);
tracepoint(librados, rados_aio_getxattrs_exit, ret, cdata->it);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_getxattrs);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_setxattr)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *name, const char *buf, size_t len)
{
tracepoint(librados, rados_aio_setxattr_enter, io, o, completion, name, buf, len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_setxattr(oid, (librados::AioCompletionImpl*)completion, name, bl);
tracepoint(librados, rados_aio_setxattr_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_setxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_rmxattr)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *name)
{
tracepoint(librados, rados_aio_rmxattr_enter, io, o, completion, name);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_rmxattr(oid, (librados::AioCompletionImpl*)completion, name);
tracepoint(librados, rados_aio_rmxattr_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_rmxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_stat)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *psize, time_t *pmtime)
{
tracepoint(librados, rados_aio_stat_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_stat(oid, (librados::AioCompletionImpl*)completion,
psize, pmtime);
tracepoint(librados, rados_aio_stat_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_stat);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_stat2)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *psize, struct timespec *pmtime)
{
tracepoint(librados, rados_aio_stat2_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_stat2(oid, (librados::AioCompletionImpl*)completion,
psize, pmtime);
tracepoint(librados, rados_aio_stat2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_stat2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_cmpext)(
rados_ioctx_t io, const char *o,
rados_completion_t completion, const char *cmp_buf,
size_t cmp_len, uint64_t off)
{
tracepoint(librados, rados_aio_cmpext_enter, io, o, completion, cmp_buf,
cmp_len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_cmpext(oid, (librados::AioCompletionImpl*)completion,
cmp_buf, cmp_len, off);
tracepoint(librados, rados_aio_cmpext_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_cmpext);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_cancel)(
rados_ioctx_t io,
rados_completion_t completion)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->aio_cancel((librados::AioCompletionImpl*)completion);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_cancel);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_exec)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *cls, const char *method,
const char *inbuf, size_t in_len,
char *buf, size_t out_len)
{
tracepoint(librados, rados_aio_exec_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist inbl;
inbl.append(inbuf, in_len);
int retval = ctx->aio_exec(oid, (librados::AioCompletionImpl*)completion,
cls, method, inbl, buf, out_len);
tracepoint(librados, rados_aio_exec_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_exec);
struct C_WatchCB : public librados::WatchCtx {
rados_watchcb_t wcb;
void *arg;
C_WatchCB(rados_watchcb_t _wcb, void *_arg) : wcb(_wcb), arg(_arg) {}
void notify(uint8_t opcode, uint64_t ver, bufferlist& bl) override {
wcb(opcode, ver, arg);
}
};
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch)(
rados_ioctx_t io, const char *o, uint64_t ver,
uint64_t *handle,
rados_watchcb_t watchcb, void *arg)
{
tracepoint(librados, rados_watch_enter, io, o, ver, watchcb, arg);
uint64_t *cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
C_WatchCB *wc = new C_WatchCB(watchcb, arg);
int retval = ctx->watch(oid, cookie, wc, NULL, true);
tracepoint(librados, rados_watch_exit, retval, *handle);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch);
struct C_WatchCB2 : public librados::WatchCtx2 {
rados_watchcb2_t wcb;
rados_watcherrcb_t errcb;
void *arg;
C_WatchCB2(rados_watchcb2_t _wcb,
rados_watcherrcb_t _errcb,
void *_arg) : wcb(_wcb), errcb(_errcb), arg(_arg) {}
void handle_notify(uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_gid,
bufferlist& bl) override {
wcb(arg, notify_id, cookie, notifier_gid, bl.c_str(), bl.length());
}
void handle_error(uint64_t cookie, int err) override {
if (errcb)
errcb(arg, cookie, err);
}
};
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch3)(
rados_ioctx_t io, const char *o, uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb,
uint32_t timeout,
void *arg)
{
tracepoint(librados, rados_watch3_enter, io, o, handle, watchcb, timeout, arg);
int ret;
if (!watchcb || !o || !handle) {
ret = -EINVAL;
} else {
uint64_t *cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
C_WatchCB2 *wc = new C_WatchCB2(watchcb, watcherrcb, arg);
ret = ctx->watch(oid, cookie, NULL, wc, timeout, true);
}
tracepoint(librados, rados_watch3_exit, ret, handle ? *handle : 0);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch3);
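/*
 * Illustrative watch sketch (not part of librados; the object name and
 * callback body are placeholders): the callback runs on a librados
 * thread and should ack each notify; the watch is torn down with
 * rados_unwatch2().
 *
 *   static void my_watchcb(void *arg, uint64_t notify_id, uint64_t cookie,
 *                          uint64_t notifier_id, void *data, size_t data_len)
 *   {
 *     rados_ioctx_t io = (rados_ioctx_t)arg;
 *     rados_notify_ack(io, "myobj", notify_id, cookie, NULL, 0);
 *   }
 *
 *   uint64_t handle;
 *   rados_watch3(io, "myobj", &handle, my_watchcb, NULL, 30, io);
 *   ...
 *   rados_unwatch2(io, handle);
 */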
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch2)(
rados_ioctx_t io, const char *o, uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb,
void *arg)
{
return LIBRADOS_C_API_DEFAULT_F(rados_watch3)(
io, o, handle, watchcb, watcherrcb, 0, arg);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_watch2)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb,
uint32_t timeout, void *arg)
{
tracepoint(librados, rados_aio_watch2_enter, io, o, completion, handle, watchcb, timeout, arg);
int ret;
if (!completion || !watchcb || !o || !handle) {
ret = -EINVAL;
} else {
uint64_t *cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
librados::AioCompletionImpl *c =
reinterpret_cast<librados::AioCompletionImpl*>(completion);
C_WatchCB2 *wc = new C_WatchCB2(watchcb, watcherrcb, arg);
ret = ctx->aio_watch(oid, c, cookie, NULL, wc, timeout, true);
}
tracepoint(librados, rados_aio_watch2_exit, ret, handle ? *handle : 0);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_watch2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_watch)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb, void *arg)
{
return LIBRADOS_C_API_DEFAULT_F(rados_aio_watch2)(
io, o, completion, handle, watchcb, watcherrcb, 0, arg);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_watch);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_unwatch)(
rados_ioctx_t io,
const char *o,
uint64_t handle)
{
tracepoint(librados, rados_unwatch_enter, io, o, handle);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->unwatch(cookie);
tracepoint(librados, rados_unwatch_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unwatch);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_unwatch2)(
rados_ioctx_t io,
uint64_t handle)
{
tracepoint(librados, rados_unwatch2_enter, io, handle);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->unwatch(cookie);
tracepoint(librados, rados_unwatch2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unwatch2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_unwatch)(
rados_ioctx_t io, uint64_t handle,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_unwatch_enter, io, handle, completion);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c =
reinterpret_cast<librados::AioCompletionImpl*>(completion);
int retval = ctx->aio_unwatch(cookie, c);
tracepoint(librados, rados_aio_unwatch_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_unwatch);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch_check)(
rados_ioctx_t io,
uint64_t handle)
{
tracepoint(librados, rados_watch_check_enter, io, handle);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->watch_check(cookie);
tracepoint(librados, rados_watch_check_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch_check);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_notify)(
rados_ioctx_t io, const char *o,
uint64_t ver, const char *buf, int buf_len)
{
tracepoint(librados, rados_notify_enter, io, o, ver, buf, buf_len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bufferptr p = buffer::create(buf_len);
memcpy(p.c_str(), buf, buf_len);
bl.push_back(p);
}
int retval = ctx->notify(oid, bl, 0, NULL, NULL, NULL);
tracepoint(librados, rados_notify_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_notify);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_notify2)(
rados_ioctx_t io, const char *o,
const char *buf, int buf_len,
uint64_t timeout_ms,
char **reply_buffer,
size_t *reply_buffer_len)
{
tracepoint(librados, rados_notify2_enter, io, o, buf, buf_len, timeout_ms);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bufferptr p = buffer::create(buf_len);
memcpy(p.c_str(), buf, buf_len);
bl.push_back(p);
}
int ret = ctx->notify(oid, bl, timeout_ms, NULL, reply_buffer, reply_buffer_len);
tracepoint(librados, rados_notify2_exit, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_notify2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_decode_notify_response)(
char *reply_buffer, size_t reply_buffer_len,
struct notify_ack_t **acks, size_t *nr_acks,
struct notify_timeout_t **timeouts, size_t *nr_timeouts)
{
if (!reply_buffer || !reply_buffer_len) {
return -EINVAL;
}
bufferlist bl;
bl.append(reply_buffer, reply_buffer_len);
map<pair<uint64_t,uint64_t>,bufferlist> acked;
set<pair<uint64_t,uint64_t>> missed;
auto iter = bl.cbegin();
decode(acked, iter);
decode(missed, iter);
*acks = nullptr;
*nr_acks = acked.size();
if (*nr_acks) {
*acks = new notify_ack_t[*nr_acks];
struct notify_ack_t *ack = *acks;
for (auto &[who, payload] : acked) {
ack->notifier_id = who.first;
ack->cookie = who.second;
ack->payload = nullptr;
ack->payload_len = payload.length();
if (ack->payload_len) {
ack->payload = (char *)malloc(ack->payload_len);
memcpy(ack->payload, payload.c_str(), ack->payload_len);
}
ack++;
}
}
*timeouts = nullptr;
*nr_timeouts = missed.size();
if (*nr_timeouts) {
*timeouts = new notify_timeout_t[*nr_timeouts];
struct notify_timeout_t *timeout = *timeouts;
for (auto &[notifier_id, cookie] : missed) {
timeout->notifier_id = notifier_id;
timeout->cookie = cookie;
timeout++;
}
}
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_decode_notify_response);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_free_notify_response)(
struct notify_ack_t *acks, size_t nr_acks,
struct notify_timeout_t *timeouts)
{
for (uint64_t n = 0; n < nr_acks; ++n) {
    ceph_assert(acks);
if (acks[n].payload) {
free(acks[n].payload);
}
}
if (acks) {
delete[] acks;
}
if (timeouts) {
delete[] timeouts;
}
}
LIBRADOS_C_API_BASE_DEFAULT(rados_free_notify_response);
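/*
 * Illustrative notify sketch (not part of librados; "io" and the object
 * name are placeholders): the reply buffer from rados_notify2() is
 * decoded into per-watcher acks and timeouts, and every allocation is
 * handed back afterwards.
 *
 *   char *reply = NULL;
 *   size_t reply_len = 0;
 *   if (rados_notify2(io, "myobj", NULL, 0, 10000, &reply, &reply_len) == 0) {
 *     struct notify_ack_t *acks;
 *     struct notify_timeout_t *timeouts;
 *     size_t nr_acks, nr_timeouts;
 *     if (rados_decode_notify_response(reply, reply_len, &acks, &nr_acks,
 *                                      &timeouts, &nr_timeouts) == 0)
 *       rados_free_notify_response(acks, nr_acks, timeouts);
 *   }
 *   rados_buffer_free(reply);
 */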
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_notify)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, int buf_len,
uint64_t timeout_ms, char **reply_buffer,
size_t *reply_buffer_len)
{
tracepoint(librados, rados_aio_notify_enter, io, o, completion, buf, buf_len,
timeout_ms);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bl.push_back(buffer::copy(buf, buf_len));
}
librados::AioCompletionImpl *c =
reinterpret_cast<librados::AioCompletionImpl*>(completion);
int ret = ctx->aio_notify(oid, c, bl, timeout_ms, NULL, reply_buffer,
reply_buffer_len);
tracepoint(librados, rados_aio_notify_exit, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_notify);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_notify_ack)(
rados_ioctx_t io, const char *o,
uint64_t notify_id, uint64_t handle,
const char *buf, int buf_len)
{
tracepoint(librados, rados_notify_ack_enter, io, o, notify_id, handle, buf, buf_len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bufferptr p = buffer::create(buf_len);
memcpy(p.c_str(), buf, buf_len);
bl.push_back(p);
}
ctx->notify_ack(oid, notify_id, handle, bl);
tracepoint(librados, rados_notify_ack_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_notify_ack);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch_flush)(rados_t cluster)
{
tracepoint(librados, rados_watch_flush_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->watch_flush();
tracepoint(librados, rados_watch_flush_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch_flush);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_watch_flush)(
rados_t cluster,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_watch_flush_enter, cluster, completion);
librados::RadosClient *client = (librados::RadosClient *)cluster;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
int retval = client->async_watch_flush(c);
tracepoint(librados, rados_aio_watch_flush_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_watch_flush);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_set_alloc_hint)(
rados_ioctx_t io, const char *o,
uint64_t expected_object_size,
uint64_t expected_write_size)
{
tracepoint(librados, rados_set_alloc_hint_enter, io, o, expected_object_size, expected_write_size);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->set_alloc_hint(oid, expected_object_size,
expected_write_size, 0);
tracepoint(librados, rados_set_alloc_hint_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_alloc_hint);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_set_alloc_hint2)(
rados_ioctx_t io, const char *o,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
tracepoint(librados, rados_set_alloc_hint2_enter, io, o, expected_object_size, expected_write_size, flags);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->set_alloc_hint(oid, expected_object_size,
expected_write_size, flags);
tracepoint(librados, rados_set_alloc_hint2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_alloc_hint2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_lock_exclusive)(
rados_ioctx_t io, const char * o,
const char * name, const char * cookie,
const char * desc,
struct timeval * duration, uint8_t flags)
{
tracepoint(librados, rados_lock_exclusive_enter, io, o, name, cookie, desc, duration, flags);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.lock_exclusive(o, name, cookie, desc, duration, flags);
tracepoint(librados, rados_lock_exclusive_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_lock_exclusive);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_lock_shared)(
rados_ioctx_t io, const char * o,
const char * name, const char * cookie,
const char * tag, const char * desc,
struct timeval * duration, uint8_t flags)
{
tracepoint(librados, rados_lock_shared_enter, io, o, name, cookie, tag, desc, duration, flags);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.lock_shared(o, name, cookie, tag, desc, duration, flags);
tracepoint(librados, rados_lock_shared_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_lock_shared);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_unlock)(
rados_ioctx_t io, const char *o, const char *name,
const char *cookie)
{
tracepoint(librados, rados_unlock_enter, io, o, name, cookie);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.unlock(o, name, cookie);
tracepoint(librados, rados_unlock_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unlock);
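/*
 * Illustrative locking sketch (not part of librados; the object, lock
 * name and cookie are placeholders): the cookie identifies this lock
 * instance and the same name/cookie pair must be used to unlock; a NULL
 * duration means the lock never expires.
 *
 *   struct timeval tv = { 30, 0 };
 *   int r = rados_lock_exclusive(io, "myobj", "mylock", "cookie-1",
 *                                "example lock", &tv, 0);
 *   if (r == 0)
 *     rados_unlock(io, "myobj", "mylock", "cookie-1");
 */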
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_unlock)(
rados_ioctx_t io, const char *o, const char *name,
const char *cookie, rados_completion_t completion)
{
tracepoint(librados, rados_aio_unlock_enter, io, o, name, cookie, completion);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
librados::AioCompletionImpl *comp = (librados::AioCompletionImpl*)completion;
comp->get();
librados::AioCompletion c(comp);
int retval = ctx.aio_unlock(o, name, cookie, &c);
tracepoint(librados, rados_aio_unlock_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_unlock);
extern "C" ssize_t LIBRADOS_C_API_DEFAULT_F(rados_list_lockers)(
rados_ioctx_t io, const char *o,
const char *name, int *exclusive,
char *tag, size_t *tag_len,
char *clients, size_t *clients_len,
char *cookies, size_t *cookies_len,
char *addrs, size_t *addrs_len)
{
tracepoint(librados, rados_list_lockers_enter, io, o, name, *tag_len, *clients_len, *cookies_len, *addrs_len);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
std::string name_str = name;
std::string oid = o;
std::string tag_str;
int tmp_exclusive;
std::list<librados::locker_t> lockers;
int r = ctx.list_lockers(oid, name_str, &tmp_exclusive, &tag_str, &lockers);
if (r < 0) {
tracepoint(librados, rados_list_lockers_exit, r, *exclusive, "", *tag_len, *clients_len, *cookies_len, *addrs_len);
return r;
}
size_t clients_total = 0;
size_t cookies_total = 0;
size_t addrs_total = 0;
list<librados::locker_t>::const_iterator it;
for (it = lockers.begin(); it != lockers.end(); ++it) {
clients_total += it->client.length() + 1;
cookies_total += it->cookie.length() + 1;
addrs_total += it->address.length() + 1;
}
bool too_short = ((clients_total > *clients_len) ||
(cookies_total > *cookies_len) ||
(addrs_total > *addrs_len) ||
(tag_str.length() + 1 > *tag_len));
*clients_len = clients_total;
*cookies_len = cookies_total;
*addrs_len = addrs_total;
*tag_len = tag_str.length() + 1;
if (too_short) {
tracepoint(librados, rados_list_lockers_exit, -ERANGE, *exclusive, "", *tag_len, *clients_len, *cookies_len, *addrs_len);
return -ERANGE;
}
strcpy(tag, tag_str.c_str());
char *clients_p = clients;
char *cookies_p = cookies;
char *addrs_p = addrs;
for (it = lockers.begin(); it != lockers.end(); ++it) {
strcpy(clients_p, it->client.c_str());
strcpy(cookies_p, it->cookie.c_str());
strcpy(addrs_p, it->address.c_str());
tracepoint(librados, rados_list_lockers_locker, clients_p, cookies_p, addrs_p);
clients_p += it->client.length() + 1;
cookies_p += it->cookie.length() + 1;
addrs_p += it->address.length() + 1;
}
if (tmp_exclusive)
*exclusive = 1;
else
*exclusive = 0;
int retval = lockers.size();
tracepoint(librados, rados_list_lockers_exit, retval, *exclusive, tag, *tag_len, *clients_len, *cookies_len, *addrs_len);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_list_lockers);
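/*
 * Illustrative sizing sketch (not part of librados; object and lock
 * names are placeholders): on -ERANGE the *_len outputs have already
 * been updated to the required sizes, so the usual pattern is to probe
 * once and then call again with buffers of the reported sizes.
 *
 *   size_t tag_len = 0, clients_len = 0, cookies_len = 0, addrs_len = 0;
 *   int exclusive = 0;
 *   ssize_t n = rados_list_lockers(io, "myobj", "mylock", &exclusive,
 *                                  NULL, &tag_len, NULL, &clients_len,
 *                                  NULL, &cookies_len, NULL, &addrs_len);
 *   if (n == -ERANGE) {
 *     // allocate tag/clients/cookies/addrs at the reported sizes, retry
 *   }
 */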
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_break_lock)(
rados_ioctx_t io, const char *o,
const char *name, const char *client,
const char *cookie)
{
tracepoint(librados, rados_break_lock_enter, io, o, name, client, cookie);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.break_lock(o, name, client, cookie);
tracepoint(librados, rados_break_lock_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_break_lock);
extern "C" rados_write_op_t LIBRADOS_C_API_DEFAULT_F(rados_create_write_op)()
{
tracepoint(librados, rados_create_write_op_enter);
rados_write_op_t retval = new (std::nothrow) librados::ObjectOperationImpl;
tracepoint(librados, rados_create_write_op_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create_write_op);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_release_write_op)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_release_write_op_enter, write_op);
delete static_cast<librados::ObjectOperationImpl*>(write_op);
tracepoint(librados, rados_release_write_op_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_release_write_op);
static ::ObjectOperation* to_object_operation(rados_write_op_t write_op)
{
return &static_cast<librados::ObjectOperationImpl*>(write_op)->o;
}
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_set_flags)(
rados_write_op_t write_op,
int flags)
{
tracepoint(librados, rados_write_op_set_flags_enter, write_op, flags);
to_object_operation(write_op)->set_last_op_flags(get_op_flags(flags));
tracepoint(librados, rados_write_op_set_flags_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_set_flags);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_assert_version)(
rados_write_op_t write_op,
uint64_t ver)
{
tracepoint(librados, rados_write_op_assert_version_enter, write_op, ver);
to_object_operation(write_op)->assert_version(ver);
tracepoint(librados, rados_write_op_assert_version_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_assert_version);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_assert_exists)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_write_op_assert_exists_enter, write_op);
to_object_operation(write_op)->stat(nullptr, nullptr, nullptr);
tracepoint(librados, rados_write_op_assert_exists_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_assert_exists);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_cmpext)(
rados_write_op_t write_op,
const char *cmp_buf,
size_t cmp_len,
uint64_t off,
int *prval)
{
tracepoint(librados, rados_write_op_cmpext_enter, write_op, cmp_buf,
cmp_len, off, prval);
to_object_operation(write_op)->cmpext(off, cmp_len, cmp_buf, prval);
tracepoint(librados, rados_write_op_cmpext_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_cmpext);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_cmpxattr)(
rados_write_op_t write_op,
const char *name,
uint8_t comparison_operator,
const char *value,
size_t value_len)
{
tracepoint(librados, rados_write_op_cmpxattr_enter, write_op, name, comparison_operator, value, value_len);
bufferlist bl;
bl.append(value, value_len);
to_object_operation(write_op)->cmpxattr(name,
comparison_operator,
CEPH_OSD_CMPXATTR_MODE_STRING,
bl);
tracepoint(librados, rados_write_op_cmpxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_cmpxattr);
static void rados_c_omap_cmp(ObjectOperation *op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t key_len,
size_t val_len,
int *prval)
{
bufferlist bl;
bl.append(val, val_len);
std::map<std::string, pair<bufferlist, int> > assertions;
string lkey = string(key, key_len);
assertions[lkey] = std::make_pair(bl, comparison_operator);
op->omap_cmp(assertions, prval);
}
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_cmp)(
rados_write_op_t write_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_write_op_omap_cmp_enter, write_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp(to_object_operation(write_op), key, comparison_operator,
val, strlen(key), val_len, prval);
tracepoint(librados, rados_write_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_cmp);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_cmp2)(
rados_write_op_t write_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t key_len,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_write_op_omap_cmp_enter, write_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp(to_object_operation(write_op), key, comparison_operator,
val, key_len, val_len, prval);
tracepoint(librados, rados_write_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_cmp2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_setxattr)(
rados_write_op_t write_op,
const char *name,
const char *value,
size_t value_len)
{
tracepoint(librados, rados_write_op_setxattr_enter, write_op, name, value, value_len);
bufferlist bl;
bl.append(value, value_len);
to_object_operation(write_op)->setxattr(name, bl);
tracepoint(librados, rados_write_op_setxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_setxattr);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_rmxattr)(
rados_write_op_t write_op,
const char *name)
{
tracepoint(librados, rados_write_op_rmxattr_enter, write_op, name);
to_object_operation(write_op)->rmxattr(name);
tracepoint(librados, rados_write_op_rmxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_rmxattr);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_create)(
rados_write_op_t write_op,
int exclusive,
const char* category) // unused
{
tracepoint(librados, rados_write_op_create_enter, write_op, exclusive);
to_object_operation(write_op)->create(!!exclusive);
tracepoint(librados, rados_write_op_create_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_create);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_write)(
rados_write_op_t write_op,
const char *buffer,
size_t len,
uint64_t offset)
{
tracepoint(librados, rados_write_op_write_enter, write_op, buffer, len, offset);
bufferlist bl;
bl.append(buffer,len);
to_object_operation(write_op)->write(offset, bl);
tracepoint(librados, rados_write_op_write_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_write);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_write_full)(
rados_write_op_t write_op,
const char *buffer,
size_t len)
{
tracepoint(librados, rados_write_op_write_full_enter, write_op, buffer, len);
bufferlist bl;
bl.append(buffer,len);
to_object_operation(write_op)->write_full(bl);
tracepoint(librados, rados_write_op_write_full_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_write_full);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_writesame)(
rados_write_op_t write_op,
const char *buffer,
size_t data_len,
size_t write_len,
uint64_t offset)
{
tracepoint(librados, rados_write_op_writesame_enter, write_op, buffer, data_len, write_len, offset);
bufferlist bl;
bl.append(buffer, data_len);
to_object_operation(write_op)->writesame(offset, write_len, bl);
tracepoint(librados, rados_write_op_writesame_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_writesame);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_append)(
rados_write_op_t write_op,
const char *buffer,
size_t len)
{
tracepoint(librados, rados_write_op_append_enter, write_op, buffer, len);
bufferlist bl;
bl.append(buffer,len);
to_object_operation(write_op)->append(bl);
tracepoint(librados, rados_write_op_append_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_append);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_remove)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_write_op_remove_enter, write_op);
to_object_operation(write_op)->remove();
tracepoint(librados, rados_write_op_remove_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_remove);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_truncate)(
rados_write_op_t write_op,
uint64_t offset)
{
tracepoint(librados, rados_write_op_truncate_enter, write_op, offset);
to_object_operation(write_op)->truncate(offset);
tracepoint(librados, rados_write_op_truncate_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_truncate);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_zero)(
rados_write_op_t write_op,
uint64_t offset,
uint64_t len)
{
tracepoint(librados, rados_write_op_zero_enter, write_op, offset, len);
to_object_operation(write_op)->zero(offset, len);
tracepoint(librados, rados_write_op_zero_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_zero);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_exec)(
rados_write_op_t write_op,
const char *cls,
const char *method,
const char *in_buf,
size_t in_len,
int *prval)
{
tracepoint(librados, rados_write_op_exec_enter, write_op, cls, method, in_buf, in_len, prval);
bufferlist inbl;
inbl.append(in_buf, in_len);
to_object_operation(write_op)->call(cls, method, inbl, NULL, NULL, prval);
tracepoint(librados, rados_write_op_exec_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_exec);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_set)(
rados_write_op_t write_op,
char const* const* keys,
char const* const* vals,
const size_t *lens,
size_t num)
{
tracepoint(librados, rados_write_op_omap_set_enter, write_op, num);
std::map<std::string, bufferlist> entries;
for (size_t i = 0; i < num; ++i) {
tracepoint(librados, rados_write_op_omap_set_entry, keys[i], vals[i], lens[i]);
bufferlist bl(lens[i]);
bl.append(vals[i], lens[i]);
entries[keys[i]] = bl;
}
to_object_operation(write_op)->omap_set(entries);
tracepoint(librados, rados_write_op_omap_set_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_set);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_set2)(
rados_write_op_t write_op,
char const* const* keys,
char const* const* vals,
const size_t *key_lens,
const size_t *val_lens,
size_t num)
{
tracepoint(librados, rados_write_op_omap_set_enter, write_op, num);
std::map<std::string, bufferlist> entries;
for (size_t i = 0; i < num; ++i) {
bufferlist bl(val_lens[i]);
bl.append(vals[i], val_lens[i]);
string key(keys[i], key_lens[i]);
entries[key] = bl;
}
to_object_operation(write_op)->omap_set(entries);
tracepoint(librados, rados_write_op_omap_set_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_set2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_rm_keys)(
rados_write_op_t write_op,
char const* const* keys,
size_t keys_len)
{
tracepoint(librados, rados_write_op_omap_rm_keys_enter, write_op, keys_len);
for (size_t i = 0; i < keys_len; i++) {
tracepoint(librados, rados_write_op_omap_rm_keys_entry, keys[i]);
}
std::set<std::string> to_remove(keys, keys + keys_len);
to_object_operation(write_op)->omap_rm_keys(to_remove);
tracepoint(librados, rados_write_op_omap_rm_keys_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_rm_keys);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_rm_keys2)(
rados_write_op_t write_op,
char const* const* keys,
const size_t* key_lens,
size_t keys_len)
{
tracepoint(librados, rados_write_op_omap_rm_keys_enter, write_op, keys_len);
std::set<std::string> to_remove;
for(size_t i = 0; i < keys_len; i++) {
to_remove.emplace(keys[i], key_lens[i]);
}
to_object_operation(write_op)->omap_rm_keys(to_remove);
tracepoint(librados, rados_write_op_omap_rm_keys_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_rm_keys2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_rm_range2)(
rados_write_op_t write_op,
const char *key_begin,
size_t key_begin_len,
const char *key_end,
size_t key_end_len)
{
tracepoint(librados, rados_write_op_omap_rm_range_enter,
write_op, key_begin, key_end);
to_object_operation(write_op)->omap_rm_range({key_begin, key_begin_len},
{key_end, key_end_len});
tracepoint(librados, rados_write_op_omap_rm_range_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_rm_range2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_clear)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_write_op_omap_clear_enter, write_op);
to_object_operation(write_op)->omap_clear();
tracepoint(librados, rados_write_op_omap_clear_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_clear);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_set_alloc_hint)(
rados_write_op_t write_op,
uint64_t expected_object_size,
uint64_t expected_write_size)
{
tracepoint(librados, rados_write_op_set_alloc_hint_enter, write_op, expected_object_size, expected_write_size);
to_object_operation(write_op)->set_alloc_hint(expected_object_size,
expected_write_size, 0);
tracepoint(librados, rados_write_op_set_alloc_hint_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_set_alloc_hint);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_set_alloc_hint2)(
rados_write_op_t write_op,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
tracepoint(librados, rados_write_op_set_alloc_hint2_enter, write_op, expected_object_size, expected_write_size, flags);
to_object_operation(write_op)->set_alloc_hint(expected_object_size,
expected_write_size,
flags);
tracepoint(librados, rados_write_op_set_alloc_hint2_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_set_alloc_hint2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write_op_operate)(
rados_write_op_t write_op,
rados_ioctx_t io,
const char *oid,
time_t *mtime,
int flags)
{
tracepoint(librados, rados_write_op_operate_enter, write_op, io, oid, mtime, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (mtime) {
oimpl->rt = ceph::real_clock::from_time_t(*mtime);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->operate(obj, &oimpl->o, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_operate);
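/*
 * Illustrative usage (not compiled here): a caller batches several mutations
 * into one write op and submits it atomically.  Assumes `io` is an open
 * rados_ioctx_t; error handling is elided for brevity.
 *
 *   rados_write_op_t op = rados_create_write_op();
 *   rados_write_op_create(op, LIBRADOS_CREATE_IDEMPOTENT, NULL);
 *   rados_write_op_setxattr(op, "type", "demo", 4);
 *   rados_write_op_write(op, "payload", 7, 0);
 *   // everything above succeeds or fails as a unit
 *   int r = rados_write_op_operate(op, io, "myobject", NULL, 0);
 *   rados_release_write_op(op);
 */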
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write_op_operate2)(
rados_write_op_t write_op,
rados_ioctx_t io,
const char *oid,
struct timespec *ts,
int flags)
{
tracepoint(librados, rados_write_op_operate2_enter, write_op, io, oid, ts, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (ts) {
oimpl->rt = ceph::real_clock::from_timespec(*ts);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->operate(obj, &oimpl->o, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_operate2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_op_operate)(
rados_write_op_t write_op,
rados_ioctx_t io,
rados_completion_t completion,
const char *oid,
time_t *mtime,
int flags)
{
tracepoint(librados, rados_aio_write_op_operate_enter, write_op, io, completion, oid, mtime, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
if (mtime) {
oimpl->rt = ceph::real_clock::from_time_t(*mtime);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->aio_operate(obj, &oimpl->o, c, ctx->snapc, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_aio_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_op_operate);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_op_operate2)(
rados_write_op_t write_op,
rados_ioctx_t io,
rados_completion_t completion,
const char *oid,
struct timespec *mtime,
int flags)
{
tracepoint(librados, rados_aio_write_op_operate2_enter, write_op, io, completion, oid, mtime, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
if (mtime) {
oimpl->rt = ceph::real_clock::from_timespec(*mtime);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->aio_operate(obj, &oimpl->o, c, ctx->snapc, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_aio_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_op_operate2);
extern "C" rados_read_op_t LIBRADOS_C_API_DEFAULT_F(rados_create_read_op)()
{
tracepoint(librados, rados_create_read_op_enter);
rados_read_op_t retval = new (std::nothrow)::ObjectOperation;
tracepoint(librados, rados_create_read_op_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create_read_op);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_release_read_op)(
rados_read_op_t read_op)
{
tracepoint(librados, rados_release_read_op_enter, read_op);
delete (::ObjectOperation *)read_op;
tracepoint(librados, rados_release_read_op_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_release_read_op);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_set_flags)(
rados_read_op_t read_op,
int flags)
{
tracepoint(librados, rados_read_op_set_flags_enter, read_op, flags);
((::ObjectOperation *)read_op)->set_last_op_flags(get_op_flags(flags));
tracepoint(librados, rados_read_op_set_flags_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_set_flags);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_assert_version)(
rados_read_op_t read_op,
uint64_t ver)
{
tracepoint(librados, rados_read_op_assert_version_enter, read_op, ver);
((::ObjectOperation *)read_op)->assert_version(ver);
tracepoint(librados, rados_read_op_assert_version_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_assert_version);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_assert_exists)(
rados_read_op_t read_op)
{
tracepoint(librados, rados_read_op_assert_exists_enter, read_op);
((::ObjectOperation *)read_op)->stat(nullptr, nullptr, nullptr);
tracepoint(librados, rados_read_op_assert_exists_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_assert_exists);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_cmpext)(
rados_read_op_t read_op,
const char *cmp_buf,
size_t cmp_len,
uint64_t off,
int *prval)
{
tracepoint(librados, rados_read_op_cmpext_enter, read_op, cmp_buf,
cmp_len, off, prval);
((::ObjectOperation *)read_op)->cmpext(off, cmp_len, cmp_buf, prval);
tracepoint(librados, rados_read_op_cmpext_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_cmpext);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_cmpxattr)(
rados_read_op_t read_op,
const char *name,
uint8_t comparison_operator,
const char *value,
size_t value_len)
{
tracepoint(librados, rados_read_op_cmpxattr_enter, read_op, name, comparison_operator, value, value_len);
bufferlist bl;
bl.append(value, value_len);
((::ObjectOperation *)read_op)->cmpxattr(name,
comparison_operator,
CEPH_OSD_CMPXATTR_MODE_STRING,
bl);
tracepoint(librados, rados_read_op_cmpxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_cmpxattr);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_cmp)(
rados_read_op_t read_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_read_op_omap_cmp_enter, read_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp((::ObjectOperation *)read_op, key, comparison_operator,
val, strlen(key), val_len, prval);
tracepoint(librados, rados_read_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_cmp);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_cmp2)(
rados_read_op_t read_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t key_len,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_read_op_omap_cmp_enter, read_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp((::ObjectOperation *)read_op, key, comparison_operator,
val, key_len, val_len, prval);
tracepoint(librados, rados_read_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_cmp2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_stat)(
rados_read_op_t read_op,
uint64_t *psize,
time_t *pmtime,
int *prval)
{
tracepoint(librados, rados_read_op_stat_enter, read_op, psize, pmtime, prval);
((::ObjectOperation *)read_op)->stat(psize, pmtime, prval);
tracepoint(librados, rados_read_op_stat_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_stat);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_stat2)(
rados_read_op_t read_op,
uint64_t *psize,
struct timespec *pmtime,
int *prval)
{
tracepoint(librados, rados_read_op_stat2_enter, read_op, psize, pmtime, prval);
((::ObjectOperation *)read_op)->stat(psize, pmtime, prval);
tracepoint(librados, rados_read_op_stat2_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_stat2);
// completion context that copies a bufferlist result into a caller-provided
// buffer once the op finishes, reporting -ERANGE if it would not fit
class C_bl_to_buf : public Context {
char *out_buf;
size_t out_len;
size_t *bytes_read;
int *prval;
public:
bufferlist out_bl;
C_bl_to_buf(char *out_buf,
size_t out_len,
size_t *bytes_read,
int *prval) : out_buf(out_buf), out_len(out_len),
bytes_read(bytes_read), prval(prval) {}
void finish(int r) override {
if (out_bl.length() > out_len) {
if (prval)
*prval = -ERANGE;
if (bytes_read)
*bytes_read = 0;
return;
}
if (bytes_read)
*bytes_read = out_bl.length();
if (out_buf && !out_bl.is_provided_buffer(out_buf))
out_bl.begin().copy(out_bl.length(), out_buf);
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_read)(
rados_read_op_t read_op,
uint64_t offset,
size_t len,
char *buf,
size_t *bytes_read,
int *prval)
{
tracepoint(librados, rados_read_op_read_enter, read_op, offset, len, buf, bytes_read, prval);
C_bl_to_buf *ctx = new C_bl_to_buf(buf, len, bytes_read, prval);
ctx->out_bl.push_back(buffer::create_static(len, buf));
((::ObjectOperation *)read_op)->read(offset, len, &ctx->out_bl, prval, ctx);
tracepoint(librados, rados_read_op_read_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_read);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_checksum)(
rados_read_op_t read_op,
rados_checksum_type_t type,
const char *init_value,
size_t init_value_len,
uint64_t offset, size_t len,
size_t chunk_size, char *pchecksum,
size_t checksum_len, int *prval)
{
tracepoint(librados, rados_read_op_checksum_enter, read_op, type, init_value,
init_value_len, offset, len, chunk_size);
bufferlist init_value_bl;
init_value_bl.append(init_value, init_value_len);
C_bl_to_buf *ctx = nullptr;
if (pchecksum != nullptr) {
ctx = new C_bl_to_buf(pchecksum, checksum_len, nullptr, prval);
}
((::ObjectOperation *)read_op)->checksum(get_checksum_op_type(type),
init_value_bl, offset, len,
chunk_size,
(ctx ? &ctx->out_bl : nullptr),
prval, ctx);
tracepoint(librados, rados_read_op_checksum_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_checksum);
// completion context that hands the output bufferlist back to the caller as
// a heap-allocated buffer via do_out_buffer()
class C_out_buffer : public Context {
char **out_buf;
size_t *out_len;
public:
bufferlist out_bl;
C_out_buffer(char **out_buf, size_t *out_len) : out_buf(out_buf),
out_len(out_len) {}
void finish(int r) override {
// ignore r since we don't know the meaning of return values
// from custom class methods
do_out_buffer(out_bl, out_buf, out_len);
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_exec)(
rados_read_op_t read_op,
const char *cls,
const char *method,
const char *in_buf,
size_t in_len,
char **out_buf,
size_t *out_len,
int *prval)
{
tracepoint(librados, rados_read_op_exec_enter, read_op, cls, method, in_buf, in_len, out_buf, out_len, prval);
bufferlist inbl;
inbl.append(in_buf, in_len);
C_out_buffer *ctx = new C_out_buffer(out_buf, out_len);
((::ObjectOperation *)read_op)->call(cls, method, inbl, &ctx->out_bl, ctx,
prval);
tracepoint(librados, rados_read_op_exec_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_exec);
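/*
 * Illustrative usage (not compiled here): invoking an object-class method via
 * a read op.  The class/method names are hypothetical; the output buffer is
 * allocated by librados and must be released with rados_buffer_free().
 *
 *   rados_read_op_t op = rados_create_read_op();
 *   char *out = NULL;
 *   size_t out_len = 0;
 *   int cls_ret = 0;
 *   rados_read_op_exec(op, "myclass", "mymethod", "", 0,
 *                      &out, &out_len, &cls_ret);
 *   int r = rados_read_op_operate(op, io, "myobject", 0);
 *   rados_release_read_op(op);
 *   if (r == 0 && out)
 *     rados_buffer_free(out);
 */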
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_exec_user_buf)(
rados_read_op_t read_op,
const char *cls,
const char *method,
const char *in_buf,
size_t in_len,
char *out_buf,
size_t out_len,
size_t *used_len,
int *prval)
{
tracepoint(librados, rados_read_op_exec_user_buf_enter, read_op, cls, method, in_buf, in_len, out_buf, out_len, used_len, prval);
C_bl_to_buf *ctx = new C_bl_to_buf(out_buf, out_len, used_len, prval);
bufferlist inbl;
inbl.append(in_buf, in_len);
((::ObjectOperation *)read_op)->call(cls, method, inbl, &ctx->out_bl, ctx,
prval);
tracepoint(librados, rados_read_op_exec_user_buf_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_exec_user_buf);
// backing state for rados_omap_iter_t: the fetched key/value map plus the
// caller's current position within it
struct RadosOmapIter {
std::map<std::string, bufferlist> values;
std::map<std::string, bufferlist>::iterator i;
};
class C_OmapIter : public Context {
RadosOmapIter *iter;
public:
explicit C_OmapIter(RadosOmapIter *iter) : iter(iter) {}
void finish(int r) override {
iter->i = iter->values.begin();
}
};
class C_XattrsIter : public Context {
librados::RadosXattrsIter *iter;
public:
explicit C_XattrsIter(librados::RadosXattrsIter *iter) : iter(iter) {}
void finish(int r) override {
iter->i = iter->attrset.begin();
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_getxattrs)(
rados_read_op_t read_op,
rados_xattrs_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_getxattrs_enter, read_op, prval);
librados::RadosXattrsIter *xattrs_iter = new librados::RadosXattrsIter;
((::ObjectOperation *)read_op)->getxattrs(&xattrs_iter->attrset, prval);
((::ObjectOperation *)read_op)->set_handler(new C_XattrsIter(xattrs_iter));
*iter = xattrs_iter;
tracepoint(librados, rados_read_op_getxattrs_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_getxattrs);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals)(
rados_read_op_t read_op,
const char *start_after,
const char *filter_prefix,
uint64_t max_return,
rados_omap_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_vals_enter, read_op, start_after, filter_prefix, max_return, prval);
RadosOmapIter *omap_iter = new RadosOmapIter;
const char *start = start_after ? start_after : "";
const char *filter = filter_prefix ? filter_prefix : "";
((::ObjectOperation *)read_op)->omap_get_vals(
start,
filter,
max_return,
&omap_iter->values,
nullptr,
prval);
((::ObjectOperation *)read_op)->set_handler(new C_OmapIter(omap_iter));
*iter = omap_iter;
tracepoint(librados, rados_read_op_omap_get_vals_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals2)(
rados_read_op_t read_op,
const char *start_after,
const char *filter_prefix,
uint64_t max_return,
rados_omap_iter_t *iter,
unsigned char *pmore,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_vals_enter, read_op, start_after, filter_prefix, max_return, prval);
RadosOmapIter *omap_iter = new RadosOmapIter;
const char *start = start_after ? start_after : "";
const char *filter = filter_prefix ? filter_prefix : "";
((::ObjectOperation *)read_op)->omap_get_vals(
start,
filter,
max_return,
&omap_iter->values,
(bool*)pmore,
prval);
((::ObjectOperation *)read_op)->set_handler(new C_OmapIter(omap_iter));
*iter = omap_iter;
tracepoint(librados, rados_read_op_omap_get_vals_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals2);
struct C_OmapKeysIter : public Context {
RadosOmapIter *iter;
std::set<std::string> keys;
explicit C_OmapKeysIter(RadosOmapIter *iter) : iter(iter) {}
void finish(int r) override {
// map each key to an empty bl
for (std::set<std::string>::const_iterator i = keys.begin();
i != keys.end(); ++i) {
iter->values[*i];
}
iter->i = iter->values.begin();
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_keys)(
rados_read_op_t read_op,
const char *start_after,
uint64_t max_return,
rados_omap_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_keys_enter, read_op, start_after, max_return, prval);
RadosOmapIter *omap_iter = new RadosOmapIter;
C_OmapKeysIter *ctx = new C_OmapKeysIter(omap_iter);
((::ObjectOperation *)read_op)->omap_get_keys(
start_after ? start_after : "",
max_return, &ctx->keys, nullptr, prval);
((::ObjectOperation *)read_op)->set_handler(ctx);
*iter = omap_iter;
tracepoint(librados, rados_read_op_omap_get_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_keys);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_keys2)(
rados_read_op_t read_op,
const char *start_after,
uint64_t max_return,
rados_omap_iter_t *iter,
unsigned char *pmore,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_keys_enter, read_op, start_after, max_return, prval);
RadosOmapIter *omap_iter = new RadosOmapIter;
C_OmapKeysIter *ctx = new C_OmapKeysIter(omap_iter);
((::ObjectOperation *)read_op)->omap_get_keys(
start_after ? start_after : "",
max_return, &ctx->keys,
(bool*)pmore, prval);
((::ObjectOperation *)read_op)->set_handler(ctx);
*iter = omap_iter;
tracepoint(librados, rados_read_op_omap_get_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_keys2);
static void internal_rados_read_op_omap_get_vals_by_keys(rados_read_op_t read_op,
set<string>& to_get,
rados_omap_iter_t *iter,
int *prval)
{
RadosOmapIter *omap_iter = new RadosOmapIter;
((::ObjectOperation *)read_op)->omap_get_vals_by_keys(to_get,
&omap_iter->values,
prval);
((::ObjectOperation *)read_op)->set_handler(new C_OmapIter(omap_iter));
*iter = omap_iter;
}
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals_by_keys)(
rados_read_op_t read_op,
char const* const* keys,
size_t keys_len,
rados_omap_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_vals_by_keys_enter, read_op, keys, keys_len, iter, prval);
std::set<std::string> to_get(keys, keys + keys_len);
internal_rados_read_op_omap_get_vals_by_keys(read_op, to_get, iter, prval);
tracepoint(librados, rados_read_op_omap_get_vals_by_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals_by_keys);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals_by_keys2)(
rados_read_op_t read_op,
char const* const* keys,
size_t num_keys,
const size_t* key_lens,
rados_omap_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_vals_by_keys_enter, read_op, keys, num_keys, iter, prval);
std::set<std::string> to_get;
for (size_t i = 0; i < num_keys; i++) {
to_get.emplace(keys[i], key_lens[i]);
}
internal_rados_read_op_omap_get_vals_by_keys(read_op, to_get, iter, prval);
tracepoint(librados, rados_read_op_omap_get_vals_by_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals_by_keys2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_omap_get_next2)(
rados_omap_iter_t iter,
char **key,
char **val,
size_t *key_len,
size_t *val_len)
{
tracepoint(librados, rados_omap_get_next_enter, iter);
RadosOmapIter *it = static_cast<RadosOmapIter *>(iter);
if (it->i == it->values.end()) {
if (key)
*key = NULL;
if (val)
*val = NULL;
if (key_len)
*key_len = 0;
if (val_len)
*val_len = 0;
tracepoint(librados, rados_omap_get_next_exit, 0, key, val, val_len);
return 0;
}
if (key)
*key = (char*)it->i->first.c_str();
if (val)
*val = it->i->second.c_str();
if (key_len)
*key_len = it->i->first.length();
if (val_len)
*val_len = it->i->second.length();
++it->i;
tracepoint(librados, rados_omap_get_next_exit, 0, key, val, val_len);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_get_next2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_omap_get_next)(
rados_omap_iter_t iter,
char **key,
char **val,
size_t *len)
{
return LIBRADOS_C_API_DEFAULT_F(rados_omap_get_next2)(iter, key, val, nullptr, len);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_get_next);
extern "C" unsigned int LIBRADOS_C_API_DEFAULT_F(rados_omap_iter_size)(
rados_omap_iter_t iter)
{
RadosOmapIter *it = static_cast<RadosOmapIter *>(iter);
return it->values.size();
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_iter_size);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_omap_get_end)(
rados_omap_iter_t iter)
{
tracepoint(librados, rados_omap_get_end_enter, iter);
RadosOmapIter *it = static_cast<RadosOmapIter *>(iter);
delete it;
tracepoint(librados, rados_omap_get_end_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_get_end);
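/*
 * Illustrative usage (not compiled here): draining an omap iterator produced
 * by rados_read_op_omap_get_vals2().  Assumes `io` is an open rados_ioctx_t.
 *
 *   rados_read_op_t op = rados_create_read_op();
 *   rados_omap_iter_t it;
 *   unsigned char more = 0;
 *   int prval = 0;
 *   rados_read_op_omap_get_vals2(op, "", "", 100, &it, &more, &prval);
 *   int r = rados_read_op_operate(op, io, "myobject", 0);
 *   if (r == 0 && prval == 0) {
 *     char *key, *val;
 *     size_t key_len, val_len;
 *     while (rados_omap_get_next2(it, &key, &val, &key_len, &val_len) == 0 &&
 *            key != NULL) {
 *       // key/val point into the iterator; copy them before get_end()
 *     }
 *   }
 *   rados_omap_get_end(it);
 *   rados_release_read_op(op);
 */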
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_read_op_operate)(
rados_read_op_t read_op,
rados_ioctx_t io,
const char *oid,
int flags)
{
tracepoint(librados, rados_read_op_operate_enter, read_op, io, oid, flags);
object_t obj(oid);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->operate_read(obj, (::ObjectOperation *)read_op, NULL,
translate_flags(flags));
tracepoint(librados, rados_read_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_operate);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_read_op_operate)(
rados_read_op_t read_op,
rados_ioctx_t io,
rados_completion_t completion,
const char *oid,
int flags)
{
tracepoint(librados, rados_aio_read_op_operate_enter, read_op, io, completion, oid, flags);
object_t obj(oid);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
int retval = ctx->aio_operate_read(obj, (::ObjectOperation *)read_op,
c, translate_flags(flags), NULL);
tracepoint(librados, rados_aio_read_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_read_op_operate);
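/*
 * Illustrative usage (not compiled here): the asynchronous variant.  Buffers
 * wired into the read op must stay valid until the completion fires.
 * Assumes `io` is an open rados_ioctx_t.
 *
 *   rados_read_op_t op = rados_create_read_op();
 *   char buf[128];
 *   size_t bytes_read = 0;
 *   int prval = 0;
 *   rados_read_op_read(op, 0, sizeof(buf), buf, &bytes_read, &prval);
 *   rados_completion_t c;
 *   rados_aio_create_completion2(NULL, NULL, &c);
 *   rados_aio_read_op_operate(op, io, c, "myobject", 0);
 *   rados_aio_wait_for_complete(c);
 *   int r = rados_aio_get_return_value(c);
 *   rados_aio_release(c);
 *   rados_release_read_op(op);
 */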
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cache_pin)(
rados_ioctx_t io,
const char *o)
{
tracepoint(librados, rados_cache_pin_enter, io, o);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->cache_pin(oid);
tracepoint(librados, rados_cache_pin_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cache_pin);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cache_unpin)(
rados_ioctx_t io,
const char *o)
{
tracepoint(librados, rados_cache_unpin_enter, io, o);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->cache_unpin(oid);
tracepoint(librados, rados_cache_unpin_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cache_unpin);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_object_list_slice)(
rados_ioctx_t io,
const rados_object_list_cursor start,
const rados_object_list_cursor finish,
const size_t n,
const size_t m,
rados_object_list_cursor *split_start,
rados_object_list_cursor *split_finish)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ceph_assert(split_start);
ceph_assert(split_finish);
hobject_t *split_start_hobj = (hobject_t*)(*split_start);
hobject_t *split_finish_hobj = (hobject_t*)(*split_finish);
ceph_assert(split_start_hobj);
ceph_assert(split_finish_hobj);
hobject_t *start_hobj = (hobject_t*)(start);
hobject_t *finish_hobj = (hobject_t*)(finish);
ctx->object_list_slice(
*start_hobj,
*finish_hobj,
n,
m,
split_start_hobj,
split_finish_hobj);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_slice);
ceph-main/src/librados/librados_c.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_C_H
#define LIBRADOS_C_H
#include "include/types.h"
#include "include/rados/librados.h"
namespace __librados_base {
struct rados_pool_stat_t {
uint64_t num_bytes;
uint64_t num_kb;
uint64_t num_objects;
uint64_t num_object_clones;
uint64_t num_object_copies;
uint64_t num_objects_missing_on_primary;
uint64_t num_objects_unfound;
uint64_t num_objects_degraded;
uint64_t num_rd;
uint64_t num_rd_kb;
uint64_t num_wr;
uint64_t num_wr_kb;
};
} // namespace __librados_base
#endif // LIBRADOS_C_H
ceph-main/src/librados/librados_cxx.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <limits.h>
#include "common/config.h"
#include "common/errno.h"
#include "common/ceph_argparse.h"
#include "common/ceph_json.h"
#include "common/common_init.h"
#include "common/TracepointProvider.h"
#include "common/hobject.h"
#include "common/async/waiter.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/types.h"
#include <include/stringify.h>
#include "librados/AioCompletionImpl.h"
#include "librados/IoCtxImpl.h"
#include "librados/ObjectOperationImpl.h"
#include "librados/PoolAsyncCompletionImpl.h"
#include "librados/RadosClient.h"
#include "librados/RadosXattrIter.h"
#include "librados/ListObjectImpl.h"
#include "librados/librados_util.h"
#include "cls/lock/cls_lock_client.h"
#include <string>
#include <map>
#include <set>
#include <vector>
#include <list>
#include <stdexcept>
#include <system_error>
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
using std::list;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
static TracepointProvider::Traits tracepoint_traits("librados_tp.so", "rados_tracing");
/*
* Structure of this file
*
* RadosClient and the related classes are the internal implementation of librados.
* Above that layer sits the C API, found in include/rados/librados.h, and
* the C++ API, found in include/rados/librados.hpp
*
* The C++ API sometimes implements things in terms of the C API.
* Both the C++ and C API rely on RadosClient.
*
* Visually:
* +--------------------------------------+
* | C++ API |
* +--------------------+ |
* | C API | |
* +--------------------+-----------------+
* | RadosClient |
* +--------------------------------------+
*/
size_t librados::ObjectOperation::size()
{
// guard against a null impl; the address of impl->o can never be null,
// so checking the pointer after taking it would be a dead test
if (impl)
return impl->o.size();
return 0;
}
// deprecated
void librados::ObjectOperation::set_op_flags(ObjectOperationFlags flags)
{
set_op_flags2((int)flags);
}
void librados::ObjectOperation::set_op_flags2(int flags)
{
ceph_assert(impl);
impl->o.set_last_op_flags(get_op_flags(flags));
}
void librados::ObjectOperation::cmpext(uint64_t off,
const bufferlist &cmp_bl,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist c = cmp_bl;
o->cmpext(off, c, prval);
}
void librados::ObjectOperation::cmpxattr(const char *name, uint8_t op, const bufferlist& v)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->cmpxattr(name, op, CEPH_OSD_CMPXATTR_MODE_STRING, v);
}
void librados::ObjectOperation::cmpxattr(const char *name, uint8_t op, uint64_t v)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist bl;
encode(v, bl);
o->cmpxattr(name, op, CEPH_OSD_CMPXATTR_MODE_U64, bl);
}
void librados::ObjectOperation::assert_version(uint64_t ver)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->assert_version(ver);
}
void librados::ObjectOperation::assert_exists()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->stat(nullptr, nullptr, nullptr);
}
void librados::ObjectOperation::exec(const char *cls, const char *method,
bufferlist& inbl)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->call(cls, method, inbl);
}
void librados::ObjectOperation::exec(const char *cls, const char *method, bufferlist& inbl, bufferlist *outbl, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->call(cls, method, inbl, outbl, NULL, prval);
}
class ObjectOpCompletionCtx : public Context {
librados::ObjectOperationCompletion *completion;
bufferlist bl;
public:
explicit ObjectOpCompletionCtx(librados::ObjectOperationCompletion *c) : completion(c) {}
void finish(int r) override {
completion->handle_completion(r, bl);
delete completion;
}
bufferlist *outbl() {
return &bl;
}
};
void librados::ObjectOperation::exec(const char *cls, const char *method, bufferlist& inbl, librados::ObjectOperationCompletion *completion)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
ObjectOpCompletionCtx *ctx = new ObjectOpCompletionCtx(completion);
o->call(cls, method, inbl, ctx->outbl(), ctx, NULL);
}
void librados::ObjectReadOperation::stat(uint64_t *psize, time_t *pmtime, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->stat(psize, pmtime, prval);
}
void librados::ObjectReadOperation::stat2(uint64_t *psize, struct timespec *pts, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->stat(psize, pts, prval);
}
void librados::ObjectReadOperation::read(size_t off, uint64_t len, bufferlist *pbl, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->read(off, len, pbl, prval, NULL);
}
void librados::ObjectReadOperation::sparse_read(uint64_t off, uint64_t len,
std::map<uint64_t,uint64_t> *m,
bufferlist *data_bl, int *prval,
uint64_t truncate_size,
uint32_t truncate_seq)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->sparse_read(off, len, m, data_bl, prval, truncate_size, truncate_seq);
}
void librados::ObjectReadOperation::checksum(rados_checksum_type_t type,
const bufferlist &init_value_bl,
uint64_t off, size_t len,
size_t chunk_size, bufferlist *pbl,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->checksum(get_checksum_op_type(type), init_value_bl, off, len, chunk_size,
pbl, prval, nullptr);
}
void librados::ObjectReadOperation::getxattr(const char *name, bufferlist *pbl, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->getxattr(name, pbl, prval);
}
void librados::ObjectReadOperation::omap_get_vals(
const std::string &start_after,
const std::string &filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_vals(start_after, filter_prefix, max_return, out_vals, nullptr,
prval);
}
void librados::ObjectReadOperation::omap_get_vals2(
const std::string &start_after,
const std::string &filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
bool *pmore,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_vals(start_after, filter_prefix, max_return, out_vals, pmore,
prval);
}
void librados::ObjectReadOperation::omap_get_vals(
const std::string &start_after,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_vals(start_after, "", max_return, out_vals, nullptr, prval);
}
void librados::ObjectReadOperation::omap_get_vals2(
const std::string &start_after,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
bool *pmore,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_vals(start_after, "", max_return, out_vals, pmore, prval);
}
void librados::ObjectReadOperation::omap_get_keys(
const std::string &start_after,
uint64_t max_return,
std::set<std::string> *out_keys,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_keys(start_after, max_return, out_keys, nullptr, prval);
}
void librados::ObjectReadOperation::omap_get_keys2(
const std::string &start_after,
uint64_t max_return,
std::set<std::string> *out_keys,
bool *pmore,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_keys(start_after, max_return, out_keys, pmore, prval);
}
void librados::ObjectReadOperation::omap_get_header(bufferlist *bl, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_header(bl, prval);
}
void librados::ObjectReadOperation::omap_get_vals_by_keys(
const std::set<std::string> &keys,
std::map<std::string, bufferlist> *map,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_get_vals_by_keys(keys, map, prval);
}
void librados::ObjectOperation::omap_cmp(
const std::map<std::string, pair<bufferlist, int> > &assertions,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_cmp(assertions, prval);
}
void librados::ObjectReadOperation::list_watchers(
list<obj_watch_t> *out_watchers,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->list_watchers(out_watchers, prval);
}
void librados::ObjectReadOperation::list_snaps(
snap_set_t *out_snaps,
int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->list_snaps(out_snaps, prval);
}
void librados::ObjectReadOperation::is_dirty(bool *is_dirty, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->is_dirty(is_dirty, prval);
}
int librados::IoCtx::omap_get_vals(const std::string& oid,
const std::string& orig_start_after,
const std::string& filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals)
{
bool first = true;
string start_after = orig_start_after;
bool more = true;
while (max_return > 0 && more) {
std::map<std::string,bufferlist> out;
ObjectReadOperation op;
op.omap_get_vals2(start_after, filter_prefix, max_return, &out, &more,
nullptr);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0) {
return ret;
}
if (more) {
if (out.empty()) {
return -EINVAL; // server indicated more data but returned nothing; should not happen
}
start_after = out.rbegin()->first;
}
if (out.size() <= max_return) {
max_return -= out.size();
} else {
max_return = 0;
}
if (first) {
out_vals->swap(out);
first = false;
} else {
out_vals->insert(out.begin(), out.end());
out.clear();
}
}
return 0;
}
int librados::IoCtx::omap_get_vals2(
const std::string& oid,
const std::string& start_after,
const std::string& filter_prefix,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
bool *pmore)
{
ObjectReadOperation op;
int r;
op.omap_get_vals2(start_after, filter_prefix, max_return, out_vals, pmore, &r);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0)
return ret;
return r;
}
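/*
 * Illustrative usage (not compiled here): omap_get_vals() pages through the
 * omap internally until max_return entries are gathered, while
 * omap_get_vals2() does a single round trip and reports via *pmore whether
 * keys remain.  Assumes `io_ctx` is an open IoCtx.
 *
 *   std::map<std::string, bufferlist> vals;
 *   bool more = false;
 *   int r = io_ctx.omap_get_vals2("myobject", "", "", 100, &vals, &more);
 *   // if more is true, reissue with start_after = vals.rbegin()->first
 */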
void librados::ObjectReadOperation::getxattrs(map<string, bufferlist> *pattrs, int *prval)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->getxattrs(pattrs, prval);
}
void librados::ObjectWriteOperation::mtime(time_t *pt)
{
ceph_assert(impl);
if (pt) {
impl->rt = ceph::real_clock::from_time_t(*pt);
impl->prt = &impl->rt;
}
}
void librados::ObjectWriteOperation::mtime2(struct timespec *pts)
{
ceph_assert(impl);
if (pts) {
impl->rt = ceph::real_clock::from_timespec(*pts);
impl->prt = &impl->rt;
}
}
void librados::ObjectWriteOperation::create(bool exclusive)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->create(exclusive);
}
void librados::ObjectWriteOperation::create(bool exclusive,
const std::string& category) // unused
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->create(exclusive);
}
void librados::ObjectWriteOperation::write(uint64_t off, const bufferlist& bl)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist c = bl;
o->write(off, c);
}
void librados::ObjectWriteOperation::write_full(const bufferlist& bl)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist c = bl;
o->write_full(c);
}
void librados::ObjectWriteOperation::writesame(uint64_t off, uint64_t write_len,
const bufferlist& bl)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist c = bl;
o->writesame(off, write_len, c);
}
void librados::ObjectWriteOperation::append(const bufferlist& bl)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist c = bl;
o->append(c);
}
void librados::ObjectWriteOperation::remove()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->remove();
}
void librados::ObjectWriteOperation::truncate(uint64_t off)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->truncate(off);
}
void librados::ObjectWriteOperation::zero(uint64_t off, uint64_t len)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->zero(off, len);
}
void librados::ObjectWriteOperation::rmxattr(const char *name)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->rmxattr(name);
}
void librados::ObjectWriteOperation::setxattr(const char *name, const bufferlist& v)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->setxattr(name, v);
}
void librados::ObjectWriteOperation::setxattr(const char *name,
const buffer::list&& v)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->setxattr(name, std::move(v));
}
void librados::ObjectWriteOperation::omap_set(
const map<string, bufferlist> &map)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_set(map);
}
void librados::ObjectWriteOperation::omap_set_header(const bufferlist &bl)
{
ceph_assert(impl);
bufferlist c = bl;
::ObjectOperation *o = &impl->o;
o->omap_set_header(c);
}
void librados::ObjectWriteOperation::omap_clear()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_clear();
}
void librados::ObjectWriteOperation::omap_rm_keys(
const std::set<std::string> &to_rm)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->omap_rm_keys(to_rm);
}
void librados::ObjectWriteOperation::copy_from(const std::string& src,
const IoCtx& src_ioctx,
uint64_t src_version,
uint32_t src_fadvise_flags)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->copy_from(object_t(src), src_ioctx.io_ctx_impl->snap_seq,
src_ioctx.io_ctx_impl->oloc, src_version, 0, src_fadvise_flags);
}
void librados::ObjectWriteOperation::copy_from2(const std::string& src,
const IoCtx& src_ioctx,
uint64_t src_version,
uint32_t truncate_seq,
uint64_t truncate_size,
uint32_t src_fadvise_flags)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->copy_from2(object_t(src), src_ioctx.io_ctx_impl->snap_seq,
src_ioctx.io_ctx_impl->oloc, src_version, 0,
truncate_seq, truncate_size, src_fadvise_flags);
}
void librados::ObjectWriteOperation::undirty()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->undirty();
}
void librados::ObjectReadOperation::cache_flush()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->cache_flush();
}
void librados::ObjectReadOperation::cache_try_flush()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->cache_try_flush();
}
void librados::ObjectReadOperation::cache_evict()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->cache_evict();
}
void librados::ObjectReadOperation::tier_flush()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->tier_flush();
}
void librados::ObjectReadOperation::tier_evict()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->tier_evict();
}
void librados::ObjectWriteOperation::set_redirect(const std::string& tgt_obj,
const IoCtx& tgt_ioctx,
uint64_t tgt_version,
int flag)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->set_redirect(object_t(tgt_obj), tgt_ioctx.io_ctx_impl->snap_seq,
tgt_ioctx.io_ctx_impl->oloc, tgt_version, flag);
}
void librados::ObjectReadOperation::set_chunk(uint64_t src_offset,
uint64_t src_length,
const IoCtx& tgt_ioctx,
string tgt_oid,
uint64_t tgt_offset,
int flag)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->set_chunk(src_offset, src_length,
tgt_ioctx.io_ctx_impl->oloc, object_t(tgt_oid), tgt_offset, flag);
}
void librados::ObjectWriteOperation::tier_promote()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->tier_promote();
}
void librados::ObjectWriteOperation::unset_manifest()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->unset_manifest();
}
void librados::ObjectWriteOperation::tmap_update(const bufferlist& cmdbl)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
bufferlist c = cmdbl;
o->tmap_update(c);
}
void librados::ObjectWriteOperation::selfmanaged_snap_rollback(snap_t snapid)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->rollback(snapid);
}
// You must specify the snapid, not the snapshot name normally used with pool snapshots
void librados::ObjectWriteOperation::snap_rollback(snap_t snapid)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->rollback(snapid);
}
void librados::ObjectWriteOperation::set_alloc_hint(
uint64_t expected_object_size,
uint64_t expected_write_size)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->set_alloc_hint(expected_object_size, expected_write_size, 0);
}
void librados::ObjectWriteOperation::set_alloc_hint2(
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->set_alloc_hint(expected_object_size, expected_write_size, flags);
}
void librados::ObjectWriteOperation::cache_pin()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->cache_pin();
}
void librados::ObjectWriteOperation::cache_unpin()
{
ceph_assert(impl);
::ObjectOperation *o = &impl->o;
o->cache_unpin();
}
librados::WatchCtx::
~WatchCtx()
{
}
librados::WatchCtx2::
~WatchCtx2()
{
}
///////////////////////////// NObjectIteratorImpl /////////////////////////////
librados::NObjectIteratorImpl::NObjectIteratorImpl(ObjListCtx *ctx_)
: ctx(ctx_)
{
}
librados::NObjectIteratorImpl::~NObjectIteratorImpl()
{
ctx.reset();
}
librados::NObjectIteratorImpl::NObjectIteratorImpl(const NObjectIteratorImpl &rhs)
{
*this = rhs;
}
librados::NObjectIteratorImpl& librados::NObjectIteratorImpl::operator=(const librados::NObjectIteratorImpl &rhs)
{
if (&rhs == this)
return *this;
if (rhs.ctx.get() == NULL) {
ctx.reset();
return *this;
}
Objecter::NListContext *list_ctx = new Objecter::NListContext(*rhs.ctx->nlc);
ctx.reset(new ObjListCtx(rhs.ctx->ctx, list_ctx));
cur_obj = rhs.cur_obj;
return *this;
}
bool librados::NObjectIteratorImpl::operator==(const librados::NObjectIteratorImpl& rhs) const {
if (ctx.get() == NULL) {
if (rhs.ctx.get() == NULL)
return true;
return rhs.ctx->nlc->at_end();
}
if (rhs.ctx.get() == NULL) {
// Redundant but same as ObjectIterator version
if (ctx.get() == NULL)
return true;
return ctx->nlc->at_end();
}
return ctx.get() == rhs.ctx.get();
}
bool librados::NObjectIteratorImpl::operator!=(const librados::NObjectIteratorImpl& rhs) const {
return !(*this == rhs);
}
const librados::ListObject& librados::NObjectIteratorImpl::operator*() const {
return cur_obj;
}
const librados::ListObject* librados::NObjectIteratorImpl::operator->() const {
return &cur_obj;
}
librados::NObjectIteratorImpl& librados::NObjectIteratorImpl::operator++()
{
get_next();
return *this;
}
librados::NObjectIteratorImpl librados::NObjectIteratorImpl::operator++(int)
{
librados::NObjectIteratorImpl ret(*this);
get_next();
return ret;
}
uint32_t librados::NObjectIteratorImpl::seek(uint32_t pos)
{
uint32_t r = rados_nobjects_list_seek(ctx.get(), pos);
get_next();
return r;
}
uint32_t librados::NObjectIteratorImpl::seek(const ObjectCursor& cursor)
{
uint32_t r = rados_nobjects_list_seek_cursor(ctx.get(), (rados_object_list_cursor)cursor.c_cursor);
get_next();
return r;
}
librados::ObjectCursor librados::NObjectIteratorImpl::get_cursor()
{
librados::ObjListCtx *lh = (librados::ObjListCtx *)ctx.get();
librados::ObjectCursor oc;
oc.set(lh->ctx->nlist_get_cursor(lh->nlc));
return oc;
}
void librados::NObjectIteratorImpl::set_filter(const bufferlist &bl)
{
ceph_assert(ctx);
ctx->nlc->filter = bl;
}
void librados::NObjectIteratorImpl::get_next()
{
const char *entry, *key, *nspace;
size_t entry_size, key_size, nspace_size;
if (ctx->nlc->at_end())
return;
int ret = rados_nobjects_list_next2(ctx.get(), &entry, &key, &nspace,
&entry_size, &key_size, &nspace_size);
if (ret == -ENOENT) {
return;
}
else if (ret) {
throw std::system_error(-ret, std::system_category(),
"rados_nobjects_list_next2");
}
if (cur_obj.impl == NULL)
cur_obj.impl = new ListObjectImpl();
cur_obj.impl->nspace = string{nspace, nspace_size};
cur_obj.impl->oid = string{entry, entry_size};
cur_obj.impl->locator = key ? string(key, key_size) : string();
}
uint32_t librados::NObjectIteratorImpl::get_pg_hash_position() const
{
return ctx->nlc->get_pg_hash_position();
}
///////////////////////////// NObjectIterator /////////////////////////////
librados::NObjectIterator::NObjectIterator(ObjListCtx *ctx_)
{
impl = new NObjectIteratorImpl(ctx_);
}
librados::NObjectIterator::~NObjectIterator()
{
delete impl;
}
librados::NObjectIterator::NObjectIterator(const NObjectIterator &rhs)
{
if (rhs.impl == NULL) {
impl = NULL;
return;
}
impl = new NObjectIteratorImpl();
*impl = *(rhs.impl);
}
librados::NObjectIterator& librados::NObjectIterator::operator=(const librados::NObjectIterator &rhs)
{
if (rhs.impl == NULL) {
delete impl;
impl = NULL;
return *this;
}
if (impl == NULL)
impl = new NObjectIteratorImpl();
*impl = *(rhs.impl);
return *this;
}
bool librados::NObjectIterator::operator==(const librados::NObjectIterator& rhs) const
{
if (impl && rhs.impl) {
return *impl == *(rhs.impl);
} else {
return impl == rhs.impl;
}
}
bool librados::NObjectIterator::operator!=(const librados::NObjectIterator& rhs) const
{
return !(*this == rhs);
}
const librados::ListObject& librados::NObjectIterator::operator*() const {
ceph_assert(impl);
return *(impl->get_listobjectp());
}
const librados::ListObject* librados::NObjectIterator::operator->() const {
ceph_assert(impl);
return impl->get_listobjectp();
}
librados::NObjectIterator& librados::NObjectIterator::operator++()
{
ceph_assert(impl);
impl->get_next();
return *this;
}
librados::NObjectIterator librados::NObjectIterator::operator++(int)
{
librados::NObjectIterator ret(*this);
impl->get_next();
return ret;
}
uint32_t librados::NObjectIterator::seek(uint32_t pos)
{
ceph_assert(impl);
return impl->seek(pos);
}
uint32_t librados::NObjectIterator::seek(const ObjectCursor& cursor)
{
ceph_assert(impl);
return impl->seek(cursor);
}
librados::ObjectCursor librados::NObjectIterator::get_cursor()
{
ceph_assert(impl);
return impl->get_cursor();
}
void librados::NObjectIterator::set_filter(const bufferlist &bl)
{
impl->set_filter(bl);
}
void librados::NObjectIterator::get_next()
{
ceph_assert(impl);
impl->get_next();
}
uint32_t librados::NObjectIterator::get_pg_hash_position() const
{
ceph_assert(impl);
return impl->get_pg_hash_position();
}
const librados::NObjectIterator librados::NObjectIterator::__EndObjectIterator(NULL);
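/*
 * Illustrative usage (not compiled here): enumerating a pool's objects with
 * the C++ iterator interface.  Assumes `io_ctx` is an open IoCtx.
 *
 *   for (librados::NObjectIterator it = io_ctx.nobjects_begin();
 *        it != io_ctx.nobjects_end(); ++it) {
 *     std::cout << it->get_oid() << " ns=" << it->get_nspace() << "\n";
 *   }
 */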
///////////////////////////// PoolAsyncCompletion //////////////////////////////
librados::PoolAsyncCompletion::PoolAsyncCompletion::~PoolAsyncCompletion()
{
auto c = reinterpret_cast<PoolAsyncCompletionImpl *>(pc);
c->release();
}
int librados::PoolAsyncCompletion::PoolAsyncCompletion::set_callback(void *cb_arg,
rados_callback_t cb)
{
PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
return c->set_callback(cb_arg, cb);
}
int librados::PoolAsyncCompletion::PoolAsyncCompletion::wait()
{
PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
return c->wait();
}
bool librados::PoolAsyncCompletion::PoolAsyncCompletion::is_complete()
{
PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
return c->is_complete();
}
int librados::PoolAsyncCompletion::PoolAsyncCompletion::get_return_value()
{
PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
return c->get_return_value();
}
void librados::PoolAsyncCompletion::PoolAsyncCompletion::release()
{
delete this;
}
///////////////////////////// AioCompletion //////////////////////////////
librados::AioCompletion::AioCompletion::~AioCompletion()
{
auto c = reinterpret_cast<AioCompletionImpl *>(pc);
c->release();
}
int librados::AioCompletion::AioCompletion::set_complete_callback(void *cb_arg, rados_callback_t cb)
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->set_complete_callback(cb_arg, cb);
}
int librados::AioCompletion::AioCompletion::set_safe_callback(void *cb_arg, rados_callback_t cb)
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->set_safe_callback(cb_arg, cb);
}
int librados::AioCompletion::AioCompletion::wait_for_complete()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->wait_for_complete();
}
int librados::AioCompletion::AioCompletion::wait_for_safe()
{
// "safe" is now synonymous with "complete"
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->wait_for_complete();
}
bool librados::AioCompletion::AioCompletion::is_complete()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->is_complete();
}
bool librados::AioCompletion::AioCompletion::is_safe()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->is_safe();
}
int librados::AioCompletion::AioCompletion::wait_for_complete_and_cb()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->wait_for_complete_and_cb();
}
int librados::AioCompletion::AioCompletion::wait_for_safe_and_cb()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->wait_for_safe_and_cb();
}
bool librados::AioCompletion::AioCompletion::is_complete_and_cb()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->is_complete_and_cb();
}
bool librados::AioCompletion::AioCompletion::is_safe_and_cb()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->is_safe_and_cb();
}
int librados::AioCompletion::AioCompletion::get_return_value()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->get_return_value();
}
int librados::AioCompletion::AioCompletion::get_version()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->get_version();
}
uint64_t librados::AioCompletion::AioCompletion::get_version64()
{
AioCompletionImpl *c = (AioCompletionImpl *)pc;
return c->get_version();
}
void librados::AioCompletion::AioCompletion::release()
{
delete this;
}
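/*
 * Illustrative usage (not compiled here): an asynchronous write from C++.
 * The caller owns the AioCompletion and must release() it.  Assumes `io_ctx`
 * is an open IoCtx.
 *
 *   librados::bufferlist bl;
 *   bl.append("payload");
 *   librados::AioCompletion *c = librados::Rados::aio_create_completion();
 *   int r = io_ctx.aio_write("myobject", c, bl, bl.length(), 0);
 *   c->wait_for_complete();
 *   r = c->get_return_value();
 *   c->release();
 */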
///////////////////////////// IoCtx //////////////////////////////
librados::IoCtx::IoCtx() : io_ctx_impl(NULL)
{
}
void librados::IoCtx::from_rados_ioctx_t(rados_ioctx_t p, IoCtx &io)
{
IoCtxImpl *io_ctx_impl = (IoCtxImpl*)p;
io.io_ctx_impl = io_ctx_impl;
if (io_ctx_impl) {
io_ctx_impl->get();
}
}
librados::IoCtx::IoCtx(const IoCtx& rhs)
{
io_ctx_impl = rhs.io_ctx_impl;
if (io_ctx_impl) {
io_ctx_impl->get();
}
}
librados::IoCtx& librados::IoCtx::operator=(const IoCtx& rhs)
{
if (io_ctx_impl)
io_ctx_impl->put();
io_ctx_impl = rhs.io_ctx_impl;
// guard the take-ref like the copy constructor does; rhs may be closed
if (io_ctx_impl)
io_ctx_impl->get();
return *this;
}
librados::IoCtx::IoCtx(IoCtx&& rhs) noexcept
: io_ctx_impl(std::exchange(rhs.io_ctx_impl, nullptr))
{
}
librados::IoCtx& librados::IoCtx::operator=(IoCtx&& rhs) noexcept
{
if (io_ctx_impl)
io_ctx_impl->put();
io_ctx_impl = std::exchange(rhs.io_ctx_impl, nullptr);
return *this;
}
librados::IoCtx::~IoCtx()
{
close();
}
bool librados::IoCtx::is_valid() const {
return io_ctx_impl != nullptr;
}
void librados::IoCtx::close()
{
if (io_ctx_impl)
io_ctx_impl->put();
io_ctx_impl = 0;
}
void librados::IoCtx::dup(const IoCtx& rhs)
{
if (io_ctx_impl)
io_ctx_impl->put();
io_ctx_impl = new IoCtxImpl();
io_ctx_impl->get();
io_ctx_impl->dup(*rhs.io_ctx_impl);
}
int librados::IoCtx::set_auid(uint64_t auid_)
{
return -EOPNOTSUPP;
}
int librados::IoCtx::set_auid_async(uint64_t auid_, PoolAsyncCompletion *c)
{
return -EOPNOTSUPP;
}
int librados::IoCtx::get_auid(uint64_t *auid_)
{
return -EOPNOTSUPP;
}
bool librados::IoCtx::pool_requires_alignment()
{
return io_ctx_impl->client->pool_requires_alignment(get_id());
}
int librados::IoCtx::pool_requires_alignment2(bool *req)
{
return io_ctx_impl->client->pool_requires_alignment2(get_id(), req);
}
uint64_t librados::IoCtx::pool_required_alignment()
{
return io_ctx_impl->client->pool_required_alignment(get_id());
}
int librados::IoCtx::pool_required_alignment2(uint64_t *alignment)
{
return io_ctx_impl->client->pool_required_alignment2(get_id(), alignment);
}
std::string librados::IoCtx::get_pool_name()
{
std::string s;
io_ctx_impl->client->pool_get_name(get_id(), &s);
return s;
}
std::string librados::IoCtx::get_pool_name() const
{
return io_ctx_impl->get_cached_pool_name();
}
uint64_t librados::IoCtx::get_instance_id() const
{
return io_ctx_impl->client->get_instance_id();
}
int librados::IoCtx::create(const std::string& oid, bool exclusive)
{
object_t obj(oid);
return io_ctx_impl->create(obj, exclusive);
}
int librados::IoCtx::create(const std::string& oid, bool exclusive,
const std::string& category) // unused
{
object_t obj(oid);
return io_ctx_impl->create(obj, exclusive);
}
int librados::IoCtx::write(const std::string& oid, bufferlist& bl, size_t len, uint64_t off)
{
object_t obj(oid);
return io_ctx_impl->write(obj, bl, len, off);
}
int librados::IoCtx::append(const std::string& oid, bufferlist& bl, size_t len)
{
object_t obj(oid);
return io_ctx_impl->append(obj, bl, len);
}
int librados::IoCtx::write_full(const std::string& oid, bufferlist& bl)
{
object_t obj(oid);
return io_ctx_impl->write_full(obj, bl);
}
int librados::IoCtx::writesame(const std::string& oid, bufferlist& bl,
size_t write_len, uint64_t off)
{
object_t obj(oid);
return io_ctx_impl->writesame(obj, bl, write_len, off);
}
int librados::IoCtx::read(const std::string& oid, bufferlist& bl, size_t len, uint64_t off)
{
object_t obj(oid);
return io_ctx_impl->read(obj, bl, len, off);
}
int librados::IoCtx::checksum(const std::string& oid,
rados_checksum_type_t type,
const bufferlist &init_value_bl, size_t len,
uint64_t off, size_t chunk_size, bufferlist *pbl)
{
object_t obj(oid);
return io_ctx_impl->checksum(obj, get_checksum_op_type(type), init_value_bl,
len, off, chunk_size, pbl);
}
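// Illustrative usage sketch (assumptions: `ioctx` is an open IoCtx and the
// seed is a ceph-encoded uint32_t, as the checksum op expects): CRC32C of
// the first 4 MiB of an object, computed as a single chunk.
//
// uint32_t seed = 0;
// bufferlist init_bl, out_bl;
// encode(seed, init_bl);
// int r = ioctx.checksum("obj", LIBRADOS_CHECKSUM_TYPE_CRC32C,
// init_bl, 4 << 20, 0, 4 << 20, &out_bl);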
int librados::IoCtx::remove(const std::string& oid)
{
object_t obj(oid);
return io_ctx_impl->remove(obj);
}
int librados::IoCtx::remove(const std::string& oid, int flags)
{
object_t obj(oid);
return io_ctx_impl->remove(obj, flags);
}
int librados::IoCtx::trunc(const std::string& oid, uint64_t size)
{
object_t obj(oid);
return io_ctx_impl->trunc(obj, size);
}
int librados::IoCtx::mapext(const std::string& oid, uint64_t off, size_t len,
std::map<uint64_t,uint64_t>& m)
{
object_t obj(oid);
return io_ctx_impl->mapext(obj, off, len, m);
}
int librados::IoCtx::cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl)
{
object_t obj(oid);
return io_ctx_impl->cmpext(obj, off, cmp_bl);
}
int librados::IoCtx::sparse_read(const std::string& oid, std::map<uint64_t,uint64_t>& m,
bufferlist& bl, size_t len, uint64_t off)
{
object_t obj(oid);
return io_ctx_impl->sparse_read(obj, m, bl, len, off);
}
int librados::IoCtx::getxattr(const std::string& oid, const char *name, bufferlist& bl)
{
object_t obj(oid);
return io_ctx_impl->getxattr(obj, name, bl);
}
int librados::IoCtx::getxattrs(const std::string& oid, map<std::string, bufferlist>& attrset)
{
object_t obj(oid);
return io_ctx_impl->getxattrs(obj, attrset);
}
int librados::IoCtx::setxattr(const std::string& oid, const char *name, bufferlist& bl)
{
object_t obj(oid);
return io_ctx_impl->setxattr(obj, name, bl);
}
int librados::IoCtx::rmxattr(const std::string& oid, const char *name)
{
object_t obj(oid);
return io_ctx_impl->rmxattr(obj, name);
}
int librados::IoCtx::stat(const std::string& oid, uint64_t *psize, time_t *pmtime)
{
object_t obj(oid);
return io_ctx_impl->stat(obj, psize, pmtime);
}
int librados::IoCtx::stat2(const std::string& oid, uint64_t *psize, struct timespec *pts)
{
object_t obj(oid);
return io_ctx_impl->stat2(obj, psize, pts);
}
int librados::IoCtx::exec(const std::string& oid, const char *cls, const char *method,
bufferlist& inbl, bufferlist& outbl)
{
object_t obj(oid);
return io_ctx_impl->exec(obj, cls, method, inbl, outbl);
}
int librados::IoCtx::tmap_update(const std::string& oid, bufferlist& cmdbl)
{
object_t obj(oid);
return io_ctx_impl->tmap_update(obj, cmdbl);
}
int librados::IoCtx::omap_get_vals(const std::string& oid,
const std::string& start_after,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals)
{
return omap_get_vals(oid, start_after, string(), max_return, out_vals);
}
int librados::IoCtx::omap_get_vals2(
const std::string& oid,
const std::string& start_after,
uint64_t max_return,
std::map<std::string, bufferlist> *out_vals,
bool *pmore)
{
ObjectReadOperation op;
int r;
op.omap_get_vals2(start_after, max_return, out_vals, pmore, &r);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0)
return ret;
return r;
}
int librados::IoCtx::omap_get_keys(const std::string& oid,
const std::string& orig_start_after,
uint64_t max_return,
std::set<std::string> *out_keys)
{
bool first = true;
string start_after = orig_start_after;
bool more = true;
while (max_return > 0 && more) {
std::set<std::string> out;
ObjectReadOperation op;
op.omap_get_keys2(start_after, max_return, &out, &more, nullptr);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0) {
return ret;
}
if (more) {
if (out.empty()) {
return -EINVAL; // more==true but no keys were returned; treat as a protocol error
}
start_after = *out.rbegin();
}
if (out.size() <= max_return) {
max_return -= out.size();
} else {
max_return = 0;
}
if (first) {
out_keys->swap(out);
first = false;
} else {
out_keys->insert(out.begin(), out.end());
out.clear();
}
}
return 0;
}
int librados::IoCtx::omap_get_keys2(
const std::string& oid,
const std::string& start_after,
uint64_t max_return,
std::set<std::string> *out_keys,
bool *pmore)
{
ObjectReadOperation op;
int r;
op.omap_get_keys2(start_after, max_return, out_keys, pmore, &r);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0)
return ret;
return r;
}
int librados::IoCtx::omap_get_header(const std::string& oid,
bufferlist *bl)
{
ObjectReadOperation op;
int r;
op.omap_get_header(bl, &r);
bufferlist b;
int ret = operate(oid, &op, &b);
if (ret < 0)
return ret;
return r;
}
int librados::IoCtx::omap_get_vals_by_keys(const std::string& oid,
const std::set<std::string>& keys,
std::map<std::string, bufferlist> *vals)
{
ObjectReadOperation op;
int r;
bufferlist bl;
op.omap_get_vals_by_keys(keys, vals, &r);
int ret = operate(oid, &op, &bl);
if (ret < 0)
return ret;
return r;
}
int librados::IoCtx::omap_set(const std::string& oid,
const map<string, bufferlist>& m)
{
ObjectWriteOperation op;
op.omap_set(m);
return operate(oid, &op);
}
int librados::IoCtx::omap_set_header(const std::string& oid,
const bufferlist& bl)
{
ObjectWriteOperation op;
op.omap_set_header(bl);
return operate(oid, &op);
}
int librados::IoCtx::omap_clear(const std::string& oid)
{
ObjectWriteOperation op;
op.omap_clear();
return operate(oid, &op);
}
int librados::IoCtx::omap_rm_keys(const std::string& oid,
const std::set<std::string>& keys)
{
ObjectWriteOperation op;
op.omap_rm_keys(keys);
return operate(oid, &op);
}
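// Illustrative usage sketch (assuming `ioctx` is an open IoCtx): write two
// omap keys, then read them back with omap_get_vals2().
//
// std::map<std::string, bufferlist> kv;
// kv["a"].append("1");
// kv["b"].append("2");
// ioctx.omap_set("obj", kv);
//
// std::map<std::string, bufferlist> out;
// bool more = false;
// ioctx.omap_get_vals2("obj", "", 10, &out, &more);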
int librados::IoCtx::operate(const std::string& oid, librados::ObjectWriteOperation *o)
{
object_t obj(oid);
if (unlikely(!o->impl))
return -EINVAL;
return io_ctx_impl->operate(obj, &o->impl->o, (ceph::real_time *)o->impl->prt);
}
int librados::IoCtx::operate(const std::string& oid, librados::ObjectWriteOperation *o, int flags)
{
object_t obj(oid);
if (unlikely(!o->impl))
return -EINVAL;
return io_ctx_impl->operate(obj, &o->impl->o, (ceph::real_time *)o->impl->prt, translate_flags(flags));
}
int librados::IoCtx::operate(const std::string& oid, librados::ObjectReadOperation *o, bufferlist *pbl)
{
object_t obj(oid);
if (unlikely(!o->impl))
return -EINVAL;
return io_ctx_impl->operate_read(obj, &o->impl->o, pbl);
}
int librados::IoCtx::operate(const std::string& oid, librados::ObjectReadOperation *o, bufferlist *pbl, int flags)
{
object_t obj(oid);
if (unlikely(!o->impl))
return -EINVAL;
return io_ctx_impl->operate_read(obj, &o->impl->o, pbl, translate_flags(flags));
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectWriteOperation *o)
{
object_t obj(oid);
if (unlikely(!o->impl))
return -EINVAL;
return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
io_ctx_impl->snapc, o->impl->prt, 0);
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
ObjectWriteOperation *o, int flags)
{
object_t obj(oid);
if (unlikely(!o->impl))
return -EINVAL;
return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
io_ctx_impl->snapc, o->impl->prt,
translate_flags(flags));
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectWriteOperation *o,
snap_t snap_seq, std::vector<snap_t>& snaps)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
vector<snapid_t> snv;
snv.resize(snaps.size());
for (size_t i = 0; i < snaps.size(); ++i)
snv[i] = snaps[i];
SnapContext snapc(snap_seq, snv);
return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
snapc, o->impl->prt, 0);
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectWriteOperation *o,
snap_t snap_seq, std::vector<snap_t>& snaps,
const blkin_trace_info *trace_info)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
vector<snapid_t> snv;
snv.resize(snaps.size());
for (size_t i = 0; i < snaps.size(); ++i)
snv[i] = snaps[i];
SnapContext snapc(snap_seq, snv);
return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
snapc, o->impl->prt, 0, trace_info);
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectWriteOperation *o,
snap_t snap_seq, std::vector<snap_t>& snaps, int flags,
const blkin_trace_info *trace_info)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
vector<snapid_t> snv;
snv.resize(snaps.size());
for (size_t i = 0; i < snaps.size(); ++i)
snv[i] = snaps[i];
SnapContext snapc(snap_seq, snv);
return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc, snapc, o->impl->prt,
translate_flags(flags), trace_info);
}
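// Illustrative usage sketch (assuming an open IoCtx `ioctx`, a connected
// Rados `cluster` and a filled bufferlist `bl`): batch create + write_full
// into one atomic compound op and submit it asynchronously.
//
// librados::ObjectWriteOperation op;
// op.create(false); // do not fail if the object already exists
// op.write_full(bl);
// librados::AioCompletion *c = cluster.aio_create_completion();
// ioctx.aio_operate("obj", c, &op);
// c->wait_for_complete();
// int r = c->get_return_value();
// c->release();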
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectReadOperation *o,
bufferlist *pbl)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
0, pbl);
}
// deprecated
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectReadOperation *o,
snap_t snapid_unused_deprecated,
int flags, bufferlist *pbl)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
int op_flags = 0;
if (flags & OPERATION_BALANCE_READS)
op_flags |= CEPH_OSD_FLAG_BALANCE_READS;
if (flags & OPERATION_LOCALIZE_READS)
op_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
if (flags & OPERATION_ORDER_READS_WRITES)
op_flags |= CEPH_OSD_FLAG_RWORDERED;
return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
op_flags, pbl);
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectReadOperation *o,
int flags, bufferlist *pbl)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
translate_flags(flags), pbl);
}
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
librados::ObjectReadOperation *o,
int flags, bufferlist *pbl, const blkin_trace_info *trace_info)
{
if (unlikely(!o->impl))
return -EINVAL;
object_t obj(oid);
return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
translate_flags(flags), pbl, trace_info);
}
void librados::IoCtx::snap_set_read(snap_t seq)
{
io_ctx_impl->set_snap_read(seq);
}
int librados::IoCtx::selfmanaged_snap_set_write_ctx(snap_t seq, vector<snap_t>& snaps)
{
vector<snapid_t> snv;
snv.resize(snaps.size());
for (unsigned i=0; i<snaps.size(); i++)
snv[i] = snaps[i];
return io_ctx_impl->set_snap_write_context(seq, snv);
}
int librados::IoCtx::snap_create(const char *snapname)
{
return io_ctx_impl->snap_create(snapname);
}
int librados::IoCtx::snap_lookup(const char *name, snap_t *snapid)
{
return io_ctx_impl->snap_lookup(name, snapid);
}
int librados::IoCtx::snap_get_stamp(snap_t snapid, time_t *t)
{
return io_ctx_impl->snap_get_stamp(snapid, t);
}
int librados::IoCtx::snap_get_name(snap_t snapid, std::string *s)
{
return io_ctx_impl->snap_get_name(snapid, s);
}
int librados::IoCtx::snap_remove(const char *snapname)
{
return io_ctx_impl->snap_remove(snapname);
}
int librados::IoCtx::snap_list(std::vector<snap_t> *snaps)
{
return io_ctx_impl->snap_list(snaps);
}
int librados::IoCtx::snap_rollback(const std::string& oid, const char *snapname)
{
return io_ctx_impl->rollback(oid, snapname);
}
// Deprecated name kept for backward compatibility
int librados::IoCtx::rollback(const std::string& oid, const char *snapname)
{
return snap_rollback(oid, snapname);
}
int librados::IoCtx::selfmanaged_snap_create(uint64_t *snapid)
{
return io_ctx_impl->selfmanaged_snap_create(snapid);
}
void librados::IoCtx::aio_selfmanaged_snap_create(uint64_t *snapid,
AioCompletion *c)
{
io_ctx_impl->aio_selfmanaged_snap_create(snapid, c->pc);
}
int librados::IoCtx::selfmanaged_snap_remove(uint64_t snapid)
{
return io_ctx_impl->selfmanaged_snap_remove(snapid);
}
void librados::IoCtx::aio_selfmanaged_snap_remove(uint64_t snapid,
AioCompletion *c)
{
io_ctx_impl->aio_selfmanaged_snap_remove(snapid, c->pc);
}
int librados::IoCtx::selfmanaged_snap_rollback(const std::string& oid, uint64_t snapid)
{
return io_ctx_impl->selfmanaged_snap_rollback_object(oid,
io_ctx_impl->snapc,
snapid);
}
int librados::IoCtx::lock_exclusive(const std::string &oid, const std::string &name,
const std::string &cookie,
const std::string &description,
struct timeval * duration, uint8_t flags)
{
utime_t dur = utime_t();
if (duration)
dur.set_from_timeval(duration);
return rados::cls::lock::lock(this, oid, name, ClsLockType::EXCLUSIVE, cookie, "",
description, dur, flags);
}
int librados::IoCtx::lock_shared(const std::string &oid, const std::string &name,
const std::string &cookie, const std::string &tag,
const std::string &description,
struct timeval * duration, uint8_t flags)
{
utime_t dur = utime_t();
if (duration)
dur.set_from_timeval(duration);
return rados::cls::lock::lock(this, oid, name, ClsLockType::SHARED, cookie, tag,
description, dur, flags);
}
int librados::IoCtx::unlock(const std::string &oid, const std::string &name,
const std::string &cookie)
{
return rados::cls::lock::unlock(this, oid, name, cookie);
}
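// Illustrative usage sketch (assuming `ioctx` is an open IoCtx): take an
// exclusive advisory lock with a 30 second expiry, then release it.
//
// struct timeval dur = {30, 0};
// int r = ioctx.lock_exclusive("obj", "mylock", "cookie-1",
// "example lock", &dur, 0);
// if (r == 0)
// ioctx.unlock("obj", "mylock", "cookie-1");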
struct AioUnlockCompletion : public librados::ObjectOperationCompletion {
librados::AioCompletionImpl *completion;
AioUnlockCompletion(librados::AioCompletion *c) : completion(c->pc) {
completion->get();
};
void handle_completion(int r, bufferlist& outbl) override {
rados_callback_t cb = completion->callback_complete;
void *cb_arg = completion->callback_complete_arg;
cb(completion, cb_arg);
completion->lock.lock();
completion->callback_complete = NULL;
completion->cond.notify_all();
completion->put_unlock();
}
};
int librados::IoCtx::aio_unlock(const std::string &oid, const std::string &name,
const std::string &cookie, AioCompletion *c)
{
return rados::cls::lock::aio_unlock(this, oid, name, cookie, c);
}
int librados::IoCtx::break_lock(const std::string &oid, const std::string &name,
const std::string &client, const std::string &cookie)
{
entity_name_t locker;
if (!locker.parse(client))
return -EINVAL;
return rados::cls::lock::break_lock(this, oid, name, cookie, locker);
}
int librados::IoCtx::list_lockers(const std::string &oid, const std::string &name,
int *exclusive,
std::string *tag,
std::list<librados::locker_t> *lockers)
{
std::list<librados::locker_t> tmp_lockers;
map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t> rados_lockers;
std::string tmp_tag;
ClsLockType tmp_type;
int r = rados::cls::lock::get_lock_info(this, oid, name, &rados_lockers, &tmp_type, &tmp_tag);
if (r < 0)
return r;
map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t>::iterator map_it;
for (map_it = rados_lockers.begin(); map_it != rados_lockers.end(); ++map_it) {
librados::locker_t locker;
locker.client = stringify(map_it->first.locker);
locker.cookie = map_it->first.cookie;
locker.address = stringify(map_it->second.addr);
tmp_lockers.push_back(locker);
}
if (lockers)
*lockers = tmp_lockers;
if (tag)
*tag = tmp_tag;
if (exclusive) {
if (tmp_type == ClsLockType::EXCLUSIVE)
*exclusive = 1;
else
*exclusive = 0;
}
return tmp_lockers.size();
}
librados::NObjectIterator librados::IoCtx::nobjects_begin(
const bufferlist &filter)
{
rados_list_ctx_t listh;
rados_nobjects_list_open(io_ctx_impl, &listh);
NObjectIterator iter((ObjListCtx*)listh);
if (filter.length() > 0) {
iter.set_filter(filter);
}
iter.get_next();
return iter;
}
librados::NObjectIterator librados::IoCtx::nobjects_begin(
uint32_t pos, const bufferlist &filter)
{
rados_list_ctx_t listh;
rados_nobjects_list_open(io_ctx_impl, &listh);
NObjectIterator iter((ObjListCtx*)listh);
if (filter.length() > 0) {
iter.set_filter(filter);
}
iter.seek(pos);
return iter;
}
librados::NObjectIterator librados::IoCtx::nobjects_begin(
const ObjectCursor& cursor, const bufferlist &filter)
{
rados_list_ctx_t listh;
rados_nobjects_list_open(io_ctx_impl, &listh);
NObjectIterator iter((ObjListCtx*)listh);
if (filter.length() > 0) {
iter.set_filter(filter);
}
iter.seek(cursor);
return iter;
}
const librados::NObjectIterator& librados::IoCtx::nobjects_end() const
{
return NObjectIterator::__EndObjectIterator;
}
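// Illustrative usage sketch (assuming `ioctx` is an open IoCtx; the filter
// argument defaults to an empty bufferlist in the public header): list
// every object visible in the current namespace.
//
// for (auto it = ioctx.nobjects_begin(); it != ioctx.nobjects_end(); ++it)
// std::cout << it->get_oid() << "\n";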
int librados::IoCtx::hit_set_list(uint32_t hash, AioCompletion *c,
std::list< std::pair<time_t, time_t> > *pls)
{
return io_ctx_impl->hit_set_list(hash, c->pc, pls);
}
int librados::IoCtx::hit_set_get(uint32_t hash, AioCompletion *c, time_t stamp,
bufferlist *pbl)
{
return io_ctx_impl->hit_set_get(hash, c->pc, stamp, pbl);
}
uint64_t librados::IoCtx::get_last_version()
{
return io_ctx_impl->last_version();
}
int librados::IoCtx::aio_read(const std::string& oid, librados::AioCompletion *c,
bufferlist *pbl, size_t len, uint64_t off)
{
return io_ctx_impl->aio_read(oid, c->pc, pbl, len, off,
io_ctx_impl->snap_seq);
}
int librados::IoCtx::aio_read(const std::string& oid, librados::AioCompletion *c,
bufferlist *pbl, size_t len, uint64_t off,
uint64_t snapid)
{
return io_ctx_impl->aio_read(oid, c->pc, pbl, len, off, snapid);
}
int librados::IoCtx::aio_exec(const std::string& oid,
librados::AioCompletion *c, const char *cls,
const char *method, bufferlist& inbl,
bufferlist *outbl)
{
object_t obj(oid);
return io_ctx_impl->aio_exec(obj, c->pc, cls, method, inbl, outbl);
}
int librados::IoCtx::aio_cmpext(const std::string& oid,
librados::AioCompletion *c,
uint64_t off,
bufferlist& cmp_bl)
{
return io_ctx_impl->aio_cmpext(oid, c->pc, off, cmp_bl);
}
int librados::IoCtx::aio_sparse_read(const std::string& oid, librados::AioCompletion *c,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
size_t len, uint64_t off)
{
return io_ctx_impl->aio_sparse_read(oid, c->pc,
m, data_bl, len, off,
io_ctx_impl->snap_seq);
}
int librados::IoCtx::aio_sparse_read(const std::string& oid, librados::AioCompletion *c,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
size_t len, uint64_t off, uint64_t snapid)
{
return io_ctx_impl->aio_sparse_read(oid, c->pc,
m, data_bl, len, off, snapid);
}
int librados::IoCtx::aio_write(const std::string& oid, librados::AioCompletion *c,
const bufferlist& bl, size_t len, uint64_t off)
{
return io_ctx_impl->aio_write(oid, c->pc, bl, len, off);
}
int librados::IoCtx::aio_append(const std::string& oid, librados::AioCompletion *c,
const bufferlist& bl, size_t len)
{
return io_ctx_impl->aio_append(oid, c->pc, bl, len);
}
int librados::IoCtx::aio_write_full(const std::string& oid, librados::AioCompletion *c,
const bufferlist& bl)
{
object_t obj(oid);
return io_ctx_impl->aio_write_full(obj, c->pc, bl);
}
int librados::IoCtx::aio_writesame(const std::string& oid, librados::AioCompletion *c,
const bufferlist& bl, size_t write_len,
uint64_t off)
{
return io_ctx_impl->aio_writesame(oid, c->pc, bl, write_len, off);
}
int librados::IoCtx::aio_remove(const std::string& oid, librados::AioCompletion *c)
{
return io_ctx_impl->aio_remove(oid, c->pc);
}
int librados::IoCtx::aio_remove(const std::string& oid, librados::AioCompletion *c, int flags)
{
return io_ctx_impl->aio_remove(oid, c->pc, flags);
}
int librados::IoCtx::aio_flush_async(librados::AioCompletion *c)
{
io_ctx_impl->flush_aio_writes_async(c->pc);
return 0;
}
int librados::IoCtx::aio_flush()
{
io_ctx_impl->flush_aio_writes();
return 0;
}
struct AioGetxattrDataPP {
AioGetxattrDataPP(librados::AioCompletionImpl *c, bufferlist *_bl) :
bl(_bl), completion(c) {}
bufferlist *bl;
struct librados::CB_AioCompleteAndSafe completion;
};
static void rados_aio_getxattr_completepp(rados_completion_t c, void *arg) {
AioGetxattrDataPP *cdata = reinterpret_cast<AioGetxattrDataPP*>(arg);
int rc = rados_aio_get_return_value(c);
if (rc >= 0) {
rc = cdata->bl->length();
}
cdata->completion(rc);
delete cdata;
}
int librados::IoCtx::aio_getxattr(const std::string& oid, librados::AioCompletion *c,
const char *name, bufferlist& bl)
{
// create data object to be passed to async callback
AioGetxattrDataPP *cdata = new AioGetxattrDataPP(c->pc, &bl);
if (!cdata) {
return -ENOMEM;
}
// create completion callback
librados::AioCompletionImpl *comp = new librados::AioCompletionImpl;
comp->set_complete_callback(cdata, rados_aio_getxattr_completepp);
// call actual getxattr from IoCtxImpl
object_t obj(oid);
return io_ctx_impl->aio_getxattr(obj, comp, name, bl);
}
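// Illustrative usage sketch (assuming an open IoCtx `ioctx` and a connected
// Rados `cluster`): fetch an xattr asynchronously; on success the
// completion's return value is the attribute length.
//
// bufferlist bl;
// librados::AioCompletion *c = cluster.aio_create_completion();
// ioctx.aio_getxattr("obj", c, "user.attr", bl);
// c->wait_for_complete();
// int len = c->get_return_value();
// c->release();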
int librados::IoCtx::aio_getxattrs(const std::string& oid, AioCompletion *c,
map<std::string, bufferlist>& attrset)
{
object_t obj(oid);
return io_ctx_impl->aio_getxattrs(obj, c->pc, attrset);
}
int librados::IoCtx::aio_setxattr(const std::string& oid, AioCompletion *c,
const char *name, bufferlist& bl)
{
object_t obj(oid);
return io_ctx_impl->aio_setxattr(obj, c->pc, name, bl);
}
int librados::IoCtx::aio_rmxattr(const std::string& oid, AioCompletion *c,
const char *name)
{
object_t obj(oid);
return io_ctx_impl->aio_rmxattr(obj, c->pc, name);
}
int librados::IoCtx::aio_stat(const std::string& oid, librados::AioCompletion *c,
uint64_t *psize, time_t *pmtime)
{
object_t obj(oid);
return io_ctx_impl->aio_stat(obj, c->pc, psize, pmtime);
}
int librados::IoCtx::aio_cancel(librados::AioCompletion *c)
{
return io_ctx_impl->aio_cancel(c->pc);
}
int librados::IoCtx::watch(const string& oid, uint64_t ver, uint64_t *cookie,
librados::WatchCtx *ctx)
{
object_t obj(oid);
return io_ctx_impl->watch(obj, cookie, ctx, NULL);
}
int librados::IoCtx::watch2(const string& oid, uint64_t *cookie,
librados::WatchCtx2 *ctx2)
{
object_t obj(oid);
return io_ctx_impl->watch(obj, cookie, NULL, ctx2);
}
int librados::IoCtx::watch3(const string& oid, uint64_t *cookie,
librados::WatchCtx2 *ctx2, uint32_t timeout)
{
object_t obj(oid);
return io_ctx_impl->watch(obj, cookie, NULL, ctx2, timeout);
}
int librados::IoCtx::aio_watch(const string& oid, AioCompletion *c,
uint64_t *cookie,
librados::WatchCtx2 *ctx2)
{
object_t obj(oid);
return io_ctx_impl->aio_watch(obj, c->pc, cookie, NULL, ctx2);
}
int librados::IoCtx::aio_watch2(const string& oid, AioCompletion *c,
uint64_t *cookie,
librados::WatchCtx2 *ctx2,
uint32_t timeout)
{
object_t obj(oid);
return io_ctx_impl->aio_watch(obj, c->pc, cookie, NULL, ctx2, timeout);
}
int librados::IoCtx::unwatch(const string& oid, uint64_t handle)
{
return io_ctx_impl->unwatch(handle);
}
int librados::IoCtx::unwatch2(uint64_t handle)
{
return io_ctx_impl->unwatch(handle);
}
int librados::IoCtx::aio_unwatch(uint64_t handle, AioCompletion *c)
{
return io_ctx_impl->aio_unwatch(handle, c->pc);
}
int librados::IoCtx::watch_check(uint64_t handle)
{
return io_ctx_impl->watch_check(handle);
}
int librados::IoCtx::notify(const string& oid, uint64_t ver, bufferlist& bl)
{
object_t obj(oid);
return io_ctx_impl->notify(obj, bl, 0, NULL, NULL, NULL);
}
int librados::IoCtx::notify2(const string& oid, bufferlist& bl,
uint64_t timeout_ms, bufferlist *preplybl)
{
object_t obj(oid);
return io_ctx_impl->notify(obj, bl, timeout_ms, preplybl, NULL, NULL);
}
int librados::IoCtx::aio_notify(const string& oid, AioCompletion *c,
bufferlist& bl, uint64_t timeout_ms,
bufferlist *preplybl)
{
object_t obj(oid);
return io_ctx_impl->aio_notify(obj, c->pc, bl, timeout_ms, preplybl, NULL,
NULL);
}
void librados::IoCtx::decode_notify_response(bufferlist &bl,
std::vector<librados::notify_ack_t> *acks,
std::vector<librados::notify_timeout_t> *timeouts)
{
map<pair<uint64_t,uint64_t>,bufferlist> acked;
set<pair<uint64_t,uint64_t>> missed;
auto iter = bl.cbegin();
decode(acked, iter);
decode(missed, iter);
for (auto &[who, payload] : acked) {
acks->emplace_back(librados::notify_ack_t{who.first, who.second, payload});
}
for (auto &[notifier_id, cookie] : missed) {
timeouts->emplace_back(librados::notify_timeout_t{notifier_id, cookie});
}
}
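// Illustrative usage sketch (assuming an open IoCtx `ioctx` and a payload
// `bl`): send a notify, then unpack which watchers acknowledged and which
// timed out.
//
// bufferlist reply;
// int r = ioctx.notify2("obj", bl, 15000 /* ms */, &reply);
// if (r >= 0) {
// std::vector<librados::notify_ack_t> acks;
// std::vector<librados::notify_timeout_t> timeouts;
// ioctx.decode_notify_response(reply, &acks, &timeouts);
// }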
void librados::IoCtx::notify_ack(const std::string& o,
uint64_t notify_id, uint64_t handle,
bufferlist& bl)
{
io_ctx_impl->notify_ack(o, notify_id, handle, bl);
}
int librados::IoCtx::list_watchers(const std::string& oid,
std::list<obj_watch_t> *out_watchers)
{
ObjectReadOperation op;
int r;
op.list_watchers(out_watchers, &r);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0)
return ret;
return r;
}
int librados::IoCtx::list_snaps(const std::string& oid,
snap_set_t *out_snaps)
{
ObjectReadOperation op;
int r;
if (io_ctx_impl->snap_seq != CEPH_SNAPDIR)
return -EINVAL;
op.list_snaps(out_snaps, &r);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0)
return ret;
return r;
}
void librados::IoCtx::set_notify_timeout(uint32_t timeout)
{
io_ctx_impl->set_notify_timeout(timeout);
}
int librados::IoCtx::set_alloc_hint(const std::string& o,
uint64_t expected_object_size,
uint64_t expected_write_size)
{
object_t oid(o);
return io_ctx_impl->set_alloc_hint(oid, expected_object_size,
expected_write_size, 0);
}
int librados::IoCtx::set_alloc_hint2(const std::string& o,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
object_t oid(o);
return io_ctx_impl->set_alloc_hint(oid, expected_object_size,
expected_write_size, flags);
}
void librados::IoCtx::set_assert_version(uint64_t ver)
{
io_ctx_impl->set_assert_version(ver);
}
void librados::IoCtx::locator_set_key(const string& key)
{
io_ctx_impl->oloc.key = key;
}
void librados::IoCtx::set_namespace(const string& nspace)
{
io_ctx_impl->oloc.nspace = nspace;
}
std::string librados::IoCtx::get_namespace() const
{
return io_ctx_impl->oloc.nspace;
}
int64_t librados::IoCtx::get_id()
{
return io_ctx_impl->get_id();
}
uint32_t librados::IoCtx::get_object_hash_position(const std::string& oid)
{
uint32_t hash;
int r = io_ctx_impl->get_object_hash_position(oid, &hash);
if (r < 0)
hash = 0;
return hash;
}
uint32_t librados::IoCtx::get_object_pg_hash_position(const std::string& oid)
{
uint32_t hash;
int r = io_ctx_impl->get_object_pg_hash_position(oid, &hash);
if (r < 0)
hash = 0;
return hash;
}
int librados::IoCtx::get_object_hash_position2(
const std::string& oid, uint32_t *hash_position)
{
return io_ctx_impl->get_object_hash_position(oid, hash_position);
}
int librados::IoCtx::get_object_pg_hash_position2(
const std::string& oid, uint32_t *pg_hash_position)
{
return io_ctx_impl->get_object_pg_hash_position(oid, pg_hash_position);
}
librados::config_t librados::IoCtx::cct()
{
return (config_t)io_ctx_impl->client->cct;
}
librados::IoCtx::IoCtx(IoCtxImpl *io_ctx_impl_)
: io_ctx_impl(io_ctx_impl_)
{
}
void librados::IoCtx::set_osdmap_full_try()
{
io_ctx_impl->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}
void librados::IoCtx::unset_osdmap_full_try()
{
io_ctx_impl->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
bool librados::IoCtx::get_pool_full_try()
{
return (io_ctx_impl->extra_op_flags & CEPH_OSD_FLAG_FULL_TRY) != 0;
}
void librados::IoCtx::set_pool_full_try()
{
io_ctx_impl->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}
void librados::IoCtx::unset_pool_full_try()
{
io_ctx_impl->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
///////////////////////////// Rados //////////////////////////////
void librados::Rados::version(int *major, int *minor, int *extra)
{
rados_version(major, minor, extra);
}
librados::Rados::Rados() : client(NULL)
{
}
librados::Rados::Rados(IoCtx &ioctx)
{
client = ioctx.io_ctx_impl->client;
ceph_assert(client != NULL);
client->get();
}
librados::Rados::~Rados()
{
shutdown();
}
void librados::Rados::from_rados_t(rados_t cluster, Rados &rados) {
if (rados.client) {
rados.client->put();
}
rados.client = static_cast<RadosClient*>(cluster);
if (rados.client) {
rados.client->get();
}
}
int librados::Rados::init(const char * const id)
{
return rados_create((rados_t *)&client, id);
}
int librados::Rados::init2(const char * const name,
const char * const clustername, uint64_t flags)
{
return rados_create2((rados_t *)&client, clustername, name, flags);
}
int librados::Rados::init_with_context(config_t cct_)
{
return rados_create_with_context((rados_t *)&client, (rados_config_t)cct_);
}
int librados::Rados::connect()
{
return client->connect();
}
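// Illustrative usage sketch: the typical bring-up sequence. Passing a null
// path to conf_read_file() searches the default config locations.
//
// librados::Rados cluster;
// cluster.init2("client.admin", "ceph", 0);
// cluster.conf_read_file(nullptr);
// int r = cluster.connect();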
librados::config_t librados::Rados::cct()
{
return (config_t)client->cct;
}
int librados::Rados::watch_flush()
{
if (!client)
return -EINVAL;
return client->watch_flush();
}
int librados::Rados::aio_watch_flush(AioCompletion *c)
{
if (!client)
return -EINVAL;
return client->async_watch_flush(c->pc);
}
void librados::Rados::shutdown()
{
if (!client)
return;
if (client->put()) {
client->shutdown();
delete client;
client = NULL;
}
}
uint64_t librados::Rados::get_instance_id()
{
return client->get_instance_id();
}
int librados::Rados::get_min_compatible_osd(int8_t* require_osd_release)
{
return client->get_min_compatible_osd(require_osd_release);
}
int librados::Rados::get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client)
{
return client->get_min_compatible_client(min_compat_client,
require_min_compat_client);
}
int librados::Rados::conf_read_file(const char * const path) const
{
return rados_conf_read_file((rados_t)client, path);
}
int librados::Rados::conf_parse_argv(int argc, const char ** argv) const
{
return rados_conf_parse_argv((rados_t)client, argc, argv);
}
int librados::Rados::conf_parse_argv_remainder(int argc, const char ** argv,
const char ** remargv) const
{
return rados_conf_parse_argv_remainder((rados_t)client, argc, argv, remargv);
}
int librados::Rados::conf_parse_env(const char *name) const
{
return rados_conf_parse_env((rados_t)client, name);
}
int librados::Rados::conf_set(const char *option, const char *value)
{
return rados_conf_set((rados_t)client, option, value);
}
int librados::Rados::conf_get(const char *option, std::string &val)
{
char *str = NULL;
const auto& conf = client->cct->_conf;
int ret = conf.get_val(option, &str, -1);
if (ret) {
free(str);
return ret;
}
val = str;
free(str);
return 0;
}
int librados::Rados::service_daemon_register(
const std::string& service, ///< service name (e.g., 'rgw')
const std::string& name, ///< daemon name (e.g., 'gwfoo')
const std::map<std::string,std::string>& metadata) ///< static metadata about daemon
{
return client->service_daemon_register(service, name, metadata);
}
int librados::Rados::service_daemon_update_status(
std::map<std::string,std::string>&& status)
{
return client->service_daemon_update_status(std::move(status));
}
int librados::Rados::pool_create(const char *name)
{
string str(name);
return client->pool_create(str);
}
int librados::Rados::pool_create(const char *name, uint64_t auid)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create(str);
}
int librados::Rados::pool_create(const char *name, uint64_t auid, __u8 crush_rule)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create(str, crush_rule);
}
int librados::Rados::pool_create_with_rule(const char *name, __u8 crush_rule)
{
string str(name);
return client->pool_create(str, crush_rule);
}
int librados::Rados::pool_create_async(const char *name, PoolAsyncCompletion *c)
{
string str(name);
return client->pool_create_async(str, c->pc);
}
int librados::Rados::pool_create_async(const char *name, uint64_t auid, PoolAsyncCompletion *c)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create_async(str, c->pc);
}
int librados::Rados::pool_create_async(const char *name, uint64_t auid, __u8 crush_rule,
PoolAsyncCompletion *c)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create_async(str, c->pc, crush_rule);
}
int librados::Rados::pool_create_with_rule_async(
const char *name, __u8 crush_rule,
PoolAsyncCompletion *c)
{
string str(name);
return client->pool_create_async(str, c->pc, crush_rule);
}
int librados::Rados::pool_get_base_tier(int64_t pool_id, int64_t* base_tier)
{
tracepoint(librados, rados_pool_get_base_tier_enter, (rados_t)client, pool_id);
int retval = client->pool_get_base_tier(pool_id, base_tier);
tracepoint(librados, rados_pool_get_base_tier_exit, retval, *base_tier);
return retval;
}
int librados::Rados::pool_delete(const char *name)
{
return client->pool_delete(name);
}
int librados::Rados::pool_delete_async(const char *name, PoolAsyncCompletion *c)
{
return client->pool_delete_async(name, c->pc);
}
int librados::Rados::pool_list(std::list<std::string>& v)
{
std::list<std::pair<int64_t, std::string> > pools;
int r = client->pool_list(pools);
if (r < 0) {
return r;
}
v.clear();
for (std::list<std::pair<int64_t, std::string> >::iterator it = pools.begin();
it != pools.end(); ++it) {
v.push_back(it->second);
}
return 0;
}
int librados::Rados::pool_list2(std::list<std::pair<int64_t, std::string> >& v)
{
return client->pool_list(v);
}
int64_t librados::Rados::pool_lookup(const char *name)
{
return client->lookup_pool(name);
}
int librados::Rados::pool_reverse_lookup(int64_t id, std::string *name)
{
return client->pool_get_name(id, name, true);
}
int librados::Rados::mon_command(string cmd, const bufferlist& inbl,
bufferlist *outbl, string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
return client->mon_command(cmdvec, inbl, outbl, outs);
}
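// Illustrative usage sketch (assuming a connected Rados `cluster`): mon
// commands are JSON strings, as accepted by the mon command interface.
//
// bufferlist inbl, outbl;
// std::string outs;
// int r = cluster.mon_command(
// "{\"prefix\": \"df\", \"format\": \"json\"}", inbl, &outbl, &outs);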
int librados::Rados::osd_command(int osdid, std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
return client->osd_command(osdid, cmdvec, inbl, outbl, outs);
}
int librados::Rados::mgr_command(std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
return client->mgr_command(cmdvec, inbl, outbl, outs);
}
int librados::Rados::pg_command(const char *pgstr, std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
pg_t pgid;
if (!pgid.parse(pgstr))
return -EINVAL;
return client->pg_command(pgid, cmdvec, inbl, outbl, outs);
}
int librados::Rados::ioctx_create(const char *name, IoCtx &io)
{
rados_ioctx_t p;
int ret = rados_ioctx_create((rados_t)client, name, &p);
if (ret)
return ret;
io.close();
io.io_ctx_impl = (IoCtxImpl*)p;
return 0;
}
int librados::Rados::ioctx_create2(int64_t pool_id, IoCtx &io)
{
rados_ioctx_t p;
int ret = rados_ioctx_create2((rados_t)client, pool_id, &p);
if (ret)
return ret;
io.close();
io.io_ctx_impl = (IoCtxImpl*)p;
return 0;
}
void librados::Rados::test_blocklist_self(bool set)
{
client->blocklist_self(set);
}
int librados::Rados::get_pool_stats(std::list<string>& v,
stats_map& result)
{
map<string,::pool_stat_t> rawresult;
bool per_pool = false;
int r = client->get_pool_stats(v, &rawresult, &per_pool);
for (map<string,::pool_stat_t>::iterator p = rawresult.begin();
p != rawresult.end();
++p) {
pool_stat_t& pv = result[p->first];
auto& pstat = p->second;
store_statfs_t &statfs = pstat.store_stats;
uint64_t allocated_bytes = pstat.get_allocated_data_bytes(per_pool) +
pstat.get_allocated_omap_bytes(per_pool);
// FIXME: raw_used_rate is unknown, so use 1.0 here,
// meaning we keep the net amount aggregated over all replicas.
// Not a big deal so far since this field isn't exposed.
uint64_t user_bytes = pstat.get_user_data_bytes(1.0, per_pool) +
pstat.get_user_omap_bytes(1.0, per_pool);
object_stat_sum_t *sum = &p->second.stats.sum;
pv.num_kb = shift_round_up(allocated_bytes, 10);
pv.num_bytes = allocated_bytes;
pv.num_objects = sum->num_objects;
pv.num_object_clones = sum->num_object_clones;
pv.num_object_copies = sum->num_object_copies;
pv.num_objects_missing_on_primary = sum->num_objects_missing_on_primary;
pv.num_objects_unfound = sum->num_objects_unfound;
pv.num_objects_degraded = sum->num_objects_degraded;
pv.num_rd = sum->num_rd;
pv.num_rd_kb = sum->num_rd_kb;
pv.num_wr = sum->num_wr;
pv.num_wr_kb = sum->num_wr_kb;
pv.num_user_bytes = user_bytes;
pv.compressed_bytes_orig = statfs.data_compressed_original;
pv.compressed_bytes = statfs.data_compressed;
pv.compressed_bytes_alloc = statfs.data_compressed_allocated;
}
return r;
}
int librados::Rados::get_pool_stats(std::list<string>& v,
std::map<string, stats_map>& result)
{
stats_map m;
int r = get_pool_stats(v, m);
if (r < 0)
return r;
for (map<string,pool_stat_t>::iterator p = m.begin();
p != m.end();
++p) {
result[p->first][string()] = p->second;
}
return r;
}
int librados::Rados::get_pool_stats(std::list<string>& v,
string& category, // unused
std::map<string, stats_map>& result)
{
return -EOPNOTSUPP;
}
bool librados::Rados::get_pool_is_selfmanaged_snaps_mode(const std::string& pool)
{
return client->get_pool_is_selfmanaged_snaps_mode(pool);
}
int librados::Rados::cluster_stat(cluster_stat_t& result)
{
ceph_statfs stats;
int r = client->get_fs_stats(stats);
result.kb = stats.kb;
result.kb_used = stats.kb_used;
result.kb_avail = stats.kb_avail;
result.num_objects = stats.num_objects;
return r;
}
int librados::Rados::cluster_fsid(string *fsid)
{
return client->get_fsid(fsid);
}
namespace librados {
struct PlacementGroupImpl {
pg_t pgid;
};
PlacementGroup::PlacementGroup()
: impl{new PlacementGroupImpl}
{}
PlacementGroup::PlacementGroup(const PlacementGroup& pg)
: impl{new PlacementGroupImpl}
{
impl->pgid = pg.impl->pgid;
}
PlacementGroup::~PlacementGroup()
{}
bool PlacementGroup::parse(const char* s)
{
return impl->pgid.parse(s);
}
}
std::ostream& librados::operator<<(std::ostream& out,
const librados::PlacementGroup& pg)
{
return out << pg.impl->pgid;
}
int librados::Rados::get_inconsistent_pgs(int64_t pool_id,
std::vector<PlacementGroup>* pgs)
{
std::vector<string> pgids;
if (auto ret = client->get_inconsistent_pgs(pool_id, &pgids); ret) {
return ret;
}
for (const auto& pgid : pgids) {
librados::PlacementGroup pg;
if (!pg.parse(pgid.c_str())) {
return -EINVAL;
}
pgs->emplace_back(pg);
}
return 0;
}
int librados::Rados::get_inconsistent_objects(const PlacementGroup& pg,
const object_id_t &start_after,
unsigned max_return,
AioCompletion *c,
std::vector<inconsistent_obj_t>* objects,
uint32_t* interval)
{
IoCtx ioctx;
const pg_t pgid = pg.impl->pgid;
int r = ioctx_create2(pgid.pool(), ioctx);
if (r < 0) {
return r;
}
return ioctx.io_ctx_impl->get_inconsistent_objects(pgid,
start_after,
max_return,
c->pc,
objects,
interval);
}
int librados::Rados::get_inconsistent_snapsets(const PlacementGroup& pg,
const object_id_t &start_after,
unsigned max_return,
AioCompletion *c,
std::vector<inconsistent_snapset_t>* snapsets,
uint32_t* interval)
{
IoCtx ioctx;
const pg_t pgid = pg.impl->pgid;
int r = ioctx_create2(pgid.pool(), ioctx);
if (r < 0) {
return r;
}
return ioctx.io_ctx_impl->get_inconsistent_snapsets(pgid,
start_after,
max_return,
c->pc,
snapsets,
interval);
}
int librados::Rados::wait_for_latest_osdmap()
{
return client->wait_for_latest_osdmap();
}
int librados::Rados::blocklist_add(const std::string& client_address,
uint32_t expire_seconds)
{
return client->blocklist_add(client_address, expire_seconds);
}
std::string librados::Rados::get_addrs() const {
return client->get_addrs();
}
librados::PoolAsyncCompletion *librados::Rados::pool_async_create_completion()
{
PoolAsyncCompletionImpl *c = new PoolAsyncCompletionImpl;
return new PoolAsyncCompletion(c);
}
librados::AioCompletion *librados::Rados::aio_create_completion()
{
AioCompletionImpl *c = new AioCompletionImpl;
return new AioCompletion(c);
}
librados::AioCompletion *librados::Rados::aio_create_completion(void *cb_arg,
callback_t cb_complete,
callback_t cb_safe)
{
AioCompletionImpl *c;
int r = rados_aio_create_completion(cb_arg, cb_complete, cb_safe, (void**)&c);
ceph_assert(r == 0);
return new AioCompletion(c);
}
librados::AioCompletion *librados::Rados::aio_create_completion(void *cb_arg,
callback_t cb_complete)
{
AioCompletionImpl *c;
int r = rados_aio_create_completion2(cb_arg, cb_complete, (void**)&c);
ceph_assert(r == 0);
return new AioCompletion(c);
}
librados::ObjectOperation::ObjectOperation() : impl(new ObjectOperationImpl) {}
librados::ObjectOperation::ObjectOperation(ObjectOperation&& rhs)
: impl(rhs.impl) {
rhs.impl = nullptr;
}
librados::ObjectOperation&
librados::ObjectOperation::operator =(ObjectOperation&& rhs) {
delete impl;
impl = rhs.impl;
rhs.impl = nullptr;
return *this;
}
librados::ObjectOperation::~ObjectOperation() {
delete impl;
}
///////////////////////////// ListObject //////////////////////////////
librados::ListObject::ListObject() : impl(NULL)
{
}
librados::ListObject::ListObject(librados::ListObjectImpl *i): impl(i)
{
}
librados::ListObject::ListObject(const ListObject& rhs)
{
if (rhs.impl == NULL) {
impl = NULL;
return;
}
impl = new ListObjectImpl();
*impl = *(rhs.impl);
}
librados::ListObject& librados::ListObject::operator=(const ListObject& rhs)
{
if (rhs.impl == NULL) {
delete impl;
impl = NULL;
return *this;
}
if (impl == NULL)
impl = new ListObjectImpl();
*impl = *(rhs.impl);
return *this;
}
librados::ListObject::~ListObject()
{
if (impl)
delete impl;
impl = NULL;
}
const std::string& librados::ListObject::get_nspace() const
{
return impl->get_nspace();
}
const std::string& librados::ListObject::get_oid() const
{
return impl->get_oid();
}
const std::string& librados::ListObject::get_locator() const
{
return impl->get_locator();
}
std::ostream& librados::operator<<(std::ostream& out, const librados::ListObject& lop)
{
out << *(lop.impl);
return out;
}
librados::ObjectCursor::ObjectCursor()
{
c_cursor = (rados_object_list_cursor)new hobject_t();
}
librados::ObjectCursor::~ObjectCursor()
{
hobject_t *h = (hobject_t *)c_cursor;
delete h;
}
librados::ObjectCursor::ObjectCursor(rados_object_list_cursor c)
{
if (!c) {
c_cursor = nullptr;
} else {
c_cursor = (rados_object_list_cursor)new hobject_t(*(hobject_t *)c);
}
}
librados::ObjectCursor& librados::ObjectCursor::operator=(const librados::ObjectCursor& rhs)
{
if (this == &rhs)
return *this;
delete (hobject_t*)c_cursor; // release any cursor we already own
if (rhs.c_cursor != nullptr) {
hobject_t *h = (hobject_t*)rhs.c_cursor;
c_cursor = (rados_object_list_cursor)(new hobject_t(*h));
} else {
c_cursor = nullptr;
}
return *this;
}
bool librados::ObjectCursor::operator<(const librados::ObjectCursor &rhs) const
{
const hobject_t lhs_hobj = (c_cursor == nullptr) ? hobject_t() : *((hobject_t*)c_cursor);
const hobject_t rhs_hobj = (rhs.c_cursor == nullptr) ? hobject_t() : *((hobject_t*)(rhs.c_cursor));
return lhs_hobj < rhs_hobj;
}
bool librados::ObjectCursor::operator==(const librados::ObjectCursor &rhs) const
{
const hobject_t lhs_hobj = (c_cursor == nullptr) ? hobject_t() : *((hobject_t*)c_cursor);
const hobject_t rhs_hobj = (rhs.c_cursor == nullptr) ? hobject_t() : *((hobject_t*)(rhs.c_cursor));
return cmp(lhs_hobj, rhs_hobj) == 0;
}
librados::ObjectCursor::ObjectCursor(const librados::ObjectCursor &rhs)
: c_cursor(nullptr) // start empty so operator= can safely delete
{
*this = rhs;
}
librados::ObjectCursor librados::IoCtx::object_list_begin()
{
hobject_t *h = new hobject_t(io_ctx_impl->objecter->enumerate_objects_begin());
ObjectCursor oc;
oc.set((rados_object_list_cursor)h);
return oc;
}
librados::ObjectCursor librados::IoCtx::object_list_end()
{
hobject_t *h = new hobject_t(io_ctx_impl->objecter->enumerate_objects_end());
librados::ObjectCursor oc;
oc.set((rados_object_list_cursor)h);
return oc;
}
void librados::ObjectCursor::set(rados_object_list_cursor c)
{
delete (hobject_t*)c_cursor;
c_cursor = c;
}
string librados::ObjectCursor::to_str() const
{
stringstream ss;
ss << *(hobject_t *)c_cursor;
return ss.str();
}
bool librados::ObjectCursor::from_str(const string& s)
{
if (s.empty()) {
*(hobject_t *)c_cursor = hobject_t();
return true;
}
return ((hobject_t *)c_cursor)->parse(s);
}
CEPH_RADOS_API std::ostream& librados::operator<<(std::ostream& os, const librados::ObjectCursor& oc)
{
if (oc.c_cursor) {
os << *(hobject_t *)oc.c_cursor;
} else {
os << hobject_t();
}
return os;
}
bool librados::IoCtx::object_list_is_end(const ObjectCursor &oc)
{
hobject_t *h = (hobject_t *)oc.c_cursor;
return h->is_max();
}
int librados::IoCtx::object_list(const ObjectCursor &start,
const ObjectCursor &finish,
const size_t result_item_count,
const bufferlist &filter,
std::vector<ObjectItem> *result,
ObjectCursor *next)
{
ceph_assert(result != nullptr);
ceph_assert(next != nullptr);
result->clear();
ceph::async::waiter<boost::system::error_code,
std::vector<librados::ListObjectImpl>,
hobject_t> w;
io_ctx_impl->objecter->enumerate_objects<librados::ListObjectImpl>(
io_ctx_impl->poolid,
io_ctx_impl->oloc.nspace,
*((hobject_t*)start.c_cursor),
*((hobject_t*)finish.c_cursor),
result_item_count,
filter,
w);
auto [ec, obj_result, next_hash] = w.wait();
if (ec) {
next->set((rados_object_list_cursor)(new hobject_t(hobject_t::get_max())));
return ceph::from_error_code(ec);
}
next->set((rados_object_list_cursor)(new hobject_t(next_hash)));
for (auto i = obj_result.begin();
i != obj_result.end(); ++i) {
ObjectItem oi;
oi.oid = i->oid;
oi.nspace = i->nspace;
oi.locator = i->locator;
result->push_back(oi);
}
return obj_result.size();
}
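// Illustrative usage sketch (assuming `ioctx` is an open IoCtx): walk the
// pool in batches of 100, feeding the returned cursor back in as the next
// starting point.
//
// librados::ObjectCursor c = ioctx.object_list_begin();
// const librados::ObjectCursor end = ioctx.object_list_end();
// while (!ioctx.object_list_is_end(c)) {
// std::vector<librados::ObjectItem> items;
// int n = ioctx.object_list(c, end, 100, {}, &items, &c);
// if (n < 0)
// break;
// }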
void librados::IoCtx::object_list_slice(
const ObjectCursor start,
const ObjectCursor finish,
const size_t n,
const size_t m,
ObjectCursor *split_start,
ObjectCursor *split_finish)
{
ceph_assert(split_start != nullptr);
ceph_assert(split_finish != nullptr);
io_ctx_impl->object_list_slice(
*((hobject_t*)(start.c_cursor)),
*((hobject_t*)(finish.c_cursor)),
n,
m,
(hobject_t*)(split_start->c_cursor),
(hobject_t*)(split_finish->c_cursor));
}
int librados::IoCtx::application_enable(const std::string& app_name,
bool force)
{
return io_ctx_impl->application_enable(app_name, force);
}
int librados::IoCtx::application_enable_async(const std::string& app_name,
bool force,
PoolAsyncCompletion *c)
{
io_ctx_impl->application_enable_async(app_name, force, c->pc);
return 0;
}
int librados::IoCtx::application_list(std::set<std::string> *app_names)
{
return io_ctx_impl->application_list(app_names);
}
int librados::IoCtx::application_metadata_get(const std::string& app_name,
const std::string &key,
std::string* value)
{
return io_ctx_impl->application_metadata_get(app_name, key, value);
}
int librados::IoCtx::application_metadata_set(const std::string& app_name,
const std::string &key,
const std::string& value)
{
return io_ctx_impl->application_metadata_set(app_name, key, value);
}
int librados::IoCtx::application_metadata_remove(const std::string& app_name,
const std::string &key)
{
return io_ctx_impl->application_metadata_remove(app_name, key);
}
int librados::IoCtx::application_metadata_list(const std::string& app_name,
std::map<std::string, std::string> *values)
{
return io_ctx_impl->application_metadata_list(app_name, values);
}
| 82,069 | 24.881425 | 140 |
cc
|
null |
ceph-main/src/librados/librados_tp.cc
|
#include "acconfig.h"
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#endif
| 210 | 20.1 | 40 |
cc
|
null |
ceph-main/src/librados/librados_util.cc
|
#include "librados_util.h"
uint8_t get_checksum_op_type(rados_checksum_type_t type) {
switch (type) {
case LIBRADOS_CHECKSUM_TYPE_XXHASH32:
return CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH32;
case LIBRADOS_CHECKSUM_TYPE_XXHASH64:
return CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH64;
case LIBRADOS_CHECKSUM_TYPE_CRC32C:
return CEPH_OSD_CHECKSUM_OP_TYPE_CRC32C;
default:
return -1;
}
}
int get_op_flags(int flags)
{
int rados_flags = 0;
if (flags & LIBRADOS_OP_FLAG_EXCL)
rados_flags |= CEPH_OSD_OP_FLAG_EXCL;
if (flags & LIBRADOS_OP_FLAG_FAILOK)
rados_flags |= CEPH_OSD_OP_FLAG_FAILOK;
if (flags & LIBRADOS_OP_FLAG_FADVISE_RANDOM)
rados_flags |= CEPH_OSD_OP_FLAG_FADVISE_RANDOM;
if (flags & LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL)
rados_flags |= CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL;
if (flags & LIBRADOS_OP_FLAG_FADVISE_WILLNEED)
rados_flags |= CEPH_OSD_OP_FLAG_FADVISE_WILLNEED;
if (flags & LIBRADOS_OP_FLAG_FADVISE_DONTNEED)
rados_flags |= CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
if (flags & LIBRADOS_OP_FLAG_FADVISE_NOCACHE)
rados_flags |= CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
return rados_flags;
}
int translate_flags(int flags)
{
int op_flags = 0;
if (flags & librados::OPERATION_BALANCE_READS)
op_flags |= CEPH_OSD_FLAG_BALANCE_READS;
if (flags & librados::OPERATION_LOCALIZE_READS)
op_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
if (flags & librados::OPERATION_ORDER_READS_WRITES)
op_flags |= CEPH_OSD_FLAG_RWORDERED;
if (flags & librados::OPERATION_IGNORE_CACHE)
op_flags |= CEPH_OSD_FLAG_IGNORE_CACHE;
if (flags & librados::OPERATION_SKIPRWLOCKS)
op_flags |= CEPH_OSD_FLAG_SKIPRWLOCKS;
if (flags & librados::OPERATION_IGNORE_OVERLAY)
op_flags |= CEPH_OSD_FLAG_IGNORE_OVERLAY;
if (flags & librados::OPERATION_FULL_TRY)
op_flags |= CEPH_OSD_FLAG_FULL_TRY;
if (flags & librados::OPERATION_FULL_FORCE)
op_flags |= CEPH_OSD_FLAG_FULL_FORCE;
if (flags & librados::OPERATION_IGNORE_REDIRECT)
op_flags |= CEPH_OSD_FLAG_IGNORE_REDIRECT;
if (flags & librados::OPERATION_ORDERSNAP)
op_flags |= CEPH_OSD_FLAG_ORDERSNAP;
if (flags & librados::OPERATION_RETURNVEC)
op_flags |= CEPH_OSD_FLAG_RETURNVEC;
return op_flags;
}
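// Illustrative usage sketch: convert public librados operation flags into
// the CEPH_OSD_FLAG_* bits expected by the OSD layer.
//
// int osd_flags = translate_flags(librados::OPERATION_BALANCE_READS |
// librados::OPERATION_FULL_TRY);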
| 2,228 | 33.828125 | 58 |
cc
|
null |
ceph-main/src/librados/librados_util.h
|
#include <cstdint>
#include "acconfig.h"
#include "include/rados/librados.h"
#include "IoCtxImpl.h"
#ifdef WITH_LTTNG
#include "tracing/librados.h"
#else
#define tracepoint(...)
#endif
uint8_t get_checksum_op_type(rados_checksum_type_t type);
int get_op_flags(int flags);
int translate_flags(int flags);
struct librados::ObjListCtx {
librados::IoCtxImpl dupctx;
librados::IoCtxImpl *ctx;
Objecter::NListContext *nlc;
bool legacy_list_api;
ObjListCtx(IoCtxImpl *c, Objecter::NListContext *nl, bool legacy=false)
: nlc(nl),
legacy_list_api(legacy) {
// Get our own private IoCtxImpl so that namespace setting isn't
// changed by caller between uses.
ctx = &dupctx;
dupctx.dup(*c);
}
~ObjListCtx() {
ctx = NULL;
delete nlc;
}
};
| 780 | 21.314286 | 73 |
h
|
null |
ceph-main/src/librados/snap_set_diff.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <vector>
#include "snap_set_diff.h"
#include "common/ceph_context.h"
#include "include/rados/librados.hpp"
#include "include/interval_set.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_rados
using namespace std;
/**
* calculate intervals/extents that vary between two snapshots
*/
void calc_snap_set_diff(CephContext *cct, const librados::snap_set_t& snap_set,
librados::snap_t start, librados::snap_t end,
interval_set<uint64_t> *diff, uint64_t *end_size,
bool *end_exists, librados::snap_t *clone_end_snap_id,
bool *whole_object)
{
ldout(cct, 10) << "calc_snap_set_diff start " << start << " end " << end
<< ", snap_set seq " << snap_set.seq << dendl;
bool saw_start = false;
uint64_t start_size = 0;
diff->clear();
*end_size = 0;
*end_exists = false;
*clone_end_snap_id = 0;
*whole_object = false;
for (vector<librados::clone_info_t>::const_iterator r = snap_set.clones.begin();
r != snap_set.clones.end();
) {
// make an interval, and hide the fact that the HEAD doesn't
// include itself in the snaps list
librados::snap_t a, b;
if (r->cloneid == librados::SNAP_HEAD) {
// head is valid starting from right after the last seen seq
a = snap_set.seq + 1;
b = librados::SNAP_HEAD;
} else if (r->snaps.empty()) {
ldout(cct, 1) << "clone " << r->cloneid
<< ": empty snaps, return whole object" << dendl;
diff->clear();
*whole_object = true;
return;
} else {
a = r->snaps[0];
// note: b might be < r->cloneid if a snap has been trimmed.
b = r->snaps[r->snaps.size()-1];
}
ldout(cct, 20) << " clone " << r->cloneid << " snaps " << r->snaps
<< " -> [" << a << "," << b << "]"
<< " size " << r->size << " overlap to next " << r->overlap << dendl;
if (b < start) {
// this is before start
++r;
continue;
}
if (!saw_start) {
if (start < a) {
ldout(cct, 20) << " start, after " << start << dendl;
// this means the object didn't exist at start
if (r->size)
diff->insert(0, r->size);
start_size = 0;
} else {
ldout(cct, 20) << " start" << dendl;
start_size = r->size;
}
saw_start = true;
}
*end_size = r->size;
if (end < a) {
ldout(cct, 20) << " past end " << end << ", end object does not exist" << dendl;
*end_exists = false;
diff->clear();
if (start_size) {
diff->insert(0, start_size);
}
break;
}
if (end <= b) {
ldout(cct, 20) << " end" << dendl;
*end_exists = true;
*clone_end_snap_id = b;
break;
}
// start with the max(this size, next size), and subtract off any
// overlap
const vector<pair<uint64_t, uint64_t> > *overlap = &r->overlap;
interval_set<uint64_t> diff_to_next;
uint64_t max_size = r->size;
++r;
if (r != snap_set.clones.end()) {
if (r->size > max_size)
max_size = r->size;
}
if (max_size)
diff_to_next.insert(0, max_size);
for (vector<pair<uint64_t, uint64_t> >::const_iterator p = overlap->begin();
p != overlap->end();
++p) {
diff_to_next.erase(p->first, p->second);
}
ldout(cct, 20) << " diff_to_next " << diff_to_next << dendl;
diff->union_of(diff_to_next);
ldout(cct, 20) << " diff now " << *diff << dendl;
}
}
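// Illustrative call sketch (assumptions: `cct` comes from the caller's
// context and `ss` was filled by IoCtx::list_snaps()): compute what changed
// between two snapshots of one object.
//
// interval_set<uint64_t> diff;
// uint64_t end_size = 0;
// bool end_exists = false, whole_object = false;
// librados::snap_t clone_end_snap_id = 0;
// calc_snap_set_diff(cct, ss, from_snap, to_snap, &diff, &end_size,
// &end_exists, &clone_end_snap_id, &whole_object);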
| 3,506 | 28.720339 | 86 |
cc
|
null |
ceph-main/src/librados/snap_set_diff.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_OSDC_SNAP_SET_DIFF_H
#define __CEPH_OSDC_SNAP_SET_DIFF_H
#include "include/common_fwd.h"
#include "include/rados/rados_types.hpp"
#include "include/interval_set.h"
void calc_snap_set_diff(CephContext *cct,
const librados::snap_set_t& snap_set,
librados::snap_t start, librados::snap_t end,
interval_set<uint64_t> *diff, uint64_t *end_size,
bool *end_exists, librados::snap_t *clone_end_snap_id,
bool *whole_object);
#endif
| 555 | 28.263158 | 70 |
h
|
null |
ceph-main/src/libradosstriper/MultiAioCompletionImpl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "libradosstriper/MultiAioCompletionImpl.h"
void libradosstriper::MultiAioCompletionImpl::complete_request(ssize_t r)
{
lock.lock();
if (rval >= 0) {
if (r < 0 && r != -EEXIST)
rval = r;
else if (r > 0)
rval += r;
}
ceph_assert(pending_complete);
int count = --pending_complete;
if (!count && !building) {
complete();
}
put_unlock();
}
void libradosstriper::MultiAioCompletionImpl::safe_request(ssize_t r)
{
lock.lock();
if (rval >= 0) {
if (r < 0 && r != -EEXIST)
rval = r;
}
ceph_assert(pending_safe);
int count = --pending_safe;
if (!count && !building) {
safe();
}
put_unlock();
}
void libradosstriper::MultiAioCompletionImpl::finish_adding_requests()
{
std::scoped_lock l{lock};
ceph_assert(building);
building = false;
if (!pending_complete)
complete();
if (!pending_safe)
safe();
}
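// Illustrative lifecycle sketch (`launch_sub_op` is a hypothetical helper
// whose rados callbacks must eventually invoke complete_request() and
// safe_request() on the completion):
//
// auto *mc = new libradosstriper::MultiAioCompletionImpl;
// for (auto& extent : extents) { // one sub-operation per stripe extent
// mc->add_request(); // bumps pending counts and takes refs
// launch_sub_op(extent, mc);
// }
// mc->finish_adding_requests(); // completion may now fire when drained
// mc->wait_for_complete();
// mc->put();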
| 1,347 | 21.098361 | 73 |
cc
|
null |
ceph-main/src/libradosstriper/MultiAioCompletionImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOSSTRIPERSTRIPER_MULTIAIOCOMPLETIONIMPL_H
#define CEPH_LIBRADOSSTRIPERSTRIPER_MULTIAIOCOMPLETIONIMPL_H
#include <list>
#include <mutex>
#include "common/ceph_mutex.h"
#include "include/radosstriper/libradosstriper.hpp"
namespace libradosstriper {
struct MultiAioCompletionImpl {
ceph::mutex lock = ceph::make_mutex("MultiAioCompletionImpl lock", false);
ceph::condition_variable cond;
int ref, rval;
int pending_complete, pending_safe;
rados_callback_t callback_complete, callback_safe;
void *callback_complete_arg, *callback_safe_arg;
bool building; ///< true if we are still building this completion
bufferlist bl; ///< only used for the read case in the C API of rados striper
std::list<bufferlist*> bllist; ///< temporary buffer lists used for destriping
MultiAioCompletionImpl()
: ref(1), rval(0),
pending_complete(0), pending_safe(0),
callback_complete(0), callback_safe(0),
callback_complete_arg(0), callback_safe_arg(0),
building(true) {};
~MultiAioCompletionImpl() {
// deallocate temporary buffer lists
for (std::list<bufferlist*>::iterator it = bllist.begin();
it != bllist.end();
it++) {
delete *it;
}
bllist.clear();
}
int set_complete_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l{lock};
callback_complete = cb;
callback_complete_arg = cb_arg;
return 0;
}
int set_safe_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l{lock};
callback_safe = cb;
callback_safe_arg = cb_arg;
return 0;
}
int wait_for_complete() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_complete; });
return 0;
}
int wait_for_safe() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_safe; });
return 0;
}
bool is_complete() {
std::scoped_lock l{lock};
return pending_complete == 0;
}
bool is_safe() {
std::scoped_lock l{lock};
return pending_safe == 0;
}
void wait_for_complete_and_cb() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_complete && !callback_complete; });
}
void wait_for_safe_and_cb() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_safe && !callback_safe; });
}
bool is_complete_and_cb() {
std::scoped_lock l{lock};
return ((0 == pending_complete) && !callback_complete);
}
bool is_safe_and_cb() {
std::scoped_lock l{lock};
return ((0 == pending_safe) && !callback_safe);
}
int get_return_value() {
std::scoped_lock l{lock};
return rval;
}
void get() {
std::scoped_lock l{lock};
_get();
}
void _get() {
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(ref > 0);
++ref;
}
void put() {
lock.lock();
put_unlock();
}
void put_unlock() {
ceph_assert(ref > 0);
int n = --ref;
lock.unlock();
if (!n)
delete this;
}
void add_request() {
std::scoped_lock l{lock};
pending_complete++;
_get();
pending_safe++;
_get();
}
void add_safe_request() {
std::scoped_lock l{lock};
pending_complete++;
_get();
}
void complete() {
ceph_assert(ceph_mutex_is_locked(lock));
if (callback_complete) {
callback_complete(this, callback_complete_arg);
callback_complete = 0;
}
cond.notify_all();
}
void safe() {
ceph_assert(ceph_mutex_is_locked(lock));
if (callback_safe) {
callback_safe(this, callback_safe_arg);
callback_safe = 0;
}
cond.notify_all();
};
void complete_request(ssize_t r);
void safe_request(ssize_t r);
void finish_adding_requests();
};
inline void intrusive_ptr_add_ref(MultiAioCompletionImpl* ptr)
{
ptr->get();
}
inline void intrusive_ptr_release(MultiAioCompletionImpl* ptr)
{
ptr->put();
}
}
#endif // CEPH_LIBRADOSSTRIPERSTRIPER_MULTIAIOCOMPLETIONIMPL_H
| 4,333 | 24.494118 | 84 |
h
|
null |
ceph-main/src/libradosstriper/RadosStriperImpl.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <boost/algorithm/string/replace.hpp>
#include "libradosstriper/RadosStriperImpl.h"
#include <errno.h>
#include <sstream>
#include <iomanip>
#include <algorithm>
#include "include/types.h"
#include "include/uuid.h"
#include "include/ceph_fs.h"
#include "common/dout.h"
#include "common/strtol.h"
#include "common/RefCountedObj.h"
#include "osdc/Striper.h"
#include "librados/AioCompletionImpl.h"
#include <cls/lock/cls_lock_client.h>
/*
 * This file contains the actual implementation of the rados striped objects interface.
*
* Striped objects are stored in rados in a set of regular rados objects, after their
* content has been striped using the osdc/Striper interface.
*
* The external attributes of the striped object are mapped to the attributes of the
* first underlying object. This first object has a set of extra external attributes
* storing the layout of the striped object for future read back. These attributes are :
* - striper.layout.object_size : the size of rados objects used.
* Must be a multiple of striper.layout.stripe_unit
* - striper.layout.stripe_unit : the size of a stripe unit
* - striper.layout.stripe_count : the number of stripes used
* - striper.size : total striped object size
*
* In general operations on striped objects are not atomic.
 * However, a number of safety guards have been put in place to bring the interface
 * closer to atomicity :
* - each data operation takes a shared lock on the first rados object for the
* whole time of the operation
* - the remove and trunc operations take an exclusive lock on the first rados object
* for the whole time of the operation
* This makes sure that no removal/truncation of a striped object occurs while
* data operations are happening and vice versa. It thus makes sure that the layout
* of a striped object does not change during data operation, which is essential for
* data consistency.
*
 * Still, writing to a striped object is not atomic. This means in particular that
* the size of an object may not be in sync with its content at all times.
* As the size is always guaranteed to be updated first and in an atomic way, and as
* sparse striped objects are supported (see below), what will typically happen is
* that a reader that comes too soon after a write will read 0s instead of the actual
* data.
*
* Note that remove handles the pieces of the striped object in reverse order,
* so that the head object is removed last, making the completion of the deletion atomic.
*
* Striped objects can be sparse, typically in case data was written at the end of the
 * striped object only. In such a case, some rados objects constituting the striped object
 * may be missing. Others can be partial (only their beginning will contain data).
* When dealing with such sparse striped files, missing objects are detected and
* considered as full of 0s. They are however not created until real data is written
* to them.
*
* There are a number of missing features/improvements that could be implemented.
* Here are some ideas :
* - implementation of missing entry points (compared to rados)
* In particular : clone_range, sparse_read, exec, aio_flush_async, tmaps, omaps, ...
*
*/
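/*
 * Illustrative example (not part of the implementation) : with the default
 * layout below (4MB stripe unit, stripe count 1, 4MB object size), a 10MB
 * striped object named "foo" is stored as three rados objects
 *   foo.0000000000000000   (4MB, carries the striper.* attributes)
 *   foo.0000000000000001   (4MB)
 *   foo.0000000000000002   (2MB)
 * and the first object carries these external attributes :
 *   striper.layout.object_size  = 4194304
 *   striper.layout.stripe_unit  = 4194304
 *   striper.layout.stripe_count = 1
 *   striper.size                = 10485760
 *
 * A minimal write/read through the public C++ API would look like this
 * sketch (assuming an open librados::IoCtx 'ioctx' and a filled bufferlist
 * 'bl'; see include/radosstriper/libradosstriper.hpp for the exact
 * interface) :
 *   libradosstriper::RadosStriper striper;
 *   libradosstriper::RadosStriper::striper_create(ioctx, &striper);
 *   striper.write("foo", bl, bl.length(), 0);
 *   ceph::bufferlist out;
 *   striper.read("foo", &out, bl.length(), 0);
 */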
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "libradosstriper: "
/// size of xattr buffer
#define XATTR_BUFFER_SIZE 32
/// names of the different xattr entries
#define XATTR_LAYOUT_STRIPE_UNIT "striper.layout.stripe_unit"
#define XATTR_LAYOUT_STRIPE_COUNT "striper.layout.stripe_count"
#define XATTR_LAYOUT_OBJECT_SIZE "striper.layout.object_size"
#define XATTR_SIZE "striper.size"
#define LOCK_PREFIX "lock."
/// name of the lock used on objects to ensure layout stability during IO
#define RADOS_LOCK_NAME "striper.lock"
/// format of the extension of rados objects created for a given striped object
#define RADOS_OBJECT_EXTENSION_FORMAT ".%016llx"
/// default object layout
static const struct ceph_file_layout default_file_layout = {
ceph_le32(1<<22), // fl_stripe_unit
ceph_le32(1), // fl_stripe_count
ceph_le32(1<<22), // fl_object_size
ceph_le32(0), // fl_cas_hash
ceph_le32(0), // fl_object_stripe_unit
ceph_le32(-1), // fl_unused
ceph_le32(-1), // fl_pg_pool
};
using std::map;
using std::pair;
using std::string;
using std::vector;
using libradosstriper::MultiAioCompletionImplPtr;
namespace {
///////////////////////// CompletionData /////////////////////////////
/**
* struct handling the data needed to pass to the call back
* function in asynchronous operations
*/
struct CompletionData : RefCountedObject {
/// complete method
void complete(int r);
  /// striper to be used to handle the completion
libradosstriper::RadosStriperImpl *m_striper;
/// striped object concerned by the write operation
std::string m_soid;
/// shared lock to be released at completion
std::string m_lockCookie;
/// completion handler
librados::IoCtxImpl::C_aio_Complete *m_ack;
protected:
CompletionData(libradosstriper::RadosStriperImpl * striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion = 0);
~CompletionData() override;
};
CompletionData::CompletionData
(libradosstriper::RadosStriperImpl* striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion) :
RefCountedObject(striper->cct()),
m_striper(striper), m_soid(soid), m_lockCookie(lockCookie), m_ack(0) {
m_striper->get();
if (userCompletion) {
m_ack = new librados::IoCtxImpl::C_aio_Complete(userCompletion);
userCompletion->io = striper->m_ioCtxImpl;
}
}
CompletionData::~CompletionData() {
if (m_ack) delete m_ack;
m_striper->put();
}
void CompletionData::complete(int r) {
if (m_ack) m_ack->finish(r);
}
/**
* struct handling the data needed to pass to the call back
* function in asynchronous read operations
*/
struct ReadCompletionData : CompletionData {
/// bufferlist containing final result
bufferlist* m_bl;
/// extents that will be read
std::vector<ObjectExtent>* m_extents;
/// intermediate results
std::vector<bufferlist>* m_resultbl;
  /// return code of the read completion, to be remembered until the unlock has happened
int m_readRc;
/// completion object for the unlocking of the striped object at the end of the read
librados::AioCompletion *m_unlockCompletion;
/// complete method for when reading is over
void complete_read(int r);
/// complete method for when object is unlocked
void complete_unlock(int r);
private:
FRIEND_MAKE_REF(ReadCompletionData);
ReadCompletionData(libradosstriper::RadosStriperImpl * striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion,
bufferlist* bl,
std::vector<ObjectExtent>* extents,
std::vector<bufferlist>* resultbl);
~ReadCompletionData() override;
};
ReadCompletionData::ReadCompletionData
(libradosstriper::RadosStriperImpl* striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion,
bufferlist* bl,
std::vector<ObjectExtent>* extents,
std::vector<bufferlist>* resultbl) :
CompletionData(striper, soid, lockCookie, userCompletion),
m_bl(bl), m_extents(extents), m_resultbl(resultbl), m_readRc(0),
m_unlockCompletion(0) {}
ReadCompletionData::~ReadCompletionData() {
m_unlockCompletion->release();
delete m_extents;
delete m_resultbl;
}
void ReadCompletionData::complete_read(int r) {
// gather data into final buffer
Striper::StripedReadResult readResult;
vector<bufferlist>::iterator bit = m_resultbl->begin();
for (vector<ObjectExtent>::iterator eit = m_extents->begin();
eit != m_extents->end();
++eit, ++bit) {
readResult.add_partial_result(m_striper->cct(), *bit, eit->buffer_extents);
}
m_bl->clear();
readResult.assemble_result(m_striper->cct(), *m_bl, true);
// Remember return code
m_readRc = r;
}
void ReadCompletionData::complete_unlock(int r) {
// call parent's completion method
// Note that we ignore the return code of the unlock as we cannot do much about it
CompletionData::complete(m_readRc?m_readRc:m_bl->length());
}
/**
* struct handling the data needed to pass to the call back
* function in asynchronous write operations
*/
struct WriteCompletionData : CompletionData {
/// safe completion handler
librados::IoCtxImpl::C_aio_Complete *m_safe;
/// completion object for the unlocking of the striped object at the end of the write
librados::AioCompletion *m_unlockCompletion;
  /// return code of the write completion, to be remembered until the unlock has happened
int m_writeRc;
/// complete method for when writing is over
void complete_write(int r);
/// complete method for when object is unlocked
void complete_unlock(int r);
/// safe method
void safe(int r);
private:
FRIEND_MAKE_REF(WriteCompletionData);
/// constructor
WriteCompletionData(libradosstriper::RadosStriperImpl * striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion);
/// destructor
~WriteCompletionData() override;
};
WriteCompletionData::WriteCompletionData
(libradosstriper::RadosStriperImpl* striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion) :
CompletionData(striper, soid, lockCookie, userCompletion),
m_safe(0), m_unlockCompletion(0), m_writeRc(0) {
if (userCompletion) {
m_safe = new librados::IoCtxImpl::C_aio_Complete(userCompletion);
}
}
WriteCompletionData::~WriteCompletionData() {
m_unlockCompletion->release();
if (m_safe) delete m_safe;
}
void WriteCompletionData::complete_unlock(int r) {
// call parent's completion method
// Note that we ignore the return code of the unlock as we cannot do much about it
CompletionData::complete(m_writeRc);
}
void WriteCompletionData::complete_write(int r) {
// Remember return code
m_writeRc = r;
}
void WriteCompletionData::safe(int r) {
if (m_safe) m_safe->finish(r);
}
struct RemoveCompletionData : CompletionData {
/// removal flags
int flags;
private:
FRIEND_MAKE_REF(RemoveCompletionData);
/**
* constructor
* note that the constructed object will take ownership of the lock
*/
RemoveCompletionData(libradosstriper::RadosStriperImpl * striper,
const std::string& soid,
const std::string& lockCookie,
librados::AioCompletionImpl *userCompletion,
int flags = 0) :
CompletionData(striper, soid, lockCookie, userCompletion), flags(flags) {}
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous truncate operations
*/
struct TruncateCompletionData : RefCountedObject {
/// striper to be used
libradosstriper::RadosStriperImpl *m_striper;
/// striped object concerned by the truncate operation
std::string m_soid;
/// the final size of the truncated object
uint64_t m_size;
private:
FRIEND_MAKE_REF(TruncateCompletionData);
/// constructor
TruncateCompletionData(libradosstriper::RadosStriperImpl* striper,
const std::string& soid,
uint64_t size) :
RefCountedObject(striper->cct()),
m_striper(striper), m_soid(soid), m_size(size) {
m_striper->get();
}
/// destructor
~TruncateCompletionData() override {
m_striper->put();
}
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous read operations of a Rados File
*/
struct RadosReadCompletionData : RefCountedObject {
  /// the multi async io completion object to be used
MultiAioCompletionImplPtr m_multiAioCompl;
/// the expected number of bytes
uint64_t m_expectedBytes;
/// the bufferlist object where data have been written
bufferlist *m_bl;
private:
FRIEND_MAKE_REF(RadosReadCompletionData);
/// constructor
RadosReadCompletionData(MultiAioCompletionImplPtr multiAioCompl,
uint64_t expectedBytes,
bufferlist *bl,
CephContext *context) :
RefCountedObject(context),
m_multiAioCompl(multiAioCompl), m_expectedBytes(expectedBytes), m_bl(bl) {}
};
/**
* struct handling (most of) the data needed to pass to the call back
* function in asynchronous stat operations.
* Inherited by the actual type for adding time information in different
* versions (time_t or struct timespec)
*/
struct BasicStatCompletionData : CompletionData {
  // MultiAioCompletionImpl used to handle the two parallel async
  // calls running in the back (stat + getxattr)
  libradosstriper::MultiAioCompletionImpl *m_multiCompletion;
  // where to store the size of the first object;
  // this will be ignored, but we need a place to store it when
  // the async stat is called
uint64_t m_objectSize;
// where to store the file size
uint64_t *m_psize;
/// the bufferlist object used for the getxattr call
bufferlist m_bl;
/// return code of the stat
int m_statRC;
/// return code of the getxattr
int m_getxattrRC;
protected:
/// constructor
BasicStatCompletionData(libradosstriper::RadosStriperImpl* striper,
const std::string& soid,
librados::AioCompletionImpl *userCompletion,
libradosstriper::MultiAioCompletionImpl *multiCompletion,
uint64_t *psize) :
CompletionData(striper, soid, "", userCompletion),
m_multiCompletion(multiCompletion), m_psize(psize),
m_statRC(0), m_getxattrRC(0) {};
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous stat operations.
* Simple templated extension of BasicStatCompletionData.
* The template parameter is the type of the time information
* (used with time_t for stat and struct timespec for stat2)
*/
template<class TimeType>
struct StatCompletionData : BasicStatCompletionData {
// where to store the file time
TimeType *m_pmtime;
private:
FRIEND_MAKE_REF(StatCompletionData);
/// constructor
StatCompletionData(libradosstriper::RadosStriperImpl* striper,
const std::string& soid,
librados::AioCompletionImpl *userCompletion,
libradosstriper::MultiAioCompletionImpl *multiCompletion,
uint64_t *psize,
TimeType *pmtime) :
BasicStatCompletionData(striper, soid, userCompletion, multiCompletion, psize),
m_pmtime(pmtime) {};
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous remove operations of a Rados File
*/
struct RadosRemoveCompletionData : RefCountedObject {
  /// the multi async io completion object to be used
MultiAioCompletionImplPtr m_multiAioCompl;
private:
FRIEND_MAKE_REF(RadosRemoveCompletionData);
/// constructor
RadosRemoveCompletionData(MultiAioCompletionImplPtr multiAioCompl,
CephContext *context) :
RefCountedObject(context),
m_multiAioCompl(multiAioCompl) {};
};
} // namespace {
///////////////////////// constructor /////////////////////////////
libradosstriper::RadosStriperImpl::RadosStriperImpl(librados::IoCtx& ioctx, librados::IoCtxImpl *ioctx_impl) :
m_refCnt(0), m_radosCluster(ioctx), m_ioCtx(ioctx), m_ioCtxImpl(ioctx_impl),
m_layout(default_file_layout) {}
///////////////////////// layout /////////////////////////////
int libradosstriper::RadosStriperImpl::setObjectLayoutStripeUnit
(unsigned int stripe_unit)
{
/* stripe unit must be non-zero, 64k increment */
if (!stripe_unit || (stripe_unit & (CEPH_MIN_STRIPE_UNIT-1)))
return -EINVAL;
m_layout.fl_stripe_unit = stripe_unit;
return 0;
}
int libradosstriper::RadosStriperImpl::setObjectLayoutStripeCount
(unsigned int stripe_count)
{
/* stripe count must be non-zero */
if (!stripe_count)
return -EINVAL;
m_layout.fl_stripe_count = stripe_count;
return 0;
}
int libradosstriper::RadosStriperImpl::setObjectLayoutObjectSize
(unsigned int object_size)
{
/* object size must be non-zero, 64k increment */
if (!object_size || (object_size & (CEPH_MIN_STRIPE_UNIT-1)))
return -EINVAL;
/* object size must be a multiple of stripe unit */
if (object_size < m_layout.fl_stripe_unit ||
object_size % m_layout.fl_stripe_unit)
return -EINVAL;
m_layout.fl_object_size = object_size;
return 0;
}
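// Illustrative examples of the constraints enforced above (assuming the
// usual CEPH_MIN_STRIPE_UNIT of 64KB; return values shown as comments) :
//   setObjectLayoutStripeUnit(65536);   // 0 : 64KB, properly aligned
//   setObjectLayoutStripeUnit(100000);  // -EINVAL : not a 64KB multiple
//   setObjectLayoutStripeCount(0);      // -EINVAL : must be non-zero
//   setObjectLayoutObjectSize(262144);  // 0 if the stripe unit divides 256KB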
///////////////////////// xattrs /////////////////////////////
int libradosstriper::RadosStriperImpl::getxattr(const object_t& soid,
const char *name,
bufferlist& bl)
{
std::string firstObjOid = getObjectId(soid, 0);
return m_ioCtx.getxattr(firstObjOid, name, bl);
}
int libradosstriper::RadosStriperImpl::setxattr(const object_t& soid,
const char *name,
bufferlist& bl)
{
std::string firstObjOid = getObjectId(soid, 0);
return m_ioCtx.setxattr(firstObjOid, name, bl);
}
int libradosstriper::RadosStriperImpl::getxattrs(const object_t& soid,
map<string, bufferlist>& attrset)
{
std::string firstObjOid = getObjectId(soid, 0);
int rc = m_ioCtx.getxattrs(firstObjOid, attrset);
if (rc) return rc;
// cleanup internal attributes dedicated to striping and locking
attrset.erase(XATTR_LAYOUT_STRIPE_UNIT);
attrset.erase(XATTR_LAYOUT_STRIPE_COUNT);
attrset.erase(XATTR_LAYOUT_OBJECT_SIZE);
attrset.erase(XATTR_SIZE);
attrset.erase(std::string(LOCK_PREFIX) + RADOS_LOCK_NAME);
return rc;
}
int libradosstriper::RadosStriperImpl::rmxattr(const object_t& soid,
const char *name)
{
std::string firstObjOid = getObjectId(soid, 0);
return m_ioCtx.rmxattr(firstObjOid, name);
}
///////////////////////// io /////////////////////////////
int libradosstriper::RadosStriperImpl::write(const std::string& soid,
const bufferlist& bl,
size_t len,
uint64_t off)
{
// open the object. This will create it if needed, retrieve its layout
// and size and take a shared lock on it
ceph_file_layout layout;
std::string lockCookie;
int rc = createAndOpenStripedObject(soid, &layout, len+off, &lockCookie, true);
if (rc) return rc;
return write_in_open_object(soid, layout, lockCookie, bl, len, off);
}
int libradosstriper::RadosStriperImpl::append(const std::string& soid,
const bufferlist& bl,
size_t len)
{
// open the object. This will create it if needed, retrieve its layout
// and size and take a shared lock on it
ceph_file_layout layout;
uint64_t size = len;
std::string lockCookie;
int rc = openStripedObjectForWrite(soid, &layout, &size, &lockCookie, false);
if (rc) return rc;
return write_in_open_object(soid, layout, lockCookie, bl, len, size);
}
int libradosstriper::RadosStriperImpl::write_full(const std::string& soid,
const bufferlist& bl)
{
int rc = trunc(soid, 0);
if (rc && rc != -ENOENT) return rc; // ENOENT is obviously ok
return write(soid, bl, bl.length(), 0);
}
int libradosstriper::RadosStriperImpl::read(const std::string& soid,
bufferlist* bl,
size_t len,
uint64_t off)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous method
int rc = aio_read(soid, &c, bl, len, off);
// and wait for completion
if (!rc) {
// wait for completion
c.wait_for_complete_and_cb();
// return result
rc = c.get_return_value();
}
return rc;
}
///////////////////////// asynchronous io /////////////////////////////
int libradosstriper::RadosStriperImpl::aio_write(const std::string& soid,
librados::AioCompletionImpl *c,
const bufferlist& bl,
size_t len,
uint64_t off)
{
ceph_file_layout layout;
std::string lockCookie;
int rc = createAndOpenStripedObject(soid, &layout, len+off, &lockCookie, true);
if (rc) return rc;
return aio_write_in_open_object(soid, c, layout, lockCookie, bl, len, off);
}
int libradosstriper::RadosStriperImpl::aio_append(const std::string& soid,
librados::AioCompletionImpl *c,
const bufferlist& bl,
size_t len)
{
ceph_file_layout layout;
uint64_t size = len;
std::string lockCookie;
int rc = openStripedObjectForWrite(soid, &layout, &size, &lockCookie, false);
if (rc) return rc;
// create a completion object
return aio_write_in_open_object(soid, c, layout, lockCookie, bl, len, size);
}
int libradosstriper::RadosStriperImpl::aio_write_full(const std::string& soid,
librados::AioCompletionImpl *c,
const bufferlist& bl)
{
  int rc = trunc(soid, 0);
  if (rc && rc != -ENOENT) return rc; // as in write_full, ENOENT is ok
return aio_write(soid, c, bl, bl.length(), 0);
}
static void rados_read_aio_unlock_complete(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = ceph::ref_t<ReadCompletionData>(static_cast<ReadCompletionData*>(arg), false);
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
cdata->complete_unlock(comp->rval);
}
static void striper_read_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = static_cast<ReadCompletionData*>(arg);
// launch the async unlocking of the object
cdata->m_striper->aio_unlockObject(cdata->m_soid, cdata->m_lockCookie, cdata->m_unlockCompletion);
// complete the read part in parallel
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
cdata->complete_read(comp->rval);
}
static void rados_req_read_complete(rados_completion_t c, void *arg)
{
  // adopt the reference handed over by aio_read's data.detach() so that it
  // is released once this callback has run
  auto data = ceph::ref_t<RadosReadCompletionData>(static_cast<RadosReadCompletionData*>(arg), false);
int rc = rados_aio_get_return_value(c);
// We need to handle the case of sparse files here
if (rc == -ENOENT) {
// the object did not exist at all. This can happen for sparse files.
    // we consider we've read 0 bytes and it will fall into the next case
rc = 0;
}
ssize_t nread = rc;
if (rc >= 0 && (((uint64_t)rc) < data->m_expectedBytes)) {
// only partial data were present in the object (or the object did not
// even exist if we've gone through previous case).
    // This is typical of sparse files and we need to complete with 0s.
unsigned int lenOfZeros = data->m_expectedBytes-rc;
unsigned int existingDataToZero = std::min(data->m_bl->length()-rc, lenOfZeros);
if (existingDataToZero > 0) {
data->m_bl->zero(rc, existingDataToZero);
}
if (lenOfZeros > existingDataToZero) {
ceph::bufferptr zeros(ceph::buffer::create(lenOfZeros-existingDataToZero));
zeros.zero();
data->m_bl->push_back(zeros);
}
nread = data->m_expectedBytes;
}
auto multi_aio_comp = data->m_multiAioCompl;
multi_aio_comp->complete_request(nread);
multi_aio_comp->safe_request(rc);
}
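// Worked example of the zero-padding above (illustrative numbers) : for an
// extent with m_expectedBytes = 4194304 (4MB) where rados only returned
// rc = 1048576 (1MB) of data, or -ENOENT remapped to rc = 0 :
//   lenOfZeros         = m_expectedBytes - rc
//   existingDataToZero = min(m_bl->length() - rc, lenOfZeros)
// the existing tail of the buffer is zeroed in place, a zeroed bufferptr is
// appended for the rest, and nread = m_expectedBytes is reported, so holes
// in sparse objects read back as zeros.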
int libradosstriper::RadosStriperImpl::aio_read(const std::string& soid,
librados::AioCompletionImpl *c,
bufferlist* bl,
size_t len,
uint64_t off)
{
// open the object. This will retrieve its layout and size
// and take a shared lock on it
ceph_file_layout layout;
uint64_t size;
std::string lockCookie;
int rc = openStripedObjectForRead(soid, &layout, &size, &lockCookie);
if (rc) return rc;
// find out the actual number of bytes we can read
uint64_t read_len;
if (off >= size) {
// nothing to read ! We are done.
read_len = 0;
} else {
read_len = std::min(len, (size_t)(size-off));
}
// get list of extents to be read from
vector<ObjectExtent> *extents = new vector<ObjectExtent>();
if (read_len > 0) {
std::string format = soid;
boost::replace_all(format, "%", "%%");
format += RADOS_OBJECT_EXTENSION_FORMAT;
file_layout_t l;
l.from_legacy(layout);
Striper::file_to_extents(cct(), format.c_str(), &l, off, read_len,
0, *extents);
}
// create a completion object and transfer ownership of extents and resultbl
vector<bufferlist> *resultbl = new vector<bufferlist>(extents->size());
auto cdata = ceph::make_ref<ReadCompletionData>(this, soid, lockCookie, c, bl, extents, resultbl);
c->is_read = true;
c->io = m_ioCtxImpl;
// create a completion for the unlocking of the striped object at the end of the read
librados::AioCompletion *unlock_completion =
librados::Rados::aio_create_completion(cdata->get() /* create ref! */, rados_read_aio_unlock_complete);
cdata->m_unlockCompletion = unlock_completion;
// create the multiCompletion object handling the reads
MultiAioCompletionImplPtr nc{new libradosstriper::MultiAioCompletionImpl,
false};
nc->set_complete_callback(cdata.get(), striper_read_aio_req_complete);
// go through the extents
int r = 0, i = 0;
for (vector<ObjectExtent>::iterator p = extents->begin(); p != extents->end(); ++p) {
    // create a buffer list describing where to place data read from the current extent
bufferlist *oid_bl = &((*resultbl)[i++]);
for (vector<pair<uint64_t,uint64_t> >::iterator q = p->buffer_extents.begin();
q != p->buffer_extents.end();
++q) {
bufferlist buffer_bl;
buffer_bl.substr_of(*bl, q->first, q->second);
oid_bl->append(buffer_bl);
}
    // read all extents of a given object in one go
nc->add_request();
    // hand over our single reference on data to the rados completion;
    // rados_req_read_complete will adopt and release it
auto data = ceph::make_ref<RadosReadCompletionData>(nc, p->length, oid_bl, cct());
librados::AioCompletion *rados_completion =
librados::Rados::aio_create_completion(data.detach(), rados_req_read_complete);
r = m_ioCtx.aio_read(p->oid.name, rados_completion, oid_bl, p->length, p->offset);
rados_completion->release();
if (r < 0)
break;
}
nc->finish_adding_requests();
return r;
}
int libradosstriper::RadosStriperImpl::aio_read(const std::string& soid,
librados::AioCompletionImpl *c,
char* buf,
size_t len,
uint64_t off)
{
// create a buffer list and store it inside the completion object
c->bl.clear();
c->bl.push_back(buffer::create_static(len, buf));
// call the bufferlist version of this method
return aio_read(soid, c, &c->bl, len, off);
}
int libradosstriper::RadosStriperImpl::aio_flush()
{
int ret;
// pass to the rados level
ret = m_ioCtx.aio_flush();
if (ret < 0)
return ret;
  // wait until all CompletionData have been released
std::unique_lock l{lock};
cond.wait(l, [this] {return m_refCnt <= 1;});
return ret;
}
///////////////////////// stat and deletion /////////////////////////////
int libradosstriper::RadosStriperImpl::stat(const std::string& soid, uint64_t *psize, time_t *pmtime)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous version of stat
int rc = aio_stat(soid, &c, psize, pmtime);
if (rc == 0) {
    // wait for completion of the stat
c.wait_for_complete();
// get result
rc = c.get_return_value();
}
return rc;
}
static void striper_stat_aio_stat_complete(rados_completion_t c, void *arg) {
auto data = ceph::ref_t<BasicStatCompletionData>(static_cast<BasicStatCompletionData*>(arg), false);
int rc = rados_aio_get_return_value(c);
if (rc == -ENOENT) {
// remember this has failed
data->m_statRC = rc;
}
data->m_multiCompletion->complete_request(rc);
}
static void striper_stat_aio_getxattr_complete(rados_completion_t c, void *arg) {
auto data = ceph::ref_t<BasicStatCompletionData>(static_cast<BasicStatCompletionData*>(arg), false);
int rc = rados_aio_get_return_value(c);
// We need to handle the case of sparse files here
if (rc < 0) {
// remember this has failed
data->m_getxattrRC = rc;
} else {
    // this intermediate string allows adding a null terminator before calling strtoll
std::string err;
std::string strsize(data->m_bl.c_str(), data->m_bl.length());
*data->m_psize = strict_strtoll(strsize.c_str(), 10, &err);
if (!err.empty()) {
lderr(data->m_striper->cct()) << XATTR_SIZE << " : " << err << dendl;
data->m_getxattrRC = -EINVAL;
}
rc = 0;
}
data->m_multiCompletion->complete_request(rc);
}
static void striper_stat_aio_req_complete(rados_striper_multi_completion_t c,
void *arg) {
auto data = ceph::ref_t<BasicStatCompletionData>(static_cast<BasicStatCompletionData*>(arg), false);
if (data->m_statRC) {
data->complete(data->m_statRC);
} else {
if (data->m_getxattrRC < 0) {
data->complete(data->m_getxattrRC);
} else {
data->complete(0);
}
}
}
template<class TimeType>
int libradosstriper::RadosStriperImpl::aio_generic_stat
(const std::string& soid,
librados::AioCompletionImpl *c,
uint64_t *psize,
TimeType *pmtime,
typename libradosstriper::RadosStriperImpl::StatFunction<TimeType>::Type statFunction)
{
// use a MultiAioCompletion object for dealing with the fact
// that we'll do 2 asynchronous calls in parallel
MultiAioCompletionImplPtr multi_completion{
new libradosstriper::MultiAioCompletionImpl, false};
// Data object used for passing context to asynchronous calls
std::string firstObjOid = getObjectId(soid, 0);
auto cdata = ceph::make_ref<StatCompletionData<TimeType>>(this, firstObjOid, c, multi_completion.get(), psize, pmtime);
multi_completion->set_complete_callback(cdata->get() /* create ref! */, striper_stat_aio_req_complete);
// use a regular AioCompletion for the stat async call
librados::AioCompletion *stat_completion =
librados::Rados::aio_create_completion(cdata->get() /* create ref! */, striper_stat_aio_stat_complete);
multi_completion->add_safe_request();
object_t obj(firstObjOid);
int rc = (m_ioCtxImpl->*statFunction)(obj, stat_completion->pc,
&cdata->m_objectSize, cdata->m_pmtime);
stat_completion->release();
if (rc < 0) {
// nothing is really started so cancel everything
delete cdata.detach();
return rc;
}
// use a regular AioCompletion for the getxattr async call
librados::AioCompletion *getxattr_completion =
librados::Rados::aio_create_completion(cdata->get() /* create ref! */, striper_stat_aio_getxattr_complete);
multi_completion->add_safe_request();
  // in parallel, get the size from the first object's xattr asynchronously
rc = m_ioCtxImpl->aio_getxattr(obj, getxattr_completion->pc,
XATTR_SIZE, cdata->m_bl);
getxattr_completion->release();
multi_completion->finish_adding_requests();
if (rc < 0) {
// the async stat is ongoing, so we need to go on
// we mark the getxattr as failed in the data object
cdata->m_getxattrRC = rc;
multi_completion->complete_request(rc);
return rc;
}
return 0;
}
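// The synchronous stat()/stat2() wrappers around this helper make the usual
// blocking call trivial (sketch, assuming 'striper' is a RadosStriperImpl
// reference) :
//   uint64_t size; time_t mtime;
//   int rc = striper.stat("foo", &size, &mtime);
// which issues the aio_stat + aio_getxattr pair above and waits on the
// user completion.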
int libradosstriper::RadosStriperImpl::aio_stat(const std::string& soid,
librados::AioCompletionImpl *c,
uint64_t *psize,
time_t *pmtime)
{
return aio_generic_stat<time_t>(soid, c, psize, pmtime, &librados::IoCtxImpl::aio_stat);
}
int libradosstriper::RadosStriperImpl::stat2(const std::string& soid, uint64_t *psize, struct timespec *pts)
{
// create a completion object
librados::AioCompletionImpl c;
  // call asynchronous version of stat2
  int rc = aio_stat2(soid, &c, psize, pts);
  if (rc == 0) {
    // wait for completion of the stat
c.wait_for_complete_and_cb();
// get result
rc = c.get_return_value();
}
return rc;
}
int libradosstriper::RadosStriperImpl::aio_stat2(const std::string& soid,
librados::AioCompletionImpl *c,
uint64_t *psize,
struct timespec *pts)
{
return aio_generic_stat<struct timespec>(soid, c, psize, pts, &librados::IoCtxImpl::aio_stat2);
}
static void rados_req_remove_complete(rados_completion_t c, void *arg)
{
  // adopt the reference created for us via cdata->get() when the completion
  // was set up, so that it is released once this callback has run
  auto cdata = ceph::ref_t<RadosRemoveCompletionData>(static_cast<RadosRemoveCompletionData*>(arg), false);
int rc = rados_aio_get_return_value(c);
// in case the object did not exist, it means we had a sparse file, all is fine
if (rc == -ENOENT) {
rc = 0;
}
cdata->m_multiAioCompl->complete_request(rc);
cdata->m_multiAioCompl->safe_request(rc);
}
static void striper_remove_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = ceph::ref_t<RemoveCompletionData>(static_cast<RemoveCompletionData*>(arg), false);
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
ldout(cdata->m_striper->cct(), 10)
<< "RadosStriperImpl : striper_remove_aio_req_complete called for "
<< cdata->m_soid << dendl;
int rc = comp->rval;
if (rc == 0) {
// All went fine, synchronously remove first object
rc = cdata->m_striper->m_ioCtx.remove(cdata->m_striper->getObjectId(cdata->m_soid, 0),
cdata->flags);
} else {
lderr(cdata->m_striper->cct())
<< "RadosStriperImpl : deletion/truncation incomplete for " << cdata->m_soid
<< ", as errors were encountered. The file is left present but it's content "
<< " has been partially removed"
<< dendl;
}
cdata->complete(rc);
}
int libradosstriper::RadosStriperImpl::remove(const std::string& soid, int flags)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous version of remove
int rc = aio_remove(soid, &c, flags);
if (rc == 0) {
// wait for completion of the remove
c.wait_for_complete_and_cb();
// get result
rc = c.get_return_value();
}
return rc;
}
int libradosstriper::RadosStriperImpl::aio_remove(const std::string& soid,
librados::AioCompletionImpl *c,
int flags)
{
// the RemoveCompletionData object will lock the given soid for the duration
// of the removal
std::string lockCookie = getUUID();
int rc = m_ioCtx.lock_exclusive(getObjectId(soid, 0), RADOS_LOCK_NAME, lockCookie, "", 0, 0);
if (rc) return rc;
// create CompletionData for the async remove call
auto cdata = ceph::make_ref<RemoveCompletionData>(this, soid, lockCookie, c, flags);
MultiAioCompletionImplPtr multi_completion{
new libradosstriper::MultiAioCompletionImpl, false};
multi_completion->set_complete_callback(cdata->get() /* create ref! */, striper_remove_aio_req_complete);
// call asynchronous internal version of remove
ldout(cct(), 10)
<< "RadosStriperImpl : Aio_remove starting for "
<< soid << dendl;
rc = internal_aio_remove(soid, multi_completion);
return rc;
}
int libradosstriper::RadosStriperImpl::internal_aio_remove(
const std::string& soid,
MultiAioCompletionImplPtr multi_completion,
int flags)
{
std::string firstObjOid = getObjectId(soid, 0);
try {
// check size and get number of rados objects to delete
uint64_t nb_objects = 0;
bufferlist bl2;
int rc = getxattr(soid, XATTR_SIZE, bl2);
if (rc < 0) {
// no object size (or not able to get it)
      // try to find the number of objects "by hand"
uint64_t psize;
time_t pmtime;
while (!m_ioCtx.stat(getObjectId(soid, nb_objects), &psize, &pmtime)) {
nb_objects++;
}
} else {
// count total number of rados objects in the striped object
std::string err;
      // this intermediate string allows adding a null terminator before calling strtoll
std::string strsize(bl2.c_str(), bl2.length());
uint64_t size = strict_strtoll(strsize.c_str(), 10, &err);
if (!err.empty()) {
lderr(cct()) << XATTR_SIZE << " : " << err << dendl;
return -EINVAL;
}
uint64_t object_size = m_layout.fl_object_size;
uint64_t su = m_layout.fl_stripe_unit;
uint64_t stripe_count = m_layout.fl_stripe_count;
uint64_t nb_complete_sets = size / (object_size*stripe_count);
uint64_t remaining_data = size % (object_size*stripe_count);
uint64_t remaining_stripe_units = (remaining_data + su -1) / su;
uint64_t remaining_objects = std::min(remaining_stripe_units, stripe_count);
nb_objects = nb_complete_sets * stripe_count + remaining_objects;
}
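    // Worked example for the computation above (illustrative numbers,
    // default layout) : object_size = su = 4MB, stripe_count = 1,
    // size = 10MB :
    //   nb_complete_sets       = 10MB / (4MB*1)       = 2
    //   remaining_data         = 10MB % (4MB*1)       = 2MB
    //   remaining_stripe_units = (2MB + 4MB - 1)/4MB  = 1
    //   remaining_objects      = min(1, 1)            = 1
    //   nb_objects             = 2*1 + 1              = 3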
// delete rados objects in reverse order
// Note that we do not drop the first object. This one will only be dropped
// if all other removals have been successful, and this is done in the
// callback of the multi_completion object
int rcr = 0;
for (int i = nb_objects-1; i >= 1; i--) {
multi_completion->add_request();
auto data = ceph::make_ref<RadosRemoveCompletionData>(multi_completion, cct());
librados::AioCompletion *rados_completion =
librados::Rados::aio_create_completion(data->get() /* create ref! */,
rados_req_remove_complete);
if (flags == 0) {
rcr = m_ioCtx.aio_remove(getObjectId(soid, i), rados_completion);
} else {
rcr = m_ioCtx.aio_remove(getObjectId(soid, i), rados_completion, flags);
}
rados_completion->release();
if (rcr < 0 and -ENOENT != rcr) {
lderr(cct()) << "RadosStriperImpl::remove : deletion incomplete for " << soid
<< ", as " << getObjectId(soid, i) << " could not be deleted (rc=" << rc << ")"
<< dendl;
break;
}
}
    // we are done adding requests to the multi_completion object
multi_completion->finish_adding_requests();
// return
return rcr;
} catch (ErrorCode &e) {
// error caught when trying to take the exclusive lock
return e.m_code;
}
}
int libradosstriper::RadosStriperImpl::trunc(const std::string& soid, uint64_t size)
{
// lock the object in exclusive mode
std::string firstObjOid = getObjectId(soid, 0);
librados::ObjectWriteOperation op;
op.assert_exists();
std::string lockCookie = RadosStriperImpl::getUUID();
utime_t dur = utime_t();
rados::cls::lock::lock(&op, RADOS_LOCK_NAME, ClsLockType::EXCLUSIVE, lockCookie, "", "", dur, 0);
int rc = m_ioCtx.operate(firstObjOid, &op);
if (rc) return rc;
// load layout and size
ceph_file_layout layout;
uint64_t original_size;
rc = internal_get_layout_and_size(firstObjOid, &layout, &original_size);
if (!rc) {
if (size < original_size) {
rc = truncate(soid, original_size, size, layout);
} else if (size > original_size) {
rc = grow(soid, original_size, size, layout);
}
}
// unlock object, ignore return code as we cannot do much
m_ioCtx.unlock(firstObjOid, RADOS_LOCK_NAME, lockCookie);
// final return
return rc;
}
///////////////////////// private helpers /////////////////////////////
std::string libradosstriper::RadosStriperImpl::getObjectId(const object_t& soid,
long long unsigned objectno)
{
std::ostringstream s;
s << soid << '.' << std::setfill ('0') << std::setw(16) << std::hex << objectno;
return s.str();
}
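// For illustration : getObjectId("foo", 3) returns "foo.0000000000000003",
// matching the RADOS_OBJECT_EXTENSION_FORMAT used when mapping extents.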
void libradosstriper::RadosStriperImpl::unlockObject(const std::string& soid,
const std::string& lockCookie)
{
// unlock the shared lock on the first rados object
std::string firstObjOid = getObjectId(soid, 0);
m_ioCtx.unlock(firstObjOid, RADOS_LOCK_NAME, lockCookie);
}
void libradosstriper::RadosStriperImpl::aio_unlockObject(const std::string& soid,
const std::string& lockCookie,
librados::AioCompletion *c)
{
// unlock the shared lock on the first rados object
std::string firstObjOid = getObjectId(soid, 0);
m_ioCtx.aio_unlock(firstObjOid, RADOS_LOCK_NAME, lockCookie, c);
}
static void rados_write_aio_unlock_complete(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = ceph::ref_t<WriteCompletionData>(static_cast<WriteCompletionData*>(arg), false);
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
cdata->complete_unlock(comp->rval);
}
static void striper_write_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = ceph::ref_t<WriteCompletionData>(static_cast<WriteCompletionData*>(arg), false);
// launch the async unlocking of the object
cdata->m_striper->aio_unlockObject(cdata->m_soid, cdata->m_lockCookie, cdata->m_unlockCompletion);
// complete the write part in parallel
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
cdata->complete_write(comp->rval);
}
static void striper_write_aio_req_safe(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = ceph::ref_t<WriteCompletionData>(static_cast<WriteCompletionData*>(arg), false);
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
cdata->safe(comp->rval);
}
int libradosstriper::RadosStriperImpl::write_in_open_object(const std::string& soid,
const ceph_file_layout& layout,
const std::string& lockCookie,
const bufferlist& bl,
size_t len,
uint64_t off) {
// create a completion object to be passed to the callbacks of the multicompletion
  // three references to it will be handed out : the unlock completion, the
  // complete callback and the safe callback each own one and release it
auto cdata = ceph::make_ref<WriteCompletionData>(this, soid, lockCookie, nullptr);
// create a completion object for the unlocking of the striped object at the end of the write
librados::AioCompletion *unlock_completion =
librados::Rados::aio_create_completion(cdata->get() /* create ref! */, rados_write_aio_unlock_complete);
cdata->m_unlockCompletion = unlock_completion;
// create the multicompletion that will handle the write completion
MultiAioCompletionImplPtr c{new libradosstriper::MultiAioCompletionImpl,
false};
c->set_complete_callback(cdata->get() /* create ref! */, striper_write_aio_req_complete);
c->set_safe_callback(cdata->get() /* create ref! */, striper_write_aio_req_safe);
// call the asynchronous API
int rc = internal_aio_write(soid, c, bl, len, off, layout);
if (!rc) {
// wait for completion and safety of data
c->wait_for_complete_and_cb();
c->wait_for_safe_and_cb();
// wait for the unlocking
unlock_completion->wait_for_complete();
// return result
rc = c->get_return_value();
}
return rc;
}
int libradosstriper::RadosStriperImpl::aio_write_in_open_object(const std::string& soid,
librados::AioCompletionImpl *c,
const ceph_file_layout& layout,
const std::string& lockCookie,
const bufferlist& bl,
size_t len,
uint64_t off) {
// create a completion object to be passed to the callbacks of the multicompletion
  // three references to it will be handed out : the unlock completion, the
  // complete callback and the safe callback each own one and release it
auto cdata = ceph::make_ref<WriteCompletionData>(this, soid, lockCookie, c);
m_ioCtxImpl->get();
c->io = m_ioCtxImpl;
// create a completion object for the unlocking of the striped object at the end of the write
librados::AioCompletion *unlock_completion =
librados::Rados::aio_create_completion(cdata->get() /* create ref! */, rados_write_aio_unlock_complete);
cdata->m_unlockCompletion = unlock_completion;
// create the multicompletion that will handle the write completion
libradosstriper::MultiAioCompletionImplPtr nc{
new libradosstriper::MultiAioCompletionImpl, false};
nc->set_complete_callback(cdata->get() /* create ref! */, striper_write_aio_req_complete);
nc->set_safe_callback(cdata->get() /* create ref! */, striper_write_aio_req_safe);
// internal asynchronous API
int rc = internal_aio_write(soid, nc, bl, len, off, layout);
return rc;
}
static void rados_req_write_complete(rados_completion_t c, void *arg)
{
auto comp = reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(arg);
comp->complete_request(rados_aio_get_return_value(c));
comp->safe_request(rados_aio_get_return_value(c));
}
int
libradosstriper::RadosStriperImpl::internal_aio_write(const std::string& soid,
libradosstriper::MultiAioCompletionImplPtr c,
const bufferlist& bl,
size_t len,
uint64_t off,
const ceph_file_layout& layout)
{
int r = 0;
// Do not try anything if we are called with empty buffer,
// file_to_extents would raise an exception
if (len > 0) {
// get list of extents to be written to
vector<ObjectExtent> extents;
std::string format = soid;
boost::replace_all(format, "%", "%%");
format += RADOS_OBJECT_EXTENSION_FORMAT;
file_layout_t l;
l.from_legacy(layout);
Striper::file_to_extents(cct(), format.c_str(), &l, off, len, 0, extents);
// go through the extents
for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); ++p) {
// assemble pieces of a given object into a single buffer list
bufferlist oid_bl;
for (vector<pair<uint64_t,uint64_t> >::iterator q = p->buffer_extents.begin();
q != p->buffer_extents.end();
++q) {
bufferlist buffer_bl;
buffer_bl.substr_of(bl, q->first, q->second);
oid_bl.append(buffer_bl);
}
// and write the object
c->add_request();
librados::AioCompletion *rados_completion =
librados::Rados::aio_create_completion(c.get(),
rados_req_write_complete);
r = m_ioCtx.aio_write(p->oid.name, rados_completion, oid_bl,
p->length, p->offset);
rados_completion->release();
if (r < 0)
break;
}
}
c->finish_adding_requests();
return r;
}
int libradosstriper::RadosStriperImpl::extract_uint32_attr
(std::map<std::string, bufferlist> &attrs,
const std::string& key,
ceph_le32 *value)
{
std::map<std::string, bufferlist>::iterator attrsIt = attrs.find(key);
if (attrsIt != attrs.end()) {
    // this intermediate string allows adding a null terminator before calling strtol
std::string strvalue(attrsIt->second.c_str(), attrsIt->second.length());
std::string err;
*value = strict_strtol(strvalue.c_str(), 10, &err);
if (!err.empty()) {
lderr(cct()) << key << " : " << err << dendl;
return -EINVAL;
}
} else {
return -ENOENT;
}
return 0;
}
int libradosstriper::RadosStriperImpl::extract_sizet_attr
(std::map<std::string, bufferlist> &attrs,
const std::string& key,
size_t *value)
{
std::map<std::string, bufferlist>::iterator attrsIt = attrs.find(key);
if (attrsIt != attrs.end()) {
    // this intermediate string allows adding a null terminator before calling strtoll
std::string strvalue(attrsIt->second.c_str(), attrsIt->second.length());
std::string err;
*value = strict_strtoll(strvalue.c_str(), 10, &err);
if (!err.empty()) {
lderr(cct()) << key << " : " << err << dendl;
return -EINVAL;
}
} else {
return -ENOENT;
}
return 0;
}
int libradosstriper::RadosStriperImpl::internal_get_layout_and_size(
const std::string& oid,
ceph_file_layout *layout,
uint64_t *size)
{
// get external attributes of the first rados object
std::map<std::string, bufferlist> attrs;
int rc = m_ioCtx.getxattrs(oid, attrs);
if (rc) return rc;
// deal with stripe_unit
rc = extract_uint32_attr(attrs, XATTR_LAYOUT_STRIPE_UNIT, &layout->fl_stripe_unit);
if (rc) return rc;
// deal with stripe_count
rc = extract_uint32_attr(attrs, XATTR_LAYOUT_STRIPE_COUNT, &layout->fl_stripe_count);
if (rc) return rc;
// deal with object_size
rc = extract_uint32_attr(attrs, XATTR_LAYOUT_OBJECT_SIZE, &layout->fl_object_size);
if (rc) return rc;
// deal with size
size_t ssize;
rc = extract_sizet_attr(attrs, XATTR_SIZE, &ssize);
if (rc) {
return rc;
}
*size = ssize;
// make valgrind happy by setting unused fl_pg_pool
layout->fl_pg_pool = 0;
return 0;
}
int libradosstriper::RadosStriperImpl::openStripedObjectForRead(
const std::string& soid,
ceph_file_layout *layout,
uint64_t *size,
std::string *lockCookie)
{
  // take a lock on the first rados object, if it exists, and get its size
// check, lock and size reading must be atomic and are thus done within a single operation
librados::ObjectWriteOperation op;
op.assert_exists();
*lockCookie = getUUID();
utime_t dur = utime_t();
rados::cls::lock::lock(&op, RADOS_LOCK_NAME, ClsLockType::SHARED, *lockCookie, "Tag", "", dur, 0);
std::string firstObjOid = getObjectId(soid, 0);
int rc = m_ioCtx.operate(firstObjOid, &op);
if (rc) {
// error case (including -ENOENT)
return rc;
}
rc = internal_get_layout_and_size(firstObjOid, layout, size);
if (rc) {
unlockObject(soid, *lockCookie);
lderr(cct()) << "RadosStriperImpl::openStripedObjectForRead : "
<< "could not load layout and size for "
<< soid << " : rc = " << rc << dendl;
}
return rc;
}
int libradosstriper::RadosStriperImpl::openStripedObjectForWrite(const std::string& soid,
ceph_file_layout *layout,
uint64_t *size,
std::string *lockCookie,
bool isFileSizeAbsolute)
{
  // take a lock on the first rados object, if it exists
// check and lock must be atomic and are thus done within a single operation
librados::ObjectWriteOperation op;
op.assert_exists();
*lockCookie = getUUID();
utime_t dur = utime_t();
rados::cls::lock::lock(&op, RADOS_LOCK_NAME, ClsLockType::SHARED, *lockCookie, "Tag", "", dur, 0);
std::string firstObjOid = getObjectId(soid, 0);
int rc = m_ioCtx.operate(firstObjOid, &op);
if (rc) {
if (rc == -ENOENT) {
// object does not exist, delegate to createEmptyStripedObject
int rc = createAndOpenStripedObject(soid, layout, *size, lockCookie, isFileSizeAbsolute);
      // the object did not exist, so the original size to return is 0
*size = 0;
return rc;
} else {
return rc;
}
}
// all fine
uint64_t curSize;
rc = internal_get_layout_and_size(firstObjOid, layout, &curSize);
if (rc) {
unlockObject(soid, *lockCookie);
lderr(cct()) << "RadosStriperImpl::openStripedObjectForWrite : "
<< "could not load layout and size for "
<< soid << " : rc = " << rc << dendl;
return rc;
}
  // atomically update the stored object size, but only if the new size is
  // bigger than the current one
if (!isFileSizeAbsolute)
*size += curSize;
librados::ObjectWriteOperation writeOp;
writeOp.cmpxattr(XATTR_SIZE, LIBRADOS_CMPXATTR_OP_GT, *size);
std::ostringstream oss;
oss << *size;
bufferlist bl;
bl.append(oss.str());
writeOp.setxattr(XATTR_SIZE, bl);
rc = m_ioCtx.operate(firstObjOid, &writeOp);
// return current size
*size = curSize;
  // handle the case where the stored size was already bigger than the new one
if (-ECANCELED == rc)
rc = 0;
if (rc) {
unlockObject(soid, *lockCookie);
lderr(cct()) << "RadosStriperImpl::openStripedObjectForWrite : "
<< "could not set new size for "
<< soid << " : rc = " << rc << dendl;
}
return rc;
}
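// Illustrative behaviour of the guarded size update above : if striper.size
// currently holds 8388608 (8MB), then
//   - opening for a write that ends at 10485760 (10MB) passes the cmpxattr
//     guard and striper.size is rewritten to 10MB;
//   - opening for a write entirely within the first 8MB fails the guard with
//     -ECANCELED, remapped to 0 above : the stored size is never shrunk by a
//     write.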
int libradosstriper::RadosStriperImpl::createAndOpenStripedObject(const std::string& soid,
ceph_file_layout *layout,
uint64_t size,
std::string *lockCookie,
bool isFileSizeAbsolute)
{
// build atomic write operation
librados::ObjectWriteOperation writeOp;
writeOp.create(true);
// object_size
std::ostringstream oss_object_size;
oss_object_size << m_layout.fl_object_size;
bufferlist bl_object_size;
bl_object_size.append(oss_object_size.str());
writeOp.setxattr(XATTR_LAYOUT_OBJECT_SIZE, bl_object_size);
// stripe unit
std::ostringstream oss_stripe_unit;
oss_stripe_unit << m_layout.fl_stripe_unit;
bufferlist bl_stripe_unit;
bl_stripe_unit.append(oss_stripe_unit.str());
writeOp.setxattr(XATTR_LAYOUT_STRIPE_UNIT, bl_stripe_unit);
// stripe count
std::ostringstream oss_stripe_count;
oss_stripe_count << m_layout.fl_stripe_count;
bufferlist bl_stripe_count;
bl_stripe_count.append(oss_stripe_count.str());
writeOp.setxattr(XATTR_LAYOUT_STRIPE_COUNT, bl_stripe_count);
// size
std::ostringstream oss_size;
oss_size << (isFileSizeAbsolute?size:0);
bufferlist bl_size;
bl_size.append(oss_size.str());
writeOp.setxattr(XATTR_SIZE, bl_size);
// effectively change attributes
std::string firstObjOid = getObjectId(soid, 0);
int rc = m_ioCtx.operate(firstObjOid, &writeOp);
  // in case of error (but not EEXIST, which would just mean the object already existed), return
if (rc && -EEXIST != rc) return rc;
// Otherwise open the object
uint64_t fileSize = size;
return openStripedObjectForWrite(soid, layout, &fileSize, lockCookie, isFileSizeAbsolute);
}
static void striper_truncate_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
auto cdata = ceph::ref_t<TruncateCompletionData>(static_cast<TruncateCompletionData*>(arg), false);
libradosstriper::MultiAioCompletionImpl *comp =
reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
if (0 == comp->rval) {
// all went fine, change size in the external attributes
std::ostringstream oss;
oss << cdata->m_size;
bufferlist bl;
bl.append(oss.str());
cdata->m_striper->setxattr(cdata->m_soid, XATTR_SIZE, bl);
}
}
int libradosstriper::RadosStriperImpl::truncate(const std::string& soid,
uint64_t original_size,
uint64_t size,
ceph_file_layout &layout)
{
auto cdata = ceph::make_ref<TruncateCompletionData>(this, soid, size);
libradosstriper::MultiAioCompletionImplPtr multi_completion{
new libradosstriper::MultiAioCompletionImpl, false};
multi_completion->set_complete_callback(cdata->get() /* create ref! */, striper_truncate_aio_req_complete);
  // call asynchronous version of truncate
int rc = aio_truncate(soid, multi_completion, original_size, size, layout);
// wait for completion of the truncation
multi_completion->finish_adding_requests();
multi_completion->wait_for_complete_and_cb();
// return result
if (rc == 0) {
rc = multi_completion->get_return_value();
}
return rc;
}
int libradosstriper::RadosStriperImpl::aio_truncate
(const std::string& soid,
libradosstriper::MultiAioCompletionImplPtr multi_completion,
uint64_t original_size,
uint64_t size,
ceph_file_layout &layout)
{
// handle the underlying rados objects. 3 cases here :
// -- the objects belonging to object sets entirely located
// before the truncation are unchanged
// -- the objects belonging to the object set where the
// truncation took place are truncated or removed
// -- the objects belonging to object sets entirely located
// after the truncation are removed
// Note that we do it backward and that we change the size in
  // the external attributes only at the end. This makes sure that
// no rados object stays behind if we remove the striped object
// after a truncation has failed
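  // Worked example (illustrative numbers, default layout) : with
  // object_size = 4MB, stripe_count = 1, original_size = 10MB and a
  // truncation to size = 6MB :
  //   trunc_objectsetno = 6MB / 4MB / 1  = 1
  //   last_objectsetno  = 10MB / 4MB / 1 = 2
  // the first loop removes object 2, the second loop truncates object 1
  // to 2MB, object 0 is untouched, and striper.size is updated to 6MB by
  // striper_truncate_aio_req_complete once everything succeeded.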
uint64_t trunc_objectsetno = size / layout.fl_object_size / layout.fl_stripe_count;
uint64_t last_objectsetno = original_size / layout.fl_object_size / layout.fl_stripe_count;
bool exists = false;
for (int64_t objectno = (last_objectsetno+1) * layout.fl_stripe_count-1;
objectno >= (int64_t)((trunc_objectsetno + 1) * layout.fl_stripe_count);
objectno--) {
// if no object existed so far, check object existence
if (!exists) {
uint64_t nb_full_object_set = objectno / layout.fl_stripe_count;
uint64_t object_index_in_set = objectno % layout.fl_stripe_count;
uint64_t set_start_off = nb_full_object_set * layout.fl_object_size * layout.fl_stripe_count;
uint64_t object_start_off = set_start_off + object_index_in_set * layout.fl_stripe_unit;
exists = (original_size > object_start_off);
}
if (exists) {
// remove asynchronously
multi_completion->add_request();
auto data = ceph::make_ref<RadosRemoveCompletionData>(multi_completion, cct());
librados::AioCompletion *rados_completion =
librados::Rados::aio_create_completion(data->get() /* create ref! */,
rados_req_remove_complete);
int rc = m_ioCtx.aio_remove(getObjectId(soid, objectno), rados_completion);
rados_completion->release();
// in case the object did not exist, it means we had a sparse file, all is fine
if (rc && rc != -ENOENT) return rc;
}
}
for (int64_t objectno = ((trunc_objectsetno + 1) * layout.fl_stripe_count) -1;
objectno >= (int64_t)(trunc_objectsetno * layout.fl_stripe_count);
objectno--) {
// if no object existed so far, check object existence
if (!exists) {
uint64_t object_start_off = ((objectno / layout.fl_stripe_count) * layout.fl_object_size) +
((objectno % layout.fl_stripe_count) * layout.fl_stripe_unit);
exists = (original_size > object_start_off);
}
if (exists) {
// truncate
file_layout_t l;
l.from_legacy(layout);
uint64_t new_object_size = Striper::object_truncate_size(cct(), &l, objectno, size);
int rc;
if (new_object_size > 0 or 0 == objectno) {
// trunc is synchronous as there is no async version
// but note that only a single object will be truncated
// reducing the overload to a fixed amount
rc = m_ioCtx.trunc(getObjectId(soid, objectno), new_object_size);
} else {
// removes are asynchronous in order to speed up truncations of big files
multi_completion->add_request();
auto data = ceph::make_ref<RadosRemoveCompletionData>(multi_completion, cct());
librados::AioCompletion *rados_completion =
librados::Rados::aio_create_completion(data->get() /* create ref! */,
rados_req_remove_complete);
rc = m_ioCtx.aio_remove(getObjectId(soid, objectno), rados_completion);
rados_completion->release();
}
// in case the object did not exist, it means we had a sparse file, all is fine
if (rc && rc != -ENOENT) return rc;
}
}
return 0;
}
int libradosstriper::RadosStriperImpl::grow(const std::string& soid,
uint64_t original_size,
uint64_t size,
ceph_file_layout &layout)
{
// handle the underlying rados objects. As we support sparse objects,
// we only have to change the size in the external attributes
std::ostringstream oss;
oss << size;
bufferlist bl;
bl.append(oss.str());
int rc = m_ioCtx.setxattr(getObjectId(soid, 0), XATTR_SIZE, bl);
return rc;
}
std::string libradosstriper::RadosStriperImpl::getUUID()
{
struct uuid_d uuid;
uuid.generate_random();
char suuid[37];
uuid.print(suuid);
return std::string(suuid);
}
| 58,551 | 35.345127 | 121 |
cc
|
null |
ceph-main/src/libradosstriper/RadosStriperImpl.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOSSTRIPER_RADOSSTRIPERIMPL_H
#define CEPH_LIBRADOSSTRIPER_RADOSSTRIPERIMPL_H
#include <string>
#include <boost/intrusive_ptr.hpp>
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/radosstriper/libradosstriper.h"
#include "include/radosstriper/libradosstriper.hpp"
#include "MultiAioCompletionImpl.h"
#include "librados/IoCtxImpl.h"
#include "librados/AioCompletionImpl.h"
#include "common/RefCountedObj.h"
#include "common/ceph_context.h"
namespace libradosstriper {
using MultiAioCompletionImplPtr =
boost::intrusive_ptr<MultiAioCompletionImpl>;
struct RadosStriperImpl {
/**
* exception wrapper around an error code
*/
struct ErrorCode {
ErrorCode(int error) : m_code(error) {};
int m_code;
};
  /*
   * Constructor
   * @param ioctx the io context in which the striped objects are stored
   * @param ioctx_impl the internal implementation of the given io context
   */
RadosStriperImpl(librados::IoCtx& ioctx, librados::IoCtxImpl *ioctx_impl);
/// Destructor
~RadosStriperImpl() {};
// configuration
int setObjectLayoutStripeUnit(unsigned int stripe_unit);
int setObjectLayoutStripeCount(unsigned int stripe_count);
int setObjectLayoutObjectSize(unsigned int object_size);
// xattrs
int getxattr(const object_t& soid, const char *name, bufferlist& bl);
int setxattr(const object_t& soid, const char *name, bufferlist& bl);
int getxattrs(const object_t& soid, std::map<std::string, bufferlist>& attrset);
int rmxattr(const object_t& soid, const char *name);
// io
int write(const std::string& soid, const bufferlist& bl, size_t len, uint64_t off);
int append(const std::string& soid, const bufferlist& bl, size_t len);
int write_full(const std::string& soid, const bufferlist& bl);
int read(const std::string& soid, bufferlist* pbl, size_t len, uint64_t off);
// asynchronous io
int aio_write(const std::string& soid, librados::AioCompletionImpl *c,
const bufferlist& bl, size_t len, uint64_t off);
int aio_append(const std::string& soid, librados::AioCompletionImpl *c,
const bufferlist& bl, size_t len);
int aio_write_full(const std::string& soid, librados::AioCompletionImpl *c,
const bufferlist& bl);
int aio_read(const std::string& soid, librados::AioCompletionImpl *c,
bufferlist* pbl, size_t len, uint64_t off);
int aio_read(const std::string& soid, librados::AioCompletionImpl *c,
char* buf, size_t len, uint64_t off);
int aio_flush();
// stat, deletion and truncation
int stat(const std::string& soid, uint64_t *psize, time_t *pmtime);
int stat2(const std::string& soid, uint64_t *psize, struct timespec *pts);
template<class TimeType>
struct StatFunction {
typedef int (librados::IoCtxImpl::*Type) (const object_t& oid,
librados::AioCompletionImpl *c,
uint64_t *psize, TimeType *pmtime);
};
template<class TimeType>
int aio_generic_stat(const std::string& soid, librados::AioCompletionImpl *c,
uint64_t *psize, TimeType *pmtime,
typename StatFunction<TimeType>::Type statFunction);
int aio_stat(const std::string& soid, librados::AioCompletionImpl *c,
uint64_t *psize, time_t *pmtime);
int aio_stat2(const std::string& soid, librados::AioCompletionImpl *c,
uint64_t *psize, struct timespec *pts);
int remove(const std::string& soid, int flags=0);
int trunc(const std::string& soid, uint64_t size);
  // asynchronous remove. Note that the removal is not 100% parallelized:
  // the removal of the first rados object of the striped object will be
  // done via a synchronous call after the completion of all other removals.
  // These are done asynchronously and in parallel.
int aio_remove(const std::string& soid, librados::AioCompletionImpl *c, int flags=0);
// reference counting
void get() {
std::lock_guard l{lock};
m_refCnt ++ ;
}
void put() {
bool deleteme = false;
lock.lock();
m_refCnt --;
if (m_refCnt == 0)
deleteme = true;
cond.notify_all();
lock.unlock();
if (deleteme)
delete this;
}
// objectid manipulation
std::string getObjectId(const object_t& soid, long long unsigned objectno);
// opening and closing of striped objects
void unlockObject(const std::string& soid,
const std::string& lockCookie);
void aio_unlockObject(const std::string& soid,
const std::string& lockCookie,
librados::AioCompletion *c);
// internal versions of IO method
int write_in_open_object(const std::string& soid,
const ceph_file_layout& layout,
const std::string& lockCookie,
const bufferlist& bl,
size_t len,
uint64_t off);
int aio_write_in_open_object(const std::string& soid,
librados::AioCompletionImpl *c,
const ceph_file_layout& layout,
const std::string& lockCookie,
const bufferlist& bl,
size_t len,
uint64_t off);
int internal_aio_write(const std::string& soid,
MultiAioCompletionImplPtr c,
const bufferlist& bl,
size_t len,
uint64_t off,
const ceph_file_layout& layout);
int extract_uint32_attr(std::map<std::string, bufferlist> &attrs,
const std::string& key,
ceph_le32 *value);
int extract_sizet_attr(std::map<std::string, bufferlist> &attrs,
const std::string& key,
size_t *value);
int internal_get_layout_and_size(const std::string& oid,
ceph_file_layout *layout,
uint64_t *size);
int internal_aio_remove(const std::string& soid,
MultiAioCompletionImplPtr multi_completion,
int flags=0);
/**
* opens an existing striped object and takes a shared lock on it
* @return 0 if everything is ok and the lock was taken. -errcode otherwise
   * In particular, if the striped object does not exist, -ENOENT is returned.
   * In case the return code is not 0, no lock is taken.
*/
int openStripedObjectForRead(const std::string& soid,
ceph_file_layout *layout,
uint64_t *size,
std::string *lockCookie);
/**
* opens an existing striped object, takes a shared lock on it
* and sets its size to the size it will have after the write.
   * In case the striped object does not exist, it will create it by
   * calling createAndOpenStripedObject.
* @param layout this is filled with the layout of the file
* @param size new size of the file (together with isFileSizeAbsolute)
* In case of success, this is filled with the size of the file before the opening
* @param isFileSizeAbsolute if false, this means that the given size should
* be added to the current file size (append mode)
* @return 0 if everything is ok and the lock was taken. -errcode otherwise
   * In case the return code is not 0, no lock is taken
*/
int openStripedObjectForWrite(const std::string& soid,
ceph_file_layout *layout,
uint64_t *size,
std::string *lockCookie,
bool isFileSizeAbsolute);
/**
* creates an empty striped object with the given size and opens it calling
* openStripedObjectForWrite, which implies taking a shared lock on it
   * Also deals with the case where the object was created in the meantime
* @param isFileSizeAbsolute if false, this means that the given size should
* be added to the current file size (append mode). This of course only makes
* sense in case the striped object already exists
* @return 0 if everything is ok and the lock was taken. -errcode otherwise
   * In case the return code is not 0, no lock is taken
*/
int createAndOpenStripedObject(const std::string& soid,
ceph_file_layout *layout,
uint64_t size,
std::string *lockCookie,
bool isFileSizeAbsolute);
/**
* truncates an object synchronously. Should only be called with size < original_size
*/
int truncate(const std::string& soid,
uint64_t original_size,
uint64_t size,
ceph_file_layout &layout);
/**
   * truncates an object asynchronously. Should only be called with size < original_size.
   * Note that the method is not 100% asynchronous: only the removal of rados objects
   * is. The (potential) truncation of the rados object residing just at the truncation
   * point is synchronous, for lack of an asynchronous truncation in the rados layer.
*/
int aio_truncate(const std::string& soid,
MultiAioCompletionImplPtr c,
uint64_t original_size,
uint64_t size,
ceph_file_layout &layout);
/**
* grows an object (adding 0s). Should only be called with size > original_size
*/
int grow(const std::string& soid,
uint64_t original_size,
uint64_t size,
ceph_file_layout &layout);
/**
* creates a unique identifier
*/
static std::string getUUID();
CephContext *cct() {
return (CephContext*)m_radosCluster.cct();
}
// reference counting
std::condition_variable cond;
int m_refCnt;
std::mutex lock;
// Context
librados::Rados m_radosCluster;
librados::IoCtx m_ioCtx;
librados::IoCtxImpl *m_ioCtxImpl;
// Default layout
ceph_file_layout m_layout;
};
}
#endif
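
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the header above): the manual get()/put()
// reference counting used by RadosStriperImpl, reduced to a self-contained toy
// without the condition variable. All names here are hypothetical.
#include <mutex>

namespace {

struct RefCounted {
  std::mutex lock;
  int ref_cnt = 1;  // the creator holds the first reference

  void get() {
    std::lock_guard<std::mutex> l(lock);
    ++ref_cnt;
  }

  void put() {
    bool deleteme = false;
    {
      std::lock_guard<std::mutex> l(lock);
      deleteme = (--ref_cnt == 0);
    }
    if (deleteme)
      delete this;  // destroy outside the lock, as put() above does
  }
};

} // anonymous namespace

int main() {
  auto* obj = new RefCounted();  // ref_cnt == 1
  obj->get();                    // e.g. a RadosStriper copy takes a reference
  obj->put();                    // the copy is destroyed
  obj->put();                    // last reference dropped: obj deletes itself
  return 0;
}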

// ==== ceph-main/src/libradosstriper/libradosstriper.cc ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include "libradosstriper/RadosStriperImpl.h"
#include "libradosstriper/MultiAioCompletionImpl.h"
#include "include/types.h"
#include "include/radosstriper/libradosstriper.h"
#include "include/radosstriper/libradosstriper.hpp"
#include "librados/RadosXattrIter.h"
/*
* This file implements the rados striper API.
 * There are 2 flavours of it:
 * - the C API, found in include/radosstriper/libradosstriper.h
 * - the C++ API, found in include/radosstriper/libradosstriper.hpp
*/
///////////////////////////// C++ API //////////////////////////////
libradosstriper::MultiAioCompletion::~MultiAioCompletion()
{
ceph_assert(pc->ref == 1);
pc->put();
}
int libradosstriper::MultiAioCompletion::set_complete_callback
(void *cb_arg, rados_callback_t cb)
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->set_complete_callback(cb_arg, cb);
}
int libradosstriper::MultiAioCompletion::set_safe_callback
(void *cb_arg, rados_callback_t cb)
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->set_safe_callback(cb_arg, cb);
}
void libradosstriper::MultiAioCompletion::wait_for_complete()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_complete();
}
void libradosstriper::MultiAioCompletion::wait_for_safe()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_safe();
}
bool libradosstriper::MultiAioCompletion::is_complete()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_complete();
}
bool libradosstriper::MultiAioCompletion::is_safe()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_safe();
}
void libradosstriper::MultiAioCompletion::wait_for_complete_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_complete_and_cb();
}
void libradosstriper::MultiAioCompletion::wait_for_safe_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_safe_and_cb();
}
bool libradosstriper::MultiAioCompletion::is_complete_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_complete_and_cb();
}
bool libradosstriper::MultiAioCompletion::is_safe_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_safe_and_cb();
}
int libradosstriper::MultiAioCompletion::get_return_value()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->get_return_value();
}
void libradosstriper::MultiAioCompletion::release()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->put();
delete this;
}
libradosstriper::RadosStriper::RadosStriper() :
rados_striper_impl(0)
{
}
void libradosstriper::RadosStriper::to_rados_striper_t(RadosStriper &striper, rados_striper_t *s)
{
*s = (rados_striper_t)striper.rados_striper_impl;
striper.rados_striper_impl->get();
}
libradosstriper::RadosStriper::RadosStriper(const RadosStriper& rs)
{
rados_striper_impl = rs.rados_striper_impl;
if (rados_striper_impl) {
rados_striper_impl->get();
}
}
libradosstriper::RadosStriper& libradosstriper::RadosStriper::operator=(const RadosStriper& rs)
{
if (rados_striper_impl)
rados_striper_impl->put();
rados_striper_impl = rs.rados_striper_impl;
rados_striper_impl->get();
return *this;
}
libradosstriper::RadosStriper::~RadosStriper()
{
if (rados_striper_impl)
rados_striper_impl->put();
rados_striper_impl = 0;
}
int libradosstriper::RadosStriper::striper_create(librados::IoCtx& ioctx,
RadosStriper *striper)
{
try {
striper->rados_striper_impl = new libradosstriper::RadosStriperImpl(ioctx, ioctx.io_ctx_impl);
striper->rados_striper_impl->get();
} catch (int rc) {
return rc;
}
return 0;
}
int libradosstriper::RadosStriper::set_object_layout_stripe_unit
(unsigned int stripe_unit)
{
return rados_striper_impl->setObjectLayoutStripeUnit(stripe_unit);
}
int libradosstriper::RadosStriper::set_object_layout_stripe_count
(unsigned int stripe_count)
{
return rados_striper_impl->setObjectLayoutStripeCount(stripe_count);
}
int libradosstriper::RadosStriper::set_object_layout_object_size
(unsigned int object_size)
{
return rados_striper_impl->setObjectLayoutObjectSize(object_size);
}
int libradosstriper::RadosStriper::getxattr(const std::string& oid, const char *name, bufferlist& bl)
{
return rados_striper_impl->getxattr(oid, name, bl);
}
int libradosstriper::RadosStriper::setxattr(const std::string& oid, const char *name, bufferlist& bl)
{
return rados_striper_impl->setxattr(oid, name, bl);
}
int libradosstriper::RadosStriper::rmxattr(const std::string& oid, const char *name)
{
return rados_striper_impl->rmxattr(oid, name);
}
int libradosstriper::RadosStriper::getxattrs(const std::string& oid,
std::map<std::string, bufferlist>& attrset)
{
return rados_striper_impl->getxattrs(oid, attrset);
}
int libradosstriper::RadosStriper::write(const std::string& soid,
const bufferlist& bl,
size_t len,
uint64_t off)
{
return rados_striper_impl->write(soid, bl, len, off);
}
int libradosstriper::RadosStriper::write_full(const std::string& soid,
const bufferlist& bl)
{
return rados_striper_impl->write_full(soid, bl);
}
int libradosstriper::RadosStriper::append(const std::string& soid,
const bufferlist& bl,
size_t len)
{
return rados_striper_impl->append(soid, bl, len);
}
int libradosstriper::RadosStriper::aio_write(const std::string& soid,
librados::AioCompletion *c,
const bufferlist& bl,
size_t len,
uint64_t off)
{
return rados_striper_impl->aio_write(soid, c->pc, bl, len, off);
}
int libradosstriper::RadosStriper::aio_write_full(const std::string& soid,
librados::AioCompletion *c,
const bufferlist& bl)
{
return rados_striper_impl->aio_write_full(soid, c->pc, bl);
}
int libradosstriper::RadosStriper::aio_append(const std::string& soid,
librados::AioCompletion *c,
const bufferlist& bl,
size_t len)
{
return rados_striper_impl->aio_append(soid, c->pc, bl, len);
}
int libradosstriper::RadosStriper::read(const std::string& soid,
bufferlist* bl,
size_t len,
uint64_t off)
{
bl->clear();
bl->push_back(buffer::create(len));
return rados_striper_impl->read(soid, bl, len, off);
}
int libradosstriper::RadosStriper::aio_read(const std::string& soid,
librados::AioCompletion *c,
bufferlist* bl,
size_t len,
uint64_t off)
{
bl->clear();
bl->push_back(buffer::create(len));
return rados_striper_impl->aio_read(soid, c->pc, bl, len, off);
}
int libradosstriper::RadosStriper::stat(const std::string& soid, uint64_t *psize, time_t *pmtime)
{
return rados_striper_impl->stat(soid, psize, pmtime);
}
int libradosstriper::RadosStriper::aio_stat(const std::string& soid,
librados::AioCompletion *c,
uint64_t *psize,
time_t *pmtime)
{
return rados_striper_impl->aio_stat(soid, c->pc, psize, pmtime);
}
int libradosstriper::RadosStriper::stat2(const std::string& soid, uint64_t *psize, struct timespec *pts)
{
return rados_striper_impl->stat2(soid, psize, pts);
}
int libradosstriper::RadosStriper::aio_stat2(const std::string& soid,
librados::AioCompletion *c,
uint64_t *psize,
struct timespec *pts)
{
return rados_striper_impl->aio_stat2(soid, c->pc, psize, pts);
}
int libradosstriper::RadosStriper::remove(const std::string& soid)
{
return rados_striper_impl->remove(soid);
}
int libradosstriper::RadosStriper::aio_remove(const std::string& soid,
librados::AioCompletion *c)
{
return rados_striper_impl->aio_remove(soid, c->pc);
}
int libradosstriper::RadosStriper::remove(const std::string& soid, int flags)
{
return rados_striper_impl->remove(soid, flags);
}
int libradosstriper::RadosStriper::aio_remove(const std::string& soid,
librados::AioCompletion *c,
int flags)
{
return rados_striper_impl->aio_remove(soid, c->pc, flags);
}
int libradosstriper::RadosStriper::trunc(const std::string& soid, uint64_t size)
{
return rados_striper_impl->trunc(soid, size);
}
int libradosstriper::RadosStriper::aio_flush()
{
return rados_striper_impl->aio_flush();
}
libradosstriper::MultiAioCompletion* libradosstriper::RadosStriper::multi_aio_create_completion()
{
MultiAioCompletionImpl *c = new MultiAioCompletionImpl;
return new MultiAioCompletion(c);
}
libradosstriper::MultiAioCompletion*
libradosstriper::RadosStriper::multi_aio_create_completion(void *cb_arg,
librados::callback_t cb_complete,
librados::callback_t cb_safe)
{
MultiAioCompletionImpl *c;
int r = rados_striper_multi_aio_create_completion(cb_arg, cb_complete, cb_safe, (void**)&c);
ceph_assert(r == 0);
return new MultiAioCompletion(c);
}
///////////////////////////// C API //////////////////////////////
extern "C" int rados_striper_create(rados_ioctx_t ioctx,
rados_striper_t *striper)
{
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(ioctx, ctx);
libradosstriper::RadosStriper striperp;
int rc = libradosstriper::RadosStriper::striper_create(ctx, &striperp);
if (0 == rc)
libradosstriper::RadosStriper::to_rados_striper_t(striperp, striper);
return rc;
}
extern "C" void rados_striper_destroy(rados_striper_t striper)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
impl->put();
}
extern "C" int rados_striper_set_object_layout_stripe_unit(rados_striper_t striper,
unsigned int stripe_unit)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->setObjectLayoutStripeUnit(stripe_unit);
}
extern "C" int rados_striper_set_object_layout_stripe_count(rados_striper_t striper,
unsigned int stripe_count)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->setObjectLayoutStripeCount(stripe_count);
}
extern "C" int rados_striper_set_object_layout_object_size(rados_striper_t striper,
unsigned int object_size)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->setObjectLayoutObjectSize(object_size);
}
extern "C" int rados_striper_write(rados_striper_t striper,
const char *soid,
const char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->write(soid, bl, len, off);
}
extern "C" int rados_striper_write_full(rados_striper_t striper,
const char *soid,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->write_full(soid, bl);
}
extern "C" int rados_striper_append(rados_striper_t striper,
const char *soid,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->append(soid, bl, len);
}
extern "C" int rados_striper_read(rados_striper_t striper,
const char *soid,
char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bufferptr bp = buffer::create_static(len, buf);
bl.push_back(bp);
int ret = impl->read(soid, &bl, len, off);
if (ret >= 0) {
if (bl.length() > len)
return -ERANGE;
if (!bl.is_provided_buffer(buf))
bl.begin().copy(bl.length(), buf);
ret = bl.length(); // hrm :/
}
return ret;
}
extern "C" int rados_striper_remove(rados_striper_t striper, const char* soid)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->remove(soid);
}
extern "C" int rados_striper_trunc(rados_striper_t striper, const char* soid, uint64_t size)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->trunc(soid, size);
}
extern "C" int rados_striper_getxattr(rados_striper_t striper,
const char *oid,
const char *name,
char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
bufferlist bl;
int ret = impl->getxattr(oid, name, bl);
if (ret >= 0) {
if (bl.length() > len)
return -ERANGE;
bl.begin().copy(bl.length(), buf);
ret = bl.length();
}
return ret;
}
extern "C" int rados_striper_setxattr(rados_striper_t striper,
const char *oid,
const char *name,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
bufferlist bl;
bl.append(buf, len);
return impl->setxattr(obj, name, bl);
}
extern "C" int rados_striper_rmxattr(rados_striper_t striper,
const char *oid,
const char *name)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
return impl->rmxattr(obj, name);
}
extern "C" int rados_striper_getxattrs(rados_striper_t striper,
const char *oid,
rados_xattrs_iter_t *iter)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
librados::RadosXattrsIter *it = new librados::RadosXattrsIter();
if (!it)
return -ENOMEM;
int ret = impl->getxattrs(obj, it->attrset);
if (ret) {
delete it;
return ret;
}
it->i = it->attrset.begin();
*iter = it;
return 0;
}
extern "C" int rados_striper_getxattrs_next(rados_xattrs_iter_t iter,
const char **name,
const char **val,
size_t *len)
{
return rados_getxattrs_next(iter, name, val, len);
}
extern "C" void rados_striper_getxattrs_end(rados_xattrs_iter_t iter)
{
return rados_getxattrs_end(iter);
}
extern "C" int rados_striper_stat(rados_striper_t striper,
const char* soid,
uint64_t *psize,
time_t *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->stat(soid, psize, pmtime);
}
extern "C" int rados_striper_stat2(rados_striper_t striper,
const char* soid,
uint64_t *psize,
struct timespec *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->stat2(soid, psize, pmtime);
}
extern "C" int rados_striper_multi_aio_create_completion(void *cb_arg,
rados_callback_t cb_complete,
rados_callback_t cb_safe,
rados_striper_multi_completion_t *pc)
{
libradosstriper::MultiAioCompletionImpl *c = new libradosstriper::MultiAioCompletionImpl;
if (cb_complete)
c->set_complete_callback(cb_arg, cb_complete);
if (cb_safe)
c->set_safe_callback(cb_arg, cb_safe);
*pc = c;
return 0;
}
extern "C" void rados_striper_multi_aio_wait_for_complete(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_complete();
}
extern "C" void rados_striper_multi_aio_wait_for_safe(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_safe();
}
extern "C" int rados_striper_multi_aio_is_complete(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_complete();
}
extern "C" int rados_striper_multi_aio_is_safe(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_safe();
}
extern "C" void rados_striper_multi_aio_wait_for_complete_and_cb(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_complete_and_cb();
}
extern "C" void rados_striper_multi_aio_wait_for_safe_and_cb(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_safe_and_cb();
}
extern "C" int rados_striper_multi_aio_is_complete_and_cb(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_complete_and_cb();
}
extern "C" int rados_striper_multi_aio_is_safe_and_cb(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_safe_and_cb();
}
extern "C" int rados_striper_multi_aio_get_return_value(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->get_return_value();
}
extern "C" void rados_striper_multi_aio_release(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->put();
}
extern "C" int rados_striper_aio_write(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
const char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->aio_write(soid, (librados::AioCompletionImpl*)completion, bl, len, off);
}
extern "C" int rados_striper_aio_append(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->aio_append(soid, (librados::AioCompletionImpl*)completion, bl, len);
}
extern "C" int rados_striper_aio_write_full(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->aio_write_full(soid, (librados::AioCompletionImpl*)completion, bl);
}
extern "C" int rados_striper_aio_read(rados_striper_t striper,
const char *soid,
rados_completion_t completion,
char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_read(soid, (librados::AioCompletionImpl*)completion, buf, len, off);
}
extern "C" int rados_striper_aio_remove(rados_striper_t striper,
const char* soid,
rados_completion_t completion)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_remove(soid, (librados::AioCompletionImpl*)completion);
}
extern "C" void rados_striper_aio_flush(rados_striper_t striper)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
impl->aio_flush();
}
extern "C" int rados_striper_aio_stat(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
uint64_t *psize,
time_t *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_stat(soid, (librados::AioCompletionImpl*)completion, psize, pmtime);
}
extern "C" int rados_striper_aio_stat2(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
uint64_t *psize,
struct timespec *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_stat2(soid, (librados::AioCompletionImpl*)completion, psize, pmtime);
}
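
// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the file above): minimal end-to-end use of
// the C API implemented here. The pool name, object id and client name are
// hypothetical, and error handling is reduced to early exits.
#include <rados/librados.h>
#include <radosstriper/libradosstriper.h>

int main() {
  rados_t cluster;
  if (rados_create(&cluster, "admin") < 0)
    return 1;
  rados_conf_read_file(cluster, NULL);  // default ceph.conf search path
  if (rados_connect(cluster) < 0)
    return 1;

  rados_ioctx_t ioctx;
  if (rados_ioctx_create(cluster, "mypool", &ioctx) < 0) {  // hypothetical pool
    rados_shutdown(cluster);
    return 1;
  }

  rados_striper_t striper;
  if (rados_striper_create(ioctx, &striper) == 0) {
    // the striper transparently spreads the write over rados objects
    const char data[] = "hello striped world";
    rados_striper_write(striper, "myobject", data, sizeof(data), 0);
    rados_striper_destroy(striper);
  }

  rados_ioctx_destroy(ioctx);
  rados_shutdown(cluster);
  return 0;
}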

// ==== ceph-main/src/librbd/AsioEngine.cc ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsioEngine.h"
#include "include/Context.h"
#include "include/neorados/RADOS.hpp"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::AsioEngine: " \
<< this << " " << __func__ << ": "
namespace librbd {
AsioEngine::AsioEngine(std::shared_ptr<librados::Rados> rados)
: m_rados_api(std::make_shared<neorados::RADOS>(
neorados::RADOS::make_with_librados(*rados))),
m_cct(m_rados_api->cct()),
m_io_context(m_rados_api->get_io_context()),
m_api_strand(std::make_unique<boost::asio::io_context::strand>(
m_io_context)),
m_context_wq(std::make_unique<asio::ContextWQ>(m_cct, m_io_context)) {
ldout(m_cct, 20) << dendl;
auto rados_threads = m_cct->_conf.get_val<uint64_t>("librados_thread_count");
auto rbd_threads = m_cct->_conf.get_val<uint64_t>("rbd_op_threads");
if (rbd_threads > rados_threads) {
// inherit the librados thread count -- but increase it if librbd wants to
// utilize more threads
m_cct->_conf.set_val_or_die("librados_thread_count",
std::to_string(rbd_threads));
m_cct->_conf.apply_changes(nullptr);
}
}
AsioEngine::AsioEngine(librados::IoCtx& io_ctx)
: AsioEngine(std::make_shared<librados::Rados>(io_ctx)) {
}
AsioEngine::~AsioEngine() {
ldout(m_cct, 20) << dendl;
m_api_strand.reset();
}
void AsioEngine::dispatch(Context* ctx, int r) {
dispatch([ctx, r]() { ctx->complete(r); });
}
void AsioEngine::post(Context* ctx, int r) {
post([ctx, r]() { ctx->complete(r); });
}
} // namespace librbd
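
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the file above): the boost::asio
// dispatch()/post() semantics that AsioEngine wraps. dispatch() may run a
// handler inline when already on an io_context thread; post() always defers.
#include <boost/asio/dispatch.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <iostream>

int main() {
  boost::asio::io_context ioc;
  boost::asio::post(ioc, [&ioc] {
    // we are now running on an io_context thread, so this runs inline...
    boost::asio::dispatch(ioc, [] { std::cout << "dispatched\n"; });
    // ...while this is queued for a later iteration of run()
    boost::asio::post(ioc, [] { std::cout << "posted\n"; });
    std::cout << "outer handler done\n";
  });
  ioc.run();  // prints: dispatched, outer handler done, posted
  return 0;
}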

// ==== ceph-main/src/librbd/AsioEngine.h ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASIO_ENGINE_H
#define CEPH_LIBRBD_ASIO_ENGINE_H
#include "include/common_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include <memory>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>
struct Context;
namespace neorados { struct RADOS; }
namespace librbd {
namespace asio { struct ContextWQ; }
class AsioEngine {
public:
explicit AsioEngine(std::shared_ptr<librados::Rados> rados);
explicit AsioEngine(librados::IoCtx& io_ctx);
~AsioEngine();
AsioEngine(AsioEngine&&) = delete;
AsioEngine(const AsioEngine&) = delete;
AsioEngine& operator=(const AsioEngine&) = delete;
inline neorados::RADOS& get_rados_api() {
return *m_rados_api;
}
inline boost::asio::io_context& get_io_context() {
return m_io_context;
}
inline operator boost::asio::io_context&() {
return m_io_context;
}
using executor_type = boost::asio::io_context::executor_type;
inline executor_type get_executor() {
return m_io_context.get_executor();
}
inline boost::asio::io_context::strand& get_api_strand() {
// API client callbacks should never fire concurrently
return *m_api_strand;
}
inline asio::ContextWQ* get_work_queue() {
return m_context_wq.get();
}
template <typename T>
void dispatch(T&& t) {
boost::asio::dispatch(m_io_context, std::forward<T>(t));
}
void dispatch(Context* ctx, int r);
template <typename T>
void post(T&& t) {
boost::asio::post(m_io_context, std::forward<T>(t));
}
void post(Context* ctx, int r);
private:
std::shared_ptr<neorados::RADOS> m_rados_api;
CephContext* m_cct;
boost::asio::io_context& m_io_context;
std::unique_ptr<boost::asio::io_context::strand> m_api_strand;
std::unique_ptr<asio::ContextWQ> m_context_wq;
};
} // namespace librbd
#endif // CEPH_LIBRBD_ASIO_ENGINE_H
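
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the header above): why get_api_strand()
// exists. Handlers submitted through a strand never run concurrently, even
// with several threads driving the io_context. This sketch uses make_strand();
// the engine itself holds the older io_context::strand for the same purpose.
#include <boost/asio/io_context.hpp>
#include <boost/asio/post.hpp>
#include <boost/asio/strand.hpp>
#include <atomic>
#include <cassert>
#include <thread>
#include <vector>

int main() {
  boost::asio::io_context ioc;
  auto strand = boost::asio::make_strand(ioc);
  std::atomic<int> in_flight{0};
  for (int i = 0; i < 100; ++i) {
    boost::asio::post(strand, [&in_flight] {
      assert(++in_flight == 1);  // never more than one handler at a time
      --in_flight;
    });
  }
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; ++i)
    threads.emplace_back([&ioc] { ioc.run(); });
  for (auto& t : threads)
    t.join();
  return 0;
}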

// ==== ceph-main/src/librbd/AsyncObjectThrottle.cc ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/AsyncRequest.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
namespace librbd
{
template <typename T>
AsyncObjectThrottle<T>::AsyncObjectThrottle(
const AsyncRequest<T>* async_request, T &image_ctx,
const ContextFactory& context_factory, Context *ctx,
ProgressContext *prog_ctx, uint64_t object_no, uint64_t end_object_no)
: m_lock(ceph::make_mutex(
util::unique_lock_name("librbd::AsyncThrottle::m_lock", this))),
m_async_request(async_request), m_image_ctx(image_ctx),
m_context_factory(context_factory), m_ctx(ctx), m_prog_ctx(prog_ctx),
m_object_no(object_no), m_end_object_no(end_object_no), m_current_ops(0),
m_ret(0)
{
}
template <typename T>
void AsyncObjectThrottle<T>::start_ops(uint64_t max_concurrent) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bool complete;
{
std::lock_guard l{m_lock};
for (uint64_t i = 0; i < max_concurrent; ++i) {
start_next_op();
if (m_ret < 0 && m_current_ops == 0) {
break;
}
}
complete = (m_current_ops == 0);
}
if (complete) {
// avoid re-entrant callback
m_image_ctx.op_work_queue->queue(m_ctx, m_ret);
delete this;
}
}
template <typename T>
void AsyncObjectThrottle<T>::finish_op(int r) {
bool complete;
{
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::lock_guard locker{m_lock};
--m_current_ops;
if (r < 0 && r != -ENOENT && m_ret == 0) {
m_ret = r;
}
start_next_op();
complete = (m_current_ops == 0);
}
if (complete) {
m_ctx->complete(m_ret);
delete this;
}
}
template <typename T>
void AsyncObjectThrottle<T>::start_next_op() {
bool done = false;
while (!done) {
if (m_async_request != NULL && m_async_request->is_canceled() &&
m_ret == 0) {
// allow in-flight ops to complete, but don't start new ops
m_ret = -ERESTART;
return;
} else if (m_ret != 0 || m_object_no >= m_end_object_no) {
return;
}
uint64_t ono = m_object_no++;
C_AsyncObjectThrottle<T> *ctx = m_context_factory(*this, ono);
int r = ctx->send();
if (r < 0) {
m_ret = r;
delete ctx;
return;
} else if (r > 0) {
// op completed immediately
delete ctx;
} else {
++m_current_ops;
done = true;
}
if (m_prog_ctx != NULL) {
r = m_prog_ctx->update_progress(ono, m_end_object_no);
if (r < 0) {
m_ret = r;
}
}
}
}
} // namespace librbd
#ifndef TEST_F
template class librbd::AsyncObjectThrottle<librbd::ImageCtx>;
#endif
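
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the file above): the throttling pattern,
// reduced to a synchronous toy. Keep at most max_concurrent operations in
// flight and start the next object from each completion. Names hypothetical.
#include <cstdint>
#include <deque>
#include <iostream>

namespace {

struct Op { uint64_t object_no; };  // a queued per-object operation

class Throttle {
 public:
  explicit Throttle(uint64_t end_object_no) : m_end(end_object_no) {}

  // mirrors start_ops(): prime the pipeline with max_concurrent ops
  void start_ops(uint64_t max_concurrent, std::deque<Op>* in_flight) {
    for (uint64_t i = 0; i < max_concurrent && m_next < m_end; ++i)
      in_flight->push_back(Op{m_next++});
  }

  // mirrors finish_op(): each completion launches the next object
  void finish_op(std::deque<Op>* in_flight) {
    in_flight->pop_front();
    if (m_next < m_end)
      in_flight->push_back(Op{m_next++});
  }

  bool complete(const std::deque<Op>& in_flight) const {
    return in_flight.empty() && m_next == m_end;
  }

 private:
  uint64_t m_next = 0;
  uint64_t m_end;
};

} // anonymous namespace

int main() {
  Throttle throttle(10);              // objects 0..9
  std::deque<Op> in_flight;
  throttle.start_ops(3, &in_flight);  // at most 3 concurrent operations
  while (!throttle.complete(in_flight)) {
    std::cout << "completed object " << in_flight.front().object_no << "\n";
    throttle.finish_op(&in_flight);
  }
  return 0;
}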

// ==== ceph-main/src/librbd/AsyncObjectThrottle.h ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
#define CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
#include "include/int_types.h"
#include "include/Context.h"
#include <boost/function.hpp>
namespace librbd
{
template <typename ImageCtxT> class AsyncRequest;
class ProgressContext;
struct ImageCtx;
class AsyncObjectThrottleFinisher {
public:
virtual ~AsyncObjectThrottleFinisher() {};
virtual void finish_op(int r) = 0;
};
template <typename ImageCtxT = ImageCtx>
class C_AsyncObjectThrottle : public Context {
public:
C_AsyncObjectThrottle(AsyncObjectThrottleFinisher &finisher,
ImageCtxT &image_ctx)
: m_image_ctx(image_ctx), m_finisher(finisher) {
}
virtual int send() = 0;
protected:
ImageCtxT &m_image_ctx;
void finish(int r) override {
m_finisher.finish_op(r);
}
private:
AsyncObjectThrottleFinisher &m_finisher;
};
template <typename ImageCtxT = ImageCtx>
class AsyncObjectThrottle : public AsyncObjectThrottleFinisher {
public:
typedef boost::function<
C_AsyncObjectThrottle<ImageCtxT>* (AsyncObjectThrottle&,
uint64_t)> ContextFactory;
AsyncObjectThrottle(const AsyncRequest<ImageCtxT> *async_request,
ImageCtxT &image_ctx,
const ContextFactory& context_factory, Context *ctx,
ProgressContext *prog_ctx, uint64_t object_no,
uint64_t end_object_no);
void start_ops(uint64_t max_concurrent);
void finish_op(int r) override;
private:
ceph::mutex m_lock;
const AsyncRequest<ImageCtxT> *m_async_request;
ImageCtxT &m_image_ctx;
ContextFactory m_context_factory;
Context *m_ctx;
ProgressContext *m_prog_ctx;
uint64_t m_object_no;
uint64_t m_end_object_no;
uint64_t m_current_ops;
int m_ret;
void start_next_op();
};
} // namespace librbd
extern template class librbd::AsyncObjectThrottle<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H

// ==== ceph-main/src/librbd/AsyncRequest.cc ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsyncRequest.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
namespace librbd
{
template <typename T>
AsyncRequest<T>::AsyncRequest(T &image_ctx, Context *on_finish)
: m_image_ctx(image_ctx), m_on_finish(on_finish), m_canceled(false),
m_xlist_item(this) {
ceph_assert(m_on_finish != NULL);
start_request();
}
template <typename T>
AsyncRequest<T>::~AsyncRequest() {
}
template <typename T>
void AsyncRequest<T>::async_complete(int r) {
m_image_ctx.op_work_queue->queue(create_callback_context(), r);
}
template <typename T>
librados::AioCompletion *AsyncRequest<T>::create_callback_completion() {
return util::create_rados_callback(this);
}
template <typename T>
Context *AsyncRequest<T>::create_callback_context() {
return util::create_context_callback(this);
}
template <typename T>
Context *AsyncRequest<T>::create_async_callback_context() {
return util::create_context_callback<AsyncRequest<T>,
&AsyncRequest<T>::async_complete>(this);
}
template <typename T>
void AsyncRequest<T>::start_request() {
std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
m_image_ctx.async_requests.push_back(&m_xlist_item);
}
template <typename T>
void AsyncRequest<T>::finish_request() {
decltype(m_image_ctx.async_requests_waiters) waiters;
{
std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
ceph_assert(m_xlist_item.remove_myself());
if (m_image_ctx.async_requests.empty()) {
waiters = std::move(m_image_ctx.async_requests_waiters);
}
}
for (auto ctx : waiters) {
ctx->complete(0);
}
}
} // namespace librbd
#ifndef TEST_F
template class librbd::AsyncRequest<librbd::ImageCtx>;
#endif
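
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the file above): the request tracking
// idea behind start_request()/finish_request(), reduced to a toy that uses a
// condition variable where the real code completes waiter Contexts instead.
#include <condition_variable>
#include <iostream>
#include <list>
#include <mutex>
#include <thread>

namespace {

struct Tracker {
  std::mutex lock;
  std::condition_variable cond;
  std::list<int> requests;  // stand-in for the xlist of in-flight requests

  void start_request(int id) {
    std::lock_guard<std::mutex> l(lock);
    requests.push_back(id);
  }

  void finish_request(int id) {
    std::lock_guard<std::mutex> l(lock);
    requests.remove(id);
    if (requests.empty())
      cond.notify_all();  // wake waiters once the last request finishes
  }

  void wait_for_drain() {
    std::unique_lock<std::mutex> l(lock);
    cond.wait(l, [this] { return requests.empty(); });
  }
};

} // anonymous namespace

int main() {
  Tracker tracker;
  tracker.start_request(1);
  std::thread finisher([&tracker] { tracker.finish_request(1); });
  tracker.wait_for_drain();  // returns once request 1 has finished
  finisher.join();
  std::cout << "all requests drained\n";
  return 0;
}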

// ==== ceph-main/src/librbd/AsyncRequest.h ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASYNC_REQUEST_H
#define CEPH_LIBRBD_ASYNC_REQUEST_H
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "include/xlist.h"
#include "include/compat.h"
namespace librbd {
class ImageCtx;
template <typename ImageCtxT = ImageCtx>
class AsyncRequest
{
public:
AsyncRequest(ImageCtxT &image_ctx, Context *on_finish);
virtual ~AsyncRequest();
void complete(int r) {
if (should_complete(r)) {
r = filter_return_code(r);
finish_and_destroy(r);
}
}
virtual void send() = 0;
inline bool is_canceled() const {
return m_canceled;
}
inline void cancel() {
m_canceled = true;
}
protected:
ImageCtxT &m_image_ctx;
librados::AioCompletion *create_callback_completion();
Context *create_callback_context();
Context *create_async_callback_context();
void async_complete(int r);
virtual bool should_complete(int r) = 0;
virtual int filter_return_code(int r) const {
return r;
}
// NOTE: temporary until converted to new state machine format
virtual void finish_and_destroy(int r) {
finish(r);
delete this;
}
virtual void finish(int r) {
finish_request();
m_on_finish->complete(r);
}
private:
Context *m_on_finish;
bool m_canceled;
typename xlist<AsyncRequest<ImageCtxT> *>::item m_xlist_item;
void start_request();
void finish_request();
};
} // namespace librbd
extern template class librbd::AsyncRequest<librbd::ImageCtx>;
#endif //CEPH_LIBRBD_ASYNC_REQUEST_H
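
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the header above): the complete() /
// should_complete() / finish() contract, with a toy two-step request. The
// classes here are hypothetical stand-ins, not real librbd types.
#include <iostream>

namespace {

struct ToyRequest {
  virtual ~ToyRequest() = default;

  // mirrors AsyncRequest::complete(): only finish once the subclass
  // reports that no further steps are pending
  void complete(int r) {
    if (should_complete(r)) {
      finish(r);
      delete this;  // requests own themselves until completion
    }
  }

  virtual void send() = 0;

 protected:
  virtual bool should_complete(int r) = 0;
  virtual void finish(int r) { std::cout << "finished, r=" << r << "\n"; }
};

struct TwoStepRequest : public ToyRequest {
  void send() override { std::cout << "step 1 started\n"; }

 protected:
  bool should_complete(int) override {
    if (m_step++ == 0) {
      std::cout << "step 2 started\n";
      return false;  // more work pending: do not finish yet
    }
    return true;     // final step done
  }

 private:
  int m_step = 0;
};

} // anonymous namespace

int main() {
  auto* req = new TwoStepRequest();
  req->send();       // kicks off step 1
  req->complete(0);  // step 1's callback: starts step 2 instead of finishing
  req->complete(0);  // step 2's callback: finishes and deletes the request
  return 0;
}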

// ==== ceph-main/src/librbd/BlockGuard.h ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_BLOCK_GUARD_H
#define CEPH_LIBRBD_IO_BLOCK_GUARD_H
#include "include/int_types.h"
#include "common/dout.h"
#include "common/ceph_mutex.h"
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/set.hpp>
#include <deque>
#include <list>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::BlockGuard: " << this << " " \
<< __func__ << ": "
namespace librbd {
struct BlockExtent {
// [block_start, block_end)
uint64_t block_start = 0;
uint64_t block_end = 0;
BlockExtent() {
}
BlockExtent(uint64_t block_start, uint64_t block_end)
: block_start(block_start), block_end(block_end) {
}
friend std::ostream& operator<< (std::ostream& os, const BlockExtent& block_extent) {
os << "[block_start=" << block_extent.block_start
<< ", block_end=" << block_extent.block_end << "]";
return os;
}
};
struct BlockGuardCell {
};
/**
* Helper class to restrict and order concurrent IO to the same block. The
* definition of a block is dependent upon the user of this class. It might
* represent a backing object, 512 byte sectors, etc.
*/
template <typename BlockOperation>
class BlockGuard {
private:
struct DetainedBlockExtent;
public:
typedef std::list<BlockOperation> BlockOperations;
BlockGuard(CephContext *cct)
: m_cct(cct) {
}
BlockGuard(const BlockGuard&) = delete;
BlockGuard &operator=(const BlockGuard&) = delete;
/**
   * Detain future IO for a range of blocks. The guard will keep
* ownership of the provided operation if the operation is blocked.
* @return 0 upon success and IO can be issued
* >0 if the IO is blocked,
* <0 upon error
*/
int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
BlockGuardCell **cell) {
std::lock_guard locker{m_lock};
ldout(m_cct, 20) << block_extent
<< ", free_slots="
<< m_free_detained_block_extents.size()
<< dendl;
DetainedBlockExtent *detained_block_extent;
auto it = m_detained_block_extents.find(block_extent);
if (it != m_detained_block_extents.end()) {
// request against an already detained block
detained_block_extent = &(*it);
if (block_operation != nullptr) {
detained_block_extent->block_operations.emplace_back(
std::move(*block_operation));
}
// alert the caller that the IO was detained
*cell = nullptr;
return detained_block_extent->block_operations.size();
} else {
if (!m_free_detained_block_extents.empty()) {
detained_block_extent = &m_free_detained_block_extents.front();
detained_block_extent->block_operations.clear();
m_free_detained_block_extents.pop_front();
} else {
ldout(m_cct, 20) << "no free detained block cells" << dendl;
m_detained_block_extent_pool.emplace_back();
detained_block_extent = &m_detained_block_extent_pool.back();
}
detained_block_extent->block_extent = block_extent;
m_detained_block_extents.insert(*detained_block_extent);
*cell = reinterpret_cast<BlockGuardCell*>(detained_block_extent);
return 0;
}
}
/**
* Release any detained IO operations from the provided cell.
*/
void release(BlockGuardCell *cell, BlockOperations *block_operations) {
std::lock_guard locker{m_lock};
ceph_assert(cell != nullptr);
auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
*cell);
ldout(m_cct, 20) << detained_block_extent.block_extent
<< ", pending_ops="
<< detained_block_extent.block_operations.size()
<< dendl;
*block_operations = std::move(detained_block_extent.block_operations);
m_detained_block_extents.erase(detained_block_extent.block_extent);
m_free_detained_block_extents.push_back(detained_block_extent);
}
private:
struct DetainedBlockExtent : public boost::intrusive::list_base_hook<>,
public boost::intrusive::set_base_hook<> {
BlockExtent block_extent;
BlockOperations block_operations;
};
struct DetainedBlockExtentKey {
typedef BlockExtent type;
const BlockExtent &operator()(const DetainedBlockExtent &value) {
return value.block_extent;
}
};
struct DetainedBlockExtentCompare {
bool operator()(const BlockExtent &lhs,
const BlockExtent &rhs) const {
      // lhs sorts before rhs only if it ends at or before rhs starts;
      // overlapping extents therefore compare equivalent
if (lhs.block_end <= rhs.block_start) {
return true;
}
return false;
}
};
typedef std::deque<DetainedBlockExtent> DetainedBlockExtentsPool;
typedef boost::intrusive::list<DetainedBlockExtent> DetainedBlockExtents;
typedef boost::intrusive::set<
DetainedBlockExtent,
boost::intrusive::compare<DetainedBlockExtentCompare>,
boost::intrusive::key_of_value<DetainedBlockExtentKey> >
BlockExtentToDetainedBlockExtents;
CephContext *m_cct;
ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock");
DetainedBlockExtentsPool m_detained_block_extent_pool;
DetainedBlockExtents m_free_detained_block_extents;
BlockExtentToDetainedBlockExtents m_detained_block_extents;
};
} // namespace librbd
#undef dout_subsys
#undef dout_prefix
#define dout_prefix *_dout
#endif // CEPH_LIBRBD_IO_BLOCK_GUARD_H
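
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the header above): the comparator trick
// the detained-extent set relies on. Ordering extents by "lhs ends at or
// before rhs starts" makes overlapping extents compare equivalent, so an
// ordinary ordered-set lookup finds conflicts. This stays a valid strict
// weak ordering in practice because the extents stored in the set never
// overlap each other.
#include <cassert>
#include <cstdint>
#include <set>

namespace {

struct Extent { uint64_t start, end; };  // [start, end)

struct OverlapCompare {
  bool operator()(const Extent& lhs, const Extent& rhs) const {
    return lhs.end <= rhs.start;  // strictly before; overlap => equivalent
  }
};

} // anonymous namespace

int main() {
  std::set<Extent, OverlapCompare> detained;
  detained.insert(Extent{0, 4});  // blocks [0,4) are detained
  // an IO touching [2,6) overlaps [0,4), so find() locates the entry
  assert(detained.find(Extent{2, 6}) != detained.end());
  // [4,8) does not overlap [0,4): no conflict found
  assert(detained.find(Extent{4, 8}) == detained.end());
  return 0;
}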

// ==== ceph-main/src/librbd/ConfigWatcher.cc ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ConfigWatcher.h"
#include "common/config_obs.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/api/Config.h"
#include <deque>
#include <string>
#include <vector>
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ConfigWatcher: " \
<< __func__ << ": "
namespace librbd {
template <typename I>
struct ConfigWatcher<I>::Observer : public md_config_obs_t {
ConfigWatcher<I>* m_config_watcher;
std::deque<std::string> m_config_key_strs;
mutable std::vector<const char*> m_config_keys;
Observer(CephContext* cct, ConfigWatcher<I>* config_watcher)
: m_config_watcher(config_watcher) {
const std::string rbd_key_prefix("rbd_");
auto& schema = cct->_conf.get_schema();
for (auto& pair : schema) {
// watch all "rbd_" keys for simplicity
if (!boost::starts_with(pair.first, rbd_key_prefix)) {
continue;
}
m_config_key_strs.emplace_back(pair.first);
}
m_config_keys.reserve(m_config_key_strs.size());
for (auto& key : m_config_key_strs) {
m_config_keys.emplace_back(key.c_str());
}
m_config_keys.emplace_back(nullptr);
}
const char** get_tracked_conf_keys() const override {
ceph_assert(!m_config_keys.empty());
return &m_config_keys[0];
}
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override {
m_config_watcher->handle_global_config_change(changed);
}
};
template <typename I>
ConfigWatcher<I>::ConfigWatcher(I& image_ctx)
: m_image_ctx(image_ctx) {
}
template <typename I>
ConfigWatcher<I>::~ConfigWatcher() {
ceph_assert(m_observer == nullptr);
}
template <typename I>
void ConfigWatcher<I>::init() {
auto cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
m_observer = new Observer(cct, this);
cct->_conf.add_observer(m_observer);
}
template <typename I>
void ConfigWatcher<I>::shut_down() {
auto cct = m_image_ctx.cct;
ldout(cct, 10) << dendl;
ceph_assert(m_observer != nullptr);
cct->_conf.remove_observer(m_observer);
delete m_observer;
m_observer = nullptr;
}
template <typename I>
void ConfigWatcher<I>::handle_global_config_change(
std::set<std::string> changed_keys) {
{
// ignore any global changes that are being overridden
std::shared_lock image_locker{m_image_ctx.image_lock};
for (auto& key : m_image_ctx.config_overrides) {
changed_keys.erase(key);
}
}
if (changed_keys.empty()) {
return;
}
auto cct = m_image_ctx.cct;
ldout(cct, 10) << "changed_keys=" << changed_keys << dendl;
// refresh the image to pick up any global config overrides
m_image_ctx.state->handle_update_notification();
}
} // namespace librbd
template class librbd::ConfigWatcher<librbd::ImageCtx>;
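
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the file above): the filtering performed
// by handle_global_config_change(). Keys overridden per image are dropped
// from the changed set; only a non-empty remainder triggers a refresh. The
// key names below are real rbd options, the override choice is hypothetical.
#include <iostream>
#include <set>
#include <string>

int main() {
  std::set<std::string> changed = {"rbd_cache", "rbd_op_threads"};
  std::set<std::string> image_overrides = {"rbd_cache"};  // local value wins
  for (const auto& key : image_overrides)
    changed.erase(key);
  if (changed.empty()) {
    std::cout << "all changes overridden, nothing to do\n";
  } else {
    for (const auto& key : changed)
      std::cout << "refresh image for " << key << "\n";  // rbd_op_threads
  }
  return 0;
}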

// ==== ceph-main/src/librbd/ConfigWatcher.h ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CONFIG_WATCHER_H
#define CEPH_LIBRBD_CONFIG_WATCHER_H
#include <set>
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
template <typename ImageCtxT>
class ConfigWatcher {
public:
static ConfigWatcher* create(ImageCtxT& image_ctx) {
return new ConfigWatcher(image_ctx);
}
ConfigWatcher(ImageCtxT& image_ctx);
~ConfigWatcher();
ConfigWatcher(const ConfigWatcher&) = delete;
ConfigWatcher& operator=(const ConfigWatcher&) = delete;
void init();
void shut_down();
private:
struct Observer;
ImageCtxT& m_image_ctx;
Observer* m_observer = nullptr;
void handle_global_config_change(std::set<std::string> changed);
};
} // namespace librbd
extern template class librbd::ConfigWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CONFIG_WATCHER_H

// ==== ceph-main/src/librbd/DeepCopyRequest.cc ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "DeepCopyRequest.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/deep_copy/ImageCopyRequest.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/deep_copy/SnapshotCopyRequest.h"
#include "librbd/internal.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::DeepCopyRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
using namespace librbd::deep_copy;
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
using librbd::util::unique_lock_name;
template <typename I>
DeepCopyRequest<I>::DeepCopyRequest(I *src_image_ctx, I *dst_image_ctx,
librados::snap_t src_snap_id_start,
librados::snap_t src_snap_id_end,
librados::snap_t dst_snap_id_start,
bool flatten,
const ObjectNumber &object_number,
asio::ContextWQ *work_queue,
SnapSeqs *snap_seqs,
deep_copy::Handler *handler,
Context *on_finish)
: RefCountedObject(dst_image_ctx->cct), m_src_image_ctx(src_image_ctx),
m_dst_image_ctx(dst_image_ctx), m_src_snap_id_start(src_snap_id_start),
m_src_snap_id_end(src_snap_id_end), m_dst_snap_id_start(dst_snap_id_start),
m_flatten(flatten), m_object_number(object_number),
m_work_queue(work_queue), m_snap_seqs(snap_seqs), m_handler(handler),
m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
m_lock(ceph::make_mutex(unique_lock_name("DeepCopyRequest::m_lock", this))) {
}
template <typename I>
DeepCopyRequest<I>::~DeepCopyRequest() {
ceph_assert(m_snapshot_copy_request == nullptr);
ceph_assert(m_image_copy_request == nullptr);
}
template <typename I>
void DeepCopyRequest<I>::send() {
if (!m_src_image_ctx->data_ctx.is_valid()) {
lderr(m_cct) << "missing data pool for source image" << dendl;
finish(-ENODEV);
return;
}
if (!m_dst_image_ctx->data_ctx.is_valid()) {
lderr(m_cct) << "missing data pool for destination image" << dendl;
finish(-ENODEV);
return;
}
int r = validate_copy_points();
if (r < 0) {
finish(r);
return;
}
send_copy_snapshots();
}
template <typename I>
void DeepCopyRequest<I>::cancel() {
std::lock_guard locker{m_lock};
ldout(m_cct, 20) << dendl;
m_canceled = true;
if (m_snapshot_copy_request != nullptr) {
m_snapshot_copy_request->cancel();
}
if (m_image_copy_request != nullptr) {
m_image_copy_request->cancel();
}
}
template <typename I>
void DeepCopyRequest<I>::send_copy_snapshots() {
m_lock.lock();
if (m_canceled) {
m_lock.unlock();
finish(-ECANCELED);
return;
}
ldout(m_cct, 20) << dendl;
Context *ctx = create_context_callback<
DeepCopyRequest<I>, &DeepCopyRequest<I>::handle_copy_snapshots>(this);
m_snapshot_copy_request = SnapshotCopyRequest<I>::create(
m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_src_snap_id_end,
m_dst_snap_id_start, m_flatten, m_work_queue, m_snap_seqs, ctx);
m_snapshot_copy_request->get();
m_lock.unlock();
m_snapshot_copy_request->send();
}
template <typename I>
void DeepCopyRequest<I>::handle_copy_snapshots(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
{
std::lock_guard locker{m_lock};
m_snapshot_copy_request->put();
m_snapshot_copy_request = nullptr;
if (r == 0 && m_canceled) {
r = -ECANCELED;
}
}
if (r == -ECANCELED) {
ldout(m_cct, 10) << "snapshot copy canceled" << dendl;
finish(r);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to copy snapshot metadata: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
if (m_src_snap_id_end == CEPH_NOSNAP) {
(*m_snap_seqs)[CEPH_NOSNAP] = CEPH_NOSNAP;
}
send_copy_image();
}
template <typename I>
void DeepCopyRequest<I>::send_copy_image() {
m_lock.lock();
if (m_canceled) {
m_lock.unlock();
finish(-ECANCELED);
return;
}
ldout(m_cct, 20) << dendl;
Context *ctx = create_context_callback<
DeepCopyRequest<I>, &DeepCopyRequest<I>::handle_copy_image>(this);
m_image_copy_request = ImageCopyRequest<I>::create(
m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_src_snap_id_end,
m_dst_snap_id_start, m_flatten, m_object_number, *m_snap_seqs, m_handler,
ctx);
m_image_copy_request->get();
m_lock.unlock();
m_image_copy_request->send();
}
template <typename I>
void DeepCopyRequest<I>::handle_copy_image(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
{
std::lock_guard locker{m_lock};
m_image_copy_request->put();
m_image_copy_request = nullptr;
if (r == 0 && m_canceled) {
r = -ECANCELED;
}
}
if (r == -ECANCELED) {
ldout(m_cct, 10) << "image copy canceled" << dendl;
finish(r);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to copy image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
send_copy_object_map();
}
template <typename I>
void DeepCopyRequest<I>::send_copy_object_map() {
m_dst_image_ctx->owner_lock.lock_shared();
m_dst_image_ctx->image_lock.lock_shared();
if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP,
m_dst_image_ctx->image_lock)) {
m_dst_image_ctx->image_lock.unlock_shared();
m_dst_image_ctx->owner_lock.unlock_shared();
send_copy_metadata();
return;
}
if (m_src_snap_id_end == CEPH_NOSNAP) {
m_dst_image_ctx->image_lock.unlock_shared();
m_dst_image_ctx->owner_lock.unlock_shared();
send_refresh_object_map();
return;
}
ceph_assert(m_dst_image_ctx->object_map != nullptr);
ldout(m_cct, 20) << dendl;
Context *finish_op_ctx = nullptr;
int r;
if (m_dst_image_ctx->exclusive_lock != nullptr) {
finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r);
}
if (finish_op_ctx == nullptr) {
lderr(m_cct) << "lost exclusive lock" << dendl;
m_dst_image_ctx->image_lock.unlock_shared();
m_dst_image_ctx->owner_lock.unlock_shared();
finish(r);
return;
}
// rollback the object map (copy snapshot object map to HEAD)
auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
handle_copy_object_map(r);
finish_op_ctx->complete(0);
});
ceph_assert(m_snap_seqs->count(m_src_snap_id_end) > 0);
librados::snap_t copy_snap_id = (*m_snap_seqs)[m_src_snap_id_end];
m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx);
m_dst_image_ctx->image_lock.unlock_shared();
m_dst_image_ctx->owner_lock.unlock_shared();
}
template <typename I>
void DeepCopyRequest<I>::handle_copy_object_map(int r) {
ldout(m_cct, 20) << dendl;
if (r < 0) {
lderr(m_cct) << "failed to roll back object map: " << cpp_strerror(r)
<< dendl;
finish(r);
return;
}
send_refresh_object_map();
}
template <typename I>
void DeepCopyRequest<I>::send_refresh_object_map() {
int r;
Context *finish_op_ctx = nullptr;
{
std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
if (m_dst_image_ctx->exclusive_lock != nullptr) {
finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r);
}
}
if (finish_op_ctx == nullptr) {
lderr(m_cct) << "lost exclusive lock" << dendl;
finish(r);
return;
}
ldout(m_cct, 20) << dendl;
auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
handle_refresh_object_map(r);
finish_op_ctx->complete(0);
});
m_object_map = m_dst_image_ctx->create_object_map(CEPH_NOSNAP);
m_object_map->open(ctx);
}
template <typename I>
void DeepCopyRequest<I>::handle_refresh_object_map(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to open object map: " << cpp_strerror(r)
<< dendl;
delete m_object_map;
finish(r);
return;
}
{
std::unique_lock image_locker{m_dst_image_ctx->image_lock};
std::swap(m_dst_image_ctx->object_map, m_object_map);
}
m_object_map->put();
send_copy_metadata();
}
template <typename I>
void DeepCopyRequest<I>::send_copy_metadata() {
ldout(m_cct, 20) << dendl;
Context *ctx = create_context_callback<
DeepCopyRequest<I>, &DeepCopyRequest<I>::handle_copy_metadata>(this);
auto request = MetadataCopyRequest<I>::create(m_src_image_ctx,
m_dst_image_ctx, ctx);
request->send();
}
template <typename I>
void DeepCopyRequest<I>::handle_copy_metadata(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
finish(0);
}
template <typename I>
int DeepCopyRequest<I>::validate_copy_points() {
std::shared_lock image_locker{m_src_image_ctx->image_lock};
if (m_src_snap_id_start != 0 &&
m_src_image_ctx->snap_info.find(m_src_snap_id_start) ==
m_src_image_ctx->snap_info.end()) {
lderr(m_cct) << "invalid start snap_id " << m_src_snap_id_start << dendl;
return -EINVAL;
}
if (m_src_snap_id_end != CEPH_NOSNAP &&
m_src_image_ctx->snap_info.find(m_src_snap_id_end) ==
m_src_image_ctx->snap_info.end()) {
lderr(m_cct) << "invalid end snap_id " << m_src_snap_id_end << dendl;
return -EINVAL;
}
return 0;
}
template <typename I>
void DeepCopyRequest<I>::finish(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
m_on_finish->complete(r);
put();
}
} // namespace librbd
template class librbd::DeepCopyRequest<librbd::ImageCtx>;
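
// ---------------------------------------------------------------------------
// Illustrative sketch (separate from the file above): the Context-callback
// chaining style the state machine above uses, reduced to self-contained
// stand-ins for Context and LambdaContext. Stage names are hypothetical.
#include <functional>
#include <iostream>

namespace {

struct Context {
  virtual ~Context() = default;
  void complete(int r) {  // one-shot: finish, then self-destruct
    finish(r);
    delete this;
  }
 protected:
  virtual void finish(int r) = 0;
};

struct LambdaContext : public Context {
  explicit LambdaContext(std::function<void(int)> f) : m_f(std::move(f)) {}
 protected:
  void finish(int r) override { m_f(r); }
 private:
  std::function<void(int)> m_f;
};

// each stage does its work, then completes a context that drives the next
void copy_metadata(Context* on_finish) {
  std::cout << "copying metadata\n";
  on_finish->complete(0);
}

void copy_image(Context* on_finish) {
  std::cout << "copying image\n";
  copy_metadata(new LambdaContext([on_finish](int r) {
    on_finish->complete(r);  // propagate the stage's result
  }));
}

} // anonymous namespace

int main() {
  copy_image(new LambdaContext([](int r) {
    std::cout << "deep copy finished, r=" << r << "\n";
  }));
  return 0;
}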

// ==== ceph-main/src/librbd/DeepCopyRequest.h ====

// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_REQUEST_H
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "include/int_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"
#include <map>
#include <vector>
class Context;
namespace librbd {
class ImageCtx;
namespace asio { struct ContextWQ; }
namespace deep_copy {
template <typename> class ImageCopyRequest;
template <typename> class SnapshotCopyRequest;
struct Handler;
}
template <typename ImageCtxT = ImageCtx>
class DeepCopyRequest : public RefCountedObject {
public:
static DeepCopyRequest* create(ImageCtxT *src_image_ctx,
ImageCtxT *dst_image_ctx,
librados::snap_t src_snap_id_start,
librados::snap_t src_snap_id_end,
librados::snap_t dst_snap_id_start,
bool flatten,
const deep_copy::ObjectNumber &object_number,
asio::ContextWQ *work_queue,
SnapSeqs *snap_seqs,
deep_copy::Handler *handler,
Context *on_finish) {
return new DeepCopyRequest(src_image_ctx, dst_image_ctx, src_snap_id_start,
src_snap_id_end, dst_snap_id_start, flatten,
object_number, work_queue, snap_seqs, handler,
on_finish);
}
DeepCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
librados::snap_t src_snap_id_start,
librados::snap_t src_snap_id_end,
librados::snap_t dst_snap_id_start,
bool flatten, const deep_copy::ObjectNumber &object_number,
asio::ContextWQ *work_queue, SnapSeqs *snap_seqs,
deep_copy::Handler *handler, Context *on_finish);
~DeepCopyRequest();
void send();
void cancel();
private:
/**
* @verbatim
*
* <start>
* |
* v
* COPY_SNAPSHOTS
* |
* v
* COPY_IMAGE . . . . . . . . . . . . . .
* | .
* v .
* COPY_OBJECT_MAP (skip if object .
* | map disabled) .
* v .
* REFRESH_OBJECT_MAP (skip if object . (image copy canceled)
* | map disabled) .
* v .
* COPY_METADATA .
* | .
* v .
* <finish> < . . . . . . . . . . . . . .
*
* @endverbatim
*/
typedef std::vector<librados::snap_t> SnapIds;
typedef std::map<librados::snap_t, SnapIds> SnapMap;
ImageCtxT *m_src_image_ctx;
ImageCtxT *m_dst_image_ctx;
librados::snap_t m_src_snap_id_start;
librados::snap_t m_src_snap_id_end;
librados::snap_t m_dst_snap_id_start;
bool m_flatten;
deep_copy::ObjectNumber m_object_number;
asio::ContextWQ *m_work_queue;
SnapSeqs *m_snap_seqs;
deep_copy::Handler *m_handler;
Context *m_on_finish;
CephContext *m_cct;
ceph::mutex m_lock;
bool m_canceled = false;
deep_copy::SnapshotCopyRequest<ImageCtxT> *m_snapshot_copy_request = nullptr;
deep_copy::ImageCopyRequest<ImageCtxT> *m_image_copy_request = nullptr;
decltype(ImageCtxT::object_map) m_object_map = nullptr;
void send_copy_snapshots();
void handle_copy_snapshots(int r);
void send_copy_image();
void handle_copy_image(int r);
void send_copy_object_map();
void handle_copy_object_map(int r);
void send_refresh_object_map();
void handle_refresh_object_map(int r);
void send_copy_metadata();
void handle_copy_metadata(int r);
int validate_copy_points();
void finish(int r);
};
} // namespace librbd
extern template class librbd::DeepCopyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_REQUEST_H
| 4,194 | 29.179856 | 79 |
h
|
null |
ceph-main/src/librbd/ExclusiveLock.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/Utils.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/ImageDispatch.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
<< __func__
namespace librbd {
using namespace exclusive_lock;
using librbd::util::create_context_callback;
template <typename I>
using ML = ManagedLock<I>;
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
: RefCountedObject(image_ctx.cct),
ML<I>(image_ctx.md_ctx, *image_ctx.asio_engine, image_ctx.header_oid,
image_ctx.image_watcher, managed_lock::EXCLUSIVE,
image_ctx.config.template get_val<bool>("rbd_blocklist_on_break_lock"),
image_ctx.config.template get_val<uint64_t>("rbd_blocklist_expire_seconds")),
m_image_ctx(image_ctx) {
std::lock_guard locker{ML<I>::m_lock};
ML<I>::set_state_uninitialized();
}
template <typename I>
bool ExclusiveLock<I>::accept_request(OperationRequestType request_type,
int *ret_val) const {
std::lock_guard locker{ML<I>::m_lock};
bool accept_request =
(!ML<I>::is_state_shutdown() && ML<I>::is_state_locked() &&
(m_request_blocked_count == 0 ||
m_image_ctx.get_exclusive_lock_policy()->accept_blocked_request(
request_type)));
if (ret_val != nullptr) {
*ret_val = accept_request ? 0 : m_request_blocked_ret_val;
}
ldout(m_image_ctx.cct, 20) << "=" << accept_request << " (request_type="
<< request_type << ")" << dendl;
return accept_request;
}
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
std::lock_guard locker{ML<I>::m_lock};
bool accept = accept_ops(ML<I>::m_lock);
ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
return accept;
}
template <typename I>
bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
return (!ML<I>::is_state_shutdown() &&
(ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}
template <typename I>
void ExclusiveLock<I>::set_require_lock(bool init_shutdown,
io::Direction direction,
Context* on_finish) {
m_image_dispatch->set_require_lock(init_shutdown, direction, on_finish);
}
template <typename I>
void ExclusiveLock<I>::unset_require_lock(io::Direction direction) {
m_image_dispatch->unset_require_lock(direction);
}
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
std::lock_guard locker{ML<I>::m_lock};
m_request_blocked_count++;
if (m_request_blocked_ret_val == 0) {
m_request_blocked_ret_val = r;
}
ldout(m_image_ctx.cct, 20) << ": r=" << r << dendl;
}
template <typename I>
void ExclusiveLock<I>::unblock_requests() {
std::lock_guard locker{ML<I>::m_lock};
ceph_assert(m_request_blocked_count > 0);
m_request_blocked_count--;
if (m_request_blocked_count == 0) {
m_request_blocked_ret_val = 0;
}
ldout(m_image_ctx.cct, 20) << dendl;
}
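// Note: the blocked-request counter nests -- every block_requests(r) must be
// paired with one unblock_requests(), and the first non-zero error code is
// what accept_request() keeps reporting until the count drops back to zero.
// A hypothetical RAII guard (not part of librbd) makes the pairing explicit:
#if 0
struct ScopedRequestBlock {
  librbd::ExclusiveLock<librbd::ImageCtx>* lock;
  ScopedRequestBlock(librbd::ExclusiveLock<librbd::ImageCtx>* l, int r)
    : lock(l) {
    lock->block_requests(r);
  }
  ~ScopedRequestBlock() {
    lock->unblock_requests();
  }
};
#endif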
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
if (m_image_ctx.image_watcher->is_blocklisted()) {
return -EBLOCKLISTED;
}
return -EROFS;
}
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
on_init = create_context_callback<Context>(on_init, this);
ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;
{
std::lock_guard locker{ML<I>::m_lock};
ML<I>::set_state_initializing();
}
m_image_dispatch = exclusive_lock::ImageDispatch<I>::create(&m_image_ctx);
m_image_ctx.io_image_dispatcher->register_dispatch(m_image_dispatch);
on_init = new LambdaContext([this, on_init](int r) {
{
std::lock_guard locker{ML<I>::m_lock};
ML<I>::set_state_unlocked();
}
on_init->complete(r);
});
bool pwl_enabled = cache::util::is_pwl_enabled(m_image_ctx);
if (m_image_ctx.clone_copy_on_read ||
(features & RBD_FEATURE_JOURNALING) != 0 ||
pwl_enabled) {
m_image_dispatch->set_require_lock(true, io::DIRECTION_BOTH, on_init);
} else {
m_image_dispatch->set_require_lock(true, io::DIRECTION_WRITE, on_init);
}
}
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
ldout(m_image_ctx.cct, 10) << dendl;
auto ref = ceph::ref_t<ExclusiveLock<I>>(this);
on_shut_down = create_context_callback<Context>(on_shut_down, this);
ML<I>::shut_down(on_shut_down);
// if stalled in request state machine -- abort
handle_peer_notification(0);
}
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
std::lock_guard locker{ML<I>::m_lock};
if (!ML<I>::is_state_waiting_for_lock()) {
return;
}
ldout(m_image_ctx.cct, 10) << dendl;
ceph_assert(ML<I>::is_action_acquire_lock());
m_acquire_lock_peer_ret_val = r;
ML<I>::execute_next_action();
}
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
std::lock_guard locker{ML<I>::m_lock};
if (!accept_ops(ML<I>::m_lock)) {
*ret_val = get_unlocked_op_error();
return nullptr;
}
m_async_op_tracker.start_op();
return new LambdaContext([this](int r) {
m_async_op_tracker.finish_op();
});
}
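// Illustrative caller-side sketch (not part of the upstream file): the
// canonical pattern is to take owner_lock shared, request an op guard, and
// bail out with the reported error if the lock is unusable -- the same
// pattern DeepCopyRequest<I>::send_refresh_object_map() relies on.
#if 0
int r = -EROFS;
Context *op_ctx = nullptr;
{
  std::shared_lock owner_locker{image_ctx->owner_lock};  // image_ctx assumed
  if (image_ctx->exclusive_lock != nullptr) {
    op_ctx = image_ctx->exclusive_lock->start_op(&r);
  }
}
if (op_ctx == nullptr) {
  return r;  // -EROFS, or -EBLOCKLISTED if the watcher was blocklisted
}
// ... perform the guarded operation, then release the async op tracker:
op_ctx->complete(0);
#endif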
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
ldout(m_image_ctx.cct, 10) << dendl;
{
std::unique_lock owner_locker{m_image_ctx.owner_lock};
m_image_ctx.exclusive_lock = nullptr;
}
on_finish = new LambdaContext([this, on_finish](int r) {
m_image_dispatch = nullptr;
m_image_ctx.image_watcher->flush(on_finish);
});
m_image_ctx.io_image_dispatcher->shut_down_dispatch(
m_image_dispatch->get_dispatch_layer(), on_finish);
}
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
ldout(m_image_ctx.cct, 10) << dendl;
int acquire_lock_peer_ret_val = 0;
{
std::lock_guard locker{ML<I>::m_lock};
std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
}
if (acquire_lock_peer_ret_val == -EROFS) {
ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
on_finish->complete(acquire_lock_peer_ret_val);
return;
}
PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
on_finish);
m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
req->send();
}));
}
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;
if (r == -EROFS) {
// peer refused to release the exclusive lock
on_finish->complete(r);
return;
} else if (r < 0) {
ML<I>::m_lock.lock();
ceph_assert(ML<I>::is_state_acquiring());
    // the PostAcquire state machine won't run, so we must complete the
    // prepare-lock step ourselves
m_image_ctx.state->handle_prepare_lock_complete();
// if lock is in-use by another client, request the lock
if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
ML<I>::set_state_waiting_for_lock();
ML<I>::m_lock.unlock();
// request the lock from a peer
m_image_ctx.image_watcher->notify_request_lock();
      // inform the managed lock that we have interrupted the state machine
r = -ECANCELED;
} else {
ML<I>::m_lock.unlock();
// clear error if peer owns lock
if (r == -EAGAIN) {
r = 0;
}
}
on_finish->complete(r);
return;
}
std::lock_guard locker{ML<I>::m_lock};
m_pre_post_callback = on_finish;
using EL = ExclusiveLock<I>;
PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));
m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
req->send();
}));
}
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
ldout(m_image_ctx.cct, 10) << dendl;
std::lock_guard locker{ML<I>::m_lock};
ceph_assert(r == 0);
// lock is owned at this point
ML<I>::set_state_post_acquiring();
}
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;
Context *on_finish = nullptr;
{
std::lock_guard locker{ML<I>::m_lock};
ceph_assert(ML<I>::is_state_acquiring() ||
ML<I>::is_state_post_acquiring());
    ceph_assert(m_pre_post_callback != nullptr);
std::swap(m_pre_post_callback, on_finish);
}
if (r < 0) {
on_finish->complete(r);
return;
}
m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
ceph_clock_now());
m_image_ctx.image_watcher->notify_acquired_lock();
m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);
on_finish->complete(0);
}
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
Context *on_finish) {
ldout(m_image_ctx.cct, 10) << dendl;
std::lock_guard locker{ML<I>::m_lock};
auto req = PreReleaseRequest<I>::create(
m_image_ctx, m_image_dispatch, shutting_down, m_async_op_tracker,
on_finish);
m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
req->send();
}));
}
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
Context *on_finish) {
ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
<< shutting_down << dendl;
if (!shutting_down) {
{
std::lock_guard locker{ML<I>::m_lock};
ceph_assert(ML<I>::is_state_pre_releasing() ||
ML<I>::is_state_releasing());
}
if (r >= 0) {
m_image_ctx.image_watcher->notify_released_lock();
}
on_finish->complete(r);
} else {
{
std::unique_lock owner_locker{m_image_ctx.owner_lock};
m_image_ctx.exclusive_lock = nullptr;
}
on_finish = new LambdaContext([this, r, on_finish](int) {
m_image_dispatch = nullptr;
m_image_ctx.image_watcher->notify_released_lock();
on_finish->complete(r);
});
m_image_ctx.io_image_dispatcher->shut_down_dispatch(
m_image_dispatch->get_dispatch_layer(), on_finish);
}
}
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
ldout(m_image_ctx.cct, 10) << dendl;
if (r >= 0) {
m_image_ctx.image_watcher->notify_acquired_lock();
}
on_finish->complete(r);
}
} // namespace librbd
template class librbd::ExclusiveLock<librbd::ImageCtx>;
| 11,476 | 28.503856 | 87 |
cc
|
null |
ceph-main/src/librbd/ExclusiveLock.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_H
#include "common/AsyncOpTracker.h"
#include "librbd/ManagedLock.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/Types.h"
#include "common/RefCountedObj.h"
struct Context;
namespace librbd {
namespace exclusive_lock { template <typename> struct ImageDispatch; }
template <typename ImageCtxT = ImageCtx>
class ExclusiveLock : public RefCountedObject,
public ManagedLock<ImageCtxT> {
public:
static ExclusiveLock *create(ImageCtxT &image_ctx) {
return new ExclusiveLock<ImageCtxT>(image_ctx);
}
ExclusiveLock(ImageCtxT &image_ctx);
bool accept_request(exclusive_lock::OperationRequestType request_type,
int *ret_val) const;
bool accept_ops() const;
void set_require_lock(bool init_shutdown, io::Direction direction,
Context* on_finish);
void unset_require_lock(io::Direction direction);
void block_requests(int r);
void unblock_requests();
void init(uint64_t features, Context *on_init);
void shut_down(Context *on_shutdown);
void handle_peer_notification(int r);
int get_unlocked_op_error() const;
Context *start_op(int* ret_val);
protected:
void shutdown_handler(int r, Context *on_finish) override;
void pre_acquire_lock_handler(Context *on_finish) override;
void post_acquire_lock_handler(int r, Context *on_finish) override;
void pre_release_lock_handler(bool shutting_down,
Context *on_finish) override;
void post_release_lock_handler(bool shutting_down, int r,
Context *on_finish) override;
void post_reacquire_lock_handler(int r, Context *on_finish) override;
private:
/**
* @verbatim
*
* <start> * * > WAITING_FOR_REGISTER --------\
* | * (watch not registered) |
* | * |
* | * * > WAITING_FOR_PEER ------------\
* | * (request_lock busy) |
* | * |
* | * * * * * * * * * * * * * * |
* | * |
* v (init) (try_lock/request_lock) * |
* UNINITIALIZED -------> UNLOCKED ------------------------> ACQUIRING <--/
* ^ |
* | v
* RELEASING POST_ACQUIRING
* | |
* | |
* | (release_lock) v
* PRE_RELEASING <------------------------ LOCKED
*
* <LOCKED state>
* |
* v
* REACQUIRING -------------------------------------> <finish>
* . ^
* . |
* . . . > <RELEASE action> ---> <ACQUIRE action> ---/
*
* <UNLOCKED/LOCKED states>
* |
* |
* v
* PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
*
* @endverbatim
*/
ImageCtxT& m_image_ctx;
exclusive_lock::ImageDispatch<ImageCtxT>* m_image_dispatch = nullptr;
Context *m_pre_post_callback = nullptr;
AsyncOpTracker m_async_op_tracker;
uint32_t m_request_blocked_count = 0;
int m_request_blocked_ret_val = 0;
int m_acquire_lock_peer_ret_val = 0;
bool accept_ops(const ceph::mutex &lock) const;
void handle_post_acquiring_lock(int r);
void handle_post_acquired_lock(int r);
};
} // namespace librbd
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_H
| 4,124 | 33.957627 | 78 |
h
|
null |
ceph-main/src/librbd/Features.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string.hpp>
#include "librbd/Features.h"
#include "include/rbd/features.h"
#include <map>
#include <vector>
static const std::map<std::string, uint64_t> RBD_FEATURE_MAP = {
{RBD_FEATURE_NAME_LAYERING, RBD_FEATURE_LAYERING},
{RBD_FEATURE_NAME_STRIPINGV2, RBD_FEATURE_STRIPINGV2},
{RBD_FEATURE_NAME_EXCLUSIVE_LOCK, RBD_FEATURE_EXCLUSIVE_LOCK},
{RBD_FEATURE_NAME_OBJECT_MAP, RBD_FEATURE_OBJECT_MAP},
{RBD_FEATURE_NAME_FAST_DIFF, RBD_FEATURE_FAST_DIFF},
{RBD_FEATURE_NAME_DEEP_FLATTEN, RBD_FEATURE_DEEP_FLATTEN},
{RBD_FEATURE_NAME_JOURNALING, RBD_FEATURE_JOURNALING},
{RBD_FEATURE_NAME_DATA_POOL, RBD_FEATURE_DATA_POOL},
{RBD_FEATURE_NAME_OPERATIONS, RBD_FEATURE_OPERATIONS},
{RBD_FEATURE_NAME_MIGRATING, RBD_FEATURE_MIGRATING},
{RBD_FEATURE_NAME_NON_PRIMARY, RBD_FEATURE_NON_PRIMARY},
{RBD_FEATURE_NAME_DIRTY_CACHE, RBD_FEATURE_DIRTY_CACHE},
};
static_assert((RBD_FEATURE_DIRTY_CACHE << 1) > RBD_FEATURES_ALL,
"new RBD feature added");
namespace librbd {
std::string rbd_features_to_string(uint64_t features,
std::ostream *err)
{
std::string r;
for (auto& i : RBD_FEATURE_MAP) {
if (features & i.second) {
if (!r.empty()) {
r += ",";
}
r += i.first;
features &= ~i.second;
}
}
if (err && features) {
*err << "ignoring unknown feature mask 0x"
<< std::hex << features << std::dec;
}
return r;
}
uint64_t rbd_features_from_string(const std::string& orig_value,
std::ostream *err)
{
uint64_t features = 0;
std::string value = orig_value;
boost::trim(value);
// empty string means default features
if (!value.size()) {
return RBD_FEATURES_DEFAULT;
}
try {
// numeric?
features = boost::lexical_cast<uint64_t>(value);
// drop unrecognized bits
uint64_t unsupported_features = (features & ~RBD_FEATURES_ALL);
if (unsupported_features != 0ull) {
features &= RBD_FEATURES_ALL;
if (err) {
*err << "ignoring unknown feature mask 0x"
<< std::hex << unsupported_features << std::dec;
}
}
uint64_t ignore_features_mask = (
RBD_FEATURES_INTERNAL | RBD_FEATURES_MUTABLE_INTERNAL);
uint64_t ignored_features = (features & ignore_features_mask);
if (ignored_features != 0ULL) {
features &= ~ignore_features_mask;
if (err) {
*err << "ignoring feature mask 0x" << std::hex << ignored_features;
}
}
} catch (boost::bad_lexical_cast&) {
// feature name list?
bool errors = false;
std::vector<std::string> feature_names;
boost::split(feature_names, value, boost::is_any_of(","));
for (auto feature_name: feature_names) {
boost::trim(feature_name);
auto feature_it = RBD_FEATURE_MAP.find(feature_name);
if (feature_it != RBD_FEATURE_MAP.end()) {
        // use |= so a duplicated feature name cannot double-count its bit
        features |= feature_it->second;
} else if (err) {
if (errors) {
*err << ", ";
} else {
errors = true;
}
*err << "ignoring unknown feature " << feature_name;
}
}
}
return features;
}
} // namespace librbd
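// Illustrative round trip (not part of the upstream file): a numeric mask
// and a comma-separated name list parse to the same bitmask, and
// rbd_features_to_string() emits names in lexicographic (std::map) order.
#if 0
std::ostringstream err;
uint64_t f1 = librbd::rbd_features_from_string("layering,exclusive-lock", &err);
uint64_t f2 = librbd::rbd_features_from_string("5", &err);  // 1 | 4
// f1 == f2 == (RBD_FEATURE_LAYERING | RBD_FEATURE_EXCLUSIVE_LOCK)
std::string names = librbd::rbd_features_to_string(f1, &err);
// names == "exclusive-lock,layering"
#endif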
| 3,181 | 27.410714 | 70 |
cc
|
null |
ceph-main/src/librbd/Features.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <ostream>
namespace librbd {
std::string rbd_features_to_string(uint64_t features,
std::ostream *err);
uint64_t rbd_features_from_string(const std::string& value,
std::ostream *err);
} // namespace librbd
| 359 | 20.176471 | 70 |
h
|
null |
ceph-main/src/librbd/ImageCtx.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include <boost/assign/list_of.hpp>
#include <stddef.h>
#include "include/neorados/RADOS.hpp"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "common/Timer.h"
#include "librbd/AsioEngine.h"
#include "librbd/AsyncRequest.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/internal.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Journal.h"
#include "librbd/LibrbdAdminSocketHook.h"
#include "librbd/ObjectMap.h"
#include "librbd/Operations.h"
#include "librbd/PluginRegistry.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/AutomaticPolicy.h"
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/crypto/EncryptionFormat.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/AsyncOperation.h"
#include "librbd/io/ImageDispatcher.h"
#include "librbd/io/ObjectDispatcher.h"
#include "librbd/io/QosImageDispatch.h"
#include "librbd/io/IoOperations.h"
#include "librbd/io/Utils.h"
#include "librbd/journal/StandardPolicy.h"
#include "librbd/operation/ResizeRequest.h"
#include "osdc/Striper.h"
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageCtx: "
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
namespace librbd {
namespace {
class SafeTimerSingleton : public CommonSafeTimer<ceph::mutex> {
public:
ceph::mutex lock = ceph::make_mutex("librbd::SafeTimerSingleton::lock");
explicit SafeTimerSingleton(CephContext *cct)
: SafeTimer(cct, lock, true) {
init();
}
~SafeTimerSingleton() {
std::lock_guard locker{lock};
shutdown();
}
};
librados::IoCtx duplicate_io_ctx(librados::IoCtx& io_ctx) {
librados::IoCtx dup_io_ctx;
dup_io_ctx.dup(io_ctx);
return dup_io_ctx;
}
} // anonymous namespace
const string ImageCtx::METADATA_CONF_PREFIX = "conf_";
ImageCtx::ImageCtx(const string &image_name, const string &image_id,
const char *snap, IoCtx& p, bool ro)
: cct((CephContext*)p.cct()),
config(cct->_conf),
perfcounter(NULL),
snap_id(CEPH_NOSNAP),
snap_exists(true),
read_only(ro),
read_only_flags(ro ? IMAGE_READ_ONLY_FLAG_USER : 0U),
exclusive_locked(false),
name(image_name),
asio_engine(std::make_shared<AsioEngine>(p)),
rados_api(asio_engine->get_rados_api()),
data_ctx(duplicate_io_ctx(p)),
md_ctx(duplicate_io_ctx(p)),
image_watcher(NULL),
journal(NULL),
owner_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::owner_lock", this))),
image_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::image_lock", this))),
timestamp_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ImageCtx::timestamp_lock", this))),
async_ops_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::async_ops_lock", this))),
copyup_list_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageCtx::copyup_list_lock", this))),
extra_read_flags(0),
old_format(false),
order(0), size(0), features(0),
format_string(NULL),
id(image_id), parent(NULL),
stripe_unit(0), stripe_count(0), flags(0),
readahead(),
total_bytes_read(0),
state(new ImageState<>(this)),
operations(new Operations<>(*this)),
exclusive_lock(nullptr), object_map(nullptr),
op_work_queue(asio_engine->get_work_queue()),
plugin_registry(new PluginRegistry<ImageCtx>(this)),
event_socket_completions(32),
asok_hook(nullptr),
trace_endpoint("librbd")
{
ldout(cct, 10) << this << " " << __func__ << ": "
<< "image_name=" << image_name << ", "
<< "image_id=" << image_id << dendl;
if (snap)
snap_name = snap;
rebuild_data_io_context();
// FIPS zeroization audit 20191117: this memset is not security related.
memset(&header, 0, sizeof(header));
io_image_dispatcher = new io::ImageDispatcher<ImageCtx>(this);
io_object_dispatcher = new io::ObjectDispatcher<ImageCtx>(this);
if (cct->_conf.get_val<bool>("rbd_auto_exclusive_lock_until_manual_request")) {
exclusive_lock_policy = new exclusive_lock::AutomaticPolicy(this);
} else {
exclusive_lock_policy = new exclusive_lock::StandardPolicy(this);
}
journal_policy = new journal::StandardPolicy(this);
}
ImageCtx::ImageCtx(const string &image_name, const string &image_id,
uint64_t snap_id, IoCtx& p, bool ro)
: ImageCtx(image_name, image_id, "", p, ro) {
open_snap_id = snap_id;
}
ImageCtx::~ImageCtx() {
ldout(cct, 10) << this << " " << __func__ << dendl;
ceph_assert(config_watcher == nullptr);
ceph_assert(image_watcher == NULL);
ceph_assert(exclusive_lock == NULL);
ceph_assert(object_map == NULL);
ceph_assert(journal == NULL);
ceph_assert(asok_hook == NULL);
if (perfcounter) {
perf_stop();
}
delete[] format_string;
md_ctx.aio_flush();
if (data_ctx.is_valid()) {
data_ctx.aio_flush();
}
delete io_object_dispatcher;
delete io_image_dispatcher;
delete journal_policy;
delete exclusive_lock_policy;
delete operations;
delete state;
delete plugin_registry;
}
void ImageCtx::init() {
ceph_assert(!header_oid.empty());
ceph_assert(old_format || !id.empty());
asok_hook = new LibrbdAdminSocketHook(this);
string pname = string("librbd-") + id + string("-") +
md_ctx.get_pool_name() + string("-") + name;
if (!snap_name.empty()) {
pname += "-";
pname += snap_name;
}
trace_endpoint.copy_name(pname);
perf_start(pname);
ceph_assert(image_watcher == NULL);
image_watcher = new ImageWatcher<>(*this);
}
void ImageCtx::shutdown() {
delete image_watcher;
image_watcher = nullptr;
delete asok_hook;
asok_hook = nullptr;
}
void ImageCtx::init_layout(int64_t pool_id)
{
if (stripe_unit == 0 || stripe_count == 0) {
stripe_unit = 1ull << order;
stripe_count = 1;
}
vector<uint64_t> alignments;
alignments.push_back(stripe_count << order); // object set (in file striping terminology)
alignments.push_back(stripe_unit * stripe_count); // stripe
alignments.push_back(stripe_unit); // stripe unit
readahead.set_alignments(alignments);
layout = file_layout_t();
layout.stripe_unit = stripe_unit;
layout.stripe_count = stripe_count;
layout.object_size = 1ull << order;
layout.pool_id = pool_id; // FIXME: pool id overflow?
delete[] format_string;
size_t len = object_prefix.length() + 16;
format_string = new char[len];
if (old_format) {
snprintf(format_string, len, "%s.%%012llx", object_prefix.c_str());
} else {
snprintf(format_string, len, "%s.%%016llx", object_prefix.c_str());
}
ldout(cct, 10) << "init_layout stripe_unit " << stripe_unit
<< " stripe_count " << stripe_count
<< " object_size " << layout.object_size
<< " prefix " << object_prefix
<< " format " << format_string
<< dendl;
}
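// Illustrative example (hypothetical prefix): for a new-format image with
// object_prefix "rbd_data.abc123", format_string becomes
// "rbd_data.abc123.%016llx", so object number 7 maps to
// "rbd_data.abc123.0000000000000007"; old-format images use a 12-hex-digit
// suffix instead.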
void ImageCtx::perf_start(string name) {
auto perf_prio = PerfCountersBuilder::PRIO_DEBUGONLY;
if (child == nullptr) {
// ensure top-level IO stats are exported for librbd daemons
perf_prio = PerfCountersBuilder::PRIO_USEFUL;
}
PerfCountersBuilder plb(cct, name, l_librbd_first, l_librbd_last);
plb.add_u64_counter(l_librbd_rd, "rd", "Reads", "r", perf_prio);
plb.add_u64_counter(l_librbd_rd_bytes, "rd_bytes", "Data size in reads",
"rb", perf_prio, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_rd_latency, "rd_latency", "Latency of reads",
"rl", perf_prio);
plb.add_u64_counter(l_librbd_wr, "wr", "Writes", "w", perf_prio);
plb.add_u64_counter(l_librbd_wr_bytes, "wr_bytes", "Written data",
"wb", perf_prio, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_wr_latency, "wr_latency", "Write latency",
"wl", perf_prio);
plb.add_u64_counter(l_librbd_discard, "discard", "Discards");
plb.add_u64_counter(l_librbd_discard_bytes, "discard_bytes", "Discarded data", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_discard_latency, "discard_latency", "Discard latency");
plb.add_u64_counter(l_librbd_flush, "flush", "Flushes");
plb.add_time_avg(l_librbd_flush_latency, "flush_latency", "Latency of flushes");
plb.add_u64_counter(l_librbd_ws, "ws", "WriteSames");
plb.add_u64_counter(l_librbd_ws_bytes, "ws_bytes", "WriteSame data", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_ws_latency, "ws_latency", "WriteSame latency");
plb.add_u64_counter(l_librbd_cmp, "cmp", "CompareAndWrites");
plb.add_u64_counter(l_librbd_cmp_bytes, "cmp_bytes", "Data size in cmps", NULL, 0, unit_t(UNIT_BYTES));
plb.add_time_avg(l_librbd_cmp_latency, "cmp_latency", "Latency of cmps");
plb.add_u64_counter(l_librbd_snap_create, "snap_create", "Snap creations");
plb.add_u64_counter(l_librbd_snap_remove, "snap_remove", "Snap removals");
plb.add_u64_counter(l_librbd_snap_rollback, "snap_rollback", "Snap rollbacks");
plb.add_u64_counter(l_librbd_snap_rename, "snap_rename", "Snap rename");
plb.add_u64_counter(l_librbd_notify, "notify", "Updated header notifications");
plb.add_u64_counter(l_librbd_resize, "resize", "Resizes");
plb.add_u64_counter(l_librbd_readahead, "readahead", "Read ahead");
plb.add_u64_counter(l_librbd_readahead_bytes, "readahead_bytes", "Data size in read ahead", NULL, 0, unit_t(UNIT_BYTES));
plb.add_u64_counter(l_librbd_invalidate_cache, "invalidate_cache", "Cache invalidates");
plb.add_time(l_librbd_opened_time, "opened_time", "Opened time",
"ots", perf_prio);
plb.add_time(l_librbd_lock_acquired_time, "lock_acquired_time",
"Lock acquired time", "lats", perf_prio);
perfcounter = plb.create_perf_counters();
cct->get_perfcounters_collection()->add(perfcounter);
perfcounter->tset(l_librbd_opened_time, ceph_clock_now());
}
void ImageCtx::perf_stop() {
ceph_assert(perfcounter);
cct->get_perfcounters_collection()->remove(perfcounter);
delete perfcounter;
}
void ImageCtx::set_read_flag(unsigned flag) {
extra_read_flags |= flag;
}
int ImageCtx::get_read_flags(snap_t snap_id) {
int flags = librados::OPERATION_NOFLAG | read_flags;
if (flags != 0)
return flags;
flags = librados::OPERATION_NOFLAG | extra_read_flags;
if (snap_id == LIBRADOS_SNAP_HEAD)
return flags;
if (config.get_val<bool>("rbd_balance_snap_reads"))
flags |= librados::OPERATION_BALANCE_READS;
else if (config.get_val<bool>("rbd_localize_snap_reads"))
flags |= librados::OPERATION_LOCALIZE_READS;
return flags;
}
int ImageCtx::snap_set(uint64_t in_snap_id) {
ceph_assert(ceph_mutex_is_wlocked(image_lock));
auto it = snap_info.find(in_snap_id);
if (in_snap_id != CEPH_NOSNAP && it != snap_info.end()) {
snap_id = in_snap_id;
snap_namespace = it->second.snap_namespace;
snap_name = it->second.name;
snap_exists = true;
if (data_ctx.is_valid()) {
data_ctx.snap_set_read(snap_id);
rebuild_data_io_context();
}
return 0;
}
return -ENOENT;
}
void ImageCtx::snap_unset()
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
snap_id = CEPH_NOSNAP;
snap_namespace = {};
snap_name = "";
snap_exists = true;
if (data_ctx.is_valid()) {
data_ctx.snap_set_read(snap_id);
rebuild_data_io_context();
}
}
snap_t ImageCtx::get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
const string& in_snap_name) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
auto it = snap_ids.find({in_snap_namespace, in_snap_name});
if (it != snap_ids.end()) {
return it->second;
}
return CEPH_NOSNAP;
}
const SnapInfo* ImageCtx::get_snap_info(snap_t in_snap_id) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
map<snap_t, SnapInfo>::const_iterator it =
snap_info.find(in_snap_id);
if (it != snap_info.end())
return &it->second;
return nullptr;
}
int ImageCtx::get_snap_name(snap_t in_snap_id,
string *out_snap_name) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_name = info->name;
return 0;
}
return -ENOENT;
}
int ImageCtx::get_snap_namespace(snap_t in_snap_id,
cls::rbd::SnapshotNamespace *out_snap_namespace) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_snap_namespace = info->snap_namespace;
return 0;
}
return -ENOENT;
}
int ImageCtx::get_parent_spec(snap_t in_snap_id,
cls::rbd::ParentImageSpec *out_pspec) const
{
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*out_pspec = info->parent.spec;
return 0;
}
return -ENOENT;
}
uint64_t ImageCtx::get_current_size() const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
return size;
}
uint64_t ImageCtx::get_object_size() const
{
return 1ull << order;
}
string ImageCtx::get_object_name(uint64_t num) const {
return util::data_object_name(this, num);
}
uint64_t ImageCtx::get_stripe_unit() const
{
return stripe_unit;
}
uint64_t ImageCtx::get_stripe_count() const
{
return stripe_count;
}
uint64_t ImageCtx::get_stripe_period() const
{
return stripe_count * (1ull << order);
}
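// e.g. order=22 (4 MiB objects) with stripe_count=16 yields a 64 MiB stripe
// period -- the span of data in one full pass across an object set before
// striping advances to the next object set.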
utime_t ImageCtx::get_create_timestamp() const
{
return create_timestamp;
}
utime_t ImageCtx::get_access_timestamp() const
{
return access_timestamp;
}
utime_t ImageCtx::get_modify_timestamp() const
{
return modify_timestamp;
}
void ImageCtx::set_access_timestamp(utime_t at)
{
ceph_assert(ceph_mutex_is_wlocked(timestamp_lock));
access_timestamp = at;
}
void ImageCtx::set_modify_timestamp(utime_t mt)
{
ceph_assert(ceph_mutex_is_locked(timestamp_lock));
modify_timestamp = mt;
}
int ImageCtx::is_snap_protected(snap_t in_snap_id,
bool *is_protected) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_protected =
(info->protection_status == RBD_PROTECTION_STATUS_PROTECTED);
return 0;
}
return -ENOENT;
}
int ImageCtx::is_snap_unprotected(snap_t in_snap_id,
bool *is_unprotected) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
*is_unprotected =
(info->protection_status == RBD_PROTECTION_STATUS_UNPROTECTED);
return 0;
}
return -ENOENT;
}
void ImageCtx::add_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
string in_snap_name,
snap_t id, uint64_t in_size,
const ParentImageInfo &parent,
uint8_t protection_status, uint64_t flags,
utime_t timestamp)
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
snaps.push_back(id);
SnapInfo info(in_snap_name, in_snap_namespace,
in_size, parent, protection_status, flags, timestamp);
snap_info.insert({id, info});
snap_ids.insert({{in_snap_namespace, in_snap_name}, id});
}
void ImageCtx::rm_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
string in_snap_name,
snap_t id)
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
snaps.erase(std::remove(snaps.begin(), snaps.end(), id), snaps.end());
snap_info.erase(id);
snap_ids.erase({in_snap_namespace, in_snap_name});
}
uint64_t ImageCtx::get_image_size(snap_t in_snap_id) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
if (in_snap_id == CEPH_NOSNAP) {
if (!resize_reqs.empty() &&
resize_reqs.front()->shrinking()) {
return resize_reqs.front()->get_image_size();
}
return size;
}
const SnapInfo *info = get_snap_info(in_snap_id);
if (info) {
return info->size;
}
return 0;
}
uint64_t ImageCtx::get_area_size(io::ImageArea area) const {
// image areas are defined only for the "opened at" snap_id
// (i.e. where encryption may be loaded)
uint64_t raw_size = get_image_size(snap_id);
if (raw_size == 0) {
return 0;
}
auto size = io::util::raw_to_area_offset(*this, raw_size);
ceph_assert(size.first <= raw_size && size.second == io::ImageArea::DATA);
switch (area) {
case io::ImageArea::DATA:
return size.first;
case io::ImageArea::CRYPTO_HEADER:
// CRYPTO_HEADER area ends where DATA area begins
return raw_size - size.first;
default:
ceph_abort();
}
}
uint64_t ImageCtx::get_object_count(snap_t in_snap_id) const {
ceph_assert(ceph_mutex_is_locked(image_lock));
uint64_t image_size = get_image_size(in_snap_id);
return Striper::get_num_objects(layout, image_size);
}
bool ImageCtx::test_features(uint64_t features) const
{
std::shared_lock l{image_lock};
return test_features(features, image_lock);
}
bool ImageCtx::test_features(uint64_t in_features,
const ceph::shared_mutex &in_image_lock) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
return ((features & in_features) == in_features);
}
bool ImageCtx::test_op_features(uint64_t in_op_features) const
{
std::shared_lock l{image_lock};
return test_op_features(in_op_features, image_lock);
}
bool ImageCtx::test_op_features(uint64_t in_op_features,
const ceph::shared_mutex &in_image_lock) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
return ((op_features & in_op_features) == in_op_features);
}
int ImageCtx::get_flags(librados::snap_t _snap_id, uint64_t *_flags) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
if (_snap_id == CEPH_NOSNAP) {
*_flags = flags;
return 0;
}
const SnapInfo *info = get_snap_info(_snap_id);
if (info) {
*_flags = info->flags;
return 0;
}
return -ENOENT;
}
int ImageCtx::test_flags(librados::snap_t in_snap_id,
uint64_t flags, bool *flags_set) const
{
std::shared_lock l{image_lock};
return test_flags(in_snap_id, flags, image_lock, flags_set);
}
int ImageCtx::test_flags(librados::snap_t in_snap_id,
uint64_t flags,
const ceph::shared_mutex &in_image_lock,
bool *flags_set) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
uint64_t snap_flags;
int r = get_flags(in_snap_id, &snap_flags);
if (r < 0) {
return r;
}
*flags_set = ((snap_flags & flags) == flags);
return 0;
}
int ImageCtx::update_flags(snap_t in_snap_id, uint64_t flag, bool enabled)
{
ceph_assert(ceph_mutex_is_wlocked(image_lock));
uint64_t *_flags;
if (in_snap_id == CEPH_NOSNAP) {
_flags = &flags;
} else {
map<snap_t, SnapInfo>::iterator it = snap_info.find(in_snap_id);
if (it == snap_info.end()) {
return -ENOENT;
}
_flags = &it->second.flags;
}
if (enabled) {
(*_flags) |= flag;
} else {
(*_flags) &= ~flag;
}
return 0;
}
const ParentImageInfo* ImageCtx::get_parent_info(snap_t in_snap_id) const
{
ceph_assert(ceph_mutex_is_locked(image_lock));
if (in_snap_id == CEPH_NOSNAP)
return &parent_md;
const SnapInfo *info = get_snap_info(in_snap_id);
if (info)
return &info->parent;
return NULL;
}
int64_t ImageCtx::get_parent_pool_id(snap_t in_snap_id) const
{
const auto info = get_parent_info(in_snap_id);
if (info)
return info->spec.pool_id;
return -1;
}
string ImageCtx::get_parent_image_id(snap_t in_snap_id) const
{
const auto info = get_parent_info(in_snap_id);
if (info)
return info->spec.image_id;
return "";
}
uint64_t ImageCtx::get_parent_snap_id(snap_t in_snap_id) const
{
const auto info = get_parent_info(in_snap_id);
if (info)
return info->spec.snap_id;
return CEPH_NOSNAP;
}
int ImageCtx::get_parent_overlap(snap_t in_snap_id,
uint64_t* raw_overlap) const {
const auto info = get_parent_info(in_snap_id);
if (info) {
*raw_overlap = info->overlap;
return 0;
}
return -ENOENT;
}
std::pair<uint64_t, io::ImageArea> ImageCtx::reduce_parent_overlap(
uint64_t raw_overlap, bool migration_write) const {
ceph_assert(ceph_mutex_is_locked(image_lock));
if (migration_write) {
// don't reduce migration write overlap -- it may be larger as
// it's the largest overlap across snapshots by construction
return io::util::raw_to_area_offset(*this, raw_overlap);
}
if (raw_overlap == 0 || parent == nullptr) {
// image opened with OPEN_FLAG_SKIP_OPEN_PARENT -> no overlap
return io::util::raw_to_area_offset(*this, 0);
}
// DATA area in the parent may be smaller than the part of DATA
// area in the clone that is still within the overlap (e.g. for
// LUKS2-encrypted parent + LUKS1-encrypted clone, due to LUKS2
// header usually being bigger than LUKS1 header)
auto overlap = io::util::raw_to_area_offset(*this, raw_overlap);
std::shared_lock parent_image_locker(parent->image_lock);
overlap.first = std::min(overlap.first,
parent->get_area_size(overlap.second));
return overlap;
}
uint64_t ImageCtx::prune_parent_extents(io::Extents& image_extents,
io::ImageArea area,
uint64_t raw_overlap,
bool migration_write) const {
ceph_assert(ceph_mutex_is_locked(image_lock));
ldout(cct, 10) << __func__ << ": image_extents=" << image_extents
<< " area=" << area << " raw_overlap=" << raw_overlap
<< " migration_write=" << migration_write << dendl;
if (raw_overlap == 0) {
image_extents.clear();
return 0;
}
auto overlap = reduce_parent_overlap(raw_overlap, migration_write);
if (area == overlap.second) {
// drop extents completely beyond the overlap
while (!image_extents.empty() &&
image_extents.back().first >= overlap.first) {
image_extents.pop_back();
}
if (!image_extents.empty()) {
// trim final overlapping extent
auto& last_extent = image_extents.back();
if (last_extent.first + last_extent.second > overlap.first) {
last_extent.second = overlap.first - last_extent.first;
}
}
} else if (area == io::ImageArea::DATA &&
overlap.second == io::ImageArea::CRYPTO_HEADER) {
// all extents completely beyond the overlap
image_extents.clear();
} else {
// all extents completely within the overlap
ceph_assert(area == io::ImageArea::CRYPTO_HEADER &&
overlap.second == io::ImageArea::DATA);
}
uint64_t overlap_bytes = 0;
for (auto [_, len] : image_extents) {
overlap_bytes += len;
}
ldout(cct, 10) << __func__ << ": overlap=" << overlap.first
<< "/" << overlap.second
<< " got overlap_bytes=" << overlap_bytes
<< " at " << image_extents << dendl;
return overlap_bytes;
}
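// Worked example (illustrative): assuming the reduced overlap is 4096 bytes
// in the DATA area, extents {{0, 1024}, {3072, 2048}, {8192, 512}} prune to
// {{0, 1024}, {3072, 1024}}: the last extent lies wholly beyond the overlap
// and is dropped, the middle one is trimmed at the boundary, and the
// returned overlap_bytes is 2048.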
void ImageCtx::register_watch(Context *on_finish) {
ceph_assert(image_watcher != NULL);
image_watcher->register_watch(on_finish);
}
void ImageCtx::cancel_async_requests() {
C_SaferCond ctx;
cancel_async_requests(&ctx);
ctx.wait();
}
void ImageCtx::cancel_async_requests(Context *on_finish) {
{
std::lock_guard async_ops_locker{async_ops_lock};
if (!async_requests.empty()) {
ldout(cct, 10) << "canceling async requests: count="
<< async_requests.size() << dendl;
for (auto req : async_requests) {
ldout(cct, 10) << "canceling async request: " << req << dendl;
req->cancel();
}
async_requests_waiters.push_back(on_finish);
return;
}
}
on_finish->complete(0);
}
void ImageCtx::apply_metadata(const std::map<std::string, bufferlist> &meta,
bool thread_safe) {
ldout(cct, 20) << __func__ << dendl;
std::unique_lock image_locker(image_lock);
// reset settings back to global defaults
config_overrides.clear();
config.set_config_values(cct->_conf.get_config_values());
// extract config overrides
for (auto meta_pair : meta) {
if (!boost::starts_with(meta_pair.first, METADATA_CONF_PREFIX)) {
continue;
}
std::string key = meta_pair.first.substr(METADATA_CONF_PREFIX.size());
if (!boost::starts_with(key, "rbd_")) {
// ignore non-RBD configuration keys
// TODO use option schema to determine applicable subsystem
ldout(cct, 0) << __func__ << ": ignoring config " << key << dendl;
continue;
}
if (config.find_option(key) != nullptr) {
std::string val(meta_pair.second.c_str(), meta_pair.second.length());
int r = config.set_val(key, val);
if (r >= 0) {
ldout(cct, 20) << __func__ << ": " << key << "=" << val << dendl;
config_overrides.insert(key);
} else {
lderr(cct) << __func__ << ": failed to set config " << key << " "
<< "with value " << val << ": " << cpp_strerror(r)
<< dendl;
}
}
}
image_locker.unlock();
#define ASSIGN_OPTION(param, type) \
param = config.get_val<type>("rbd_"#param)
bool skip_partial_discard = true;
ASSIGN_OPTION(non_blocking_aio, bool);
ASSIGN_OPTION(cache, bool);
ASSIGN_OPTION(sparse_read_threshold_bytes, Option::size_t);
ASSIGN_OPTION(clone_copy_on_read, bool);
ASSIGN_OPTION(enable_alloc_hint, bool);
ASSIGN_OPTION(mirroring_replay_delay, uint64_t);
ASSIGN_OPTION(mtime_update_interval, uint64_t);
ASSIGN_OPTION(atime_update_interval, uint64_t);
ASSIGN_OPTION(skip_partial_discard, bool);
ASSIGN_OPTION(discard_granularity_bytes, uint64_t);
ASSIGN_OPTION(blkin_trace_all, bool);
auto cache_policy = config.get_val<std::string>("rbd_cache_policy");
if (cache_policy == "writethrough" || cache_policy == "writeback") {
ASSIGN_OPTION(readahead_max_bytes, Option::size_t);
ASSIGN_OPTION(readahead_disable_after_bytes, Option::size_t);
}
#undef ASSIGN_OPTION
if (sparse_read_threshold_bytes == 0) {
sparse_read_threshold_bytes = get_object_size();
}
bool dirty_cache = test_features(RBD_FEATURE_DIRTY_CACHE);
if (!skip_partial_discard || dirty_cache) {
discard_granularity_bytes = 0;
}
alloc_hint_flags = 0;
auto compression_hint = config.get_val<std::string>("rbd_compression_hint");
if (compression_hint == "compressible") {
alloc_hint_flags |= librados::ALLOC_HINT_FLAG_COMPRESSIBLE;
} else if (compression_hint == "incompressible") {
alloc_hint_flags |= librados::ALLOC_HINT_FLAG_INCOMPRESSIBLE;
}
librados::Rados rados(md_ctx);
int8_t require_osd_release;
int r = rados.get_min_compatible_osd(&require_osd_release);
if (r == 0 && require_osd_release >= CEPH_RELEASE_OCTOPUS) {
read_flags = 0;
auto read_policy = config.get_val<std::string>("rbd_read_from_replica_policy");
if (read_policy == "balance") {
read_flags |= librados::OPERATION_BALANCE_READS;
} else if (read_policy == "localize") {
read_flags |= librados::OPERATION_LOCALIZE_READS;
}
}
io_image_dispatcher->apply_qos_schedule_tick_min(
config.get_val<uint64_t>("rbd_qos_schedule_tick_min"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_iops_limit"),
config.get_val<uint64_t>("rbd_qos_iops_burst"),
config.get_val<uint64_t>("rbd_qos_iops_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_bps_limit"),
config.get_val<uint64_t>("rbd_qos_bps_burst"),
config.get_val<uint64_t>("rbd_qos_bps_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_READ_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_read_iops_limit"),
config.get_val<uint64_t>("rbd_qos_read_iops_burst"),
config.get_val<uint64_t>("rbd_qos_read_iops_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_WRITE_IOPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_write_iops_limit"),
config.get_val<uint64_t>("rbd_qos_write_iops_burst"),
config.get_val<uint64_t>("rbd_qos_write_iops_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_READ_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_read_bps_limit"),
config.get_val<uint64_t>("rbd_qos_read_bps_burst"),
config.get_val<uint64_t>("rbd_qos_read_bps_burst_seconds"));
io_image_dispatcher->apply_qos_limit(
io::IMAGE_DISPATCH_FLAG_QOS_WRITE_BPS_THROTTLE,
config.get_val<uint64_t>("rbd_qos_write_bps_limit"),
config.get_val<uint64_t>("rbd_qos_write_bps_burst"),
config.get_val<uint64_t>("rbd_qos_write_bps_burst_seconds"));
io_image_dispatcher->apply_qos_exclude_ops(
librbd::io::rbd_io_operations_from_string(
config.get_val<std::string>("rbd_qos_exclude_ops"), nullptr));
if (!disable_zero_copy &&
config.get_val<bool>("rbd_disable_zero_copy_writes")) {
ldout(cct, 5) << this << ": disabling zero-copy writes" << dendl;
disable_zero_copy = true;
}
}
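// Illustrative note (not part of the upstream file): per-image overrides
// arrive as image metadata keys carrying the METADATA_CONF_PREFIX, e.g.
//
//   rbd image-meta set <pool>/<image> conf_rbd_cache false
//
// apply_metadata() strips the "conf_" prefix, verifies that "rbd_cache" is a
// known option, applies it to the image-local ConfigProxy and records the
// key in config_overrides.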
ExclusiveLock<ImageCtx> *ImageCtx::create_exclusive_lock() {
return new ExclusiveLock<ImageCtx>(*this);
}
ObjectMap<ImageCtx> *ImageCtx::create_object_map(uint64_t snap_id) {
return new ObjectMap<ImageCtx>(*this, snap_id);
}
Journal<ImageCtx> *ImageCtx::create_journal() {
return new Journal<ImageCtx>(*this);
}
void ImageCtx::set_image_name(const std::string &image_name) {
// update the name so rename can be invoked repeatedly
std::shared_lock owner_locker{owner_lock};
std::unique_lock image_locker{image_lock};
name = image_name;
if (old_format) {
header_oid = util::old_header_name(image_name);
}
}
void ImageCtx::notify_update() {
state->handle_update_notification();
ImageWatcher<>::notify_header_update(md_ctx, header_oid);
}
void ImageCtx::notify_update(Context *on_finish) {
state->handle_update_notification();
image_watcher->notify_header_update(on_finish);
}
exclusive_lock::Policy *ImageCtx::get_exclusive_lock_policy() const {
ceph_assert(ceph_mutex_is_locked(owner_lock));
ceph_assert(exclusive_lock_policy != nullptr);
return exclusive_lock_policy;
}
void ImageCtx::set_exclusive_lock_policy(exclusive_lock::Policy *policy) {
ceph_assert(ceph_mutex_is_wlocked(owner_lock));
ceph_assert(policy != nullptr);
delete exclusive_lock_policy;
exclusive_lock_policy = policy;
}
journal::Policy *ImageCtx::get_journal_policy() const {
ceph_assert(ceph_mutex_is_locked(image_lock));
ceph_assert(journal_policy != nullptr);
return journal_policy;
}
void ImageCtx::set_journal_policy(journal::Policy *policy) {
ceph_assert(ceph_mutex_is_wlocked(image_lock));
ceph_assert(policy != nullptr);
delete journal_policy;
journal_policy = policy;
}
void ImageCtx::rebuild_data_io_context() {
auto ctx = std::make_shared<neorados::IOContext>(
data_ctx.get_id(), data_ctx.get_namespace());
if (snap_id != CEPH_NOSNAP) {
ctx->read_snap(snap_id);
}
if (!snapc.snaps.empty()) {
ctx->write_snap_context(
{{snapc.seq, {snapc.snaps.begin(), snapc.snaps.end()}}});
}
if (data_ctx.get_pool_full_try()) {
ctx->full_try(true);
}
// atomically reset the data IOContext to new version
atomic_store(&data_io_context, ctx);
}
IOContext ImageCtx::get_data_io_context() const {
return atomic_load(&data_io_context);
}
IOContext ImageCtx::duplicate_data_io_context() const {
auto ctx = get_data_io_context();
return std::make_shared<neorados::IOContext>(*ctx);
}
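// Illustrative consumer sketch (not part of the upstream file): readers take
// an immutable shared_ptr snapshot, while writers publish a rebuilt context
// atomically, so in-flight I/O is never disturbed by snap_set()/snap_unset().
#if 0
auto io_context = image_ctx->get_data_io_context();  // image_ctx assumed
// ... issue neorados operations against *io_context; a concurrent
// rebuild_data_io_context() swaps in a fresh context without mutating
// this snapshot.
#endif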
void ImageCtx::get_timer_instance(CephContext *cct, SafeTimer **timer,
ceph::mutex **timer_lock) {
auto safe_timer_singleton =
&cct->lookup_or_create_singleton_object<SafeTimerSingleton>(
"librbd::journal::safe_timer", false, cct);
*timer = safe_timer_singleton;
*timer_lock = &safe_timer_singleton->lock;
}
} // namespace librbd
| 33,692 | 31.71165 | 125 |
cc
|
null |
ceph-main/src/librbd/ImageCtx.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGECTX_H
#define CEPH_LIBRBD_IMAGECTX_H
#include "include/int_types.h"
#include <atomic>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "common/config_proxy.h"
#include "common/event_socket.h"
#include "common/Readahead.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "include/common_fwd.h"
#include "include/buffer_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rbd_types.h"
#include "include/types.h"
#include "include/xlist.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsyncRequest.h"
#include "librbd/Types.h"
#include <boost/lockfree/policies.hpp>
#include <boost/lockfree/queue.hpp>
namespace neorados {
class IOContext;
class RADOS;
} // namespace neorados
namespace librbd {
struct AsioEngine;
template <typename> class ConfigWatcher;
template <typename> class ExclusiveLock;
template <typename> class ImageState;
template <typename> class ImageWatcher;
template <typename> class Journal;
class LibrbdAdminSocketHook;
template <typename> class ObjectMap;
template <typename> class Operations;
template <typename> class PluginRegistry;
namespace asio { struct ContextWQ; }
namespace crypto { template <typename> class EncryptionFormat; }
namespace exclusive_lock { struct Policy; }
namespace io {
class AioCompletion;
class AsyncOperation;
template <typename> class CopyupRequest;
enum class ImageArea;
struct ImageDispatcherInterface;
struct ObjectDispatcherInterface;
}
namespace journal { struct Policy; }
namespace operation {
template <typename> class ResizeRequest;
}
struct ImageCtx {
typedef std::pair<cls::rbd::SnapshotNamespace, std::string> SnapKey;
struct SnapKeyComparator {
inline bool operator()(const SnapKey& lhs, const SnapKey& rhs) const {
// only compare by namespace type and name
if (lhs.first.index() != rhs.first.index()) {
return lhs.first.index() < rhs.first.index();
}
return lhs.second < rhs.second;
}
};
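  // e.g. a user-namespace snapshot "s1" and a trash-namespace snapshot "s1"
  // differ in namespace index, so both keys can coexist in snap_ids below.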
static const std::string METADATA_CONF_PREFIX;
CephContext *cct;
ConfigProxy config;
std::set<std::string> config_overrides;
PerfCounters *perfcounter;
struct rbd_obj_header_ondisk header;
::SnapContext snapc;
std::vector<librados::snap_t> snaps; // this mirrors snapc.snaps, but is in
// a format librados can understand
std::map<librados::snap_t, SnapInfo> snap_info;
std::map<SnapKey, librados::snap_t, SnapKeyComparator> snap_ids;
uint64_t open_snap_id = CEPH_NOSNAP;
uint64_t snap_id;
bool snap_exists; // false if our snap_id was deleted
// whether the image was opened read-only. cannot be changed after opening
bool read_only;
uint32_t read_only_flags = 0U;
uint32_t read_only_mask = ~0U;
std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> lockers;
bool exclusive_locked;
std::string lock_tag;
std::string name;
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
std::shared_ptr<AsioEngine> asio_engine;
// New ASIO-style RADOS API
neorados::RADOS& rados_api;
// Legacy RADOS API
librados::IoCtx data_ctx;
librados::IoCtx md_ctx;
ConfigWatcher<ImageCtx> *config_watcher = nullptr;
ImageWatcher<ImageCtx> *image_watcher;
Journal<ImageCtx> *journal;
/**
* Lock ordering:
*
* owner_lock, image_lock
* async_op_lock, timestamp_lock
*/
ceph::shared_mutex owner_lock; // protects exclusive lock leadership updates
mutable ceph::shared_mutex image_lock; // protects snapshot-related member variables,
// features (and associated helper classes), and flags
// protects access to the mutable image metadata that
// isn't guarded by other locks below, and blocks writes
// when held exclusively, so snapshots can be consistent.
// Fields guarded include:
// total_bytes_read
// exclusive_locked
// lock_tag
// lockers
// object_map
// parent_md and parent
// encryption_format
ceph::shared_mutex timestamp_lock; // protects (create/access/modify)_timestamp
ceph::mutex async_ops_lock; // protects async_ops and async_requests
ceph::mutex copyup_list_lock; // protects copyup_waiting_list
unsigned extra_read_flags; // librados::OPERATION_*
bool old_format;
uint8_t order;
uint64_t size;
uint64_t features;
std::string object_prefix;
char *format_string;
std::string header_oid;
std::string id; // only used for new-format images
ParentImageInfo parent_md;
ImageCtx *parent;
ImageCtx *child = nullptr;
MigrationInfo migration_info;
cls::rbd::GroupSpec group_spec;
uint64_t stripe_unit, stripe_count;
uint64_t flags;
uint64_t op_features = 0;
bool operations_disabled = false;
utime_t create_timestamp;
utime_t access_timestamp;
utime_t modify_timestamp;
file_layout_t layout;
Readahead readahead;
std::atomic<uint64_t> total_bytes_read = {0};
std::map<uint64_t, io::CopyupRequest<ImageCtx>*> copyup_list;
xlist<io::AsyncOperation*> async_ops;
xlist<AsyncRequest<>*> async_requests;
std::list<Context*> async_requests_waiters;
ImageState<ImageCtx> *state;
Operations<ImageCtx> *operations;
ExclusiveLock<ImageCtx> *exclusive_lock;
ObjectMap<ImageCtx> *object_map;
xlist<operation::ResizeRequest<ImageCtx>*> resize_reqs;
io::ImageDispatcherInterface *io_image_dispatcher = nullptr;
io::ObjectDispatcherInterface *io_object_dispatcher = nullptr;
asio::ContextWQ *op_work_queue;
PluginRegistry<ImageCtx>* plugin_registry;
using Completions = boost::lockfree::queue<io::AioCompletion*>;
Completions event_socket_completions;
EventSocket event_socket;
bool ignore_migrating = false;
bool disable_zero_copy = false;
bool enable_sparse_copyup = false;
/// Cached latency-sensitive configuration settings
bool non_blocking_aio;
bool cache;
uint64_t sparse_read_threshold_bytes;
uint64_t readahead_max_bytes = 0;
uint64_t readahead_disable_after_bytes = 0;
bool clone_copy_on_read;
bool enable_alloc_hint;
uint32_t alloc_hint_flags = 0U;
uint32_t read_flags = 0U; // librados::OPERATION_*
uint32_t discard_granularity_bytes = 0;
bool blkin_trace_all;
uint64_t mirroring_replay_delay;
uint64_t mtime_update_interval;
uint64_t atime_update_interval;
LibrbdAdminSocketHook *asok_hook;
exclusive_lock::Policy *exclusive_lock_policy = nullptr;
journal::Policy *journal_policy = nullptr;
ZTracer::Endpoint trace_endpoint;
std::unique_ptr<crypto::EncryptionFormat<ImageCtx>> encryption_format;
// unit test mock helpers
static ImageCtx* create(const std::string &image_name,
const std::string &image_id,
const char *snap, IoCtx& p, bool read_only) {
return new ImageCtx(image_name, image_id, snap, p, read_only);
}
static ImageCtx* create(const std::string &image_name,
const std::string &image_id,
librados::snap_t snap_id, IoCtx& p,
bool read_only) {
return new ImageCtx(image_name, image_id, snap_id, p, read_only);
}
/**
* Either image_name or image_id must be set.
* If id is not known, pass the empty std::string,
* and init() will look it up.
*/
ImageCtx(const std::string &image_name, const std::string &image_id,
const char *snap, IoCtx& p, bool read_only);
ImageCtx(const std::string &image_name, const std::string &image_id,
librados::snap_t snap_id, IoCtx& p, bool read_only);
~ImageCtx();
void init();
void shutdown();
void init_layout(int64_t pool_id);
void perf_start(std::string name);
void perf_stop();
void set_read_flag(unsigned flag);
int get_read_flags(librados::snap_t snap_id);
int snap_set(uint64_t snap_id);
void snap_unset();
librados::snap_t get_snap_id(const cls::rbd::SnapshotNamespace& in_snap_namespace,
const std::string& in_snap_name) const;
const SnapInfo* get_snap_info(librados::snap_t in_snap_id) const;
int get_snap_name(librados::snap_t in_snap_id,
std::string *out_snap_name) const;
int get_snap_namespace(librados::snap_t in_snap_id,
cls::rbd::SnapshotNamespace *out_snap_namespace) const;
int get_parent_spec(librados::snap_t in_snap_id,
cls::rbd::ParentImageSpec *pspec) const;
int is_snap_protected(librados::snap_t in_snap_id,
bool *is_protected) const;
int is_snap_unprotected(librados::snap_t in_snap_id,
bool *is_unprotected) const;
uint64_t get_current_size() const;
uint64_t get_object_size() const;
std::string get_object_name(uint64_t num) const;
uint64_t get_stripe_unit() const;
uint64_t get_stripe_count() const;
uint64_t get_stripe_period() const;
utime_t get_create_timestamp() const;
utime_t get_access_timestamp() const;
utime_t get_modify_timestamp() const;
void set_access_timestamp(utime_t at);
void set_modify_timestamp(utime_t at);
void add_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
std::string in_snap_name,
librados::snap_t id,
uint64_t in_size, const ParentImageInfo &parent,
uint8_t protection_status, uint64_t flags, utime_t timestamp);
void rm_snap(cls::rbd::SnapshotNamespace in_snap_namespace,
std::string in_snap_name,
librados::snap_t id);
uint64_t get_image_size(librados::snap_t in_snap_id) const;
uint64_t get_area_size(io::ImageArea area) const;
uint64_t get_object_count(librados::snap_t in_snap_id) const;
bool test_features(uint64_t test_features) const;
bool test_features(uint64_t test_features,
const ceph::shared_mutex &in_image_lock) const;
bool test_op_features(uint64_t op_features) const;
bool test_op_features(uint64_t op_features,
const ceph::shared_mutex &in_image_lock) const;
int get_flags(librados::snap_t in_snap_id, uint64_t *flags) const;
int test_flags(librados::snap_t in_snap_id,
uint64_t test_flags, bool *flags_set) const;
int test_flags(librados::snap_t in_snap_id,
uint64_t test_flags, const ceph::shared_mutex &in_image_lock,
bool *flags_set) const;
int update_flags(librados::snap_t in_snap_id, uint64_t flag, bool enabled);
const ParentImageInfo* get_parent_info(librados::snap_t in_snap_id) const;
int64_t get_parent_pool_id(librados::snap_t in_snap_id) const;
std::string get_parent_image_id(librados::snap_t in_snap_id) const;
uint64_t get_parent_snap_id(librados::snap_t in_snap_id) const;
int get_parent_overlap(librados::snap_t in_snap_id,
uint64_t* raw_overlap) const;
std::pair<uint64_t, io::ImageArea> reduce_parent_overlap(
uint64_t raw_overlap, bool migration_write) const;
uint64_t prune_parent_extents(
std::vector<std::pair<uint64_t, uint64_t>>& image_extents,
io::ImageArea area, uint64_t raw_overlap, bool migration_write) const;
void register_watch(Context *on_finish);
void cancel_async_requests();
void cancel_async_requests(Context *on_finish);
void apply_metadata(const std::map<std::string, bufferlist> &meta,
bool thread_safe);
ExclusiveLock<ImageCtx> *create_exclusive_lock();
ObjectMap<ImageCtx> *create_object_map(uint64_t snap_id);
Journal<ImageCtx> *create_journal();
void set_image_name(const std::string &name);
void notify_update();
void notify_update(Context *on_finish);
exclusive_lock::Policy *get_exclusive_lock_policy() const;
void set_exclusive_lock_policy(exclusive_lock::Policy *policy);
journal::Policy *get_journal_policy() const;
void set_journal_policy(journal::Policy *policy);
void rebuild_data_io_context();
IOContext get_data_io_context() const;
IOContext duplicate_data_io_context() const;
static void get_timer_instance(CephContext *cct, SafeTimer **timer,
ceph::mutex **timer_lock);
private:
std::shared_ptr<neorados::IOContext> data_io_context;
};
}
#endif
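// A minimal lifecycle sketch, not part of the header above: it assumes an
// open librados::IoCtx named `io_ctx`, an existing image "foo", and uses
// the mock-friendly ImageCtx::create() helper declared above;
// `example_open_and_close` is a hypothetical name.
inline int example_open_and_close(librados::IoCtx& io_ctx) {
  librbd::ImageCtx* ictx =
      librbd::ImageCtx::create("foo", "", nullptr, io_ctx, false);
  int r = ictx->state->open(0);
  if (r < 0) {
    // on open failure ImageState deletes the ImageCtx -- do not touch ictx
    return r;
  }
  r = ictx->state->refresh_if_required();  // no-op unless a refresh is needed
  ictx->state->close();  // deletes ictx once the close action completes
  return r;
}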
ceph-main/src/librbd/ImageState.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ImageState.h"
#include "include/rbd/librbd.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/WorkQueue.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/TaskFinisher.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/image/CloseRequest.h"
#include "librbd/image/OpenRequest.h"
#include "librbd/image/RefreshRequest.h"
#include "librbd/image/SetSnapRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageState: " << this << " "
namespace librbd {
using util::create_async_context_callback;
using util::create_context_callback;
class ImageUpdateWatchers {
public:
explicit ImageUpdateWatchers(CephContext *cct) : m_cct(cct),
m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageUpdateWatchers::m_lock", this))) {
}
~ImageUpdateWatchers() {
ceph_assert(m_watchers.empty());
ceph_assert(m_in_flight.empty());
ceph_assert(m_pending_unregister.empty());
ceph_assert(m_on_shut_down_finish == nullptr);
destroy_work_queue();
}
void flush(Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
std::lock_guard locker{m_lock};
if (!m_in_flight.empty()) {
Context *ctx = new LambdaContext(
[this, on_finish](int r) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing flush" << dendl;
on_finish->complete(r);
});
m_work_queue->queue(ctx, 0);
return;
}
}
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing flush" << dendl;
on_finish->complete(0);
}
void shut_down(Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
m_watchers.clear();
if (!m_in_flight.empty()) {
m_on_shut_down_finish = on_finish;
return;
}
}
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing shut down" << dendl;
on_finish->complete(0);
}
void register_watcher(UpdateWatchCtx *watcher, uint64_t *handle) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": watcher="
<< watcher << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_on_shut_down_finish == nullptr);
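    // lazily create the shared work queue on first watcher registration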
create_work_queue();
*handle = m_next_handle++;
m_watchers.insert(std::make_pair(*handle, watcher));
}
void unregister_watcher(uint64_t handle, Context *on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << dendl;
int r = 0;
{
std::lock_guard locker{m_lock};
auto it = m_watchers.find(handle);
if (it == m_watchers.end()) {
r = -ENOENT;
} else {
if (m_in_flight.find(handle) != m_in_flight.end()) {
ceph_assert(m_pending_unregister.find(handle) == m_pending_unregister.end());
m_pending_unregister[handle] = on_finish;
on_finish = nullptr;
}
m_watchers.erase(it);
}
}
if (on_finish) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing unregister" << dendl;
on_finish->complete(r);
}
}
void notify() {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << dendl;
std::lock_guard locker{m_lock};
for (auto it : m_watchers) {
send_notify(it.first, it.second);
}
}
void send_notify(uint64_t handle, UpdateWatchCtx *watcher) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << ", watcher=" << watcher << dendl;
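    // track the in-flight notification so flush/unregister/shut_down can
    // wait for its delivery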
m_in_flight.insert(handle);
Context *ctx = new LambdaContext(
[this, handle, watcher](int r) {
handle_notify(handle, watcher);
});
m_work_queue->queue(ctx, 0);
}
void handle_notify(uint64_t handle, UpdateWatchCtx *watcher) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__ << ": handle="
<< handle << ", watcher=" << watcher << dendl;
watcher->handle_notify();
Context *on_unregister_finish = nullptr;
Context *on_shut_down_finish = nullptr;
{
std::lock_guard locker{m_lock};
auto in_flight_it = m_in_flight.find(handle);
ceph_assert(in_flight_it != m_in_flight.end());
m_in_flight.erase(in_flight_it);
      // If there are no more in-flight notifications for this watcher
      // and it is pending unregister, complete the unregister now.
if (m_in_flight.find(handle) == m_in_flight.end()) {
auto it = m_pending_unregister.find(handle);
if (it != m_pending_unregister.end()) {
on_unregister_finish = it->second;
m_pending_unregister.erase(it);
}
}
if (m_in_flight.empty()) {
ceph_assert(m_pending_unregister.empty());
if (m_on_shut_down_finish != nullptr) {
std::swap(m_on_shut_down_finish, on_shut_down_finish);
}
}
}
if (on_unregister_finish != nullptr) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing unregister" << dendl;
on_unregister_finish->complete(0);
}
if (on_shut_down_finish != nullptr) {
ldout(m_cct, 20) << "ImageUpdateWatchers::" << __func__
<< ": completing shut down" << dendl;
on_shut_down_finish->complete(0);
}
}
private:
class ThreadPoolSingleton : public ThreadPool {
public:
explicit ThreadPoolSingleton(CephContext *cct)
: ThreadPool(cct, "librbd::ImageUpdateWatchers::thread_pool", "tp_librbd",
1) {
start();
}
~ThreadPoolSingleton() override {
stop();
}
};
CephContext *m_cct;
ceph::mutex m_lock;
ContextWQ *m_work_queue = nullptr;
std::map<uint64_t, UpdateWatchCtx*> m_watchers;
uint64_t m_next_handle = 0;
std::multiset<uint64_t> m_in_flight;
std::map<uint64_t, Context*> m_pending_unregister;
Context *m_on_shut_down_finish = nullptr;
void create_work_queue() {
if (m_work_queue != nullptr) {
return;
}
auto& thread_pool = m_cct->lookup_or_create_singleton_object<
ThreadPoolSingleton>("librbd::ImageUpdateWatchers::thread_pool",
false, m_cct);
m_work_queue = new ContextWQ("librbd::ImageUpdateWatchers::work_queue",
ceph::make_timespan(
m_cct->_conf.get_val<uint64_t>("rbd_op_thread_timeout")),
&thread_pool);
}
void destroy_work_queue() {
if (m_work_queue == nullptr) {
return;
}
m_work_queue->drain();
delete m_work_queue;
}
};
class QuiesceWatchers {
public:
explicit QuiesceWatchers(CephContext *cct, asio::ContextWQ* work_queue)
: m_cct(cct),
m_work_queue(work_queue),
m_lock(ceph::make_mutex(util::unique_lock_name(
"librbd::QuiesceWatchers::m_lock", this))) {
}
~QuiesceWatchers() {
ceph_assert(m_pending_unregister.empty());
ceph_assert(m_on_notify == nullptr);
}
void register_watcher(QuiesceWatchCtx *watcher, uint64_t *handle) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": watcher="
<< watcher << dendl;
std::lock_guard locker{m_lock};
*handle = m_next_handle++;
m_watchers[*handle] = watcher;
}
void unregister_watcher(uint64_t handle, Context *on_finish) {
int r = 0;
{
std::lock_guard locker{m_lock};
auto it = m_watchers.find(handle);
if (it == m_watchers.end()) {
r = -ENOENT;
} else {
if (m_on_notify != nullptr) {
ceph_assert(!m_pending_unregister.count(handle));
m_pending_unregister[handle] = on_finish;
on_finish = nullptr;
}
m_watchers.erase(it);
}
}
if (on_finish) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__
<< ": completing unregister " << handle << dendl;
on_finish->complete(r);
}
}
void notify_quiesce(Context *on_finish) {
std::lock_guard locker{m_lock};
if (m_blocked) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": queue" << dendl;
m_pending_notify.push_back(on_finish);
return;
}
notify(QUIESCE, on_finish);
}
void notify_unquiesce(Context *on_finish) {
std::lock_guard locker{m_lock};
notify(UNQUIESCE, on_finish);
}
void quiesce_complete(uint64_t handle, int r) {
Context *on_notify = nullptr;
{
std::lock_guard locker{m_lock};
ceph_assert(m_on_notify != nullptr);
ceph_assert(m_handle_quiesce_cnt > 0);
m_handle_quiesce_cnt--;
if (r < 0) {
ldout(m_cct, 10) << "QuiesceWatchers::" << __func__ << ": watcher "
<< handle << " failed" << dendl;
m_failed_watchers.insert(handle);
m_ret_val = r;
}
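      // wait until every registered watcher has acked this quiesce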
if (m_handle_quiesce_cnt > 0) {
return;
}
std::swap(on_notify, m_on_notify);
r = m_ret_val;
}
on_notify->complete(r);
}
private:
enum EventType {QUIESCE, UNQUIESCE};
CephContext *m_cct;
asio::ContextWQ *m_work_queue;
ceph::mutex m_lock;
std::map<uint64_t, QuiesceWatchCtx*> m_watchers;
uint64_t m_next_handle = 0;
Context *m_on_notify = nullptr;
std::list<Context *> m_pending_notify;
std::map<uint64_t, Context*> m_pending_unregister;
uint64_t m_handle_quiesce_cnt = 0;
std::set<uint64_t> m_failed_watchers;
bool m_blocked = false;
int m_ret_val = 0;
void notify(EventType event_type, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_watchers.empty()) {
m_work_queue->queue(on_finish);
return;
}
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << " event: "
<< event_type << dendl;
Context *ctx = nullptr;
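    // QUIESCE blocks further quiesce notifications (they are queued) until
    // the matching UNQUIESCE has been processed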
if (event_type == QUIESCE) {
ceph_assert(!m_blocked);
ceph_assert(m_handle_quiesce_cnt == 0);
m_blocked = true;
m_handle_quiesce_cnt = m_watchers.size();
m_failed_watchers.clear();
m_ret_val = 0;
} else {
ceph_assert(event_type == UNQUIESCE);
ceph_assert(m_blocked);
ctx = create_async_context_callback(
m_work_queue, create_context_callback<
QuiesceWatchers, &QuiesceWatchers::handle_notify_unquiesce>(this));
}
auto gather_ctx = new C_Gather(m_cct, ctx);
ceph_assert(m_on_notify == nullptr);
m_on_notify = on_finish;
for (auto &[handle, watcher] : m_watchers) {
send_notify(handle, watcher, event_type, gather_ctx->new_sub());
}
gather_ctx->activate();
}
void send_notify(uint64_t handle, QuiesceWatchCtx *watcher,
EventType event_type, Context *on_finish) {
auto ctx = new LambdaContext(
[this, handle, watcher, event_type, on_finish](int) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": handle="
<< handle << ", event_type=" << event_type << dendl;
switch (event_type) {
case QUIESCE:
watcher->handle_quiesce();
break;
case UNQUIESCE:
{
std::lock_guard locker{m_lock};
if (m_failed_watchers.count(handle)) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__
<< ": skip for failed watcher" << dendl;
break;
}
}
watcher->handle_unquiesce();
break;
default:
ceph_abort_msgf("invalid event_type %d", event_type);
}
on_finish->complete(0);
});
m_work_queue->queue(ctx);
}
void handle_notify_unquiesce(int r) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__ << ": r=" << r
<< dendl;
ceph_assert(r == 0);
std::unique_lock locker{m_lock};
if (!m_pending_unregister.empty()) {
std::map<uint64_t, Context*> pending_unregister;
std::swap(pending_unregister, m_pending_unregister);
locker.unlock();
for (auto &it : pending_unregister) {
ldout(m_cct, 20) << "QuiesceWatchers::" << __func__
<< ": completing unregister " << it.first << dendl;
it.second->complete(0);
}
locker.lock();
}
Context *on_notify = nullptr;
std::swap(on_notify, m_on_notify);
ceph_assert(m_blocked);
m_blocked = false;
if (!m_pending_notify.empty()) {
auto on_finish = m_pending_notify.front();
m_pending_notify.pop_front();
notify(QUIESCE, on_finish);
}
locker.unlock();
on_notify->complete(0);
}
};
template <typename I>
ImageState<I>::ImageState(I *image_ctx)
: m_image_ctx(image_ctx), m_state(STATE_UNINITIALIZED),
m_lock(ceph::make_mutex(util::unique_lock_name("librbd::ImageState::m_lock", this))),
m_last_refresh(0), m_refresh_seq(0),
m_update_watchers(new ImageUpdateWatchers(image_ctx->cct)),
m_quiesce_watchers(new QuiesceWatchers(
image_ctx->cct, image_ctx->asio_engine->get_work_queue())) {
}
template <typename I>
ImageState<I>::~ImageState() {
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
delete m_update_watchers;
delete m_quiesce_watchers;
}
template <typename I>
int ImageState<I>::open(uint64_t flags) {
C_SaferCond ctx;
open(flags, &ctx);
int r = ctx.wait();
return r;
}
template <typename I>
void ImageState<I>::open(uint64_t flags, Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_lock.lock();
ceph_assert(m_state == STATE_UNINITIALIZED);
m_open_flags = flags;
Action action(ACTION_TYPE_OPEN);
action.refresh_seq = m_refresh_seq;
execute_action_unlock(action, on_finish);
}
template <typename I>
int ImageState<I>::close() {
C_SaferCond ctx;
close(&ctx);
int r = ctx.wait();
return r;
}
template <typename I>
void ImageState<I>::close(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_lock.lock();
ceph_assert(!is_closed());
Action action(ACTION_TYPE_CLOSE);
action.refresh_seq = m_refresh_seq;
execute_action_unlock(action, on_finish);
}
template <typename I>
void ImageState<I>::handle_update_notification() {
std::lock_guard locker{m_lock};
++m_refresh_seq;
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": refresh_seq = " << m_refresh_seq << ", "
<< "last_refresh = " << m_last_refresh << dendl;
switch (m_state) {
case STATE_UNINITIALIZED:
case STATE_CLOSED:
case STATE_OPENING:
case STATE_CLOSING:
ldout(cct, 5) << "dropping update notification to watchers" << dendl;
return;
default:
break;
}
m_update_watchers->notify();
}
template <typename I>
bool ImageState<I>::is_refresh_required() const {
std::lock_guard locker{m_lock};
return (m_last_refresh != m_refresh_seq || find_pending_refresh() != nullptr);
}
template <typename I>
int ImageState<I>::refresh() {
C_SaferCond refresh_ctx;
refresh(&refresh_ctx);
return refresh_ctx.wait();
}
template <typename I>
void ImageState<I>::refresh(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_lock.lock();
if (is_closed()) {
m_lock.unlock();
on_finish->complete(-ESHUTDOWN);
return;
}
Action action(ACTION_TYPE_REFRESH);
action.refresh_seq = m_refresh_seq;
execute_action_unlock(action, on_finish);
}
template <typename I>
int ImageState<I>::refresh_if_required() {
C_SaferCond ctx;
{
m_lock.lock();
Action action(ACTION_TYPE_REFRESH);
action.refresh_seq = m_refresh_seq;
auto refresh_action = find_pending_refresh();
if (refresh_action != nullptr) {
// if a refresh is in-flight, delay until it is finished
action = *refresh_action;
} else if (m_last_refresh == m_refresh_seq) {
m_lock.unlock();
return 0;
} else if (is_closed()) {
m_lock.unlock();
return -ESHUTDOWN;
}
execute_action_unlock(action, &ctx);
}
return ctx.wait();
}
template <typename I>
const typename ImageState<I>::Action *
ImageState<I>::find_pending_refresh() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
auto it = std::find_if(m_actions_contexts.rbegin(),
m_actions_contexts.rend(),
[](const ActionContexts& action_contexts) {
return (action_contexts.first == ACTION_TYPE_REFRESH);
});
if (it != m_actions_contexts.rend()) {
return &it->first;
}
return nullptr;
}
template <typename I>
void ImageState<I>::snap_set(uint64_t snap_id, Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": snap_id=" << snap_id << dendl;
Action action(ACTION_TYPE_SET_SNAP);
action.snap_id = snap_id;
m_lock.lock();
execute_action_unlock(action, on_finish);
}
template <typename I>
void ImageState<I>::prepare_lock(Context *on_ready) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << dendl;
m_lock.lock();
if (is_closed()) {
m_lock.unlock();
on_ready->complete(-ESHUTDOWN);
return;
}
Action action(ACTION_TYPE_LOCK);
action.on_ready = on_ready;
execute_action_unlock(action, nullptr);
}
template <typename I>
void ImageState<I>::handle_prepare_lock_complete() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << __func__ << dendl;
m_lock.lock();
if (m_state != STATE_PREPARING_LOCK) {
m_lock.unlock();
return;
}
complete_action_unlock(STATE_OPEN, 0);
}
template <typename I>
int ImageState<I>::register_update_watcher(UpdateWatchCtx *watcher,
uint64_t *handle) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_update_watchers->register_watcher(watcher, handle);
ldout(cct, 20) << __func__ << ": handle=" << *handle << dendl;
return 0;
}
template <typename I>
void ImageState<I>::unregister_update_watcher(uint64_t handle,
Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": handle=" << handle << dendl;
m_update_watchers->unregister_watcher(handle, on_finish);
}
template <typename I>
int ImageState<I>::unregister_update_watcher(uint64_t handle) {
C_SaferCond ctx;
unregister_update_watcher(handle, &ctx);
return ctx.wait();
}
template <typename I>
void ImageState<I>::flush_update_watchers(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_update_watchers->flush(on_finish);
}
template <typename I>
void ImageState<I>::shut_down_update_watchers(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_update_watchers->shut_down(on_finish);
}
template <typename I>
bool ImageState<I>::is_transition_state() const {
switch (m_state) {
case STATE_UNINITIALIZED:
case STATE_OPEN:
case STATE_CLOSED:
return false;
case STATE_OPENING:
case STATE_CLOSING:
case STATE_REFRESHING:
case STATE_SETTING_SNAP:
case STATE_PREPARING_LOCK:
break;
}
return true;
}
template <typename I>
bool ImageState<I>::is_closed() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return ((m_state == STATE_CLOSED) ||
(!m_actions_contexts.empty() &&
m_actions_contexts.back().first.action_type == ACTION_TYPE_CLOSE));
}
template <typename I>
void ImageState<I>::append_context(const Action &action, Context *context) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ActionContexts *action_contexts = nullptr;
for (auto &action_ctxs : m_actions_contexts) {
if (action == action_ctxs.first) {
action_contexts = &action_ctxs;
break;
}
}
if (action_contexts == nullptr) {
m_actions_contexts.push_back({action, {}});
action_contexts = &m_actions_contexts.back();
}
if (context != nullptr) {
action_contexts->second.push_back(context);
}
}
template <typename I>
void ImageState<I>::execute_next_action_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
switch (m_actions_contexts.front().first.action_type) {
case ACTION_TYPE_OPEN:
send_open_unlock();
return;
case ACTION_TYPE_CLOSE:
send_close_unlock();
return;
case ACTION_TYPE_REFRESH:
send_refresh_unlock();
return;
case ACTION_TYPE_SET_SNAP:
send_set_snap_unlock();
return;
case ACTION_TYPE_LOCK:
send_prepare_lock_unlock();
return;
}
ceph_abort();
}
template <typename I>
void ImageState<I>::execute_action_unlock(const Action &action,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_lock));
append_context(action, on_finish);
if (!is_transition_state()) {
execute_next_action_unlock();
} else {
m_lock.unlock();
}
}
template <typename I>
void ImageState<I>::complete_action_unlock(State next_state, int r) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
m_state = next_state;
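  // release the lock before invoking completions; callbacks may re-enter
  // ImageState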
m_lock.unlock();
if (next_state == STATE_CLOSED ||
(next_state == STATE_UNINITIALIZED && r < 0)) {
// the ImageCtx must be deleted outside the scope of its callback threads
auto ctx = new LambdaContext(
[image_ctx=m_image_ctx, contexts=std::move(action_contexts.second)]
(int r) {
delete image_ctx;
for (auto ctx : contexts) {
ctx->complete(r);
}
});
TaskFinisherSingleton::get_singleton(m_image_ctx->cct).queue(ctx, r);
} else {
for (auto ctx : action_contexts.second) {
if (next_state == STATE_OPEN) {
        // the open callback couldn't be wrapped with an async wrapper up
        // front in case the image failed to open
ctx = create_async_context_callback(*m_image_ctx, ctx);
}
ctx->complete(r);
}
m_lock.lock();
if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action_unlock();
} else {
m_lock.unlock();
}
}
}
template <typename I>
void ImageState<I>::send_open_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_OPENING;
Context *ctx = create_context_callback<
ImageState<I>, &ImageState<I>::handle_open>(this);
image::OpenRequest<I> *req = image::OpenRequest<I>::create(
m_image_ctx, m_open_flags, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_open(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
}
m_lock.lock();
complete_action_unlock(r < 0 ? STATE_UNINITIALIZED : STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_close_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_CLOSING;
Context *ctx = create_context_callback<
ImageState<I>, &ImageState<I>::handle_close>(this);
image::CloseRequest<I> *req = image::CloseRequest<I>::create(
m_image_ctx, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_close(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
lderr(cct) << "error occurred while closing image: " << cpp_strerror(r)
<< dendl;
}
m_lock.lock();
complete_action_unlock(STATE_CLOSED, r);
}
template <typename I>
void ImageState<I>::send_refresh_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
m_state = STATE_REFRESHING;
ceph_assert(!m_actions_contexts.empty());
auto &action_context = m_actions_contexts.front().first;
ceph_assert(action_context.action_type == ACTION_TYPE_REFRESH);
Context *ctx = create_async_context_callback(
*m_image_ctx, create_context_callback<
ImageState<I>, &ImageState<I>::handle_refresh>(this));
image::RefreshRequest<I> *req = image::RefreshRequest<I>::create(
*m_image_ctx, false, false, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_refresh(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
m_lock.lock();
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_REFRESH);
ceph_assert(m_last_refresh <= action_contexts.first.refresh_seq);
if (r == -ERESTART) {
ldout(cct, 5) << "incomplete refresh: not updating sequence" << dendl;
r = 0;
} else {
m_last_refresh = action_contexts.first.refresh_seq;
}
complete_action_unlock(STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_set_snap_unlock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = STATE_SETTING_SNAP;
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_SET_SNAP);
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << ": "
<< "snap_id=" << action_contexts.first.snap_id << dendl;
Context *ctx = create_async_context_callback(
*m_image_ctx, create_context_callback<
ImageState<I>, &ImageState<I>::handle_set_snap>(this));
image::SetSnapRequest<I> *req = image::SetSnapRequest<I>::create(
*m_image_ctx, action_contexts.first.snap_id, ctx);
m_lock.unlock();
req->send();
}
template <typename I>
void ImageState<I>::handle_set_snap(int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << " r=" << r << dendl;
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to set snapshot: " << cpp_strerror(r) << dendl;
}
m_lock.lock();
complete_action_unlock(STATE_OPEN, r);
}
template <typename I>
void ImageState<I>::send_prepare_lock_unlock() {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = STATE_PREPARING_LOCK;
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
ceph_assert(action_contexts.first.action_type == ACTION_TYPE_LOCK);
Context *on_ready = action_contexts.first.on_ready;
m_lock.unlock();
if (on_ready == nullptr) {
complete_action_unlock(STATE_OPEN, 0);
return;
}
  // wake up the lock handler now that it's safe to proceed
on_ready->complete(0);
}
template <typename I>
int ImageState<I>::register_quiesce_watcher(QuiesceWatchCtx *watcher,
uint64_t *handle) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_quiesce_watchers->register_watcher(watcher, handle);
ldout(cct, 20) << __func__ << ": handle=" << *handle << dendl;
return 0;
}
template <typename I>
int ImageState<I>::unregister_quiesce_watcher(uint64_t handle) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": handle=" << handle << dendl;
C_SaferCond ctx;
m_quiesce_watchers->unregister_watcher(handle, &ctx);
return ctx.wait();
}
template <typename I>
void ImageState<I>::notify_quiesce(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_quiesce_watchers->notify_quiesce(on_finish);
}
template <typename I>
void ImageState<I>::notify_unquiesce(Context *on_finish) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
m_quiesce_watchers->notify_unquiesce(on_finish);
}
template <typename I>
void ImageState<I>::quiesce_complete(uint64_t handle, int r) {
CephContext *cct = m_image_ctx->cct;
ldout(cct, 20) << __func__ << ": handle=" << handle << " r=" << r << dendl;
m_quiesce_watchers->quiesce_complete(handle, r);
}
} // namespace librbd
template class librbd::ImageState<librbd::ImageCtx>;
ceph-main/src/librbd/ImageState.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_STATE_H
#define CEPH_LIBRBD_IMAGE_STATE_H
#include "include/int_types.h"
#include "common/ceph_mutex.h"
#include <list>
#include <string>
#include <utility>
#include "cls/rbd/cls_rbd_types.h"
class Context;
class RWLock;
namespace librbd {
class QuiesceWatchCtx;
class QuiesceWatchers;
class ImageCtx;
class ImageUpdateWatchers;
class UpdateWatchCtx;
template <typename ImageCtxT = ImageCtx>
class ImageState {
public:
ImageState(ImageCtxT *image_ctx);
~ImageState();
int open(uint64_t flags);
void open(uint64_t flags, Context *on_finish);
int close();
void close(Context *on_finish);
void handle_update_notification();
bool is_refresh_required() const;
int refresh();
int refresh_if_required();
void refresh(Context *on_finish);
void snap_set(uint64_t snap_id, Context *on_finish);
void prepare_lock(Context *on_ready);
void handle_prepare_lock_complete();
int register_update_watcher(UpdateWatchCtx *watcher, uint64_t *handle);
void unregister_update_watcher(uint64_t handle, Context *on_finish);
int unregister_update_watcher(uint64_t handle);
void flush_update_watchers(Context *on_finish);
void shut_down_update_watchers(Context *on_finish);
int register_quiesce_watcher(QuiesceWatchCtx *watcher, uint64_t *handle);
int unregister_quiesce_watcher(uint64_t handle);
void notify_quiesce(Context *on_finish);
void notify_unquiesce(Context *on_finish);
void quiesce_complete(uint64_t handle, int r);
private:
enum State {
STATE_UNINITIALIZED,
STATE_OPEN,
STATE_CLOSED,
STATE_OPENING,
STATE_CLOSING,
STATE_REFRESHING,
STATE_SETTING_SNAP,
STATE_PREPARING_LOCK
};
enum ActionType {
ACTION_TYPE_OPEN,
ACTION_TYPE_CLOSE,
ACTION_TYPE_REFRESH,
ACTION_TYPE_SET_SNAP,
ACTION_TYPE_LOCK
};
struct Action {
ActionType action_type;
uint64_t refresh_seq = 0;
uint64_t snap_id = CEPH_NOSNAP;
Context *on_ready = nullptr;
Action(ActionType action_type) : action_type(action_type) {
}
inline bool operator==(const Action &action) const {
if (action_type != action.action_type) {
return false;
}
switch (action_type) {
case ACTION_TYPE_REFRESH:
return (refresh_seq == action.refresh_seq);
case ACTION_TYPE_SET_SNAP:
return (snap_id == action.snap_id);
case ACTION_TYPE_LOCK:
return false;
default:
return true;
}
}
};
typedef std::list<Context *> Contexts;
typedef std::pair<Action, Contexts> ActionContexts;
typedef std::list<ActionContexts> ActionsContexts;
ImageCtxT *m_image_ctx;
State m_state;
mutable ceph::mutex m_lock;
ActionsContexts m_actions_contexts;
uint64_t m_last_refresh;
uint64_t m_refresh_seq;
ImageUpdateWatchers *m_update_watchers;
QuiesceWatchers *m_quiesce_watchers;
uint64_t m_open_flags;
bool is_transition_state() const;
bool is_closed() const;
const Action *find_pending_refresh() const;
void append_context(const Action &action, Context *context);
void execute_next_action_unlock();
void execute_action_unlock(const Action &action, Context *context);
void complete_action_unlock(State next_state, int r);
void send_open_unlock();
void handle_open(int r);
void send_close_unlock();
void handle_close(int r);
void send_refresh_unlock();
void handle_refresh(int r);
void send_set_snap_unlock();
void handle_set_snap(int r);
void send_prepare_lock_unlock();
};
} // namespace librbd
extern template class librbd::ImageState<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_STATE_H
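// A minimal watcher sketch, not part of the header above. `ExampleWatcher`
// and `example_watch` are hypothetical names; it assumes `ictx` is an open
// librbd::ImageCtx* and that librbd::UpdateWatchCtx (from librbd.hpp)
// declares a pure virtual handle_notify().
struct ExampleWatcher : public librbd::UpdateWatchCtx {
  void handle_notify() override {
    // the image header changed; the next refresh will pick up the change
  }
};
inline void example_watch(librbd::ImageCtx* ictx) {
  ExampleWatcher watcher;
  uint64_t handle = 0;
  ictx->state->register_update_watcher(&watcher, &handle);
  // ... later: the synchronous unregister waits until any in-flight
  // notification for this handle has been delivered
  ictx->state->unregister_update_watcher(handle);
}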
ceph-main/src/librbd/ImageWatcher.cc
// vim: ts=8 sw=2 smarttab
#include "librbd/ImageWatcher.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/TaskFinisher.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/image_watcher/NotifyLockOwner.h"
#include "librbd/io/AioCompletion.h"
#include "include/encoding.h"
#include "common/errno.h"
#include <boost/bind/bind.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageWatcher: "
namespace librbd {
using namespace image_watcher;
using namespace watch_notify;
using util::create_async_context_callback;
using util::create_context_callback;
using util::create_rados_callback;
using ceph::encode;
using ceph::decode;
using namespace boost::placeholders;
static const double RETRY_DELAY_SECONDS = 1.0;
template <typename I>
struct ImageWatcher<I>::C_ProcessPayload : public Context {
ImageWatcher *image_watcher;
uint64_t notify_id;
uint64_t handle;
std::unique_ptr<watch_notify::Payload> payload;
C_ProcessPayload(ImageWatcher *image_watcher, uint64_t notify_id,
uint64_t handle,
std::unique_ptr<watch_notify::Payload> &&payload)
: image_watcher(image_watcher), notify_id(notify_id), handle(handle),
payload(std::move(payload)) {
}
void finish(int r) override {
image_watcher->m_async_op_tracker.start_op();
if (image_watcher->notifications_blocked()) {
// requests are blocked -- just ack the notification
bufferlist bl;
image_watcher->acknowledge_notify(notify_id, handle, bl);
} else {
image_watcher->process_payload(notify_id, handle, payload.get());
}
image_watcher->m_async_op_tracker.finish_op();
}
};
template <typename I>
ImageWatcher<I>::ImageWatcher(I &image_ctx)
: Watcher(image_ctx.md_ctx, image_ctx.op_work_queue, image_ctx.header_oid),
m_image_ctx(image_ctx),
m_task_finisher(new TaskFinisher<Task>(*m_image_ctx.cct)),
m_async_request_lock(ceph::make_shared_mutex(
util::unique_lock_name("librbd::ImageWatcher::m_async_request_lock", this))),
m_owner_client_id_lock(ceph::make_mutex(
util::unique_lock_name("librbd::ImageWatcher::m_owner_client_id_lock", this)))
{
}
template <typename I>
ImageWatcher<I>::~ImageWatcher()
{
delete m_task_finisher;
}
template <typename I>
void ImageWatcher<I>::unregister_watch(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " unregistering image watcher" << dendl;
cancel_async_requests();
// flush the task finisher queue before completing
on_finish = create_async_context_callback(m_task_finisher, on_finish);
on_finish = new LambdaContext([this, on_finish](int r) {
cancel_quiesce_requests();
m_task_finisher->cancel_all();
m_async_op_tracker.wait_for_ops(on_finish);
});
Watcher::unregister_watch(on_finish);
}
template <typename I>
void ImageWatcher<I>::block_notifies(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << dendl;
on_finish = new LambdaContext([this, on_finish](int r) {
cancel_async_requests();
on_finish->complete(r);
});
Watcher::block_notifies(on_finish);
}
template <typename I>
void ImageWatcher<I>::schedule_async_progress(const AsyncRequestId &request,
uint64_t offset, uint64_t total) {
auto ctx = new LambdaContext([this, request, offset, total](int r) {
if (r != -ECANCELED) {
notify_async_progress(request, offset, total);
}
});
m_task_finisher->queue(Task(TASK_CODE_ASYNC_PROGRESS, request), ctx);
}
template <typename I>
int ImageWatcher<I>::notify_async_progress(const AsyncRequestId &request,
uint64_t offset, uint64_t total) {
ldout(m_image_ctx.cct, 20) << this << " remote async request progress: "
<< request << " @ " << offset
<< "/" << total << dendl;
send_notify(new AsyncProgressPayload(request, offset, total));
return 0;
}
template <typename I>
void ImageWatcher<I>::schedule_async_complete(const AsyncRequestId &request,
int r) {
m_async_op_tracker.start_op();
auto ctx = new LambdaContext([this, request, ret_val=r](int r) {
if (r != -ECANCELED) {
notify_async_complete(request, ret_val);
}
});
m_task_finisher->queue(ctx);
}
template <typename I>
void ImageWatcher<I>::notify_async_complete(const AsyncRequestId &request,
int r) {
ldout(m_image_ctx.cct, 20) << this << " remote async request finished: "
<< request << "=" << r << dendl;
send_notify(new AsyncCompletePayload(request, r),
new LambdaContext(boost::bind(&ImageWatcher<I>::handle_async_complete,
this, request, r, _1)));
}
template <typename I>
void ImageWatcher<I>::handle_async_complete(const AsyncRequestId &request,
int r, int ret_val) {
ldout(m_image_ctx.cct, 20) << this << " " << __func__ << ": "
<< "request=" << request << ", r=" << ret_val
<< dendl;
if (ret_val < 0) {
lderr(m_image_ctx.cct) << this << " failed to notify async complete: "
<< cpp_strerror(ret_val) << dendl;
if (ret_val == -ETIMEDOUT && !is_unregistered()) {
schedule_async_complete(request, r);
m_async_op_tracker.finish_op();
return;
}
}
std::unique_lock async_request_locker{m_async_request_lock};
mark_async_request_complete(request, r);
m_async_op_tracker.finish_op();
}
template <typename I>
void ImageWatcher<I>::notify_flatten(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id, new FlattenPayload(async_request_id),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_resize(uint64_t request_id, uint64_t size,
bool allow_shrink,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new ResizePayload(async_request_id, size, allow_shrink),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_create(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
uint64_t flags,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new SnapCreatePayload(async_request_id, snap_namespace,
snap_name, flags),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_rename(uint64_t request_id,
const snapid_t &src_snap_id,
const std::string &dst_snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapRenamePayload(async_request_id, src_snap_id, dst_snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_remove(
uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapRemovePayload(async_request_id, snap_namespace, snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_protect(
uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapProtectPayload(async_request_id, snap_namespace, snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_snap_unprotect(
uint64_t request_id, const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new SnapUnprotectPayload(async_request_id, snap_namespace, snap_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new RebuildObjectMapPayload(async_request_id),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_rename(uint64_t request_id,
const std::string &image_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new RenamePayload(async_request_id, image_name),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_update_features(uint64_t request_id,
uint64_t features, bool enabled,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new UpdateFeaturesPayload(async_request_id, features, enabled),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_migrate(uint64_t request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id, new MigratePayload(async_request_id),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_sparsify(uint64_t request_id, size_t sparse_size,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(async_request_id,
new SparsifyPayload(async_request_id, sparse_size),
prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_header_update(Context *on_finish) {
ldout(m_image_ctx.cct, 10) << this << ": " << __func__ << dendl;
// supports legacy (empty buffer) clients
send_notify(new HeaderUpdatePayload(), on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_header_update(librados::IoCtx &io_ctx,
const std::string &oid) {
// supports legacy (empty buffer) clients
bufferlist bl;
encode(NotifyMessage(new HeaderUpdatePayload()), bl);
io_ctx.notify2(oid, bl, watcher::Notifier::NOTIFY_TIMEOUT, nullptr);
}
template <typename I>
void ImageWatcher<I>::notify_quiesce(uint64_t *request_id,
ProgressContext &prog_ctx,
Context *on_finish) {
*request_id = util::reserve_async_request_id();
ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": request_id="
                             << *request_id << dendl;
AsyncRequestId async_request_id(get_client_id(), *request_id);
auto total_attempts = m_image_ctx.config.template get_val<uint64_t>(
"rbd_quiesce_notification_attempts");
notify_quiesce(async_request_id, 1, total_attempts, prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_quiesce(const AsyncRequestId &async_request_id,
size_t attempt, size_t total_attempts,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(attempt <= total_attempts);
ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": async_request_id="
<< async_request_id << " attempts @ "
<< attempt << "/" << total_attempts << dendl;
auto notify_response = new watcher::NotifyResponse();
auto on_notify = new LambdaContext(
[notify_response=std::unique_ptr<watcher::NotifyResponse>(notify_response),
this, async_request_id, attempt, total_attempts, &prog_ctx,
on_finish](int r) {
prog_ctx.update_progress(attempt, total_attempts);
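      // a timed-out notification is retried until the configured attempt
      // limit (rbd_quiesce_notification_attempts) is exhausted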
if (r == -ETIMEDOUT) {
ldout(m_image_ctx.cct, 10) << this << " " << __func__
<< ": async_request_id=" << async_request_id
<< " timed out" << dendl;
if (attempt < total_attempts) {
notify_quiesce(async_request_id, attempt + 1, total_attempts,
prog_ctx, on_finish);
return;
}
} else if (r == 0) {
for (auto &[client_id, bl] : notify_response->acks) {
if (bl.length() == 0) {
continue;
}
try {
auto iter = bl.cbegin();
ResponseMessage response_message;
using ceph::decode;
decode(response_message, iter);
if (response_message.result != -EOPNOTSUPP) {
r = response_message.result;
}
} catch (const buffer::error &err) {
r = -EINVAL;
}
if (r < 0) {
break;
}
}
}
if (r < 0) {
lderr(m_image_ctx.cct) << this << " failed to notify quiesce: "
<< cpp_strerror(r) << dendl;
}
on_finish->complete(r);
});
bufferlist bl;
encode(NotifyMessage(new QuiescePayload(async_request_id)), bl);
Watcher::send_notify(bl, notify_response, on_notify);
}
template <typename I>
void ImageWatcher<I>::notify_unquiesce(uint64_t request_id, Context *on_finish) {
ldout(m_image_ctx.cct, 10) << this << " " << __func__ << ": request_id="
<< request_id << dendl;
AsyncRequestId async_request_id(get_client_id(), request_id);
send_notify(new UnquiescePayload(async_request_id), on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_metadata_set(uint64_t request_id,
const std::string &key,
const std::string &value,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new MetadataUpdatePayload(async_request_id, key,
std::optional<std::string>{value}),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::notify_metadata_remove(uint64_t request_id,
const std::string &key,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock &&
!m_image_ctx.exclusive_lock->is_lock_owner());
AsyncRequestId async_request_id(get_client_id(), request_id);
notify_async_request(
async_request_id,
new MetadataUpdatePayload(async_request_id, key, std::nullopt),
m_no_op_prog_ctx, on_finish);
}
template <typename I>
void ImageWatcher<I>::schedule_cancel_async_requests() {
auto ctx = new LambdaContext([this](int r) {
if (r != -ECANCELED) {
cancel_async_requests();
}
});
m_task_finisher->queue(TASK_CODE_CANCEL_ASYNC_REQUESTS, ctx);
}
template <typename I>
void ImageWatcher<I>::cancel_async_requests() {
std::unique_lock l{m_async_request_lock};
for (auto iter = m_async_requests.begin(); iter != m_async_requests.end(); ) {
if (iter->second.second == nullptr) {
// Quiesce notify request. Skip.
iter++;
} else {
iter->second.first->complete(-ERESTART);
iter = m_async_requests.erase(iter);
}
}
}
template <typename I>
void ImageWatcher<I>::set_owner_client_id(const ClientId& client_id) {
ceph_assert(ceph_mutex_is_locked(m_owner_client_id_lock));
m_owner_client_id = client_id;
ldout(m_image_ctx.cct, 10) << this << " current lock owner: "
<< m_owner_client_id << dendl;
}
template <typename I>
ClientId ImageWatcher<I>::get_client_id() {
std::shared_lock l{this->m_watch_lock};
return ClientId(m_image_ctx.md_ctx.get_instance_id(), this->m_watch_handle);
}
template <typename I>
void ImageWatcher<I>::notify_acquired_lock() {
ldout(m_image_ctx.cct, 10) << this << " notify acquired lock" << dendl;
ClientId client_id = get_client_id();
{
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
set_owner_client_id(client_id);
}
send_notify(new AcquiredLockPayload(client_id));
}
template <typename I>
void ImageWatcher<I>::notify_released_lock() {
ldout(m_image_ctx.cct, 10) << this << " notify released lock" << dendl;
{
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
set_owner_client_id(ClientId());
}
send_notify(new ReleasedLockPayload(get_client_id()));
}
template <typename I>
void ImageWatcher<I>::schedule_request_lock(bool use_timer, int timer_delay) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
// see notify_request_lock()
if (m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner()) {
return;
}
std::shared_lock watch_locker{this->m_watch_lock};
if (this->is_registered(this->m_watch_lock)) {
ldout(m_image_ctx.cct, 15) << this << " requesting exclusive lock" << dendl;
auto ctx = new LambdaContext([this](int r) {
if (r != -ECANCELED) {
notify_request_lock();
}
});
if (use_timer) {
if (timer_delay < 0) {
timer_delay = RETRY_DELAY_SECONDS;
}
m_task_finisher->add_event_after(TASK_CODE_REQUEST_LOCK,
timer_delay, ctx);
} else {
m_task_finisher->queue(TASK_CODE_REQUEST_LOCK, ctx);
}
}
}
template <typename I>
void ImageWatcher<I>::notify_request_lock() {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::shared_lock image_locker{m_image_ctx.image_lock};
// ExclusiveLock state machine can be dynamically disabled or
// race with task cancel
if (m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner()) {
return;
}
ldout(m_image_ctx.cct, 10) << this << " notify request lock" << dendl;
notify_lock_owner(new RequestLockPayload(get_client_id(), false),
create_context_callback<
ImageWatcher, &ImageWatcher<I>::handle_request_lock>(this));
}
template <typename I>
void ImageWatcher<I>::handle_request_lock(int r) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::shared_lock image_locker{m_image_ctx.image_lock};
// ExclusiveLock state machine cannot transition -- but can be
// dynamically disabled
if (m_image_ctx.exclusive_lock == nullptr) {
return;
}
if (r == -ETIMEDOUT) {
ldout(m_image_ctx.cct, 5) << this << " timed out requesting lock: retrying"
<< dendl;
    // treat this as a dead client -- so retest acquiring the lock
m_image_ctx.exclusive_lock->handle_peer_notification(0);
} else if (r == -EROFS) {
ldout(m_image_ctx.cct, 5) << this << " peer will not release lock" << dendl;
m_image_ctx.exclusive_lock->handle_peer_notification(r);
} else if (r < 0) {
lderr(m_image_ctx.cct) << this << " error requesting lock: "
<< cpp_strerror(r) << dendl;
schedule_request_lock(true);
} else {
// lock owner acked -- but resend if we don't see them release the lock
int retry_timeout = m_image_ctx.cct->_conf.template get_val<int64_t>(
"client_notify_timeout");
ldout(m_image_ctx.cct, 15) << this << " will retry in " << retry_timeout
<< " seconds" << dendl;
schedule_request_lock(true, retry_timeout);
}
}
template <typename I>
void ImageWatcher<I>::notify_lock_owner(Payload *payload, Context *on_finish) {
ceph_assert(on_finish != nullptr);
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bufferlist bl;
encode(NotifyMessage(payload), bl);
NotifyLockOwner *notify_lock_owner = NotifyLockOwner::create(
m_image_ctx, this->m_notifier, std::move(bl), on_finish);
notify_lock_owner->send();
}
template <typename I>
bool ImageWatcher<I>::is_new_request(const AsyncRequestId &id) const {
ceph_assert(ceph_mutex_is_locked(m_async_request_lock));
return m_async_pending.count(id) == 0 && m_async_complete.count(id) == 0;
}
template <typename I>
bool ImageWatcher<I>::mark_async_request_complete(const AsyncRequestId &id,
int r) {
ceph_assert(ceph_mutex_is_locked(m_async_request_lock));
bool found = m_async_pending.erase(id);
auto now = ceph_clock_now();
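  // prune cached results whose retention window has expired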
auto it = m_async_complete_expiration.begin();
while (it != m_async_complete_expiration.end() && it->first < now) {
m_async_complete.erase(it->second);
it = m_async_complete_expiration.erase(it);
}
if (!m_async_complete.insert({id, r}).second) {
for (it = m_async_complete_expiration.begin();
it != m_async_complete_expiration.end(); it++) {
if (it->second == id) {
m_async_complete_expiration.erase(it);
break;
}
}
}
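  // retain this result for 600 seconds so duplicate notifications can be
  // re-acked with the same return code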
auto expiration_time = now;
expiration_time += 600;
m_async_complete_expiration.insert({expiration_time, id});
return found;
}
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id) {
std::unique_lock async_request_locker{m_async_request_lock};
return remove_async_request(id, m_async_request_lock);
}
template <typename I>
Context *ImageWatcher<I>::remove_async_request(const AsyncRequestId &id,
ceph::shared_mutex &lock) {
ceph_assert(ceph_mutex_is_locked(lock));
ldout(m_image_ctx.cct, 20) << __func__ << ": " << id << dendl;
auto it = m_async_requests.find(id);
if (it != m_async_requests.end()) {
Context *on_complete = it->second.first;
m_async_requests.erase(it);
return on_complete;
}
return nullptr;
}
template <typename I>
void ImageWatcher<I>::schedule_async_request_timed_out(const AsyncRequestId &id) {
ldout(m_image_ctx.cct, 20) << "scheduling async request time out: " << id
<< dendl;
auto ctx = new LambdaContext([this, id](int r) {
if (r != -ECANCELED) {
async_request_timed_out(id);
}
});
Task task(TASK_CODE_ASYNC_REQUEST, id);
m_task_finisher->cancel(task);
m_task_finisher->add_event_after(
task, m_image_ctx.config.template get_val<uint64_t>("rbd_request_timed_out_seconds"),
ctx);
}
template <typename I>
void ImageWatcher<I>::async_request_timed_out(const AsyncRequestId &id) {
Context *on_complete = remove_async_request(id);
if (on_complete != nullptr) {
ldout(m_image_ctx.cct, 5) << "async request timed out: " << id << dendl;
m_image_ctx.op_work_queue->queue(on_complete, -ETIMEDOUT);
}
}
template <typename I>
void ImageWatcher<I>::notify_async_request(
const AsyncRequestId &async_request_id, Payload *payload,
ProgressContext& prog_ctx, Context *on_finish) {
ceph_assert(on_finish != nullptr);
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ldout(m_image_ctx.cct, 10) << this << " async request: " << async_request_id
<< dendl;
Context *on_notify = new LambdaContext([this, async_request_id](int r) {
if (r < 0) {
// notification failed -- don't expect updates
Context *on_complete = remove_async_request(async_request_id);
if (on_complete != nullptr) {
on_complete->complete(r);
}
}
});
Context *on_complete = new LambdaContext(
[this, async_request_id, on_finish](int r) {
m_task_finisher->cancel(Task(TASK_CODE_ASYNC_REQUEST, async_request_id));
on_finish->complete(r);
});
{
std::unique_lock async_request_locker{m_async_request_lock};
m_async_requests[async_request_id] = AsyncRequest(on_complete, &prog_ctx);
}
schedule_async_request_timed_out(async_request_id);
notify_lock_owner(payload, on_notify);
}
template <typename I>
int ImageWatcher<I>::prepare_async_request(const AsyncRequestId& async_request_id,
bool* new_request, Context** ctx,
ProgressContext** prog_ctx) {
if (async_request_id.client_id == get_client_id()) {
return -ERESTART;
} else {
std::unique_lock l{m_async_request_lock};
if (is_new_request(async_request_id)) {
m_async_pending.insert(async_request_id);
*new_request = true;
*prog_ctx = new RemoteProgressContext(*this, async_request_id);
*ctx = new RemoteContext(*this, async_request_id, *prog_ctx);
} else {
*new_request = false;
auto it = m_async_complete.find(async_request_id);
if (it != m_async_complete.end()) {
int r = it->second;
// reset complete request expiration time
mark_async_request_complete(async_request_id, r);
return r;
}
}
}
return 0;
}
template <typename I>
Context *ImageWatcher<I>::prepare_quiesce_request(
const AsyncRequestId &request, C_NotifyAck *ack_ctx) {
std::unique_lock locker{m_async_request_lock};
auto timeout = 2 * watcher::Notifier::NOTIFY_TIMEOUT / 1000;
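  // a duplicate notification for an in-flight request replaces the ack
  // context; an already-completed request is re-acked with the cached result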
if (!is_new_request(request)) {
auto it = m_async_requests.find(request);
if (it != m_async_requests.end()) {
delete it->second.first;
it->second.first = ack_ctx;
} else {
auto it = m_async_complete.find(request);
ceph_assert(it != m_async_complete.end());
m_task_finisher->queue(new C_ResponseMessage(ack_ctx), it->second);
// reset complete request expiration time
mark_async_request_complete(request, it->second);
}
locker.unlock();
m_task_finisher->reschedule_event_after(Task(TASK_CODE_QUIESCE, request),
timeout);
return nullptr;
}
m_async_pending.insert(request);
m_async_requests[request] = AsyncRequest(ack_ctx, nullptr);
m_async_op_tracker.start_op();
return new LambdaContext(
[this, request, timeout](int r) {
auto unquiesce_ctx = new LambdaContext(
[this, request](int r) {
if (r == 0) {
ldout(m_image_ctx.cct, 10) << this << " quiesce request "
<< request << " timed out" << dendl;
}
auto on_finish = new LambdaContext(
[this](int r) {
m_async_op_tracker.finish_op();
});
m_image_ctx.state->notify_unquiesce(on_finish);
});
m_task_finisher->add_event_after(Task(TASK_CODE_QUIESCE, request),
timeout, unquiesce_ctx);
std::unique_lock async_request_locker{m_async_request_lock};
mark_async_request_complete(request, r);
auto ctx = remove_async_request(request, m_async_request_lock);
async_request_locker.unlock();
if (ctx != nullptr) {
ctx = new C_ResponseMessage(static_cast<C_NotifyAck *>(ctx));
ctx->complete(r);
} else {
m_task_finisher->cancel(Task(TASK_CODE_QUIESCE, request));
}
});
}
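// handle the matching unquiesce request: refresh the cached completion
// entry and cancel the pending auto-unquiesce timer task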
template <typename I>
void ImageWatcher<I>::prepare_unquiesce_request(const AsyncRequestId &request) {
{
std::unique_lock async_request_locker{m_async_request_lock};
auto it = m_async_complete.find(request);
if (it == m_async_complete.end()) {
ldout(m_image_ctx.cct, 20) << this << " " << request
<< ": not found in complete" << dendl;
return;
}
// reset complete request expiration time
mark_async_request_complete(request, it->second);
}
bool canceled = m_task_finisher->cancel(Task(TASK_CODE_QUIESCE, request));
if (!canceled) {
ldout(m_image_ctx.cct, 20) << this << " " << request
<< ": timer task not found" << dendl;
}
}
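// cancel all in-flight quiesce notify requests; they are identified by
// their null ProgressContext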
template <typename I>
void ImageWatcher<I>::cancel_quiesce_requests() {
std::unique_lock l{m_async_request_lock};
for (auto it = m_async_requests.begin(); it != m_async_requests.end(); ) {
if (it->second.second == nullptr) {
// Quiesce notify request.
mark_async_request_complete(it->first, 0);
delete it->second.first;
it = m_async_requests.erase(it);
} else {
it++;
}
}
}
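// common dispatcher for remote maintenance operations: verify the
// exclusive lock accepts the request, deduplicate it, wrap the execute
// callback with start_op/finish_op bookkeeping, and report whether the
// notify ack can be completed immediately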
template <typename I>
bool ImageWatcher<I>::handle_operation_request(
const AsyncRequestId& async_request_id,
exclusive_lock::OperationRequestType request_type, Operation operation,
std::function<void(ProgressContext &prog_ctx, Context*)> execute,
C_NotifyAck *ack_ctx) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r = 0;
if (m_image_ctx.exclusive_lock->accept_request(request_type, &r)) {
bool new_request;
Context *ctx;
ProgressContext *prog_ctx;
bool complete;
if (async_request_id) {
r = prepare_async_request(async_request_id, &new_request, &ctx,
&prog_ctx);
encode(ResponseMessage(r), ack_ctx->out);
complete = true;
} else {
new_request = true;
ctx = new C_ResponseMessage(ack_ctx);
prog_ctx = &m_no_op_prog_ctx;
complete = false;
}
if (r == 0 && new_request) {
ctx = new LambdaContext(
[this, operation, ctx](int r) {
m_image_ctx.operations->finish_op(operation, r);
ctx->complete(r);
});
ctx = new LambdaContext(
[this, execute, prog_ctx, ctx](int r) {
if (r < 0) {
ctx->complete(r);
return;
}
std::shared_lock l{m_image_ctx.owner_lock};
execute(*prog_ctx, ctx);
});
m_image_ctx.operations->start_op(operation, ctx);
}
return complete;
} else if (r < 0) {
encode(ResponseMessage(r), ack_ctx->out);
}
}
return true;
}
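// the handle_payload() overloads return true when the notification can
// be acked immediately and false when the ack is deferred to a callback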
template <typename I>
bool ImageWatcher<I>::handle_payload(const HeaderUpdatePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " image header updated" << dendl;
m_image_ctx.state->handle_update_notification();
m_image_ctx.perfcounter->inc(l_librbd_notify);
if (ack_ctx != nullptr) {
m_image_ctx.state->flush_update_watchers(new C_ResponseMessage(ack_ctx));
return false;
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AcquiredLockPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " image exclusively locked announcement"
<< dendl;
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
if (payload.client_id == m_owner_client_id) {
cancel_async_requests = false;
}
set_owner_client_id(payload.client_id);
}
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
// potentially wake up the exclusive lock state machine now that
// a lock owner has advertised itself
m_image_ctx.exclusive_lock->handle_peer_notification(0);
}
if (cancel_async_requests &&
(m_image_ctx.exclusive_lock == nullptr ||
!m_image_ctx.exclusive_lock->is_lock_owner())) {
schedule_cancel_async_requests();
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const ReleasedLockPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " exclusive lock released" << dendl;
bool cancel_async_requests = true;
if (payload.client_id.is_valid()) {
std::lock_guard l{m_owner_client_id_lock};
if (payload.client_id != m_owner_client_id) {
ldout(m_image_ctx.cct, 10) << this << " unexpected owner: "
<< payload.client_id << " != "
<< m_owner_client_id << dendl;
cancel_async_requests = false;
} else {
set_owner_client_id(ClientId());
}
}
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (cancel_async_requests &&
(m_image_ctx.exclusive_lock == nullptr ||
!m_image_ctx.exclusive_lock->is_lock_owner())) {
schedule_cancel_async_requests();
}
// alert the exclusive lock state machine that the lock is available
if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.exclusive_lock->is_lock_owner()) {
m_task_finisher->cancel(TASK_CODE_REQUEST_LOCK);
m_image_ctx.exclusive_lock->handle_peer_notification(0);
}
return true;
}
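// a peer is asking us to release the exclusive lock: if we own the lock
// and can accept the request, delegate the decision to the image's
// exclusive lock policy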
template <typename I>
bool ImageWatcher<I>::handle_payload(const RequestLockPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " exclusive lock requested" << dendl;
if (payload.client_id == get_client_id()) {
return true;
}
std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr &&
m_image_ctx.exclusive_lock->is_lock_owner()) {
int r = 0;
bool accept_request = m_image_ctx.exclusive_lock->accept_request(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, &r);
if (accept_request) {
ceph_assert(r == 0);
std::lock_guard owner_client_id_locker{m_owner_client_id_lock};
if (!m_owner_client_id.is_valid()) {
return true;
}
ldout(m_image_ctx.cct, 10) << this << " queuing release of exclusive lock"
<< dendl;
r = m_image_ctx.get_exclusive_lock_policy()->lock_requested(
payload.force);
}
encode(ResponseMessage(r), ack_ctx->out);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncProgressPayload &payload,
C_NotifyAck *ack_ctx) {
std::shared_lock l{m_async_request_lock};
std::map<AsyncRequestId, AsyncRequest>::iterator req_it =
m_async_requests.find(payload.async_request_id);
if (req_it != m_async_requests.end()) {
ldout(m_image_ctx.cct, 20) << this << " request progress: "
<< payload.async_request_id << " @ "
<< payload.offset << "/" << payload.total
<< dendl;
schedule_async_request_timed_out(payload.async_request_id);
req_it->second.second->update_progress(payload.offset, payload.total);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const AsyncCompletePayload &payload,
C_NotifyAck *ack_ctx) {
Context *on_complete = remove_async_request(payload.async_request_id);
if (on_complete != nullptr) {
ldout(m_image_ctx.cct, 10) << this << " request finished: "
<< payload.async_request_id << "="
<< payload.result << dendl;
on_complete->complete(payload.result);
}
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const FlattenPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote flatten request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_FLATTEN, std::bind(&Operations<I>::execute_flatten,
m_image_ctx.operations,
std::placeholders::_1,
std::placeholders::_2),
ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const ResizePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote resize request: "
<< payload.async_request_id << " "
<< payload.size << " "
<< payload.allow_shrink << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_RESIZE, std::bind(&Operations<I>::execute_resize,
m_image_ctx.operations, payload.size,
payload.allow_shrink, std::placeholders::_1,
std::placeholders::_2, 0), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapCreatePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_create request: "
<< payload.async_request_id << " "
<< payload.snap_namespace << " "
<< payload.snap_name << " "
<< payload.flags << dendl;
auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
// rbd-mirror needs to accept forced promotion orphan snap create requests
auto mirror_ns = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&payload.snap_namespace);
if (mirror_ns != nullptr && mirror_ns->is_orphan()) {
request_type = exclusive_lock::OPERATION_REQUEST_TYPE_FORCE_PROMOTION;
}
return handle_operation_request(
payload.async_request_id, request_type,
OPERATION_SNAP_CREATE, std::bind(&Operations<I>::execute_snap_create,
m_image_ctx.operations,
payload.snap_namespace,
payload.snap_name, std::placeholders::_2,
0, payload.flags, std::placeholders::_1),
ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRenamePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_rename request: "
<< payload.async_request_id << " "
<< payload.snap_id << " to "
<< payload.snap_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SNAP_RENAME, std::bind(&Operations<I>::execute_snap_rename,
m_image_ctx.operations, payload.snap_id,
payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapRemovePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_remove request: "
<< payload.snap_name << dendl;
auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
if (cls::rbd::get_snap_namespace_type(payload.snap_namespace) ==
cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
request_type = exclusive_lock::OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE;
}
return handle_operation_request(
payload.async_request_id, request_type, OPERATION_SNAP_REMOVE,
std::bind(&Operations<I>::execute_snap_remove, m_image_ctx.operations,
payload.snap_namespace, payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapProtectPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_protect request: "
<< payload.async_request_id << " "
<< payload.snap_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SNAP_PROTECT, std::bind(&Operations<I>::execute_snap_protect,
m_image_ctx.operations,
payload.snap_namespace,
payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SnapUnprotectPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote snap_unprotect request: "
<< payload.async_request_id << " "
<< payload.snap_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SNAP_UNPROTECT, std::bind(&Operations<I>::execute_snap_unprotect,
m_image_ctx.operations,
payload.snap_namespace,
payload.snap_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const RebuildObjectMapPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote rebuild object map request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_REBUILD_OBJECT_MAP,
std::bind(&Operations<I>::execute_rebuild_object_map,
m_image_ctx.operations, std::placeholders::_1,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const RenamePayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote rename request: "
<< payload.async_request_id << " "
<< payload.image_name << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_RENAME, std::bind(&Operations<I>::execute_rename,
m_image_ctx.operations, payload.image_name,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const UpdateFeaturesPayload& payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote update_features request: "
<< payload.async_request_id << " "
<< payload.features << " "
<< (payload.enabled ? "enabled" : "disabled")
<< dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_UPDATE_FEATURES,
std::bind(&Operations<I>::execute_update_features, m_image_ctx.operations,
payload.features, payload.enabled, std::placeholders::_2, 0),
ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const MigratePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote migrate request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_MIGRATE, std::bind(&Operations<I>::execute_migrate,
m_image_ctx.operations,
std::placeholders::_1,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const SparsifyPayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " remote sparsify request: "
<< payload.async_request_id << dendl;
return handle_operation_request(
payload.async_request_id, exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_SPARSIFY, std::bind(&Operations<I>::execute_sparsify,
m_image_ctx.operations,
payload.sparse_size, std::placeholders::_1,
std::placeholders::_2), ack_ctx);
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const MetadataUpdatePayload &payload,
C_NotifyAck *ack_ctx) {
if (payload.value) {
ldout(m_image_ctx.cct, 10) << this << " remote metadata_set request: "
<< payload.async_request_id << " "
<< "key=" << payload.key << ", value="
<< *payload.value << dendl;
return handle_operation_request(
payload.async_request_id,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_METADATA_UPDATE,
std::bind(&Operations<I>::execute_metadata_set,
m_image_ctx.operations, payload.key, *payload.value,
std::placeholders::_2),
ack_ctx);
} else {
ldout(m_image_ctx.cct, 10) << this << " remote metadata_remove request: "
<< payload.async_request_id << " "
<< "key=" << payload.key << dendl;
return handle_operation_request(
payload.async_request_id,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
OPERATION_METADATA_UPDATE,
std::bind(&Operations<I>::execute_metadata_remove,
m_image_ctx.operations, payload.key, std::placeholders::_2),
ack_ctx);
}
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const QuiescePayload &payload,
C_NotifyAck *ack_ctx) {
auto on_finish = prepare_quiesce_request(payload.async_request_id, ack_ctx);
if (on_finish == nullptr) {
ldout(m_image_ctx.cct, 10) << this << " duplicate quiesce request: "
<< payload.async_request_id << dendl;
return false;
}
ldout(m_image_ctx.cct, 10) << this << " quiesce request: "
<< payload.async_request_id << dendl;
m_image_ctx.state->notify_quiesce(on_finish);
return false;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const UnquiescePayload &payload,
C_NotifyAck *ack_ctx) {
ldout(m_image_ctx.cct, 10) << this << " unquiesce request: "
<< payload.async_request_id << dendl;
prepare_unquiesce_request(payload.async_request_id);
return true;
}
template <typename I>
bool ImageWatcher<I>::handle_payload(const UnknownPayload &payload,
C_NotifyAck *ack_ctx) {
std::shared_lock l{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
int r;
if (m_image_ctx.exclusive_lock->accept_request(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, &r) || r < 0) {
encode(ResponseMessage(-EOPNOTSUPP), ack_ctx->out);
}
}
return true;
}
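// dispatch a decoded notification payload to the matching handler and
// ack it immediately if the handler reports completion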
template <typename I>
void ImageWatcher<I>::process_payload(uint64_t notify_id, uint64_t handle,
Payload *payload) {
auto ctx = new Watcher::C_NotifyAck(this, notify_id, handle);
bool complete;
switch (payload->get_notify_op()) {
case NOTIFY_OP_ACQUIRED_LOCK:
complete = handle_payload(*(static_cast<AcquiredLockPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_RELEASED_LOCK:
complete = handle_payload(*(static_cast<ReleasedLockPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_REQUEST_LOCK:
complete = handle_payload(*(static_cast<RequestLockPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_HEADER_UPDATE:
complete = handle_payload(*(static_cast<HeaderUpdatePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_ASYNC_PROGRESS:
complete = handle_payload(*(static_cast<AsyncProgressPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_ASYNC_COMPLETE:
complete = handle_payload(*(static_cast<AsyncCompletePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_FLATTEN:
complete = handle_payload(*(static_cast<FlattenPayload *>(payload)), ctx);
break;
case NOTIFY_OP_RESIZE:
complete = handle_payload(*(static_cast<ResizePayload *>(payload)), ctx);
break;
case NOTIFY_OP_SNAP_CREATE:
complete = handle_payload(*(static_cast<SnapCreatePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_REMOVE:
complete = handle_payload(*(static_cast<SnapRemovePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_RENAME:
complete = handle_payload(*(static_cast<SnapRenamePayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_PROTECT:
complete = handle_payload(*(static_cast<SnapProtectPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_SNAP_UNPROTECT:
complete = handle_payload(*(static_cast<SnapUnprotectPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_REBUILD_OBJECT_MAP:
complete = handle_payload(*(static_cast<RebuildObjectMapPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_RENAME:
complete = handle_payload(*(static_cast<RenamePayload *>(payload)), ctx);
break;
case NOTIFY_OP_UPDATE_FEATURES:
complete = handle_payload(*(static_cast<UpdateFeaturesPayload *>(payload)),
ctx);
break;
case NOTIFY_OP_MIGRATE:
complete = handle_payload(*(static_cast<MigratePayload *>(payload)), ctx);
break;
case NOTIFY_OP_SPARSIFY:
complete = handle_payload(*(static_cast<SparsifyPayload *>(payload)), ctx);
break;
case NOTIFY_OP_QUIESCE:
complete = handle_payload(*(static_cast<QuiescePayload *>(payload)), ctx);
break;
case NOTIFY_OP_UNQUIESCE:
complete = handle_payload(*(static_cast<UnquiescePayload *>(payload)), ctx);
break;
case NOTIFY_OP_METADATA_UPDATE:
complete = handle_payload(*(static_cast<MetadataUpdatePayload *>(payload)), ctx);
break;
default:
ceph_assert(payload->get_notify_op() == static_cast<NotifyOp>(-1));
complete = handle_payload(*(static_cast<UnknownPayload *>(payload)), ctx);
}
if (complete) {
ctx->complete(0);
}
}
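// watch notification entry point: an empty payload is a legacy header
// update; otherwise decode the message and refresh the image first if
// the payload requires up-to-date state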
template <typename I>
void ImageWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) {
NotifyMessage notify_message;
if (bl.length() == 0) {
// legacy notification for header updates
notify_message = NotifyMessage(new HeaderUpdatePayload());
} else {
try {
auto iter = bl.cbegin();
decode(notify_message, iter);
} catch (const buffer::error &err) {
lderr(m_image_ctx.cct) << this << " error decoding image notification: "
<< err.what() << dendl;
return;
}
}
// if an image refresh is required, refresh before processing the request
if (notify_message.check_for_refresh() &&
m_image_ctx.state->is_refresh_required()) {
m_image_ctx.state->refresh(
new C_ProcessPayload(this, notify_id, handle,
std::move(notify_message.payload)));
} else {
process_payload(notify_id, handle, notify_message.payload.get());
}
}
template <typename I>
void ImageWatcher<I>::handle_error(uint64_t handle, int err) {
lderr(m_image_ctx.cct) << this << " image watch failed: " << handle << ", "
<< cpp_strerror(err) << dendl;
{
std::lock_guard l{m_owner_client_id_lock};
set_owner_client_id(ClientId());
}
Watcher::handle_error(handle, err);
}
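// after the watch is re-established, update the exclusive lock cookie
// and treat the image header as potentially stale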
template <typename I>
void ImageWatcher<I>::handle_rewatch_complete(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": r=" << r << dendl;
{
std::shared_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr) {
// update the lock cookie with the new watch handle
m_image_ctx.exclusive_lock->reacquire_lock(nullptr);
}
}
// image might have been updated while we didn't have an active watch
handle_payload(HeaderUpdatePayload(), nullptr);
}
template <typename I>
void ImageWatcher<I>::send_notify(Payload *payload, Context *ctx) {
bufferlist bl;
encode(NotifyMessage(payload), bl);
Watcher::send_notify(bl, nullptr, ctx);
}
template <typename I>
void ImageWatcher<I>::RemoteContext::finish(int r) {
m_image_watcher.schedule_async_complete(m_async_request_id, r);
}
template <typename I>
void ImageWatcher<I>::C_ResponseMessage::finish(int r) {
CephContext *cct = notify_ack->cct;
ldout(cct, 10) << this << " C_ResponseMessage: r=" << r << dendl;
encode(ResponseMessage(r), notify_ack->out);
notify_ack->complete(0);
}
} // namespace librbd
template class librbd::ImageWatcher<librbd::ImageCtx>;
/* ceph-main/src/librbd/ImageWatcher.h */
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IMAGE_WATCHER_H
#define CEPH_LIBRBD_IMAGE_WATCHER_H
#include "cls/rbd/cls_rbd_types.h"
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "include/Context.h"
#include "include/rbd/librbd.hpp"
#include "librbd/Operations.h"
#include "librbd/Watcher.h"
#include "librbd/WatchNotifyTypes.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/internal.h"
#include <functional>
#include <set>
#include <string>
#include <utility>
class entity_name_t;
namespace librbd {
class ImageCtx;
template <typename> class TaskFinisher;
template <typename ImageCtxT = ImageCtx>
class ImageWatcher : public Watcher {
public:
ImageWatcher(ImageCtxT& image_ctx);
~ImageWatcher() override;
void unregister_watch(Context *on_finish) override;
void block_notifies(Context *on_finish) override;
void notify_flatten(uint64_t request_id, ProgressContext &prog_ctx,
Context *on_finish);
void notify_resize(uint64_t request_id, uint64_t size, bool allow_shrink,
ProgressContext &prog_ctx, Context *on_finish);
void notify_snap_create(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
uint64_t flags,
ProgressContext &prog_ctx,
Context *on_finish);
void notify_snap_rename(uint64_t request_id,
const snapid_t &src_snap_id,
const std::string &dst_snap_name,
Context *on_finish);
void notify_snap_remove(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish);
void notify_snap_protect(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish);
void notify_snap_unprotect(uint64_t request_id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish);
void notify_rebuild_object_map(uint64_t request_id,
ProgressContext &prog_ctx, Context *on_finish);
void notify_rename(uint64_t request_id,
const std::string &image_name, Context *on_finish);
void notify_update_features(uint64_t request_id,
uint64_t features, bool enabled,
Context *on_finish);
void notify_migrate(uint64_t request_id, ProgressContext &prog_ctx,
Context *on_finish);
void notify_sparsify(uint64_t request_id, size_t sparse_size,
ProgressContext &prog_ctx, Context *on_finish);
void notify_acquired_lock();
void notify_released_lock();
void notify_request_lock();
void notify_header_update(Context *on_finish);
static void notify_header_update(librados::IoCtx &io_ctx,
const std::string &oid);
void notify_quiesce(uint64_t *request_id, ProgressContext &prog_ctx,
Context *on_finish);
void notify_unquiesce(uint64_t request_id, Context *on_finish);
void notify_metadata_set(uint64_t request_id,
const std::string &key, const std::string &value,
Context *on_finish);
void notify_metadata_remove(uint64_t request_id,
const std::string &key, Context *on_finish);
private:
enum TaskCode {
TASK_CODE_REQUEST_LOCK,
TASK_CODE_CANCEL_ASYNC_REQUESTS,
TASK_CODE_REREGISTER_WATCH,
TASK_CODE_ASYNC_REQUEST,
TASK_CODE_ASYNC_PROGRESS,
TASK_CODE_QUIESCE,
};
typedef std::pair<Context *, ProgressContext *> AsyncRequest;
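// a Task keys a deferred/timer operation; async request, progress and
// quiesce tasks are further distinguished by their request id (see
// operator< below)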
class Task {
public:
Task(TaskCode task_code) : m_task_code(task_code) {}
Task(TaskCode task_code, const watch_notify::AsyncRequestId &id)
: m_task_code(task_code), m_async_request_id(id) {}
inline bool operator<(const Task& rhs) const {
if (m_task_code != rhs.m_task_code) {
return m_task_code < rhs.m_task_code;
} else if ((m_task_code == TASK_CODE_ASYNC_REQUEST ||
m_task_code == TASK_CODE_ASYNC_PROGRESS ||
m_task_code == TASK_CODE_QUIESCE) &&
m_async_request_id != rhs.m_async_request_id) {
return m_async_request_id < rhs.m_async_request_id;
}
return false;
}
private:
TaskCode m_task_code;
watch_notify::AsyncRequestId m_async_request_id;
};
class RemoteProgressContext : public ProgressContext {
public:
RemoteProgressContext(ImageWatcher &image_watcher,
const watch_notify::AsyncRequestId &id)
: m_image_watcher(image_watcher), m_async_request_id(id)
{
}
int update_progress(uint64_t offset, uint64_t total) override {
m_image_watcher.schedule_async_progress(m_async_request_id, offset,
total);
return 0;
}
private:
ImageWatcher &m_image_watcher;
watch_notify::AsyncRequestId m_async_request_id;
};
class RemoteContext : public Context {
public:
RemoteContext(ImageWatcher &image_watcher,
const watch_notify::AsyncRequestId &id,
ProgressContext *prog_ctx)
: m_image_watcher(image_watcher), m_async_request_id(id),
m_prog_ctx(prog_ctx)
{
}
~RemoteContext() override {
delete m_prog_ctx;
}
void finish(int r) override;
private:
ImageWatcher &m_image_watcher;
watch_notify::AsyncRequestId m_async_request_id;
ProgressContext *m_prog_ctx;
};
struct C_ProcessPayload;
struct C_ResponseMessage : public Context {
C_NotifyAck *notify_ack;
C_ResponseMessage(C_NotifyAck *notify_ack) : notify_ack(notify_ack) {
}
void finish(int r) override;
};
ImageCtxT &m_image_ctx;
TaskFinisher<Task> *m_task_finisher;
ceph::shared_mutex m_async_request_lock;
std::map<watch_notify::AsyncRequestId, AsyncRequest> m_async_requests;
std::set<watch_notify::AsyncRequestId> m_async_pending;
std::map<watch_notify::AsyncRequestId, int> m_async_complete;
std::set<std::pair<utime_t,
watch_notify::AsyncRequestId>> m_async_complete_expiration;
ceph::mutex m_owner_client_id_lock;
watch_notify::ClientId m_owner_client_id;
AsyncOpTracker m_async_op_tracker;
NoOpProgressContext m_no_op_prog_ctx;
void handle_register_watch(int r);
void schedule_cancel_async_requests();
void cancel_async_requests();
void set_owner_client_id(const watch_notify::ClientId &client_id);
watch_notify::ClientId get_client_id();
void handle_request_lock(int r);
void schedule_request_lock(bool use_timer, int timer_delay = -1);
void notify_lock_owner(watch_notify::Payload *payload, Context *on_finish);
bool is_new_request(const watch_notify::AsyncRequestId &id) const;
bool mark_async_request_complete(const watch_notify::AsyncRequestId &id,
int r);
Context *remove_async_request(const watch_notify::AsyncRequestId &id);
Context *remove_async_request(const watch_notify::AsyncRequestId &id,
ceph::shared_mutex &lock);
void schedule_async_request_timed_out(const watch_notify::AsyncRequestId &id);
void async_request_timed_out(const watch_notify::AsyncRequestId &id);
void notify_async_request(const watch_notify::AsyncRequestId &id,
watch_notify::Payload *payload,
ProgressContext& prog_ctx,
Context *on_finish);
void schedule_async_progress(const watch_notify::AsyncRequestId &id,
uint64_t offset, uint64_t total);
int notify_async_progress(const watch_notify::AsyncRequestId &id,
uint64_t offset, uint64_t total);
void schedule_async_complete(const watch_notify::AsyncRequestId &id, int r);
void notify_async_complete(const watch_notify::AsyncRequestId &id, int r);
void handle_async_complete(const watch_notify::AsyncRequestId &request, int r,
int ret_val);
int prepare_async_request(const watch_notify::AsyncRequestId& id,
bool* new_request, Context** ctx,
ProgressContext** prog_ctx);
Context *prepare_quiesce_request(const watch_notify::AsyncRequestId &request,
C_NotifyAck *ack_ctx);
void prepare_unquiesce_request(const watch_notify::AsyncRequestId &request);
void cancel_quiesce_requests();
void notify_quiesce(const watch_notify::AsyncRequestId &async_request_id,
size_t attempt, size_t total_attempts,
ProgressContext &prog_ctx, Context *on_finish);
bool handle_operation_request(
const watch_notify::AsyncRequestId& async_request_id,
exclusive_lock::OperationRequestType request_type, Operation operation,
std::function<void(ProgressContext &prog_ctx, Context*)> execute,
C_NotifyAck *ack_ctx);
bool handle_payload(const watch_notify::HeaderUpdatePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::AcquiredLockPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::ReleasedLockPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::RequestLockPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::AsyncProgressPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::AsyncCompletePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::FlattenPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::ResizePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapCreatePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapRenamePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapRemovePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapProtectPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SnapUnprotectPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::RebuildObjectMapPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::RenamePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::UpdateFeaturesPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::MigratePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::SparsifyPayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::QuiescePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::UnquiescePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::MetadataUpdatePayload& payload,
C_NotifyAck *ctx);
bool handle_payload(const watch_notify::UnknownPayload& payload,
C_NotifyAck *ctx);
void process_payload(uint64_t notify_id, uint64_t handle,
watch_notify::Payload *payload);
void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) override;
void handle_error(uint64_t cookie, int err) override;
void handle_rewatch_complete(int r) override;
void send_notify(watch_notify::Payload *payload, Context *ctx = nullptr);
};
} // namespace librbd
extern template class librbd::ImageWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_IMAGE_WATCHER_H
/* ceph-main/src/librbd/Journal.cc */
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/Journal.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/Journaler.h"
#include "journal/Policy.h"
#include "journal/ReplayEntry.h"
#include "journal/Settings.h"
#include "journal/Utils.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/journal/CreateRequest.h"
#include "librbd/journal/DemoteRequest.h"
#include "librbd/journal/ObjectDispatch.h"
#include "librbd/journal/OpenRequest.h"
#include "librbd/journal/RemoveRequest.h"
#include "librbd/journal/ResetRequest.h"
#include "librbd/journal/Replay.h"
#include "librbd/journal/PromoteRequest.h"
#include <boost/scope_exit.hpp>
#include <utility>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Journal: "
namespace librbd {
using util::create_async_context_callback;
using util::create_context_callback;
using journal::util::C_DecodeTag;
using journal::util::C_DecodeTags;
namespace {
// TODO: once journaler is 100% async and converted to ASIO, remove separate
// threads and reuse librbd's AsioEngine
class ThreadPoolSingleton : public ThreadPool {
public:
ContextWQ *work_queue;
explicit ThreadPoolSingleton(CephContext *cct)
: ThreadPool(cct, "librbd::Journal", "tp_librbd_journ", 1),
work_queue(new ContextWQ("librbd::journal::work_queue",
ceph::make_timespan(
cct->_conf.get_val<uint64_t>("rbd_op_thread_timeout")),
this)) {
start();
}
~ThreadPoolSingleton() override {
work_queue->drain();
delete work_queue;
stop();
}
};
template <typename I>
struct C_IsTagOwner : public Context {
librados::IoCtx &io_ctx;
std::string image_id;
bool *is_tag_owner;
asio::ContextWQ *op_work_queue;
Context *on_finish;
CephContext *cct = nullptr;
Journaler *journaler;
cls::journal::Client client;
journal::ImageClientMeta client_meta;
uint64_t tag_tid = 0;
journal::TagData tag_data;
C_IsTagOwner(librados::IoCtx &io_ctx, const std::string &image_id,
bool *is_tag_owner, asio::ContextWQ *op_work_queue,
Context *on_finish)
: io_ctx(io_ctx), image_id(image_id), is_tag_owner(is_tag_owner),
op_work_queue(op_work_queue), on_finish(on_finish),
cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
journaler(new Journaler(io_ctx, image_id, Journal<>::IMAGE_CLIENT_ID,
{}, nullptr)) {
}
void finish(int r) override {
ldout(cct, 20) << this << " C_IsTagOwner::" << __func__ << ": r=" << r
<< dendl;
if (r < 0) {
lderr(cct) << this << " C_IsTagOwner::" << __func__ << ": "
<< "failed to get tag owner: " << cpp_strerror(r) << dendl;
} else {
*is_tag_owner = (tag_data.mirror_uuid == Journal<>::LOCAL_MIRROR_UUID);
}
Journaler *journaler = this->journaler;
Context *on_finish = this->on_finish;
auto ctx = new LambdaContext(
[journaler, on_finish](int r) {
on_finish->complete(r);
delete journaler;
});
op_work_queue->queue(ctx, r);
}
};
struct C_GetTagOwner : public Context {
std::string *mirror_uuid;
Context *on_finish;
Journaler journaler;
cls::journal::Client client;
journal::ImageClientMeta client_meta;
uint64_t tag_tid = 0;
journal::TagData tag_data;
C_GetTagOwner(librados::IoCtx &io_ctx, const std::string &image_id,
std::string *mirror_uuid, Context *on_finish)
: mirror_uuid(mirror_uuid), on_finish(on_finish),
journaler(io_ctx, image_id, Journal<>::IMAGE_CLIENT_ID, {}, nullptr) {
}
virtual void finish(int r) {
if (r >= 0) {
*mirror_uuid = tag_data.mirror_uuid;
}
on_finish->complete(r);
}
};
template <typename J>
struct GetTagsRequest {
CephContext *cct;
J *journaler;
cls::journal::Client *client;
journal::ImageClientMeta *client_meta;
uint64_t *tag_tid;
journal::TagData *tag_data;
Context *on_finish;
ceph::mutex lock = ceph::make_mutex("lock");
GetTagsRequest(CephContext *cct, J *journaler, cls::journal::Client *client,
journal::ImageClientMeta *client_meta, uint64_t *tag_tid,
journal::TagData *tag_data, Context *on_finish)
: cct(cct), journaler(journaler), client(client), client_meta(client_meta),
tag_tid(tag_tid), tag_data(tag_data), on_finish(on_finish) {
}
/**
* @verbatim
*
* <start>
* |
* v
* GET_CLIENT * * * * * * * * * * * *
* | *
* v *
* GET_TAGS * * * * * * * * * * * * * (error)
* | *
* v *
* <finish> * * * * * * * * * * * * *
*
* @endverbatim
*/
void send() {
send_get_client();
}
void send_get_client() {
ldout(cct, 20) << __func__ << dendl;
auto ctx = new LambdaContext(
[this](int r) {
handle_get_client(r);
});
journaler->get_client(Journal<ImageCtx>::IMAGE_CLIENT_ID, client, ctx);
}
void handle_get_client(int r) {
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r < 0) {
complete(r);
return;
}
librbd::journal::ClientData client_data;
auto bl_it = client->data.cbegin();
try {
decode(client_data, bl_it);
} catch (const buffer::error &err) {
lderr(cct) << this << " OpenJournalerRequest::" << __func__ << ": "
<< "failed to decode client data" << dendl;
complete(-EBADMSG);
return;
}
journal::ImageClientMeta *image_client_meta =
boost::get<journal::ImageClientMeta>(&client_data.client_meta);
if (image_client_meta == nullptr) {
lderr(cct) << this << " OpenJournalerRequest::" << __func__ << ": "
<< "failed to get client meta" << dendl;
complete(-EINVAL);
return;
}
*client_meta = *image_client_meta;
send_get_tags();
}
void send_get_tags() {
ldout(cct, 20) << __func__ << dendl;
auto ctx = new LambdaContext(
[this](int r) {
handle_get_tags(r);
});
C_DecodeTags *tags_ctx = new C_DecodeTags(cct, &lock, tag_tid, tag_data,
ctx);
journaler->get_tags(client_meta->tag_class, &tags_ctx->tags, tags_ctx);
}
void handle_get_tags(int r) {
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
complete(r);
}
void complete(int r) {
on_finish->complete(r);
delete this;
}
};
template <typename J>
void get_tags(CephContext *cct, J *journaler,
cls::journal::Client *client,
journal::ImageClientMeta *client_meta,
uint64_t *tag_tid, journal::TagData *tag_data,
Context *on_finish) {
ldout(cct, 20) << __func__ << dendl;
GetTagsRequest<J> *req =
new GetTagsRequest<J>(cct, journaler, client, client_meta, tag_tid,
tag_data, on_finish);
req->send();
}
template <typename J>
int allocate_journaler_tag(CephContext *cct, J *journaler,
uint64_t tag_class,
const journal::TagPredecessor &predecessor,
const std::string &mirror_uuid,
cls::journal::Tag *new_tag) {
journal::TagData tag_data;
tag_data.mirror_uuid = mirror_uuid;
tag_data.predecessor = predecessor;
bufferlist tag_bl;
encode(tag_data, tag_bl);
C_SaferCond allocate_tag_ctx;
journaler->allocate_tag(tag_class, tag_bl, new_tag, &allocate_tag_ctx);
int r = allocate_tag_ctx.wait();
if (r < 0) {
lderr(cct) << __func__ << ": "
<< "failed to allocate tag: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
} // anonymous namespace
// client id for local image
template <typename I>
const std::string Journal<I>::IMAGE_CLIENT_ID("");
// mirror uuid to use for local images
template <typename I>
const std::string Journal<I>::LOCAL_MIRROR_UUID("");
// mirror uuid to use for orphaned (demoted) images
template <typename I>
const std::string Journal<I>::ORPHAN_MIRROR_UUID("<orphan>");
template <typename I>
std::ostream &operator<<(std::ostream &os,
const typename Journal<I>::State &state) {
switch (state) {
case Journal<I>::STATE_UNINITIALIZED:
os << "Uninitialized";
break;
case Journal<I>::STATE_INITIALIZING:
os << "Initializing";
break;
case Journal<I>::STATE_REPLAYING:
os << "Replaying";
break;
case Journal<I>::STATE_FLUSHING_RESTART:
os << "FlushingRestart";
break;
case Journal<I>::STATE_RESTARTING_REPLAY:
os << "RestartingReplay";
break;
case Journal<I>::STATE_FLUSHING_REPLAY:
os << "FlushingReplay";
break;
case Journal<I>::STATE_READY:
os << "Ready";
break;
case Journal<I>::STATE_STOPPING:
os << "Stopping";
break;
case Journal<I>::STATE_CLOSING:
os << "Closing";
break;
case Journal<I>::STATE_CLOSED:
os << "Closed";
break;
default:
os << "Unknown (" << static_cast<uint32_t>(state) << ")";
break;
}
return os;
}
template <typename I>
void Journal<I>::MetadataListener::handle_update(::journal::JournalMetadata *) {
auto ctx = new LambdaContext([this](int r) {
journal->handle_metadata_updated();
});
journal->m_work_queue->queue(ctx, 0);
}
template <typename I>
void Journal<I>::get_work_queue(CephContext *cct, ContextWQ **work_queue) {
auto thread_pool_singleton =
&cct->lookup_or_create_singleton_object<ThreadPoolSingleton>(
"librbd::journal::thread_pool", false, cct);
*work_queue = thread_pool_singleton->work_queue;
}
template <typename I>
Journal<I>::Journal(I &image_ctx)
: RefCountedObject(image_ctx.cct),
m_image_ctx(image_ctx), m_journaler(NULL),
m_state(STATE_UNINITIALIZED),
m_error_result(0), m_replay_handler(this), m_close_pending(false),
m_event_tid(0),
m_blocking_writes(false), m_journal_replay(NULL),
m_metadata_listener(this) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << ": ictx=" << &m_image_ctx << dendl;
get_work_queue(cct, &m_work_queue);
ImageCtx::get_timer_instance(cct, &m_timer, &m_timer_lock);
}
template <typename I>
Journal<I>::~Journal() {
if (m_work_queue != nullptr) {
m_work_queue->drain();
}
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_CLOSED);
ceph_assert(m_journaler == NULL);
ceph_assert(m_journal_replay == NULL);
ceph_assert(m_wait_for_state_contexts.empty());
}
template <typename I>
bool Journal<I>::is_journal_supported(I &image_ctx) {
ceph_assert(ceph_mutex_is_locked(image_ctx.image_lock));
return ((image_ctx.features & RBD_FEATURE_JOURNALING) &&
!image_ctx.read_only && image_ctx.snap_id == CEPH_NOSNAP);
}
template <typename I>
int Journal<I>::create(librados::IoCtx &io_ctx, const std::string &image_id,
uint8_t order, uint8_t splay_width,
const std::string &object_pool) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 5) << __func__ << ": image=" << image_id << dendl;
ContextWQ *work_queue;
get_work_queue(cct, &work_queue);
C_SaferCond cond;
journal::TagData tag_data(LOCAL_MIRROR_UUID);
journal::CreateRequest<I> *req = journal::CreateRequest<I>::create(
io_ctx, image_id, order, splay_width, object_pool, cls::journal::Tag::TAG_CLASS_NEW,
tag_data, IMAGE_CLIENT_ID, work_queue, &cond);
req->send();
return cond.wait();
}
template <typename I>
int Journal<I>::remove(librados::IoCtx &io_ctx, const std::string &image_id) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 5) << __func__ << ": image=" << image_id << dendl;
ContextWQ *work_queue;
get_work_queue(cct, &work_queue);
C_SaferCond cond;
journal::RemoveRequest<I> *req = journal::RemoveRequest<I>::create(
io_ctx, image_id, IMAGE_CLIENT_ID, work_queue, &cond);
req->send();
return cond.wait();
}
template <typename I>
int Journal<I>::reset(librados::IoCtx &io_ctx, const std::string &image_id) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 5) << __func__ << ": image=" << image_id << dendl;
ContextWQ *work_queue;
get_work_queue(cct, &work_queue);
C_SaferCond cond;
auto req = journal::ResetRequest<I>::create(io_ctx, image_id, IMAGE_CLIENT_ID,
Journal<>::LOCAL_MIRROR_UUID,
work_queue, &cond);
req->send();
return cond.wait();
}
template <typename I>
void Journal<I>::is_tag_owner(I *image_ctx, bool *owner,
Context *on_finish) {
Journal<I>::is_tag_owner(image_ctx->md_ctx, image_ctx->id, owner,
image_ctx->op_work_queue, on_finish);
}
template <typename I>
void Journal<I>::is_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
bool *is_tag_owner,
asio::ContextWQ *op_work_queue,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << __func__ << dendl;
C_IsTagOwner<I> *is_tag_owner_ctx = new C_IsTagOwner<I>(
io_ctx, image_id, is_tag_owner, op_work_queue, on_finish);
get_tags(cct, is_tag_owner_ctx->journaler, &is_tag_owner_ctx->client,
&is_tag_owner_ctx->client_meta, &is_tag_owner_ctx->tag_tid,
&is_tag_owner_ctx->tag_data, is_tag_owner_ctx);
}
template <typename I>
void Journal<I>::get_tag_owner(IoCtx& io_ctx, std::string& image_id,
std::string *mirror_uuid,
asio::ContextWQ *op_work_queue,
Context *on_finish) {
CephContext *cct = static_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << __func__ << dendl;
auto ctx = new C_GetTagOwner(io_ctx, image_id, mirror_uuid, on_finish);
get_tags(cct, &ctx->journaler, &ctx->client, &ctx->client_meta, &ctx->tag_tid,
&ctx->tag_data, create_async_context_callback(op_work_queue, ctx));
}
template <typename I>
int Journal<I>::request_resync(I *image_ctx) {
CephContext *cct = image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
Journaler journaler(image_ctx->md_ctx, image_ctx->id, IMAGE_CLIENT_ID, {},
nullptr);
ceph::mutex lock = ceph::make_mutex("lock");
journal::ImageClientMeta client_meta;
uint64_t tag_tid;
journal::TagData tag_data;
C_SaferCond open_ctx;
auto open_req = journal::OpenRequest<I>::create(image_ctx, &journaler, &lock,
&client_meta, &tag_tid,
&tag_data, &open_ctx);
open_req->send();
BOOST_SCOPE_EXIT_ALL(&journaler) {
journaler.shut_down();
};
int r = open_ctx.wait();
if (r < 0) {
return r;
}
client_meta.resync_requested = true;
journal::ClientData client_data(client_meta);
bufferlist client_data_bl;
encode(client_data, client_data_bl);
C_SaferCond update_client_ctx;
journaler.update_client(client_data_bl, &update_client_ctx);
r = update_client_ctx.wait();
if (r < 0) {
lderr(cct) << __func__ << ": "
<< "failed to update client: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
void Journal<I>::promote(I *image_ctx, Context *on_finish) {
CephContext *cct = image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
auto promote_req = journal::PromoteRequest<I>::create(image_ctx, false,
on_finish);
promote_req->send();
}
template <typename I>
void Journal<I>::demote(I *image_ctx, Context *on_finish) {
CephContext *cct = image_ctx->cct;
ldout(cct, 20) << __func__ << dendl;
auto req = journal::DemoteRequest<I>::create(*image_ctx, on_finish);
req->send();
}
template <typename I>
bool Journal<I>::is_journal_ready() const {
std::lock_guard locker{m_lock};
return (m_state == STATE_READY);
}
template <typename I>
bool Journal<I>::is_journal_replaying() const {
std::lock_guard locker{m_lock};
return is_journal_replaying(m_lock);
}
template <typename I>
bool Journal<I>::is_journal_replaying(const ceph::mutex &) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return (m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_REPLAY ||
m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_RESTARTING_REPLAY);
}
template <typename I>
bool Journal<I>::is_journal_appending() const {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
std::lock_guard locker{m_lock};
return (m_state == STATE_READY &&
!m_image_ctx.get_journal_policy()->append_disabled());
}
template <typename I>
void Journal<I>::wait_for_journal_ready(Context *on_ready) {
on_ready = create_async_context_callback(m_image_ctx, on_ready);
std::lock_guard locker{m_lock};
if (m_state == STATE_READY) {
on_ready->complete(m_error_result);
} else {
wait_for_steady_state(on_ready);
}
}
template <typename I>
void Journal<I>::open(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
on_finish = create_context_callback<Context>(on_finish, this);
on_finish = create_async_context_callback(m_image_ctx, on_finish);
// inject our handler into the object dispatcher chain
m_image_ctx.io_object_dispatcher->register_dispatch(
journal::ObjectDispatch<I>::create(&m_image_ctx, this));
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_UNINITIALIZED);
wait_for_steady_state(on_finish);
create_journaler();
}
template <typename I>
void Journal<I>::close(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
on_finish = create_context_callback<Context>(on_finish, this);
on_finish = new LambdaContext([this, on_finish](int r) {
// remove our handler from object dispatcher chain - preserve error
auto ctx = new LambdaContext([on_finish, r](int _) {
on_finish->complete(r);
});
m_image_ctx.io_object_dispatcher->shut_down_dispatch(
io::OBJECT_DISPATCH_LAYER_JOURNAL, ctx);
});
on_finish = create_async_context_callback(m_image_ctx, on_finish);
std::unique_lock locker{m_lock};
m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
Listeners listeners(m_listeners);
m_listener_notify = true;
locker.unlock();
for (auto listener : listeners) {
listener->handle_close();
}
locker.lock();
m_listener_notify = false;
m_listener_cond.notify_all();
ceph_assert(m_state != STATE_UNINITIALIZED);
if (m_state == STATE_CLOSED) {
on_finish->complete(m_error_result);
return;
}
if (m_state == STATE_READY) {
stop_recording();
}
m_close_pending = true;
wait_for_steady_state(on_finish);
}
template <typename I>
bool Journal<I>::is_tag_owner() const {
std::lock_guard locker{m_lock};
return is_tag_owner(m_lock);
}
template <typename I>
bool Journal<I>::is_tag_owner(const ceph::mutex &) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return (m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
}
template <typename I>
uint64_t Journal<I>::get_tag_tid() const {
std::lock_guard locker{m_lock};
return m_tag_tid;
}
template <typename I>
journal::TagData Journal<I>::get_tag_data() const {
std::lock_guard locker{m_lock};
return m_tag_data;
}
template <typename I>
void Journal<I>::allocate_local_tag(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
journal::TagPredecessor predecessor;
predecessor.mirror_uuid = LOCAL_MIRROR_UUID;
{
std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr && is_tag_owner(m_lock));
cls::journal::Client client;
int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to retrieve client: " << cpp_strerror(r) << dendl;
m_image_ctx.op_work_queue->queue(on_finish, r);
return;
}
// since we are primary, populate the predecessor with our known commit
// position
ceph_assert(m_tag_data.mirror_uuid == LOCAL_MIRROR_UUID);
if (!client.commit_position.object_positions.empty()) {
auto position = client.commit_position.object_positions.front();
predecessor.commit_valid = true;
predecessor.tag_tid = position.tag_tid;
predecessor.entry_tid = position.entry_tid;
}
}
allocate_tag(LOCAL_MIRROR_UUID, predecessor, on_finish);
}
template <typename I>
void Journal<I>::allocate_tag(const std::string &mirror_uuid,
const journal::TagPredecessor &predecessor,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": mirror_uuid=" << mirror_uuid
<< dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr);
journal::TagData tag_data;
tag_data.mirror_uuid = mirror_uuid;
tag_data.predecessor = predecessor;
bufferlist tag_bl;
encode(tag_data, tag_bl);
C_DecodeTag *decode_tag_ctx = new C_DecodeTag(cct, &m_lock, &m_tag_tid,
&m_tag_data, on_finish);
m_journaler->allocate_tag(m_tag_class, tag_bl, &decode_tag_ctx->tag,
decode_tag_ctx);
}
template <typename I>
void Journal<I>::flush_commit_position(Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_journaler != nullptr);
m_journaler->flush_commit_position(on_finish);
}
template <typename I>
void Journal<I>::user_flushed() {
if (m_state == STATE_READY && !m_user_flushed.exchange(true) &&
m_image_ctx.config.template get_val<bool>("rbd_journal_object_writethrough_until_flush")) {
std::lock_guard locker{m_lock};
if (m_state == STATE_READY) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
ceph_assert(m_journaler != nullptr);
m_journaler->set_append_batch_options(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_flush_interval"),
m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_object_flush_bytes"),
m_image_ctx.config.template get_val<double>("rbd_journal_object_flush_age"));
} else {
m_user_flushed = false;
}
}
}
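// split a write into journal entries no larger than the journaler's
// maximum append size; all chunks are appended under a single event tid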
template <typename I>
uint64_t Journal<I>::append_write_event(uint64_t offset, size_t length,
const bufferlist &bl,
bool flush_entry) {
ceph_assert(m_max_append_size > journal::AioWriteEvent::get_fixed_size());
uint64_t max_write_data_size =
m_max_append_size - journal::AioWriteEvent::get_fixed_size();
// ensure that the write event fits within the journal entry
Bufferlists bufferlists;
uint64_t bytes_remaining = length;
uint64_t event_offset = 0;
do {
uint64_t event_length = std::min(bytes_remaining, max_write_data_size);
bufferlist event_bl;
event_bl.substr_of(bl, event_offset, event_length);
journal::EventEntry event_entry(journal::AioWriteEvent(offset + event_offset,
event_length,
event_bl),
ceph_clock_now());
bufferlists.emplace_back();
encode(event_entry, bufferlists.back());
event_offset += event_length;
bytes_remaining -= event_length;
} while (bytes_remaining > 0);
return append_io_events(journal::EVENT_TYPE_AIO_WRITE, bufferlists, offset,
length, flush_entry, 0);
}
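// same chunking as append_write_event(), but the per-entry data budget
// is halved since both the compare and write buffers are stored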
template <typename I>
uint64_t Journal<I>::append_compare_and_write_event(uint64_t offset,
size_t length,
const bufferlist &cmp_bl,
const bufferlist &write_bl,
bool flush_entry) {
ceph_assert(
m_max_append_size > journal::AioCompareAndWriteEvent::get_fixed_size());
uint64_t max_compare_and_write_data_size =
m_max_append_size - journal::AioCompareAndWriteEvent::get_fixed_size();
// we need double the size because we store cmp and write buffers
max_compare_and_write_data_size /= 2;
// ensure that the compare and write event fits within the journal entry
Bufferlists bufferlists;
uint64_t bytes_remaining = length;
uint64_t event_offset = 0;
do {
uint64_t event_length = std::min(bytes_remaining,
max_compare_and_write_data_size);
bufferlist event_cmp_bl;
event_cmp_bl.substr_of(cmp_bl, event_offset, event_length);
bufferlist event_write_bl;
event_write_bl.substr_of(write_bl, event_offset, event_length);
journal::EventEntry event_entry(
journal::AioCompareAndWriteEvent(offset + event_offset,
event_length,
event_cmp_bl,
event_write_bl),
ceph_clock_now());
bufferlists.emplace_back();
encode(event_entry, bufferlists.back());
event_offset += event_length;
bytes_remaining -= event_length;
} while (bytes_remaining > 0);
return append_io_events(journal::EVENT_TYPE_AIO_COMPARE_AND_WRITE,
bufferlists, offset, length, flush_entry, -EILSEQ);
}
template <typename I>
uint64_t Journal<I>::append_io_event(journal::EventEntry &&event_entry,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val) {
bufferlist bl;
event_entry.timestamp = ceph_clock_now();
encode(event_entry, bl);
return append_io_events(event_entry.get_event_type(), {bl}, offset, length,
flush_entry, filter_ret_val);
}
template <typename I>
uint64_t Journal<I>::append_io_events(journal::EventType event_type,
const Bufferlists &bufferlists,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val) {
ceph_assert(!bufferlists.empty());
uint64_t tid;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
tid = ++m_event_tid;
ceph_assert(tid != 0);
}
Futures futures;
for (auto &bl : bufferlists) {
ceph_assert(bl.length() <= m_max_append_size);
futures.push_back(m_journaler->append(m_tag_tid, bl));
}
{
std::lock_guard event_locker{m_event_lock};
m_events[tid] = Event(futures, offset, length, filter_ret_val);
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": "
<< "event=" << event_type << ", "
<< "offset=" << offset << ", "
<< "length=" << length << ", "
<< "flush=" << flush_entry << ", tid=" << tid << dendl;
Context *on_safe = create_async_context_callback(
m_image_ctx, new C_IOEventSafe(this, tid));
if (flush_entry) {
futures.back().flush(on_safe);
} else {
futures.back().wait(on_safe);
}
return tid;
}
template <typename I>
void Journal<I>::commit_io_event(uint64_t tid, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
"r=" << r << dendl;
std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
if (it == m_events.end()) {
return;
}
complete_event(it, r);
}
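// commit a sub-extent of an IO event: record the first error and only
// complete the event once all pending extents have been committed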
template <typename I>
void Journal<I>::commit_io_event_extent(uint64_t tid, uint64_t offset,
uint64_t length, int r) {
ceph_assert(length > 0);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "offset=" << offset << ", "
<< "length=" << length << ", "
<< "r=" << r << dendl;
std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
if (it == m_events.end()) {
return;
}
Event &event = it->second;
if (event.ret_val == 0 && r < 0) {
event.ret_val = r;
}
ExtentInterval extent;
extent.insert(offset, length);
ExtentInterval intersect;
intersect.intersection_of(extent, event.pending_extents);
event.pending_extents.subtract(intersect);
if (!event.pending_extents.empty()) {
ldout(cct, 20) << this << " " << __func__ << ": "
<< "pending extents: " << event.pending_extents << dendl;
return;
}
complete_event(it, event.ret_val);
}
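// Sketch (assumed values, not upstream code): committing an event's extents
// piecewise. For an event appended over offset 0, length 4096, the event
// only completes once its pending interval_set drains:
#if 0
journal->commit_io_event_extent(tid, 0, 2048, 0);     // half committed
journal->commit_io_event_extent(tid, 2048, 2048, 0);  // triggers complete_event()
#endif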
template <typename I>
void Journal<I>::append_op_event(uint64_t op_tid,
journal::EventEntry &&event_entry,
Context *on_safe) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bufferlist bl;
event_entry.timestamp = ceph_clock_now();
encode(event_entry, bl);
Future future;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
future = m_journaler->append(m_tag_tid, bl);
// delay committing op event to ensure consistent replay
ceph_assert(m_op_futures.count(op_tid) == 0);
m_op_futures[op_tid] = future;
}
on_safe = create_async_context_callback(m_image_ctx, on_safe);
on_safe = new LambdaContext([this, on_safe](int r) {
// ensure all committed IO before this op is committed
m_journaler->flush_commit_position(on_safe);
});
future.flush(on_safe);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": "
<< "op_tid=" << op_tid << ", "
<< "event=" << event_entry.get_event_type() << dendl;
}
template <typename I>
void Journal<I>::commit_op_event(uint64_t op_tid, int r, Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << ", "
<< "r=" << r << dendl;
journal::EventEntry event_entry((journal::OpFinishEvent(op_tid, r)),
ceph_clock_now());
bufferlist bl;
encode(event_entry, bl);
Future op_start_future;
Future op_finish_future;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
// ready to commit op event
auto it = m_op_futures.find(op_tid);
ceph_assert(it != m_op_futures.end());
op_start_future = it->second;
m_op_futures.erase(it);
op_finish_future = m_journaler->append(m_tag_tid, bl);
}
op_finish_future.flush(create_async_context_callback(
m_image_ctx, new C_OpEventSafe(this, op_tid, op_start_future,
op_finish_future, on_safe)));
}
template <typename I>
void Journal<I>::replay_op_ready(uint64_t op_tid, Context *on_resume) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << this << " " << __func__ << ": op_tid=" << op_tid << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_journal_replay != nullptr);
m_journal_replay->replay_op_ready(op_tid, on_resume);
}
}
template <typename I>
void Journal<I>::flush_event(uint64_t tid, Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "on_safe=" << on_safe << dendl;
on_safe = create_context_callback<Context>(on_safe, this);
Future future;
{
std::lock_guard event_locker{m_event_lock};
future = wait_event(m_lock, tid, on_safe);
}
if (future.is_valid()) {
future.flush(nullptr);
}
}
template <typename I>
void Journal<I>::wait_event(uint64_t tid, Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << tid << ", "
<< "on_safe=" << on_safe << dendl;
on_safe = create_context_callback<Context>(on_safe, this);
std::lock_guard event_locker{m_event_lock};
wait_event(m_lock, tid, on_safe);
}
template <typename I>
typename Journal<I>::Future Journal<I>::wait_event(ceph::mutex &lock, uint64_t tid,
Context *on_safe) {
ceph_assert(ceph_mutex_is_locked(m_event_lock));
CephContext *cct = m_image_ctx.cct;
typename Events::iterator it = m_events.find(tid);
ceph_assert(it != m_events.end());
Event &event = it->second;
if (event.safe) {
// journal entry already safe
ldout(cct, 20) << this << " " << __func__ << ": "
<< "journal entry already safe" << dendl;
m_image_ctx.op_work_queue->queue(on_safe, event.ret_val);
return Future();
}
event.on_safe_contexts.push_back(create_async_context_callback(m_image_ctx,
on_safe));
return event.futures.back();
}
template <typename I>
void Journal<I>::start_external_replay(journal::Replay<I> **journal_replay,
Context *on_start) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
ceph_assert(m_journal_replay == nullptr);
on_start = util::create_async_context_callback(m_image_ctx, on_start);
on_start = new LambdaContext(
[this, journal_replay, on_start](int r) {
handle_start_external_replay(r, journal_replay, on_start);
});
// safely flush all in-flight events before starting external replay
m_journaler->stop_append(util::create_async_context_callback(m_image_ctx,
on_start));
}
template <typename I>
void Journal<I>::handle_start_external_replay(int r,
journal::Replay<I> **journal_replay,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_READY);
ceph_assert(m_journal_replay == nullptr);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to stop recording: " << cpp_strerror(r) << dendl;
*journal_replay = nullptr;
    // get back to a sane state
start_append();
on_finish->complete(r);
return;
}
transition_state(STATE_REPLAYING, 0);
m_journal_replay = journal::Replay<I>::create(m_image_ctx);
*journal_replay = m_journal_replay;
on_finish->complete(0);
}
template <typename I>
void Journal<I>::stop_external_replay() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_journal_replay != nullptr);
ceph_assert(m_state == STATE_REPLAYING);
delete m_journal_replay;
m_journal_replay = nullptr;
if (m_close_pending) {
destroy_journaler(0);
return;
}
start_append();
}
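// Lifecycle sketch for external replay (hypothetical caller, e.g. a
// mirroring daemon; error handling elided):
#if 0
journal::Replay<librbd::ImageCtx> *replay = nullptr;
journal->start_external_replay(&replay,
                               new LambdaContext([journal, &replay](int r) {
  if (r == 0) {
    // feed decoded remote events through replay->process(...), then:
    journal->stop_external_replay();  // resumes local appends
  }
}));
#endif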
template <typename I>
void Journal<I>::create_journaler() {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNINITIALIZED || m_state == STATE_RESTARTING_REPLAY);
ceph_assert(m_journaler == NULL);
transition_state(STATE_INITIALIZING, 0);
::journal::Settings settings;
settings.commit_interval =
m_image_ctx.config.template get_val<double>("rbd_journal_commit_age");
settings.max_payload_bytes =
m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_max_payload_bytes");
settings.max_concurrent_object_sets =
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_max_concurrent_object_sets");
// TODO: a configurable filter to exclude certain peers from being
// disconnected.
settings.ignored_laggy_clients = {IMAGE_CLIENT_ID};
m_journaler = new Journaler(m_work_queue, m_timer, m_timer_lock,
m_image_ctx.md_ctx, m_image_ctx.id,
IMAGE_CLIENT_ID, settings, nullptr);
m_journaler->add_listener(&m_metadata_listener);
Context *ctx = create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_open>(this));
auto open_req = journal::OpenRequest<I>::create(&m_image_ctx, m_journaler,
&m_lock, &m_client_meta,
&m_tag_tid, &m_tag_data, ctx);
open_req->send();
}
template <typename I>
void Journal<I>::destroy_journaler(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
delete m_journal_replay;
m_journal_replay = NULL;
m_journaler->remove_listener(&m_metadata_listener);
transition_state(STATE_CLOSING, r);
Context *ctx = create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_journal_destroyed>(this));
ctx = new LambdaContext(
[this, ctx](int r) {
std::lock_guard locker{m_lock};
m_journaler->shut_down(ctx);
});
ctx = create_async_context_callback(m_image_ctx, ctx);
m_async_journal_op_tracker.wait_for_ops(ctx);
}
template <typename I>
void Journal<I>::recreate_journaler(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
delete m_journal_replay;
m_journal_replay = NULL;
m_journaler->remove_listener(&m_metadata_listener);
transition_state(STATE_RESTARTING_REPLAY, r);
m_journaler->shut_down(create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_journal_destroyed>(this)));
}
template <typename I>
void Journal<I>::complete_event(typename Events::iterator it, int r) {
ceph_assert(ceph_mutex_is_locked(m_event_lock));
ceph_assert(m_state == STATE_READY);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": tid=" << it->first << " "
<< "r=" << r << dendl;
Event &event = it->second;
if (r < 0 && r == event.filter_ret_val) {
// ignore allowed error codes
r = 0;
}
if (r < 0) {
    // the event was recorded to the journal but the disk update failed;
    // we cannot commit this IO event, so it must be replayed
ceph_assert(event.safe);
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit IO to disk, replay required: "
<< cpp_strerror(r) << dendl;
}
event.committed_io = true;
if (event.safe) {
if (r >= 0) {
for (auto &future : event.futures) {
m_journaler->committed(future);
}
}
m_events.erase(it);
}
}
template <typename I>
void Journal<I>::start_append() {
ceph_assert(ceph_mutex_is_locked(m_lock));
m_journaler->start_append(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_max_in_flight_appends"));
if (!m_image_ctx.config.template get_val<bool>("rbd_journal_object_writethrough_until_flush")) {
m_journaler->set_append_batch_options(
m_image_ctx.config.template get_val<uint64_t>("rbd_journal_object_flush_interval"),
m_image_ctx.config.template get_val<Option::size_t>("rbd_journal_object_flush_bytes"),
m_image_ctx.config.template get_val<double>("rbd_journal_object_flush_age"));
}
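  // Note on the conditional above: batch options (flush interval/bytes/age)
  // are only applied when rbd_journal_object_writethrough_until_flush is
  // false; otherwise the journaler presumably stays in writethrough mode
  // until the first user-initiated flush (see user_flushed()).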
transition_state(STATE_READY, 0);
}
template <typename I>
void Journal<I>::handle_open(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_INITIALIZING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to initialize journal: " << cpp_strerror(r)
<< dendl;
destroy_journaler(r);
return;
}
m_tag_class = m_client_meta.tag_class;
m_max_append_size = m_journaler->get_max_append_size();
ldout(cct, 20) << this << " " << __func__ << ": "
<< "tag_class=" << m_tag_class << ", "
<< "max_append_size=" << m_max_append_size << dendl;
transition_state(STATE_REPLAYING, 0);
m_journal_replay = journal::Replay<I>::create(m_image_ctx);
m_journaler->start_replay(&m_replay_handler);
}
template <typename I>
void Journal<I>::handle_replay_ready() {
CephContext *cct = m_image_ctx.cct;
ReplayEntry replay_entry;
{
std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
return;
}
ldout(cct, 20) << this << " " << __func__ << dendl;
if (!m_journaler->try_pop_front(&replay_entry)) {
return;
}
// only one entry should be in-flight at a time
ceph_assert(!m_processing_entry);
m_processing_entry = true;
}
m_async_journal_op_tracker.start_op();
bufferlist data = replay_entry.get_data();
auto it = data.cbegin();
journal::EventEntry event_entry;
int r = m_journal_replay->decode(&it, &event_entry);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to decode journal event entry" << dendl;
handle_replay_process_safe(replay_entry, r);
return;
}
Context *on_ready = create_context_callback<
Journal<I>, &Journal<I>::handle_replay_process_ready>(this);
Context *on_commit = new C_ReplayProcessSafe(this, std::move(replay_entry));
m_journal_replay->process(event_entry, on_ready, on_commit);
}
template <typename I>
void Journal<I>::handle_replay_complete(int r) {
CephContext *cct = m_image_ctx.cct;
bool cancel_ops = false;
{
std::lock_guard locker{m_lock};
if (m_state != STATE_REPLAYING) {
return;
}
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
cancel_ops = true;
transition_state(STATE_FLUSHING_RESTART, r);
} else {
// state might change back to FLUSHING_RESTART on flush error
transition_state(STATE_FLUSHING_REPLAY, 0);
}
}
Context *ctx = new LambdaContext([this, cct](int r) {
ldout(cct, 20) << this << " handle_replay_complete: "
<< "handle shut down replay" << dendl;
State state;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
state = m_state;
}
if (state == STATE_FLUSHING_RESTART) {
handle_flushing_restart(0);
} else {
handle_flushing_replay();
}
});
ctx = new LambdaContext([this, ctx](int r) {
// ensure the commit position is flushed to disk
m_journaler->flush_commit_position(ctx);
});
ctx = create_async_context_callback(m_image_ctx, ctx);
ctx = new LambdaContext([this, ctx](int r) {
m_async_journal_op_tracker.wait_for_ops(ctx);
});
ctx = new LambdaContext([this, cct, cancel_ops, ctx](int r) {
ldout(cct, 20) << this << " handle_replay_complete: "
<< "shut down replay" << dendl;
m_journal_replay->shut_down(cancel_ops, ctx);
});
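  // Note: the wrapped contexts above run in reverse construction order once
  // stop_replay() completes -- shut down the journal::Replay instance, wait
  // for in-flight async ops, flush the commit position, then dispatch to
  // handle_flushing_restart() or handle_flushing_replay().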
m_journaler->stop_replay(ctx);
}
template <typename I>
void Journal<I>::handle_replay_process_ready(int r) {
// journal::Replay is ready for more events -- attempt to pop another
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(r == 0);
{
std::lock_guard locker{m_lock};
ceph_assert(m_processing_entry);
m_processing_entry = false;
}
handle_replay_ready();
}
template <typename I>
void Journal<I>::handle_replay_process_safe(ReplayEntry replay_entry, int r) {
CephContext *cct = m_image_ctx.cct;
std::unique_lock locker{m_lock};
ceph_assert(m_state == STATE_REPLAYING ||
m_state == STATE_FLUSHING_RESTART ||
m_state == STATE_FLUSHING_REPLAY);
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
if (r != -ECANCELED) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit journal event to disk: "
<< cpp_strerror(r) << dendl;
}
if (m_state == STATE_REPLAYING) {
// abort the replay if we have an error
transition_state(STATE_FLUSHING_RESTART, r);
locker.unlock();
// stop replay, shut down, and restart
Context* ctx = create_context_callback<
Journal<I>, &Journal<I>::handle_flushing_restart>(this);
ctx = new LambdaContext([this, ctx](int r) {
// ensure the commit position is flushed to disk
m_journaler->flush_commit_position(ctx);
});
ctx = new LambdaContext([this, cct, ctx](int r) {
ldout(cct, 20) << this << " handle_replay_process_safe: "
<< "shut down replay" << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_FLUSHING_RESTART);
}
m_journal_replay->shut_down(true, ctx);
});
m_journaler->stop_replay(ctx);
m_async_journal_op_tracker.finish_op();
return;
} else if (m_state == STATE_FLUSHING_REPLAY) {
// end-of-replay flush in-progress -- we need to restart replay
transition_state(STATE_FLUSHING_RESTART, r);
locker.unlock();
m_async_journal_op_tracker.finish_op();
return;
}
} else {
// only commit the entry if written successfully
m_journaler->committed(replay_entry);
}
locker.unlock();
m_async_journal_op_tracker.finish_op();
}
template <typename I>
void Journal<I>::handle_flushing_restart(int r) {
std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(r == 0);
ceph_assert(m_state == STATE_FLUSHING_RESTART);
if (m_close_pending) {
destroy_journaler(r);
return;
}
recreate_journaler(r);
}
template <typename I>
void Journal<I>::handle_flushing_replay() {
std::lock_guard locker{m_lock};
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(m_state == STATE_FLUSHING_REPLAY ||
m_state == STATE_FLUSHING_RESTART);
if (m_close_pending) {
destroy_journaler(0);
return;
} else if (m_state == STATE_FLUSHING_RESTART) {
    // failed to replay one or more events -- restart
recreate_journaler(0);
return;
}
delete m_journal_replay;
m_journal_replay = NULL;
m_error_result = 0;
start_append();
}
template <typename I>
void Journal<I>::handle_recording_stopped(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_STOPPING);
destroy_journaler(r);
}
template <typename I>
void Journal<I>::handle_journal_destroyed(int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << dendl;
if (r < 0) {
lderr(cct) << this << " " << __func__
<< "error detected while closing journal: " << cpp_strerror(r)
<< dendl;
}
std::lock_guard locker{m_lock};
delete m_journaler;
m_journaler = nullptr;
ceph_assert(m_state == STATE_CLOSING || m_state == STATE_RESTARTING_REPLAY);
if (m_state == STATE_RESTARTING_REPLAY) {
create_journaler();
return;
}
transition_state(STATE_CLOSED, r);
}
template <typename I>
void Journal<I>::handle_io_event_safe(int r, uint64_t tid) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << ", "
<< "tid=" << tid << dendl;
// journal will be flushed before closing
ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit IO event: " << cpp_strerror(r) << dendl;
}
Contexts on_safe_contexts;
{
std::lock_guard event_locker{m_event_lock};
typename Events::iterator it = m_events.find(tid);
ceph_assert(it != m_events.end());
Event &event = it->second;
on_safe_contexts.swap(event.on_safe_contexts);
if (r < 0 || event.committed_io) {
// failed journal write so IO won't be sent -- or IO extent was
// overwritten by future IO operations so this was a no-op IO event
event.ret_val = r;
for (auto &future : event.futures) {
m_journaler->committed(future);
}
}
if (event.committed_io) {
m_events.erase(it);
} else {
event.safe = true;
}
}
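  // Lifecycle note: an Event stays in m_events until both legs complete --
  // the journal append (safe, recorded here) and the image I/O commit
  // (committed_io, recorded via complete_event()). Whichever leg finishes
  // second erases the entry, and the futures are only marked committed once
  // it is known the event will not require replay.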
ldout(cct, 20) << this << " " << __func__ << ": "
<< "completing tid=" << tid << dendl;
// alert the cache about the journal event status
for (Contexts::iterator it = on_safe_contexts.begin();
it != on_safe_contexts.end(); ++it) {
(*it)->complete(r);
}
}
template <typename I>
void Journal<I>::handle_op_event_safe(int r, uint64_t tid,
const Future &op_start_future,
const Future &op_finish_future,
Context *on_safe) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": r=" << r << ", "
<< "tid=" << tid << dendl;
// journal will be flushed before closing
ceph_assert(m_state == STATE_READY || m_state == STATE_STOPPING);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to commit op event: " << cpp_strerror(r) << dendl;
}
m_journaler->committed(op_start_future);
m_journaler->committed(op_finish_future);
// reduce the replay window after committing an op event
m_journaler->flush_commit_position(on_safe);
}
template <typename I>
void Journal<I>::stop_recording() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_journaler != NULL);
ceph_assert(m_state == STATE_READY);
transition_state(STATE_STOPPING, 0);
m_journaler->stop_append(util::create_async_context_callback(
m_image_ctx, create_context_callback<
Journal<I>, &Journal<I>::handle_recording_stopped>(this)));
}
template <typename I>
void Journal<I>::transition_state(State state, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": new state=" << state << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_state = state;
if (m_error_result == 0 && r < 0) {
m_error_result = r;
}
if (is_steady_state()) {
auto wait_for_state_contexts(std::move(m_wait_for_state_contexts));
m_wait_for_state_contexts.clear();
for (auto ctx : wait_for_state_contexts) {
ctx->complete(m_error_result);
}
}
}
template <typename I>
bool Journal<I>::is_steady_state() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
switch (m_state) {
case STATE_READY:
case STATE_CLOSED:
return true;
case STATE_UNINITIALIZED:
case STATE_INITIALIZING:
case STATE_REPLAYING:
case STATE_FLUSHING_RESTART:
case STATE_RESTARTING_REPLAY:
case STATE_FLUSHING_REPLAY:
case STATE_STOPPING:
case STATE_CLOSING:
break;
}
return false;
}
template <typename I>
void Journal<I>::wait_for_steady_state(Context *on_state) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!is_steady_state());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << ": on_state=" << on_state
<< dendl;
m_wait_for_state_contexts.push_back(on_state);
}
template <typename I>
int Journal<I>::is_resync_requested(bool *do_resync) {
std::lock_guard l{m_lock};
return check_resync_requested(do_resync);
}
template <typename I>
int Journal<I>::check_resync_requested(bool *do_resync) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << this << " " << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(do_resync != nullptr);
cls::journal::Client client;
int r = m_journaler->get_cached_client(IMAGE_CLIENT_ID, &client);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to retrieve client: " << cpp_strerror(r) << dendl;
return r;
}
librbd::journal::ClientData client_data;
auto bl_it = client.data.cbegin();
try {
decode(client_data, bl_it);
} catch (const buffer::error &err) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to decode client data: " << err.what() << dendl;
return -EINVAL;
}
journal::ImageClientMeta *image_client_meta =
boost::get<journal::ImageClientMeta>(&client_data.client_meta);
if (image_client_meta == nullptr) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to access image client meta struct" << dendl;
return -EINVAL;
}
*do_resync = image_client_meta->resync_requested;
return 0;
}
struct C_RefreshTags : public Context {
AsyncOpTracker &async_op_tracker;
Context *on_finish = nullptr;
ceph::mutex lock =
ceph::make_mutex("librbd::Journal::C_RefreshTags::lock");
uint64_t tag_tid = 0;
journal::TagData tag_data;
explicit C_RefreshTags(AsyncOpTracker &async_op_tracker)
: async_op_tracker(async_op_tracker) {
async_op_tracker.start_op();
}
~C_RefreshTags() override {
async_op_tracker.finish_op();
}
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename I>
void Journal<I>::handle_metadata_updated() {
CephContext *cct = m_image_ctx.cct;
std::lock_guard locker{m_lock};
if (m_state != STATE_READY && !is_journal_replaying(m_lock)) {
return;
} else if (is_tag_owner(m_lock)) {
ldout(cct, 20) << this << " " << __func__ << ": primary image" << dendl;
return;
} else if (m_listeners.empty()) {
ldout(cct, 20) << this << " " << __func__ << ": no listeners" << dendl;
return;
}
uint64_t refresh_sequence = ++m_refresh_sequence;
ldout(cct, 20) << this << " " << __func__ << ": "
<< "refresh_sequence=" << refresh_sequence << dendl;
// pull the most recent tags from the journal, decode, and
// update the internal tag state
C_RefreshTags *refresh_ctx = new C_RefreshTags(m_async_journal_op_tracker);
refresh_ctx->on_finish = new LambdaContext(
[this, refresh_sequence, refresh_ctx](int r) {
handle_refresh_metadata(refresh_sequence, refresh_ctx->tag_tid,
refresh_ctx->tag_data, r);
});
C_DecodeTags *decode_tags_ctx = new C_DecodeTags(
cct, &refresh_ctx->lock, &refresh_ctx->tag_tid,
&refresh_ctx->tag_data, refresh_ctx);
m_journaler->get_tags(m_tag_tid == 0 ? 0 : m_tag_tid - 1, m_tag_class,
&decode_tags_ctx->tags, decode_tags_ctx);
}
template <typename I>
void Journal<I>::handle_refresh_metadata(uint64_t refresh_sequence,
uint64_t tag_tid,
journal::TagData tag_data, int r) {
CephContext *cct = m_image_ctx.cct;
std::unique_lock locker{m_lock};
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": failed to refresh metadata: "
<< cpp_strerror(r) << dendl;
return;
} else if (m_state != STATE_READY && !is_journal_replaying(m_lock)) {
return;
} else if (refresh_sequence != m_refresh_sequence) {
// another, more up-to-date refresh is in-flight
return;
}
ldout(cct, 20) << this << " " << __func__ << ": "
<< "refresh_sequence=" << refresh_sequence << ", "
<< "tag_tid=" << tag_tid << ", "
<< "tag_data=" << tag_data << dendl;
m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
bool was_tag_owner = is_tag_owner(m_lock);
if (m_tag_tid < tag_tid) {
m_tag_tid = tag_tid;
m_tag_data = tag_data;
}
bool promoted_to_primary = (!was_tag_owner && is_tag_owner(m_lock));
bool resync_requested = false;
r = check_resync_requested(&resync_requested);
if (r < 0) {
lderr(cct) << this << " " << __func__ << ": "
<< "failed to check if a resync was requested" << dendl;
return;
}
Listeners listeners(m_listeners);
m_listener_notify = true;
locker.unlock();
if (promoted_to_primary) {
for (auto listener : listeners) {
listener->handle_promoted();
}
} else if (resync_requested) {
for (auto listener : listeners) {
listener->handle_resync();
}
}
locker.lock();
m_listener_notify = false;
m_listener_cond.notify_all();
}
template <typename I>
void Journal<I>::add_listener(journal::Listener *listener) {
std::lock_guard locker{m_lock};
m_listeners.insert(listener);
}
template <typename I>
void Journal<I>::remove_listener(journal::Listener *listener) {
std::unique_lock locker{m_lock};
m_listener_cond.wait(locker, [this] { return !m_listener_notify; });
m_listeners.erase(listener);
}
} // namespace librbd
#ifndef TEST_F
template class librbd::Journal<librbd::ImageCtx>;
#endif
ceph-main/src/librbd/Journal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_JOURNAL_H
#define CEPH_LIBRBD_JOURNAL_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/interval_set.h"
#include "include/rados/librados_fwd.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Cond.h"
#include "common/Timer.h"
#include "common/RefCountedObj.h"
#include "journal/Future.h"
#include "journal/JournalMetadataListener.h"
#include "journal/ReplayEntry.h"
#include "journal/ReplayHandler.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/journal/Types.h"
#include "librbd/journal/TypeTraits.h"
#include <algorithm>
#include <list>
#include <string>
#include <atomic>
#include <unordered_map>
class ContextWQ;
namespace journal { class Journaler; }
namespace librbd {
class ImageCtx;
namespace journal { template <typename> class Replay; }
template <typename ImageCtxT = ImageCtx>
class Journal : public RefCountedObject {
public:
/**
* @verbatim
*
* <start>
* |
* v
* UNINITIALIZED ---> INITIALIZING ---> REPLAYING ------> FLUSHING ---> READY
* | * . ^ * . * |
* | * . | * . * |
* | * . | (error) * . . . . . . . * |
* | * . | * . * |
* | * . | v . * |
* | * . | FLUSHING_RESTART . * |
* | * . | | . * |
* | * . | | . * |
* | * . | v . * v
* | * . | RESTARTING < * * * * * STOPPING
* | * . | | . |
* | * . | | . |
* | * * * * * * . \-------------/ . |
* | * (error) . . |
* | * . . . . . . . . . . . . . . . . |
* | * . . |
* | v v v |
* | CLOSED <----- CLOSING <---------------------------------------/
* | |
* | v
* \---> <finish>
*
* @endverbatim
*/
enum State {
STATE_UNINITIALIZED,
STATE_INITIALIZING,
STATE_REPLAYING,
STATE_FLUSHING_RESTART,
STATE_RESTARTING_REPLAY,
STATE_FLUSHING_REPLAY,
STATE_READY,
STATE_STOPPING,
STATE_CLOSING,
STATE_CLOSED
};
static const std::string IMAGE_CLIENT_ID;
static const std::string LOCAL_MIRROR_UUID;
static const std::string ORPHAN_MIRROR_UUID;
Journal(ImageCtxT &image_ctx);
~Journal();
static void get_work_queue(CephContext *cct, ContextWQ **work_queue);
static bool is_journal_supported(ImageCtxT &image_ctx);
static int create(librados::IoCtx &io_ctx, const std::string &image_id,
uint8_t order, uint8_t splay_width,
const std::string &object_pool);
static int remove(librados::IoCtx &io_ctx, const std::string &image_id);
static int reset(librados::IoCtx &io_ctx, const std::string &image_id);
static void is_tag_owner(ImageCtxT *image_ctx, bool *is_tag_owner,
Context *on_finish);
static void is_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
bool *is_tag_owner, asio::ContextWQ *op_work_queue,
Context *on_finish);
static void get_tag_owner(librados::IoCtx& io_ctx, std::string& image_id,
std::string *mirror_uuid,
asio::ContextWQ *op_work_queue, Context *on_finish);
static int request_resync(ImageCtxT *image_ctx);
static void promote(ImageCtxT *image_ctx, Context *on_finish);
static void demote(ImageCtxT *image_ctx, Context *on_finish);
bool is_journal_ready() const;
bool is_journal_replaying() const;
bool is_journal_appending() const;
void wait_for_journal_ready(Context *on_ready);
void open(Context *on_finish);
void close(Context *on_finish);
bool is_tag_owner() const;
uint64_t get_tag_tid() const;
journal::TagData get_tag_data() const;
void allocate_local_tag(Context *on_finish);
void allocate_tag(const std::string &mirror_uuid,
const journal::TagPredecessor &predecessor,
Context *on_finish);
void flush_commit_position(Context *on_finish);
void user_flushed();
uint64_t append_write_event(uint64_t offset, size_t length,
const bufferlist &bl,
bool flush_entry);
uint64_t append_compare_and_write_event(uint64_t offset,
size_t length,
const bufferlist &cmp_bl,
const bufferlist &write_bl,
bool flush_entry);
uint64_t append_io_event(journal::EventEntry &&event_entry,
uint64_t offset, size_t length,
bool flush_entry, int filter_ret_val);
void commit_io_event(uint64_t tid, int r);
void commit_io_event_extent(uint64_t tid, uint64_t offset, uint64_t length,
int r);
void append_op_event(uint64_t op_tid, journal::EventEntry &&event_entry,
Context *on_safe);
void commit_op_event(uint64_t tid, int r, Context *on_safe);
void replay_op_ready(uint64_t op_tid, Context *on_resume);
void flush_event(uint64_t tid, Context *on_safe);
void wait_event(uint64_t tid, Context *on_safe);
uint64_t allocate_op_tid() {
uint64_t op_tid = ++m_op_tid;
ceph_assert(op_tid != 0);
return op_tid;
}
void start_external_replay(journal::Replay<ImageCtxT> **journal_replay,
Context *on_start);
void stop_external_replay();
void add_listener(journal::Listener *listener);
void remove_listener(journal::Listener *listener);
int is_resync_requested(bool *do_resync);
inline ContextWQ *get_work_queue() {
return m_work_queue;
}
private:
ImageCtxT &m_image_ctx;
// mock unit testing support
typedef journal::TypeTraits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::Journaler Journaler;
typedef typename TypeTraits::Future Future;
typedef typename TypeTraits::ReplayEntry ReplayEntry;
typedef std::list<bufferlist> Bufferlists;
typedef std::list<Context *> Contexts;
typedef std::list<Future> Futures;
typedef interval_set<uint64_t> ExtentInterval;
struct Event {
Futures futures;
Contexts on_safe_contexts;
ExtentInterval pending_extents;
int filter_ret_val = 0;
bool committed_io = false;
bool safe = false;
int ret_val = 0;
Event() {
}
Event(const Futures &_futures, uint64_t offset, size_t length,
int filter_ret_val)
: futures(_futures), filter_ret_val(filter_ret_val) {
if (length > 0) {
pending_extents.insert(offset, length);
}
}
};
typedef std::unordered_map<uint64_t, Event> Events;
typedef std::unordered_map<uint64_t, Future> TidToFutures;
struct C_IOEventSafe : public Context {
Journal *journal;
uint64_t tid;
C_IOEventSafe(Journal *_journal, uint64_t _tid)
: journal(_journal), tid(_tid) {
}
void finish(int r) override {
journal->handle_io_event_safe(r, tid);
}
};
struct C_OpEventSafe : public Context {
Journal *journal;
uint64_t tid;
Future op_start_future;
Future op_finish_future;
Context *on_safe;
C_OpEventSafe(Journal *journal, uint64_t tid, const Future &op_start_future,
const Future &op_finish_future, Context *on_safe)
: journal(journal), tid(tid), op_start_future(op_start_future),
op_finish_future(op_finish_future), on_safe(on_safe) {
}
void finish(int r) override {
journal->handle_op_event_safe(r, tid, op_start_future, op_finish_future,
on_safe);
}
};
struct C_ReplayProcessSafe : public Context {
Journal *journal;
ReplayEntry replay_entry;
C_ReplayProcessSafe(Journal *journal, ReplayEntry &&replay_entry) :
journal(journal), replay_entry(std::move(replay_entry)) {
}
void finish(int r) override {
journal->handle_replay_process_safe(replay_entry, r);
}
};
struct ReplayHandler : public ::journal::ReplayHandler {
Journal *journal;
ReplayHandler(Journal *_journal) : journal(_journal) {
}
void handle_entries_available() override {
journal->handle_replay_ready();
}
void handle_complete(int r) override {
journal->handle_replay_complete(r);
}
};
ContextWQ *m_work_queue = nullptr;
SafeTimer *m_timer = nullptr;
ceph::mutex *m_timer_lock = nullptr;
Journaler *m_journaler;
mutable ceph::mutex m_lock = ceph::make_mutex("Journal<I>::m_lock");
State m_state;
uint64_t m_max_append_size = 0;
uint64_t m_tag_class = 0;
uint64_t m_tag_tid = 0;
journal::ImageClientMeta m_client_meta;
journal::TagData m_tag_data;
int m_error_result;
Contexts m_wait_for_state_contexts;
ReplayHandler m_replay_handler;
bool m_close_pending;
ceph::mutex m_event_lock = ceph::make_mutex("Journal<I>::m_event_lock");
uint64_t m_event_tid;
Events m_events;
std::atomic<bool> m_user_flushed = false;
std::atomic<uint64_t> m_op_tid = { 0 };
TidToFutures m_op_futures;
bool m_processing_entry = false;
bool m_blocking_writes;
journal::Replay<ImageCtxT> *m_journal_replay;
AsyncOpTracker m_async_journal_op_tracker;
struct MetadataListener : public ::journal::JournalMetadataListener {
Journal<ImageCtxT> *journal;
MetadataListener(Journal<ImageCtxT> *journal) : journal(journal) { }
void handle_update(::journal::JournalMetadata *) override;
} m_metadata_listener;
typedef std::set<journal::Listener *> Listeners;
Listeners m_listeners;
ceph::condition_variable m_listener_cond;
bool m_listener_notify = false;
uint64_t m_refresh_sequence = 0;
bool is_journal_replaying(const ceph::mutex &) const;
bool is_tag_owner(const ceph::mutex &) const;
uint64_t append_io_events(journal::EventType event_type,
const Bufferlists &bufferlists,
uint64_t offset, size_t length, bool flush_entry,
int filter_ret_val);
Future wait_event(ceph::mutex &lock, uint64_t tid, Context *on_safe);
void create_journaler();
void destroy_journaler(int r);
void recreate_journaler(int r);
void complete_event(typename Events::iterator it, int r);
void start_append();
void handle_open(int r);
void handle_replay_ready();
void handle_replay_complete(int r);
void handle_replay_process_ready(int r);
void handle_replay_process_safe(ReplayEntry replay_entry, int r);
void handle_start_external_replay(int r,
journal::Replay<ImageCtxT> **journal_replay,
Context *on_finish);
void handle_flushing_restart(int r);
void handle_flushing_replay();
void handle_recording_stopped(int r);
void handle_journal_destroyed(int r);
void handle_io_event_safe(int r, uint64_t tid);
void handle_op_event_safe(int r, uint64_t tid, const Future &op_start_future,
const Future &op_finish_future, Context *on_safe);
void stop_recording();
void transition_state(State state, int r);
bool is_steady_state() const;
void wait_for_steady_state(Context *on_state);
int check_resync_requested(bool *do_resync);
void handle_metadata_updated();
void handle_refresh_metadata(uint64_t refresh_sequence, uint64_t tag_tid,
journal::TagData tag_data, int r);
};
} // namespace librbd
extern template class librbd::Journal<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_JOURNAL_H
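// Usage sketch for the op-event API above (hypothetical caller, e.g. an
// image operation request; make_op_start_entry() is an assumed helper, not
// part of this header):
#if 0
uint64_t op_tid = journal->allocate_op_tid();
journal->append_op_event(op_tid, make_op_start_entry(), on_appended);
// ... apply the operation to the image ...
journal->commit_op_event(op_tid, result, on_committed);
#endif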
ceph-main/src/librbd/LibrbdAdminSocketHook.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/LibrbdAdminSocketHook.h"
#include "librbd/internal.h"
#include "librbd/api/Io.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbdadminsocket: "
namespace librbd {
class LibrbdAdminSocketCommand {
public:
virtual ~LibrbdAdminSocketCommand() {}
virtual int call(Formatter *f) = 0;
};
class FlushCacheCommand : public LibrbdAdminSocketCommand {
public:
explicit FlushCacheCommand(ImageCtx *ictx) : ictx(ictx) {}
int call(Formatter *f) override {
return api::Io<>::flush(*ictx);
}
private:
ImageCtx *ictx;
};
struct InvalidateCacheCommand : public LibrbdAdminSocketCommand {
public:
explicit InvalidateCacheCommand(ImageCtx *ictx) : ictx(ictx) {}
int call(Formatter *f) override {
return invalidate_cache(ictx);
}
private:
ImageCtx *ictx;
};
LibrbdAdminSocketHook::LibrbdAdminSocketHook(ImageCtx *ictx) :
admin_socket(ictx->cct->get_admin_socket()) {
std::string command;
std::string imagename;
int r;
imagename = ictx->md_ctx.get_pool_name() + "/" + ictx->name;
command = "rbd cache flush " + imagename;
r = admin_socket->register_command(command, this,
"flush rbd image " + imagename +
" cache");
if (r == 0) {
commands[command] = new FlushCacheCommand(ictx);
}
command = "rbd cache invalidate " + imagename;
r = admin_socket->register_command(command, this,
"invalidate rbd image " + imagename +
" cache");
if (r == 0) {
commands[command] = new InvalidateCacheCommand(ictx);
}
}
LibrbdAdminSocketHook::~LibrbdAdminSocketHook() {
(void)admin_socket->unregister_commands(this);
for (Commands::const_iterator i = commands.begin(); i != commands.end();
++i) {
delete i->second;
}
}
int LibrbdAdminSocketHook::call(std::string_view command,
const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& errss,
bufferlist& out) {
Commands::const_iterator i = commands.find(command);
ceph_assert(i != commands.end());
return i->second->call(f);
}
} // namespace librbd
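// Illustrative note (the socket path below is an assumption): the commands
// registered above are reachable through the client admin socket, e.g.
//   ceph --admin-daemon /var/run/ceph/ceph-client.admin.asok \
//       "rbd cache flush <pool>/<image>"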
ceph-main/src/librbd/LibrbdAdminSocketHook.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_LIBRBDADMINSOCKETHOOK_H
#define CEPH_LIBRBD_LIBRBDADMINSOCKETHOOK_H
#include <map>
#include "common/admin_socket.h"
namespace librbd {
struct ImageCtx;
class LibrbdAdminSocketCommand;
class LibrbdAdminSocketHook : public AdminSocketHook {
public:
LibrbdAdminSocketHook(ImageCtx *ictx);
~LibrbdAdminSocketHook() override;
int call(std::string_view command, const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& errss,
bufferlist& out) override;
private:
  typedef std::map<std::string, LibrbdAdminSocketCommand*,
                   std::less<>> Commands;
AdminSocket *admin_socket;
Commands commands;
};
}
#endif
ceph-main/src/librbd/ManagedLock.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ManagedLock.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/managed_lock/AcquireRequest.h"
#include "librbd/managed_lock/BreakRequest.h"
#include "librbd/managed_lock/GetLockerRequest.h"
#include "librbd/managed_lock/ReleaseRequest.h"
#include "librbd/managed_lock/ReacquireRequest.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/managed_lock/Utils.h"
#include "cls/lock/cls_lock_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ManagedLock: " << this << " " \
<< __func__ << ": "
namespace librbd {
using std::string;
using namespace managed_lock;
namespace {
template <typename R>
struct C_SendLockRequest : public Context {
R* request;
explicit C_SendLockRequest(R* request) : request(request) {
}
void finish(int r) override {
request->send();
}
};
struct C_Tracked : public Context {
AsyncOpTracker &tracker;
Context *ctx;
C_Tracked(AsyncOpTracker &tracker, Context *ctx)
: tracker(tracker), ctx(ctx) {
tracker.start_op();
}
~C_Tracked() override {
tracker.finish_op();
}
void finish(int r) override {
ctx->complete(r);
}
};
} // anonymous namespace
using librbd::util::create_context_callback;
using librbd::util::unique_lock_name;
using managed_lock::util::decode_lock_cookie;
using managed_lock::util::encode_lock_cookie;
template <typename I>
ManagedLock<I>::ManagedLock(librados::IoCtx &ioctx, AsioEngine& asio_engine,
const string& oid, Watcher *watcher, Mode mode,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds)
: m_lock(ceph::make_mutex(unique_lock_name("librbd::ManagedLock<I>::m_lock", this))),
m_ioctx(ioctx), m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
m_asio_engine(asio_engine),
m_work_queue(asio_engine.get_work_queue()),
m_oid(oid),
m_watcher(watcher),
m_mode(mode),
m_blocklist_on_break_lock(blocklist_on_break_lock),
m_blocklist_expire_seconds(blocklist_expire_seconds),
m_state(STATE_UNLOCKED) {
}
template <typename I>
ManagedLock<I>::~ManagedLock() {
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_SHUTDOWN || m_state == STATE_UNLOCKED ||
m_state == STATE_UNINITIALIZED);
if (m_state == STATE_UNINITIALIZED) {
// never initialized -- ensure any in-flight ops are complete
// since we wouldn't expect shut_down to be invoked
C_SaferCond ctx;
m_async_op_tracker.wait_for_ops(&ctx);
ctx.wait();
}
ceph_assert(m_async_op_tracker.empty());
}
template <typename I>
bool ManagedLock<I>::is_lock_owner() const {
std::lock_guard locker{m_lock};
return is_lock_owner(m_lock);
}
template <typename I>
bool ManagedLock<I>::is_lock_owner(ceph::mutex &lock) const {
ceph_assert(ceph_mutex_is_locked(m_lock));
bool lock_owner;
switch (m_state) {
case STATE_LOCKED:
case STATE_REACQUIRING:
case STATE_PRE_SHUTTING_DOWN:
case STATE_POST_ACQUIRING:
case STATE_PRE_RELEASING:
lock_owner = true;
break;
default:
lock_owner = false;
break;
}
ldout(m_cct, 20) << lock_owner << dendl;
return lock_owner;
}
template <typename I>
void ManagedLock<I>::shut_down(Context *on_shut_down) {
ldout(m_cct, 10) << dendl;
std::lock_guard locker{m_lock};
ceph_assert(!is_state_shutdown());
if (m_state == STATE_WAITING_FOR_REGISTER) {
// abort stalled acquire lock state
ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
Action active_action = get_active_action();
ceph_assert(active_action == ACTION_TRY_LOCK ||
active_action == ACTION_ACQUIRE_LOCK);
complete_active_action(STATE_UNLOCKED, -ERESTART);
}
execute_action(ACTION_SHUT_DOWN, on_shut_down);
}
template <typename I>
void ManagedLock<I>::acquire_lock(Context *on_acquired) {
int r = 0;
{
std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
ldout(m_cct, 10) << dendl;
execute_action(ACTION_ACQUIRE_LOCK, on_acquired);
return;
}
}
if (on_acquired != nullptr) {
on_acquired->complete(r);
}
}
template <typename I>
void ManagedLock<I>::try_acquire_lock(Context *on_acquired) {
int r = 0;
{
std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (m_state != STATE_LOCKED || !m_actions_contexts.empty()) {
ldout(m_cct, 10) << dendl;
execute_action(ACTION_TRY_LOCK, on_acquired);
return;
}
}
if (on_acquired != nullptr) {
on_acquired->complete(r);
}
}
template <typename I>
void ManagedLock<I>::release_lock(Context *on_released) {
int r = 0;
{
std::lock_guard locker{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (m_state != STATE_UNLOCKED || !m_actions_contexts.empty()) {
ldout(m_cct, 10) << dendl;
execute_action(ACTION_RELEASE_LOCK, on_released);
return;
}
}
if (on_released != nullptr) {
on_released->complete(r);
}
}
template <typename I>
void ManagedLock<I>::reacquire_lock(Context *on_reacquired) {
{
std::lock_guard locker{m_lock};
if (m_state == STATE_WAITING_FOR_REGISTER) {
// restart the acquire lock process now that watch is valid
ldout(m_cct, 10) << "woke up waiting (re)acquire" << dendl;
Action active_action = get_active_action();
ceph_assert(active_action == ACTION_TRY_LOCK ||
active_action == ACTION_ACQUIRE_LOCK);
execute_next_action();
} else if (!is_state_shutdown() &&
(m_state == STATE_LOCKED ||
m_state == STATE_ACQUIRING ||
m_state == STATE_POST_ACQUIRING ||
m_state == STATE_WAITING_FOR_LOCK)) {
// interlock the lock operation with other state ops
ldout(m_cct, 10) << dendl;
execute_action(ACTION_REACQUIRE_LOCK, on_reacquired);
return;
}
}
// ignore request if shutdown or not in a locked-related state
if (on_reacquired != nullptr) {
on_reacquired->complete(0);
}
}
template <typename I>
void ManagedLock<I>::get_locker(managed_lock::Locker *locker,
Context *on_finish) {
ldout(m_cct, 10) << dendl;
int r;
{
std::lock_guard l{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else {
on_finish = new C_Tracked(m_async_op_tracker, on_finish);
auto req = managed_lock::GetLockerRequest<I>::create(
m_ioctx, m_oid, m_mode == EXCLUSIVE, locker, on_finish);
req->send();
return;
}
}
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::break_lock(const managed_lock::Locker &locker,
bool force_break_lock, Context *on_finish) {
ldout(m_cct, 10) << dendl;
int r;
{
std::lock_guard l{m_lock};
if (is_state_shutdown()) {
r = -ERESTART;
} else if (is_lock_owner(m_lock)) {
r = -EBUSY;
} else {
on_finish = new C_Tracked(m_async_op_tracker, on_finish);
auto req = managed_lock::BreakRequest<I>::create(
m_ioctx, m_asio_engine, m_oid, locker, m_mode == EXCLUSIVE,
m_blocklist_on_break_lock, m_blocklist_expire_seconds, force_break_lock,
on_finish);
req->send();
return;
}
}
on_finish->complete(r);
}
template <typename I>
int ManagedLock<I>::assert_header_locked() {
ldout(m_cct, 10) << dendl;
librados::ObjectReadOperation op;
{
std::lock_guard locker{m_lock};
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME,
(m_mode == EXCLUSIVE ? ClsLockType::EXCLUSIVE :
ClsLockType::SHARED),
m_cookie,
managed_lock::util::get_watcher_lock_tag());
}
int r = m_ioctx.operate(m_oid, &op, nullptr);
if (r < 0) {
if (r == -EBLOCKLISTED) {
ldout(m_cct, 5) << "client is not lock owner -- client blocklisted"
<< dendl;
} else if (r == -ENOENT) {
ldout(m_cct, 5) << "client is not lock owner -- no lock detected"
<< dendl;
} else if (r == -EBUSY) {
ldout(m_cct, 5) << "client is not lock owner -- owned by different client"
<< dendl;
} else {
lderr(m_cct) << "failed to verify lock ownership: " << cpp_strerror(r)
<< dendl;
}
return r;
}
return 0;
}
template <typename I>
void ManagedLock<I>::shutdown_handler(int r, Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::pre_acquire_lock_handler(Context *on_finish) {
on_finish->complete(0);
}
template <typename I>
void ManagedLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::pre_release_lock_handler(bool shutting_down,
Context *on_finish) {
on_finish->complete(0);
}
template <typename I>
void ManagedLock<I>::post_release_lock_handler(bool shutting_down, int r,
Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
void ManagedLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
on_finish->complete(r);
}
template <typename I>
bool ManagedLock<I>::is_transition_state() const {
switch (m_state) {
case STATE_ACQUIRING:
case STATE_WAITING_FOR_REGISTER:
case STATE_REACQUIRING:
case STATE_RELEASING:
case STATE_PRE_SHUTTING_DOWN:
case STATE_SHUTTING_DOWN:
case STATE_INITIALIZING:
case STATE_WAITING_FOR_LOCK:
case STATE_POST_ACQUIRING:
case STATE_PRE_RELEASING:
return true;
case STATE_UNLOCKED:
case STATE_LOCKED:
case STATE_SHUTDOWN:
case STATE_UNINITIALIZED:
break;
}
return false;
}
template <typename I>
void ManagedLock<I>::append_context(Action action, Context *ctx) {
ceph_assert(ceph_mutex_is_locked(m_lock));
for (auto &action_ctxs : m_actions_contexts) {
if (action == action_ctxs.first) {
if (ctx != nullptr) {
action_ctxs.second.push_back(ctx);
}
return;
}
}
Contexts contexts;
if (ctx != nullptr) {
contexts.push_back(ctx);
}
m_actions_contexts.push_back({action, std::move(contexts)});
}
template <typename I>
void ManagedLock<I>::execute_action(Action action, Context *ctx) {
ceph_assert(ceph_mutex_is_locked(m_lock));
append_context(action, ctx);
if (!is_transition_state()) {
execute_next_action();
}
}
template <typename I>
void ManagedLock<I>::execute_next_action() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
switch (get_active_action()) {
case ACTION_ACQUIRE_LOCK:
case ACTION_TRY_LOCK:
send_acquire_lock();
break;
case ACTION_REACQUIRE_LOCK:
send_reacquire_lock();
break;
case ACTION_RELEASE_LOCK:
send_release_lock();
break;
case ACTION_SHUT_DOWN:
send_shutdown();
break;
default:
ceph_abort();
break;
}
}
template <typename I>
typename ManagedLock<I>::Action ManagedLock<I>::get_active_action() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
return m_actions_contexts.front().first;
}
template <typename I>
void ManagedLock<I>::complete_active_action(State next_state, int r) {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(!m_actions_contexts.empty());
ActionContexts action_contexts(std::move(m_actions_contexts.front()));
m_actions_contexts.pop_front();
m_state = next_state;
m_lock.unlock();
for (auto ctx : action_contexts.second) {
ctx->complete(r);
}
m_lock.lock();
if (!is_transition_state() && !m_actions_contexts.empty()) {
execute_next_action();
}
}
template <typename I>
bool ManagedLock<I>::is_state_shutdown() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
switch (m_state) {
case STATE_PRE_SHUTTING_DOWN:
case STATE_SHUTTING_DOWN:
case STATE_SHUTDOWN:
return true;
default:
break;
}
return (!m_actions_contexts.empty() &&
m_actions_contexts.back().first == ACTION_SHUT_DOWN);
}
template <typename I>
void ManagedLock<I>::send_acquire_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_LOCKED) {
complete_active_action(STATE_LOCKED, 0);
return;
}
ldout(m_cct, 10) << dendl;
uint64_t watch_handle = m_watcher->get_watch_handle();
if (watch_handle == 0) {
if (m_watcher->is_blocklisted()) {
lderr(m_cct) << "watcher not registered - client blocklisted" << dendl;
complete_active_action(STATE_UNLOCKED, -EBLOCKLISTED);
} else {
lderr(m_cct) << "watcher not registered - delaying request" << dendl;
m_state = STATE_WAITING_FOR_REGISTER;
// shut down might race w/ release/re-acquire of the lock
if (is_state_shutdown()) {
complete_active_action(STATE_UNLOCKED, -ERESTART);
}
}
return;
}
m_state = STATE_ACQUIRING;
m_cookie = encode_lock_cookie(watch_handle);
m_work_queue->queue(new LambdaContext([this](int r) {
pre_acquire_lock_handler(create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_pre_acquire_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_pre_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
handle_acquire_lock(r);
return;
}
using managed_lock::AcquireRequest;
AcquireRequest<I>* req = AcquireRequest<I>::create(
m_ioctx, m_watcher, m_asio_engine, m_oid, m_cookie, m_mode == EXCLUSIVE,
m_blocklist_on_break_lock, m_blocklist_expire_seconds,
create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_acquire_lock>(this));
m_work_queue->queue(new C_SendLockRequest<AcquireRequest<I>>(req), 0);
}
template <typename I>
void ManagedLock<I>::handle_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r == -EBUSY || r == -EAGAIN || r == -EROFS) {
ldout(m_cct, 5) << "unable to acquire exclusive lock" << dendl;
} else if (r < 0) {
lderr(m_cct) << "failed to acquire exclusive lock: " << cpp_strerror(r)
<< dendl;
} else {
ldout(m_cct, 5) << "successfully acquired exclusive lock" << dendl;
}
m_post_next_state = (r < 0 ? STATE_UNLOCKED : STATE_LOCKED);
m_work_queue->queue(new LambdaContext([this, r](int ret) {
post_acquire_lock_handler(r, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_post_acquire_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_post_acquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
if (r < 0 && m_post_next_state == STATE_LOCKED) {
// release_lock without calling pre and post handlers
revert_to_unlock_state(r);
} else if (r != -ECANCELED) {
// fail the lock request
complete_active_action(m_post_next_state, r);
}
}
template <typename I>
void ManagedLock<I>::revert_to_unlock_state(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
using managed_lock::ReleaseRequest;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, m_cookie,
new LambdaContext([this, r](int ret) {
std::lock_guard locker{m_lock};
ceph_assert(ret == 0);
complete_active_action(STATE_UNLOCKED, r);
}));
m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req));
}
template <typename I>
void ManagedLock<I>::send_reacquire_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state != STATE_LOCKED) {
complete_active_action(m_state, 0);
return;
}
ldout(m_cct, 10) << dendl;
m_state = STATE_REACQUIRING;
uint64_t watch_handle = m_watcher->get_watch_handle();
if (watch_handle == 0) {
// watch (re)failed while recovering
lderr(m_cct) << "aborting reacquire due to invalid watch handle"
<< dendl;
// treat double-watch failure as a lost lock and invoke the
// release/acquire handlers
release_acquire_lock();
complete_active_action(STATE_LOCKED, 0);
return;
}
m_new_cookie = encode_lock_cookie(watch_handle);
if (m_cookie == m_new_cookie && m_blocklist_on_break_lock) {
ldout(m_cct, 10) << "skipping reacquire since cookie still valid"
<< dendl;
auto ctx = create_context_callback<
ManagedLock, &ManagedLock<I>::handle_no_op_reacquire_lock>(this);
post_reacquire_lock_handler(0, ctx);
return;
}
auto ctx = create_context_callback<
ManagedLock, &ManagedLock<I>::handle_reacquire_lock>(this);
ctx = new LambdaContext([this, ctx](int r) {
post_reacquire_lock_handler(r, ctx);
});
using managed_lock::ReacquireRequest;
ReacquireRequest<I>* req = ReacquireRequest<I>::create(m_ioctx, m_oid,
m_cookie, m_new_cookie, m_mode == EXCLUSIVE, ctx);
m_work_queue->queue(new C_SendLockRequest<ReacquireRequest<I>>(req));
}
template <typename I>
void ManagedLock<I>::handle_reacquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_REACQUIRING);
if (r < 0) {
if (r == -EOPNOTSUPP) {
ldout(m_cct, 10) << "updating lock is not supported" << dendl;
} else {
lderr(m_cct) << "failed to update lock cookie: " << cpp_strerror(r)
<< dendl;
}
release_acquire_lock();
} else {
m_cookie = m_new_cookie;
}
complete_active_action(STATE_LOCKED, 0);
}
template <typename I>
void ManagedLock<I>::handle_no_op_reacquire_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
ceph_assert(m_state == STATE_REACQUIRING);
ceph_assert(r >= 0);
complete_active_action(STATE_LOCKED, 0);
}
template <typename I>
void ManagedLock<I>::release_acquire_lock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
if (!is_state_shutdown()) {
// queue a release and re-acquire of the lock since cookie cannot
// be updated on older OSDs
execute_action(ACTION_RELEASE_LOCK, nullptr);
ceph_assert(!m_actions_contexts.empty());
ActionContexts &action_contexts(m_actions_contexts.front());
// reacquire completes when the request lock completes
Contexts contexts;
std::swap(contexts, action_contexts.second);
if (contexts.empty()) {
execute_action(ACTION_ACQUIRE_LOCK, nullptr);
} else {
for (auto ctx : contexts) {
execute_action(ACTION_ACQUIRE_LOCK, ctx);
}
}
}
}
template <typename I>
void ManagedLock<I>::send_release_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_UNLOCKED) {
complete_active_action(STATE_UNLOCKED, 0);
return;
}
ldout(m_cct, 10) << dendl;
m_state = STATE_PRE_RELEASING;
m_work_queue->queue(new LambdaContext([this](int r) {
pre_release_lock_handler(false, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_pre_release_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_pre_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
{
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_PRE_RELEASING);
m_state = STATE_RELEASING;
}
if (r < 0) {
handle_release_lock(r);
return;
}
using managed_lock::ReleaseRequest;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, m_cookie,
create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_release_lock>(this));
m_work_queue->queue(new C_SendLockRequest<ReleaseRequest<I>>(req), 0);
}
template <typename I>
void ManagedLock<I>::handle_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
ceph_assert(m_state == STATE_RELEASING);
if (r >= 0 || r == -EBLOCKLISTED || r == -ENOENT) {
m_cookie = "";
m_post_next_state = STATE_UNLOCKED;
} else {
m_post_next_state = STATE_LOCKED;
}
m_work_queue->queue(new LambdaContext([this, r](int ret) {
post_release_lock_handler(false, r, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_post_release_lock>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_post_release_lock(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
complete_active_action(m_post_next_state, r);
}
template <typename I>
void ManagedLock<I>::send_shutdown() {
ldout(m_cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
if (m_state == STATE_UNLOCKED) {
m_state = STATE_SHUTTING_DOWN;
m_work_queue->queue(new LambdaContext([this](int r) {
shutdown_handler(r, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_shutdown>(this));
}));
return;
}
ceph_assert(m_state == STATE_LOCKED);
m_state = STATE_PRE_SHUTTING_DOWN;
m_lock.unlock();
m_work_queue->queue(new C_ShutDownRelease(this), 0);
m_lock.lock();
}
template <typename I>
void ManagedLock<I>::handle_shutdown(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
wait_for_tracked_ops(r);
}
template <typename I>
void ManagedLock<I>::send_shutdown_release() {
ldout(m_cct, 10) << dendl;
std::lock_guard locker{m_lock};
m_work_queue->queue(new LambdaContext([this](int r) {
pre_release_lock_handler(true, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_shutdown_pre_release>(this));
}));
}
template <typename I>
void ManagedLock<I>::handle_shutdown_pre_release(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
std::string cookie;
{
std::lock_guard locker{m_lock};
cookie = m_cookie;
ceph_assert(m_state == STATE_PRE_SHUTTING_DOWN);
m_state = STATE_SHUTTING_DOWN;
}
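// a pre-release failure (r) takes precedence over the release result (l)
// when invoking the post-release handler below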
using managed_lock::ReleaseRequest;
ReleaseRequest<I>* req = ReleaseRequest<I>::create(m_ioctx, m_watcher,
m_work_queue, m_oid, cookie,
new LambdaContext([this, r](int l) {
int rst = r < 0 ? r : l;
post_release_lock_handler(true, rst, create_context_callback<
ManagedLock<I>, &ManagedLock<I>::handle_shutdown_post_release>(this));
}));
req->send();
}
template <typename I>
void ManagedLock<I>::handle_shutdown_post_release(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
wait_for_tracked_ops(r);
}
template <typename I>
void ManagedLock<I>::wait_for_tracked_ops(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
Context *ctx = new LambdaContext([this, r](int ret) {
complete_shutdown(r);
});
m_async_op_tracker.wait_for_ops(ctx);
}
template <typename I>
void ManagedLock<I>::complete_shutdown(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
if (r < 0) {
lderr(m_cct) << "failed to shut down lock: " << cpp_strerror(r)
<< dendl;
}
ActionContexts action_contexts;
{
std::lock_guard locker{m_lock};
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_actions_contexts.size() == 1);
action_contexts = std::move(m_actions_contexts.front());
m_actions_contexts.pop_front();
m_state = STATE_SHUTDOWN;
}
// expect to be destroyed after firing callback
for (auto ctx : action_contexts.second) {
ctx->complete(r);
}
}
} // namespace librbd
template class librbd::ManagedLock<librbd::ImageCtx>;
| 23,787 | 26.660465 | 87 |
cc
|
null |
ceph-main/src/librbd/ManagedLock.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MANAGED_LOCK_H
#define CEPH_LIBRBD_MANAGED_LOCK_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "cls/lock/cls_lock_types.h"
#include "librbd/watcher/Types.h"
#include "librbd/managed_lock/Types.h"
#include <list>
#include <string>
#include <utility>
namespace librbd {
struct AsioEngine;
struct ImageCtx;
namespace asio { struct ContextWQ; }
namespace managed_lock { struct Locker; }
template <typename ImageCtxT = librbd::ImageCtx>
class ManagedLock {
private:
typedef watcher::Traits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::Watcher Watcher;
public:
static ManagedLock *create(librados::IoCtx& ioctx,
AsioEngine& asio_engine,
const std::string& oid, Watcher *watcher,
managed_lock::Mode mode,
bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds) {
return new ManagedLock(ioctx, asio_engine, oid, watcher, mode,
blocklist_on_break_lock, blocklist_expire_seconds);
}
void destroy() {
delete this;
}
ManagedLock(librados::IoCtx& ioctx, AsioEngine& asio_engine,
const std::string& oid, Watcher *watcher,
managed_lock::Mode mode, bool blocklist_on_break_lock,
uint32_t blocklist_expire_seconds);
virtual ~ManagedLock();
bool is_lock_owner() const;
void shut_down(Context *on_shutdown);
void acquire_lock(Context *on_acquired);
void try_acquire_lock(Context *on_acquired);
void release_lock(Context *on_released);
void reacquire_lock(Context *on_reacquired);
void get_locker(managed_lock::Locker *locker, Context *on_finish);
void break_lock(const managed_lock::Locker &locker, bool force_break_lock,
Context *on_finish);
int assert_header_locked();
bool is_shutdown() const {
std::lock_guard l{m_lock};
return is_state_shutdown();
}
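  // Illustrative usage sketch (not part of this header): a caller drives the
  // lock through the public API above, with all completions delivered
  // asynchronously via the supplied Contexts:
  //
  //   auto lock = ManagedLock<>::create(ioctx, asio_engine, oid, watcher,
  //                                     managed_lock::EXCLUSIVE,
  //                                     true /* blocklist on break */,
  //                                     30 /* blocklist expiry (s) */);
  //   lock->acquire_lock(on_acquired);
  //   // ... perform work while is_lock_owner() ...
  //   lock->shut_down(on_shutdown);   // then lock->destroy()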
protected:
mutable ceph::mutex m_lock;
inline void set_state_uninitialized() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNLOCKED);
m_state = STATE_UNINITIALIZED;
}
inline void set_state_initializing() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_UNINITIALIZED);
m_state = STATE_INITIALIZING;
}
inline void set_state_unlocked() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_INITIALIZING || m_state == STATE_RELEASING);
m_state = STATE_UNLOCKED;
}
inline void set_state_waiting_for_lock() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_WAITING_FOR_LOCK;
}
inline void set_state_post_acquiring() {
ceph_assert(ceph_mutex_is_locked(m_lock));
ceph_assert(m_state == STATE_ACQUIRING);
m_state = STATE_POST_ACQUIRING;
}
bool is_state_shutdown() const;
inline bool is_state_acquiring() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_ACQUIRING;
}
inline bool is_state_post_acquiring() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_POST_ACQUIRING;
}
inline bool is_state_releasing() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_RELEASING;
}
inline bool is_state_pre_releasing() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_PRE_RELEASING;
}
inline bool is_state_locked() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_LOCKED;
}
inline bool is_state_waiting_for_lock() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return m_state == STATE_WAITING_FOR_LOCK;
}
inline bool is_action_acquire_lock() const {
ceph_assert(ceph_mutex_is_locked(m_lock));
return get_active_action() == ACTION_ACQUIRE_LOCK;
}
virtual void shutdown_handler(int r, Context *on_finish);
virtual void pre_acquire_lock_handler(Context *on_finish);
virtual void post_acquire_lock_handler(int r, Context *on_finish);
virtual void pre_release_lock_handler(bool shutting_down,
Context *on_finish);
virtual void post_release_lock_handler(bool shutting_down, int r,
Context *on_finish);
virtual void post_reacquire_lock_handler(int r, Context *on_finish);
void execute_next_action();
private:
  /**
   * @verbatim
   *
   *       <start>
   *          |
   *          |
   *          v           (acquire_lock)
   *       UNLOCKED -----------------------------------------> ACQUIRING
   *          ^                                                    |
   *          |                                                    |
   *       RELEASING                                               |
   *          |                                                    |
   *          |                                                    |
   *          |          (release_lock)                            v
   *       PRE_RELEASING <----------------------------------------- LOCKED
   *
   *       <LOCKED state>
   *          |
   *          v
   *       REACQUIRING -------------------------------------> <finish>
   *          .                                                   ^
   *          .                                                   |
   *          . . . > <RELEASE action> ---> <ACQUIRE action> ---/
   *
   *       <UNLOCKED/LOCKED states>
   *          |
   *          |
   *          v
   *       PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
   *
   * @endverbatim
   */
enum State {
STATE_UNINITIALIZED,
STATE_INITIALIZING,
STATE_UNLOCKED,
STATE_LOCKED,
STATE_ACQUIRING,
STATE_POST_ACQUIRING,
STATE_WAITING_FOR_REGISTER,
STATE_WAITING_FOR_LOCK,
STATE_REACQUIRING,
STATE_PRE_RELEASING,
STATE_RELEASING,
STATE_PRE_SHUTTING_DOWN,
STATE_SHUTTING_DOWN,
STATE_SHUTDOWN,
};
enum Action {
ACTION_TRY_LOCK,
ACTION_ACQUIRE_LOCK,
ACTION_REACQUIRE_LOCK,
ACTION_RELEASE_LOCK,
ACTION_SHUT_DOWN
};
typedef std::list<Context *> Contexts;
typedef std::pair<Action, Contexts> ActionContexts;
typedef std::list<ActionContexts> ActionsContexts;
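  // an ActionContexts entry pairs a pending action with the contexts to
  // complete when it finishes; repeated actions append their contexts to
  // the existing entry, and entries execute strictly in FIFO order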
struct C_ShutDownRelease : public Context {
ManagedLock *lock;
C_ShutDownRelease(ManagedLock *lock)
: lock(lock) {
}
void finish(int r) override {
lock->send_shutdown_release();
}
};
librados::IoCtx& m_ioctx;
CephContext *m_cct;
AsioEngine& m_asio_engine;
asio::ContextWQ* m_work_queue;
std::string m_oid;
Watcher *m_watcher;
managed_lock::Mode m_mode;
bool m_blocklist_on_break_lock;
uint32_t m_blocklist_expire_seconds;
std::string m_cookie;
std::string m_new_cookie;
State m_state;
State m_post_next_state;
ActionsContexts m_actions_contexts;
AsyncOpTracker m_async_op_tracker;
bool is_lock_owner(ceph::mutex &lock) const;
bool is_transition_state() const;
void append_context(Action action, Context *ctx);
void execute_action(Action action, Context *ctx);
Action get_active_action() const;
void complete_active_action(State next_state, int r);
void send_acquire_lock();
void handle_pre_acquire_lock(int r);
void handle_acquire_lock(int r);
void handle_no_op_reacquire_lock(int r);
void handle_post_acquire_lock(int r);
void revert_to_unlock_state(int r);
void send_reacquire_lock();
void handle_reacquire_lock(int r);
void release_acquire_lock();
void send_release_lock();
void handle_pre_release_lock(int r);
void handle_release_lock(int r);
void handle_post_release_lock(int r);
void send_shutdown();
void handle_shutdown(int r);
void send_shutdown_release();
void handle_shutdown_pre_release(int r);
void handle_shutdown_post_release(int r);
void wait_for_tracked_ops(int r);
void complete_shutdown(int r);
};
} // namespace librbd
extern template class librbd::ManagedLock<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MANAGED_LOCK_H
| 8,221 | 29.339483 | 78 |
h
|
null |
ceph-main/src/librbd/MirroringWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/MirroringWatcher.h"
#include "include/rbd_types.h"
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"
#include "librbd/watcher/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::MirroringWatcher: "
namespace librbd {
using namespace mirroring_watcher;
using namespace watcher;
using librbd::util::create_rados_callback;
namespace {
static const uint64_t NOTIFY_TIMEOUT_MS = 5000;
} // anonymous namespace
template <typename I>
MirroringWatcher<I>::MirroringWatcher(librados::IoCtx &io_ctx,
asio::ContextWQ *work_queue)
: Watcher(io_ctx, work_queue, RBD_MIRRORING) {
}
template <typename I>
int MirroringWatcher<I>::notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode) {
C_SaferCond ctx;
notify_mode_updated(io_ctx, mirror_mode, &ctx);
return ctx.wait();
}
template <typename I>
void MirroringWatcher<I>::notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ModeUpdatedPayload{mirror_mode}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS,
nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
int MirroringWatcher<I>::notify_image_updated(
librados::IoCtx &io_ctx, cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id, const std::string &global_image_id) {
C_SaferCond ctx;
notify_image_updated(io_ctx, mirror_image_state, image_id, global_image_id,
&ctx);
return ctx.wait();
}
template <typename I>
void MirroringWatcher<I>::notify_image_updated(
librados::IoCtx &io_ctx, cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id, const std::string &global_image_id,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ImageUpdatedPayload{
mirror_image_state, image_id, global_image_id}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_MIRRORING, comp, bl, NOTIFY_TIMEOUT_MS,
nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void MirroringWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) {
CephContext *cct = this->m_cct;
ldout(cct, 15) << ": notify_id=" << notify_id << ", "
<< "handle=" << handle << dendl;
NotifyMessage notify_message;
try {
auto iter = bl.cbegin();
decode(notify_message, iter);
} catch (const buffer::error &err) {
lderr(cct) << ": error decoding image notification: " << err.what()
<< dendl;
Context *ctx = new C_NotifyAck(this, notify_id, handle);
ctx->complete(0);
return;
}
apply_visitor(watcher::util::HandlePayloadVisitor<MirroringWatcher<I>>(
this, notify_id, handle), notify_message.payload);
}
template <typename I>
bool MirroringWatcher<I>::handle_payload(const ModeUpdatedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << ": mode updated: " << payload.mirror_mode << dendl;
handle_mode_updated(payload.mirror_mode);
return true;
}
template <typename I>
bool MirroringWatcher<I>::handle_payload(const ImageUpdatedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << ": image state updated" << dendl;
handle_image_updated(payload.mirror_image_state, payload.image_id,
payload.global_image_id);
return true;
}
template <typename I>
bool MirroringWatcher<I>::handle_payload(const UnknownPayload &payload,
Context *on_notify_ack) {
return true;
}
} // namespace librbd
template class librbd::MirroringWatcher<librbd::ImageCtx>;
| 4,595 | 31.13986 | 81 |
cc
|
null |
ceph-main/src/librbd/MirroringWatcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_MIRRORING_WATCHER_H
#define CEPH_LIBRBD_MIRRORING_WATCHER_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/mirroring_watcher/Types.h"
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher {
namespace util {
template <typename> struct HandlePayloadVisitor;
}
}
template <typename ImageCtxT = librbd::ImageCtx>
class MirroringWatcher : public Watcher {
friend struct watcher::util::HandlePayloadVisitor<MirroringWatcher<ImageCtxT>>;
public:
MirroringWatcher(librados::IoCtx &io_ctx, asio::ContextWQ *work_queue);
static int notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode);
static void notify_mode_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorMode mirror_mode,
Context *on_finish);
static int notify_image_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id,
const std::string &global_image_id);
static void notify_image_updated(librados::IoCtx &io_ctx,
cls::rbd::MirrorImageState mirror_image_state,
const std::string &image_id,
const std::string &global_image_id,
Context *on_finish);
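  // pure virtual hooks implemented by concrete subclasses to react to
  // mirroring mode and per-image state change notifications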
virtual void handle_mode_updated(cls::rbd::MirrorMode mirror_mode) = 0;
virtual void handle_image_updated(cls::rbd::MirrorImageState state,
const std::string &image_id,
const std::string &global_image_id) = 0;
private:
bool handle_payload(const mirroring_watcher::ModeUpdatedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const mirroring_watcher::ImageUpdatedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const mirroring_watcher::UnknownPayload &payload,
Context *on_notify_ack);
void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) override;
};
} // namespace librbd
extern template class librbd::MirroringWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_MIRRORING_WATCHER_H
| 2,638 | 37.808824 | 81 |
h
|
null |
ceph-main/src/librbd/ObjectMap.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ObjectMap.h"
#include "librbd/BlockGuard.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/object_map/RefreshRequest.h"
#include "librbd/object_map/ResizeRequest.h"
#include "librbd/object_map/SnapshotCreateRequest.h"
#include "librbd/object_map/SnapshotRemoveRequest.h"
#include "librbd/object_map/SnapshotRollbackRequest.h"
#include "librbd/object_map/UnlockRequest.h"
#include "librbd/object_map/UpdateRequest.h"
#include "librbd/Utils.h"
#include "common/dout.h"
#include "common/errno.h"
#include "include/rados/librados.hpp"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "include/stringify.h"
#include "osdc/Striper.h"
#include <sstream>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ObjectMap: " << this << " " << __func__ \
<< ": "
namespace librbd {
using librbd::util::create_context_callback;
template <typename I>
ObjectMap<I>::ObjectMap(I &image_ctx, uint64_t snap_id)
: RefCountedObject(image_ctx.cct),
m_image_ctx(image_ctx), m_snap_id(snap_id),
m_lock(ceph::make_shared_mutex(util::unique_lock_name("librbd::ObjectMap::lock", this))),
m_update_guard(new UpdateGuard(m_image_ctx.cct)) {
}
template <typename I>
ObjectMap<I>::~ObjectMap() {
delete m_update_guard;
}
template <typename I>
int ObjectMap<I>::aio_remove(librados::IoCtx &io_ctx, const std::string &image_id,
librados::AioCompletion *c) {
return io_ctx.aio_remove(object_map_name(image_id, CEPH_NOSNAP), c);
}
template <typename I>
std::string ObjectMap<I>::object_map_name(const std::string &image_id,
uint64_t snap_id) {
std::string oid(RBD_OBJECT_MAP_PREFIX + image_id);
if (snap_id != CEPH_NOSNAP) {
std::stringstream snap_suffix;
snap_suffix << "." << std::setfill('0') << std::setw(16) << std::hex
<< snap_id;
oid += snap_suffix.str();
}
return oid;
}
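// e.g. image id "abc" at snap id 5 maps to
// "rbd_object_map.abc.0000000000000005" (snap id as 16 zero-padded hex
// digits), while the HEAD (CEPH_NOSNAP) object map omits the suffix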
template <typename I>
bool ObjectMap<I>::is_compatible(const file_layout_t& layout, uint64_t size) {
uint64_t object_count = Striper::get_num_objects(layout, size);
return (object_count <= cls::rbd::MAX_OBJECT_MAP_OBJECT_COUNT);
}
template <typename I>
uint8_t ObjectMap<I>::operator[](uint64_t object_no) const
{
std::shared_lock locker{m_lock};
ceph_assert(object_no < m_object_map.size());
return m_object_map[object_no];
}
template <typename I>
bool ObjectMap<I>::object_may_exist(uint64_t object_no) const
{
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock)) {
return true;
}
bool flags_set;
int r = m_image_ctx.test_flags(m_image_ctx.snap_id,
RBD_FLAG_OBJECT_MAP_INVALID,
m_image_ctx.image_lock, &flags_set);
if (r < 0 || flags_set) {
return true;
}
uint8_t state = (*this)[object_no];
bool exists = (state != OBJECT_NONEXISTENT);
ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r=" << exists
<< dendl;
return exists;
}
template <typename I>
bool ObjectMap<I>::object_may_not_exist(uint64_t object_no) const
{
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
// Fall back to default logic if object map is disabled or invalid
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock)) {
return true;
}
bool flags_set;
int r = m_image_ctx.test_flags(m_image_ctx.snap_id,
RBD_FLAG_OBJECT_MAP_INVALID,
m_image_ctx.image_lock, &flags_set);
if (r < 0 || flags_set) {
return true;
}
uint8_t state = (*this)[object_no];
bool nonexistent = (state != OBJECT_EXISTS && state != OBJECT_EXISTS_CLEAN);
ldout(m_image_ctx.cct, 20) << "object_no=" << object_no << " r="
<< nonexistent << dendl;
return nonexistent;
}
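// An update is a no-op (and is skipped) when the state already matches,
// when a PENDING request targets a NONEXISTENT object, or when a
// NONEXISTENT request targets an object that is not already PENDING.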
template <typename I>
bool ObjectMap<I>::update_required(const ceph::BitVector<2>::Iterator& it,
uint8_t new_state) {
ceph_assert(ceph_mutex_is_locked(m_lock));
uint8_t state = *it;
if ((state == new_state) ||
(new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
(new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING)) {
return false;
}
return true;
}
template <typename I>
void ObjectMap<I>::open(Context *on_finish) {
Context *ctx = create_context_callback<Context>(on_finish, this);
auto req = object_map::RefreshRequest<I>::create(
m_image_ctx, &m_lock, &m_object_map, m_snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::close(Context *on_finish) {
Context *ctx = create_context_callback<Context>(on_finish, this);
if (m_snap_id != CEPH_NOSNAP) {
m_image_ctx.op_work_queue->queue(ctx, 0);
return;
}
ctx = new LambdaContext([this, ctx](int r) {
auto req = object_map::UnlockRequest<I>::create(m_image_ctx, ctx);
req->send();
});
// ensure the block guard for aio updates is empty before unlocking
// the object map
m_async_op_tracker.wait_for_ops(ctx);
}
template <typename I>
bool ObjectMap<I>::set_object_map(ceph::BitVector<2> &target_object_map) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
std::unique_lock locker{m_lock};
m_object_map = target_object_map;
return true;
}
template <typename I>
void ObjectMap<I>::rollback(uint64_t snap_id, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
std::unique_lock locker{m_lock};
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::SnapshotRollbackRequest *req =
new object_map::SnapshotRollbackRequest(m_image_ctx, snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::snapshot_add(uint64_t snap_id, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(snap_id != CEPH_NOSNAP);
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::SnapshotCreateRequest *req =
new object_map::SnapshotCreateRequest(m_image_ctx, &m_lock, &m_object_map,
snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::snapshot_remove(uint64_t snap_id, Context *on_finish) {
ceph_assert(ceph_mutex_is_wlocked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(snap_id != CEPH_NOSNAP);
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::SnapshotRemoveRequest *req =
new object_map::SnapshotRemoveRequest(m_image_ctx, &m_lock, &m_object_map,
snap_id, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::aio_save(Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
std::shared_lock locker{m_lock};
librados::ObjectWriteOperation op;
if (m_snap_id == CEPH_NOSNAP) {
rados::cls::lock::assert_locked(&op, RBD_LOCK_NAME, ClsLockType::EXCLUSIVE, "", "");
}
cls_client::object_map_save(&op, m_object_map);
Context *ctx = create_context_callback<Context>(on_finish, this);
std::string oid(object_map_name(m_image_ctx.id, m_snap_id));
librados::AioCompletion *comp = util::create_rados_callback(ctx);
int r = m_image_ctx.md_ctx.aio_operate(oid, comp, &op);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void ObjectMap<I>::aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock));
ceph_assert(m_image_ctx.image_watcher != NULL);
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
Context *ctx = create_context_callback<Context>(on_finish, this);
object_map::ResizeRequest *req = new object_map::ResizeRequest(
m_image_ctx, &m_lock, &m_object_map, m_snap_id, new_size,
default_object_state, ctx);
req->send();
}
template <typename I>
void ObjectMap<I>::detained_aio_update(UpdateOperation &&op) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert(ceph_mutex_is_wlocked(m_lock));
BlockGuardCell *cell;
int r = m_update_guard->detain({op.start_object_no, op.end_object_no},
&op, &cell);
if (r < 0) {
lderr(cct) << "failed to detain object map update: " << cpp_strerror(r)
<< dendl;
m_image_ctx.op_work_queue->queue(op.on_finish, r);
m_async_op_tracker.finish_op();
return;
} else if (r > 0) {
ldout(cct, 20) << "detaining object map update due to in-flight update: "
<< "start=" << op.start_object_no << ", "
<< "end=" << op.end_object_no << ", "
<< (op.current_state ?
stringify(static_cast<uint32_t>(*op.current_state)) :
"")
<< "->" << static_cast<uint32_t>(op.new_state) << dendl;
return;
}
ldout(cct, 20) << "in-flight update cell: " << cell << dendl;
Context *on_finish = op.on_finish;
Context *ctx = new LambdaContext([this, cell, on_finish](int r) {
handle_detained_aio_update(cell, r, on_finish);
});
aio_update(CEPH_NOSNAP, op.start_object_no, op.end_object_no, op.new_state,
op.current_state, op.parent_trace, op.ignore_enoent, ctx);
}
template <typename I>
void ObjectMap<I>::handle_detained_aio_update(BlockGuardCell *cell, int r,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "cell=" << cell << ", r=" << r << dendl;
typename UpdateGuard::BlockOperations block_ops;
m_update_guard->release(cell, &block_ops);
{
std::shared_lock image_locker{m_image_ctx.image_lock};
std::unique_lock locker{m_lock};
for (auto &op : block_ops) {
detained_aio_update(std::move(op));
}
}
on_finish->complete(r);
m_async_op_tracker.finish_op();
}
template <typename I>
void ObjectMap<I>::aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.image_lock));
ceph_assert((m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0);
ceph_assert(m_image_ctx.image_watcher != nullptr);
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
ceph_assert(start_object_no < end_object_no);
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "start=" << start_object_no << ", "
<< "end=" << end_object_no << ", "
<< (current_state ?
stringify(static_cast<uint32_t>(*current_state)) : "")
<< "->" << static_cast<uint32_t>(new_state) << dendl;
if (snap_id == CEPH_NOSNAP) {
ceph_assert(ceph_mutex_is_wlocked(m_lock));
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
ldout(cct, 20) << "skipping update of invalid object map" << dendl;
m_image_ctx.op_work_queue->queue(on_finish, 0);
return;
}
auto it = m_object_map.begin() + start_object_no;
auto end_it = m_object_map.begin() + end_object_no;
for (; it != end_it; ++it) {
if (update_required(it, new_state)) {
break;
}
}
if (it == end_it) {
ldout(cct, 20) << "object map update not required" << dendl;
m_image_ctx.op_work_queue->queue(on_finish, 0);
return;
}
}
auto req = object_map::UpdateRequest<I>::create(
m_image_ctx, &m_lock, &m_object_map, snap_id, start_object_no,
end_object_no, new_state, current_state, parent_trace, ignore_enoent,
on_finish);
req->send();
}
} // namespace librbd
template class librbd::ObjectMap<librbd::ImageCtx>;
| 13,119 | 33.435696 | 93 |
cc
|
null |
ceph-main/src/librbd/ObjectMap.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OBJECT_MAP_H
#define CEPH_LIBRBD_OBJECT_MAP_H
#include "include/int_types.h"
#include "include/fs_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/object_map_types.h"
#include "common/AsyncOpTracker.h"
#include "common/bit_vector.hpp"
#include "common/RefCountedObj.h"
#include "librbd/Utils.h"
#include <boost/optional.hpp>
class Context;
namespace ZTracer { struct Trace; }
namespace librbd {
template <typename Op> class BlockGuard;
struct BlockGuardCell;
class ImageCtx;
template <typename ImageCtxT = ImageCtx>
class ObjectMap : public RefCountedObject {
public:
static ObjectMap *create(ImageCtxT &image_ctx, uint64_t snap_id) {
return new ObjectMap(image_ctx, snap_id);
}
ObjectMap(ImageCtxT &image_ctx, uint64_t snap_id);
~ObjectMap();
static int aio_remove(librados::IoCtx &io_ctx, const std::string &image_id, librados::AioCompletion *c);
static std::string object_map_name(const std::string &image_id,
uint64_t snap_id);
static bool is_compatible(const file_layout_t& layout, uint64_t size);
uint8_t operator[](uint64_t object_no) const;
inline uint64_t size() const {
std::shared_lock locker{m_lock};
return m_object_map.size();
}
inline void set_state(uint64_t object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state) {
std::unique_lock locker{m_lock};
ceph_assert(object_no < m_object_map.size());
if (current_state && m_object_map[object_no] != *current_state) {
return;
}
m_object_map[object_no] = new_state;
}
void open(Context *on_finish);
void close(Context *on_finish);
bool set_object_map(ceph::BitVector<2> &target_object_map);
bool object_may_exist(uint64_t object_no) const;
bool object_may_not_exist(uint64_t object_no) const;
void aio_save(Context *on_finish);
void aio_resize(uint64_t new_size, uint8_t default_object_state,
Context *on_finish);
template <typename T, void(T::*MF)(int) = &T::complete>
bool aio_update(uint64_t snap_id, uint64_t start_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
return aio_update<T, MF>(snap_id, start_object_no, start_object_no + 1,
new_state, current_state, parent_trace,
ignore_enoent, callback_object);
}
template <typename T, void(T::*MF)(int) = &T::complete>
bool aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
T *callback_object) {
ceph_assert(start_object_no < end_object_no);
std::unique_lock locker{m_lock};
if (snap_id == CEPH_NOSNAP) {
end_object_no = std::min(end_object_no, m_object_map.size());
if (start_object_no >= end_object_no) {
return false;
}
auto it = m_object_map.begin() + start_object_no;
auto end_it = m_object_map.begin() + end_object_no;
for (; it != end_it; ++it) {
if (update_required(it, new_state)) {
break;
}
}
if (it == end_it) {
return false;
}
m_async_op_tracker.start_op();
UpdateOperation update_operation(start_object_no, end_object_no,
new_state, current_state, parent_trace,
ignore_enoent,
util::create_context_callback<T, MF>(
callback_object));
detained_aio_update(std::move(update_operation));
} else {
aio_update(snap_id, start_object_no, end_object_no, new_state,
current_state, parent_trace, ignore_enoent,
util::create_context_callback<T, MF>(callback_object));
}
return true;
}
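  // Illustrative sketch: flag a single object as EXISTS on the HEAD revision
  // (the caller is assumed to hold image_lock and own the exclusive lock);
  // `ctx` here is any object exposing a complete(int) method:
  //
  //   object_map->aio_update<Context>(CEPH_NOSNAP, object_no, OBJECT_EXISTS,
  //                                   {}, trace, false, ctx);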
void rollback(uint64_t snap_id, Context *on_finish);
void snapshot_add(uint64_t snap_id, Context *on_finish);
void snapshot_remove(uint64_t snap_id, Context *on_finish);
private:
struct UpdateOperation {
uint64_t start_object_no;
uint64_t end_object_no;
uint8_t new_state;
boost::optional<uint8_t> current_state;
ZTracer::Trace parent_trace;
bool ignore_enoent;
Context *on_finish;
UpdateOperation(uint64_t start_object_no, uint64_t end_object_no,
uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace,
bool ignore_enoent, Context *on_finish)
: start_object_no(start_object_no), end_object_no(end_object_no),
new_state(new_state), current_state(current_state),
parent_trace(parent_trace), ignore_enoent(ignore_enoent),
on_finish(on_finish) {
}
};
typedef BlockGuard<UpdateOperation> UpdateGuard;
ImageCtxT &m_image_ctx;
uint64_t m_snap_id;
mutable ceph::shared_mutex m_lock;
ceph::BitVector<2> m_object_map;
AsyncOpTracker m_async_op_tracker;
UpdateGuard *m_update_guard = nullptr;
void detained_aio_update(UpdateOperation &&update_operation);
void handle_detained_aio_update(BlockGuardCell *cell, int r,
Context *on_finish);
void aio_update(uint64_t snap_id, uint64_t start_object_no,
uint64_t end_object_no, uint8_t new_state,
const boost::optional<uint8_t> ¤t_state,
const ZTracer::Trace &parent_trace, bool ignore_enoent,
Context *on_finish);
bool update_required(const ceph::BitVector<2>::Iterator &it,
uint8_t new_state);
};
} // namespace librbd
extern template class librbd::ObjectMap<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OBJECT_MAP_H
| 6,104 | 33.6875 | 106 |
h
|
null |
ceph-main/src/librbd/Operations.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Operations.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/perf_counters.h"
#include "osdc/Striper.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ObjectMap.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/Utils.h"
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/journal/StandardPolicy.h"
#include "librbd/operation/DisableFeaturesRequest.h"
#include "librbd/operation/EnableFeaturesRequest.h"
#include "librbd/operation/FlattenRequest.h"
#include "librbd/operation/MetadataRemoveRequest.h"
#include "librbd/operation/MetadataSetRequest.h"
#include "librbd/operation/MigrateRequest.h"
#include "librbd/operation/ObjectMapIterate.h"
#include "librbd/operation/RebuildObjectMapRequest.h"
#include "librbd/operation/RenameRequest.h"
#include "librbd/operation/ResizeRequest.h"
#include "librbd/operation/SnapshotCreateRequest.h"
#include "librbd/operation/SnapshotProtectRequest.h"
#include "librbd/operation/SnapshotRemoveRequest.h"
#include "librbd/operation/SnapshotRenameRequest.h"
#include "librbd/operation/SnapshotRollbackRequest.h"
#include "librbd/operation/SnapshotUnprotectRequest.h"
#include "librbd/operation/SnapshotLimitRequest.h"
#include "librbd/operation/SparsifyRequest.h"
#include <set>
#include <boost/bind/bind.hpp>
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Operations: "
namespace librbd {
using namespace boost::placeholders;
namespace {
std::ostream &operator<<(std::ostream &out, const Operation &op) {
switch (op) {
case OPERATION_CHECK_OBJECT_MAP:
out << "check object map";
break;
case OPERATION_FLATTEN:
out << "flatten";
break;
case OPERATION_METADATA_UPDATE:
out << "metadata update";
break;
case OPERATION_MIGRATE:
out << "migrate";
break;
case OPERATION_REBUILD_OBJECT_MAP:
out << "rebuild object map";
break;
case OPERATION_RENAME:
out << "rename";
break;
case OPERATION_RESIZE:
out << "resize";
break;
case OPERATION_SNAP_CREATE:
out << "snap create";
break;
case OPERATION_SNAP_PROTECT:
out << "snap protect";
break;
case OPERATION_SNAP_REMOVE:
out << "snap remove";
break;
case OPERATION_SNAP_RENAME:
out << "snap rename";
break;
case OPERATION_SNAP_ROLLBACK:
out << "snap rollback";
break;
case OPERATION_SNAP_UNPROTECT:
out << "snap unprotect";
break;
case OPERATION_SPARSIFY:
out << "sparsify";
break;
case OPERATION_UPDATE_FEATURES:
out << "update features";
break;
default:
ceph_abort();
break;
}
return out;
}
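// Completes in two phases: first with the wrapped operation's result
// (sending a header update notification on success), then again once peers
// acknowledge -- or time out on -- the notification.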
template <typename I>
struct C_NotifyUpdate : public Context {
I &image_ctx;
Context *on_finish;
bool notified = false;
C_NotifyUpdate(I &image_ctx, Context *on_finish)
: image_ctx(image_ctx), on_finish(on_finish) {
}
void complete(int r) override {
CephContext *cct = image_ctx.cct;
if (notified) {
if (r == -ETIMEDOUT) {
// don't fail the op if a peer fails to get the update notification
lderr(cct) << "update notification timed-out" << dendl;
r = 0;
} else if (r == -ENOENT) {
// don't fail if header is missing (e.g. v1 image rename)
ldout(cct, 5) << "update notification on missing header" << dendl;
r = 0;
} else if (r < 0) {
lderr(cct) << "update notification failed: " << cpp_strerror(r)
<< dendl;
}
Context::complete(r);
return;
}
if (r < 0) {
// op failed -- no need to send update notification
Context::complete(r);
return;
}
notified = true;
image_ctx.notify_update(this);
}
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename I>
struct C_InvokeAsyncRequest : public Context {
  /**
   * @verbatim
   *
   *               <start>
   *                  |
   *    . . . . . .   |   . . . . . . . . . . . . . . . . . .
   *    .         .   |   .                                 .
   *    .         v   v   v                                 .
   *    .       REFRESH_IMAGE (skip if not needed)          .
   *    .             |                                     .
   *    .             v                                     .
   *    .       ACQUIRE_LOCK (skip if exclusive lock        .
   *    .             |       disabled or has lock)         .
   *    .             |                                     .
   *    .   /--------/ \--------\   . . . . . . . . . . . . .
   *    .   |                   |   .
   *    .   v                   v   .
   *  LOCAL_REQUEST       REMOTE_REQUEST
   *        |                   |
   *        |                   |
   *        \--------\ /--------/
   *                  |
   *                  v
   *              <finish>
   *
   * @endverbatim
   */
I &image_ctx;
Operation operation;
exclusive_lock::OperationRequestType request_type;
bool permit_snapshot;
boost::function<void(Context*)> local;
boost::function<void(Context*)> remote;
std::set<int> filter_error_codes;
Context *on_finish;
bool request_lock = false;
C_InvokeAsyncRequest(I &image_ctx, Operation operation,
exclusive_lock::OperationRequestType request_type,
bool permit_snapshot,
const boost::function<void(Context*)>& local,
const boost::function<void(Context*)>& remote,
const std::set<int> &filter_error_codes,
Context *on_finish)
: image_ctx(image_ctx), operation(operation), request_type(request_type),
permit_snapshot(permit_snapshot), local(local), remote(remote),
filter_error_codes(filter_error_codes), on_finish(on_finish) {
}
void send() {
send_refresh_image();
}
void send_refresh_image() {
if (!image_ctx.state->is_refresh_required()) {
send_acquire_exclusive_lock();
return;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_refresh_image>(this);
image_ctx.state->refresh(ctx);
}
void handle_refresh_image(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r < 0) {
lderr(cct) << "failed to refresh image: " << cpp_strerror(r) << dendl;
complete(r);
return;
}
send_acquire_exclusive_lock();
}
void send_acquire_exclusive_lock() {
// context can complete before owner_lock is unlocked
ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
owner_lock.lock_shared();
image_ctx.image_lock.lock_shared();
if (image_ctx.read_only ||
(!permit_snapshot && image_ctx.snap_id != CEPH_NOSNAP)) {
image_ctx.image_lock.unlock_shared();
owner_lock.unlock_shared();
complete(-EROFS);
return;
}
image_ctx.image_lock.unlock_shared();
if (image_ctx.exclusive_lock == nullptr) {
send_local_request();
owner_lock.unlock_shared();
return;
} else if (image_ctx.image_watcher == nullptr) {
owner_lock.unlock_shared();
complete(-EROFS);
return;
}
if (image_ctx.exclusive_lock->is_lock_owner() &&
image_ctx.exclusive_lock->accept_request(request_type, nullptr)) {
send_local_request();
owner_lock.unlock_shared();
return;
}
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_async_context_callback(
image_ctx, util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_acquire_exclusive_lock>(
this, image_ctx.exclusive_lock));
if (request_lock) {
// current lock owner doesn't support op -- try to perform
// the action locally
request_lock = false;
image_ctx.exclusive_lock->acquire_lock(ctx);
} else {
image_ctx.exclusive_lock->try_acquire_lock(ctx);
}
owner_lock.unlock_shared();
}
void handle_acquire_exclusive_lock(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r < 0) {
complete(r == -EBLOCKLISTED ? -EBLOCKLISTED : -EROFS);
return;
}
// context can complete before owner_lock is unlocked
ceph::shared_mutex &owner_lock(image_ctx.owner_lock);
owner_lock.lock_shared();
if (image_ctx.exclusive_lock == nullptr ||
image_ctx.exclusive_lock->is_lock_owner()) {
send_local_request();
owner_lock.unlock_shared();
return;
}
send_remote_request();
owner_lock.unlock_shared();
}
void send_remote_request() {
ceph_assert(ceph_mutex_is_locked(image_ctx.owner_lock));
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_async_context_callback(
image_ctx, util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_remote_request>(this));
remote(ctx);
}
void handle_remote_request(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
if (r == -EOPNOTSUPP) {
ldout(cct, 5) << operation << " not supported by current lock owner"
<< dendl;
request_lock = true;
send_refresh_image();
return;
} else if (r != -ETIMEDOUT && r != -ERESTART) {
image_ctx.state->handle_update_notification();
complete(r);
return;
}
ldout(cct, 5) << operation << " timed out notifying lock owner" << dendl;
send_refresh_image();
}
void send_local_request() {
auto ctx = new LambdaContext(
[this](int r) {
if (r == -ERESTART) {
image_ctx.operations->finish_op(operation, r);
send_refresh_image();
return;
}
execute_local_request();
});
image_ctx.operations->start_op(operation, ctx);
}
void execute_local_request() {
std::shared_lock owner_locker{image_ctx.owner_lock};
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << dendl;
Context *ctx = util::create_async_context_callback(
image_ctx, util::create_context_callback<
C_InvokeAsyncRequest<I>,
&C_InvokeAsyncRequest<I>::handle_local_request>(this));
local(ctx);
}
void handle_local_request(int r) {
CephContext *cct = image_ctx.cct;
ldout(cct, 20) << __func__ << ": r=" << r << dendl;
image_ctx.operations->finish_op(operation, r);
if (r == -ERESTART) {
send_refresh_image();
return;
}
complete(r);
}
void finish(int r) override {
if (filter_error_codes.count(r) != 0) {
r = 0;
}
on_finish->complete(r);
}
};
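// Returns whether an object map mismatch requires invalidating the map; a
// downgrade from EXISTS/EXISTS_CLEAN to NONEXISTENT or PENDING is tolerated
// without invalidation.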
template <typename I>
bool needs_invalidate(I& image_ctx, uint64_t object_no,
uint8_t current_state, uint8_t new_state) {
if ( (current_state == OBJECT_EXISTS ||
current_state == OBJECT_EXISTS_CLEAN) &&
(new_state == OBJECT_NONEXISTENT ||
new_state == OBJECT_PENDING)) {
return false;
}
return true;
}
} // anonymous namespace
template <typename I>
Operations<I>::Operations(I &image_ctx)
: m_image_ctx(image_ctx),
m_queue_lock(ceph::make_mutex(
util::unique_lock_name("librbd::Operations::m_queue_lock",
this))) {
}
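// Operations of the same type are serialized: start_op runs the context
// immediately if no instance of that op is in flight, otherwise it queues
// the context behind the current one; finish_op later pops the next entry.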
template <typename I>
void Operations<I>::start_op(Operation op, Context *ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << __func__ << ": " << op << " " << ctx << dendl;
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
bool requires_lock = m_image_ctx.exclusive_lock != nullptr;
ctx = util::create_async_context_callback(
m_image_ctx, new LambdaContext(
[this, op, requires_lock, ctx](int r) {
Context *finish_op_ctx = nullptr;
if (requires_lock && r == 0) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
std::shared_lock image_locker{m_image_ctx.image_lock};
auto exclusive_lock = m_image_ctx.exclusive_lock;
if (exclusive_lock == nullptr ||
(finish_op_ctx = exclusive_lock->start_op(&r)) == nullptr) {
ldout(m_image_ctx.cct, 20) << "lock owner lost, restarting"
<< dendl;
r = -ERESTART;
}
}
ldout(m_image_ctx.cct, 20) << "start " << op << " " << ctx << dendl;
ctx->complete(r);
if (finish_op_ctx != nullptr) {
finish_op_ctx->complete(0);
}
}));
std::unique_lock locker{m_queue_lock};
if (!m_in_flight_ops.insert(op).second) {
ldout(cct, 20) << __func__ << ": " << op << " in flight" << dendl;
m_queued_ops[op].push_back(ctx);
return;
}
ctx->complete(0);
}
template <typename I>
void Operations<I>::finish_op(Operation op, int r) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << __func__ << ": " << op << " r=" << r << dendl;
std::unique_lock locker{m_queue_lock};
auto &queue = m_queued_ops[op];
if (queue.empty()) {
m_in_flight_ops.erase(op);
return;
}
auto ctx = queue.front();
queue.pop_front();
  // propagate -ERESTART to all queued ops
ctx->complete(r == -ERESTART ? r : 0);
}
template <typename I>
int Operations<I>::flatten(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "flatten" << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.parent_md.spec.pool_id == -1) {
lderr(cct) << "image has no parent" << dendl;
return -EINVAL;
}
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_FLATTEN,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_flatten, this,
boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_flatten,
m_image_ctx.image_watcher, request_id,
boost::ref(prog_ctx), _1));
if (r < 0 && r != -EINVAL) {
return r;
}
ldout(cct, 20) << "flatten finished" << dendl;
return 0;
}
template <typename I>
void Operations<I>::execute_flatten(ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "flatten" << dendl;
if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
// can't flatten a non-clone
if (m_image_ctx.parent_md.spec.pool_id == -1) {
lderr(cct) << "image has no parent" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP) {
lderr(cct) << "snapshots cannot be flattened" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
}
uint64_t crypto_header_objects = Striper::get_num_objects(
m_image_ctx.layout,
m_image_ctx.get_area_size(io::ImageArea::CRYPTO_HEADER));
uint64_t raw_overlap;
int r = m_image_ctx.get_parent_overlap(CEPH_NOSNAP, &raw_overlap);
ceph_assert(r == 0);
auto overlap = m_image_ctx.reduce_parent_overlap(raw_overlap, false);
uint64_t data_overlap_objects = Striper::get_num_objects(
m_image_ctx.layout,
(overlap.second == io::ImageArea::DATA ? overlap.first : 0));
m_image_ctx.image_lock.unlock_shared();
// leave encryption header flattening to format-specific handler
operation::FlattenRequest<I> *req = new operation::FlattenRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
crypto_header_objects, data_overlap_objects, prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::rebuild_object_map(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 10) << "rebuild_object_map" << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_REBUILD_OBJECT_MAP,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
boost::bind(&Operations<I>::execute_rebuild_object_map,
this, boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_rebuild_object_map,
m_image_ctx.image_watcher, request_id,
boost::ref(prog_ctx), _1));
ldout(cct, 10) << "rebuild object map finished" << dendl;
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Operations<I>::execute_rebuild_object_map(ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
lderr(cct) << "image must support object-map feature" << dendl;
on_finish->complete(-EINVAL);
return;
}
operation::RebuildObjectMapRequest<I> *req =
new operation::RebuildObjectMapRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::check_object_map(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
r = invoke_async_request(OPERATION_CHECK_OBJECT_MAP,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
boost::bind(&Operations<I>::check_object_map, this,
boost::ref(prog_ctx), _1),
[this](Context *c) {
m_image_ctx.op_work_queue->queue(c, -EOPNOTSUPP);
});
return r;
}
template <typename I>
void Operations<I>::object_map_iterate(ProgressContext &prog_ctx,
operation::ObjectIterateWork<I> handle_mismatch,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
if (!m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP)) {
on_finish->complete(-EINVAL);
return;
}
operation::ObjectMapIterateRequest<I> *req =
new operation::ObjectMapIterateRequest<I>(m_image_ctx, on_finish,
prog_ctx, handle_mismatch);
req->send();
}
template <typename I>
void Operations<I>::check_object_map(ProgressContext &prog_ctx,
Context *on_finish) {
object_map_iterate(prog_ctx, needs_invalidate, on_finish);
}
template <typename I>
int Operations<I>::rename(const char *dstname) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dstname
<< dendl;
int r = librbd::detect_format(m_image_ctx.md_ctx, dstname, NULL, NULL);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error checking for existing image called "
<< dstname << ":" << cpp_strerror(r) << dendl;
return r;
}
if (r == 0) {
lderr(cct) << "rbd image " << dstname << " already exists" << dendl;
return -EEXIST;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_RENAME,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_rename, this,
dstname, _1),
boost::bind(&ImageWatcher<I>::notify_rename,
m_image_ctx.image_watcher, request_id,
dstname, _1));
if (r < 0 && r != -EEXIST) {
return r;
}
m_image_ctx.set_image_name(dstname);
return 0;
}
template <typename I>
void Operations<I>::execute_rename(const std::string &dest_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": dest_name=" << dest_name
<< dendl;
if (m_image_ctx.old_format) {
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.name == dest_name) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
    // unregister the watch before the rename and re-register it afterwards
on_finish = new C_NotifyUpdate<I>(m_image_ctx, on_finish);
on_finish = new LambdaContext([this, on_finish](int r) {
if (m_image_ctx.old_format) {
m_image_ctx.image_watcher->set_oid(m_image_ctx.header_oid);
}
m_image_ctx.image_watcher->register_watch(on_finish);
});
on_finish = new LambdaContext([this, dest_name, on_finish](int r) {
std::shared_lock owner_locker{m_image_ctx.owner_lock};
operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
m_image_ctx, on_finish, dest_name);
req->send();
});
m_image_ctx.image_watcher->unregister_watch(on_finish);
return;
}
operation::RenameRequest<I> *req = new operation::RenameRequest<I>(
m_image_ctx, on_finish, dest_name);
req->send();
}
template <typename I>
int Operations<I>::resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx) {
CephContext *cct = m_image_ctx.cct;
m_image_ctx.image_lock.lock_shared();
uint64_t raw_size = io::util::area_to_raw_offset(m_image_ctx, size,
io::ImageArea::DATA);
ldout(cct, 5) << this << " " << __func__
<< ": size=" << size
<< " raw_size=" << m_image_ctx.size
<< " new_raw_size=" << raw_size << dendl;
m_image_ctx.image_lock.unlock_shared();
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP) &&
!ObjectMap<>::is_compatible(m_image_ctx.layout, raw_size)) {
lderr(cct) << "New size not compatible with object map" << dendl;
return -EINVAL;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_RESIZE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_resize, this,
size, allow_shrink, boost::ref(prog_ctx), _1, 0),
boost::bind(&ImageWatcher<I>::notify_resize,
m_image_ctx.image_watcher, request_id,
size, allow_shrink, boost::ref(prog_ctx), _1));
m_image_ctx.perfcounter->inc(l_librbd_resize);
ldout(cct, 2) << "resize finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
Context *on_finish,
uint64_t journal_op_tid) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
m_image_ctx.image_lock.lock_shared();
uint64_t raw_size = io::util::area_to_raw_offset(m_image_ctx, size,
io::ImageArea::DATA);
ldout(cct, 5) << this << " " << __func__
<< ": size=" << size
<< " raw_size=" << m_image_ctx.size
<< " new_raw_size=" << raw_size << dendl;
if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only ||
m_image_ctx.operations_disabled) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
} else if (m_image_ctx.test_features(RBD_FEATURE_OBJECT_MAP,
m_image_ctx.image_lock) &&
!ObjectMap<>::is_compatible(m_image_ctx.layout, raw_size)) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::ResizeRequest<I> *req = new operation::ResizeRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), raw_size,
allow_shrink, prog_ctx, journal_op_tid, false);
req->send();
}
template <typename I>
int Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext &prog_ctx) {
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
snap_create(snap_namespace, snap_name, flags, prog_ctx, &ctx);
r = ctx.wait();
if (r < 0) {
return r;
}
m_image_ctx.perfcounter->inc(l_librbd_snap_create);
return r;
}
template <typename I>
void Operations<I>::snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext &prog_ctx, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
uint64_t request_id = util::reserve_async_request_id();
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
m_image_ctx, OPERATION_SNAP_CREATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true,
boost::bind(&Operations<I>::execute_snap_create, this, snap_namespace, snap_name,
_1, 0, flags, boost::ref(prog_ctx)),
boost::bind(&ImageWatcher<I>::notify_snap_create, m_image_ctx.image_watcher,
request_id, snap_namespace, snap_name, flags,
boost::ref(prog_ctx), _1),
{-EEXIST}, on_finish);
req->send();
}
template <typename I>
void Operations<I>::execute_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name,
Context *on_finish,
uint64_t journal_op_tid,
uint64_t flags,
ProgressContext &prog_ctx) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::SnapshotCreateRequest<I> *req =
new operation::SnapshotCreateRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
snap_namespace, snap_name, journal_op_tid, flags, prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
ProgressContext& prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }
C_SaferCond cond_ctx;
{
std::shared_lock owner_locker{m_image_ctx.owner_lock};
{
// need to drop image_lock before invalidating cache
std::shared_lock image_locker{m_image_ctx.image_lock};
if (!m_image_ctx.snap_exists) {
return -ENOENT;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP || m_image_ctx.read_only) {
return -EROFS;
}
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(cct) << "No such snapshot found." << dendl;
return -ENOENT;
}
}
r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false);
if (r < 0) {
return r;
}
Context *ctx = new LambdaContext(
[this, ctx=&cond_ctx](int r) {
m_image_ctx.operations->finish_op(OPERATION_SNAP_ROLLBACK, r);
ctx->complete(r);
});
ctx = new LambdaContext(
[this, snap_namespace, snap_name, &prog_ctx, ctx](int r) {
if (r < 0) {
ctx->complete(r);
return;
}
std::shared_lock l{m_image_ctx.owner_lock};
execute_snap_rollback(snap_namespace, snap_name, prog_ctx, ctx);
});
m_image_ctx.operations->start_op(OPERATION_SNAP_ROLLBACK, ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
m_image_ctx.perfcounter->inc(l_librbd_snap_rollback);
return r;
}
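// Note the construction order above: the inner LambdaContext (built
// first) pairs finish_op() with completion of cond_ctx, while the outer
// one re-acquires owner_lock and invokes execute_snap_rollback() once
// start_op() admits the operation. Chaining them this way keeps the
// OPERATION_SNAP_ROLLBACK bracket balanced on every path, including
// early failure.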
template <typename I>
void Operations<I>::execute_snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
ProgressContext& prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
lderr(cct) << "No such snapshot found." << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
uint64_t new_size = m_image_ctx.get_image_size(snap_id);
m_image_ctx.image_lock.unlock_shared();
// async mode used for journal replay
operation::SnapshotRollbackRequest<I> *request =
new operation::SnapshotRollbackRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name,
snap_id, new_size, prog_ctx);
request->send();
}
template <typename I>
int Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
snap_remove(snap_namespace, snap_name, &ctx);
r = ctx.wait();
if (r < 0) {
return r;
}
m_image_ctx.perfcounter->inc(l_librbd_snap_remove);
return 0;
}
template <typename I>
void Operations<I>::snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
on_finish->complete(-EROFS);
return;
}
// quickly filter out duplicate ops
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(snap_namespace, snap_name) == CEPH_NOSNAP) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
bool proxy_op = ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0 ||
(m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0);
m_image_ctx.image_lock.unlock_shared();
if (proxy_op) {
uint64_t request_id = util::reserve_async_request_id();
auto request_type = exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL;
if (cls::rbd::get_snap_namespace_type(snap_namespace) ==
cls::rbd::SNAPSHOT_NAMESPACE_TYPE_TRASH) {
request_type = exclusive_lock::OPERATION_REQUEST_TYPE_TRASH_SNAP_REMOVE;
}
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(
m_image_ctx, OPERATION_SNAP_REMOVE, request_type, true,
boost::bind(&Operations<I>::execute_snap_remove, this, snap_namespace,
snap_name, _1),
boost::bind(&ImageWatcher<I>::notify_snap_remove,
m_image_ctx.image_watcher, request_id, snap_namespace,
snap_name, _1),
{-ENOENT}, on_finish);
req->send();
} else {
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_remove(snap_namespace, snap_name, on_finish);
}
}
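// Snapshot removal is proxied to the exclusive lock owner only when
// fast-diff or journaling is enabled ('proxy_op' above), since those
// features require the lock owner to update the object map or journal.
// Otherwise the request executes locally under a shared owner_lock.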
template <typename I>
void Operations<I>::execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
{
if ((m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
}
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
uint64_t snap_id = m_image_ctx.get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
    lderr(m_image_ctx.cct) << "no such snapshot found" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-ENOENT);
return;
}
bool is_protected;
int r = m_image_ctx.is_snap_protected(snap_id, &is_protected);
if (r < 0) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_protected) {
lderr(m_image_ctx.cct) << "snapshot is protected" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EBUSY);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::SnapshotRemoveRequest<I> *req =
new operation::SnapshotRemoveRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
snap_namespace, snap_name, snap_id);
req->send();
}
template <typename I>
int Operations<I>::snap_rename(const char *srcname, const char *dstname) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": "
<< "snap_name=" << srcname << ", "
<< "new_snap_name=" << dstname << dendl;
snapid_t snap_id;
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
  if (r < 0) {
    return r;
  }
{
std::shared_lock l{m_image_ctx.image_lock};
snap_id = m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), srcname);
if (snap_id == CEPH_NOSNAP) {
return -ENOENT;
}
if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(), dstname) != CEPH_NOSNAP) {
return -EEXIST;
}
}
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_SNAP_RENAME,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_snap_rename,
this, snap_id, dstname, _1),
boost::bind(&ImageWatcher<I>::notify_snap_rename,
m_image_ctx.image_watcher, request_id,
snap_id, dstname, _1));
if (r < 0 && r != -EEXIST) {
return r;
}
} else {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_rename(snap_id, dstname, &cond_ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
}
m_image_ctx.perfcounter->inc(l_librbd_snap_rename);
return 0;
}
template <typename I>
void Operations<I>::execute_snap_rename(const uint64_t src_snap_id,
const std::string &dest_snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if ((m_image_ctx.features & RBD_FEATURE_JOURNALING) != 0) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.get_snap_id(cls::rbd::UserSnapshotNamespace(),
dest_snap_name) != CEPH_NOSNAP) {
    // Renaming is only supported for snapshots in the user namespace.
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EEXIST);
return;
}
m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": "
<< "snap_id=" << src_snap_id << ", "
<< "new_snap_name=" << dest_snap_name << dendl;
operation::SnapshotRenameRequest<I> *req =
new operation::SnapshotRenameRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), src_snap_id,
dest_snap_name);
req->send();
}
template <typename I>
int Operations<I>::snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
return -EROFS;
}
if (!m_image_ctx.test_features(RBD_FEATURE_LAYERING)) {
lderr(cct) << "image must support layering" << dendl;
return -ENOSYS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
bool is_protected;
r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_protected);
if (r < 0) {
return r;
}
if (is_protected) {
return -EBUSY;
}
}
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_SNAP_PROTECT,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_snap_protect,
this, snap_namespace, snap_name, _1),
boost::bind(&ImageWatcher<I>::notify_snap_protect,
m_image_ctx.image_watcher, request_id,
snap_namespace, snap_name, _1));
if (r < 0 && r != -EBUSY) {
return r;
}
} else {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_protect(snap_namespace, snap_name, &cond_ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
void Operations<I>::execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
bool is_protected;
int r = m_image_ctx.is_snap_protected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_protected);
if (r < 0) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_protected) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EBUSY);
return;
}
m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
operation::SnapshotProtectRequest<I> *request =
new operation::SnapshotProtectRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name);
request->send();
}
template <typename I>
int Operations<I>::snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
bool is_unprotected;
r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_unprotected);
if (r < 0) {
return r;
}
if (is_unprotected) {
return -EINVAL;
}
}
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_SNAP_UNPROTECT,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true,
boost::bind(&Operations<I>::execute_snap_unprotect,
this, snap_namespace, snap_name, _1),
boost::bind(&ImageWatcher<I>::notify_snap_unprotect,
m_image_ctx.image_watcher, request_id,
snap_namespace, snap_name, _1));
if (r < 0 && r != -EINVAL) {
return r;
}
} else {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
execute_snap_unprotect(snap_namespace, snap_name, &cond_ctx);
}
r = cond_ctx.wait();
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
void Operations<I>::execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
if (m_image_ctx.test_features(RBD_FEATURE_JOURNALING)) {
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
}
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
bool is_unprotected;
int r = m_image_ctx.is_snap_unprotected(m_image_ctx.get_snap_id(snap_namespace, snap_name),
&is_unprotected);
if (r < 0) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(r);
return;
} else if (is_unprotected) {
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
m_image_ctx.image_lock.unlock_shared();
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": snap_name=" << snap_name
<< dendl;
operation::SnapshotUnprotectRequest<I> *request =
new operation::SnapshotUnprotectRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), snap_namespace, snap_name);
request->send();
}
template <typename I>
int Operations<I>::snap_set_limit(uint64_t limit) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit << dendl;
if (m_image_ctx.read_only) {
return -EROFS;
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond limit_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true);
if (r < 0) {
return r;
}
execute_snap_set_limit(limit, &limit_ctx);
}
r = limit_ctx.wait();
return r;
}
template <typename I>
void Operations<I>::execute_snap_set_limit(const uint64_t limit,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": limit=" << limit
<< dendl;
operation::SnapshotLimitRequest<I> *request =
new operation::SnapshotLimitRequest<I>(m_image_ctx, on_finish, limit);
request->send();
}
template <typename I>
int Operations<I>::update_features(uint64_t features, bool enabled) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": features=" << features
<< ", enabled=" << enabled << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
} else if (m_image_ctx.old_format) {
lderr(cct) << "old-format images do not support features" << dendl;
return -EINVAL;
}
uint64_t disable_mask = (RBD_FEATURES_MUTABLE |
RBD_FEATURES_DISABLE_ONLY);
if ((enabled && (features & RBD_FEATURES_MUTABLE) != features) ||
(!enabled && (features & disable_mask) != features) ||
((features & ~RBD_FEATURES_MUTABLE_INTERNAL) != features)) {
lderr(cct) << "cannot update immutable features" << dendl;
return -EINVAL;
}
bool set_object_map = (features & RBD_FEATURE_OBJECT_MAP) == RBD_FEATURE_OBJECT_MAP;
bool set_fast_diff = (features & RBD_FEATURE_FAST_DIFF) == RBD_FEATURE_FAST_DIFF;
bool exist_fast_diff = (m_image_ctx.features & RBD_FEATURE_FAST_DIFF) != 0;
bool exist_object_map = (m_image_ctx.features & RBD_FEATURE_OBJECT_MAP) != 0;
if ((enabled && ((set_object_map && !exist_fast_diff) || (set_fast_diff && !exist_object_map)))
|| (!enabled && (set_object_map && exist_fast_diff))) {
features |= (RBD_FEATURE_OBJECT_MAP | RBD_FEATURE_FAST_DIFF);
}
if (features == 0) {
lderr(cct) << "update requires at least one feature" << dendl;
return -EINVAL;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (enabled && (features & m_image_ctx.features) != 0) {
lderr(cct) << "one or more requested features are already enabled"
<< dendl;
return -EINVAL;
}
if (!enabled && (features & ~m_image_ctx.features) != 0) {
lderr(cct) << "one or more requested features are already disabled"
<< dendl;
return -EINVAL;
}
}
// if disabling journaling, avoid attempting to open the journal
// when acquiring the exclusive lock in case the journal is corrupt
bool disabling_journal = false;
if (!enabled && ((features & RBD_FEATURE_JOURNALING) != 0)) {
std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.set_journal_policy(new journal::DisabledPolicy());
disabling_journal = true;
}
BOOST_SCOPE_EXIT_ALL( (this)(disabling_journal) ) {
if (disabling_journal) {
std::unique_lock image_locker{m_image_ctx.image_lock};
m_image_ctx.set_journal_policy(
new journal::StandardPolicy<I>(&m_image_ctx));
}
};
// The journal options are not passed to the lock owner in the
// update features request. Therefore, if journaling is being
// enabled, the lock should be locally acquired instead of
// attempting to send the request to the peer.
if (enabled && (features & RBD_FEATURE_JOURNALING) != 0) {
C_SaferCond cond_ctx;
{
std::shared_lock owner_lock{m_image_ctx.owner_lock};
r = prepare_image_update(exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
true);
if (r < 0) {
return r;
}
execute_update_features(features, enabled, &cond_ctx, 0);
}
r = cond_ctx.wait();
} else {
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_UPDATE_FEATURES,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_update_features,
this, features, enabled, _1, 0),
boost::bind(&ImageWatcher<I>::notify_update_features,
m_image_ctx.image_watcher, request_id,
features, enabled, _1));
}
ldout(cct, 2) << "update_features finished" << dendl;
return r;
}
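// Worked example of the object-map/fast-diff pairing above (feature bit
// values from include/rbd/features.h: RBD_FEATURE_OBJECT_MAP == 0x8,
// RBD_FEATURE_FAST_DIFF == 0x10): requesting enable of fast-diff on an
// image that lacks the object map expands 'features' to 0x10 | 0x8 ==
// 0x18, so both features are enabled in one request; likewise, disabling
// the object map while fast-diff exists drags fast-diff along with it.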
template <typename I>
void Operations<I>::execute_update_features(uint64_t features, bool enabled,
Context *on_finish,
uint64_t journal_op_tid) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": features=" << features
<< ", enabled=" << enabled << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
if (enabled) {
operation::EnableFeaturesRequest<I> *req =
new operation::EnableFeaturesRequest<I>(
m_image_ctx, on_finish, journal_op_tid, features);
req->send();
} else {
operation::DisableFeaturesRequest<I> *req =
new operation::DisableFeaturesRequest<I>(
m_image_ctx, on_finish, journal_op_tid, features, false);
req->send();
}
}
template <typename I>
int Operations<I>::metadata_set(const std::string &key,
const std::string &value) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
<< value << dendl;
std::string config_key;
bool config_override = util::is_metadata_config_override(key, &config_key);
if (config_override) {
// validate config setting
if (!librbd::api::Config<I>::is_option_name(&m_image_ctx, config_key)) {
lderr(cct) << "validation for " << key
<< " failed: not allowed image level override" << dendl;
return -EINVAL;
}
int r = ConfigProxy{false}.set_val(config_key.c_str(), value);
if (r < 0) {
return r;
}
}
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_METADATA_UPDATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_metadata_set,
this, key, value, _1),
boost::bind(&ImageWatcher<I>::notify_metadata_set,
m_image_ctx.image_watcher, request_id,
key, value, _1));
if (config_override && r >= 0) {
// apply new config key immediately
r = m_image_ctx.state->refresh_if_required();
}
ldout(cct, 20) << "metadata_set finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_metadata_set(const std::string &key,
const std::string &value,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << ", value="
<< value << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
operation::MetadataSetRequest<I> *request =
new operation::MetadataSetRequest<I>(m_image_ctx,
new C_NotifyUpdate<I>(m_image_ctx, on_finish),
key, value);
request->send();
}
template <typename I>
int Operations<I>::metadata_remove(const std::string &key) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
std::string value;
r = cls_client::metadata_get(&m_image_ctx.md_ctx, m_image_ctx.header_oid, key, &value);
  if (r < 0) {
    return r;
  }
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_METADATA_UPDATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_metadata_remove,
this, key, _1),
boost::bind(&ImageWatcher<I>::notify_metadata_remove,
m_image_ctx.image_watcher, request_id,
key, _1));
if (r == -ENOENT) {
r = 0;
}
std::string config_key;
if (util::is_metadata_config_override(key, &config_key) && r >= 0) {
// apply new config key immediately
r = m_image_ctx.state->refresh_if_required();
}
ldout(cct, 20) << "metadata_remove finished" << dendl;
return r;
}
template <typename I>
void Operations<I>::execute_metadata_remove(const std::string &key,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
CephContext *cct = m_image_ctx.cct;
ldout(cct, 5) << this << " " << __func__ << ": key=" << key << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
operation::MetadataRemoveRequest<I> *request =
new operation::MetadataRemoveRequest<I>(
m_image_ctx,
new C_NotifyUpdate<I>(m_image_ctx, on_finish), key);
request->send();
}
template <typename I>
int Operations<I>::migrate(ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "migrate" << dendl;
int r = m_image_ctx.state->refresh_if_required();
if (r < 0) {
return r;
}
if (m_image_ctx.read_only) {
return -EROFS;
}
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_image_ctx.migration_info.empty()) {
lderr(cct) << "image has no migrating parent" << dendl;
return -EINVAL;
}
}
uint64_t request_id = util::reserve_async_request_id();
r = invoke_async_request(OPERATION_MIGRATE,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_migrate, this,
boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_migrate,
m_image_ctx.image_watcher, request_id,
boost::ref(prog_ctx), _1));
if (r < 0 && r != -EINVAL) {
return r;
}
ldout(cct, 20) << "migrate finished" << dendl;
return 0;
}
template <typename I>
void Operations<I>::execute_migrate(ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "migrate" << dendl;
if (m_image_ctx.read_only || m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.lock_shared();
if (m_image_ctx.migration_info.empty()) {
lderr(cct) << "image has no migrating parent" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EINVAL);
return;
}
if (m_image_ctx.snap_id != CEPH_NOSNAP) {
lderr(cct) << "snapshots cannot be migrated" << dendl;
m_image_ctx.image_lock.unlock_shared();
on_finish->complete(-EROFS);
return;
}
m_image_ctx.image_lock.unlock_shared();
operation::MigrateRequest<I> *req = new operation::MigrateRequest<I>(
m_image_ctx, new C_NotifyUpdate<I>(m_image_ctx, on_finish), prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::sparsify(size_t sparse_size, ProgressContext &prog_ctx) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "sparsify" << dendl;
if (sparse_size < 4096 || sparse_size > m_image_ctx.get_object_size() ||
(sparse_size & (sparse_size - 1)) != 0) {
lderr(cct) << "sparse size should be power of two not less than 4096"
<< " and not larger image object size" << dendl;
return -EINVAL;
}
uint64_t request_id = util::reserve_async_request_id();
int r = invoke_async_request(OPERATION_SPARSIFY,
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL,
false,
boost::bind(&Operations<I>::execute_sparsify,
this, sparse_size,
boost::ref(prog_ctx), _1),
boost::bind(&ImageWatcher<I>::notify_sparsify,
m_image_ctx.image_watcher,
request_id, sparse_size,
boost::ref(prog_ctx), _1));
if (r < 0 && r != -EINVAL) {
return r;
}
ldout(cct, 20) << "resparsify finished" << dendl;
return 0;
}
template <typename I>
void Operations<I>::execute_sparsify(size_t sparse_size,
ProgressContext &prog_ctx,
Context *on_finish) {
ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
ceph_assert(m_image_ctx.exclusive_lock == nullptr ||
m_image_ctx.exclusive_lock->is_lock_owner());
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "sparsify" << dendl;
if (m_image_ctx.operations_disabled) {
on_finish->complete(-EROFS);
return;
}
auto req = new operation::SparsifyRequest<I>(
m_image_ctx, sparse_size, new C_NotifyUpdate<I>(m_image_ctx, on_finish),
prog_ctx);
req->send();
}
template <typename I>
int Operations<I>::prepare_image_update(
exclusive_lock::OperationRequestType request_type, bool request_lock) {
ceph_assert(ceph_mutex_is_rlocked(m_image_ctx.owner_lock));
if (m_image_ctx.image_watcher == nullptr) {
return -EROFS;
}
// need to upgrade to a write lock
C_SaferCond ctx;
m_image_ctx.owner_lock.unlock_shared();
bool attempting_lock = false;
{
std::unique_lock owner_locker{m_image_ctx.owner_lock};
if (m_image_ctx.exclusive_lock != nullptr &&
(!m_image_ctx.exclusive_lock->is_lock_owner() ||
!m_image_ctx.exclusive_lock->accept_request(request_type, nullptr))) {
attempting_lock = true;
m_image_ctx.exclusive_lock->block_requests(0);
if (request_lock) {
m_image_ctx.exclusive_lock->acquire_lock(&ctx);
} else {
m_image_ctx.exclusive_lock->try_acquire_lock(&ctx);
}
}
}
int r = 0;
if (attempting_lock) {
r = ctx.wait();
}
m_image_ctx.owner_lock.lock_shared();
if (attempting_lock && m_image_ctx.exclusive_lock != nullptr) {
m_image_ctx.exclusive_lock->unblock_requests();
}
if (r == -EAGAIN || r == -EBUSY) {
r = 0;
}
if (r < 0) {
return r;
} else if (m_image_ctx.exclusive_lock != nullptr &&
!m_image_ctx.exclusive_lock->is_lock_owner()) {
return m_image_ctx.exclusive_lock->get_unlocked_op_error();
}
return 0;
}
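// prepare_image_update() briefly trades the caller's shared owner_lock
// for a unique one so it can block incoming lock requests and (re)acquire
// the exclusive lock, then downgrades back to shared before returning.
// -EAGAIN/-EBUSY from the acquire attempt are tolerated: in that case the
// remote lock owner is expected to service the operation instead.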
template <typename I>
int Operations<I>::invoke_async_request(
Operation op, exclusive_lock::OperationRequestType request_type,
bool permit_snapshot, const boost::function<void(Context*)>& local_request,
const boost::function<void(Context*)>& remote_request) {
C_SaferCond ctx;
C_InvokeAsyncRequest<I> *req = new C_InvokeAsyncRequest<I>(m_image_ctx, op,
request_type,
permit_snapshot,
local_request,
remote_request,
{}, &ctx);
req->send();
return ctx.wait();
}
} // namespace librbd
template class librbd::Operations<librbd::ImageCtx>;
| 62,994 | 31.388175 | 97 |
cc
|
null |
ceph-main/src/librbd/Operations.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_OPERATIONS_H
#define CEPH_LIBRBD_OPERATIONS_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/operation/ObjectMapIterate.h"
#include <atomic>
#include <string>
#include <list>
#include <map>
#include <set>
#include <boost/function.hpp>
class Context;
namespace librbd {
class ImageCtx;
class ProgressContext;
enum Operation {
OPERATION_CHECK_OBJECT_MAP,
OPERATION_FLATTEN,
OPERATION_METADATA_UPDATE,
OPERATION_MIGRATE,
OPERATION_REBUILD_OBJECT_MAP,
OPERATION_RENAME,
OPERATION_RESIZE,
OPERATION_SNAP_CREATE,
OPERATION_SNAP_PROTECT,
OPERATION_SNAP_REMOVE,
OPERATION_SNAP_RENAME,
OPERATION_SNAP_ROLLBACK,
OPERATION_SNAP_UNPROTECT,
OPERATION_SPARSIFY,
OPERATION_UPDATE_FEATURES,
};
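// start_op()/finish_op() bracket each in-flight maintenance operation;
// the m_in_flight_ops set and m_queued_ops map below suggest that
// requests of the same Operation are queued until the active one
// completes.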
template <typename ImageCtxT = ImageCtx>
class Operations {
public:
Operations(ImageCtxT &image_ctx);
void start_op(enum Operation op, Context *ctx);
void finish_op(enum Operation op, int r);
int flatten(ProgressContext &prog_ctx);
void execute_flatten(ProgressContext &prog_ctx, Context *on_finish);
int rebuild_object_map(ProgressContext &prog_ctx);
void execute_rebuild_object_map(ProgressContext &prog_ctx,
Context *on_finish);
int check_object_map(ProgressContext &prog_ctx);
void check_object_map(ProgressContext &prog_ctx, Context *on_finish);
void object_map_iterate(ProgressContext &prog_ctx,
operation::ObjectIterateWork<ImageCtxT> handle_mismatch,
Context* on_finish);
int rename(const char *dstname);
void execute_rename(const std::string &dest_name, Context *on_finish);
int resize(uint64_t size, bool allow_shrink, ProgressContext& prog_ctx);
void execute_resize(uint64_t size, bool allow_shrink, ProgressContext &prog_ctx,
Context *on_finish, uint64_t journal_op_tid);
int snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext& prog_ctx);
void snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string& snap_name, uint64_t flags,
ProgressContext& prog_ctx, Context *on_finish);
void execute_snap_create(const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &snap_name, Context *on_finish,
uint64_t journal_op_tid, uint64_t flags,
ProgressContext &prog_ctx);
int snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
ProgressContext& prog_ctx);
void execute_snap_rollback(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
ProgressContext& prog_ctx, Context *on_finish);
int snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name,
Context *on_finish);
void execute_snap_remove(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish);
int snap_rename(const char *srcname, const char *dstname);
void execute_snap_rename(const uint64_t src_snap_id,
const std::string &dest_snap_name,
Context *on_finish);
int snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void execute_snap_protect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish);
int snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string& snap_name);
void execute_snap_unprotect(const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &snap_name,
Context *on_finish);
int snap_set_limit(uint64_t limit);
void execute_snap_set_limit(uint64_t limit, Context *on_finish);
int update_features(uint64_t features, bool enabled);
void execute_update_features(uint64_t features, bool enabled,
Context *on_finish, uint64_t journal_op_tid);
int metadata_set(const std::string &key, const std::string &value);
void execute_metadata_set(const std::string &key, const std::string &value,
Context *on_finish);
int metadata_remove(const std::string &key);
void execute_metadata_remove(const std::string &key, Context *on_finish);
int migrate(ProgressContext &prog_ctx);
void execute_migrate(ProgressContext &prog_ctx, Context *on_finish);
int sparsify(size_t sparse_size, ProgressContext &prog_ctx);
void execute_sparsify(size_t sparse_size, ProgressContext &prog_ctx,
Context *on_finish);
int prepare_image_update(exclusive_lock::OperationRequestType request_type,
bool request_lock);
private:
ImageCtxT &m_image_ctx;
mutable ceph::mutex m_queue_lock;
std::set<Operation> m_in_flight_ops;
std::map<Operation, std::list<Context *>> m_queued_ops;
int invoke_async_request(Operation op,
exclusive_lock::OperationRequestType request_type,
bool permit_snapshot,
const boost::function<void(Context*)>& local,
const boost::function<void(Context*)>& remote);
};
} // namespace librbd
extern template class librbd::Operations<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_OPERATIONS_H
| 5,716 | 34.955975 | 82 |
h
|
null |
ceph-main/src/librbd/PluginRegistry.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/PluginRegistry.h"
#include "include/Context.h"
#include "common/dout.h"
#include "librbd/cache/ImageWriteback.h"
#include "librbd/ImageCtx.h"
#include "librbd/plugin/Api.h"
#include <boost/tokenizer.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::PluginRegistry: " \
<< this << " " << __func__ << ": "
namespace librbd {
template <typename I>
PluginRegistry<I>::PluginRegistry(I* image_ctx)
: m_image_ctx(image_ctx), m_plugin_api(std::make_unique<plugin::Api<I>>()),
m_image_writeback(std::make_unique<cache::ImageWriteback<I>>(*image_ctx)) {
}
template <typename I>
PluginRegistry<I>::~PluginRegistry() {
}
template <typename I>
void PluginRegistry<I>::init(const std::string& plugins, Context* on_finish) {
auto cct = m_image_ctx->cct;
auto plugin_registry = cct->get_plugin_registry();
auto gather_ctx = new C_Gather(cct, on_finish);
boost::tokenizer<boost::escaped_list_separator<char>> tokenizer(plugins);
for (auto token : tokenizer) {
ldout(cct, 5) << "attempting to load plugin: " << token << dendl;
auto ctx = gather_ctx->new_sub();
auto plugin = dynamic_cast<plugin::Interface<I>*>(
plugin_registry->get_with_load("librbd", "librbd_" + token));
if (plugin == nullptr) {
lderr(cct) << "failed to load plugin: " << token << dendl;
ctx->complete(-ENOSYS);
break;
}
plugin->init(
m_image_ctx, *m_plugin_api, *m_image_writeback, m_plugin_hook_points, ctx);
}
gather_ctx->activate();
}
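// C_Gather fans the per-plugin init() completions back into 'on_finish';
// a plugin that fails dynamic lookup completes its sub-context with
// -ENOSYS and aborts iteration over the remaining tokens. Illustrative
// call (the image context and plugin name are hypothetical):
//
//   ictx->plugin_registry->init(
//       "parent_cache", new LambdaContext([](int r) {
//         // r == 0 once every listed plugin has initialized
//       }));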
template <typename I>
void PluginRegistry<I>::acquired_exclusive_lock(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto &hook : m_plugin_hook_points) {
auto ctx = gather_ctx->new_sub();
hook->acquired_exclusive_lock(ctx);
}
gather_ctx->activate();
}
template <typename I>
void PluginRegistry<I>::prerelease_exclusive_lock(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto &hook : m_plugin_hook_points) {
auto ctx = gather_ctx->new_sub();
hook->prerelease_exclusive_lock(ctx);
}
gather_ctx->activate();
}
template <typename I>
void PluginRegistry<I>::discard(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
auto gather_ctx = new C_Gather(cct, on_finish);
for (auto &hook : m_plugin_hook_points) {
auto ctx = gather_ctx->new_sub();
hook->discard(ctx);
}
gather_ctx->activate();
}
} // namespace librbd
template class librbd::PluginRegistry<librbd::ImageCtx>;
| 2,788 | 26.343137 | 79 |
cc
|
null |
ceph-main/src/librbd/PluginRegistry.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_PLUGIN_REGISTRY_H
#define CEPH_LIBRBD_PLUGIN_REGISTRY_H
#include "librbd/plugin/Types.h"
#include <memory>
#include <string>
#include <list>
struct Context;
namespace librbd {
struct ImageCtx;
namespace cache {
class ImageWritebackInterface;
}
namespace plugin { template <typename> struct Api; }
template <typename ImageCtxT>
class PluginRegistry {
public:
PluginRegistry(ImageCtxT* image_ctx);
~PluginRegistry();
void init(const std::string& plugins, Context* on_finish);
void acquired_exclusive_lock(Context* on_finish);
void prerelease_exclusive_lock(Context* on_finish);
void discard(Context* on_finish);
private:
ImageCtxT* m_image_ctx;
std::unique_ptr<plugin::Api<ImageCtxT>> m_plugin_api;
std::unique_ptr<cache::ImageWritebackInterface> m_image_writeback;
std::string m_plugins;
plugin::PluginHookPoints m_plugin_hook_points;
};
} // namespace librbd
extern template class librbd::PluginRegistry<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_PLUGIN_REGISTRY_H
| 1,120 | 20.557692 | 70 |
h
|
null |
ceph-main/src/librbd/TaskFinisher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_TASK_FINISHER_H
#define LIBRBD_TASK_FINISHER_H
#include "include/common_fwd.h"
#include "include/Context.h"
#include "common/ceph_context.h"
#include "common/Finisher.h"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include <map>
#include <utility>
namespace librbd {
struct TaskFinisherSingleton {
ceph::mutex m_lock = ceph::make_mutex("librbd::TaskFinisher::m_lock");
SafeTimer *m_safe_timer;
Finisher *m_finisher;
static TaskFinisherSingleton& get_singleton(CephContext* cct) {
return cct->lookup_or_create_singleton_object<
TaskFinisherSingleton>("librbd::TaskFinisherSingleton", false, cct);
}
explicit TaskFinisherSingleton(CephContext *cct) {
m_safe_timer = new SafeTimer(cct, m_lock, false);
m_safe_timer->init();
m_finisher = new Finisher(cct, "librbd::TaskFinisher::m_finisher", "taskfin_librbd");
m_finisher->start();
}
virtual ~TaskFinisherSingleton() {
{
std::lock_guard l{m_lock};
m_safe_timer->shutdown();
delete m_safe_timer;
}
m_finisher->wait_for_empty();
m_finisher->stop();
delete m_finisher;
}
void queue(Context* ctx, int r) {
m_finisher->queue(ctx, r);
}
};
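// The singleton above means every TaskFinisher<Task> instance created
// against the same CephContext shares one SafeTimer and one Finisher
// thread, rather than spawning a timer/finisher pair per image.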
template <typename Task>
class TaskFinisher {
public:
TaskFinisher(CephContext &cct) : m_cct(cct) {
auto& singleton = TaskFinisherSingleton::get_singleton(&cct);
m_lock = &singleton.m_lock;
m_safe_timer = singleton.m_safe_timer;
m_finisher = singleton.m_finisher;
}
bool cancel(const Task& task) {
std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it == m_task_contexts.end()) {
return false;
}
it->second.first->complete(-ECANCELED);
m_safe_timer->cancel_event(it->second.second);
m_task_contexts.erase(it);
return true;
}
void cancel_all() {
std::lock_guard l{*m_lock};
for (auto &[task, pair] : m_task_contexts) {
pair.first->complete(-ECANCELED);
m_safe_timer->cancel_event(pair.second);
}
m_task_contexts.clear();
}
bool add_event_after(const Task& task, double seconds, Context *ctx) {
std::lock_guard l{*m_lock};
if (m_task_contexts.count(task) != 0) {
// task already scheduled on finisher or timer
delete ctx;
return false;
}
C_Task *timer_ctx = new C_Task(this, task);
m_task_contexts[task] = std::make_pair(ctx, timer_ctx);
m_safe_timer->add_event_after(seconds, timer_ctx);
return true;
}
bool reschedule_event_after(const Task& task, double seconds) {
std::lock_guard l{*m_lock};
auto it = m_task_contexts.find(task);
if (it == m_task_contexts.end()) {
return false;
}
bool canceled = m_safe_timer->cancel_event(it->second.second);
if (!canceled) {
return false;
}
auto timer_ctx = new C_Task(this, task);
it->second.second = timer_ctx;
m_safe_timer->add_event_after(seconds, timer_ctx);
return true;
}
void queue(Context *ctx, int r = 0) {
m_finisher->queue(ctx, r);
}
bool queue(const Task& task, Context *ctx) {
std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
if (it->second.second != NULL &&
m_safe_timer->cancel_event(it->second.second)) {
it->second.first->complete(-ECANCELED);
} else {
// task already scheduled on the finisher
ctx->complete(-ECANCELED);
return false;
}
}
    m_task_contexts[task] = std::make_pair(ctx, static_cast<Context *>(nullptr));
m_finisher->queue(new C_Task(this, task));
return true;
}
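  // Semantics of queue(task, ctx): a task still pending on the timer is
  // cancelled (its old context completes with -ECANCELED) and re-queued
  // on the finisher; a task already handed to the finisher wins, and the
  // new context is rejected with -ECANCELED instead.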
private:
class C_Task : public Context {
public:
C_Task(TaskFinisher *task_finisher, const Task& task)
: m_task_finisher(task_finisher), m_task(task)
{
}
protected:
void finish(int r) override {
m_task_finisher->complete(m_task);
}
private:
TaskFinisher *m_task_finisher;
Task m_task;
};
CephContext &m_cct;
ceph::mutex *m_lock;
Finisher *m_finisher;
SafeTimer *m_safe_timer;
typedef std::map<Task, std::pair<Context *, Context *> > TaskContexts;
TaskContexts m_task_contexts;
void complete(const Task& task) {
Context *ctx = NULL;
{
std::lock_guard l{*m_lock};
typename TaskContexts::iterator it = m_task_contexts.find(task);
if (it != m_task_contexts.end()) {
ctx = it->second.first;
m_task_contexts.erase(it);
}
}
if (ctx != NULL) {
ctx->complete(0);
}
}
};
} // namespace librbd
#endif // LIBRBD_TASK_FINISHER
| 4,727 | 25.266667 | 89 |
h
|
null |
ceph-main/src/librbd/TrashWatcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/TrashWatcher.h"
#include "include/rbd_types.h"
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "librbd/Utils.h"
#include "librbd/watcher/Utils.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::TrashWatcher: " << __func__ << ": "
namespace librbd {
using namespace trash_watcher;
using namespace watcher;
using librbd::util::create_rados_callback;
namespace {
static const uint64_t NOTIFY_TIMEOUT_MS = 5000;
} // anonymous namespace
template <typename I>
TrashWatcher<I>::TrashWatcher(librados::IoCtx &io_ctx,
asio::ContextWQ *work_queue)
: Watcher(io_ctx, work_queue, RBD_TRASH) {
}
template <typename I>
void TrashWatcher<I>::notify_image_added(
librados::IoCtx &io_ctx, const std::string& image_id,
const cls::rbd::TrashImageSpec& trash_image_spec, Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ImageAddedPayload{image_id, trash_image_spec}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr);
ceph_assert(r == 0);
comp->release();
}
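// Both notifiers here are fire-and-forget with respect to the reply
// payload (nullptr response bufferlist) and bounded by the 5s
// NOTIFY_TIMEOUT_MS; watchers receive the encoded NotifyMessage through
// handle_notify() below.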
template <typename I>
void TrashWatcher<I>::notify_image_removed(librados::IoCtx &io_ctx,
const std::string& image_id,
Context *on_finish) {
CephContext *cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 20) << dendl;
bufferlist bl;
encode(NotifyMessage{ImageRemovedPayload{image_id}}, bl);
librados::AioCompletion *comp = create_rados_callback(on_finish);
int r = io_ctx.aio_notify(RBD_TRASH, comp, bl, NOTIFY_TIMEOUT_MS, nullptr);
ceph_assert(r == 0);
comp->release();
}
template <typename I>
void TrashWatcher<I>::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) {
CephContext *cct = this->m_cct;
ldout(cct, 15) << "notify_id=" << notify_id << ", "
<< "handle=" << handle << dendl;
NotifyMessage notify_message;
try {
auto iter = bl.cbegin();
decode(notify_message, iter);
} catch (const buffer::error &err) {
lderr(cct) << "error decoding image notification: " << err.what()
<< dendl;
Context *ctx = new C_NotifyAck(this, notify_id, handle);
ctx->complete(0);
return;
}
apply_visitor(watcher::util::HandlePayloadVisitor<TrashWatcher<I>>(
this, notify_id, handle), notify_message.payload);
}
template <typename I>
bool TrashWatcher<I>::handle_payload(const ImageAddedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << dendl;
handle_image_added(payload.image_id, payload.trash_image_spec);
return true;
}
template <typename I>
bool TrashWatcher<I>::handle_payload(const ImageRemovedPayload &payload,
Context *on_notify_ack) {
CephContext *cct = this->m_cct;
ldout(cct, 20) << dendl;
handle_image_removed(payload.image_id);
return true;
}
template <typename I>
bool TrashWatcher<I>::handle_payload(const UnknownPayload &payload,
Context *on_notify_ack) {
return true;
}
} // namespace librbd
template class librbd::TrashWatcher<librbd::ImageCtx>;
| 3,586 | 29.65812 | 77 |
cc
|
null |
ceph-main/src/librbd/TrashWatcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_TRASH_WATCHER_H
#define CEPH_LIBRBD_TRASH_WATCHER_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Watcher.h"
#include "librbd/trash_watcher/Types.h"
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher {
namespace util {
template <typename> struct HandlePayloadVisitor;
} // namespace util
} // namespace watcher
template <typename ImageCtxT = librbd::ImageCtx>
class TrashWatcher : public Watcher {
friend struct watcher::util::HandlePayloadVisitor<TrashWatcher<ImageCtxT>>;
public:
TrashWatcher(librados::IoCtx &io_ctx, asio::ContextWQ *work_queue);
static void notify_image_added(librados::IoCtx &io_ctx,
const std::string& image_id,
const cls::rbd::TrashImageSpec& spec,
Context *on_finish);
static void notify_image_removed(librados::IoCtx &io_ctx,
const std::string& image_id,
Context *on_finish);
protected:
virtual void handle_image_added(const std::string &image_id,
const cls::rbd::TrashImageSpec& spec) = 0;
virtual void handle_image_removed(const std::string &image_id) = 0;
private:
void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) override;
bool handle_payload(const trash_watcher::ImageAddedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const trash_watcher::ImageRemovedPayload &payload,
Context *on_notify_ack);
bool handle_payload(const trash_watcher::UnknownPayload &payload,
Context *on_notify_ack);
};
} // namespace librbd
extern template class librbd::TrashWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_TRASH_WATCHER_H
| 2,075 | 34.186441 | 77 |
h
|
null |
ceph-main/src/librbd/Types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_TYPES_H
#define LIBRBD_TYPES_H
#include "include/types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "deep_copy/Types.h"
#include <map>
#include <memory>
#include <string>
namespace neorados { class IOContext; }
namespace librbd {
// Performance counters
enum {
l_librbd_first = 26000,
l_librbd_rd, // read ops
l_librbd_rd_bytes, // bytes read
l_librbd_rd_latency, // average latency
l_librbd_wr,
l_librbd_wr_bytes,
l_librbd_wr_latency,
l_librbd_discard,
l_librbd_discard_bytes,
l_librbd_discard_latency,
l_librbd_flush,
l_librbd_flush_latency,
l_librbd_ws,
l_librbd_ws_bytes,
l_librbd_ws_latency,
l_librbd_cmp,
l_librbd_cmp_bytes,
l_librbd_cmp_latency,
l_librbd_snap_create,
l_librbd_snap_remove,
l_librbd_snap_rollback,
l_librbd_snap_rename,
l_librbd_notify,
l_librbd_resize,
l_librbd_readahead,
l_librbd_readahead_bytes,
l_librbd_invalidate_cache,
l_librbd_opened_time,
l_librbd_lock_acquired_time,
l_librbd_last,
};
typedef std::shared_ptr<neorados::IOContext> IOContext;
typedef std::map<uint64_t, uint64_t> SnapSeqs;
/// Full information about an image's parent.
struct ParentImageInfo {
/// Identification of the parent.
cls::rbd::ParentImageSpec spec;
/** @brief Where the portion of data shared with the child image ends.
* Since images can be resized multiple times, the portion of data shared
* with the child image is not necessarily min(parent size, child size).
* If the child image is first shrunk and then enlarged, the common portion
* will be shorter. */
uint64_t overlap = 0;
};
struct SnapInfo {
std::string name;
cls::rbd::SnapshotNamespace snap_namespace;
uint64_t size;
ParentImageInfo parent;
uint8_t protection_status;
uint64_t flags;
utime_t timestamp;
SnapInfo(std::string _name,
const cls::rbd::SnapshotNamespace &_snap_namespace,
uint64_t _size, const ParentImageInfo &_parent,
uint8_t _protection_status, uint64_t _flags, utime_t _timestamp)
: name(_name), snap_namespace(_snap_namespace), size(_size),
parent(_parent), protection_status(_protection_status), flags(_flags),
timestamp(_timestamp) {
}
};
enum {
OPEN_FLAG_SKIP_OPEN_PARENT = 1 << 0,
OPEN_FLAG_OLD_FORMAT = 1 << 1,
OPEN_FLAG_IGNORE_MIGRATING = 1 << 2
};
enum ImageReadOnlyFlag {
IMAGE_READ_ONLY_FLAG_USER = 1 << 0,
IMAGE_READ_ONLY_FLAG_NON_PRIMARY = 1 << 1,
};
enum SnapCreateFlag {
SNAP_CREATE_FLAG_SKIP_OBJECT_MAP = 1 << 0,
SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE = 1 << 1,
SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR = 1 << 2,
};
struct MigrationInfo {
int64_t pool_id = -1;
std::string pool_namespace;
std::string image_name;
std::string image_id;
std::string source_spec;
deep_copy::SnapMap snap_map;
uint64_t overlap = 0;
bool flatten = false;
MigrationInfo() {
}
MigrationInfo(int64_t pool_id, const std::string& pool_namespace,
const std::string& image_name, const std::string& image_id,
const std::string& source_spec,
const deep_copy::SnapMap &snap_map, uint64_t overlap,
bool flatten)
: pool_id(pool_id), pool_namespace(pool_namespace), image_name(image_name),
image_id(image_id), source_spec(source_spec), snap_map(snap_map),
overlap(overlap), flatten(flatten) {
}
bool empty() const {
return (pool_id == -1 && source_spec.empty());
}
};
} // namespace librbd
#endif // LIBRBD_TYPES_H
| 3,665 | 24.636364 | 79 |
h
|
null |
ceph-main/src/librbd/Utils.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/algorithm/string.hpp>
#include <boost/lexical_cast.hpp>
#include "librbd/Utils.h"
#include "include/random.h"
#include "include/rbd_types.h"
#include "include/stringify.h"
#include "include/neorados/RADOS.hpp"
#include "include/rbd/features.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Features.h"
#include <boost/algorithm/string/predicate.hpp>
#include <bitset>
#include <random>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::util::" << __func__ << ": "
namespace librbd {
namespace util {
namespace {
const std::string CONFIG_KEY_URI_PREFIX{"config://"};
} // anonymous namespace
const std::string group_header_name(const std::string &group_id)
{
return RBD_GROUP_HEADER_PREFIX + group_id;
}
const std::string id_obj_name(const std::string &name)
{
return RBD_ID_PREFIX + name;
}
const std::string header_name(const std::string &image_id)
{
return RBD_HEADER_PREFIX + image_id;
}
const std::string old_header_name(const std::string &image_name)
{
return image_name + RBD_SUFFIX;
}
std::string unique_lock_name(const std::string &name, void *address) {
return name + " (" + stringify(address) + ")";
}
librados::AioCompletion *create_rados_callback(Context *on_finish) {
return create_rados_callback<Context, &Context::complete>(on_finish);
}
std::string generate_image_id(librados::IoCtx &ioctx) {
librados::Rados rados(ioctx);
uint64_t bid = rados.get_instance_id();
std::mt19937 generator{random_device_t{}()};
std::uniform_int_distribution<uint32_t> distribution{0, 0xFFFFFFFF};
uint32_t extra = distribution(generator);
std::ostringstream bid_ss;
  bid_ss << std::hex << bid << extra;
std::string id = bid_ss.str();
// ensure the image id won't overflow the fixed block name size
if (id.length() > RBD_MAX_IMAGE_ID_LENGTH) {
id = id.substr(id.length() - RBD_MAX_IMAGE_ID_LENGTH);
}
return id;
}
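// The generated id is the hex concatenation of the RADOS instance id and
// a random 32-bit suffix, truncated from the front if it would exceed
// RBD_MAX_IMAGE_ID_LENGTH so that derived object names stay within the
// fixed block name size.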
uint64_t get_rbd_default_features(CephContext* cct)
{
auto value = cct->_conf.get_val<std::string>("rbd_default_features");
return librbd::rbd_features_from_string(value, nullptr);
}
bool calc_sparse_extent(const bufferptr &bp,
size_t sparse_size,
uint64_t length,
size_t *write_offset,
size_t *write_length,
size_t *offset) {
size_t extent_size;
if (*offset + sparse_size > length) {
extent_size = length - *offset;
} else {
extent_size = sparse_size;
}
bufferptr extent(bp, *offset, extent_size);
*offset += extent_size;
bool extent_is_zero = extent.is_zero();
if (!extent_is_zero) {
*write_length += extent_size;
}
if (extent_is_zero && *write_length == 0) {
*write_offset += extent_size;
}
if ((extent_is_zero || *offset == length) && *write_length != 0) {
return true;
}
return false;
}
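// Illustrative scan loop built on calc_sparse_extent() (a sketch only;
// 'bp' is assumed to hold 'length' bytes and 'write_extent' is a
// hypothetical callback for each non-zero run):
//
//   size_t write_offset = 0, write_length = 0, offset = 0;
//   while (offset < length) {
//     if (calc_sparse_extent(bp, sparse_size, length, &write_offset,
//                            &write_length, &offset)) {
//       write_extent(bp, write_offset, write_length); // non-zero run
//       write_offset = offset;
//       write_length = 0;
//     }
//   }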
bool is_metadata_config_override(const std::string& metadata_key,
std::string* config_key) {
size_t prefix_len = librbd::ImageCtx::METADATA_CONF_PREFIX.size();
if (metadata_key.size() > prefix_len &&
metadata_key.compare(0, prefix_len,
librbd::ImageCtx::METADATA_CONF_PREFIX) == 0) {
*config_key = metadata_key.substr(prefix_len,
metadata_key.size() - prefix_len);
return true;
}
return false;
}
int create_ioctx(librados::IoCtx& src_io_ctx, const std::string& pool_desc,
int64_t pool_id,
const std::optional<std::string>& pool_namespace,
librados::IoCtx* dst_io_ctx) {
auto cct = (CephContext *)src_io_ctx.cct();
librados::Rados rados(src_io_ctx);
int r = rados.ioctx_create2(pool_id, *dst_io_ctx);
if (r == -ENOENT) {
ldout(cct, 1) << pool_desc << " pool " << pool_id << " no longer exists"
<< dendl;
return r;
} else if (r < 0) {
lderr(cct) << "error accessing " << pool_desc << " pool " << pool_id
<< dendl;
return r;
}
dst_io_ctx->set_namespace(
pool_namespace ? *pool_namespace : src_io_ctx.get_namespace());
if (src_io_ctx.get_pool_full_try()) {
dst_io_ctx->set_pool_full_try();
}
return 0;
}
int snap_create_flags_api_to_internal(CephContext *cct, uint32_t api_flags,
uint64_t *internal_flags) {
*internal_flags = 0;
if (api_flags & RBD_SNAP_CREATE_SKIP_QUIESCE) {
*internal_flags |= SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE;
api_flags &= ~RBD_SNAP_CREATE_SKIP_QUIESCE;
} else if (api_flags & RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) {
*internal_flags |= SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR;
api_flags &= ~RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR;
}
if (api_flags != 0) {
lderr(cct) << "invalid snap create flags: "
<< std::bitset<32>(api_flags) << dendl;
return -EINVAL;
}
return 0;
}
uint32_t get_default_snap_create_flags(ImageCtx *ictx) {
auto mode = ictx->config.get_val<std::string>(
"rbd_default_snapshot_quiesce_mode");
if (mode == "required") {
return 0;
} else if (mode == "ignore-error") {
return RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR;
} else if (mode == "skip") {
return RBD_SNAP_CREATE_SKIP_QUIESCE;
} else {
ceph_abort_msg("invalid rbd_default_snapshot_quiesce_mode");
}
}
SnapContext get_snap_context(
const std::optional<
std::pair<std::uint64_t,
std::vector<std::uint64_t>>>& write_snap_context) {
SnapContext snapc;
if (write_snap_context) {
snapc = SnapContext{write_snap_context->first,
{write_snap_context->second.begin(),
write_snap_context->second.end()}};
}
return snapc;
}
uint64_t reserve_async_request_id() {
static std::atomic<uint64_t> async_request_seq = 0;
return ++async_request_seq;
}
bool is_config_key_uri(const std::string& uri) {
return boost::starts_with(uri, CONFIG_KEY_URI_PREFIX);
}
int get_config_key(librados::Rados& rados, const std::string& uri,
std::string* value) {
auto cct = reinterpret_cast<CephContext*>(rados.cct());
if (!is_config_key_uri(uri)) {
return -EINVAL;
}
std::string key = uri.substr(CONFIG_KEY_URI_PREFIX.size());
std::string cmd =
"{"
"\"prefix\": \"config-key get\", "
"\"key\": \"" + key + "\""
"}";
bufferlist in_bl;
bufferlist out_bl;
int r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r < 0) {
lderr(cct) << "failed to retrieve MON config key " << key << ": "
<< cpp_strerror(r) << dendl;
return r;
}
*value = std::string(out_bl.c_str(), out_bl.length());
return 0;
}
} // namespace util
} // namespace librbd
| 6,934 | 27.076923 | 76 |
cc
|
null |
ceph-main/src/librbd/Utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_UTILS_H
#define CEPH_LIBRBD_UTILS_H
#include "include/rados/librados.hpp"
#include "include/rbd_types.h"
#include "include/ceph_assert.h"
#include "include/Context.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "common/RefCountedObj.h"
#include <atomic>
#include <optional>
#include <type_traits>
#include <utility>
#include <vector>
#include <stdio.h>
namespace librbd {
class ImageCtx;
namespace util {
namespace detail {
template <typename T>
void rados_callback(rados_completion_t c, void *arg) {
reinterpret_cast<T*>(arg)->complete(rados_aio_get_return_value(c));
}
template <typename T, void(T::*MF)(int)>
void rados_callback(rados_completion_t c, void *arg) {
T *obj = reinterpret_cast<T*>(arg);
int r = rados_aio_get_return_value(c);
(obj->*MF)(r);
}
template <typename T, Context*(T::*MF)(int*), bool destroy>
void rados_state_callback(rados_completion_t c, void *arg) {
T *obj = reinterpret_cast<T*>(arg);
int r = rados_aio_get_return_value(c);
Context *on_finish = (obj->*MF)(&r);
if (on_finish != nullptr) {
on_finish->complete(r);
if (destroy) {
delete obj;
}
}
}
template <typename T, void (T::*MF)(int)>
class C_CallbackAdapter : public Context {
T *obj;
public:
C_CallbackAdapter(T *obj) : obj(obj) {
}
protected:
void finish(int r) override {
(obj->*MF)(r);
}
};
template <typename T, void (T::*MF)(int)>
class C_RefCallbackAdapter : public Context {
RefCountedPtr refptr;
Context *on_finish;
public:
C_RefCallbackAdapter(T *obj, RefCountedPtr refptr)
: refptr(std::move(refptr)),
on_finish(new C_CallbackAdapter<T, MF>(obj)) {
}
protected:
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename T, Context*(T::*MF)(int*), bool destroy>
class C_StateCallbackAdapter : public Context {
T *obj;
public:
C_StateCallbackAdapter(T *obj) : obj(obj){
}
protected:
void complete(int r) override {
Context *on_finish = (obj->*MF)(&r);
if (on_finish != nullptr) {
on_finish->complete(r);
if (destroy) {
delete obj;
}
}
Context::complete(r);
}
void finish(int r) override {
}
};
template <typename T, Context*(T::*MF)(int*)>
class C_RefStateCallbackAdapter : public Context {
RefCountedPtr refptr;
Context *on_finish;
public:
C_RefStateCallbackAdapter(T *obj, RefCountedPtr refptr)
: refptr(std::move(refptr)),
on_finish(new C_StateCallbackAdapter<T, MF, true>(obj)) {
}
protected:
void finish(int r) override {
on_finish->complete(r);
}
};
template <typename WQ>
struct C_AsyncCallback : public Context {
WQ *op_work_queue;
Context *on_finish;
C_AsyncCallback(WQ *op_work_queue, Context *on_finish)
: op_work_queue(op_work_queue), on_finish(on_finish) {
}
~C_AsyncCallback() override {
delete on_finish;
}
void finish(int r) override {
op_work_queue->queue(on_finish, r);
on_finish = nullptr;
}
};
} // namespace detail
std::string generate_image_id(librados::IoCtx &ioctx);
template <typename T>
inline std::string generate_image_id(librados::IoCtx &ioctx) {
return generate_image_id(ioctx);
}
const std::string group_header_name(const std::string &group_id);
const std::string id_obj_name(const std::string &name);
const std::string header_name(const std::string &image_id);
const std::string old_header_name(const std::string &image_name);
std::string unique_lock_name(const std::string &name, void *address);
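// build the name of the RADOS object backing the given object number by
// expanding the image's printf-style format string (typically
// "rbd_data.<image id>.%016llx" for format 2 images)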
template <typename I>
std::string data_object_name(I* image_ctx, uint64_t object_no) {
char buf[RBD_MAX_OBJ_NAME_SIZE];
size_t length = snprintf(buf, RBD_MAX_OBJ_NAME_SIZE,
image_ctx->format_string, object_no);
ceph_assert(length < RBD_MAX_OBJ_NAME_SIZE);
std::string oid;
oid.reserve(RBD_MAX_OBJ_NAME_SIZE);
oid.append(buf, length);
return oid;
}
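// Adapters that bridge librados AioCompletions and Context callbacks to
// member-function callbacks. A minimal usage sketch (Foo is a
// hypothetical type, not part of librbd):
//
//   struct Foo { void handle_read(int r); };
//   ...
//   auto comp = create_rados_callback<Foo, &Foo::handle_read>(foo);
//   io_ctx.aio_operate(oid, comp, &read_op, nullptr);
//   comp->release();
//
// The completion invokes foo->handle_read() with the operation's return
// value once the I/O finishes.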
librados::AioCompletion *create_rados_callback(Context *on_finish);
template <typename T>
librados::AioCompletion *create_rados_callback(T *obj) {
return librados::Rados::aio_create_completion(
obj, &detail::rados_callback<T>);
}
template <typename T, void(T::*MF)(int)>
librados::AioCompletion *create_rados_callback(T *obj) {
return librados::Rados::aio_create_completion(
obj, &detail::rados_callback<T, MF>);
}
template <typename T, Context*(T::*MF)(int*), bool destroy=true>
librados::AioCompletion *create_rados_callback(T *obj) {
return librados::Rados::aio_create_completion(
obj, &detail::rados_state_callback<T, MF, destroy>);
}
template <typename T, void(T::*MF)(int) = &T::complete>
Context *create_context_callback(T *obj) {
return new detail::C_CallbackAdapter<T, MF>(obj);
}
template <typename T, Context*(T::*MF)(int*), bool destroy=true>
Context *create_context_callback(T *obj) {
return new detail::C_StateCallbackAdapter<T, MF, destroy>(obj);
}
// for reference-counting objects
template <typename T, void(T::*MF)(int) = &T::complete>
Context *create_context_callback(T *obj, RefCountedPtr refptr) {
return new detail::C_RefCallbackAdapter<T, MF>(obj, refptr);
}
template <typename T, Context*(T::*MF)(int*)>
Context *create_context_callback(T *obj, RefCountedPtr refptr) {
return new detail::C_RefStateCallbackAdapter<T, MF>(obj, refptr);
}
// for objects that don't inherit from RefCountedObj (e.g. to support unit tests)
template <typename T, void(T::*MF)(int) = &T::complete, typename R>
typename std::enable_if<not std::is_base_of<RefCountedPtr, R>::value, Context*>::type
create_context_callback(T *obj, R *refptr) {
return new detail::C_CallbackAdapter<T, MF>(obj);
}
template <typename T, Context*(T::*MF)(int*), typename R, bool destroy=true>
typename std::enable_if<not std::is_base_of<RefCountedPtr, R>::value, Context*>::type
create_context_callback(T *obj, R *refptr) {
return new detail::C_StateCallbackAdapter<T, MF, destroy>(obj);
}
template <typename I>
Context *create_async_context_callback(I &image_ctx, Context *on_finish) {
// use async callback to acquire a clean lock context
return new detail::C_AsyncCallback<
typename std::decay<decltype(*image_ctx.op_work_queue)>::type>(
image_ctx.op_work_queue, on_finish);
}
template <typename WQ>
Context *create_async_context_callback(WQ *work_queue, Context *on_finish) {
// use async callback to acquire a clean lock context
return new detail::C_AsyncCallback<WQ>(work_queue, on_finish);
}
// TODO: temporary until AioCompletion supports templated ImageCtx
inline ImageCtx *get_image_ctx(ImageCtx *image_ctx) {
return image_ctx;
}
uint64_t get_rbd_default_features(CephContext* cct);
bool calc_sparse_extent(const bufferptr &bp,
size_t sparse_size,
uint64_t length,
size_t *write_offset,
size_t *write_length,
size_t *offset);
template <typename I>
inline ZTracer::Trace create_trace(const I &image_ctx, const char *trace_name,
const ZTracer::Trace &parent_trace) {
if (parent_trace.valid()) {
return ZTracer::Trace(trace_name, &image_ctx.trace_endpoint, &parent_trace);
}
return ZTracer::Trace();
}
bool is_metadata_config_override(const std::string& metadata_key,
std::string* config_key);
int create_ioctx(librados::IoCtx& src_io_ctx, const std::string& pool_desc,
int64_t pool_id,
const std::optional<std::string>& pool_namespace,
librados::IoCtx* dst_io_ctx);
int snap_create_flags_api_to_internal(CephContext *cct, uint32_t api_flags,
uint64_t *internal_flags);
uint32_t get_default_snap_create_flags(ImageCtx *ictx);
SnapContext get_snap_context(
const std::optional<
std::pair<std::uint64_t,
std::vector<std::uint64_t>>>& write_snap_context);
uint64_t reserve_async_request_id();
bool is_config_key_uri(const std::string& uri);
int get_config_key(librados::Rados& rados, const std::string& uri,
std::string* value);
} // namespace util
} // namespace librbd
#endif // CEPH_LIBRBD_UTILS_H
| 8,251 | 27.752613 | 85 |
h
|
null |
ceph-main/src/librbd/WatchNotifyTypes.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "cls/rbd/cls_rbd_types.h"
#include "common/Formatter.h"
#include "include/ceph_assert.h"
#include "include/stringify.h"
#include "librbd/WatchNotifyTypes.h"
namespace librbd {
namespace watch_notify {
void AsyncRequestId::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
encode(request_id, bl);
}
void AsyncRequestId::decode(bufferlist::const_iterator &iter) {
using ceph::decode;
decode(client_id, iter);
decode(request_id, iter);
}
void AsyncRequestId::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
f->dump_unsigned("request_id", request_id);
}
void AcquiredLockPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
}
void AcquiredLockPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
if (version >= 2) {
decode(client_id, iter);
}
}
void AcquiredLockPayload::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
}
void ReleasedLockPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
}
void ReleasedLockPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
if (version >= 2) {
decode(client_id, iter);
}
}
void ReleasedLockPayload::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
}
void RequestLockPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(client_id, bl);
encode(force, bl);
}
void RequestLockPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
if (version >= 2) {
decode(client_id, iter);
}
if (version >= 3) {
decode(force, iter);
}
}
void RequestLockPayload::dump(Formatter *f) const {
f->open_object_section("client_id");
client_id.dump(f);
f->close_section();
f->dump_bool("force", force);
}
void HeaderUpdatePayload::encode(bufferlist &bl) const {
}
void HeaderUpdatePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void HeaderUpdatePayload::dump(Formatter *f) const {
}
void AsyncRequestPayloadBase::encode(bufferlist &bl) const {
using ceph::encode;
encode(async_request_id, bl);
}
void AsyncRequestPayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(async_request_id, iter);
}
void AsyncRequestPayloadBase::dump(Formatter *f) const {
f->open_object_section("async_request_id");
async_request_id.dump(f);
f->close_section();
}
void AsyncProgressPayload::encode(bufferlist &bl) const {
using ceph::encode;
AsyncRequestPayloadBase::encode(bl);
encode(offset, bl);
encode(total, bl);
}
void AsyncProgressPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
AsyncRequestPayloadBase::decode(version, iter);
decode(offset, iter);
decode(total, iter);
}
void AsyncProgressPayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("offset", offset);
f->dump_unsigned("total", total);
}
void AsyncCompletePayload::encode(bufferlist &bl) const {
using ceph::encode;
AsyncRequestPayloadBase::encode(bl);
encode(result, bl);
}
void AsyncCompletePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
AsyncRequestPayloadBase::decode(version, iter);
decode(result, iter);
}
void AsyncCompletePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_int("result", result);
}
void ResizePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(size, bl);
AsyncRequestPayloadBase::encode(bl);
encode(allow_shrink, bl);
}
void ResizePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(size, iter);
AsyncRequestPayloadBase::decode(version, iter);
if (version >= 4) {
decode(allow_shrink, iter);
}
}
void ResizePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("size", size);
f->dump_bool("allow_shrink", allow_shrink);
}
void SnapPayloadBase::encode(bufferlist &bl) const {
using ceph::encode;
encode(snap_name, bl);
encode(snap_namespace, bl);
encode(async_request_id, bl);
}
void SnapPayloadBase::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(snap_name, iter);
if (version >= 6) {
decode(snap_namespace, iter);
}
if (version >= 7) {
decode(async_request_id, iter);
}
}
void SnapPayloadBase::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_string("snap_name", snap_name);
snap_namespace.dump(f);
}
void SnapCreatePayload::encode(bufferlist &bl) const {
using ceph::encode;
SnapPayloadBase::encode(bl);
encode(flags, bl);
}
void SnapCreatePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
SnapPayloadBase::decode(version, iter);
if (version == 5) {
decode(snap_namespace, iter);
}
if (version >= 7) {
decode(flags, iter);
}
}
void SnapCreatePayload::dump(Formatter *f) const {
SnapPayloadBase::dump(f);
f->dump_unsigned("flags", flags);
}
void SnapRenamePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(snap_id, bl);
SnapPayloadBase::encode(bl);
}
void SnapRenamePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(snap_id, iter);
SnapPayloadBase::decode(version, iter);
}
void SnapRenamePayload::dump(Formatter *f) const {
SnapPayloadBase::dump(f);
f->dump_unsigned("src_snap_id", snap_id);
}
void RenamePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(image_name, bl);
encode(async_request_id, bl);
}
void RenamePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(image_name, iter);
if (version >= 7) {
decode(async_request_id, iter);
}
}
void RenamePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_string("image_name", image_name);
}
void UpdateFeaturesPayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(features, bl);
encode(enabled, bl);
encode(async_request_id, bl);
}
void UpdateFeaturesPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(features, iter);
decode(enabled, iter);
if (version >= 7) {
decode(async_request_id, iter);
}
}
void UpdateFeaturesPayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("features", features);
f->dump_bool("enabled", enabled);
}
void SparsifyPayload::encode(bufferlist &bl) const {
using ceph::encode;
AsyncRequestPayloadBase::encode(bl);
encode(sparse_size, bl);
}
void SparsifyPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
AsyncRequestPayloadBase::decode(version, iter);
decode(sparse_size, iter);
}
void SparsifyPayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_unsigned("sparse_size", sparse_size);
}
void MetadataUpdatePayload::encode(bufferlist &bl) const {
using ceph::encode;
encode(key, bl);
encode(value, bl);
encode(async_request_id, bl);
}
void MetadataUpdatePayload::decode(__u8 version, bufferlist::const_iterator &iter) {
using ceph::decode;
decode(key, iter);
decode(value, iter);
if (version >= 7) {
decode(async_request_id, iter);
}
}
void MetadataUpdatePayload::dump(Formatter *f) const {
AsyncRequestPayloadBase::dump(f);
f->dump_string("key", key);
f->dump_string("value", *value);
}
void UnknownPayload::encode(bufferlist &bl) const {
ceph_abort();
}
void UnknownPayload::decode(__u8 version, bufferlist::const_iterator &iter) {
}
void UnknownPayload::dump(Formatter *f) const {
}
bool NotifyMessage::check_for_refresh() const {
return payload->check_for_refresh();
}
void NotifyMessage::encode(bufferlist& bl) const {
ENCODE_START(7, 1, bl);
encode(static_cast<uint32_t>(payload->get_notify_op()), bl);
payload->encode(bl);
ENCODE_FINISH(bl);
}
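// decode is version-gated: the struct version recovered by DECODE_START
// is passed through to the payload so fields introduced by newer
// encoding versions are only decoded when actually present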
void NotifyMessage::decode(bufferlist::const_iterator& iter) {
DECODE_START(1, iter);
uint32_t notify_op;
decode(notify_op, iter);
// select the correct payload variant based upon the encoded op
switch (notify_op) {
case NOTIFY_OP_ACQUIRED_LOCK:
payload.reset(new AcquiredLockPayload());
break;
case NOTIFY_OP_RELEASED_LOCK:
payload.reset(new ReleasedLockPayload());
break;
case NOTIFY_OP_REQUEST_LOCK:
payload.reset(new RequestLockPayload());
break;
case NOTIFY_OP_HEADER_UPDATE:
payload.reset(new HeaderUpdatePayload());
break;
case NOTIFY_OP_ASYNC_PROGRESS:
payload.reset(new AsyncProgressPayload());
break;
case NOTIFY_OP_ASYNC_COMPLETE:
payload.reset(new AsyncCompletePayload());
break;
case NOTIFY_OP_FLATTEN:
payload.reset(new FlattenPayload());
break;
case NOTIFY_OP_RESIZE:
payload.reset(new ResizePayload());
break;
case NOTIFY_OP_SNAP_CREATE:
payload.reset(new SnapCreatePayload());
break;
case NOTIFY_OP_SNAP_REMOVE:
payload.reset(new SnapRemovePayload());
break;
case NOTIFY_OP_SNAP_RENAME:
payload.reset(new SnapRenamePayload());
break;
case NOTIFY_OP_SNAP_PROTECT:
payload.reset(new SnapProtectPayload());
break;
case NOTIFY_OP_SNAP_UNPROTECT:
payload.reset(new SnapUnprotectPayload());
break;
case NOTIFY_OP_REBUILD_OBJECT_MAP:
payload.reset(new RebuildObjectMapPayload());
break;
case NOTIFY_OP_RENAME:
payload.reset(new RenamePayload());
break;
case NOTIFY_OP_UPDATE_FEATURES:
payload.reset(new UpdateFeaturesPayload());
break;
case NOTIFY_OP_MIGRATE:
payload.reset(new MigratePayload());
break;
case NOTIFY_OP_SPARSIFY:
payload.reset(new SparsifyPayload());
break;
case NOTIFY_OP_QUIESCE:
payload.reset(new QuiescePayload());
break;
case NOTIFY_OP_UNQUIESCE:
payload.reset(new UnquiescePayload());
break;
case NOTIFY_OP_METADATA_UPDATE:
payload.reset(new MetadataUpdatePayload());
break;
}
payload->decode(struct_v, iter);
DECODE_FINISH(iter);
}
void NotifyMessage::dump(Formatter *f) const {
payload->dump(f);
}
NotifyOp NotifyMessage::get_notify_op() const {
return payload->get_notify_op();
}
void NotifyMessage::generate_test_instances(std::list<NotifyMessage *> &o) {
o.push_back(new NotifyMessage(new AcquiredLockPayload(ClientId(1, 2))));
o.push_back(new NotifyMessage(new ReleasedLockPayload(ClientId(1, 2))));
o.push_back(new NotifyMessage(new RequestLockPayload(ClientId(1, 2), true)));
o.push_back(new NotifyMessage(new HeaderUpdatePayload()));
o.push_back(new NotifyMessage(new AsyncProgressPayload(AsyncRequestId(ClientId(0, 1), 2), 3, 4)));
o.push_back(new NotifyMessage(new AsyncCompletePayload(AsyncRequestId(ClientId(0, 1), 2), 3)));
o.push_back(new NotifyMessage(new FlattenPayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new ResizePayload(AsyncRequestId(ClientId(0, 1), 2), 123, true)));
o.push_back(new NotifyMessage(new SnapCreatePayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(),
"foo", 1)));
o.push_back(new NotifyMessage(new SnapRemovePayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(), "foo")));
o.push_back(new NotifyMessage(new SnapProtectPayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(), "foo")));
o.push_back(new NotifyMessage(new SnapUnprotectPayload(AsyncRequestId(ClientId(0, 1), 2),
cls::rbd::UserSnapshotNamespace(), "foo")));
o.push_back(new NotifyMessage(new RebuildObjectMapPayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new RenamePayload(AsyncRequestId(ClientId(0, 1), 2), "foo")));
o.push_back(new NotifyMessage(new UpdateFeaturesPayload(AsyncRequestId(ClientId(0, 1), 2),
1, true)));
o.push_back(new NotifyMessage(new MigratePayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new SparsifyPayload(AsyncRequestId(ClientId(0, 1), 2), 1)));
o.push_back(new NotifyMessage(new QuiescePayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new UnquiescePayload(AsyncRequestId(ClientId(0, 1), 2))));
o.push_back(new NotifyMessage(new MetadataUpdatePayload(AsyncRequestId(ClientId(0, 1), 2),
"foo", std::optional<std::string>{"xyz"})));
}
void ResponseMessage::encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(result, bl);
ENCODE_FINISH(bl);
}
void ResponseMessage::decode(bufferlist::const_iterator& iter) {
DECODE_START(1, iter);
decode(result, iter);
DECODE_FINISH(iter);
}
void ResponseMessage::dump(Formatter *f) const {
f->dump_int("result", result);
}
void ResponseMessage::generate_test_instances(std::list<ResponseMessage *> &o) {
o.push_back(new ResponseMessage(1));
}
std::ostream &operator<<(std::ostream &out,
const librbd::watch_notify::NotifyOp &op) {
using namespace librbd::watch_notify;
switch (op) {
case NOTIFY_OP_ACQUIRED_LOCK:
out << "AcquiredLock";
break;
case NOTIFY_OP_RELEASED_LOCK:
out << "ReleasedLock";
break;
case NOTIFY_OP_REQUEST_LOCK:
out << "RequestLock";
break;
case NOTIFY_OP_HEADER_UPDATE:
out << "HeaderUpdate";
break;
case NOTIFY_OP_ASYNC_PROGRESS:
out << "AsyncProgress";
break;
case NOTIFY_OP_ASYNC_COMPLETE:
out << "AsyncComplete";
break;
case NOTIFY_OP_FLATTEN:
out << "Flatten";
break;
case NOTIFY_OP_RESIZE:
out << "Resize";
break;
case NOTIFY_OP_SNAP_CREATE:
out << "SnapCreate";
break;
case NOTIFY_OP_SNAP_REMOVE:
out << "SnapRemove";
break;
case NOTIFY_OP_SNAP_RENAME:
out << "SnapRename";
break;
case NOTIFY_OP_SNAP_PROTECT:
out << "SnapProtect";
break;
case NOTIFY_OP_SNAP_UNPROTECT:
out << "SnapUnprotect";
break;
case NOTIFY_OP_REBUILD_OBJECT_MAP:
out << "RebuildObjectMap";
break;
case NOTIFY_OP_RENAME:
out << "Rename";
break;
case NOTIFY_OP_UPDATE_FEATURES:
out << "UpdateFeatures";
break;
case NOTIFY_OP_MIGRATE:
out << "Migrate";
break;
case NOTIFY_OP_SPARSIFY:
out << "Sparsify";
break;
case NOTIFY_OP_QUIESCE:
out << "Quiesce";
break;
case NOTIFY_OP_UNQUIESCE:
out << "Unquiesce";
break;
case NOTIFY_OP_METADATA_UPDATE:
out << "MetadataUpdate";
break;
default:
out << "Unknown (" << static_cast<uint32_t>(op) << ")";
break;
}
return out;
}
std::ostream &operator<<(std::ostream &out,
const librbd::watch_notify::AsyncRequestId &request) {
out << "[" << request.client_id.gid << "," << request.client_id.handle << ","
<< request.request_id << "]";
return out;
}
} // namespace watch_notify
} // namespace librbd
| 15,460 | 26.707885 | 102 |
cc
|
null |
ceph-main/src/librbd/WatchNotifyTypes.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_WATCH_NOTIFY_TYPES_H
#define LIBRBD_WATCH_NOTIFY_TYPES_H
#include "cls/rbd/cls_rbd_types.h"
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/encoding.h"
#include "librbd/watcher/Types.h"
#include <iosfwd>
#include <list>
#include <memory>
#include <string>
#include <boost/variant.hpp>
namespace ceph {
class Formatter;
}
namespace librbd {
namespace watch_notify {
using librbd::watcher::ClientId;
WRITE_CLASS_ENCODER(ClientId);
struct AsyncRequestId {
ClientId client_id;
uint64_t request_id;
AsyncRequestId() : request_id() {}
AsyncRequestId(const ClientId &client_id_, uint64_t request_id_)
: client_id(client_id_), request_id(request_id_) {}
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
inline bool operator<(const AsyncRequestId &rhs) const {
if (client_id != rhs.client_id) {
return client_id < rhs.client_id;
} else {
return request_id < rhs.request_id;
}
}
inline bool operator!=(const AsyncRequestId &rhs) const {
return (client_id != rhs.client_id || request_id != rhs.request_id);
}
inline operator bool() const {
return (*this != AsyncRequestId());
}
};
enum NotifyOp {
NOTIFY_OP_ACQUIRED_LOCK = 0,
NOTIFY_OP_RELEASED_LOCK = 1,
NOTIFY_OP_REQUEST_LOCK = 2,
NOTIFY_OP_HEADER_UPDATE = 3,
NOTIFY_OP_ASYNC_PROGRESS = 4,
NOTIFY_OP_ASYNC_COMPLETE = 5,
NOTIFY_OP_FLATTEN = 6,
NOTIFY_OP_RESIZE = 7,
NOTIFY_OP_SNAP_CREATE = 8,
NOTIFY_OP_SNAP_REMOVE = 9,
NOTIFY_OP_REBUILD_OBJECT_MAP = 10,
NOTIFY_OP_SNAP_RENAME = 11,
NOTIFY_OP_SNAP_PROTECT = 12,
NOTIFY_OP_SNAP_UNPROTECT = 13,
NOTIFY_OP_RENAME = 14,
NOTIFY_OP_UPDATE_FEATURES = 15,
NOTIFY_OP_MIGRATE = 16,
NOTIFY_OP_SPARSIFY = 17,
NOTIFY_OP_QUIESCE = 18,
NOTIFY_OP_UNQUIESCE = 19,
NOTIFY_OP_METADATA_UPDATE = 20,
};
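// Base interface for all notification payloads. Payloads are encoded
// inside NotifyMessage's versioned envelope; decode() receives the
// envelope's struct version so later-added fields can be skipped when
// absent. check_for_refresh() reports whether the receiver must refresh
// the image before handling the notification.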
struct Payload {
virtual ~Payload() {}
virtual NotifyOp get_notify_op() const = 0;
virtual bool check_for_refresh() const = 0;
virtual void encode(bufferlist &bl) const = 0;
virtual void decode(__u8 version, bufferlist::const_iterator &iter) = 0;
virtual void dump(Formatter *f) const = 0;
};
struct AcquiredLockPayload : public Payload {
ClientId client_id;
AcquiredLockPayload() {}
AcquiredLockPayload(const ClientId &client_id) : client_id(client_id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_ACQUIRED_LOCK;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct ReleasedLockPayload : public Payload {
ClientId client_id;
ReleasedLockPayload() {}
ReleasedLockPayload(const ClientId &client_id) : client_id(client_id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_RELEASED_LOCK;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct RequestLockPayload : public Payload {
ClientId client_id;
bool force = false;
RequestLockPayload() {}
RequestLockPayload(const ClientId &client_id, bool force)
: client_id(client_id), force(force) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_REQUEST_LOCK;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct HeaderUpdatePayload : public Payload {
NotifyOp get_notify_op() const override {
return NOTIFY_OP_HEADER_UPDATE;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct AsyncRequestPayloadBase : public Payload {
public:
AsyncRequestId async_request_id;
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
protected:
AsyncRequestPayloadBase() {}
AsyncRequestPayloadBase(const AsyncRequestId &id) : async_request_id(id) {}
};
struct AsyncProgressPayload : public AsyncRequestPayloadBase {
uint64_t offset = 0;
uint64_t total = 0;
AsyncProgressPayload() {}
AsyncProgressPayload(const AsyncRequestId &id, uint64_t offset, uint64_t total)
: AsyncRequestPayloadBase(id), offset(offset), total(total) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_ASYNC_PROGRESS;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct AsyncCompletePayload : public AsyncRequestPayloadBase {
int result = 0;
AsyncCompletePayload() {}
AsyncCompletePayload(const AsyncRequestId &id, int r)
: AsyncRequestPayloadBase(id), result(r) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_ASYNC_COMPLETE;
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct FlattenPayload : public AsyncRequestPayloadBase {
FlattenPayload() {}
FlattenPayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_FLATTEN;
}
bool check_for_refresh() const override {
return true;
}
};
struct ResizePayload : public AsyncRequestPayloadBase {
uint64_t size = 0;
bool allow_shrink = true;
ResizePayload() {}
ResizePayload(const AsyncRequestId &id, uint64_t size, bool allow_shrink)
: AsyncRequestPayloadBase(id), size(size), allow_shrink(allow_shrink) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_RESIZE;
}
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct SnapPayloadBase : public AsyncRequestPayloadBase {
public:
cls::rbd::SnapshotNamespace snap_namespace;
std::string snap_name;
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
protected:
SnapPayloadBase() {}
SnapPayloadBase(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: AsyncRequestPayloadBase(id), snap_namespace(snap_namespace),
snap_name(name) {
}
};
struct SnapCreatePayload : public SnapPayloadBase {
uint64_t flags = 0;
SnapCreatePayload() {}
SnapCreatePayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace &snap_namespace,
const std::string &name, uint64_t flags)
: SnapPayloadBase(id, snap_namespace, name), flags(flags) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_CREATE;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct SnapRenamePayload : public SnapPayloadBase {
uint64_t snap_id = 0;
SnapRenamePayload() {}
SnapRenamePayload(const AsyncRequestId &id,
const uint64_t &src_snap_id,
const std::string &dst_name)
: SnapPayloadBase(id, cls::rbd::UserSnapshotNamespace(), dst_name),
snap_id(src_snap_id) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_RENAME;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct SnapRemovePayload : public SnapPayloadBase {
SnapRemovePayload() {}
SnapRemovePayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: SnapPayloadBase(id, snap_namespace, name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_REMOVE;
}
};
struct SnapProtectPayload : public SnapPayloadBase {
SnapProtectPayload() {}
SnapProtectPayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: SnapPayloadBase(id, snap_namespace, name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_PROTECT;
}
};
struct SnapUnprotectPayload : public SnapPayloadBase {
SnapUnprotectPayload() {}
SnapUnprotectPayload(const AsyncRequestId &id,
const cls::rbd::SnapshotNamespace& snap_namespace,
const std::string &name)
: SnapPayloadBase(id, snap_namespace, name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SNAP_UNPROTECT;
}
};
struct RebuildObjectMapPayload : public AsyncRequestPayloadBase {
RebuildObjectMapPayload() {}
RebuildObjectMapPayload(const AsyncRequestId &id)
: AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_REBUILD_OBJECT_MAP;
}
bool check_for_refresh() const override {
return true;
}
};
struct RenamePayload : public AsyncRequestPayloadBase {
std::string image_name;
RenamePayload() {}
RenamePayload(const AsyncRequestId &id, const std::string _image_name)
: AsyncRequestPayloadBase(id), image_name(_image_name) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_RENAME;
}
bool check_for_refresh() const override {
return true;
}
  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};
struct UpdateFeaturesPayload : public AsyncRequestPayloadBase {
uint64_t features = 0;
bool enabled = false;
UpdateFeaturesPayload() {}
UpdateFeaturesPayload(const AsyncRequestId &id, uint64_t features,
bool enabled)
: AsyncRequestPayloadBase(id), features(features), enabled(enabled) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_UPDATE_FEATURES;
}
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct MigratePayload : public AsyncRequestPayloadBase {
MigratePayload() {}
MigratePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_MIGRATE;
}
bool check_for_refresh() const override {
return true;
}
};
struct SparsifyPayload : public AsyncRequestPayloadBase {
uint64_t sparse_size = 0;
SparsifyPayload() {}
SparsifyPayload(const AsyncRequestId &id, uint64_t sparse_size)
: AsyncRequestPayloadBase(id), sparse_size(sparse_size) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_SPARSIFY;
}
bool check_for_refresh() const override {
return true;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct QuiescePayload : public AsyncRequestPayloadBase {
QuiescePayload() {}
QuiescePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_QUIESCE;
}
bool check_for_refresh() const override {
return false;
}
};
struct UnquiescePayload : public AsyncRequestPayloadBase {
UnquiescePayload() {}
UnquiescePayload(const AsyncRequestId &id) : AsyncRequestPayloadBase(id) {}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_UNQUIESCE;
}
bool check_for_refresh() const override {
return false;
}
};
struct MetadataUpdatePayload : public AsyncRequestPayloadBase {
std::string key;
std::optional<std::string> value;
MetadataUpdatePayload() {}
MetadataUpdatePayload(const AsyncRequestId &id, std::string key,
std::optional<std::string> value)
: AsyncRequestPayloadBase(id), key(key), value(value) {
}
NotifyOp get_notify_op() const override {
return NOTIFY_OP_METADATA_UPDATE;
}
bool check_for_refresh() const override {
return false;
}
  void encode(bufferlist &bl) const override;
  void decode(__u8 version, bufferlist::const_iterator &iter) override;
  void dump(Formatter *f) const override;
};
struct UnknownPayload : public Payload {
NotifyOp get_notify_op() const override {
return static_cast<NotifyOp>(-1);
}
bool check_for_refresh() const override {
return false;
}
void encode(bufferlist &bl) const override;
void decode(__u8 version, bufferlist::const_iterator &iter) override;
void dump(Formatter *f) const override;
};
struct NotifyMessage {
NotifyMessage() : payload(new UnknownPayload()) {}
NotifyMessage(Payload *payload) : payload(payload) {}
std::unique_ptr<Payload> payload;
bool check_for_refresh() const;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
NotifyOp get_notify_op() const;
static void generate_test_instances(std::list<NotifyMessage *> &o);
};
struct ResponseMessage {
ResponseMessage() : result(0) {}
ResponseMessage(int result_) : result(result_) {}
int result;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& it);
void dump(Formatter *f) const;
static void generate_test_instances(std::list<ResponseMessage *> &o);
};
std::ostream &operator<<(std::ostream &out,
const NotifyOp &op);
std::ostream &operator<<(std::ostream &out,
const AsyncRequestId &request);
WRITE_CLASS_ENCODER(AsyncRequestId);
WRITE_CLASS_ENCODER(NotifyMessage);
WRITE_CLASS_ENCODER(ResponseMessage);
} // namespace watch_notify
} // namespace librbd
#endif // LIBRBD_WATCH_NOTIFY_TYPES_H
| 14,766 | 26.705441 | 81 |
h
|
null |
ceph-main/src/librbd/Watcher.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/Watcher.h"
#include "librbd/watcher/RewatchRequest.h"
#include "librbd/Utils.h"
#include "librbd/TaskFinisher.h"
#include "librbd/asio/ContextWQ.h"
#include "include/encoding.h"
#include "common/errno.h"
#include <boost/bind/bind.hpp>
// re-include our assert to clobber the system one; fix dout:
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
namespace librbd {
using namespace boost::placeholders;
using namespace watcher;
using util::create_context_callback;
using util::create_rados_callback;
using std::string;
namespace {
struct C_UnwatchAndFlush : public Context {
librados::Rados rados;
Context *on_finish;
bool flushing = false;
int ret_val = 0;
C_UnwatchAndFlush(librados::IoCtx &io_ctx, Context *on_finish)
: rados(io_ctx), on_finish(on_finish) {
}
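  // complete() is invoked twice: first when the unwatch finishes (which
  // triggers an aio_watch_flush), then again when the flush finishes, at
  // which point on_finish is completed with the first error seen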
void complete(int r) override {
if (ret_val == 0 && r < 0) {
ret_val = r;
}
if (!flushing) {
flushing = true;
librados::AioCompletion *aio_comp = create_rados_callback(this);
r = rados.aio_watch_flush(aio_comp);
ceph_assert(r == 0);
aio_comp->release();
return;
}
// ensure our reference to the RadosClient is released prior
// to completing the callback to avoid racing an explicit
// librados shutdown
Context *ctx = on_finish;
r = ret_val;
delete this;
ctx->complete(r);
}
void finish(int r) override {
}
};
} // anonymous namespace
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Watcher::C_NotifyAck " << this << " " \
<< __func__ << ": "
Watcher::C_NotifyAck::C_NotifyAck(Watcher *watcher, uint64_t notify_id,
uint64_t handle)
: watcher(watcher), cct(watcher->m_cct), notify_id(notify_id),
handle(handle) {
ldout(cct, 10) << "id=" << notify_id << ", " << "handle=" << handle << dendl;
}
void Watcher::C_NotifyAck::finish(int r) {
ldout(cct, 10) << "r=" << r << dendl;
ceph_assert(r == 0);
watcher->acknowledge_notify(notify_id, handle, out);
}
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Watcher: " << this << " " << __func__ \
<< ": "
Watcher::Watcher(librados::IoCtx& ioctx, asio::ContextWQ *work_queue,
const string& oid)
: m_ioctx(ioctx), m_work_queue(work_queue), m_oid(oid),
m_cct(reinterpret_cast<CephContext *>(ioctx.cct())),
m_watch_lock(ceph::make_shared_mutex(
util::unique_lock_name("librbd::Watcher::m_watch_lock", this))),
m_watch_handle(0), m_notifier(work_queue, ioctx, oid),
m_watch_state(WATCH_STATE_IDLE), m_watch_ctx(*this) {
}
Watcher::~Watcher() {
std::shared_lock l{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
}
void Watcher::register_watch(Context *on_finish) {
ldout(m_cct, 10) << dendl;
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
m_watch_state = WATCH_STATE_REGISTERING;
m_watch_blocklisted = false;
librados::AioCompletion *aio_comp = create_rados_callback(
new C_RegisterWatch(this, on_finish));
int r = m_ioctx.aio_watch(m_oid, aio_comp, &m_watch_handle, &m_watch_ctx);
ceph_assert(r == 0);
aio_comp->release();
}
void Watcher::handle_register_watch(int r, Context *on_finish) {
ldout(m_cct, 10) << "r=" << r << dendl;
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REGISTERING);
m_watch_state = WATCH_STATE_IDLE;
if (r < 0) {
lderr(m_cct) << "failed to register watch: " << cpp_strerror(r)
<< dendl;
m_watch_handle = 0;
}
if (m_unregister_watch_ctx != nullptr) {
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else if (r == 0 && m_watch_error) {
lderr(m_cct) << "re-registering watch after error" << dendl;
m_watch_state = WATCH_STATE_REWATCHING;
watch_error = true;
} else {
m_watch_blocklisted = (r == -EBLOCKLISTED);
}
}
on_finish->complete(r);
if (unregister_watch_ctx != nullptr) {
unregister_watch_ctx->complete(0);
} else if (watch_error) {
rewatch();
}
}
void Watcher::unregister_watch(Context *on_finish) {
ldout(m_cct, 10) << dendl;
{
std::unique_lock watch_locker{m_watch_lock};
if (m_watch_state != WATCH_STATE_IDLE) {
ldout(m_cct, 10) << "delaying unregister until register completed"
<< dendl;
ceph_assert(m_unregister_watch_ctx == nullptr);
m_unregister_watch_ctx = new LambdaContext([this, on_finish](int r) {
unregister_watch(on_finish);
});
return;
} else if (is_registered(m_watch_lock)) {
librados::AioCompletion *aio_comp = create_rados_callback(
new C_UnwatchAndFlush(m_ioctx, on_finish));
int r = m_ioctx.aio_unwatch(m_watch_handle, aio_comp);
ceph_assert(r == 0);
aio_comp->release();
m_watch_handle = 0;
m_watch_blocklisted = false;
return;
}
}
on_finish->complete(0);
}
bool Watcher::notifications_blocked() const {
std::shared_lock locker{m_watch_lock};
bool blocked = (m_blocked_count > 0);
ldout(m_cct, 5) << "blocked=" << blocked << dendl;
return blocked;
}
void Watcher::block_notifies(Context *on_finish) {
{
std::unique_lock locker{m_watch_lock};
++m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
m_async_op_tracker.wait_for_ops(on_finish);
}
void Watcher::unblock_notifies() {
std::unique_lock locker{m_watch_lock};
ceph_assert(m_blocked_count > 0);
--m_blocked_count;
ldout(m_cct, 5) << "blocked_count=" << m_blocked_count << dendl;
}
void Watcher::flush(Context *on_finish) {
m_notifier.flush(on_finish);
}
std::string Watcher::get_oid() const {
std::shared_lock locker{m_watch_lock};
return m_oid;
}
void Watcher::set_oid(const string& oid) {
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(is_unregistered(m_watch_lock));
m_oid = oid;
}
void Watcher::handle_error(uint64_t handle, int err) {
lderr(m_cct) << "handle=" << handle << ": " << cpp_strerror(err) << dendl;
std::unique_lock watch_locker{m_watch_lock};
m_watch_error = true;
if (is_registered(m_watch_lock)) {
m_watch_state = WATCH_STATE_REWATCHING;
if (err == -EBLOCKLISTED) {
m_watch_blocklisted = true;
}
auto ctx = new LambdaContext(
boost::bind(&Watcher::rewatch, this));
m_work_queue->queue(ctx);
}
}
void Watcher::acknowledge_notify(uint64_t notify_id, uint64_t handle,
bufferlist &out) {
m_ioctx.notify_ack(m_oid, notify_id, handle, out);
}
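// re-establish the object watch after an error (see the state diagram in
// Watcher.h); a pending unregister request takes precedence and returns
// the watcher to the idle state instead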
void Watcher::rewatch() {
ldout(m_cct, 10) << dendl;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
m_watch_state = WATCH_STATE_IDLE;
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else {
m_watch_error = false;
auto ctx = create_context_callback<
Watcher, &Watcher::handle_rewatch>(this);
auto req = RewatchRequest::create(m_ioctx, m_oid, m_watch_lock,
&m_watch_ctx, &m_watch_handle, ctx);
req->send();
return;
}
}
unregister_watch_ctx->complete(0);
}
void Watcher::handle_rewatch(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
m_watch_blocklisted = false;
if (m_unregister_watch_ctx != nullptr) {
ldout(m_cct, 10) << "image is closing, skip rewatch" << dendl;
m_watch_state = WATCH_STATE_IDLE;
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else if (r == -EBLOCKLISTED) {
lderr(m_cct) << "client blocklisted" << dendl;
m_watch_blocklisted = true;
} else if (r == -ENOENT) {
ldout(m_cct, 5) << "object does not exist" << dendl;
} else if (r < 0) {
lderr(m_cct) << "failed to rewatch: " << cpp_strerror(r) << dendl;
watch_error = true;
} else if (m_watch_error) {
lderr(m_cct) << "re-registering watch after error" << dendl;
watch_error = true;
}
}
if (unregister_watch_ctx != nullptr) {
unregister_watch_ctx->complete(0);
return;
} else if (watch_error) {
rewatch();
return;
}
auto ctx = create_context_callback<
Watcher, &Watcher::handle_rewatch_callback>(this);
m_work_queue->queue(ctx, r);
}
void Watcher::handle_rewatch_callback(int r) {
ldout(m_cct, 10) << "r=" << r << dendl;
handle_rewatch_complete(r);
bool watch_error = false;
Context *unregister_watch_ctx = nullptr;
{
std::unique_lock watch_locker{m_watch_lock};
ceph_assert(m_watch_state == WATCH_STATE_REWATCHING);
if (m_unregister_watch_ctx != nullptr) {
m_watch_state = WATCH_STATE_IDLE;
std::swap(unregister_watch_ctx, m_unregister_watch_ctx);
} else if (r == -EBLOCKLISTED || r == -ENOENT) {
m_watch_state = WATCH_STATE_IDLE;
} else if (r < 0 || m_watch_error) {
watch_error = true;
} else {
m_watch_state = WATCH_STATE_IDLE;
}
}
if (unregister_watch_ctx != nullptr) {
unregister_watch_ctx->complete(0);
} else if (watch_error) {
rewatch();
}
}
void Watcher::send_notify(bufferlist& payload,
watcher::NotifyResponse *response,
Context *on_finish) {
m_notifier.notify(payload, response, on_finish);
}
void Watcher::WatchCtx::handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist& bl) {
// if notifications are blocked, finish the notification w/o
// bubbling the notification up to the derived class
watcher.m_async_op_tracker.start_op();
if (watcher.notifications_blocked()) {
bufferlist bl;
watcher.acknowledge_notify(notify_id, handle, bl);
} else {
watcher.handle_notify(notify_id, handle, notifier_id, bl);
}
watcher.m_async_op_tracker.finish_op();
}
void Watcher::WatchCtx::handle_error(uint64_t handle, int err) {
watcher.handle_error(handle, err);
}
} // namespace librbd
| 10,531 | 27.38814 | 79 |
cc
|
null |
ceph-main/src/librbd/Watcher.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_WATCHER_H
#define CEPH_LIBRBD_WATCHER_H
#include "common/AsyncOpTracker.h"
#include "common/ceph_mutex.h"
#include "common/RWLock.h"
#include "include/rados/librados.hpp"
#include "librbd/watcher/Notifier.h"
#include "librbd/watcher/Types.h"
#include <string>
#include <utility>
namespace librbd {
namespace asio { struct ContextWQ; }
namespace watcher { struct NotifyResponse; }
class Watcher {
public:
struct C_NotifyAck : public Context {
Watcher *watcher;
CephContext *cct;
uint64_t notify_id;
uint64_t handle;
bufferlist out;
C_NotifyAck(Watcher *watcher, uint64_t notify_id, uint64_t handle);
void finish(int r) override;
};
Watcher(librados::IoCtx& ioctx, asio::ContextWQ *work_queue,
const std::string& oid);
virtual ~Watcher();
void register_watch(Context *on_finish);
virtual void unregister_watch(Context *on_finish);
void flush(Context *on_finish);
bool notifications_blocked() const;
virtual void block_notifies(Context *on_finish);
void unblock_notifies();
std::string get_oid() const;
void set_oid(const std::string& oid);
uint64_t get_watch_handle() const {
std::shared_lock watch_locker{m_watch_lock};
return m_watch_handle;
}
bool is_registered() const {
std::shared_lock locker{m_watch_lock};
return is_registered(m_watch_lock);
}
bool is_unregistered() const {
std::shared_lock locker{m_watch_lock};
return is_unregistered(m_watch_lock);
}
bool is_blocklisted() const {
std::shared_lock locker{m_watch_lock};
return m_watch_blocklisted;
}
protected:
enum WatchState {
WATCH_STATE_IDLE,
WATCH_STATE_REGISTERING,
WATCH_STATE_REWATCHING
};
librados::IoCtx& m_ioctx;
asio::ContextWQ *m_work_queue;
std::string m_oid;
CephContext *m_cct;
mutable ceph::shared_mutex m_watch_lock;
uint64_t m_watch_handle;
watcher::Notifier m_notifier;
WatchState m_watch_state;
bool m_watch_blocklisted = false;
AsyncOpTracker m_async_op_tracker;
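  // the shared_mutex parameter is unused; it documents that the caller
  // must hold m_watch_lock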
bool is_registered(const ceph::shared_mutex&) const {
return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle != 0);
}
bool is_unregistered(const ceph::shared_mutex&) const {
return (m_watch_state == WATCH_STATE_IDLE && m_watch_handle == 0);
}
void send_notify(bufferlist &payload,
watcher::NotifyResponse *response = nullptr,
Context *on_finish = nullptr);
virtual void handle_notify(uint64_t notify_id, uint64_t handle,
uint64_t notifier_id, bufferlist &bl) = 0;
virtual void handle_error(uint64_t cookie, int err);
void acknowledge_notify(uint64_t notify_id, uint64_t handle,
bufferlist &out);
virtual void handle_rewatch_complete(int r) { }
private:
/**
* @verbatim
*
* <start>
* |
* v
* UNREGISTERED
* |
* | (register_watch)
* |
* REGISTERING
* |
* v (watch error)
* REGISTERED * * * * * * * > ERROR
* | ^ |
* | | | (rewatch)
* | | v
* | | REWATCHING
* | | |
* | | |
* | \---------------------/
* |
* | (unregister_watch)
* |
* v
* UNREGISTERED
* |
* v
* <finish>
*
* @endverbatim
*/
struct WatchCtx : public librados::WatchCtx2 {
Watcher &watcher;
WatchCtx(Watcher &parent) : watcher(parent) {}
void handle_notify(uint64_t notify_id,
uint64_t handle,
uint64_t notifier_id,
bufferlist& bl) override;
void handle_error(uint64_t handle, int err) override;
};
struct C_RegisterWatch : public Context {
Watcher *watcher;
Context *on_finish;
C_RegisterWatch(Watcher *watcher, Context *on_finish)
: watcher(watcher), on_finish(on_finish) {
}
void finish(int r) override {
watcher->handle_register_watch(r, on_finish);
}
};
WatchCtx m_watch_ctx;
Context *m_unregister_watch_ctx = nullptr;
bool m_watch_error = false;
uint32_t m_blocked_count = 0;
void handle_register_watch(int r, Context *on_finish);
void rewatch();
void handle_rewatch(int r);
void handle_rewatch_callback(int r);
};
} // namespace librbd
#endif // CEPH_LIBRBD_WATCHER_H
| 4,543 | 23.695652 | 71 |
h
|
null |
ceph-main/src/librbd/internal.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/int_types.h"
#include <errno.h>
#include <limits.h>
#include "include/types.h"
#include "include/uuid.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Throttle.h"
#include "common/event_socket.h"
#include "common/perf_counters.h"
#include "osdc/Striper.h"
#include "include/stringify.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/rbd/cls_rbd.h"
#include "cls/rbd/cls_rbd_types.h"
#include "cls/rbd/cls_rbd_client.h"
#include "cls/journal/cls_journal_types.h"
#include "cls/journal/cls_journal_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Journal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Operations.h"
#include "librbd/PluginRegistry.h"
#include "librbd/Types.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Image.h"
#include "librbd/api/Io.h"
#include "librbd/cache/Utils.h"
#include "librbd/exclusive_lock/AutomaticPolicy.h"
#include "librbd/exclusive_lock/StandardPolicy.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/CreateRequest.h"
#include "librbd/image/GetMetadataRequest.h"
#include "librbd/image/Types.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ObjectRequest.h"
#include "librbd/io/ReadResult.h"
#include "librbd/journal/Types.h"
#include "librbd/managed_lock/Types.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/operation/TrimRequest.h"
#include "journal/Journaler.h"
#include <boost/scope_exit.hpp>
#include <boost/variant.hpp>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd: "
#define rbd_howmany(x, y) (((x) + (y) - 1) / (y))
using std::istringstream;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
// list binds to list() here, so std::list is explicitly used below
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
using librados::Rados;
namespace librbd {
namespace {
int validate_pool(IoCtx &io_ctx, CephContext *cct) {
if (!cct->_conf.get_val<bool>("rbd_validate_pool")) {
return 0;
}
int r = io_ctx.stat(RBD_DIRECTORY, NULL, NULL);
if (r == 0) {
return 0;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to stat RBD directory: " << cpp_strerror(r) << dendl;
return r;
}
  // allocate a self-managed snapshot id if this is a new pool to force
// self-managed snapshot mode
uint64_t snap_id;
r = io_ctx.selfmanaged_snap_create(&snap_id);
if (r == -EINVAL) {
lderr(cct) << "pool not configured for self-managed RBD snapshot support"
<< dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to allocate self-managed snapshot: "
<< cpp_strerror(r) << dendl;
return r;
}
r = io_ctx.selfmanaged_snap_remove(snap_id);
if (r < 0) {
lderr(cct) << "failed to release self-managed snapshot " << snap_id
<< ": " << cpp_strerror(r) << dendl;
}
return 0;
}
} // anonymous namespace
int detect_format(IoCtx &io_ctx, const string &name,
bool *old_format, uint64_t *size)
{
CephContext *cct = (CephContext *)io_ctx.cct();
if (old_format)
*old_format = true;
int r = io_ctx.stat(util::old_header_name(name), size, NULL);
if (r == -ENOENT) {
if (old_format)
*old_format = false;
r = io_ctx.stat(util::id_obj_name(name), size, NULL);
if (r < 0)
return r;
} else if (r < 0) {
return r;
}
ldout(cct, 20) << "detect format of " << name << " : "
<< (old_format ? (*old_format ? "old" : "new") :
"don't care") << dendl;
return 0;
}
bool has_parent(int64_t parent_pool_id, uint64_t off, uint64_t overlap)
{
return (parent_pool_id != -1 && off <= overlap);
}
void init_rbd_header(struct rbd_obj_header_ondisk& ondisk,
uint64_t size, int order, uint64_t bid)
{
uint32_t hi = bid >> 32;
uint32_t lo = bid & 0xFFFFFFFF;
uint32_t extra = rand() % 0xFFFFFFFF;
// FIPS zeroization audit 20191117: this memset is not security related.
memset(&ondisk, 0, sizeof(ondisk));
memcpy(&ondisk.text, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT));
memcpy(&ondisk.signature, RBD_HEADER_SIGNATURE,
sizeof(RBD_HEADER_SIGNATURE));
memcpy(&ondisk.version, RBD_HEADER_VERSION, sizeof(RBD_HEADER_VERSION));
snprintf(ondisk.block_name, sizeof(ondisk.block_name), "rb.%x.%x.%x",
hi, lo, extra);
ondisk.image_size = size;
ondisk.options.order = order;
ondisk.options.crypt_type = RBD_CRYPT_NONE;
ondisk.options.comp_type = RBD_COMP_NONE;
ondisk.snap_seq = 0;
ondisk.snap_count = 0;
ondisk.reserved = 0;
ondisk.snap_names_len = 0;
}
void image_info(ImageCtx *ictx, image_info_t& info, size_t infosize)
{
int obj_order = ictx->order;
{
std::shared_lock locker{ictx->image_lock};
info.size = ictx->get_area_size(io::ImageArea::DATA);
}
info.obj_size = 1ULL << obj_order;
info.num_objs = Striper::get_num_objects(ictx->layout, info.size);
info.order = obj_order;
strncpy(info.block_name_prefix, ictx->object_prefix.c_str(),
RBD_MAX_BLOCK_NAME_SIZE);
info.block_name_prefix[RBD_MAX_BLOCK_NAME_SIZE - 1] = '\0';
// clear deprecated fields
info.parent_pool = -1L;
info.parent_name[0] = '\0';
}
uint64_t oid_to_object_no(const string& oid, const string& object_prefix)
{
istringstream iss(oid);
// skip object prefix and separator
iss.ignore(object_prefix.length() + 1);
uint64_t num;
iss >> std::hex >> num;
return num;
}
int read_header_bl(IoCtx& io_ctx, const string& header_oid,
bufferlist& header, uint64_t *ver)
{
int r;
uint64_t off = 0;
#define READ_SIZE 4096
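  // read the header object in READ_SIZE chunks; a short read marks the
  // end of the object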
do {
bufferlist bl;
r = io_ctx.read(header_oid, bl, READ_SIZE, off);
if (r < 0)
return r;
header.claim_append(bl);
off += r;
} while (r == READ_SIZE);
static_assert(sizeof(RBD_HEADER_TEXT) == sizeof(RBD_MIGRATE_HEADER_TEXT),
"length of rbd headers must be the same");
if (header.length() < sizeof(RBD_HEADER_TEXT) ||
(memcmp(RBD_HEADER_TEXT, header.c_str(),
sizeof(RBD_HEADER_TEXT)) != 0 &&
memcmp(RBD_MIGRATE_HEADER_TEXT, header.c_str(),
sizeof(RBD_MIGRATE_HEADER_TEXT)) != 0)) {
CephContext *cct = (CephContext *)io_ctx.cct();
lderr(cct) << "unrecognized header format" << dendl;
return -ENXIO;
}
if (ver)
*ver = io_ctx.get_last_version();
return 0;
}
int read_header(IoCtx& io_ctx, const string& header_oid,
struct rbd_obj_header_ondisk *header, uint64_t *ver)
{
bufferlist header_bl;
int r = read_header_bl(io_ctx, header_oid, header_bl, ver);
if (r < 0)
return r;
  if (header_bl.length() < sizeof(*header))
return -EIO;
memcpy(header, header_bl.c_str(), sizeof(*header));
return 0;
}
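// format 1 (v1) image names are tracked in the pool's RBD_DIRECTORY
// object; these helpers add or remove an entry via TMAP updates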
int tmap_set(IoCtx& io_ctx, const string& imgname)
{
bufferlist cmdbl, emptybl;
__u8 c = CEPH_OSD_TMAP_SET;
encode(c, cmdbl);
encode(imgname, cmdbl);
encode(emptybl, cmdbl);
return io_ctx.tmap_update(RBD_DIRECTORY, cmdbl);
}
int tmap_rm(IoCtx& io_ctx, const string& imgname)
{
bufferlist cmdbl;
__u8 c = CEPH_OSD_TMAP_RM;
encode(c, cmdbl);
encode(imgname, cmdbl);
return io_ctx.tmap_update(RBD_DIRECTORY, cmdbl);
}
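// image options are kept in a shared map from option id to a
// string/uint64 variant; an rbd_image_options_t handle points at the
// shared_ptr, so handles created via image_options_create_ref() share
// the same underlying map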
typedef boost::variant<std::string,uint64_t> image_option_value_t;
typedef std::map<int,image_option_value_t> image_options_t;
typedef std::shared_ptr<image_options_t> image_options_ref;
enum image_option_type_t {
STR,
UINT64,
};
const std::map<int, image_option_type_t> IMAGE_OPTIONS_TYPE_MAPPING = {
{RBD_IMAGE_OPTION_FORMAT, UINT64},
{RBD_IMAGE_OPTION_FEATURES, UINT64},
{RBD_IMAGE_OPTION_ORDER, UINT64},
{RBD_IMAGE_OPTION_STRIPE_UNIT, UINT64},
{RBD_IMAGE_OPTION_STRIPE_COUNT, UINT64},
{RBD_IMAGE_OPTION_JOURNAL_ORDER, UINT64},
{RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH, UINT64},
{RBD_IMAGE_OPTION_JOURNAL_POOL, STR},
{RBD_IMAGE_OPTION_FEATURES_SET, UINT64},
{RBD_IMAGE_OPTION_FEATURES_CLEAR, UINT64},
{RBD_IMAGE_OPTION_DATA_POOL, STR},
{RBD_IMAGE_OPTION_FLATTEN, UINT64},
{RBD_IMAGE_OPTION_CLONE_FORMAT, UINT64},
{RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE, UINT64},
};
std::string image_option_name(int optname) {
switch (optname) {
case RBD_IMAGE_OPTION_FORMAT:
return "format";
case RBD_IMAGE_OPTION_FEATURES:
return "features";
case RBD_IMAGE_OPTION_ORDER:
return "order";
case RBD_IMAGE_OPTION_STRIPE_UNIT:
return "stripe_unit";
case RBD_IMAGE_OPTION_STRIPE_COUNT:
return "stripe_count";
case RBD_IMAGE_OPTION_JOURNAL_ORDER:
return "journal_order";
case RBD_IMAGE_OPTION_JOURNAL_SPLAY_WIDTH:
return "journal_splay_width";
case RBD_IMAGE_OPTION_JOURNAL_POOL:
return "journal_pool";
case RBD_IMAGE_OPTION_FEATURES_SET:
return "features_set";
case RBD_IMAGE_OPTION_FEATURES_CLEAR:
return "features_clear";
case RBD_IMAGE_OPTION_DATA_POOL:
return "data_pool";
case RBD_IMAGE_OPTION_FLATTEN:
return "flatten";
case RBD_IMAGE_OPTION_CLONE_FORMAT:
return "clone_format";
case RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE:
return "mirror_image_mode";
default:
return "unknown (" + stringify(optname) + ")";
}
}
void image_options_create(rbd_image_options_t* opts)
{
image_options_ref* opts_ = new image_options_ref(new image_options_t());
*opts = static_cast<rbd_image_options_t>(opts_);
}
void image_options_create_ref(rbd_image_options_t* opts,
rbd_image_options_t orig)
{
image_options_ref* orig_ = static_cast<image_options_ref*>(orig);
image_options_ref* opts_ = new image_options_ref(*orig_);
*opts = static_cast<rbd_image_options_t>(opts_);
}
void image_options_copy(rbd_image_options_t* opts,
const ImageOptions &orig)
{
image_options_ref* opts_ = new image_options_ref(new image_options_t());
*opts = static_cast<rbd_image_options_t>(opts_);
std::string str_val;
uint64_t uint64_val;
for (auto &i : IMAGE_OPTIONS_TYPE_MAPPING) {
switch (i.second) {
case STR:
if (orig.get(i.first, &str_val) == 0) {
image_options_set(*opts, i.first, str_val);
}
continue;
case UINT64:
if (orig.get(i.first, &uint64_val) == 0) {
image_options_set(*opts, i.first, uint64_val);
}
continue;
}
}
}
void image_options_destroy(rbd_image_options_t opts)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
delete opts_;
}
int image_options_set(rbd_image_options_t opts, int optname,
const std::string& optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != STR) {
return -EINVAL;
}
(*opts_->get())[optname] = optval;
return 0;
}
int image_options_set(rbd_image_options_t opts, int optname, uint64_t optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != UINT64) {
return -EINVAL;
}
(*opts_->get())[optname] = optval;
return 0;
}
int image_options_get(rbd_image_options_t opts, int optname,
std::string* optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != STR) {
return -EINVAL;
}
image_options_t::const_iterator j = (*opts_)->find(optname);
if (j == (*opts_)->end()) {
return -ENOENT;
}
*optval = boost::get<std::string>(j->second);
return 0;
}
int image_options_get(rbd_image_options_t opts, int optname, uint64_t* optval)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end() || i->second != UINT64) {
return -EINVAL;
}
image_options_t::const_iterator j = (*opts_)->find(optname);
if (j == (*opts_)->end()) {
return -ENOENT;
}
*optval = boost::get<uint64_t>(j->second);
return 0;
}
int image_options_is_set(rbd_image_options_t opts, int optname,
bool* is_set)
{
if (IMAGE_OPTIONS_TYPE_MAPPING.find(optname) ==
IMAGE_OPTIONS_TYPE_MAPPING.end()) {
return -EINVAL;
}
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
*is_set = ((*opts_)->find(optname) != (*opts_)->end());
return 0;
}
int image_options_unset(rbd_image_options_t opts, int optname)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
std::map<int, image_option_type_t>::const_iterator i =
IMAGE_OPTIONS_TYPE_MAPPING.find(optname);
if (i == IMAGE_OPTIONS_TYPE_MAPPING.end()) {
ceph_assert((*opts_)->find(optname) == (*opts_)->end());
return -EINVAL;
}
image_options_t::const_iterator j = (*opts_)->find(optname);
if (j == (*opts_)->end()) {
return -ENOENT;
}
(*opts_)->erase(j);
return 0;
}
void image_options_clear(rbd_image_options_t opts)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
(*opts_)->clear();
}
bool image_options_is_empty(rbd_image_options_t opts)
{
image_options_ref* opts_ = static_cast<image_options_ref*>(opts);
return (*opts_)->empty();
}
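// Create a format 1 (legacy) image: add the name to RBD_DIRECTORY, then write
// the on-disk header object, rolling the directory entry back if the header
// write fails.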
int create_v1(IoCtx& io_ctx, const char *imgname, uint64_t size, int order)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << __func__ << " " << &io_ctx << " name = " << imgname
<< " size = " << size << " order = " << order << dendl;
int r = validate_pool(io_ctx, cct);
if (r < 0) {
return r;
}
if (!io_ctx.get_namespace().empty()) {
lderr(cct) << "attempting to add v1 image to namespace" << dendl;
return -EINVAL;
}
ldout(cct, 2) << "adding rbd image to directory..." << dendl;
r = tmap_set(io_ctx, imgname);
if (r < 0) {
lderr(cct) << "error adding image to directory: " << cpp_strerror(r)
<< dendl;
return r;
}
Rados rados(io_ctx);
uint64_t bid = rados.get_instance_id();
ldout(cct, 2) << "creating rbd image..." << dendl;
struct rbd_obj_header_ondisk header;
init_rbd_header(header, size, order, bid);
bufferlist bl;
bl.append((const char *)&header, sizeof(header));
string header_oid = util::old_header_name(imgname);
r = io_ctx.write(header_oid, bl, bl.length(), 0);
if (r < 0) {
lderr(cct) << "Error writing image header: " << cpp_strerror(r)
<< dendl;
int remove_r = tmap_rm(io_ctx, imgname);
if (remove_r < 0) {
lderr(cct) << "Could not remove image from directory after "
<< "header creation failed: "
<< cpp_strerror(remove_r) << dendl;
}
return r;
}
ldout(cct, 2) << "done." << dendl;
return 0;
}
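// Legacy wrapper: packs *order into ImageOptions, forwards to the full
// create(), and copies the (possibly defaulted) order back out.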
int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
int *order)
{
uint64_t order_ = *order;
ImageOptions opts;
int r = opts.set(RBD_IMAGE_OPTION_ORDER, order_);
ceph_assert(r == 0);
r = create(io_ctx, imgname, "", size, opts, "", "", false);
int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_);
ceph_assert(r1 == 0);
*order = order_;
return r;
}
int create(IoCtx& io_ctx, const char *imgname, uint64_t size,
bool old_format, uint64_t features, int *order,
uint64_t stripe_unit, uint64_t stripe_count)
{
if (!order)
return -EINVAL;
uint64_t order_ = *order;
uint64_t format = old_format ? 1 : 2;
ImageOptions opts;
int r;
r = opts.set(RBD_IMAGE_OPTION_FORMAT, format);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_FEATURES, features);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_ORDER, order_);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
ceph_assert(r == 0);
r = opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
ceph_assert(r == 0);
r = create(io_ctx, imgname, "", size, opts, "", "", false);
int r1 = opts.get(RBD_IMAGE_OPTION_ORDER, &order_);
ceph_assert(r1 == 0);
*order = order_;
return r;
}
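// Main creation path: resolves format/order defaults from configuration,
// refuses names that already exist in either format, and dispatches to
// create_v1() or image::CreateRequest<>.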
int create(IoCtx& io_ctx, const std::string &image_name,
const std::string &image_id, uint64_t size,
ImageOptions& opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid,
bool skip_mirror_enable)
{
std::string id(image_id);
if (id.empty()) {
id = util::generate_image_id(io_ctx);
}
CephContext *cct = (CephContext *)io_ctx.cct();
uint64_t option;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &option) == 0) {
lderr(cct) << "create does not support 'flatten' image option" << dendl;
return -EINVAL;
}
if (opts.get(RBD_IMAGE_OPTION_CLONE_FORMAT, &option) == 0) {
lderr(cct) << "create does not support 'clone_format' image option"
<< dendl;
return -EINVAL;
}
  ldout(cct, 10) << __func__ << " name=" << image_name << ", "
                 << "id=" << id << ", "
                 << "size=" << size << ", opts=" << opts << dendl;
uint64_t format;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0)
format = cct->_conf.get_val<uint64_t>("rbd_default_format");
bool old_format = format == 1;
// make sure it doesn't already exist, in either format
int r = detect_format(io_ctx, image_name, NULL, NULL);
if (r != -ENOENT) {
if (r) {
lderr(cct) << "Could not tell if " << image_name << " already exists"
<< dendl;
return r;
}
lderr(cct) << "rbd image " << image_name << " already exists" << dendl;
return -EEXIST;
}
uint64_t order = 0;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0 || order == 0) {
order = cct->_conf.get_val<uint64_t>("rbd_default_order");
}
r = image::CreateRequest<>::validate_order(cct, order);
if (r < 0) {
return r;
}
  if (old_format) {
    if (!getenv("RBD_FORCE_ALLOW_V1")) {
      lderr(cct) << "Format 1 image creation unsupported." << dendl;
      return -EINVAL;
    }
    lderr(cct) << "Forced V1 image creation." << dendl;
    r = create_v1(io_ctx, image_name.c_str(), size, order);
} else {
AsioEngine asio_engine(io_ctx);
ConfigProxy config{cct->_conf};
api::Config<>::apply_pool_overrides(io_ctx, &config);
uint32_t create_flags = 0U;
uint64_t mirror_image_mode = RBD_MIRROR_IMAGE_MODE_JOURNAL;
if (skip_mirror_enable) {
create_flags = image::CREATE_FLAG_SKIP_MIRROR_ENABLE;
} else if (opts.get(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE,
&mirror_image_mode) == 0) {
create_flags = image::CREATE_FLAG_FORCE_MIRROR_ENABLE;
}
C_SaferCond cond;
image::CreateRequest<> *req = image::CreateRequest<>::create(
config, io_ctx, image_name, id, size, opts, create_flags,
static_cast<cls::rbd::MirrorImageMode>(mirror_image_mode),
non_primary_global_image_id, primary_mirror_uuid,
asio_engine.get_work_queue(), &cond);
req->send();
r = cond.wait();
}
int r1 = opts.set(RBD_IMAGE_OPTION_ORDER, order);
ceph_assert(r1 == 0);
return r;
}
/*
* Parent may be in different pool, hence different IoCtx
*/
int clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name,
uint64_t features, int *c_order,
uint64_t stripe_unit, int stripe_count)
{
uint64_t order = *c_order;
ImageOptions opts;
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
opts.set(RBD_IMAGE_OPTION_ORDER, order);
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
int r = clone(p_ioctx, nullptr, p_name, p_snap_name, c_ioctx, nullptr,
c_name, opts, "", "");
opts.get(RBD_IMAGE_OPTION_ORDER, &order);
*c_order = order;
return r;
}
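// Full clone entry point: exactly one of p_id/p_name may be supplied
// (asserted below); a clone id is generated when none is given, and the work
// is driven through image::CloneRequest<>.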
int clone(IoCtx& p_ioctx, const char *p_id, const char *p_name,
const char *p_snap_name, IoCtx& c_ioctx, const char *c_id,
const char *c_name, ImageOptions& c_opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid)
{
ceph_assert((p_id == nullptr) ^ (p_name == nullptr));
CephContext *cct = (CephContext *)p_ioctx.cct();
if (p_snap_name == nullptr) {
lderr(cct) << "image to be cloned must be a snapshot" << dendl;
return -EINVAL;
}
uint64_t flatten;
if (c_opts.get(RBD_IMAGE_OPTION_FLATTEN, &flatten) == 0) {
lderr(cct) << "clone does not support 'flatten' image option" << dendl;
return -EINVAL;
}
int r;
std::string parent_id;
if (p_id == nullptr) {
r = cls_client::dir_get_id(&p_ioctx, RBD_DIRECTORY, p_name,
&parent_id);
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to retrieve parent image id: "
<< cpp_strerror(r) << dendl;
}
return r;
}
} else {
parent_id = p_id;
}
std::string clone_id;
if (c_id == nullptr) {
clone_id = util::generate_image_id(c_ioctx);
} else {
clone_id = c_id;
}
  ldout(cct, 10) << __func__ << " "
                 << "c_name=" << c_name << ", "
                 << "c_id=" << clone_id << ", "
                 << "c_opts=" << c_opts << dendl;
ConfigProxy config{reinterpret_cast<CephContext *>(c_ioctx.cct())->_conf};
api::Config<>::apply_pool_overrides(c_ioctx, &config);
AsioEngine asio_engine(p_ioctx);
C_SaferCond cond;
auto *req = image::CloneRequest<>::create(
config, p_ioctx, parent_id, p_snap_name,
{cls::rbd::UserSnapshotNamespace{}}, CEPH_NOSNAP, c_ioctx, c_name,
clone_id, c_opts, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
non_primary_global_image_id, primary_mirror_uuid,
asio_engine.get_work_queue(), &cond);
req->send();
r = cond.wait();
if (r < 0) {
return r;
}
return 0;
}
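// Rename by opening the source image and delegating to the operations state
// machine; the BOOST_SCOPE_EXIT guard closes the image on every return path.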
int rename(IoCtx& io_ctx, const char *srcname, const char *dstname)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "rename " << &io_ctx << " " << srcname << " -> "
<< dstname << dendl;
ImageCtx *ictx = new ImageCtx(srcname, "", "", io_ctx, false);
int r = ictx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening source image: " << cpp_strerror(r) << dendl;
return r;
}
BOOST_SCOPE_EXIT((ictx)) {
ictx->state->close();
} BOOST_SCOPE_EXIT_END
return ictx->operations->rename(dstname);
}
int info(ImageCtx *ictx, image_info_t& info, size_t infosize)
{
ldout(ictx->cct, 20) << "info " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
image_info(ictx, info, infosize);
return 0;
}
int get_old_format(ImageCtx *ictx, uint8_t *old)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
*old = ictx->old_format;
return 0;
}
int get_size(ImageCtx *ictx, uint64_t *size)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l2{ictx->image_lock};
*size = ictx->get_area_size(io::ImageArea::DATA);
return 0;
}
int get_features(ImageCtx *ictx, uint64_t *features)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
*features = ictx->features;
return 0;
}
int get_overlap(ImageCtx *ictx, uint64_t *overlap)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock image_locker{ictx->image_lock};
uint64_t raw_overlap;
r = ictx->get_parent_overlap(ictx->snap_id, &raw_overlap);
if (r < 0) {
return r;
}
auto _overlap = ictx->reduce_parent_overlap(raw_overlap, false);
*overlap = (_overlap.second == io::ImageArea::DATA ? _overlap.first : 0);
return 0;
}
int get_flags(ImageCtx *ictx, uint64_t *flags)
{
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock l2{ictx->image_lock};
return ictx->get_flags(ictx->snap_id, flags);
}
int set_image_notification(ImageCtx *ictx, int fd, int type)
{
CephContext *cct = ictx->cct;
  ldout(cct, 20) << __func__ << " " << ictx << " fd " << fd << " type " << type << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
if (ictx->event_socket.is_valid())
return -EINVAL;
return ictx->event_socket.init(fd, type);
}
int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
*is_owner = false;
std::shared_lock owner_locker{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
return 0;
}
// might have been blocklisted by peer -- ensure we still own
// the lock by pinging the OSD
int r = ictx->exclusive_lock->assert_header_locked();
if (r == -EBUSY || r == -ENOENT) {
return 0;
} else if (r < 0) {
return r;
}
*is_owner = true;
return 0;
}
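// Explicitly acquire the exclusive lock; installs exclusive_lock::StandardPolicy
// first so the lock is no longer requested or released automatically on the
// caller's behalf.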
int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
<< "lock_mode=" << lock_mode << dendl;
if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
return -EOPNOTSUPP;
}
C_SaferCond lock_ctx;
{
std::unique_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
if (ictx->get_exclusive_lock_policy()->may_auto_request_lock()) {
ictx->set_exclusive_lock_policy(
new exclusive_lock::StandardPolicy(ictx));
}
if (ictx->exclusive_lock->is_lock_owner()) {
return 0;
}
ictx->exclusive_lock->acquire_lock(&lock_ctx);
}
int r = lock_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to request exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
return -EINVAL;
} else if (!ictx->exclusive_lock->is_lock_owner()) {
lderr(cct) << "failed to acquire exclusive lock" << dendl;
return ictx->exclusive_lock->get_unlocked_op_error();
}
return 0;
}
int lock_release(ImageCtx *ictx)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
C_SaferCond lock_ctx;
{
std::unique_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr ||
!ictx->exclusive_lock->is_lock_owner()) {
lderr(cct) << "not exclusive lock owner" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->release_lock(&lock_ctx);
}
int r = lock_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to release exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
std::list<std::string> *lock_owners)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << dendl;
managed_lock::Locker locker;
C_SaferCond get_owner_ctx;
{
std::shared_lock owner_locker{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->get_locker(&locker, &get_owner_ctx);
}
int r = get_owner_ctx.wait();
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed to determine current lock owner: "
<< cpp_strerror(r) << dendl;
return r;
}
*lock_mode = RBD_LOCK_MODE_EXCLUSIVE;
lock_owners->clear();
lock_owners->emplace_back(locker.address);
return 0;
}
int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
const std::string &lock_owner) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << ": ictx=" << ictx << ", "
<< "lock_mode=" << lock_mode << ", "
<< "lock_owner=" << lock_owner << dendl;
if (lock_mode != RBD_LOCK_MODE_EXCLUSIVE) {
return -EOPNOTSUPP;
}
if (ictx->read_only) {
return -EROFS;
}
managed_lock::Locker locker;
C_SaferCond get_owner_ctx;
{
std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->get_locker(&locker, &get_owner_ctx);
}
int r = get_owner_ctx.wait();
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed to determine current lock owner: "
<< cpp_strerror(r) << dendl;
return r;
}
if (locker.address != lock_owner) {
return -EBUSY;
}
C_SaferCond break_ctx;
{
std::shared_lock l{ictx->owner_lock};
if (ictx->exclusive_lock == nullptr) {
lderr(cct) << "exclusive-lock feature is not enabled" << dendl;
return -EINVAL;
}
ictx->exclusive_lock->break_lock(locker, true, &break_ctx);
}
r = break_ctx.wait();
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed to break lock: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
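// Deep copy into dest_md_ctx: options left unset by the caller inherit the
// source's format, striping, order and features; the data itself is streamed
// by copy(src, dest, ...) below.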
int copy(ImageCtx *src, IoCtx& dest_md_ctx, const char *destname,
ImageOptions& opts, ProgressContext &prog_ctx, size_t sparse_size)
{
CephContext *cct = (CephContext *)dest_md_ctx.cct();
uint64_t option;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &option) == 0) {
lderr(cct) << "copy does not support 'flatten' image option" << dendl;
return -EINVAL;
}
if (opts.get(RBD_IMAGE_OPTION_CLONE_FORMAT, &option) == 0) {
lderr(cct) << "copy does not support 'clone_format' image option"
<< dendl;
return -EINVAL;
}
ldout(cct, 20) << "copy " << src->name
<< (src->snap_name.length() ? "@" + src->snap_name : "")
<< " -> " << destname << " opts = " << opts << dendl;
src->image_lock.lock_shared();
uint64_t features = src->features;
uint64_t src_size = src->get_image_size(src->snap_id);
src->image_lock.unlock_shared();
uint64_t format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
}
uint64_t stripe_unit = src->stripe_unit;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
}
uint64_t stripe_count = src->stripe_count;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
}
uint64_t order = src->order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
opts.set(RBD_IMAGE_OPTION_ORDER, order);
}
if (opts.get(RBD_IMAGE_OPTION_FEATURES, &features) != 0) {
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
}
if (features & ~RBD_FEATURES_ALL) {
lderr(cct) << "librbd does not support requested features" << dendl;
return -ENOSYS;
}
int r = create(dest_md_ctx, destname, "", src_size, opts, "", "", false);
if (r < 0) {
lderr(cct) << "header creation failed" << dendl;
return r;
}
opts.set(RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(order));
ImageCtx *dest = new librbd::ImageCtx(destname, "", nullptr, dest_md_ctx,
false);
r = dest->state->open(0);
if (r < 0) {
lderr(cct) << "failed to read newly created header" << dendl;
return r;
}
r = copy(src, dest, prog_ctx, sparse_size);
int close_r = dest->state->close();
if (r == 0 && close_r < 0) {
r = close_r;
}
return r;
}
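// Helper completions for the copy pipeline: C_CopyRead splits each read
// buffer into sparse extents (util::calc_sparse_extent) and issues throttled
// writes to the destination; C_CopyWrite frees a per-extent buffer once its
// write completes.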
class C_CopyWrite : public Context {
public:
C_CopyWrite(bufferlist *bl, Context* ctx)
: m_bl(bl), m_ctx(ctx) {}
void finish(int r) override {
delete m_bl;
m_ctx->complete(r);
}
private:
bufferlist *m_bl;
Context *m_ctx;
};
class C_CopyRead : public Context {
public:
C_CopyRead(SimpleThrottle *throttle, ImageCtx *dest, uint64_t offset,
bufferlist *bl, size_t sparse_size)
: m_throttle(throttle), m_dest(dest), m_offset(offset), m_bl(bl),
m_sparse_size(sparse_size) {
m_throttle->start_op();
}
void finish(int r) override {
if (r < 0) {
lderr(m_dest->cct) << "error reading from source image at offset "
<< m_offset << ": " << cpp_strerror(r) << dendl;
delete m_bl;
m_throttle->end_op(r);
return;
}
ceph_assert(m_bl->length() == (size_t)r);
if (m_bl->is_zero()) {
delete m_bl;
m_throttle->end_op(r);
return;
}
if (!m_sparse_size) {
m_sparse_size = (1 << m_dest->order);
}
auto *throttle = m_throttle;
auto *end_op_ctx = new LambdaContext([throttle](int r) {
throttle->end_op(r);
});
auto gather_ctx = new C_Gather(m_dest->cct, end_op_ctx);
m_bl->rebuild(buffer::ptr_node::create(m_bl->length()));
size_t write_offset = 0;
size_t write_length = 0;
size_t offset = 0;
size_t length = m_bl->length();
const auto& m_ptr = m_bl->front();
while (offset < length) {
if (util::calc_sparse_extent(m_ptr,
m_sparse_size,
length,
&write_offset,
&write_length,
&offset)) {
bufferlist *write_bl = new bufferlist();
write_bl->push_back(
buffer::ptr_node::create(m_ptr, write_offset, write_length));
Context *ctx = new C_CopyWrite(write_bl, gather_ctx->new_sub());
auto comp = io::AioCompletion::create(ctx);
// coordinate through AIO WQ to ensure lock is acquired if needed
api::Io<>::aio_write(*m_dest, comp, m_offset + write_offset,
write_length, std::move(*write_bl),
LIBRADOS_OP_FLAG_FADVISE_DONTNEED,
std::move(read_trace));
write_offset = offset;
write_length = 0;
}
}
delete m_bl;
ceph_assert(gather_ctx->get_sub_created_count() > 0);
gather_ctx->activate();
}
ZTracer::Trace read_trace;
private:
SimpleThrottle *m_throttle;
ImageCtx *m_dest;
uint64_t m_offset;
bufferlist *m_bl;
size_t m_sparse_size;
};
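// Object-map aware copy loop: flush the source, copy metadata, then read one
// stripe period at a time, skipping periods whose backing objects are known
// not to exist.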
int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size)
{
src->image_lock.lock_shared();
uint64_t src_size = src->get_image_size(src->snap_id);
src->image_lock.unlock_shared();
dest->image_lock.lock_shared();
uint64_t dest_size = dest->get_image_size(dest->snap_id);
dest->image_lock.unlock_shared();
CephContext *cct = src->cct;
if (dest_size < src_size) {
    lderr(cct) << "src size " << src_size << " > dest size "
               << dest_size << dendl;
return -EINVAL;
}
// ensure previous writes are visible to dest
C_SaferCond flush_ctx;
{
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, src,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec::create_flush(
*src, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
req->send();
}
int r = flush_ctx.wait();
if (r < 0) {
return r;
}
C_SaferCond ctx;
auto req = deep_copy::MetadataCopyRequest<>::create(
src, dest, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
return r;
}
ZTracer::Trace trace;
if (src->blkin_trace_all) {
trace.init("copy", &src->trace_endpoint);
}
SimpleThrottle throttle(src->config.get_val<uint64_t>("rbd_concurrent_management_ops"), false);
uint64_t period = src->get_stripe_period();
unsigned fadvise_flags = LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL |
LIBRADOS_OP_FLAG_FADVISE_NOCACHE;
uint64_t object_id = 0;
for (uint64_t offset = 0; offset < src_size; offset += period) {
if (throttle.pending_error()) {
return throttle.wait_for_ret();
}
{
std::shared_lock image_locker{src->image_lock};
if (src->object_map != nullptr) {
bool skip = true;
// each period is related to src->stripe_count objects, check them all
for (uint64_t i=0; i < src->stripe_count; i++) {
if (object_id < src->object_map->size() &&
src->object_map->object_may_exist(object_id)) {
skip = false;
}
++object_id;
}
if (skip) continue;
} else {
object_id += src->stripe_count;
}
}
uint64_t len = std::min(period, src_size - offset);
bufferlist *bl = new bufferlist();
auto ctx = new C_CopyRead(&throttle, dest, offset, bl, sparse_size);
auto comp = io::AioCompletion::create_and_start<Context>(
ctx, src, io::AIO_TYPE_READ);
auto req = io::ImageDispatchSpec::create_read(
*src, io::IMAGE_DISPATCH_LAYER_NONE, comp,
{{offset, len}}, io::ImageArea::DATA, io::ReadResult{bl},
src->get_data_io_context(), fadvise_flags, 0, trace);
ctx->read_trace = trace;
req->send();
prog_ctx.update_progress(offset, src_size);
}
r = throttle.wait_for_ret();
if (r >= 0)
prog_ctx.update_progress(src_size, src_size);
return r;
}
int list_lockers(ImageCtx *ictx,
std::list<locker_t> *lockers,
bool *exclusive,
string *tag)
{
  ldout(ictx->cct, 20) << "list_lockers on image " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock locker{ictx->image_lock};
if (exclusive)
*exclusive = ictx->exclusive_locked;
if (tag)
*tag = ictx->lock_tag;
if (lockers) {
lockers->clear();
map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t>::const_iterator it;
for (it = ictx->lockers.begin(); it != ictx->lockers.end(); ++it) {
locker_t locker;
locker.client = stringify(it->first.locker);
locker.cookie = it->first.cookie;
locker.address = it->second.addr.get_legacy_str();
lockers->push_back(locker);
}
}
return 0;
}
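// Cooperative (advisory) locking on the image header via the cls_lock object
// class; notify_update() tells watchers the header changed so they can
// refresh their lock state.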
int lock(ImageCtx *ictx, bool exclusive, const string& cookie,
const string& tag)
{
ldout(ictx->cct, 20) << "lock image " << ictx << " exclusive=" << exclusive
<< " cookie='" << cookie << "' tag='" << tag << "'"
<< dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
/**
* If we wanted we could do something more intelligent, like local
* checks that we think we will succeed. But for now, let's not
* duplicate that code.
*/
{
std::shared_lock locker{ictx->image_lock};
r = rados::cls::lock::lock(&ictx->md_ctx, ictx->header_oid, RBD_LOCK_NAME,
exclusive ? ClsLockType::EXCLUSIVE : ClsLockType::SHARED,
cookie, tag, "", utime_t(), 0);
if (r < 0) {
return r;
}
}
ictx->notify_update();
return 0;
}
int unlock(ImageCtx *ictx, const string& cookie)
{
ldout(ictx->cct, 20) << "unlock image " << ictx
<< " cookie='" << cookie << "'" << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
{
std::shared_lock locker{ictx->image_lock};
r = rados::cls::lock::unlock(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, cookie);
if (r < 0) {
return r;
}
}
ictx->notify_update();
return 0;
}
int break_lock(ImageCtx *ictx, const string& client,
const string& cookie)
{
ldout(ictx->cct, 20) << "break_lock image " << ictx << " client='" << client
<< "' cookie='" << cookie << "'" << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
entity_name_t lock_client;
if (!lock_client.parse(client)) {
lderr(ictx->cct) << "Unable to parse client '" << client
<< "'" << dendl;
return -EINVAL;
}
if (ictx->config.get_val<bool>("rbd_blocklist_on_break_lock")) {
typedef std::map<rados::cls::lock::locker_id_t,
rados::cls::lock::locker_info_t> Lockers;
Lockers lockers;
ClsLockType lock_type;
std::string lock_tag;
r = rados::cls::lock::get_lock_info(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, &lockers, &lock_type,
&lock_tag);
if (r < 0) {
lderr(ictx->cct) << "unable to retrieve lock info: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string client_address;
for (Lockers::iterator it = lockers.begin();
it != lockers.end(); ++it) {
if (it->first.locker == lock_client) {
client_address = it->second.addr.get_legacy_str();
break;
}
}
if (client_address.empty()) {
return -ENOENT;
}
librados::Rados rados(ictx->md_ctx);
r = rados.blocklist_add(
client_address,
ictx->config.get_val<uint64_t>("rbd_blocklist_expire_seconds"));
if (r < 0) {
lderr(ictx->cct) << "unable to blocklist client: " << cpp_strerror(r)
<< dendl;
return r;
}
}
r = rados::cls::lock::break_lock(&ictx->md_ctx, ictx->header_oid,
RBD_LOCK_NAME, cookie, lock_client);
if (r < 0)
return r;
ictx->notify_update();
return 0;
}
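// Adapter that completes a plain Context from an io::AioCompletion callback,
// then releases the completion.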
void rbd_ctx_cb(completion_t cb, void *arg)
{
Context *ctx = reinterpret_cast<Context *>(arg);
auto comp = reinterpret_cast<io::AioCompletion *>(cb);
ctx->complete(comp->get_return_value());
comp->release();
}
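// Synchronous read loop: clips [off, off + len) to the data area, reads one
// stripe period at a time, and invokes cb for each chunk; returns the total
// number of bytes read or a negative error code.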
int64_t read_iterate(ImageCtx *ictx, uint64_t off, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
coarse_mono_time start_time;
ceph::timespan elapsed;
ldout(ictx->cct, 20) << "read_iterate " << ictx << " off = " << off
<< " len = " << len << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
uint64_t mylen = len;
ictx->image_lock.lock_shared();
r = clip_io(ictx, off, &mylen, io::ImageArea::DATA);
ictx->image_lock.unlock_shared();
if (r < 0)
return r;
int64_t total_read = 0;
uint64_t period = ictx->get_stripe_period();
uint64_t left = mylen;
ZTracer::Trace trace;
if (ictx->blkin_trace_all) {
trace.init("read_iterate", &ictx->trace_endpoint);
}
std::shared_lock owner_locker{ictx->owner_lock};
start_time = coarse_mono_clock::now();
while (left > 0) {
uint64_t period_off = off - (off % period);
uint64_t read_len = std::min(period_off + period - off, left);
bufferlist bl;
C_SaferCond ctx;
auto c = io::AioCompletion::create_and_start(&ctx, ictx,
io::AIO_TYPE_READ);
auto req = io::ImageDispatchSpec::create_read(
*ictx, io::IMAGE_DISPATCH_LAYER_NONE, c,
{{off, read_len}}, io::ImageArea::DATA, io::ReadResult{&bl},
ictx->get_data_io_context(), 0, 0, trace);
req->send();
int ret = ctx.wait();
if (ret < 0) {
return ret;
}
r = cb(total_read, ret, bl.c_str(), arg);
if (r < 0) {
return r;
}
total_read += ret;
left -= ret;
off += ret;
}
elapsed = coarse_mono_clock::now() - start_time;
ictx->perfcounter->tinc(l_librbd_rd_latency, elapsed);
ictx->perfcounter->inc(l_librbd_rd);
ictx->perfcounter->inc(l_librbd_rd_bytes, mylen);
return total_read;
}
// validate extent against area size; clip to area size if necessary
int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len, io::ImageArea area) {
ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
if (ictx->snap_id != CEPH_NOSNAP &&
ictx->get_snap_info(ictx->snap_id) == nullptr) {
return -ENOENT;
}
// special-case "len == 0" requests: always valid
if (*len == 0)
return 0;
uint64_t area_size = ictx->get_area_size(area);
// can't start past end
if (off >= area_size)
return -EINVAL;
// clip requests that extend past end to just end
if ((off + *len) > area_size)
*len = (size_t)(area_size - off);
return 0;
}
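// Invalidate the in-memory image cache via the I/O dispatcher and, when this
// client does not own the exclusive lock, discard any persistent dirty cache.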
int invalidate_cache(ImageCtx *ictx)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << "invalidate_cache " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
{
ictx->io_image_dispatcher->invalidate_cache(&ctx);
}
r = ctx.wait();
if (r < 0) {
ldout(cct, 20) << "failed to invalidate image cache" << dendl;
return r;
}
ictx->perfcounter->inc(l_librbd_invalidate_cache);
  // discard the persistent dirty cache when this client does not own the
  // exclusive lock
if ((!ictx->exclusive_lock ||
!ictx->exclusive_lock->is_lock_owner()) &&
ictx->test_features(RBD_FEATURE_DIRTY_CACHE)) {
C_SaferCond ctx3;
ictx->plugin_registry->discard(&ctx3);
r = ctx3.wait();
}
return r;
}
int poll_io_events(ImageCtx *ictx, io::AioCompletion **comps, int numcomp)
{
if (numcomp <= 0)
return -EINVAL;
CephContext *cct = ictx->cct;
ldout(cct, 20) << __func__ << " " << ictx << " numcomp = " << numcomp
<< dendl;
int i = 0;
while (i < numcomp && ictx->event_socket_completions.pop(comps[i])) {
++i;
}
return i;
}
int metadata_get(ImageCtx *ictx, const string &key, string *value)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << "metadata_get " << ictx << " key=" << key << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
return cls_client::metadata_get(&ictx->md_ctx, ictx->header_oid, key, value);
}
int metadata_list(ImageCtx *ictx, const string &start, uint64_t max, map<string, bufferlist> *pairs)
{
CephContext *cct = ictx->cct;
ldout(cct, 20) << "metadata_list " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
C_SaferCond ctx;
auto req = image::GetMetadataRequest<>::create(
ictx->md_ctx, ictx->header_oid, false, "", start, max, pairs, &ctx);
req->send();
return ctx.wait();
}
int list_watchers(ImageCtx *ictx,
std::list<librbd::image_watcher_t> &watchers)
{
int r;
std::string header_oid;
std::list<obj_watch_t> obj_watchers;
if (ictx->old_format) {
header_oid = util::old_header_name(ictx->name);
} else {
header_oid = util::header_name(ictx->id);
}
r = ictx->md_ctx.list_watchers(header_oid, &obj_watchers);
if (r < 0) {
return r;
}
watchers.clear();
for (auto i = obj_watchers.begin(); i != obj_watchers.end(); ++i) {
librbd::image_watcher_t watcher;
watcher.addr = i->addr;
watcher.id = i->watcher_id;
watcher.cookie = i->cookie;
watchers.push_back(watcher);
}
return 0;
}
}
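// Render ImageOptions as "[name=value, ...]" using the option-name mapping
// above; unset options are omitted (e.g. "[format=2, order=22]").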
std::ostream &operator<<(std::ostream &os, const librbd::ImageOptions &opts) {
os << "[";
const char *delimiter = "";
for (auto &i : librbd::IMAGE_OPTIONS_TYPE_MAPPING) {
if (i.second == librbd::STR) {
std::string val;
if (opts.get(i.first, &val) == 0) {
os << delimiter << librbd::image_option_name(i.first) << "=" << val;
delimiter = ", ";
}
} else if (i.second == librbd::UINT64) {
uint64_t val;
if (opts.get(i.first, &val) == 0) {
os << delimiter << librbd::image_option_name(i.first) << "=" << val;
delimiter = ", ";
}
}
}
os << "]";
return os;
}
ceph-main/src/librbd/internal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_INTERNAL_H
#define CEPH_LIBRBD_INTERNAL_H
#include "include/int_types.h"
#include <map>
#include <set>
#include <string>
#include <vector>
#include "include/buffer_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rbd_types.h"
#include "cls/rbd/cls_rbd_types.h"
#include "common/ceph_time.h"
#include "librbd/Types.h"
namespace librbd {
struct ImageCtx;
namespace io {
struct AioCompletion;
enum class ImageArea;
}
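// Progress callback that discards all updates; used as the default when a
// caller does not need progress reporting (e.g. RBD::remove()).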
class NoOpProgressContext : public ProgressContext
{
public:
NoOpProgressContext()
{
}
int update_progress(uint64_t offset, uint64_t src_size) override
{
return 0;
}
};
int detect_format(librados::IoCtx &io_ctx, const std::string &name,
bool *old_format, uint64_t *size);
bool has_parent(int64_t parent_pool_id, uint64_t off, uint64_t overlap);
std::string image_option_name(int optname);
void image_options_create(rbd_image_options_t* opts);
void image_options_create_ref(rbd_image_options_t* opts,
rbd_image_options_t orig);
void image_options_copy(rbd_image_options_t *opts,
const ImageOptions &orig);
void image_options_destroy(rbd_image_options_t opts);
int image_options_set(rbd_image_options_t opts, int optname,
const std::string& optval);
int image_options_set(rbd_image_options_t opts, int optname, uint64_t optval);
int image_options_get(rbd_image_options_t opts, int optname,
std::string* optval);
int image_options_get(rbd_image_options_t opts, int optname,
uint64_t* optval);
int image_options_is_set(rbd_image_options_t opts, int optname,
bool* is_set);
int image_options_unset(rbd_image_options_t opts, int optname);
void image_options_clear(rbd_image_options_t opts);
bool image_options_is_empty(rbd_image_options_t opts);
int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
int *order);
int create(librados::IoCtx& io_ctx, const char *imgname, uint64_t size,
bool old_format, uint64_t features, int *order,
uint64_t stripe_unit, uint64_t stripe_count);
int create(IoCtx& io_ctx, const std::string &image_name,
const std::string &image_id, uint64_t size, ImageOptions& opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid,
bool skip_mirror_enable);
int clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name,
uint64_t features, int *c_order,
uint64_t stripe_unit, int stripe_count);
int clone(IoCtx& p_ioctx, const char *p_id, const char *p_name,
const char *p_snap_name, IoCtx& c_ioctx, const char *c_id,
const char *c_name, ImageOptions& c_opts,
const std::string &non_primary_global_image_id,
const std::string &primary_mirror_uuid);
int rename(librados::IoCtx& io_ctx, const char *srcname, const char *dstname);
int info(ImageCtx *ictx, image_info_t& info, size_t infosize);
int get_old_format(ImageCtx *ictx, uint8_t *old);
int get_size(ImageCtx *ictx, uint64_t *size);
int get_features(ImageCtx *ictx, uint64_t *features);
int get_overlap(ImageCtx *ictx, uint64_t *overlap);
int get_flags(ImageCtx *ictx, uint64_t *flags);
int set_image_notification(ImageCtx *ictx, int fd, int type);
int is_exclusive_lock_owner(ImageCtx *ictx, bool *is_owner);
int lock_acquire(ImageCtx *ictx, rbd_lock_mode_t lock_mode);
int lock_release(ImageCtx *ictx);
int lock_get_owners(ImageCtx *ictx, rbd_lock_mode_t *lock_mode,
std::list<std::string> *lock_owners);
int lock_break(ImageCtx *ictx, rbd_lock_mode_t lock_mode,
const std::string &lock_owner);
int copy(ImageCtx *ictx, IoCtx& dest_md_ctx, const char *destname,
ImageOptions& opts, ProgressContext &prog_ctx, size_t sparse_size);
int copy(ImageCtx *src, ImageCtx *dest, ProgressContext &prog_ctx, size_t sparse_size);
/* cooperative locking */
int list_lockers(ImageCtx *ictx,
std::list<locker_t> *locks,
bool *exclusive,
std::string *tag);
int lock(ImageCtx *ictx, bool exclusive, const std::string& cookie,
const std::string& tag);
int lock_shared(ImageCtx *ictx, const std::string& cookie,
const std::string& tag);
int unlock(ImageCtx *ictx, const std::string& cookie);
int break_lock(ImageCtx *ictx, const std::string& client,
const std::string& cookie);
int read_header_bl(librados::IoCtx& io_ctx, const std::string& md_oid,
ceph::bufferlist& header, uint64_t *ver);
int read_header(librados::IoCtx& io_ctx, const std::string& md_oid,
struct rbd_obj_header_ondisk *header, uint64_t *ver);
int tmap_set(librados::IoCtx& io_ctx, const std::string& imgname);
int tmap_rm(librados::IoCtx& io_ctx, const std::string& imgname);
void image_info(const ImageCtx *ictx, image_info_t& info, size_t info_size);
uint64_t oid_to_object_no(const std::string& oid,
const std::string& object_prefix);
int clip_io(ImageCtx* ictx, uint64_t off, uint64_t* len, io::ImageArea area);
void init_rbd_header(struct rbd_obj_header_ondisk& ondisk,
uint64_t size, int order, uint64_t bid);
int64_t read_iterate(ImageCtx *ictx, uint64_t off, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg);
int invalidate_cache(ImageCtx *ictx);
int poll_io_events(ImageCtx *ictx, io::AioCompletion **comps, int numcomp);
int metadata_list(ImageCtx *ictx, const std::string &last, uint64_t max,
std::map<std::string, bufferlist> *pairs);
int metadata_get(ImageCtx *ictx, const std::string &key, std::string *value);
int list_watchers(ImageCtx *ictx, std::list<librbd::image_watcher_t> &watchers);
}
std::ostream &operator<<(std::ostream &os, const librbd::ImageOptions &opts);
#endif
ceph-main/src/librbd/librbd.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2011 New Dream Network
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/int_types.h"
#include <errno.h>
#include "common/deleter.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/TracepointProvider.h"
#include "include/Context.h"
#include "cls/rbd/cls_rbd_client.h"
#include "cls/rbd/cls_rbd_types.h"
#include "librbd/Features.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/DiffIterate.h"
#include "librbd/api/Group.h"
#include "librbd/api/Image.h"
#include "librbd/api/Io.h"
#include "librbd/api/Migration.h"
#include "librbd/api/Mirror.h"
#include "librbd/api/Namespace.h"
#include "librbd/api/Pool.h"
#include "librbd/api/PoolMetadata.h"
#include "librbd/api/Snapshot.h"
#include "librbd/api/Trash.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ReadResult.h"
#include <algorithm>
#include <string>
#include <vector>
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librbd.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd: "
using std::list;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
namespace {
TracepointProvider::Traits tracepoint_traits("librbd_tp.so", "rbd_tracing");
struct UserBufferDeleter : public deleter::impl {
CephContext* cct;
librbd::io::AioCompletion* aio_completion;
UserBufferDeleter(CephContext* cct, librbd::io::AioCompletion* aio_completion)
: deleter::impl(deleter()), cct(cct), aio_completion(aio_completion) {
aio_completion->block(cct);
}
~UserBufferDeleter() override {
aio_completion->unblock(cct);
}
};
static auto create_write_raw(librbd::ImageCtx *ictx, const char *buf,
size_t len,
librbd::io::AioCompletion* aio_completion) {
if (ictx->disable_zero_copy || aio_completion == nullptr) {
// must copy the buffer if writeback/writearound cache is in-use (or using
// non-AIO)
return buffer::copy(buf, len);
}
// avoid copying memory for AIO operations, but possibly delay completions
// until the last reference to the user's memory has been released
return ceph::unique_leakable_ptr<ceph::buffer::raw>(
buffer::claim_buffer(
len, const_cast<char*>(buf),
deleter(new UserBufferDeleter(ictx->cct, aio_completion))));
}
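// Compute the total byte length of an iovec array, rejecting non-positive
// counts and size_t overflow.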
static int get_iovec_length(const struct iovec *iov, int iovcnt, size_t &len)
{
len = 0;
if (iovcnt <= 0) {
return -EINVAL;
}
for (int i = 0; i < iovcnt; ++i) {
const struct iovec &io = iov[i];
// check for overflow
if (len + io.iov_len < len) {
return -EINVAL;
}
len += io.iov_len;
}
return 0;
}
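// Pack user iovecs into a bufferlist; create_write_raw() claims the caller's
// memory zero-copy when an AIO completion is available.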
static bufferlist iovec_to_bufferlist(librbd::ImageCtx *ictx,
const struct iovec *iov,
int iovcnt,
librbd::io::AioCompletion* aio_completion)
{
bufferlist bl;
for (int i = 0; i < iovcnt; ++i) {
const struct iovec &io = iov[i];
bl.push_back(create_write_raw(ictx, static_cast<char*>(io.iov_base),
io.iov_len, aio_completion));
}
return bl;
}
CephContext* get_cct(IoCtx &io_ctx) {
return reinterpret_cast<CephContext*>(io_ctx.cct());
}
librbd::io::AioCompletion* get_aio_completion(librbd::RBD::AioCompletion *comp) {
return reinterpret_cast<librbd::io::AioCompletion *>(comp->pc);
}
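// Context adapter driving an io::AioCompletion: takes a reference for the
// lifetime of the callback and maps finish(r) onto fail()/complete().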
struct C_AioCompletion : public Context {
CephContext *cct;
librbd::io::aio_type_t aio_type;
librbd::io::AioCompletion* aio_comp;
C_AioCompletion(librbd::ImageCtx *ictx, librbd::io::aio_type_t aio_type,
librbd::io::AioCompletion* aio_comp)
: cct(ictx->cct), aio_type(aio_type), aio_comp(aio_comp) {
aio_comp->init_time(ictx, aio_type);
aio_comp->get();
}
virtual ~C_AioCompletion() {
aio_comp->put();
}
void finish(int r) override {
ldout(cct, 20) << "C_AioCompletion::finish: r=" << r << dendl;
if (r < 0) {
aio_comp->fail(r);
} else {
aio_comp->complete();
}
}
};
struct C_OpenComplete : public C_AioCompletion {
librbd::ImageCtx *ictx;
void **ictxp;
C_OpenComplete(librbd::ImageCtx *ictx, librbd::io::AioCompletion* comp,
void **ictxp)
: C_AioCompletion(ictx, librbd::io::AIO_TYPE_OPEN, comp),
ictx(ictx), ictxp(ictxp) {
}
void finish(int r) override {
ldout(cct, 20) << "C_OpenComplete::finish: r=" << r << dendl;
if (r < 0) {
*ictxp = nullptr;
} else {
*ictxp = ictx;
}
C_AioCompletion::finish(r);
}
};
struct C_OpenAfterCloseComplete : public Context {
librbd::ImageCtx *ictx;
librbd::io::AioCompletion* comp;
void **ictxp;
C_OpenAfterCloseComplete(librbd::ImageCtx *ictx,
librbd::io::AioCompletion* comp,
void **ictxp)
: ictx(ictx), comp(comp), ictxp(ictxp) {
}
void finish(int r) override {
ldout(ictx->cct, 20) << "C_OpenAfterCloseComplete::finish: r=" << r
<< dendl;
*ictxp = nullptr;
ictx->state->open(0, new C_OpenComplete(ictx, comp, ictxp));
}
};
struct C_UpdateWatchCB : public librbd::UpdateWatchCtx {
rbd_update_callback_t watch_cb;
void *arg;
uint64_t handle = 0;
C_UpdateWatchCB(rbd_update_callback_t watch_cb, void *arg) :
watch_cb(watch_cb), arg(arg) {
}
void handle_notify() override {
watch_cb(arg);
}
};
struct C_QuiesceWatchCB : public librbd::QuiesceWatchCtx {
rbd_update_callback_t quiesce_cb;
rbd_update_callback_t unquiesce_cb;
void *arg;
uint64_t handle = 0;
C_QuiesceWatchCB(rbd_update_callback_t quiesce_cb,
rbd_update_callback_t unquiesce_cb, void *arg) :
quiesce_cb(quiesce_cb), unquiesce_cb(unquiesce_cb), arg(arg) {
}
void handle_quiesce() override {
quiesce_cb(arg);
}
void handle_unquiesce() override {
unquiesce_cb(arg);
}
};
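// Converters from C++ API structures to their C counterparts.  String fields
// are strdup()ed, so the C caller owns the memory (cf. config_option_cleanup()).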
void group_image_status_cpp_to_c(const librbd::group_image_info_t &cpp_info,
rbd_group_image_info_t *c_info) {
c_info->name = strdup(cpp_info.name.c_str());
c_info->pool = cpp_info.pool;
c_info->state = cpp_info.state;
}
void group_info_cpp_to_c(const librbd::group_info_t &cpp_info,
rbd_group_info_t *c_info) {
c_info->name = strdup(cpp_info.name.c_str());
c_info->pool = cpp_info.pool;
}
void group_snap_info_cpp_to_c(const librbd::group_snap_info_t &cpp_info,
rbd_group_snap_info_t *c_info) {
c_info->name = strdup(cpp_info.name.c_str());
c_info->state = cpp_info.state;
}
void mirror_image_info_cpp_to_c(const librbd::mirror_image_info_t &cpp_info,
rbd_mirror_image_info_t *c_info) {
c_info->global_id = strdup(cpp_info.global_id.c_str());
c_info->state = cpp_info.state;
c_info->primary = cpp_info.primary;
}
int get_local_mirror_image_site_status(
const librbd::mirror_image_global_status_t& status,
librbd::mirror_image_site_status_t* local_status) {
auto it = std::find_if(status.site_statuses.begin(),
status.site_statuses.end(),
[](const librbd::mirror_image_site_status_t& s) {
return (s.mirror_uuid ==
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID);
});
if (it == status.site_statuses.end()) {
return -ENOENT;
}
*local_status = *it;
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int mirror_image_global_status_cpp_to_c(
const librbd::mirror_image_global_status_t &cpp_status,
rbd_mirror_image_status_t *c_status) {
c_status->name = strdup(cpp_status.name.c_str());
mirror_image_info_cpp_to_c(cpp_status.info, &c_status->info);
librbd::mirror_image_site_status_t local_status;
int r = get_local_mirror_image_site_status(cpp_status, &local_status);
if (r < 0) {
return r;
}
c_status->state = local_status.state;
c_status->description = strdup(local_status.description.c_str());
c_status->last_update = local_status.last_update;
c_status->up = local_status.up;
return 0;
}
#pragma GCC diagnostic pop
void mirror_image_global_status_cpp_to_c(
const librbd::mirror_image_global_status_t &cpp_status,
rbd_mirror_image_global_status_t *c_status) {
c_status->name = strdup(cpp_status.name.c_str());
mirror_image_info_cpp_to_c(cpp_status.info, &c_status->info);
c_status->site_statuses_count = cpp_status.site_statuses.size();
c_status->site_statuses = (rbd_mirror_image_site_status_t*)calloc(
cpp_status.site_statuses.size(), sizeof(rbd_mirror_image_site_status_t));
auto idx = 0U;
for (auto it = cpp_status.site_statuses.begin();
it != cpp_status.site_statuses.end(); ++it) {
auto& s_status = c_status->site_statuses[idx++];
s_status.mirror_uuid = strdup(it->mirror_uuid.c_str());
s_status.state = it->state;
s_status.description = strdup(it->description.c_str());
s_status.last_update = it->last_update;
s_status.up = it->up;
}
}
void trash_image_info_cpp_to_c(const librbd::trash_image_info_t &cpp_info,
rbd_trash_image_info_t *c_info) {
c_info->id = strdup(cpp_info.id.c_str());
c_info->name = strdup(cpp_info.name.c_str());
c_info->source = cpp_info.source;
c_info->deletion_time = cpp_info.deletion_time;
c_info->deferment_end_time = cpp_info.deferment_end_time;
}
void config_option_cpp_to_c(const librbd::config_option_t &cpp_option,
rbd_config_option_t *c_option) {
c_option->name = strdup(cpp_option.name.c_str());
c_option->value = strdup(cpp_option.value.c_str());
c_option->source = cpp_option.source;
}
void config_option_cleanup(rbd_config_option_t &option) {
free(option.name);
free(option.value);
}
struct C_MirrorImageGetInfo : public Context {
rbd_mirror_image_info_t *mirror_image_info;
Context *on_finish;
librbd::mirror_image_info_t cpp_mirror_image_info;
C_MirrorImageGetInfo(rbd_mirror_image_info_t *mirror_image_info,
Context *on_finish)
: mirror_image_info(mirror_image_info), on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0) {
on_finish->complete(r);
return;
}
mirror_image_info_cpp_to_c(cpp_mirror_image_info, mirror_image_info);
on_finish->complete(0);
}
};
struct C_MirrorImageGetGlobalStatus : public Context {
rbd_mirror_image_global_status_t *mirror_image_global_status;
Context *on_finish;
librbd::mirror_image_global_status_t cpp_mirror_image_global_status;
C_MirrorImageGetGlobalStatus(
rbd_mirror_image_global_status_t *mirror_image_global_status,
Context *on_finish)
: mirror_image_global_status(mirror_image_global_status),
on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0) {
on_finish->complete(r);
return;
}
mirror_image_global_status_cpp_to_c(cpp_mirror_image_global_status,
mirror_image_global_status);
on_finish->complete(0);
}
};
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
struct C_MirrorImageGetStatus : public Context {
librbd::mirror_image_status_t *mirror_image_status_cpp = nullptr;
rbd_mirror_image_status_t *mirror_image_status = nullptr;
Context *on_finish;
librbd::mirror_image_global_status_t cpp_mirror_image_global_status;
C_MirrorImageGetStatus(rbd_mirror_image_status_t *mirror_image_status,
Context *on_finish)
: mirror_image_status(mirror_image_status), on_finish(on_finish) {
}
C_MirrorImageGetStatus(librbd::mirror_image_status_t *mirror_image_status,
Context *on_finish)
: mirror_image_status_cpp(mirror_image_status), on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0) {
on_finish->complete(r);
return;
}
if (mirror_image_status != nullptr) {
r = mirror_image_global_status_cpp_to_c(cpp_mirror_image_global_status,
mirror_image_status);
} else if (mirror_image_status_cpp != nullptr) {
librbd::mirror_image_site_status_t local_status;
r = get_local_mirror_image_site_status(cpp_mirror_image_global_status,
&local_status);
if (r >= 0) {
*mirror_image_status_cpp = {
cpp_mirror_image_global_status.name,
cpp_mirror_image_global_status.info,
local_status.state, local_status.description,
local_status.last_update, local_status.up};
}
}
on_finish->complete(r);
}
};
#pragma GCC diagnostic pop
} // anonymous namespace
namespace librbd {
ProgressContext::~ProgressContext()
{
}
class CProgressContext : public ProgressContext
{
public:
CProgressContext(librbd_progress_fn_t fn, void *data)
: m_fn(fn), m_data(data)
{
}
int update_progress(uint64_t offset, uint64_t src_size) override
{
return m_fn(offset, src_size, m_data);
}
private:
librbd_progress_fn_t m_fn;
void *m_data;
};
/*
* Pool stats
*/
PoolStats::PoolStats() {
rbd_pool_stats_create(&pool_stats);
}
PoolStats::~PoolStats() {
rbd_pool_stats_destroy(pool_stats);
}
int PoolStats::add(rbd_pool_stat_option_t option, uint64_t* opt_val) {
return rbd_pool_stats_option_add_uint64(pool_stats, option, opt_val);
}
/*
* RBD
*/
RBD::RBD()
{
}
RBD::~RBD()
{
}
void RBD::version(int *major, int *minor, int *extra)
{
rbd_version(major, minor, extra);
}
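// The open*/aio_open* variants below differ only in name vs. id lookup,
// read-only mode, and sync vs. async completion; an image already held by
// 'image' is closed first.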
int RBD::open(IoCtx& io_ctx, Image& image, const char *name)
{
return open(io_ctx, image, name, NULL);
}
int RBD::open_by_id(IoCtx& io_ctx, Image& image, const char *id)
{
return open_by_id(io_ctx, image, id, nullptr);
}
int RBD::open(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = NULL;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_exit, 0);
return 0;
}
int RBD::open_by_id(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = nullptr;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_by_id_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_by_id_exit, 0);
return 0;
}
int RBD::aio_open(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
int RBD::aio_open_by_id(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, false);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_by_id_exit, 0);
return 0;
}
int RBD::open_read_only(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = NULL;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_exit, 0);
return 0;
}
int RBD::open_by_id_read_only(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close();
image.ctx = nullptr;
}
int r = ictx->state->open(0);
if (r < 0) {
tracepoint(librbd, open_image_by_id_exit, r);
return r;
}
image.ctx = (image_ctx_t) ictx;
tracepoint(librbd, open_image_by_id_exit, 0);
return 0;
}
int RBD::aio_open_read_only(IoCtx& io_ctx, Image& image, const char *name,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx(name, "", snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != NULL) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
int RBD::aio_open_by_id_read_only(IoCtx& io_ctx, Image& image, const char *id,
const char *snap_name, RBD::AioCompletion *c)
{
ImageCtx *ictx = new ImageCtx("", id, snap_name, io_ctx, true);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, aio_open_image_by_id_enter, ictx, ictx->id.c_str(),
ictx->snap_name.c_str(), ictx->read_only, c->pc);
if (image.ctx != nullptr) {
reinterpret_cast<ImageCtx*>(image.ctx)->state->close(
new C_OpenAfterCloseComplete(ictx, get_aio_completion(c), &image.ctx));
} else {
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(c),
&image.ctx));
}
tracepoint(librbd, aio_open_image_by_id_exit, 0);
return 0;
}
int RBD::features_to_string(uint64_t features, std::string *str_features)
{
std::stringstream err;
*str_features = librbd::rbd_features_to_string(features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
return 0;
}
int RBD::features_from_string(const std::string str_features, uint64_t *features)
{
std::stringstream err;
*features = librbd::rbd_features_from_string(str_features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
return 0;
}
int RBD::create(IoCtx& io_ctx, const char *name, uint64_t size, int *order)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, *order);
int r = librbd::create(io_ctx, name, size, order);
tracepoint(librbd, create_exit, r, *order);
return r;
}
int RBD::create2(IoCtx& io_ctx, const char *name, uint64_t size,
uint64_t features, int *order)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create2_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order);
int r = librbd::create(io_ctx, name, size, false, features, order, 0, 0);
tracepoint(librbd, create2_exit, r, *order);
return r;
}
int RBD::create3(IoCtx& io_ctx, const char *name, uint64_t size,
uint64_t features, int *order, uint64_t stripe_unit,
uint64_t stripe_count)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create3_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order, stripe_unit, stripe_count);
int r = librbd::create(io_ctx, name, size, false, features, order,
stripe_unit, stripe_count);
tracepoint(librbd, create3_exit, r, *order);
return r;
}
int RBD::create4(IoCtx& io_ctx, const char *name, uint64_t size,
ImageOptions& opts)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create4_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, opts.opts);
int r = librbd::create(io_ctx, name, "", size, opts, "", "", false);
tracepoint(librbd, create4_exit, r);
return r;
}
int RBD::clone(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name, uint64_t features,
int *c_order)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioctx));
tracepoint(librbd, clone_enter, p_ioctx.get_pool_name().c_str(), p_ioctx.get_id(), p_name, p_snap_name, c_ioctx.get_pool_name().c_str(), c_ioctx.get_id(), c_name, features);
int r = librbd::clone(p_ioctx, p_name, p_snap_name, c_ioctx, c_name,
features, c_order, 0, 0);
tracepoint(librbd, clone_exit, r, *c_order);
return r;
}
int RBD::clone2(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name, uint64_t features,
int *c_order, uint64_t stripe_unit, int stripe_count)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioctx));
tracepoint(librbd, clone2_enter, p_ioctx.get_pool_name().c_str(), p_ioctx.get_id(), p_name, p_snap_name, c_ioctx.get_pool_name().c_str(), c_ioctx.get_id(), c_name, features, stripe_unit, stripe_count);
int r = librbd::clone(p_ioctx, p_name, p_snap_name, c_ioctx, c_name,
features, c_order, stripe_unit, stripe_count);
tracepoint(librbd, clone2_exit, r, *c_order);
return r;
}
int RBD::clone3(IoCtx& p_ioctx, const char *p_name, const char *p_snap_name,
IoCtx& c_ioctx, const char *c_name, ImageOptions& c_opts)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioctx));
tracepoint(librbd, clone3_enter, p_ioctx.get_pool_name().c_str(), p_ioctx.get_id(), p_name, p_snap_name, c_ioctx.get_pool_name().c_str(), c_ioctx.get_id(), c_name, c_opts.opts);
int r = librbd::clone(p_ioctx, nullptr, p_name, p_snap_name, c_ioctx,
nullptr, c_name, c_opts, "", "");
tracepoint(librbd, clone3_exit, r);
return r;
}
int RBD::remove(IoCtx& io_ctx, const char *name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::remove(io_ctx, name, prog_ctx);
tracepoint(librbd, remove_exit, r);
return r;
}
int RBD::remove_with_progress(IoCtx& io_ctx, const char *name,
ProgressContext& pctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
int r = librbd::api::Image<>::remove(io_ctx, name, pctx);
tracepoint(librbd, remove_exit, r);
return r;
}
int RBD::trash_move(IoCtx &io_ctx, const char *name, uint64_t delay) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_move_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Trash<>::move(io_ctx, RBD_TRASH_IMAGE_SOURCE_USER,
name, delay);
tracepoint(librbd, trash_move_exit, r);
return r;
}
int RBD::trash_get(IoCtx &io_ctx, const char *id, trash_image_info_t *info) {
return librbd::api::Trash<>::get(io_ctx, id, info);
}
int RBD::trash_list(IoCtx &io_ctx, vector<trash_image_info_t> &entries) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_list_enter,
io_ctx.get_pool_name().c_str(), io_ctx.get_id());
int r = librbd::api::Trash<>::list(io_ctx, entries, true);
#ifdef WITH_LTTNG
if (r >= 0) {
for (const auto& entry : entries) {
tracepoint(librbd, trash_list_entry, entry.id.c_str());
}
}
#endif
tracepoint(librbd, trash_list_exit, r, r);
return r;
}
int RBD::trash_remove(IoCtx &io_ctx, const char *image_id, bool force) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, prog_ctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
int RBD::trash_remove_with_progress(IoCtx &io_ctx, const char *image_id,
bool force, ProgressContext &pctx) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, pctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
int RBD::trash_restore(IoCtx &io_ctx, const char *id, const char *name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_undelete_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), id, name);
int r = librbd::api::Trash<>::restore(
io_ctx, librbd::api::Trash<>::ALLOWED_RESTORE_SOURCES, id, name);
tracepoint(librbd, trash_undelete_exit, r);
return r;
}
int RBD::trash_purge(IoCtx &io_ctx, time_t expire_ts, float threshold) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
NoOpProgressContext nop_pctx;
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, nop_pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
int RBD::trash_purge_with_progress(IoCtx &io_ctx, time_t expire_ts,
float threshold, ProgressContext &pctx) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
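/*
Sketch of the trash lifecycle, assuming an open IoCtx `io_ctx` and an
image named "img" (both hypothetical):

  librbd::RBD rbd;
  rbd.trash_move(io_ctx, "img", 3600);  // keep for at least an hour
  std::vector<librbd::trash_image_info_t> entries;
  rbd.trash_list(io_ctx, entries);      // recover the image id
  rbd.trash_restore(io_ctx, entries[0].id.c_str(), "img");
  // or, to delete for good once the delay has expired:
  // rbd.trash_remove(io_ctx, entries[0].id.c_str(), false);
*/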
int RBD::namespace_create(IoCtx& io_ctx, const char *namespace_name) {
return librbd::api::Namespace<>::create(io_ctx, namespace_name);
}
int RBD::namespace_remove(IoCtx& io_ctx, const char *namespace_name) {
return librbd::api::Namespace<>::remove(io_ctx, namespace_name);
}
int RBD::namespace_list(IoCtx& io_ctx,
std::vector<std::string>* namespace_names) {
return librbd::api::Namespace<>::list(io_ctx, namespace_names);
}
int RBD::namespace_exists(IoCtx& io_ctx, const char *namespace_name,
bool *exists) {
return librbd::api::Namespace<>::exists(io_ctx, namespace_name, exists);
}
int RBD::pool_init(IoCtx& io_ctx, bool force) {
return librbd::api::Pool<>::init(io_ctx, force);
}
int RBD::pool_stats_get(IoCtx& io_ctx, PoolStats* stats) {
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(stats->pool_stats);
return librbd::api::Pool<>::get_stats(io_ctx, pool_stat_options);
}
int RBD::list(IoCtx& io_ctx, vector<string>& names)
{
std::vector<image_spec_t> image_specs;
int r = list2(io_ctx, &image_specs);
if (r < 0) {
return r;
}
names.clear();
for (auto& it : image_specs) {
names.push_back(it.name);
}
return 0;
}
int RBD::list2(IoCtx& io_ctx, std::vector<image_spec_t> *images)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
int r = librbd::api::Image<>::list_images(io_ctx, images);
#ifdef WITH_LTTNG
if (r >= 0) {
for (auto& it : *images) {
tracepoint(librbd, list_entry, it.name.c_str());
}
}
#endif
tracepoint(librbd, list_exit, r, r);
return r;
}
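/*
Minimal listing sketch (illustrative): list2() reports both the image
id and name, which the legacy list() wrapper above discards:

  std::vector<librbd::image_spec_t> images;
  librbd::RBD rbd;
  if (rbd.list2(io_ctx, &images) == 0) {
    for (const auto& image : images)
      std::cout << image.id << " " << image.name << std::endl;
  }
*/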
int RBD::rename(IoCtx& src_io_ctx, const char *srcname, const char *destname)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(src_io_ctx));
tracepoint(librbd, rename_enter, src_io_ctx.get_pool_name().c_str(), src_io_ctx.get_id(), srcname, destname);
int r = librbd::rename(src_io_ctx, srcname, destname);
tracepoint(librbd, rename_exit, r);
return r;
}
int RBD::migration_prepare(IoCtx& io_ctx, const char *image_name,
IoCtx& dest_io_ctx, const char *dest_image_name,
ImageOptions& opts)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_prepare_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name, dest_io_ctx.get_pool_name().c_str(),
dest_io_ctx.get_id(), dest_image_name, opts.opts);
int r = librbd::api::Migration<>::prepare(io_ctx, image_name, dest_io_ctx,
dest_image_name, opts);
tracepoint(librbd, migration_prepare_exit, r);
return r;
}
int RBD::migration_prepare_import(const char *source_spec, IoCtx& dest_io_ctx,
const char *dest_image_name,
ImageOptions& opts) {
return librbd::api::Migration<>::prepare_import(source_spec, dest_io_ctx,
dest_image_name, opts);
}
int RBD::migration_execute(IoCtx& io_ctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::execute(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
int RBD::migration_execute_with_progress(IoCtx& io_ctx,
const char *image_name,
librbd::ProgressContext &prog_ctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
int r = librbd::api::Migration<>::execute(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
int RBD::migration_abort(IoCtx& io_ctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::abort(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
int RBD::migration_abort_with_progress(IoCtx& io_ctx, const char *image_name,
librbd::ProgressContext &prog_ctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
int r = librbd::api::Migration<>::abort(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
int RBD::migration_commit(IoCtx& io_ctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::commit(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
int RBD::migration_commit_with_progress(IoCtx& io_ctx, const char *image_name,
librbd::ProgressContext &prog_ctx)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
int r = librbd::api::Migration<>::commit(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
int RBD::migration_status(IoCtx& io_ctx, const char *image_name,
image_migration_status_t *status,
size_t status_size)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_status_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
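// The caller passes sizeof() its compile-time view of the status
// struct; rejecting a mismatch with -ERANGE guards against callers
// built against an incompatible ABI.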
if (status_size != sizeof(image_migration_status_t)) {
tracepoint(librbd, migration_status_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Migration<>::status(io_ctx, image_name, status);
tracepoint(librbd, migration_status_exit, r);
return r;
}
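/*
Typical live-migration flow (sketch; the pool contexts and the
"src"/"dst" names are hypothetical). prepare() links source and
destination, execute() performs the background deep copy, and commit()
(or abort()) finalizes it:

  librbd::RBD rbd;
  librbd::ImageOptions opts;
  rbd.migration_prepare(io_ctx, "src", dest_io_ctx, "dst", opts);
  rbd.migration_execute(dest_io_ctx, "dst");
  rbd.migration_commit(dest_io_ctx, "dst");
*/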
int RBD::mirror_mode_get(IoCtx& io_ctx, rbd_mirror_mode_t *mirror_mode) {
return librbd::api::Mirror<>::mode_get(io_ctx, mirror_mode);
}
int RBD::mirror_mode_set(IoCtx& io_ctx, rbd_mirror_mode_t mirror_mode) {
return librbd::api::Mirror<>::mode_set(io_ctx, mirror_mode);
}
int RBD::mirror_uuid_get(IoCtx& io_ctx, std::string* mirror_uuid) {
return librbd::api::Mirror<>::uuid_get(io_ctx, mirror_uuid);
}
int RBD::mirror_site_name_get(librados::Rados& rados,
std::string* site_name) {
return librbd::api::Mirror<>::site_name_get(rados, site_name);
}
int RBD::mirror_site_name_set(librados::Rados& rados,
const std::string& site_name) {
return librbd::api::Mirror<>::site_name_set(rados, site_name);
}
int RBD::mirror_peer_bootstrap_create(IoCtx& io_ctx, std::string* token) {
return librbd::api::Mirror<>::peer_bootstrap_create(io_ctx, token);
}
int RBD::mirror_peer_bootstrap_import(IoCtx& io_ctx,
rbd_mirror_peer_direction_t direction,
const std::string& token) {
return librbd::api::Mirror<>::peer_bootstrap_import(io_ctx, direction,
token);
}
int RBD::mirror_peer_site_add(IoCtx& io_ctx, std::string *uuid,
mirror_peer_direction_t direction,
const std::string &site_name,
const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_add(
io_ctx, uuid, direction, site_name, client_name);
}
int RBD::mirror_peer_site_remove(IoCtx& io_ctx, const std::string &uuid) {
return librbd::api::Mirror<>::peer_site_remove(io_ctx, uuid);
}
int RBD::mirror_peer_site_list(
IoCtx& io_ctx, std::vector<mirror_peer_site_t> *peer_sites) {
return librbd::api::Mirror<>::peer_site_list(io_ctx, peer_sites);
}
int RBD::mirror_peer_site_set_client_name(
IoCtx& io_ctx, const std::string &uuid, const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_set_client(io_ctx, uuid,
client_name);
}
int RBD::mirror_peer_site_set_name(IoCtx& io_ctx, const std::string &uuid,
const std::string &site_name) {
return librbd::api::Mirror<>::peer_site_set_name(io_ctx, uuid,
site_name);
}
int RBD::mirror_peer_site_set_direction(IoCtx& io_ctx,
const std::string& uuid,
mirror_peer_direction_t direction) {
return librbd::api::Mirror<>::peer_site_set_direction(io_ctx, uuid,
direction);
}
int RBD::mirror_peer_site_get_attributes(
IoCtx& io_ctx, const std::string &uuid,
std::map<std::string, std::string> *key_vals) {
return librbd::api::Mirror<>::peer_site_get_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_peer_site_set_attributes(
IoCtx& io_ctx, const std::string &uuid,
const std::map<std::string, std::string>& key_vals) {
return librbd::api::Mirror<>::peer_site_set_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_image_global_status_list(
IoCtx& io_ctx, const std::string &start_id, size_t max,
std::map<std::string, mirror_image_global_status_t> *global_statuses) {
return librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, global_statuses);
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
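// Deprecated single-peer API: these entry points are thin shims over
// the newer peer-site calls, treating the remote cluster name as the
// site name.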
int RBD::mirror_peer_add(IoCtx& io_ctx, std::string *uuid,
const std::string &cluster_name,
const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_add(
io_ctx, uuid, RBD_MIRROR_PEER_DIRECTION_RX_TX, cluster_name, client_name);
}
int RBD::mirror_peer_remove(IoCtx& io_ctx, const std::string &uuid) {
return librbd::api::Mirror<>::peer_site_remove(io_ctx, uuid);
}
int RBD::mirror_peer_list(IoCtx& io_ctx, std::vector<mirror_peer_t> *peers) {
std::vector<mirror_peer_site_t> peer_sites;
int r = librbd::api::Mirror<>::peer_site_list(io_ctx, &peer_sites);
if (r < 0) {
return r;
}
peers->clear();
peers->reserve(peer_sites.size());
for (auto& peer_site : peer_sites) {
peers->push_back({peer_site.uuid, peer_site.site_name,
peer_site.client_name});
}
return 0;
}
int RBD::mirror_peer_set_client(IoCtx& io_ctx, const std::string &uuid,
const std::string &client_name) {
return librbd::api::Mirror<>::peer_site_set_client(io_ctx, uuid,
client_name);
}
int RBD::mirror_peer_set_cluster(IoCtx& io_ctx, const std::string &uuid,
const std::string &cluster_name) {
return librbd::api::Mirror<>::peer_site_set_name(io_ctx, uuid,
cluster_name);
}
int RBD::mirror_peer_get_attributes(
IoCtx& io_ctx, const std::string &uuid,
std::map<std::string, std::string> *key_vals) {
return librbd::api::Mirror<>::peer_site_get_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_peer_set_attributes(
IoCtx& io_ctx, const std::string &uuid,
const std::map<std::string, std::string>& key_vals) {
return librbd::api::Mirror<>::peer_site_set_attributes(io_ctx, uuid,
key_vals);
}
int RBD::mirror_image_status_list(IoCtx& io_ctx, const std::string &start_id,
size_t max, std::map<std::string, mirror_image_status_t> *images) {
std::map<std::string, mirror_image_global_status_t> global_statuses;
int r = librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, &global_statuses);
if (r < 0) {
return r;
}
images->clear();
for (auto &[id, global_status] : global_statuses) {
if (global_status.site_statuses.empty() ||
global_status.site_statuses[0].mirror_uuid !=
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID) {
continue;
}
auto& site_status = global_status.site_statuses[0];
(*images)[id] = mirror_image_status_t{
global_status.name, global_status.info, site_status.state,
site_status.description, site_status.last_update, site_status.up};
}
return 0;
}
#pragma GCC diagnostic pop
int RBD::mirror_image_status_summary(IoCtx& io_ctx,
std::map<mirror_image_status_state_t, int> *states) {
return librbd::api::Mirror<>::image_status_summary(io_ctx, states);
}
int RBD::mirror_image_instance_id_list(IoCtx& io_ctx,
const std::string &start_id, size_t max,
std::map<std::string, std::string> *instance_ids) {
return librbd::api::Mirror<>::image_instance_id_list(io_ctx, start_id, max,
instance_ids);
}
int RBD::mirror_image_info_list(
IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
const std::string &start_id, size_t max,
std::map<std::string, std::pair<mirror_image_mode_t,
mirror_image_info_t>> *entries) {
return librbd::api::Mirror<>::image_info_list(io_ctx, mode_filter, start_id,
max, entries);
}
int RBD::group_create(IoCtx& io_ctx, const char *group_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_create_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), group_name);
int r = librbd::api::Group<>::create(io_ctx, group_name);
tracepoint(librbd, group_create_exit, r);
return r;
}
int RBD::group_remove(IoCtx& io_ctx, const char *group_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), group_name);
int r = librbd::api::Group<>::remove(io_ctx, group_name);
tracepoint(librbd, group_remove_exit, r);
return r;
}
int RBD::group_list(IoCtx& io_ctx, vector<string> *names)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
int r = librbd::api::Group<>::list(io_ctx, names);
#ifdef WITH_LTTNG
if (r >= 0) {
for (const auto& name : *names) {
tracepoint(librbd, group_list_entry, name.c_str());
}
}
#endif
tracepoint(librbd, group_list_exit, r);
return r;
}
int RBD::group_rename(IoCtx& io_ctx, const char *src_name,
const char *dest_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_rename_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), src_name, dest_name);
int r = librbd::api::Group<>::rename(io_ctx, src_name, dest_name);
tracepoint(librbd, group_rename_exit, r);
return r;
}
int RBD::group_image_add(IoCtx& group_ioctx, const char *group_name,
IoCtx& image_ioctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_add_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_add(group_ioctx, group_name,
image_ioctx, image_name);
tracepoint(librbd, group_image_add_exit, r);
return r;
}
int RBD::group_image_remove(IoCtx& group_ioctx, const char *group_name,
IoCtx& image_ioctx, const char *image_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_remove(group_ioctx, group_name,
image_ioctx, image_name);
tracepoint(librbd, group_image_remove_exit, r);
return r;
}
int RBD::group_image_remove_by_id(IoCtx& group_ioctx, const char *group_name,
IoCtx& image_ioctx, const char *image_id)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_by_id_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_id);
int r = librbd::api::Group<>::image_remove_by_id(group_ioctx, group_name,
image_ioctx, image_id);
tracepoint(librbd, group_image_remove_by_id_exit, r);
return r;
}
int RBD::group_image_list(IoCtx& group_ioctx, const char *group_name,
std::vector<group_image_info_t> *images,
size_t group_image_info_size)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_list_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
if (group_image_info_size != sizeof(group_image_info_t)) {
tracepoint(librbd, group_image_list_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Group<>::image_list(group_ioctx, group_name, images);
tracepoint(librbd, group_image_list_exit, r);
return r;
}
int RBD::group_snap_create(IoCtx& group_ioctx, const char *group_name,
const char *snap_name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name,
snap_name, 0);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
int RBD::group_snap_create2(IoCtx& group_ioctx, const char *group_name,
const char *snap_name, uint32_t flags) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name,
snap_name, flags);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
int RBD::group_snap_remove(IoCtx& group_ioctx, const char *group_name,
const char *snap_name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_remove_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_remove(group_ioctx, group_name,
snap_name);
tracepoint(librbd, group_snap_remove_exit, r);
return r;
}
int RBD::group_snap_list(IoCtx& group_ioctx, const char *group_name,
std::vector<group_snap_info_t> *snaps,
size_t group_snap_info_size)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_list_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
if (group_snap_info_size != sizeof(group_snap_info_t)) {
tracepoint(librbd, group_snap_list_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Group<>::snap_list(group_ioctx, group_name, snaps);
tracepoint(librbd, group_snap_list_exit, r);
return r;
}
int RBD::group_snap_rename(IoCtx& group_ioctx, const char *group_name,
const char *old_snap_name,
const char *new_snap_name)
{
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rename_enter,
group_ioctx.get_pool_name().c_str(), group_ioctx.get_id(),
group_name, old_snap_name, new_snap_name);
int r = librbd::api::Group<>::snap_rename(group_ioctx, group_name,
old_snap_name, new_snap_name);
tracepoint(librbd, group_snap_rename_exit, r);
return r;
}
int RBD::group_snap_rollback(IoCtx& group_ioctx, const char *group_name,
const char *snap_name) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
int RBD::group_snap_rollback_with_progress(IoCtx& group_ioctx,
const char *group_name,
const char *snap_name,
ProgressContext& prog_ctx) {
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
int RBD::pool_metadata_get(IoCtx& ioctx, const std::string &key,
std::string *value)
{
int r = librbd::api::PoolMetadata<>::get(ioctx, key, value);
return r;
}
int RBD::pool_metadata_set(IoCtx& ioctx, const std::string &key,
const std::string &value)
{
int r = librbd::api::PoolMetadata<>::set(ioctx, key, value);
return r;
}
int RBD::pool_metadata_remove(IoCtx& ioctx, const std::string &key)
{
int r = librbd::api::PoolMetadata<>::remove(ioctx, key);
return r;
}
int RBD::pool_metadata_list(IoCtx& ioctx, const std::string &start,
uint64_t max, map<string, bufferlist> *pairs)
{
int r = librbd::api::PoolMetadata<>::list(ioctx, start, max, pairs);
return r;
}
int RBD::config_list(IoCtx& io_ctx, std::vector<config_option_t> *options) {
return librbd::api::Config<>::list(io_ctx, options);
}
RBD::AioCompletion::AioCompletion(void *cb_arg, callback_t complete_cb)
{
auto aio_comp = librbd::io::AioCompletion::create(
cb_arg, complete_cb, this);
aio_comp->external_callback = true;
pc = reinterpret_cast<void*>(aio_comp);
}
bool RBD::AioCompletion::is_complete()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->is_complete();
}
int RBD::AioCompletion::wait_for_complete()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->wait_for_complete();
}
ssize_t RBD::AioCompletion::get_return_value()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->get_return_value();
}
void *RBD::AioCompletion::get_arg()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
return c->get_arg();
}
void RBD::AioCompletion::release()
{
librbd::io::AioCompletion *c = (librbd::io::AioCompletion *)pc;
c->release();
delete this;
}
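/*
Completion lifecycle sketch (illustrative). A completion is allocated
by the caller, handed to an aio_* call, waited on, and released exactly
once; release() frees both the librbd completion and this wrapper:

  auto *comp = new librbd::RBD::AioCompletion(nullptr, nullptr);
  image.aio_read(off, len, bl, comp);
  comp->wait_for_complete();
  ssize_t r = comp->get_return_value();
  comp->release();
*/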
/*
ImageOptions
*/
ImageOptions::ImageOptions()
{
librbd::image_options_create(&opts);
}
ImageOptions::ImageOptions(rbd_image_options_t opts_)
{
librbd::image_options_create_ref(&opts, opts_);
}
ImageOptions::ImageOptions(const ImageOptions &imgopts)
{
librbd::image_options_copy(&opts, imgopts);
}
ImageOptions::~ImageOptions()
{
librbd::image_options_destroy(opts);
}
int ImageOptions::set(int optname, const std::string& optval)
{
return librbd::image_options_set(opts, optname, optval);
}
int ImageOptions::set(int optname, uint64_t optval)
{
return librbd::image_options_set(opts, optname, optval);
}
int ImageOptions::get(int optname, std::string* optval) const
{
return librbd::image_options_get(opts, optname, optval);
}
int ImageOptions::get(int optname, uint64_t* optval) const
{
return librbd::image_options_get(opts, optname, optval);
}
int ImageOptions::is_set(int optname, bool* is_set)
{
return librbd::image_options_is_set(opts, optname, is_set);
}
int ImageOptions::unset(int optname)
{
return librbd::image_options_unset(opts, optname);
}
void ImageOptions::clear()
{
librbd::image_options_clear(opts);
}
bool ImageOptions::empty() const
{
return librbd::image_options_is_empty(opts);
}
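/*
Illustrative ImageOptions use: options are typed values keyed by the
RBD_IMAGE_OPTION_* constants ("img" below is a hypothetical name):

  librbd::ImageOptions opts;
  opts.set(RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(22));
  opts.set(RBD_IMAGE_OPTION_FEATURES, RBD_FEATURE_LAYERING);
  librbd::RBD rbd;
  rbd.create4(io_ctx, "img", 1ULL << 30, opts);
*/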
/*
Image
*/
Image::Image() : ctx(NULL)
{
}
Image::~Image()
{
close();
}
int Image::close()
{
int r = 0;
if (ctx) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
r = ictx->state->close();
ctx = NULL;
tracepoint(librbd, close_image_exit, r);
}
return r;
}
int Image::aio_close(RBD::AioCompletion *c)
{
if (!ctx) {
return -EINVAL;
}
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), c->pc);
ictx->state->close(new C_AioCompletion(ictx, librbd::io::AIO_TYPE_CLOSE,
get_aio_completion(c)));
ctx = NULL;
tracepoint(librbd, aio_close_image_exit, 0);
return 0;
}
int Image::resize(uint64_t size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->resize(size, true, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
int Image::resize2(uint64_t size, bool allow_shrink, librbd::ProgressContext& pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
int r = ictx->operations->resize(size, allow_shrink, pctx);
tracepoint(librbd, resize_exit, r);
return r;
}
int Image::resize_with_progress(uint64_t size, librbd::ProgressContext& pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
int r = ictx->operations->resize(size, true, pctx);
tracepoint(librbd, resize_exit, r);
return r;
}
int Image::stat(image_info_t& info, size_t infosize)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, stat_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::info(ictx, info, infosize);
tracepoint(librbd, stat_exit, r, &info);
return r;
}
int Image::old_format(uint8_t *old)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_old_format_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_old_format(ictx, old);
tracepoint(librbd, get_old_format_exit, r, *old);
return r;
}
int Image::size(uint64_t *size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_size_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_size(ictx, size);
tracepoint(librbd, get_size_exit, r, *size);
return r;
}
int Image::get_group(group_info_t *group_info, size_t group_info_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, image_get_group_enter, ictx->name.c_str());
if (group_info_size != sizeof(group_info_t)) {
tracepoint(librbd, image_get_group_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Group<>::image_get_group(ictx, group_info);
tracepoint(librbd, image_get_group_exit, r);
return r;
}
int Image::features(uint64_t *features)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_features_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_features(ictx, features);
tracepoint(librbd, get_features_exit, r, *features);
return r;
}
int Image::update_features(uint64_t features, bool enabled)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
tracepoint(librbd, update_features_enter, ictx, features, enabled);
int r = ictx->operations->update_features(features, enabled);
tracepoint(librbd, update_features_exit, r);
return r;
}
int Image::get_op_features(uint64_t *op_features)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::get_op_features(ictx, op_features);
}
uint64_t Image::get_stripe_unit() const
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_stripe_unit_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
uint64_t stripe_unit = ictx->get_stripe_unit();
tracepoint(librbd, get_stripe_unit_exit, 0, stripe_unit);
return stripe_unit;
}
uint64_t Image::get_stripe_count() const
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_stripe_count_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
uint64_t stripe_count = ictx->get_stripe_count();
tracepoint(librbd, get_stripe_count_exit, 0, stripe_count);
return stripe_count;
}
int Image::get_create_timestamp(struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_create_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_create_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_create_timestamp_exit, 0, timestamp);
return 0;
}
int Image::get_access_timestamp(struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
{
std::shared_lock timestamp_locker{ictx->timestamp_lock};
utime_t time = ictx->get_access_timestamp();
time.to_timespec(timestamp);
}
tracepoint(librbd, get_access_timestamp_exit, 0, timestamp);
return 0;
}
int Image::get_modify_timestamp(struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
{
std::shared_lock timestamp_locker{ictx->timestamp_lock};
utime_t time = ictx->get_modify_timestamp();
time.to_timespec(timestamp);
}
tracepoint(librbd, get_modify_timestamp_exit, 0, timestamp);
return 0;
}
int Image::overlap(uint64_t *overlap)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_overlap_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_overlap(ictx, overlap);
tracepoint(librbd, get_overlap_exit, r, *overlap);
return r;
}
int Image::get_name(std::string *name)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
*name = ictx->name;
return 0;
}
int Image::get_id(std::string *id)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
if (ictx->old_format) {
return -EINVAL;
}
*id = ictx->id;
return 0;
}
std::string Image::get_block_name_prefix()
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
return ictx->object_prefix;
}
int64_t Image::get_data_pool_id()
{
ImageCtx *ictx = reinterpret_cast<ImageCtx *>(ctx);
return librbd::api::Image<>::get_data_pool_id(ictx);
}
int Image::parent_info(string *parent_pool_name, string *parent_name,
string *parent_snap_name)
{
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = get_parent(&parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name != nullptr) {
*parent_pool_name = parent_image.pool_name;
}
if (parent_name != nullptr) {
*parent_name = parent_image.image_name;
}
if (parent_snap_name != nullptr) {
*parent_snap_name = parent_snap.name;
}
}
return r;
}
int Image::parent_info2(string *parent_pool_name, string *parent_name,
string *parent_id, string *parent_snap_name)
{
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = get_parent(&parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name != nullptr) {
*parent_pool_name = parent_image.pool_name;
}
if (parent_name != nullptr) {
*parent_name = parent_image.image_name;
}
if (parent_id != nullptr) {
*parent_id = parent_image.image_id;
}
if (parent_snap_name != nullptr) {
*parent_snap_name = parent_snap.name;
}
}
return r;
}
int Image::get_parent(linked_image_spec_t *parent_image,
snap_spec_t *parent_snap)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::api::Image<>::get_parent(ictx, parent_image, parent_snap);
tracepoint(librbd, get_parent_info_exit, r,
parent_image->pool_name.c_str(),
parent_image->image_name.c_str(),
parent_image->image_id.c_str(),
parent_snap->name.c_str());
return r;
}
int Image::get_migration_source_spec(std::string* source_spec)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
return librbd::api::Migration<>::get_source_spec(ictx, source_spec);
}
int Image::get_flags(uint64_t *flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, get_flags_enter, ictx);
int r = librbd::get_flags(ictx, flags);
tracepoint(librbd, get_flags_exit, ictx, r, *flags);
return r;
}
int Image::set_image_notification(int fd, int type)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, set_image_notification_enter, ictx, fd, type);
int r = librbd::set_image_notification(ictx, fd, type);
tracepoint(librbd, set_image_notification_exit, ictx, r);
return r;
}
int Image::is_exclusive_lock_owner(bool *is_owner)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, is_exclusive_lock_owner_enter, ictx);
int r = librbd::is_exclusive_lock_owner(ictx, is_owner);
tracepoint(librbd, is_exclusive_lock_owner_exit, ictx, r, *is_owner);
return r;
}
int Image::lock_acquire(rbd_lock_mode_t lock_mode)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_acquire_enter, ictx, lock_mode);
int r = librbd::lock_acquire(ictx, lock_mode);
tracepoint(librbd, lock_acquire_exit, ictx, r);
return r;
}
int Image::lock_release()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_release_enter, ictx);
int r = librbd::lock_release(ictx);
tracepoint(librbd, lock_release_exit, ictx, r);
return r;
}
int Image::lock_get_owners(rbd_lock_mode_t *lock_mode,
std::list<std::string> *lock_owners)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_get_owners_enter, ictx);
int r = librbd::lock_get_owners(ictx, lock_mode, lock_owners);
tracepoint(librbd, lock_get_owners_exit, ictx, r);
return r;
}
int Image::lock_break(rbd_lock_mode_t lock_mode,
const std::string &lock_owner)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_break_enter, ictx, lock_mode, lock_owner.c_str());
int r = librbd::lock_break(ictx, lock_mode, lock_owner);
tracepoint(librbd, lock_break_exit, ictx, r);
return r;
}
int Image::rebuild_object_map(ProgressContext &prog_ctx)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx*>(ctx);
return ictx->operations->rebuild_object_map(prog_ctx);
}
int Image::check_object_map(ProgressContext &prog_ctx)
{
ImageCtx *ictx = reinterpret_cast<ImageCtx*>(ctx);
return ictx->operations->check_object_map(prog_ctx);
}
int Image::copy(IoCtx& dest_io_ctx, const char *destname)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
ImageOptions opts;
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy_exit, r);
return r;
}
int Image::copy2(Image& dest)
{
ImageCtx *srcctx = (ImageCtx *)ctx;
ImageCtx *destctx = (ImageCtx *)dest.ctx;
tracepoint(librbd, copy2_enter, srcctx, srcctx->name.c_str(), srcctx->snap_name.c_str(), srcctx->read_only, destctx, destctx->name.c_str(), destctx->snap_name.c_str(), destctx->read_only);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(srcctx, destctx, prog_ctx, 0);
tracepoint(librbd, copy2_exit, r);
return r;
}
int Image::copy3(IoCtx& dest_io_ctx, const char *destname, ImageOptions& opts)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy3_exit, r);
return r;
}
int Image::copy4(IoCtx& dest_io_ctx, const char *destname, ImageOptions& opts, size_t sparse_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts, sparse_size);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, sparse_size);
tracepoint(librbd, copy4_exit, r);
return r;
}
int Image::copy_with_progress(IoCtx& dest_io_ctx, const char *destname,
librbd::ProgressContext &pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
ImageOptions opts;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, pctx, 0);
tracepoint(librbd, copy_exit, r);
return r;
}
int Image::copy_with_progress2(Image& dest, librbd::ProgressContext &pctx)
{
ImageCtx *srcctx = (ImageCtx *)ctx;
ImageCtx *destctx = (ImageCtx *)dest.ctx;
tracepoint(librbd, copy2_enter, srcctx, srcctx->name.c_str(), srcctx->snap_name.c_str(), srcctx->read_only, destctx, destctx->name.c_str(), destctx->snap_name.c_str(), destctx->read_only);
int r = librbd::copy(srcctx, destctx, pctx, 0);
tracepoint(librbd, copy2_exit, r);
return r;
}
int Image::copy_with_progress3(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts,
librbd::ProgressContext &pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts);
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, pctx, 0);
tracepoint(librbd, copy3_exit, r);
return r;
}
int Image::copy_with_progress4(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts,
librbd::ProgressContext &pctx,
size_t sparse_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, opts.opts, sparse_size);
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, pctx, sparse_size);
tracepoint(librbd, copy4_exit, r);
return r;
}
int Image::deep_copy(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, opts.opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, r);
return r;
}
int Image::deep_copy_with_progress(IoCtx& dest_io_ctx, const char *destname,
ImageOptions& opts,
librbd::ProgressContext &prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, opts.opts);
int r = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, r);
return r;
}
int Image::encryption_format(encryption_format_t format,
encryption_options_t opts,
size_t opts_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::encryption_format(
ictx, format, opts, opts_size, false);
}
int Image::encryption_load(encryption_format_t format,
encryption_options_t opts,
size_t opts_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
encryption_spec_t spec = {format, opts, opts_size};
return librbd::api::Image<>::encryption_load(ictx, &spec, 1, false);
}
int Image::encryption_load2(const encryption_spec_t *specs, size_t spec_count)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::encryption_load(
ictx, specs, spec_count, false);
}
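/*
Hedged sketch of loading an image's encryption key; field and constant
names follow the librbd encryption headers, but treat the snippet as
illustrative only:

  librbd::encryption_luks1_format_options_t luks_opts;
  luks_opts.passphrase = "secret";   // hypothetical passphrase
  int r = image.encryption_load(RBD_ENCRYPTION_FORMAT_LUKS1,
                                &luks_opts, sizeof(luks_opts));
  // on success, subsequent I/O through this handle is transparently
  // decrypted/encrypted
*/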
int Image::flatten()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
int Image::flatten_with_progress(librbd::ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
int Image::sparsify(size_t sparse_size)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
int Image::sparsify_with_progress(size_t sparse_size,
librbd::ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
int Image::list_children(set<pair<string, string> > *children)
{
std::vector<linked_image_spec_t> images;
int r = list_children3(&images);
if (r < 0) {
return r;
}
for (auto& image : images) {
if (!image.trash) {
children->insert({image.pool_name, image.image_name});
}
}
return 0;
}
int Image::list_children2(vector<librbd::child_info_t> *children)
{
std::vector<linked_image_spec_t> images;
int r = list_children3(&images);
if (r < 0) {
return r;
}
for (auto& image : images) {
children->push_back({
.pool_name = image.pool_name,
.image_name = image.image_name,
.image_id = image.image_id,
.trash = image.trash});
}
return 0;
}
int Image::list_children3(std::vector<linked_image_spec_t> *images)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::api::Image<>::list_children(ictx, images);
#ifdef WITH_LTTNG
if (r >= 0) {
for (auto& it : *images) {
tracepoint(librbd, list_children_entry, it.pool_name.c_str(),
it.image_name.c_str());
}
}
#endif
tracepoint(librbd, list_children_exit, r);
return r;
}
int Image::list_descendants(std::vector<linked_image_spec_t> *images)
{
auto ictx = reinterpret_cast<ImageCtx*>(ctx);
images->clear();
int r = librbd::api::Image<>::list_descendants(ictx, {}, images);
return r;
}
int Image::list_lockers(std::list<librbd::locker_t> *lockers,
bool *exclusive, string *tag)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, list_lockers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::list_lockers(ictx, lockers, exclusive, tag);
if (r >= 0) {
for (std::list<librbd::locker_t>::const_iterator it = lockers->begin();
it != lockers->end(); ++it) {
tracepoint(librbd, list_lockers_entry, it->client.c_str(), it->cookie.c_str(), it->address.c_str());
}
}
tracepoint(librbd, list_lockers_exit, r);
return r;
}
int Image::lock_exclusive(const string& cookie)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_exclusive_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie.c_str());
int r = librbd::lock(ictx, true, cookie, "");
tracepoint(librbd, lock_exclusive_exit, r);
return r;
}
int Image::lock_shared(const string& cookie, const std::string& tag)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, lock_shared_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie.c_str(), tag.c_str());
int r = librbd::lock(ictx, false, cookie, tag);
tracepoint(librbd, lock_shared_exit, r);
return r;
}
int Image::unlock(const string& cookie)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, unlock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie.c_str());
int r = librbd::unlock(ictx, cookie);
tracepoint(librbd, unlock_exit, r);
return r;
}
int Image::break_lock(const string& client, const string& cookie)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, break_lock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, client.c_str(), cookie.c_str());
int r = librbd::break_lock(ictx, client, cookie);
tracepoint(librbd, break_lock_exit, r);
return r;
}
int Image::snap_create(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
auto flags = librbd::util::get_default_snap_create_flags(ictx);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
int Image::snap_create2(const char *snap_name, uint32_t flags,
ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
int Image::snap_remove(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_remove_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, 0, prog_ctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
int Image::snap_remove2(const char *snap_name, uint32_t flags, ProgressContext& pctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_remove2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name, flags);
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, flags, pctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
int Image::snap_remove_by_id(uint64_t snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::remove(ictx, snap_id);
}
int Image::snap_rollback(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
int Image::snap_rename(const char *srcname, const char *dstname)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_rename_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, srcname, dstname);
int r = ictx->operations->snap_rename(srcname, dstname);
tracepoint(librbd, snap_rename_exit, r);
return r;
}
int Image::snap_rollback_with_progress(const char *snap_name,
ProgressContext& prog_ctx)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
int Image::snap_protect(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_protect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_protect_exit, r);
return r;
}
int Image::snap_unprotect(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_unprotect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_unprotect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_unprotect_exit, r);
return r;
}
int Image::snap_is_protected(const char *snap_name, bool *is_protected)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_is_protected_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Snapshot<>::is_protected(ictx, snap_name, is_protected);
tracepoint(librbd, snap_is_protected_exit, r, *is_protected ? 1 : 0);
return r;
}
int Image::snap_list(vector<librbd::snap_info_t>& snaps)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_list_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, &snaps);
int r = librbd::api::Snapshot<>::list(ictx, snaps);
if (r >= 0) {
for (size_t i = 0, n = snaps.size(); i < n; i++) {
tracepoint(librbd, snap_list_entry, snaps[i].id, snaps[i].size, snaps[i].name.c_str());
}
}
tracepoint(librbd, snap_list_exit, r, snaps.size());
if (r >= 0) {
// A little ugly, but the C++ API doesn't need an Image::snap_list_end,
// and we want the tracepoints to mirror the C API
tracepoint(librbd, snap_list_end_enter, &snaps);
tracepoint(librbd, snap_list_end_exit);
}
return r;
}
bool Image::snap_exists(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_exists_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, snap_name);
bool exists;
int r = librbd::api::Snapshot<>::exists(ictx, cls::rbd::UserSnapshotNamespace(), snap_name, &exists);
tracepoint(librbd, snap_exists_exit, r, exists);
if (r < 0) {
// lie to the caller, since we don't know the real answer yet.
return false;
}
return exists;
}
// A safer version of snap_exists: errors are reported instead of being
// folded into a false return.
int Image::snap_exists2(const char *snap_name, bool *exists)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_exists_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Snapshot<>::exists(ictx, cls::rbd::UserSnapshotNamespace(), snap_name, exists);
tracepoint(librbd, snap_exists_exit, r, *exists);
return r;
}
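/*
Callers should prefer snap_exists2(): a negative return distinguishes a
failed lookup from "snapshot absent" ("snap1" below is hypothetical):

  bool exists = false;
  int r = image.snap_exists2("snap1", &exists);
  if (r < 0) {
    // lookup failed; `exists` is not meaningful
  } else if (exists) {
    // snapshot is present
  }
*/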
int Image::snap_get_timestamp(uint64_t snap_id, struct timespec *timestamp)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_timestamp_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_timestamp(ictx, snap_id, timestamp);
tracepoint(librbd, snap_get_timestamp_exit, r);
return r;
}
int Image::snap_get_limit(uint64_t *limit)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_limit_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_limit(ictx, limit);
tracepoint(librbd, snap_get_limit_exit, r, *limit);
return r;
}
int Image::snap_get_namespace_type(uint64_t snap_id,
snap_namespace_type_t *namespace_type) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_namespace_type_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_namespace_type(ictx, snap_id, namespace_type);
tracepoint(librbd, snap_get_namespace_type_exit, r);
return r;
}
int Image::snap_get_group_namespace(uint64_t snap_id,
snap_group_namespace_t *group_snap,
size_t group_snap_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_get_group_namespace_enter, ictx,
ictx->name.c_str());
if (group_snap_size != sizeof(snap_group_namespace_t)) {
tracepoint(librbd, snap_get_group_namespace_exit, -ERANGE);
return -ERANGE;
}
int r = librbd::api::Snapshot<>::get_group_namespace(ictx, snap_id,
group_snap);
tracepoint(librbd, snap_get_group_namespace_exit, r);
return r;
}
int Image::snap_get_trash_namespace(uint64_t snap_id,
std::string* original_name) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::get_trash_namespace(ictx, snap_id,
original_name);
}
int Image::snap_get_mirror_namespace(
uint64_t snap_id, snap_mirror_namespace_t *mirror_snap,
size_t mirror_snap_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (mirror_snap_size != sizeof(snap_mirror_namespace_t)) {
return -ERANGE;
}
int r = librbd::api::Snapshot<>::get_mirror_namespace(
ictx, snap_id, mirror_snap);
return r;
}
int Image::snap_set_limit(uint64_t limit)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_set_limit_enter, ictx, ictx->name.c_str(), limit);
int r = ictx->operations->snap_set_limit(limit);
tracepoint(librbd, snap_set_limit_exit, r);
return r;
}
int Image::snap_set(const char *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, snap_set_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Image<>::snap_set(
ictx, cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_set_exit, r);
return r;
}
int Image::snap_set_by_id(uint64_t snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Image<>::snap_set(ictx, snap_id);
}
int Image::snap_get_name(uint64_t snap_id, std::string *snap_name)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::get_name(ictx, snap_id, snap_name);
}
int Image::snap_get_id(const std::string snap_name, uint64_t *snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Snapshot<>::get_id(ictx, snap_name, snap_id);
}
ssize_t Image::read(uint64_t ofs, size_t len, bufferlist& bl)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int r = api::Io<>::read(*ictx, ofs, len, io::ReadResult{&bl}, 0);
tracepoint(librbd, read_exit, r);
return r;
}
ssize_t Image::read2(uint64_t ofs, size_t len, bufferlist& bl, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, ofs, len, op_flags);
int r = api::Io<>::read(*ictx, ofs, len, io::ReadResult{&bl}, op_flags);
tracepoint(librbd, read_exit, r);
return r;
}
int64_t Image::read_iterate(uint64_t ofs, size_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read_iterate_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
tracepoint(librbd, read_iterate_exit, r);
return r;
}
int Image::read_iterate2(uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, read_iterate2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
if (r > 0)
r = 0;
tracepoint(librbd, read_iterate2_exit, r);
return (int)r;
}
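  // Note the differing return conventions: read_iterate() returns the number
  // of bytes iterated (or a negative error), while read_iterate2() clamps any
  // positive result to 0 so callers see only success or an error code.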
int Image::diff_iterate(const char *fromsnapname,
uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
true, false);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs,
len, true, false, cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
int Image::diff_iterate2(const char *fromsnapname, uint64_t ofs, uint64_t len,
bool include_parent, bool whole_object,
int (*cb)(uint64_t, size_t, int, void *), void *arg)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
include_parent, whole_object);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs,
len, include_parent,
whole_object, cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
ssize_t Image::write(uint64_t ofs, size_t len, bufferlist& bl)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len, bl.length() < len ? NULL : bl.c_str());
if (bl.length() < len) {
tracepoint(librbd, write_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::write(*ictx, ofs, len, bufferlist{bl}, 0);
tracepoint(librbd, write_exit, r);
return r;
}
ssize_t Image::write2(uint64_t ofs, size_t len, bufferlist& bl, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, write2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only,
ofs, len, bl.length() < len ? NULL : bl.c_str(), op_flags);
if (bl.length() < len) {
tracepoint(librbd, write_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::write(*ictx, ofs, len, bufferlist{bl}, op_flags);
tracepoint(librbd, write_exit, r);
return r;
}
int Image::discard(uint64_t ofs, uint64_t len)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, discard_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
if (len > static_cast<uint64_t>(std::numeric_limits<int32_t>::max())) {
tracepoint(librbd, discard_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::discard(
*ictx, ofs, len, ictx->discard_granularity_bytes);
tracepoint(librbd, discard_exit, r);
return r;
}
ssize_t Image::writesame(uint64_t ofs, size_t len, bufferlist& bl, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, ofs, len, bl.length() == 0 ? NULL : bl.c_str(), bl.length(),
op_flags);
if (bl.length() == 0 || len % bl.length() ||
len > static_cast<size_t>(std::numeric_limits<int>::max())) {
tracepoint(librbd, writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && bl.is_zero()) {
int r = api::Io<>::write_zeroes(*ictx, ofs, len, 0U, op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
int r = api::Io<>::write_same(*ictx, ofs, len, bufferlist{bl}, op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
ssize_t Image::write_zeroes(uint64_t ofs, size_t len, int zero_flags,
int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return api::Io<>::write_zeroes(*ictx, ofs, len, zero_flags, op_flags);
}
ssize_t Image::compare_and_write(uint64_t ofs, size_t len,
ceph::bufferlist &cmp_bl, ceph::bufferlist& bl,
uint64_t *mismatch_off, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(),
ictx->read_only, ofs, len, cmp_bl.length() < len ? NULL : cmp_bl.c_str(),
bl.length() < len ? NULL : bl.c_str(), op_flags);
if (bl.length() < len || cmp_bl.length() < len) {
tracepoint(librbd, compare_and_write_exit, -EINVAL);
return -EINVAL;
}
int r = api::Io<>::compare_and_write(
*ictx, ofs, len, bufferlist{cmp_bl}, bufferlist{bl}, mismatch_off,
op_flags);
tracepoint(librbd, compare_and_write_exit, r);
return r;
}
int Image::aio_write(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, bl.length() < len ? NULL : bl.c_str(), c->pc);
if (bl.length() < len) {
tracepoint(librbd, aio_write_exit, -EINVAL);
return -EINVAL;
}
api::Io<>::aio_write(*ictx, get_aio_completion(c), off, len, bufferlist{bl},
0, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
int Image::aio_write2(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_write2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, bl.length() < len ? NULL : bl.c_str(), c->pc, op_flags);
if (bl.length() < len) {
tracepoint(librbd, aio_write_exit, -EINVAL);
return -EINVAL;
}
api::Io<>::aio_write(*ictx, get_aio_completion(c), off, len, bufferlist{bl},
op_flags, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
int Image::aio_read(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, bl.c_str(), c->pc);
ldout(ictx->cct, 10) << "Image::aio_read() buf=" << (void *)bl.c_str() << "~"
<< (void *)(bl.c_str() + len - 1) << dendl;
api::Io<>::aio_read(*ictx, get_aio_completion(c), off, len,
io::ReadResult{&bl}, 0, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
int Image::aio_read2(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_read2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, bl.c_str(), c->pc, op_flags);
ldout(ictx->cct, 10) << "Image::aio_read() buf=" << (void *)bl.c_str() << "~"
<< (void *)(bl.c_str() + len - 1) << dendl;
api::Io<>::aio_read(*ictx, get_aio_completion(c), off, len,
io::ReadResult{&bl}, op_flags, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
int Image::flush()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = api::Io<>::flush(*ictx);
tracepoint(librbd, flush_exit, r);
return r;
}
int Image::aio_flush(RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, c->pc);
api::Io<>::aio_flush(*ictx, get_aio_completion(c), true);
tracepoint(librbd, aio_flush_exit, 0);
return 0;
}
int Image::aio_discard(uint64_t off, uint64_t len, RBD::AioCompletion *c)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_discard_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, c->pc);
api::Io<>::aio_discard(
*ictx, get_aio_completion(c), off, len, ictx->discard_granularity_bytes,
true);
tracepoint(librbd, aio_discard_exit, 0);
return 0;
}
int Image::aio_writesame(uint64_t off, size_t len, bufferlist& bl,
RBD::AioCompletion *c, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
               ictx->read_only, off, len, bl.length() == 0 ? NULL : bl.c_str(), bl.length(),
c->pc, op_flags);
if (bl.length() == 0 || len % bl.length()) {
tracepoint(librbd, aio_writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && bl.is_zero()) {
api::Io<>::aio_write_zeroes(*ictx, get_aio_completion(c), off, len, 0U,
op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
api::Io<>::aio_write_same(*ictx, get_aio_completion(c), off, len,
bufferlist{bl}, op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
int Image::aio_write_zeroes(uint64_t off, size_t len, RBD::AioCompletion *c,
int zero_flags, int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
api::Io<>::aio_write_zeroes(*ictx, get_aio_completion(c), off, len,
zero_flags, op_flags, true);
return 0;
}
int Image::aio_compare_and_write(uint64_t off, size_t len,
ceph::bufferlist& cmp_bl, ceph::bufferlist& bl,
RBD::AioCompletion *c, uint64_t *mismatch_off,
int op_flags)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, aio_compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(),
ictx->read_only, off, len, cmp_bl.length() < len ? NULL : cmp_bl.c_str(),
bl.length() < len ? NULL : bl.c_str(), c->pc, op_flags);
if (bl.length() < len || cmp_bl.length() < len) {
tracepoint(librbd, aio_compare_and_write_exit, -EINVAL);
return -EINVAL;
}
api::Io<>::aio_compare_and_write(*ictx, get_aio_completion(c), off, len,
bufferlist{cmp_bl}, bufferlist{bl},
mismatch_off, op_flags, false);
tracepoint(librbd, aio_compare_and_write_exit, 0);
return 0;
}
int Image::invalidate_cache()
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, invalidate_cache_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::invalidate_cache(ictx);
tracepoint(librbd, invalidate_cache_exit, r);
return r;
}
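  // Polls for up to numcomp completed AIO events; on success the matching
  // RBD-level completions are handed back through comps and the number of
  // retrieved events is returned.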
int Image::poll_io_events(RBD::AioCompletion **comps, int numcomp)
{
    // Use a std::vector instead of a C99-style variable-length array, which
    // is only a compiler extension in C++.
    std::vector<io::AioCompletion *> cs(numcomp);
    ImageCtx *ictx = (ImageCtx *)ctx;
    tracepoint(librbd, poll_io_events_enter, ictx, numcomp);
    int r = librbd::poll_io_events(ictx, cs.data(), numcomp);
tracepoint(librbd, poll_io_events_exit, r);
if (r > 0) {
for (int i = 0; i < r; ++i)
comps[i] = (RBD::AioCompletion *)cs[i]->rbd_comp;
}
return r;
}
int Image::metadata_get(const std::string &key, std::string *value)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_get_enter, ictx, key.c_str());
int r = librbd::metadata_get(ictx, key, value);
if (r < 0) {
tracepoint(librbd, metadata_get_exit, r, key.c_str(), NULL);
} else {
tracepoint(librbd, metadata_get_exit, r, key.c_str(), value->c_str());
}
return r;
}
int Image::metadata_set(const std::string &key, const std::string &value)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_set_enter, ictx, key.c_str(), value.c_str());
int r = ictx->operations->metadata_set(key, value);
tracepoint(librbd, metadata_set_exit, r);
return r;
}
int Image::metadata_remove(const std::string &key)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_remove_enter, ictx, key.c_str());
int r = ictx->operations->metadata_remove(key);
tracepoint(librbd, metadata_remove_exit, r);
return r;
}
int Image::metadata_list(const std::string &start, uint64_t max, map<string, bufferlist> *pairs)
{
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, metadata_list_enter, ictx);
int r = librbd::metadata_list(ictx, start, max, pairs);
if (r >= 0) {
for (map<string, bufferlist>::iterator it = pairs->begin();
it != pairs->end(); ++it) {
tracepoint(librbd, metadata_list_entry, it->first.c_str(), it->second.c_str());
}
}
tracepoint(librbd, metadata_list_exit, r);
return r;
}
int Image::mirror_image_enable() {
return mirror_image_enable2(RBD_MIRROR_IMAGE_MODE_JOURNAL);
}
int Image::mirror_image_enable2(mirror_image_mode_t mode) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_enable(ictx, mode, false);
}
int Image::mirror_image_disable(bool force) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_disable(ictx, force);
}
int Image::mirror_image_promote(bool force) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_promote(ictx, force);
}
int Image::mirror_image_demote() {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_demote(ictx);
}
int Image::mirror_image_resync()
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_resync(ictx);
}
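  // mirror_image_create_snapshot() derives its snap-create flags from the
  // image configuration, whereas mirror_image_create_snapshot2() takes them
  // explicitly from the caller.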
int Image::mirror_image_create_snapshot(uint64_t *snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
auto flags = librbd::util::get_default_snap_create_flags(ictx);
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
int Image::mirror_image_create_snapshot2(uint32_t flags, uint64_t *snap_id)
{
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
int Image::mirror_image_get_info(mirror_image_info_t *mirror_image_info,
size_t info_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_info_t) != info_size) {
return -ERANGE;
}
return librbd::api::Mirror<>::image_get_info(ictx, mirror_image_info);
}
int Image::mirror_image_get_mode(mirror_image_mode_t *mode) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_get_mode(ictx, mode);
}
int Image::mirror_image_get_global_status(
mirror_image_global_status_t *mirror_image_global_status,
size_t status_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
return librbd::api::Mirror<>::image_get_global_status(
ictx, mirror_image_global_status);
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int Image::mirror_image_get_status(mirror_image_status_t *mirror_image_status,
size_t status_size) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_status_t) != status_size) {
return -ERANGE;
}
mirror_image_global_status_t mirror_image_global_status;
int r = librbd::api::Mirror<>::image_get_global_status(
ictx, &mirror_image_global_status);
if (r < 0) {
return r;
}
librbd::mirror_image_site_status_t local_status;
r = get_local_mirror_image_site_status(mirror_image_global_status,
&local_status);
if (r < 0) {
return r;
}
*mirror_image_status = mirror_image_status_t{
mirror_image_global_status.name, mirror_image_global_status.info,
local_status.state, local_status.description, local_status.last_update,
local_status.up};
return 0;
}
#pragma GCC diagnostic pop
int Image::mirror_image_get_instance_id(std::string *instance_id) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Mirror<>::image_get_instance_id(ictx, instance_id);
}
int Image::aio_mirror_image_promote(bool force, RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_promote(
ictx, force, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_demote(RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_demote(
ictx, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_get_info(mirror_image_info_t *mirror_image_info,
size_t info_size,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_info_t) != info_size) {
return -ERANGE;
}
librbd::api::Mirror<>::image_get_info(
ictx, mirror_image_info,
new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_get_mode(mirror_image_mode_t *mode,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_get_mode(
ictx, mode, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::aio_mirror_image_get_global_status(
mirror_image_global_status_t *status, size_t status_size,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
librbd::api::Mirror<>::image_get_global_status(
ictx, status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
int Image::aio_mirror_image_get_status(mirror_image_status_t *status,
size_t status_size,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
if (sizeof(mirror_image_status_t) != status_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetStatus(
status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
librbd::api::Mirror<>::image_get_global_status(
ictx, &ctx->cpp_mirror_image_global_status, ctx);
return 0;
}
#pragma GCC diagnostic pop
int Image::aio_mirror_image_create_snapshot(uint32_t flags, uint64_t *snap_id,
RBD::AioCompletion *c) {
ImageCtx *ictx = (ImageCtx *)ctx;
librbd::api::Mirror<>::image_snapshot_create(
ictx, flags, snap_id, new C_AioCompletion(ictx,
librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(c)));
return 0;
}
int Image::update_watch(UpdateWatchCtx *wctx, uint64_t *handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, update_watch_enter, ictx, wctx);
int r = ictx->state->register_update_watcher(wctx, handle);
tracepoint(librbd, update_watch_exit, r, *handle);
return r;
}
int Image::update_unwatch(uint64_t handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, update_unwatch_enter, ictx, handle);
int r = ictx->state->unregister_update_watcher(handle);
tracepoint(librbd, update_unwatch_exit, r);
return r;
}
int Image::list_watchers(std::list<librbd::image_watcher_t> &watchers) {
ImageCtx *ictx = (ImageCtx *)ctx;
tracepoint(librbd, list_watchers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::list_watchers(ictx, watchers);
#ifdef WITH_LTTNG
if (r >= 0) {
for (auto &watcher : watchers) {
tracepoint(librbd, list_watchers_entry, watcher.addr.c_str(), watcher.id, watcher.cookie);
}
}
#endif
tracepoint(librbd, list_watchers_exit, r, watchers.size());
return r;
}
int Image::config_list(std::vector<config_option_t> *options) {
ImageCtx *ictx = (ImageCtx *)ctx;
return librbd::api::Config<>::list(ictx, options);
}
int Image::quiesce_watch(QuiesceWatchCtx *wctx, uint64_t *handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
int r = ictx->state->register_quiesce_watcher(wctx, handle);
return r;
}
int Image::quiesce_unwatch(uint64_t handle) {
ImageCtx *ictx = (ImageCtx *)ctx;
int r = ictx->state->unregister_quiesce_watcher(handle);
return r;
}
void Image::quiesce_complete(uint64_t handle, int r) {
ImageCtx *ictx = (ImageCtx *)ctx;
ictx->state->quiesce_complete(handle, r);
}
} // namespace librbd
extern "C" void rbd_version(int *major, int *minor, int *extra)
{
if (major)
*major = LIBRBD_VER_MAJOR;
if (minor)
*minor = LIBRBD_VER_MINOR;
if (extra)
*extra = LIBRBD_VER_EXTRA;
}
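/* image options */
// rbd_image_options_get_string() requires the output buffer to hold the
// value plus its NUL terminator and fails with -E2BIG otherwise. A minimal
// sketch (the option and buffer size are illustrative assumptions):
//
//   char data_pool[64];
//   int r = rbd_image_options_get_string(opts, RBD_IMAGE_OPTION_DATA_POOL,
//                                        data_pool, sizeof(data_pool));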
extern "C" void rbd_image_options_create(rbd_image_options_t* opts)
{
librbd::image_options_create(opts);
}
extern "C" void rbd_image_options_destroy(rbd_image_options_t opts)
{
librbd::image_options_destroy(opts);
}
extern "C" int rbd_image_options_set_string(rbd_image_options_t opts, int optname,
const char* optval)
{
return librbd::image_options_set(opts, optname, optval);
}
extern "C" int rbd_image_options_set_uint64(rbd_image_options_t opts, int optname,
uint64_t optval)
{
return librbd::image_options_set(opts, optname, optval);
}
extern "C" int rbd_image_options_get_string(rbd_image_options_t opts, int optname,
char* optval, size_t maxlen)
{
std::string optval_;
int r = librbd::image_options_get(opts, optname, &optval_);
if (r < 0) {
return r;
}
if (optval_.size() >= maxlen) {
return -E2BIG;
}
strncpy(optval, optval_.c_str(), maxlen);
return 0;
}
extern "C" int rbd_image_options_get_uint64(rbd_image_options_t opts, int optname,
uint64_t* optval)
{
return librbd::image_options_get(opts, optname, optval);
}
extern "C" int rbd_image_options_is_set(rbd_image_options_t opts, int optname,
bool* is_set)
{
return librbd::image_options_is_set(opts, optname, is_set);
}
extern "C" int rbd_image_options_unset(rbd_image_options_t opts, int optname)
{
return librbd::image_options_unset(opts, optname);
}
extern "C" void rbd_image_options_clear(rbd_image_options_t opts)
{
librbd::image_options_clear(opts);
}
extern "C" int rbd_image_options_is_empty(rbd_image_options_t opts)
{
return librbd::image_options_is_empty(opts);
}
/* pool mirroring */
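// The string getters in this section (site name, mirror uuid, bootstrap
// token) share a two-call sizing convention: when *max_len is too small, the
// required length (including the NUL terminator) is stored in *max_len and
// the call fails with -ERANGE, letting the caller retry with a right-sized
// buffer. A minimal sketch, not part of the API:
//
//   size_t max_len = 0;
//   int r = rbd_mirror_site_name_get(cluster, nullptr, &max_len);
//   if (r == -ERANGE) {
//     std::vector<char> name(max_len);
//     r = rbd_mirror_site_name_get(cluster, name.data(), &max_len);
//   }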
extern "C" int rbd_mirror_site_name_get(rados_t cluster, char *name,
size_t *max_len) {
librados::Rados rados;
librados::Rados::from_rados_t(cluster, rados);
std::string site_name;
int r = librbd::api::Mirror<>::site_name_get(rados, &site_name);
if (r < 0) {
return r;
}
auto total_len = site_name.size() + 1;
if (*max_len < total_len) {
*max_len = total_len;
return -ERANGE;
}
*max_len = total_len;
strcpy(name, site_name.c_str());
return 0;
}
extern "C" int rbd_mirror_site_name_set(rados_t cluster, const char *name) {
librados::Rados rados;
librados::Rados::from_rados_t(cluster, rados);
return librbd::api::Mirror<>::site_name_set(rados, name);
}
extern "C" int rbd_mirror_mode_get(rados_ioctx_t p,
rbd_mirror_mode_t *mirror_mode) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::mode_get(io_ctx, mirror_mode);
}
extern "C" int rbd_mirror_mode_set(rados_ioctx_t p,
rbd_mirror_mode_t mirror_mode) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::mode_set(io_ctx, mirror_mode);
}
extern "C" int rbd_mirror_uuid_get(rados_ioctx_t p,
char *mirror_uuid, size_t *max_len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::string mirror_uuid_str;
int r = librbd::api::Mirror<>::uuid_get(io_ctx, &mirror_uuid_str);
if (r < 0) {
return r;
}
auto total_len = mirror_uuid_str.size() + 1;
if (*max_len < total_len) {
*max_len = total_len;
return -ERANGE;
}
*max_len = total_len;
strcpy(mirror_uuid, mirror_uuid_str.c_str());
return 0;
}
extern "C" int rbd_mirror_peer_bootstrap_create(rados_ioctx_t p, char *token,
size_t *max_len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::string token_str;
int r = librbd::api::Mirror<>::peer_bootstrap_create(io_ctx, &token_str);
if (r < 0) {
return r;
}
auto total_len = token_str.size() + 1;
if (*max_len < total_len) {
*max_len = total_len;
return -ERANGE;
}
*max_len = total_len;
strcpy(token, token_str.c_str());
return 0;
}
extern "C" int rbd_mirror_peer_bootstrap_import(
rados_ioctx_t p, rbd_mirror_peer_direction_t direction,
const char *token) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_bootstrap_import(io_ctx, direction, token);
}
extern "C" int rbd_mirror_peer_site_add(rados_ioctx_t p, char *uuid,
size_t uuid_max_length,
rbd_mirror_peer_direction_t direction,
const char *site_name,
const char *client_name) {
static const std::size_t UUID_LENGTH = 36;
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
if (uuid_max_length < UUID_LENGTH + 1) {
return -E2BIG;
}
std::string uuid_str;
int r = librbd::api::Mirror<>::peer_site_add(io_ctx, &uuid_str, direction,
site_name, client_name);
if (r >= 0) {
strncpy(uuid, uuid_str.c_str(), uuid_max_length);
uuid[uuid_max_length - 1] = '\0';
}
return r;
}
extern "C" int rbd_mirror_peer_site_remove(rados_ioctx_t p, const char *uuid) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
int r = librbd::api::Mirror<>::peer_site_remove(io_ctx, uuid);
return r;
}
extern "C" int rbd_mirror_peer_site_list(
rados_ioctx_t p, rbd_mirror_peer_site_t *peers, int *max_peers) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::vector<librbd::mirror_peer_site_t> peer_vector;
int r = librbd::api::Mirror<>::peer_site_list(io_ctx, &peer_vector);
if (r < 0) {
return r;
}
if (*max_peers < static_cast<int>(peer_vector.size())) {
*max_peers = static_cast<int>(peer_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(peer_vector.size()); ++i) {
peers[i].uuid = strdup(peer_vector[i].uuid.c_str());
peers[i].direction = peer_vector[i].direction;
peers[i].site_name = strdup(peer_vector[i].site_name.c_str());
peers[i].mirror_uuid = strdup(peer_vector[i].mirror_uuid.c_str());
peers[i].client_name = strdup(peer_vector[i].client_name.c_str());
}
*max_peers = static_cast<int>(peer_vector.size());
return 0;
}
extern "C" void rbd_mirror_peer_site_list_cleanup(rbd_mirror_peer_site_t *peers,
int max_peers) {
for (int i = 0; i < max_peers; ++i) {
free(peers[i].uuid);
free(peers[i].site_name);
free(peers[i].mirror_uuid);
free(peers[i].client_name);
}
}
extern "C" int rbd_mirror_peer_site_set_client_name(
rados_ioctx_t p, const char *uuid, const char *client_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_site_set_client(io_ctx, uuid, client_name);
}
extern "C" int rbd_mirror_peer_site_set_name(
rados_ioctx_t p, const char *uuid, const char *site_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_site_set_name(io_ctx, uuid, site_name);
}
extern "C" int rbd_mirror_peer_site_set_direction(
rados_ioctx_t p, const char *uuid, rbd_mirror_peer_direction_t direction) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
return librbd::api::Mirror<>::peer_site_set_direction(io_ctx, uuid,
direction);
}
extern "C" int rbd_mirror_peer_site_get_attributes(
rados_ioctx_t p, const char *uuid, char *keys, size_t *max_key_len,
char *values, size_t *max_val_len, size_t *key_value_count) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::string> attributes;
int r = librbd::api::Mirror<>::peer_site_get_attributes(
io_ctx, uuid, &attributes);
if (r < 0) {
return r;
}
size_t key_total_len = 0, val_total_len = 0;
for (auto& it : attributes) {
key_total_len += it.first.size() + 1;
val_total_len += it.second.length() + 1;
}
bool too_short = ((*max_key_len < key_total_len) ||
(*max_val_len < val_total_len));
*max_key_len = key_total_len;
*max_val_len = val_total_len;
*key_value_count = attributes.size();
if (too_short) {
return -ERANGE;
}
char *keys_p = keys;
char *values_p = values;
for (auto& it : attributes) {
strncpy(keys_p, it.first.c_str(), it.first.size() + 1);
keys_p += it.first.size() + 1;
strncpy(values_p, it.second.c_str(), it.second.length() + 1);
values_p += it.second.length() + 1;
}
return 0;
}
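// Keys and values are packed as flat runs of NUL-terminated strings; the
// same layout is what rbd_mirror_peer_site_set_attributes() below consumes.
// An illustrative unpacking sketch (variable names are ours, not API):
//
//   const char *key_p = keys;
//   const char *val_p = values;
//   for (size_t i = 0; i < key_value_count; ++i) {
//     printf("%s=%s\n", key_p, val_p);
//     key_p += strlen(key_p) + 1;
//     val_p += strlen(val_p) + 1;
//   }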
extern "C" int rbd_mirror_peer_site_set_attributes(
rados_ioctx_t p, const char *uuid, const char *keys, const char *values,
size_t count) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::string> attributes;
for (size_t i = 0; i < count; ++i) {
const char* key = keys;
keys += strlen(key) + 1;
const char* value = values;
values += strlen(value) + 1;
attributes[key] = value;
}
return librbd::api::Mirror<>::peer_site_set_attributes(
io_ctx, uuid, attributes);
}
extern "C" int rbd_mirror_image_global_status_list(rados_ioctx_t p,
const char *start_id, size_t max, char **image_ids,
rbd_mirror_image_global_status_t *images, size_t *len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, librbd::mirror_image_global_status_t> cpp_images;
int r = librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, &cpp_images);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : cpp_images) {
ceph_assert(i < max);
const std::string &image_id = it.first;
image_ids[i] = strdup(image_id.c_str());
mirror_image_global_status_cpp_to_c(it.second, &images[i]);
i++;
}
*len = i;
return 0;
}
extern "C" void rbd_mirror_image_global_status_cleanup(
rbd_mirror_image_global_status_t *global_status) {
free(global_status->name);
rbd_mirror_image_get_info_cleanup(&global_status->info);
for (auto idx = 0U; idx < global_status->site_statuses_count; ++idx) {
free(global_status->site_statuses[idx].mirror_uuid);
free(global_status->site_statuses[idx].description);
}
free(global_status->site_statuses);
}
extern "C" void rbd_mirror_image_global_status_list_cleanup(
char **image_ids, rbd_mirror_image_global_status_t *images, size_t len) {
for (size_t i = 0; i < len; i++) {
free(image_ids[i]);
rbd_mirror_image_global_status_cleanup(&images[i]);
}
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
extern "C" int rbd_mirror_peer_add(rados_ioctx_t p, char *uuid,
size_t uuid_max_length,
const char *cluster_name,
const char *client_name) {
return rbd_mirror_peer_site_add(
p, uuid, uuid_max_length, RBD_MIRROR_PEER_DIRECTION_RX_TX, cluster_name,
client_name);
}
extern "C" int rbd_mirror_peer_remove(rados_ioctx_t p, const char *uuid) {
return rbd_mirror_peer_site_remove(p, uuid);
}
extern "C" int rbd_mirror_peer_list(rados_ioctx_t p,
rbd_mirror_peer_t *peers, int *max_peers) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::vector<librbd::mirror_peer_site_t> peer_vector;
int r = librbd::api::Mirror<>::peer_site_list(io_ctx, &peer_vector);
if (r < 0) {
return r;
}
if (*max_peers < static_cast<int>(peer_vector.size())) {
*max_peers = static_cast<int>(peer_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(peer_vector.size()); ++i) {
peers[i].uuid = strdup(peer_vector[i].uuid.c_str());
peers[i].cluster_name = strdup(peer_vector[i].site_name.c_str());
peers[i].client_name = strdup(peer_vector[i].client_name.c_str());
}
*max_peers = static_cast<int>(peer_vector.size());
return 0;
}
extern "C" void rbd_mirror_peer_list_cleanup(rbd_mirror_peer_t *peers,
int max_peers) {
for (int i = 0; i < max_peers; ++i) {
free(peers[i].uuid);
free(peers[i].cluster_name);
free(peers[i].client_name);
}
}
extern "C" int rbd_mirror_peer_set_client(rados_ioctx_t p, const char *uuid,
const char *client_name) {
return rbd_mirror_peer_site_set_client_name(p, uuid, client_name);
}
extern "C" int rbd_mirror_peer_set_cluster(rados_ioctx_t p, const char *uuid,
const char *cluster_name) {
return rbd_mirror_peer_site_set_name(p, uuid, cluster_name);
}
extern "C" int rbd_mirror_peer_get_attributes(
rados_ioctx_t p, const char *uuid, char *keys, size_t *max_key_len,
char *values, size_t *max_val_len, size_t *key_value_count) {
return rbd_mirror_peer_site_get_attributes(
p, uuid, keys, max_key_len, values, max_val_len, key_value_count);
}
extern "C" int rbd_mirror_peer_set_attributes(
rados_ioctx_t p, const char *uuid, const char *keys, const char *values,
size_t count) {
return rbd_mirror_peer_site_set_attributes(
p, uuid, keys, values, count);
}
extern "C" int rbd_mirror_image_status_list(rados_ioctx_t p,
const char *start_id, size_t max, char **image_ids,
rbd_mirror_image_status_t *images, size_t *len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, librbd::mirror_image_global_status_t> cpp_images;
int r = librbd::api::Mirror<>::image_global_status_list(
io_ctx, start_id, max, &cpp_images);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : cpp_images) {
ceph_assert(i < max);
const std::string &image_id = it.first;
image_ids[i] = strdup(image_id.c_str());
mirror_image_global_status_cpp_to_c(it.second, &images[i]);
i++;
}
*len = i;
return 0;
}
extern "C" void rbd_mirror_image_status_list_cleanup(char **image_ids,
rbd_mirror_image_status_t *images, size_t len) {
for (size_t i = 0; i < len; i++) {
free(image_ids[i]);
free(images[i].name);
rbd_mirror_image_get_info_cleanup(&images[i].info);
free(images[i].description);
}
}
#pragma GCC diagnostic pop
extern "C" int rbd_mirror_image_status_summary(rados_ioctx_t p,
rbd_mirror_image_status_state_t *states, int *counts, size_t *maxlen) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<librbd::mirror_image_status_state_t, int> states_;
int r = librbd::api::Mirror<>::image_status_summary(io_ctx, &states_);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : states_) {
if (i == *maxlen) {
return -ERANGE;
}
states[i] = it.first;
counts[i] = it.second;
i++;
}
*maxlen = i;
return 0;
}
extern "C" int rbd_mirror_image_instance_id_list(
rados_ioctx_t p, const char *start_id, size_t max, char **image_ids,
char **instance_ids, size_t *len) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::string> cpp_instance_ids;
int r = librbd::api::Mirror<>::image_instance_id_list(io_ctx, start_id, max,
&cpp_instance_ids);
if (r < 0) {
return r;
}
size_t i = 0;
for (auto &it : cpp_instance_ids) {
ceph_assert(i < max);
image_ids[i] = strdup(it.first.c_str());
instance_ids[i] = strdup(it.second.c_str());
i++;
}
*len = i;
return 0;
}
extern "C" void rbd_mirror_image_instance_id_list_cleanup(
char **image_ids, char **instance_ids, size_t len) {
for (size_t i = 0; i < len; i++) {
free(image_ids[i]);
free(instance_ids[i]);
}
}
extern "C" int rbd_mirror_image_info_list(
rados_ioctx_t p, rbd_mirror_image_mode_t *mode_filter,
const char *start_id, size_t max, char **image_ids,
rbd_mirror_image_mode_t *mode_entries,
rbd_mirror_image_info_t *info_entries, size_t *num_entries) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::map<std::string, std::pair<librbd::mirror_image_mode_t,
librbd::mirror_image_info_t>> cpp_entries;
int r = librbd::api::Mirror<>::image_info_list(io_ctx, mode_filter, start_id,
max, &cpp_entries);
if (r < 0) {
return r;
}
ceph_assert(cpp_entries.size() <= max);
for (auto &it : cpp_entries) {
*(image_ids++) = strdup(it.first.c_str());
*(mode_entries++) = it.second.first;
mirror_image_info_cpp_to_c(it.second.second, info_entries++);
}
*num_entries = cpp_entries.size();
return 0;
}
extern "C" void rbd_mirror_image_info_list_cleanup(
char **image_ids, rbd_mirror_image_info_t *info_entries,
size_t num_entries) {
for (size_t i = 0; i < num_entries; i++) {
free(*(image_ids++));
rbd_mirror_image_get_info_cleanup(info_entries++);
}
}
/* helpers */
extern "C" void rbd_image_spec_cleanup(rbd_image_spec_t *image)
{
free(image->id);
free(image->name);
}
extern "C" void rbd_image_spec_list_cleanup(rbd_image_spec_t *images,
size_t num_images)
{
for (size_t idx = 0; idx < num_images; ++idx) {
rbd_image_spec_cleanup(&images[idx]);
}
}
extern "C" void rbd_linked_image_spec_cleanup(rbd_linked_image_spec_t *image)
{
free(image->pool_name);
free(image->pool_namespace);
free(image->image_id);
free(image->image_name);
}
extern "C" void rbd_linked_image_spec_list_cleanup(
rbd_linked_image_spec_t *images, size_t num_images)
{
for (size_t idx = 0; idx < num_images; ++idx) {
rbd_linked_image_spec_cleanup(&images[idx]);
}
}
extern "C" void rbd_snap_spec_cleanup(rbd_snap_spec_t *snap)
{
free(snap->name);
}
/* images */
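// rbd_list() packs image names into the caller's buffer as consecutive
// NUL-terminated strings and follows the same -ERANGE sizing handshake as
// the string getters above; rbd_list2() is the structured alternative.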
extern "C" int rbd_list(rados_ioctx_t p, char *names, size_t *size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
std::vector<librbd::image_spec_t> cpp_image_specs;
int r = librbd::api::Image<>::list_images(io_ctx, &cpp_image_specs);
if (r < 0) {
tracepoint(librbd, list_exit, r, *size);
return r;
}
size_t expected_size = 0;
for (auto& it : cpp_image_specs) {
expected_size += it.name.size() + 1;
}
if (*size < expected_size) {
*size = expected_size;
tracepoint(librbd, list_exit, -ERANGE, *size);
return -ERANGE;
}
if (names == NULL) {
tracepoint(librbd, list_exit, -EINVAL, *size);
return -EINVAL;
}
for (auto& it : cpp_image_specs) {
const char* name = it.name.c_str();
tracepoint(librbd, list_entry, name);
strcpy(names, name);
names += strlen(names) + 1;
}
tracepoint(librbd, list_exit, (int)expected_size, *size);
return (int)expected_size;
}
extern "C" int rbd_list2(rados_ioctx_t p, rbd_image_spec_t *images,
size_t *size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *size);
std::vector<librbd::image_spec_t> cpp_image_specs;
int r = librbd::api::Image<>::list_images(io_ctx, &cpp_image_specs);
if (r < 0) {
tracepoint(librbd, list_exit, r, *size);
return r;
}
size_t expected_size = cpp_image_specs.size();
if (*size < expected_size) {
*size = expected_size;
tracepoint(librbd, list_exit, -ERANGE, *size);
return -ERANGE;
}
*size = expected_size;
for (size_t idx = 0; idx < expected_size; ++idx) {
images[idx].id = strdup(cpp_image_specs[idx].id.c_str());
images[idx].name = strdup(cpp_image_specs[idx].name.c_str());
}
tracepoint(librbd, list_exit, 0, *size);
return 0;
}
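// Every rbd_image_spec_t filled in by rbd_list2() owns strdup()'d strings;
// callers must release them with rbd_image_spec_list_cleanup() (see the
// helpers section above).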
extern "C" int rbd_create(rados_ioctx_t p, const char *name, uint64_t size, int *order)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, *order);
int r = librbd::create(io_ctx, name, size, order);
tracepoint(librbd, create_exit, r, *order);
return r;
}
extern "C" int rbd_create2(rados_ioctx_t p, const char *name,
uint64_t size, uint64_t features,
int *order)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create2_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order);
int r = librbd::create(io_ctx, name, size, false, features, order, 0, 0);
tracepoint(librbd, create2_exit, r, *order);
return r;
}
extern "C" int rbd_create3(rados_ioctx_t p, const char *name,
uint64_t size, uint64_t features,
int *order,
uint64_t stripe_unit, uint64_t stripe_count)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create3_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, features, *order, stripe_unit, stripe_count);
int r = librbd::create(io_ctx, name, size, false, features, order,
stripe_unit, stripe_count);
tracepoint(librbd, create3_exit, r, *order);
return r;
}
extern "C" int rbd_create4(rados_ioctx_t p, const char *name,
uint64_t size, rbd_image_options_t opts)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, create4_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name, size, opts);
librbd::ImageOptions opts_(opts);
int r = librbd::create(io_ctx, name, "", size, opts_, "", "", false);
tracepoint(librbd, create4_exit, r);
return r;
}
extern "C" int rbd_clone(rados_ioctx_t p_ioctx, const char *p_name,
const char *p_snap_name, rados_ioctx_t c_ioctx,
const char *c_name, uint64_t features, int *c_order)
{
librados::IoCtx p_ioc, c_ioc;
librados::IoCtx::from_rados_ioctx_t(p_ioctx, p_ioc);
librados::IoCtx::from_rados_ioctx_t(c_ioctx, c_ioc);
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioc));
tracepoint(librbd, clone_enter, p_ioc.get_pool_name().c_str(), p_ioc.get_id(), p_name, p_snap_name, c_ioc.get_pool_name().c_str(), c_ioc.get_id(), c_name, features);
int r = librbd::clone(p_ioc, p_name, p_snap_name, c_ioc, c_name,
features, c_order, 0, 0);
tracepoint(librbd, clone_exit, r, *c_order);
return r;
}
extern "C" int rbd_clone2(rados_ioctx_t p_ioctx, const char *p_name,
const char *p_snap_name, rados_ioctx_t c_ioctx,
const char *c_name, uint64_t features, int *c_order,
uint64_t stripe_unit, int stripe_count)
{
librados::IoCtx p_ioc, c_ioc;
librados::IoCtx::from_rados_ioctx_t(p_ioctx, p_ioc);
librados::IoCtx::from_rados_ioctx_t(c_ioctx, c_ioc);
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioc));
tracepoint(librbd, clone2_enter, p_ioc.get_pool_name().c_str(), p_ioc.get_id(), p_name, p_snap_name, c_ioc.get_pool_name().c_str(), c_ioc.get_id(), c_name, features, stripe_unit, stripe_count);
int r = librbd::clone(p_ioc, p_name, p_snap_name, c_ioc, c_name,
features, c_order, stripe_unit, stripe_count);
tracepoint(librbd, clone2_exit, r, *c_order);
return r;
}
extern "C" int rbd_clone3(rados_ioctx_t p_ioctx, const char *p_name,
const char *p_snap_name, rados_ioctx_t c_ioctx,
const char *c_name, rbd_image_options_t c_opts)
{
librados::IoCtx p_ioc, c_ioc;
librados::IoCtx::from_rados_ioctx_t(p_ioctx, p_ioc);
librados::IoCtx::from_rados_ioctx_t(c_ioctx, c_ioc);
TracepointProvider::initialize<tracepoint_traits>(get_cct(p_ioc));
tracepoint(librbd, clone3_enter, p_ioc.get_pool_name().c_str(), p_ioc.get_id(), p_name, p_snap_name, c_ioc.get_pool_name().c_str(), c_ioc.get_id(), c_name, c_opts);
librbd::ImageOptions c_opts_(c_opts);
int r = librbd::clone(p_ioc, nullptr, p_name, p_snap_name, c_ioc, nullptr,
c_name, c_opts_, "", "");
tracepoint(librbd, clone3_exit, r);
return r;
}
extern "C" int rbd_remove(rados_ioctx_t p, const char *name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::remove(io_ctx, name, prog_ctx);
tracepoint(librbd, remove_exit, r);
return r;
}
extern "C" int rbd_remove_with_progress(rados_ioctx_t p, const char *name,
librbd_progress_fn_t cb, void *cbdata)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, remove_enter, io_ctx.get_pool_name().c_str(), io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Image<>::remove(io_ctx, name, prog_ctx);
tracepoint(librbd, remove_exit, r);
return r;
}
extern "C" int rbd_trash_move(rados_ioctx_t p, const char *name,
uint64_t delay) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_move_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Trash<>::move(io_ctx, RBD_TRASH_IMAGE_SOURCE_USER, name,
delay);
tracepoint(librbd, trash_move_exit, r);
return r;
}
extern "C" int rbd_trash_get(rados_ioctx_t io, const char *id,
rbd_trash_image_info_t *info) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
librbd::trash_image_info_t cpp_info;
int r = librbd::api::Trash<>::get(io_ctx, id, &cpp_info);
if (r < 0) {
return r;
}
trash_image_info_cpp_to_c(cpp_info, info);
return 0;
}
extern "C" void rbd_trash_get_cleanup(rbd_trash_image_info_t *info) {
free(info->id);
free(info->name);
}
extern "C" int rbd_trash_list(rados_ioctx_t p, rbd_trash_image_info_t *entries,
size_t *num_entries) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_list_enter,
io_ctx.get_pool_name().c_str(), io_ctx.get_id());
// FIPS zeroization audit 20191117: this memset is not security related.
memset(entries, 0, sizeof(*entries) * *num_entries);
vector<librbd::trash_image_info_t> cpp_entries;
int r = librbd::api::Trash<>::list(io_ctx, cpp_entries, true);
if (r < 0) {
tracepoint(librbd, trash_list_exit, r, *num_entries);
return r;
}
if (*num_entries < cpp_entries.size()) {
*num_entries = cpp_entries.size();
tracepoint(librbd, trash_list_exit, -ERANGE, *num_entries);
return -ERANGE;
}
  int i = 0;
for (const auto &entry : cpp_entries) {
trash_image_info_cpp_to_c(entry, &entries[i++]);
}
*num_entries = cpp_entries.size();
return *num_entries;
}
extern "C" void rbd_trash_list_cleanup(rbd_trash_image_info_t *entries,
size_t num_entries) {
  for (size_t i = 0; i < num_entries; i++) {
rbd_trash_get_cleanup(&entries[i]);
}
}
extern "C" int rbd_trash_purge(rados_ioctx_t io, time_t expire_ts,
float threshold) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
librbd::NoOpProgressContext nop_pctx;
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, nop_pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
extern "C" int rbd_trash_purge_with_progress(rados_ioctx_t io, time_t expire_ts,
float threshold, librbd_progress_fn_t cb, void* cbdata) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_purge_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), expire_ts, threshold);
librbd::CProgressContext pctx(cb, cbdata);
int r = librbd::api::Trash<>::purge(io_ctx, expire_ts, threshold, pctx);
tracepoint(librbd, trash_purge_exit, r);
return r;
}
extern "C" int rbd_trash_remove(rados_ioctx_t p, const char *image_id,
bool force) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, prog_ctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
extern "C" int rbd_trash_remove_with_progress(rados_ioctx_t p,
const char *image_id,
bool force,
librbd_progress_fn_t cb,
void *cbdata) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_id, force);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Trash<>::remove(io_ctx, image_id, force, prog_ctx);
tracepoint(librbd, trash_remove_exit, r);
return r;
}
extern "C" int rbd_trash_restore(rados_ioctx_t p, const char *id,
const char *name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, trash_undelete_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), id, name);
int r = librbd::api::Trash<>::restore(
io_ctx, librbd::api::Trash<>::ALLOWED_RESTORE_SOURCES, id, name);
tracepoint(librbd, trash_undelete_exit, r);
return r;
}
extern "C" int rbd_namespace_create(rados_ioctx_t io,
const char *namespace_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Namespace<>::create(io_ctx, namespace_name);
}
extern "C" int rbd_namespace_remove(rados_ioctx_t io,
const char *namespace_name) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Namespace<>::remove(io_ctx, namespace_name);
}
extern "C" int rbd_namespace_list(rados_ioctx_t io, char *names, size_t *size) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
if (names == nullptr || size == nullptr) {
return -EINVAL;
}
std::vector<std::string> cpp_names;
int r = librbd::api::Namespace<>::list(io_ctx, &cpp_names);
if (r < 0) {
return r;
}
size_t expected_size = 0;
for (size_t i = 0; i < cpp_names.size(); i++) {
expected_size += cpp_names[i].size() + 1;
}
if (*size < expected_size) {
*size = expected_size;
return -ERANGE;
}
*size = expected_size;
for (int i = 0; i < (int)cpp_names.size(); i++) {
const char* name = cpp_names[i].c_str();
strcpy(names, name);
names += strlen(names) + 1;
}
return (int)expected_size;
}
extern "C" int rbd_namespace_exists(rados_ioctx_t io,
const char *namespace_name,
bool *exists) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Namespace<>::exists(io_ctx, namespace_name, exists);
}
extern "C" int rbd_pool_init(rados_ioctx_t io, bool force) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
return librbd::api::Pool<>::init(io_ctx, force);
}
extern "C" void rbd_pool_stats_create(rbd_pool_stats_t *stats) {
*stats = reinterpret_cast<rbd_pool_stats_t>(
new librbd::api::Pool<>::StatOptions{});
}
extern "C" void rbd_pool_stats_destroy(rbd_pool_stats_t stats) {
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(stats);
delete pool_stat_options;
}
extern "C" int rbd_pool_stats_option_add_uint64(rbd_pool_stats_t stats,
int stat_option,
uint64_t* stat_val) {
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(stats);
return librbd::api::Pool<>::add_stat_option(
pool_stat_options, static_cast<rbd_pool_stat_option_t>(stat_option),
stat_val);
}
extern "C" int rbd_pool_stats_get(
rados_ioctx_t io, rbd_pool_stats_t pool_stats) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(io, io_ctx);
auto pool_stat_options =
reinterpret_cast<librbd::api::Pool<>::StatOptions*>(pool_stats);
return librbd::api::Pool<>::get_stats(io_ctx, pool_stat_options);
}
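// Illustrative lifecycle for the opaque pool-stats handle; the option used
// here is one example of rbd_pool_stat_option_t:
//
//   uint64_t image_count = 0;
//   rbd_pool_stats_t stats;
//   rbd_pool_stats_create(&stats);
//   rbd_pool_stats_option_add_uint64(stats, RBD_POOL_STAT_OPTION_IMAGES,
//                                    &image_count);
//   int r = rbd_pool_stats_get(io, stats);
//   rbd_pool_stats_destroy(stats);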
extern "C" int rbd_copy(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
librbd::ImageOptions opts;
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy_exit, r);
return r;
}
extern "C" int rbd_copy2(rbd_image_t srcp, rbd_image_t destp)
{
librbd::ImageCtx *src = (librbd::ImageCtx *)srcp;
librbd::ImageCtx *dest = (librbd::ImageCtx *)destp;
tracepoint(librbd, copy2_enter, src, src->name.c_str(), src->snap_name.c_str(), src->read_only, dest, dest->name.c_str(), dest->snap_name.c_str(), dest->read_only);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(src, dest, prog_ctx, 0);
tracepoint(librbd, copy2_exit, r);
return r;
}
extern "C" int rbd_copy3(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname, rbd_image_options_t c_opts)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, c_opts);
librbd::ImageOptions c_opts_(c_opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, c_opts_, prog_ctx, 0);
tracepoint(librbd, copy3_exit, r);
return r;
}
extern "C" int rbd_copy4(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname, rbd_image_options_t c_opts, size_t sparse_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, c_opts, sparse_size);
librbd::ImageOptions c_opts_(c_opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::copy(ictx, dest_io_ctx, destname, c_opts_, prog_ctx, sparse_size);
tracepoint(librbd, copy4_exit, r);
return r;
}
extern "C" int rbd_copy_with_progress(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname);
librbd::ImageOptions opts;
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(ictx, dest_io_ctx, destname, opts, prog_ctx, 0);
tracepoint(librbd, copy_exit, ret);
return ret;
}
extern "C" int rbd_copy_with_progress2(rbd_image_t srcp, rbd_image_t destp,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *src = (librbd::ImageCtx *)srcp;
librbd::ImageCtx *dest = (librbd::ImageCtx *)destp;
tracepoint(librbd, copy2_enter, src, src->name.c_str(), src->snap_name.c_str(), src->read_only, dest, dest->name.c_str(), dest->snap_name.c_str(), dest->read_only);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(src, dest, prog_ctx, 0);
tracepoint(librbd, copy2_exit, ret);
return ret;
}
extern "C" int rbd_copy_with_progress3(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname,
rbd_image_options_t dest_opts,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy3_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, dest_opts);
librbd::ImageOptions dest_opts_(dest_opts);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(ictx, dest_io_ctx, destname, dest_opts_, prog_ctx, 0);
tracepoint(librbd, copy3_exit, ret);
return ret;
}
extern "C" int rbd_copy_with_progress4(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname,
rbd_image_options_t dest_opts,
librbd_progress_fn_t fn, void *data, size_t sparse_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, copy4_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(), destname, dest_opts, sparse_size);
librbd::ImageOptions dest_opts_(dest_opts);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::copy(ictx, dest_io_ctx, destname, dest_opts_, prog_ctx, sparse_size);
tracepoint(librbd, copy4_exit, ret);
return ret;
}
extern "C" int rbd_deep_copy(rbd_image_t image, rados_ioctx_t dest_p,
const char *destname, rbd_image_options_t c_opts)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, c_opts);
librbd::ImageOptions opts(c_opts);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, r);
return r;
}
extern "C" int rbd_deep_copy_with_progress(rbd_image_t image,
rados_ioctx_t dest_p,
const char *destname,
rbd_image_options_t dest_opts,
librbd_progress_fn_t fn, void *data)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, deep_copy_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only,
dest_io_ctx.get_pool_name().c_str(), dest_io_ctx.get_id(),
destname, dest_opts);
librbd::ImageOptions opts(dest_opts);
librbd::CProgressContext prog_ctx(fn, data);
int ret = librbd::api::Image<>::deep_copy(ictx, dest_io_ctx, destname, opts,
prog_ctx);
tracepoint(librbd, deep_copy_exit, ret);
return ret;
}
extern "C" int rbd_encryption_format(rbd_image_t image,
rbd_encryption_format_t format,
rbd_encryption_options_t opts,
size_t opts_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::encryption_format(
ictx, format, opts, opts_size, true);
}
extern "C" int rbd_encryption_load(rbd_image_t image,
rbd_encryption_format_t format,
rbd_encryption_options_t opts,
size_t opts_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::encryption_spec_t spec = {format, opts, opts_size};
return librbd::api::Image<>::encryption_load(ictx, &spec, 1, true);
}
extern "C" int rbd_encryption_load2(rbd_image_t image,
const rbd_encryption_spec_t *specs,
size_t spec_count)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::encryption_load(ictx, specs, spec_count, true);
}
extern "C" int rbd_flatten(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
extern "C" int rbd_flatten_with_progress(rbd_image_t image,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, flatten_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->flatten(prog_ctx);
tracepoint(librbd, flatten_exit, r);
return r;
}
extern "C" int rbd_sparsify(rbd_image_t image, size_t sparse_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
extern "C" int rbd_sparsify_with_progress(rbd_image_t image, size_t sparse_size,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, sparsify_enter, ictx, ictx->name.c_str(), sparse_size,
ictx->id.c_str());
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->sparsify(sparse_size, prog_ctx);
tracepoint(librbd, sparsify_exit, r);
return r;
}
extern "C" int rbd_rename(rados_ioctx_t src_p, const char *srcname,
const char *destname)
{
librados::IoCtx src_io_ctx;
librados::IoCtx::from_rados_ioctx_t(src_p, src_io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(src_io_ctx));
tracepoint(librbd, rename_enter, src_io_ctx.get_pool_name().c_str(), src_io_ctx.get_id(), srcname, destname);
int r = librbd::rename(src_io_ctx, srcname, destname);
tracepoint(librbd, rename_exit, r);
return r;
}
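/*
 * Live migration: prepare links a destination image to the source,
 * execute performs the block copy, and commit (or abort) finalizes (or
 * rolls back) the move.  A sketch of the intended call sequence, with
 * error handling elided and the "img"/ioctx names being placeholders:
 *
 *   rbd_image_options_t opts;
 *   rbd_image_options_create(&opts);
 *   rbd_migration_prepare(src_ioctx, "img", dst_ioctx, "img", opts);
 *   rbd_image_options_destroy(opts);
 *   rbd_migration_execute(dst_ioctx, "img");  // copy the data
 *   rbd_migration_commit(dst_ioctx, "img");   // remove the source
 */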
extern "C" int rbd_migration_prepare(rados_ioctx_t p, const char *image_name,
rados_ioctx_t dest_p,
const char *dest_image_name,
rbd_image_options_t opts_)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
tracepoint(librbd, migration_prepare_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name, dest_io_ctx.get_pool_name().c_str(),
dest_io_ctx.get_id(), dest_image_name, opts_);
librbd::ImageOptions opts(opts_);
int r = librbd::api::Migration<>::prepare(io_ctx, image_name, dest_io_ctx,
dest_image_name, opts);
tracepoint(librbd, migration_prepare_exit, r);
return r;
}
extern "C" int rbd_migration_prepare_import(
const char *source_spec, rados_ioctx_t dest_p,
const char *dest_image_name, rbd_image_options_t opts_) {
librados::IoCtx dest_io_ctx;
librados::IoCtx::from_rados_ioctx_t(dest_p, dest_io_ctx);
librbd::ImageOptions opts(opts_);
return librbd::api::Migration<>::prepare_import(source_spec, dest_io_ctx,
dest_image_name, opts);
}
extern "C" int rbd_migration_execute(rados_ioctx_t p, const char *image_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::execute(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
extern "C" int rbd_migration_execute_with_progress(rados_ioctx_t p,
const char *name,
librbd_progress_fn_t fn,
void *data)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_execute_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(fn, data);
int r = librbd::api::Migration<>::execute(io_ctx, name, prog_ctx);
tracepoint(librbd, migration_execute_exit, r);
return r;
}
extern "C" int rbd_migration_abort(rados_ioctx_t p, const char *image_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::abort(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
extern "C" int rbd_migration_abort_with_progress(rados_ioctx_t p,
const char *name,
librbd_progress_fn_t fn,
void *data)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_abort_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(fn, data);
int r = librbd::api::Migration<>::abort(io_ctx, name, prog_ctx);
tracepoint(librbd, migration_abort_exit, r);
return r;
}
extern "C" int rbd_migration_commit(rados_ioctx_t p, const char *image_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Migration<>::commit(io_ctx, image_name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
extern "C" int rbd_migration_commit_with_progress(rados_ioctx_t p,
const char *name,
librbd_progress_fn_t fn,
void *data)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_commit_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
librbd::CProgressContext prog_ctx(fn, data);
int r = librbd::api::Migration<>::commit(io_ctx, name, prog_ctx);
tracepoint(librbd, migration_commit_exit, r);
return r;
}
extern "C" int rbd_migration_status(rados_ioctx_t p, const char *image_name,
rbd_image_migration_status_t *status,
size_t status_size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, migration_status_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), image_name);
if (status_size != sizeof(rbd_image_migration_status_t)) {
tracepoint(librbd, migration_status_exit, -ERANGE);
return -ERANGE;
}
librbd::image_migration_status_t cpp_status;
int r = librbd::api::Migration<>::status(io_ctx, image_name, &cpp_status);
if (r >= 0) {
status->source_pool_id = cpp_status.source_pool_id;
status->source_pool_namespace =
strdup(cpp_status.source_pool_namespace.c_str());
status->source_image_name = strdup(cpp_status.source_image_name.c_str());
status->source_image_id = strdup(cpp_status.source_image_id.c_str());
status->dest_pool_id = cpp_status.dest_pool_id;
status->dest_pool_namespace =
strdup(cpp_status.dest_pool_namespace.c_str());
status->dest_image_name = strdup(cpp_status.dest_image_name.c_str());
status->dest_image_id = strdup(cpp_status.dest_image_id.c_str());
status->state = cpp_status.state;
status->state_description = strdup(cpp_status.state_description.c_str());
}
tracepoint(librbd, migration_status_exit, r);
return r;
}
extern "C" void rbd_migration_status_cleanup(rbd_image_migration_status_t *s)
{
free(s->source_pool_namespace);
free(s->source_image_name);
free(s->source_image_id);
free(s->dest_pool_namespace);
free(s->dest_image_name);
free(s->dest_image_id);
free(s->state_description);
}
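/*
 * The pool metadata accessors below follow this C API's usual size
 * negotiation: the caller passes a buffer plus an in/out length, and on
 * -ERANGE the required length is written back for a retry.  Illustrative
 * only ("conf_rbd_cache" is merely an example key):
 *
 *   size_t len = 0;
 *   int r = rbd_pool_metadata_get(ioctx, "conf_rbd_cache", NULL, &len);
 *   if (r == -ERANGE) {
 *     char *buf = (char *)malloc(len);
 *     r = rbd_pool_metadata_get(ioctx, "conf_rbd_cache", buf, &len);
 *     free(buf);
 *   }
 */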
extern "C" int rbd_pool_metadata_get(rados_ioctx_t p, const char *key,
char *value, size_t *vallen)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
string val_s;
  int r = librbd::api::PoolMetadata<>::get(io_ctx, key, &val_s);
  if (r < 0) {
    // propagate lookup errors instead of masking them as -ERANGE below
    return r;
  }
  if (*vallen < val_s.size() + 1) {
    r = -ERANGE;
    *vallen = val_s.size() + 1;
  } else {
    strncpy(value, val_s.c_str(), val_s.size() + 1);
  }
return r;
}
extern "C" int rbd_pool_metadata_set(rados_ioctx_t p, const char *key,
const char *value)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
int r = librbd::api::PoolMetadata<>::set(io_ctx, key, value);
return r;
}
extern "C" int rbd_pool_metadata_remove(rados_ioctx_t p, const char *key)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
int r = librbd::api::PoolMetadata<>::remove(io_ctx, key);
return r;
}
extern "C" int rbd_pool_metadata_list(rados_ioctx_t p, const char *start,
uint64_t max, char *key, size_t *key_len,
char *value, size_t *val_len)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
map<string, bufferlist> pairs;
int r = librbd::api::PoolMetadata<>::list(io_ctx, start, max, &pairs);
if (r < 0) {
return r;
}
size_t key_total_len = 0, val_total_len = 0;
for (auto &it : pairs) {
key_total_len += it.first.size() + 1;
val_total_len += it.second.length() + 1;
}
if (*key_len < key_total_len || *val_len < val_total_len) {
*key_len = key_total_len;
*val_len = val_total_len;
return -ERANGE;
}
*key_len = key_total_len;
*val_len = val_total_len;
char *key_p = key, *value_p = value;
for (auto &it : pairs) {
strncpy(key_p, it.first.c_str(), it.first.size() + 1);
key_p += it.first.size() + 1;
strncpy(value_p, it.second.c_str(), it.second.length());
value_p += it.second.length();
*value_p = '\0';
value_p++;
}
return 0;
}
extern "C" int rbd_config_pool_list(rados_ioctx_t p,
rbd_config_option_t *options,
int *max_options) {
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
std::vector<librbd::config_option_t> option_vector;
int r = librbd::api::Config<>::list(io_ctx, &option_vector);
if (r < 0) {
return r;
}
if (*max_options < static_cast<int>(option_vector.size())) {
*max_options = static_cast<int>(option_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(option_vector.size()); ++i) {
config_option_cpp_to_c(option_vector[i], &options[i]);
}
*max_options = static_cast<int>(option_vector.size());
return 0;
}
extern "C" void rbd_config_pool_list_cleanup(rbd_config_option_t *options,
int max_options) {
for (int i = 0; i < max_options; ++i) {
config_option_cleanup(options[i]);
}
}
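/*
 * Image open variants: by name or by id, read/write or read-only, and
 * synchronous or asynchronous.  The rbd_aio_open*() variants return 0
 * immediately and deliver the actual open result through the supplied
 * completion; *image is only valid once the completion reports success.
 */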
extern "C" int rbd_open(rados_ioctx_t p, const char *name, rbd_image_t *image,
const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
false);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_open_by_id(rados_ioctx_t p, const char *id,
rbd_image_t *image, const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
false);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_aio_open(rados_ioctx_t p, const char *name,
rbd_image_t *image, const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
false);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_aio_open_by_id(rados_ioctx_t p, const char *id,
rbd_image_t *image, const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
false);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only,
comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_open_read_only(rados_ioctx_t p, const char *name,
rbd_image_t *image, const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
true);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_open_by_id_read_only(rados_ioctx_t p, const char *id,
rbd_image_t *image, const char *snap_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
true);
tracepoint(librbd, open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = ictx->state->open(0);
if (r >= 0) {
*image = (rbd_image_t)ictx;
}
tracepoint(librbd, open_image_exit, r);
return r;
}
extern "C" int rbd_aio_open_read_only(rados_ioctx_t p, const char *name,
rbd_image_t *image, const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx(name, "", snap_name, io_ctx,
true);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
extern "C" int rbd_aio_open_by_id_read_only(rados_ioctx_t p, const char *id,
rbd_image_t *image,
const char *snap_name,
rbd_completion_t c)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
librbd::ImageCtx *ictx = new librbd::ImageCtx("", id, snap_name, io_ctx,
true);
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_open_image_enter, ictx, ictx->name.c_str(),
ictx->id.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
ictx->state->open(0, new C_OpenComplete(ictx, get_aio_completion(comp),
image));
tracepoint(librbd, aio_open_image_exit, 0);
return 0;
}
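/*
 * Feature bitmask <-> string conversion.  The string form is the
 * comma-separated list of feature names also used by the CLI (e.g.
 * "layering,exclusive-lock"); unrecognized names yield -EINVAL, and
 * rbd_features_to_string() follows the usual -ERANGE size negotiation.
 */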
extern "C" int rbd_features_to_string(uint64_t features, char *str_features, size_t *size)
{
std::stringstream err;
std::string get_str_features = librbd::rbd_features_to_string(features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
  size_t expected_size = get_str_features.size();
if (*size <= expected_size) {
*size = expected_size + 1;
return -ERANGE;
}
strncpy(str_features, get_str_features.c_str(), expected_size);
str_features[expected_size] = '\0';
*size = expected_size + 1;
return 0;
}
extern "C" int rbd_features_from_string(const char *str_features, uint64_t *features)
{
std::stringstream err;
*features = librbd::rbd_features_from_string(str_features, &err);
if (!err.str().empty()) {
return -EINVAL;
}
return 0;
}
extern "C" int rbd_close(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str());
int r = ictx->state->close();
tracepoint(librbd, close_image_exit, r);
return r;
}
extern "C" int rbd_aio_close(rbd_image_t image, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_close_image_enter, ictx, ictx->name.c_str(), ictx->id.c_str(), comp->pc);
ictx->state->close(new C_AioCompletion(ictx, librbd::io::AIO_TYPE_CLOSE,
get_aio_completion(comp)));
tracepoint(librbd, aio_close_image_exit, 0);
return 0;
}
extern "C" int rbd_resize(rbd_image_t image, uint64_t size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->resize(size, true, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
extern "C" int rbd_resize2(rbd_image_t image, uint64_t size, bool allow_shrink,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->resize(size, allow_shrink, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
extern "C" int rbd_resize_with_progress(rbd_image_t image, uint64_t size,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, resize_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, size);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->resize(size, true, prog_ctx);
tracepoint(librbd, resize_exit, r);
return r;
}
extern "C" int rbd_stat(rbd_image_t image, rbd_image_info_t *info,
size_t infosize)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, stat_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::info(ictx, *info, infosize);
tracepoint(librbd, stat_exit, r, info);
return r;
}
extern "C" int rbd_get_old_format(rbd_image_t image, uint8_t *old)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_old_format_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_old_format(ictx, old);
tracepoint(librbd, get_old_format_exit, r, *old);
return r;
}
extern "C" int rbd_get_size(rbd_image_t image, uint64_t *size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_size_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_size(ictx, size);
tracepoint(librbd, get_size_exit, r, *size);
return r;
}
extern "C" int rbd_get_features(rbd_image_t image, uint64_t *features)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_features_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_features(ictx, features);
tracepoint(librbd, get_features_exit, r, *features);
return r;
}
extern "C" int rbd_update_features(rbd_image_t image, uint64_t features,
uint8_t enabled)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
bool features_enabled = enabled != 0;
tracepoint(librbd, update_features_enter, ictx, features, features_enabled);
int r = ictx->operations->update_features(features, features_enabled);
tracepoint(librbd, update_features_exit, r);
return r;
}
extern "C" int rbd_get_op_features(rbd_image_t image, uint64_t *op_features)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::get_op_features(ictx, op_features);
}
extern "C" int rbd_get_stripe_unit(rbd_image_t image, uint64_t *stripe_unit)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_stripe_unit_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
*stripe_unit = ictx->get_stripe_unit();
tracepoint(librbd, get_stripe_unit_exit, 0, *stripe_unit);
return 0;
}
extern "C" int rbd_get_stripe_count(rbd_image_t image, uint64_t *stripe_count)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_stripe_count_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
*stripe_count = ictx->get_stripe_count();
tracepoint(librbd, get_stripe_count_exit, 0, *stripe_count);
return 0;
}
extern "C" int rbd_get_create_timestamp(rbd_image_t image,
struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_create_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_create_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_create_timestamp_exit, 0, timestamp);
return 0;
}
extern "C" int rbd_get_access_timestamp(rbd_image_t image,
struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_access_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_access_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_access_timestamp_exit, 0, timestamp);
return 0;
}
extern "C" int rbd_get_modify_timestamp(rbd_image_t image,
struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_modify_timestamp_enter, ictx, ictx->name.c_str(),
ictx->read_only);
utime_t time = ictx->get_modify_timestamp();
time.to_timespec(timestamp);
tracepoint(librbd, get_modify_timestamp_exit, 0, timestamp);
return 0;
}
extern "C" int rbd_get_overlap(rbd_image_t image, uint64_t *overlap)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_overlap_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::get_overlap(ictx, overlap);
tracepoint(librbd, get_overlap_exit, r, *overlap);
return r;
}
extern "C" int rbd_get_name(rbd_image_t image, char *name, size_t *name_len)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
if (*name_len <= ictx->name.size()) {
*name_len = ictx->name.size() + 1;
return -ERANGE;
}
strncpy(name, ictx->name.c_str(), ictx->name.size());
name[ictx->name.size()] = '\0';
*name_len = ictx->name.size() + 1;
return 0;
}
extern "C" int rbd_get_id(rbd_image_t image, char *id, size_t id_len)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
if (ictx->old_format) {
return -EINVAL;
}
if (ictx->id.size() >= id_len) {
return -ERANGE;
}
strncpy(id, ictx->id.c_str(), id_len - 1);
id[id_len - 1] = '\0';
return 0;
}
extern "C" int rbd_get_block_name_prefix(rbd_image_t image, char *prefix,
size_t prefix_len)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
if (ictx->object_prefix.size() >= prefix_len) {
return -ERANGE;
}
strncpy(prefix, ictx->object_prefix.c_str(), prefix_len - 1);
prefix[prefix_len - 1] = '\0';
return 0;
}
extern "C" int64_t rbd_get_data_pool_id(rbd_image_t image)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx *>(image);
return librbd::api::Image<>::get_data_pool_id(ictx);
}
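/*
 * rbd_get_parent_info()/rbd_get_parent_info2() are the older variants
 * that copy into caller-allocated buffers; rbd_get_parent() below fills
 * specs with strdup()ed strings that the caller releases via
 * rbd_linked_image_spec_cleanup() and rbd_snap_spec_cleanup().
 */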
extern "C" int rbd_get_parent_info(rbd_image_t image,
char *parent_pool_name, size_t ppool_namelen,
char *parent_name, size_t pnamelen,
char *parent_snap_name, size_t psnap_namelen)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = librbd::api::Image<>::get_parent(ictx, &parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name) {
if (parent_image.pool_name.length() + 1 > ppool_namelen) {
r = -ERANGE;
} else {
strcpy(parent_pool_name, parent_image.pool_name.c_str());
}
}
if (parent_name) {
if (parent_image.image_name.length() + 1 > pnamelen) {
r = -ERANGE;
} else {
strcpy(parent_name, parent_image.image_name.c_str());
}
}
if (parent_snap_name) {
if (parent_snap.name.length() + 1 > psnap_namelen) {
r = -ERANGE;
} else {
strcpy(parent_snap_name, parent_snap.name.c_str());
}
}
}
if (r < 0) {
tracepoint(librbd, get_parent_info_exit, r, NULL, NULL, NULL, NULL);
return r;
}
tracepoint(librbd, get_parent_info_exit, r,
parent_image.pool_name.c_str(),
parent_image.image_name.c_str(),
parent_image.image_id.c_str(),
parent_snap.name.c_str());
return 0;
}
extern "C" int rbd_get_parent_info2(rbd_image_t image,
char *parent_pool_name,
size_t ppool_namelen,
char *parent_name, size_t pnamelen,
char *parent_id, size_t pidlen,
char *parent_snap_name,
size_t psnap_namelen)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
librbd::linked_image_spec_t parent_image;
librbd::snap_spec_t parent_snap;
int r = librbd::api::Image<>::get_parent(ictx, &parent_image, &parent_snap);
if (r >= 0) {
if (parent_pool_name) {
if (parent_image.pool_name.length() + 1 > ppool_namelen) {
r = -ERANGE;
} else {
strcpy(parent_pool_name, parent_image.pool_name.c_str());
}
}
if (parent_name) {
if (parent_image.image_name.length() + 1 > pnamelen) {
r = -ERANGE;
} else {
strcpy(parent_name, parent_image.image_name.c_str());
}
}
if (parent_id) {
if (parent_image.image_id.length() + 1 > pidlen) {
r = -ERANGE;
} else {
strcpy(parent_id, parent_image.image_id.c_str());
}
}
if (parent_snap_name) {
if (parent_snap.name.length() + 1 > psnap_namelen) {
r = -ERANGE;
} else {
strcpy(parent_snap_name, parent_snap.name.c_str());
}
}
}
if (r < 0) {
tracepoint(librbd, get_parent_info_exit, r, NULL, NULL, NULL, NULL);
return r;
}
tracepoint(librbd, get_parent_info_exit, r,
parent_image.pool_name.c_str(),
parent_image.image_name.c_str(),
parent_image.image_id.c_str(),
parent_snap.name.c_str());
return 0;
}
extern "C" int rbd_get_parent(rbd_image_t image,
rbd_linked_image_spec_t *parent_image,
rbd_snap_spec_t *parent_snap)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, get_parent_info_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
librbd::linked_image_spec_t cpp_parent_image;
librbd::snap_spec_t cpp_parent_snap;
int r = librbd::api::Image<>::get_parent(ictx, &cpp_parent_image,
&cpp_parent_snap);
if (r < 0) {
// FIPS zeroization audit 20191117: these memsets are not security related.
memset(parent_image, 0, sizeof(rbd_linked_image_spec_t));
memset(parent_snap, 0, sizeof(rbd_snap_spec_t));
} else {
*parent_image = {
.pool_id = cpp_parent_image.pool_id,
.pool_name = strdup(cpp_parent_image.pool_name.c_str()),
.pool_namespace = strdup(cpp_parent_image.pool_namespace.c_str()),
.image_id = strdup(cpp_parent_image.image_id.c_str()),
.image_name = strdup(cpp_parent_image.image_name.c_str()),
.trash = cpp_parent_image.trash};
*parent_snap = {
.id = cpp_parent_snap.id,
.namespace_type = cpp_parent_snap.namespace_type,
.name = strdup(cpp_parent_snap.name.c_str())};
}
tracepoint(librbd, get_parent_info_exit, r,
parent_image->pool_name,
parent_image->image_name,
parent_image->image_id,
parent_snap->name);
return r;
}
extern "C" int rbd_get_migration_source_spec(rbd_image_t image,
char* source_spec,
size_t* max_len)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
std::string cpp_source_spec;
int r = librbd::api::Migration<>::get_source_spec(ictx, &cpp_source_spec);
if (r < 0) {
return r;
}
size_t expected_size = cpp_source_spec.size();
if (expected_size >= *max_len) {
*max_len = expected_size + 1;
return -ERANGE;
}
strncpy(source_spec, cpp_source_spec.c_str(), expected_size);
source_spec[expected_size] = '\0';
*max_len = expected_size + 1;
return 0;
}
extern "C" int rbd_get_flags(rbd_image_t image, uint64_t *flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, get_flags_enter, ictx);
int r = librbd::get_flags(ictx, flags);
tracepoint(librbd, get_flags_exit, ictx, r, *flags);
return r;
}
extern "C" int rbd_get_group(rbd_image_t image, rbd_group_info_t *group_info,
size_t group_info_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, image_get_group_enter, ictx->name.c_str());
if (group_info_size != sizeof(rbd_group_info_t)) {
tracepoint(librbd, image_get_group_exit, -ERANGE);
return -ERANGE;
}
librbd::group_info_t cpp_group_info;
int r = librbd::api::Group<>::image_get_group(ictx, &cpp_group_info);
if (r >= 0) {
group_info_cpp_to_c(cpp_group_info, group_info);
} else {
group_info->name = NULL;
}
tracepoint(librbd, image_get_group_exit, r);
return r;
}
extern "C" int rbd_set_image_notification(rbd_image_t image, int fd, int type)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, set_image_notification_enter, ictx, fd, type);
int r = librbd::set_image_notification(ictx, fd, type);
tracepoint(librbd, set_image_notification_exit, ictx, r);
return r;
}
extern "C" int rbd_is_exclusive_lock_owner(rbd_image_t image, int *is_owner)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, is_exclusive_lock_owner_enter, ictx);
  bool owner = false;  // initialized since it is copied out even on error
int r = librbd::is_exclusive_lock_owner(ictx, &owner);
*is_owner = owner ? 1 : 0;
tracepoint(librbd, is_exclusive_lock_owner_exit, ictx, r, *is_owner);
return r;
}
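/*
 * Managed lock operations (acquire/release/get_owners/break) drive the
 * image's exclusive-lock feature.  They are distinct from the legacy
 * advisory rbd_lock_exclusive()/rbd_lock_shared() calls further below,
 * which take plain RADOS advisory locks on the image header.
 */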
extern "C" int rbd_lock_acquire(rbd_image_t image, rbd_lock_mode_t lock_mode)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_acquire_enter, ictx, lock_mode);
int r = librbd::lock_acquire(ictx, lock_mode);
tracepoint(librbd, lock_acquire_exit, ictx, r);
return r;
}
extern "C" int rbd_lock_release(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_release_enter, ictx);
int r = librbd::lock_release(ictx);
tracepoint(librbd, lock_release_exit, ictx, r);
return r;
}
extern "C" int rbd_lock_get_owners(rbd_image_t image,
rbd_lock_mode_t *lock_mode,
char **lock_owners,
size_t *max_lock_owners)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, lock_get_owners_enter, ictx);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(lock_owners, 0, sizeof(*lock_owners) * *max_lock_owners);
std::list<std::string> lock_owner_list;
int r = librbd::lock_get_owners(ictx, lock_mode, &lock_owner_list);
if (r >= 0) {
if (*max_lock_owners >= lock_owner_list.size()) {
*max_lock_owners = 0;
for (auto &lock_owner : lock_owner_list) {
lock_owners[(*max_lock_owners)++] = strdup(lock_owner.c_str());
}
} else {
*max_lock_owners = lock_owner_list.size();
r = -ERANGE;
}
}
tracepoint(librbd, lock_get_owners_exit, ictx, r);
return r;
}
extern "C" void rbd_lock_get_owners_cleanup(char **lock_owners,
size_t lock_owner_count)
{
for (size_t i = 0; i < lock_owner_count; ++i) {
free(lock_owners[i]);
}
}
extern "C" int rbd_lock_break(rbd_image_t image, rbd_lock_mode_t lock_mode,
const char *lock_owner)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, lock_break_enter, ictx, lock_mode, lock_owner);
int r = librbd::lock_break(ictx, lock_mode, lock_owner);
tracepoint(librbd, lock_break_exit, ictx, r);
return r;
}
extern "C" int rbd_rebuild_object_map(rbd_image_t image,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = reinterpret_cast<librbd::ImageCtx*>(image);
librbd::CProgressContext prog_ctx(cb, cbdata);
return ictx->operations->rebuild_object_map(prog_ctx);
}
/* snapshots */
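/*
 * rbd_snap_create2() accepts RBD_SNAP_CREATE_* flags (e.g. to skip the
 * quiesce hooks), whereas rbd_snap_create() applies the configured
 * defaults via get_default_snap_create_flags().
 */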
extern "C" int rbd_snap_create(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
auto flags = librbd::util::get_default_snap_create_flags(ictx);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
extern "C" int rbd_snap_create2(rbd_image_t image, const char *snap_name,
uint32_t flags, librbd_progress_fn_t cb,
void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_create_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Snapshot<>::create(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_create_exit, r);
return r;
}
extern "C" int rbd_snap_rename(rbd_image_t image, const char *srcname, const char *dstname)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_rename_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, srcname, dstname);
int r = ictx->operations->snap_rename(srcname, dstname);
tracepoint(librbd, snap_rename_exit, r);
return r;
}
extern "C" int rbd_snap_remove(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_remove_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, 0, prog_ctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
extern "C" int rbd_snap_remove2(rbd_image_t image, const char *snap_name, uint32_t flags,
librbd_progress_fn_t cb, void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_remove2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name, flags);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Snapshot<>::remove(ictx, snap_name, flags, prog_ctx);
tracepoint(librbd, snap_remove_exit, r);
return r;
}
extern "C" int rbd_snap_remove_by_id(rbd_image_t image, uint64_t snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Snapshot<>::remove(ictx, snap_id);
}
extern "C" int rbd_snap_rollback(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
extern "C" int rbd_snap_rollback_with_progress(rbd_image_t image,
const char *snap_name,
librbd_progress_fn_t cb,
void *cbdata)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_rollback_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = ictx->operations->snap_rollback(cls::rbd::UserSnapshotNamespace(), snap_name, prog_ctx);
tracepoint(librbd, snap_rollback_exit, r);
return r;
}
extern "C" int rbd_snap_list(rbd_image_t image, rbd_snap_info_t *snaps,
int *max_snaps)
{
vector<librbd::snap_info_t> cpp_snaps;
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_list_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snaps);
if (!max_snaps) {
tracepoint(librbd, snap_list_exit, -EINVAL, 0);
return -EINVAL;
}
// FIPS zeroization audit 20191117: this memset is not security related.
memset(snaps, 0, sizeof(*snaps) * *max_snaps);
int r = librbd::api::Snapshot<>::list(ictx, cpp_snaps);
if (r == -ENOENT) {
tracepoint(librbd, snap_list_exit, 0, *max_snaps);
return 0;
}
if (r < 0) {
tracepoint(librbd, snap_list_exit, r, *max_snaps);
return r;
}
if (*max_snaps < (int)cpp_snaps.size() + 1) {
*max_snaps = (int)cpp_snaps.size() + 1;
tracepoint(librbd, snap_list_exit, -ERANGE, *max_snaps);
return -ERANGE;
}
int i;
for (i = 0; i < (int)cpp_snaps.size(); i++) {
snaps[i].id = cpp_snaps[i].id;
snaps[i].size = cpp_snaps[i].size;
snaps[i].name = strdup(cpp_snaps[i].name.c_str());
if (!snaps[i].name) {
for (int j = 0; j < i; j++)
free((void *)snaps[j].name);
tracepoint(librbd, snap_list_exit, -ENOMEM, *max_snaps);
return -ENOMEM;
}
tracepoint(librbd, snap_list_entry, snaps[i].id, snaps[i].size, snaps[i].name);
}
snaps[i].id = 0;
snaps[i].size = 0;
snaps[i].name = NULL;
r = (int)cpp_snaps.size();
tracepoint(librbd, snap_list_exit, r, *max_snaps);
return r;
}
extern "C" void rbd_snap_list_end(rbd_snap_info_t *snaps)
{
tracepoint(librbd, snap_list_end_enter, snaps);
while (snaps->name) {
free((void *)snaps->name);
snaps++;
}
tracepoint(librbd, snap_list_end_exit);
}
extern "C" int rbd_snap_protect(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_protect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_protect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_protect_exit, r);
return r;
}
extern "C" int rbd_snap_unprotect(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_unprotect_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = ictx->operations->snap_unprotect(cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_unprotect_exit, r);
return r;
}
extern "C" int rbd_snap_is_protected(rbd_image_t image, const char *snap_name,
int *is_protected)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_is_protected_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
bool protected_snap;
int r = librbd::api::Snapshot<>::is_protected(ictx, snap_name, &protected_snap);
if (r < 0) {
    tracepoint(librbd, snap_is_protected_exit, r, 0);
return r;
}
*is_protected = protected_snap ? 1 : 0;
tracepoint(librbd, snap_is_protected_exit, 0, *is_protected ? 1 : 0);
return 0;
}
extern "C" int rbd_snap_get_limit(rbd_image_t image, uint64_t *limit)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_limit_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_limit(ictx, limit);
tracepoint(librbd, snap_get_limit_exit, r, *limit);
return r;
}
extern "C" int rbd_snap_exists(rbd_image_t image, const char *snapname, bool *exists)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_exists_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, snapname);
int r = librbd::api::Snapshot<>::exists(ictx, cls::rbd::UserSnapshotNamespace(), snapname, exists);
tracepoint(librbd, snap_exists_exit, r, *exists);
return r;
}
extern "C" int rbd_snap_get_timestamp(rbd_image_t image, uint64_t snap_id, struct timespec *timestamp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_timestamp_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_timestamp(ictx, snap_id, timestamp);
tracepoint(librbd, snap_get_timestamp_exit, r);
return r;
}
extern "C" int rbd_snap_set_limit(rbd_image_t image, uint64_t limit)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_set_limit_enter, ictx, ictx->name.c_str(), limit);
int r = librbd::api::Snapshot<>::set_limit(ictx, limit);
tracepoint(librbd, snap_set_limit_exit, r);
return r;
}
extern "C" int rbd_snap_set(rbd_image_t image, const char *snap_name)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_set_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, snap_name);
int r = librbd::api::Image<>::snap_set(
ictx, cls::rbd::UserSnapshotNamespace(), snap_name);
tracepoint(librbd, snap_set_exit, r);
return r;
}
extern "C" int rbd_snap_set_by_id(rbd_image_t image, uint64_t snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Image<>::snap_set(ictx, snap_id);
}
extern "C" int rbd_snap_get_name(rbd_image_t image, uint64_t snap_id, char *snapname, size_t *name_len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
std::string snap_name;
  int r = librbd::api::Snapshot<>::get_name(ictx, snap_id, &snap_name);
  if (r < 0) {
    return r;
  }
  size_t expected_size = snap_name.size();
if (*name_len <= expected_size) {
*name_len = expected_size + 1;
return -ERANGE;
}
strncpy(snapname, snap_name.c_str(), expected_size);
snapname[expected_size] = '\0';
*name_len = expected_size + 1;
return r;
}
extern "C" int rbd_snap_get_id(rbd_image_t image, const char *snapname, uint64_t *snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Snapshot<>::get_id(ictx, snapname, snap_id);
}
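/*
 * Three generations of child listing: rbd_list_children() packs
 * NUL-separated names into flat buffers, rbd_list_children2() returns an
 * rbd_child_info_t array (freed with the cleanup helpers below), and
 * rbd_list_children3() returns rbd_linked_image_spec_t entries that also
 * carry the pool namespace and trash state.
 */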
extern "C" ssize_t rbd_list_children(rbd_image_t image, char *pools,
size_t *pools_len, char *images,
size_t *images_len)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
std::vector<librbd::linked_image_spec_t> cpp_images;
int r = librbd::api::Image<>::list_children(ictx, &cpp_images);
if (r < 0) {
tracepoint(librbd, list_children_exit, r);
return r;
}
std::set<std::pair<std::string, std::string>> image_set;
for (auto& image : cpp_images) {
if (!image.trash) {
image_set.insert({image.pool_name, image.image_name});
}
}
size_t pools_total = 0;
size_t images_total = 0;
  for (const auto &it : image_set) {
pools_total += it.first.length() + 1;
images_total += it.second.length() + 1;
}
bool too_short = false;
if (pools_total > *pools_len)
too_short = true;
if (images_total > *images_len)
too_short = true;
*pools_len = pools_total;
*images_len = images_total;
if (too_short) {
tracepoint(librbd, list_children_exit, -ERANGE);
return -ERANGE;
}
char *pools_p = pools;
char *images_p = images;
  for (const auto &it : image_set) {
const char* pool = it.first.c_str();
strcpy(pools_p, pool);
pools_p += it.first.length() + 1;
const char* image = it.second.c_str();
strcpy(images_p, image);
images_p += it.second.length() + 1;
tracepoint(librbd, list_children_entry, pool, image);
}
ssize_t ret = image_set.size();
tracepoint(librbd, list_children_exit, ret);
return ret;
}
extern "C" int rbd_list_children2(rbd_image_t image,
rbd_child_info_t *children,
int *max_children)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
  if (!max_children) {
    tracepoint(librbd, list_children_exit, -EINVAL);
    return -EINVAL;
  }
  // FIPS zeroization audit 20191117: this memset is not security related.
  memset(children, 0, sizeof(*children) * *max_children);
std::vector<librbd::linked_image_spec_t> cpp_children;
int r = librbd::api::Image<>::list_children(ictx, &cpp_children);
if (r < 0) {
tracepoint(librbd, list_children_exit, r);
return r;
}
if (*max_children < (int)cpp_children.size() + 1) {
*max_children = (int)cpp_children.size() + 1;
tracepoint(librbd, list_children_exit, *max_children);
return -ERANGE;
}
int i;
for (i = 0; i < (int)cpp_children.size(); i++) {
children[i].pool_name = strdup(cpp_children[i].pool_name.c_str());
children[i].image_name = strdup(cpp_children[i].image_name.c_str());
children[i].image_id = strdup(cpp_children[i].image_id.c_str());
children[i].trash = cpp_children[i].trash;
tracepoint(librbd, list_children_entry, children[i].pool_name,
children[i].image_name);
}
children[i].pool_name = NULL;
children[i].image_name = NULL;
children[i].image_id = NULL;
r = (int)cpp_children.size();
tracepoint(librbd, list_children_exit, *max_children);
return r;
}
extern "C" void rbd_list_child_cleanup(rbd_child_info_t *child)
{
free((void *)child->pool_name);
free((void *)child->image_name);
free((void *)child->image_id);
}
extern "C" void rbd_list_children_cleanup(rbd_child_info_t *children,
size_t num_children)
{
for (size_t i=0; i < num_children; i++) {
free((void *)children[i].pool_name);
free((void *)children[i].image_name);
free((void *)children[i].image_id);
}
}
extern "C" int rbd_list_children3(rbd_image_t image,
rbd_linked_image_spec_t *images,
size_t *max_images)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
tracepoint(librbd, list_children_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *max_images);
std::vector<librbd::linked_image_spec_t> cpp_children;
int r = librbd::api::Image<>::list_children(ictx, &cpp_children);
if (r < 0) {
tracepoint(librbd, list_children_exit, r);
return r;
}
if (*max_images < cpp_children.size()) {
*max_images = cpp_children.size();
return -ERANGE;
}
*max_images = cpp_children.size();
for (size_t idx = 0; idx < cpp_children.size(); ++idx) {
images[idx] = {
.pool_id = cpp_children[idx].pool_id,
.pool_name = strdup(cpp_children[idx].pool_name.c_str()),
.pool_namespace = strdup(cpp_children[idx].pool_namespace.c_str()),
.image_id = strdup(cpp_children[idx].image_id.c_str()),
.image_name = strdup(cpp_children[idx].image_name.c_str()),
.trash = cpp_children[idx].trash};
tracepoint(librbd, list_children_entry, images[idx].pool_name,
images[idx].image_name);
}
return 0;
}
extern "C" int rbd_list_descendants(rbd_image_t image,
rbd_linked_image_spec_t *images,
size_t *max_images)
{
auto ictx = reinterpret_cast<librbd::ImageCtx*>(image);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *max_images);
std::vector<librbd::linked_image_spec_t> cpp_children;
int r = librbd::api::Image<>::list_descendants(ictx, {}, &cpp_children);
if (r < 0) {
return r;
}
if (*max_images < cpp_children.size()) {
*max_images = cpp_children.size();
return -ERANGE;
}
*max_images = cpp_children.size();
for (size_t idx = 0; idx < cpp_children.size(); ++idx) {
images[idx] = {
.pool_id = cpp_children[idx].pool_id,
.pool_name = strdup(cpp_children[idx].pool_name.c_str()),
.pool_namespace = strdup(cpp_children[idx].pool_namespace.c_str()),
.image_id = strdup(cpp_children[idx].image_id.c_str()),
.image_name = strdup(cpp_children[idx].image_name.c_str()),
.trash = cpp_children[idx].trash};
}
return 0;
}
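/* advisory locking */
/*
 * rbd_list_lockers() reports the current lock holders using the same
 * flat-buffer size negotiation as the listing calls above.  The
 * lock/unlock/break calls that follow operate on RADOS advisory locks
 * and are independent of the exclusive-lock feature handled earlier.
 */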
extern "C" ssize_t rbd_list_lockers(rbd_image_t image, int *exclusive,
char *tag, size_t *tag_len,
char *clients, size_t *clients_len,
char *cookies, size_t *cookies_len,
char *addrs, size_t *addrs_len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, list_lockers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
std::list<librbd::locker_t> lockers;
bool exclusive_bool;
string tag_str;
int r = list_lockers(ictx, &lockers, &exclusive_bool, &tag_str);
if (r < 0) {
tracepoint(librbd, list_lockers_exit, r);
return r;
}
ldout(ictx->cct, 20) << "list_lockers r = " << r << " lockers.size() = " << lockers.size() << dendl;
*exclusive = (int)exclusive_bool;
size_t clients_total = 0;
size_t cookies_total = 0;
size_t addrs_total = 0;
for (list<librbd::locker_t>::const_iterator it = lockers.begin();
it != lockers.end(); ++it) {
clients_total += it->client.length() + 1;
cookies_total += it->cookie.length() + 1;
addrs_total += it->address.length() + 1;
}
bool too_short = ((clients_total > *clients_len) ||
(cookies_total > *cookies_len) ||
(addrs_total > *addrs_len) ||
(tag_str.length() + 1 > *tag_len));
*clients_len = clients_total;
*cookies_len = cookies_total;
*addrs_len = addrs_total;
*tag_len = tag_str.length() + 1;
if (too_short) {
tracepoint(librbd, list_lockers_exit, -ERANGE);
return -ERANGE;
}
strcpy(tag, tag_str.c_str());
char *clients_p = clients;
char *cookies_p = cookies;
char *addrs_p = addrs;
for (list<librbd::locker_t>::const_iterator it = lockers.begin();
it != lockers.end(); ++it) {
const char* client = it->client.c_str();
strcpy(clients_p, client);
clients_p += it->client.length() + 1;
const char* cookie = it->cookie.c_str();
strcpy(cookies_p, cookie);
cookies_p += it->cookie.length() + 1;
const char* address = it->address.c_str();
strcpy(addrs_p, address);
addrs_p += it->address.length() + 1;
tracepoint(librbd, list_lockers_entry, client, cookie, address);
}
ssize_t ret = lockers.size();
tracepoint(librbd, list_lockers_exit, ret);
return ret;
}
extern "C" int rbd_lock_exclusive(rbd_image_t image, const char *cookie)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_exclusive_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie);
int r = librbd::lock(ictx, true, cookie ? cookie : "", "");
tracepoint(librbd, lock_exclusive_exit, r);
return r;
}
extern "C" int rbd_lock_shared(rbd_image_t image, const char *cookie,
const char *tag)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, lock_shared_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie, tag);
int r = librbd::lock(ictx, false, cookie ? cookie : "", tag ? tag : "");
tracepoint(librbd, lock_shared_exit, r);
return r;
}
extern "C" int rbd_unlock(rbd_image_t image, const char *cookie)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, unlock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, cookie);
int r = librbd::unlock(ictx, cookie ? cookie : "");
tracepoint(librbd, unlock_exit, r);
return r;
}
extern "C" int rbd_break_lock(rbd_image_t image, const char *client,
const char *cookie)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, break_lock_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, client, cookie);
int r = librbd::break_lock(ictx, client, cookie ? cookie : "");
tracepoint(librbd, break_lock_exit, r);
return r;
}
/* I/O */
extern "C" ssize_t rbd_read(rbd_image_t image, uint64_t ofs, size_t len,
char *buf)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int r = librbd::api::Io<>::read(
*ictx, ofs, len, librbd::io::ReadResult{buf, len}, 0);
tracepoint(librbd, read_exit, r);
return r;
}
extern "C" ssize_t rbd_read2(rbd_image_t image, uint64_t ofs, size_t len,
char *buf, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read2_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs, len, op_flags);
int r = librbd::api::Io<>::read(
*ictx, ofs, len, librbd::io::ReadResult{buf, len}, op_flags);
tracepoint(librbd, read_exit, r);
return r;
}
extern "C" int64_t rbd_read_iterate(rbd_image_t image, uint64_t ofs, size_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read_iterate_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
tracepoint(librbd, read_iterate_exit, r);
return r;
}
extern "C" int rbd_read_iterate2(rbd_image_t image, uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, const char *, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, read_iterate2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len);
int64_t r = librbd::read_iterate(ictx, ofs, len, cb, arg);
if (r > 0)
r = 0;
tracepoint(librbd, read_iterate2_exit, r);
return (int)r;
}
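/*
 * rbd_diff_iterate() reports extents that changed since fromsnapname
 * (or since the beginning of time when fromsnapname is NULL), invoking
 * cb(offset, length, exists, arg) per extent; rbd_diff_iterate2() adds
 * control over parent overlap (include_parent) and per-object
 * granularity (whole_object).
 */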
extern "C" int rbd_diff_iterate(rbd_image_t image,
const char *fromsnapname,
uint64_t ofs, uint64_t len,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
true, false);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs, len,
true, false, cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
extern "C" int rbd_diff_iterate2(rbd_image_t image, const char *fromsnapname,
uint64_t ofs, uint64_t len,
uint8_t include_parent, uint8_t whole_object,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, diff_iterate_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, fromsnapname, ofs, len,
include_parent != 0, whole_object != 0);
int r = librbd::api::DiffIterate<>::diff_iterate(ictx,
cls::rbd::UserSnapshotNamespace(),
fromsnapname, ofs, len,
include_parent, whole_object,
cb, arg);
tracepoint(librbd, diff_iterate_exit, r);
return r;
}
extern "C" ssize_t rbd_write(rbd_image_t image, uint64_t ofs, size_t len,
const char *buf)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, ofs, len, buf);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, nullptr));
int r = librbd::api::Io<>::write(*ictx, ofs, len, std::move(bl), 0);
tracepoint(librbd, write_exit, r);
return r;
}
extern "C" ssize_t rbd_write2(rbd_image_t image, uint64_t ofs, size_t len,
const char *buf, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, write2_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs, len, buf, op_flags);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, nullptr));
int r = librbd::api::Io<>::write(*ictx, ofs, len, std::move(bl), op_flags);
tracepoint(librbd, write_exit, r);
return r;
}
extern "C" int rbd_discard(rbd_image_t image, uint64_t ofs, uint64_t len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, discard_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs, len);
if (len > static_cast<uint64_t>(std::numeric_limits<int>::max())) {
tracepoint(librbd, discard_exit, -EINVAL);
return -EINVAL;
}
int r = librbd::api::Io<>::discard(
*ictx, ofs, len, ictx->discard_granularity_bytes);
tracepoint(librbd, discard_exit, r);
return r;
}
extern "C" ssize_t rbd_writesame(rbd_image_t image, uint64_t ofs, size_t len,
const char *buf, size_t data_len, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, ofs, len, data_len == 0 ? NULL : buf, data_len, op_flags);
if (data_len == 0 || len % data_len ||
len > static_cast<uint64_t>(std::numeric_limits<int>::max())) {
tracepoint(librbd, writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
int r = librbd::api::Io<>::write_zeroes(*ictx, ofs, len, 0, op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, data_len, nullptr));
int r = librbd::api::Io<>::write_same(
*ictx, ofs, len, std::move(bl), op_flags);
tracepoint(librbd, writesame_exit, r);
return r;
}
extern "C" ssize_t rbd_write_zeroes(rbd_image_t image, uint64_t ofs, size_t len,
int zero_flags, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Io<>::write_zeroes(*ictx, ofs, len, zero_flags, op_flags);
}
extern "C" ssize_t rbd_compare_and_write(rbd_image_t image,
uint64_t ofs, size_t len,
const char *cmp_buf,
const char *buf,
uint64_t *mismatch_off,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, ofs,
len, cmp_buf, buf, op_flags);
bufferlist cmp_bl;
cmp_bl.push_back(create_write_raw(ictx, cmp_buf, len, nullptr));
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, nullptr));
int r = librbd::api::Io<>::compare_and_write(
*ictx, ofs, len, std::move(cmp_bl), std::move(bl), mismatch_off, op_flags);
tracepoint(librbd, compare_and_write_exit, r);
return r;
}
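/*
 * AIO completions: a minimal round trip, illustrative only (an open
 * image handle and valid buf/off/len are assumed):
 *
 *   rbd_completion_t c;
 *   rbd_aio_create_completion(NULL, NULL, &c);
 *   rbd_aio_write(image, off, len, buf, c);
 *   rbd_aio_wait_for_complete(c);
 *   ssize_t r = rbd_aio_get_return_value(c);
 *   rbd_aio_release(c);
 */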
extern "C" int rbd_aio_create_completion(void *cb_arg,
rbd_callback_t complete_cb,
rbd_completion_t *c)
{
librbd::RBD::AioCompletion *rbd_comp =
new librbd::RBD::AioCompletion(cb_arg, complete_cb);
*c = (rbd_completion_t) rbd_comp;
return 0;
}
extern "C" int rbd_aio_write(rbd_image_t image, uint64_t off, size_t len,
const char *buf, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, buf, comp->pc);
auto aio_completion = get_aio_completion(comp);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, aio_completion));
librbd::api::Io<>::aio_write(
*ictx, aio_completion, off, len, std::move(bl), 0, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
extern "C" int rbd_aio_write2(rbd_image_t image, uint64_t off, size_t len,
const char *buf, rbd_completion_t c, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_write2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, buf, comp->pc, op_flags);
auto aio_completion = get_aio_completion(comp);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, aio_completion));
librbd::api::Io<>::aio_write(
*ictx, aio_completion, off, len, std::move(bl), op_flags, true);
tracepoint(librbd, aio_write_exit, 0);
return 0;
}
extern "C" int rbd_aio_writev(rbd_image_t image, const struct iovec *iov,
int iovcnt, uint64_t off, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
  size_t len = 0;
int r = get_iovec_length(iov, iovcnt, len);
tracepoint(librbd, aio_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, off, len, NULL,
comp->pc);
if (r == 0) {
auto aio_completion = get_aio_completion(comp);
auto bl = iovec_to_bufferlist(ictx, iov, iovcnt, aio_completion);
librbd::api::Io<>::aio_write(
*ictx, aio_completion, off, len, std::move(bl), 0, true);
}
tracepoint(librbd, aio_write_exit, r);
return r;
}
extern "C" int rbd_aio_read(rbd_image_t image, uint64_t off, size_t len,
char *buf, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_read_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, buf, comp->pc);
librbd::api::Io<>::aio_read(
*ictx, get_aio_completion(comp), off, len, librbd::io::ReadResult{buf, len},
0, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
extern "C" int rbd_aio_read2(rbd_image_t image, uint64_t off, size_t len,
char *buf, rbd_completion_t c, int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_read2_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, buf, comp->pc, op_flags);
librbd::api::Io<>::aio_read(
*ictx, get_aio_completion(comp), off, len, librbd::io::ReadResult{buf, len},
op_flags, true);
tracepoint(librbd, aio_read_exit, 0);
return 0;
}
extern "C" int rbd_aio_readv(rbd_image_t image, const struct iovec *iov,
int iovcnt, uint64_t off, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
  size_t len = 0;
int r = get_iovec_length(iov, iovcnt, len);
tracepoint(librbd, aio_read_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, off, len, NULL,
comp->pc);
if (r == 0) {
librbd::io::ReadResult read_result;
if (iovcnt == 1) {
read_result = librbd::io::ReadResult(
static_cast<char *>(iov[0].iov_base), iov[0].iov_len);
} else {
read_result = librbd::io::ReadResult(iov, iovcnt);
}
librbd::api::Io<>::aio_read(
*ictx, get_aio_completion(comp), off, len, std::move(read_result), 0,
true);
}
tracepoint(librbd, aio_read_exit, r);
return r;
}
extern "C" int rbd_flush(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::api::Io<>::flush(*ictx);
tracepoint(librbd, flush_exit, r);
return r;
}
extern "C" int rbd_aio_flush(rbd_image_t image, rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_flush_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, comp->pc);
librbd::api::Io<>::aio_flush(*ictx, get_aio_completion(comp), true);
tracepoint(librbd, aio_flush_exit, 0);
return 0;
}
extern "C" int rbd_aio_discard(rbd_image_t image, uint64_t off, uint64_t len,
rbd_completion_t c)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_discard_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only, off, len, comp->pc);
librbd::api::Io<>::aio_discard(
*ictx, get_aio_completion(comp), off, len,
ictx->discard_granularity_bytes, true);
tracepoint(librbd, aio_discard_exit, 0);
return 0;
}
extern "C" int rbd_aio_writesame(rbd_image_t image, uint64_t off, size_t len,
const char *buf, size_t data_len, rbd_completion_t c,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_writesame_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, data_len == 0 ? NULL : buf, data_len, comp->pc,
op_flags);
if (data_len == 0 || len % data_len) {
tracepoint(librbd, aio_writesame_exit, -EINVAL);
return -EINVAL;
}
bool discard_zero = ictx->config.get_val<bool>("rbd_discard_on_zeroed_write_same");
if (discard_zero && mem_is_zero(buf, data_len)) {
librbd::api::Io<>::aio_write_zeroes(
*ictx, get_aio_completion(comp), off, len, 0, op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
auto aio_completion = get_aio_completion(comp);
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, data_len, aio_completion));
librbd::api::Io<>::aio_write_same(
*ictx, aio_completion, off, len, std::move(bl), op_flags, true);
tracepoint(librbd, aio_writesame_exit, 0);
return 0;
}
extern "C" int rbd_aio_write_zeroes(rbd_image_t image, uint64_t off, size_t len,
rbd_completion_t c, int zero_flags,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Io<>::aio_write_zeroes(*ictx, get_aio_completion(comp), off, len,
zero_flags, op_flags, true);
return 0;
}
extern "C" ssize_t rbd_aio_compare_and_write(rbd_image_t image, uint64_t off,
size_t len, const char *cmp_buf,
const char *buf, rbd_completion_t c,
uint64_t *mismatch_off,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
tracepoint(librbd, aio_compare_and_write_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(),
ictx->read_only, off, len, cmp_buf, buf, comp->pc, op_flags);
auto aio_completion = get_aio_completion(comp);
bufferlist cmp_bl;
cmp_bl.push_back(create_write_raw(ictx, cmp_buf, len, aio_completion));
bufferlist bl;
bl.push_back(create_write_raw(ictx, buf, len, aio_completion));
librbd::api::Io<>::aio_compare_and_write(
*ictx, aio_completion, off, len, std::move(cmp_bl), std::move(bl),
mismatch_off, op_flags, false);
tracepoint(librbd, aio_compare_and_write_exit, 0);
return 0;
}
extern "C" ssize_t rbd_aio_compare_and_writev(rbd_image_t image,
uint64_t off,
const struct iovec *cmp_iov,
int cmp_iovcnt,
const struct iovec *iov,
int iovcnt,
rbd_completion_t c,
uint64_t *mismatch_off,
int op_flags)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
  size_t cmp_len = 0;
int r = get_iovec_length(cmp_iov, cmp_iovcnt, cmp_len);
tracepoint(librbd, aio_compare_and_write_enter, ictx, ictx->name.c_str(),
ictx->snap_name.c_str(), ictx->read_only, off, cmp_len, NULL, NULL,
comp->pc, op_flags);
if (r != 0) {
tracepoint(librbd, aio_compare_and_write_exit, r);
return r;
}
size_t write_len;
r = get_iovec_length(iov, iovcnt, write_len);
if (r != 0) {
tracepoint(librbd, aio_compare_and_write_exit, r);
return r;
}
if (cmp_len != write_len) {
tracepoint(librbd, aio_compare_and_write_exit, -EINVAL);
return -EINVAL;
}
auto aio_completion = get_aio_completion(comp);
auto cmp_bl = iovec_to_bufferlist(ictx, cmp_iov, cmp_iovcnt, aio_completion);
auto bl = iovec_to_bufferlist(ictx, iov, iovcnt, aio_completion);
librbd::api::Io<>::aio_compare_and_write(*ictx, aio_completion, off, cmp_len,
std::move(cmp_bl), std::move(bl),
mismatch_off, op_flags, false);
tracepoint(librbd, aio_compare_and_write_exit, 0);
return 0;
}
extern "C" int rbd_invalidate_cache(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, invalidate_cache_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
int r = librbd::invalidate_cache(ictx);
tracepoint(librbd, invalidate_cache_exit, r);
return r;
}
extern "C" int rbd_poll_io_events(rbd_image_t image, rbd_completion_t *comps, int numcomp)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::io::AioCompletion *cs[numcomp];
tracepoint(librbd, poll_io_events_enter, ictx, numcomp);
int r = librbd::poll_io_events(ictx, cs, numcomp);
tracepoint(librbd, poll_io_events_exit, r);
if (r > 0) {
for (int i = 0; i < r; ++i)
comps[i] = cs[i]->rbd_comp;
}
return r;
}
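/*
 * Like most string getters in this C API, rbd_metadata_get() reports the
 * required buffer size through -ERANGE.  A typical two-call sketch
 * (variable names are illustrative):
 *
 *   size_t len = 0;
 *   if (rbd_metadata_get(image, key, NULL, &len) == -ERANGE) {
 *     char *value = (char *)malloc(len);
 *     rbd_metadata_get(image, key, value, &len);
 *   }
 */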
extern "C" int rbd_metadata_get(rbd_image_t image, const char *key, char *value, size_t *vallen)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
string val_s;
tracepoint(librbd, metadata_get_enter, ictx, key);
int r = librbd::metadata_get(ictx, key, &val_s);
if (r < 0) {
tracepoint(librbd, metadata_get_exit, r, key, NULL);
return r;
}
if (*vallen < val_s.size() + 1) {
r = -ERANGE;
*vallen = val_s.size() + 1;
tracepoint(librbd, metadata_get_exit, r, key, NULL);
} else {
strncpy(value, val_s.c_str(), val_s.size() + 1);
tracepoint(librbd, metadata_get_exit, r, key, value);
}
return r;
}
extern "C" int rbd_metadata_set(rbd_image_t image, const char *key, const char *value)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, metadata_set_enter, ictx, key, value);
int r = ictx->operations->metadata_set(key, value);
tracepoint(librbd, metadata_set_exit, r);
return r;
}
extern "C" int rbd_metadata_remove(rbd_image_t image, const char *key)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, metadata_remove_enter, ictx, key);
int r = ictx->operations->metadata_remove(key);
tracepoint(librbd, metadata_remove_exit, r);
return r;
}
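/*
 * rbd_metadata_list() packs all keys NUL-separated into `key` and all
 * values NUL-separated into `value`, updating *key_len and *val_len to
 * the required totals and returning -ERANGE when either buffer is too
 * small, so callers can size-probe exactly as with rbd_metadata_get().
 */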
extern "C" int rbd_metadata_list(rbd_image_t image, const char *start, uint64_t max,
char *key, size_t *key_len, char *value, size_t *val_len)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, metadata_list_enter, ictx);
map<string, bufferlist> pairs;
int r = librbd::metadata_list(ictx, start, max, &pairs);
size_t key_total_len = 0, val_total_len = 0;
bool too_short = false;
for (map<string, bufferlist>::iterator it = pairs.begin();
it != pairs.end(); ++it) {
key_total_len += it->first.size() + 1;
val_total_len += it->second.length() + 1;
}
if (*key_len < key_total_len || *val_len < val_total_len)
too_short = true;
*key_len = key_total_len;
*val_len = val_total_len;
if (too_short) {
tracepoint(librbd, metadata_list_exit, -ERANGE);
return -ERANGE;
}
char *key_p = key, *value_p = value;
for (map<string, bufferlist>::iterator it = pairs.begin();
it != pairs.end(); ++it) {
strncpy(key_p, it->first.c_str(), it->first.size() + 1);
key_p += it->first.size() + 1;
strncpy(value_p, it->second.c_str(), it->second.length());
value_p += it->second.length();
*value_p = '\0';
value_p++;
tracepoint(librbd, metadata_list_entry, it->first.c_str(), it->second.c_str());
}
tracepoint(librbd, metadata_list_exit, r);
return r;
}
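// rbd_mirror_image_enable() is the legacy entry point and always selects
// journal-based mirroring; rbd_mirror_image_enable2() lets the caller
// choose the mirror image mode explicitly.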
extern "C" int rbd_mirror_image_enable(rbd_image_t image)
{
return rbd_mirror_image_enable2(image, RBD_MIRROR_IMAGE_MODE_JOURNAL);
}
extern "C" int rbd_mirror_image_enable2(rbd_image_t image,
rbd_mirror_image_mode_t mode)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_enable(ictx, mode, false);
}
extern "C" int rbd_mirror_image_disable(rbd_image_t image, bool force)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_disable(ictx, force);
}
extern "C" int rbd_mirror_image_promote(rbd_image_t image, bool force)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_promote(ictx, force);
}
extern "C" int rbd_mirror_image_demote(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_demote(ictx);
}
extern "C" int rbd_mirror_image_resync(rbd_image_t image)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_resync(ictx);
}
extern "C" int rbd_mirror_image_create_snapshot(rbd_image_t image,
uint64_t *snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
auto flags = librbd::util::get_default_snap_create_flags(ictx);
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
extern "C" int rbd_mirror_image_create_snapshot2(rbd_image_t image,
uint32_t flags,
uint64_t *snap_id)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_snapshot_create(ictx, flags, snap_id);
}
extern "C" int rbd_mirror_image_get_info(rbd_image_t image,
rbd_mirror_image_info_t *mirror_image_info,
size_t info_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (sizeof(rbd_mirror_image_info_t) != info_size) {
return -ERANGE;
}
librbd::mirror_image_info_t cpp_mirror_image;
int r = librbd::api::Mirror<>::image_get_info(ictx, &cpp_mirror_image);
if (r < 0) {
return r;
}
mirror_image_info_cpp_to_c(cpp_mirror_image, mirror_image_info);
return 0;
}
extern "C" void rbd_mirror_image_get_info_cleanup(
rbd_mirror_image_info_t *mirror_image_info)
{
free(mirror_image_info->global_id);
}
extern "C" int rbd_mirror_image_get_mode(rbd_image_t image,
rbd_mirror_image_mode_t *mode)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
return librbd::api::Mirror<>::image_get_mode(ictx, mode);
}
extern "C" int rbd_mirror_image_get_global_status(
rbd_image_t image, rbd_mirror_image_global_status_t *status,
size_t status_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (sizeof(rbd_mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
librbd::mirror_image_global_status_t cpp_status;
int r = librbd::api::Mirror<>::image_get_global_status(ictx, &cpp_status);
if (r < 0) {
return r;
}
mirror_image_global_status_cpp_to_c(cpp_status, status);
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
extern "C" int rbd_mirror_image_get_status(rbd_image_t image,
rbd_mirror_image_status_t *status,
size_t status_size)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (sizeof(rbd_mirror_image_status_t) != status_size) {
return -ERANGE;
}
librbd::mirror_image_global_status_t cpp_status;
int r = librbd::api::Mirror<>::image_get_global_status(ictx, &cpp_status);
if (r < 0) {
return r;
}
mirror_image_global_status_cpp_to_c(cpp_status, status);
return 0;
}
#pragma GCC diagnostic pop
extern "C" int rbd_mirror_image_get_instance_id(rbd_image_t image,
char *instance_id,
size_t *instance_id_max_length)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
std::string cpp_instance_id;
int r = librbd::api::Mirror<>::image_get_instance_id(ictx, &cpp_instance_id);
if (r < 0) {
return r;
}
if (cpp_instance_id.size() >= *instance_id_max_length) {
*instance_id_max_length = cpp_instance_id.size() + 1;
return -ERANGE;
}
strcpy(instance_id, cpp_instance_id.c_str());
*instance_id_max_length = cpp_instance_id.size() + 1;
return 0;
}
extern "C" int rbd_aio_mirror_image_promote(rbd_image_t image, bool force,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_promote(
ictx, force, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_aio_mirror_image_demote(rbd_image_t image,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_demote(
ictx, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_aio_mirror_image_get_info(rbd_image_t image,
rbd_mirror_image_info_t *info,
size_t info_size,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
if (sizeof(rbd_mirror_image_info_t) != info_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetInfo(
info, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
librbd::api::Mirror<>::image_get_info(
ictx, &ctx->cpp_mirror_image_info, ctx);
return 0;
}
extern "C" int rbd_aio_mirror_image_get_mode(rbd_image_t image,
rbd_mirror_image_mode_t *mode,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_get_mode(
ictx, mode, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_aio_mirror_image_get_global_status(
rbd_image_t image, rbd_mirror_image_global_status_t *status,
size_t status_size, rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
if (sizeof(rbd_mirror_image_global_status_t) != status_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetGlobalStatus(
status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
librbd::api::Mirror<>::image_get_global_status(
ictx, &ctx->cpp_mirror_image_global_status, ctx);
return 0;
}
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
extern "C" int rbd_aio_mirror_image_get_status(
rbd_image_t image, rbd_mirror_image_status_t *status, size_t status_size,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
if (sizeof(rbd_mirror_image_status_t) != status_size) {
return -ERANGE;
}
auto ctx = new C_MirrorImageGetStatus(
status, new C_AioCompletion(ictx, librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
librbd::api::Mirror<>::image_get_global_status(
ictx, &ctx->cpp_mirror_image_global_status, ctx);
return 0;
}
#pragma GCC diagnostic pop
extern "C" int rbd_aio_mirror_image_create_snapshot(rbd_image_t image,
uint32_t flags,
uint64_t *snap_id,
rbd_completion_t c) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
librbd::api::Mirror<>::image_snapshot_create(
ictx, flags, snap_id, new C_AioCompletion(ictx,
librbd::io::AIO_TYPE_GENERIC,
get_aio_completion(comp)));
return 0;
}
extern "C" int rbd_update_watch(rbd_image_t image, uint64_t *handle,
rbd_update_callback_t watch_cb, void *arg)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
C_UpdateWatchCB *wctx = new C_UpdateWatchCB(watch_cb, arg);
tracepoint(librbd, update_watch_enter, ictx, wctx);
  int r = ictx->state->register_update_watcher(wctx, &wctx->handle);
  tracepoint(librbd, update_watch_exit, r, wctx->handle);
  if (r < 0) {
    // registration failed: free the callback context rather than leaking
    // it and handing the caller a dangling handle
    delete wctx;
    return r;
  }
  *handle = reinterpret_cast<uint64_t>(wctx);
  return 0;
}
extern "C" int rbd_update_unwatch(rbd_image_t image, uint64_t handle)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
C_UpdateWatchCB *wctx = reinterpret_cast<C_UpdateWatchCB *>(handle);
tracepoint(librbd, update_unwatch_enter, ictx, wctx->handle);
int r = ictx->state->unregister_update_watcher(wctx->handle);
delete wctx;
tracepoint(librbd, update_unwatch_exit, r);
return r;
}
extern "C" int rbd_aio_is_complete(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->is_complete();
}
extern "C" int rbd_aio_wait_for_complete(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->wait_for_complete();
}
extern "C" ssize_t rbd_aio_get_return_value(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->get_return_value();
}
extern "C" void *rbd_aio_get_arg(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
return comp->get_arg();
}
extern "C" void rbd_aio_release(rbd_completion_t c)
{
librbd::RBD::AioCompletion *comp = (librbd::RBD::AioCompletion *)c;
comp->release();
}
extern "C" int rbd_group_create(rados_ioctx_t p, const char *name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_create_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Group<>::create(io_ctx, name);
tracepoint(librbd, group_create_exit, r);
return r;
}
extern "C" int rbd_group_remove(rados_ioctx_t p, const char *name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_remove_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), name);
int r = librbd::api::Group<>::remove(io_ctx, name);
tracepoint(librbd, group_remove_exit, r);
return r;
}
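/*
 * rbd_group_list() packs the group names back-to-back, each NUL
 * terminated, and returns the number of bytes written (or -ERANGE with
 * *size updated when the buffer is too small).  A sketch of the usual
 * probe-then-fetch pattern (note that a NULL buffer is rejected with
 * -EINVAL once the size check passes, e.g. when no groups exist):
 *
 *   size_t size = 0;
 *   int r = rbd_group_list(ioctx, NULL, &size);   // -ERANGE sets `size`
 *   char *names = (char *)malloc(size);
 *   r = rbd_group_list(ioctx, names, &size);
 */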
extern "C" int rbd_group_list(rados_ioctx_t p, char *names, size_t *size)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_list_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id());
vector<string> cpp_names;
int r = librbd::api::Group<>::list(io_ctx, &cpp_names);
if (r < 0) {
tracepoint(librbd, group_list_exit, r);
return r;
}
size_t expected_size = 0;
for (size_t i = 0; i < cpp_names.size(); i++) {
expected_size += cpp_names[i].size() + 1;
}
if (*size < expected_size) {
*size = expected_size;
tracepoint(librbd, group_list_exit, -ERANGE);
return -ERANGE;
}
if (names == NULL) {
tracepoint(librbd, group_list_exit, -EINVAL);
return -EINVAL;
}
for (int i = 0; i < (int)cpp_names.size(); i++) {
const char* name = cpp_names[i].c_str();
tracepoint(librbd, group_list_entry, name);
strcpy(names, name);
names += strlen(names) + 1;
}
tracepoint(librbd, group_list_exit, (int)expected_size);
return (int)expected_size;
}
extern "C" int rbd_group_rename(rados_ioctx_t p, const char *src_name,
const char *dest_name)
{
librados::IoCtx io_ctx;
librados::IoCtx::from_rados_ioctx_t(p, io_ctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(io_ctx));
tracepoint(librbd, group_rename_enter, io_ctx.get_pool_name().c_str(),
io_ctx.get_id(), src_name, dest_name);
int r = librbd::api::Group<>::rename(io_ctx, src_name, dest_name);
tracepoint(librbd, group_rename_exit, r);
return r;
}
extern "C" int rbd_group_image_add(rados_ioctx_t group_p,
const char *group_name,
rados_ioctx_t image_p,
const char *image_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx image_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
librados::IoCtx::from_rados_ioctx_t(image_p, image_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_add_enter, group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_add(group_ioctx, group_name, image_ioctx,
image_name);
tracepoint(librbd, group_image_add_exit, r);
return r;
}
extern "C" int rbd_group_image_remove(rados_ioctx_t group_p,
const char *group_name,
rados_ioctx_t image_p,
const char *image_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx image_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
librados::IoCtx::from_rados_ioctx_t(image_p, image_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_enter, group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_name);
int r = librbd::api::Group<>::image_remove(group_ioctx, group_name,
image_ioctx, image_name);
tracepoint(librbd, group_image_remove_exit, r);
return r;
}
extern "C" int rbd_group_image_remove_by_id(rados_ioctx_t group_p,
const char *group_name,
rados_ioctx_t image_p,
const char *image_id)
{
librados::IoCtx group_ioctx;
librados::IoCtx image_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
librados::IoCtx::from_rados_ioctx_t(image_p, image_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_remove_by_id_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name,
image_ioctx.get_pool_name().c_str(),
image_ioctx.get_id(), image_id);
int r = librbd::api::Group<>::image_remove_by_id(group_ioctx, group_name,
image_ioctx, image_id);
tracepoint(librbd, group_image_remove_by_id_exit, r);
return r;
}
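/*
 * The *_info_size parameters below guard the C ABI: callers pass
 * sizeof(rbd_group_image_info_t) so that a struct-layout mismatch between
 * application and library is caught as -ERANGE instead of corrupting
 * memory.  *image_size follows the usual probe pattern: call with 0 to
 * learn the element count, call again with a large enough array, and
 * release the results with rbd_group_image_list_cleanup().
 */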
extern "C" int rbd_group_image_list(rados_ioctx_t group_p,
const char *group_name,
rbd_group_image_info_t *images,
size_t group_image_info_size,
size_t *image_size)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_image_list_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(images, 0, sizeof(*images) * *image_size);
if (group_image_info_size != sizeof(rbd_group_image_info_t)) {
*image_size = 0;
tracepoint(librbd, group_image_list_exit, -ERANGE);
return -ERANGE;
}
std::vector<librbd::group_image_info_t> cpp_images;
int r = librbd::api::Group<>::image_list(group_ioctx, group_name,
&cpp_images);
if (r == -ENOENT) {
tracepoint(librbd, group_image_list_exit, 0);
*image_size = 0;
return 0;
}
if (r < 0) {
tracepoint(librbd, group_image_list_exit, r);
return r;
}
if (*image_size < cpp_images.size()) {
*image_size = cpp_images.size();
tracepoint(librbd, group_image_list_exit, -ERANGE);
return -ERANGE;
}
for (size_t i = 0; i < cpp_images.size(); ++i) {
group_image_status_cpp_to_c(cpp_images[i], &images[i]);
}
r = *image_size = cpp_images.size();
tracepoint(librbd, group_image_list_exit, r);
return r;
}
extern "C" int rbd_group_info_cleanup(rbd_group_info_t *group_info,
size_t group_info_size) {
if (group_info_size != sizeof(rbd_group_info_t)) {
return -ERANGE;
}
free(group_info->name);
return 0;
}
extern "C" int rbd_group_image_list_cleanup(rbd_group_image_info_t *images,
size_t group_image_info_size,
size_t len) {
if (group_image_info_size != sizeof(rbd_group_image_info_t)) {
return -ERANGE;
}
for (size_t i = 0; i < len; ++i) {
free(images[i].name);
}
return 0;
}
extern "C" int rbd_group_snap_create(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name,
snap_name, 0);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
extern "C" int rbd_group_snap_create2(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name,
uint32_t flags)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_create_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_create(group_ioctx, group_name, snap_name,
flags);
tracepoint(librbd, group_snap_create_exit, r);
return r;
}
extern "C" int rbd_group_snap_remove(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_remove_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
int r = librbd::api::Group<>::snap_remove(group_ioctx, group_name, snap_name);
tracepoint(librbd, group_snap_remove_exit, r);
return r;
}
extern "C" int rbd_group_snap_rename(rados_ioctx_t group_p,
const char *group_name,
const char *old_snap_name,
const char *new_snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rename_enter,
group_ioctx.get_pool_name().c_str(), group_ioctx.get_id(),
group_name, old_snap_name, new_snap_name);
int r = librbd::api::Group<>::snap_rename(group_ioctx, group_name,
old_snap_name, new_snap_name);
  tracepoint(librbd, group_snap_rename_exit, r);
return r;
}
extern "C" int rbd_group_snap_list(rados_ioctx_t group_p,
const char *group_name,
rbd_group_snap_info_t *snaps,
size_t group_snap_info_size,
size_t *snaps_size)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_list_enter, group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(snaps, 0, sizeof(*snaps) * *snaps_size);
if (group_snap_info_size != sizeof(rbd_group_snap_info_t)) {
*snaps_size = 0;
tracepoint(librbd, group_snap_list_exit, -ERANGE);
return -ERANGE;
}
std::vector<librbd::group_snap_info_t> cpp_snaps;
int r = librbd::api::Group<>::snap_list(group_ioctx, group_name, &cpp_snaps);
if (r == -ENOENT) {
*snaps_size = 0;
tracepoint(librbd, group_snap_list_exit, 0);
return 0;
}
if (r < 0) {
tracepoint(librbd, group_snap_list_exit, r);
return r;
}
if (*snaps_size < cpp_snaps.size()) {
*snaps_size = cpp_snaps.size();
tracepoint(librbd, group_snap_list_exit, -ERANGE);
return -ERANGE;
}
for (size_t i = 0; i < cpp_snaps.size(); ++i) {
group_snap_info_cpp_to_c(cpp_snaps[i], &snaps[i]);
}
r = *snaps_size = cpp_snaps.size();
tracepoint(librbd, group_snap_list_exit, r);
return r;
}
extern "C" int rbd_group_snap_list_cleanup(rbd_group_snap_info_t *snaps,
size_t group_snap_info_size,
size_t len) {
if (group_snap_info_size != sizeof(rbd_group_snap_info_t)) {
return -ERANGE;
}
for (size_t i = 0; i < len; ++i) {
free(snaps[i].name);
}
return 0;
}
extern "C" int rbd_group_snap_rollback(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
librbd::NoOpProgressContext prog_ctx;
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
extern "C" int rbd_group_snap_rollback_with_progress(rados_ioctx_t group_p,
const char *group_name,
const char *snap_name,
librbd_progress_fn_t cb,
void *cbdata)
{
librados::IoCtx group_ioctx;
librados::IoCtx::from_rados_ioctx_t(group_p, group_ioctx);
TracepointProvider::initialize<tracepoint_traits>(get_cct(group_ioctx));
tracepoint(librbd, group_snap_rollback_enter,
group_ioctx.get_pool_name().c_str(),
group_ioctx.get_id(), group_name, snap_name);
librbd::CProgressContext prog_ctx(cb, cbdata);
int r = librbd::api::Group<>::snap_rollback(group_ioctx, group_name,
snap_name, prog_ctx);
tracepoint(librbd, group_snap_rollback_exit, r);
return r;
}
extern "C" int rbd_snap_get_namespace_type(rbd_image_t image,
uint64_t snap_id,
rbd_snap_namespace_type_t *namespace_type) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_namespace_type_enter, ictx, ictx->name.c_str());
int r = librbd::api::Snapshot<>::get_namespace_type(ictx, snap_id,
namespace_type);
tracepoint(librbd, snap_get_namespace_type_exit, r);
return r;
}
extern "C" int rbd_snap_get_group_namespace(rbd_image_t image, uint64_t snap_id,
rbd_snap_group_namespace_t *group_snap,
size_t snap_group_namespace_size) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
tracepoint(librbd, snap_get_group_namespace_enter, ictx,
ictx->name.c_str());
if (snap_group_namespace_size != sizeof(rbd_snap_group_namespace_t)) {
tracepoint(librbd, snap_get_group_namespace_exit, -ERANGE);
return -ERANGE;
}
librbd::snap_group_namespace_t group_namespace;
int r = librbd::api::Snapshot<>::get_group_namespace(ictx, snap_id,
&group_namespace);
if (r >= 0) {
group_snap->group_pool = group_namespace.group_pool;
group_snap->group_name = strdup(group_namespace.group_name.c_str());
group_snap->group_snap_name =
strdup(group_namespace.group_snap_name.c_str());
}
tracepoint(librbd, snap_get_group_namespace_exit, r);
return r;
}
extern "C" int rbd_snap_group_namespace_cleanup(rbd_snap_group_namespace_t *group_snap,
size_t snap_group_namespace_size) {
if (snap_group_namespace_size != sizeof(rbd_snap_group_namespace_t)) {
return -ERANGE;
}
free(group_snap->group_name);
free(group_snap->group_snap_name);
return 0;
}
extern "C" int rbd_snap_get_trash_namespace(rbd_image_t image, uint64_t snap_id,
char *original_name,
size_t max_length) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
std::string cpp_original_name;
int r = librbd::api::Snapshot<>::get_trash_namespace(ictx, snap_id,
&cpp_original_name);
if (r < 0) {
return r;
}
if (cpp_original_name.length() >= max_length) {
return -ERANGE;
}
strcpy(original_name, cpp_original_name.c_str());
return 0;
}
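/*
 * The mirror-snapshot namespace below returns mirror_peer_uuids as a
 * single malloc'd buffer holding mirror_peer_uuids_count NUL-separated
 * strings laid end to end; rbd_snap_mirror_namespace_cleanup() frees it
 * together with primary_mirror_uuid.
 */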
extern "C" int rbd_snap_get_mirror_namespace(
rbd_image_t image, uint64_t snap_id,
rbd_snap_mirror_namespace_t *mirror_snap,
size_t mirror_snap_size) {
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
if (mirror_snap_size != sizeof(rbd_snap_mirror_namespace_t)) {
return -ERANGE;
}
librbd::snap_mirror_namespace_t mirror_namespace;
int r = librbd::api::Snapshot<>::get_mirror_namespace(
ictx, snap_id, &mirror_namespace);
if (r < 0) {
return r;
}
mirror_snap->state = mirror_namespace.state;
mirror_snap->primary_mirror_uuid =
strdup(mirror_namespace.primary_mirror_uuid.c_str());
mirror_snap->primary_snap_id = mirror_namespace.primary_snap_id;
mirror_snap->mirror_peer_uuids_count =
mirror_namespace.mirror_peer_uuids.size();
size_t len = 0;
for (auto &peer : mirror_namespace.mirror_peer_uuids) {
len += peer.size() + 1;
}
mirror_snap->mirror_peer_uuids = (char *)malloc(len);
char *p = mirror_snap->mirror_peer_uuids;
for (auto &peer : mirror_namespace.mirror_peer_uuids) {
strncpy(p, peer.c_str(), peer.size() + 1);
p += peer.size() + 1;
}
mirror_snap->complete = mirror_namespace.complete;
mirror_snap->last_copied_object_number =
mirror_namespace.last_copied_object_number;
return 0;
}
extern "C" int rbd_snap_mirror_namespace_cleanup(
rbd_snap_mirror_namespace_t *mirror_snap,
size_t mirror_snap_size) {
if (mirror_snap_size != sizeof(rbd_snap_mirror_namespace_t)) {
return -ERANGE;
}
free(mirror_snap->primary_mirror_uuid);
free(mirror_snap->mirror_peer_uuids);
return 0;
}
extern "C" int rbd_watchers_list(rbd_image_t image,
rbd_image_watcher_t *watchers,
size_t *max_watchers) {
std::list<librbd::image_watcher_t> watcher_list;
librbd::ImageCtx *ictx = (librbd::ImageCtx*)image;
tracepoint(librbd, list_watchers_enter, ictx, ictx->name.c_str(), ictx->snap_name.c_str(), ictx->read_only);
// FIPS zeroization audit 20191117: this memset is not security related.
memset(watchers, 0, sizeof(*watchers) * *max_watchers);
int r = librbd::list_watchers(ictx, watcher_list);
if (r < 0) {
tracepoint(librbd, list_watchers_exit, r, 0);
return r;
}
if (watcher_list.size() > *max_watchers) {
*max_watchers = watcher_list.size();
tracepoint(librbd, list_watchers_exit, -ERANGE, watcher_list.size());
return -ERANGE;
}
*max_watchers = 0;
for (auto &watcher : watcher_list) {
tracepoint(librbd, list_watchers_entry, watcher.addr.c_str(), watcher.id, watcher.cookie);
watchers[*max_watchers].addr = strdup(watcher.addr.c_str());
watchers[*max_watchers].id = watcher.id;
watchers[*max_watchers].cookie = watcher.cookie;
*max_watchers += 1;
}
tracepoint(librbd, list_watchers_exit, r, watcher_list.size());
return 0;
}
extern "C" void rbd_watchers_list_cleanup(rbd_image_watcher_t *watchers,
size_t num_watchers) {
for (size_t i = 0; i < num_watchers; ++i) {
free(watchers[i].addr);
}
}
extern "C" int rbd_config_image_list(rbd_image_t image,
rbd_config_option_t *options,
int *max_options) {
librbd::ImageCtx *ictx = (librbd::ImageCtx*)image;
std::vector<librbd::config_option_t> option_vector;
int r = librbd::api::Config<>::list(ictx, &option_vector);
if (r < 0) {
return r;
}
if (*max_options < static_cast<int>(option_vector.size())) {
*max_options = static_cast<int>(option_vector.size());
return -ERANGE;
}
for (int i = 0; i < static_cast<int>(option_vector.size()); ++i) {
config_option_cpp_to_c(option_vector[i], &options[i]);
}
*max_options = static_cast<int>(option_vector.size());
return 0;
}
extern "C" void rbd_config_image_list_cleanup(rbd_config_option_t *options,
int max_options) {
for (int i = 0; i < max_options; ++i) {
config_option_cleanup(options[i]);
}
}
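// The returned quiesce-watch handle is the address of an internal
// callback context, so every successful rbd_quiesce_watch() must be
// paired with rbd_quiesce_unwatch() to unregister and free it.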
extern "C" int rbd_quiesce_watch(rbd_image_t image,
rbd_update_callback_t quiesce_cb,
rbd_update_callback_t unquiesce_cb,
void *arg, uint64_t *handle)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
auto wctx = new C_QuiesceWatchCB(quiesce_cb, unquiesce_cb, arg);
int r = ictx->state->register_quiesce_watcher(wctx, &wctx->handle);
if (r < 0) {
delete wctx;
return r;
}
*handle = reinterpret_cast<uint64_t>(wctx);
return 0;
}
extern "C" int rbd_quiesce_unwatch(rbd_image_t image, uint64_t handle)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
auto *wctx = reinterpret_cast<C_QuiesceWatchCB *>(handle);
int r = ictx->state->unregister_quiesce_watcher(wctx->handle);
delete wctx;
return r;
}
extern "C" void rbd_quiesce_complete(rbd_image_t image, uint64_t handle, int r)
{
librbd::ImageCtx *ictx = (librbd::ImageCtx *)image;
ictx->state->quiesce_complete(handle, r);
}
ceph-main/src/librbd/api/Config.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Config.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/api/PoolMetadata.h"
#include "librbd/image/GetMetadataRequest.h"
#include <algorithm>
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Config: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
const uint32_t MAX_KEYS = 64;
typedef std::map<std::string_view, std::pair<std::string, config_source_t>> Parent;
static std::set<std::string_view> EXCLUDE_OPTIONS {
"rbd_auto_exclusive_lock_until_manual_request",
"rbd_default_format",
"rbd_default_pool",
"rbd_discard_on_zeroed_write_same",
"rbd_op_thread_timeout",
"rbd_op_threads",
"rbd_tracing",
"rbd_validate_names",
"rbd_validate_pool",
"rbd_mirror_pool_replayers_refresh_interval",
"rbd_config_pool_override_update_timestamp"
};
static std::set<std::string_view> EXCLUDE_IMAGE_OPTIONS {
"rbd_default_clone_format",
"rbd_default_data_pool",
"rbd_default_features",
"rbd_default_format",
"rbd_default_order",
"rbd_default_stripe_count",
"rbd_default_stripe_unit",
"rbd_journal_order",
"rbd_journal_pool",
"rbd_journal_splay_width"
};
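// Options is an ordered map of option name -> {current value, source};
// entries are seeded from the process-wide config defaults and then
// overridden in place as pool (and, for images, image) metadata is
// layered on top.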
struct Options : Parent {
librados::IoCtx m_io_ctx;
Options(librados::IoCtx& io_ctx, bool image_apply_only_options) {
m_io_ctx.dup(io_ctx);
m_io_ctx.set_namespace("");
CephContext *cct = reinterpret_cast<CephContext *>(m_io_ctx.cct());
const std::string rbd_key_prefix("rbd_");
const std::string rbd_mirror_key_prefix("rbd_mirror_");
auto& schema = cct->_conf.get_schema();
for (auto& pair : schema) {
if (!boost::starts_with(pair.first, rbd_key_prefix)) {
continue;
} else if (EXCLUDE_OPTIONS.count(pair.first) != 0) {
continue;
} else if (image_apply_only_options &&
EXCLUDE_IMAGE_OPTIONS.count(pair.first) != 0) {
continue;
} else if (image_apply_only_options &&
boost::starts_with(pair.first, rbd_mirror_key_prefix)) {
continue;
}
insert({pair.first, {}});
}
}
int init() {
CephContext *cct = (CephContext *)m_io_ctx.cct();
for (auto& [k,v] : *this) {
int r = cct->_conf.get_val(k, &v.first);
ceph_assert(r == 0);
v.second = RBD_CONFIG_SOURCE_CONFIG;
}
std::string last_key = ImageCtx::METADATA_CONF_PREFIX;
bool more_results = true;
while (more_results) {
std::map<std::string, bufferlist> pairs;
int r = librbd::api::PoolMetadata<>::list(m_io_ctx, last_key, MAX_KEYS,
&pairs);
if (r < 0) {
return r;
}
if (pairs.empty()) {
break;
}
more_results = (pairs.size() == MAX_KEYS);
last_key = pairs.rbegin()->first;
for (auto kv : pairs) {
std::string key;
if (!util::is_metadata_config_override(kv.first, &key)) {
more_results = false;
break;
}
auto it = find(key);
if (it != end()) {
it->second = {{kv.second.c_str(), kv.second.length()},
RBD_CONFIG_SOURCE_POOL};
}
}
}
return 0;
}
};
} // anonymous namespace
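// Reported precedence is config < pool < image: init() seeds
// RBD_CONFIG_SOURCE_CONFIG values, pool metadata flips matching entries
// to RBD_CONFIG_SOURCE_POOL, and the image-level list() below flips them
// to RBD_CONFIG_SOURCE_IMAGE last.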
template <typename I>
bool Config<I>::is_option_name(librados::IoCtx& io_ctx,
const std::string &name) {
Options opts(io_ctx, false);
return (opts.find(name) != opts.end());
}
template <typename I>
int Config<I>::list(librados::IoCtx& io_ctx,
std::vector<config_option_t> *options) {
Options opts(io_ctx, false);
int r = opts.init();
if (r < 0) {
return r;
}
for (auto& [k,v] : opts) {
options->push_back({std::string{k}, v.first, v.second});
}
return 0;
}
template <typename I>
bool Config<I>::is_option_name(I *image_ctx, const std::string &name) {
Options opts(image_ctx->md_ctx, true);
return (opts.find(name) != opts.end());
}
template <typename I>
int Config<I>::list(I *image_ctx, std::vector<config_option_t> *options) {
CephContext *cct = image_ctx->cct;
Options opts(image_ctx->md_ctx, true);
int r = opts.init();
if (r < 0) {
return r;
}
std::map<std::string, bufferlist> pairs;
C_SaferCond ctx;
auto req = image::GetMetadataRequest<I>::create(
image_ctx->md_ctx, image_ctx->header_oid, true,
ImageCtx::METADATA_CONF_PREFIX, ImageCtx::METADATA_CONF_PREFIX, 0U, &pairs,
&ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed reading image metadata: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto kv : pairs) {
std::string key;
if (!util::is_metadata_config_override(kv.first, &key)) {
break;
}
auto it = opts.find(key);
if (it != opts.end()) {
it->second = {{kv.second.c_str(), kv.second.length()},
RBD_CONFIG_SOURCE_IMAGE};
}
}
for (auto& [k,v] : opts) {
options->push_back({std::string{k}, v.first, v.second});
}
return 0;
}
template <typename I>
void Config<I>::apply_pool_overrides(librados::IoCtx& io_ctx,
ConfigProxy* config) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
Options opts(io_ctx, false);
int r = opts.init();
if (r < 0) {
lderr(cct) << "failed to read pool config overrides: " << cpp_strerror(r)
<< dendl;
return;
}
for (auto& [k,v] : opts) {
if (v.second == RBD_CONFIG_SOURCE_POOL) {
r = config->set_val(k, v.first);
if (r < 0) {
lderr(cct) << "failed to override pool config " << k << "="
<< v.first << ": " << cpp_strerror(r) << dendl;
}
}
}
}
} // namespace api
} // namespace librbd
template class librbd::api::Config<librbd::ImageCtx>;
ceph-main/src/librbd/api/Config.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_CONFIG_H
#define CEPH_LIBRBD_API_CONFIG_H
#include "common/config_fwd.h"
#include "include/common_fwd.h"
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class Config {
public:
static bool is_option_name(librados::IoCtx& io_ctx, const std::string &name);
static int list(librados::IoCtx& io_ctx,
std::vector<config_option_t> *options);
static bool is_option_name(ImageCtxT *image_ctx, const std::string &name);
static int list(ImageCtxT *image_ctx, std::vector<config_option_t> *options);
static void apply_pool_overrides(librados::IoCtx& io_ctx,
ConfigProxy* config);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Config<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_CONFIG_H
ceph-main/src/librbd/api/DiffIterate.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/DiffIterate.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/internal.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/object_map/DiffRequest.h"
#include "include/rados/librados.hpp"
#include "include/interval_set.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/Throttle.h"
#include "osdc/Striper.h"
#include <boost/tuple/tuple.hpp>
#include <list>
#include <map>
#include <vector>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::DiffIterate: "
namespace librbd {
namespace api {
namespace {
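// Shared state for one diff_iterate() invocation; the OrderedThrottle
// bounds in-flight per-object list-snaps requests to
// rbd_concurrent_management_ops while preserving callback ordering.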
struct DiffContext {
DiffIterate<>::Callback callback;
void *callback_arg;
bool whole_object;
bool include_parent;
uint64_t from_snap_id;
uint64_t end_snap_id;
OrderedThrottle throttle;
template <typename I>
DiffContext(I &image_ctx, DiffIterate<>::Callback callback,
void *callback_arg, bool _whole_object, bool _include_parent,
uint64_t _from_snap_id, uint64_t _end_snap_id)
: callback(callback), callback_arg(callback_arg),
whole_object(_whole_object), include_parent(_include_parent),
from_snap_id(_from_snap_id), end_snap_id(_end_snap_id),
throttle(image_ctx.config.template get_val<uint64_t>("rbd_concurrent_management_ops"), true) {
}
};
template <typename I>
class C_DiffObject : public Context {
public:
C_DiffObject(I &image_ctx, DiffContext &diff_context, uint64_t image_offset,
uint64_t image_length)
: m_image_ctx(image_ctx), m_cct(image_ctx.cct),
m_diff_context(diff_context), m_image_offset(image_offset),
m_image_length(image_length) {
}
void send() {
Context* ctx = m_diff_context.throttle.start_op(this);
auto aio_comp = io::AioCompletion::create_and_start(
ctx, util::get_image_ctx(&m_image_ctx), io::AIO_TYPE_GENERIC);
int list_snaps_flags = 0;
if (!m_diff_context.include_parent || m_diff_context.from_snap_id != 0) {
list_snaps_flags |= io::LIST_SNAPS_FLAG_DISABLE_LIST_FROM_PARENT;
}
if (m_diff_context.whole_object) {
list_snaps_flags |= io::LIST_SNAPS_FLAG_WHOLE_OBJECT;
}
auto req = io::ImageDispatchSpec::create_list_snaps(
m_image_ctx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, {{m_image_offset, m_image_length}}, io::ImageArea::DATA,
{m_diff_context.from_snap_id, m_diff_context.end_snap_id},
list_snaps_flags, &m_snapshot_delta, {});
req->send();
}
protected:
typedef boost::tuple<uint64_t, size_t, bool> Diff;
typedef std::list<Diff> Diffs;
void finish(int r) override {
CephContext *cct = m_cct;
if (r < 0) {
ldout(cct, 20) << "list_snaps failed: " << m_image_offset << "~"
<< m_image_length << ": " << cpp_strerror(r) << dendl;
}
Diffs diffs;
ldout(cct, 20) << "image extent " << m_image_offset << "~"
<< m_image_length << ": list_snaps complete" << dendl;
compute_diffs(&diffs);
for (Diffs::const_iterator d = diffs.begin(); d != diffs.end(); ++d) {
r = m_diff_context.callback(d->get<0>(), d->get<1>(), d->get<2>(),
m_diff_context.callback_arg);
if (r < 0) {
break;
}
}
m_diff_context.throttle.end_op(r);
}
private:
I& m_image_ctx;
CephContext *m_cct;
DiffContext &m_diff_context;
uint64_t m_image_offset;
uint64_t m_image_length;
io::SnapshotDelta m_snapshot_delta;
void compute_diffs(Diffs *diffs) {
CephContext *cct = m_cct;
// merge per-snapshot deltas into an aggregate
io::SparseExtents aggregate_snapshot_extents;
for (auto& [key, snapshot_extents] : m_snapshot_delta) {
for (auto& snapshot_extent : snapshot_extents) {
auto state = snapshot_extent.get_val().state;
// ignore DNE object (and parent)
if ((state == io::SPARSE_EXTENT_STATE_DNE) ||
(key == io::INITIAL_WRITE_READ_SNAP_IDS &&
state == io::SPARSE_EXTENT_STATE_ZEROED)) {
continue;
}
aggregate_snapshot_extents.insert(
snapshot_extent.get_off(), snapshot_extent.get_len(),
{state, snapshot_extent.get_len()});
}
}
// build delta callback set
for (auto& snapshot_extent : aggregate_snapshot_extents) {
ldout(cct, 20) << "off=" << snapshot_extent.get_off() << ", "
<< "len=" << snapshot_extent.get_len() << ", "
<< "state=" << snapshot_extent.get_val().state << dendl;
diffs->emplace_back(
snapshot_extent.get_off(), snapshot_extent.get_len(),
snapshot_extent.get_val().state == io::SPARSE_EXTENT_STATE_DATA);
}
}
};
int simple_diff_cb(uint64_t off, size_t len, int exists, void *arg) {
// it's possible for a discard to create a hole in the parent image -- ignore
if (exists) {
interval_set<uint64_t> *diff = static_cast<interval_set<uint64_t> *>(arg);
diff->insert(off, len);
}
return 0;
}
} // anonymous namespace
template <typename I>
int DiffIterate<I>::diff_iterate(I *ictx,
const cls::rbd::SnapshotNamespace& from_snap_namespace,
const char *fromsnapname,
uint64_t off, uint64_t len,
bool include_parent, bool whole_object,
int (*cb)(uint64_t, size_t, int, void *),
void *arg)
{
ldout(ictx->cct, 20) << "diff_iterate " << ictx << " off = " << off
<< " len = " << len << dendl;
if (!ictx->data_ctx.is_valid()) {
return -ENODEV;
}
// ensure previous writes are visible to listsnaps
C_SaferCond flush_ctx;
{
std::shared_lock owner_locker{ictx->owner_lock};
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, ictx,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec::create_flush(
*ictx, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
req->send();
}
int r = flush_ctx.wait();
if (r < 0) {
return r;
}
r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
ictx->image_lock.lock_shared();
r = clip_io(ictx, off, &len, io::ImageArea::DATA);
ictx->image_lock.unlock_shared();
if (r < 0) {
return r;
}
DiffIterate command(*ictx, from_snap_namespace, fromsnapname, off, len,
include_parent, whole_object, cb, arg);
r = command.execute();
return r;
}
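// execute() prefers the object-map fast-diff path when diffing whole
// objects (object_map::DiffRequest) and falls back to issuing a
// list-snaps request per object extent (C_DiffObject) when fast diff is
// unavailable.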
template <typename I>
int DiffIterate<I>::execute() {
CephContext* cct = m_image_ctx.cct;
ceph_assert(m_image_ctx.data_ctx.is_valid());
librados::snap_t from_snap_id = 0;
librados::snap_t end_snap_id;
uint64_t from_size = 0;
uint64_t end_size;
{
std::shared_lock image_locker{m_image_ctx.image_lock};
if (m_from_snap_name) {
from_snap_id = m_image_ctx.get_snap_id(m_from_snap_namespace,
m_from_snap_name);
from_size = m_image_ctx.get_image_size(from_snap_id);
}
end_snap_id = m_image_ctx.snap_id;
end_size = m_image_ctx.get_image_size(end_snap_id);
}
if (from_snap_id == CEPH_NOSNAP) {
return -ENOENT;
}
if (from_snap_id == end_snap_id) {
// no diff.
return 0;
}
if (from_snap_id >= end_snap_id) {
return -EINVAL;
}
int r;
bool fast_diff_enabled = false;
BitVector<2> object_diff_state;
interval_set<uint64_t> parent_diff;
if (m_whole_object) {
C_SaferCond ctx;
auto req = object_map::DiffRequest<I>::create(&m_image_ctx, from_snap_id,
end_snap_id,
&object_diff_state, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
ldout(cct, 5) << "fast diff disabled" << dendl;
} else {
ldout(cct, 5) << "fast diff enabled" << dendl;
fast_diff_enabled = true;
// check parent overlap only if we are comparing to the beginning of time
if (m_include_parent && from_snap_id == 0) {
std::shared_lock image_locker{m_image_ctx.image_lock};
uint64_t raw_overlap = 0;
m_image_ctx.get_parent_overlap(m_image_ctx.snap_id, &raw_overlap);
auto overlap = m_image_ctx.reduce_parent_overlap(raw_overlap, false);
if (overlap.first > 0 && overlap.second == io::ImageArea::DATA) {
ldout(cct, 10) << " first getting parent diff" << dendl;
DiffIterate diff_parent(*m_image_ctx.parent, {}, nullptr, 0,
overlap.first, true, true, &simple_diff_cb,
&parent_diff);
r = diff_parent.execute();
if (r < 0) {
return r;
}
}
}
}
}
ldout(cct, 5) << "diff_iterate from " << from_snap_id << " to "
<< end_snap_id << " size from " << from_size
<< " to " << end_size << dendl;
DiffContext diff_context(m_image_ctx, m_callback, m_callback_arg,
m_whole_object, m_include_parent, from_snap_id,
end_snap_id);
uint64_t period = m_image_ctx.get_stripe_period();
uint64_t off = m_offset;
uint64_t left = m_length;
while (left > 0) {
uint64_t period_off = off - (off % period);
uint64_t read_len = std::min(period_off + period - off, left);
if (fast_diff_enabled) {
// map to extents
std::map<object_t,std::vector<ObjectExtent> > object_extents;
Striper::file_to_extents(cct, m_image_ctx.format_string,
&m_image_ctx.layout, off, read_len, 0,
object_extents, 0);
// get diff info for each object and merge adjacent stripe units
// into an aggregate (this also sorts them)
io::SparseExtents aggregate_sparse_extents;
for (auto& [object, extents] : object_extents) {
const uint64_t object_no = extents.front().objectno;
uint8_t diff_state = object_diff_state[object_no];
ldout(cct, 20) << "object " << object << ": diff_state="
<< (int)diff_state << dendl;
if (diff_state == object_map::DIFF_STATE_HOLE &&
from_snap_id == 0 && !parent_diff.empty()) {
// no data in child object -- report parent diff instead
for (auto& oe : extents) {
for (auto& be : oe.buffer_extents) {
interval_set<uint64_t> o;
o.insert(off + be.first, be.second);
o.intersection_of(parent_diff);
ldout(cct, 20) << " reporting parent overlap " << o << dendl;
for (auto e = o.begin(); e != o.end(); ++e) {
aggregate_sparse_extents.insert(e.get_start(), e.get_len(),
{io::SPARSE_EXTENT_STATE_DATA,
e.get_len()});
}
}
}
} else if (diff_state == object_map::DIFF_STATE_HOLE_UPDATED ||
diff_state == object_map::DIFF_STATE_DATA_UPDATED) {
auto state = (diff_state == object_map::DIFF_STATE_HOLE_UPDATED ?
io::SPARSE_EXTENT_STATE_ZEROED : io::SPARSE_EXTENT_STATE_DATA);
for (auto& oe : extents) {
for (auto& be : oe.buffer_extents) {
aggregate_sparse_extents.insert(off + be.first, be.second,
{state, be.second});
}
}
}
}
for (const auto& se : aggregate_sparse_extents) {
ldout(cct, 20) << "off=" << se.get_off() << ", len=" << se.get_len()
<< ", state=" << se.get_val().state << dendl;
r = m_callback(se.get_off(), se.get_len(),
se.get_val().state == io::SPARSE_EXTENT_STATE_DATA,
m_callback_arg);
if (r < 0) {
return r;
}
}
} else {
auto diff_object = new C_DiffObject<I>(m_image_ctx, diff_context, off,
read_len);
diff_object->send();
if (diff_context.throttle.pending_error()) {
r = diff_context.throttle.wait_for_ret();
return r;
}
}
left -= read_len;
off += read_len;
}
r = diff_context.throttle.wait_for_ret();
if (r < 0) {
return r;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::DiffIterate<librbd::ImageCtx>;
ceph-main/src/librbd/api/DiffIterate.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_DIFF_ITERATE_H
#define CEPH_LIBRBD_API_DIFF_ITERATE_H
#include "include/int_types.h"
#include "common/bit_vector.hpp"
#include "cls/rbd/cls_rbd_types.h"
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class DiffIterate {
public:
typedef int (*Callback)(uint64_t, size_t, int, void *);
static int diff_iterate(ImageCtxT *ictx,
const cls::rbd::SnapshotNamespace& from_snap_namespace,
const char *fromsnapname,
uint64_t off, uint64_t len, bool include_parent,
bool whole_object,
int (*cb)(uint64_t, size_t, int, void *),
void *arg);
private:
ImageCtxT &m_image_ctx;
cls::rbd::SnapshotNamespace m_from_snap_namespace;
const char* m_from_snap_name;
uint64_t m_offset;
uint64_t m_length;
bool m_include_parent;
bool m_whole_object;
Callback m_callback;
void *m_callback_arg;
DiffIterate(ImageCtxT &image_ctx,
const cls::rbd::SnapshotNamespace& from_snap_namespace,
const char *from_snap_name, uint64_t off, uint64_t len,
bool include_parent, bool whole_object, Callback callback,
void *callback_arg)
: m_image_ctx(image_ctx), m_from_snap_namespace(from_snap_namespace),
m_from_snap_name(from_snap_name), m_offset(off),
m_length(len), m_include_parent(include_parent),
m_whole_object(whole_object), m_callback(callback),
m_callback_arg(callback_arg)
{
}
int execute();
int diff_object_map(uint64_t from_snap_id, uint64_t to_snap_id,
BitVector<2>* object_diff_state);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::DiffIterate<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_DIFF_ITERATE_H
| 1,897 | 27.328358 | 74 |
h
|
null |
ceph-main/src/librbd/api/Group.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/Cond.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/api/Group.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/ImageWatcher.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/internal.h"
#include "librbd/io/AioCompletion.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Group: " << __func__ << ": "
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;
// list binds to list() here, so std::list is explicitly used below
using ceph::bufferlist;
using librados::snap_t;
using librados::IoCtx;
using librados::Rados;
namespace librbd {
namespace api {
namespace {
template <typename I>
snap_t get_group_snap_id(I* ictx,
const cls::rbd::SnapshotNamespace& in_snap_namespace) {
ceph_assert(ceph_mutex_is_locked(ictx->image_lock));
auto it = ictx->snap_ids.lower_bound({cls::rbd::GroupSnapshotNamespace{},
""});
for (; it != ictx->snap_ids.end(); ++it) {
if (it->first.first == in_snap_namespace) {
return it->second;
} else if (!std::holds_alternative<cls::rbd::GroupSnapshotNamespace>(
it->first.first)) {
break;
}
}
return CEPH_NOSNAP;
}
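// Note: despite the name, this is not an RFC 4122 UUID -- it is the
// hex-encoded rados instance id concatenated with a random 32-bit value,
// which is unique enough for directory keys.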
string generate_uuid(librados::IoCtx& io_ctx)
{
Rados rados(io_ctx);
uint64_t bid = rados.get_instance_id();
uint32_t extra = rand() % 0xFFFFFFFF;
std::ostringstream bid_ss;
  bid_ss << std::hex << bid << extra;
return bid_ss.str();
}
int group_snap_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<cls::rbd::GroupSnapshot> *cls_snaps)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
vector<string> ind_snap_names;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
const int max_read = 1024;
cls::rbd::GroupSnapshot snap_last;
for (;;) {
vector<cls::rbd::GroupSnapshot> snaps_page;
r = cls_client::group_snap_list(&group_ioctx, group_header_oid,
snap_last, max_read, &snaps_page);
if (r < 0) {
lderr(cct) << "error reading snap list from group: "
<< cpp_strerror(-r) << dendl;
return r;
}
cls_snaps->insert(cls_snaps->end(), snaps_page.begin(), snaps_page.end());
    if (snaps_page.size() < static_cast<size_t>(max_read)) {
break;
}
snap_last = *snaps_page.rbegin();
}
return 0;
}
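// Individual image snapshots belonging to a group snapshot are named
// ".group.<pool-id-hex>_<group-id>_<group-snap-id>"; e.g. a group in
// pool 2 might produce ".group.2_10226b8b4567_10426b8b4567" (the ids
// here are made up for illustration).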
std::string calc_ind_image_snap_name(uint64_t pool_id,
const std::string &group_id,
const std::string &snap_id)
{
std::stringstream ind_snap_name_stream;
ind_snap_name_stream << ".group." << std::hex << pool_id << "_"
<< group_id << "_" << snap_id;
return ind_snap_name_stream.str();
}
int group_image_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<cls::rbd::GroupImageStatus> *image_ids)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
ldout(cct, 20) << "listing images in group name "
<< group_name << " group id " << group_header_oid << dendl;
image_ids->clear();
const int max_read = 1024;
cls::rbd::GroupImageSpec start_last;
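  // page through the group's image directory, keying each request off
  // the last entry returned by the previous page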
do {
std::vector<cls::rbd::GroupImageStatus> image_ids_page;
r = cls_client::group_image_list(&group_ioctx, group_header_oid,
start_last, max_read, &image_ids_page);
if (r < 0) {
lderr(cct) << "error reading image list from group: "
<< cpp_strerror(-r) << dendl;
return r;
}
image_ids->insert(image_ids->end(),
image_ids_page.begin(), image_ids_page.end());
if (image_ids_page.size() > 0)
start_last = image_ids_page.rbegin()->spec;
r = image_ids_page.size();
} while (r == max_read);
return 0;
}
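// Unlinking mirrors the two-phase protocol used by Group<I>::image_add:
// the group->image link is first downgraded to INCOMPLETE, the
// image->group back-reference is removed, and only then is the link
// itself deleted, so a crash at any point leaves a repairable state.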
int group_image_remove(librados::IoCtx& group_ioctx, string group_id,
librados::IoCtx& image_ioctx, string image_id)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_header_oid = util::group_header_name(group_id);
string image_header_oid = util::header_name(image_id);
ldout(cct, 20) << "removing image " << image_id
<< " image id " << image_header_oid << dendl;
cls::rbd::GroupSpec group_spec(group_id, group_ioctx.get_id());
cls::rbd::GroupImageStatus incomplete_st(image_id, image_ioctx.get_id(),
cls::rbd::GROUP_IMAGE_LINK_STATE_INCOMPLETE);
cls::rbd::GroupImageSpec spec(image_id, image_ioctx.get_id());
int r = cls_client::group_image_set(&group_ioctx, group_header_oid,
incomplete_st);
if (r < 0) {
lderr(cct) << "couldn't put image into removing state: "
<< cpp_strerror(-r) << dendl;
return r;
}
r = cls_client::image_group_remove(&image_ioctx, image_header_oid,
group_spec);
if ((r < 0) && (r != -ENOENT)) {
lderr(cct) << "couldn't remove group reference from image"
<< cpp_strerror(-r) << dendl;
return r;
} else if (r >= 0) {
ImageWatcher<>::notify_header_update(image_ioctx, image_header_oid);
}
r = cls_client::group_image_remove(&group_ioctx, group_header_oid, spec);
if (r < 0) {
lderr(cct) << "couldn't remove image from group"
<< cpp_strerror(-r) << dendl;
return r;
}
return 0;
}
int group_snap_remove_by_record(librados::IoCtx& group_ioctx,
const cls::rbd::GroupSnapshot& group_snap,
const std::string& group_id,
const std::string& group_header_oid) {
CephContext *cct = (CephContext *)group_ioctx.cct();
std::vector<C_SaferCond*> on_finishes;
int r, ret_code;
std::vector<librbd::ImageCtx*> ictxs;
cls::rbd::GroupSnapshotNamespace ne{group_ioctx.get_id(), group_id,
group_snap.id};
ldout(cct, 20) << "Removing snapshots" << dendl;
int snap_count = group_snap.snaps.size();
for (int i = 0; i < snap_count; ++i) {
librbd::IoCtx image_io_ctx;
r = util::create_ioctx(group_ioctx, "image", group_snap.snaps[i].pool, {},
&image_io_ctx);
if (r < 0) {
return r;
}
librbd::ImageCtx* image_ctx = new ImageCtx("", group_snap.snaps[i].image_id,
nullptr, image_io_ctx, false);
C_SaferCond* on_finish = new C_SaferCond;
image_ctx->state->open(0, on_finish);
ictxs.push_back(image_ctx);
on_finishes.push_back(on_finish);
}
ret_code = 0;
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ictxs[i] = nullptr;
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
ldout(cct, 20) << "Opened participating images. " <<
"Deleting snapshots themselves." << dendl;
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
on_finishes[i] = new C_SaferCond;
std::string snap_name;
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
ictx->image_lock.unlock_shared();
if (r >= 0) {
ldout(cct, 20) << "removing individual snapshot from image " << ictx->name
<< dendl;
ictx->operations->snap_remove(ne, snap_name, on_finishes[i]);
} else {
// We are ok to ignore missing image snapshots. The snapshot could have
// been inconsistent in the first place.
on_finishes[i]->complete(0);
}
}
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0 && r != -ENOENT) {
// if previous attempts to remove this snapshot failed then the image's
// snapshot may not exist
lderr(cct) << "Failed deleting image snapshot. Ret code: " << r << dendl;
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
ldout(cct, 20) << "Removed images snapshots removing snapshot record."
<< dendl;
r = cls_client::group_snap_remove(&group_ioctx, group_header_oid,
group_snap.id);
if (r < 0) {
ret_code = r;
goto finish;
}
finish:
for (int i = 0; i < snap_count; ++i) {
if (ictxs[i] != nullptr) {
ictxs[i]->state->close();
}
}
return ret_code;
}
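// Mirrors the removal helper above: opens every image participating in
// the group snapshot, blocks lock requests and acquires exclusive locks,
// then rolls each image back to its member snapshot.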
int group_snap_rollback_by_record(librados::IoCtx& group_ioctx,
const cls::rbd::GroupSnapshot& group_snap,
const std::string& group_id,
const std::string& group_header_oid,
ProgressContext& pctx) {
CephContext *cct = (CephContext *)group_ioctx.cct();
std::vector<C_SaferCond*> on_finishes;
int r, ret_code;
std::vector<librbd::ImageCtx*> ictxs;
cls::rbd::GroupSnapshotNamespace ne{group_ioctx.get_id(), group_id,
group_snap.id};
ldout(cct, 20) << "Rolling back snapshots" << dendl;
int snap_count = group_snap.snaps.size();
for (int i = 0; i < snap_count; ++i) {
librados::IoCtx image_io_ctx;
r = util::create_ioctx(group_ioctx, "image", group_snap.snaps[i].pool, {},
&image_io_ctx);
if (r < 0) {
return r;
}
librbd::ImageCtx* image_ctx = new ImageCtx("", group_snap.snaps[i].image_id,
nullptr, image_io_ctx, false);
C_SaferCond* on_finish = new C_SaferCond;
image_ctx->state->open(0, on_finish);
ictxs.push_back(image_ctx);
on_finishes.push_back(on_finish);
}
ret_code = 0;
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ictxs[i] = nullptr;
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
for (auto ictx: ictxs) {
std::shared_lock owner_lock{ictx->owner_lock};
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(-EBUSY);
}
}
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
std::shared_lock owner_lock{ictx->owner_lock};
on_finishes[i] = new C_SaferCond;
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->acquire_lock(on_finishes[i]);
}
}
ret_code = 0;
for (int i = 0; i < snap_count; ++i) {
r = 0;
ImageCtx *ictx = ictxs[i];
if (ictx->exclusive_lock != nullptr) {
r = on_finishes[i]->wait();
}
delete on_finishes[i];
if (r < 0) {
ret_code = r;
}
}
if (ret_code != 0) {
goto finish;
}
for (int i = 0; i < snap_count; ++i) {
ImageCtx *ictx = ictxs[i];
on_finishes[i] = new C_SaferCond;
std::shared_lock owner_locker{ictx->owner_lock};
std::string snap_name;
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
ictx->image_lock.unlock_shared();
if (r >= 0) {
ldout(cct, 20) << "rolling back to individual snapshot for image " << ictx->name
<< dendl;
ictx->operations->execute_snap_rollback(ne, snap_name, pctx, on_finishes[i]);
} else {
on_finishes[i]->complete(r);
}
}
for (int i = 0; i < snap_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0 && r != -ENOENT) {
lderr(cct) << "Failed rolling back group to snapshot. Ret code: " << r << dendl;
ret_code = r;
}
}
finish:
for (int i = 0; i < snap_count; ++i) {
if (ictxs[i] != nullptr) {
ictxs[i]->state->close();
}
}
return ret_code;
}
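// Quiesce helpers: notify_quiesce fans the request out to every image
// watcher and, if any image fails, unwinds the successful ones via
// notify_unquiesce so callers never leave images quiesced.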
template <typename I>
void notify_unquiesce(std::vector<I*> &ictxs,
const std::vector<uint64_t> &requests) {
if (requests.empty()) {
return;
}
ceph_assert(requests.size() == ictxs.size());
int image_count = ictxs.size();
std::vector<C_SaferCond> on_finishes(image_count);
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
ictx->image_watcher->notify_unquiesce(requests[i], &on_finishes[i]);
}
for (int i = 0; i < image_count; ++i) {
on_finishes[i].wait();
}
}
template <typename I>
int notify_quiesce(std::vector<I*> &ictxs, ProgressContext &prog_ctx,
std::vector<uint64_t> *requests) {
int image_count = ictxs.size();
std::vector<C_SaferCond> on_finishes(image_count);
requests->resize(image_count);
for (int i = 0; i < image_count; ++i) {
auto ictx = ictxs[i];
ictx->image_watcher->notify_quiesce(&(*requests)[i], prog_ctx,
&on_finishes[i]);
}
int ret_code = 0;
for (int i = 0; i < image_count; ++i) {
int r = on_finishes[i].wait();
if (r < 0) {
ret_code = r;
}
}
if (ret_code != 0) {
notify_unquiesce(ictxs, *requests);
}
return ret_code;
}
} // anonymous namespace
template <typename I>
int Group<I>::image_remove_by_id(librados::IoCtx& group_ioctx,
const char *group_name,
librados::IoCtx& image_ioctx,
const char *image_id)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << " image "
<< &image_ioctx << " id " << image_id << dendl;
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 20) << "removing image from group name " << group_name
<< " group id " << group_id << dendl;
return group_image_remove(group_ioctx, group_id, image_ioctx, string(image_id));
}
template <typename I>
int Group<I>::create(librados::IoCtx& io_ctx, const char *group_name)
{
CephContext *cct = (CephContext *)io_ctx.cct();
string id = generate_uuid(io_ctx);
ldout(cct, 2) << "adding group to directory..." << dendl;
int r = cls_client::group_dir_add(&io_ctx, RBD_GROUP_DIRECTORY, group_name,
id);
if (r < 0) {
lderr(cct) << "error adding group to directory: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string header_oid = util::group_header_name(id);
r = io_ctx.create(header_oid, true);
if (r < 0) {
lderr(cct) << "error creating group header: " << cpp_strerror(r) << dendl;
goto err_remove_from_dir;
}
return 0;
err_remove_from_dir:
int remove_r = cls_client::group_dir_remove(&io_ctx, RBD_GROUP_DIRECTORY,
group_name, id);
if (remove_r < 0) {
lderr(cct) << "error cleaning up group from rbd_directory "
<< "object after creation failed: " << cpp_strerror(remove_r)
<< dendl;
}
return r;
}
template <typename I>
int Group<I>::remove(librados::IoCtx& io_ctx, const char *group_name)
{
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "group_remove " << &io_ctx << " " << group_name << dendl;
std::string group_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_GROUP_DIRECTORY,
std::string(group_name), &group_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error getting id of group" << dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
std::vector<cls::rbd::GroupSnapshot> snaps;
r = group_snap_list(io_ctx, group_name, &snaps);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing group snapshots" << dendl;
return r;
}
for (auto &snap : snaps) {
r = group_snap_remove_by_record(io_ctx, snap, group_id, group_header_oid);
if (r < 0) {
return r;
}
}
std::vector<cls::rbd::GroupImageStatus> images;
r = group_image_list(io_ctx, group_name, &images);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing group images" << dendl;
return r;
}
for (auto image : images) {
IoCtx image_ioctx;
r = util::create_ioctx(io_ctx, "image", image.spec.pool_id, {},
&image_ioctx);
if (r < 0) {
return r;
}
r = group_image_remove(io_ctx, group_id, image_ioctx, image.spec.image_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing image from a group" << dendl;
return r;
}
}
string header_oid = util::group_header_name(group_id);
r = io_ctx.remove(header_oid);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing header: " << cpp_strerror(-r) << dendl;
return r;
}
r = cls_client::group_dir_remove(&io_ctx, RBD_GROUP_DIRECTORY,
group_name, group_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing group from directory" << dendl;
return r;
}
return 0;
}
template <typename I>
int Group<I>::list(IoCtx& io_ctx, vector<string> *names)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "io_ctx=" << &io_ctx << dendl;
int max_read = 1024;
string last_read = "";
int r;
do {
map<string, string> groups;
r = cls_client::group_dir_list(&io_ctx, RBD_GROUP_DIRECTORY, last_read,
max_read, &groups);
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "error listing group in directory: "
<< cpp_strerror(r) << dendl;
} else {
r = 0;
}
return r;
}
    for (const pair<string, string>& group : groups) {
names->push_back(group.first);
}
if (!groups.empty()) {
last_read = groups.rbegin()->first;
}
r = groups.size();
} while (r == max_read);
return 0;
}
template <typename I>
int Group<I>::image_add(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << " image "
<< &image_ioctx << " name " << image_name << dendl;
if (group_ioctx.get_namespace() != image_ioctx.get_namespace()) {
lderr(cct) << "group and image cannot be in different namespaces" << dendl;
return -EINVAL;
}
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
string group_header_oid = util::group_header_name(group_id);
ldout(cct, 20) << "adding image to group name " << group_name
<< " group id " << group_header_oid << dendl;
string image_id;
r = cls_client::dir_get_id(&image_ioctx, RBD_DIRECTORY, image_name,
&image_id);
if (r < 0) {
lderr(cct) << "error reading image id object: "
<< cpp_strerror(-r) << dendl;
return r;
}
string image_header_oid = util::header_name(image_id);
ldout(cct, 20) << "adding image " << image_name
<< " image id " << image_header_oid << dendl;
cls::rbd::GroupImageStatus incomplete_st(
image_id, image_ioctx.get_id(),
cls::rbd::GROUP_IMAGE_LINK_STATE_INCOMPLETE);
cls::rbd::GroupImageStatus attached_st(
image_id, image_ioctx.get_id(), cls::rbd::GROUP_IMAGE_LINK_STATE_ATTACHED);
r = cls_client::group_image_set(&group_ioctx, group_header_oid,
incomplete_st);
cls::rbd::GroupSpec group_spec(group_id, group_ioctx.get_id());
if (r < 0) {
lderr(cct) << "error adding image reference to group: "
<< cpp_strerror(-r) << dendl;
return r;
}
r = cls_client::image_group_add(&image_ioctx, image_header_oid, group_spec);
if (r < 0) {
lderr(cct) << "error adding group reference to image: "
<< cpp_strerror(-r) << dendl;
cls::rbd::GroupImageSpec spec(image_id, image_ioctx.get_id());
cls_client::group_image_remove(&group_ioctx, group_header_oid, spec);
// Ignore errors in the clean up procedure.
return r;
}
ImageWatcher<>::notify_header_update(image_ioctx, image_header_oid);
r = cls_client::group_image_set(&group_ioctx, group_header_oid,
attached_st);
return r;
}
template <typename I>
int Group<I>::image_remove(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << " image "
<< &image_ioctx << " name " << image_name << dendl;
if (group_ioctx.get_namespace() != image_ioctx.get_namespace()) {
lderr(cct) << "group and image cannot be in different namespaces" << dendl;
return -EINVAL;
}
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 20) << "removing image from group name " << group_name
<< " group id " << group_id << dendl;
string image_id;
r = cls_client::dir_get_id(&image_ioctx, RBD_DIRECTORY, image_name,
&image_id);
if (r < 0) {
lderr(cct) << "error reading image id object: "
<< cpp_strerror(-r) << dendl;
return r;
}
r = group_image_remove(group_ioctx, group_id, image_ioctx, image_id);
return r;
}
template <typename I>
int Group<I>::image_list(librados::IoCtx& group_ioctx,
const char *group_name,
std::vector<group_image_info_t>* images)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
ldout(cct, 20) << "io_ctx=" << &group_ioctx
<< " group name " << group_name << dendl;
std::vector<cls::rbd::GroupImageStatus> image_ids;
  int r = group_image_list(group_ioctx, group_name, &image_ids);
  if (r < 0) {
    return r;
  }
  for (auto image_id : image_ids) {
    IoCtx ioctx;
    r = util::create_ioctx(group_ioctx, "image", image_id.spec.pool_id, {},
                           &ioctx);
if (r < 0) {
return r;
}
std::string image_name;
r = cls_client::dir_get_name(&ioctx, RBD_DIRECTORY,
image_id.spec.image_id, &image_name);
if (r < 0) {
return r;
}
images->push_back(
group_image_info_t {
image_name,
ioctx.get_id(),
static_cast<group_image_state_t>(image_id.state)});
}
return 0;
}
template <typename I>
int Group<I>::rename(librados::IoCtx& io_ctx, const char *src_name,
const char *dest_name)
{
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "group_rename " << &io_ctx << " " << src_name
<< " -> " << dest_name << dendl;
std::string group_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_GROUP_DIRECTORY,
std::string(src_name), &group_id);
if (r < 0) {
if (r != -ENOENT)
lderr(cct) << "error getting id of group" << dendl;
return r;
}
r = cls_client::group_dir_rename(&io_ctx, RBD_GROUP_DIRECTORY,
src_name, dest_name, group_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error renaming group from directory" << dendl;
return r;
}
return 0;
}
template <typename I>
int Group<I>::image_get_group(I *ictx, group_info_t *group_info)
{
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
if (RBD_GROUP_INVALID_POOL != ictx->group_spec.pool_id) {
IoCtx ioctx;
r = util::create_ioctx(ictx->md_ctx, "group", ictx->group_spec.pool_id, {},
&ioctx);
if (r < 0) {
return r;
}
std::string group_name;
r = cls_client::dir_get_name(&ioctx, RBD_GROUP_DIRECTORY,
ictx->group_spec.group_id, &group_name);
if (r < 0)
return r;
group_info->pool = ioctx.get_id();
group_info->name = group_name;
} else {
group_info->pool = RBD_GROUP_INVALID_POOL;
group_info->name = "";
}
return 0;
}
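// Group snapshot creation proceeds in phases: record an INCOMPLETE group
// snapshot, open all member images, send quiesce notifications, acquire
// exclusive locks, create per-image snapshots under a
// GroupSnapshotNamespace, then flip the record to COMPLETE. Failures
// unwind through the remove_image_snaps/remove_record labels below.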
template <typename I>
int Group<I>::snap_create(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
uint32_t flags) {
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
cls::rbd::GroupSnapshot group_snap;
vector<cls::rbd::ImageSnapshotSpec> image_snaps;
std::string ind_snap_name;
std::vector<librbd::ImageCtx*> ictxs;
std::vector<C_SaferCond*> on_finishes;
std::vector<uint64_t> quiesce_requests;
NoOpProgressContext prog_ctx;
uint64_t internal_flags = 0;
int r = util::snap_create_flags_api_to_internal(cct, flags, &internal_flags);
if (r < 0) {
return r;
}
internal_flags &= ~(SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE |
SNAP_CREATE_FLAG_IGNORE_NOTIFY_QUIESCE_ERROR);
r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY, group_name,
&group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
std::vector<cls::rbd::GroupImageStatus> images;
r = group_image_list(group_ioctx, group_name, &images);
if (r < 0) {
return r;
}
int image_count = images.size();
ldout(cct, 20) << "Found " << image_count << " images in group" << dendl;
image_snaps = vector<cls::rbd::ImageSnapshotSpec>(image_count,
cls::rbd::ImageSnapshotSpec());
for (int i = 0; i < image_count; ++i) {
image_snaps[i].pool = images[i].spec.pool_id;
image_snaps[i].image_id = images[i].spec.image_id;
}
string group_header_oid = util::group_header_name(group_id);
group_snap.id = generate_uuid(group_ioctx);
group_snap.name = string(snap_name);
group_snap.state = cls::rbd::GROUP_SNAPSHOT_STATE_INCOMPLETE;
group_snap.snaps = image_snaps;
cls::rbd::GroupSnapshotNamespace ne{group_ioctx.get_id(), group_id,
group_snap.id};
r = cls_client::group_snap_set(&group_ioctx, group_header_oid, group_snap);
if (r == -EEXIST) {
lderr(cct) << "snapshot with this name already exists: "
<< cpp_strerror(r)
<< dendl;
}
int ret_code = 0;
if (r < 0) {
ret_code = r;
goto finish;
}
for (auto image: images) {
librbd::IoCtx image_io_ctx;
r = util::create_ioctx(group_ioctx, "image", image.spec.pool_id, {},
&image_io_ctx);
if (r < 0) {
ret_code = r;
goto finish;
}
ldout(cct, 20) << "Opening image with id " << image.spec.image_id << dendl;
librbd::ImageCtx* image_ctx = new ImageCtx("", image.spec.image_id.c_str(),
nullptr, image_io_ctx, false);
C_SaferCond* on_finish = new C_SaferCond;
image_ctx->state->open(0, on_finish);
ictxs.push_back(image_ctx);
on_finishes.push_back(on_finish);
}
ldout(cct, 20) << "Issued open request waiting for the completion" << dendl;
ret_code = 0;
for (int i = 0; i < image_count; ++i) {
ldout(cct, 20) << "Waiting for completion on on_finish: " <<
on_finishes[i] << dendl;
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ictxs[i] = nullptr;
ret_code = r;
}
}
if (ret_code != 0) {
goto remove_record;
}
if ((flags & RBD_SNAP_CREATE_SKIP_QUIESCE) == 0) {
ldout(cct, 20) << "Sending quiesce notification" << dendl;
ret_code = notify_quiesce(ictxs, prog_ctx, &quiesce_requests);
if (ret_code != 0 && (flags & RBD_SNAP_CREATE_IGNORE_QUIESCE_ERROR) == 0) {
goto remove_record;
}
}
ldout(cct, 20) << "Requesting exclusive locks for images" << dendl;
for (auto ictx: ictxs) {
std::shared_lock owner_lock{ictx->owner_lock};
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(-EBUSY);
}
}
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
std::shared_lock owner_lock{ictx->owner_lock};
on_finishes[i] = new C_SaferCond;
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->acquire_lock(on_finishes[i]);
}
}
ret_code = 0;
for (int i = 0; i < image_count; ++i) {
r = 0;
ImageCtx *ictx = ictxs[i];
if (ictx->exclusive_lock != nullptr) {
r = on_finishes[i]->wait();
}
delete on_finishes[i];
if (r < 0) {
ret_code = r;
}
}
if (ret_code != 0) {
notify_unquiesce(ictxs, quiesce_requests);
goto remove_record;
}
ind_snap_name = calc_ind_image_snap_name(group_ioctx.get_id(), group_id,
group_snap.id);
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
C_SaferCond* on_finish = new C_SaferCond;
std::shared_lock owner_locker{ictx->owner_lock};
ictx->operations->execute_snap_create(
ne, ind_snap_name.c_str(), on_finish, 0,
SNAP_CREATE_FLAG_SKIP_NOTIFY_QUIESCE, prog_ctx);
on_finishes[i] = on_finish;
}
ret_code = 0;
for (int i = 0; i < image_count; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
if (r < 0) {
ret_code = r;
} else {
ImageCtx *ictx = ictxs[i];
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
ictx->image_lock.unlock_shared();
if (snap_id == CEPH_NOSNAP) {
ldout(cct, 20) << "Couldn't find created snapshot with namespace: "
<< ne << dendl;
ret_code = -ENOENT;
} else {
image_snaps[i].snap_id = snapid_t(snap_id);
image_snaps[i].pool = ictx->md_ctx.get_id();
image_snaps[i].image_id = ictx->id;
}
}
}
if (ret_code != 0) {
goto remove_image_snaps;
}
group_snap.snaps = image_snaps;
group_snap.state = cls::rbd::GROUP_SNAPSHOT_STATE_COMPLETE;
r = cls_client::group_snap_set(&group_ioctx, group_header_oid, group_snap);
if (r < 0) {
ret_code = r;
goto remove_image_snaps;
}
ldout(cct, 20) << "Sending unquiesce notification" << dendl;
notify_unquiesce(ictxs, quiesce_requests);
goto finish;
remove_image_snaps:
notify_unquiesce(ictxs, quiesce_requests);
for (int i = 0; i < image_count; ++i) {
ImageCtx *ictx = ictxs[i];
ldout(cct, 20) << "Removing individual snapshot with name: " <<
ind_snap_name << dendl;
on_finishes[i] = new C_SaferCond;
std::string snap_name;
ictx->image_lock.lock_shared();
snap_t snap_id = get_group_snap_id(ictx, ne);
r = ictx->get_snap_name(snap_id, &snap_name);
ictx->image_lock.unlock_shared();
if (r >= 0) {
ictx->operations->snap_remove(ne, snap_name.c_str(), on_finishes[i]);
} else {
// Ignore missing image snapshots. The whole snapshot could have been
// inconsistent.
on_finishes[i]->complete(0);
}
}
for (int i = 0, n = on_finishes.size(); i < n; ++i) {
r = on_finishes[i]->wait();
delete on_finishes[i];
    if (r < 0 && r != -ENOENT) {
      // if previous attempts to remove this snapshot failed then the
      // image's snapshot may not exist
lderr(cct) << "Failed cleaning up image snapshot. Ret code: " << r << dendl;
// just report error, but don't abort the process
}
}
remove_record:
r = cls_client::group_snap_remove(&group_ioctx, group_header_oid,
group_snap.id);
if (r < 0) {
lderr(cct) << "error while cleaning up group snapshot" << dendl;
// we ignore return value in clean up
}
finish:
for (int i = 0, n = ictxs.size(); i < n; ++i) {
if (ictxs[i] != nullptr) {
ictxs[i]->state->close();
}
}
return ret_code;
}
template <typename I>
int Group<I>::snap_remove(librados::IoCtx& group_ioctx, const char *group_name,
const char *snap_name)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r)
<< dendl;
return r;
}
std::vector<cls::rbd::GroupSnapshot> snaps;
r = group_snap_list(group_ioctx, group_name, &snaps);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot *group_snap = nullptr;
for (auto &snap : snaps) {
if (snap.name == string(snap_name)) {
group_snap = &snap;
break;
}
}
if (group_snap == nullptr) {
return -ENOENT;
}
string group_header_oid = util::group_header_name(group_id);
r = group_snap_remove_by_record(group_ioctx, *group_snap, group_id,
group_header_oid);
return r;
}
template <typename I>
int Group<I>::snap_rename(librados::IoCtx& group_ioctx, const char *group_name,
const char *old_snap_name,
const char *new_snap_name) {
CephContext *cct = (CephContext *)group_ioctx.cct();
if (0 == strcmp(old_snap_name, new_snap_name))
return -EEXIST;
std::string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "error reading group id object: " << cpp_strerror(r) << dendl;
return r;
}
std::vector<cls::rbd::GroupSnapshot> group_snaps;
r = group_snap_list(group_ioctx, group_name, &group_snaps);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot group_snap;
for (auto &snap : group_snaps) {
if (snap.name == old_snap_name) {
group_snap = snap;
break;
}
}
if (group_snap.id.empty()) {
return -ENOENT;
}
std::string group_header_oid = util::group_header_name(group_id);
group_snap.name = new_snap_name;
r = cls_client::group_snap_set(&group_ioctx, group_header_oid, group_snap);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Group<I>::snap_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<group_snap_info_t> *snaps)
{
std::vector<cls::rbd::GroupSnapshot> cls_snaps;
int r = group_snap_list(group_ioctx, group_name, &cls_snaps);
if (r < 0) {
return r;
}
for (auto snap : cls_snaps) {
snaps->push_back(
group_snap_info_t {
snap.name,
static_cast<group_snap_state_t>(snap.state)});
}
return 0;
}
template <typename I>
int Group<I>::snap_rollback(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
ProgressContext& pctx)
{
CephContext *cct = (CephContext *)group_ioctx.cct();
string group_id;
int r = cls_client::dir_get_id(&group_ioctx, RBD_GROUP_DIRECTORY,
group_name, &group_id);
if (r < 0) {
lderr(cct) << "error reading group id object: "
<< cpp_strerror(r) << dendl;
return r;
}
std::vector<cls::rbd::GroupSnapshot> snaps;
r = group_snap_list(group_ioctx, group_name, &snaps);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot *group_snap = nullptr;
for (auto &snap : snaps) {
if (snap.name == string(snap_name)) {
group_snap = &snap;
break;
}
}
if (group_snap == nullptr) {
return -ENOENT;
}
string group_header_oid = util::group_header_name(group_id);
r = group_snap_rollback_by_record(group_ioctx, *group_snap, group_id,
group_header_oid, pctx);
return r;
}
} // namespace api
} // namespace librbd
template class librbd::api::Group<librbd::ImageCtx>;
| 35,799 | 26.795031 | 127 |
cc
|
null |
ceph-main/src/librbd/api/Group.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_GROUP_H
#define CEPH_LIBRBD_API_GROUP_H
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
#include <string>
#include <vector>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Group {
static int create(librados::IoCtx& io_ctx, const char *group_name);
static int remove(librados::IoCtx& io_ctx, const char *group_name);
static int list(librados::IoCtx& io_ctx, std::vector<std::string> *names);
static int rename(librados::IoCtx& io_ctx, const char *src_group_name,
const char *dest_group_name);
static int image_add(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name);
static int image_remove(librados::IoCtx& group_ioctx, const char *group_name,
librados::IoCtx& image_ioctx, const char *image_name);
static int image_remove_by_id(librados::IoCtx& group_ioctx,
const char *group_name,
librados::IoCtx& image_ioctx,
const char *image_id);
static int image_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<group_image_info_t> *images);
static int image_get_group(ImageCtxT *ictx, group_info_t *group_info);
static int snap_create(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
uint32_t flags);
static int snap_remove(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name);
static int snap_rename(librados::IoCtx& group_ioctx, const char *group_name,
const char *old_snap_name, const char *new_snap_name);
static int snap_list(librados::IoCtx& group_ioctx, const char *group_name,
std::vector<group_snap_info_t> *snaps);
static int snap_rollback(librados::IoCtx& group_ioctx,
const char *group_name, const char *snap_name,
ProgressContext& pctx);
};
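// Illustrative call sequence -- a sketch, not normative documentation.
// It assumes an initialized librados::IoCtx "io_ctx" and a
// librbd::ProgressContext "pctx" supplied by the caller:
//
//   librbd::api::Group<>::create(io_ctx, "mygroup");
//   librbd::api::Group<>::image_add(io_ctx, "mygroup", io_ctx, "myimage");
//   librbd::api::Group<>::snap_create(io_ctx, "mygroup", "mysnap", 0);
//   librbd::api::Group<>::snap_rollback(io_ctx, "mygroup", "mysnap", pctx);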
} // namespace api
} // namespace librbd
extern template class librbd::api::Group<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_GROUP_H
| 2,386 | 38.131148 | 79 |
h
|
null |
ceph-main/src/librbd/api/Image.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Image.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/DeepCopyRequest.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Trash.h"
#include "librbd/api/Utils.h"
#include "librbd/crypto/FormatRequest.h"
#include "librbd/crypto/LoadRequest.h"
#include "librbd/deep_copy/Handler.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/image/PreRemoveRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Image: " << __func__ << ": "
using std::map;
using std::string;
using librados::snap_t;
namespace librbd {
namespace api {
namespace {
bool compare_by_pool(const librbd::linked_image_spec_t& lhs,
const librbd::linked_image_spec_t& rhs)
{
if (lhs.pool_id != rhs.pool_id) {
return lhs.pool_id < rhs.pool_id;
} else if (lhs.pool_namespace != rhs.pool_namespace) {
return lhs.pool_namespace < rhs.pool_namespace;
}
return false;
}
bool compare(const librbd::linked_image_spec_t& lhs,
const librbd::linked_image_spec_t& rhs)
{
if (lhs.pool_name != rhs.pool_name) {
return lhs.pool_name < rhs.pool_name;
} else if (lhs.pool_id != rhs.pool_id) {
return lhs.pool_id < rhs.pool_id;
} else if (lhs.pool_namespace != rhs.pool_namespace) {
return lhs.pool_namespace < rhs.pool_namespace;
} else if (lhs.image_name != rhs.image_name) {
return lhs.image_name < rhs.image_name;
} else if (lhs.image_id != rhs.image_id) {
return lhs.image_id < rhs.image_id;
}
return false;
}
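// Opens the image (skipping its parent) and runs PreRemoveRequest to
// validate that removal can proceed -- e.g. that no clone children
// remain -- before anything destructive happens.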
template <typename I>
int pre_remove_image(librados::IoCtx& io_ctx, const std::string& image_id) {
I *image_ctx = I::create("", image_id, nullptr, io_ctx, false);
int r = image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0) {
return r;
}
C_SaferCond ctx;
auto req = image::PreRemoveRequest<I>::create(image_ctx, false, &ctx);
req->send();
r = ctx.wait();
image_ctx->state->close();
return r;
}
} // anonymous namespace
template <typename I>
int64_t Image<I>::get_data_pool_id(I *ictx) {
if (ictx->data_ctx.is_valid()) {
return ictx->data_ctx.get_id();
}
int64_t pool_id;
int r = cls_client::get_data_pool(&ictx->md_ctx, ictx->header_oid, &pool_id);
if (r < 0) {
CephContext *cct = ictx->cct;
lderr(cct) << "error getting data pool ID: " << cpp_strerror(r) << dendl;
return r;
}
return pool_id;
}
template <typename I>
int Image<I>::get_op_features(I *ictx, uint64_t *op_features) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "image_ctx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
*op_features = ictx->op_features;
return 0;
}
template <typename I>
int Image<I>::list_images(librados::IoCtx& io_ctx,
std::vector<image_spec_t> *images) {
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "list " << &io_ctx << dendl;
int r;
images->clear();
if (io_ctx.get_namespace().empty()) {
bufferlist bl;
r = io_ctx.read(RBD_DIRECTORY, bl, 0, 0);
if (r == -ENOENT) {
return 0;
} else if (r < 0) {
lderr(cct) << "error listing v1 images: " << cpp_strerror(r) << dendl;
return r;
}
// V1 format images are in a tmap
if (bl.length()) {
auto p = bl.cbegin();
bufferlist header;
std::map<std::string, bufferlist> m;
decode(header, p);
decode(m, p);
for (auto& it : m) {
images->push_back({.id ="", .name = it.first});
}
}
}
// V2 format images
std::map<std::string, std::string> image_names_to_ids;
r = list_images_v2(io_ctx, &image_names_to_ids);
if (r < 0) {
lderr(cct) << "error listing v2 images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& img_pair : image_names_to_ids) {
images->push_back({.id = img_pair.second,
.name = img_pair.first});
}
// include V2 images in a partially removed state
std::vector<librbd::trash_image_info_t> trash_images;
r = Trash<I>::list(io_ctx, trash_images, false);
if (r < 0 && r != -EOPNOTSUPP) {
lderr(cct) << "error listing trash images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& trash_image : trash_images) {
if (trash_image.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
images->push_back({.id = trash_image.id,
.name = trash_image.name});
}
}
return 0;
}
template <typename I>
int Image<I>::list_images_v2(librados::IoCtx& io_ctx, ImageNameToIds *images) {
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 20) << "io_ctx=" << &io_ctx << dendl;
// new format images are accessed by class methods
int r;
int max_read = 1024;
string last_read = "";
do {
map<string, string> images_page;
r = cls_client::dir_list(&io_ctx, RBD_DIRECTORY, last_read, max_read,
&images_page);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing image in directory: "
<< cpp_strerror(r) << dendl;
return r;
} else if (r == -ENOENT) {
break;
}
for (map<string, string>::const_iterator it = images_page.begin();
it != images_page.end(); ++it) {
images->insert(*it);
}
if (!images_page.empty()) {
last_read = images_page.rbegin()->first;
}
r = images_page.size();
} while (r == max_read);
return 0;
}
template <typename I>
int Image<I>::get_parent(I *ictx,
librbd::linked_image_spec_t *parent_image,
librbd::snap_spec_t *parent_snap) {
auto cct = ictx->cct;
ldout(cct, 20) << "image_ctx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
bool release_image_lock = false;
BOOST_SCOPE_EXIT_ALL(ictx, &release_image_lock) {
if (release_image_lock) {
ictx->parent->image_lock.unlock_shared();
}
};
// if a migration is in-progress, the true parent is the parent
// of the migration source image
auto parent = ictx->parent;
if (!ictx->migration_info.empty() && ictx->parent != nullptr) {
release_image_lock = true;
ictx->parent->image_lock.lock_shared();
parent = ictx->parent->parent;
}
if (parent == nullptr) {
return -ENOENT;
}
parent_image->pool_id = parent->md_ctx.get_id();
parent_image->pool_name = parent->md_ctx.get_pool_name();
parent_image->pool_namespace = parent->md_ctx.get_namespace();
std::shared_lock parent_image_locker{parent->image_lock};
parent_snap->id = parent->snap_id;
parent_snap->namespace_type = RBD_SNAP_NAMESPACE_TYPE_USER;
if (parent->snap_id != CEPH_NOSNAP) {
auto snap_info = parent->get_snap_info(parent->snap_id);
if (snap_info == nullptr) {
lderr(cct) << "error finding parent snap name: " << cpp_strerror(r)
<< dendl;
return -ENOENT;
}
parent_snap->namespace_type = static_cast<snap_namespace_type_t>(
cls::rbd::get_snap_namespace_type(snap_info->snap_namespace));
parent_snap->name = snap_info->name;
}
parent_image->image_id = parent->id;
parent_image->image_name = parent->name;
parent_image->trash = true;
librbd::trash_image_info_t trash_info;
r = Trash<I>::get(parent->md_ctx, parent->id, &trash_info);
if (r == -ENOENT || r == -EOPNOTSUPP) {
parent_image->trash = false;
} else if (r < 0) {
lderr(cct) << "error looking up trash status: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Image<I>::list_children(I *ictx,
std::vector<librbd::linked_image_spec_t> *images) {
images->clear();
return list_descendants(ictx, 1, images);
}
template <typename I>
int Image<I>::list_children(I *ictx,
const cls::rbd::ParentImageSpec &parent_spec,
std::vector<librbd::linked_image_spec_t> *images) {
images->clear();
return list_descendants(ictx, parent_spec, 1, images);
}
template <typename I>
int Image<I>::list_descendants(
librados::IoCtx& io_ctx, const std::string &image_id,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
ImageCtx *ictx = new librbd::ImageCtx("", image_id, nullptr,
io_ctx, true);
CephContext *cct = ictx->cct;
int r = ictx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0) {
if (r == -ENOENT) {
return 0;
}
lderr(cct) << "failed to open descendant " << image_id
<< " from pool " << io_ctx.get_pool_name() << ":"
<< cpp_strerror(r) << dendl;
return r;
}
r = list_descendants(ictx, max_level, images);
int r1 = ictx->state->close();
if (r1 < 0) {
lderr(cct) << "error when closing descendant " << image_id
<< " from pool " << io_ctx.get_pool_name() << ":"
<< cpp_strerror(r1) << dendl;
}
return r;
}
template <typename I>
int Image<I>::list_descendants(
I *ictx, const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
std::shared_lock l{ictx->image_lock};
std::vector<librados::snap_t> snap_ids;
if (ictx->snap_id != CEPH_NOSNAP) {
snap_ids.push_back(ictx->snap_id);
} else {
snap_ids = ictx->snaps;
}
for (auto snap_id : snap_ids) {
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
ictx->id, snap_id};
int r = list_descendants(ictx, parent_spec, max_level, images);
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
int Image<I>::list_descendants(
I *ictx, const cls::rbd::ParentImageSpec &parent_spec,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images) {
auto child_max_level = max_level;
if (child_max_level) {
    if (*child_max_level == 0) {
return 0;
}
(*child_max_level)--;
}
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
// no children for non-layered or old format image
if (!ictx->test_features(RBD_FEATURE_LAYERING, ictx->image_lock)) {
return 0;
}
librados::Rados rados(ictx->md_ctx);
// search all pools for clone v1 children dependent on this snapshot
std::list<std::pair<int64_t, std::string> > pools;
int r = rados.pool_list2(pools);
if (r < 0) {
lderr(cct) << "error listing pools: " << cpp_strerror(r) << dendl;
return r;
}
for (auto& it : pools) {
int64_t base_tier;
r = rados.pool_get_base_tier(it.first, &base_tier);
if (r == -ENOENT) {
ldout(cct, 1) << "pool " << it.second << " no longer exists" << dendl;
continue;
} else if (r < 0) {
lderr(cct) << "error retrieving base tier for pool " << it.second
<< dendl;
return r;
}
if (it.first != base_tier) {
// pool is a cache; skip it
continue;
}
IoCtx ioctx;
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", it.first, {}, &ioctx);
if (r == -ENOENT) {
continue;
} else if (r < 0) {
return r;
}
std::set<std::string> image_ids;
r = cls_client::get_children(&ioctx, RBD_CHILDREN, parent_spec,
image_ids);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error reading list of children from pool " << it.second
<< dendl;
return r;
}
for (auto& image_id : image_ids) {
images->push_back({
it.first, "", ictx->md_ctx.get_namespace(), image_id, "", false});
r = list_descendants(ioctx, image_id, child_max_level, images);
if (r < 0) {
return r;
}
}
}
// retrieve clone v2 children attached to this snapshot
IoCtx parent_io_ctx;
r = librbd::util::create_ioctx(
ictx->md_ctx, "parent image",parent_spec.pool_id,
parent_spec.pool_namespace, &parent_io_ctx);
if (r < 0) {
return r;
}
cls::rbd::ChildImageSpecs child_images;
r = cls_client::children_list(
&parent_io_ctx, librbd::util::header_name(parent_spec.image_id),
parent_spec.snap_id, &child_images);
if (r < 0 && r != -ENOENT && r != -EOPNOTSUPP) {
lderr(cct) << "error retrieving children: " << cpp_strerror(r) << dendl;
return r;
}
for (auto& child_image : child_images) {
images->push_back({
child_image.pool_id, "", child_image.pool_namespace,
child_image.image_id, "", false});
if (!child_max_level || *child_max_level > 0) {
IoCtx ioctx;
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", child_image.pool_id,
child_image.pool_namespace, &ioctx);
if (r == -ENOENT) {
continue;
} else if (r < 0) {
return r;
}
r = list_descendants(ioctx, child_image.image_id, child_max_level,
images);
if (r < 0) {
return r;
}
}
}
// batch lookups by pool + namespace
std::sort(images->begin(), images->end(), compare_by_pool);
int64_t child_pool_id = -1;
librados::IoCtx child_io_ctx;
std::map<std::string, std::pair<std::string, bool>> child_image_id_to_info;
for (auto& image : *images) {
if (child_pool_id == -1 || child_pool_id != image.pool_id ||
child_io_ctx.get_namespace() != image.pool_namespace) {
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", image.pool_id, image.pool_namespace,
&child_io_ctx);
if (r == -ENOENT) {
image.pool_name = "";
image.image_name = "";
continue;
} else if (r < 0) {
return r;
}
child_pool_id = image.pool_id;
child_image_id_to_info.clear();
std::map<std::string, std::string> image_names_to_ids;
r = list_images_v2(child_io_ctx, &image_names_to_ids);
if (r < 0) {
lderr(cct) << "error listing v2 images: " << cpp_strerror(r) << dendl;
return r;
}
for (auto& [name, id] : image_names_to_ids) {
child_image_id_to_info.insert({id, {name, false}});
}
std::vector<librbd::trash_image_info_t> trash_images;
r = Trash<I>::list(child_io_ctx, trash_images, false);
if (r < 0 && r != -EOPNOTSUPP) {
lderr(cct) << "error listing trash images: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto& it : trash_images) {
child_image_id_to_info.insert({
it.id,
          {it.name,
           it.source != RBD_TRASH_IMAGE_SOURCE_REMOVING}});
}
}
auto it = child_image_id_to_info.find(image.image_id);
if (it == child_image_id_to_info.end()) {
lderr(cct) << "error looking up name for image id "
<< image.image_id << " in pool "
<< child_io_ctx.get_pool_name()
<< (image.pool_namespace.empty() ?
"" : "/" + image.pool_namespace) << dendl;
return -ENOENT;
}
image.pool_name = child_io_ctx.get_pool_name();
image.image_name = it->second.first;
image.trash = it->second.second;
}
// final sort by pool + image names
std::sort(images->begin(), images->end(), compare);
return 0;
}
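// Deep copy, as implemented below: the destination header is created as
// a clone of the source's parent (or standalone when flattening), the
// exclusive lock is acquired on the new image, and DeepCopyRequest then
// replicates data and snapshots up to the source's currently set
// snapshot (or HEAD).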
template <typename I>
int Image<I>::deep_copy(I *src, librados::IoCtx& dest_md_ctx,
const char *destname, ImageOptions& opts,
ProgressContext &prog_ctx) {
CephContext *cct = (CephContext *)dest_md_ctx.cct();
ldout(cct, 20) << src->name
<< (src->snap_name.length() ? "@" + src->snap_name : "")
<< " -> " << destname << " opts = " << opts << dendl;
uint64_t features;
uint64_t src_size;
{
std::shared_lock image_locker{src->image_lock};
if (!src->migration_info.empty()) {
lderr(cct) << "cannot deep copy migrating image" << dendl;
return -EBUSY;
}
features = src->features;
src_size = src->get_image_size(src->snap_id);
}
uint64_t format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
}
if (format == 1) {
lderr(cct) << "old format not supported for destination image" << dendl;
return -EINVAL;
}
uint64_t stripe_unit = src->stripe_unit;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
}
uint64_t stripe_count = src->stripe_count;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
}
uint64_t order = src->order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
opts.set(RBD_IMAGE_OPTION_ORDER, order);
}
if (opts.get(RBD_IMAGE_OPTION_FEATURES, &features) != 0) {
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
}
if (features & ~RBD_FEATURES_ALL) {
lderr(cct) << "librbd does not support requested features" << dendl;
return -ENOSYS;
}
uint64_t flatten = 0;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &flatten) == 0) {
opts.unset(RBD_IMAGE_OPTION_FLATTEN);
}
cls::rbd::ParentImageSpec parent_spec;
if (flatten > 0) {
parent_spec.pool_id = -1;
} else {
std::shared_lock image_locker{src->image_lock};
// use oldest snapshot or HEAD for parent spec
if (!src->snap_info.empty()) {
parent_spec = src->snap_info.begin()->second.parent.spec;
} else {
parent_spec = src->parent_md.spec;
}
}
int r;
if (parent_spec.pool_id == -1) {
r = create(dest_md_ctx, destname, "", src_size, opts, "", "", false);
} else {
librados::IoCtx parent_io_ctx;
r = librbd::util::create_ioctx(
src->md_ctx, "parent image", parent_spec.pool_id,
parent_spec.pool_namespace, &parent_io_ctx);
if (r < 0) {
return r;
}
ConfigProxy config{cct->_conf};
api::Config<I>::apply_pool_overrides(dest_md_ctx, &config);
C_SaferCond ctx;
std::string dest_id = librbd::util::generate_image_id(dest_md_ctx);
auto *req = image::CloneRequest<I>::create(
config, parent_io_ctx, parent_spec.image_id, "", {}, parent_spec.snap_id,
dest_md_ctx, destname, dest_id, opts, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL,
"", "", src->op_work_queue, &ctx);
req->send();
r = ctx.wait();
}
if (r < 0) {
lderr(cct) << "header creation failed" << dendl;
return r;
}
opts.set(RBD_IMAGE_OPTION_ORDER, static_cast<uint64_t>(order));
auto dest = new I(destname, "", nullptr, dest_md_ctx, false);
r = dest->state->open(0);
if (r < 0) {
lderr(cct) << "failed to read newly created header" << dendl;
return r;
}
C_SaferCond lock_ctx;
{
std::unique_lock locker{dest->owner_lock};
if (dest->exclusive_lock == nullptr ||
dest->exclusive_lock->is_lock_owner()) {
lock_ctx.complete(0);
} else {
dest->exclusive_lock->acquire_lock(&lock_ctx);
}
}
r = lock_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to request exclusive lock: " << cpp_strerror(r)
<< dendl;
dest->state->close();
return r;
}
r = deep_copy(src, dest, flatten > 0, prog_ctx);
int close_r = dest->state->close();
if (r == 0 && close_r < 0) {
r = close_r;
}
return r;
}
template <typename I>
int Image<I>::deep_copy(I *src, I *dest, bool flatten,
ProgressContext &prog_ctx) {
// ensure previous writes are visible to dest
C_SaferCond flush_ctx;
{
std::shared_lock owner_locker{src->owner_lock};
auto aio_comp = io::AioCompletion::create_and_start(&flush_ctx, src,
io::AIO_TYPE_FLUSH);
auto req = io::ImageDispatchSpec::create_flush(
*src, io::IMAGE_DISPATCH_LAYER_INTERNAL_START,
aio_comp, io::FLUSH_SOURCE_INTERNAL, {});
req->send();
}
int r = flush_ctx.wait();
if (r < 0) {
return r;
}
librados::snap_t snap_id_start = 0;
librados::snap_t snap_id_end;
{
std::shared_lock image_locker{src->image_lock};
snap_id_end = src->snap_id;
}
AsioEngine asio_engine(src->md_ctx);
C_SaferCond cond;
SnapSeqs snap_seqs;
deep_copy::ProgressHandler progress_handler{&prog_ctx};
auto req = DeepCopyRequest<I>::create(
src, dest, snap_id_start, snap_id_end, 0U, flatten, boost::none,
asio_engine.get_work_queue(), &snap_seqs, &progress_handler, &cond);
req->send();
r = cond.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Image<I>::snap_set(I *ictx,
const cls::rbd::SnapshotNamespace &snap_namespace,
const char *snap_name) {
ldout(ictx->cct, 20) << "snap_set " << ictx << " snap = "
<< (snap_name ? snap_name : "NULL") << dendl;
// ignore return value, since we may be set to a non-existent
// snapshot and the user is trying to fix that
ictx->state->refresh_if_required();
uint64_t snap_id = CEPH_NOSNAP;
std::string name(snap_name == nullptr ? "" : snap_name);
if (!name.empty()) {
std::shared_lock image_locker{ictx->image_lock};
snap_id = ictx->get_snap_id(snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP) {
return -ENOENT;
}
}
return snap_set(ictx, snap_id);
}
template <typename I>
int Image<I>::snap_set(I *ictx, uint64_t snap_id) {
ldout(ictx->cct, 20) << "snap_set " << ictx << " "
<< "snap_id=" << snap_id << dendl;
// ignore return value, since we may be set to a non-existent
// snapshot and the user is trying to fix that
ictx->state->refresh_if_required();
C_SaferCond ctx;
ictx->state->snap_set(snap_id, &ctx);
int r = ctx.wait();
if (r < 0) {
if (r != -ENOENT) {
lderr(ictx->cct) << "failed to " << (snap_id == CEPH_NOSNAP ? "un" : "")
<< "set snapshot: " << cpp_strerror(r) << dendl;
}
return r;
}
return 0;
}
template <typename I>
int Image<I>::remove(IoCtx& io_ctx, const std::string &image_name,
ProgressContext& prog_ctx)
{
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "name=" << image_name << dendl;
// look up the V2 image id based on the image name
std::string image_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_DIRECTORY, image_name,
&image_id);
if (r == -ENOENT) {
// check if it already exists in trash from an aborted trash remove attempt
std::vector<trash_image_info_t> trash_entries;
r = Trash<I>::list(io_ctx, trash_entries, false);
if (r < 0) {
return r;
}
for (auto& entry : trash_entries) {
if (entry.name == image_name &&
entry.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
cls::rbd::TrashImageSpec spec;
r = cls_client::trash_get(&io_ctx, entry.id, &spec);
if (r < 0) {
lderr(cct) << "error getting image id " << entry.id
<< " info from trash: " << cpp_strerror(r) << dendl;
return r;
}
if (spec.state == cls::rbd::TRASH_IMAGE_STATE_MOVING) {
r = Trash<I>::move(io_ctx, entry.source, entry.name, entry.id, 0);
if (r < 0) {
return r;
}
}
return Trash<I>::remove(io_ctx, entry.id, true, prog_ctx);
}
}
// fall-through if we failed to locate the image in the V2 directory and
// trash
} else if (r < 0) {
lderr(cct) << "failed to retrieve image id: " << cpp_strerror(r) << dendl;
return r;
} else {
// attempt to move the image to the trash (and optionally immediately
// delete the image)
ConfigProxy config(cct->_conf);
Config<I>::apply_pool_overrides(io_ctx, &config);
rbd_trash_image_source_t trash_image_source =
RBD_TRASH_IMAGE_SOURCE_REMOVING;
uint64_t expire_seconds = 0;
if (config.get_val<bool>("rbd_move_to_trash_on_remove")) {
// keep the image in the trash upon remove requests
trash_image_source = RBD_TRASH_IMAGE_SOURCE_USER;
expire_seconds = config.get_val<uint64_t>(
"rbd_move_to_trash_on_remove_expire_seconds");
} else {
// attempt to pre-validate the removal before moving to trash and
// removing
r = pre_remove_image<I>(io_ctx, image_id);
if (r == -ECHILD) {
if (config.get_val<bool>("rbd_move_parent_to_trash_on_remove")) {
// keep the image in the trash until the last child is removed
trash_image_source = RBD_TRASH_IMAGE_SOURCE_USER_PARENT;
} else {
lderr(cct) << "image has snapshots - not removing" << dendl;
return -ENOTEMPTY;
}
} else if (r < 0 && r != -ENOENT) {
return r;
}
}
r = Trash<I>::move(io_ctx, trash_image_source, image_name, image_id,
expire_seconds);
if (r >= 0) {
if (trash_image_source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
// proceed with attempting to immediately remove the image
r = Trash<I>::remove(io_ctx, image_id, true, prog_ctx);
if (r == -ENOTEMPTY || r == -EBUSY || r == -EMLINK) {
// best-effort try to restore the image if the removal
// failed for possible expected reasons
Trash<I>::restore(io_ctx, {cls::rbd::TRASH_IMAGE_SOURCE_REMOVING},
image_id, image_name);
}
}
return r;
} else if (r < 0 && r != -EOPNOTSUPP) {
return r;
}
// fall-through if trash isn't supported
}
AsioEngine asio_engine(io_ctx);
// might be a V1 image format that cannot be moved to the trash
// and would not have been listed in the V2 directory -- or the OSDs
// are too old and don't support the trash feature
C_SaferCond cond;
auto req = librbd::image::RemoveRequest<I>::create(
io_ctx, image_name, "", false, false, prog_ctx,
asio_engine.get_work_queue(), &cond);
req->send();
return cond.wait();
}
template <typename I>
int Image<I>::flatten_children(I *ictx, const char* snap_name,
ProgressContext& pctx) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "children flatten " << ictx->name << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
snap_name);
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
ictx->id, snap_id};
std::vector<librbd::linked_image_spec_t> child_images;
r = list_children(ictx, parent_spec, &child_images);
if (r < 0) {
return r;
}
size_t size = child_images.size();
if (size == 0) {
return 0;
}
librados::IoCtx child_io_ctx;
int64_t child_pool_id = -1;
size_t i = 0;
for (auto &child_image : child_images){
std::string pool = child_image.pool_name;
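    // re-use the child ioctx across iterations; only rebuild it when the
    // pool or namespace changes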
if (child_pool_id == -1 ||
child_pool_id != child_image.pool_id ||
child_io_ctx.get_namespace() != child_image.pool_namespace) {
r = librbd::util::create_ioctx(
ictx->md_ctx, "child image", child_image.pool_id,
child_image.pool_namespace, &child_io_ctx);
if (r < 0) {
return r;
}
child_pool_id = child_image.pool_id;
}
ImageCtx *imctx = new ImageCtx("", child_image.image_id, nullptr,
child_io_ctx, false);
r = imctx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening image: " << cpp_strerror(r) << dendl;
return r;
}
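    // without deep-flatten, the child's snapshots would still reference the
    // parent after a flatten, so refuse to flatten such clones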
if ((imctx->features & RBD_FEATURE_DEEP_FLATTEN) == 0 &&
!imctx->snaps.empty()) {
lderr(cct) << "snapshot in-use by " << pool << "/" << imctx->name
<< dendl;
imctx->state->close();
return -EBUSY;
}
librbd::NoOpProgressContext prog_ctx;
r = imctx->operations->flatten(prog_ctx);
if (r < 0) {
lderr(cct) << "error flattening image: " << pool << "/"
<< (child_image.pool_namespace.empty() ?
"" : "/" + child_image.pool_namespace)
<< child_image.image_name << cpp_strerror(r) << dendl;
imctx->state->close();
return r;
}
r = imctx->state->close();
if (r < 0) {
lderr(cct) << "failed to close image: " << cpp_strerror(r) << dendl;
return r;
}
pctx.update_progress(++i, size);
ceph_assert(i <= size);
}
return 0;
}
template <typename I>
int Image<I>::encryption_format(I* ictx, encryption_format_t format,
encryption_options_t opts, size_t opts_size,
bool c_api) {
crypto::EncryptionFormat<I>* result_format;
auto r = util::create_encryption_format(
ictx->cct, format, opts, opts_size, c_api, &result_format);
if (r != 0) {
return r;
}
C_SaferCond cond;
auto req = librbd::crypto::FormatRequest<I>::create(
ictx, std::unique_ptr<crypto::EncryptionFormat<I>>(result_format),
&cond);
req->send();
return cond.wait();
}
template <typename I>
int Image<I>::encryption_load(I* ictx, const encryption_spec_t *specs,
size_t spec_count, bool c_api) {
std::vector<std::unique_ptr<crypto::EncryptionFormat<I>>> formats;
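  // build one format handler per supplied spec (callers may pass one spec
  // per layer of a cloned-image ancestry)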
for (size_t i = 0; i < spec_count; ++i) {
crypto::EncryptionFormat<I>* result_format;
auto r = util::create_encryption_format(
ictx->cct, specs[i].format, specs[i].opts, specs[i].opts_size,
c_api, &result_format);
if (r != 0) {
return r;
}
formats.emplace_back(result_format);
}
C_SaferCond cond;
auto req = librbd::crypto::LoadRequest<I>::create(
ictx, std::move(formats), &cond);
req->send();
return cond.wait();
}
} // namespace api
} // namespace librbd
template class librbd::api::Image<librbd::ImageCtx>;
| 30,982 | 29.495079 | 80 |
cc
|
null |
ceph-main/src/librbd/api/Image.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_IMAGE_H
#define LIBRBD_API_IMAGE_H
#include "include/rbd/librbd.hpp"
#include "include/rados/librados_fwd.hpp"
#include "librbd/Types.h"
#include <map>
#include <set>
#include <string>
namespace librbd {
class ImageOptions;
class ProgressContext;
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Image {
typedef std::map<std::string, std::string> ImageNameToIds;
static int64_t get_data_pool_id(ImageCtxT *ictx);
static int get_op_features(ImageCtxT *ictx, uint64_t *op_features);
static int list_images(librados::IoCtx& io_ctx,
std::vector<image_spec_t> *images);
static int list_images_v2(librados::IoCtx& io_ctx,
ImageNameToIds *images);
static int get_parent(ImageCtxT *ictx,
librbd::linked_image_spec_t *parent_image,
librbd::snap_spec_t *parent_snap);
static int list_children(ImageCtxT *ictx,
std::vector<librbd::linked_image_spec_t> *images);
static int list_children(ImageCtxT *ictx,
const cls::rbd::ParentImageSpec &parent_spec,
std::vector<librbd::linked_image_spec_t> *images);
static int list_descendants(IoCtx& io_ctx, const std::string &image_id,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images);
static int list_descendants(ImageCtxT *ictx,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images);
static int list_descendants(ImageCtxT *ictx,
const cls::rbd::ParentImageSpec &parent_spec,
const std::optional<size_t> &max_level,
std::vector<librbd::linked_image_spec_t> *images);
static int deep_copy(ImageCtxT *ictx, librados::IoCtx& dest_md_ctx,
const char *destname, ImageOptions& opts,
ProgressContext &prog_ctx);
static int deep_copy(ImageCtxT *src, ImageCtxT *dest, bool flatten,
ProgressContext &prog_ctx);
static int snap_set(ImageCtxT *ictx,
const cls::rbd::SnapshotNamespace &snap_namespace,
const char *snap_name);
static int snap_set(ImageCtxT *ictx, uint64_t snap_id);
static int remove(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext& prog_ctx);
static int flatten_children(ImageCtxT *ictx, const char* snap_name, ProgressContext& pctx);
static int encryption_format(ImageCtxT *ictx, encryption_format_t format,
encryption_options_t opts, size_t opts_size,
bool c_api);
static int encryption_load(ImageCtxT *ictx, const encryption_spec_t *specs,
size_t spec_count, bool c_api);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Image<librbd::ImageCtx>;
#endif // LIBRBD_API_IMAGE_H
| 3,265 | 36.976744 | 93 |
h
|
null |
ceph-main/src/librbd/api/Io.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Io.h"
#include "include/intarith.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/EventTrace.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/Types.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Io " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
template <typename I>
bool is_valid_io(I& image_ctx, io::AioCompletion* aio_comp) {
auto cct = image_ctx.cct;
if (!image_ctx.data_ctx.is_valid()) {
lderr(cct) << "missing data pool" << dendl;
aio_comp->fail(-ENODEV);
return false;
}
return true;
}
} // anonymous namespace
template <typename I>
ssize_t Io<I>::read(
I &image_ctx, uint64_t off, uint64_t len, io::ReadResult &&read_result,
int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_read(image_ctx, aio_comp, off, len, std::move(read_result), op_flags,
false);
return ctx.wait();
}
template <typename I>
ssize_t Io<I>::write(
I &image_ctx, uint64_t off, uint64_t len, bufferlist &&bl, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_write(image_ctx, aio_comp, off, len, std::move(bl), op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::discard(
I &image_ctx, uint64_t off, uint64_t len,
uint32_t discard_granularity_bytes) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_discard(image_ctx, aio_comp, off, len, discard_granularity_bytes, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::write_same(
I &image_ctx, uint64_t off, uint64_t len, bufferlist &&bl, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << ", data_len " << bl.length() << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_write_same(image_ctx, aio_comp, off, len, std::move(bl), op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::write_zeroes(I& image_ctx, uint64_t off, uint64_t len,
int zero_flags, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << ", off=" << off << ", "
<< "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_write_zeroes(image_ctx, aio_comp, off, len, zero_flags, op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
ssize_t Io<I>::compare_and_write(
I &image_ctx, uint64_t off, uint64_t len, bufferlist &&cmp_bl,
bufferlist &&bl, uint64_t *mismatch_off, int op_flags) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "compare_and_write ictx=" << &image_ctx << ", off="
<< off << ", " << "len = " << len << dendl;
image_ctx.image_lock.lock_shared();
int r = clip_io(util::get_image_ctx(&image_ctx), off, &len,
io::ImageArea::DATA);
image_ctx.image_lock.unlock_shared();
if (r < 0) {
lderr(cct) << "invalid IO request: " << cpp_strerror(r) << dendl;
return r;
}
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_compare_and_write(image_ctx, aio_comp, off, len, std::move(cmp_bl),
std::move(bl), mismatch_off, op_flags, false);
r = ctx.wait();
if (r < 0) {
return r;
}
return len;
}
template <typename I>
int Io<I>::flush(I &image_ctx) {
auto cct = image_ctx.cct;
ldout(cct, 20) << "ictx=" << &image_ctx << dendl;
C_SaferCond ctx;
auto aio_comp = io::AioCompletion::create(&ctx);
aio_flush(image_ctx, aio_comp, false);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Io<I>::aio_read(I &image_ctx, io::AioCompletion *aio_comp, uint64_t off,
uint64_t len, io::ReadResult &&read_result, int op_flags,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: read", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_READ);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << ", " << "flags=" << op_flags << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_read(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(read_result),
image_ctx.get_data_io_context(), op_flags, 0, trace);
req->send();
}
template <typename I>
void Io<I>::aio_write(I &image_ctx, io::AioCompletion *aio_comp, uint64_t off,
uint64_t len, bufferlist &&bl, int op_flags,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: write", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_WRITE);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << ", flags=" << op_flags << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
}
template <typename I>
void Io<I>::aio_discard(I &image_ctx, io::AioCompletion *aio_comp, uint64_t off,
uint64_t len, uint32_t discard_granularity_bytes,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: discard", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_DISCARD);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_discard(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, discard_granularity_bytes, trace);
req->send();
}
template <typename I>
void Io<I>::aio_write_same(I &image_ctx, io::AioCompletion *aio_comp,
uint64_t off, uint64_t len, bufferlist &&bl,
int op_flags, bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: writesame", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_WRITESAME);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << ", data_len = " << bl.length() << ", "
<< "flags=" << op_flags << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_write_same(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
}
template <typename I>
void Io<I>::aio_write_zeroes(I& image_ctx, io::AioCompletion *aio_comp,
uint64_t off, uint64_t len, int zero_flags,
int op_flags, bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: write_zeroes", &image_ctx.trace_endpoint);
trace.event("init");
}
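  // plain zeroing maps to a discard; thick-provisioned zeroing must
  // actually allocate, so it maps to a write-same of zeroes instead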
auto io_type = io::AIO_TYPE_DISCARD;
if ((zero_flags & RBD_WRITE_ZEROES_FLAG_THICK_PROVISION) != 0) {
zero_flags &= ~RBD_WRITE_ZEROES_FLAG_THICK_PROVISION;
io_type = io::AIO_TYPE_WRITESAME;
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io_type);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
// validate the supported flags
if (zero_flags != 0U) {
aio_comp->fail(-EINVAL);
return;
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
if (io_type == io::AIO_TYPE_WRITESAME) {
// write-same needs to be aligned to its buffer but librbd has never forced
// block alignment. Hide that requirement from the user by adding optional
// writes.
const uint64_t data_length = 512;
uint64_t write_same_offset = p2roundup(off, data_length);
uint64_t write_same_offset_end = p2align(off + len, data_length);
uint64_t write_same_length = 0;
if (write_same_offset_end > write_same_offset) {
write_same_length = write_same_offset_end - write_same_offset;
}
uint64_t prepend_offset = off;
uint64_t prepend_length = write_same_offset - off;
uint64_t append_offset = write_same_offset + write_same_length;
uint64_t append_length = len - prepend_length - write_same_length;
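    // e.g. off=100, len=2000: write-same covers [512, 2048) (1536 bytes),
    // with a 412-byte prepend write at 100 and a 52-byte append at 2048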
ldout(cct, 20) << "prepend_offset=" << prepend_offset << ", "
<< "prepend_length=" << prepend_length << ", "
<< "write_same_offset=" << write_same_offset << ", "
<< "write_same_length=" << write_same_length << ", "
<< "append_offset=" << append_offset << ", "
<< "append_length=" << append_length << dendl;
ceph_assert(prepend_length + write_same_length + append_length == len);
if (write_same_length <= data_length) {
// unaligned or small write-zeroes request -- use single write
bufferlist bl;
bl.append_zero(len);
aio_comp->aio_type = io::AIO_TYPE_WRITE;
auto req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
return;
} else if (prepend_length == 0 && append_length == 0) {
// fully aligned -- use a single write-same image request
bufferlist bl;
bl.append_zero(data_length);
auto req = io::ImageDispatchSpec::create_write_same(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(bl), op_flags, trace);
req->send();
return;
}
// to reach this point, we need at least one prepend/append write along with
// a write-same -- therefore we will need to wrap the provided AioCompletion
auto request_count = 1;
if (prepend_length > 0) {
++request_count;
}
if (append_length > 0) {
++request_count;
}
ceph_assert(request_count > 1);
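    // wrap the caller's completion so it fires only after every
    // prepend/write-same/append sub-request has completed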
aio_comp->start_op();
aio_comp->set_request_count(request_count);
if (prepend_length > 0) {
bufferlist bl;
bl.append_zero(prepend_length);
Context* prepend_ctx = new io::C_AioRequest(aio_comp);
auto prepend_aio_comp = io::AioCompletion::create_and_start(
prepend_ctx, &image_ctx, io::AIO_TYPE_WRITE);
auto prepend_req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, prepend_aio_comp,
{{prepend_offset, prepend_length}}, io::ImageArea::DATA,
std::move(bl), op_flags, trace);
prepend_req->send();
}
if (append_length > 0) {
bufferlist bl;
bl.append_zero(append_length);
Context* append_ctx = new io::C_AioRequest(aio_comp);
auto append_aio_comp = io::AioCompletion::create_and_start(
append_ctx, &image_ctx, io::AIO_TYPE_WRITE);
auto append_req = io::ImageDispatchSpec::create_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, append_aio_comp,
{{append_offset, append_length}}, io::ImageArea::DATA,
std::move(bl), op_flags, trace);
append_req->send();
}
bufferlist bl;
bl.append_zero(data_length);
Context* write_same_ctx = new io::C_AioRequest(aio_comp);
auto write_same_aio_comp = io::AioCompletion::create_and_start(
write_same_ctx, &image_ctx, io::AIO_TYPE_WRITESAME);
auto req = io::ImageDispatchSpec::create_write_same(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, write_same_aio_comp,
{{write_same_offset, write_same_length}}, io::ImageArea::DATA,
std::move(bl), op_flags, trace);
req->send();
return;
}
// enable partial discard (zeroing) of objects
uint32_t discard_granularity_bytes = 0;
auto req = io::ImageDispatchSpec::create_discard(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, discard_granularity_bytes, trace);
req->send();
}
template <typename I>
void Io<I>::aio_compare_and_write(I &image_ctx, io::AioCompletion *aio_comp,
uint64_t off, uint64_t len,
bufferlist &&cmp_bl,
bufferlist &&bl, uint64_t *mismatch_off,
int op_flags, bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: compare_and_write", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx),
io::AIO_TYPE_COMPARE_AND_WRITE);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << ", off=" << off << ", "
<< "len=" << len << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_compare_and_write(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
{{off, len}}, io::ImageArea::DATA, std::move(cmp_bl), std::move(bl),
mismatch_off, op_flags, trace);
req->send();
}
template <typename I>
void Io<I>::aio_flush(I &image_ctx, io::AioCompletion *aio_comp,
bool native_async) {
auto cct = image_ctx.cct;
FUNCTRACE(cct);
ZTracer::Trace trace;
if (image_ctx.blkin_trace_all) {
trace.init("io: flush", &image_ctx.trace_endpoint);
trace.event("init");
}
aio_comp->init_time(util::get_image_ctx(&image_ctx), io::AIO_TYPE_FLUSH);
ldout(cct, 20) << "ictx=" << &image_ctx << ", "
<< "completion=" << aio_comp << dendl;
if (native_async && image_ctx.event_socket.is_valid()) {
aio_comp->set_event_notify(true);
}
if (!is_valid_io(image_ctx, aio_comp)) {
return;
}
auto req = io::ImageDispatchSpec::create_flush(
image_ctx, io::IMAGE_DISPATCH_LAYER_API_START, aio_comp,
io::FLUSH_SOURCE_USER, trace);
req->send();
}
} // namespace api
} // namespace librbd
template class librbd::api::Io<librbd::ImageCtx>;
| 17,870 | 31.142086 | 80 |
cc
|
null |
ceph-main/src/librbd/api/Io.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_IO_H
#define LIBRBD_API_IO_H
#include "include/int_types.h"
#include "librbd/io/ReadResult.h"
namespace librbd {
struct ImageCtx;
namespace io { struct AioCompletion; }
namespace api {
template<typename ImageCtxT = ImageCtx>
struct Io {
static ssize_t read(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
io::ReadResult &&read_result, int op_flags);
static ssize_t write(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
bufferlist &&bl, int op_flags);
static ssize_t discard(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
uint32_t discard_granularity_bytes);
static ssize_t write_same(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
bufferlist &&bl, int op_flags);
static ssize_t write_zeroes(ImageCtxT &image_ctx, uint64_t off, uint64_t len,
int zero_flags, int op_flags);
static ssize_t compare_and_write(ImageCtxT &image_ctx, uint64_t off,
uint64_t len, bufferlist &&cmp_bl,
bufferlist &&bl, uint64_t *mismatch_off,
int op_flags);
static int flush(ImageCtxT &image_ctx);
static void aio_read(ImageCtxT &image_ctx, io::AioCompletion *c, uint64_t off,
uint64_t len, io::ReadResult &&read_result, int op_flags,
bool native_async);
static void aio_write(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len, bufferlist &&bl,
int op_flags, bool native_async);
static void aio_discard(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len,
uint32_t discard_granularity_bytes,
bool native_async);
static void aio_write_same(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len, bufferlist &&bl,
int op_flags, bool native_async);
static void aio_write_zeroes(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len, int zero_flags,
int op_flags, bool native_async);
static void aio_compare_and_write(ImageCtxT &image_ctx, io::AioCompletion *c,
uint64_t off, uint64_t len,
bufferlist &&cmp_bl, bufferlist &&bl,
uint64_t *mismatch_off, int op_flags,
bool native_async);
static void aio_flush(ImageCtxT &image_ctx, io::AioCompletion *c,
bool native_async);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Io<librbd::ImageCtx>;
#endif // LIBRBD_API_IO_H
| 3,000 | 44.469697 | 80 |
h
|
null |
ceph-main/src/librbd/api/Migration.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Migration.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Group.h"
#include "librbd/api/Image.h"
#include "librbd/api/Snapshot.h"
#include "librbd/api/Trash.h"
#include "librbd/deep_copy/Handler.h"
#include "librbd/deep_copy/ImageCopyRequest.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/deep_copy/SnapshotCopyRequest.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/image/AttachChildRequest.h"
#include "librbd/image/AttachParentRequest.h"
#include "librbd/image/CloneRequest.h"
#include "librbd/image/CreateRequest.h"
#include "librbd/image/DetachChildRequest.h"
#include "librbd/image/DetachParentRequest.h"
#include "librbd/image/ListWatchersRequest.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/image/Types.h"
#include "librbd/internal.h"
#include "librbd/migration/FormatInterface.h"
#include "librbd/migration/OpenSourceImageRequest.h"
#include "librbd/migration/NativeFormat.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include <boost/scope_exit.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::Migration: " << __func__ << ": "
namespace librbd {
inline bool operator==(const linked_image_spec_t& rhs,
const linked_image_spec_t& lhs) {
bool result = (rhs.pool_id == lhs.pool_id &&
rhs.pool_namespace == lhs.pool_namespace &&
rhs.image_id == lhs.image_id);
return result;
}
namespace api {
using util::create_rados_callback;
namespace {
class MigrationProgressContext : public ProgressContext {
public:
MigrationProgressContext(librados::IoCtx& io_ctx,
const std::string &header_oid,
cls::rbd::MigrationState state,
ProgressContext *prog_ctx)
: m_io_ctx(io_ctx), m_header_oid(header_oid), m_state(state),
m_prog_ctx(prog_ctx), m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
m_lock(ceph::make_mutex(
util::unique_lock_name("librbd::api::MigrationProgressContext",
this))) {
ceph_assert(m_prog_ctx != nullptr);
}
~MigrationProgressContext() {
wait_for_in_flight_updates();
}
int update_progress(uint64_t offset, uint64_t total) override {
ldout(m_cct, 20) << "offset=" << offset << ", total=" << total << dendl;
m_prog_ctx->update_progress(offset, total);
std::string description = stringify(offset * 100 / total) + "% complete";
send_state_description_update(description);
return 0;
}
private:
librados::IoCtx& m_io_ctx;
std::string m_header_oid;
cls::rbd::MigrationState m_state;
ProgressContext *m_prog_ctx;
CephContext* m_cct;
mutable ceph::mutex m_lock;
ceph::condition_variable m_cond;
std::string m_state_description;
bool m_pending_update = false;
int m_in_flight_state_updates = 0;
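  // keep at most one migration_set_state() call in flight; any newer
  // description that arrives meanwhile is coalesced into a single pending
  // update (see handle_set_state_description)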
void send_state_description_update(const std::string &description) {
std::lock_guard locker{m_lock};
if (description == m_state_description) {
return;
}
m_state_description = description;
if (m_in_flight_state_updates > 0) {
m_pending_update = true;
return;
}
set_state_description();
}
void set_state_description() {
ldout(m_cct, 20) << "state_description=" << m_state_description << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
librados::ObjectWriteOperation op;
cls_client::migration_set_state(&op, m_state, m_state_description);
using klass = MigrationProgressContext;
librados::AioCompletion *comp =
create_rados_callback<klass, &klass::handle_set_state_description>(this);
int r = m_io_ctx.aio_operate(m_header_oid, comp, &op);
ceph_assert(r == 0);
comp->release();
m_in_flight_state_updates++;
}
void handle_set_state_description(int r) {
ldout(m_cct, 20) << "r=" << r << dendl;
std::lock_guard locker{m_lock};
m_in_flight_state_updates--;
if (r < 0) {
lderr(m_cct) << "failed to update migration state: " << cpp_strerror(r)
<< dendl;
} else if (m_pending_update) {
set_state_description();
m_pending_update = false;
} else {
m_cond.notify_all();
}
}
void wait_for_in_flight_updates() {
std::unique_lock locker{m_lock};
ldout(m_cct, 20) << "m_in_flight_state_updates="
<< m_in_flight_state_updates << dendl;
m_pending_update = false;
m_cond.wait(locker, [this] { return m_in_flight_state_updates <= 0; });
}
};
int trash_search(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, std::string *image_id) {
std::vector<trash_image_info_t> entries;
int r = Trash<>::list(io_ctx, entries, false);
if (r < 0) {
return r;
}
for (auto &entry : entries) {
if (entry.source == source && entry.name == image_name) {
*image_id = entry.id;
return 0;
}
}
return -ENOENT;
}
template <typename I>
int open_images(librados::IoCtx& io_ctx, const std::string &image_name,
I **src_image_ctx, I **dst_image_ctx,
cls::rbd::MigrationSpec* src_migration_spec,
cls::rbd::MigrationSpec* dst_migration_spec,
bool skip_open_dst_image) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
*src_image_ctx = nullptr;
*dst_image_ctx = nullptr;
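  // the caller only knows an image name, which may refer to either end of
  // the migration: open it, inspect the migration header to learn which end
  // it is, then open the paired image recorded in that header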
ldout(cct, 10) << "trying to open image by name " << io_ctx.get_pool_name()
<< "/" << image_name << dendl;
auto image_ctx = I::create(image_name, "", nullptr, io_ctx, false);
int r = image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
if (r == -ENOENT) {
// presume user passed the source image so we need to search the trash
ldout(cct, 10) << "Source image is not found. Trying trash" << dendl;
std::string src_image_id;
r = trash_search(io_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION, image_name,
&src_image_id);
if (r < 0) {
lderr(cct) << "failed to determine image id: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 10) << "source image id from trash: " << src_image_id << dendl;
image_ctx = I::create(image_name, src_image_id, nullptr, io_ctx, false);
r = image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
}
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
return r;
}
image_ctx = nullptr;
}
BOOST_SCOPE_EXIT_TPL(&r, &image_ctx, src_image_ctx, dst_image_ctx) {
if (r != 0) {
if (*src_image_ctx != nullptr) {
(*src_image_ctx)->state->close();
}
if (*dst_image_ctx != nullptr) {
(*dst_image_ctx)->state->close();
}
if (image_ctx != nullptr) {
image_ctx->state->close();
}
}
} BOOST_SCOPE_EXIT_END;
// The opened image is either a source or destination
cls::rbd::MigrationSpec migration_spec;
r = cls_client::migration_get(&image_ctx->md_ctx, image_ctx->header_oid,
&migration_spec);
if (r < 0) {
lderr(cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 10) << "migration spec: " << migration_spec << dendl;
if (migration_spec.header_type == cls::rbd::MIGRATION_HEADER_TYPE_SRC) {
ldout(cct, 10) << "the source image is opened" << dendl;
*src_image_ctx = image_ctx;
*src_migration_spec = migration_spec;
image_ctx = nullptr;
} else if (migration_spec.header_type ==
cls::rbd::MIGRATION_HEADER_TYPE_DST) {
ldout(cct, 10) << "the destination image is opened" << dendl;
std::string image_id = image_ctx->id;
image_ctx->state->close();
image_ctx = I::create(image_name, image_id, nullptr, io_ctx, false);
if (!skip_open_dst_image) {
ldout(cct, 10) << "re-opening the destination image" << dendl;
r = image_ctx->state->open(0);
if (r < 0) {
image_ctx = nullptr;
lderr(cct) << "failed to re-open destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
}
*dst_image_ctx = image_ctx;
*dst_migration_spec = migration_spec;
image_ctx = nullptr;
} else {
lderr(cct) << "unexpected migration header type: "
<< migration_spec.header_type << dendl;
r = -EINVAL;
return r;
}
// attempt to open the other (paired) image
I** other_image_ctx = nullptr;
std::string other_image_type;
std::string other_image_name;
std::string other_image_id;
cls::rbd::MigrationSpec* other_migration_spec = nullptr;
librados::IoCtx other_io_ctx;
int flags = OPEN_FLAG_IGNORE_MIGRATING;
if (*src_image_ctx == nullptr &&
dst_migration_spec->source_spec.empty()) {
r = util::create_ioctx(io_ctx, "source image", migration_spec.pool_id,
migration_spec.pool_namespace, &other_io_ctx);
if (r < 0) {
return r;
}
other_image_type = "source";
other_image_ctx = src_image_ctx;
other_migration_spec = src_migration_spec;
other_image_name = migration_spec.image_name;
other_image_id = migration_spec.image_id;
if (other_image_id.empty()) {
ldout(cct, 20) << "trying to open v1 image by name "
<< other_io_ctx.get_pool_name() << "/"
<< other_image_name << dendl;
flags |= OPEN_FLAG_OLD_FORMAT;
} else {
ldout(cct, 20) << "trying to open v2 image by id "
<< other_io_ctx.get_pool_name() << "/"
<< other_image_id << dendl;
}
*src_image_ctx = I::create(other_image_name, other_image_id, nullptr,
other_io_ctx, false);
} else if (*dst_image_ctx == nullptr) {
r = util::create_ioctx(io_ctx, "destination image", migration_spec.pool_id,
migration_spec.pool_namespace, &other_io_ctx);
if (r < 0) {
return r;
}
other_image_name = migration_spec.image_name;
if (skip_open_dst_image) {
other_image_id = migration_spec.image_id;
} else {
other_image_type = "destination";
other_image_ctx = dst_image_ctx;
other_migration_spec = dst_migration_spec;
other_image_id = migration_spec.image_id;
}
*dst_image_ctx = I::create(other_image_name, other_image_id, nullptr,
other_io_ctx, false);
}
if (other_image_ctx != nullptr) {
r = (*other_image_ctx)->state->open(flags);
if (r < 0) {
lderr(cct) << "failed to open " << other_image_type << " image "
<< other_io_ctx.get_pool_name()
<< "/" << (other_image_id.empty() ?
other_image_name : other_image_id)
<< ": " << cpp_strerror(r) << dendl;
*other_image_ctx = nullptr;
return r;
}
r = cls_client::migration_get(&(*other_image_ctx)->md_ctx,
(*other_image_ctx)->header_oid,
other_migration_spec);
if (r < 0) {
lderr(cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 20) << other_image_type << " migration spec: "
<< *other_migration_spec << dendl;
}
if (!skip_open_dst_image) {
// legacy clients will only store status in the source images
if (dst_migration_spec->source_spec.empty()) {
dst_migration_spec->state = migration_spec.state;
dst_migration_spec->state_description =
migration_spec.state_description;
}
}
return 0;
}
class SteppedProgressContext : public ProgressContext {
public:
SteppedProgressContext(ProgressContext* progress_ctx, size_t total_steps)
: m_progress_ctx(progress_ctx), m_total_steps(total_steps) {
}
void next_step() {
ceph_assert(m_current_step < m_total_steps);
++m_current_step;
}
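  // fold this step's [0, object_count] progress into the combined
  // [0, object_count * total_steps] range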
int update_progress(uint64_t object_number,
uint64_t object_count) override {
return m_progress_ctx->update_progress(
object_number + (object_count * (m_current_step - 1)),
object_count * m_total_steps);
}
private:
ProgressContext* m_progress_ctx;
size_t m_total_steps;
size_t m_current_step = 1;
};
} // anonymous namespace
template <typename I>
int Migration<I>::prepare(librados::IoCtx& io_ctx,
const std::string &image_name,
librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name_,
ImageOptions& opts) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::string dest_image_name = dest_image_name_.empty() ? image_name :
dest_image_name_;
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << " -> "
<< dest_io_ctx.get_pool_name() << "/" << dest_image_name
<< ", opts=" << opts << dendl;
auto src_image_ctx = I::create(image_name, "", nullptr, io_ctx, false);
int r = src_image_ctx->state->open(0);
if (r < 0) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(src_image_ctx) {
src_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
std::list<obj_watch_t> watchers;
int flags = librbd::image::LIST_WATCHERS_FILTER_OUT_MY_INSTANCE |
librbd::image::LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES;
C_SaferCond on_list_watchers;
auto list_watchers_request = librbd::image::ListWatchersRequest<I>::create(
*src_image_ctx, flags, &watchers, &on_list_watchers);
list_watchers_request->send();
r = on_list_watchers.wait();
if (r < 0) {
lderr(cct) << "failed listing watchers:" << cpp_strerror(r) << dendl;
return r;
}
if (!watchers.empty()) {
lderr(cct) << "image has watchers - not migrating" << dendl;
return -EBUSY;
}
uint64_t format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, format);
}
if (format != 2) {
lderr(cct) << "unsupported destination image format: " << format << dendl;
return -EINVAL;
}
uint64_t features;
{
std::shared_lock image_locker{src_image_ctx->image_lock};
features = src_image_ctx->features;
}
opts.get(RBD_IMAGE_OPTION_FEATURES, &features);
if ((features & ~RBD_FEATURES_ALL) != 0) {
lderr(cct) << "librbd does not support requested features" << dendl;
return -ENOSYS;
}
opts.set(RBD_IMAGE_OPTION_FEATURES, features);
uint64_t order = src_image_ctx->order;
if (opts.get(RBD_IMAGE_OPTION_ORDER, &order) != 0) {
opts.set(RBD_IMAGE_OPTION_ORDER, order);
}
r = image::CreateRequest<I>::validate_order(cct, order);
if (r < 0) {
return r;
}
uint64_t stripe_unit = src_image_ctx->stripe_unit;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_UNIT, &stripe_unit) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_UNIT, stripe_unit);
}
uint64_t stripe_count = src_image_ctx->stripe_count;
if (opts.get(RBD_IMAGE_OPTION_STRIPE_COUNT, &stripe_count) != 0) {
opts.set(RBD_IMAGE_OPTION_STRIPE_COUNT, stripe_count);
}
uint64_t flatten = 0;
if (opts.get(RBD_IMAGE_OPTION_FLATTEN, &flatten) == 0) {
opts.unset(RBD_IMAGE_OPTION_FLATTEN);
}
ldout(cct, 20) << "updated opts=" << opts << dendl;
auto dst_image_ctx = I::create(
dest_image_name, util::generate_image_id(dest_io_ctx), nullptr,
dest_io_ctx, false);
src_image_ctx->image_lock.lock_shared();
cls::rbd::MigrationSpec dst_migration_spec{
cls::rbd::MIGRATION_HEADER_TYPE_DST,
src_image_ctx->md_ctx.get_id(), src_image_ctx->md_ctx.get_namespace(),
src_image_ctx->name, src_image_ctx->id, "", {}, 0, false,
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, flatten > 0,
cls::rbd::MIGRATION_STATE_PREPARING, ""};
src_image_ctx->image_lock.unlock_shared();
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, nullptr);
r = migration.prepare();
return r;
}
template <typename I>
int Migration<I>::prepare_import(
const std::string& source_spec, librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name, ImageOptions& opts) {
if (source_spec.empty() || !dest_io_ctx.is_valid() ||
dest_image_name.empty()) {
return -EINVAL;
}
auto cct = reinterpret_cast<CephContext *>(dest_io_ctx.cct());
ldout(cct, 10) << source_spec << " -> "
<< dest_io_ctx.get_pool_name() << "/"
<< dest_image_name << ", opts=" << opts << dendl;
I* src_image_ctx = nullptr;
C_SaferCond open_ctx;
auto req = migration::OpenSourceImageRequest<I>::create(
dest_io_ctx, nullptr, CEPH_NOSNAP,
{-1, "", "", "", source_spec, {}, 0, false}, &src_image_ctx, &open_ctx);
req->send();
int r = open_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to open source image: " << cpp_strerror(r) << dendl;
return r;
}
auto asio_engine = src_image_ctx->asio_engine;
BOOST_SCOPE_EXIT_TPL(src_image_ctx) {
src_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
uint64_t image_format = 2;
if (opts.get(RBD_IMAGE_OPTION_FORMAT, &image_format) != 0) {
opts.set(RBD_IMAGE_OPTION_FORMAT, image_format);
}
if (image_format != 2) {
lderr(cct) << "unsupported destination image format: " << image_format
<< dendl;
return -EINVAL;
}
ldout(cct, 20) << "updated opts=" << opts << dendl;
// use json-spirit to clean-up json formatting
json_spirit::mObject source_spec_object;
json_spirit::mValue json_root;
  if (json_spirit::read(source_spec, json_root)) {
try {
source_spec_object = json_root.get_obj();
} catch (std::runtime_error&) {
lderr(cct) << "failed to clean source spec" << dendl;
return -EINVAL;
}
}
auto dst_image_ctx = I::create(
dest_image_name, util::generate_image_id(dest_io_ctx), nullptr,
dest_io_ctx, false);
cls::rbd::MigrationSpec dst_migration_spec{
cls::rbd::MIGRATION_HEADER_TYPE_DST, -1, "", "", "",
json_spirit::write(source_spec_object), {},
0, false, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, true,
cls::rbd::MIGRATION_STATE_PREPARING, ""};
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, nullptr);
  r = migration.prepare_import();
  if (r < 0) {
    return r;
  }
  return 0;
}
template <typename I>
int Migration<I>::execute(librados::IoCtx& io_ctx,
const std::string &image_name,
ProgressContext &prog_ctx) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, false);
if (r < 0) {
return r;
}
// ensure the destination loads the migration info
dst_image_ctx->ignore_migrating = false;
r = dst_image_ctx->state->refresh();
if (r < 0) {
lderr(cct) << "failed to refresh destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(src_image_ctx, dst_image_ctx) {
dst_image_ctx->state->close();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
if (dst_migration_spec.state != cls::rbd::MIGRATION_STATE_PREPARED &&
dst_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTING) {
lderr(cct) << "current migration state is '" << dst_migration_spec.state
<< "' (should be 'prepared')" << dendl;
return -EINVAL;
}
ldout(cct, 5) << "migrating ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, &prog_ctx);
r = migration.execute();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::abort(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, true);
if (r < 0) {
return r;
}
ldout(cct, 5) << "canceling incomplete migration ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, &prog_ctx);
r = migration.abort();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::commit(librados::IoCtx& io_ctx,
const std::string &image_name,
ProgressContext &prog_ctx) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, false);
if (r < 0) {
return r;
}
if (dst_migration_spec.state != cls::rbd::MIGRATION_STATE_EXECUTED) {
lderr(cct) << "current migration state is '" << dst_migration_spec.state
<< "' (should be 'executed')" << dendl;
dst_image_ctx->state->close();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
return -EINVAL;
}
// ensure the destination loads the migration info
dst_image_ctx->ignore_migrating = false;
r = dst_image_ctx->state->refresh();
if (r < 0) {
lderr(cct) << "failed to refresh destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
ldout(cct, 5) << "migrating ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, &prog_ctx);
r = migration.commit();
// image_ctx is closed in commit when removing src image
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::status(librados::IoCtx& io_ctx,
const std::string &image_name,
image_migration_status_t *status) {
CephContext* cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 10) << io_ctx.get_pool_name() << "/" << image_name << dendl;
I *src_image_ctx;
I *dst_image_ctx;
cls::rbd::MigrationSpec src_migration_spec;
cls::rbd::MigrationSpec dst_migration_spec;
int r = open_images(io_ctx, image_name, &src_image_ctx, &dst_image_ctx,
&src_migration_spec, &dst_migration_spec, false);
if (r < 0) {
return r;
}
ldout(cct, 5) << "migrating ";
if (!dst_migration_spec.source_spec.empty()) {
*_dout << dst_migration_spec.source_spec;
} else {
*_dout << src_image_ctx->md_ctx.get_pool_name() << "/"
<< src_image_ctx->name;
}
*_dout << " -> " << dst_image_ctx->md_ctx.get_pool_name() << "/"
<< dst_image_ctx->name << dendl;
ImageOptions opts;
Migration migration(src_image_ctx, dst_image_ctx, dst_migration_spec,
opts, nullptr);
r = migration.status(status);
dst_image_ctx->state->close();
if (src_image_ctx != nullptr) {
src_image_ctx->state->close();
}
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::get_source_spec(I* image_ctx, std::string* source_spec) {
auto cct = image_ctx->cct;
ldout(cct, 10) << dendl;
image_ctx->image_lock.lock_shared();
auto migration_info = image_ctx->migration_info;
image_ctx->image_lock.unlock_shared();
if (migration_info.empty()) {
// attempt to directly read the spec in case the state is EXECUTED
cls::rbd::MigrationSpec migration_spec;
int r = cls_client::migration_get(&image_ctx->md_ctx, image_ctx->header_oid,
&migration_spec);
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
migration_info = {
migration_spec.pool_id, migration_spec.pool_namespace,
migration_spec.image_name, migration_spec.image_id,
migration_spec.source_spec, {}, 0, false};
}
if (!migration_info.source_spec.empty()) {
*source_spec = migration_info.source_spec;
} else {
// legacy migration source
*source_spec = migration::NativeFormat<I>::build_source_spec(
migration_info.pool_id,
migration_info.pool_namespace,
migration_info.image_name,
migration_info.image_id);
}
return 0;
}
template <typename I>
Migration<I>::Migration(ImageCtx* src_image_ctx,
ImageCtx* dst_image_ctx,
const cls::rbd::MigrationSpec& dst_migration_spec,
ImageOptions& opts, ProgressContext *prog_ctx)
: m_cct(dst_image_ctx->cct),
m_src_image_ctx(src_image_ctx), m_dst_image_ctx(dst_image_ctx),
m_dst_io_ctx(dst_image_ctx->md_ctx), m_dst_image_name(dst_image_ctx->name),
m_dst_image_id(dst_image_ctx->id),
m_dst_header_oid(util::header_name(m_dst_image_id)),
m_image_options(opts), m_flatten(dst_migration_spec.flatten),
m_mirroring(dst_migration_spec.mirroring),
m_mirror_image_mode(dst_migration_spec.mirror_image_mode),
m_prog_ctx(prog_ctx),
m_src_migration_spec(cls::rbd::MIGRATION_HEADER_TYPE_SRC,
m_dst_io_ctx.get_id(), m_dst_io_ctx.get_namespace(),
m_dst_image_name, m_dst_image_id, "", {}, 0,
m_mirroring, m_mirror_image_mode, m_flatten,
dst_migration_spec.state,
dst_migration_spec.state_description),
m_dst_migration_spec(dst_migration_spec) {
m_dst_io_ctx.dup(dst_image_ctx->md_ctx);
}
template <typename I>
int Migration<I>::prepare() {
ldout(m_cct, 10) << dendl;
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx) {
if (m_dst_image_ctx != nullptr) {
m_dst_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
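  // each step below rolls back the earlier ones on failure (relink the
  // source, re-enable mirroring, or abort the half-created destination)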
int r = validate_src_snaps(m_src_image_ctx);
if (r < 0) {
return r;
}
r = disable_mirroring(m_src_image_ctx, &m_mirroring, &m_mirror_image_mode);
if (r < 0) {
return r;
}
r = unlink_src_image(m_src_image_ctx);
if (r < 0) {
enable_mirroring(m_src_image_ctx, m_mirroring, m_mirror_image_mode);
return r;
}
r = set_src_migration(m_src_image_ctx);
if (r < 0) {
relink_src_image(m_src_image_ctx);
enable_mirroring(m_src_image_ctx, m_mirroring, m_mirror_image_mode);
return r;
}
r = create_dst_image(&m_dst_image_ctx);
if (r < 0) {
abort();
return r;
}
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::prepare_import() {
ldout(m_cct, 10) << dendl;
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx) {
if (m_dst_image_ctx != nullptr) {
m_dst_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
int r = create_dst_image(&m_dst_image_ctx);
if (r < 0) {
abort();
return r;
}
return 0;
}
template <typename I>
int Migration<I>::execute() {
ldout(m_cct, 10) << dendl;
int r = set_state(cls::rbd::MIGRATION_STATE_EXECUTING, "");
if (r < 0) {
return r;
}
{
MigrationProgressContext dst_prog_ctx(
m_dst_image_ctx->md_ctx, m_dst_image_ctx->header_oid,
cls::rbd::MIGRATION_STATE_EXECUTING, m_prog_ctx);
std::optional<MigrationProgressContext> src_prog_ctx;
if (m_src_image_ctx != nullptr) {
src_prog_ctx.emplace(m_src_image_ctx->md_ctx, m_src_image_ctx->header_oid,
cls::rbd::MIGRATION_STATE_EXECUTING, &dst_prog_ctx);
}
while (true) {
r = m_dst_image_ctx->operations->migrate(
*(src_prog_ctx ? &src_prog_ctx.value() : &dst_prog_ctx));
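      // -EROFS indicates the exclusive lock was lost mid-copy; retry the
      // migration once the lock can be re-acquired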
if (r == -EROFS) {
std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
if (m_dst_image_ctx->exclusive_lock != nullptr &&
!m_dst_image_ctx->exclusive_lock->accept_ops()) {
ldout(m_cct, 5) << "lost exclusive lock, retrying remote" << dendl;
continue;
}
}
break;
}
}
if (r < 0) {
lderr(m_cct) << "migration failed: " << cpp_strerror(r) << dendl;
return r;
}
r = set_state(cls::rbd::MIGRATION_STATE_EXECUTED, "");
if (r < 0) {
return r;
}
m_dst_image_ctx->notify_update();
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::abort() {
ldout(m_cct, 10) << dendl;
int r;
if (m_src_image_ctx != nullptr) {
m_src_image_ctx->owner_lock.lock_shared();
if (m_src_image_ctx->exclusive_lock != nullptr &&
!m_src_image_ctx->exclusive_lock->is_lock_owner()) {
C_SaferCond ctx;
m_src_image_ctx->exclusive_lock->acquire_lock(&ctx);
m_src_image_ctx->owner_lock.unlock_shared();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error acquiring exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
} else {
m_src_image_ctx->owner_lock.unlock_shared();
}
}
group_info_t group_info;
group_info.pool = -1;
r = m_dst_image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
if (r < 0) {
ldout(m_cct, 1) << "failed to open destination image: " << cpp_strerror(r)
<< dendl;
m_dst_image_ctx = nullptr;
} else {
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx) {
if (m_dst_image_ctx != nullptr) {
m_dst_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
std::list<obj_watch_t> watchers;
int flags = librbd::image::LIST_WATCHERS_FILTER_OUT_MY_INSTANCE |
librbd::image::LIST_WATCHERS_FILTER_OUT_MIRROR_INSTANCES;
C_SaferCond on_list_watchers;
auto list_watchers_request = librbd::image::ListWatchersRequest<I>::create(
*m_dst_image_ctx, flags, &watchers, &on_list_watchers);
list_watchers_request->send();
r = on_list_watchers.wait();
if (r < 0) {
lderr(m_cct) << "failed listing watchers:" << cpp_strerror(r) << dendl;
return r;
}
if (!watchers.empty()) {
lderr(m_cct) << "image has watchers - cannot abort migration" << dendl;
return -EBUSY;
}
// ensure destination image is now read-only
r = set_state(cls::rbd::MIGRATION_STATE_ABORTING, "");
if (r < 0) {
return r;
}
SteppedProgressContext progress_ctx(
m_prog_ctx, (m_src_image_ctx != nullptr ? 2 : 1));
if (m_src_image_ctx != nullptr) {
// copy dst HEAD -> src HEAD
revert_data(m_dst_image_ctx, m_src_image_ctx, &progress_ctx);
progress_ctx.next_step();
ldout(m_cct, 10) << "relinking children" << dendl;
r = relink_children(m_dst_image_ctx, m_src_image_ctx);
if (r < 0) {
return r;
}
}
ldout(m_cct, 10) << "removing dst image snapshots" << dendl;
std::vector<librbd::snap_info_t> snaps;
r = Snapshot<I>::list(m_dst_image_ctx, snaps);
if (r < 0) {
lderr(m_cct) << "failed listing snapshots: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto &snap : snaps) {
librbd::NoOpProgressContext prog_ctx;
int r = Snapshot<I>::remove(m_dst_image_ctx, snap.name.c_str(),
RBD_SNAP_REMOVE_UNPROTECT, prog_ctx);
if (r < 0) {
lderr(m_cct) << "failed removing snapshot: " << cpp_strerror(r)
<< dendl;
return r;
}
}
ldout(m_cct, 10) << "removing group" << dendl;
r = remove_group(m_dst_image_ctx, &group_info);
if (r < 0 && r != -ENOENT) {
return r;
}
ldout(m_cct, 10) << "removing dst image" << dendl;
ceph_assert(m_dst_image_ctx->ignore_migrating);
auto asio_engine = m_dst_image_ctx->asio_engine;
librados::IoCtx dst_io_ctx(m_dst_image_ctx->md_ctx);
C_SaferCond on_remove;
auto req = librbd::image::RemoveRequest<>::create(
dst_io_ctx, m_dst_image_ctx, false, false, progress_ctx,
asio_engine->get_work_queue(), &on_remove);
req->send();
r = on_remove.wait();
m_dst_image_ctx = nullptr;
if (r < 0) {
lderr(m_cct) << "failed removing destination image '"
<< dst_io_ctx.get_pool_name() << "/" << m_dst_image_name
<< " (" << m_dst_image_id << ")': " << cpp_strerror(r)
<< dendl;
return r;
}
}
if (m_src_image_ctx != nullptr) {
r = relink_src_image(m_src_image_ctx);
if (r < 0) {
return r;
}
r = add_group(m_src_image_ctx, group_info);
if (r < 0) {
return r;
}
r = remove_migration(m_src_image_ctx);
if (r < 0) {
return r;
}
r = enable_mirroring(m_src_image_ctx, m_mirroring, m_mirror_image_mode);
if (r < 0) {
return r;
}
}
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::commit() {
ldout(m_cct, 10) << dendl;
BOOST_SCOPE_EXIT_TPL(&m_dst_image_ctx, &m_src_image_ctx) {
m_dst_image_ctx->state->close();
if (m_src_image_ctx != nullptr) {
m_src_image_ctx->state->close();
}
} BOOST_SCOPE_EXIT_END;
int r = remove_migration(m_dst_image_ctx);
if (r < 0) {
return r;
}
if (m_src_image_ctx != nullptr) {
r = remove_src_image(&m_src_image_ctx);
if (r < 0) {
return r;
}
}
r = enable_mirroring(m_dst_image_ctx, m_mirroring, m_mirror_image_mode);
if (r < 0) {
return r;
}
ldout(m_cct, 10) << "succeeded" << dendl;
return 0;
}
template <typename I>
int Migration<I>::status(image_migration_status_t *status) {
ldout(m_cct, 10) << dendl;
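  // each image's header describes the *other* end of the migration, so the
  // source fields come from the destination's spec and vice versa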
status->source_pool_id = m_dst_migration_spec.pool_id;
status->source_pool_namespace = m_dst_migration_spec.pool_namespace;
status->source_image_name = m_dst_migration_spec.image_name;
status->source_image_id = m_dst_migration_spec.image_id;
status->dest_pool_id = m_src_migration_spec.pool_id;
status->dest_pool_namespace = m_src_migration_spec.pool_namespace;
status->dest_image_name = m_src_migration_spec.image_name;
status->dest_image_id = m_src_migration_spec.image_id;
switch (m_src_migration_spec.state) {
case cls::rbd::MIGRATION_STATE_ERROR:
status->state = RBD_IMAGE_MIGRATION_STATE_ERROR;
break;
case cls::rbd::MIGRATION_STATE_PREPARING:
status->state = RBD_IMAGE_MIGRATION_STATE_PREPARING;
break;
case cls::rbd::MIGRATION_STATE_PREPARED:
status->state = RBD_IMAGE_MIGRATION_STATE_PREPARED;
break;
case cls::rbd::MIGRATION_STATE_EXECUTING:
status->state = RBD_IMAGE_MIGRATION_STATE_EXECUTING;
break;
case cls::rbd::MIGRATION_STATE_EXECUTED:
status->state = RBD_IMAGE_MIGRATION_STATE_EXECUTED;
break;
default:
status->state = RBD_IMAGE_MIGRATION_STATE_UNKNOWN;
break;
}
status->state_description = m_src_migration_spec.state_description;
return 0;
}
template <typename I>
int Migration<I>::set_state(I* image_ctx, const std::string& image_description,
cls::rbd::MigrationState state,
const std::string &description) {
int r = cls_client::migration_set_state(&image_ctx->md_ctx,
image_ctx->header_oid,
state, description);
if (r < 0) {
lderr(m_cct) << "failed to set " << image_description << " "
<< "migration header: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::set_state(cls::rbd::MigrationState state,
const std::string &description) {
int r;
if (m_src_image_ctx != nullptr) {
r = set_state(m_src_image_ctx, "source", state, description);
if (r < 0) {
return r;
}
}
r = set_state(m_dst_image_ctx, "destination", state, description);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::list_src_snaps(I* image_ctx,
std::vector<librbd::snap_info_t> *snaps) {
ldout(m_cct, 10) << dendl;
int r = Snapshot<I>::list(image_ctx, *snaps);
if (r < 0) {
lderr(m_cct) << "failed listing snapshots: " << cpp_strerror(r) << dendl;
return r;
}
for (auto &snap : *snaps) {
librbd::snap_namespace_type_t namespace_type;
r = Snapshot<I>::get_namespace_type(image_ctx, snap.id,
&namespace_type);
if (r < 0) {
lderr(m_cct) << "error getting snap namespace type: " << cpp_strerror(r)
<< dendl;
return r;
}
if (namespace_type != RBD_SNAP_NAMESPACE_TYPE_USER) {
if (namespace_type == RBD_SNAP_NAMESPACE_TYPE_TRASH) {
lderr(m_cct) << "image has snapshots with linked clones that must be "
<< "deleted or flattened before the image can be migrated"
<< dendl;
} else {
lderr(m_cct) << "image has non-user type snapshots "
<< "that are not supported by migration" << dendl;
}
return -EBUSY;
}
}
return 0;
}
template <typename I>
int Migration<I>::validate_src_snaps(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::vector<librbd::snap_info_t> snaps;
int r = list_src_snaps(image_ctx, &snaps);
if (r < 0) {
return r;
}
uint64_t dst_features = 0;
r = m_image_options.get(RBD_IMAGE_OPTION_FEATURES, &dst_features);
ceph_assert(r == 0);
if (!image_ctx->test_features(RBD_FEATURE_LAYERING)) {
return 0;
}
for (auto &snap : snaps) {
std::shared_lock image_locker{image_ctx->image_lock};
cls::rbd::ParentImageSpec parent_spec{image_ctx->md_ctx.get_id(),
image_ctx->md_ctx.get_namespace(),
image_ctx->id, snap.id};
std::vector<librbd::linked_image_spec_t> child_images;
r = api::Image<I>::list_children(image_ctx, parent_spec,
&child_images);
if (r < 0) {
lderr(m_cct) << "failed listing children: " << cpp_strerror(r)
<< dendl;
return r;
}
if (!child_images.empty()) {
ldout(m_cct, 1) << image_ctx->name << "@" << snap.name
<< " has children" << dendl;
if ((dst_features & RBD_FEATURE_LAYERING) == 0) {
lderr(m_cct) << "can't migrate to destination without layering feature: "
<< "image has children" << dendl;
return -EINVAL;
}
}
}
return 0;
}
template <typename I>
int Migration<I>::set_src_migration(I* image_ctx) {
ldout(m_cct, 10) << dendl;
image_ctx->ignore_migrating = true;
int r = cls_client::migration_set(&image_ctx->md_ctx, image_ctx->header_oid,
m_src_migration_spec);
if (r < 0) {
lderr(m_cct) << "failed to set source migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
image_ctx->notify_update();
return 0;
}
template <typename I>
int Migration<I>::remove_migration(I *image_ctx) {
ldout(m_cct, 10) << dendl;
int r;
r = cls_client::migration_remove(&image_ctx->md_ctx, image_ctx->header_oid);
if (r == -ENOENT) {
r = 0;
}
if (r < 0) {
lderr(m_cct) << "failed removing migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
image_ctx->notify_update();
return 0;
}
template <typename I>
int Migration<I>::unlink_src_image(I* image_ctx) {
if (image_ctx->old_format) {
return v1_unlink_src_image(image_ctx);
} else {
return v2_unlink_src_image(image_ctx);
}
}
template <typename I>
int Migration<I>::v1_unlink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::shared_lock image_locker{image_ctx->image_lock};
int r = tmap_rm(image_ctx->md_ctx, image_ctx->name);
if (r < 0) {
lderr(m_cct) << "failed removing " << image_ctx->name << " from tmap: "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::v2_unlink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
image_ctx->owner_lock.lock_shared();
if (image_ctx->exclusive_lock != nullptr &&
image_ctx->exclusive_lock->is_lock_owner()) {
C_SaferCond ctx;
image_ctx->exclusive_lock->release_lock(&ctx);
image_ctx->owner_lock.unlock_shared();
int r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error releasing exclusive lock: " << cpp_strerror(r)
<< dendl;
return r;
}
} else {
image_ctx->owner_lock.unlock_shared();
}
int r = Trash<I>::move(image_ctx->md_ctx, RBD_TRASH_IMAGE_SOURCE_MIGRATION,
image_ctx->name, 0);
if (r < 0) {
lderr(m_cct) << "failed moving image to trash: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::relink_src_image(I* image_ctx) {
if (image_ctx->old_format) {
return v1_relink_src_image(image_ctx);
} else {
return v2_relink_src_image(image_ctx);
}
}
template <typename I>
int Migration<I>::v1_relink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::shared_lock image_locker{image_ctx->image_lock};
int r = tmap_set(image_ctx->md_ctx, image_ctx->name);
if (r < 0) {
lderr(m_cct) << "failed adding " << image_ctx->name << " to tmap: "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::v2_relink_src_image(I* image_ctx) {
ldout(m_cct, 10) << dendl;
std::shared_lock image_locker{image_ctx->image_lock};
int r = Trash<I>::restore(image_ctx->md_ctx,
{cls::rbd::TRASH_IMAGE_SOURCE_MIGRATION},
image_ctx->id, image_ctx->name);
if (r < 0) {
lderr(m_cct) << "failed restoring image from trash: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::create_dst_image(I** image_ctx) {
ldout(m_cct, 10) << dendl;
uint64_t size;
cls::rbd::ParentImageSpec parent_spec;
{
std::shared_lock image_locker{m_src_image_ctx->image_lock};
size = m_src_image_ctx->size;
// use oldest snapshot or HEAD for parent spec
if (!m_src_image_ctx->snap_info.empty()) {
parent_spec = m_src_image_ctx->snap_info.begin()->second.parent.spec;
} else {
parent_spec = m_src_image_ctx->parent_md.spec;
}
}
ConfigProxy config{m_cct->_conf};
api::Config<I>::apply_pool_overrides(m_dst_io_ctx, &config);
uint64_t mirror_image_mode;
if (m_image_options.get(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE,
&mirror_image_mode) == 0) {
m_mirroring = true;
m_mirror_image_mode = static_cast<cls::rbd::MirrorImageMode>(
mirror_image_mode);
m_image_options.unset(RBD_IMAGE_OPTION_MIRROR_IMAGE_MODE);
}
int r;
C_SaferCond on_create;
librados::IoCtx parent_io_ctx;
if (parent_spec.pool_id == -1) {
auto *req = image::CreateRequest<I>::create(
config, m_dst_io_ctx, m_dst_image_name, m_dst_image_id, size,
m_image_options, image::CREATE_FLAG_SKIP_MIRROR_ENABLE,
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", "",
m_src_image_ctx->op_work_queue, &on_create);
req->send();
} else {
r = util::create_ioctx(m_src_image_ctx->md_ctx, "parent image",
parent_spec.pool_id, parent_spec.pool_namespace,
&parent_io_ctx);
if (r < 0) {
return r;
}
auto *req = image::CloneRequest<I>::create(
config, parent_io_ctx, parent_spec.image_id, "", {}, parent_spec.snap_id,
m_dst_io_ctx, m_dst_image_name, m_dst_image_id, m_image_options,
cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", "",
m_src_image_ctx->op_work_queue, &on_create);
req->send();
}
r = on_create.wait();
if (r < 0) {
lderr(m_cct) << "header creation failed: " << cpp_strerror(r) << dendl;
return r;
}
auto dst_image_ctx = *image_ctx;
dst_image_ctx->id = m_dst_image_id;
*image_ctx = nullptr; // prevent prepare from cleaning up the ImageCtx
r = dst_image_ctx->state->open(OPEN_FLAG_IGNORE_MIGRATING);
if (r < 0) {
lderr(m_cct) << "failed to open newly created header: " << cpp_strerror(r)
<< dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(dst_image_ctx) {
dst_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
{
std::shared_lock owner_locker{dst_image_ctx->owner_lock};
r = dst_image_ctx->operations->prepare_image_update(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true);
if (r < 0) {
lderr(m_cct) << "cannot obtain exclusive lock" << dendl;
return r;
}
if (dst_image_ctx->exclusive_lock != nullptr) {
dst_image_ctx->exclusive_lock->block_requests(0);
}
}
SnapSeqs snap_seqs;
C_SaferCond on_snapshot_copy;
auto snapshot_copy_req = librbd::deep_copy::SnapshotCopyRequest<I>::create(
m_src_image_ctx, dst_image_ctx, 0, CEPH_NOSNAP, 0, m_flatten,
m_src_image_ctx->op_work_queue, &snap_seqs, &on_snapshot_copy);
snapshot_copy_req->send();
r = on_snapshot_copy.wait();
if (r < 0) {
lderr(m_cct) << "failed to copy snapshots: " << cpp_strerror(r) << dendl;
return r;
}
if (!m_src_image_ctx->header_oid.empty()) {
C_SaferCond on_metadata_copy;
auto metadata_copy_req = librbd::deep_copy::MetadataCopyRequest<I>::create(
m_src_image_ctx, dst_image_ctx, &on_metadata_copy);
metadata_copy_req->send();
r = on_metadata_copy.wait();
if (r < 0) {
lderr(m_cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
return r;
}
}
m_dst_migration_spec.snap_seqs = snap_seqs;
m_dst_migration_spec.overlap = size;
m_dst_migration_spec.mirroring = m_mirroring;
m_dst_migration_spec.mirror_image_mode = m_mirror_image_mode;
m_dst_migration_spec.flatten = m_flatten;
r = cls_client::migration_set(&m_dst_io_ctx, m_dst_header_oid,
m_dst_migration_spec);
if (r < 0) {
lderr(m_cct) << "failed to set migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
if (m_dst_migration_spec.source_spec.empty()) {
r = update_group(m_src_image_ctx, dst_image_ctx);
if (r < 0) {
return r;
}
r = set_state(m_src_image_ctx, "source",
cls::rbd::MIGRATION_STATE_PREPARED, "");
if (r < 0) {
return r;
}
}
r = set_state(dst_image_ctx, "destination",
cls::rbd::MIGRATION_STATE_PREPARED, "");
if (r < 0) {
return r;
}
if (m_dst_migration_spec.source_spec.empty()) {
r = dst_image_ctx->state->refresh();
if (r < 0) {
lderr(m_cct) << "failed to refresh destination image: " << cpp_strerror(r)
<< dendl;
return r;
}
r = relink_children(m_src_image_ctx, dst_image_ctx);
if (r < 0) {
return r;
}
}
return 0;
}
template <typename I>
int Migration<I>::remove_group(I *image_ctx, group_info_t *group_info) {
int r = librbd::api::Group<I>::image_get_group(image_ctx, group_info);
if (r < 0) {
lderr(m_cct) << "failed to get image group: " << cpp_strerror(r) << dendl;
return r;
}
if (group_info->pool == -1) {
return -ENOENT;
}
ceph_assert(!image_ctx->id.empty());
ldout(m_cct, 10) << dendl;
IoCtx group_ioctx;
r = util::create_ioctx(image_ctx->md_ctx, "group", group_info->pool, {},
&group_ioctx);
if (r < 0) {
return r;
}
r = librbd::api::Group<I>::image_remove_by_id(group_ioctx,
group_info->name.c_str(),
image_ctx->md_ctx,
image_ctx->id.c_str());
if (r < 0) {
lderr(m_cct) << "failed to remove image from group: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::add_group(I *image_ctx, group_info_t &group_info) {
if (group_info.pool == -1) {
return 0;
}
ldout(m_cct, 10) << dendl;
IoCtx group_ioctx;
int r = util::create_ioctx(image_ctx->md_ctx, "group", group_info.pool, {},
&group_ioctx);
if (r < 0) {
return r;
}
r = librbd::api::Group<I>::image_add(group_ioctx, group_info.name.c_str(),
image_ctx->md_ctx,
image_ctx->name.c_str());
if (r < 0) {
lderr(m_cct) << "failed to add image to group: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Migration<I>::update_group(I *from_image_ctx, I *to_image_ctx) {
ldout(m_cct, 10) << dendl;
group_info_t group_info;
int r = remove_group(from_image_ctx, &group_info);
if (r < 0) {
return r == -ENOENT ? 0 : r;
}
r = add_group(to_image_ctx, group_info);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Migration<I>::disable_mirroring(
I *image_ctx, bool *was_enabled,
cls::rbd::MirrorImageMode *mirror_image_mode) {
*was_enabled = false;
cls::rbd::MirrorImage mirror_image;
int r = cls_client::mirror_image_get(&image_ctx->md_ctx, image_ctx->id,
&mirror_image);
if (r == -ENOENT) {
ldout(m_cct, 10) << "mirroring is not enabled for this image" << dendl;
return 0;
}
if (r < 0) {
lderr(m_cct) << "failed to retrieve mirror image: " << cpp_strerror(r)
<< dendl;
return r;
}
if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
*was_enabled = true;
*mirror_image_mode = mirror_image.mode;
}
ldout(m_cct, 10) << dendl;
C_SaferCond ctx;
auto req = mirror::DisableRequest<I>::create(image_ctx, false, true, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "failed to disable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
m_src_migration_spec.mirroring = true;
return 0;
}
template <typename I>
int Migration<I>::enable_mirroring(
I *image_ctx, bool was_enabled,
cls::rbd::MirrorImageMode mirror_image_mode) {
cls::rbd::MirrorMode mirror_mode;
int r = cls_client::mirror_mode_get(&image_ctx->md_ctx, &mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
ldout(m_cct, 10) << "mirroring is not enabled for destination pool"
<< dendl;
return 0;
}
if (mirror_mode == cls::rbd::MIRROR_MODE_IMAGE && !was_enabled) {
ldout(m_cct, 10) << "mirroring is not enabled for image" << dendl;
return 0;
}
ldout(m_cct, 10) << dendl;
C_SaferCond ctx;
auto req = mirror::EnableRequest<I>::create(
image_ctx, mirror_image_mode, "", false, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "failed to enable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
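// --- Illustrative sketch (not part of the original file) ---
// The decision enable_mirroring() above makes once a migration step
// completes, reduced to a pure predicate; "sketch_should_reenable" is a
// hypothetical name introduced only for this sketch.
inline bool sketch_should_reenable(cls::rbd::MirrorMode pool_mode,
                                   bool was_enabled) {
  if (pool_mode == cls::rbd::MIRROR_MODE_DISABLED) {
    return false;  // pool/namespace is not mirrored at all
  }
  if (pool_mode == cls::rbd::MIRROR_MODE_IMAGE && !was_enabled) {
    return false;  // the image had mirroring off before the migration
  }
  return true;     // pool mode, or the image had mirroring enabled
}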
// When relinking children we should be careful as it may be interrupted
// at any moment for some reason and we may end up in an inconsistent
// state, which we have to be able to fix with "migration abort". Below
// are all possible states during migration (P1 - source parent, P2 -
// destination parent, C - child):
//
//   P1  P2    P1  P2    P1  P2    P1  P2
//   ^\         \  ^      \ /^        /^
//    \v         v/        v/        v/
//     C          C         C         C
//
//     1          2         3         4
//
// (1) and (4) are the initial and the final consistent states. (2)
// and (3) are intermediate inconsistent states that have to be fixed
// by relink_children running in "migration abort" mode. For this, it
// scans P2 for all children attached and relinks (fixes) states (3)
// and (4) to state (1). Then it scans P1 for remaining children and
// fixes the states (2).
template <typename I>
int Migration<I>::relink_children(I *from_image_ctx, I *to_image_ctx) {
ldout(m_cct, 10) << dendl;
bool migration_abort = (to_image_ctx == m_src_image_ctx);
std::vector<librbd::snap_info_t> snaps;
int r = list_src_snaps(
migration_abort ? to_image_ctx : from_image_ctx, &snaps);
if (r < 0) {
return r;
}
for (auto it = snaps.begin(); it != snaps.end(); it++) {
auto &snap = *it;
std::vector<librbd::linked_image_spec_t> src_child_images;
if (from_image_ctx != m_src_image_ctx) {
ceph_assert(migration_abort);
// We run list snaps against the src image to get only those snapshots
// that are migrated. If the "from" image is not the src image
// (abort migration case), we need to remap snap ids.
// Also collect the list of the children currently attached to the
// source, so we can make a proper decision later about relinking.
std::shared_lock src_image_locker{to_image_ctx->image_lock};
cls::rbd::ParentImageSpec src_parent_spec{to_image_ctx->md_ctx.get_id(),
to_image_ctx->md_ctx.get_namespace(),
to_image_ctx->id, snap.id};
r = api::Image<I>::list_children(to_image_ctx, src_parent_spec,
&src_child_images);
if (r < 0) {
lderr(m_cct) << "failed listing children: " << cpp_strerror(r)
<< dendl;
return r;
}
std::shared_lock image_locker{from_image_ctx->image_lock};
snap.id = from_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
snap.name);
if (snap.id == CEPH_NOSNAP) {
ldout(m_cct, 5) << "skipping snapshot " << snap.name << dendl;
continue;
}
}
std::vector<librbd::linked_image_spec_t> child_images;
{
std::shared_lock image_locker{from_image_ctx->image_lock};
cls::rbd::ParentImageSpec parent_spec{from_image_ctx->md_ctx.get_id(),
from_image_ctx->md_ctx.get_namespace(),
from_image_ctx->id, snap.id};
r = api::Image<I>::list_children(from_image_ctx, parent_spec,
&child_images);
if (r < 0) {
lderr(m_cct) << "failed listing children: " << cpp_strerror(r)
<< dendl;
return r;
}
}
for (auto &child_image : child_images) {
r = relink_child(from_image_ctx, to_image_ctx, snap, child_image,
migration_abort, true);
if (r < 0) {
return r;
}
src_child_images.erase(std::remove(src_child_images.begin(),
src_child_images.end(), child_image),
src_child_images.end());
}
for (auto &child_image : src_child_images) {
r = relink_child(from_image_ctx, to_image_ctx, snap, child_image,
migration_abort, false);
if (r < 0) {
return r;
}
}
}
return 0;
}
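// --- Illustrative sketch (not part of the original file) ---
// A standalone model of the two-pass fix-up described in the comment above
// relink_children(). The exact encoding of states (2) and (3) in
// "SketchChildLink" is an assumption made for illustration; the pass order
// (fix everything attached under P2 first, then the P1 leftovers) follows
// the comment.
namespace relink_sketch {
struct SketchChildLink {
  bool listed_under_p1;   // P1's children directory contains C
  bool listed_under_p2;   // P2's children directory contains C
  bool parent_ref_is_p2;  // C's own parent spec points at P2 (else P1)
};
inline void sketch_abort_fixup(SketchChildLink* c) {
  // pass 1: children attached under P2 (states (3) and (4)) go back to (1)
  if (c->listed_under_p2) {
    c->parent_ref_is_p2 = false;
    c->listed_under_p2 = false;
    c->listed_under_p1 = true;
  }
  // pass 2: leftovers under P1 whose parent ref still points at P2 (state (2))
  if (c->listed_under_p1 && c->parent_ref_is_p2) {
    c->parent_ref_is_p2 = false;
  }
}
} // namespace relink_sketch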
template <typename I>
int Migration<I>::relink_child(I *from_image_ctx, I *to_image_ctx,
const librbd::snap_info_t &from_snap,
const librbd::linked_image_spec_t &child_image,
bool migration_abort, bool reattach_child) {
ldout(m_cct, 10) << from_snap.name << " " << child_image.pool_name << "/"
<< child_image.pool_namespace << "/"
<< child_image.image_name << " (migration_abort="
<< migration_abort << ", reattach_child=" << reattach_child
<< ")" << dendl;
librados::snap_t to_snap_id;
{
std::shared_lock image_locker{to_image_ctx->image_lock};
to_snap_id = to_image_ctx->get_snap_id(cls::rbd::UserSnapshotNamespace(),
from_snap.name);
if (to_snap_id == CEPH_NOSNAP) {
lderr(m_cct) << "no snapshot " << from_snap.name << " on destination image"
<< dendl;
return -ENOENT;
}
}
librados::IoCtx child_io_ctx;
int r = util::create_ioctx(to_image_ctx->md_ctx,
"child image " + child_image.image_name,
child_image.pool_id, child_image.pool_namespace,
&child_io_ctx);
if (r < 0) {
return r;
}
I *child_image_ctx = I::create("", child_image.image_id, nullptr,
child_io_ctx, false);
r = child_image_ctx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0) {
lderr(m_cct) << "failed to open child image: " << cpp_strerror(r) << dendl;
return r;
}
BOOST_SCOPE_EXIT_TPL(child_image_ctx) {
child_image_ctx->state->close();
} BOOST_SCOPE_EXIT_END;
uint32_t clone_format = 1;
if (child_image_ctx->test_op_features(RBD_OPERATION_FEATURE_CLONE_CHILD)) {
clone_format = 2;
}
cls::rbd::ParentImageSpec parent_spec;
uint64_t parent_overlap;
{
std::shared_lock image_locker{child_image_ctx->image_lock};
// use oldest snapshot or HEAD for parent spec
if (!child_image_ctx->snap_info.empty()) {
parent_spec = child_image_ctx->snap_info.begin()->second.parent.spec;
parent_overlap = child_image_ctx->snap_info.begin()->second.parent.overlap;
} else {
parent_spec = child_image_ctx->parent_md.spec;
parent_overlap = child_image_ctx->parent_md.overlap;
}
}
if (migration_abort &&
parent_spec.pool_id == to_image_ctx->md_ctx.get_id() &&
parent_spec.pool_namespace == to_image_ctx->md_ctx.get_namespace() &&
parent_spec.image_id == to_image_ctx->id &&
parent_spec.snap_id == to_snap_id) {
ldout(m_cct, 10) << "no need for parent re-attach" << dendl;
} else {
if (parent_spec.pool_id != from_image_ctx->md_ctx.get_id() ||
parent_spec.pool_namespace != from_image_ctx->md_ctx.get_namespace() ||
parent_spec.image_id != from_image_ctx->id ||
parent_spec.snap_id != from_snap.id) {
lderr(m_cct) << "parent is not source image: " << parent_spec.pool_id
<< "/" << parent_spec.pool_namespace << "/"
<< parent_spec.image_id << "@" << parent_spec.snap_id
<< dendl;
return -ESTALE;
}
parent_spec.pool_id = to_image_ctx->md_ctx.get_id();
parent_spec.pool_namespace = to_image_ctx->md_ctx.get_namespace();
parent_spec.image_id = to_image_ctx->id;
parent_spec.snap_id = to_snap_id;
C_SaferCond on_reattach_parent;
auto reattach_parent_req = image::AttachParentRequest<I>::create(
*child_image_ctx, parent_spec, parent_overlap, true, &on_reattach_parent);
reattach_parent_req->send();
r = on_reattach_parent.wait();
if (r < 0) {
lderr(m_cct) << "failed to re-attach parent: " << cpp_strerror(r) << dendl;
return r;
}
}
if (reattach_child) {
C_SaferCond on_reattach_child;
auto reattach_child_req = image::AttachChildRequest<I>::create(
child_image_ctx, to_image_ctx, to_snap_id, from_image_ctx, from_snap.id,
clone_format, &on_reattach_child);
reattach_child_req->send();
r = on_reattach_child.wait();
if (r < 0) {
lderr(m_cct) << "failed to re-attach child: " << cpp_strerror(r) << dendl;
return r;
}
}
child_image_ctx->notify_update();
return 0;
}
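// --- Illustrative sketch (not part of the original file) ---
// The clone-format choice relink_child() above makes before re-attaching:
// a child advertising the clone-child op feature is re-attached with v2
// semantics, anything else with v1. Hypothetical helper name.
inline uint32_t sketch_clone_format(bool has_clone_child_op_feature) {
  return has_clone_child_op_feature ? 2 : 1;
}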
template <typename I>
int Migration<I>::remove_src_image(I** image_ctx) {
ldout(m_cct, 10) << dendl;
auto src_image_ctx = *image_ctx;
std::vector<librbd::snap_info_t> snaps;
int r = list_src_snaps(src_image_ctx, &snaps);
if (r < 0) {
return r;
}
for (auto it = snaps.rbegin(); it != snaps.rend(); it++) {
auto &snap = *it;
librbd::NoOpProgressContext prog_ctx;
int r = Snapshot<I>::remove(src_image_ctx, snap.name.c_str(),
RBD_SNAP_REMOVE_UNPROTECT, prog_ctx);
if (r < 0) {
lderr(m_cct) << "failed removing source image snapshot '" << snap.name
<< "': " << cpp_strerror(r) << dendl;
return r;
}
}
ceph_assert(src_image_ctx->ignore_migrating);
auto asio_engine = src_image_ctx->asio_engine;
auto src_image_id = src_image_ctx->id;
librados::IoCtx src_io_ctx(src_image_ctx->md_ctx);
C_SaferCond on_remove;
auto req = librbd::image::RemoveRequest<I>::create(
src_io_ctx, src_image_ctx, false, true, *m_prog_ctx,
asio_engine->get_work_queue(), &on_remove);
req->send();
r = on_remove.wait();
*image_ctx = nullptr;
// For an old-format image this will return -ENOENT due to the expected
// tmap_rm failure at the end.
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "failed removing source image: " << cpp_strerror(r)
<< dendl;
return r;
}
if (!src_image_id.empty()) {
r = cls_client::trash_remove(&src_io_ctx, src_image_id);
if (r < 0 && r != -ENOENT) {
lderr(m_cct) << "error removing image " << src_image_id
<< " from rbd_trash object" << dendl;
}
}
return 0;
}
template <typename I>
int Migration<I>::revert_data(I* src_image_ctx, I* dst_image_ctx,
ProgressContext* prog_ctx) {
ldout(m_cct, 10) << dendl;
cls::rbd::MigrationSpec migration_spec;
int r = cls_client::migration_get(&src_image_ctx->md_ctx,
src_image_ctx->header_oid,
&migration_spec);
if (r < 0) {
lderr(m_cct) << "failed retrieving migration header: " << cpp_strerror(r)
<< dendl;
return r;
}
if (migration_spec.header_type != cls::rbd::MIGRATION_HEADER_TYPE_DST) {
lderr(m_cct) << "unexpected migration header type: "
<< migration_spec.header_type << dendl;
return -EINVAL;
}
uint64_t src_snap_id_start = 0;
uint64_t src_snap_id_end = CEPH_NOSNAP;
uint64_t dst_snap_id_start = 0;
if (!migration_spec.snap_seqs.empty()) {
src_snap_id_start = migration_spec.snap_seqs.rbegin()->second;
}
// we only care about the HEAD revision so only add a single mapping to
// represent the most recent state
SnapSeqs snap_seqs;
snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;
ldout(m_cct, 20) << "src_snap_id_start=" << src_snap_id_start << ", "
<< "src_snap_id_end=" << src_snap_id_end << ", "
<< "dst_snap_id_start=" << dst_snap_id_start << ", "
<< "snap_seqs=" << snap_seqs << dendl;
C_SaferCond ctx;
deep_copy::ProgressHandler progress_handler(prog_ctx);
auto request = deep_copy::ImageCopyRequest<I>::create(
src_image_ctx, dst_image_ctx, src_snap_id_start, src_snap_id_end,
dst_snap_id_start, false, {}, snap_seqs, &progress_handler, &ctx);
request->send();
r = ctx.wait();
if (r < 0) {
lderr(m_cct) << "error reverting destination image data blocks back to "
<< "source image: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
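// --- Illustrative sketch (not part of the original file) ---
// How revert_data() above derives its deep-copy window: start after the
// newest snapshot that was migrated (the highest mapped snap id recorded
// in the migration spec) and publish a single CEPH_NOSNAP -> CEPH_NOSNAP
// mapping so that only the HEAD revision is transferred. Names prefixed
// "sketch_" are hypothetical.
namespace {
struct SketchCopyWindow {
  uint64_t src_snap_id_start = 0;
  uint64_t src_snap_id_end = CEPH_NOSNAP;
  std::map<uint64_t, uint64_t> snap_seqs;
};
inline SketchCopyWindow sketch_revert_window(
    const std::map<uint64_t, uint64_t>& migrated_snap_seqs) {
  SketchCopyWindow w;
  if (!migrated_snap_seqs.empty()) {
    w.src_snap_id_start = migrated_snap_seqs.rbegin()->second;
  }
  w.snap_seqs[CEPH_NOSNAP] = CEPH_NOSNAP;  // HEAD-only mapping
  return w;
}
} // anonymous namespace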
} // namespace api
} // namespace librbd
template class librbd::api::Migration<librbd::ImageCtx>;
ceph-main/src/librbd/api/Migration.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_MIGRATION_H
#define CEPH_LIBRBD_API_MIGRATION_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <vector>
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class Migration {
public:
static int prepare(librados::IoCtx& io_ctx, const std::string &image_name,
librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name, ImageOptions& opts);
static int prepare_import(const std::string& source_spec,
librados::IoCtx& dest_io_ctx,
const std::string &dest_image_name,
ImageOptions& opts);
static int execute(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx);
static int abort(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx);
static int commit(librados::IoCtx& io_ctx, const std::string &image_name,
ProgressContext &prog_ctx);
static int status(librados::IoCtx& io_ctx, const std::string &image_name,
image_migration_status_t *status);
static int get_source_spec(ImageCtxT* image_ctx, std::string* source_spec);
private:
CephContext* m_cct;
ImageCtx* m_src_image_ctx;
ImageCtx* m_dst_image_ctx;
librados::IoCtx m_dst_io_ctx;
std::string m_dst_image_name;
std::string m_dst_image_id;
std::string m_dst_header_oid;
ImageOptions &m_image_options;
bool m_flatten;
bool m_mirroring;
cls::rbd::MirrorImageMode m_mirror_image_mode;
ProgressContext *m_prog_ctx;
cls::rbd::MigrationSpec m_src_migration_spec;
cls::rbd::MigrationSpec m_dst_migration_spec;
Migration(ImageCtx* src_image_ctx, ImageCtx* dst_image_ctx,
const cls::rbd::MigrationSpec& dst_migration_spec,
ImageOptions& opts, ProgressContext *prog_ctx);
int prepare();
int prepare_import();
int execute();
int abort();
int commit();
int status(image_migration_status_t *status);
int set_state(ImageCtxT* image_ctx, const std::string& image_description,
cls::rbd::MigrationState state,
const std::string &description);
int set_state(cls::rbd::MigrationState state, const std::string &description);
int list_src_snaps(ImageCtxT* image_ctx,
std::vector<librbd::snap_info_t> *snaps);
int validate_src_snaps(ImageCtxT* image_ctx);
int disable_mirroring(ImageCtxT* image_ctx, bool *was_enabled,
cls::rbd::MirrorImageMode *mirror_image_mode);
int enable_mirroring(ImageCtxT* image_ctx, bool was_enabled,
cls::rbd::MirrorImageMode mirror_image_mode);
int set_src_migration(ImageCtxT* image_ctx);
int unlink_src_image(ImageCtxT* image_ctx);
int relink_src_image(ImageCtxT* image_ctx);
int create_dst_image(ImageCtxT** image_ctx);
int remove_group(ImageCtxT* image_ctx, group_info_t *group_info);
int add_group(ImageCtxT* image_ctx, group_info_t &group_info);
int update_group(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx);
int remove_migration(ImageCtxT* image_ctx);
int relink_children(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx);
int remove_src_image(ImageCtxT** image_ctx);
int v1_set_src_migration(ImageCtxT* image_ctx);
int v2_set_src_migration(ImageCtxT* image_ctx);
int v1_unlink_src_image(ImageCtxT* image_ctx);
int v2_unlink_src_image(ImageCtxT* image_ctx);
int v1_relink_src_image(ImageCtxT* image_ctx);
int v2_relink_src_image(ImageCtxT* image_ctx);
int relink_child(ImageCtxT *from_image_ctx, ImageCtxT *to_image_ctx,
const librbd::snap_info_t &src_snap,
const librbd::linked_image_spec_t &child_image,
bool migration_abort, bool reattach_child);
int revert_data(ImageCtxT* src_image_ctx, ImageCtxT* dst_image_ctx,
ProgressContext *prog_ctx);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Migration<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_MIGRATION_H
ceph-main/src/librbd/api/Mirror.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Mirror.h"
#include "include/rados/librados.hpp"
#include "include/stringify.h"
#include "common/ceph_json.h"
#include "common/dout.h"
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Journal.h"
#include "librbd/MirroringWatcher.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Image.h"
#include "librbd/api/Namespace.h"
#include "librbd/mirror/DemoteRequest.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/mirror/GetInfoRequest.h"
#include "librbd/mirror/GetStatusRequest.h"
#include "librbd/mirror/GetUuidRequest.h"
#include "librbd/mirror/PromoteRequest.h"
#include "librbd/mirror/Types.h"
#include "librbd/MirroringWatcher.h"
#include "librbd/mirror/snapshot/CreatePrimaryRequest.h"
#include "librbd/mirror/snapshot/ImageMeta.h"
#include "librbd/mirror/snapshot/UnlinkPeerRequest.h"
#include "librbd/mirror/snapshot/Utils.h"
#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/replace.hpp>
#include <boost/scope_exit.hpp>
#include "json_spirit/json_spirit.h"
#include <algorithm>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Mirror: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
int get_config_key(librados::Rados& rados, const std::string& key,
std::string* value) {
std::string cmd =
"{"
"\"prefix\": \"config-key get\", "
"\"key\": \"" + key + "\""
"}";
bufferlist in_bl;
bufferlist out_bl;
int r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r == -EINVAL) {
return -EOPNOTSUPP;
} else if (r < 0 && r != -ENOENT) {
return r;
}
*value = out_bl.to_str();
return 0;
}
int set_config_key(librados::Rados& rados, const std::string& key,
const std::string& value) {
std::string cmd;
if (value.empty()) {
cmd = "{"
"\"prefix\": \"config-key rm\", "
"\"key\": \"" + key + "\""
"}";
} else {
cmd = "{"
"\"prefix\": \"config-key set\", "
"\"key\": \"" + key + "\", "
"\"val\": \"" + value + "\""
"}";
}
bufferlist in_bl;
bufferlist out_bl;
int r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r == -EINVAL) {
return -EOPNOTSUPP;
} else if (r < 0) {
return r;
}
return 0;
}
std::string get_peer_config_key_name(int64_t pool_id,
const std::string& peer_uuid) {
return RBD_MIRROR_PEER_CONFIG_KEY_PREFIX + stringify(pool_id) + "/" +
peer_uuid;
}
int remove_peer_config_key(librados::IoCtx& io_ctx,
const std::string& peer_uuid) {
int64_t pool_id = io_ctx.get_id();
auto key = get_peer_config_key_name(pool_id, peer_uuid);
librados::Rados rados(io_ctx);
int r = set_config_key(rados, key, "");
if (r < 0 && r != -ENOENT && r != -EPERM) {
return r;
}
return 0;
}
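// --- Illustrative sketch (not part of the original file) ---
// Typical round trip through the helpers above: derive the per-pool peer
// key name, store a value via the mon config-key store, then read it back.
// "sketch_store_peer_attr" is a hypothetical name; the error handling
// mirrors the callers in this file.
inline int sketch_store_peer_attr(librados::Rados& rados, int64_t pool_id,
                                  const std::string& peer_uuid,
                                  const std::string& value) {
  auto key = get_peer_config_key_name(pool_id, peer_uuid);
  int r = set_config_key(rados, key, value);  // an empty value removes the key
  if (r < 0) {
    return r;  // -EOPNOTSUPP when the mons lack config-key support
  }
  std::string stored;
  return get_config_key(rados, key, &stored);
}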
std::string get_mon_host(CephContext* cct) {
std::string mon_host;
if (auto mon_addrs = cct->get_mon_addrs();
mon_addrs != nullptr && !mon_addrs->empty()) {
CachedStackStringStream css;
for (auto it = mon_addrs->begin(); it != mon_addrs->end(); ++it) {
if (it != mon_addrs->begin()) {
*css << ",";
}
*css << *it;
}
mon_host = css->str();
} else {
ldout(cct, 20) << "falling back to mon_host in conf" << dendl;
mon_host = cct->_conf.get_val<std::string>("mon_host");
}
ldout(cct, 20) << "mon_host=" << mon_host << dendl;
return mon_host;
}
int create_bootstrap_user(CephContext* cct, librados::Rados& rados,
std::string* peer_client_id, std::string* cephx_key) {
ldout(cct, 20) << dendl;
// retrieve peer CephX user from config-key
int r = get_config_key(rados, RBD_MIRROR_PEER_CLIENT_ID_CONFIG_KEY,
peer_client_id);
if (r == -EACCES) {
ldout(cct, 5) << "insufficient permissions to get peer-client-id "
<< "config-key" << dendl;
return r;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve peer client id key: "
<< cpp_strerror(r) << dendl;
return r;
} else if (r == -ENOENT || peer_client_id->empty()) {
ldout(cct, 20) << "creating new peer-client-id config-key" << dendl;
*peer_client_id = "rbd-mirror-peer";
r = set_config_key(rados, RBD_MIRROR_PEER_CLIENT_ID_CONFIG_KEY,
*peer_client_id);
if (r == -EACCES) {
ldout(cct, 5) << "insufficient permissions to update peer-client-id "
<< "config-key" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to update peer client id key: "
<< cpp_strerror(r) << dendl;
return r;
}
}
ldout(cct, 20) << "peer_client_id=" << *peer_client_id << dendl;
// create peer client user
std::string cmd =
R"({)" \
R"( "prefix": "auth get-or-create",)" \
R"( "entity": "client.)" + *peer_client_id + R"(",)" \
R"( "caps": [)" \
R"( "mon", "profile rbd-mirror-peer",)" \
R"( "osd", "profile rbd"],)" \
R"( "format": "json")" \
R"(})";
bufferlist in_bl;
bufferlist out_bl;
r = rados.mon_command(cmd, in_bl, &out_bl, nullptr);
if (r == -EINVAL) {
ldout(cct, 5) << "caps mismatch for existing user" << dendl;
return -EEXIST;
} else if (r == -EACCES) {
ldout(cct, 5) << "insufficient permissions to create user" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to create or update RBD mirroring bootstrap user: "
<< cpp_strerror(r) << dendl;
return r;
}
// extract key from response
bool json_valid = false;
json_spirit::mValue json_root;
if(json_spirit::read(out_bl.to_str(), json_root)) {
try {
auto& json_obj = json_root.get_array()[0].get_obj();
*cephx_key = json_obj["key"].get_str();
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(cct) << "invalid auth keyring JSON received" << dendl;
return -EBADMSG;
}
return 0;
}
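// --- Illustrative sketch (not part of the original file) ---
// Shape of the mon reply consumed by the "extract key from response" step
// above: `auth get-or-create ... --format json` yields a one-element array
// whose object carries the entity's secret. The helper name and the sample
// input below are fabricated for illustration.
inline bool sketch_parse_auth_key(const std::string& json, std::string* key) {
  json_spirit::mValue root;
  if (!json_spirit::read(json, root)) {
    return false;
  }
  try {
    auto& arr = root.get_array();
    if (arr.empty()) {
      return false;
    }
    *key = arr[0].get_obj()["key"].get_str();
    return true;
  } catch (std::runtime_error&) {
    return false;  // unexpected layout; the caller above maps this to -EBADMSG
  }
}
// e.g. sketch_parse_auth_key(
//     R"([{"entity":"client.rbd-mirror-peer","key":"AQBfabricated=="}])",
//     &cephx_key);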
int create_bootstrap_peer(CephContext* cct, librados::IoCtx& io_ctx,
mirror_peer_direction_t direction,
const std::string& site_name, const std::string& fsid,
const std::string& client_id, const std::string& key,
const std::string& mon_host,
const std::string& cluster1,
const std::string& cluster2) {
ldout(cct, 20) << dendl;
std::string peer_uuid;
std::vector<mirror_peer_site_t> peers;
int r = Mirror<>::peer_site_list(io_ctx, &peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror peers: " << cpp_strerror(r) << dendl;
return r;
}
if (peers.empty()) {
r = Mirror<>::peer_site_add(io_ctx, &peer_uuid, direction, site_name,
"client." + client_id);
if (r < 0) {
lderr(cct) << "failed to add " << cluster1 << " peer to "
<< cluster2 << " " << "cluster: " << cpp_strerror(r) << dendl;
return r;
}
} else if (peers[0].site_name != site_name &&
peers[0].site_name != fsid) {
// only support a single peer
lderr(cct) << "multiple peers are not currently supported" << dendl;
return -EINVAL;
} else {
peer_uuid = peers[0].uuid;
if (peers[0].site_name != site_name) {
r = Mirror<>::peer_site_set_name(io_ctx, peer_uuid, site_name);
if (r < 0) {
// non-fatal attempt to update site name
lderr(cct) << "failed to update peer site name" << dendl;
}
}
}
Mirror<>::Attributes attributes {
{"mon_host", mon_host},
{"key", key}};
r = Mirror<>::peer_site_set_attributes(io_ctx, peer_uuid, attributes);
if (r < 0) {
lderr(cct) << "failed to update " << cluster1 << " cluster connection "
<< "attributes in " << cluster2 << " cluster: "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
int list_mirror_images(librados::IoCtx& io_ctx,
std::set<std::string>& mirror_image_ids) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::string last_read = "";
int max_read = 1024;
int r;
do {
std::map<std::string, std::string> mirror_images;
r = cls_client::mirror_image_list(&io_ctx, last_read, max_read,
&mirror_images);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing mirrored image directory: "
<< cpp_strerror(r) << dendl;
return r;
}
for (auto it = mirror_images.begin(); it != mirror_images.end(); ++it) {
mirror_image_ids.insert(it->first);
}
if (!mirror_images.empty()) {
last_read = mirror_images.rbegin()->first;
}
r = mirror_images.size();
} while (r == max_read);
return 0;
}
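// --- Illustrative sketch (not part of the original file) ---
// The paging idiom used by list_mirror_images() above, reduced to a pure
// in-memory walk (the real code re-issues the cls call for each page):
// fetch at most max_read keys after last_read and keep going while a full
// page came back. "sketch_page_keys" is a hypothetical helper; it assumes
// max_read > 0.
inline void sketch_page_keys(const std::map<std::string, std::string>& dir,
                             size_t max_read,
                             std::set<std::string>* out) {
  std::string last_read;
  size_t n;
  do {
    n = 0;
    for (auto it = dir.upper_bound(last_read);
         it != dir.end() && n < max_read; ++it, ++n) {
      out->insert(it->first);
      last_read = it->first;
    }
  } while (n == max_read);  // a short page means the listing is exhausted
}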
template <typename I>
const char *pool_or_namespace(I *ictx) {
if (!ictx->md_ctx.get_namespace().empty()) {
return "namespace";
} else {
return "pool";
}
}
struct C_ImageGetInfo : public Context {
mirror_image_info_t *mirror_image_info;
mirror_image_mode_t *mirror_image_mode;
Context *on_finish;
cls::rbd::MirrorImage mirror_image;
mirror::PromotionState promotion_state = mirror::PROMOTION_STATE_PRIMARY;
std::string primary_mirror_uuid;
C_ImageGetInfo(mirror_image_info_t *mirror_image_info,
mirror_image_mode_t *mirror_image_mode, Context *on_finish)
: mirror_image_info(mirror_image_info),
mirror_image_mode(mirror_image_mode), on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0 && r != -ENOENT) {
on_finish->complete(r);
return;
}
if (mirror_image_info != nullptr) {
mirror_image_info->global_id = mirror_image.global_image_id;
mirror_image_info->state = static_cast<rbd_mirror_image_state_t>(
mirror_image.state);
mirror_image_info->primary = (
promotion_state == mirror::PROMOTION_STATE_PRIMARY);
}
if (mirror_image_mode != nullptr) {
*mirror_image_mode =
static_cast<rbd_mirror_image_mode_t>(mirror_image.mode);
}
on_finish->complete(0);
}
};
struct C_ImageGetGlobalStatus : public C_ImageGetInfo {
std::string image_name;
mirror_image_global_status_t *mirror_image_global_status;
cls::rbd::MirrorImageStatus mirror_image_status_internal;
C_ImageGetGlobalStatus(
const std::string &image_name,
mirror_image_global_status_t *mirror_image_global_status,
Context *on_finish)
: C_ImageGetInfo(&mirror_image_global_status->info, nullptr, on_finish),
image_name(image_name),
mirror_image_global_status(mirror_image_global_status) {
}
void finish(int r) override {
if (r < 0 && r != -ENOENT) {
on_finish->complete(r);
return;
}
mirror_image_global_status->name = image_name;
mirror_image_global_status->site_statuses.clear();
mirror_image_global_status->site_statuses.reserve(
mirror_image_status_internal.mirror_image_site_statuses.size());
for (auto& site_status :
mirror_image_status_internal.mirror_image_site_statuses) {
mirror_image_global_status->site_statuses.push_back({
site_status.mirror_uuid,
static_cast<mirror_image_status_state_t>(site_status.state),
site_status.description, site_status.last_update.sec(),
site_status.up});
}
C_ImageGetInfo::finish(0);
}
};
template <typename I>
struct C_ImageSnapshotCreate : public Context {
I *ictx;
uint64_t snap_create_flags;
uint64_t *snap_id;
Context *on_finish;
cls::rbd::MirrorImage mirror_image;
mirror::PromotionState promotion_state;
std::string primary_mirror_uuid;
C_ImageSnapshotCreate(I *ictx, uint64_t snap_create_flags, uint64_t *snap_id,
Context *on_finish)
: ictx(ictx), snap_create_flags(snap_create_flags), snap_id(snap_id),
on_finish(on_finish) {
}
void finish(int r) override {
if (r < 0 && r != -ENOENT) {
on_finish->complete(r);
return;
}
if (mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT ||
mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(ictx->cct) << "snapshot based mirroring is not enabled" << dendl;
on_finish->complete(-EINVAL);
return;
}
auto req = mirror::snapshot::CreatePrimaryRequest<I>::create(
ictx, mirror_image.global_image_id, CEPH_NOSNAP, snap_create_flags, 0U,
snap_id, on_finish);
req->send();
}
};
} // anonymous namespace
template <typename I>
int Mirror<I>::image_enable(I *ictx, mirror_image_mode_t mode,
bool relax_same_pool_parent_check) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << " mode=" << mode
<< " relax_same_pool_parent_check="
<< relax_same_pool_parent_check << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::MirrorMode mirror_mode;
r = cls_client::mirror_mode_get(&ictx->md_ctx, &mirror_mode);
if (r < 0) {
lderr(cct) << "cannot enable mirroring: failed to retrieve mirror mode: "
<< cpp_strerror(r) << dendl;
return r;
}
if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
lderr(cct) << "cannot enable mirroring: mirroring is not enabled on a "
<< pool_or_namespace(ictx) << dendl;
return -EINVAL;
}
if (mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
lderr(cct) << "cannot enable mirroring: " << pool_or_namespace(ictx)
<< " is not in image mirror mode" << dendl;
return -EINVAL;
}
// is mirroring not enabled for the parent?
{
std::shared_lock image_locker{ictx->image_lock};
ImageCtx *parent = ictx->parent;
if (parent) {
if (parent->md_ctx.get_id() != ictx->md_ctx.get_id() ||
!relax_same_pool_parent_check) {
cls::rbd::MirrorImage mirror_image_internal;
r = cls_client::mirror_image_get(&(parent->md_ctx), parent->id,
&mirror_image_internal);
if (r == -ENOENT) {
lderr(cct) << "mirroring is not enabled for the parent" << dendl;
return -EINVAL;
}
}
}
}
if (mode == RBD_MIRROR_IMAGE_MODE_JOURNAL &&
!ictx->test_features(RBD_FEATURE_JOURNALING)) {
uint64_t features = RBD_FEATURE_JOURNALING;
if (!ictx->test_features(RBD_FEATURE_EXCLUSIVE_LOCK)) {
features |= RBD_FEATURE_EXCLUSIVE_LOCK;
}
r = ictx->operations->update_features(features, true);
if (r < 0) {
lderr(cct) << "cannot enable journaling: " << cpp_strerror(r) << dendl;
return r;
}
}
C_SaferCond ctx;
auto req = mirror::EnableRequest<ImageCtx>::create(
ictx, static_cast<cls::rbd::MirrorImageMode>(mode), "", false, &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "cannot enable mirroring: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::image_disable(I *ictx, bool force) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::MirrorMode mirror_mode;
r = cls_client::mirror_mode_get(&ictx->md_ctx, &mirror_mode);
if (r < 0) {
lderr(cct) << "cannot disable mirroring: failed to retrieve pool "
"mirroring mode: " << cpp_strerror(r) << dendl;
return r;
}
if (mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
lderr(cct) << "cannot disable mirroring in the current pool mirroring "
"mode" << dendl;
return -EINVAL;
}
// is mirroring enabled for the image?
cls::rbd::MirrorImage mirror_image_internal;
r = cls_client::mirror_image_get(&ictx->md_ctx, ictx->id,
&mirror_image_internal);
if (r == -ENOENT) {
// mirroring is not enabled for this image
ldout(cct, 20) << "ignoring disable command: mirroring is not enabled for "
<< "this image" << dendl;
return 0;
} else if (r == -EOPNOTSUPP) {
ldout(cct, 5) << "mirroring not supported by OSD" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to retrieve mirror image metadata: "
<< cpp_strerror(r) << dendl;
return r;
}
mirror_image_internal.state = cls::rbd::MIRROR_IMAGE_STATE_DISABLING;
r = cls_client::mirror_image_set(&ictx->md_ctx, ictx->id,
mirror_image_internal);
if (r < 0) {
lderr(cct) << "cannot disable mirroring: " << cpp_strerror(r) << dendl;
return r;
}
bool rollback = false;
BOOST_SCOPE_EXIT_ALL(ictx, &mirror_image_internal, &rollback) {
if (rollback) {
// restore the mask bit for treating the non-primary feature as read-only
ictx->image_lock.lock();
ictx->read_only_mask |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->handle_update_notification();
// attempt to restore the image state
CephContext *cct = ictx->cct;
mirror_image_internal.state = cls::rbd::MIRROR_IMAGE_STATE_ENABLED;
int r = cls_client::mirror_image_set(&ictx->md_ctx, ictx->id,
mirror_image_internal);
if (r < 0) {
lderr(cct) << "failed to re-enable image mirroring: "
<< cpp_strerror(r) << dendl;
}
}
};
std::unique_lock image_locker{ictx->image_lock};
std::map<librados::snap_t, SnapInfo> snap_info = ictx->snap_info;
for (auto &info : snap_info) {
cls::rbd::ParentImageSpec parent_spec{ictx->md_ctx.get_id(),
ictx->md_ctx.get_namespace(),
ictx->id, info.first};
std::vector<librbd::linked_image_spec_t> child_images;
r = Image<I>::list_children(ictx, parent_spec, &child_images);
if (r < 0) {
rollback = true;
return r;
}
if (child_images.empty()) {
continue;
}
librados::IoCtx child_io_ctx;
int64_t child_pool_id = -1;
for (auto &child_image : child_images){
std::string pool = child_image.pool_name;
if (child_pool_id == -1 ||
child_pool_id != child_image.pool_id ||
child_io_ctx.get_namespace() != child_image.pool_namespace) {
r = util::create_ioctx(ictx->md_ctx, "child image",
child_image.pool_id,
child_image.pool_namespace,
&child_io_ctx);
if (r < 0) {
rollback = true;
return r;
}
child_pool_id = child_image.pool_id;
}
cls::rbd::MirrorImage child_mirror_image_internal;
r = cls_client::mirror_image_get(&child_io_ctx, child_image.image_id,
&child_mirror_image_internal);
if (r != -ENOENT) {
rollback = true;
lderr(cct) << "mirroring is enabled on one or more children "
<< dendl;
return -EBUSY;
}
}
}
image_locker.unlock();
if (mirror_image_internal.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
// don't let the non-primary feature bit prevent image updates
ictx->image_lock.lock();
ictx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
r = ictx->state->refresh();
if (r < 0) {
rollback = true;
return r;
}
// remove any snapshot-based mirroring image-meta from image
std::string mirror_uuid;
r = uuid_get(ictx->md_ctx, &mirror_uuid);
if (r < 0) {
rollback = true;
return r;
}
r = ictx->operations->metadata_remove(
mirror::snapshot::util::get_image_meta_key(mirror_uuid));
if (r < 0 && r != -ENOENT) {
lderr(cct) << "cannot remove snapshot image-meta key: " << cpp_strerror(r)
<< dendl;
rollback = true;
return r;
}
}
C_SaferCond ctx;
auto req = mirror::DisableRequest<ImageCtx>::create(ictx, force, true,
&ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "cannot disable mirroring: " << cpp_strerror(r) << dendl;
rollback = true;
return r;
}
if (mirror_image_internal.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
r = ictx->operations->update_features(RBD_FEATURE_JOURNALING, false);
if (r < 0) {
lderr(cct) << "cannot disable journaling: " << cpp_strerror(r) << dendl;
// not fatal
}
}
return 0;
}
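// --- Illustrative sketch (not part of the original file) ---
// The rollback idiom image_disable() above builds with BOOST_SCOPE_EXIT_ALL,
// reduced to a minimal guard: flip the state optimistically, then have every
// early-return path arm the guard so the previous state is restored on the
// way out. "SketchRollbackGuard" is a hypothetical stand-in.
template <typename UndoFn>
struct SketchRollbackGuard {
  UndoFn undo;
  const bool* armed;
  ~SketchRollbackGuard() {
    if (*armed) {
      undo();  // runs on every exit path, like the scope-exit block above
    }
  }
};
// usage shape:
//   bool rollback = false;
//   auto undo = [&]() { /* restore MIRROR_IMAGE_STATE_ENABLED, etc. */ };
//   SketchRollbackGuard<decltype(undo)> guard{undo, &rollback};
//   if (r < 0) { rollback = true; return r; }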
template <typename I>
int Mirror<I>::image_promote(I *ictx, bool force) {
CephContext *cct = ictx->cct;
C_SaferCond ctx;
Mirror<I>::image_promote(ictx, force, &ctx);
int r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to promote image" << dendl;
return r;
}
return 0;
}
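// --- Illustrative sketch (not part of the original file) ---
// Every synchronous wrapper in this file follows the same shape: park a
// C_SaferCond as the completion Context, invoke the asynchronous variant,
// and block on wait(). "sketch_sync_call" is a hypothetical adapter.
template <typename AsyncFn>  // AsyncFn: void(Context*)
inline int sketch_sync_call(AsyncFn async_fn) {
  C_SaferCond ctx;     // completion that signals a condition variable
  async_fn(&ctx);      // the async variant completes ctx with a return code
  return ctx.wait();   // blocks until complete(r) fires; returns r
}
// e.g. sketch_sync_call([ictx](Context* on_finish) {
//   Mirror<I>::image_demote(ictx, on_finish);
// });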
template <typename I>
void Mirror<I>::image_promote(I *ictx, bool force, Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << ", "
<< "force=" << force << dendl;
// don't let the non-primary feature bit prevent image updates
ictx->image_lock.lock();
ictx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
auto on_promote = new LambdaContext([ictx, on_finish](int r) {
ictx->image_lock.lock();
ictx->read_only_mask |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->handle_update_notification();
on_finish->complete(r);
});
auto on_refresh = new LambdaContext([ictx, force, on_promote](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_promote->complete(r);
return;
}
auto req = mirror::PromoteRequest<>::create(*ictx, force, on_promote);
req->send();
});
ictx->state->refresh(on_refresh);
}
template <typename I>
int Mirror<I>::image_demote(I *ictx) {
CephContext *cct = ictx->cct;
C_SaferCond ctx;
Mirror<I>::image_demote(ictx, &ctx);
int r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to demote image" << dendl;
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_demote(I *ictx, Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto on_cleanup = new LambdaContext([ictx, on_finish](int r) {
ictx->image_lock.lock();
ictx->read_only_mask |= IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->handle_update_notification();
on_finish->complete(r);
});
auto on_refresh = new LambdaContext([ictx, on_cleanup](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_cleanup->complete(r);
return;
}
auto req = mirror::DemoteRequest<>::create(*ictx, on_cleanup);
req->send();
});
// ensure we can create a snapshot after setting the non-primary
// feature bit
ictx->image_lock.lock();
ictx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
ictx->image_lock.unlock();
ictx->state->refresh(on_refresh);
}
template <typename I>
int Mirror<I>::image_resync(I *ictx) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::MirrorImage mirror_image;
mirror::PromotionState promotion_state;
std::string primary_mirror_uuid;
C_SaferCond get_info_ctx;
auto req = mirror::GetInfoRequest<I>::create(*ictx, &mirror_image,
&promotion_state,
&primary_mirror_uuid,
&get_info_ctx);
req->send();
r = get_info_ctx.wait();
if (r < 0) {
return r;
}
if (promotion_state == mirror::PROMOTION_STATE_PRIMARY) {
lderr(cct) << "image is primary, cannot resync to itself" << dendl;
return -EINVAL;
}
if (mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_JOURNAL) {
// flag the journal indicating that we want to rebuild the local image
r = Journal<I>::request_resync(ictx);
if (r < 0) {
lderr(cct) << "failed to request resync: " << cpp_strerror(r) << dendl;
return r;
}
} else if (mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
std::string mirror_uuid;
r = uuid_get(ictx->md_ctx, &mirror_uuid);
if (r < 0) {
return r;
}
mirror::snapshot::ImageMeta image_meta(ictx, mirror_uuid);
C_SaferCond load_meta_ctx;
image_meta.load(&load_meta_ctx);
r = load_meta_ctx.wait();
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to load mirror image-meta: " << cpp_strerror(r)
<< dendl;
return r;
}
image_meta.resync_requested = true;
C_SaferCond save_meta_ctx;
image_meta.save(&save_meta_ctx);
r = save_meta_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to request resync: " << cpp_strerror(r) << dendl;
return r;
}
} else {
lderr(cct) << "unknown mirror mode" << dendl;
return -EINVAL;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_info(I *ictx, mirror_image_info_t *mirror_image_info,
Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto on_refresh = new LambdaContext(
[ictx, mirror_image_info, on_finish](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
auto ctx = new C_ImageGetInfo(mirror_image_info, nullptr, on_finish);
auto req = mirror::GetInfoRequest<I>::create(*ictx, &ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid,
ctx);
req->send();
});
if (ictx->state->is_refresh_required()) {
ictx->state->refresh(on_refresh);
} else {
on_refresh->complete(0);
}
}
template <typename I>
int Mirror<I>::image_get_info(I *ictx, mirror_image_info_t *mirror_image_info) {
C_SaferCond ctx;
image_get_info(ictx, mirror_image_info, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info,
Context *on_finish) {
auto cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "pool_id=" << io_ctx.get_id() << ", image_id=" << image_id
<< dendl;
auto ctx = new C_ImageGetInfo(mirror_image_info, nullptr, on_finish);
auto req = mirror::GetInfoRequest<I>::create(io_ctx, op_work_queue, image_id,
&ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
int Mirror<I>::image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info) {
C_SaferCond ctx;
image_get_info(io_ctx, op_work_queue, image_id, mirror_image_info, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_mode(I *ictx, mirror_image_mode_t *mode,
Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto ctx = new C_ImageGetInfo(nullptr, mode, on_finish);
auto req = mirror::GetInfoRequest<I>::create(*ictx, &ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid, ctx);
req->send();
}
template <typename I>
int Mirror<I>::image_get_mode(I *ictx, mirror_image_mode_t *mode) {
C_SaferCond ctx;
image_get_mode(ictx, mode, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::image_get_global_status(I *ictx,
mirror_image_global_status_t *status,
Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
auto ctx = new C_ImageGetGlobalStatus(ictx->name, status, on_finish);
auto req = mirror::GetStatusRequest<I>::create(
*ictx, &ctx->mirror_image_status_internal, &ctx->mirror_image,
&ctx->promotion_state, ctx);
req->send();
}
template <typename I>
int Mirror<I>::image_get_global_status(I *ictx,
mirror_image_global_status_t *status) {
C_SaferCond ctx;
image_get_global_status(ictx, status, &ctx);
int r = ctx.wait();
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::image_get_instance_id(I *ictx, std::string *instance_id) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
cls::rbd::MirrorImage mirror_image;
int r = cls_client::mirror_image_get(&ictx->md_ctx, ictx->id, &mirror_image);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state: " << cpp_strerror(r)
<< dendl;
return r;
} else if (mirror_image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(cct) << "mirroring is not currently enabled" << dendl;
return -EINVAL;
}
entity_inst_t instance;
r = cls_client::mirror_image_instance_get(&ictx->md_ctx,
mirror_image.global_image_id,
&instance);
if (r < 0) {
if (r != -ENOENT && r != -ESTALE) {
lderr(cct) << "failed to get mirror image instance: " << cpp_strerror(r)
<< dendl;
}
return r;
}
*instance_id = stringify(instance.name.num());
return 0;
}
template <typename I>
int Mirror<I>::site_name_get(librados::Rados& rados, std::string* name) {
CephContext *cct = reinterpret_cast<CephContext *>(rados.cct());
ldout(cct, 20) << dendl;
int r = get_config_key(rados, RBD_MIRROR_SITE_NAME_CONFIG_KEY, name);
if (r == -EOPNOTSUPP) {
return r;
} else if (r == -ENOENT || name->empty()) {
// default to the cluster fsid
r = rados.cluster_fsid(name);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster fsid: " << cpp_strerror(r)
<< dendl;
}
return r;
} else if (r < 0) {
lderr(cct) << "failed to retrieve site name: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::site_name_set(librados::Rados& rados, const std::string& name) {
CephContext *cct = reinterpret_cast<CephContext *>(rados.cct());
std::string site_name{name};
boost::algorithm::trim(site_name);
ldout(cct, 20) << "site_name=" << site_name << dendl;
  int r = set_config_key(rados, RBD_MIRROR_SITE_NAME_CONFIG_KEY, site_name);
if (r == -EOPNOTSUPP) {
return r;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to update site name: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::mode_get(librados::IoCtx& io_ctx,
rbd_mirror_mode_t *mirror_mode) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
cls::rbd::MirrorMode mirror_mode_internal;
int r = cls_client::mirror_mode_get(&io_ctx, &mirror_mode_internal);
if (r < 0) {
lderr(cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
switch (mirror_mode_internal) {
case cls::rbd::MIRROR_MODE_DISABLED:
case cls::rbd::MIRROR_MODE_IMAGE:
case cls::rbd::MIRROR_MODE_POOL:
*mirror_mode = static_cast<rbd_mirror_mode_t>(mirror_mode_internal);
break;
default:
lderr(cct) << "unknown mirror mode ("
<< static_cast<uint32_t>(mirror_mode_internal) << ")"
<< dendl;
return -EINVAL;
}
return 0;
}
template <typename I>
int Mirror<I>::mode_set(librados::IoCtx& io_ctx,
rbd_mirror_mode_t mirror_mode) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
cls::rbd::MirrorMode next_mirror_mode;
switch (mirror_mode) {
case RBD_MIRROR_MODE_DISABLED:
case RBD_MIRROR_MODE_IMAGE:
case RBD_MIRROR_MODE_POOL:
next_mirror_mode = static_cast<cls::rbd::MirrorMode>(mirror_mode);
break;
default:
lderr(cct) << "unknown mirror mode ("
<< static_cast<uint32_t>(mirror_mode) << ")" << dendl;
return -EINVAL;
}
int r;
if (next_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
// fail early if pool still has peers registered and attempting to disable
std::vector<cls::rbd::MirrorPeer> mirror_peers;
r = cls_client::mirror_peer_list(&io_ctx, &mirror_peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list peers: " << cpp_strerror(r) << dendl;
return r;
} else if (!mirror_peers.empty()) {
lderr(cct) << "mirror peers still registered" << dendl;
return -EBUSY;
}
}
cls::rbd::MirrorMode current_mirror_mode;
r = cls_client::mirror_mode_get(&io_ctx, ¤t_mirror_mode);
if (r < 0) {
lderr(cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
if (current_mirror_mode == next_mirror_mode) {
return 0;
} else if (current_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
uuid_d uuid_gen;
uuid_gen.generate_random();
r = cls_client::mirror_uuid_set(&io_ctx, uuid_gen.to_string());
if (r < 0) {
lderr(cct) << "failed to allocate mirroring uuid: " << cpp_strerror(r)
<< dendl;
return r;
}
}
if (current_mirror_mode != cls::rbd::MIRROR_MODE_IMAGE) {
r = cls_client::mirror_mode_set(&io_ctx, cls::rbd::MIRROR_MODE_IMAGE);
if (r < 0) {
lderr(cct) << "failed to set mirror mode to image: "
<< cpp_strerror(r) << dendl;
return r;
}
r = MirroringWatcher<>::notify_mode_updated(io_ctx,
cls::rbd::MIRROR_MODE_IMAGE);
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
}
if (next_mirror_mode == cls::rbd::MIRROR_MODE_IMAGE) {
return 0;
}
if (next_mirror_mode == cls::rbd::MIRROR_MODE_POOL) {
std::map<std::string, std::string> images;
r = Image<I>::list_images_v2(io_ctx, &images);
if (r < 0) {
lderr(cct) << "failed listing images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& img_pair : images) {
uint64_t features;
uint64_t incompatible_features;
r = cls_client::get_features(&io_ctx, util::header_name(img_pair.second),
true, &features, &incompatible_features);
if (r < 0) {
lderr(cct) << "error getting features for image " << img_pair.first
<< ": " << cpp_strerror(r) << dendl;
return r;
}
// Enable only journal based mirroring
if ((features & RBD_FEATURE_JOURNALING) != 0) {
I *img_ctx = I::create("", img_pair.second, nullptr, io_ctx, false);
r = img_ctx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening image "<< img_pair.first << ": "
<< cpp_strerror(r) << dendl;
return r;
}
r = image_enable(img_ctx, RBD_MIRROR_IMAGE_MODE_JOURNAL, true);
int close_r = img_ctx->state->close();
if (r < 0) {
lderr(cct) << "error enabling mirroring for image "
<< img_pair.first << ": " << cpp_strerror(r) << dendl;
return r;
} else if (close_r < 0) {
lderr(cct) << "failed to close image " << img_pair.first << ": "
<< cpp_strerror(close_r) << dendl;
return close_r;
}
}
}
} else if (next_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
while (true) {
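// Disabling an image can fail with -EBUSY due to parent/child ordering, so
// sweep the image list repeatedly until a pass makes no further progress.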
bool retry_busy = false;
bool pending_busy = false;
std::set<std::string> image_ids;
r = list_mirror_images(io_ctx, image_ids);
if (r < 0) {
lderr(cct) << "failed listing images: " << cpp_strerror(r) << dendl;
return r;
}
for (const auto& img_id : image_ids) {
if (current_mirror_mode == cls::rbd::MIRROR_MODE_IMAGE) {
cls::rbd::MirrorImage mirror_image;
r = cls_client::mirror_image_get(&io_ctx, img_id, &mirror_image);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring state for image id "
<< img_id << ": " << cpp_strerror(r) << dendl;
return r;
}
if (mirror_image.state == cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
lderr(cct) << "failed to disable mirror mode: there are still "
<< "images with mirroring enabled" << dendl;
return -EINVAL;
}
} else {
I *img_ctx = I::create("", img_id, nullptr, io_ctx, false);
r = img_ctx->state->open(0);
if (r < 0) {
lderr(cct) << "error opening image id "<< img_id << ": "
<< cpp_strerror(r) << dendl;
return r;
}
r = image_disable(img_ctx, false);
int close_r = img_ctx->state->close();
if (r == -EBUSY) {
pending_busy = true;
} else if (r < 0) {
lderr(cct) << "error disabling mirroring for image id " << img_id
<< cpp_strerror(r) << dendl;
return r;
} else if (close_r < 0) {
lderr(cct) << "failed to close image id " << img_id << ": "
<< cpp_strerror(close_r) << dendl;
return close_r;
} else if (pending_busy) {
// at least one mirrored image was successfully disabled, so we can
// retry any failures caused by busy parent/child relationships
retry_busy = true;
}
}
}
if (!retry_busy && pending_busy) {
lderr(cct) << "error disabling mirroring for one or more images"
<< dendl;
return -EBUSY;
} else if (!retry_busy) {
break;
}
}
}
r = cls_client::mirror_mode_set(&io_ctx, next_mirror_mode);
if (r < 0) {
lderr(cct) << "failed to set mirror mode: " << cpp_strerror(r) << dendl;
return r;
}
r = MirroringWatcher<>::notify_mode_updated(io_ctx, next_mirror_mode);
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
template <typename I>
int Mirror<I>::uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
C_SaferCond ctx;
uuid_get(io_ctx, mirror_uuid, &ctx);
int r = ctx.wait();
if (r < 0) {
if (r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring uuid: " << cpp_strerror(r)
<< dendl;
}
return r;
}
return 0;
}
template <typename I>
void Mirror<I>::uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid,
Context* on_finish) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
auto req = mirror::GetUuidRequest<I>::create(io_ctx, mirror_uuid, on_finish);
req->send();
}
template <typename I>
int Mirror<I>::peer_bootstrap_create(librados::IoCtx& io_ctx,
std::string* token) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
auto mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
int r = cls_client::mirror_mode_get(&io_ctx, &mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirroring mode: " << cpp_strerror(r)
<< dendl;
return r;
} else if (mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
return -EINVAL;
}
// retrieve the cluster fsid
std::string fsid;
librados::Rados rados(io_ctx);
r = rados.cluster_fsid(&fsid);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster fsid: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string peer_client_id;
std::string cephx_key;
r = create_bootstrap_user(cct, rados, &peer_client_id, &cephx_key);
if (r < 0) {
return r;
}
std::string mon_host = get_mon_host(cct);
// format the token response
bufferlist token_bl;
token_bl.append(
R"({)" \
R"("fsid":")" + fsid + R"(",)" + \
R"("client_id":")" + peer_client_id + R"(",)" + \
R"("key":")" + cephx_key + R"(",)" + \
R"("mon_host":")" + \
boost::replace_all_copy(mon_host, "\"", "\\\"") + R"(")" + \
R"(})");
ldout(cct, 20) << "token=" << token_bl.to_str() << dendl;
bufferlist base64_bl;
token_bl.encode_base64(base64_bl);
*token = base64_bl.to_str();
return 0;
}
template <typename I>
int Mirror<I>::peer_bootstrap_import(librados::IoCtx& io_ctx,
rbd_mirror_peer_direction_t direction,
const std::string& token) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
if (direction != RBD_MIRROR_PEER_DIRECTION_RX &&
direction != RBD_MIRROR_PEER_DIRECTION_RX_TX) {
lderr(cct) << "invalid mirror peer direction" << dendl;
return -EINVAL;
}
bufferlist token_bl;
try {
bufferlist base64_bl;
base64_bl.append(token);
token_bl.decode_base64(base64_bl);
} catch (buffer::error& err) {
lderr(cct) << "failed to decode base64" << dendl;
return -EINVAL;
}
ldout(cct, 20) << "token=" << token_bl.to_str() << dendl;
bool json_valid = false;
std::string expected_remote_fsid;
std::string remote_client_id;
std::string remote_key;
std::string remote_mon_host;
json_spirit::mValue json_root;
if (json_spirit::read(token_bl.to_str(), json_root)) {
try {
auto& json_obj = json_root.get_obj();
expected_remote_fsid = json_obj["fsid"].get_str();
remote_client_id = json_obj["client_id"].get_str();
remote_key = json_obj["key"].get_str();
remote_mon_host = json_obj["mon_host"].get_str();
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(cct) << "invalid bootstrap token JSON received" << dendl;
return -EINVAL;
}
// sanity check import process
std::string local_fsid;
librados::Rados rados(io_ctx);
int r = rados.cluster_fsid(&local_fsid);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster fsid: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string local_site_name;
r = site_name_get(rados, &local_site_name);
if (r < 0) {
lderr(cct) << "failed to retrieve cluster site name: " << cpp_strerror(r)
<< dendl;
return r;
}
// attempt to connect to remote cluster
librados::Rados remote_rados;
remote_rados.init(remote_client_id.c_str());
auto remote_cct = reinterpret_cast<CephContext*>(remote_rados.cct());
remote_cct->_conf.set_val("mon_host", remote_mon_host);
remote_cct->_conf.set_val("key", remote_key);
r = remote_rados.connect();
if (r < 0) {
lderr(cct) << "failed to connect to peer cluster: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string remote_fsid;
r = remote_rados.cluster_fsid(&remote_fsid);
if (r < 0) {
lderr(cct) << "failed to retrieve remote cluster fsid: "
<< cpp_strerror(r) << dendl;
return r;
} else if (local_fsid == remote_fsid) {
lderr(cct) << "cannot import token for local cluster" << dendl;
return -EINVAL;
} else if (expected_remote_fsid != remote_fsid) {
lderr(cct) << "unexpected remote cluster fsid" << dendl;
return -EINVAL;
}
std::string remote_site_name;
r = site_name_get(remote_rados, &remote_site_name);
if (r < 0) {
lderr(cct) << "failed to retrieve remote cluster site name: "
<< cpp_strerror(r) << dendl;
return r;
} else if (local_site_name == remote_site_name) {
lderr(cct) << "cannot import token for duplicate site name" << dendl;
return -EINVAL;
}
librados::IoCtx remote_io_ctx;
r = remote_rados.ioctx_create(io_ctx.get_pool_name().c_str(), remote_io_ctx);
if (r == -ENOENT) {
ldout(cct, 10) << "remote pool does not exist" << dendl;
return r;
} else if (r < 0) {
lderr(cct) << "failed to open remote pool '" << io_ctx.get_pool_name()
<< "': " << cpp_strerror(r) << dendl;
return r;
}
auto remote_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
r = cls_client::mirror_mode_get(&remote_io_ctx, &remote_mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve remote mirroring mode: "
<< cpp_strerror(r) << dendl;
return r;
} else if (remote_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
return -ENOSYS;
}
auto local_mirror_mode = cls::rbd::MIRROR_MODE_DISABLED;
r = cls_client::mirror_mode_get(&io_ctx, &local_mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve local mirroring mode: " << cpp_strerror(r)
<< dendl;
return r;
} else if (local_mirror_mode == cls::rbd::MIRROR_MODE_DISABLED) {
// copy mirror mode from remote peer
r = mode_set(io_ctx, static_cast<rbd_mirror_mode_t>(remote_mirror_mode));
if (r < 0) {
return r;
}
}
if (direction == RBD_MIRROR_PEER_DIRECTION_RX_TX) {
// create a local mirror peer user and export it to the remote cluster
std::string local_client_id;
std::string local_key;
r = create_bootstrap_user(cct, rados, &local_client_id, &local_key);
if (r < 0) {
return r;
}
std::string local_mon_host = get_mon_host(cct);
// create local cluster peer in remote cluster
r = create_bootstrap_peer(cct, remote_io_ctx,
RBD_MIRROR_PEER_DIRECTION_RX_TX, local_site_name,
local_fsid, local_client_id, local_key,
local_mon_host, "local", "remote");
if (r < 0) {
return r;
}
}
// create remote cluster peer in local cluster
r = create_bootstrap_peer(cct, io_ctx, direction, remote_site_name,
remote_fsid, remote_client_id, remote_key,
remote_mon_host, "remote", "local");
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_add(librados::IoCtx& io_ctx, std::string *uuid,
mirror_peer_direction_t direction,
const std::string &site_name,
const std::string &client_name) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "name=" << site_name << ", "
<< "client=" << client_name << dendl;
if (cct->_conf->cluster == site_name) {
lderr(cct) << "cannot add self as remote peer" << dendl;
return -EINVAL;
}
if (direction == RBD_MIRROR_PEER_DIRECTION_TX) {
return -EINVAL;
}
int r;
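// mirror_peer_add reports -ESTALE on a UUID collision; regenerate the UUID
// and retry until it is accepted.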
do {
uuid_d uuid_gen;
uuid_gen.generate_random();
*uuid = uuid_gen.to_string();
r = cls_client::mirror_peer_add(
&io_ctx, {*uuid, static_cast<cls::rbd::MirrorPeerDirection>(direction),
site_name, client_name, ""});
if (r == -ESTALE) {
ldout(cct, 5) << "duplicate UUID detected, retrying" << dendl;
} else if (r < 0) {
lderr(cct) << "failed to add mirror peer '" << site_name << "': "
<< cpp_strerror(r) << dendl;
return r;
}
} while (r == -ESTALE);
return 0;
}
template <typename I>
int Mirror<I>::peer_site_remove(librados::IoCtx& io_ctx,
const std::string &uuid) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << dendl;
int r = remove_peer_config_key(io_ctx, uuid);
if (r < 0) {
lderr(cct) << "failed to remove peer attributes '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
r = cls_client::mirror_peer_remove(&io_ctx, uuid);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to remove peer '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
std::vector<std::string> names;
r = Namespace<I>::list(io_ctx, &names);
if (r < 0) {
return r;
}
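// include the default (unnamed) namespace in the scan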
names.push_back("");
librados::IoCtx ns_io_ctx;
ns_io_ctx.dup(io_ctx);
for (auto &name : names) {
ns_io_ctx.set_namespace(name);
std::set<std::string> image_ids;
r = list_mirror_images(ns_io_ctx, image_ids);
if (r < 0) {
lderr(cct) << "failed listing images in "
<< (name.empty() ? "default" : name) << " namespace : "
<< cpp_strerror(r) << dendl;
return r;
}
for (const auto& image_id : image_ids) {
cls::rbd::MirrorImage mirror_image;
r = cls_client::mirror_image_get(&ns_io_ctx, image_id, &mirror_image);
if (r == -ENOENT) {
continue;
}
if (r < 0) {
lderr(cct) << "error getting mirror info for image " << image_id
<< ": " << cpp_strerror(r) << dendl;
return r;
}
if (mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
continue;
}
// Snapshot based mirroring. Unlink the peer from mirroring snapshots.
// TODO: optimize.
I *img_ctx = I::create("", image_id, nullptr, ns_io_ctx, false);
img_ctx->read_only_mask &= ~IMAGE_READ_ONLY_FLAG_NON_PRIMARY;
r = img_ctx->state->open(0);
if (r == -ENOENT) {
continue;
}
if (r < 0) {
lderr(cct) << "error opening image " << image_id << ": "
<< cpp_strerror(r) << dendl;
return r;
}
std::list<uint64_t> snap_ids;
{
std::shared_lock image_locker{img_ctx->image_lock};
for (auto &it : img_ctx->snap_info) {
auto info = std::get_if<cls::rbd::MirrorSnapshotNamespace>(
&it.second.snap_namespace);
if (info && info->mirror_peer_uuids.count(uuid)) {
snap_ids.push_back(it.first);
}
}
}
for (auto snap_id : snap_ids) {
C_SaferCond cond;
auto req = mirror::snapshot::UnlinkPeerRequest<I>::create(
img_ctx, snap_id, uuid, true, &cond);
req->send();
r = cond.wait();
if (r == -ENOENT) {
r = 0;
}
if (r < 0) {
break;
}
}
int close_r = img_ctx->state->close();
if (r < 0) {
lderr(cct) << "error unlinking peer for image " << image_id << ": "
<< cpp_strerror(r) << dendl;
return r;
} else if (close_r < 0) {
lderr(cct) << "failed to close image " << image_id << ": "
<< cpp_strerror(close_r) << dendl;
return close_r;
}
}
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_list(librados::IoCtx& io_ctx,
std::vector<mirror_peer_site_t> *peers) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << dendl;
std::vector<cls::rbd::MirrorPeer> mirror_peers;
int r = cls_client::mirror_peer_list(&io_ctx, &mirror_peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list peers: " << cpp_strerror(r) << dendl;
return r;
}
peers->clear();
peers->reserve(mirror_peers.size());
for (auto &mirror_peer : mirror_peers) {
mirror_peer_site_t peer;
peer.uuid = mirror_peer.uuid;
peer.direction = static_cast<mirror_peer_direction_t>(
mirror_peer.mirror_peer_direction);
peer.site_name = mirror_peer.site_name;
peer.mirror_uuid = mirror_peer.mirror_uuid;
peer.client_name = mirror_peer.client_name;
peer.last_seen = mirror_peer.last_seen.sec();
peers->push_back(peer);
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_client(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &client_name) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "client=" << client_name << dendl;
int r = cls_client::mirror_peer_set_client(&io_ctx, uuid, client_name);
if (r < 0) {
lderr(cct) << "failed to update client '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_name(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &site_name) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "name=" << site_name << dendl;
if (cct->_conf->cluster == site_name) {
lderr(cct) << "cannot set self as remote peer" << dendl;
return -EINVAL;
}
int r = cls_client::mirror_peer_set_cluster(&io_ctx, uuid, site_name);
if (r < 0) {
lderr(cct) << "failed to update site '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_direction(librados::IoCtx& io_ctx,
const std::string &uuid,
mirror_peer_direction_t direction) {
cls::rbd::MirrorPeerDirection mirror_peer_direction = static_cast<
cls::rbd::MirrorPeerDirection>(direction);
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "direction=" << mirror_peer_direction << dendl;
int r = cls_client::mirror_peer_set_direction(&io_ctx, uuid,
mirror_peer_direction);
if (r < 0) {
lderr(cct) << "failed to update direction '" << uuid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_get_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
Attributes* attributes) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << dendl;
attributes->clear();
librados::Rados rados(io_ctx);
std::string value;
int r = get_config_key(rados, get_peer_config_key_name(io_ctx.get_id(), uuid),
&value);
if (r == -ENOENT || value.empty()) {
return -ENOENT;
} else if (r < 0) {
lderr(cct) << "failed to retrieve peer attributes: " << cpp_strerror(r)
<< dendl;
return r;
}
bool json_valid = false;
json_spirit::mValue json_root;
if (json_spirit::read(value, json_root)) {
try {
auto& json_obj = json_root.get_obj();
for (auto& pairs : json_obj) {
(*attributes)[pairs.first] = pairs.second.get_str();
}
json_valid = true;
} catch (std::runtime_error&) {
}
}
if (!json_valid) {
lderr(cct) << "invalid peer attributes JSON received" << dendl;
return -EINVAL;
}
return 0;
}
template <typename I>
int Mirror<I>::peer_site_set_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
const Attributes& attributes) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "uuid=" << uuid << ", "
<< "attributes=" << attributes << dendl;
std::vector<mirror_peer_site_t> mirror_peers;
int r = peer_site_list(io_ctx, &mirror_peers);
if (r < 0) {
return r;
}
if (std::find_if(mirror_peers.begin(), mirror_peers.end(),
[&uuid](const librbd::mirror_peer_site_t& peer) {
return uuid == peer.uuid;
}) == mirror_peers.end()) {
ldout(cct, 5) << "mirror peer uuid " << uuid << " does not exist" << dendl;
return -ENOENT;
}
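// Serialize the attributes as a JSON object. The quotes are pre-escaped,
// presumably because set_config_key embeds the value verbatim in a mon
// command payload (an assumption based on its usage here).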
std::stringstream ss;
ss << "{";
for (auto& pair : attributes) {
ss << "\\\"" << pair.first << "\\\": "
<< "\\\"" << pair.second << "\\\"";
if (&pair != &(*attributes.rbegin())) {
ss << ", ";
}
}
ss << "}";
librados::Rados rados(io_ctx);
r = set_config_key(rados, get_peer_config_key_name(io_ctx.get_id(), uuid),
ss.str());
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to update peer attributes: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int Mirror<I>::image_global_status_list(
librados::IoCtx& io_ctx, const std::string &start_id, size_t max,
IdToMirrorImageGlobalStatus *images) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
int r;
std::map<std::string, std::string> id_to_name;
{
std::map<std::string, std::string> name_to_id;
r = Image<I>::list_images_v2(io_ctx, &name_to_id);
if (r < 0) {
return r;
}
for (const auto& it : name_to_id) {
id_to_name[it.second] = it.first;
}
}
std::map<std::string, cls::rbd::MirrorImage> images_;
std::map<std::string, cls::rbd::MirrorImageStatus> statuses_;
r = librbd::cls_client::mirror_image_status_list(&io_ctx, start_id, max,
&images_, &statuses_);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror image statuses: "
<< cpp_strerror(r) << dendl;
return r;
}
const std::string STATUS_NOT_FOUND("status not found");
for (auto it = images_.begin(); it != images_.end(); ++it) {
auto &image_id = it->first;
auto &info = it->second;
if (info.state == cls::rbd::MIRROR_IMAGE_STATE_DISABLED) {
continue;
}
auto &image_name = id_to_name[image_id];
if (image_name.empty()) {
lderr(cct) << "failed to find image name for image " << image_id << ", "
<< "using image id as name" << dendl;
image_name = image_id;
}
mirror_image_global_status_t& global_status = (*images)[image_id];
global_status.name = image_name;
global_status.info = mirror_image_info_t{
info.global_image_id,
static_cast<mirror_image_state_t>(info.state),
false}; // XXX: To set "primary" right would require an additional call.
bool found_local_site_status = false;
auto s_it = statuses_.find(image_id);
if (s_it != statuses_.end()) {
auto& status = s_it->second;
global_status.site_statuses.reserve(
status.mirror_image_site_statuses.size());
for (auto& site_status : status.mirror_image_site_statuses) {
if (site_status.mirror_uuid ==
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID) {
found_local_site_status = true;
}
global_status.site_statuses.push_back(mirror_image_site_status_t{
site_status.mirror_uuid,
static_cast<mirror_image_status_state_t>(site_status.state),
site_status.state == cls::rbd::MIRROR_IMAGE_STATUS_STATE_UNKNOWN ?
STATUS_NOT_FOUND : site_status.description,
site_status.last_update.sec(), site_status.up});
}
}
if (!found_local_site_status) {
global_status.site_statuses.push_back(mirror_image_site_status_t{
cls::rbd::MirrorImageSiteStatus::LOCAL_MIRROR_UUID,
MIRROR_IMAGE_STATUS_STATE_UNKNOWN, STATUS_NOT_FOUND, 0, false});
}
}
return 0;
}
template <typename I>
int Mirror<I>::image_status_summary(librados::IoCtx& io_ctx,
MirrorImageStatusStates *states) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::vector<cls::rbd::MirrorPeer> mirror_peers;
int r = cls_client::mirror_peer_list(&io_ctx, &mirror_peers);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror peers: " << cpp_strerror(r) << dendl;
return r;
}
std::map<cls::rbd::MirrorImageStatusState, int32_t> states_;
r = cls_client::mirror_image_status_get_summary(&io_ctx, mirror_peers,
&states_);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to get mirror status summary: "
<< cpp_strerror(r) << dendl;
return r;
}
for (auto &s : states_) {
(*states)[static_cast<mirror_image_status_state_t>(s.first)] = s.second;
}
return 0;
}
template <typename I>
int Mirror<I>::image_instance_id_list(
librados::IoCtx& io_ctx, const std::string &start_image_id, size_t max,
std::map<std::string, std::string> *instance_ids) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
std::map<std::string, entity_inst_t> instances;
int r = librbd::cls_client::mirror_image_instance_list(
&io_ctx, start_image_id, max, &instances);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror image instances: " << cpp_strerror(r)
<< dendl;
return r;
}
for (const auto& it : instances) {
(*instance_ids)[it.first] = stringify(it.second.name.num());
}
return 0;
}
template <typename I>
int Mirror<I>::image_info_list(
librados::IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
const std::string &start_id, size_t max,
std::map<std::string, std::pair<mirror_image_mode_t,
mirror_image_info_t>> *entries) {
CephContext *cct = reinterpret_cast<CephContext *>(io_ctx.cct());
ldout(cct, 20) << "pool=" << io_ctx.get_pool_name() << ", mode_filter="
<< (mode_filter ? stringify(*mode_filter) : "null")
<< ", start_id=" << start_id << ", max=" << max << dendl;
std::string last_read = start_id;
entries->clear();
while (entries->size() < max) {
std::map<std::string, cls::rbd::MirrorImage> images;
std::map<std::string, cls::rbd::MirrorImageStatus> statuses;
int r = librbd::cls_client::mirror_image_status_list(&io_ctx, last_read,
max, &images,
&statuses);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list mirror image statuses: "
<< cpp_strerror(r) << dendl;
return r;
}
if (images.empty()) {
break;
}
AsioEngine asio_engine(io_ctx);
for (auto &it : images) {
auto &image_id = it.first;
auto &image = it.second;
auto mode = static_cast<mirror_image_mode_t>(image.mode);
if ((mode_filter && mode != *mode_filter) ||
image.state != cls::rbd::MIRROR_IMAGE_STATE_ENABLED) {
continue;
}
// need to call get_info for every image to retrieve promotion state
mirror_image_info_t info;
r = image_get_info(io_ctx, asio_engine.get_work_queue(), image_id, &info);
if (r < 0) {
continue;
}
(*entries)[image_id] = std::make_pair(mode, info);
if (entries->size() == max) {
break;
}
}
last_read = images.rbegin()->first;
}
return 0;
}
template <typename I>
int Mirror<I>::image_snapshot_create(I *ictx, uint32_t flags,
uint64_t *snap_id) {
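// synchronous wrapper: block on the async variant via C_SaferCond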
C_SaferCond ctx;
Mirror<I>::image_snapshot_create(ictx, flags, snap_id, &ctx);
return ctx.wait();
}
template <typename I>
void Mirror<I>::image_snapshot_create(I *ictx, uint32_t flags,
uint64_t *snap_id, Context *on_finish) {
CephContext *cct = ictx->cct;
ldout(cct, 20) << "ictx=" << ictx << dendl;
uint64_t snap_create_flags = 0;
int r = util::snap_create_flags_api_to_internal(cct, flags,
&snap_create_flags);
if (r < 0) {
on_finish->complete(r);
return;
}
auto on_refresh = new LambdaContext(
[ictx, snap_create_flags, snap_id, on_finish](int r) {
if (r < 0) {
lderr(ictx->cct) << "refresh failed: " << cpp_strerror(r) << dendl;
on_finish->complete(r);
return;
}
auto ctx = new C_ImageSnapshotCreate<I>(ictx, snap_create_flags, snap_id,
on_finish);
auto req = mirror::GetInfoRequest<I>::create(*ictx, &ctx->mirror_image,
&ctx->promotion_state,
&ctx->primary_mirror_uuid,
ctx);
req->send();
});
if (ictx->state->is_refresh_required()) {
ictx->state->refresh(on_refresh);
} else {
on_refresh->complete(0);
}
}
} // namespace api
} // namespace librbd
template class librbd::api::Mirror<librbd::ImageCtx>;
| 66,186 | 30.442755 | 80 |
cc
|
null |
ceph-main/src/librbd/api/Mirror.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_MIRROR_H
#define LIBRBD_API_MIRROR_H
#include "include/rbd/librbd.hpp"
#include <map>
#include <string>
#include <vector>
struct Context;
namespace librbd {
struct ImageCtx;
namespace asio { struct ContextWQ; }
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Mirror {
typedef std::map<std::string, std::string> Attributes;
typedef std::map<std::string, mirror_image_global_status_t>
IdToMirrorImageGlobalStatus;
typedef std::map<mirror_image_status_state_t, int> MirrorImageStatusStates;
static int site_name_get(librados::Rados& rados, std::string* name);
static int site_name_set(librados::Rados& rados, const std::string& name);
static int mode_get(librados::IoCtx& io_ctx, rbd_mirror_mode_t *mirror_mode);
static int mode_set(librados::IoCtx& io_ctx, rbd_mirror_mode_t mirror_mode);
static int uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid);
static void uuid_get(librados::IoCtx& io_ctx, std::string* mirror_uuid,
Context* on_finish);
static int peer_bootstrap_create(librados::IoCtx& io_ctx, std::string* token);
static int peer_bootstrap_import(librados::IoCtx& io_ctx,
rbd_mirror_peer_direction_t direction,
const std::string& token);
static int peer_site_add(librados::IoCtx& io_ctx, std::string *uuid,
mirror_peer_direction_t direction,
const std::string &site_name,
const std::string &client_name);
static int peer_site_remove(librados::IoCtx& io_ctx, const std::string &uuid);
static int peer_site_list(librados::IoCtx& io_ctx,
std::vector<mirror_peer_site_t> *peers);
static int peer_site_set_client(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &client_name);
static int peer_site_set_name(librados::IoCtx& io_ctx,
const std::string &uuid,
const std::string &site_name);
static int peer_site_set_direction(librados::IoCtx& io_ctx,
const std::string &uuid,
mirror_peer_direction_t direction);
static int peer_site_get_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
Attributes* attributes);
static int peer_site_set_attributes(librados::IoCtx& io_ctx,
const std::string &uuid,
const Attributes& attributes);
static int image_global_status_list(librados::IoCtx& io_ctx,
const std::string &start_id, size_t max,
IdToMirrorImageGlobalStatus *images);
static int image_status_summary(librados::IoCtx& io_ctx,
MirrorImageStatusStates *states);
static int image_instance_id_list(librados::IoCtx& io_ctx,
const std::string &start_image_id,
size_t max,
std::map<std::string, std::string> *ids);
static int image_info_list(
librados::IoCtx& io_ctx, mirror_image_mode_t *mode_filter,
const std::string &start_id, size_t max,
std::map<std::string, std::pair<mirror_image_mode_t,
mirror_image_info_t>> *entries);
static int image_enable(ImageCtxT *ictx, mirror_image_mode_t mode,
bool relax_same_pool_parent_check);
static int image_disable(ImageCtxT *ictx, bool force);
static int image_promote(ImageCtxT *ictx, bool force);
static void image_promote(ImageCtxT *ictx, bool force, Context *on_finish);
static int image_demote(ImageCtxT *ictx);
static void image_demote(ImageCtxT *ictx, Context *on_finish);
static int image_resync(ImageCtxT *ictx);
static int image_get_info(ImageCtxT *ictx,
mirror_image_info_t *mirror_image_info);
static void image_get_info(ImageCtxT *ictx,
mirror_image_info_t *mirror_image_info,
Context *on_finish);
static int image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info);
static void image_get_info(librados::IoCtx& io_ctx,
asio::ContextWQ *op_work_queue,
const std::string &image_id,
mirror_image_info_t *mirror_image_info,
Context *on_finish);
static int image_get_mode(ImageCtxT *ictx, mirror_image_mode_t *mode);
static void image_get_mode(ImageCtxT *ictx, mirror_image_mode_t *mode,
Context *on_finish);
static int image_get_global_status(ImageCtxT *ictx,
mirror_image_global_status_t *status);
static void image_get_global_status(ImageCtxT *ictx,
mirror_image_global_status_t *status,
Context *on_finish);
static int image_get_instance_id(ImageCtxT *ictx, std::string *instance_id);
static int image_snapshot_create(ImageCtxT *ictx, uint32_t flags,
uint64_t *snap_id);
static void image_snapshot_create(ImageCtxT *ictx, uint32_t flags,
uint64_t *snap_id, Context *on_finish);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Mirror<librbd::ImageCtx>;
#endif // LIBRBD_API_MIRROR_H
| 5,990 | 46.173228 | 80 |
h
|
null |
ceph-main/src/librbd/api/Namespace.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/errno.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/api/Mirror.h"
#include "librbd/api/Namespace.h"
#include "librbd/ImageCtx.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Namespace: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
const std::list<std::string> POOL_OBJECTS {
RBD_CHILDREN,
RBD_GROUP_DIRECTORY,
RBD_INFO,
RBD_MIRRORING,
RBD_TASK,
RBD_TRASH,
RBD_DIRECTORY
};
} // anonymous namespace
template <typename I>
int Namespace<I>::create(librados::IoCtx& io_ctx, const std::string& name)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << "name=" << name << dendl;
if (name.empty()) {
return -EINVAL;
}
librados::Rados rados(io_ctx);
int8_t require_osd_release;
int r = rados.get_min_compatible_osd(&require_osd_release);
if (r < 0) {
lderr(cct) << "failed to retrieve min OSD release: " << cpp_strerror(r)
<< dendl;
return r;
}
if (require_osd_release < CEPH_RELEASE_NAUTILUS) {
ldout(cct, 1) << "namespace support requires nautilus or later OSD"
<< dendl;
return -ENOSYS;
}
librados::IoCtx default_ns_ctx;
default_ns_ctx.dup(io_ctx);
default_ns_ctx.set_namespace("");
r = cls_client::namespace_add(&default_ns_ctx, name);
if (r < 0) {
lderr(cct) << "failed to add namespace: " << cpp_strerror(r) << dendl;
return r;
}
librados::IoCtx ns_ctx;
ns_ctx.dup(io_ctx);
ns_ctx.set_namespace(name);
r = cls_client::dir_state_set(&ns_ctx, RBD_DIRECTORY,
cls::rbd::DIRECTORY_STATE_READY);
if (r < 0) {
lderr(cct) << "failed to initialize image directory: " << cpp_strerror(r)
<< dendl;
goto rollback;
}
return 0;
rollback:
int ret_val = cls_client::namespace_remove(&default_ns_ctx, name);
if (ret_val < 0) {
lderr(cct) << "failed to remove namespace: " << cpp_strerror(ret_val) << dendl;
}
return r;
}
template <typename I>
int Namespace<I>::remove(librados::IoCtx& io_ctx, const std::string& name)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << "name=" << name << dendl;
if (name.empty()) {
return -EINVAL;
}
librados::IoCtx default_ns_ctx;
default_ns_ctx.dup(io_ctx);
default_ns_ctx.set_namespace("");
librados::IoCtx ns_ctx;
ns_ctx.dup(io_ctx);
ns_ctx.set_namespace(name);
std::map<std::string, cls::rbd::TrashImageSpec> trash_entries;
librados::ObjectWriteOperation dir_op;
librbd::cls_client::dir_state_set(
&dir_op, cls::rbd::DIRECTORY_STATE_ADD_DISABLED);
dir_op.remove();
int r = ns_ctx.operate(RBD_DIRECTORY, &dir_op);
if (r == -EBUSY) {
ldout(cct, 5) << "image directory not empty" << dendl;
goto rollback;
} else if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to disable the namespace: " << cpp_strerror(r)
<< dendl;
return r;
}
r = cls_client::trash_list(&ns_ctx, "", 1, &trash_entries);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to list trash directory: " << cpp_strerror(r)
<< dendl;
return r;
} else if (!trash_entries.empty()) {
ldout(cct, 5) << "image trash not empty" << dendl;
goto rollback;
}
r = Mirror<I>::mode_set(ns_ctx, RBD_MIRROR_MODE_DISABLED);
if (r < 0) {
lderr(cct) << "failed to disable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
for (auto& oid : POOL_OBJECTS) {
r = ns_ctx.remove(oid);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to remove object '" << oid << "': "
<< cpp_strerror(r) << dendl;
return r;
}
}
r = cls_client::namespace_remove(&default_ns_ctx, name);
if (r < 0) {
lderr(cct) << "failed to remove namespace: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
rollback:
r = librbd::cls_client::dir_state_set(
&ns_ctx, RBD_DIRECTORY, cls::rbd::DIRECTORY_STATE_READY);
if (r < 0) {
lderr(cct) << "failed to restore directory state: " << cpp_strerror(r)
<< dendl;
}
return -EBUSY;
}
template <typename I>
int Namespace<I>::list(IoCtx& io_ctx, std::vector<std::string> *names)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << dendl;
librados::IoCtx default_ns_ctx;
default_ns_ctx.dup(io_ctx);
default_ns_ctx.set_namespace("");
int r;
int max_read = 1024;
std::string last_read = "";
do {
std::list<std::string> name_list;
r = cls_client::namespace_list(&default_ns_ctx, last_read, max_read,
&name_list);
if (r == -ENOENT) {
return 0;
} else if (r < 0) {
lderr(cct) << "error listing namespaces: " << cpp_strerror(r) << dendl;
return r;
}
names->insert(names->end(), name_list.begin(), name_list.end());
if (!name_list.empty()) {
last_read = name_list.back();
}
r = name_list.size();
} while (r == max_read);
return 0;
}
template <typename I>
int Namespace<I>::exists(librados::IoCtx& io_ctx, const std::string& name, bool *exists)
{
CephContext *cct = (CephContext *)io_ctx.cct();
ldout(cct, 5) << "name=" << name << dendl;
*exists = false;
if (name.empty()) {
return -EINVAL;
}
librados::IoCtx ns_ctx;
ns_ctx.dup(io_ctx);
ns_ctx.set_namespace(name);
int r = librbd::cls_client::dir_state_assert(&ns_ctx, RBD_DIRECTORY,
cls::rbd::DIRECTORY_STATE_READY);
if (r == 0) {
*exists = true;
} else if (r != -ENOENT) {
lderr(cct) << "error asserting namespace: " << cpp_strerror(r) << dendl;
return r;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::Namespace<librbd::ImageCtx>;
| 5,911 | 24.050847 | 88 |
cc
|
null |
ceph-main/src/librbd/api/Namespace.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_NAMESPACE_H
#define CEPH_LIBRBD_API_NAMESPACE_H
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include <string>
#include <vector>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Namespace {
static int create(librados::IoCtx& io_ctx, const std::string& name);
static int remove(librados::IoCtx& io_ctx, const std::string& name);
static int list(librados::IoCtx& io_ctx, std::vector<std::string>* names);
static int exists(librados::IoCtx& io_ctx, const std::string& name, bool *exists);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Namespace<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_NAMESPACE_H
| 863 | 24.411765 | 84 |
h
|
null |
ceph-main/src/librbd/api/Pool.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Pool.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "common/Throttle.h"
#include "cls/rbd/cls_rbd_client.h"
#include "osd/osd_types.h"
#include "librbd/AsioEngine.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/api/Image.h"
#include "librbd/api/Trash.h"
#include "librbd/image/ValidatePoolRequest.h"
#define dout_subsys ceph_subsys_rbd
namespace librbd {
namespace api {
namespace {
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Pool::ImageStatRequest: " \
<< __func__ << " " << this << ": " \
<< "(id=" << m_image_id << "): "
template <typename I>
class ImageStatRequest {
public:
ImageStatRequest(librados::IoCtx& io_ctx, SimpleThrottle& throttle,
const std::string& image_id, bool scan_snaps,
std::atomic<uint64_t>* bytes,
std::atomic<uint64_t>* max_bytes,
std::atomic<uint64_t>* snaps)
: m_cct(reinterpret_cast<CephContext*>(io_ctx.cct())),
m_io_ctx(io_ctx), m_throttle(throttle), m_image_id(image_id),
m_scan_snaps(scan_snaps), m_bytes(bytes), m_max_bytes(max_bytes),
m_snaps(snaps) {
m_throttle.start_op();
}
void send() {
get_head();
}
protected:
void finish(int r) {
(*m_max_bytes) += m_max_size;
m_throttle.end_op(r);
delete this;
}
private:
CephContext* m_cct;
librados::IoCtx& m_io_ctx;
SimpleThrottle& m_throttle;
const std::string& m_image_id;
bool m_scan_snaps;
std::atomic<uint64_t>* m_bytes;
std::atomic<uint64_t>* m_max_bytes;
std::atomic<uint64_t>* m_snaps;
bufferlist m_out_bl;
uint64_t m_max_size = 0;
::SnapContext m_snapc;
void get_head() {
ldout(m_cct, 15) << dendl;
librados::ObjectReadOperation op;
cls_client::get_size_start(&op, CEPH_NOSNAP);
if (m_scan_snaps) {
cls_client::get_snapcontext_start(&op);
}
m_out_bl.clear();
auto aio_comp = util::create_rados_callback<
ImageStatRequest<I>, &ImageStatRequest<I>::handle_get_head>(this);
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), aio_comp, &op,
&m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
void handle_get_head(int r) {
ldout(m_cct, 15) << "r=" << r << dendl;
auto it = m_out_bl.cbegin();
if (r == 0) {
uint8_t order;
r = cls_client::get_size_finish(&it, &m_max_size, &order);
if (r == 0) {
(*m_bytes) += m_max_size;
}
}
if (m_scan_snaps && r == 0) {
r = cls_client::get_snapcontext_finish(&it, &m_snapc);
if (r == 0) {
(*m_snaps) += m_snapc.snaps.size();
}
}
if (r == -ENOENT) {
finish(r);
return;
} else if (r < 0) {
lderr(m_cct) << "failed to stat image: " << cpp_strerror(r) << dendl;
finish(r);
return;
}
if (!m_snapc.is_valid()) {
lderr(m_cct) << "snap context is invalid" << dendl;
finish(-EIO);
return;
}
get_snaps();
}
void get_snaps() {
if (!m_scan_snaps || m_snapc.snaps.empty()) {
finish(0);
return;
}
ldout(m_cct, 15) << dendl;
librados::ObjectReadOperation op;
for (auto snap_seq : m_snapc.snaps) {
cls_client::get_size_start(&op, snap_seq);
}
m_out_bl.clear();
auto aio_comp = util::create_rados_callback<
ImageStatRequest<I>, &ImageStatRequest<I>::handle_get_snaps>(this);
int r = m_io_ctx.aio_operate(util::header_name(m_image_id), aio_comp, &op,
&m_out_bl);
ceph_assert(r == 0);
aio_comp->release();
}
void handle_get_snaps(int r) {
ldout(m_cct, 15) << "r=" << r << dendl;
auto it = m_out_bl.cbegin();
for ([[maybe_unused]] auto snap_seq : m_snapc.snaps) {
uint64_t size;
if (r == 0) {
uint8_t order;
r = cls_client::get_size_finish(&it, &size, &order);
}
if (r == 0 && m_max_size < size) {
m_max_size = size;
}
}
if (r == -ENOENT) {
ldout(m_cct, 15) << "out-of-sync metadata" << dendl;
get_head();
} else if (r < 0) {
lderr(m_cct) << "failed to retrieve snap size: " << cpp_strerror(r)
<< dendl;
finish(r);
} else {
finish(0);
}
}
};
template <typename I>
void get_pool_stat_option_value(typename Pool<I>::StatOptions* stat_options,
rbd_pool_stat_option_t option,
uint64_t** value) {
auto it = stat_options->find(option);
if (it == stat_options->end()) {
*value = nullptr;
} else {
*value = it->second;
}
}
template <typename I>
int get_pool_stats(librados::IoCtx& io_ctx, const ConfigProxy& config,
const std::vector<std::string>& image_ids, uint64_t* image_count,
uint64_t* provisioned_bytes, uint64_t* max_provisioned_bytes,
uint64_t* snapshot_count) {
bool scan_snaps = ((max_provisioned_bytes != nullptr) ||
(snapshot_count != nullptr));
SimpleThrottle throttle(
config.template get_val<uint64_t>("rbd_concurrent_management_ops"), true);
std::atomic<uint64_t> bytes{0};
std::atomic<uint64_t> max_bytes{0};
std::atomic<uint64_t> snaps{0};
for (auto& image_id : image_ids) {
if (throttle.pending_error()) {
break;
}
auto req = new ImageStatRequest<I>(io_ctx, throttle, image_id,
scan_snaps, &bytes, &max_bytes, &snaps);
req->send();
}
int r = throttle.wait_for_ret();
if (r < 0) {
return r;
}
if (image_count != nullptr) {
*image_count = image_ids.size();
}
if (provisioned_bytes != nullptr) {
*provisioned_bytes = bytes.load();
}
if (max_provisioned_bytes != nullptr) {
*max_provisioned_bytes = max_bytes.load();
}
if (snapshot_count != nullptr) {
*snapshot_count = snaps.load();
}
return 0;
}
} // anonymous namespace
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Pool: " << __func__ << ": "
template <typename I>
int Pool<I>::init(librados::IoCtx& io_ctx, bool force) {
auto cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 10) << dendl;
int r = io_ctx.application_enable(pg_pool_t::APPLICATION_NAME_RBD, force);
if (r < 0) {
return r;
}
ConfigProxy config{cct->_conf};
api::Config<I>::apply_pool_overrides(io_ctx, &config);
if (!config.get_val<bool>("rbd_validate_pool")) {
return 0;
}
C_SaferCond ctx;
auto req = image::ValidatePoolRequest<I>::create(io_ctx, &ctx);
req->send();
return ctx.wait();
}
template <typename I>
int Pool<I>::add_stat_option(StatOptions* stat_options,
rbd_pool_stat_option_t option,
uint64_t* value) {
switch (option) {
case RBD_POOL_STAT_OPTION_IMAGES:
case RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_IMAGE_MAX_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_IMAGE_SNAPSHOTS:
case RBD_POOL_STAT_OPTION_TRASH_IMAGES:
case RBD_POOL_STAT_OPTION_TRASH_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_TRASH_MAX_PROVISIONED_BYTES:
case RBD_POOL_STAT_OPTION_TRASH_SNAPSHOTS:
stat_options->emplace(option, value);
return 0;
default:
break;
}
return -ENOENT;
}
template <typename I>
int Pool<I>::get_stats(librados::IoCtx& io_ctx, StatOptions* stat_options) {
auto cct = reinterpret_cast<CephContext*>(io_ctx.cct());
ldout(cct, 10) << dendl;
ConfigProxy config{cct->_conf};
api::Config<I>::apply_pool_overrides(io_ctx, &config);
uint64_t* image_count;
uint64_t* provisioned_bytes;
uint64_t* max_provisioned_bytes;
uint64_t* snapshot_count;
std::vector<trash_image_info_t> trash_entries;
int r = Trash<I>::list(io_ctx, trash_entries, false);
if (r < 0 && r != -EOPNOTSUPP) {
return r;
}
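// trash entries still in the REMOVING state are counted with live images
// below; the remaining trash entries are counted under the trash options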
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGES, &image_count);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGE_PROVISIONED_BYTES,
&provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGE_MAX_PROVISIONED_BYTES,
&max_provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_IMAGE_SNAPSHOTS, &snapshot_count);
if (image_count != nullptr || provisioned_bytes != nullptr ||
max_provisioned_bytes != nullptr || snapshot_count != nullptr) {
typename Image<I>::ImageNameToIds images;
int r = Image<I>::list_images_v2(io_ctx, &images);
if (r < 0) {
return r;
}
std::vector<std::string> image_ids;
image_ids.reserve(images.size() + trash_entries.size());
for (auto& it : images) {
image_ids.push_back(std::move(it.second));
}
for (auto& it : trash_entries) {
if (it.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
image_ids.push_back(std::move(it.id));
}
}
r = get_pool_stats<I>(io_ctx, config, image_ids, image_count,
provisioned_bytes, max_provisioned_bytes,
snapshot_count);
if (r < 0) {
return r;
}
}
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_IMAGES, &image_count);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_PROVISIONED_BYTES,
&provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_MAX_PROVISIONED_BYTES,
&max_provisioned_bytes);
get_pool_stat_option_value<I>(
stat_options, RBD_POOL_STAT_OPTION_TRASH_SNAPSHOTS, &snapshot_count);
if (image_count != nullptr || provisioned_bytes != nullptr ||
max_provisioned_bytes != nullptr || snapshot_count != nullptr) {
std::vector<std::string> image_ids;
image_ids.reserve(trash_entries.size());
for (auto& it : trash_entries) {
if (it.source == RBD_TRASH_IMAGE_SOURCE_REMOVING) {
continue;
}
image_ids.push_back(std::move(it.id));
}
r = get_pool_stats<I>(io_ctx, config, image_ids, image_count,
provisioned_bytes, max_provisioned_bytes,
snapshot_count);
if (r < 0) {
return r;
}
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::Pool<librbd::ImageCtx>;
| 10,605 | 27.207447 | 79 |
cc
|
null |
ceph-main/src/librbd/api/Pool.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_POOL_H
#define CEPH_LIBRBD_API_POOL_H
#include "include/int_types.h"
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.h"
#include <map>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class Pool {
public:
typedef std::map<rbd_pool_stat_option_t, uint64_t*> StatOptions;
static int init(librados::IoCtx& io_ctx, bool force);
static int add_stat_option(StatOptions* stat_options,
rbd_pool_stat_option_t option,
uint64_t* value);
static int get_stats(librados::IoCtx& io_ctx, StatOptions* stat_options);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Pool<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_POOL_H
| 913 | 22.435897 | 75 |
h
|
null |
ceph-main/src/librbd/api/PoolMetadata.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/PoolMetadata.h"
#include "cls/rbd/cls_rbd_client.h"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "librbd/Utils.h"
#include "librbd/api/Config.h"
#include "librbd/image/GetMetadataRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::PoolMetadata: " << __func__ << ": "
namespace librbd {
namespace api {
namespace {
void update_pool_timestamp(librados::IoCtx& io_ctx) {
CephContext *cct = (CephContext *)io_ctx.cct();
auto now = ceph_clock_now();
std::string cmd =
R"({)"
R"("prefix": "config set", )"
R"("who": "global", )"
R"("name": "rbd_config_pool_override_update_timestamp", )"
R"("value": ")" + stringify(now.sec()) + R"(")"
R"(})";
librados::Rados rados(io_ctx);
bufferlist in_bl;
std::string ss;
int r = rados.mon_command(cmd, in_bl, nullptr, &ss);
if (r < 0) {
lderr(cct) << "failed to notify clients of pool config update: "
<< cpp_strerror(r) << dendl;
}
}
} // anonymous namespace
template <typename I>
int PoolMetadata<I>::get(librados::IoCtx& io_ctx,
const std::string &key, std::string *value) {
CephContext *cct = (CephContext *)io_ctx.cct();
int r = cls_client::metadata_get(&io_ctx, RBD_INFO, key, value);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed reading metadata " << key << ": " << cpp_strerror(r)
<< dendl;
}
return r;
}
template <typename I>
int PoolMetadata<I>::set(librados::IoCtx& io_ctx, const std::string &key,
const std::string &value) {
CephContext *cct = (CephContext *)io_ctx.cct();
bool need_update_pool_timestamp = false;
std::string config_key;
if (util::is_metadata_config_override(key, &config_key)) {
if (!librbd::api::Config<I>::is_option_name(io_ctx, config_key)) {
lderr(cct) << "validation for " << key
<< " failed: not allowed pool level override" << dendl;
return -EINVAL;
}
int r = ConfigProxy{false}.set_val(config_key.c_str(), value);
if (r < 0) {
lderr(cct) << "validation for " << key << " failed: " << cpp_strerror(r)
<< dendl;
return -EINVAL;
}
need_update_pool_timestamp = true;
}
ceph::bufferlist bl;
bl.append(value);
int r = cls_client::metadata_set(&io_ctx, RBD_INFO, {{key, bl}});
if (r < 0) {
lderr(cct) << "failed setting metadata " << key << ": " << cpp_strerror(r)
<< dendl;
return r;
}
if (need_update_pool_timestamp) {
update_pool_timestamp(io_ctx);
}
return 0;
}
template <typename I>
int PoolMetadata<I>::remove(librados::IoCtx& io_ctx, const std::string &key) {
CephContext *cct = (CephContext *)io_ctx.cct();
std::string value;
int r = cls_client::metadata_get(&io_ctx, RBD_INFO, key, &value);
if (r < 0) {
if (r == -ENOENT) {
ldout(cct, 1) << "metadata " << key << " does not exist" << dendl;
} else {
lderr(cct) << "failed reading metadata " << key << ": " << cpp_strerror(r)
<< dendl;
}
return r;
}
r = cls_client::metadata_remove(&io_ctx, RBD_INFO, key);
if (r < 0) {
lderr(cct) << "failed removing metadata " << key << ": " << cpp_strerror(r)
<< dendl;
return r;
}
std::string config_key;
if (util::is_metadata_config_override(key, &config_key)) {
update_pool_timestamp(io_ctx);
}
return 0;
}
template <typename I>
int PoolMetadata<I>::list(librados::IoCtx& io_ctx, const std::string &start,
uint64_t max,
std::map<std::string, ceph::bufferlist> *pairs) {
CephContext *cct = (CephContext *)io_ctx.cct();
pairs->clear();
C_SaferCond ctx;
auto req = image::GetMetadataRequest<I>::create(
io_ctx, RBD_INFO, false, "", start, max, pairs, &ctx);
req->send();
int r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed listing metadata: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
} // namespace api
} // namespace librbd
template class librbd::api::PoolMetadata<librbd::ImageCtx>;
| 4,276 | 26.242038 | 80 |
cc
|
null |
ceph-main/src/librbd/api/PoolMetadata.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_POOL_METADATA_H
#define CEPH_LIBRBD_API_POOL_METADATA_H
#include "include/buffer_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include <cstdint>
#include <map>
#include <string>
namespace librbd {
class ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
class PoolMetadata {
public:
static int get(librados::IoCtx& io_ctx, const std::string &key,
std::string *value);
static int set(librados::IoCtx& io_ctx, const std::string &key,
const std::string &value);
static int remove(librados::IoCtx& io_ctx, const std::string &key);
static int list(librados::IoCtx& io_ctx, const std::string &start,
uint64_t max, std::map<std::string, ceph::bufferlist> *pairs);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::PoolMetadata<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_POOL_METADATA_H
| 1,030 | 26.131579 | 80 |
h
|
null |
ceph-main/src/librbd/api/Snapshot.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Snapshot.h"
#include "cls/rbd/cls_rbd_types.h"
#include "common/errno.h"
#include "librbd/internal.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/Operations.h"
#include "librbd/Utils.h"
#include "librbd/api/Image.h"
#include "include/Context.h"
#include "common/Cond.h"
#include <boost/variant.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Snapshot: " << __func__ << ": "
using librados::snap_t;
namespace librbd {
namespace api {
namespace {
class GetGroupVisitor {
public:
CephContext* cct;
librados::IoCtx *image_ioctx;
snap_group_namespace_t *group_snap;
explicit GetGroupVisitor(CephContext* cct, librados::IoCtx *_image_ioctx,
snap_group_namespace_t *group_snap)
: cct(cct), image_ioctx(_image_ioctx), group_snap(group_snap) {}
template <typename T>
inline int operator()(const T&) const {
// ignore other than GroupSnapshotNamespace types.
return -EINVAL;
}
inline int operator()(
const cls::rbd::GroupSnapshotNamespace& snap_namespace) {
IoCtx group_ioctx;
int r = util::create_ioctx(*image_ioctx, "group", snap_namespace.group_pool,
{}, &group_ioctx);
if (r < 0) {
return r;
}
cls::rbd::GroupSnapshot group_snapshot;
std::string group_name;
r = cls_client::dir_get_name(&group_ioctx, RBD_GROUP_DIRECTORY,
snap_namespace.group_id, &group_name);
if (r < 0) {
lderr(cct) << "failed to retrieve group name: " << cpp_strerror(r)
<< dendl;
return r;
}
std::string group_header_oid = util::group_header_name(snap_namespace.group_id);
r = cls_client::group_snap_get_by_id(&group_ioctx,
group_header_oid,
snap_namespace.group_snapshot_id,
&group_snapshot);
if (r < 0) {
lderr(cct) << "failed to retrieve group snapshot: " << cpp_strerror(r)
<< dendl;
return r;
}
group_snap->group_pool = group_ioctx.get_id();
group_snap->group_name = group_name;
group_snap->group_snap_name = group_snapshot.name;
return 0;
}
};
class GetTrashVisitor {
public:
std::string* original_name;
explicit GetTrashVisitor(std::string* original_name)
: original_name(original_name) {
}
template <typename T>
inline int operator()(const T&) const {
return -EINVAL;
}
inline int operator()(
const cls::rbd::TrashSnapshotNamespace& snap_namespace) {
*original_name = snap_namespace.original_name;
return 0;
}
};
class GetMirrorVisitor {
public:
snap_mirror_namespace_t *mirror_snap;
explicit GetMirrorVisitor(snap_mirror_namespace_t *mirror_snap)
: mirror_snap(mirror_snap) {
}
template <typename T>
inline int operator()(const T&) const {
return -EINVAL;
}
inline int operator()(
const cls::rbd::MirrorSnapshotNamespace& snap_namespace) {
mirror_snap->state = static_cast<snap_mirror_state_t>(snap_namespace.state);
mirror_snap->complete = snap_namespace.complete;
mirror_snap->mirror_peer_uuids = snap_namespace.mirror_peer_uuids;
mirror_snap->primary_mirror_uuid = snap_namespace.primary_mirror_uuid;
mirror_snap->primary_snap_id = snap_namespace.primary_snap_id;
mirror_snap->last_copied_object_number =
snap_namespace.last_copied_object_number;
return 0;
}
};
} // anonymous namespace
template <typename I>
int Snapshot<I>::get_group_namespace(I *ictx, uint64_t snap_id,
snap_group_namespace_t *group_snap) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
GetGroupVisitor ggv = GetGroupVisitor(ictx->cct, &ictx->md_ctx, group_snap);
r = snap_info->snap_namespace.visit(ggv);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Snapshot<I>::get_trash_namespace(I *ictx, uint64_t snap_id,
std::string* original_name) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
auto visitor = GetTrashVisitor(original_name);
r = snap_info->snap_namespace.visit(visitor);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Snapshot<I>::get_mirror_namespace(
I *ictx, uint64_t snap_id, snap_mirror_namespace_t *mirror_snap) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock image_locker{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
auto gmv = GetMirrorVisitor(mirror_snap);
r = snap_info->snap_namespace.visit(gmv);
if (r < 0) {
return r;
}
return 0;
}
template <typename I>
int Snapshot<I>::get_namespace_type(I *ictx, uint64_t snap_id,
snap_namespace_type_t *namespace_type) {
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
std::shared_lock l{ictx->image_lock};
auto snap_info = ictx->get_snap_info(snap_id);
if (snap_info == nullptr) {
return -ENOENT;
}
*namespace_type = static_cast<snap_namespace_type_t>(
cls::rbd::get_snap_namespace_type(snap_info->snap_namespace));
return 0;
}
template <typename I>
int Snapshot<I>::remove(I *ictx, uint64_t snap_id) {
ldout(ictx->cct, 20) << "snap_remove " << ictx << " " << snap_id << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0) {
return r;
}
cls::rbd::SnapshotNamespace snapshot_namespace;
std::string snapshot_name;
{
std::shared_lock image_locker{ictx->image_lock};
auto it = ictx->snap_info.find(snap_id);
if (it == ictx->snap_info.end()) {
return -ENOENT;
}
snapshot_namespace = it->second.snap_namespace;
snapshot_name = it->second.name;
}
C_SaferCond ctx;
ictx->operations->snap_remove(snapshot_namespace, snapshot_name, &ctx);
r = ctx.wait();
return r;
}
template <typename I>
int Snapshot<I>::get_name(I *ictx, uint64_t snap_id, std::string *snap_name)
{
ldout(ictx->cct, 20) << "snap_get_name " << ictx << " " << snap_id << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock image_locker{ictx->image_lock};
r = ictx->get_snap_name(snap_id, snap_name);
return r;
}
template <typename I>
int Snapshot<I>::get_id(I *ictx, const std::string& snap_name, uint64_t *snap_id)
{
ldout(ictx->cct, 20) << "snap_get_id " << ictx << " " << snap_name << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock image_locker{ictx->image_lock};
*snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name);
if (*snap_id == CEPH_NOSNAP)
return -ENOENT;
return 0;
}
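// Illustrative round trip (assumption: public librbd.hpp API): resolve a
// user snapshot's id from its name and back again.
//
//   uint64_t snap_id = 0;
//   int r = image.snap_get_id("mysnap", &snap_id);
//   std::string name;
//   if (r == 0) {
//     r = image.snap_get_name(snap_id, &name);  // name == "mysnap"
//   }
//
// Note that only cls::rbd::UserSnapshotNamespace entries are matched here,
// so snapshots owned by groups, the trash, or mirroring resolve to -ENOENT.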
template <typename I>
int Snapshot<I>::list(I *ictx, std::vector<snap_info_t>& snaps) {
ldout(ictx->cct, 20) << "snap_list " << ictx << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
for (auto &it : ictx->snap_info) {
snap_info_t info;
info.name = it.second.name;
info.id = it.first;
info.size = it.second.size;
snaps.push_back(info);
}
return 0;
}
template <typename I>
int Snapshot<I>::exists(I *ictx, const cls::rbd::SnapshotNamespace& snap_namespace,
const char *snap_name, bool *exists) {
ldout(ictx->cct, 20) << "snap_exists " << ictx << " " << snap_name << dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
*exists = ictx->get_snap_id(snap_namespace, snap_name) != CEPH_NOSNAP;
return 0;
}
template <typename I>
int Snapshot<I>::create(I *ictx, const char *snap_name, uint32_t flags,
ProgressContext& pctx) {
ldout(ictx->cct, 20) << "snap_create " << ictx << " " << snap_name
<< " flags: " << flags << dendl;
uint64_t internal_flags = 0;
int r = util::snap_create_flags_api_to_internal(ictx->cct, flags,
&internal_flags);
if (r < 0) {
return r;
}
return ictx->operations->snap_create(cls::rbd::UserSnapshotNamespace(),
snap_name, internal_flags, pctx);
}
template <typename I>
int Snapshot<I>::remove(I *ictx, const char *snap_name, uint32_t flags,
ProgressContext& pctx) {
ldout(ictx->cct, 20) << "snap_remove " << ictx << " " << snap_name << " flags: " << flags << dendl;
int r = 0;
r = ictx->state->refresh_if_required();
if (r < 0)
return r;
if (flags & RBD_SNAP_REMOVE_FLATTEN) {
r = Image<I>::flatten_children(ictx, snap_name, pctx);
if (r < 0) {
return r;
}
}
bool protect;
r = is_protected(ictx, snap_name, &protect);
if (r < 0) {
return r;
}
  if (protect && (flags & RBD_SNAP_REMOVE_UNPROTECT)) {
r = ictx->operations->snap_unprotect(cls::rbd::UserSnapshotNamespace(), snap_name);
if (r < 0) {
lderr(ictx->cct) << "failed to unprotect snapshot: " << snap_name << dendl;
return r;
}
r = is_protected(ictx, snap_name, &protect);
if (r < 0) {
return r;
}
if (protect) {
lderr(ictx->cct) << "snapshot is still protected after unprotection" << dendl;
ceph_abort();
}
}
C_SaferCond ctx;
ictx->operations->snap_remove(cls::rbd::UserSnapshotNamespace(), snap_name, &ctx);
r = ctx.wait();
return r;
}
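// Illustrative usage (assumption: public librbd.hpp API): snap_remove2()
// maps onto this overload. RBD_SNAP_REMOVE_FLATTEN first flattens any cloned
// children and RBD_SNAP_REMOVE_UNPROTECT unprotects the snapshot, as
// implemented above; RBD_SNAP_REMOVE_FORCE combines both flags.
//
//   struct NoOpProgress : public librbd::ProgressContext {
//     int update_progress(uint64_t offset, uint64_t total) override {
//       return 0;  // ignore progress updates
//     }
//   };
//
//   NoOpProgress prog;
//   int r = image.snap_remove2("mysnap", RBD_SNAP_REMOVE_FORCE, prog);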
template <typename I>
int Snapshot<I>::get_timestamp(I *ictx, uint64_t snap_id, struct timespec *timestamp) {
auto snap_it = ictx->snap_info.find(snap_id);
ceph_assert(snap_it != ictx->snap_info.end());
utime_t time = snap_it->second.timestamp;
time.to_timespec(timestamp);
return 0;
}
template <typename I>
int Snapshot<I>::get_limit(I *ictx, uint64_t *limit) {
int r = cls_client::snapshot_get_limit(&ictx->md_ctx, ictx->header_oid,
limit);
if (r == -EOPNOTSUPP) {
*limit = UINT64_MAX;
r = 0;
}
return r;
}
template <typename I>
int Snapshot<I>::set_limit(I *ictx, uint64_t limit) {
return ictx->operations->snap_set_limit(limit);
}
template <typename I>
int Snapshot<I>::is_protected(I *ictx, const char *snap_name, bool *protect) {
ldout(ictx->cct, 20) << "snap_is_protected " << ictx << " " << snap_name
<< dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(cls::rbd::UserSnapshotNamespace(), snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
bool is_unprotected;
r = ictx->is_snap_unprotected(snap_id, &is_unprotected);
  // consider both PROTECTED and UNPROTECTING to be 'protected',
  // since in either state the snapshot cannot be deleted
*protect = !is_unprotected;
return r;
}
template <typename I>
int Snapshot<I>::get_namespace(I *ictx, const char *snap_name,
cls::rbd::SnapshotNamespace *snap_namespace) {
ldout(ictx->cct, 20) << "get_snap_namespace " << ictx << " " << snap_name
<< dendl;
int r = ictx->state->refresh_if_required();
if (r < 0)
return r;
std::shared_lock l{ictx->image_lock};
snap_t snap_id = ictx->get_snap_id(*snap_namespace, snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;
r = ictx->get_snap_namespace(snap_id, snap_namespace);
return r;
}
} // namespace api
} // namespace librbd
template class librbd::api::Snapshot<librbd::ImageCtx>;
| 12,009 | 25.988764 | 101 |
cc
|
null |
ceph-main/src/librbd/api/Snapshot.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_SNAPSHOT_H
#define CEPH_LIBRBD_API_SNAPSHOT_H
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <string>
namespace librbd {
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Snapshot {
static int get_group_namespace(ImageCtxT *ictx, uint64_t snap_id,
snap_group_namespace_t *group_snap);
static int get_trash_namespace(ImageCtxT *ictx, uint64_t snap_id,
std::string *original_name);
static int get_mirror_namespace(
ImageCtxT *ictx, uint64_t snap_id,
snap_mirror_namespace_t *mirror_snap);
static int get_namespace_type(ImageCtxT *ictx, uint64_t snap_id,
snap_namespace_type_t *namespace_type);
static int remove(ImageCtxT *ictx, uint64_t snap_id);
static int get_name(ImageCtxT *ictx, uint64_t snap_id, std::string *snap_name);
static int get_id(ImageCtxT *ictx, const std::string& snap_name, uint64_t *snap_id);
static int list(ImageCtxT *ictx, std::vector<snap_info_t>& snaps);
static int exists(ImageCtxT *ictx, const cls::rbd::SnapshotNamespace& snap_namespace,
const char *snap_name, bool *exists);
static int create(ImageCtxT *ictx, const char *snap_name, uint32_t flags,
ProgressContext& pctx);
static int remove(ImageCtxT *ictx, const char *snap_name, uint32_t flags, ProgressContext& pctx);
static int get_limit(ImageCtxT *ictx, uint64_t *limit);
static int set_limit(ImageCtxT *ictx, uint64_t limit);
static int get_timestamp(ImageCtxT *ictx, uint64_t snap_id, struct timespec *timestamp);
static int is_protected(ImageCtxT *ictx, const char *snap_name, bool *protect);
static int get_namespace(ImageCtxT *ictx, const char *snap_name,
cls::rbd::SnapshotNamespace *snap_namespace);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Snapshot<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_API_SNAPSHOT_H
| 2,125 | 30.264706 | 99 |
h
|
null |
ceph-main/src/librbd/api/Trash.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Trash.h"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "common/errno.h"
#include "common/Cond.h"
#include "cls/rbd/cls_rbd_client.h"
#include "librbd/AsioEngine.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/internal.h"
#include "librbd/Operations.h"
#include "librbd/TrashWatcher.h"
#include "librbd/Utils.h"
#include "librbd/api/DiffIterate.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/image/RemoveRequest.h"
#include "librbd/mirror/DisableRequest.h"
#include "librbd/mirror/EnableRequest.h"
#include "librbd/trash/MoveRequest.h"
#include "librbd/trash/RemoveRequest.h"
#include <json_spirit/json_spirit.h>
#include "librbd/journal/DisabledPolicy.h"
#include "librbd/image/ListWatchersRequest.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::Trash: " << __func__ << ": "
namespace librbd {
namespace api {
template <typename I>
const typename Trash<I>::TrashImageSources Trash<I>::ALLOWED_RESTORE_SOURCES {
cls::rbd::TRASH_IMAGE_SOURCE_USER,
cls::rbd::TRASH_IMAGE_SOURCE_MIRRORING,
cls::rbd::TRASH_IMAGE_SOURCE_USER_PARENT
};
namespace {
template <typename I>
int disable_mirroring(I *ictx) {
ldout(ictx->cct, 10) << dendl;
C_SaferCond ctx;
auto req = mirror::DisableRequest<I>::create(ictx, false, true, &ctx);
req->send();
int r = ctx.wait();
if (r < 0) {
lderr(ictx->cct) << "failed to disable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
template <typename I>
int enable_mirroring(IoCtx &io_ctx, const std::string &image_id) {
auto cct = reinterpret_cast<CephContext*>(io_ctx.cct());
uint64_t features;
uint64_t incompatible_features;
int r = cls_client::get_features(&io_ctx, util::header_name(image_id), true,
&features, &incompatible_features);
if (r < 0) {
lderr(cct) << "failed to retrieve features: " << cpp_strerror(r) << dendl;
return r;
}
if ((features & RBD_FEATURE_JOURNALING) == 0) {
return 0;
}
cls::rbd::MirrorMode mirror_mode;
r = cls_client::mirror_mode_get(&io_ctx, &mirror_mode);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to retrieve mirror mode: " << cpp_strerror(r)
<< dendl;
return r;
}
if (mirror_mode != cls::rbd::MIRROR_MODE_POOL) {
ldout(cct, 10) << "not pool mirroring mode" << dendl;
return 0;
}
ldout(cct, 10) << dendl;
AsioEngine asio_engine(io_ctx);
C_SaferCond ctx;
auto req = mirror::EnableRequest<I>::create(
io_ctx, image_id, cls::rbd::MIRROR_IMAGE_MODE_JOURNAL, "", false,
asio_engine.get_work_queue(), &ctx);
req->send();
r = ctx.wait();
if (r < 0) {
lderr(cct) << "failed to enable mirroring: " << cpp_strerror(r)
<< dendl;
return r;
}
return 0;
}
int list_trash_image_specs(
librados::IoCtx &io_ctx,
std::map<std::string, cls::rbd::TrashImageSpec>* trash_image_specs,
bool exclude_user_remove_source) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "list_trash_image_specs " << &io_ctx << dendl;
bool more_entries;
uint32_t max_read = 1024;
std::string last_read;
do {
std::map<std::string, cls::rbd::TrashImageSpec> trash_entries;
int r = cls_client::trash_list(&io_ctx, last_read, max_read,
&trash_entries);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error listing rbd trash entries: " << cpp_strerror(r)
<< dendl;
return r;
} else if (r == -ENOENT) {
break;
}
if (trash_entries.empty()) {
break;
}
for (const auto &entry : trash_entries) {
if (exclude_user_remove_source &&
entry.second.source == cls::rbd::TRASH_IMAGE_SOURCE_REMOVING) {
continue;
}
trash_image_specs->insert({entry.first, entry.second});
}
last_read = trash_entries.rbegin()->first;
more_entries = (trash_entries.size() >= max_read);
} while (more_entries);
return 0;
}
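// The loop above is the standard cursor-style pagination pattern used
// throughout librbd: page through an omap-backed listing in chunks of
// max_read, resuming each call from the last key seen. A minimal generic
// sketch of the same contract (all names below are illustrative only):
//
//   std::string cursor;                    // "" == start from the beginning
//   bool more = true;
//   while (more) {
//     std::map<std::string, Entry> page;   // Entry is a placeholder type
//     int r = fetch_page(cursor, 1024, &page);
//     if (r < 0 || page.empty()) {
//       break;
//     }
//     consume(page);
//     cursor = page.rbegin()->first;       // resume after the last key
//     more = (page.size() >= 1024);        // short page => listing done
//   }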
} // anonymous namespace
template <typename I>
int Trash<I>::move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, const std::string &image_id,
uint64_t delay) {
ceph_assert(!image_name.empty() && !image_id.empty());
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << &io_ctx << " name=" << image_name << ", id=" << image_id
<< dendl;
auto ictx = new I("", image_id, nullptr, io_ctx, false);
int r = ictx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "failed to open image: " << cpp_strerror(r) << dendl;
return r;
}
if (r == 0) {
cls::rbd::MirrorImage mirror_image;
int mirror_r = cls_client::mirror_image_get(&ictx->md_ctx, ictx->id,
&mirror_image);
if (mirror_r == -ENOENT) {
ldout(ictx->cct, 10) << "mirroring is not enabled for this image"
<< dendl;
} else if (mirror_r < 0) {
lderr(ictx->cct) << "failed to retrieve mirror image: "
<< cpp_strerror(mirror_r) << dendl;
return mirror_r;
} else if (mirror_image.mode == cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
// a remote rbd-mirror might own the exclusive-lock on this image
// and therefore we need to disable mirroring so that it closes the image
r = disable_mirroring<I>(ictx);
if (r < 0) {
ictx->state->close();
return r;
}
}
if (ictx->test_features(RBD_FEATURE_JOURNALING)) {
std::unique_lock image_locker{ictx->image_lock};
ictx->set_journal_policy(new journal::DisabledPolicy());
}
ictx->owner_lock.lock_shared();
if (ictx->exclusive_lock != nullptr) {
ictx->exclusive_lock->block_requests(0);
r = ictx->operations->prepare_image_update(
exclusive_lock::OPERATION_REQUEST_TYPE_GENERAL, true);
if (r < 0) {
lderr(cct) << "cannot obtain exclusive lock - not removing" << dendl;
ictx->owner_lock.unlock_shared();
ictx->state->close();
return -EBUSY;
}
}
ictx->owner_lock.unlock_shared();
ictx->image_lock.lock_shared();
if (!ictx->migration_info.empty()) {
lderr(cct) << "cannot move migrating image to trash" << dendl;
ictx->image_lock.unlock_shared();
ictx->state->close();
return -EBUSY;
}
ictx->image_lock.unlock_shared();
if (mirror_r >= 0 &&
mirror_image.mode != cls::rbd::MIRROR_IMAGE_MODE_SNAPSHOT) {
r = disable_mirroring<I>(ictx);
if (r < 0) {
ictx->state->close();
return r;
}
}
ictx->state->close();
}
utime_t delete_time{ceph_clock_now()};
utime_t deferment_end_time{delete_time};
deferment_end_time += delay;
cls::rbd::TrashImageSpec trash_image_spec{
static_cast<cls::rbd::TrashImageSource>(source), image_name,
delete_time, deferment_end_time};
trash_image_spec.state = cls::rbd::TRASH_IMAGE_STATE_MOVING;
C_SaferCond ctx;
auto req = trash::MoveRequest<I>::create(io_ctx, image_id, trash_image_spec,
&ctx);
req->send();
r = ctx.wait();
trash_image_spec.state = cls::rbd::TRASH_IMAGE_STATE_NORMAL;
int ret = cls_client::trash_state_set(&io_ctx, image_id,
trash_image_spec.state,
cls::rbd::TRASH_IMAGE_STATE_MOVING);
if (ret < 0 && ret != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(ret) << dendl;
return ret;
}
if (r < 0) {
return r;
}
C_SaferCond notify_ctx;
TrashWatcher<I>::notify_image_added(io_ctx, image_id, trash_image_spec,
¬ify_ctx);
r = notify_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
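// Illustrative caller flow (assumption: public librbd.hpp API): moving an
// image to the trash with a deferment window, during which it can still be
// restored before trash_remove() or purge() reclaims it.
//
//   librbd::RBD rbd;
//   int r = rbd.trash_move(io_ctx, "myimage", 3600 /* defer for 1 hour */);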
template <typename I>
int Trash<I>::move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, uint64_t delay) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << &io_ctx << " name=" << image_name << dendl;
// try to get image id from the directory
std::string image_id;
int r = cls_client::dir_get_id(&io_ctx, RBD_DIRECTORY, image_name,
&image_id);
if (r == -ENOENT) {
r = io_ctx.stat(util::old_header_name(image_name), nullptr, nullptr);
if (r == 0) {
// cannot move V1 image to trash
ldout(cct, 10) << "cannot move v1 image to trash" << dendl;
return -EOPNOTSUPP;
}
// search for an interrupted trash move request
std::map<std::string, cls::rbd::TrashImageSpec> trash_image_specs;
    r = list_trash_image_specs(io_ctx, &trash_image_specs, true);
if (r < 0) {
return r;
}
if (auto found_image =
std::find_if(
trash_image_specs.begin(), trash_image_specs.end(),
[&](const auto& pair) {
const auto& spec = pair.second;
return (spec.source == cls::rbd::TRASH_IMAGE_SOURCE_USER &&
spec.state == cls::rbd::TRASH_IMAGE_STATE_MOVING &&
spec.name == image_name);
});
found_image != trash_image_specs.end()) {
image_id = found_image->first;
} else {
return -ENOENT;
}
ldout(cct, 15) << "derived image id " << image_id << " from existing "
<< "trash entry" << dendl;
} else if (r < 0) {
lderr(cct) << "failed to retrieve image id: " << cpp_strerror(r) << dendl;
return r;
}
if (image_name.empty() || image_id.empty()) {
lderr(cct) << "invalid image name/id" << dendl;
return -EINVAL;
}
return Trash<I>::move(io_ctx, source, image_name, image_id, delay);
}
template <typename I>
int Trash<I>::get(IoCtx &io_ctx, const std::string &id,
trash_image_info_t *info) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << __func__ << " " << &io_ctx << dendl;
cls::rbd::TrashImageSpec spec;
int r = cls_client::trash_get(&io_ctx, id, &spec);
if (r == -ENOENT) {
return r;
} else if (r < 0) {
lderr(cct) << "error retrieving trash entry: " << cpp_strerror(r)
<< dendl;
return r;
}
rbd_trash_image_source_t source = static_cast<rbd_trash_image_source_t>(
spec.source);
*info = trash_image_info_t{id, spec.name, source, spec.deletion_time.sec(),
spec.deferment_end_time.sec()};
return 0;
}
template <typename I>
int Trash<I>::list(IoCtx &io_ctx, std::vector<trash_image_info_t> &entries,
bool exclude_user_remove_source) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << __func__ << " " << &io_ctx << dendl;
std::map<std::string, cls::rbd::TrashImageSpec> trash_image_specs;
int r = list_trash_image_specs(io_ctx, &trash_image_specs,
exclude_user_remove_source);
if (r < 0) {
return r;
}
entries.reserve(trash_image_specs.size());
for (const auto& [image_id, spec] : trash_image_specs) {
rbd_trash_image_source_t source =
static_cast<rbd_trash_image_source_t>(spec.source);
entries.push_back({image_id, spec.name, source,
spec.deletion_time.sec(),
spec.deferment_end_time.sec()});
}
return 0;
}
template <typename I>
int Trash<I>::purge(IoCtx& io_ctx, time_t expire_ts,
float threshold, ProgressContext& pctx) {
auto *cct((CephContext *) io_ctx.cct());
ldout(cct, 20) << &io_ctx << dendl;
std::vector<librbd::trash_image_info_t> trash_entries;
int r = librbd::api::Trash<I>::list(io_ctx, trash_entries, true);
if (r < 0) {
return r;
}
trash_entries.erase(
std::remove_if(trash_entries.begin(), trash_entries.end(),
[](librbd::trash_image_info_t info) {
return info.source != RBD_TRASH_IMAGE_SOURCE_USER &&
info.source != RBD_TRASH_IMAGE_SOURCE_USER_PARENT;
}),
trash_entries.end());
std::set<std::string> to_be_removed;
if (threshold != -1) {
if (threshold < 0 || threshold > 1) {
lderr(cct) << "argument 'threshold' is out of valid range"
<< dendl;
return -EINVAL;
}
librados::bufferlist inbl;
librados::bufferlist outbl;
std::string pool_name = io_ctx.get_pool_name();
librados::Rados rados(io_ctx);
rados.mon_command(R"({"prefix": "df", "format": "json"})", inbl,
&outbl, nullptr);
json_spirit::mValue json;
if (!json_spirit::read(outbl.to_str(), json)) {
lderr(cct) << "ceph df json output could not be parsed"
<< dendl;
return -EBADMSG;
}
json_spirit::mArray arr = json.get_obj()["pools"].get_array();
double pool_percent_used = 0;
uint64_t pool_total_bytes = 0;
std::map<std::string, std::vector<std::string>> datapools;
std::sort(trash_entries.begin(), trash_entries.end(),
[](librbd::trash_image_info_t a, librbd::trash_image_info_t b) {
return a.deferment_end_time < b.deferment_end_time;
}
);
for (const auto &entry : trash_entries) {
int64_t data_pool_id = -1;
r = cls_client::get_data_pool(&io_ctx, util::header_name(entry.id),
&data_pool_id);
if (r < 0 && r != -ENOENT && r != -EOPNOTSUPP) {
lderr(cct) << "failed to query data pool: " << cpp_strerror(r) << dendl;
return r;
} else if (data_pool_id == -1) {
data_pool_id = io_ctx.get_id();
}
if (data_pool_id != io_ctx.get_id()) {
librados::IoCtx data_io_ctx;
r = util::create_ioctx(io_ctx, "image", data_pool_id,
{}, &data_io_ctx);
if (r < 0) {
lderr(cct) << "error accessing data pool" << dendl;
continue;
}
auto data_pool = data_io_ctx.get_pool_name();
datapools[data_pool].push_back(entry.id);
} else {
datapools[pool_name].push_back(entry.id);
}
}
uint64_t bytes_to_free = 0;
    for (size_t i = 0; i < arr.size(); ++i) {
json_spirit::mObject obj = arr[i].get_obj();
std::string name = obj.find("name")->second.get_str();
auto img = datapools.find(name);
if (img != datapools.end()) {
json_spirit::mObject stats = arr[i].get_obj()["stats"].get_obj();
pool_percent_used = stats["percent_used"].get_real();
if (pool_percent_used <= threshold) continue;
bytes_to_free = 0;
pool_total_bytes = stats["max_avail"].get_uint64() +
stats["bytes_used"].get_uint64();
auto bytes_threshold = (uint64_t) (pool_total_bytes *
(pool_percent_used - threshold));
for (const auto &it : img->second) {
auto ictx = new I("", it, nullptr, io_ctx, false);
r = ictx->state->open(OPEN_FLAG_SKIP_OPEN_PARENT);
if (r == -ENOENT) {
continue;
} else if (r < 0) {
lderr(cct) << "failed to open image " << it << ": "
<< cpp_strerror(r) << dendl;
}
r = librbd::api::DiffIterate<I>::diff_iterate(
ictx, cls::rbd::UserSnapshotNamespace(), nullptr, 0, ictx->size,
false, true,
[](uint64_t offset, size_t len, int exists, void *arg) {
auto *to_free = reinterpret_cast<uint64_t *>(arg);
if (exists)
(*to_free) += len;
return 0;
}, &bytes_to_free);
ictx->state->close();
if (r < 0) {
lderr(cct) << "failed to calculate disk usage for image " << it
<< ": " << cpp_strerror(r) << dendl;
continue;
}
to_be_removed.insert(it);
if (bytes_to_free >= bytes_threshold) {
break;
}
}
}
}
if (bytes_to_free == 0) {
ldout(cct, 10) << "pool usage is lower than or equal to "
<< (threshold * 100)
<< "%" << dendl;
return 0;
}
}
if (expire_ts == 0) {
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
expire_ts = now.tv_sec;
}
for (const auto &entry : trash_entries) {
if (expire_ts >= entry.deferment_end_time) {
to_be_removed.insert(entry.id);
}
}
NoOpProgressContext remove_pctx;
uint64_t list_size = to_be_removed.size(), i = 0;
int remove_err = 1;
while (!to_be_removed.empty() && remove_err == 1) {
remove_err = 0;
for (auto it = to_be_removed.begin(); it != to_be_removed.end(); ) {
trash_image_info_t trash_info;
r = Trash<I>::get(io_ctx, *it, &trash_info);
if (r == -ENOENT) {
// likely RBD_TRASH_IMAGE_SOURCE_USER_PARENT image removed as a side
        // effect of a preceding remove (last child detach)
pctx.update_progress(++i, list_size);
it = to_be_removed.erase(it);
continue;
} else if (r < 0) {
lderr(cct) << "error getting image id " << *it
<< " info: " << cpp_strerror(r) << dendl;
return r;
}
r = Trash<I>::remove(io_ctx, *it, true, remove_pctx);
if (r == -ENOTEMPTY || r == -EBUSY || r == -EMLINK || r == -EUCLEAN) {
if (!remove_err) {
remove_err = r;
}
++it;
continue;
} else if (r < 0) {
lderr(cct) << "error removing image id " << *it
<< ": " << cpp_strerror(r) << dendl;
return r;
}
pctx.update_progress(++i, list_size);
it = to_be_removed.erase(it);
remove_err = 1;
}
ldout(cct, 20) << "remove_err=" << remove_err << dendl;
}
if (!to_be_removed.empty()) {
ceph_assert(remove_err < 0);
ldout(cct, 10) << "couldn't remove " << to_be_removed.size()
<< " expired images" << dendl;
return remove_err;
}
return 0;
}
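// Worked example of the threshold math above (hypothetical numbers): for a
// pool with stats max_avail = 600 GiB and bytes_used = 400 GiB,
// pool_total_bytes = 1000 GiB and pool_percent_used = 0.40. Calling purge()
// with threshold = 0.25 therefore targets
//   bytes_threshold = 1000 GiB * (0.40 - 0.25) = 150 GiB
// trash images are opened oldest-deferment-first and their allocated extents
// are summed via diff_iterate() until at least 150 GiB of reclaimable space
// has been queued for removal.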
template <typename I>
int Trash<I>::remove(IoCtx &io_ctx, const std::string &image_id, bool force,
ProgressContext& prog_ctx) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "trash_remove " << &io_ctx << " " << image_id
<< " " << force << dendl;
cls::rbd::TrashImageSpec trash_spec;
int r = cls_client::trash_get(&io_ctx, image_id, &trash_spec);
if (r < 0) {
lderr(cct) << "error getting image id " << image_id
<< " info from trash: " << cpp_strerror(r) << dendl;
return r;
}
utime_t now = ceph_clock_now();
if (now < trash_spec.deferment_end_time && !force) {
lderr(cct) << "error: deferment time has not expired." << dendl;
return -EPERM;
}
if (trash_spec.state == cls::rbd::TRASH_IMAGE_STATE_MOVING) {
lderr(cct) << "error: image is pending moving to the trash."
<< dendl;
return -EUCLEAN;
} else if (trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_NORMAL &&
trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_REMOVING) {
lderr(cct) << "error: image is pending restoration." << dendl;
return -EBUSY;
}
AsioEngine asio_engine(io_ctx);
C_SaferCond cond;
auto req = librbd::trash::RemoveRequest<I>::create(
io_ctx, image_id, asio_engine.get_work_queue(), force, prog_ctx, &cond);
req->send();
r = cond.wait();
if (r < 0) {
return r;
}
C_SaferCond notify_ctx;
TrashWatcher<I>::notify_image_removed(io_ctx, image_id, ¬ify_ctx);
r = notify_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
template <typename I>
int Trash<I>::restore(librados::IoCtx &io_ctx,
const TrashImageSources& trash_image_sources,
const std::string &image_id,
const std::string &image_new_name) {
CephContext *cct((CephContext *)io_ctx.cct());
ldout(cct, 20) << "trash_restore " << &io_ctx << " " << image_id << " "
<< image_new_name << dendl;
cls::rbd::TrashImageSpec trash_spec;
int r = cls_client::trash_get(&io_ctx, image_id, &trash_spec);
if (r < 0) {
lderr(cct) << "error getting image id " << image_id
<< " info from trash: " << cpp_strerror(r) << dendl;
return r;
}
if (trash_image_sources.count(trash_spec.source) == 0) {
lderr(cct) << "Current trash source '" << trash_spec.source << "' "
<< "does not match expected: "
<< trash_image_sources << dendl;
return -EINVAL;
}
std::string image_name = image_new_name;
if (trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_NORMAL &&
trash_spec.state != cls::rbd::TRASH_IMAGE_STATE_RESTORING) {
lderr(cct) << "error restoring image id " << image_id
<< ", which is pending deletion" << dendl;
return -EBUSY;
}
r = cls_client::trash_state_set(&io_ctx, image_id,
cls::rbd::TRASH_IMAGE_STATE_RESTORING,
cls::rbd::TRASH_IMAGE_STATE_NORMAL);
if (r < 0 && r != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(r) << dendl;
return r;
}
if (image_name.empty()) {
// if user didn't specify a new name, let's try using the old name
image_name = trash_spec.name;
ldout(cct, 20) << "restoring image id " << image_id << " with name "
<< image_name << dendl;
}
  // verify that no image already exists with the same name
bool create_id_obj = true;
std::string existing_id;
r = cls_client::get_id(&io_ctx, util::id_obj_name(image_name), &existing_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error checking if image " << image_name << " exists: "
<< cpp_strerror(r) << dendl;
int ret = cls_client::trash_state_set(&io_ctx, image_id,
cls::rbd::TRASH_IMAGE_STATE_NORMAL,
cls::rbd::TRASH_IMAGE_STATE_RESTORING);
if (ret < 0 && ret != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(ret) << dendl;
}
return r;
  } else if (r != -ENOENT) {
// checking if we are recovering from an incomplete restore
if (existing_id != image_id) {
ldout(cct, 2) << "an image with the same name already exists" << dendl;
int r2 = cls_client::trash_state_set(&io_ctx, image_id,
cls::rbd::TRASH_IMAGE_STATE_NORMAL,
cls::rbd::TRASH_IMAGE_STATE_RESTORING);
if (r2 < 0 && r2 != -EOPNOTSUPP) {
lderr(cct) << "error setting trash image state: "
<< cpp_strerror(r2) << dendl;
}
return -EEXIST;
}
create_id_obj = false;
}
if (create_id_obj) {
ldout(cct, 2) << "adding id object" << dendl;
librados::ObjectWriteOperation op;
op.create(true);
cls_client::set_id(&op, image_id);
r = io_ctx.operate(util::id_obj_name(image_name), &op);
if (r < 0) {
lderr(cct) << "error adding id object for image " << image_name
<< ": " << cpp_strerror(r) << dendl;
return r;
}
}
ldout(cct, 2) << "adding rbd image to v2 directory..." << dendl;
r = cls_client::dir_add_image(&io_ctx, RBD_DIRECTORY, image_name,
image_id);
if (r < 0 && r != -EEXIST) {
lderr(cct) << "error adding image to v2 directory: "
<< cpp_strerror(r) << dendl;
return r;
}
r = enable_mirroring<I>(io_ctx, image_id);
if (r < 0) {
// not fatal -- ignore
}
ldout(cct, 2) << "removing image from trash..." << dendl;
r = cls_client::trash_remove(&io_ctx, image_id);
if (r < 0 && r != -ENOENT) {
lderr(cct) << "error removing image id " << image_id << " from trash: "
<< cpp_strerror(r) << dendl;
return r;
}
C_SaferCond notify_ctx;
TrashWatcher<I>::notify_image_removed(io_ctx, image_id, ¬ify_ctx);
r = notify_ctx.wait();
if (r < 0) {
lderr(cct) << "failed to send update notification: " << cpp_strerror(r)
<< dendl;
}
return 0;
}
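// Illustrative caller flow (assumption: public librbd.hpp API): restoring a
// trashed image under a new name. Passing "" for the new name re-uses the
// original name recorded in the trash entry, matching the logic above.
//
//   librbd::RBD rbd;
//   int r = rbd.trash_restore(io_ctx, image_id.c_str(), "restored-name");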
} // namespace api
} // namespace librbd
template class librbd::api::Trash<librbd::ImageCtx>;
| 24,816 | 31.653947 | 82 |
cc
|
null |
ceph-main/src/librbd/api/Trash.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRBD_API_TRASH_H
#define LIBRBD_API_TRASH_H
#include "include/rados/librados_fwd.hpp"
#include "include/rbd/librbd.hpp"
#include "cls/rbd/cls_rbd_types.h"
#include <set>
#include <string>
#include <vector>
namespace librbd {
class ProgressContext;
struct ImageCtx;
namespace api {
template <typename ImageCtxT = librbd::ImageCtx>
struct Trash {
typedef std::set<cls::rbd::TrashImageSource> TrashImageSources;
static const TrashImageSources ALLOWED_RESTORE_SOURCES;
static int move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, uint64_t delay);
static int move(librados::IoCtx &io_ctx, rbd_trash_image_source_t source,
const std::string &image_name, const std::string &image_id,
uint64_t delay);
static int get(librados::IoCtx &io_ctx, const std::string &id,
trash_image_info_t *info);
static int list(librados::IoCtx &io_ctx,
std::vector<trash_image_info_t> &entries,
bool exclude_user_remove_source);
static int purge(IoCtx& io_ctx, time_t expire_ts,
float threshold, ProgressContext& pctx);
static int remove(librados::IoCtx &io_ctx, const std::string &image_id,
bool force, ProgressContext& prog_ctx);
static int restore(librados::IoCtx &io_ctx,
const TrashImageSources& trash_image_sources,
const std::string &image_id,
const std::string &image_new_name);
};
} // namespace api
} // namespace librbd
extern template class librbd::api::Trash<librbd::ImageCtx>;
#endif // LIBRBD_API_TRASH_H
| 1,783 | 32.037037 | 77 |
h
|
null |
ceph-main/src/librbd/api/Utils.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/api/Utils.h"
#include "common/dout.h"
#if defined(HAVE_LIBCRYPTSETUP)
#include "librbd/crypto/luks/LUKSEncryptionFormat.h"
#endif
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::api::util: " << __func__ << ": "
namespace librbd {
namespace api {
namespace util {
template <typename I>
int create_encryption_format(
CephContext* cct, encryption_format_t format,
encryption_options_t opts, size_t opts_size, bool c_api,
crypto::EncryptionFormat<I>** result_format) {
size_t expected_opts_size;
switch (format) {
#if defined(HAVE_LIBCRYPTSETUP)
case RBD_ENCRYPTION_FORMAT_LUKS1: {
if (c_api) {
expected_opts_size = sizeof(rbd_encryption_luks1_format_options_t);
if (expected_opts_size == opts_size) {
auto c_opts = (rbd_encryption_luks1_format_options_t*)opts;
*result_format = new crypto::luks::LUKS1EncryptionFormat<I>(
c_opts->alg, {c_opts->passphrase, c_opts->passphrase_size});
}
} else {
expected_opts_size = sizeof(encryption_luks1_format_options_t);
if (expected_opts_size == opts_size) {
auto cpp_opts = (encryption_luks1_format_options_t*)opts;
*result_format = new crypto::luks::LUKS1EncryptionFormat<I>(
cpp_opts->alg, cpp_opts->passphrase);
}
}
break;
}
case RBD_ENCRYPTION_FORMAT_LUKS2: {
if (c_api) {
expected_opts_size = sizeof(rbd_encryption_luks2_format_options_t);
if (expected_opts_size == opts_size) {
auto c_opts = (rbd_encryption_luks2_format_options_t*)opts;
*result_format = new crypto::luks::LUKS2EncryptionFormat<I>(
c_opts->alg, {c_opts->passphrase, c_opts->passphrase_size});
}
} else {
expected_opts_size = sizeof(encryption_luks2_format_options_t);
if (expected_opts_size == opts_size) {
auto cpp_opts = (encryption_luks2_format_options_t*)opts;
*result_format = new crypto::luks::LUKS2EncryptionFormat<I>(
cpp_opts->alg, cpp_opts->passphrase);
}
}
break;
}
case RBD_ENCRYPTION_FORMAT_LUKS: {
if (c_api) {
expected_opts_size = sizeof(rbd_encryption_luks_format_options_t);
if (expected_opts_size == opts_size) {
auto c_opts = (rbd_encryption_luks_format_options_t*)opts;
*result_format = new crypto::luks::LUKSEncryptionFormat<I>(
{c_opts->passphrase, c_opts->passphrase_size});
}
} else {
expected_opts_size = sizeof(encryption_luks_format_options_t);
if (expected_opts_size == opts_size) {
auto cpp_opts = (encryption_luks_format_options_t*)opts;
*result_format = new crypto::luks::LUKSEncryptionFormat<I>(
cpp_opts->passphrase);
}
}
break;
}
#endif
default:
lderr(cct) << "unsupported encryption format: " << format << dendl;
return -ENOTSUP;
}
if (expected_opts_size != opts_size) {
lderr(cct) << "expected opts_size: " << expected_opts_size << dendl;
return -EINVAL;
}
return 0;
}
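// Illustrative usage (assumption: public librbd.hpp API): the C++ path lands
// here with c_api=false. Formatting an image with LUKS2 might look like:
//
//   librbd::encryption_luks2_format_options_t opts;
//   opts.alg = RBD_ENCRYPTION_ALGORITHM_AES256;
//   opts.passphrase = "my secret passphrase";
//   int r = image.encryption_format(RBD_ENCRYPTION_FORMAT_LUKS2, &opts,
//                                   sizeof(opts));
//
// Passing a mismatched opts_size is rejected with -EINVAL by the size check
// above, which is how ABI drift between caller and library is detected.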
} // namespace util
} // namespace api
} // namespace librbd
template int librbd::api::util::create_encryption_format(
CephContext* cct, encryption_format_t format, encryption_options_t opts,
size_t opts_size, bool c_api,
crypto::EncryptionFormat<librbd::ImageCtx>** result_format);
| 3,607 | 34.029126 | 78 |
cc
|
null |
ceph-main/src/librbd/api/Utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_API_UTILS_H
#define CEPH_LIBRBD_API_UTILS_H
#include "include/rbd/librbd.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/crypto/EncryptionFormat.h"
namespace librbd {
struct ImageCtx;
namespace api {
namespace util {
template <typename ImageCtxT = librbd::ImageCtx>
int create_encryption_format(
CephContext* cct, encryption_format_t format,
encryption_options_t opts, size_t opts_size, bool c_api,
crypto::EncryptionFormat<ImageCtxT>** result_format);
} // namespace util
} // namespace api
} // namespace librbd
#endif // CEPH_LIBRBD_API_UTILS_H
| 701 | 23.206897 | 70 |
h
|
null |
ceph-main/src/librbd/asio/ContextWQ.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/asio/ContextWQ.h"
#include "include/Context.h"
#include "common/Cond.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::asio::ContextWQ: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace asio {
ContextWQ::ContextWQ(CephContext* cct, boost::asio::io_context& io_context)
: m_cct(cct), m_io_context(io_context),
m_strand(std::make_unique<boost::asio::io_context::strand>(io_context)),
m_queued_ops(0) {
ldout(m_cct, 20) << dendl;
}
ContextWQ::~ContextWQ() {
ldout(m_cct, 20) << dendl;
drain();
m_strand.reset();
}
void ContextWQ::drain() {
ldout(m_cct, 20) << dendl;
C_SaferCond ctx;
drain_handler(&ctx);
ctx.wait();
}
void ContextWQ::drain_handler(Context* ctx) {
if (m_queued_ops == 0) {
ctx->complete(0);
return;
}
// new items might be queued while we are trying to drain, so we
// might need to post the handler multiple times
boost::asio::post(*m_strand, [this, ctx]() { drain_handler(ctx); });
}
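// The re-posting above is a small, generic asio idiom: keep rescheduling the
// completion through the strand until the op counter quiesces. A
// self-contained sketch of the same pattern (illustrative names only; the
// caller must keep the strand and counter alive until on_drained fires):
//
//   #include <boost/asio.hpp>
//   #include <atomic>
//   #include <functional>
//
//   void drain_when_idle(boost::asio::io_context::strand& strand,
//                        std::atomic<uint64_t>& queued_ops,
//                        std::function<void()> on_drained) {
//     if (queued_ops == 0) {
//       on_drained();
//       return;
//     }
//     boost::asio::post(strand, [&strand, &queued_ops,
//                                on_drained = std::move(on_drained)]() mutable {
//       drain_when_idle(strand, queued_ops, std::move(on_drained));
//     });
//   }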
} // namespace asio
} // namespace librbd
| 1,227 | 23.56 | 76 |
cc
|
null |
ceph-main/src/librbd/asio/ContextWQ.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
#define CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
#include "include/common_fwd.h"
#include "include/Context.h"
#include <atomic>
#include <memory>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>
namespace librbd {
namespace asio {
class ContextWQ {
public:
explicit ContextWQ(CephContext* cct, boost::asio::io_context& io_context);
~ContextWQ();
void drain();
void queue(Context *ctx, int r = 0) {
++m_queued_ops;
    // ensure all legacy ContextWQ users are dispatched sequentially for
    // backwards compatibility (i.e. their completions might not be safe
    // to run concurrently)
boost::asio::post(*m_strand, [this, ctx, r]() {
ctx->complete(r);
ceph_assert(m_queued_ops > 0);
--m_queued_ops;
});
}
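  // Illustrative usage (assumption: LambdaContext from include/Context.h):
  //
  //   ContextWQ wq(cct, io_context);
  //   wq.queue(new LambdaContext([](int r) {
  //     // runs on the strand; deleted by complete()
  //   }), 0);
  //
  // Because every completion is posted through the single strand, two queued
  // contexts never run concurrently, even on a multi-threaded io_context.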
private:
CephContext* m_cct;
boost::asio::io_context& m_io_context;
std::unique_ptr<boost::asio::io_context::strand> m_strand;
std::atomic<uint64_t> m_queued_ops;
void drain_handler(Context* ctx);
};
} // namespace asio
} // namespace librbd
#endif // CEPH_LIBRBD_ASIO_CONTEXT_WQ_H
| 1,214 | 21.924528 | 76 |
h
|
null |
ceph-main/src/librbd/asio/Utils.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASIO_UTILS_H
#define CEPH_LIBRBD_ASIO_UTILS_H
#include "include/Context.h"
#include "include/rados/librados_fwd.hpp"
#include <boost/system/error_code.hpp>
namespace librbd {
namespace asio {
namespace util {
template <typename T>
auto get_context_adapter(T&& t) {
return [t = std::move(t)](boost::system::error_code ec) {
t->complete(-ec.value());
};
}
template <typename T>
auto get_callback_adapter(T&& t) {
return [t = std::move(t)](boost::system::error_code ec, auto&& ... args) {
t(-ec.value(), std::forward<decltype(args)>(args)...);
};
}
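// Illustrative usage: adapting a boost::asio completion handler, which
// receives a boost::system::error_code, into the negative-errno convention
// used throughout librbd (the stream and buffer names are placeholders):
//
//   boost::asio::async_read(
//     stream, boost::asio::buffer(buf),
//     get_callback_adapter([](int r, std::size_t bytes_read) {
//       // r is 0 on success or a negative errno value
//     }));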
} // namespace util
} // namespace asio
} // namespace librbd
#endif // CEPH_LIBRBD_ASIO_UTILS_H
| 792 | 22.323529 | 76 |
h
|
null |
ceph-main/src/librbd/cache/ImageWriteback.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "ImageWriteback.h"
#include "include/buffer.h"
#include "common/dout.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ImageDispatchSpec.h"
#include "librbd/io/ImageRequest.h"
#include "librbd/io/ReadResult.h"
#undef dout_subsys
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ImageWriteback: " << __func__ << ": "
namespace librbd {
namespace cache {
template <typename I>
ImageWriteback<I>::ImageWriteback(I &image_ctx) : m_image_ctx(image_ctx) {
}
template <typename I>
void ImageWriteback<I>::aio_read(Extents &&image_extents, bufferlist *bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "image_extents=" << image_extents << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_READ);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_read(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
std::move(image_extents), io::ImageArea::DATA, io::ReadResult{bl},
image_ctx->get_data_io_context(), fadvise_flags, 0, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_write(Extents &&image_extents,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "image_extents=" << image_extents << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_WRITE);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_write(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
std::move(image_extents), io::ImageArea::DATA, std::move(bl),
fadvise_flags, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "offset=" << offset << ", "
<< "length=" << length << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_DISCARD);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_discard(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
{{offset, length}}, io::ImageArea::DATA, discard_granularity_bytes, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_flush(io::FlushSource flush_source,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_FLUSH);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_flush(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
flush_source, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_writesame(uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "offset=" << offset << ", "
<< "length=" << length << ", "
<< "data_len=" << bl.length() << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_WRITESAME);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_write_same(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
{{offset, length}}, io::ImageArea::DATA, std::move(bl),
fadvise_flags, trace);
req->send();
}
template <typename I>
void ImageWriteback<I>::aio_compare_and_write(Extents &&image_extents,
ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags,
Context *on_finish) {
CephContext *cct = m_image_ctx.cct;
ldout(cct, 20) << "image_extents=" << image_extents << ", "
<< "on_finish=" << on_finish << dendl;
ImageCtx *image_ctx = util::get_image_ctx(&m_image_ctx);
auto aio_comp = io::AioCompletion::create_and_start(
on_finish, image_ctx, io::AIO_TYPE_COMPARE_AND_WRITE);
ZTracer::Trace trace;
auto req = io::ImageDispatchSpec::create_compare_and_write(
*image_ctx, io::IMAGE_DISPATCH_LAYER_WRITEBACK_CACHE, aio_comp,
std::move(image_extents), io::ImageArea::DATA, std::move(cmp_bl),
std::move(bl), mismatch_offset, fadvise_flags, trace);
req->send();
}
} // namespace cache
} // namespace librbd
template class librbd::cache::ImageWriteback<librbd::ImageCtx>;
| 5,731 | 37.993197 | 79 |
cc
|
null |
ceph-main/src/librbd/cache/ImageWriteback.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
#define CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
#include "include/buffer_fwd.h"
#include "include/int_types.h"
#include "librbd/io/Types.h"
#include <vector>
class Context;
namespace librbd {
struct ImageCtx;
namespace cache {
class ImageWritebackInterface {
public:
typedef std::vector<std::pair<uint64_t,uint64_t> > Extents;
virtual ~ImageWritebackInterface() {
}
virtual void aio_read(Extents &&image_extents, ceph::bufferlist *bl,
int fadvise_flags, Context *on_finish) = 0;
virtual void aio_write(Extents &&image_extents, ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) = 0;
virtual void aio_discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes, Context *on_finish) = 0;
  virtual void aio_flush(io::FlushSource flush_source, Context *on_finish) = 0;
virtual void aio_writesame(uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish) = 0;
virtual void aio_compare_and_write(Extents &&image_extents,
ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags, Context *on_finish) = 0;
};
/**
* client-side, image extent cache writeback handler
*/
template <typename ImageCtxT = librbd::ImageCtx>
class ImageWriteback : public ImageWritebackInterface {
public:
using ImageWritebackInterface::Extents;
explicit ImageWriteback(ImageCtxT &image_ctx);
void aio_read(Extents &&image_extents, ceph::bufferlist *bl,
int fadvise_flags, Context *on_finish);
void aio_write(Extents &&image_extents, ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish);
void aio_discard(uint64_t offset, uint64_t length,
uint32_t discard_granularity_bytes, Context *on_finish);
void aio_flush(io::FlushSource flush_source, Context *on_finish);
void aio_writesame(uint64_t offset, uint64_t length,
ceph::bufferlist&& bl,
int fadvise_flags, Context *on_finish);
void aio_compare_and_write(Extents &&image_extents,
ceph::bufferlist&& cmp_bl,
ceph::bufferlist&& bl,
uint64_t *mismatch_offset,
int fadvise_flags, Context *on_finish);
private:
ImageCtxT &m_image_ctx;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::ImageWriteback<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_IMAGE_WRITEBACK
| 2,899 | 36.179487 | 87 |
h
|
null |
ceph-main/src/librbd/cache/ObjectCacherObjectDispatch.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/ObjectCacherObjectDispatch.h"
#include "include/neorados/RADOS.hpp"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/ObjectCacherWriteback.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Types.h"
#include "librbd/io/Utils.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <vector>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ObjectCacherObjectDispatch: " \
<< this << " " << __func__ << ": "
namespace librbd {
namespace cache {
using librbd::util::data_object_name;
namespace {
typedef std::vector<ObjectExtent> ObjectExtents;
} // anonymous namespace
template <typename I>
struct ObjectCacherObjectDispatch<I>::C_InvalidateCache : public Context {
ObjectCacherObjectDispatch* dispatcher;
bool purge_on_error;
Context *on_finish;
C_InvalidateCache(ObjectCacherObjectDispatch* dispatcher,
bool purge_on_error, Context *on_finish)
: dispatcher(dispatcher), purge_on_error(purge_on_error),
on_finish(on_finish) {
}
void finish(int r) override {
ceph_assert(ceph_mutex_is_locked(dispatcher->m_cache_lock));
auto cct = dispatcher->m_image_ctx->cct;
if (r == -EBLOCKLISTED) {
lderr(cct) << "blocklisted during flush (purging)" << dendl;
dispatcher->m_object_cacher->purge_set(dispatcher->m_object_set);
} else if (r < 0 && purge_on_error) {
lderr(cct) << "failed to invalidate cache (purging): "
<< cpp_strerror(r) << dendl;
dispatcher->m_object_cacher->purge_set(dispatcher->m_object_set);
} else if (r != 0) {
lderr(cct) << "failed to invalidate cache: " << cpp_strerror(r) << dendl;
}
auto unclean = dispatcher->m_object_cacher->release_set(
dispatcher->m_object_set);
if (unclean == 0) {
r = 0;
} else {
lderr(cct) << "could not release all objects from cache: "
<< unclean << " bytes remain" << dendl;
if (r == 0) {
r = -EBUSY;
}
}
on_finish->complete(r);
}
};
template <typename I>
ObjectCacherObjectDispatch<I>::ObjectCacherObjectDispatch(
I* image_ctx, size_t max_dirty, bool writethrough_until_flush)
: m_image_ctx(image_ctx), m_max_dirty(max_dirty),
m_writethrough_until_flush(writethrough_until_flush),
m_cache_lock(ceph::make_mutex(util::unique_lock_name(
"librbd::cache::ObjectCacherObjectDispatch::cache_lock", this))) {
ceph_assert(m_image_ctx->data_ctx.is_valid());
}
template <typename I>
ObjectCacherObjectDispatch<I>::~ObjectCacherObjectDispatch() {
delete m_object_cacher;
delete m_object_set;
delete m_writeback_handler;
}
template <typename I>
void ObjectCacherObjectDispatch<I>::init() {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
m_cache_lock.lock();
ldout(cct, 5) << "enabling caching..." << dendl;
m_writeback_handler = new ObjectCacherWriteback(m_image_ctx, m_cache_lock);
auto init_max_dirty = m_max_dirty;
if (m_writethrough_until_flush) {
init_max_dirty = 0;
}
auto cache_size =
m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_size");
auto target_dirty =
m_image_ctx->config.template get_val<Option::size_t>("rbd_cache_target_dirty");
auto max_dirty_age =
m_image_ctx->config.template get_val<double>("rbd_cache_max_dirty_age");
auto block_writes_upfront =
m_image_ctx->config.template get_val<bool>("rbd_cache_block_writes_upfront");
auto max_dirty_object =
m_image_ctx->config.template get_val<uint64_t>("rbd_cache_max_dirty_object");
ldout(cct, 5) << "Initial cache settings:"
<< " size=" << cache_size
<< " num_objects=" << 10
<< " max_dirty=" << init_max_dirty
<< " target_dirty=" << target_dirty
<< " max_dirty_age=" << max_dirty_age << dendl;
m_object_cacher = new ObjectCacher(cct, m_image_ctx->perfcounter->get_name(),
*m_writeback_handler, m_cache_lock,
nullptr, nullptr, cache_size,
10, /* reset this in init */
init_max_dirty, target_dirty,
max_dirty_age, block_writes_upfront);
// size object cache appropriately
if (max_dirty_object == 0) {
max_dirty_object = std::min<uint64_t>(
2000, std::max<uint64_t>(10, cache_size / 100 /
sizeof(ObjectCacher::Object)));
}
ldout(cct, 5) << " cache bytes " << cache_size
<< " -> about " << max_dirty_object << " objects" << dendl;
m_object_cacher->set_max_objects(max_dirty_object);
m_object_set = new ObjectCacher::ObjectSet(nullptr,
m_image_ctx->data_ctx.get_id(), 0);
m_object_cacher->start();
m_cache_lock.unlock();
// add ourself to the IO object dispatcher chain
if (m_max_dirty > 0) {
m_image_ctx->disable_zero_copy = true;
}
m_image_ctx->io_object_dispatcher->register_dispatch(this);
}
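// Worked example of the sizing heuristic above (hypothetical numbers): with
// rbd_cache_size = 32 MiB and sizeof(ObjectCacher::Object) ~= 1 KiB, the
// derived object count is
//   min(2000, max(10, 33554432 / 100 / 1024)) = min(2000, max(10, 327)) = 327
// i.e. roughly one cached object per 100 KiB of cache, clamped to [10, 2000].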
template <typename I>
void ObjectCacherObjectDispatch<I>::shut_down(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
// chain shut down in reverse order
// shut down the cache
on_finish = new LambdaContext([this, on_finish](int r) {
m_object_cacher->stop();
on_finish->complete(r);
});
// ensure we aren't holding the cache lock post-flush
on_finish = util::create_async_context_callback(*m_image_ctx, on_finish);
// invalidate any remaining cache entries
on_finish = new C_InvalidateCache(this, true, on_finish);
// flush all pending writeback state
std::lock_guard locker{m_cache_lock};
m_object_cacher->release_set(m_object_set);
m_object_cacher->flush_set(m_object_set, on_finish);
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
// IO chained in reverse order
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << *extents << dendl;
if (extents->size() == 0) {
ldout(cct, 20) << "no extents to read" << dendl;
return false;
}
if (version != nullptr) {
    // the cache doesn't track object read versions (and the versioned
    // read path is limited to a single extent), so bypass the cache
return false;
}
// ensure we aren't holding the cache lock post-read
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
  // embed the RBD-internal read flags in the generic RADOS op_flags so
  // that ObjectCacherWriteback can recover them on the read path
op_flags = ((op_flags & ~ObjectCacherWriteback::READ_FLAGS_MASK) |
((read_flags << ObjectCacherWriteback::READ_FLAGS_SHIFT) &
ObjectCacherWriteback::READ_FLAGS_MASK));
ceph::bufferlist* bl;
if (extents->size() > 1) {
auto req = new io::ReadResult::C_ObjectReadMergedExtents(
cct, extents, on_dispatched);
on_dispatched = req;
bl = &req->bl;
} else {
bl = &extents->front().bl;
}
m_image_ctx->image_lock.lock_shared();
auto rd = m_object_cacher->prepare_read(
io_context->read_snap().value_or(CEPH_NOSNAP), bl, op_flags);
m_image_ctx->image_lock.unlock_shared();
uint64_t off = 0;
for (auto& read_extent: *extents) {
ObjectExtent extent(data_object_name(m_image_ctx, object_no), object_no,
read_extent.offset, read_extent.length, 0);
extent.oloc.pool = m_image_ctx->data_ctx.get_id();
extent.buffer_extents.push_back({off, read_extent.length});
rd->extents.push_back(extent);
off += read_extent.length;
}
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
m_cache_lock.lock();
int r = m_object_cacher->readx(rd, m_object_set, on_dispatched, &trace);
m_cache_lock.unlock();
if (r != 0) {
on_dispatched->complete(r);
}
return true;
}
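// Worked example of the flag embedding above (the mask/shift values here are
// hypothetical; the real constants live in ObjectCacherWriteback): with
// READ_FLAGS_SHIFT = 24 and READ_FLAGS_MASK = 0xff000000,
//
//   uint32_t packed = (uint32_t(op_flags) & ~0xff000000u) |
//                     ((uint32_t(read_flags) << 24) & 0xff000000u);  // pack
//   int recovered_read_flags = int((packed & 0xff000000u) >> 24);    // unpack
//
// keeps the RADOS op flags in the low bits and carries the RBD read flags
// through the cache layer, which only forwards a single int.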
template <typename I>
bool ObjectCacherObjectDispatch<I>::discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< object_len << dendl;
ObjectExtents object_extents;
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, object_len, 0);
// discard the cache state after changes are committed to disk (and to
// prevent races w/ readahead)
auto ctx = *on_finish;
*on_finish = new LambdaContext(
[this, object_extents, ctx](int r) {
m_cache_lock.lock();
m_object_cacher->discard_set(m_object_set, object_extents);
m_cache_lock.unlock();
ctx->complete(r);
});
// ensure we aren't holding the cache lock post-write
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
// ensure any in-flight writeback is complete before advancing
// the discard request
std::lock_guard locker{m_cache_lock};
m_object_cacher->discard_writeback(m_object_set, object_extents,
on_dispatched);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< data.length() << dendl;
// ensure we aren't holding the cache lock post-write
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
// cache layer does not handle version checking
if (assert_version.has_value() ||
(write_flags & io::OBJECT_WRITE_FLAG_CREATE_EXCLUSIVE) != 0) {
ObjectExtents object_extents;
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, data.length(), 0);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
// ensure any in-flight writeback is complete before advancing
// the write request
std::lock_guard locker{m_cache_lock};
m_object_cacher->discard_writeback(m_object_set, object_extents,
on_dispatched);
return true;
}
SnapContext snapc;
if (io_context->write_snap_context()) {
auto write_snap_context = *io_context->write_snap_context();
snapc = SnapContext(write_snap_context.first,
{write_snap_context.second.begin(),
write_snap_context.second.end()});
}
m_image_ctx->image_lock.lock_shared();
ObjectCacher::OSDWrite *wr = m_object_cacher->prepare_write(
snapc, data, ceph::real_time::min(), op_flags, *journal_tid);
m_image_ctx->image_lock.unlock_shared();
ObjectExtent extent(data_object_name(m_image_ctx, object_no),
object_no, object_off, data.length(), 0);
extent.oloc.pool = m_image_ctx->data_ctx.get_id();
extent.buffer_extents.push_back({0, data.length()});
wr->extents.push_back(extent);
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
std::lock_guard locker{m_cache_lock};
m_object_cacher->writex(wr, m_object_set, on_dispatched, &trace);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< object_len << dendl;
// ObjectCacher doesn't support write-same so convert to regular write
io::LightweightObjectExtent extent(object_no, object_off, object_len, 0);
extent.buffer_extents = std::move(buffer_extents);
bufferlist ws_data;
io::util::assemble_write_same_extent(extent, data, &ws_data, true);
return write(object_no, object_off, std::move(ws_data), io_context, op_flags,
0, std::nullopt, parent_trace, object_dispatch_flags,
journal_tid, dispatch_result, on_finish, on_dispatched);
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << object_off << "~"
<< cmp_data.length() << dendl;
// pass-through the compare-and-write request since it's not a supported
// operation of the ObjectCacher
ObjectExtents object_extents;
object_extents.emplace_back(data_object_name(m_image_ctx, object_no),
object_no, object_off, cmp_data.length(), 0);
// if compare succeeds, discard the cache state after changes are
// committed to disk
auto ctx = *on_finish;
*on_finish = new LambdaContext(
[this, object_extents, ctx](int r) {
// ObjectCacher doesn't provide a way to reliably invalidate
// extents: in case of a racing read (if the bh is in RX state),
// release_set() just returns while discard_set() populates the
// extent with zeroes. Neither is OK but the latter is better
// because it is at least deterministic...
if (r == 0) {
m_cache_lock.lock();
m_object_cacher->discard_set(m_object_set, object_extents);
m_cache_lock.unlock();
}
ctx->complete(r);
});
// ensure we aren't holding the cache lock post-flush
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
// flush any pending writes from the cache before compare
ZTracer::Trace trace(parent_trace);
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
std::lock_guard cache_locker{m_cache_lock};
m_object_cacher->flush_set(m_object_set, object_extents, &trace,
on_dispatched);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
// ensure we aren't holding the cache lock post-flush
on_dispatched = util::create_async_context_callback(*m_image_ctx,
on_dispatched);
std::lock_guard locker{m_cache_lock};
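// writethrough-until-flush semantics (cf. rbd_cache_writethrough_until_flush):
// remain in writethrough mode until the first user-originated flush proves
// the client issues flushes, then raise the dirty limit to enable writeback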
if (flush_source == io::FLUSH_SOURCE_USER && !m_user_flushed) {
m_user_flushed = true;
if (m_writethrough_until_flush && m_max_dirty > 0) {
m_object_cacher->set_max_dirty(m_max_dirty);
ldout(cct, 5) << "saw first user flush, enabling writeback" << dendl;
}
}
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
m_object_cacher->flush_set(m_object_set, on_dispatched);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::invalidate_cache(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
// ensure we aren't holding the cache lock post-flush
on_finish = util::create_async_context_callback(*m_image_ctx, on_finish);
// invalidate any remaining cache entries
on_finish = new C_InvalidateCache(this, false, on_finish);
std::lock_guard locker{m_cache_lock};
m_object_cacher->release_set(m_object_set);
m_object_cacher->flush_set(m_object_set, on_finish);
return true;
}
template <typename I>
bool ObjectCacherObjectDispatch<I>::reset_existence_cache(
Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
std::lock_guard locker{m_cache_lock};
m_object_cacher->clear_nonexistence(m_object_set);
return false;
}
} // namespace cache
} // namespace librbd
template class librbd::cache::ObjectCacherObjectDispatch<librbd::ImageCtx>;
| 17,654 | 35.252567 | 83 |
cc
|
null |
ceph-main/src/librbd/cache/ObjectCacherObjectDispatch.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
#include "librbd/io/ObjectDispatchInterface.h"
#include "common/ceph_mutex.h"
#include "osdc/ObjectCacher.h"
struct WritebackHandler;
namespace librbd {
class ImageCtx;
namespace cache {
/**
* Facade around the OSDC object cacher to make it align with
* the object dispatcher interface
*/
template <typename ImageCtxT = ImageCtx>
class ObjectCacherObjectDispatch : public io::ObjectDispatchInterface {
public:
static ObjectCacherObjectDispatch* create(ImageCtxT* image_ctx,
size_t max_dirty,
bool writethrough_until_flush) {
return new ObjectCacherObjectDispatch(image_ctx, max_dirty,
writethrough_until_flush);
}
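// Typical lifecycle, as an illustrative sketch only -- the exact wiring is
// performed by librbd's image open/close state machines and may differ:
//
//   auto* dispatch = ObjectCacherObjectDispatch<ImageCtx>::create(
//       image_ctx, max_dirty, writethrough_until_flush);
//   dispatch->init();                 // set up the cache
//   ...                               // I/O flows through the dispatch hooks
//   dispatch->shut_down(on_finish);   // flush and tear the cache down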
ObjectCacherObjectDispatch(ImageCtxT* image_ctx, size_t max_dirty,
bool writethrough_until_flush);
~ObjectCacherObjectDispatch() override;
io::ObjectDispatchLayer get_dispatch_layer() const override {
return io::OBJECT_DISPATCH_LAYER_CACHE;
}
void init();
void shut_down(Context* on_finish) override;
bool read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) override;
bool list_snaps(
uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
int list_snap_flags, const ZTracer::Trace &parent_trace,
io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) override;
bool reset_existence_cache(Context* on_finish) override;
void extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
uint64_t journal_tid, uint64_t new_journal_tid) {
}
int prepare_copyup(
uint64_t object_no,
io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
return 0;
}
private:
struct C_InvalidateCache;
ImageCtxT* m_image_ctx;
size_t m_max_dirty;
bool m_writethrough_until_flush;
ceph::mutex m_cache_lock;
ObjectCacher *m_object_cacher = nullptr;
ObjectCacher::ObjectSet *m_object_set = nullptr;
WritebackHandler *m_writeback_handler = nullptr;
bool m_user_flushed = false;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::ObjectCacherObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_OBJECT_CACHER_OBJECT_DISPATCH_H
| 4,671 | 34.12782 | 82 |
h
|
null |
ceph-main/src/librbd/cache/ObjectCacherWriteback.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <errno.h>
#include "librbd/cache/ObjectCacherWriteback.h"
#include "common/ceph_context.h"
#include "common/dout.h"
#include "common/ceph_mutex.h"
#include "osdc/Striper.h"
#include "include/Context.h"
#include "include/neorados/RADOS.hpp"
#include "include/rados/librados.hpp"
#include "include/rbd/librbd.hpp"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/internal.h"
#include "librbd/ObjectMap.h"
#include "librbd/Journal.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/io/AioCompletion.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/io/ReadResult.h"
#include "librbd/io/Utils.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ObjectCacherWriteback: "
using namespace std;
namespace librbd {
namespace cache {
/**
 * context that completes another context while holding a mutex
 *
 * @param cct ceph context (for logging)
 * @param c context to finish
 * @param cache_lock mutex to hold while completing @c c
 */
class C_ReadRequest : public Context {
public:
C_ReadRequest(CephContext *cct, Context *c, ceph::mutex *cache_lock)
: m_cct(cct), m_ctx(c), m_cache_lock(cache_lock) {
}
void finish(int r) override {
ldout(m_cct, 20) << "aio_cb completing " << dendl;
{
std::lock_guard cache_locker{*m_cache_lock};
m_ctx->complete(r);
}
ldout(m_cct, 20) << "aio_cb finished" << dendl;
}
private:
CephContext *m_cct;
Context *m_ctx;
ceph::mutex *m_cache_lock;
};
class C_OrderedWrite : public Context {
public:
C_OrderedWrite(CephContext *cct,
ObjectCacherWriteback::write_result_d *result,
const ZTracer::Trace &trace, ObjectCacherWriteback *wb)
: m_cct(cct), m_result(result), m_trace(trace), m_wb_handler(wb) {}
~C_OrderedWrite() override {}
void finish(int r) override {
ldout(m_cct, 20) << "C_OrderedWrite completing " << m_result << dendl;
{
std::lock_guard l{m_wb_handler->m_lock};
ceph_assert(!m_result->done);
m_result->done = true;
m_result->ret = r;
m_wb_handler->complete_writes(m_result->oid);
}
ldout(m_cct, 20) << "C_OrderedWrite finished " << m_result << dendl;
m_trace.event("finish");
}
private:
CephContext *m_cct;
ObjectCacherWriteback::write_result_d *m_result;
ZTracer::Trace m_trace;
ObjectCacherWriteback *m_wb_handler;
};
struct C_CommitIOEventExtent : public Context {
ImageCtx *image_ctx;
uint64_t journal_tid;
uint64_t offset;
uint64_t length;
C_CommitIOEventExtent(ImageCtx *image_ctx, uint64_t journal_tid,
uint64_t offset, uint64_t length)
: image_ctx(image_ctx), journal_tid(journal_tid), offset(offset),
length(length) {
}
void finish(int r) override {
// all IO operations are flushed prior to closing the journal
ceph_assert(image_ctx->journal != nullptr);
image_ctx->journal->commit_io_event_extent(journal_tid, offset, length, r);
}
};
ObjectCacherWriteback::ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock)
: m_tid(0), m_lock(lock), m_ictx(ictx) {
}
void ObjectCacherWriteback::read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc,
uint64_t off, uint64_t len, snapid_t snapid,
bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace,
Context *onfinish)
{
ZTracer::Trace trace;
if (parent_trace.valid()) {
trace.init("", &m_ictx->trace_endpoint, &parent_trace);
trace.copy_name("cache read " + oid.name);
trace.event("start");
}
// on completion, take the mutex and then call onfinish.
onfinish = new C_ReadRequest(m_ictx->cct, onfinish, &m_lock);
// re-use standard object read state machine
auto aio_comp = io::AioCompletion::create_and_start(onfinish, m_ictx,
io::AIO_TYPE_READ);
aio_comp->read_result = io::ReadResult{pbl};
aio_comp->set_request_count(1);
auto req_comp = new io::ReadResult::C_ObjectReadRequest(
aio_comp, {{off, len, {{0, len}}}});
auto io_context = m_ictx->duplicate_data_io_context();
if (snapid != CEPH_NOSNAP) {
io_context->read_snap(snapid);
}
// extract the embedded RBD read flags from the op_flags
int read_flags = (op_flags & READ_FLAGS_MASK) >> READ_FLAGS_SHIFT;
op_flags &= ~READ_FLAGS_MASK;
auto req = io::ObjectDispatchSpec::create_read(
m_ictx, io::OBJECT_DISPATCH_LAYER_CACHE, object_no, &req_comp->extents,
io_context, op_flags, read_flags, trace, nullptr, req_comp);
req->send();
}
bool ObjectCacherWriteback::may_copy_on_write(const object_t& oid,
uint64_t read_off,
uint64_t read_len,
snapid_t snapid)
{
std::shared_lock image_locker(m_ictx->image_lock);
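// a read that falls below the parent overlap may race with a
// write-triggered copy-up, so report whether this object can still be
// backed by the parent image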
uint64_t raw_overlap = 0;
uint64_t object_overlap = 0;
m_ictx->get_parent_overlap(m_ictx->snap_id, &raw_overlap);
if (raw_overlap > 0) {
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
auto [parent_extents, area] = io::util::object_to_area_extents(
m_ictx, object_no, {{0, m_ictx->layout.object_size}});
object_overlap = m_ictx->prune_parent_extents(parent_extents, area,
raw_overlap, false);
}
bool may = object_overlap > 0;
ldout(m_ictx->cct, 10) << "may_copy_on_write " << oid << " " << read_off
<< "~" << read_len << " = " << may << dendl;
return may;
}
ceph_tid_t ObjectCacherWriteback::write(const object_t& oid,
const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc,
const bufferlist &bl,
ceph::real_time mtime,
uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit)
{
ZTracer::Trace trace;
if (parent_trace.valid()) {
trace.init("", &m_ictx->trace_endpoint, &parent_trace);
trace.copy_name("writeback " + oid.name);
trace.event("start");
}
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
write_result_d *result = new write_result_d(oid.name, oncommit);
m_writes[oid.name].push(result);
ldout(m_ictx->cct, 20) << "write will wait for result " << result << dendl;
bufferlist bl_copy(bl);
Context *ctx = new C_OrderedWrite(m_ictx->cct, result, trace, this);
ctx = util::create_async_context_callback(*m_ictx, ctx);
auto io_context = m_ictx->duplicate_data_io_context();
if (!snapc.empty()) {
io_context->write_snap_context(
{{snapc.seq, {snapc.snaps.begin(), snapc.snaps.end()}}});
}
auto req = io::ObjectDispatchSpec::create_write(
m_ictx, io::OBJECT_DISPATCH_LAYER_CACHE, object_no, off, std::move(bl_copy),
io_context, 0, 0, std::nullopt, journal_tid, trace, ctx);
req->object_dispatch_flags = (
io::OBJECT_DISPATCH_FLAG_FLUSH |
io::OBJECT_DISPATCH_FLAG_WILL_RETRY_ON_ERROR);
req->send();
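// the ObjectCacher only needs a unique, increasing tid for this write;
// per-object completion ordering is enforced by C_OrderedWrite and
// complete_writes()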
return ++m_tid;
}
void ObjectCacherWriteback::overwrite_extent(const object_t& oid, uint64_t off,
uint64_t len,
ceph_tid_t original_journal_tid,
ceph_tid_t new_journal_tid) {
ldout(m_ictx->cct, 20) << __func__ << ": " << oid << " "
<< off << "~" << len << " "
<< "journal_tid=" << original_journal_tid << ", "
<< "new_journal_tid=" << new_journal_tid << dendl;
uint64_t object_no = oid_to_object_no(oid.name, m_ictx->object_prefix);
// all IO operations are flushed prior to closing the journal
ceph_assert(original_journal_tid != 0 && m_ictx->journal != NULL);
auto [image_extents, _] = io::util::object_to_area_extents(m_ictx, object_no,
{{off, len}});
for (auto it = image_extents.begin(); it != image_extents.end(); ++it) {
if (new_journal_tid != 0) {
// ensure new journal event is safely committed to disk before
// committing old event
m_ictx->journal->flush_event(
new_journal_tid, new C_CommitIOEventExtent(m_ictx,
original_journal_tid,
it->first, it->second));
} else {
m_ictx->journal->commit_io_event_extent(original_journal_tid, it->first,
it->second, 0);
}
}
}
void ObjectCacherWriteback::complete_writes(const std::string& oid)
{
ceph_assert(ceph_mutex_is_locked(m_lock));
std::queue<write_result_d*>& results = m_writes[oid];
ldout(m_ictx->cct, 20) << "complete_writes() oid " << oid << dendl;
std::list<write_result_d*> finished;
while (!results.empty()) {
write_result_d *result = results.front();
if (!result->done)
break;
finished.push_back(result);
results.pop();
}
if (results.empty())
m_writes.erase(oid);
for (std::list<write_result_d*>::iterator it = finished.begin();
it != finished.end(); ++it) {
write_result_d *result = *it;
ldout(m_ictx->cct, 20) << "complete_writes() completing " << result
<< dendl;
result->oncommit->complete(result->ret);
delete result;
}
}
} // namespace cache
} // namespace librbd
| 10,156 | 34.267361 | 80 |
cc
|
null |
ceph-main/src/librbd/cache/ObjectCacherWriteback.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
#define CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
#include "common/snap_types.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <queue>
class Context;
namespace librbd {
struct ImageCtx;
namespace cache {
class ObjectCacherWriteback : public WritebackHandler {
public:
static const int READ_FLAGS_MASK = 0xF000;
static const int READ_FLAGS_SHIFT = 24;
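// RBD-internal read flags don't fit through the WritebackHandler
// interface, so they are embedded in the upper bits of the generic RADOS
// op_flags and unpacked again in ObjectCacherWriteback::read()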
ObjectCacherWriteback(ImageCtx *ictx, ceph::mutex& lock);
// Note that oloc, trunc_size, and trunc_seq are ignored
void read(const object_t& oid, uint64_t object_no,
const object_locator_t& oloc, uint64_t off, uint64_t len,
snapid_t snapid, bufferlist *pbl, uint64_t trunc_size,
__u32 trunc_seq, int op_flags,
const ZTracer::Trace &parent_trace, Context *onfinish) override;
// Determine whether a read to this extent could be affected by a
// write-triggered copy-on-write
bool may_copy_on_write(const object_t& oid, uint64_t read_off,
uint64_t read_len, snapid_t snapid) override;
// Note that oloc, trunc_size, and trunc_seq are ignored
ceph_tid_t write(const object_t& oid, const object_locator_t& oloc,
uint64_t off, uint64_t len,
const SnapContext& snapc, const bufferlist &bl,
ceph::real_time mtime, uint64_t trunc_size,
__u32 trunc_seq, ceph_tid_t journal_tid,
const ZTracer::Trace &parent_trace,
Context *oncommit) override;
using WritebackHandler::write;
void overwrite_extent(const object_t& oid, uint64_t off,
uint64_t len, ceph_tid_t original_journal_tid,
ceph_tid_t new_journal_tid) override;
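// writeback completions must reach the ObjectCacher in per-object
// submission order; write_result_d queues the result of an in-flight
// write until all earlier writes to the same object have finished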
struct write_result_d {
bool done;
int ret;
std::string oid;
Context *oncommit;
write_result_d(const std::string& oid, Context *oncommit) :
done(false), ret(0), oid(oid), oncommit(oncommit) {}
private:
write_result_d(const write_result_d& rhs);
const write_result_d& operator=(const write_result_d& rhs);
};
private:
void complete_writes(const std::string& oid);
ceph_tid_t m_tid;
ceph::mutex& m_lock;
librbd::ImageCtx *m_ictx;
ceph::unordered_map<std::string, std::queue<write_result_d*> > m_writes;
friend class C_OrderedWrite;
};
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_OBJECT_CACHER_WRITEBACK_H
| 2,594 | 31.848101 | 76 |
h
|
null |
ceph-main/src/librbd/cache/ParentCacheObjectDispatch.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/errno.h"
#include "include/neorados/RADOS.hpp"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "librbd/cache/ParentCacheObjectDispatch.h"
#include "librbd/io/ObjectDispatchSpec.h"
#include "librbd/io/ObjectDispatcherInterface.h"
#include "librbd/plugin/Api.h"
#include "osd/osd_types.h"
#include "osdc/WritebackHandler.h"
#include <vector>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::cache::ParentCacheObjectDispatch: " \
<< this << " " << __func__ << ": "
using namespace std;
using namespace ceph::immutable_obj_cache;
using librbd::util::data_object_name;
namespace librbd {
namespace cache {
template <typename I>
ParentCacheObjectDispatch<I>::ParentCacheObjectDispatch(
I* image_ctx, plugin::Api<I>& plugin_api)
: m_image_ctx(image_ctx), m_plugin_api(plugin_api),
m_lock(ceph::make_mutex(
"librbd::cache::ParentCacheObjectDispatch::lock", true, false)) {
ceph_assert(m_image_ctx->data_ctx.is_valid());
auto controller_path = image_ctx->cct->_conf.template get_val<std::string>(
"immutable_object_cache_sock");
m_cache_client = new CacheClient(controller_path.c_str(), m_image_ctx->cct);
}
template <typename I>
ParentCacheObjectDispatch<I>::~ParentCacheObjectDispatch() {
delete m_cache_client;
m_cache_client = nullptr;
}
template <typename I>
void ParentCacheObjectDispatch<I>::init(Context* on_finish) {
auto cct = m_image_ctx->cct;
ldout(cct, 5) << dendl;
if (m_image_ctx->child == nullptr) {
ldout(cct, 5) << "non-parent image: skipping" << dendl;
if (on_finish != nullptr) {
on_finish->complete(-EINVAL);
}
return;
}
m_image_ctx->io_object_dispatcher->register_dispatch(this);
std::unique_lock locker{m_lock};
create_cache_session(on_finish, false);
}
template <typename I>
bool ParentCacheObjectDispatch<I>::read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << "object_no=" << object_no << " " << *extents << dendl;
if (version != nullptr) {
// we currently don't cache read versions
return false;
}
string oid = data_object_name(m_image_ctx, object_no);
/* if the RO daemon hasn't started yet, has crashed, or the session
 * hit an error, try to re-connect to the daemon */
std::unique_lock locker{m_lock};
if (!m_cache_client->is_session_work()) {
create_cache_session(nullptr, true);
ldout(cct, 5) << "Parent cache try to re-connect to RO daemon. "
<< "dispatch current request to lower object layer" << dendl;
return false;
}
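// ask the RO cache daemon whether it holds this object; the reply is
// handled asynchronously by handle_read_cache(), which either serves the
// read from the local cache file or falls back to the lower object
// dispatch layers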
CacheGenContextURef ctx = make_gen_lambda_context<ObjectCacheRequest*,
std::function<void(ObjectCacheRequest*)>>
([this, extents, dispatch_result, on_dispatched, object_no, io_context,
read_flags, &parent_trace]
(ObjectCacheRequest* ack) {
handle_read_cache(ack, object_no, extents, io_context, read_flags,
parent_trace, dispatch_result, on_dispatched);
});
m_cache_client->lookup_object(m_image_ctx->data_ctx.get_namespace(),
m_image_ctx->data_ctx.get_id(),
io_context->read_snap().value_or(CEPH_NOSNAP),
m_image_ctx->layout.object_size,
oid, std::move(ctx));
return true;
}
template <typename I>
void ParentCacheObjectDispatch<I>::handle_read_cache(
ObjectCacheRequest* ack, uint64_t object_no, io::ReadExtents* extents,
IOContext io_context, int read_flags, const ZTracer::Trace &parent_trace,
io::DispatchResult* dispatch_result, Context* on_dispatched) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
if (ack->type != RBDSC_READ_REPLY) {
// fall back to reading from RADOS
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
on_dispatched->complete(0);
return;
}
ceph_assert(ack->type == RBDSC_READ_REPLY);
std::string file_path = static_cast<ObjectCacheReadReplyData*>(ack)->cache_path;
if (file_path.empty()) {
if ((read_flags & io::READ_FLAG_DISABLE_READ_FROM_PARENT) != 0) {
on_dispatched->complete(-ENOENT);
return;
}
auto ctx = new LambdaContext(
[this, dispatch_result, on_dispatched](int r) {
if (r < 0 && r != -ENOENT) {
lderr(m_image_ctx->cct) << "failed to read parent: "
<< cpp_strerror(r) << dendl;
}
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
on_dispatched->complete(r);
});
m_plugin_api.read_parent(m_image_ctx, object_no, extents,
io_context->read_snap().value_or(CEPH_NOSNAP),
parent_trace, ctx);
return;
}
int read_len = 0;
for (auto& extent: *extents) {
// try to read from parent image cache
int r = read_object(file_path, &extent.bl, extent.offset, extent.length,
on_dispatched);
if (r < 0) {
// cache read error, fall back to reading from RADOS
for (auto& read_extent: *extents) {
// clear read bufferlists
if (&read_extent == &extent) {
break;
}
read_extent.bl.clear();
}
*dispatch_result = io::DISPATCH_RESULT_CONTINUE;
on_dispatched->complete(0);
return;
}
read_len += r;
}
*dispatch_result = io::DISPATCH_RESULT_COMPLETE;
on_dispatched->complete(read_len);
}
template <typename I>
int ParentCacheObjectDispatch<I>::handle_register_client(bool reg) {
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
if (!reg) {
lderr(cct) << "Parent cache failed to register client." << dendl;
}
return 0;
}
template <typename I>
void ParentCacheObjectDispatch<I>::create_cache_session(Context* on_finish,
bool is_reconnect) {
ceph_assert(ceph_mutex_is_locked_by_me(m_lock));
if (m_connecting) {
return;
}
m_connecting = true;
auto cct = m_image_ctx->cct;
ldout(cct, 20) << dendl;
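// session setup is two chained async steps: connect to the daemon's
// domain socket, then register this client; register_ctx always runs
// last and clears m_connecting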
Context* register_ctx = new LambdaContext([this, cct, on_finish](int ret) {
if (ret < 0) {
lderr(cct) << "Parent cache failed to register client." << dendl;
}
handle_register_client(ret >= 0);
ceph_assert(m_connecting);
m_connecting = false;
if (on_finish != nullptr) {
on_finish->complete(0);
}
});
Context* connect_ctx = new LambdaContext(
[this, cct, register_ctx](int ret) {
if (ret < 0) {
lderr(cct) << "Parent cache fail to connect RO daemon." << dendl;
register_ctx->complete(ret);
return;
}
ldout(cct, 20) << "Parent cache connected to RO daemon." << dendl;
m_cache_client->register_client(register_ctx);
});
if (m_cache_client != nullptr && is_reconnect) {
// destroying the CacheClient tears down all state from the old session
delete m_cache_client;
// create a new CacheClient to connect to the RO daemon
auto controller_path = cct->_conf.template get_val<std::string>(
"immutable_object_cache_sock");
m_cache_client = new CacheClient(controller_path.c_str(), m_image_ctx->cct);
}
m_cache_client->run();
m_cache_client->connect(connect_ctx);
}
template <typename I>
int ParentCacheObjectDispatch<I>::read_object(
std::string file_path, ceph::bufferlist* read_data, uint64_t offset,
uint64_t length, Context *on_finish) {
auto *cct = m_image_ctx->cct;
ldout(cct, 20) << "file path: " << file_path << dendl;
std::string error;
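// read the requested extent directly out of the RO daemon's local cache
// file on disk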
int ret = read_data->pread_file(file_path.c_str(), offset, length, &error);
if (ret < 0) {
ldout(cct, 5) << "read from file return error: " << error
<< "file path= " << file_path
<< dendl;
return ret;
}
return read_data->length();
}
} // namespace cache
} // namespace librbd
template class librbd::cache::ParentCacheObjectDispatch<librbd::ImageCtx>;
| 8,334 | 30.812977 | 80 |
cc
|
null |
ceph-main/src/librbd/cache/ParentCacheObjectDispatch.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
#define CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
#include "librbd/io/ObjectDispatchInterface.h"
#include "common/ceph_mutex.h"
#include "librbd/cache/TypeTraits.h"
#include "tools/immutable_object_cache/CacheClient.h"
#include "tools/immutable_object_cache/Types.h"
namespace librbd {
class ImageCtx;
namespace plugin { template <typename> struct Api; }
namespace cache {
template <typename ImageCtxT = ImageCtx>
class ParentCacheObjectDispatch : public io::ObjectDispatchInterface {
// mock unit testing support
typedef cache::TypeTraits<ImageCtxT> TypeTraits;
typedef typename TypeTraits::CacheClient CacheClient;
public:
static ParentCacheObjectDispatch* create(ImageCtxT* image_ctx,
plugin::Api<ImageCtxT>& plugin_api) {
return new ParentCacheObjectDispatch(image_ctx, plugin_api);
}
ParentCacheObjectDispatch(ImageCtxT* image_ctx,
plugin::Api<ImageCtxT>& plugin_api);
~ParentCacheObjectDispatch() override;
io::ObjectDispatchLayer get_dispatch_layer() const override {
return io::OBJECT_DISPATCH_LAYER_PARENT_CACHE;
}
void init(Context* on_finish = nullptr);
void shut_down(Context* on_finish) {
m_image_ctx->op_work_queue->queue(on_finish, 0);
}
bool read(
uint64_t object_no, io::ReadExtents* extents, IOContext io_context,
int op_flags, int read_flags, const ZTracer::Trace &parent_trace,
uint64_t* version, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override;
bool discard(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
IOContext io_context, int discard_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& data,
IOContext io_context, int op_flags, int write_flags,
std::optional<uint64_t> assert_version,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool write_same(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
io::LightweightBufferExtents&& buffer_extents, ceph::bufferlist&& data,
IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, int* object_dispatch_flags,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool compare_and_write(
uint64_t object_no, uint64_t object_off, ceph::bufferlist&& cmp_data,
ceph::bufferlist&& write_data, IOContext io_context, int op_flags,
const ZTracer::Trace &parent_trace, uint64_t* mismatch_offset,
int* object_dispatch_flags, uint64_t* journal_tid,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) {
return false;
}
bool flush(
io::FlushSource flush_source, const ZTracer::Trace &parent_trace,
uint64_t* journal_tid, io::DispatchResult* dispatch_result,
Context** on_finish, Context* on_dispatched) {
return false;
}
bool list_snaps(
uint64_t object_no, io::Extents&& extents, io::SnapIds&& snap_ids,
int list_snap_flags, const ZTracer::Trace &parent_trace,
io::SnapshotDelta* snapshot_delta, int* object_dispatch_flags,
io::DispatchResult* dispatch_result, Context** on_finish,
Context* on_dispatched) override {
return false;
}
bool invalidate_cache(Context* on_finish) {
return false;
}
bool reset_existence_cache(Context* on_finish) {
return false;
}
void extent_overwritten(
uint64_t object_no, uint64_t object_off, uint64_t object_len,
uint64_t journal_tid, uint64_t new_journal_tid) {
}
int prepare_copyup(
uint64_t object_no,
io::SnapshotSparseBufferlist* snapshot_sparse_bufferlist) override {
return 0;
}
ImageCtxT* get_image_ctx() {
return m_image_ctx;
}
CacheClient* get_cache_client() {
return m_cache_client;
}
private:
int read_object(std::string file_path, ceph::bufferlist* read_data,
uint64_t offset, uint64_t length, Context *on_finish);
void handle_read_cache(ceph::immutable_obj_cache::ObjectCacheRequest* ack,
uint64_t object_no, io::ReadExtents* extents,
IOContext io_context, int read_flags,
const ZTracer::Trace &parent_trace,
io::DispatchResult* dispatch_result,
Context* on_dispatched);
int handle_register_client(bool reg);
void create_cache_session(Context* on_finish, bool is_reconnect);
ImageCtxT* m_image_ctx;
plugin::Api<ImageCtxT>& m_plugin_api;
ceph::mutex m_lock;
CacheClient *m_cache_client = nullptr;
bool m_connecting = false;
};
} // namespace cache
} // namespace librbd
extern template class librbd::cache::ParentCacheObjectDispatch<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CACHE_PARENT_CACHER_OBJECT_DISPATCH_H
| 5,482 | 32.845679 | 81 |
h
|
null |
ceph-main/src/librbd/cache/TypeTraits.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_TYPE_TRAITS_H
#define CEPH_LIBRBD_CACHE_TYPE_TRAITS_H
namespace ceph {
namespace immutable_obj_cache {
class CacheClient;
} // namespace immutable_obj_cache
} // namespace ceph
namespace librbd {
namespace cache {
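// indirection point for unit tests: a test build can specialize
// TypeTraits to substitute a mock CacheClient (cf. the "mock unit testing
// support" typedef in ParentCacheObjectDispatch.h)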
template <typename ImageCtxT>
struct TypeTraits {
typedef ceph::immutable_obj_cache::CacheClient CacheClient;
};
} // namespace cache
} // namespace librbd
#endif
| 515 | 18.111111 | 70 |
h
|
null |
ceph-main/src/librbd/cache/Types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CACHE_TYPES_H
#define CEPH_LIBRBD_CACHE_TYPES_H
#include <list>
#include <string>
class Context;
namespace librbd {
namespace cache {
enum ImageCacheType {
IMAGE_CACHE_TYPE_RWL = 1,
IMAGE_CACHE_TYPE_SSD,
IMAGE_CACHE_TYPE_UNKNOWN
};
typedef std::list<Context *> Contexts;
const std::string PERSISTENT_CACHE_STATE = ".rbd_persistent_cache_state";
} // namespace cache
} // namespace librbd
#endif // CEPH_LIBRBD_CACHE_TYPES_H
| 557 | 18.241379 | 73 |
h
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.