ceph-main/src/rgw/driver/rados/rgw_gc.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "include/types.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "common/Thread.h"
#include "rgw_common.h"
#include "rgw_sal.h"
#include "rgw_rados.h"
#include "cls/rgw/cls_rgw_types.h"
#include <atomic>
class RGWGCIOManager;
class RGWGC : public DoutPrefixProvider {
CephContext *cct;
RGWRados *store;
int max_objs;
std::string *obj_names;
std::atomic<bool> down_flag = { false };
static constexpr uint64_t seed = 8675309;
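// Fixed hash seed: tag_index() hashes a chain's tag with this seed to pick
// one of the max_objs GC shard objects, so the same tag always lands on the
// same shard.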
int tag_index(const std::string& tag);
int send_chain(const cls_rgw_obj_chain& chain, const std::string& tag, optional_yield y);
class GCWorker : public Thread {
const DoutPrefixProvider *dpp;
CephContext *cct;
RGWGC *gc;
ceph::mutex lock = ceph::make_mutex("GCWorker");
ceph::condition_variable cond;
public:
GCWorker(const DoutPrefixProvider *_dpp, CephContext *_cct, RGWGC *_gc) : dpp(_dpp), cct(_cct), gc(_gc) {}
void *entry() override;
void stop();
};
GCWorker *worker;
public:
RGWGC() : cct(NULL), store(NULL), max_objs(0), obj_names(NULL), worker(NULL) {}
~RGWGC() {
stop_processor();
finalize();
}
std::vector<bool> transitioned_objects_cache;
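// Per-shard cache: set once a GC shard object is known to have transitioned
// from the omap-based log to the queue-based format (see rgw_gc_log.cc),
// so subsequent operations need not re-probe it.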
std::tuple<int, std::optional<cls_rgw_obj_chain>> send_split_chain(const cls_rgw_obj_chain& chain, const std::string& tag, optional_yield y);
// asynchronously defer garbage collection on an object that's still being read
int async_defer_chain(const std::string& tag, const cls_rgw_obj_chain& info);
// callback for when async_defer_chain() fails with ECANCELED
void on_defer_canceled(const cls_rgw_gc_obj_info& info);
int remove(int index, const std::vector<std::string>& tags, librados::AioCompletion **pc, optional_yield y);
int remove(int index, int num_entries, optional_yield y);
void initialize(CephContext *_cct, RGWRados *_store, optional_yield y);
void finalize();
int list(int *index, std::string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue);
void list_init(int *index) { *index = 0; }
int process(int index, int process_max_secs, bool expired_only,
RGWGCIOManager& io_manager, optional_yield y);
int process(bool expired_only, optional_yield y);
bool going_down();
void start_processor();
void stop_processor();
CephContext *get_cct() const override { return store->ctx(); }
unsigned get_subsys() const;
std::ostream& gen_prefix(std::ostream& out) const;
};
ceph-main/src/rgw/driver/rados/rgw_gc_log.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_gc_log.h"
#include "cls/rgw/cls_rgw_client.h"
#include "cls/rgw_gc/cls_rgw_gc_client.h"
#include "cls/version/cls_version_client.h"
void gc_log_init2(librados::ObjectWriteOperation& op,
uint64_t max_size, uint64_t max_deferred)
{
obj_version objv; // objv.ver = 0
cls_version_check(op, objv, VER_COND_EQ);
cls_rgw_gc_queue_init(op, max_size, max_deferred);
objv.ver = 1;
cls_version_set(op, objv);
}
void gc_log_enqueue1(librados::ObjectWriteOperation& op,
uint32_t expiration, cls_rgw_gc_obj_info& info)
{
obj_version objv; // objv.ver = 0
cls_version_check(op, objv, VER_COND_EQ);
cls_rgw_gc_set_entry(op, expiration, info);
}
void gc_log_enqueue2(librados::ObjectWriteOperation& op,
uint32_t expiration, const cls_rgw_gc_obj_info& info)
{
obj_version objv;
objv.ver = 1;
cls_version_check(op, objv, VER_COND_EQ);
cls_rgw_gc_queue_enqueue(op, expiration, info);
}
void gc_log_defer1(librados::ObjectWriteOperation& op,
uint32_t expiration, const cls_rgw_gc_obj_info& info)
{
obj_version objv; // objv.ver = 0
cls_version_check(op, objv, VER_COND_EQ);
cls_rgw_gc_defer_entry(op, expiration, info.tag);
}
void gc_log_defer2(librados::ObjectWriteOperation& op,
uint32_t expiration, const cls_rgw_gc_obj_info& info)
{
obj_version objv;
objv.ver = 1;
cls_version_check(op, objv, VER_COND_EQ);
cls_rgw_gc_queue_defer_entry(op, expiration, info);
// TODO: conditional on whether omap is known to be empty
cls_rgw_gc_remove(op, {info.tag});
}
ceph-main/src/rgw/driver/rados/rgw_lc_tier.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <string.h>
#include <iostream>
#include <map>
#include "common/Formatter.h"
#include "common/errno.h"
#include "rgw_lc.h"
#include "rgw_lc_tier.h"
#include "rgw_string.h"
#include "rgw_zone.h"
#include "rgw_common.h"
#include "rgw_rest.h"
#include "svc_zone.h"
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/algorithm/string/predicate.hpp>
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
struct rgw_lc_multipart_part_info {
int part_num{0};
uint64_t ofs{0};
uint64_t size{0};
std::string etag;
};
struct rgw_lc_obj_properties {
ceph::real_time mtime;
std::string etag;
uint64_t versioned_epoch{0};
std::map<std::string, RGWTierACLMapping>& target_acl_mappings;
std::string target_storage_class;
rgw_lc_obj_properties(ceph::real_time _mtime, std::string _etag,
uint64_t _versioned_epoch, std::map<std::string,
RGWTierACLMapping>& _t_acl_mappings,
std::string _t_storage_class) :
mtime(_mtime), etag(_etag),
versioned_epoch(_versioned_epoch),
target_acl_mappings(_t_acl_mappings),
target_storage_class(_t_storage_class) {}
};
struct rgw_lc_multipart_upload_info {
std::string upload_id;
uint64_t obj_size;
ceph::real_time mtime;
std::string etag;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(upload_id, bl);
encode(obj_size, bl);
encode(mtime, bl);
encode(etag, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(upload_id, bl);
decode(obj_size, bl);
decode(mtime, bl);
decode(etag, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(rgw_lc_multipart_upload_info)
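/* The upload state above is persisted in a raw status object (see
* read/put/delete_upload_status below) so an interrupted multipart
* transition can later check whether the recorded mtime/size/etag still
* match the source object, and either resume the upload or abort and
* restart it. */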
static inline string get_key_instance(const rgw_obj_key& key)
{
if (!key.instance.empty() &&
!key.have_null_instance()) {
return "-" + key.instance;
}
return "";
}
static inline string get_key_oid(const rgw_obj_key& key)
{
string oid = key.name;
if (!key.instance.empty() &&
!key.have_null_instance()) {
oid += string("-") + key.instance;
}
return oid;
}
static inline string obj_to_aws_path(const rgw_obj& obj)
{
string path = obj.bucket.name + "/" + get_key_oid(obj.key);
return path;
}
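/* For illustration: bucket "b" with key name "k" and a non-null instance
* "v1" maps to the remote resource path "b/k-v1"; a current or
* null-instance object maps to just "b/k". */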
static int read_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver,
const rgw_raw_obj *status_obj, rgw_lc_multipart_upload_info *status)
{
int ret = 0;
rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!rados) {
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl;
return -1;
}
auto& pool = status_obj->pool;
const auto oid = status_obj->oid;
auto sysobj = rados->svc()->sysobj;
bufferlist bl;
ret = rgw_get_system_obj(sysobj, pool, oid, bl, nullptr, nullptr,
null_yield, dpp);
if (ret < 0) {
return ret;
}
if (bl.length() > 0) {
try {
auto p = bl.cbegin();
status->decode(p);
} catch (buffer::error& e) {
ldpp_dout(dpp, 10) << "failed to decode status obj: "
<< e.what() << dendl;
return -EIO;
}
} else {
return -EIO;
}
return 0;
}
static int put_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver,
const rgw_raw_obj *status_obj, rgw_lc_multipart_upload_info *status)
{
int ret = 0;
rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!rados) {
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl;
return -1;
}
auto& pool = status_obj->pool;
const auto oid = status_obj->oid;
auto sysobj = rados->svc()->sysobj;
bufferlist bl;
status->encode(bl);
ret = rgw_put_system_obj(dpp, sysobj, pool, oid, bl, true, nullptr,
real_time{}, null_yield);
return ret;
}
static int delete_upload_status(const DoutPrefixProvider *dpp, rgw::sal::Driver *driver,
const rgw_raw_obj *status_obj)
{
int ret = 0;
rgw::sal::RadosStore *rados = dynamic_cast<rgw::sal::RadosStore*>(driver);
if (!rados) {
ldpp_dout(dpp, 0) << "ERROR: Not a RadosStore. Cannot be transitioned to cloud." << dendl;
return -1;
}
auto& pool = status_obj->pool;
const auto oid = status_obj->oid;
auto sysobj = rados->svc()->sysobj;
ret = rgw_delete_system_obj(dpp, sysobj, pool, oid, nullptr, null_yield);
return ret;
}
static std::set<string> keep_headers = { "CONTENT_TYPE",
"CONTENT_ENCODING",
"CONTENT_DISPOSITION",
"CONTENT_LANGUAGE" };
/*
* mapping between rgw object attrs and output http fields
*
static const struct rgw_http_attr base_rgw_to_http_attrs[] = {
{ RGW_ATTR_CONTENT_LANG, "Content-Language" },
{ RGW_ATTR_EXPIRES, "Expires" },
{ RGW_ATTR_CACHE_CONTROL, "Cache-Control" },
{ RGW_ATTR_CONTENT_DISP, "Content-Disposition" },
{ RGW_ATTR_CONTENT_ENC, "Content-Encoding" },
{ RGW_ATTR_USER_MANIFEST, "X-Object-Manifest" },
{ RGW_ATTR_X_ROBOTS_TAG , "X-Robots-Tag" },
{ RGW_ATTR_STORAGE_CLASS , "X-Amz-Storage-Class" },
// RGW_ATTR_AMZ_WEBSITE_REDIRECT_LOCATION header depends on access mode:
// S3 endpoint: x-amz-website-redirect-location
// S3Website endpoint: Location
{ RGW_ATTR_AMZ_WEBSITE_REDIRECT_LOCATION, "x-amz-website-redirect-location" },
}; */
static void init_headers(map<string, bufferlist>& attrs,
map<string, string>& headers)
{
for (auto& kv : attrs) {
const char * name = kv.first.c_str();
const auto aiter = rgw_to_http_attrs.find(name);
if (aiter != std::end(rgw_to_http_attrs)) {
headers[aiter->second] = rgw_bl_str(kv.second);
} else if (strncmp(name, RGW_ATTR_META_PREFIX,
sizeof(RGW_ATTR_META_PREFIX)-1) == 0) {
name += sizeof(RGW_ATTR_META_PREFIX) - 1;
string sname(name);
headers[string(RGW_ATTR_META_PREFIX) + sname] = rgw_bl_str(kv.second);
} else if (strcmp(name,RGW_ATTR_CONTENT_TYPE) == 0) {
headers["CONTENT_TYPE"] = rgw_bl_str(kv.second);
}
}
}
/* Read object or just head from remote endpoint. For now initializes only headers,
* but can be extended to fetch etag, mtime etc if needed.
*/
static int cloud_tier_get_object(RGWLCCloudTierCtx& tier_ctx, bool head,
std::map<std::string, std::string>& headers) {
RGWRESTConn::get_obj_params req_params;
std::string target_obj_name;
int ret = 0;
rgw_lc_obj_properties obj_properties(tier_ctx.o.meta.mtime, tier_ctx.o.meta.etag,
tier_ctx.o.versioned_epoch, tier_ctx.acl_mappings,
tier_ctx.target_storage_class);
std::string etag;
RGWRESTStreamRWRequest *in_req;
rgw_bucket dest_bucket;
dest_bucket.name = tier_ctx.target_bucket_name;
target_obj_name = tier_ctx.bucket_info.bucket.name + "/" +
tier_ctx.obj->get_name();
if (!tier_ctx.o.is_current()) {
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
rgw_obj dest_obj(dest_bucket, rgw_obj_key(target_obj_name));
/* init input connection */
req_params.get_op = !head;
req_params.prepend_metadata = true;
req_params.rgwx_stat = true;
req_params.sync_manifest = true;
req_params.skip_decrypt = true;
ret = tier_ctx.conn.get_obj(tier_ctx.dpp, dest_obj, req_params, true /* send */, &in_req);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: " << __func__ << "(): conn.get_obj() returned ret=" << ret << dendl;
return ret;
}
/* fetch headers */
ret = tier_ctx.conn.complete_request(in_req, nullptr, nullptr, nullptr, nullptr, &headers, null_yield);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(tier_ctx.dpp, 20) << "ERROR: " << __func__ << "(): conn.complete_request() returned ret=" << ret << dendl;
return ret;
}
return 0;
}
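/* An object counts as already tiered when the cloud copy carries a
* rgwx-source-mtime attribute equal to the local mtime. That attribute is
* written by init_send_attrs() at transition time, so a match means this
* exact version was uploaded before. */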
static bool is_already_tiered(const DoutPrefixProvider *dpp,
std::map<std::string, std::string>& headers,
ceph::real_time& mtime) {
char buf[32];
map<string, string> attrs = headers;
for (const auto& a : attrs) {
ldpp_dout(dpp, 20) << "GetCrf attr[" << a.first << "] = " << a.second <<dendl;
}
utime_t ut(mtime);
snprintf(buf, sizeof(buf), "%lld.%09lld",
(long long)ut.sec(),
(long long)ut.nsec());
string s = attrs["X_AMZ_META_RGWX_SOURCE_MTIME"];
if (s.empty())
s = attrs["x_amz_meta_rgwx_source_mtime"];
ldpp_dout(dpp, 20) << "is_already_tiered attrs[X_AMZ_META_RGWX_SOURCE_MTIME] = " << s <<dendl;
ldpp_dout(dpp, 20) << "is_already_tiered mtime buf = " << buf <<dendl;
if (!s.empty() && !strcmp(s.c_str(), buf)) {
return true;
}
return false;
}
/* Read object locally & also initialize dest rest obj based on read attrs */
class RGWLCStreamRead
{
CephContext *cct;
const DoutPrefixProvider *dpp;
std::map<std::string, bufferlist> attrs;
uint64_t obj_size;
rgw::sal::Object *obj;
const real_time &mtime;
bool multipart{false};
uint64_t m_part_size{0};
off_t m_part_off{0};
off_t m_part_end{0};
std::unique_ptr<rgw::sal::Object::ReadOp> read_op;
off_t ofs{0};
off_t end{0};
rgw_rest_obj rest_obj;
int retcode{0};
public:
RGWLCStreamRead(CephContext *_cct, const DoutPrefixProvider *_dpp,
rgw::sal::Object *_obj, const real_time &_mtime) :
cct(_cct), dpp(_dpp), obj(_obj), mtime(_mtime),
read_op(obj->get_read_op()) {}
~RGWLCStreamRead() {};
int set_range(off_t _ofs, off_t _end);
int get_range(off_t &_ofs, off_t &_end);
rgw_rest_obj& get_rest_obj();
void set_multipart(uint64_t part_size, off_t part_off, off_t part_end);
int init();
int init_rest_obj();
int read(off_t ofs, off_t end, RGWGetDataCB *out_cb);
};
/* Send PUT op to remote endpoint */
class RGWLCCloudStreamPut
{
const DoutPrefixProvider *dpp;
rgw_lc_obj_properties obj_properties;
RGWRESTConn& conn;
const rgw_obj& dest_obj;
std::string etag;
RGWRESTStreamS3PutObj *out_req{nullptr};
struct multipart_info {
bool is_multipart{false};
std::string upload_id;
int part_num{0};
uint64_t part_size{0};
} multipart;
int retcode{0};
public:
RGWLCCloudStreamPut(const DoutPrefixProvider *_dpp,
const rgw_lc_obj_properties& _obj_properties,
RGWRESTConn& _conn,
const rgw_obj& _dest_obj) :
dpp(_dpp), obj_properties(_obj_properties), conn(_conn), dest_obj(_dest_obj) {
}
int init();
static bool keep_attr(const std::string& h);
static void init_send_attrs(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj,
const rgw_lc_obj_properties& obj_properties,
std::map<std::string, std::string>& attrs);
void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj);
void handle_headers(const std::map<std::string, std::string>& headers);
bool get_etag(std::string *petag);
void set_multipart(const std::string& upload_id, int part_num, uint64_t part_size);
int send();
RGWGetDataCB *get_cb();
int complete_request();
};
int RGWLCStreamRead::set_range(off_t _ofs, off_t _end) {
ofs = _ofs;
end = _end;
return 0;
}
int RGWLCStreamRead::get_range(off_t &_ofs, off_t &_end) {
_ofs = ofs;
_end = end;
return 0;
}
rgw_rest_obj& RGWLCStreamRead::get_rest_obj() {
return rest_obj;
}
void RGWLCStreamRead::set_multipart(uint64_t part_size, off_t part_off, off_t part_end) {
multipart = true;
m_part_size = part_size;
m_part_off = part_off;
m_part_end = part_end;
}
int RGWLCStreamRead::init() {
optional_yield y = null_yield;
real_time read_mtime;
read_op->params.lastmod = &read_mtime;
int ret = read_op->prepare(y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to prepare read_op, ret = " << ret << dendl;
return ret;
}
if (read_mtime != mtime) {
/* raced */
return -ECANCELED;
}
attrs = obj->get_attrs();
obj_size = obj->get_obj_size();
ret = init_rest_obj();
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to initialize rest_obj, ret = " << ret << dendl;
return ret;
}
if (!multipart) {
set_range(0, obj_size - 1);
} else {
set_range(m_part_off, m_part_end);
}
return 0;
}
int RGWLCStreamRead::init_rest_obj() {
/* Initialize rgw_rest_obj.
* Reference: do_decode_rest_obj
* Check how to copy headers content */
rest_obj.init(obj->get_key());
if (!multipart) {
rest_obj.content_len = obj_size;
} else {
rest_obj.content_len = m_part_size;
}
/* For multipart, attrs are sent as part of InitMultipartCR itself */
if (multipart) {
return 0;
}
/*
* XXX: verify if its right way to copy attrs into rest obj
*/
init_headers(attrs, rest_obj.attrs);
rest_obj.acls.set_ctx(cct);
const auto aiter = attrs.find(RGW_ATTR_ACL);
if (aiter != attrs.end()) {
bufferlist& bl = aiter->second;
auto bliter = bl.cbegin();
try {
rest_obj.acls.decode(bliter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode policy off attrs" << dendl;
return -EIO;
}
} else {
ldpp_dout(dpp, 0) << "WARNING: acl attrs not provided" << dendl;
}
return 0;
}
int RGWLCStreamRead::read(off_t ofs, off_t end, RGWGetDataCB *out_cb) {
int ret = read_op->iterate(dpp, ofs, end, out_cb, null_yield);
return ret;
}
int RGWLCCloudStreamPut::init() {
/* init output connection */
if (multipart.is_multipart) {
char buf[32];
snprintf(buf, sizeof(buf), "%d", multipart.part_num);
rgw_http_param_pair params[] = { { "uploadId", multipart.upload_id.c_str() },
{ "partNumber", buf },
{ nullptr, nullptr } };
conn.put_obj_send_init(dest_obj, params, &out_req);
} else {
conn.put_obj_send_init(dest_obj, nullptr, &out_req);
}
return 0;
}
bool RGWLCCloudStreamPut::keep_attr(const string& h) {
return (keep_headers.find(h) != keep_headers.end());
}
void RGWLCCloudStreamPut::init_send_attrs(const DoutPrefixProvider *dpp,
const rgw_rest_obj& rest_obj,
const rgw_lc_obj_properties& obj_properties,
std::map<string, string>& attrs) {
map<string, RGWTierACLMapping>& acl_mappings(obj_properties.target_acl_mappings);
const std::string& target_storage_class = obj_properties.target_storage_class;
attrs.clear();
for (auto& hi : rest_obj.attrs) {
if (keep_attr(hi.first)) {
attrs.insert(hi);
} else {
std::string s1 = boost::algorithm::to_lower_copy(hi.first);
const char* k = std::strstr(s1.c_str(), "x-amz");
if (k) {
attrs[k] = hi.second;
}
}
}
const auto acl = rest_obj.acls.get_acl();
map<int, vector<string> > access_map;
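/* Group mapped grantees by permission bits; each group is emitted below as
* one x-amz-grant-<perm> header. FULL_CONTROL grants are kept whole rather
* than expanded into their component permissions. */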
if (!acl_mappings.empty()) {
for (auto& grant : acl.get_grant_map()) {
auto& orig_grantee = grant.first;
auto& perm = grant.second;
string grantee;
const auto& am = acl_mappings;
const auto iter = am.find(orig_grantee);
if (iter == am.end()) {
ldpp_dout(dpp, 20) << "acl_mappings: Could not find " << orig_grantee << " .. ignoring" << dendl;
continue;
}
grantee = iter->second.dest_id;
string type;
switch (iter->second.type) {
case ACL_TYPE_CANON_USER:
type = "id";
break;
case ACL_TYPE_EMAIL_USER:
type = "emailAddress";
break;
case ACL_TYPE_GROUP:
type = "uri";
break;
default:
continue;
}
string tv = type + "=" + grantee;
int flags = perm.get_permission().get_permissions();
if ((flags & RGW_PERM_FULL_CONTROL) == RGW_PERM_FULL_CONTROL) {
access_map[flags].push_back(tv);
continue;
}
for (int i = 1; i <= RGW_PERM_WRITE_ACP; i <<= 1) {
if (flags & i) {
access_map[i].push_back(tv);
}
}
}
}
for (const auto& aiter : access_map) {
int grant_type = aiter.first;
string header_str("x-amz-grant-");
switch (grant_type) {
case RGW_PERM_READ:
header_str.append("read");
break;
case RGW_PERM_WRITE:
header_str.append("write");
break;
case RGW_PERM_READ_ACP:
header_str.append("read-acp");
break;
case RGW_PERM_WRITE_ACP:
header_str.append("write-acp");
break;
case RGW_PERM_FULL_CONTROL:
header_str.append("full-control");
break;
}
string s;
for (const auto& viter : aiter.second) {
if (!s.empty()) {
s.append(", ");
}
s.append(viter);
}
ldpp_dout(dpp, 20) << "acl_mappings: set acl: " << header_str << "=" << s << dendl;
attrs[header_str] = s;
}
/* Copy target storage class */
if (!target_storage_class.empty()) {
attrs["x-amz-storage-class"] = target_storage_class;
} else {
attrs["x-amz-storage-class"] = "STANDARD";
}
/* New attribute to specify its transitioned from RGW */
attrs["x-amz-meta-rgwx-source"] = "rgw";
attrs["x-rgw-cloud"] = "true";
attrs["x-rgw-cloud-keep-attrs"] = "true";
char buf[32];
snprintf(buf, sizeof(buf), "%llu", (long long)obj_properties.versioned_epoch);
attrs["x-amz-meta-rgwx-versioned-epoch"] = buf;
utime_t ut(obj_properties.mtime);
snprintf(buf, sizeof(buf), "%lld.%09lld",
(long long)ut.sec(),
(long long)ut.nsec());
attrs["x-amz-meta-rgwx-source-mtime"] = buf;
attrs["x-amz-meta-rgwx-source-etag"] = obj_properties.etag;
attrs["x-amz-meta-rgwx-source-key"] = rest_obj.key.name;
if (!rest_obj.key.instance.empty()) {
attrs["x-amz-meta-rgwx-source-version-id"] = rest_obj.key.instance;
}
for (const auto& a : attrs) {
ldpp_dout(dpp, 30) << "init_send_attrs attr[" << a.first << "] = " << a.second <<dendl;
}
}
void RGWLCCloudStreamPut::send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) {
auto r = static_cast<RGWRESTStreamS3PutObj *>(out_req);
std::map<std::string, std::string> new_attrs;
if (!multipart.is_multipart) {
init_send_attrs(dpp, rest_obj, obj_properties, new_attrs);
}
r->set_send_length(rest_obj.content_len);
RGWAccessControlPolicy policy;
r->send_ready(dpp, conn.get_key(), new_attrs, policy);
}
void RGWLCCloudStreamPut::handle_headers(const map<string, string>& headers) {
for (const auto& h : headers) {
if (h.first == "ETAG") {
etag = h.second;
}
}
}
bool RGWLCCloudStreamPut::get_etag(string *petag) {
if (etag.empty()) {
return false;
}
*petag = etag;
return true;
}
void RGWLCCloudStreamPut::set_multipart(const string& upload_id, int part_num, uint64_t part_size) {
multipart.is_multipart = true;
multipart.upload_id = upload_id;
multipart.part_num = part_num;
multipart.part_size = part_size;
}
int RGWLCCloudStreamPut::send() {
int ret = RGWHTTP::send(out_req);
return ret;
}
RGWGetDataCB *RGWLCCloudStreamPut::get_cb() {
return out_req->get_out_cb();
}
int RGWLCCloudStreamPut::complete_request() {
int ret = conn.complete_request(out_req, etag, &obj_properties.mtime, null_yield);
return ret;
}
/* Read local copy and write to Cloud endpoint */
static int cloud_tier_transfer_object(const DoutPrefixProvider* dpp,
RGWLCStreamRead* readf, RGWLCCloudStreamPut* writef) {
bool sent_attrs{false};
int ret{0};
off_t ofs;
off_t end;
ret = readf->init();
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to initialize in_crf, ret = " << ret << dendl;
return ret;
}
readf->get_range(ofs, end);
rgw_rest_obj& rest_obj = readf->get_rest_obj();
if (!sent_attrs) {
ret = writef->init();
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to initialize out_crf, ret = " << ret << dendl;
return ret;
}
writef->send_ready(dpp, rest_obj);
ret = writef->send();
if (ret < 0) {
return ret;
}
sent_attrs = true;
}
ret = readf->read(ofs, end, writef->get_cb());
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to read from in_crf, ret = " << ret << dendl;
return ret;
}
ret = writef->complete_request();
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to complete request, ret = " << ret << dendl;
return ret;
}
return 0;
}
static int cloud_tier_plain_transfer(RGWLCCloudTierCtx& tier_ctx) {
int ret;
rgw_lc_obj_properties obj_properties(tier_ctx.o.meta.mtime, tier_ctx.o.meta.etag,
tier_ctx.o.versioned_epoch, tier_ctx.acl_mappings,
tier_ctx.target_storage_class);
std::string target_obj_name;
rgw_bucket dest_bucket;
dest_bucket.name = tier_ctx.target_bucket_name;
target_obj_name = tier_ctx.bucket_info.bucket.name + "/" +
tier_ctx.obj->get_name();
if (!tier_ctx.o.is_current()) {
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
rgw_obj dest_obj(dest_bucket, rgw_obj_key(target_obj_name));
tier_ctx.obj->set_atomic();
/* Prepare Read from source */
/* TODO: Define readf, writef as stack variables. For some reason,
* when used as stack variables (esp., readf), the transition seems to
* be taking lot of time eventually erroring out at times.
*/
std::shared_ptr<RGWLCStreamRead> readf;
readf.reset(new RGWLCStreamRead(tier_ctx.cct, tier_ctx.dpp,
tier_ctx.obj, tier_ctx.o.meta.mtime));
std::shared_ptr<RGWLCCloudStreamPut> writef;
writef.reset(new RGWLCCloudStreamPut(tier_ctx.dpp, obj_properties, tier_ctx.conn,
dest_obj));
/* actual Read & Write */
ret = cloud_tier_transfer_object(tier_ctx.dpp, readf.get(), writef.get());
return ret;
}
static int cloud_tier_send_multipart_part(RGWLCCloudTierCtx& tier_ctx,
const std::string& upload_id,
const rgw_lc_multipart_part_info& part_info,
std::string *petag) {
int ret;
rgw_lc_obj_properties obj_properties(tier_ctx.o.meta.mtime, tier_ctx.o.meta.etag,
tier_ctx.o.versioned_epoch, tier_ctx.acl_mappings,
tier_ctx.target_storage_class);
std::string target_obj_name;
off_t end;
rgw_bucket dest_bucket;
dest_bucket.name = tier_ctx.target_bucket_name;
target_obj_name = tier_ctx.bucket_info.bucket.name + "/" +
tier_ctx.obj->get_name();
if (!tier_ctx.o.is_current()) {
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
rgw_obj dest_obj(dest_bucket, rgw_obj_key(target_obj_name));
tier_ctx.obj->set_atomic();
/* TODO: Define readf, writef as stack variables. For some reason,
* when used as stack variables (esp., readf), the transition seems to
* be taking lot of time eventually erroring out at times. */
std::shared_ptr<RGWLCStreamRead> readf;
readf.reset(new RGWLCStreamRead(tier_ctx.cct, tier_ctx.dpp,
tier_ctx.obj, tier_ctx.o.meta.mtime));
std::shared_ptr<RGWLCCloudStreamPut> writef;
writef.reset(new RGWLCCloudStreamPut(tier_ctx.dpp, obj_properties, tier_ctx.conn,
dest_obj));
/* Prepare Read from source */
end = part_info.ofs + part_info.size - 1;
readf->set_multipart(part_info.size, part_info.ofs, end);
/* Prepare write */
writef->set_multipart(upload_id, part_info.part_num, part_info.size);
/* actual Read & Write */
ret = cloud_tier_transfer_object(tier_ctx.dpp, readf.get(), writef.get());
if (ret < 0) {
return ret;
}
if (!(writef->get_etag(petag))) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to get etag from PUT request" << dendl;
return -EIO;
}
return 0;
}
static int cloud_tier_abort_multipart(const DoutPrefixProvider *dpp,
RGWRESTConn& dest_conn, const rgw_obj& dest_obj,
const std::string& upload_id) {
int ret;
bufferlist out_bl;
bufferlist bl;
rgw_http_param_pair params[] = { { "uploadId", upload_id.c_str() }, {nullptr, nullptr} };
string resource = obj_to_aws_path(dest_obj);
ret = dest_conn.send_resource(dpp, "DELETE", resource, params, nullptr,
out_bl, &bl, nullptr, null_yield);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (ret=" << ret << ")" << dendl;
return ret;
}
return 0;
}
static int cloud_tier_init_multipart(const DoutPrefixProvider *dpp,
RGWRESTConn& dest_conn, const rgw_obj& dest_obj,
uint64_t obj_size, std::map<std::string, std::string>& attrs,
std::string& upload_id) {
bufferlist out_bl;
bufferlist bl;
struct InitMultipartResult {
std::string bucket;
std::string key;
std::string upload_id;
void decode_xml(XMLObj *obj) {
RGWXMLDecoder::decode_xml("Bucket", bucket, obj);
RGWXMLDecoder::decode_xml("Key", key, obj);
RGWXMLDecoder::decode_xml("UploadId", upload_id, obj);
}
} result;
int ret;
rgw_http_param_pair params[] = { { "uploads", nullptr }, {nullptr, nullptr} };
string resource = obj_to_aws_path(dest_obj);
ret = dest_conn.send_resource(dpp, "POST", resource, params, &attrs,
out_bl, &bl, nullptr, null_yield);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
return ret;
}
/*
* If one of the following fails we cannot abort the upload, as we cannot
* extract the upload id. And if one of them fails, that is very likely the
* least of our problems.
*/
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize xml parser for parsing multipart init response from server" << dendl;
return -EIO;
}
if (!parser.parse(out_bl.c_str(), out_bl.length(), 1)) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: failed to parse xml initmultipart: " << str << dendl;
return -EIO;
}
try {
RGWXMLDecoder::decode_xml("InitiateMultipartUploadResult", result, &parser, true);
} catch (RGWXMLDecoder::err& err) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: unexpected xml: " << str << dendl;
return -EIO;
}
ldpp_dout(dpp, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl;
upload_id = result.upload_id;
return 0;
}
static int cloud_tier_complete_multipart(const DoutPrefixProvider *dpp,
RGWRESTConn& dest_conn, const rgw_obj& dest_obj,
std::string& upload_id,
const std::map<int, rgw_lc_multipart_part_info>& parts) {
rgw_http_param_pair params[] = { { "uploadId", upload_id.c_str() }, {nullptr, nullptr} };
stringstream ss;
XMLFormatter formatter;
int ret;
bufferlist bl, out_bl;
string resource = obj_to_aws_path(dest_obj);
struct CompleteMultipartReq {
std::map<int, rgw_lc_multipart_part_info> parts;
explicit CompleteMultipartReq(const std::map<int, rgw_lc_multipart_part_info>& _parts) : parts(_parts) {}
void dump_xml(Formatter *f) const {
for (const auto& p : parts) {
f->open_object_section("Part");
encode_xml("PartNumber", p.first, f);
encode_xml("ETag", p.second.etag, f);
f->close_section();
}
}
} req_enc(parts);
struct CompleteMultipartResult {
std::string location;
std::string bucket;
std::string key;
std::string etag;
void decode_xml(XMLObj *obj) {
RGWXMLDecoder::decode_xml("Location", bucket, obj);
RGWXMLDecoder::decode_xml("Bucket", bucket, obj);
RGWXMLDecoder::decode_xml("Key", key, obj);
RGWXMLDecoder::decode_xml("ETag", etag, obj);
}
} result;
encode_xml("CompleteMultipartUpload", req_enc, &formatter);
formatter.flush(ss);
bl.append(ss.str());
ret = dest_conn.send_resource(dpp, "POST", resource, params, nullptr,
out_bl, &bl, nullptr, null_yield);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to complete multipart upload for dest object=" << dest_obj << dendl;
return ret;
}
/*
* At this point the remote endpoint has already accepted the complete
* request; if parsing its response fails there is nothing left to abort,
* so just log the error and give up.
*/
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize xml parser for parsing multipart init response from server" << dendl;
return -EIO;
}
if (!parser.parse(out_bl.c_str(), out_bl.length(), 1)) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: failed to parse xml Completemultipart: " << str << dendl;
return -EIO;
}
try {
RGWXMLDecoder::decode_xml("CompleteMultipartUploadResult", result, &parser, true);
} catch (RGWXMLDecoder::err& err) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: unexpected xml: " << str << dendl;
return -EIO;
}
ldpp_dout(dpp, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl;
return ret;
}
static int cloud_tier_abort_multipart_upload(RGWLCCloudTierCtx& tier_ctx,
const rgw_obj& dest_obj, const rgw_raw_obj& status_obj,
const std::string& upload_id) {
int ret;
ret = cloud_tier_abort_multipart(tier_ctx.dpp, tier_ctx.conn, dest_obj, upload_id);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " ret=" << ret << dendl;
/* ignore error, best effort */
}
/* remove status obj */
ret = delete_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " ret=" << ret << dendl;
// ignore error, best effort
}
return 0;
}
static int cloud_tier_multipart_transfer(RGWLCCloudTierCtx& tier_ctx) {
rgw_obj dest_obj;
uint64_t obj_size;
rgw_rest_obj rest_obj;
rgw_lc_multipart_upload_info status;
std::map<std::string, std::string> new_attrs;
rgw_raw_obj status_obj;
std::string target_obj_name;
rgw_bucket target_bucket;
int ret;
rgw_lc_obj_properties obj_properties(tier_ctx.o.meta.mtime, tier_ctx.o.meta.etag,
tier_ctx.o.versioned_epoch, tier_ctx.acl_mappings,
tier_ctx.target_storage_class);
uint32_t part_size{0};
uint32_t num_parts{0};
int cur_part{0};
uint64_t cur_ofs{0};
std::map<int, rgw_lc_multipart_part_info> parts;
obj_size = tier_ctx.o.meta.size;
target_bucket.name = tier_ctx.target_bucket_name;
target_obj_name = tier_ctx.bucket_info.bucket.name + "/" +
tier_ctx.obj->get_name();
if (!tier_ctx.o.is_current()) {
target_obj_name += get_key_instance(tier_ctx.obj->get_key());
}
dest_obj.init(target_bucket, target_obj_name);
rgw_pool pool = static_cast<rgw::sal::RadosStore*>(tier_ctx.driver)->svc()->zone->get_zone_params().log_pool;
status_obj = rgw_raw_obj(pool, "lc_multipart_" + tier_ctx.obj->get_oid());
ret = read_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj, &status);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " ret=" << ret << dendl;
return ret;
}
if (ret >= 0) {
// check here that mtime and size did not change
if (status.mtime != obj_properties.mtime || status.obj_size != obj_size ||
status.etag != obj_properties.etag) {
cloud_tier_abort_multipart_upload(tier_ctx, dest_obj, status_obj, status.upload_id);
ret = -ENOENT;
}
}
if (ret == -ENOENT) {
RGWLCStreamRead readf(tier_ctx.cct, tier_ctx.dpp, tier_ctx.obj, tier_ctx.o.meta.mtime);
readf.init();
rest_obj = readf.get_rest_obj();
RGWLCCloudStreamPut::init_send_attrs(tier_ctx.dpp, rest_obj, obj_properties, new_attrs);
ret = cloud_tier_init_multipart(tier_ctx.dpp, tier_ctx.conn, dest_obj, obj_size, new_attrs, status.upload_id);
if (ret < 0) {
return ret;
}
status.obj_size = obj_size;
status.mtime = obj_properties.mtime;
status.etag = obj_properties.etag;
ret = put_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj, &status);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to driver multipart upload state, ret=" << ret << dendl;
// continue with upload anyway
}
#define MULTIPART_MAX_PARTS 10000
uint64_t min_part_size = obj_size / MULTIPART_MAX_PARTS;
uint64_t min_conf_size = tier_ctx.multipart_min_part_size;
if (min_conf_size < MULTIPART_MIN_POSSIBLE_PART_SIZE) {
min_conf_size = MULTIPART_MIN_POSSIBLE_PART_SIZE;
}
part_size = std::max(min_conf_size, min_part_size);
num_parts = (obj_size + part_size - 1) / part_size;
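/* Worked example (assuming the configured minimum equals
* DEFAULT_MULTIPART_SYNC_PART_SIZE, 32 MiB): for a 100 GiB object,
* obj_size / MULTIPART_MAX_PARTS is roughly 10.7 MB, below the 32 MiB
* minimum, so part_size = 32 MiB and num_parts = ceil(100 GiB / 32 MiB)
* = 3200, comfortably under the 10000-part cap. */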
cur_part = 1;
cur_ofs = 0;
}
for (; (uint32_t)cur_part <= num_parts; ++cur_part) {
ldpp_dout(tier_ctx.dpp, 20) << "cur_part = "<< cur_part << ", info.ofs = " << cur_ofs << ", info.size = " << part_size << ", obj size = " << obj_size<< ", num_parts:" << num_parts << dendl;
rgw_lc_multipart_part_info& cur_part_info = parts[cur_part];
cur_part_info.part_num = cur_part;
cur_part_info.ofs = cur_ofs;
cur_part_info.size = std::min((uint64_t)part_size, obj_size - cur_ofs);
cur_ofs += cur_part_info.size;
ret = cloud_tier_send_multipart_part(tier_ctx,
status.upload_id,
cur_part_info,
&cur_part_info.etag);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to send multipart part of obj=" << tier_ctx.obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << cur_part << " (error: " << cpp_strerror(-ret) << ")" << dendl;
cloud_tier_abort_multipart_upload(tier_ctx, dest_obj, status_obj, status.upload_id);
return ret;
}
}
ret = cloud_tier_complete_multipart(tier_ctx.dpp, tier_ctx.conn, dest_obj, status.upload_id, parts);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to complete multipart upload of obj=" << tier_ctx.obj << " (error: " << cpp_strerror(-ret) << ")" << dendl;
cloud_tier_abort_multipart_upload(tier_ctx, dest_obj, status_obj, status.upload_id);
return ret;
}
/* remove status obj */
ret = delete_upload_status(tier_ctx.dpp, tier_ctx.driver, &status_obj);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to abort multipart upload obj=" << tier_ctx.obj << " upload_id=" << status.upload_id << " part number " << cur_part << " (" << cpp_strerror(-ret) << ")" << dendl;
// ignore error, best effort
}
return 0;
}
/* Check if object has already been transitioned */
static int cloud_tier_check_object(RGWLCCloudTierCtx& tier_ctx, bool& already_tiered) {
int ret;
std::map<std::string, std::string> headers;
/* Fetch Head object */
ret = cloud_tier_get_object(tier_ctx, true, headers);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to fetch HEAD from cloud for obj=" << tier_ctx.obj << " , ret = " << ret << dendl;
return ret;
}
already_tiered = is_already_tiered(tier_ctx.dpp, headers, tier_ctx.o.meta.mtime);
if (already_tiered) {
ldpp_dout(tier_ctx.dpp, 20) << "is_already_tiered true" << dendl;
} else {
ldpp_dout(tier_ctx.dpp, 20) << "is_already_tiered false..going with out_crf writing" << dendl;
}
return ret;
}
static int cloud_tier_create_bucket(RGWLCCloudTierCtx& tier_ctx) {
bufferlist out_bl;
int ret = 0;
struct CreateBucketResult {
std::string code;
void decode_xml(XMLObj *obj) {
RGWXMLDecoder::decode_xml("Code", code, obj);
}
} result;
ldpp_dout(tier_ctx.dpp, 30) << "Cloud_tier_ctx: creating bucket:" << tier_ctx.target_bucket_name << dendl;
bufferlist bl;
string resource = tier_ctx.target_bucket_name;
ret = tier_ctx.conn.send_resource(tier_ctx.dpp, "PUT", resource, nullptr, nullptr,
out_bl, &bl, nullptr, null_yield);
if (ret < 0 ) {
ldpp_dout(tier_ctx.dpp, 0) << "create target bucket : " << tier_ctx.target_bucket_name << " returned ret:" << ret << dendl;
}
if (out_bl.length() > 0) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to initialize xml parser for parsing create_bucket response from server" << dendl;
return -EIO;
}
if (!parser.parse(out_bl.c_str(), out_bl.length(), 1)) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(tier_ctx.dpp, 5) << "ERROR: failed to parse xml createbucket: " << str << dendl;
return -EIO;
}
try {
RGWXMLDecoder::decode_xml("Error", result, &parser, true);
} catch (RGWXMLDecoder::err& err) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(tier_ctx.dpp, 5) << "ERROR: unexpected xml: " << str << dendl;
return -EIO;
}
if (result.code != "BucketAlreadyOwnedByYou" && result.code != "BucketAlreadyExists") {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: Creating target bucket failed with error: " << result.code << dendl;
return -EIO;
}
}
return 0;
}
int rgw_cloud_tier_transfer_object(RGWLCCloudTierCtx& tier_ctx, std::set<std::string>& cloud_targets) {
int ret = 0;
// check if target_path is already created
std::set<std::string>::iterator it;
it = cloud_targets.find(tier_ctx.target_bucket_name);
tier_ctx.target_bucket_created = (it != cloud_targets.end());
/* If run first time attempt to create the target bucket */
if (!tier_ctx.target_bucket_created) {
ret = cloud_tier_create_bucket(tier_ctx);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to create target bucket on the cloud endpoint ret=" << ret << dendl;
return ret;
}
tier_ctx.target_bucket_created = true;
cloud_targets.insert(tier_ctx.target_bucket_name);
}
/* Since multiple zones may try to transition the same object to the cloud,
* verify if the object is already transitioned. And since its just a best
* effort, do not bail out in case of any errors.
*/
bool already_tiered = false;
ret = cloud_tier_check_object(tier_ctx, already_tiered);
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to check object on the cloud endpoint ret=" << ret << dendl;
}
if (already_tiered) {
ldpp_dout(tier_ctx.dpp, 20) << "Object (" << tier_ctx.o.key << ") is already tiered" << dendl;
return 0;
}
uint64_t size = tier_ctx.o.meta.size;
uint64_t multipart_sync_threshold = tier_ctx.multipart_sync_threshold;
if (multipart_sync_threshold < MULTIPART_MIN_POSSIBLE_PART_SIZE) {
multipart_sync_threshold = MULTIPART_MIN_POSSIBLE_PART_SIZE;
}
if (size < multipart_sync_threshold) {
ret = cloud_tier_plain_transfer(tier_ctx);
} else {
tier_ctx.is_multipart_upload = true;
ret = cloud_tier_multipart_transfer(tier_ctx);
}
if (ret < 0) {
ldpp_dout(tier_ctx.dpp, 0) << "ERROR: failed to transition object ret=" << ret << dendl;
}
return ret;
}
ceph-main/src/rgw/driver/rados/rgw_lc_tier.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_lc.h"
#include "rgw_rest_conn.h"
#include "rgw_rados.h"
#include "rgw_zone.h"
#include "rgw_sal_rados.h"
#include "rgw_cr_rest.h"
#define DEFAULT_MULTIPART_SYNC_PART_SIZE (32 * 1024 * 1024)
#define MULTIPART_MIN_POSSIBLE_PART_SIZE (5 * 1024 * 1024)
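/* Objects of at least multipart_sync_threshold bytes are transitioned via
* multipart upload (see rgw_cloud_tier_transfer_object); both the threshold
* and the per-part size are clamped to MULTIPART_MIN_POSSIBLE_PART_SIZE,
* matching S3's 5 MiB minimum part size. */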
struct RGWLCCloudTierCtx {
CephContext *cct;
const DoutPrefixProvider *dpp;
/* Source */
rgw_bucket_dir_entry& o;
rgw::sal::Driver *driver;
RGWBucketInfo& bucket_info;
std::string storage_class;
rgw::sal::Object *obj;
/* Remote */
RGWRESTConn& conn;
std::string target_bucket_name;
std::string target_storage_class;
std::map<std::string, RGWTierACLMapping> acl_mappings;
uint64_t multipart_min_part_size;
uint64_t multipart_sync_threshold;
bool is_multipart_upload{false};
bool target_bucket_created{true};
RGWLCCloudTierCtx(CephContext* _cct, const DoutPrefixProvider *_dpp,
rgw_bucket_dir_entry& _o, rgw::sal::Driver *_driver,
RGWBucketInfo &_binfo, rgw::sal::Object *_obj,
RGWRESTConn& _conn, std::string& _bucket,
std::string& _storage_class) :
cct(_cct), dpp(_dpp), o(_o), driver(_driver), bucket_info(_binfo),
obj(_obj), conn(_conn), target_bucket_name(_bucket),
target_storage_class(_storage_class) {}
};
/* Transition object to cloud endpoint */
int rgw_cloud_tier_transfer_object(RGWLCCloudTierCtx& tier_ctx, std::set<std::string>& cloud_targets);
ceph-main/src/rgw/driver/rados/rgw_log_backing.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "cls/log/cls_log_client.h"
#include "cls/version/cls_version_client.h"
#include "rgw_log_backing.h"
#include "rgw_tools.h"
#include "cls_fifo_legacy.h"
using namespace std::chrono_literals;
namespace cb = ceph::buffer;
static constexpr auto dout_subsys = ceph_subsys_rgw;
enum class shard_check { dne, omap, fifo, corrupt };
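// Result of probing one log shard object: dne = the object does not exist,
// omap = it is (or must fall back to being) omap-backed, fifo = it is
// FIFO-backed, corrupt = the probe failed outright.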
inline std::ostream& operator <<(std::ostream& m, const shard_check& t) {
switch (t) {
case shard_check::dne:
return m << "shard_check::dne";
case shard_check::omap:
return m << "shard_check::omap";
case shard_check::fifo:
return m << "shard_check::fifo";
case shard_check::corrupt:
return m << "shard_check::corrupt";
}
return m << "shard_check::UNKNOWN=" << static_cast<uint32_t>(t);
}
namespace {
/// Return the shard type, and a bool to see whether it has entries.
shard_check
probe_shard(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
bool& fifo_unsupported, optional_yield y)
{
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " probing oid=" << oid
<< dendl;
if (!fifo_unsupported) {
std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
auto r = rgw::cls::fifo::FIFO::open(dpp, ioctx, oid,
&fifo, y,
std::nullopt, true);
switch (r) {
case 0:
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": oid=" << oid << " is FIFO"
<< dendl;
return shard_check::fifo;
case -ENODATA:
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": oid=" << oid << " is empty and therefore OMAP"
<< dendl;
return shard_check::omap;
case -ENOENT:
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": oid=" << oid << " does not exist"
<< dendl;
return shard_check::dne;
case -EPERM:
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": FIFO is unsupported, marking."
<< dendl;
fifo_unsupported = true;
return shard_check::omap;
default:
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": error probing: r=" << r
<< ", oid=" << oid << dendl;
return shard_check::corrupt;
}
} else {
// Since FIFO is unsupported, OMAP is the only alternative
return shard_check::omap;
}
}
tl::expected<log_type, bs::error_code>
handle_dne(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx,
log_type def,
std::string oid,
bool fifo_unsupported,
optional_yield y)
{
if (def == log_type::fifo) {
if (fifo_unsupported) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " WARNING: FIFO set as default but not supported by OSD. "
<< "Falling back to OMAP." << dendl;
return log_type::omap;
}
std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid,
&fifo, y,
std::nullopt);
if (r < 0) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " error creating FIFO: r=" << r
<< ", oid=" << oid << dendl;
return tl::unexpected(bs::error_code(-r, bs::system_category()));
}
}
return def;
}
}
tl::expected<log_type, bs::error_code>
log_backing_type(const DoutPrefixProvider *dpp,
librados::IoCtx& ioctx,
log_type def,
int shards,
const fu2::unique_function<std::string(int) const>& get_oid,
optional_yield y)
{
auto check = shard_check::dne;
bool fifo_unsupported = false;
for (int i = 0; i < shards; ++i) {
auto c = probe_shard(dpp, ioctx, get_oid(i), fifo_unsupported, y);
if (c == shard_check::corrupt)
return tl::unexpected(bs::error_code(EIO, bs::system_category()));
if (c == shard_check::dne) continue;
if (check == shard_check::dne) {
check = c;
continue;
}
if (check != c) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " clashing types: check=" << check
<< ", c=" << c << dendl;
return tl::unexpected(bs::error_code(EIO, bs::system_category()));
}
}
if (check == shard_check::corrupt) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< " should be unreachable!" << dendl;
return tl::unexpected(bs::error_code(EIO, bs::system_category()));
}
if (check == shard_check::dne)
return handle_dne(dpp, ioctx,
def,
get_oid(0),
fifo_unsupported,
y);
return (check == shard_check::fifo ? log_type::fifo : log_type::omap);
}
bs::error_code log_remove(const DoutPrefixProvider *dpp,
librados::IoCtx& ioctx,
int shards,
const fu2::unique_function<std::string(int) const>& get_oid,
bool leave_zero,
optional_yield y)
{
bs::error_code ec;
for (int i = 0; i < shards; ++i) {
auto oid = get_oid(i);
rados::cls::fifo::info info;
uint32_t part_header_size = 0, part_entry_overhead = 0;
auto r = rgw::cls::fifo::get_meta(dpp, ioctx, oid, std::nullopt, &info,
&part_header_size, &part_entry_overhead,
0, y, true);
if (r == -ENOENT) continue;
if (r == 0 && info.head_part_num > -1) {
for (auto j = info.tail_part_num; j <= info.head_part_num; ++j) {
librados::ObjectWriteOperation op;
op.remove();
auto part_oid = info.part_oid(j);
auto subr = rgw_rados_operate(dpp, ioctx, part_oid, &op, y);
if (subr < 0 && subr != -ENOENT) {
if (!ec)
ec = bs::error_code(-subr, bs::system_category());
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed removing FIFO part: part_oid=" << part_oid
<< ", subr=" << subr << dendl;
}
}
}
if (r < 0 && r != -ENODATA) {
if (!ec)
ec = bs::error_code(-r, bs::system_category());
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed checking FIFO part: oid=" << oid
<< ", r=" << r << dendl;
}
librados::ObjectWriteOperation op;
if (i == 0 && leave_zero) {
// Leave shard 0 in existence, but remove contents and
// omap. cls_lock stores things in the xattrs. And sync needs to
// rendezvous with locks on generation 0 shard 0.
op.omap_set_header({});
op.omap_clear();
op.truncate(0);
} else {
op.remove();
}
r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r < 0 && r != -ENOENT) {
if (!ec)
ec = bs::error_code(-r, bs::system_category());
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed removing shard: oid=" << oid
<< ", r=" << r << dendl;
}
}
return ec;
}
logback_generations::~logback_generations() {
if (watchcookie > 0) {
auto cct = static_cast<CephContext*>(ioctx.cct());
auto r = ioctx.unwatch2(watchcookie);
if (r < 0) {
lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed unwatching oid=" << oid
<< ", r=" << r << dendl;
}
}
}
bs::error_code logback_generations::setup(const DoutPrefixProvider *dpp,
log_type def,
optional_yield y) noexcept
{
try {
// First, read.
auto cct = static_cast<CephContext*>(ioctx.cct());
auto res = read(dpp, y);
if (!res && res.error() != bs::errc::no_such_file_or_directory) {
return res.error();
}
if (res) {
std::unique_lock lock(m);
std::tie(entries_, version) = std::move(*res);
} else {
// Are we the first? Then create generation 0 and the generations
// metadata.
librados::ObjectWriteOperation op;
auto type = log_backing_type(dpp, ioctx, def, shards,
[this](int shard) {
return this->get_oid(0, shard);
}, y);
if (!type)
return type.error();
logback_generation l;
l.type = *type;
std::unique_lock lock(m);
version.ver = 1;
static constexpr auto TAG_LEN = 24;
version.tag.clear();
append_rand_alpha(cct, version.tag, version.tag, TAG_LEN);
op.create(true);
cls_version_set(op, version);
cb::list bl;
entries_.emplace(0, std::move(l));
encode(entries_, bl);
lock.unlock();
op.write_full(bl);
auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed writing oid=" << oid
<< ", r=" << r << dendl;
return bs::error_code(-r, bs::system_category());
}
// Did someone race us? Then re-read.
if (r != 0) {
res = read(dpp, y);
if (!res)
return res.error();
if (res->first.empty())
return bs::error_code(EIO, bs::system_category());
auto l = res->first.begin()->second;
// In the unlikely event that someone raced us, created
// generation zero, incremented, then erased generation zero,
// don't leave generation zero lying around.
if (l.gen_id != 0) {
auto ec = log_remove(dpp, ioctx, shards,
[this](int shard) {
return this->get_oid(0, shard);
}, true, y);
if (ec) return ec;
}
std::unique_lock lock(m);
std::tie(entries_, version) = std::move(*res);
}
}
// Pass all non-empty generations to the handler
std::unique_lock lock(m);
auto i = lowest_nomempty(entries_);
entries_t e;
std::copy(i, entries_.cend(),
std::inserter(e, e.end()));
lock.unlock();
auto ec = watch();
if (ec) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed to re-establish watch, unsafe to continue: oid="
<< oid << ", ec=" << ec.message() << dendl;
}
return handle_init(std::move(e));
} catch (const std::bad_alloc&) {
return bs::error_code(ENOMEM, bs::system_category());
}
}
bs::error_code logback_generations::update(const DoutPrefixProvider *dpp, optional_yield y) noexcept
{
try {
auto res = read(dpp, y);
if (!res) {
return res.error();
}
std::unique_lock l(m);
auto& [es, v] = *res;
if (v == version) {
// Nothing to do!
return {};
}
// Check consistency and prepare update
if (es.empty()) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Read empty update." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
auto cur_lowest = lowest_nomempty(entries_);
// Straight up can't happen
assert(cur_lowest != entries_.cend());
auto new_lowest = lowest_nomempty(es);
if (new_lowest == es.cend()) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Read update with no active head." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
if (new_lowest->first < cur_lowest->first) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Tail moved wrong way." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
std::optional<uint64_t> highest_empty;
if (new_lowest->first > cur_lowest->first && new_lowest != es.begin()) {
--new_lowest;
highest_empty = new_lowest->first;
}
entries_t new_entries;
if ((es.end() - 1)->first < (entries_.end() - 1)->first) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": INCONSISTENCY! Head moved wrong way." << dendl;
return bs::error_code(EFAULT, bs::system_category());
}
if ((es.end() - 1)->first > (entries_.end() - 1)->first) {
auto ei = es.lower_bound((entries_.end() - 1)->first + 1);
std::copy(ei, es.end(), std::inserter(new_entries, new_entries.end()));
}
// Everything checks out!
version = v;
entries_ = es;
l.unlock();
if (highest_empty) {
auto ec = handle_empty_to(*highest_empty);
if (ec) return ec;
}
if (!new_entries.empty()) {
auto ec = handle_new_gens(std::move(new_entries));
if (ec) return ec;
}
} catch (const std::bad_alloc&) {
return bs::error_code(ENOMEM, bs::system_category());
}
return {};
}
auto logback_generations::read(const DoutPrefixProvider *dpp, optional_yield y) noexcept ->
tl::expected<std::pair<entries_t, obj_version>, bs::error_code>
{
try {
librados::ObjectReadOperation op;
std::unique_lock l(m);
cls_version_check(op, version, VER_COND_GE);
l.unlock();
obj_version v2;
cls_version_read(op, &v2);
cb::list bl;
op.read(0, 0, &bl, nullptr);
auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r < 0) {
if (r == -ENOENT) {
ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": oid=" << oid
<< " not found" << dendl;
} else {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed reading oid=" << oid
<< ", r=" << r << dendl;
}
return tl::unexpected(bs::error_code(-r, bs::system_category()));
}
auto bi = bl.cbegin();
entries_t e;
try {
decode(e, bi);
} catch (const cb::error& err) {
return tl::unexpected(err.code());
}
return std::pair{ std::move(e), std::move(v2) };
} catch (const std::bad_alloc&) {
return tl::unexpected(bs::error_code(ENOMEM, bs::system_category()));
}
}
bs::error_code logback_generations::write(const DoutPrefixProvider *dpp, entries_t&& e,
std::unique_lock<std::mutex>&& l_,
optional_yield y) noexcept
{
auto l = std::move(l_);
ceph_assert(l.mutex() == &m &&
l.owns_lock());
try {
librados::ObjectWriteOperation op;
cls_version_check(op, version, VER_COND_GE);
cb::list bl;
encode(e, bl);
op.write_full(bl);
cls_version_inc(op);
auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r == 0) {
entries_ = std::move(e);
version.inc();
return {};
}
l.unlock();
if (r < 0 && r != -ECANCELED) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed reading oid=" << oid
<< ", r=" << r << dendl;
return { -r, bs::system_category() };
}
if (r == -ECANCELED) {
auto ec = update(dpp, y);
if (ec) {
return ec;
} else {
return { ECANCELED, bs::system_category() };
}
}
} catch (const std::bad_alloc&) {
return { ENOMEM, bs::system_category() };
}
return {};
}
bs::error_code logback_generations::watch() noexcept {
try {
auto cct = static_cast<CephContext*>(ioctx.cct());
auto r = ioctx.watch2(oid, &watchcookie, this);
if (r < 0) {
lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed to set watch oid=" << oid
<< ", r=" << r << dendl;
return { -r, bs::system_category() };
}
} catch (const std::bad_alloc&) {
return bs::error_code(ENOMEM, bs::system_category());
}
return {};
}
bs::error_code logback_generations::new_backing(const DoutPrefixProvider *dpp,
log_type type,
optional_yield y) noexcept {
static constexpr auto max_tries = 10;
try {
auto ec = update(dpp, y);
if (ec) return ec;
auto tries = 0;
entries_t new_entries;
do {
std::unique_lock l(m);
auto last = entries_.end() - 1;
if (last->second.type == type) {
// Nothing to be done
return {};
}
auto newgenid = last->first + 1;
logback_generation newgen;
newgen.gen_id = newgenid;
newgen.type = type;
new_entries.emplace(newgenid, newgen);
auto es = entries_;
es.emplace(newgenid, std::move(newgen));
ec = write(dpp, std::move(es), std::move(l), y);
++tries;
} while (ec == bs::errc::operation_canceled &&
tries < max_tries);
if (tries >= max_tries) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": exhausted retry attempts." << dendl;
return ec;
}
if (ec) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": write failed with ec=" << ec.message() << dendl;
return ec;
}
cb::list bl, rbl;
auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y);
if (r < 0) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": notify failed with r=" << r << dendl;
return { -r, bs::system_category() };
}
ec = handle_new_gens(new_entries);
} catch (const std::bad_alloc&) {
return bs::error_code(ENOMEM, bs::system_category());
}
return {};
}
bs::error_code logback_generations::empty_to(const DoutPrefixProvider *dpp,
uint64_t gen_id,
optional_yield y) noexcept {
static constexpr auto max_tries = 10;
try {
auto ec = update(dpp, y);
if (ec) return ec;
auto tries = 0;
uint64_t newtail = 0;
do {
std::unique_lock l(m);
{
auto last = entries_.end() - 1;
if (gen_id >= last->first) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": Attempt to trim beyond the possible." << dendl;
return bs::error_code(EINVAL, bs::system_category());
}
}
auto es = entries_;
auto ei = es.upper_bound(gen_id);
if (ei == es.begin()) {
// Nothing to be done.
return {};
}
for (auto i = es.begin(); i < ei; ++i) {
newtail = i->first;
i->second.pruned = ceph::real_clock::now();
}
ec = write(dpp, std::move(es), std::move(l), y);
++tries;
} while (ec == bs::errc::operation_canceled &&
tries < max_tries);
if (tries >= max_tries) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": exhausted retry attempts." << dendl;
return ec;
}
if (ec) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": write failed with ec=" << ec.message() << dendl;
return ec;
}
cb::list bl, rbl;
auto r = rgw_rados_notify(dpp, ioctx, oid, bl, 10'000, &rbl, y);
if (r < 0) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": notify failed with r=" << r << dendl;
return { -r, bs::system_category() };
}
ec = handle_empty_to(newtail);
} catch (const std::bad_alloc&) {
return bs::error_code(ENOMEM, bs::system_category());
}
return {};
}
bs::error_code logback_generations::remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept {
static constexpr auto max_tries = 10;
try {
auto ec = update(dpp, y);
if (ec) return ec;
auto tries = 0;
entries_t new_entries;
std::unique_lock l(m);
ceph_assert(!entries_.empty());
{
auto i = lowest_nomempty(entries_);
if (i == entries_.begin()) {
return {};
}
}
entries_t es;
auto now = ceph::real_clock::now();
l.unlock();
do {
std::copy_if(entries_.cbegin(), entries_.cend(),
std::inserter(es, es.end()),
[now](const auto& e) {
if (!e.second.pruned)
return false;
auto pruned = *e.second.pruned;
return (now - pruned) >= 1h;
});
auto es2 = entries_;
for (const auto& [gen_id, e] : es) {
ceph_assert(e.pruned);
auto ec = log_remove(dpp, ioctx, shards,
[this, gen_id = gen_id](int shard) {
return this->get_oid(gen_id, shard);
}, (gen_id == 0), y);
if (ec) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": Error pruning: gen_id=" << gen_id
<< " ec=" << ec.message() << dendl;
}
if (auto i = es2.find(gen_id); i != es2.end()) {
es2.erase(i);
}
}
l.lock();
es.clear();
ec = write(dpp, std::move(es2), std::move(l), y);
++tries;
} while (ec == bs::errc::operation_canceled &&
tries < max_tries);
if (tries >= max_tries) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": exhausted retry attempts." << dendl;
return ec;
}
if (ec) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": write failed with ec=" << ec.message() << dendl;
return ec;
}
} catch (const std::bad_alloc&) {
return bs::error_code(ENOMEM, bs::system_category());
}
return {};
}
void logback_generations::handle_notify(uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl)
{
auto cct = static_cast<CephContext*>(ioctx.cct());
const DoutPrefix dp(cct, dout_subsys, "logback generations handle_notify: ");
if (notifier_id != my_id) {
auto ec = update(&dp, null_yield);
if (ec) {
lderr(cct)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": update failed, no one to report to and no safe way to continue."
<< dendl;
abort();
}
}
cb::list rbl;
ioctx.notify_ack(oid, notify_id, watchcookie, rbl);
}
void logback_generations::handle_error(uint64_t cookie, int err) {
auto cct = static_cast<CephContext*>(ioctx.cct());
auto r = ioctx.unwatch2(watchcookie);
if (r < 0) {
lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed to set unwatch oid=" << oid
<< ", r=" << r << dendl;
}
auto ec = watch();
if (ec) {
lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< ": failed to re-establish watch, unsafe to continue: oid="
<< oid << ", ec=" << ec.message() << dendl;
}
}
ceph-main/src/rgw/driver/rados/rgw_log_backing.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <optional>
#include <iostream>
#include <string>
#include <string_view>
#include <strings.h>
#include <boost/container/flat_map.hpp>
#include <boost/system/error_code.hpp>
#include <fmt/format.h>
#include "include/rados/librados.hpp"
#include "include/encoding.h"
#include "include/expected.hpp"
#include "include/function2.hpp"
#include "cls/version/cls_version_types.h"
#include "common/async/yield_context.h"
#include "common/Formatter.h"
#include "common/strtol.h"
namespace bc = boost::container;
namespace bs = boost::system;
#include "cls_fifo_legacy.h"
/// Type of log backing, stored in the mark used in the quick check,
/// and passed to checking functions.
enum class log_type {
omap = 0,
fifo = 1
};
inline void encode(const log_type& type, ceph::buffer::list& bl) {
auto t = static_cast<uint8_t>(type);
encode(t, bl);
}
inline void decode(log_type& type, bufferlist::const_iterator& bl) {
uint8_t t;
decode(t, bl);
type = static_cast<log_type>(t);
}
inline std::optional<log_type> to_log_type(std::string_view s) {
if (strncasecmp(s.data(), "omap", s.length()) == 0) {
return log_type::omap;
} else if (strncasecmp(s.data(), "fifo", s.length()) == 0) {
return log_type::fifo;
} else {
return std::nullopt;
}
}
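// Example (illustrative, not part of the API): to_log_type("fifo") and
// to_log_type("FIFO") both yield log_type::fifo, while an unrecognized
// string such as "xyz" yields std::nullopt.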
inline std::ostream& operator <<(std::ostream& m, const log_type& t) {
switch (t) {
case log_type::omap:
return m << "log_type::omap";
case log_type::fifo:
return m << "log_type::fifo";
}
return m << "log_type::UNKNOWN=" << static_cast<uint32_t>(t);
}
/// Look over the shards in a log and determine the type.
tl::expected<log_type, bs::error_code>
log_backing_type(const DoutPrefixProvider *dpp,
librados::IoCtx& ioctx,
log_type def,
int shards, //< Total number of shards
/// A function taking a shard number and
/// returning an oid.
const fu2::unique_function<std::string(int) const>& get_oid,
optional_yield y);
/// Remove all log shards and associated parts of fifos.
bs::error_code log_remove(const DoutPrefixProvider *dpp,
			  librados::IoCtx& ioctx,
			  int shards, //< Total number of shards
			  /// A function taking a shard number and
			  /// returning an oid.
			  const fu2::unique_function<std::string(int) const>& get_oid,
			  bool leave_zero,
			  optional_yield y);
struct logback_generation {
uint64_t gen_id = 0;
log_type type;
std::optional<ceph::real_time> pruned;
void encode(ceph::buffer::list& bl) const {
ENCODE_START(1, 1, bl);
encode(gen_id, bl);
encode(type, bl);
encode(pruned, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(gen_id, bl);
decode(type, bl);
decode(pruned, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(logback_generation)
inline std::ostream& operator <<(std::ostream& m, const logback_generation& g) {
return m << "[" << g.gen_id << "," << g.type << ","
<< (g.pruned ? "PRUNED" : "NOT PRUNED") << "]";
}
class logback_generations : public librados::WatchCtx2 {
public:
using entries_t = bc::flat_map<uint64_t, logback_generation>;
protected:
librados::IoCtx& ioctx;
logback_generations(librados::IoCtx& ioctx,
std::string oid,
fu2::unique_function<std::string(
uint64_t, int) const>&& get_oid,
int shards) noexcept
: ioctx(ioctx), oid(oid), get_oid(std::move(get_oid)),
shards(shards) {}
uint64_t my_id = ioctx.get_instance_id();
private:
const std::string oid;
const fu2::unique_function<std::string(uint64_t, int) const> get_oid;
protected:
const int shards;
private:
uint64_t watchcookie = 0;
obj_version version;
std::mutex m;
entries_t entries_;
tl::expected<std::pair<entries_t, obj_version>, bs::error_code>
read(const DoutPrefixProvider *dpp, optional_yield y) noexcept;
bs::error_code write(const DoutPrefixProvider *dpp, entries_t&& e, std::unique_lock<std::mutex>&& l_,
optional_yield y) noexcept;
bs::error_code setup(const DoutPrefixProvider *dpp, log_type def, optional_yield y) noexcept;
bs::error_code watch() noexcept;
auto lowest_nomempty(const entries_t& es) {
return std::find_if(es.begin(), es.end(),
[](const auto& e) {
return !e.second.pruned;
});
}
public:
/// For the use of watch/notify.
void handle_notify(uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_id,
bufferlist& bl) override final;
void handle_error(uint64_t cookie, int err) override final;
/// Public interface
virtual ~logback_generations();
template<typename T, typename... Args>
static tl::expected<std::unique_ptr<T>, bs::error_code>
init(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx_, std::string oid_,
fu2::unique_function<std::string(uint64_t, int) const>&& get_oid_,
int shards_, log_type def, optional_yield y,
Args&& ...args) noexcept {
try {
T* lgp = new T(ioctx_, std::move(oid_),
std::move(get_oid_),
shards_, std::forward<Args>(args)...);
std::unique_ptr<T> lg(lgp);
lgp = nullptr;
auto ec = lg->setup(dpp, def, y);
if (ec)
return tl::unexpected(ec);
// Obnoxiousness for C++ Compiler in Bionic Beaver
return tl::expected<std::unique_ptr<T>, bs::error_code>(std::move(lg));
} catch (const std::bad_alloc&) {
return tl::unexpected(bs::error_code(ENOMEM, bs::system_category()));
}
}
bs::error_code update(const DoutPrefixProvider *dpp, optional_yield y) noexcept;
entries_t entries() const {
return entries_;
}
bs::error_code new_backing(const DoutPrefixProvider *dpp, log_type type, optional_yield y) noexcept;
bs::error_code empty_to(const DoutPrefixProvider *dpp, uint64_t gen_id, optional_yield y) noexcept;
bs::error_code remove_empty(const DoutPrefixProvider *dpp, optional_yield y) noexcept;
// Callbacks, to be defined by descendant.
/// Handle initialization on startup
///
/// @param e All non-empty generations
virtual bs::error_code handle_init(entries_t e) noexcept = 0;
/// Handle new generations.
///
/// @param e Map of generations added since last update
virtual bs::error_code handle_new_gens(entries_t e) noexcept = 0;
/// Handle generations being marked empty
///
/// @param new_tail Lowest non-empty generation
virtual bs::error_code handle_empty_to(uint64_t new_tail) noexcept = 0;
};
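// A minimal sketch of a concrete descendant (hedged: the class name and the
// no-op callback bodies are illustrative only, not an upstream type):
//
//   class my_generations final : public logback_generations {
//    public:
//     my_generations(librados::IoCtx& ioctx, std::string oid,
//                    fu2::unique_function<std::string(uint64_t, int) const>&& get_oid,
//                    int shards)
//       : logback_generations(ioctx, std::move(oid), std::move(get_oid), shards) {}
//     bs::error_code handle_init(entries_t) noexcept override { return {}; }
//     bs::error_code handle_new_gens(entries_t) noexcept override { return {}; }
//     bs::error_code handle_empty_to(uint64_t) noexcept override { return {}; }
//   };
//
// Instances come from the init() factory, which runs setup() before
// returning, e.g.:
//
//   auto lg = logback_generations::init<my_generations>(
//       dpp, ioctx, oid, std::move(get_oid), shards, log_type::fifo, y);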
inline std::string gencursor(uint64_t gen_id, std::string_view cursor) {
return (gen_id > 0 ?
fmt::format("G{:0>20}@{}", gen_id, cursor) :
std::string(cursor));
}
inline std::pair<uint64_t, std::string_view>
cursorgen(std::string_view cursor_) {
if (cursor_.empty()) {
return { 0, "" };
}
std::string_view cursor = cursor_;
if (cursor[0] != 'G') {
return { 0, cursor };
}
cursor.remove_prefix(1);
auto gen_id = ceph::consume<uint64_t>(cursor);
  if (!gen_id || cursor.empty() || cursor[0] != '@') {
return { 0, cursor_ };
}
cursor.remove_prefix(1);
return { *gen_id, cursor };
}
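// Round-trip example (illustrative values): gencursor(0, "abc") returns
// "abc" unchanged, while gencursor(3, "abc") returns
// "G00000000000000000003@abc"; cursorgen() inverts the mapping, so a plain
// cursor parses to {0, "abc"} and the tagged form back to {3, "abc"}.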
class LazyFIFO {
librados::IoCtx& ioctx;
std::string oid;
std::mutex m;
std::unique_ptr<rgw::cls::fifo::FIFO> fifo;
int lazy_init(const DoutPrefixProvider *dpp, optional_yield y) {
std::unique_lock l(m);
if (fifo) return 0;
auto r = rgw::cls::fifo::FIFO::create(dpp, ioctx, oid, &fifo, y);
if (r) {
fifo.reset();
}
return r;
}
public:
LazyFIFO(librados::IoCtx& ioctx, std::string oid)
: ioctx(ioctx), oid(std::move(oid)) {}
int read_meta(const DoutPrefixProvider *dpp, optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
return fifo->read_meta(dpp, y);
}
int meta(const DoutPrefixProvider *dpp, rados::cls::fifo::info& info, optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
info = fifo->meta();
return 0;
}
int get_part_layout_info(const DoutPrefixProvider *dpp,
std::uint32_t& part_header_size,
std::uint32_t& part_entry_overhead,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
std::tie(part_header_size, part_entry_overhead)
= fifo->get_part_layout_info();
return 0;
}
int push(const DoutPrefixProvider *dpp,
const ceph::buffer::list& bl,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
return fifo->push(dpp, bl, y);
}
int push(const DoutPrefixProvider *dpp,
ceph::buffer::list& bl,
librados::AioCompletion* c,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->push(dpp, bl, c);
return 0;
}
int push(const DoutPrefixProvider *dpp,
const std::vector<ceph::buffer::list>& data_bufs,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
return fifo->push(dpp, data_bufs, y);
}
int push(const DoutPrefixProvider *dpp,
const std::vector<ceph::buffer::list>& data_bufs,
librados::AioCompletion* c,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->push(dpp, data_bufs, c);
return 0;
}
int list(const DoutPrefixProvider *dpp,
int max_entries, std::optional<std::string_view> markstr,
std::vector<rgw::cls::fifo::list_entry>* out,
bool* more, optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
return fifo->list(dpp, max_entries, markstr, out, more, y);
}
int list(const DoutPrefixProvider *dpp, int max_entries, std::optional<std::string_view> markstr,
std::vector<rgw::cls::fifo::list_entry>* out, bool* more,
librados::AioCompletion* c, optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->list(dpp, max_entries, markstr, out, more, c);
return 0;
}
int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
return fifo->trim(dpp, markstr, exclusive, y);
}
int trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, librados::AioCompletion* c,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->trim(dpp, markstr, exclusive, c);
return 0;
}
int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
return fifo->get_part_info(dpp, part_num, header, y);
}
int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, rados::cls::fifo::part_header* header,
librados::AioCompletion* c, optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->get_part_info(part_num, header, c);
return 0;
}
int get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function<
void(int r, rados::cls::fifo::part_header&&)>&& f,
librados::AioCompletion* c,
optional_yield y) {
auto r = lazy_init(dpp, y);
if (r < 0) return r;
fifo->get_head_info(dpp, std::move(f), c);
return 0;
}
};
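// Usage sketch (hedged; the oid is hypothetical). Construction performs no
// I/O -- the underlying FIFO is created or opened on first use:
//
//   LazyFIFO fifo(ioctx, "data_log.0");
//   int r = fifo.push(dpp, bl, y); // lazily initializes, then pushes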
ceph-main/src/rgw/driver/rados/rgw_metadata.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_metadata.h"
#include "rgw_zone.h"
#include "rgw_mdlog.h"
#include "services/svc_zone.h"
#include "services/svc_cls.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
const std::string RGWMetadataLogHistory::oid = "meta.history";
struct obj_version;
void rgw_shard_name(const string& prefix, unsigned max_shards, const string& key, string& name, int *shard_id)
{
uint32_t val = ceph_str_hash_linux(key.c_str(), key.size());
char buf[16];
if (shard_id) {
*shard_id = val % max_shards;
}
snprintf(buf, sizeof(buf), "%u", (unsigned)(val % max_shards));
name = prefix + buf;
}
void rgw_shard_name(const string& prefix, unsigned max_shards, const string& section, const string& key, string& name)
{
uint32_t val = ceph_str_hash_linux(key.c_str(), key.size());
val ^= ceph_str_hash_linux(section.c_str(), section.size());
char buf[16];
snprintf(buf, sizeof(buf), "%u", (unsigned)(val % max_shards));
name = prefix + buf;
}
void rgw_shard_name(const string& prefix, unsigned shard_id, string& name)
{
char buf[16];
snprintf(buf, sizeof(buf), "%u", shard_id);
name = prefix + buf;
}
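// Example (illustrative numbers): with prefix "meta.log." and max_shards =
// 64, a key whose hash value is 131 maps to shard_id 131 % 64 = 3 and the
// object name "meta.log.3"; the same key always lands on the same shard.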
int RGWMetadataLog::add_entry(const DoutPrefixProvider *dpp, const string& hash_key, const string& section, const string& key, bufferlist& bl) {
if (!svc.zone->need_to_log_metadata())
return 0;
string oid;
int shard_id;
rgw_shard_name(prefix, cct->_conf->rgw_md_log_max_shards, hash_key, oid, &shard_id);
mark_modified(shard_id);
real_time now = real_clock::now();
return svc.cls->timelog.add(dpp, oid, now, section, key, bl, null_yield);
}
int RGWMetadataLog::get_shard_id(const string& hash_key, int *shard_id)
{
string oid;
rgw_shard_name(prefix, cct->_conf->rgw_md_log_max_shards, hash_key, oid, shard_id);
return 0;
}
int RGWMetadataLog::store_entries_in_shard(const DoutPrefixProvider *dpp, list<cls_log_entry>& entries, int shard_id, librados::AioCompletion *completion)
{
string oid;
mark_modified(shard_id);
rgw_shard_name(prefix, shard_id, oid);
return svc.cls->timelog.add(dpp, oid, entries, completion, false, null_yield);
}
void RGWMetadataLog::init_list_entries(int shard_id, const real_time& from_time, const real_time& end_time,
const string& marker, void **handle)
{
LogListCtx *ctx = new LogListCtx();
ctx->cur_shard = shard_id;
ctx->from_time = from_time;
ctx->end_time = end_time;
ctx->marker = marker;
get_shard_oid(ctx->cur_shard, ctx->cur_oid);
*handle = (void *)ctx;
}
void RGWMetadataLog::complete_list_entries(void *handle) {
LogListCtx *ctx = static_cast<LogListCtx *>(handle);
delete ctx;
}
int RGWMetadataLog::list_entries(const DoutPrefixProvider *dpp, void *handle,
int max_entries,
list<cls_log_entry>& entries,
string *last_marker,
bool *truncated) {
LogListCtx *ctx = static_cast<LogListCtx *>(handle);
if (!max_entries) {
*truncated = false;
return 0;
}
std::string next_marker;
int ret = svc.cls->timelog.list(dpp, ctx->cur_oid, ctx->from_time, ctx->end_time,
max_entries, entries, ctx->marker,
&next_marker, truncated, null_yield);
if ((ret < 0) && (ret != -ENOENT))
return ret;
ctx->marker = std::move(next_marker);
if (last_marker) {
*last_marker = ctx->marker;
}
if (ret == -ENOENT)
*truncated = false;
return 0;
}
int RGWMetadataLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfo *info)
{
string oid;
get_shard_oid(shard_id, oid);
cls_log_header header;
int ret = svc.cls->timelog.info(dpp, oid, &header, null_yield);
if ((ret < 0) && (ret != -ENOENT))
return ret;
info->marker = header.max_marker;
info->last_update = header.max_time.to_real_time();
return 0;
}
static void _mdlog_info_completion(librados::completion_t cb, void *arg)
{
auto infoc = static_cast<RGWMetadataLogInfoCompletion *>(arg);
infoc->finish(cb);
infoc->put(); // drop the ref from get_info_async()
}
RGWMetadataLogInfoCompletion::RGWMetadataLogInfoCompletion(info_callback_t cb)
: completion(librados::Rados::aio_create_completion((void *)this,
_mdlog_info_completion)),
callback(cb)
{
}
RGWMetadataLogInfoCompletion::~RGWMetadataLogInfoCompletion()
{
completion->release();
}
int RGWMetadataLog::get_info_async(const DoutPrefixProvider *dpp, int shard_id, RGWMetadataLogInfoCompletion *completion)
{
string oid;
get_shard_oid(shard_id, oid);
completion->get(); // hold a ref until the completion fires
return svc.cls->timelog.info_async(dpp, completion->get_io_obj(), oid,
&completion->get_header(),
completion->get_completion());
}
int RGWMetadataLog::trim(const DoutPrefixProvider *dpp, int shard_id, const real_time& from_time, const real_time& end_time,
const string& start_marker, const string& end_marker)
{
string oid;
get_shard_oid(shard_id, oid);
return svc.cls->timelog.trim(dpp, oid, from_time, end_time, start_marker,
end_marker, nullptr, null_yield);
}
int RGWMetadataLog::lock_exclusive(const DoutPrefixProvider *dpp, int shard_id, timespan duration, string& zone_id, string& owner_id) {
string oid;
get_shard_oid(shard_id, oid);
return svc.cls->lock.lock_exclusive(dpp, svc.zone->get_zone_params().log_pool, oid, duration, zone_id, owner_id);
}
int RGWMetadataLog::unlock(const DoutPrefixProvider *dpp, int shard_id, string& zone_id, string& owner_id) {
string oid;
get_shard_oid(shard_id, oid);
return svc.cls->lock.unlock(dpp, svc.zone->get_zone_params().log_pool, oid, zone_id, owner_id);
}
void RGWMetadataLog::mark_modified(int shard_id)
{
lock.get_read();
if (modified_shards.find(shard_id) != modified_shards.end()) {
lock.unlock();
return;
}
lock.unlock();
std::unique_lock wl{lock};
modified_shards.insert(shard_id);
}
void RGWMetadataLog::read_clear_modified(set<int> &modified)
{
std::unique_lock wl{lock};
modified.swap(modified_shards);
modified_shards.clear();
}
void RGWMetadataLogInfo::dump(Formatter *f) const
{
encode_json("marker", marker, f);
utime_t ut(last_update);
encode_json("last_update", ut, f);
}
void RGWMetadataLogInfo::decode_json(JSONObj *obj)
{
JSONDecoder::decode_json("marker", marker, obj);
utime_t ut;
JSONDecoder::decode_json("last_update", ut, obj);
last_update = ut.to_real_time();
}
ceph-main/src/rgw/driver/rados/rgw_metadata.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <utility>
#include <boost/optional.hpp>
#include "include/types.h"
#include "rgw_common.h"
#include "rgw_period_history.h"
#include "rgw_mdlog_types.h"
#include "cls/version/cls_version_types.h"
#include "cls/log/cls_log_types.h"
#include "common/RefCountedObj.h"
#include "common/ceph_time.h"
#include "services/svc_meta_be.h"
#include "rgw_sal_fwd.h"
class RGWCoroutine;
class JSONObj;
struct RGWObjVersionTracker;
struct obj_version;
class RGWMetadataObject {
protected:
obj_version objv;
ceph::real_time mtime;
std::map<std::string, bufferlist> *pattrs{nullptr};
public:
RGWMetadataObject() {}
RGWMetadataObject(const obj_version& v,
real_time m) : objv(v), mtime(m) {}
virtual ~RGWMetadataObject() {}
obj_version& get_version();
real_time& get_mtime() { return mtime; }
void set_pattrs(std::map<std::string, bufferlist> *_pattrs) {
pattrs = _pattrs;
}
std::map<std::string, bufferlist> *get_pattrs() {
return pattrs;
}
virtual void dump(Formatter *f) const {}
};
class RGWMetadataManager;
class RGWMetadataHandler {
friend class RGWMetadataManager;
protected:
CephContext *cct;
public:
RGWMetadataHandler() {}
virtual ~RGWMetadataHandler();
virtual std::string get_type() = 0;
void base_init(CephContext *_cct) {
cct = _cct;
}
virtual RGWMetadataObject *get_meta_obj(JSONObj *jo, const obj_version& objv, const ceph::real_time& mtime) = 0;
virtual int get(std::string& entry, RGWMetadataObject **obj, optional_yield, const DoutPrefixProvider *dpp) = 0;
virtual int put(std::string& entry,
RGWMetadataObject *obj,
RGWObjVersionTracker& objv_tracker,
optional_yield,
const DoutPrefixProvider *dpp,
RGWMDLogSyncType type,
bool from_remote_zone) = 0;
virtual int remove(std::string& entry, RGWObjVersionTracker& objv_tracker, optional_yield, const DoutPrefixProvider *dpp) = 0;
virtual int mutate(const std::string& entry,
const ceph::real_time& mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
RGWMDLogStatus op_type,
std::function<int()> f) = 0;
virtual int list_keys_init(const DoutPrefixProvider *dpp, const std::string& marker, void **phandle) = 0;
virtual int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, std::list<std::string>& keys, bool *truncated) = 0;
virtual void list_keys_complete(void *handle) = 0;
virtual std::string get_marker(void *handle) = 0;
virtual int get_shard_id(const std::string& entry, int *shard_id) {
*shard_id = 0;
return 0;
}
virtual int attach(RGWMetadataManager *manager);
};
class RGWMetadataHandler_GenericMetaBE : public RGWMetadataHandler {
friend class RGWSI_MetaBackend;
friend class RGWMetadataManager;
friend class Put;
public:
class Put;
protected:
RGWSI_MetaBackend_Handler *be_handler;
virtual int do_get(RGWSI_MetaBackend_Handler::Op *op, std::string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) = 0;
virtual int do_put(RGWSI_MetaBackend_Handler::Op *op, std::string& entry, RGWMetadataObject *obj,
RGWObjVersionTracker& objv_tracker, optional_yield y,
const DoutPrefixProvider *dpp, RGWMDLogSyncType type,
bool from_remote_zone) = 0;
virtual int do_put_operate(Put *put_op, const DoutPrefixProvider *dpp);
virtual int do_remove(RGWSI_MetaBackend_Handler::Op *op, std::string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) = 0;
public:
RGWMetadataHandler_GenericMetaBE() {}
void base_init(CephContext *_cct,
RGWSI_MetaBackend_Handler *_be_handler) {
RGWMetadataHandler::base_init(_cct);
be_handler = _be_handler;
}
RGWSI_MetaBackend_Handler *get_be_handler() {
return be_handler;
}
class Put {
protected:
RGWMetadataHandler_GenericMetaBE *handler;
RGWSI_MetaBackend_Handler::Op *op;
std::string& entry;
RGWMetadataObject *obj;
RGWObjVersionTracker& objv_tracker;
RGWMDLogSyncType apply_type;
optional_yield y;
bool from_remote_zone{false};
int get(RGWMetadataObject **obj, const DoutPrefixProvider *dpp) {
return handler->do_get(op, entry, obj, y, dpp);
}
public:
Put(RGWMetadataHandler_GenericMetaBE *_handler, RGWSI_MetaBackend_Handler::Op *_op,
std::string& _entry, RGWMetadataObject *_obj,
RGWObjVersionTracker& _objv_tracker, optional_yield _y,
RGWMDLogSyncType _type, bool from_remote_zone);
virtual ~Put() {}
virtual int put_pre(const DoutPrefixProvider *dpp) {
return 0;
}
virtual int put(const DoutPrefixProvider *dpp) {
return 0;
}
virtual int put_post(const DoutPrefixProvider *dpp) {
return 0;
}
virtual int finalize() {
return 0;
}
};
int get(std::string& entry, RGWMetadataObject **obj, optional_yield, const DoutPrefixProvider *dpp) override;
int put(std::string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override;
int remove(std::string& entry, RGWObjVersionTracker& objv_tracker, optional_yield, const DoutPrefixProvider *dpp) override;
int mutate(const std::string& entry,
const ceph::real_time& mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
RGWMDLogStatus op_type,
std::function<int()> f) override;
int get_shard_id(const std::string& entry, int *shard_id) override;
int list_keys_init(const DoutPrefixProvider *dpp, const std::string& marker, void **phandle) override;
int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, std::list<std::string>& keys, bool *truncated) override;
void list_keys_complete(void *handle) override;
std::string get_marker(void *handle) override;
/**
* Compare an incoming versus on-disk tag/version+mtime combo against
* the sync mode to see if the new one should replace the on-disk one.
*
* @return true if the update should proceed, false otherwise.
*/
static bool check_versions(bool exists,
const obj_version& ondisk, const real_time& ondisk_time,
const obj_version& incoming, const real_time& incoming_time,
RGWMDLogSyncType sync_mode) {
switch (sync_mode) {
case APPLY_UPDATES:
if ((ondisk.tag != incoming.tag) ||
(ondisk.ver >= incoming.ver))
return false;
break;
case APPLY_NEWER:
if (ondisk_time >= incoming_time)
return false;
break;
case APPLY_EXCLUSIVE:
if (exists)
return false;
break;
case APPLY_ALWAYS: //deliberate fall-thru -- we always apply!
default: break;
}
return true;
}
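  // Example (illustrative values): under APPLY_UPDATES an incoming
  // {tag="abc", ver=5} replaces an on-disk {tag="abc", ver=4}, while a
  // mismatched tag or an incoming ver <= 4 is rejected; under APPLY_NEWER
  // only the mtimes are compared.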
};
class RGWMetadataTopHandler;
class RGWMetadataManager {
friend class RGWMetadataHandler;
CephContext *cct;
RGWSI_Meta *meta_svc;
std::map<std::string, RGWMetadataHandler *> handlers;
std::unique_ptr<RGWMetadataTopHandler> md_top_handler;
int find_handler(const std::string& metadata_key, RGWMetadataHandler **handler, std::string& entry);
int register_handler(RGWMetadataHandler *handler);
public:
RGWMetadataManager(RGWSI_Meta *_meta_svc);
~RGWMetadataManager();
RGWMetadataHandler *get_handler(const std::string& type);
int get(std::string& metadata_key, Formatter *f, optional_yield y, const DoutPrefixProvider *dpp);
int put(std::string& metadata_key, bufferlist& bl, optional_yield y,
const DoutPrefixProvider *dpp,
RGWMDLogSyncType sync_mode,
bool from_remote_zone,
obj_version *existing_version = NULL);
int remove(std::string& metadata_key, optional_yield y, const DoutPrefixProvider *dpp);
int mutate(const std::string& metadata_key,
const ceph::real_time& mtime,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
RGWMDLogStatus op_type,
std::function<int()> f);
int list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, void **phandle);
int list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void **phandle);
int list_keys_next(const DoutPrefixProvider *dpp, void *handle, int max, std::list<std::string>& keys, bool *truncated);
void list_keys_complete(void *handle);
std::string get_marker(void *handle);
void dump_log_entry(cls_log_entry& entry, Formatter *f);
void get_sections(std::list<std::string>& sections);
void parse_metadata_key(const std::string& metadata_key, std::string& type, std::string& entry);
int get_shard_id(const std::string& section, const std::string& key, int *shard_id);
};
class RGWMetadataHandlerPut_SObj : public RGWMetadataHandler_GenericMetaBE::Put
{
protected:
std::unique_ptr<RGWMetadataObject> oo;
RGWMetadataObject *old_obj{nullptr};
bool exists{false};
public:
RGWMetadataHandlerPut_SObj(RGWMetadataHandler_GenericMetaBE *handler, RGWSI_MetaBackend_Handler::Op *op,
std::string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker,
optional_yield y,
RGWMDLogSyncType type, bool from_remote_zone);
~RGWMetadataHandlerPut_SObj();
int put_pre(const DoutPrefixProvider *dpp) override;
int put(const DoutPrefixProvider *dpp) override;
virtual int put_check(const DoutPrefixProvider *dpp) {
return 0;
}
virtual int put_checked(const DoutPrefixProvider *dpp);
virtual void encode_obj(bufferlist *bl) {}
};
void rgw_shard_name(const std::string& prefix, unsigned max_shards, const std::string& key, std::string& name, int *shard_id);
void rgw_shard_name(const std::string& prefix, unsigned max_shards, const std::string& section, const std::string& key, std::string& name);
void rgw_shard_name(const std::string& prefix, unsigned shard_id, std::string& name);
ceph-main/src/rgw/driver/rados/rgw_notify.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "rgw_notify.h"
#include "cls/2pc_queue/cls_2pc_queue_client.h"
#include "cls/lock/cls_lock_client.h"
#include <memory>
#include <boost/algorithm/hex.hpp>
#include <boost/context/protected_fixedsize_stack.hpp>
#include <spawn/spawn.hpp>
#include "rgw_sal_rados.h"
#include "rgw_pubsub.h"
#include "rgw_pubsub_push.h"
#include "rgw_perf_counters.h"
#include "common/dout.h"
#include <chrono>
#define dout_subsys ceph_subsys_rgw
namespace rgw::notify {
struct event_entry_t {
rgw_pubsub_s3_event event;
std::string push_endpoint;
std::string push_endpoint_args;
std::string arn_topic;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(event, bl);
encode(push_endpoint, bl);
encode(push_endpoint_args, bl);
encode(arn_topic, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(event, bl);
decode(push_endpoint, bl);
decode(push_endpoint_args, bl);
decode(arn_topic, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(event_entry_t)
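// Serialization round-trip sketch (illustrative): an entry is encoded into
// the 2pc queue payload on commit and decoded again in process_entry():
//
//   event_entry_t in, out;
//   bufferlist bl;
//   encode(in, bl);
//   auto p = bl.cbegin();
//   decode(out, p);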
using queues_t = std::set<std::string>;
// use mmap/mprotect to allocate 128k coroutine stacks
auto make_stack_allocator() {
return boost::context::protected_fixedsize_stack{128*1024};
}
const std::string Q_LIST_OBJECT_NAME = "queues_list_object";
class Manager : public DoutPrefixProvider {
const size_t max_queue_size;
const uint32_t queues_update_period_ms;
const uint32_t queues_update_retry_ms;
const uint32_t queue_idle_sleep_us;
const utime_t failover_time;
CephContext* const cct;
static constexpr auto COOKIE_LEN = 16;
const std::string lock_cookie;
boost::asio::io_context io_context;
boost::asio::executor_work_guard<boost::asio::io_context::executor_type> work_guard;
const uint32_t worker_count;
std::vector<std::thread> workers;
const uint32_t stale_reservations_period_s;
const uint32_t reservations_cleanup_period_s;
public:
librados::IoCtx& rados_ioctx;
private:
CephContext *get_cct() const override { return cct; }
unsigned get_subsys() const override { return dout_subsys; }
std::ostream& gen_prefix(std::ostream& out) const override { return out << "rgw notify: "; }
// read the list of queues from the queue list object
int read_queue_list(queues_t& queues, optional_yield y) {
constexpr auto max_chunk = 1024U;
std::string start_after;
bool more = true;
int rval;
while (more) {
librados::ObjectReadOperation op;
queues_t queues_chunk;
op.omap_get_keys2(start_after, max_chunk, &queues_chunk, &more, &rval);
const auto ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, nullptr, y);
if (ret == -ENOENT) {
// queue list object was not created - nothing to do
return 0;
}
if (ret < 0) {
// TODO: do we need to check on rval as well as ret?
ldpp_dout(this, 1) << "ERROR: failed to read queue list. error: " << ret << dendl;
return ret;
}
      // advance the marker before merging (merge() empties the chunk);
      // otherwise a listing of more than max_chunk queues would never advance
      if (!queues_chunk.empty()) {
        start_after = *queues_chunk.rbegin();
      }
      queues.merge(queues_chunk);
}
return 0;
}
// set m1 to be the minimum between m1 and m2
static int set_min_marker(std::string& m1, const std::string m2) {
cls_queue_marker mr1;
cls_queue_marker mr2;
if (mr1.from_str(m1.c_str()) < 0 || mr2.from_str(m2.c_str()) < 0) {
return -EINVAL;
}
if (mr2.gen <= mr1.gen && mr2.offset < mr1.offset) {
m1 = m2;
}
return 0;
}
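  // Example (illustrative field values): given mr1 = {gen=1, offset=100}
  // and mr2 = {gen=1, offset=50}, m1 is replaced by m2, so the removal
  // boundary stays at the earliest unprocessed marker.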
using Clock = ceph::coarse_mono_clock;
using Executor = boost::asio::io_context::executor_type;
using Timer = boost::asio::basic_waitable_timer<Clock,
boost::asio::wait_traits<Clock>, Executor>;
class tokens_waiter {
const std::chrono::hours infinite_duration;
size_t pending_tokens;
Timer timer;
struct token {
tokens_waiter& waiter;
token(tokens_waiter& _waiter) : waiter(_waiter) {
++waiter.pending_tokens;
}
~token() {
--waiter.pending_tokens;
if (waiter.pending_tokens == 0) {
waiter.timer.cancel();
}
}
};
public:
tokens_waiter(boost::asio::io_context& io_context) :
infinite_duration(1000),
pending_tokens(0),
timer(io_context) {}
void async_wait(yield_context yield) {
if (pending_tokens == 0) {
return;
}
timer.expires_from_now(infinite_duration);
boost::system::error_code ec;
timer.async_wait(yield[ec]);
ceph_assert(ec == boost::system::errc::operation_canceled);
}
token make_token() {
return token(*this);
}
};
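  // Usage sketch (mirrors process_queue() below): each spawned coroutine
  // holds a token for its lifetime and the parent waits until all tokens
  // are destroyed:
  //
  //   tokens_waiter waiter(io_context);
  //   spawn::spawn(yield, [&](yield_context yield) {
  //     const auto token = waiter.make_token();
  //     // ... push one notification ...
  //   }, make_stack_allocator());
  //   waiter.async_wait(yield); // returns once every token is gone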
// processing of a specific entry
  // return whether processing was successful (true) or not (false)
bool process_entry(const cls_queue_entry& entry, yield_context yield) {
event_entry_t event_entry;
auto iter = entry.data.cbegin();
try {
decode(event_entry, iter);
} catch (buffer::error& err) {
ldpp_dout(this, 5) << "WARNING: failed to decode entry. error: " << err.what() << dendl;
return false;
}
try {
// TODO move endpoint creation to queue level
const auto push_endpoint = RGWPubSubEndpoint::create(event_entry.push_endpoint, event_entry.arn_topic,
RGWHTTPArgs(event_entry.push_endpoint_args, this),
cct);
ldpp_dout(this, 20) << "INFO: push endpoint created: " << event_entry.push_endpoint <<
" for entry: " << entry.marker << dendl;
const auto ret = push_endpoint->send_to_completion_async(cct, event_entry.event, optional_yield(io_context, yield));
if (ret < 0) {
ldpp_dout(this, 5) << "WARNING: push entry: " << entry.marker << " to endpoint: " << event_entry.push_endpoint
<< " failed. error: " << ret << " (will retry)" << dendl;
return false;
} else {
ldpp_dout(this, 20) << "INFO: push entry: " << entry.marker << " to endpoint: " << event_entry.push_endpoint
<< " ok" << dendl;
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok);
return true;
}
} catch (const RGWPubSubEndpoint::configuration_error& e) {
ldpp_dout(this, 5) << "WARNING: failed to create push endpoint: "
<< event_entry.push_endpoint << " for entry: " << entry.marker << ". error: " << e.what() << " (will retry) " << dendl;
return false;
}
}
// clean stale reservation from queue
void cleanup_queue(const std::string& queue_name, yield_context yield) {
while (true) {
ldpp_dout(this, 20) << "INFO: trying to perform stale reservation cleanup for queue: " << queue_name << dendl;
const auto now = ceph::coarse_real_time::clock::now();
const auto stale_time = now - std::chrono::seconds(stale_reservations_period_s);
librados::ObjectWriteOperation op;
op.assert_exists();
rados::cls::lock::assert_locked(&op, queue_name+"_lock",
ClsLockType::EXCLUSIVE,
lock_cookie,
"" /*no tag*/);
cls_2pc_queue_expire_reservations(op, stale_time);
// check ownership and do reservation cleanup in one batch
auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
if (ret == -ENOENT) {
// queue was deleted
ldpp_dout(this, 5) << "INFO: queue: "
<< queue_name << ". was removed. cleanup will stop" << dendl;
return;
}
if (ret == -EBUSY) {
ldpp_dout(this, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl;
return;
}
if (ret < 0) {
ldpp_dout(this, 5) << "WARNING: failed to cleanup stale reservation from queue and/or lock queue: " << queue_name
<< ". error: " << ret << dendl;
}
Timer timer(io_context);
timer.expires_from_now(std::chrono::seconds(reservations_cleanup_period_s));
boost::system::error_code ec;
timer.async_wait(yield[ec]);
}
}
// processing of a specific queue
void process_queue(const std::string& queue_name, yield_context yield) {
constexpr auto max_elements = 1024;
auto is_idle = false;
const std::string start_marker;
    // start the cleanup coroutine for the queue
spawn::spawn(io_context, [this, queue_name](yield_context yield) {
cleanup_queue(queue_name, yield);
}, make_stack_allocator());
while (true) {
// if queue was empty the last time, sleep for idle timeout
if (is_idle) {
Timer timer(io_context);
timer.expires_from_now(std::chrono::microseconds(queue_idle_sleep_us));
boost::system::error_code ec;
timer.async_wait(yield[ec]);
}
// get list of entries in the queue
is_idle = true;
bool truncated = false;
std::string end_marker;
std::vector<cls_queue_entry> entries;
auto total_entries = 0U;
{
librados::ObjectReadOperation op;
op.assert_exists();
bufferlist obl;
int rval;
rados::cls::lock::assert_locked(&op, queue_name+"_lock",
ClsLockType::EXCLUSIVE,
lock_cookie,
"" /*no tag*/);
cls_2pc_queue_list_entries(op, start_marker, max_elements, &obl, &rval);
// check ownership and list entries in one batch
auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, nullptr, optional_yield(io_context, yield));
if (ret == -ENOENT) {
// queue was deleted
ldpp_dout(this, 5) << "INFO: queue: "
<< queue_name << ". was removed. processing will stop" << dendl;
return;
}
if (ret == -EBUSY) {
ldpp_dout(this, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl;
return;
}
if (ret < 0) {
ldpp_dout(this, 5) << "WARNING: failed to get list of entries in queue and/or lock queue: "
<< queue_name << ". error: " << ret << " (will retry)" << dendl;
continue;
}
ret = cls_2pc_queue_list_entries_result(obl, entries, &truncated, end_marker);
if (ret < 0) {
ldpp_dout(this, 5) << "WARNING: failed to parse list of entries in queue: "
<< queue_name << ". error: " << ret << " (will retry)" << dendl;
continue;
}
}
total_entries = entries.size();
if (total_entries == 0) {
// nothing in the queue
continue;
}
// log when queue is not idle
ldpp_dout(this, 20) << "INFO: found: " << total_entries << " entries in: " << queue_name <<
". end marker is: " << end_marker << dendl;
is_idle = false;
auto has_error = false;
auto remove_entries = false;
auto entry_idx = 1U;
tokens_waiter waiter(io_context);
for (auto& entry : entries) {
if (has_error) {
// bail out on first error
break;
}
// TODO pass entry pointer instead of by-value
spawn::spawn(yield, [this, &queue_name, entry_idx, total_entries, &end_marker, &remove_entries, &has_error, &waiter, entry](yield_context yield) {
const auto token = waiter.make_token();
if (process_entry(entry, yield)) {
ldpp_dout(this, 20) << "INFO: processing of entry: " <<
entry.marker << " (" << entry_idx << "/" << total_entries << ") from: " << queue_name << " ok" << dendl;
remove_entries = true;
} else {
if (set_min_marker(end_marker, entry.marker) < 0) {
ldpp_dout(this, 1) << "ERROR: cannot determin minimum between malformed markers: " << end_marker << ", " << entry.marker << dendl;
} else {
ldpp_dout(this, 20) << "INFO: new end marker for removal: " << end_marker << " from: " << queue_name << dendl;
}
has_error = true;
ldpp_dout(this, 20) << "INFO: processing of entry: " <<
entry.marker << " (" << entry_idx << "/" << total_entries << ") from: " << queue_name << " failed" << dendl;
}
}, make_stack_allocator());
++entry_idx;
}
// wait for all pending work to finish
waiter.async_wait(yield);
// delete all published entries from queue
if (remove_entries) {
librados::ObjectWriteOperation op;
op.assert_exists();
rados::cls::lock::assert_locked(&op, queue_name+"_lock",
ClsLockType::EXCLUSIVE,
lock_cookie,
"" /*no tag*/);
cls_2pc_queue_remove_entries(op, end_marker);
        // check ownership and delete entries in one batch
const auto ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
if (ret == -ENOENT) {
// queue was deleted
ldpp_dout(this, 5) << "INFO: queue: "
<< queue_name << ". was removed. processing will stop" << dendl;
return;
}
if (ret == -EBUSY) {
ldpp_dout(this, 5) << "WARNING: queue: " << queue_name << " ownership moved to another daemon. processing will stop" << dendl;
return;
}
if (ret < 0) {
ldpp_dout(this, 1) << "ERROR: failed to remove entries and/or lock queue up to: " << end_marker << " from queue: "
<< queue_name << ". error: " << ret << dendl;
} else {
ldpp_dout(this, 20) << "INFO: removed entries up to: " << end_marker << " from queue: "
<< queue_name << dendl;
}
}
}
}
  // list of owned queues
using owned_queues_t = std::unordered_set<std::string>;
// process all queues
// find which of the queues is owned by this daemon and process it
void process_queues(yield_context yield) {
auto has_error = false;
owned_queues_t owned_queues;
// add randomness to the duration between queue checking
// to make sure that different daemons are not synced
std::random_device seed;
std::mt19937 rnd_gen(seed());
const auto min_jitter = 100; // ms
const auto max_jitter = 500; // ms
std::uniform_int_distribution<> duration_jitter(min_jitter, max_jitter);
std::vector<std::string> queue_gc;
std::mutex queue_gc_lock;
while (true) {
Timer timer(io_context);
const auto duration = (has_error ?
std::chrono::milliseconds(queues_update_retry_ms) : std::chrono::milliseconds(queues_update_period_ms)) +
std::chrono::milliseconds(duration_jitter(rnd_gen));
timer.expires_from_now(duration);
const auto tp = ceph::coarse_real_time::clock::to_time_t(ceph::coarse_real_time::clock::now() + duration);
ldpp_dout(this, 20) << "INFO: next queues processing will happen at: " << std::ctime(&tp) << dendl;
boost::system::error_code ec;
timer.async_wait(yield[ec]);
queues_t queues;
auto ret = read_queue_list(queues, optional_yield(io_context, yield));
if (ret < 0) {
has_error = true;
continue;
}
for (const auto& queue_name : queues) {
// try to lock the queue to check if it is owned by this rgw
        // or if ownership needs to be taken
librados::ObjectWriteOperation op;
op.assert_exists();
rados::cls::lock::lock(&op, queue_name+"_lock",
ClsLockType::EXCLUSIVE,
lock_cookie,
"" /*no tag*/,
"" /*no description*/,
failover_time,
LOCK_FLAG_MAY_RENEW);
ret = rgw_rados_operate(this, rados_ioctx, queue_name, &op, optional_yield(io_context, yield));
if (ret == -EBUSY) {
// lock is already taken by another RGW
ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " owned (locked) by another daemon" << dendl;
// if queue was owned by this RGW, processing should be stopped, queue would be deleted from list afterwards
continue;
}
if (ret == -ENOENT) {
// queue is deleted - processing will stop the next time we try to read from the queue
ldpp_dout(this, 10) << "INFO: queue: " << queue_name << " should not be locked - already deleted" << dendl;
continue;
}
if (ret < 0) {
// failed to lock for another reason, continue to process other queues
ldpp_dout(this, 1) << "ERROR: failed to lock queue: " << queue_name << ". error: " << ret << dendl;
has_error = true;
continue;
}
// add queue to list of owned queues
if (owned_queues.insert(queue_name).second) {
ldpp_dout(this, 10) << "INFO: queue: " << queue_name << " now owned (locked) by this daemon" << dendl;
// start processing this queue
spawn::spawn(io_context, [this, &queue_gc, &queue_gc_lock, queue_name](yield_context yield) {
process_queue(queue_name, yield);
            // if queue processing ended, it means that the queue was removed or not owned anymore
// mark it for deletion
std::lock_guard lock_guard(queue_gc_lock);
queue_gc.push_back(queue_name);
ldpp_dout(this, 10) << "INFO: queue: " << queue_name << " marked for removal" << dendl;
}, make_stack_allocator());
} else {
ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " ownership (lock) renewed" << dendl;
}
}
// erase all queue that were deleted
{
std::lock_guard lock_guard(queue_gc_lock);
std::for_each(queue_gc.begin(), queue_gc.end(), [this, &owned_queues](const std::string& queue_name) {
owned_queues.erase(queue_name);
ldpp_dout(this, 20) << "INFO: queue: " << queue_name << " removed" << dendl;
});
queue_gc.clear();
}
}
}
public:
~Manager() {
work_guard.reset();
io_context.stop();
std::for_each(workers.begin(), workers.end(), [] (auto& worker) { worker.join(); });
}
// ctor: start all threads
Manager(CephContext* _cct, uint32_t _max_queue_size, uint32_t _queues_update_period_ms,
          uint32_t _queues_update_retry_ms, uint32_t _queue_idle_sleep_us, uint32_t failover_time_ms,
uint32_t _stale_reservations_period_s, uint32_t _reservations_cleanup_period_s,
uint32_t _worker_count, rgw::sal::RadosStore* store) :
max_queue_size(_max_queue_size),
queues_update_period_ms(_queues_update_period_ms),
queues_update_retry_ms(_queues_update_retry_ms),
queue_idle_sleep_us(_queue_idle_sleep_us),
failover_time(std::chrono::milliseconds(failover_time_ms)),
cct(_cct),
lock_cookie(gen_rand_alphanumeric(cct, COOKIE_LEN)),
work_guard(boost::asio::make_work_guard(io_context)),
worker_count(_worker_count),
stale_reservations_period_s(_stale_reservations_period_s),
reservations_cleanup_period_s(_reservations_cleanup_period_s),
rados_ioctx(store->getRados()->get_notif_pool_ctx())
{
spawn::spawn(io_context, [this] (yield_context yield) {
process_queues(yield);
}, make_stack_allocator());
// start the worker threads to do the actual queue processing
const std::string WORKER_THREAD_NAME = "notif-worker";
for (auto worker_id = 0U; worker_id < worker_count; ++worker_id) {
workers.emplace_back([this]() {
try {
io_context.run();
} catch (const std::exception& err) {
ldpp_dout(this, 10) << "Notification worker failed with error: " << err.what() << dendl;
          throw; // rethrow, preserving the original exception type
}
});
const auto rc = ceph_pthread_setname(workers.back().native_handle(),
(WORKER_THREAD_NAME+std::to_string(worker_id)).c_str());
ceph_assert(rc == 0);
}
ldpp_dout(this, 10) << "Started notification manager with: " << worker_count << " workers" << dendl;
}
int add_persistent_topic(const std::string& topic_name, optional_yield y) {
if (topic_name == Q_LIST_OBJECT_NAME) {
ldpp_dout(this, 1) << "ERROR: topic name cannot be: " << Q_LIST_OBJECT_NAME << " (conflict with queue list object name)" << dendl;
return -EINVAL;
}
librados::ObjectWriteOperation op;
op.create(true);
cls_2pc_queue_init(op, topic_name, max_queue_size);
auto ret = rgw_rados_operate(this, rados_ioctx, topic_name, &op, y);
if (ret == -EEXIST) {
// queue already exists - nothing to do
ldpp_dout(this, 20) << "INFO: queue for topic: " << topic_name << " already exists. nothing to do" << dendl;
return 0;
}
if (ret < 0) {
// failed to create queue
ldpp_dout(this, 1) << "ERROR: failed to create queue for topic: " << topic_name << ". error: " << ret << dendl;
return ret;
}
bufferlist empty_bl;
std::map<std::string, bufferlist> new_topic{{topic_name, empty_bl}};
op.omap_set(new_topic);
ret = rgw_rados_operate(this, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
ldpp_dout(this, 1) << "ERROR: failed to add queue: " << topic_name << " to queue list. error: " << ret << dendl;
return ret;
}
ldpp_dout(this, 20) << "INFO: queue: " << topic_name << " added to queue list" << dendl;
return 0;
}
};
// singleton manager
// note that the manager itself is not a singleton, and multiple instances may co-exist
// TODO make the pointer atomic in allocation and deallocation to avoid race conditions
static Manager* s_manager = nullptr;
constexpr size_t MAX_QUEUE_SIZE = 128*1000*1000; // 128MB
constexpr uint32_t Q_LIST_UPDATE_MSEC = 1000*30;     // check queue list every 30 seconds
constexpr uint32_t Q_LIST_RETRY_MSEC = 1000; // retry every second if queue list update failed
constexpr uint32_t IDLE_TIMEOUT_USEC = 100*1000; // idle sleep 100ms
constexpr uint32_t FAILOVER_TIME_MSEC = 3*Q_LIST_UPDATE_MSEC; // failover time is 3x the queue-list renew time
constexpr uint32_t WORKER_COUNT = 1; // 1 worker thread
constexpr uint32_t STALE_RESERVATIONS_PERIOD_S = 120; // cleanup reservations that are more than 2 minutes old
constexpr uint32_t RESERVATIONS_CLEANUP_PERIOD_S = 30; // reservation cleanup every 30 seconds
bool init(CephContext* cct, rgw::sal::RadosStore* store, const DoutPrefixProvider *dpp) {
if (s_manager) {
return false;
}
// TODO: take conf from CephContext
s_manager = new Manager(cct, MAX_QUEUE_SIZE,
Q_LIST_UPDATE_MSEC, Q_LIST_RETRY_MSEC,
IDLE_TIMEOUT_USEC, FAILOVER_TIME_MSEC,
STALE_RESERVATIONS_PERIOD_S, RESERVATIONS_CLEANUP_PERIOD_S,
WORKER_COUNT,
store);
return true;
}
void shutdown() {
delete s_manager;
s_manager = nullptr;
}
int add_persistent_topic(const std::string& topic_name, optional_yield y) {
if (!s_manager) {
return -EAGAIN;
}
return s_manager->add_persistent_topic(topic_name, y);
}
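// Usage sketch (hedged; the topic name is hypothetical): once init() has
// created the manager, callers register queues by topic name:
//
//   int r = rgw::notify::add_persistent_topic("my-topic", y);
//   if (r == -EAGAIN) { /* manager not initialized yet */ }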
int remove_persistent_topic(const DoutPrefixProvider* dpp, librados::IoCtx& rados_ioctx, const std::string& topic_name, optional_yield y) {
librados::ObjectWriteOperation op;
op.remove();
auto ret = rgw_rados_operate(dpp, rados_ioctx, topic_name, &op, y);
if (ret == -ENOENT) {
// queue already removed - nothing to do
ldpp_dout(dpp, 20) << "INFO: queue for topic: " << topic_name << " already removed. nothing to do" << dendl;
return 0;
}
if (ret < 0) {
// failed to remove queue
ldpp_dout(dpp, 1) << "ERROR: failed to remove queue for topic: " << topic_name << ". error: " << ret << dendl;
return ret;
}
std::set<std::string> topic_to_remove{{topic_name}};
op.omap_rm_keys(topic_to_remove);
ret = rgw_rados_operate(dpp, rados_ioctx, Q_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to remove queue: " << topic_name << " from queue list. error: " << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << "INFO: queue: " << topic_name << " removed from queue list" << dendl;
return 0;
}
int remove_persistent_topic(const std::string& topic_name, optional_yield y) {
if (!s_manager) {
return -EAGAIN;
}
return remove_persistent_topic(s_manager, s_manager->rados_ioctx, topic_name, y);
}
rgw::sal::Object* get_object_with_attributes(
const reservation_t& res, rgw::sal::Object* obj) {
// in case of copy obj, the tags and metadata are taken from source
const auto src_obj = res.src_object ? res.src_object : obj;
if (src_obj->get_attrs().empty()) {
if (!src_obj->get_bucket()) {
src_obj->set_bucket(res.bucket);
}
const auto ret = src_obj->get_obj_attrs(res.yield, res.dpp);
if (ret < 0) {
ldpp_dout(res.dpp, 20) << "failed to get attributes from object: " <<
src_obj->get_key() << ". ret = " << ret << dendl;
return nullptr;
}
}
return src_obj;
}
static inline void filter_amz_meta(meta_map_t& dest, const meta_map_t& src) {
std::copy_if(src.cbegin(), src.cend(),
std::inserter(dest, dest.end()),
[](const auto& m) {
return (boost::algorithm::starts_with(m.first, RGW_AMZ_META_PREFIX));
});
}
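// Example (illustrative keys): only entries whose key starts with
// RGW_AMZ_META_PREFIX ("x-amz-meta-") are copied, so "x-amz-meta-color" is
// kept while "content-type" is dropped.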
static inline void metadata_from_attributes(
reservation_t& res, rgw::sal::Object* obj) {
auto& metadata = res.x_meta_map;
  const auto src_obj = get_object_with_attributes(res, obj);
if (!src_obj) {
return;
}
res.metadata_fetched_from_attributes = true;
for (auto& attr : src_obj->get_attrs()) {
if (boost::algorithm::starts_with(attr.first, RGW_ATTR_META_PREFIX)) {
std::string_view key(attr.first);
key.remove_prefix(sizeof(RGW_ATTR_PREFIX)-1);
// we want to pass a null terminated version
// of the bufferlist, hence "to_str().c_str()"
metadata.emplace(key, attr.second.to_str().c_str());
}
}
}
static inline void tags_from_attributes(
const reservation_t& res, rgw::sal::Object* obj, KeyMultiValueMap& tags) {
  const auto src_obj = get_object_with_attributes(res, obj);
if (!src_obj) {
return;
}
const auto& attrs = src_obj->get_attrs();
const auto attr_iter = attrs.find(RGW_ATTR_TAGS);
if (attr_iter != attrs.end()) {
auto bliter = attr_iter->second.cbegin();
RGWObjTags obj_tags;
try {
::decode(obj_tags, bliter);
} catch(buffer::error&) {
// not able to decode tags
return;
}
tags = std::move(obj_tags.get_tags());
}
}
// populate event from request
static inline void populate_event(reservation_t& res,
rgw::sal::Object* obj,
uint64_t size,
const ceph::real_time& mtime,
const std::string& etag,
const std::string& version,
EventType event_type,
rgw_pubsub_s3_event& event) {
event.eventTime = mtime;
event.eventName = to_event_string(event_type);
event.userIdentity = res.user_id; // user that triggered the change
event.x_amz_request_id = res.req_id; // request ID of the original change
event.x_amz_id_2 = res.store->getRados()->host_id; // RGW on which the change was made
// configurationId is filled from notification configuration
event.bucket_name = res.bucket->get_name();
event.bucket_ownerIdentity = res.bucket->get_owner() ?
res.bucket->get_owner()->get_id().id : res.bucket->get_info().owner.id;
const auto region = res.store->get_zone()->get_zonegroup().get_api_name();
rgw::ARN bucket_arn(res.bucket->get_key());
bucket_arn.region = region;
event.bucket_arn = to_string(bucket_arn);
event.object_key = res.object_name ? *res.object_name : obj->get_name();
event.object_size = size;
event.object_etag = etag;
event.object_versionId = version;
event.awsRegion = region;
  // use the timestamp as the per-key sequence id (hex encoded)
const utime_t ts(real_clock::now());
boost::algorithm::hex((const char*)&ts, (const char*)&ts + sizeof(utime_t),
std::back_inserter(event.object_sequencer));
set_event_id(event.id, etag, ts);
event.bucket_id = res.bucket->get_bucket_id();
// pass meta data
if (!res.metadata_fetched_from_attributes) {
    // either no metadata exists or no metadata filter was used
metadata_from_attributes(res, obj);
}
event.x_meta_map = res.x_meta_map;
// pass tags
if (!res.tagset ||
(*res.tagset).get_tags().empty()) {
// try to fetch the tags from the attributes
tags_from_attributes(res, obj, event.tags);
} else {
event.tags = (*res.tagset).get_tags();
}
// opaque data will be filled from topic configuration
}
static inline bool notification_match(reservation_t& res,
const rgw_pubsub_topic_filter& filter,
EventType event,
const RGWObjTags* req_tags) {
if (!match(filter.events, event)) {
return false;
}
const auto obj = res.object;
if (!match(filter.s3_filter.key_filter,
res.object_name ? *res.object_name : obj->get_name())) {
return false;
}
if (!filter.s3_filter.metadata_filter.kv.empty()) {
// metadata filter exists
if (res.s) {
filter_amz_meta(res.x_meta_map, res.s->info.x_meta_map);
}
metadata_from_attributes(res, obj);
if (!match(filter.s3_filter.metadata_filter, res.x_meta_map)) {
return false;
}
}
if (!filter.s3_filter.tag_filter.kv.empty()) {
// tag filter exists
if (req_tags) {
// tags in the request
if (!match(filter.s3_filter.tag_filter, req_tags->get_tags())) {
return false;
}
} else if (res.tagset && !(*res.tagset).get_tags().empty()) {
// tags were cached in req_state
if (!match(filter.s3_filter.tag_filter, (*res.tagset).get_tags())) {
return false;
}
} else {
// try to fetch tags from the attributes
KeyMultiValueMap tags;
tags_from_attributes(res, obj, tags);
if (!match(filter.s3_filter.tag_filter, tags)) {
return false;
}
}
}
return true;
}
int publish_reserve(const DoutPrefixProvider* dpp,
EventType event_type,
reservation_t& res,
const RGWObjTags* req_tags)
{
const RGWPubSub ps(res.store, res.user_tenant);
const RGWPubSub::Bucket ps_bucket(ps, res.bucket);
rgw_pubsub_bucket_topics bucket_topics;
auto rc = ps_bucket.get_topics(res.dpp, bucket_topics, res.yield);
if (rc < 0) {
// failed to fetch bucket topics
return rc;
}
for (const auto& bucket_topic : bucket_topics.topics) {
const rgw_pubsub_topic_filter& topic_filter = bucket_topic.second;
const rgw_pubsub_topic& topic_cfg = topic_filter.topic;
if (!notification_match(res, topic_filter, event_type, req_tags)) {
// notification does not apply to req_state
continue;
}
ldpp_dout(res.dpp, 20) << "INFO: notification: '" << topic_filter.s3_id <<
"' on topic: '" << topic_cfg.dest.arn_topic <<
"' and bucket: '" << res.bucket->get_name() <<
"' (unique topic: '" << topic_cfg.name <<
"') apply to event of type: '" << to_string(event_type) << "'" << dendl;
cls_2pc_reservation::id_t res_id;
if (topic_cfg.dest.persistent) {
// TODO: take default reservation size from conf
constexpr auto DEFAULT_RESERVATION = 4*1024U; // 4K
res.size = DEFAULT_RESERVATION;
librados::ObjectWriteOperation op;
bufferlist obl;
int rval;
const auto& queue_name = topic_cfg.dest.arn_topic;
cls_2pc_queue_reserve(op, res.size, 1, &obl, &rval);
auto ret = rgw_rados_operate(
res.dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op, res.yield, librados::OPERATION_RETURNVEC);
if (ret < 0) {
ldpp_dout(res.dpp, 1) <<
"ERROR: failed to reserve notification on queue: "
<< queue_name << ". error: " << ret << dendl;
// if no space is left in queue we ask client to slow down
return (ret == -ENOSPC) ? -ERR_RATE_LIMITED : ret;
}
ret = cls_2pc_queue_reserve_result(obl, res_id);
if (ret < 0) {
ldpp_dout(res.dpp, 1) << "ERROR: failed to parse reservation id. error: " << ret << dendl;
return ret;
}
}
res.topics.emplace_back(topic_filter.s3_id, topic_cfg, res_id);
}
return 0;
}
int publish_commit(rgw::sal::Object* obj,
uint64_t size,
const ceph::real_time& mtime,
const std::string& etag,
const std::string& version,
EventType event_type,
reservation_t& res,
const DoutPrefixProvider* dpp)
{
for (auto& topic : res.topics) {
if (topic.cfg.dest.persistent &&
topic.res_id == cls_2pc_reservation::NO_ID) {
// nothing to commit or already committed/aborted
continue;
}
event_entry_t event_entry;
populate_event(res, obj, size, mtime, etag, version, event_type, event_entry.event);
event_entry.event.configurationId = topic.configurationId;
event_entry.event.opaque_data = topic.cfg.opaque_data;
if (topic.cfg.dest.persistent) {
event_entry.push_endpoint = std::move(topic.cfg.dest.push_endpoint);
event_entry.push_endpoint_args =
std::move(topic.cfg.dest.push_endpoint_args);
event_entry.arn_topic = topic.cfg.dest.arn_topic;
bufferlist bl;
encode(event_entry, bl);
const auto& queue_name = topic.cfg.dest.arn_topic;
if (bl.length() > res.size) {
// try to make a larger reservation, fail only if this is not possible
ldpp_dout(dpp, 5) << "WARNING: committed size: " << bl.length()
<< " exceeded reserved size: " << res.size
<<
" . trying to make a larger reservation on queue:" << queue_name
<< dendl;
// first cancel the existing reservation
librados::ObjectWriteOperation op;
cls_2pc_queue_abort(op, topic.res_id);
auto ret = rgw_rados_operate(
dpp, res.store->getRados()->get_notif_pool_ctx(),
topic.cfg.dest.arn_topic, &op,
res.yield);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to abort reservation: "
<< topic.res_id <<
" when trying to make a larger reservation on queue: " << queue_name
<< ". error: " << ret << dendl;
return ret;
}
// now try to make a bigger one
buffer::list obl;
int rval;
cls_2pc_queue_reserve(op, bl.length(), 1, &obl, &rval);
ret = rgw_rados_operate(
dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op, res.yield, librados::OPERATION_RETURNVEC);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to reserve extra space on queue: "
<< queue_name
<< ". error: " << ret << dendl;
return (ret == -ENOSPC) ? -ERR_RATE_LIMITED : ret;
}
ret = cls_2pc_queue_reserve_result(obl, topic.res_id);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to parse reservation id for "
"extra space. error: " << ret << dendl;
return ret;
}
}
std::vector<buffer::list> bl_data_vec{std::move(bl)};
librados::ObjectWriteOperation op;
cls_2pc_queue_commit(op, bl_data_vec, topic.res_id);
const auto ret = rgw_rados_operate(
dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op, res.yield);
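      // reset res_id either way, so the dtor won't try to abort a
      // reservation that was already committed (or lost on error)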
topic.res_id = cls_2pc_reservation::NO_ID;
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to commit reservation to queue: "
<< queue_name << ". error: " << ret
<< dendl;
return ret;
}
} else {
try {
// TODO add endpoint LRU cache
const auto push_endpoint = RGWPubSubEndpoint::create(
topic.cfg.dest.push_endpoint,
topic.cfg.dest.arn_topic,
RGWHTTPArgs(topic.cfg.dest.push_endpoint_args, dpp),
dpp->get_cct());
ldpp_dout(res.dpp, 20) << "INFO: push endpoint created: "
<< topic.cfg.dest.push_endpoint << dendl;
const auto ret = push_endpoint->send_to_completion_async(
dpp->get_cct(), event_entry.event, res.yield);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: push to endpoint "
<< topic.cfg.dest.push_endpoint
<< " failed. error: " << ret << dendl;
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed);
return ret;
}
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_ok);
} catch (const RGWPubSubEndpoint::configuration_error& e) {
ldpp_dout(dpp, 1) << "ERROR: failed to create push endpoint: "
<< topic.cfg.dest.push_endpoint << ". error: " << e.what() << dendl;
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_failed);
return -EINVAL;
}
}
}
return 0;
}
int publish_abort(reservation_t& res) {
for (auto& topic : res.topics) {
if (!topic.cfg.dest.persistent ||
topic.res_id == cls_2pc_reservation::NO_ID) {
// nothing to abort or already committed/aborted
continue;
}
const auto& queue_name = topic.cfg.dest.arn_topic;
librados::ObjectWriteOperation op;
cls_2pc_queue_abort(op, topic.res_id);
const auto ret = rgw_rados_operate(
res.dpp, res.store->getRados()->get_notif_pool_ctx(),
queue_name, &op, res.yield);
if (ret < 0) {
ldpp_dout(res.dpp, 1) << "ERROR: failed to abort reservation: "
<< topic.res_id <<
" from queue: " << queue_name << ". error: " << ret << dendl;
return ret;
}
topic.res_id = cls_2pc_reservation::NO_ID;
}
return 0;
}
int get_persistent_queue_stats_by_topic_name(const DoutPrefixProvider *dpp, librados::IoCtx &rados_ioctx,
const std::string &topic_name, rgw_topic_stats &stats, optional_yield y)
{
cls_2pc_reservations reservations;
auto ret = cls_2pc_queue_list_reservations(rados_ioctx, topic_name, reservations);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to read queue list reservation: " << ret << dendl;
return ret;
}
stats.queue_reservations = reservations.size();
ret = cls_2pc_queue_get_topic_stats(rados_ioctx, topic_name, stats.queue_entries, stats.queue_size);
if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: failed to get the queue size or the number of entries: " << ret << dendl;
return ret;
}
return 0;
}
reservation_t::reservation_t(const DoutPrefixProvider* _dpp,
rgw::sal::RadosStore* _store,
const req_state* _s,
rgw::sal::Object* _object,
rgw::sal::Object* _src_object,
const std::string* _object_name,
optional_yield y) :
dpp(_s), store(_store), s(_s), size(0) /* XXX */,
object(_object), src_object(_src_object), bucket(_s->bucket.get()),
object_name(_object_name),
tagset(_s->tagset),
metadata_fetched_from_attributes(false),
user_id(_s->user->get_id().id),
user_tenant(_s->user->get_id().tenant),
req_id(_s->req_id),
yield(y)
{
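  // note: dpp is initialized from _s above (req_state is itself a
  // DoutPrefixProvider), presumably so log lines carry the request's
  // prefix; the _dpp argument is otherwise unused in this ctor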
filter_amz_meta(x_meta_map, _s->info.x_meta_map);
}
reservation_t::reservation_t(const DoutPrefixProvider* _dpp,
rgw::sal::RadosStore* _store,
rgw::sal::Object* _object,
rgw::sal::Object* _src_object,
rgw::sal::Bucket* _bucket,
const std::string& _user_id,
const std::string& _user_tenant,
const std::string& _req_id,
optional_yield y) :
dpp(_dpp), store(_store), s(nullptr), size(0) /* XXX */,
object(_object), src_object(_src_object), bucket(_bucket),
object_name(nullptr),
metadata_fetched_from_attributes(false),
user_id(_user_id),
user_tenant(_user_tenant),
req_id(_req_id),
yield(y)
{}
reservation_t::~reservation_t() {
publish_abort(*this);
}
void rgw_topic_stats::dump(Formatter *f) const {
f->open_object_section("Topic Stats");
f->dump_int("Reservations", queue_reservations);
f->dump_int("Size", queue_size);
f->dump_int("Entries", queue_entries);
f->close_section();
}
} // namespace rgw::notify
ceph-main/src/rgw/driver/rados/rgw_notify.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include "common/ceph_time.h"
#include "include/common_fwd.h"
#include "rgw_notify_event_type.h"
#include "common/async/yield_context.h"
#include "cls/2pc_queue/cls_2pc_queue_types.h"
#include "rgw_pubsub.h"
// forward declarations
namespace rgw::sal {
class RadosStore;
class RGWObject;
}
class RGWRados;
struct rgw_obj_key;
namespace rgw::notify {
// initialize the notification manager
// the notification manager dequeues the 2-phase-commit queues
// and sends the notifications to the endpoints
bool init(CephContext* cct, rgw::sal::RadosStore* store, const DoutPrefixProvider *dpp);
// shutdown the notification manager
void shutdown();
// create persistent delivery queue for a topic (endpoint)
// this operation also adds the topic name to the common (to all RGWs) list of all topics
int add_persistent_topic(const std::string& topic_name, optional_yield y);
// remove persistent delivery queue for a topic (endpoint)
// this operation also removes the topic name from the common (to all RGWs) list of all topics
int remove_persistent_topic(const std::string& topic_name, optional_yield y);
// same as the above, except that you need to provide the IoCtx; the above uses rgw::notify::Manager::rados_ioctx
int remove_persistent_topic(const DoutPrefixProvider* dpp, librados::IoCtx& rados_ioctx, const std::string& topic_name, optional_yield y);
// struct holding reservation information
// populated in the publish_reserve call
// then used to commit or abort the reservation
struct reservation_t {
struct topic_t {
topic_t(const std::string& _configurationId, const rgw_pubsub_topic& _cfg,
cls_2pc_reservation::id_t _res_id) :
configurationId(_configurationId), cfg(_cfg), res_id(_res_id) {}
const std::string configurationId;
const rgw_pubsub_topic cfg;
// res_id is reset after topic is committed/aborted
cls_2pc_reservation::id_t res_id;
};
const DoutPrefixProvider* const dpp;
std::vector<topic_t> topics;
rgw::sal::RadosStore* const store;
const req_state* const s;
size_t size;
rgw::sal::Object* const object;
rgw::sal::Object* const src_object; // may differ from object
rgw::sal::Bucket* const bucket;
const std::string* const object_name;
boost::optional<const RGWObjTags&> tagset;
meta_map_t x_meta_map; // metadata cached by value
bool metadata_fetched_from_attributes;
const std::string user_id;
const std::string user_tenant;
const std::string req_id;
optional_yield yield;
/* ctor for rgw_op callers */
reservation_t(const DoutPrefixProvider* _dpp,
rgw::sal::RadosStore* _store,
const req_state* _s,
rgw::sal::Object* _object,
rgw::sal::Object* _src_object,
const std::string* _object_name,
optional_yield y);
/* ctor for non-request caller (e.g., lifecycle) */
reservation_t(const DoutPrefixProvider* _dpp,
rgw::sal::RadosStore* _store,
rgw::sal::Object* _object,
rgw::sal::Object* _src_object,
rgw::sal::Bucket* _bucket,
const std::string& _user_id,
const std::string& _user_tenant,
const std::string& _req_id,
optional_yield y);
  // the dtor guards against resource leaks by aborting the reservation
  // if it was not already committed or aborted
~reservation_t();
};
struct rgw_topic_stats {
std::size_t queue_reservations; // number of reservations
uint64_t queue_size; // in bytes
uint32_t queue_entries; // number of entries
void dump(Formatter *f) const;
};
// create a reservation on the 2-phase-commit queue
int publish_reserve(const DoutPrefixProvider *dpp,
EventType event_type,
reservation_t& reservation,
const RGWObjTags* req_tags);
// commit the reservation to the queue
int publish_commit(rgw::sal::Object* obj,
uint64_t size,
const ceph::real_time& mtime,
const std::string& etag,
const std::string& version,
EventType event_type,
reservation_t& reservation,
const DoutPrefixProvider *dpp);
// cancel the reservation
int publish_abort(reservation_t& reservation);
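//
// typical call sequence (a sketch only; error handling and the surrounding
// request plumbing are elided, and 'res', 'obj', 'y' stand in for
// caller-owned state):
//
//   reservation_t res(dpp, store, s, obj, nullptr, nullptr, y);
//   int ret = publish_reserve(dpp, rgw::notify::ObjectCreatedPut, res, nullptr);
//   // ... perform the object write ...
//   ret = publish_commit(obj, size, mtime, etag, version,
//                        rgw::notify::ObjectCreatedPut, res, dpp);
//   // if neither commit nor abort runs, ~reservation_t() aborts the reservation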
int get_persistent_queue_stats_by_topic_name(const DoutPrefixProvider *dpp, librados::IoCtx &rados_ioctx,
const std::string &topic_name, rgw_topic_stats &stats, optional_yield y);
}
ceph-main/src/rgw/driver/rados/rgw_obj_manifest.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_obj_manifest.h"
#include "services/svc_zone.h"
#include "rgw_rados.h"
#include "rgw_bucket.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
int RGWObjManifest::generator::create_next(uint64_t ofs)
{
if (ofs < last_ofs) /* only going forward */
return -EINVAL;
uint64_t max_head_size = manifest->get_max_head_size();
if (ofs < max_head_size) {
manifest->set_head_size(ofs);
}
if (ofs >= max_head_size) {
manifest->set_head_size(max_head_size);
cur_stripe = (ofs - max_head_size) / rule.stripe_max_size;
cur_stripe_size = rule.stripe_max_size;
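    // in the first part the head object occupies stripe 0,
    // so tail stripes are shifted by one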
if (cur_part_id == 0 && max_head_size > 0) {
cur_stripe++;
}
}
last_ofs = ofs;
manifest->set_obj_size(ofs);
manifest->get_implicit_location(cur_part_id, cur_stripe, ofs, NULL, &cur_obj);
return 0;
}
int RGWObjManifest::append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup,
const RGWZoneParams& zone_params)
{
if (explicit_objs || m.explicit_objs) {
return append_explicit(dpp, m, zonegroup, zone_params);
}
if (rules.empty()) {
*this = m;
return 0;
}
string override_prefix;
if (prefix.empty()) {
prefix = m.prefix;
}
if (prefix != m.prefix) {
override_prefix = m.prefix;
}
map<uint64_t, RGWObjManifestRule>::iterator miter = m.rules.begin();
if (miter == m.rules.end()) {
return append_explicit(dpp, m, zonegroup, zone_params);
}
for (; miter != m.rules.end(); ++miter) {
map<uint64_t, RGWObjManifestRule>::reverse_iterator last_rule = rules.rbegin();
RGWObjManifestRule& rule = last_rule->second;
if (rule.part_size == 0) {
rule.part_size = obj_size - rule.start_ofs;
}
RGWObjManifestRule& next_rule = miter->second;
if (!next_rule.part_size) {
next_rule.part_size = m.obj_size - next_rule.start_ofs;
}
string rule_prefix = prefix;
if (!rule.override_prefix.empty()) {
rule_prefix = rule.override_prefix;
}
string next_rule_prefix = m.prefix;
if (!next_rule.override_prefix.empty()) {
next_rule_prefix = next_rule.override_prefix;
}
if (rule.part_size != next_rule.part_size ||
rule.stripe_max_size != next_rule.stripe_max_size ||
rule_prefix != next_rule_prefix) {
if (next_rule_prefix != prefix) {
append_rules(m, miter, &next_rule_prefix);
} else {
append_rules(m, miter, NULL);
}
break;
}
uint64_t expected_part_num = rule.start_part_num + 1;
if (rule.part_size > 0) {
expected_part_num = rule.start_part_num + (obj_size + next_rule.start_ofs - rule.start_ofs) / rule.part_size;
}
if (expected_part_num != next_rule.start_part_num) {
append_rules(m, miter, NULL);
break;
}
}
set_obj_size(obj_size + m.obj_size);
return 0;
}
void RGWObjManifest::append_rules(RGWObjManifest& m, map<uint64_t, RGWObjManifestRule>::iterator& miter,
string *override_prefix)
{
for (; miter != m.rules.end(); ++miter) {
RGWObjManifestRule rule = miter->second;
rule.start_ofs += obj_size;
if (override_prefix)
rule.override_prefix = *override_prefix;
rules[rule.start_ofs] = rule;
}
}
void RGWObjManifest::convert_to_explicit(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params)
{
if (explicit_objs) {
return;
}
obj_iterator iter = obj_begin(dpp);
while (iter != obj_end(dpp)) {
RGWObjManifestPart& part = objs[iter.get_stripe_ofs()];
const rgw_obj_select& os = iter.get_location();
const rgw_raw_obj& raw_loc = os.get_raw_obj(zonegroup, zone_params);
part.loc_ofs = 0;
uint64_t ofs = iter.get_stripe_ofs();
if (ofs == 0) {
part.loc = obj;
} else {
RGWSI_Tier_RADOS::raw_obj_to_obj(tail_placement.bucket, raw_loc, &part.loc);
}
++iter;
uint64_t next_ofs = iter.get_stripe_ofs();
part.size = next_ofs - ofs;
}
explicit_objs = true;
rules.clear();
prefix.clear();
}
int RGWObjManifest::append_explicit(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params)
{
if (!explicit_objs) {
convert_to_explicit(dpp, zonegroup, zone_params);
}
if (!m.explicit_objs) {
m.convert_to_explicit(dpp, zonegroup, zone_params);
}
map<uint64_t, RGWObjManifestPart>::iterator iter;
uint64_t base = obj_size;
for (iter = m.objs.begin(); iter != m.objs.end(); ++iter) {
RGWObjManifestPart& part = iter->second;
objs[base + iter->first] = part;
}
obj_size += m.obj_size;
return 0;
}
bool RGWObjManifest::get_rule(uint64_t ofs, RGWObjManifestRule *rule)
{
if (rules.empty()) {
return false;
}
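  // find the rule in effect at 'ofs': upper_bound() returns the first rule
  // starting past 'ofs', so step back one entry when possible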
map<uint64_t, RGWObjManifestRule>::iterator iter = rules.upper_bound(ofs);
if (iter != rules.begin()) {
--iter;
}
*rule = iter->second;
return true;
}
int RGWObjManifest::generator::create_begin(CephContext *cct, RGWObjManifest *_m,
const rgw_placement_rule& head_placement_rule,
const rgw_placement_rule *tail_placement_rule,
const rgw_bucket& _b, const rgw_obj& _obj)
{
manifest = _m;
if (!tail_placement_rule) {
manifest->set_tail_placement(head_placement_rule, _b);
} else {
rgw_placement_rule new_tail_rule = *tail_placement_rule;
new_tail_rule.inherit_from(head_placement_rule);
manifest->set_tail_placement(new_tail_rule, _b);
}
manifest->set_head(head_placement_rule, _obj, 0);
last_ofs = 0;
if (manifest->get_prefix().empty()) {
char buf[33];
gen_rand_alphanumeric(cct, buf, sizeof(buf) - 1);
string oid_prefix = ".";
oid_prefix.append(buf);
oid_prefix.append("_");
manifest->set_prefix(oid_prefix);
}
bool found = manifest->get_rule(0, &rule);
if (!found) {
derr << "ERROR: manifest->get_rule() could not find rule" << dendl;
return -EIO;
}
uint64_t head_size = manifest->get_head_size();
if (head_size > 0) {
cur_stripe_size = head_size;
} else {
cur_stripe_size = rule.stripe_max_size;
}
cur_part_id = rule.start_part_num;
manifest->get_implicit_location(cur_part_id, cur_stripe, 0, NULL, &cur_obj);
  // normal object, not generated through a copy operation
manifest->set_tail_instance(_obj.key.instance);
return 0;
}
void RGWObjManifestPart::generate_test_instances(std::list<RGWObjManifestPart*>& o)
{
o.push_back(new RGWObjManifestPart);
RGWObjManifestPart *p = new RGWObjManifestPart;
rgw_bucket b;
init_bucket(&b, "tenant", "bucket", ".pool", ".index_pool", "marker_", "12");
p->loc = rgw_obj(b, "object");
p->loc_ofs = 512 * 1024;
p->size = 128 * 1024;
o.push_back(p);
}
void RGWObjManifest::generate_test_instances(std::list<RGWObjManifest*>& o)
{
RGWObjManifest *m = new RGWObjManifest;
map<uint64_t, RGWObjManifestPart> objs;
uint64_t total_size = 0;
for (int i = 0; i<10; i++) {
RGWObjManifestPart p;
rgw_bucket b;
init_bucket(&b, "tenant", "bucket", ".pool", ".index_pool", "marker_", "12");
p.loc = rgw_obj(b, "object");
p.loc_ofs = 0;
p.size = 512 * 1024;
total_size += p.size;
objs[total_size] = p;
}
m->set_explicit(total_size, objs);
o.push_back(m);
o.push_back(new RGWObjManifest);
}
void RGWObjManifestPart::dump(Formatter *f) const
{
f->open_object_section("loc");
loc.dump(f);
f->close_section();
f->dump_unsigned("loc_ofs", loc_ofs);
f->dump_unsigned("size", size);
}
void RGWObjManifest::obj_iterator::dump(Formatter *f) const
{
f->dump_unsigned("part_ofs", part_ofs);
f->dump_unsigned("stripe_ofs", stripe_ofs);
f->dump_unsigned("ofs", ofs);
f->dump_unsigned("stripe_size", stripe_size);
f->dump_int("cur_part_id", cur_part_id);
f->dump_int("cur_stripe", cur_stripe);
f->dump_string("cur_override_prefix", cur_override_prefix);
f->dump_object("location", location);
}
void RGWObjManifest::dump(Formatter *f) const
{
map<uint64_t, RGWObjManifestPart>::const_iterator iter = objs.begin();
f->open_array_section("objs");
for (; iter != objs.end(); ++iter) {
f->dump_unsigned("ofs", iter->first);
f->open_object_section("part");
iter->second.dump(f);
f->close_section();
}
f->close_section();
f->dump_unsigned("obj_size", obj_size);
::encode_json("explicit_objs", explicit_objs, f);
::encode_json("head_size", head_size, f);
::encode_json("max_head_size", max_head_size, f);
::encode_json("prefix", prefix, f);
::encode_json("rules", rules, f);
::encode_json("tail_instance", tail_instance, f);
::encode_json("tail_placement", tail_placement, f);
::encode_json("tier_type", tier_type, f);
if (tier_type == "cloud-s3") {
::encode_json("tier_config", tier_config, f);
}
  // nullptr being passed into iterators since there
  // is no cct and we aren't doing anything with these
  // iterators that would write to the log
f->dump_object("begin_iter", obj_begin(nullptr));
f->dump_object("end_iter", obj_end(nullptr));
}
void RGWObjManifestRule::dump(Formatter *f) const
{
encode_json("start_part_num", start_part_num, f);
encode_json("start_ofs", start_ofs, f);
encode_json("part_size", part_size, f);
encode_json("stripe_max_size", stripe_max_size, f);
encode_json("override_prefix", override_prefix, f);
}
void rgw_obj_select::dump(Formatter *f) const
{
f->dump_string("placement_rule", placement_rule.to_str());
f->dump_object("obj", obj);
f->dump_object("raw_obj", raw_obj);
f->dump_bool("is_raw", is_raw);
}
void RGWObjTier::dump(Formatter *f) const
{
encode_json("name", name, f);
encode_json("tier_placement", tier_placement, f);
encode_json("is_multipart_upload", is_multipart_upload, f);
}
// returns true on success, false on failure
static bool rgw_get_obj_data_pool(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params,
const rgw_placement_rule& head_placement_rule,
const rgw_obj& obj, rgw_pool *pool)
{
if (!zone_params.get_head_data_pool(head_placement_rule, obj, pool)) {
RGWZonePlacementInfo placement;
if (!zone_params.get_placement(zonegroup.default_placement.name, &placement)) {
return false;
}
if (!obj.in_extra_data) {
*pool = placement.get_data_pool(zonegroup.default_placement.storage_class);
} else {
*pool = placement.get_data_extra_pool();
}
}
return true;
}
static bool rgw_obj_to_raw(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params,
const rgw_placement_rule& head_placement_rule,
const rgw_obj& obj, rgw_raw_obj *raw_obj)
{
get_obj_bucket_and_oid_loc(obj, raw_obj->oid, raw_obj->loc);
return rgw_get_obj_data_pool(zonegroup, zone_params, head_placement_rule, obj, &raw_obj->pool);
}
rgw_raw_obj rgw_obj_select::get_raw_obj(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params) const
{
if (!is_raw) {
rgw_raw_obj r;
rgw_obj_to_raw(zonegroup, zone_params, placement_rule, obj, &r);
return r;
}
return raw_obj;
}
// returns true on success, false on failure
bool RGWRados::get_obj_data_pool(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_pool *pool)
{
return rgw_get_obj_data_pool(svc.zone->get_zonegroup(), svc.zone->get_zone_params(), placement_rule, obj, pool);
}
ceph-main/src/rgw/driver/rados/rgw_obj_manifest.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
/* N.B., this header defines fundamental serialized types. Do not
* introduce changes or include files which can only be compiled in
* radosgw or OSD contexts (e.g., rgw_sal.h, rgw_common.h)
*/
#pragma once
#include "rgw_zone_types.h"
#include "rgw_bucket_types.h"
#include "rgw_obj_types.h"
#include "rgw_placement_types.h"
#include "common/dout.h"
#include "common/Formatter.h"
class RGWSI_Zone;
struct RGWZoneGroup;
struct RGWZoneParams;
class RGWRados;
namespace rgw { namespace sal {
class RadosStore;
} };
class rgw_obj_select {
rgw_placement_rule placement_rule;
rgw_obj obj;
rgw_raw_obj raw_obj;
bool is_raw;
public:
rgw_obj_select() : is_raw(false) {}
explicit rgw_obj_select(const rgw_obj& _obj) : obj(_obj), is_raw(false) {}
explicit rgw_obj_select(const rgw_raw_obj& _raw_obj) : raw_obj(_raw_obj), is_raw(true) {}
rgw_obj_select(const rgw_obj_select& rhs) {
placement_rule = rhs.placement_rule;
is_raw = rhs.is_raw;
if (is_raw) {
raw_obj = rhs.raw_obj;
} else {
obj = rhs.obj;
}
}
rgw_raw_obj get_raw_obj(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params) const;
rgw_raw_obj get_raw_obj(RGWRados* store) const;
rgw_obj_select& operator=(const rgw_obj& rhs) {
obj = rhs;
is_raw = false;
return *this;
}
rgw_obj_select& operator=(const rgw_raw_obj& rhs) {
raw_obj = rhs;
is_raw = true;
return *this;
}
void set_placement_rule(const rgw_placement_rule& rule) {
placement_rule = rule;
}
void dump(Formatter *f) const;
};
struct RGWObjManifestPart {
rgw_obj loc; /* the object where the data is located */
uint64_t loc_ofs; /* the offset at that object where the data is located */
uint64_t size; /* the part size */
RGWObjManifestPart() : loc_ofs(0), size(0) {}
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(loc, bl);
encode(loc_ofs, bl);
encode(size, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl);
decode(loc, bl);
decode(loc_ofs, bl);
decode(size, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWObjManifestPart*>& o);
};
WRITE_CLASS_ENCODER(RGWObjManifestPart)
/*
The manifest defines a set of rules for structuring the object parts.
There are a few terms to note:
- head: the head part of the object, which is the part that contains
the first chunk of data. An object might not have a head (as in the
case of multipart-part objects).
- stripe: data portion of a single rgw object that resides on a single
rados object.
- part: a collection of stripes that make a contiguous part of an
object. A regular object will only have one part (although might have
many stripes), a multipart object might have many parts. Each part
has a fixed stripe size, although the last stripe of a part might
be smaller than that. Consecutive parts may be merged if their stripe
value is the same.
*/
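/*
 A worked example (numbers are illustrative only): with a 4 MiB
 stripe_max_size and a 512 KiB head, a 10 MiB regular object maps to:
   head object  : bytes [0, 512K)
   tail stripe 1: bytes [512K, 512K + 4M)
   tail stripe 2: bytes [512K + 4M, 512K + 8M)
   tail stripe 3: the remainder
 all covered by a single rule. A multipart object instead gets one part
 per upload part, each part striped the same way.
 */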
struct RGWObjManifestRule {
uint32_t start_part_num;
uint64_t start_ofs;
uint64_t part_size; /* each part size, 0 if there's no part size, meaning it's unlimited */
uint64_t stripe_max_size; /* underlying obj max size */
std::string override_prefix;
RGWObjManifestRule() : start_part_num(0), start_ofs(0), part_size(0), stripe_max_size(0) {}
RGWObjManifestRule(uint32_t _start_part_num, uint64_t _start_ofs, uint64_t _part_size, uint64_t _stripe_max_size) :
start_part_num(_start_part_num), start_ofs(_start_ofs), part_size(_part_size), stripe_max_size(_stripe_max_size) {}
void encode(bufferlist& bl) const {
ENCODE_START(2, 1, bl);
encode(start_part_num, bl);
encode(start_ofs, bl);
encode(part_size, bl);
encode(stripe_max_size, bl);
encode(override_prefix, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(2, bl);
decode(start_part_num, bl);
decode(start_ofs, bl);
decode(part_size, bl);
decode(stripe_max_size, bl);
if (struct_v >= 2)
decode(override_prefix, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(RGWObjManifestRule)
struct RGWObjTier {
std::string name;
RGWZoneGroupPlacementTier tier_placement;
bool is_multipart_upload{false};
RGWObjTier(): name("none") {}
void encode(bufferlist& bl) const {
ENCODE_START(2, 2, bl);
encode(name, bl);
encode(tier_placement, bl);
encode(is_multipart_upload, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN(2, 2, 2, bl);
decode(name, bl);
decode(tier_placement, bl);
decode(is_multipart_upload, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(RGWObjTier)
class RGWObjManifest {
protected:
bool explicit_objs{false}; /* really old manifest? */
std::map<uint64_t, RGWObjManifestPart> objs;
uint64_t obj_size{0};
rgw_obj obj;
uint64_t head_size{0};
rgw_placement_rule head_placement_rule;
uint64_t max_head_size{0};
std::string prefix;
rgw_bucket_placement tail_placement; /* might be different than the original bucket,
as object might have been copied across pools */
std::map<uint64_t, RGWObjManifestRule> rules;
std::string tail_instance; /* tail object's instance */
std::string tier_type;
RGWObjTier tier_config;
void convert_to_explicit(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params);
int append_explicit(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params);
void append_rules(RGWObjManifest& m, std::map<uint64_t, RGWObjManifestRule>::iterator& iter, std::string *override_prefix);
public:
RGWObjManifest() = default;
RGWObjManifest(const RGWObjManifest& rhs) {
*this = rhs;
}
RGWObjManifest& operator=(const RGWObjManifest& rhs) {
explicit_objs = rhs.explicit_objs;
objs = rhs.objs;
obj_size = rhs.obj_size;
obj = rhs.obj;
head_size = rhs.head_size;
max_head_size = rhs.max_head_size;
prefix = rhs.prefix;
tail_placement = rhs.tail_placement;
rules = rhs.rules;
tail_instance = rhs.tail_instance;
tier_type = rhs.tier_type;
tier_config = rhs.tier_config;
return *this;
}
std::map<uint64_t, RGWObjManifestPart>& get_explicit_objs() {
return objs;
}
void set_explicit(uint64_t _size, std::map<uint64_t, RGWObjManifestPart>& _objs) {
explicit_objs = true;
objs.swap(_objs);
set_obj_size(_size);
}
void get_implicit_location(uint64_t cur_part_id, uint64_t cur_stripe, uint64_t ofs,
std::string *override_prefix, rgw_obj_select *location) const;
void set_trivial_rule(uint64_t tail_ofs, uint64_t stripe_max_size) {
RGWObjManifestRule rule(0, tail_ofs, 0, stripe_max_size);
rules[0] = rule;
max_head_size = tail_ofs;
}
void set_multipart_part_rule(uint64_t stripe_max_size, uint64_t part_num) {
RGWObjManifestRule rule(0, 0, 0, stripe_max_size);
rule.start_part_num = part_num;
rules[0] = rule;
max_head_size = 0;
}
void encode(bufferlist& bl) const {
ENCODE_START(8, 6, bl);
encode(obj_size, bl);
encode(objs, bl);
encode(explicit_objs, bl);
encode(obj, bl);
encode(head_size, bl);
encode(max_head_size, bl);
encode(prefix, bl);
encode(rules, bl);
bool encode_tail_bucket = !(tail_placement.bucket == obj.bucket);
encode(encode_tail_bucket, bl);
if (encode_tail_bucket) {
encode(tail_placement.bucket, bl);
}
bool encode_tail_instance = (tail_instance != obj.key.instance);
encode(encode_tail_instance, bl);
if (encode_tail_instance) {
encode(tail_instance, bl);
}
encode(head_placement_rule, bl);
encode(tail_placement.placement_rule, bl);
encode(tier_type, bl);
encode(tier_config, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START_LEGACY_COMPAT_LEN_32(7, 2, 2, bl);
decode(obj_size, bl);
decode(objs, bl);
if (struct_v >= 3) {
decode(explicit_objs, bl);
decode(obj, bl);
decode(head_size, bl);
decode(max_head_size, bl);
decode(prefix, bl);
decode(rules, bl);
} else {
explicit_objs = true;
if (!objs.empty()) {
std::map<uint64_t, RGWObjManifestPart>::iterator iter = objs.begin();
obj = iter->second.loc;
head_size = iter->second.size;
max_head_size = head_size;
}
}
if (explicit_objs && head_size > 0 && !objs.empty()) {
/* patch up manifest due to issue 16435:
* the first object in the explicit objs list might not be the one we need to access, use the
* head object instead if set. This would happen if we had an old object that was created
* when the explicit objs manifest was around, and it got copied.
*/
rgw_obj& obj_0 = objs[0].loc;
if (!obj_0.get_oid().empty() && obj_0.key.ns.empty()) {
objs[0].loc = obj;
objs[0].size = head_size;
}
}
if (struct_v >= 4) {
if (struct_v < 6) {
decode(tail_placement.bucket, bl);
} else {
bool need_to_decode;
decode(need_to_decode, bl);
if (need_to_decode) {
decode(tail_placement.bucket, bl);
} else {
tail_placement.bucket = obj.bucket;
}
}
}
if (struct_v >= 5) {
if (struct_v < 6) {
decode(tail_instance, bl);
} else {
bool need_to_decode;
decode(need_to_decode, bl);
if (need_to_decode) {
decode(tail_instance, bl);
} else {
tail_instance = obj.key.instance;
}
}
} else { // old object created before 'tail_instance' field added to manifest
tail_instance = obj.key.instance;
}
if (struct_v >= 7) {
decode(head_placement_rule, bl);
decode(tail_placement.placement_rule, bl);
}
if (struct_v >= 8) {
decode(tier_type, bl);
decode(tier_config, bl);
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<RGWObjManifest*>& o);
int append(const DoutPrefixProvider *dpp, RGWObjManifest& m, const RGWZoneGroup& zonegroup,
const RGWZoneParams& zone_params);
bool get_rule(uint64_t ofs, RGWObjManifestRule *rule);
bool empty() const {
if (explicit_objs)
return objs.empty();
return rules.empty();
}
bool has_explicit_objs() const {
return explicit_objs;
}
bool has_tail() const {
if (explicit_objs) {
if (objs.size() == 1) {
auto iter = objs.begin();
const rgw_obj& o = iter->second.loc;
return !(obj == o);
}
return (objs.size() >= 2);
}
return (obj_size > head_size);
}
void set_head(const rgw_placement_rule& placement_rule, const rgw_obj& _o, uint64_t _s) {
head_placement_rule = placement_rule;
obj = _o;
head_size = _s;
if (explicit_objs && head_size > 0) {
objs[0].loc = obj;
objs[0].size = head_size;
}
}
const rgw_obj& get_obj() const {
return obj;
}
void set_tail_placement(const rgw_placement_rule& placement_rule, const rgw_bucket& _b) {
tail_placement.placement_rule = placement_rule;
tail_placement.bucket = _b;
}
const rgw_bucket_placement& get_tail_placement() const {
return tail_placement;
}
const rgw_placement_rule& get_head_placement_rule() const {
return head_placement_rule;
}
void set_prefix(const std::string& _p) {
prefix = _p;
}
const std::string& get_prefix() const {
return prefix;
}
void set_tail_instance(const std::string& _ti) {
tail_instance = _ti;
}
const std::string& get_tail_instance() const {
return tail_instance;
}
void set_head_size(uint64_t _s) {
head_size = _s;
}
void set_obj_size(uint64_t s) {
obj_size = s;
}
uint64_t get_obj_size() const {
return obj_size;
}
uint64_t get_head_size() const {
return head_size;
}
uint64_t get_max_head_size() const {
return max_head_size;
}
const std::string& get_tier_type() {
return tier_type;
}
inline void set_tier_type(std::string value) {
/* Only "cloud-s3" tier-type is supported for now */
if (value == "cloud-s3") {
tier_type = value;
}
}
inline void set_tier_config(RGWObjTier t) {
/* Set only if tier_type set to "cloud-s3" */
if (tier_type != "cloud-s3")
return;
tier_config.name = t.name;
tier_config.tier_placement = t.tier_placement;
tier_config.is_multipart_upload = t.is_multipart_upload;
}
  inline void get_tier_config(RGWObjTier* t) {
if (tier_type != "cloud-s3")
return;
t->name = tier_config.name;
t->tier_placement = tier_config.tier_placement;
t->is_multipart_upload = tier_config.is_multipart_upload;
}
class obj_iterator {
const DoutPrefixProvider *dpp;
const RGWObjManifest *manifest = nullptr;
uint64_t part_ofs = 0; /* where current part starts */
uint64_t stripe_ofs = 0; /* where current stripe starts */
uint64_t ofs = 0; /* current position within the object */
uint64_t stripe_size = 0; /* current part size */
int cur_part_id = 0;
int cur_stripe = 0;
std::string cur_override_prefix;
rgw_obj_select location;
std::map<uint64_t, RGWObjManifestRule>::const_iterator rule_iter;
std::map<uint64_t, RGWObjManifestRule>::const_iterator next_rule_iter;
std::map<uint64_t, RGWObjManifestPart>::const_iterator explicit_iter;
void update_explicit_pos();
public:
obj_iterator() = default;
explicit obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m)
: obj_iterator(_dpp, _m, 0)
{}
obj_iterator(const DoutPrefixProvider *_dpp, const RGWObjManifest *_m, uint64_t _ofs) : dpp(_dpp), manifest(_m) {
seek(_ofs);
}
void seek(uint64_t ofs);
void operator++();
bool operator==(const obj_iterator& rhs) const {
return (ofs == rhs.ofs);
}
bool operator!=(const obj_iterator& rhs) const {
return (ofs != rhs.ofs);
}
const rgw_obj_select& get_location() {
return location;
}
/* where current part starts */
uint64_t get_part_ofs() const {
return part_ofs;
}
/* start of current stripe */
uint64_t get_stripe_ofs() {
if (manifest->explicit_objs) {
return explicit_iter->first;
}
return stripe_ofs;
}
/* current ofs relative to start of rgw object */
uint64_t get_ofs() const {
return ofs;
}
/* stripe number */
int get_cur_stripe() const {
return cur_stripe;
}
/* current stripe size */
uint64_t get_stripe_size() {
if (manifest->explicit_objs) {
return explicit_iter->second.size;
}
return stripe_size;
}
/* offset where data starts within current stripe */
uint64_t location_ofs() {
if (manifest->explicit_objs) {
return explicit_iter->second.loc_ofs;
}
return 0; /* all stripes start at zero offset */
}
void update_location();
void dump(Formatter *f) const;
}; // class obj_iterator
obj_iterator obj_begin(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this}; }
obj_iterator obj_end(const DoutPrefixProvider *dpp) const { return obj_iterator{dpp, this, obj_size}; }
obj_iterator obj_find(const DoutPrefixProvider *dpp, uint64_t ofs) const {
return obj_iterator{dpp, this, std::min(ofs, obj_size)};
}
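  /*
   * iteration sketch (assumes a populated manifest 'm'; visits one rados
   * object per stripe):
   *
   *   for (auto iter = m.obj_begin(dpp); iter != m.obj_end(dpp); ++iter) {
   *     const rgw_obj_select& loc = iter.get_location();
   *     uint64_t len = iter.get_stripe_size();
   *     // read/copy 'len' bytes starting at iter.get_ofs()
   *   }
   */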
/*
* simple object generator. Using a simple single rule manifest.
*/
class generator {
RGWObjManifest *manifest;
uint64_t last_ofs;
uint64_t cur_part_ofs;
int cur_part_id;
int cur_stripe;
uint64_t cur_stripe_size;
std::string cur_oid;
std::string oid_prefix;
rgw_obj_select cur_obj;
RGWObjManifestRule rule;
public:
generator() : manifest(NULL), last_ofs(0), cur_part_ofs(0), cur_part_id(0),
cur_stripe(0), cur_stripe_size(0) {}
int create_begin(CephContext *cct, RGWObjManifest *manifest,
const rgw_placement_rule& head_placement_rule,
const rgw_placement_rule *tail_placement_rule,
const rgw_bucket& bucket,
const rgw_obj& obj);
int create_next(uint64_t ofs);
rgw_raw_obj get_cur_obj(RGWZoneGroup& zonegroup, RGWZoneParams& zone_params) { return cur_obj.get_raw_obj(zonegroup, zone_params); }
rgw_raw_obj get_cur_obj(RGWRados* store) const { return cur_obj.get_raw_obj(store); }
/* total max size of current stripe (including head obj) */
uint64_t cur_stripe_max_size() const {
return cur_stripe_size;
}
};
};
WRITE_CLASS_ENCODER(RGWObjManifest)
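/*
 * generator usage sketch (illustrative only; error checks are elided and
 * names such as 'cct', 'placement_rule', 'bucket', 'obj' are caller-supplied):
 *
 *   RGWObjManifest manifest;
 *   RGWObjManifest::generator gen;
 *   manifest.set_trivial_rule(head_size, stripe_max_size);
 *   gen.create_begin(cct, &manifest, placement_rule, nullptr, bucket, obj);
 *   // each time the write crosses a stripe boundary:
 *   gen.create_next(new_end_ofs);  // extends the manifest, updates the current tail object
 */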
ceph-main/src/rgw/driver/rados/rgw_object_expirer_core.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <errno.h>
#include <iostream>
#include <sstream>
#include <string>
#include "auth/Crypto.h"
#include "common/armor.h"
#include "common/ceph_json.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/Formatter.h"
#include "common/errno.h"
#include "global/global_init.h"
#include "include/utime.h"
#include "include/str_list.h"
#include "rgw_user.h"
#include "rgw_bucket.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h"
#include "rgw_log.h"
#include "rgw_formats.h"
#include "rgw_usage.h"
#include "rgw_object_expirer_core.h"
#include "rgw_zone.h"
#include "rgw_sal_rados.h"
#include "services/svc_rados.h"
#include "services/svc_zone.h"
#include "services/svc_sys_obj.h"
#include "services/svc_bi_rados.h"
#include "cls/lock/cls_lock_client.h"
#include "cls/timeindex/cls_timeindex_client.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
static string objexp_lock_name = "gc_process";
static string objexp_hint_get_shardname(int shard_num)
{
char buf[64];
snprintf(buf, sizeof(buf), "obj_delete_at_hint.%010u", (unsigned)shard_num);
return buf;
}
static int objexp_key_shard(const rgw_obj_index_key& key, int num_shards)
{
string obj_key = key.name + key.instance;
return RGWSI_BucketIndex_RADOS::bucket_shard_index(obj_key, num_shards);
}
static string objexp_hint_get_keyext(const string& tenant_name,
const string& bucket_name,
const string& bucket_id,
const rgw_obj_key& obj_key) {
return tenant_name + (tenant_name.empty() ? "" : ":") + bucket_name + ":" + bucket_id +
":" + obj_key.name + ":" + obj_key.instance;
}
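// e.g. (hypothetical values) tenant "t1", bucket "photos" with id "abc123" and
// object "img.jpg" with no instance yield the key "t1:photos:abc123:img.jpg:"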
static void objexp_get_shard(int shard_num,
string *shard)
{
*shard = objexp_hint_get_shardname(shard_num);
}
static int objexp_hint_parse(const DoutPrefixProvider *dpp, CephContext *cct, cls_timeindex_entry &ti_entry,
objexp_hint_entry *hint_entry)
{
try {
auto iter = ti_entry.value.cbegin();
decode(*hint_entry, iter);
  } catch (buffer::error& err) {
    ldpp_dout(dpp, 0) << "ERROR: couldn't decode objexp hint entry" << dendl;
    return -EIO;
  }
  return 0;
}
int RGWObjExpStore::objexp_hint_add(const DoutPrefixProvider *dpp,
const ceph::real_time& delete_at,
const string& tenant_name,
const string& bucket_name,
const string& bucket_id,
const rgw_obj_index_key& obj_key)
{
const string keyext = objexp_hint_get_keyext(tenant_name, bucket_name,
bucket_id, obj_key);
objexp_hint_entry he = {
.tenant = tenant_name,
.bucket_name = bucket_name,
.bucket_id = bucket_id,
.obj_key = obj_key,
.exp_time = delete_at };
bufferlist hebl;
encode(he, hebl);
librados::ObjectWriteOperation op;
cls_timeindex_add(op, utime_t(delete_at), keyext, hebl);
string shard_name = objexp_hint_get_shardname(objexp_key_shard(obj_key, cct->_conf->rgw_objexp_hints_num_shards));
auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, shard_name));
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
return r;
}
return obj.operate(dpp, &op, null_yield);
}
int RGWObjExpStore::objexp_hint_list(const DoutPrefixProvider *dpp,
const string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const int max_entries,
const string& marker,
list<cls_timeindex_entry>& entries, /* out */
string *out_marker, /* out */
bool *truncated) /* out */
{
librados::ObjectReadOperation op;
cls_timeindex_list(op, utime_t(start_time), utime_t(end_time), marker, max_entries, entries,
out_marker, truncated);
auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, oid));
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
return r;
}
bufferlist obl;
int ret = obj.operate(dpp, &op, &obl, null_yield);
if ((ret < 0 ) && (ret != -ENOENT)) {
return ret;
}
if ((ret == -ENOENT) && truncated) {
*truncated = false;
}
return 0;
}
static int cls_timeindex_trim_repeat(const DoutPrefixProvider *dpp,
rgw_rados_ref ref,
const string& oid,
const utime_t& from_time,
const utime_t& to_time,
const string& from_marker,
const string& to_marker, optional_yield y)
{
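  // each cls_timeindex_trim call trims a bounded chunk of entries;
  // -ENODATA from the OSD signals the requested range is fully trimmed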
bool done = false;
do {
librados::ObjectWriteOperation op;
cls_timeindex_trim(op, from_time, to_time, from_marker, to_marker);
int r = rgw_rados_operate(dpp, ref.pool.ioctx(), oid, &op, null_yield);
if (r == -ENODATA)
done = true;
else if (r < 0)
return r;
} while (!done);
return 0;
}
int RGWObjExpStore::objexp_hint_trim(const DoutPrefixProvider *dpp,
const string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const string& from_marker,
const string& to_marker, optional_yield y)
{
auto obj = rados_svc->obj(rgw_raw_obj(driver->svc()->zone->get_zone_params().log_pool, oid));
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to open obj=" << obj << " (r=" << r << ")" << dendl;
return r;
}
auto& ref = obj.get_ref();
int ret = cls_timeindex_trim_repeat(dpp, ref, oid, utime_t(start_time), utime_t(end_time),
from_marker, to_marker, y);
if ((ret < 0 ) && (ret != -ENOENT)) {
return ret;
}
return 0;
}
int RGWObjectExpirer::garbage_single_object(const DoutPrefixProvider *dpp, objexp_hint_entry& hint)
{
RGWBucketInfo bucket_info;
std::unique_ptr<rgw::sal::Bucket> bucket;
int ret = driver->get_bucket(dpp, nullptr, rgw_bucket(hint.tenant, hint.bucket_name, hint.bucket_id), &bucket, null_yield);
if (-ENOENT == ret) {
ldpp_dout(dpp, 15) << "NOTICE: cannot find bucket = " \
<< hint.bucket_name << ". The object must be already removed" << dendl;
return -ERR_PRECONDITION_FAILED;
} else if (ret < 0) {
ldpp_dout(dpp, 1) << "ERROR: could not init bucket = " \
<< hint.bucket_name << "due to ret = " << ret << dendl;
return ret;
}
rgw_obj_key key = hint.obj_key;
if (key.instance.empty()) {
key.instance = "null";
}
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(key);
obj->set_atomic();
ret = obj->delete_object(dpp, null_yield);
return ret;
}
void RGWObjectExpirer::garbage_chunk(const DoutPrefixProvider *dpp,
list<cls_timeindex_entry>& entries, /* in */
bool& need_trim) /* out */
{
need_trim = false;
for (list<cls_timeindex_entry>::iterator iter = entries.begin();
iter != entries.end();
++iter)
{
objexp_hint_entry hint;
ldpp_dout(dpp, 15) << "got removal hint for: " << iter->key_ts.sec() \
<< " - " << iter->key_ext << dendl;
int ret = objexp_hint_parse(dpp, driver->ctx(), *iter, &hint);
if (ret < 0) {
ldpp_dout(dpp, 1) << "cannot parse removal hint for " << hint.obj_key << dendl;
continue;
}
/* PRECOND_FAILED simply means that our hint is not valid.
* We can silently ignore that and move forward. */
ret = garbage_single_object(dpp, hint);
if (ret == -ERR_PRECONDITION_FAILED) {
ldpp_dout(dpp, 15) << "not actual hint for object: " << hint.obj_key << dendl;
} else if (ret < 0) {
ldpp_dout(dpp, 1) << "cannot remove expired object: " << hint.obj_key << dendl;
}
need_trim = true;
}
return;
}
void RGWObjectExpirer::trim_chunk(const DoutPrefixProvider *dpp,
const string& shard,
const utime_t& from,
const utime_t& to,
const string& from_marker,
const string& to_marker, optional_yield y)
{
ldpp_dout(dpp, 20) << "trying to trim removal hints to=" << to
<< ", to_marker=" << to_marker << dendl;
real_time rt_from = from.to_real_time();
real_time rt_to = to.to_real_time();
int ret = exp_store.objexp_hint_trim(dpp, shard, rt_from, rt_to,
from_marker, to_marker, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR during trim: " << ret << dendl;
}
return;
}
bool RGWObjectExpirer::process_single_shard(const DoutPrefixProvider *dpp,
const string& shard,
const utime_t& last_run,
const utime_t& round_start, optional_yield y)
{
string marker;
string out_marker;
bool truncated = false;
bool done = true;
CephContext *cct = driver->ctx();
int num_entries = cct->_conf->rgw_objexp_chunk_size;
int max_secs = cct->_conf->rgw_objexp_gc_interval;
utime_t end = ceph_clock_now();
end += max_secs;
rados::cls::lock::Lock l(objexp_lock_name);
utime_t time(max_secs, 0);
l.set_duration(time);
int ret = l.lock_exclusive(&static_cast<rgw::sal::RadosStore*>(driver)->getRados()->objexp_pool_ctx, shard);
  if (ret < 0) { /* -EBUSY: already locked by another processor */
ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " << shard << dendl;
return false;
}
do {
real_time rt_last = last_run.to_real_time();
real_time rt_start = round_start.to_real_time();
list<cls_timeindex_entry> entries;
ret = exp_store.objexp_hint_list(dpp, shard, rt_last, rt_start,
num_entries, marker, entries,
&out_marker, &truncated);
if (ret < 0) {
ldpp_dout(dpp, 10) << "cannot get removal hints from shard: " << shard
<< dendl;
continue;
}
bool need_trim;
garbage_chunk(dpp, entries, need_trim);
if (need_trim) {
trim_chunk(dpp, shard, last_run, round_start, marker, out_marker, y);
}
utime_t now = ceph_clock_now();
if (now >= end) {
done = false;
break;
}
marker = out_marker;
} while (truncated);
l.unlock(&static_cast<rgw::sal::RadosStore*>(driver)->getRados()->objexp_pool_ctx, shard);
return done;
}
/* Returns true if all shards have been processed successfully. */
bool RGWObjectExpirer::inspect_all_shards(const DoutPrefixProvider *dpp,
const utime_t& last_run,
const utime_t& round_start, optional_yield y)
{
CephContext * const cct = driver->ctx();
int num_shards = cct->_conf->rgw_objexp_hints_num_shards;
bool all_done = true;
for (int i = 0; i < num_shards; i++) {
string shard;
objexp_get_shard(i, &shard);
ldpp_dout(dpp, 20) << "processing shard = " << shard << dendl;
if (! process_single_shard(dpp, shard, last_run, round_start, y)) {
all_done = false;
}
}
return all_done;
}
bool RGWObjectExpirer::going_down()
{
return down_flag;
}
void RGWObjectExpirer::start_processor()
{
worker = new OEWorker(driver->ctx(), this);
worker->create("rgw_obj_expirer");
}
void RGWObjectExpirer::stop_processor()
{
down_flag = true;
if (worker) {
worker->stop();
worker->join();
}
delete worker;
worker = NULL;
}
void *RGWObjectExpirer::OEWorker::entry() {
utime_t last_run;
do {
utime_t start = ceph_clock_now();
ldpp_dout(this, 2) << "object expiration: start" << dendl;
if (oe->inspect_all_shards(this, last_run, start, null_yield)) {
/* All shards have been processed properly. Next time we can start
* from this moment. */
last_run = start;
}
ldpp_dout(this, 2) << "object expiration: stop" << dendl;
if (oe->going_down())
break;
utime_t end = ceph_clock_now();
end -= start;
int secs = cct->_conf->rgw_objexp_gc_interval;
if (secs <= end.sec())
continue; // next round
secs -= end.sec();
std::unique_lock l{lock};
cond.wait_for(l, std::chrono::seconds(secs));
} while (!oe->going_down());
return NULL;
}
void RGWObjectExpirer::OEWorker::stop()
{
std::lock_guard l{lock};
cond.notify_all();
}
CephContext *RGWObjectExpirer::OEWorker::get_cct() const
{
return cct;
}
unsigned RGWObjectExpirer::OEWorker::get_subsys() const
{
return dout_subsys;
}
std::ostream& RGWObjectExpirer::OEWorker::gen_prefix(std::ostream& out) const
{
return out << "rgw object expirer Worker thread: ";
}
ceph-main/src/rgw/driver/rados/rgw_object_expirer_core.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <atomic>
#include <string>
#include <cerrno>
#include <sstream>
#include <iostream>
#include "auth/Crypto.h"
#include "common/armor.h"
#include "common/ceph_json.h"
#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/Formatter.h"
#include "common/errno.h"
#include "common/ceph_mutex.h"
#include "common/Cond.h"
#include "common/Thread.h"
#include "global/global_init.h"
#include "include/common_fwd.h"
#include "include/utime.h"
#include "include/str_list.h"
#include "rgw_sal_rados.h"
class RGWSI_RADOS;
class RGWSI_Zone;
class RGWBucketInfo;
class cls_timeindex_entry;
class RGWObjExpStore {
CephContext *cct;
RGWSI_RADOS *rados_svc;
rgw::sal::RadosStore* driver;
public:
RGWObjExpStore(CephContext *_cct, RGWSI_RADOS *_rados_svc, rgw::sal::RadosStore* _driver) : cct(_cct),
rados_svc(_rados_svc),
driver(_driver) {}
int objexp_hint_add(const DoutPrefixProvider *dpp,
const ceph::real_time& delete_at,
const std::string& tenant_name,
const std::string& bucket_name,
const std::string& bucket_id,
const rgw_obj_index_key& obj_key);
int objexp_hint_list(const DoutPrefixProvider *dpp,
const std::string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const int max_entries,
const std::string& marker,
std::list<cls_timeindex_entry>& entries, /* out */
std::string *out_marker, /* out */
bool *truncated); /* out */
int objexp_hint_trim(const DoutPrefixProvider *dpp,
const std::string& oid,
const ceph::real_time& start_time,
const ceph::real_time& end_time,
const std::string& from_marker,
const std::string& to_marker, optional_yield y);
};
class RGWObjectExpirer {
protected:
rgw::sal::Driver* driver;
RGWObjExpStore exp_store;
class OEWorker : public Thread, public DoutPrefixProvider {
CephContext *cct;
RGWObjectExpirer *oe;
ceph::mutex lock = ceph::make_mutex("OEWorker");
ceph::condition_variable cond;
public:
OEWorker(CephContext * const cct,
RGWObjectExpirer * const oe)
: cct(cct),
oe(oe) {
}
void *entry() override;
void stop();
CephContext *get_cct() const override;
unsigned get_subsys() const override;
std::ostream& gen_prefix(std::ostream& out) const override;
};
OEWorker *worker{nullptr};
std::atomic<bool> down_flag = { false };
public:
explicit RGWObjectExpirer(rgw::sal::Driver* _driver)
: driver(_driver),
exp_store(_driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->rados, static_cast<rgw::sal::RadosStore*>(driver)),
worker(NULL) {
}
~RGWObjectExpirer() {
stop_processor();
}
int hint_add(const DoutPrefixProvider *dpp,
const ceph::real_time& delete_at,
const std::string& tenant_name,
const std::string& bucket_name,
const std::string& bucket_id,
const rgw_obj_index_key& obj_key) {
return exp_store.objexp_hint_add(dpp, delete_at, tenant_name, bucket_name,
bucket_id, obj_key);
}
int garbage_single_object(const DoutPrefixProvider *dpp, objexp_hint_entry& hint);
void garbage_chunk(const DoutPrefixProvider *dpp,
std::list<cls_timeindex_entry>& entries, /* in */
bool& need_trim); /* out */
void trim_chunk(const DoutPrefixProvider *dpp,
const std::string& shard,
const utime_t& from,
const utime_t& to,
const std::string& from_marker,
const std::string& to_marker, optional_yield y);
bool process_single_shard(const DoutPrefixProvider *dpp,
const std::string& shard,
const utime_t& last_run,
const utime_t& round_start, optional_yield y);
bool inspect_all_shards(const DoutPrefixProvider *dpp,
const utime_t& last_run,
const utime_t& round_start, optional_yield y);
bool going_down();
void start_processor();
void stop_processor();
};
ceph-main/src/rgw/driver/rados/rgw_otp.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <errno.h>
#include <string>
#include <map>
#include <boost/algorithm/string.hpp>
#include "common/errno.h"
#include "common/Formatter.h"
#include "common/ceph_json.h"
#include "rgw_otp.h"
#include "rgw_zone.h"
#include "rgw_metadata.h"
#include "include/types.h"
#include "rgw_common.h"
#include "rgw_tools.h"
#include "services/svc_zone.h"
#include "services/svc_meta.h"
#include "services/svc_meta_be.h"
#include "services/svc_meta_be_otp.h"
#include "services/svc_otp.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
class RGWOTPMetadataHandler;
class RGWOTPMetadataObject : public RGWMetadataObject {
friend class RGWOTPMetadataHandler;
otp_devices_list_t devices;
public:
RGWOTPMetadataObject() {}
RGWOTPMetadataObject(otp_devices_list_t&& _devices, const obj_version& v, const real_time m) {
devices = std::move(_devices);
objv = v;
mtime = m;
}
void dump(Formatter *f) const override {
encode_json("devices", devices, f);
}
otp_devices_list_t& get_devs() {
return devices;
}
};
class RGWOTPMetadataHandler : public RGWOTPMetadataHandlerBase {
friend class RGWOTPCtl;
struct Svc {
RGWSI_Zone *zone;
RGWSI_MetaBackend *meta_be;
RGWSI_OTP *otp;
} svc;
int init(RGWSI_Zone *zone,
RGWSI_MetaBackend *_meta_be,
RGWSI_OTP *_otp) {
base_init(zone->ctx(), _otp->get_be_handler().get());
svc.zone = zone;
svc.meta_be = _meta_be;
svc.otp = _otp;
return 0;
}
int call(std::function<int(RGWSI_OTP_BE_Ctx& ctx)> f) {
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
RGWSI_OTP_BE_Ctx ctx(op->ctx());
return f(ctx);
});
}
RGWMetadataObject *get_meta_obj(JSONObj *jo, const obj_version& objv, const ceph::real_time& mtime) override {
otp_devices_list_t devices;
try {
JSONDecoder::decode_json("devices", devices, jo);
} catch (JSONDecoder::err& e) {
return nullptr;
}
return new RGWOTPMetadataObject(std::move(devices), objv, mtime);
}
int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override {
RGWObjVersionTracker objv_tracker;
std::unique_ptr<RGWOTPMetadataObject> mdo(new RGWOTPMetadataObject);
RGWSI_OTP_BE_Ctx be_ctx(op->ctx());
int ret = svc.otp->read_all(be_ctx,
entry,
&mdo->get_devs(),
&mdo->get_mtime(),
&objv_tracker,
y,
dpp);
if (ret < 0) {
return ret;
}
mdo->objv = objv_tracker.read_version;
*obj = mdo.release();
return 0;
}
int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry,
RGWMetadataObject *_obj, RGWObjVersionTracker& objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp,
RGWMDLogSyncType type, bool from_remote_zone) override {
RGWOTPMetadataObject *obj = static_cast<RGWOTPMetadataObject *>(_obj);
RGWSI_OTP_BE_Ctx be_ctx(op->ctx());
int ret = svc.otp->store_all(dpp, be_ctx,
entry,
obj->devices,
obj->mtime,
&objv_tracker,
y);
if (ret < 0) {
return ret;
}
return STATUS_APPLIED;
}
int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override {
RGWSI_MBOTP_RemoveParams params;
RGWSI_OTP_BE_Ctx be_ctx(op->ctx());
return svc.otp->remove_all(dpp, be_ctx,
entry,
&objv_tracker,
y);
}
public:
RGWOTPMetadataHandler() {}
string get_type() override { return "otp"; }
};
RGWOTPCtl::RGWOTPCtl(RGWSI_Zone *zone_svc,
RGWSI_OTP *otp_svc)
{
svc.zone = zone_svc;
svc.otp = otp_svc;
}
void RGWOTPCtl::init(RGWOTPMetadataHandler *_meta_handler)
{
meta_handler = _meta_handler;
be_handler = meta_handler->get_be_handler();
}
int RGWOTPCtl::read_all(const rgw_user& uid,
RGWOTPInfo *info,
optional_yield y,
const DoutPrefixProvider *dpp,
const GetParams& params)
{
info->uid = uid;
return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) {
return svc.otp->read_all(ctx, uid, &info->devices, params.mtime, params.objv_tracker, y, dpp);
});
}
int RGWOTPCtl::store_all(const DoutPrefixProvider *dpp,
const RGWOTPInfo& info,
optional_yield y,
const PutParams& params)
{
return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) {
return svc.otp->store_all(dpp, ctx, info.uid, info.devices, params.mtime, params.objv_tracker, y);
});
}
int RGWOTPCtl::remove_all(const DoutPrefixProvider *dpp,
const rgw_user& uid,
optional_yield y,
const RemoveParams& params)
{
return meta_handler->call([&](RGWSI_OTP_BE_Ctx& ctx) {
return svc.otp->remove_all(dpp, ctx, uid, params.objv_tracker, y);
});
}
RGWMetadataHandler *RGWOTPMetaHandlerAllocator::alloc()
{
return new RGWOTPMetadataHandler();
}
ceph-main/src/rgw/driver/rados/rgw_otp.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_sal_fwd.h"
#include "cls/otp/cls_otp_types.h"
#include "services/svc_meta_be_otp.h"
#include "rgw_basic_types.h"
#include "rgw_metadata.h"
class RGWObjVersionTracker;
class RGWMetadataHandler;
class RGWOTPMetadataHandler;
class RGWSI_Zone;
class RGWSI_OTP;
class RGWSI_MetaBackend;
class RGWOTPMetadataHandlerBase : public RGWMetadataHandler_GenericMetaBE {
public:
virtual ~RGWOTPMetadataHandlerBase() {}
virtual int init(RGWSI_Zone *zone,
RGWSI_MetaBackend *_meta_be,
RGWSI_OTP *_otp) = 0;
};
class RGWOTPMetaHandlerAllocator {
public:
static RGWMetadataHandler *alloc();
};
struct RGWOTPInfo {
rgw_user uid;
otp_devices_list_t devices;
};
class RGWOTPCtl
{
struct Svc {
RGWSI_Zone *zone{nullptr};
RGWSI_OTP *otp{nullptr};
} svc;
RGWOTPMetadataHandler *meta_handler;
RGWSI_MetaBackend_Handler *be_handler;
public:
RGWOTPCtl(RGWSI_Zone *zone_svc,
RGWSI_OTP *otp_svc);
void init(RGWOTPMetadataHandler *_meta_handler);
struct GetParams {
RGWObjVersionTracker *objv_tracker{nullptr};
ceph::real_time *mtime{nullptr};
GetParams() {}
GetParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
GetParams& set_mtime(ceph::real_time *_mtime) {
mtime = _mtime;
return *this;
}
};
struct PutParams {
RGWObjVersionTracker *objv_tracker{nullptr};
ceph::real_time mtime;
PutParams() {}
PutParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
PutParams& set_mtime(const ceph::real_time& _mtime) {
mtime = _mtime;
return *this;
}
};
struct RemoveParams {
RGWObjVersionTracker *objv_tracker{nullptr};
RemoveParams() {}
RemoveParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
};
int read_all(const rgw_user& uid, RGWOTPInfo *info, optional_yield y,
const DoutPrefixProvider *dpp,
const GetParams& params = {});
int store_all(const DoutPrefixProvider *dpp,
const RGWOTPInfo& info, optional_yield y,
const PutParams& params = {});
int remove_all(const DoutPrefixProvider *dpp,
const rgw_user& user, optional_yield y,
const RemoveParams& params = {});
};
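
/*
 * Illustrative sketch (an assumption, not upstream code): the *Params
 * structs above are fluent builders, so optional arguments can be chained
 * at the call site, e.g. attaching a version tracker and mtime to a read:
 *
 *   RGWObjVersionTracker objv;
 *   ceph::real_time mtime;
 *   int r = otp_ctl->read_all(uid, &info, y, dpp,
 *                             RGWOTPCtl::GetParams()
 *                                 .set_objv_tracker(&objv)
 *                                 .set_mtime(&mtime));
 */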
ceph-main/src/rgw/driver/rados/rgw_period.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_sync.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
using namespace rgw_zone_defaults;
int RGWPeriod::get_zonegroup(RGWZoneGroup& zonegroup,
const string& zonegroup_id) const
{
map<string, RGWZoneGroup>::const_iterator iter;
if (!zonegroup_id.empty()) {
iter = period_map.zonegroups.find(zonegroup_id);
} else {
iter = period_map.zonegroups.find("default");
}
if (iter != period_map.zonegroups.end()) {
zonegroup = iter->second;
return 0;
}
return -ENOENT;
}
int RGWPeriod::get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& latest_epoch, optional_yield y)
{
RGWPeriodLatestEpochInfo info;
int ret = read_latest_epoch(dpp, info, y);
if (ret < 0) {
return ret;
}
latest_epoch = info.epoch;
return 0;
}
int RGWPeriod::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
{
rgw_pool pool(get_pool(cct));
// delete the object for each period epoch
for (epoch_t e = 1; e <= epoch; e++) {
RGWPeriod p{get_id(), e};
rgw_raw_obj oid{pool, p.get_period_oid()};
auto sysobj = sysobj_svc->get_obj(oid);
int ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid
<< ": " << cpp_strerror(-ret) << dendl;
}
}
// delete the .latest_epoch object
rgw_raw_obj oid{pool, get_period_oid_prefix() + get_latest_epoch_oid()};
auto sysobj = sysobj_svc->get_obj(oid);
int ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: failed to delete period object " << oid
<< ": " << cpp_strerror(-ret) << dendl;
}
return ret;
}
int RGWPeriod::add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y)
{
if (zonegroup.realm_id != realm_id) {
return 0;
}
int ret = period_map.update(zonegroup, cct);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: updating period map: " << cpp_strerror(-ret) << dendl;
return ret;
}
return store_info(dpp, false, y);
}
int RGWPeriod::update(const DoutPrefixProvider *dpp, optional_yield y)
{
auto zone_svc = sysobj_svc->get_zone_svc();
ldpp_dout(dpp, 20) << __func__ << " realm " << realm_id << " period " << get_id() << dendl;
list<string> zonegroups;
int ret = zone_svc->list_zonegroups(dpp, zonegroups);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to list zonegroups: " << cpp_strerror(-ret) << dendl;
return ret;
}
// clear zone short ids of removed zones. period_map.update() will add the
// remaining zones back
period_map.short_zone_ids.clear();
for (auto& iter : zonegroups) {
RGWZoneGroup zg(string(), iter);
ret = zg.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: zg.init() failed: " << cpp_strerror(-ret) << dendl;
continue;
}
if (zg.realm_id != realm_id) {
ldpp_dout(dpp, 20) << "skipping zonegroup " << zg.get_name() << " zone realm id " << zg.realm_id << ", not on our realm " << realm_id << dendl;
continue;
}
if (zg.master_zone.empty()) {
ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl;
return -EINVAL;
}
if (zg.zones.find(zg.master_zone) == zg.zones.end()) {
ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name()
<< " has a non existent master zone "<< dendl;
return -EINVAL;
}
if (zg.is_master_zonegroup()) {
master_zonegroup = zg.get_id();
master_zone = zg.master_zone;
}
int ret = period_map.update(zg, cct);
if (ret < 0) {
return ret;
}
}
ret = period_config.read(dpp, sysobj_svc, realm_id, y);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed to read period config: "
<< cpp_strerror(ret) << dendl;
return ret;
}
return 0;
}
void RGWPeriod::fork()
{
ldout(cct, 20) << __func__ << " realm " << realm_id << " period " << id << dendl;
predecessor_uuid = id;
id = get_staging_id(realm_id);
period_map.reset();
realm_epoch++;
}
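
/*
 * Illustrative flow (an assumption based on the functions in this file): a
 * staging period is derived from the current one, repopulated from the
 * realm's zonegroups, and then committed on the master zone:
 *
 *   RGWPeriod staging = current_period;
 *   staging.fork();                 // staging id, realm_epoch + 1
 *   staging.update(dpp, y);         // rebuild period_map from zonegroups
 *   staging.commit(dpp, driver, realm, current_period, err, y, false);
 */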
static int read_sync_status(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw_meta_sync_status *sync_status)
{
rgw::sal::RadosStore* rados_store = static_cast<rgw::sal::RadosStore*>(driver);
// initialize a sync status manager to read the status
RGWMetaSyncStatusManager mgr(rados_store, rados_store->svc()->rados->get_async_processor());
int r = mgr.init(dpp);
if (r < 0) {
return r;
}
r = mgr.read_sync_status(dpp, sync_status);
mgr.stop();
return r;
}
int RGWPeriod::update_sync_status(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, /* for now */
const RGWPeriod ¤t_period,
std::ostream& error_stream,
bool force_if_stale)
{
rgw_meta_sync_status status;
int r = read_sync_status(dpp, driver, &status);
if (r < 0) {
ldpp_dout(dpp, 0) << "period failed to read sync status: "
<< cpp_strerror(-r) << dendl;
return r;
}
std::vector<std::string> markers;
const auto current_epoch = current_period.get_realm_epoch();
if (current_epoch != status.sync_info.realm_epoch) {
// no sync status markers for the current period
ceph_assert(current_epoch > status.sync_info.realm_epoch);
const int behind = current_epoch - status.sync_info.realm_epoch;
if (!force_if_stale && current_epoch > 1) {
error_stream << "ERROR: This zone is " << behind << " period(s) behind "
"the current master zone in metadata sync. If this zone is promoted "
"to master, any metadata changes during that time are likely to "
"be lost.\n"
"Waiting for this zone to catch up on metadata sync (see "
"'radosgw-admin sync status') is recommended.\n"
"To promote this zone to master anyway, add the flag "
"--yes-i-really-mean-it." << std::endl;
return -EINVAL;
}
// empty sync status markers - other zones will skip this period during
// incremental metadata sync
markers.resize(status.sync_info.num_shards);
} else {
markers.reserve(status.sync_info.num_shards);
for (auto& i : status.sync_markers) {
auto& marker = i.second;
// filter out markers from other periods
if (marker.realm_epoch != current_epoch) {
marker.marker.clear();
}
markers.emplace_back(std::move(marker.marker));
}
}
std::swap(sync_status, markers);
return 0;
}
int RGWPeriod::commit(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWRealm& realm, const RGWPeriod& current_period,
std::ostream& error_stream, optional_yield y,
bool force_if_stale)
{
auto zone_svc = sysobj_svc->get_zone_svc();
ldpp_dout(dpp, 20) << __func__ << " realm " << realm.get_id() << " period " << current_period.get_id() << dendl;
// gateway must be in the master zone to commit
if (master_zone != zone_svc->get_zone_params().get_id()) {
error_stream << "Cannot commit period on zone "
<< zone_svc->get_zone_params().get_id() << ", it must be sent to "
"the period's master zone " << master_zone << '.' << std::endl;
return -EINVAL;
}
// period predecessor must match current period
if (predecessor_uuid != current_period.get_id()) {
error_stream << "Period predecessor " << predecessor_uuid
<< " does not match current period " << current_period.get_id()
<< ". Use 'period pull' to get the latest period from the master, "
"reapply your changes, and try again." << std::endl;
return -EINVAL;
}
// realm epoch must be 1 greater than current period
if (realm_epoch != current_period.get_realm_epoch() + 1) {
error_stream << "Period's realm epoch " << realm_epoch
<< " does not come directly after current realm epoch "
<< current_period.get_realm_epoch() << ". Use 'realm pull' to get the "
"latest realm and period from the master zone, reapply your changes, "
"and try again." << std::endl;
return -EINVAL;
}
// did the master zone change?
if (master_zone != current_period.get_master_zone()) {
// store the current metadata sync status in the period
int r = update_sync_status(dpp, driver, current_period, error_stream, force_if_stale);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
<< cpp_strerror(-r) << dendl;
return r;
}
// create an object with a new period id
r = create(dpp, y, true);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl;
return r;
}
// set as current period
r = realm.set_current_period(dpp, *this, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update realm's current period: "
<< cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period "
<< id << dendl;
realm.notify_new_period(dpp, *this, y);
return 0;
}
// period must be based on current epoch
if (epoch != current_period.get_epoch()) {
error_stream << "Period epoch " << epoch << " does not match "
"predecessor epoch " << current_period.get_epoch()
<< ". Use 'period pull' to get the latest epoch from the master zone, "
"reapply your changes, and try again." << std::endl;
return -EINVAL;
}
// set period as next epoch
set_id(current_period.get_id());
set_epoch(current_period.get_epoch() + 1);
set_predecessor(current_period.get_predecessor());
realm_epoch = current_period.get_realm_epoch();
// write the period to rados
int r = store_info(dpp, false, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(-r) << dendl;
return r;
}
// set as latest epoch
r = update_latest_epoch(dpp, epoch, y);
if (r == -EEXIST) {
// already have this epoch (or a more recent one)
return 0;
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to set latest epoch: " << cpp_strerror(-r) << dendl;
return r;
}
r = reflect(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 4) << "Committed new epoch " << epoch
<< " for period " << id << dendl;
realm.notify_new_period(dpp, *this, y);
return 0;
}
void RGWPeriod::generate_test_instances(list<RGWPeriod*> &o)
{
RGWPeriod *z = new RGWPeriod;
o.push_back(z);
o.push_back(new RGWPeriod);
}
ceph-main/src/rgw/driver/rados/rgw_pubsub_push.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_pubsub_push.h"
#include <string>
#include <sstream>
#include <algorithm>
#include "include/buffer_fwd.h"
#include "common/Formatter.h"
#include "common/iso_8601.h"
#include "common/async/completion.h"
#include "rgw_common.h"
#include "rgw_data_sync.h"
#include "rgw_pubsub.h"
#include "acconfig.h"
#ifdef WITH_RADOSGW_AMQP_ENDPOINT
#include "rgw_amqp.h"
#endif
#ifdef WITH_RADOSGW_KAFKA_ENDPOINT
#include "rgw_kafka.h"
#endif
#include <boost/asio/yield.hpp>
#include <boost/algorithm/string.hpp>
#include <functional>
#include "rgw_perf_counters.h"
using namespace rgw;
template<typename EventType>
std::string json_format_pubsub_event(const EventType& event) {
std::stringstream ss;
JSONFormatter f(false);
{
Formatter::ObjectSection s(f, EventType::json_type_plural);
{
Formatter::ArraySection s(f, EventType::json_type_plural);
encode_json("", event, &f);
}
}
f.flush(ss);
return ss.str();
}
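
/*
 * Illustrative output shape (an assumption about EventType's JSON names):
 * for rgw_pubsub_s3_event this produces the familiar S3 notification
 * envelope, roughly
 *
 *   {"Records":[{ ...event fields... }]}
 *
 * where the outer key comes from EventType::json_type_plural.
 */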
bool get_bool(const RGWHTTPArgs& args, const std::string& name, bool default_value) {
bool value;
bool exists;
if (args.get_bool(name.c_str(), &value, &exists) == -EINVAL) {
throw RGWPubSubEndpoint::configuration_error("invalid boolean value for " + name);
}
if (!exists) {
return default_value;
}
return value;
}
class RGWPubSubHTTPEndpoint : public RGWPubSubEndpoint {
private:
const std::string endpoint;
typedef unsigned ack_level_t;
ack_level_t ack_level; // TODO: not used for now
const bool verify_ssl;
const bool cloudevents;
static const ack_level_t ACK_LEVEL_ANY = 0;
static const ack_level_t ACK_LEVEL_NON_ERROR = 1;
public:
RGWPubSubHTTPEndpoint(const std::string& _endpoint, const RGWHTTPArgs& args) :
endpoint(_endpoint), verify_ssl(get_bool(args, "verify-ssl", true)), cloudevents(get_bool(args, "cloudevents", false))
{
bool exists;
const auto& str_ack_level = args.get("http-ack-level", &exists);
if (!exists || str_ack_level == "any") {
// "any" is default
ack_level = ACK_LEVEL_ANY;
} else if (str_ack_level == "non-error") {
ack_level = ACK_LEVEL_NON_ERROR;
} else {
ack_level = std::atoi(str_ack_level.c_str());
if (ack_level < 100 || ack_level >= 600) {
throw configuration_error("HTTP/S: invalid http-ack-level: " + str_ack_level);
}
}
}
int send_to_completion_async(CephContext* cct, const rgw_pubsub_s3_event& event, optional_yield y) override {
bufferlist read_bl;
RGWPostHTTPData request(cct, "POST", endpoint, &read_bl, verify_ssl);
const auto post_data = json_format_pubsub_event(event);
if (cloudevents) {
// following: https://github.com/cloudevents/spec/blob/v1.0.1/http-protocol-binding.md
// using "Binary Content Mode"
request.append_header("ce-specversion", "1.0");
request.append_header("ce-type", "com.amazonaws." + event.eventName);
request.append_header("ce-time", to_iso_8601(event.eventTime));
// default output of iso8601 is also RFC3339 compatible
request.append_header("ce-id", event.x_amz_request_id + "." + event.x_amz_id_2);
request.append_header("ce-source", event.eventSource + "." + event.awsRegion + "." + event.bucket_name);
request.append_header("ce-subject", event.object_key);
}
request.set_post_data(post_data);
request.set_send_length(post_data.length());
request.append_header("Content-Type", "application/json");
if (perfcounter) perfcounter->inc(l_rgw_pubsub_push_pending);
const auto rc = RGWHTTP::process(&request, y);
if (perfcounter) perfcounter->dec(l_rgw_pubsub_push_pending);
// TODO: use read_bl to process return code and handle according to ack level
return rc;
}
std::string to_str() const override {
std::string str("HTTP/S Endpoint");
str += "\nURI: " + endpoint;
str += (verify_ssl ? "\nverify SSL" : "\ndon't verify SSL");
return str;
}
};
#ifdef WITH_RADOSGW_AMQP_ENDPOINT
class RGWPubSubAMQPEndpoint : public RGWPubSubEndpoint {
private:
enum class ack_level_t {
None,
Broker,
Routable
};
CephContext* const cct;
const std::string endpoint;
const std::string topic;
const std::string exchange;
ack_level_t ack_level;
amqp::connection_id_t conn_id;
bool get_verify_ssl(const RGWHTTPArgs& args) {
bool exists;
auto str_verify_ssl = args.get("verify-ssl", &exists);
if (!exists) {
// verify server certificate by default
return true;
}
boost::algorithm::to_lower(str_verify_ssl);
if (str_verify_ssl == "true") {
return true;
}
if (str_verify_ssl == "false") {
return false;
}
throw configuration_error("'verify-ssl' must be true/false, not: " + str_verify_ssl);
}
std::string get_exchange(const RGWHTTPArgs& args) {
bool exists;
const auto exchange = args.get("amqp-exchange", &exists);
if (!exists) {
throw configuration_error("AMQP: missing amqp-exchange");
}
return exchange;
}
ack_level_t get_ack_level(const RGWHTTPArgs& args) {
bool exists;
const auto& str_ack_level = args.get("amqp-ack-level", &exists);
if (!exists || str_ack_level == "broker") {
// "broker" is default
return ack_level_t::Broker;
}
if (str_ack_level == "none") {
return ack_level_t::None;
}
if (str_ack_level == "routable") {
return ack_level_t::Routable;
}
throw configuration_error("AMQP: invalid amqp-ack-level: " + str_ack_level);
}
public:
RGWPubSubAMQPEndpoint(const std::string& _endpoint,
const std::string& _topic,
const RGWHTTPArgs& args,
CephContext* _cct) :
cct(_cct),
endpoint(_endpoint),
topic(_topic),
exchange(get_exchange(args)),
ack_level(get_ack_level(args)) {
if (!amqp::connect(conn_id, endpoint, exchange, (ack_level == ack_level_t::Broker), get_verify_ssl(args), args.get_optional("ca-location"))) {
throw configuration_error("AMQP: failed to create connection to: " + endpoint);
}
}
  // this allows waiting until "finish()" is called from a different thread.
  // waiting either blocks the waiting thread or yields, depending on
  // compilation flag support and on whether the optional_yield is set
class Waiter {
using Signature = void(boost::system::error_code);
using Completion = ceph::async::Completion<Signature>;
std::unique_ptr<Completion> completion = nullptr;
int ret;
mutable std::atomic<bool> done = false;
mutable std::mutex lock;
mutable std::condition_variable cond;
template <typename ExecutionContext, typename CompletionToken>
auto async_wait(ExecutionContext& ctx, CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto& handler = init.completion_handler;
{
std::unique_lock l{lock};
completion = Completion::create(ctx.get_executor(), std::move(handler));
}
return init.result.get();
}
public:
int wait(optional_yield y) {
if (done) {
return ret;
}
if (y) {
auto& io_ctx = y.get_io_context();
auto& yield_ctx = y.get_yield_context();
boost::system::error_code ec;
async_wait(io_ctx, yield_ctx[ec]);
return -ec.value();
}
std::unique_lock l(lock);
cond.wait(l, [this]{return (done==true);});
return ret;
}
void finish(int r) {
std::unique_lock l{lock};
ret = r;
done = true;
if (completion) {
boost::system::error_code ec(-ret, boost::system::system_category());
Completion::post(std::move(completion), ec);
} else {
cond.notify_all();
}
}
};
int send_to_completion_async(CephContext* cct, const rgw_pubsub_s3_event& event, optional_yield y) override {
if (ack_level == ack_level_t::None) {
return amqp::publish(conn_id, topic, json_format_pubsub_event(event));
} else {
// TODO: currently broker and routable are the same - this will require different flags but the same mechanism
// note: dynamic allocation of Waiter is needed when this is invoked from a beast coroutine
auto w = std::unique_ptr<Waiter>(new Waiter);
const auto rc = amqp::publish_with_confirm(conn_id,
topic,
json_format_pubsub_event(event),
std::bind(&Waiter::finish, w.get(), std::placeholders::_1));
if (rc < 0) {
// failed to publish, does not wait for reply
return rc;
}
return w->wait(y);
}
}
std::string to_str() const override {
std::string str("AMQP(0.9.1) Endpoint");
str += "\nURI: " + endpoint;
str += "\nTopic: " + topic;
str += "\nExchange: " + exchange;
return str;
}
};
static const std::string AMQP_0_9_1("0-9-1");
static const std::string AMQP_1_0("1-0");
static const std::string AMQP_SCHEMA("amqp");
#endif // ifdef WITH_RADOSGW_AMQP_ENDPOINT
#ifdef WITH_RADOSGW_KAFKA_ENDPOINT
class RGWPubSubKafkaEndpoint : public RGWPubSubEndpoint {
private:
enum class ack_level_t {
None,
Broker,
};
CephContext* const cct;
const std::string topic;
const ack_level_t ack_level;
std::string conn_name;
ack_level_t get_ack_level(const RGWHTTPArgs& args) {
bool exists;
const auto& str_ack_level = args.get("kafka-ack-level", &exists);
if (!exists || str_ack_level == "broker") {
// "broker" is default
return ack_level_t::Broker;
}
if (str_ack_level == "none") {
return ack_level_t::None;
}
throw configuration_error("Kafka: invalid kafka-ack-level: " + str_ack_level);
}
public:
RGWPubSubKafkaEndpoint(const std::string& _endpoint,
const std::string& _topic,
const RGWHTTPArgs& args,
CephContext* _cct) :
cct(_cct),
topic(_topic),
ack_level(get_ack_level(args)) {
if (!kafka::connect(conn_name, _endpoint, get_bool(args, "use-ssl", false), get_bool(args, "verify-ssl", true),
args.get_optional("ca-location"), args.get_optional("mechanism"))) {
throw configuration_error("Kafka: failed to create connection to: " + _endpoint);
}
}
  // this allows waiting until "finish()" is called from a different thread.
  // waiting either blocks the waiting thread or yields, depending on
  // compilation flag support and on whether the optional_yield is set
class Waiter {
using Signature = void(boost::system::error_code);
using Completion = ceph::async::Completion<Signature>;
std::unique_ptr<Completion> completion = nullptr;
int ret;
mutable std::atomic<bool> done = false;
mutable std::mutex lock;
mutable std::condition_variable cond;
template <typename ExecutionContext, typename CompletionToken>
auto async_wait(ExecutionContext& ctx, CompletionToken&& token) {
boost::asio::async_completion<CompletionToken, Signature> init(token);
auto& handler = init.completion_handler;
{
std::unique_lock l{lock};
completion = Completion::create(ctx.get_executor(), std::move(handler));
}
return init.result.get();
}
public:
int wait(optional_yield y) {
if (done) {
return ret;
}
if (y) {
auto& io_ctx = y.get_io_context();
auto& yield_ctx = y.get_yield_context();
boost::system::error_code ec;
async_wait(io_ctx, yield_ctx[ec]);
return -ec.value();
}
std::unique_lock l(lock);
cond.wait(l, [this]{return (done==true);});
return ret;
}
void finish(int r) {
std::unique_lock l{lock};
ret = r;
done = true;
if (completion) {
boost::system::error_code ec(-ret, boost::system::system_category());
Completion::post(std::move(completion), ec);
} else {
cond.notify_all();
}
}
};
int send_to_completion_async(CephContext* cct, const rgw_pubsub_s3_event& event, optional_yield y) override {
if (ack_level == ack_level_t::None) {
return kafka::publish(conn_name, topic, json_format_pubsub_event(event));
} else {
// note: dynamic allocation of Waiter is needed when this is invoked from a beast coroutine
auto w = std::unique_ptr<Waiter>(new Waiter);
const auto rc = kafka::publish_with_confirm(conn_name,
topic,
json_format_pubsub_event(event),
std::bind(&Waiter::finish, w.get(), std::placeholders::_1));
if (rc < 0) {
// failed to publish, does not wait for reply
return rc;
}
return w->wait(y);
}
}
std::string to_str() const override {
std::string str("Kafka Endpoint");
str += "\nBroker: " + conn_name;
str += "\nTopic: " + topic;
return str;
}
};
static const std::string KAFKA_SCHEMA("kafka");
#endif // ifdef WITH_RADOSGW_KAFKA_ENDPOINT
static const std::string WEBHOOK_SCHEMA("webhook");
static const std::string UNKNOWN_SCHEMA("unknown");
static const std::string NO_SCHEMA("");
const std::string& get_schema(const std::string& endpoint) {
if (endpoint.empty()) {
return NO_SCHEMA;
}
const auto pos = endpoint.find(':');
if (pos == std::string::npos) {
return UNKNOWN_SCHEMA;
}
const auto& schema = endpoint.substr(0,pos);
if (schema == "http" || schema == "https") {
return WEBHOOK_SCHEMA;
#ifdef WITH_RADOSGW_AMQP_ENDPOINT
} else if (schema == "amqp" || schema == "amqps") {
return AMQP_SCHEMA;
#endif
#ifdef WITH_RADOSGW_KAFKA_ENDPOINT
} else if (schema == "kafka") {
return KAFKA_SCHEMA;
#endif
}
return UNKNOWN_SCHEMA;
}
RGWPubSubEndpoint::Ptr RGWPubSubEndpoint::create(const std::string& endpoint,
const std::string& topic,
const RGWHTTPArgs& args,
CephContext* cct) {
const auto& schema = get_schema(endpoint);
if (schema == WEBHOOK_SCHEMA) {
return Ptr(new RGWPubSubHTTPEndpoint(endpoint, args));
#ifdef WITH_RADOSGW_AMQP_ENDPOINT
} else if (schema == AMQP_SCHEMA) {
bool exists;
std::string version = args.get("amqp-version", &exists);
if (!exists) {
version = AMQP_0_9_1;
}
if (version == AMQP_0_9_1) {
return Ptr(new RGWPubSubAMQPEndpoint(endpoint, topic, args, cct));
} else if (version == AMQP_1_0) {
throw configuration_error("AMQP: v1.0 not supported");
return nullptr;
} else {
throw configuration_error("AMQP: unknown version: " + version);
return nullptr;
}
#endif
#ifdef WITH_RADOSGW_KAFKA_ENDPOINT
} else if (schema == KAFKA_SCHEMA) {
return Ptr(new RGWPubSubKafkaEndpoint(endpoint, topic, args, cct));
#endif
}
throw configuration_error("unknown schema in: " + endpoint);
return nullptr;
}
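
/*
 * Illustrative usage sketch (not upstream code): creating an endpoint from a
 * topic's push configuration and publishing one event. The URI, topic name
 * and args below are hypothetical and would normally come from the stored
 * topic attributes.
 *
 *   try {
 *     auto ep = RGWPubSubEndpoint::create(
 *         "https://gateway.example.com/hook", topic_name, args, cct);
 *     const int r = ep->send_to_completion_async(cct, event, y);
 *     // r == 0 on success, subject to the endpoint's ack-level semantics
 *   } catch (const RGWPubSubEndpoint::configuration_error& e) {
 *     // malformed endpoint URI or arguments
 *   }
 */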
ceph-main/src/rgw/driver/rados/rgw_pubsub_push.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <memory>
#include <stdexcept>
#include "include/buffer_fwd.h"
#include "include/common_fwd.h"
#include "common/async/yield_context.h"
// TODO the env should be used as a template parameter to differentiate the source that triggers the pushes
class RGWDataSyncEnv;
class RGWHTTPArgs;
struct rgw_pubsub_s3_event;
// endpoint base class; all endpoint types should derive from it
class RGWPubSubEndpoint {
public:
RGWPubSubEndpoint() = default;
// endpoint should not be copied
RGWPubSubEndpoint(const RGWPubSubEndpoint&) = delete;
const RGWPubSubEndpoint& operator=(const RGWPubSubEndpoint&) = delete;
typedef std::unique_ptr<RGWPubSubEndpoint> Ptr;
// factory method for the actual notification endpoint
// derived class specific arguments are passed in http args format
// may throw a configuration_error if creation fails
static Ptr create(const std::string& endpoint, const std::string& topic, const RGWHTTPArgs& args, CephContext *cct=nullptr);
  // this method is used to send a notification (S3 compliant) and wait for
  // its completion, asynchronously via a coroutine, when invoked in the
  // frontend environment
virtual int send_to_completion_async(CephContext* cct, const rgw_pubsub_s3_event& event, optional_yield y) = 0;
// present as string
virtual std::string to_str() const { return ""; }
virtual ~RGWPubSubEndpoint() = default;
// exception object for configuration error
struct configuration_error : public std::logic_error {
configuration_error(const std::string& what_arg) :
std::logic_error("pubsub endpoint configuration error: " + what_arg) {}
};
};
ceph-main/src/rgw/driver/rados/rgw_putobj_processor.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/rados/librados.hpp"
#include "rgw_aio.h"
#include "rgw_putobj_processor.h"
#include "rgw_multi.h"
#include "rgw_compression.h"
#include "services/svc_sys_obj.h"
#include "services/svc_zone.h"
#include "rgw_sal_rados.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
namespace rgw::putobj {
/*
* For the cloudtiered objects, update the object manifest with the
* cloudtier config info read from the attrs.
 * Since these attrs are used internally only for replication, do not store them
* in the head object.
*/
void read_cloudtier_info_from_attrs(rgw::sal::Attrs& attrs, RGWObjCategory& category,
RGWObjManifest& manifest) {
auto attr_iter = attrs.find(RGW_ATTR_CLOUD_TIER_TYPE);
if (attr_iter != attrs.end()) {
auto i = attr_iter->second;
string m = i.to_str();
if (m == "cloud-s3") {
category = RGWObjCategory::CloudTiered;
manifest.set_tier_type("cloud-s3");
auto config_iter = attrs.find(RGW_ATTR_CLOUD_TIER_CONFIG);
if (config_iter != attrs.end()) {
auto i = config_iter->second.cbegin();
RGWObjTier tier_config;
try {
using ceph::decode;
decode(tier_config, i);
manifest.set_tier_config(tier_config);
attrs.erase(config_iter);
          } catch (buffer::error& err) {
            // ignore a malformed tier config; the manifest keeps its defaults
          }
}
}
attrs.erase(attr_iter);
}
}
int HeadObjectProcessor::process(bufferlist&& data, uint64_t logical_offset)
{
const bool flush = (data.length() == 0);
// capture the first chunk for special handling
if (data_offset < head_chunk_size || data_offset == 0) {
if (flush) {
// flush partial chunk
return process_first_chunk(std::move(head_data), &processor);
}
auto remaining = head_chunk_size - data_offset;
auto count = std::min<uint64_t>(data.length(), remaining);
data.splice(0, count, &head_data);
data_offset += count;
if (data_offset == head_chunk_size) {
// process the first complete chunk
ceph_assert(head_data.length() == head_chunk_size);
int r = process_first_chunk(std::move(head_data), &processor);
if (r < 0) {
return r;
}
}
if (data.length() == 0) { // avoid flushing stripe processor
return 0;
}
}
ceph_assert(processor); // process_first_chunk() must initialize
// send everything else through the processor
auto write_offset = data_offset;
data_offset += data.length();
return processor->process(std::move(data), write_offset);
}
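
/*
 * Illustrative call pattern (an assumption matching the flush convention
 * above): callers feed sequential buffers and finish with a zero-length
 * buffer, which flushes any partially buffered first chunk.
 *
 *   processor.process(std::move(bl1), 0);
 *   processor.process(std::move(bl2), bl1_len);
 *   processor.process({}, total_len); // zero-length write == flush
 */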
static int process_completed(const AioResultList& completed, RawObjSet *written)
{
std::optional<int> error;
for (auto& r : completed) {
if (r.result >= 0) {
written->insert(r.obj);
} else if (!error) { // record first error code
error = r.result;
}
}
return error.value_or(0);
}
void RadosWriter::add_write_hint(librados::ObjectWriteOperation& op) {
const RGWObjStateManifest *sm = obj_ctx.get_state(head_obj);
const bool compressed = sm->state.compressed;
uint32_t alloc_hint_flags = 0;
if (compressed) {
alloc_hint_flags |= librados::ALLOC_HINT_FLAG_INCOMPRESSIBLE;
}
op.set_alloc_hint2(0, 0, alloc_hint_flags);
}
int RadosWriter::set_stripe_obj(const rgw_raw_obj& raw_obj)
{
stripe_obj = store->svc.rados->obj(raw_obj);
return stripe_obj.open(dpp);
}
int RadosWriter::process(bufferlist&& bl, uint64_t offset)
{
bufferlist data = std::move(bl);
const uint64_t cost = data.length();
if (cost == 0) { // no empty writes, use aio directly for creates
return 0;
}
librados::ObjectWriteOperation op;
add_write_hint(op);
if (offset == 0) {
op.write_full(data);
} else {
op.write(offset, data);
}
constexpr uint64_t id = 0; // unused
auto& ref = stripe_obj.get_ref();
auto c = aio->get(ref.obj, Aio::librados_op(ref.pool.ioctx(), std::move(op), y), cost, id);
return process_completed(c, &written);
}
int RadosWriter::write_exclusive(const bufferlist& data)
{
const uint64_t cost = data.length();
librados::ObjectWriteOperation op;
op.create(true); // exclusive create
add_write_hint(op);
op.write_full(data);
constexpr uint64_t id = 0; // unused
auto& ref = stripe_obj.get_ref();
auto c = aio->get(ref.obj, Aio::librados_op(ref.pool.ioctx(), std::move(op), y), cost, id);
auto d = aio->drain();
c.splice(c.end(), d);
return process_completed(c, &written);
}
int RadosWriter::drain()
{
return process_completed(aio->drain(), &written);
}
RadosWriter::~RadosWriter()
{
// wait on any outstanding aio completions
process_completed(aio->drain(), &written);
bool need_to_remove_head = false;
std::optional<rgw_raw_obj> raw_head;
if (!head_obj.empty()) {
raw_head.emplace();
store->obj_to_raw(bucket_info.placement_rule, head_obj, &*raw_head);
}
/**
   * We should delete the object in the "multipart" namespace to avoid a race condition.
   * Such a race condition is caused by the fact that the multipart object is the gatekeeper of a
   * multipart upload: once it is deleted, a second upload can start with the same suffix ("2/"),
   * so objects written by the second upload may be deleted by the first upload.
   * Details are described in #11749.
   *
   * The above comment still stands, but instead of searching for a specific object in the multipart
   * namespace, we just make sure that we remove the object that is marked as the head object after
   * we remove all the other raw objects. Note that we use a different call to remove the head object,
   * as this one needs to go via the bucket index prepare/complete 2-phase commit scheme.
*/
for (const auto& obj : written) {
if (raw_head && obj == *raw_head) {
ldpp_dout(dpp, 5) << "NOTE: we should not process the head object (" << obj << ") here" << dendl;
need_to_remove_head = true;
continue;
}
int r = store->delete_raw_obj(dpp, obj, y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << obj << "), leaked" << dendl;
}
}
if (need_to_remove_head) {
std::string version_id;
ldpp_dout(dpp, 5) << "NOTE: we are going to process the head obj (" << *raw_head << ")" << dendl;
int r = store->delete_obj(dpp, obj_ctx, bucket_info, head_obj, 0, y, 0);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "WARNING: failed to remove obj (" << *raw_head << "), leaked" << dendl;
}
}
}
// advance to the next stripe
int ManifestObjectProcessor::next(uint64_t offset, uint64_t *pstripe_size)
{
// advance the manifest
int r = manifest_gen.create_next(offset);
if (r < 0) {
return r;
}
rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
uint64_t chunk_size = 0;
r = store->get_max_chunk_size(stripe_obj.pool, &chunk_size, dpp);
if (r < 0) {
return r;
}
r = writer.set_stripe_obj(stripe_obj);
if (r < 0) {
return r;
}
chunk = ChunkProcessor(&writer, chunk_size);
*pstripe_size = manifest_gen.cur_stripe_max_size();
return 0;
}
int AtomicObjectProcessor::process_first_chunk(bufferlist&& data,
DataProcessor **processor)
{
first_chunk = std::move(data);
*processor = &stripe;
return 0;
}
int AtomicObjectProcessor::prepare(optional_yield y)
{
uint64_t max_head_chunk_size;
uint64_t head_max_size;
uint64_t chunk_size = 0;
uint64_t alignment;
rgw_pool head_pool;
if (!store->get_obj_data_pool(bucket_info.placement_rule, head_obj, &head_pool)) {
return -EIO;
}
int r = store->get_max_chunk_size(head_pool, &max_head_chunk_size, dpp, &alignment);
if (r < 0) {
return r;
}
bool same_pool = true;
if (bucket_info.placement_rule != tail_placement_rule) {
rgw_pool tail_pool;
if (!store->get_obj_data_pool(tail_placement_rule, head_obj, &tail_pool)) {
return -EIO;
}
if (tail_pool != head_pool) {
same_pool = false;
r = store->get_max_chunk_size(tail_pool, &chunk_size, dpp);
if (r < 0) {
return r;
}
head_max_size = 0;
}
}
if (same_pool) {
RGWZonePlacementInfo placement_info;
if (!store->svc.zone->get_zone_params().get_placement(bucket_info.placement_rule.name, &placement_info) || placement_info.inline_data) {
head_max_size = max_head_chunk_size;
} else {
head_max_size = 0;
}
chunk_size = max_head_chunk_size;
}
uint64_t stripe_size;
const uint64_t default_stripe_size = store->ctx()->_conf->rgw_obj_stripe_size;
store->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
manifest.set_trivial_rule(head_max_size, stripe_size);
r = manifest_gen.create_begin(store->ctx(), &manifest,
bucket_info.placement_rule,
&tail_placement_rule,
head_obj.bucket, head_obj);
if (r < 0) {
return r;
}
rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
r = writer.set_stripe_obj(stripe_obj);
if (r < 0) {
return r;
}
set_head_chunk_size(head_max_size);
// initialize the processors
chunk = ChunkProcessor(&writer, chunk_size);
stripe = StripeProcessor(&chunk, this, head_max_size);
return 0;
}
int AtomicObjectProcessor::complete(size_t accounted_size,
const std::string& etag,
ceph::real_time *mtime,
ceph::real_time set_mtime,
rgw::sal::Attrs& attrs,
ceph::real_time delete_at,
const char *if_match,
const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace,
bool *pcanceled, optional_yield y)
{
int r = writer.drain();
if (r < 0) {
return r;
}
const uint64_t actual_size = get_actual_size();
r = manifest_gen.create_next(actual_size);
if (r < 0) {
return r;
}
obj_ctx.set_atomic(head_obj);
RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
/* some object types shouldn't be versioned, e.g., multipart parts */
op_target.set_versioning_disabled(!bucket_info.versioning_enabled());
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.data = &first_chunk;
obj_op.meta.manifest = &manifest;
obj_op.meta.ptag = &unique_tag; /* use req_id as operation tag */
obj_op.meta.if_match = if_match;
obj_op.meta.if_nomatch = if_nomatch;
obj_op.meta.mtime = mtime;
obj_op.meta.set_mtime = set_mtime;
obj_op.meta.owner = owner;
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.olh_epoch = olh_epoch;
obj_op.meta.delete_at = delete_at;
obj_op.meta.user_data = user_data;
obj_op.meta.zones_trace = zones_trace;
obj_op.meta.modify_tail = true;
read_cloudtier_info_from_attrs(attrs, obj_op.meta.category, manifest);
r = obj_op.write_meta(dpp, actual_size, accounted_size, attrs, y);
if (r < 0) {
if (r == -ETIMEDOUT) {
      // The head object write may eventually succeed, so clear the set of
      // objects for deletion. If it never succeeds, we'll orphan any tail
      // objects, as if we'd crashed before that write
writer.clear_written();
}
return r;
}
if (!obj_op.meta.canceled) {
// on success, clear the set of objects for deletion
writer.clear_written();
}
if (pcanceled) {
*pcanceled = obj_op.meta.canceled;
}
return 0;
}
int MultipartObjectProcessor::process_first_chunk(bufferlist&& data,
DataProcessor **processor)
{
// write the first chunk of the head object as part of an exclusive create,
// then drain to wait for the result in case of EEXIST
int r = writer.write_exclusive(data);
if (r == -EEXIST) {
// randomize the oid prefix and reprepare the head/manifest
std::string oid_rand = gen_rand_alphanumeric(store->ctx(), 32);
mp.init(target_obj.key.name, upload_id, oid_rand);
manifest.set_prefix(target_obj.key.name + "." + oid_rand);
r = prepare_head();
if (r < 0) {
return r;
}
// resubmit the write op on the new head object
r = writer.write_exclusive(data);
}
if (r < 0) {
return r;
}
*processor = &stripe;
return 0;
}
int MultipartObjectProcessor::prepare_head()
{
const uint64_t default_stripe_size = store->ctx()->_conf->rgw_obj_stripe_size;
uint64_t chunk_size;
uint64_t stripe_size;
uint64_t alignment;
int r = store->get_max_chunk_size(tail_placement_rule, target_obj, &chunk_size, dpp, &alignment);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: unexpected: get_max_chunk_size(): placement_rule=" << tail_placement_rule.to_str() << " obj=" << target_obj << " returned r=" << r << dendl;
return r;
}
store->get_max_aligned_size(default_stripe_size, alignment, &stripe_size);
manifest.set_multipart_part_rule(stripe_size, part_num);
r = manifest_gen.create_begin(store->ctx(), &manifest,
bucket_info.placement_rule,
&tail_placement_rule,
target_obj.bucket, target_obj);
if (r < 0) {
return r;
}
rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
RGWSI_Tier_RADOS::raw_obj_to_obj(head_obj.bucket, stripe_obj, &head_obj);
head_obj.index_hash_source = target_obj.key.name;
r = writer.set_stripe_obj(stripe_obj);
if (r < 0) {
return r;
}
stripe_size = manifest_gen.cur_stripe_max_size();
set_head_chunk_size(stripe_size);
chunk = ChunkProcessor(&writer, chunk_size);
stripe = StripeProcessor(&chunk, this, stripe_size);
return 0;
}
int MultipartObjectProcessor::prepare(optional_yield y)
{
manifest.set_prefix(target_obj.key.name + "." + upload_id);
return prepare_head();
}
int MultipartObjectProcessor::complete(size_t accounted_size,
const std::string& etag,
ceph::real_time *mtime,
ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match,
const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace,
bool *pcanceled, optional_yield y)
{
int r = writer.drain();
if (r < 0) {
return r;
}
const uint64_t actual_size = get_actual_size();
r = manifest_gen.create_next(actual_size);
if (r < 0) {
return r;
}
RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
op_target.set_versioning_disabled(true);
op_target.set_meta_placement_rule(&tail_placement_rule);
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.set_mtime = set_mtime;
obj_op.meta.mtime = mtime;
obj_op.meta.owner = owner;
obj_op.meta.delete_at = delete_at;
obj_op.meta.zones_trace = zones_trace;
obj_op.meta.modify_tail = true;
r = obj_op.write_meta(dpp, actual_size, accounted_size, attrs, y);
if (r < 0)
return r;
RGWUploadPartInfo info;
string p = "part.";
bool sorted_omap = is_v2_upload_id(upload_id);
if (sorted_omap) {
char buf[32];
snprintf(buf, sizeof(buf), "%08d", part_num);
p.append(buf);
} else {
p.append(part_num_str);
}
info.num = part_num;
info.etag = etag;
info.size = actual_size;
info.accounted_size = accounted_size;
info.modified = real_clock::now();
info.manifest = manifest;
bool compressed;
r = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
if (r < 0) {
ldpp_dout(dpp, 1) << "cannot get compression info" << dendl;
return r;
}
rgw_obj meta_obj;
meta_obj.init_ns(bucket_info.bucket, mp.get_meta(), RGW_OBJ_NS_MULTIPART);
meta_obj.set_in_extra_data(true);
rgw_raw_obj meta_raw_obj;
store->obj_to_raw(bucket_info.placement_rule, meta_obj, &meta_raw_obj);
rgw_rados_ref meta_obj_ref;
r = store->get_raw_obj_ref(dpp, meta_raw_obj, &meta_obj_ref);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref of meta obj with ret=" << r << dendl;
return r;
}
librados::ObjectWriteOperation op;
cls_rgw_mp_upload_part_info_update(op, p, info);
r = rgw_rados_operate(dpp, meta_obj_ref.pool.ioctx(), meta_obj_ref.obj.oid, &op, y);
ldpp_dout(dpp, 20) << "Update meta: " << meta_obj_ref.obj.oid << " part " << p << " prefix " << info.manifest.get_prefix() << " return " << r << dendl;
if (r == -EOPNOTSUPP) {
// New CLS call to update part info is not yet supported. Fall back to the old handling.
bufferlist bl;
encode(info, bl);
map<string, bufferlist> m;
m[p] = bl;
op = librados::ObjectWriteOperation{};
op.assert_exists(); // detect races with abort
op.omap_set(m);
r = rgw_rados_operate(dpp, meta_obj_ref.pool.ioctx(), meta_obj_ref.obj.oid, &op, y);
}
if (r < 0) {
return r == -ENOENT ? -ERR_NO_SUCH_UPLOAD : r;
}
if (!obj_op.meta.canceled) {
// on success, clear the set of objects for deletion
writer.clear_written();
}
if (pcanceled) {
*pcanceled = obj_op.meta.canceled;
}
return 0;
}
int AppendObjectProcessor::process_first_chunk(bufferlist &&data, rgw::sal::DataProcessor **processor)
{
int r = writer.write_exclusive(data);
if (r < 0) {
return r;
}
*processor = &stripe;
return 0;
}
int AppendObjectProcessor::prepare(optional_yield y)
{
RGWObjState *astate;
int r = store->get_obj_state(dpp, &obj_ctx, bucket_info, head_obj,
&astate, &cur_manifest, y);
if (r < 0) {
return r;
}
cur_size = astate->size;
*cur_accounted_size = astate->accounted_size;
if (!astate->exists) {
if (position != 0) {
ldpp_dout(dpp, 5) << "ERROR: Append position should be zero" << dendl;
return -ERR_POSITION_NOT_EQUAL_TO_LENGTH;
} else {
cur_part_num = 1;
//set the prefix
char buf[33];
gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
string oid_prefix = head_obj.key.name;
oid_prefix.append(".");
oid_prefix.append(buf);
oid_prefix.append("_");
manifest.set_prefix(oid_prefix);
}
} else {
    // check whether the object is appendable
map<string, bufferlist>::iterator iter = astate->attrset.find(RGW_ATTR_APPEND_PART_NUM);
if (iter == astate->attrset.end()) {
ldpp_dout(dpp, 5) << "ERROR: The object is not appendable" << dendl;
return -ERR_OBJECT_NOT_APPENDABLE;
}
if (position != *cur_accounted_size) {
ldpp_dout(dpp, 5) << "ERROR: Append position should be equal to the obj size" << dendl;
return -ERR_POSITION_NOT_EQUAL_TO_LENGTH;
}
try {
using ceph::decode;
decode(cur_part_num, iter->second);
} catch (buffer::error& err) {
ldpp_dout(dpp, 5) << "ERROR: failed to decode part num" << dendl;
return -EIO;
}
cur_part_num++;
//get the current obj etag
iter = astate->attrset.find(RGW_ATTR_ETAG);
if (iter != astate->attrset.end()) {
string s = rgw_string_unquote(iter->second.c_str());
size_t pos = s.find("-");
cur_etag = s.substr(0, pos);
}
iter = astate->attrset.find(RGW_ATTR_STORAGE_CLASS);
if (iter != astate->attrset.end()) {
tail_placement_rule.storage_class = iter->second.to_str();
} else {
tail_placement_rule.storage_class = RGW_STORAGE_CLASS_STANDARD;
}
manifest.set_prefix(cur_manifest->get_prefix());
astate->keep_tail = true;
}
manifest.set_multipart_part_rule(store->ctx()->_conf->rgw_obj_stripe_size, cur_part_num);
r = manifest_gen.create_begin(store->ctx(), &manifest, bucket_info.placement_rule, &tail_placement_rule, head_obj.bucket, head_obj);
if (r < 0) {
return r;
}
rgw_raw_obj stripe_obj = manifest_gen.get_cur_obj(store);
uint64_t chunk_size = 0;
r = store->get_max_chunk_size(stripe_obj.pool, &chunk_size, dpp);
if (r < 0) {
return r;
}
r = writer.set_stripe_obj(std::move(stripe_obj));
if (r < 0) {
return r;
}
uint64_t stripe_size = manifest_gen.cur_stripe_max_size();
uint64_t max_head_size = std::min(chunk_size, stripe_size);
set_head_chunk_size(max_head_size);
// initialize the processors
chunk = ChunkProcessor(&writer, chunk_size);
stripe = StripeProcessor(&chunk, this, stripe_size);
return 0;
}
int AppendObjectProcessor::complete(size_t accounted_size, const string &etag, ceph::real_time *mtime,
ceph::real_time set_mtime, rgw::sal::Attrs& attrs,
ceph::real_time delete_at, const char *if_match, const char *if_nomatch,
const string *user_data, rgw_zone_set *zones_trace, bool *pcanceled,
optional_yield y)
{
int r = writer.drain();
if (r < 0)
return r;
const uint64_t actual_size = get_actual_size();
r = manifest_gen.create_next(actual_size);
if (r < 0) {
return r;
}
obj_ctx.set_atomic(head_obj);
RGWRados::Object op_target(store, bucket_info, obj_ctx, head_obj);
//For Append obj, disable versioning
op_target.set_versioning_disabled(true);
RGWRados::Object::Write obj_op(&op_target);
if (cur_manifest) {
cur_manifest->append(dpp, manifest, store->svc.zone->get_zonegroup(), store->svc.zone->get_zone_params());
obj_op.meta.manifest = cur_manifest;
} else {
obj_op.meta.manifest = &manifest;
}
obj_op.meta.ptag = &unique_tag; /* use req_id as operation tag */
obj_op.meta.mtime = mtime;
obj_op.meta.set_mtime = set_mtime;
obj_op.meta.owner = owner;
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.delete_at = delete_at;
obj_op.meta.user_data = user_data;
obj_op.meta.zones_trace = zones_trace;
obj_op.meta.modify_tail = true;
obj_op.meta.appendable = true;
//Add the append part number
bufferlist cur_part_num_bl;
using ceph::encode;
encode(cur_part_num, cur_part_num_bl);
attrs[RGW_ATTR_APPEND_PART_NUM] = cur_part_num_bl;
//calculate the etag
if (!cur_etag.empty()) {
MD5 hash;
// Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
hex_to_buf(cur_etag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
hash.Update((const unsigned char *)petag, sizeof(petag));
hex_to_buf(etag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
hash.Update((const unsigned char *)petag, sizeof(petag));
hash.Final((unsigned char *)final_etag);
buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
"-%lld", (long long)cur_part_num);
bufferlist etag_bl;
etag_bl.append(final_etag_str, strlen(final_etag_str) + 1);
attrs[RGW_ATTR_ETAG] = etag_bl;
}
r = obj_op.write_meta(dpp, actual_size + cur_size,
accounted_size + *cur_accounted_size,
attrs, y);
if (r < 0) {
return r;
}
if (!obj_op.meta.canceled) {
// on success, clear the set of objects for deletion
writer.clear_written();
}
if (pcanceled) {
*pcanceled = obj_op.meta.canceled;
}
*cur_accounted_size += accounted_size;
return 0;
}
} // namespace rgw::putobj
ceph-main/src/rgw/driver/rados/rgw_putobj_processor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2018 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <optional>
#include "rgw_putobj.h"
#include "services/svc_rados.h"
#include "services/svc_tier_rados.h"
#include "rgw_sal.h"
#include "rgw_obj_manifest.h"
namespace rgw {
namespace sal {
class RadosStore;
}
class Aio;
namespace putobj {
// an object processor with special handling for the first chunk of the head.
// the virtual process_first_chunk() function returns a processor to handle the
// rest of the object
class HeadObjectProcessor : public rgw::sal::ObjectProcessor {
uint64_t head_chunk_size;
// buffer to capture the first chunk of the head object
bufferlist head_data;
// initialized after process_first_chunk() to process everything else
rgw::sal::DataProcessor *processor = nullptr;
  uint64_t data_offset = 0; // maximum offset of data written (i.e. compressed)
protected:
uint64_t get_actual_size() const { return data_offset; }
// process the first chunk of data and return a processor for the rest
virtual int process_first_chunk(bufferlist&& data,
rgw::sal::DataProcessor **processor) = 0;
public:
HeadObjectProcessor(uint64_t head_chunk_size)
: head_chunk_size(head_chunk_size)
{}
void set_head_chunk_size(uint64_t size) { head_chunk_size = size; }
// cache first chunk for process_first_chunk(), then forward everything else
// to the returned processor
int process(bufferlist&& data, uint64_t logical_offset) final override;
};
using RawObjSet = std::set<rgw_raw_obj>;
// a data sink that writes to rados objects and deletes them on cancelation
class RadosWriter : public rgw::sal::DataProcessor {
Aio *const aio;
RGWRados *const store;
const RGWBucketInfo& bucket_info;
RGWObjectCtx& obj_ctx;
const rgw_obj head_obj;
RGWSI_RADOS::Obj stripe_obj; // current stripe object
RawObjSet written; // set of written objects for deletion
const DoutPrefixProvider *dpp;
optional_yield y;
public:
RadosWriter(Aio *aio, RGWRados *store,
const RGWBucketInfo& bucket_info,
RGWObjectCtx& obj_ctx, const rgw_obj& _head_obj,
const DoutPrefixProvider *dpp, optional_yield y)
: aio(aio), store(store), bucket_info(bucket_info),
obj_ctx(obj_ctx), head_obj(_head_obj), dpp(dpp), y(y)
{}
~RadosWriter();
// add alloc hint to osd
void add_write_hint(librados::ObjectWriteOperation& op);
// change the current stripe object
int set_stripe_obj(const rgw_raw_obj& obj);
// write the data at the given offset of the current stripe object
int process(bufferlist&& data, uint64_t stripe_offset) override;
// write the data as an exclusive create and wait for it to complete
int write_exclusive(const bufferlist& data);
int drain();
// when the operation completes successfully, clear the set of written objects
// so they aren't deleted on destruction
void clear_written() { written.clear(); }
};
// a rados object processor that stripes according to RGWObjManifest
class ManifestObjectProcessor : public HeadObjectProcessor,
public StripeGenerator {
protected:
RGWRados* const store;
RGWBucketInfo& bucket_info;
rgw_placement_rule tail_placement_rule;
rgw_user owner;
RGWObjectCtx& obj_ctx;
rgw_obj head_obj;
RadosWriter writer;
RGWObjManifest manifest;
RGWObjManifest::generator manifest_gen;
ChunkProcessor chunk;
StripeProcessor stripe;
const DoutPrefixProvider *dpp;
// implements StripeGenerator
int next(uint64_t offset, uint64_t *stripe_size) override;
public:
ManifestObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner, RGWObjectCtx& _obj_ctx,
const rgw_obj& _head_obj,
const DoutPrefixProvider* dpp, optional_yield y)
: HeadObjectProcessor(0),
store(store), bucket_info(bucket_info),
owner(owner),
obj_ctx(_obj_ctx), head_obj(_head_obj),
writer(aio, store, bucket_info, obj_ctx, head_obj, dpp, y),
chunk(&writer, 0), stripe(&chunk, this, 0), dpp(dpp) {
if (ptail_placement_rule) {
tail_placement_rule = *ptail_placement_rule;
}
}
void set_owner(const rgw_user& _owner) {
owner = _owner;
}
void set_tail_placement(const rgw_placement_rule& tpr) {
tail_placement_rule = tpr;
}
void set_tail_placement(const rgw_placement_rule&& tpr) {
tail_placement_rule = tpr;
}
};
// a processor that completes with an atomic write to the head object as part of
// a bucket index transaction
class AtomicObjectProcessor : public ManifestObjectProcessor {
const std::optional<uint64_t> olh_epoch;
const std::string unique_tag;
bufferlist first_chunk; // written with the head in complete()
int process_first_chunk(bufferlist&& data, rgw::sal::DataProcessor **processor) override;
public:
AtomicObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner,
RGWObjectCtx& obj_ctx, const rgw_obj& _head_obj,
std::optional<uint64_t> olh_epoch,
const std::string& unique_tag,
const DoutPrefixProvider *dpp, optional_yield y)
: ManifestObjectProcessor(aio, store, bucket_info, ptail_placement_rule,
owner, obj_ctx, _head_obj, dpp, y),
olh_epoch(olh_epoch), unique_tag(unique_tag)
{}
// prepare a trivial manifest
int prepare(optional_yield y) override;
// write the head object atomically in a bucket index transaction
int complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y) override;
};
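
/*
 * Illustrative lifecycle sketch (not upstream code): every object processor
 * follows the same prepare/process/complete sequence; for an atomic PUT it
 * looks roughly like this (error handling elided):
 *
 *   AtomicObjectProcessor p(aio, store, bucket_info, nullptr, owner,
 *                           obj_ctx, head_obj, std::nullopt, req_id,
 *                           dpp, y);
 *   p.prepare(y);
 *   p.process(std::move(data), 0);  // one or more sequential writes
 *   p.process({}, total_len);       // zero-length write == flush
 *   p.complete(total_len, etag, &mtime, set_mtime, attrs, delete_at,
 *              nullptr, nullptr, nullptr, nullptr, &canceled, y);
 */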
// a processor for multipart parts, which don't require atomic completion. the
// part's head is written with an exclusive create to detect racing uploads of
// the same part/upload id, which are restarted with a random oid prefix
class MultipartObjectProcessor : public ManifestObjectProcessor {
const rgw_obj target_obj; // target multipart object
const std::string upload_id;
const int part_num;
const std::string part_num_str;
RGWMPObj mp;
// write the first chunk and wait on aio->drain() for its completion.
// on EEXIST, retry with random prefix
int process_first_chunk(bufferlist&& data, rgw::sal::DataProcessor **processor) override;
// prepare the head stripe and manifest
int prepare_head();
public:
MultipartObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner, RGWObjectCtx& obj_ctx,
const rgw_obj& _head_obj,
const std::string& upload_id, uint64_t part_num,
const std::string& part_num_str,
const DoutPrefixProvider *dpp, optional_yield y)
: ManifestObjectProcessor(aio, store, bucket_info, ptail_placement_rule,
owner, obj_ctx, _head_obj, dpp, y),
target_obj(head_obj), upload_id(upload_id),
part_num(part_num), part_num_str(part_num_str),
mp(head_obj.key.name, upload_id)
{}
// prepare a multipart manifest
int prepare(optional_yield y) override;
// write the head object attributes in a bucket index transaction, then
// register the completed part with the multipart meta object
int complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y) override;
};
class AppendObjectProcessor : public ManifestObjectProcessor {
uint64_t cur_part_num;
uint64_t position;
uint64_t cur_size;
uint64_t *cur_accounted_size;
std::string cur_etag;
const std::string unique_tag;
RGWObjManifest *cur_manifest;
int process_first_chunk(bufferlist&& data, rgw::sal::DataProcessor **processor) override;
public:
AppendObjectProcessor(Aio *aio, RGWRados* store,
RGWBucketInfo& bucket_info,
const rgw_placement_rule *ptail_placement_rule,
const rgw_user& owner, RGWObjectCtx& obj_ctx,
const rgw_obj& _head_obj,
const std::string& unique_tag, uint64_t position,
uint64_t *cur_accounted_size,
const DoutPrefixProvider *dpp, optional_yield y)
: ManifestObjectProcessor(aio, store, bucket_info, ptail_placement_rule,
owner, obj_ctx, _head_obj, dpp, y),
position(position), cur_size(0), cur_accounted_size(cur_accounted_size),
unique_tag(unique_tag), cur_manifest(nullptr)
{}
int prepare(optional_yield y) override;
int complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs, ceph::real_time delete_at,
const char *if_match, const char *if_nomatch, const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y) override;
};
} // namespace putobj
} // namespace rgw
| 10,568 | 36.34629 | 93 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_rados.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "include/compat.h"
#include <errno.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sstream>
#include <boost/algorithm/string.hpp>
#include <string_view>
#include <boost/container/flat_set.hpp>
#include <boost/format.hpp>
#include <boost/optional.hpp>
#include <boost/utility/in_place_factory.hpp>
#include "common/ceph_json.h"
#include "common/errno.h"
#include "common/Formatter.h"
#include "common/Throttle.h"
#include "common/BackTrace.h"
#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_cache.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h" /* for dumping s3policy in debug log */
#include "rgw_aio_throttle.h"
#include "driver/rados/rgw_bucket.h"
#include "rgw_rest_conn.h"
#include "rgw_cr_rados.h"
#include "rgw_cr_rest.h"
#include "rgw_datalog.h"
#include "rgw_putobj_processor.h"
#include "cls/rgw/cls_rgw_ops.h"
#include "cls/rgw/cls_rgw_client.h"
#include "cls/rgw/cls_rgw_const.h"
#include "cls/refcount/cls_refcount_client.h"
#include "cls/version/cls_version_client.h"
#include "osd/osd_types.h"
#include "rgw_tools.h"
#include "rgw_coroutine.h"
#include "rgw_compression.h"
#include "rgw_etag_verifier.h"
#include "rgw_worker.h"
#include "rgw_notify.h"
#include "rgw_http_errors.h"
#undef fork // fails to compile RGWPeriod::fork() below
#include "common/Clock.h"
#include <string>
#include <iostream>
#include <vector>
#include <atomic>
#include <list>
#include <map>
#include "include/random.h"
#include "rgw_gc.h"
#include "rgw_lc.h"
#include "rgw_object_expirer_core.h"
#include "rgw_sync.h"
#include "rgw_sync_counters.h"
#include "rgw_sync_trace.h"
#include "rgw_trim_datalog.h"
#include "rgw_trim_mdlog.h"
#include "rgw_data_sync.h"
#include "rgw_realm_watcher.h"
#include "rgw_reshard.h"
#include "rgw_cr_rados.h"
#include "services/svc_zone.h"
#include "services/svc_zone_utils.h"
#include "services/svc_quota.h"
#include "services/svc_sync_modules.h"
#include "services/svc_sys_obj.h"
#include "services/svc_sys_obj_cache.h"
#include "services/svc_bucket.h"
#include "services/svc_mdlog.h"
#include "compressor/Compressor.h"
#include "rgw_d3n_datacache.h"
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/rgw_rados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
using namespace librados;
#define ldout_bitx(_bitx, _dpp, _level) if(_bitx) { ldpp_dout(_dpp, 0) << "BITX: "
#define ldout_bitx_c(_bitx, _ctx, _level) if(_bitx) { ldout(_ctx, 0) << "BITX: "
#define dendl_bitx dendl ; }
static string shadow_ns = "shadow";
static string default_bucket_index_pool_suffix = "rgw.buckets.index";
static string default_storage_extra_pool_suffix = "rgw.buckets.non-ec";
static RGWObjCategory main_category = RGWObjCategory::Main;
#define RGW_USAGE_OBJ_PREFIX "usage."
rgw_raw_obj rgw_obj_select::get_raw_obj(RGWRados* store) const
{
if (!is_raw) {
rgw_raw_obj r;
store->obj_to_raw(placement_rule, obj, &r);
return r;
}
return raw_obj;
}
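// RGWObjVersionTracker helpers: reads capture the object's current
// cls_version into read_version; writes either assert an expected version
// (version_for_check) and/or set an explicit one (version_for_write),
// falling back to an atomic increment. apply_write() below then mirrors
// locally what the OSD did, so a subsequent operation can re-check the
// version without another read.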
void RGWObjVersionTracker::prepare_op_for_read(ObjectReadOperation* op)
{
obj_version* check_objv = version_for_check();
if (check_objv) {
cls_version_check(*op, *check_objv, VER_COND_EQ);
}
cls_version_read(*op, &read_version);
}
void RGWObjVersionTracker::prepare_op_for_write(ObjectWriteOperation *op)
{
obj_version* check_objv = version_for_check();
obj_version* modify_version = version_for_write();
if (check_objv) {
cls_version_check(*op, *check_objv, VER_COND_EQ);
}
if (modify_version) {
cls_version_set(*op, *modify_version);
} else {
cls_version_inc(*op);
}
}
void RGWObjVersionTracker::apply_write()
{
const bool checked = (read_version.ver != 0);
const bool incremented = (write_version.ver == 0);
if (checked && incremented) {
// apply cls_version_inc() so our next operation can recheck it
++read_version.ver;
} else {
read_version = write_version;
}
write_version = obj_version();
}
RGWObjStateManifest *RGWObjectCtx::get_state(const rgw_obj& obj) {
RGWObjStateManifest *result;
typename std::map<rgw_obj, RGWObjStateManifest>::iterator iter;
lock.lock_shared();
assert (!obj.empty());
iter = objs_state.find(obj);
if (iter != objs_state.end()) {
result = &iter->second;
lock.unlock_shared();
} else {
lock.unlock_shared();
lock.lock();
result = &objs_state[obj];
lock.unlock();
}
return result;
}
void RGWObjectCtx::set_compressed(const rgw_obj& obj) {
std::unique_lock wl{lock};
assert (!obj.empty());
objs_state[obj].state.compressed = true;
}
void RGWObjectCtx::set_atomic(const rgw_obj& obj) {
std::unique_lock wl{lock};
assert (!obj.empty());
objs_state[obj].state.is_atomic = true;
}
void RGWObjectCtx::set_prefetch_data(const rgw_obj& obj) {
std::unique_lock wl{lock};
assert (!obj.empty());
objs_state[obj].state.prefetch_data = true;
}
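// drop any cached state for obj, but preserve the is_atomic/prefetch_data
// hints (carrying the compressed flag along with them), since those
// describe caller intent rather than on-disk state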
void RGWObjectCtx::invalidate(const rgw_obj& obj) {
std::unique_lock wl{lock};
auto iter = objs_state.find(obj);
if (iter == objs_state.end()) {
return;
}
bool is_atomic = iter->second.state.is_atomic;
bool prefetch_data = iter->second.state.prefetch_data;
bool compressed = iter->second.state.compressed;
objs_state.erase(iter);
if (is_atomic || prefetch_data) {
auto& sm = objs_state[obj];
sm.state.is_atomic = is_atomic;
sm.state.prefetch_data = prefetch_data;
sm.state.compressed = compressed;
}
}
class RGWMetaNotifierManager : public RGWCoroutinesManager {
RGWRados* store;
RGWHTTPManager http_manager;
public:
RGWMetaNotifierManager(RGWRados *_driver) : RGWCoroutinesManager(_driver->ctx(), _driver->get_cr_registry()), store(_driver),
http_manager(store->ctx(), completion_mgr) {
http_manager.start();
}
int notify_all(const DoutPrefixProvider *dpp, map<rgw_zone_id, RGWRESTConn *>& conn_map, set<int>& shards) {
rgw_http_param_pair pairs[] = { { "type", "metadata" },
{ "notify", NULL },
{ NULL, NULL } };
list<RGWCoroutinesStack *> stacks;
for (auto iter = conn_map.begin(); iter != conn_map.end(); ++iter) {
RGWRESTConn *conn = iter->second;
RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), this);
stack->call(new RGWPostRESTResourceCR<set<int>, int>(store->ctx(), conn, &http_manager, "/admin/log", pairs, shards, NULL));
stacks.push_back(stack);
}
return run(dpp, stacks);
}
};
class RGWDataNotifierManager : public RGWCoroutinesManager {
RGWRados* store;
RGWHTTPManager http_manager;
public:
RGWDataNotifierManager(RGWRados *_driver) : RGWCoroutinesManager(_driver->ctx(), _driver->get_cr_registry()), store(_driver),
http_manager(store->ctx(), completion_mgr) {
http_manager.start();
}
int notify_all(const DoutPrefixProvider *dpp, map<rgw_zone_id, RGWRESTConn *>& conn_map,
bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& shards) {
list<RGWCoroutinesStack *> stacks;
const char *source_zone = store->svc.zone->get_zone_params().get_id().c_str();
for (auto iter = conn_map.begin(); iter != conn_map.end(); ++iter) {
RGWRESTConn *conn = iter->second;
RGWCoroutinesStack *stack = new RGWCoroutinesStack(store->ctx(), this);
stack->call(new RGWDataPostNotifyCR(store, http_manager, shards, source_zone, conn));
stacks.push_back(stack);
}
return run(dpp, stacks);
}
};
/* class RGWRadosThread */
void RGWRadosThread::start()
{
worker = new Worker(cct, this);
worker->create(thread_name.c_str());
}
void RGWRadosThread::stop()
{
down_flag = true;
stop_process();
if (worker) {
worker->signal();
worker->join();
}
delete worker;
worker = NULL;
}
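// worker loop: run process() once per interval, re-reading interval_msec()
// each round in case it was reconfigured; an interval of 0 means the
// processor runs until stopped and the worker just waits for a signal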
void *RGWRadosThread::Worker::entry() {
uint64_t msec = processor->interval_msec();
auto interval = std::chrono::milliseconds(msec);
do {
auto start = ceph::real_clock::now();
int r = processor->process(this);
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: processor->process() returned error r=" << r << dendl;
}
if (processor->going_down())
break;
auto end = ceph::real_clock::now() - start;
uint64_t cur_msec = processor->interval_msec();
if (cur_msec != msec) { /* was it reconfigured? */
msec = cur_msec;
interval = std::chrono::milliseconds(msec);
}
if (cur_msec > 0) {
if (interval <= end)
continue; // next round
auto wait_time = interval - end;
wait_interval(wait_time);
} else {
wait();
}
} while (!processor->going_down());
return NULL;
}
class RGWMetaNotifier : public RGWRadosThread {
RGWMetaNotifierManager notify_mgr;
RGWMetadataLog *const log;
uint64_t interval_msec() override {
return cct->_conf->rgw_md_notify_interval_msec;
}
void stop_process() override {
notify_mgr.stop();
}
public:
RGWMetaNotifier(RGWRados *_driver, RGWMetadataLog* log)
: RGWRadosThread(_driver, "meta-notifier"), notify_mgr(_driver), log(log) {}
int process(const DoutPrefixProvider *dpp) override;
};
int RGWMetaNotifier::process(const DoutPrefixProvider *dpp)
{
set<int> shards;
log->read_clear_modified(shards);
if (shards.empty()) {
return 0;
}
for (set<int>::iterator iter = shards.begin(); iter != shards.end(); ++iter) {
ldpp_dout(dpp, 20) << __func__ << "(): notifying mdlog change, shard_id=" << *iter << dendl;
}
notify_mgr.notify_all(dpp, store->svc.zone->get_zone_conn_map(), shards);
return 0;
}
class RGWDataNotifier : public RGWRadosThread {
RGWDataNotifierManager notify_mgr;
bc::flat_set<rgw_data_notify_entry> entry;
uint64_t interval_msec() override {
return cct->_conf.get_val<int64_t>("rgw_data_notify_interval_msec");
}
void stop_process() override {
notify_mgr.stop();
}
public:
RGWDataNotifier(RGWRados *_driver) : RGWRadosThread(_driver, "data-notifier"), notify_mgr(_driver) {}
int process(const DoutPrefixProvider *dpp) override;
};
int RGWDataNotifier::process(const DoutPrefixProvider *dpp)
{
auto data_log = store->svc.datalog_rados;
if (!data_log) {
return 0;
}
auto shards = data_log->read_clear_modified();
if (shards.empty()) {
return 0;
}
for (const auto& [shard_id, entries] : shards) {
for (const auto& entry : entries) {
ldpp_dout(dpp, 20) << __func__ << "(): notifying datalog change, shard_id="
<< shard_id << ":" << entry.gen << ":" << entry.key << dendl;
}
}
notify_mgr.notify_all(dpp, store->svc.zone->get_zone_data_notify_to_map(), shards);
return 0;
}
class RGWSyncProcessorThread : public RGWRadosThread {
public:
RGWSyncProcessorThread(RGWRados *_driver, const string& thread_name = "radosgw") : RGWRadosThread(_driver, thread_name) {}
RGWSyncProcessorThread(RGWRados *_driver) : RGWRadosThread(_driver) {}
~RGWSyncProcessorThread() override {}
int init(const DoutPrefixProvider *dpp) override = 0 ;
int process(const DoutPrefixProvider *dpp) override = 0;
};
class RGWMetaSyncProcessorThread : public RGWSyncProcessorThread
{
RGWMetaSyncStatusManager sync;
uint64_t interval_msec() override {
return 0; /* no interval associated, it'll run once until stopped */
}
void stop_process() override {
sync.stop();
}
public:
RGWMetaSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados)
: RGWSyncProcessorThread(_driver->getRados(), "meta-sync"), sync(_driver, async_rados) {}
void wakeup_sync_shards(set<int>& shard_ids) {
for (set<int>::iterator iter = shard_ids.begin(); iter != shard_ids.end(); ++iter) {
sync.wakeup(*iter);
}
}
RGWMetaSyncStatusManager* get_manager() { return &sync; }
int init(const DoutPrefixProvider *dpp) override {
int ret = sync.init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: sync.init() returned " << ret << dendl;
return ret;
}
return 0;
}
int process(const DoutPrefixProvider *dpp) override {
sync.run(dpp, null_yield);
return 0;
}
};
class RGWDataSyncProcessorThread : public RGWSyncProcessorThread
{
PerfCountersRef counters;
RGWDataSyncStatusManager sync;
bool initialized;
uint64_t interval_msec() override {
if (initialized) {
return 0; /* no interval associated, it'll run once until stopped */
} else {
#define DATA_SYNC_INIT_WAIT_SEC 20
return DATA_SYNC_INIT_WAIT_SEC * 1000;
}
}
void stop_process() override {
sync.stop();
}
public:
RGWDataSyncProcessorThread(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados,
const RGWZone* source_zone)
: RGWSyncProcessorThread(_driver->getRados(), "data-sync"),
counters(sync_counters::build(store->ctx(), std::string("data-sync-from-") + source_zone->name)),
sync(_driver, async_rados, source_zone->id, counters.get()),
initialized(false) {}
void wakeup_sync_shards(bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& entries) {
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
sync.wakeup(iter->first, iter->second);
}
}
RGWDataSyncStatusManager* get_manager() { return &sync; }
int init(const DoutPrefixProvider *dpp) override {
return 0;
}
int process(const DoutPrefixProvider *dpp) override {
while (!initialized) {
if (going_down()) {
return 0;
}
int ret = sync.init(dpp);
if (ret >= 0) {
initialized = true;
break;
}
/* we'll be back! */
return 0;
}
sync.run(dpp);
return 0;
}
};
class RGWSyncLogTrimThread : public RGWSyncProcessorThread, DoutPrefixProvider
{
RGWCoroutinesManager crs;
rgw::sal::RadosStore* store;
rgw::BucketTrimManager *bucket_trim;
RGWHTTPManager http;
const utime_t trim_interval;
uint64_t interval_msec() override { return 0; }
void stop_process() override { crs.stop(); }
public:
RGWSyncLogTrimThread(rgw::sal::RadosStore* store, rgw::BucketTrimManager *bucket_trim,
int interval)
: RGWSyncProcessorThread(store->getRados(), "sync-log-trim"),
crs(store->ctx(), store->getRados()->get_cr_registry()), store(store),
bucket_trim(bucket_trim),
http(store->ctx(), crs.get_completion_mgr()),
trim_interval(interval, 0)
{}
int init(const DoutPrefixProvider *dpp) override {
return http.start();
}
int process(const DoutPrefixProvider *dpp) override {
list<RGWCoroutinesStack*> stacks;
auto metatrimcr = create_meta_log_trim_cr(this, static_cast<rgw::sal::RadosStore*>(store), &http,
cct->_conf->rgw_md_log_max_shards,
trim_interval);
if (!metatrimcr) {
ldpp_dout(dpp, -1) << "Bailing out of trim thread!" << dendl;
return -EINVAL;
}
auto meta = new RGWCoroutinesStack(store->ctx(), &crs);
meta->call(metatrimcr);
stacks.push_back(meta);
if (store->svc()->zone->sync_module_exports_data()) {
auto data = new RGWCoroutinesStack(store->ctx(), &crs);
data->call(create_data_log_trim_cr(dpp, static_cast<rgw::sal::RadosStore*>(store), &http,
cct->_conf->rgw_data_log_num_shards,
trim_interval));
stacks.push_back(data);
auto bucket = new RGWCoroutinesStack(store->ctx(), &crs);
bucket->call(bucket_trim->create_bucket_trim_cr(&http));
stacks.push_back(bucket);
}
crs.run(dpp, stacks);
return 0;
}
// implements DoutPrefixProvider
CephContext *get_cct() const override { return store->ctx(); }
unsigned get_subsys() const override
{
return dout_subsys;
}
std::ostream& gen_prefix(std::ostream& out) const override
{
return out << "sync log trim: ";
}
};
void RGWRados::wakeup_meta_sync_shards(set<int>& shard_ids)
{
std::lock_guard l{meta_sync_thread_lock};
if (meta_sync_processor_thread) {
meta_sync_processor_thread->wakeup_sync_shards(shard_ids);
}
}
void RGWRados::wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& entries)
{
ldpp_dout(dpp, 20) << __func__ << ": source_zone=" << source_zone << ", entries=" << entries << dendl;
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
ldpp_dout(dpp, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
bc::flat_set<rgw_data_notify_entry>& entries = iter->second;
for (const auto& [key, gen] : entries) {
ldpp_dout(dpp, 20) << __func__ << ": source_zone=" << source_zone << ", key=" << key
<< ", gen=" << gen << dendl;
}
}
std::lock_guard l{data_sync_thread_lock};
auto iter = data_sync_processor_threads.find(source_zone);
if (iter == data_sync_processor_threads.end()) {
ldpp_dout(dpp, 10) << __func__ << ": couldn't find sync thread for zone " << source_zone << ", skipping async data sync processing" << dendl;
return;
}
RGWDataSyncProcessorThread *thread = iter->second;
ceph_assert(thread);
thread->wakeup_sync_shards(entries);
}
RGWMetaSyncStatusManager* RGWRados::get_meta_sync_manager()
{
std::lock_guard l{meta_sync_thread_lock};
if (meta_sync_processor_thread) {
return meta_sync_processor_thread->get_manager();
}
return nullptr;
}
RGWDataSyncStatusManager* RGWRados::get_data_sync_manager(const rgw_zone_id& source_zone)
{
std::lock_guard l{data_sync_thread_lock};
auto thread = data_sync_processor_threads.find(source_zone);
if (thread == data_sync_processor_threads.end()) {
return nullptr;
}
return thread->second->get_manager();
}
int RGWRados::get_required_alignment(const DoutPrefixProvider *dpp, const rgw_pool& pool, uint64_t *alignment)
{
IoCtx ioctx;
int r = open_pool_ctx(dpp, pool, ioctx, false, true);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: open_pool_ctx() returned " << r << dendl;
return r;
}
bool req;
r = ioctx.pool_requires_alignment2(&req);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: ioctx.pool_requires_alignment2() returned "
<< r << dendl;
return r;
}
if (!req) {
*alignment = 0;
return 0;
}
uint64_t align;
r = ioctx.pool_required_alignment2(&align);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: ioctx.pool_required_alignment2() returned "
<< r << dendl;
return r;
}
if (align != 0) {
ldpp_dout(dpp, 20) << "required alignment=" << align << dendl;
}
*alignment = align;
return 0;
}
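// a worked example for get_max_aligned_size() below: with a (hypothetical)
// required alignment of 4 MiB, a 10 MiB size is rounded down to 8 MiB, while
// a 1 MiB size is rounded *up* to 4 MiB, since the result is never smaller
// than one alignment unit; an alignment of 0 leaves the size untouched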
void RGWRados::get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t *max_size)
{
if (alignment == 0) {
*max_size = size;
return;
}
if (size <= alignment) {
*max_size = alignment;
return;
}
*max_size = size - (size % alignment);
}
int RGWRados::get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment)
{
uint64_t alignment;
int r = get_required_alignment(dpp, pool, &alignment);
if (r < 0) {
return r;
}
if (palignment) {
*palignment = alignment;
}
uint64_t config_chunk_size = cct->_conf->rgw_max_chunk_size;
get_max_aligned_size(config_chunk_size, alignment, max_chunk_size);
ldpp_dout(dpp, 20) << "max_chunk_size=" << *max_chunk_size << dendl;
return 0;
}
int RGWRados::get_max_chunk_size(const rgw_placement_rule& placement_rule, const rgw_obj& obj,
uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment)
{
rgw_pool pool;
if (!get_obj_data_pool(placement_rule, obj, &pool)) {
ldpp_dout(dpp, 0) << "ERROR: failed to get data pool for object " << obj << dendl;
return -EIO;
}
return get_max_chunk_size(pool, max_chunk_size, dpp, palignment);
}
void add_datalog_entry(const DoutPrefixProvider* dpp,
RGWDataChangesLog* datalog,
const RGWBucketInfo& bucket_info,
uint32_t shard_id, optional_yield y)
{
const auto& logs = bucket_info.layout.logs;
if (logs.empty()) {
return;
}
int r = datalog->add_entry(dpp, bucket_info, logs.back(), shard_id, y);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed writing data log" << dendl;
} // datalog error is not fatal
}
class RGWIndexCompletionManager;
struct complete_op_data {
ceph::mutex lock = ceph::make_mutex("complete_op_data");
AioCompletion *rados_completion{nullptr};
int manager_shard_id{-1};
RGWIndexCompletionManager *manager{nullptr};
rgw_obj obj;
RGWModifyOp op;
string tag;
rgw_bucket_entry_ver ver;
cls_rgw_obj_key key;
rgw_bucket_dir_entry_meta dir_meta;
list<cls_rgw_obj_key> remove_objs;
bool log_op;
uint16_t bilog_op;
rgw_zone_set zones_trace;
bool stopped{false};
void stop() {
std::lock_guard l{lock};
stopped = true;
}
};
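// retries bucket index "complete" operations that raced with a bucket
// reshard: completions that fail with -ERR_BUSY_RESHARDING are re-queued
// here (see handle_completion) and re-driven from a dedicated thread once
// the reshard guard clears, sharded across several locks to reduce
// contention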
class RGWIndexCompletionManager {
RGWRados* const store;
const uint32_t num_shards;
ceph::containers::tiny_vector<ceph::mutex> locks;
std::vector<set<complete_op_data*>> completions;
std::vector<complete_op_data*> retry_completions;
std::condition_variable cond;
std::mutex retry_completions_lock;
bool _stop{false};
std::thread retry_thread;
// used to distribute the completions and the locks they use across
// their respective vectors; it will get incremented and can wrap
// around back to 0 without issue
std::atomic<uint32_t> cur_shard {0};
void process();
void add_completion(complete_op_data *completion);
void stop() {
if (retry_thread.joinable()) {
_stop = true;
cond.notify_all();
retry_thread.join();
}
for (uint32_t i = 0; i < num_shards; ++i) {
std::lock_guard l{locks[i]};
for (auto c : completions[i]) {
c->stop();
}
}
completions.clear();
}
uint32_t next_shard() {
return cur_shard++ % num_shards;
}
public:
RGWIndexCompletionManager(RGWRados *_driver) :
store(_driver),
num_shards(store->ctx()->_conf->rgw_thread_pool_size),
locks{ceph::make_lock_container<ceph::mutex>(
num_shards,
[](const size_t i) {
return ceph::make_mutex("RGWIndexCompletionManager::lock::" +
std::to_string(i));
})},
completions(num_shards),
retry_thread(&RGWIndexCompletionManager::process, this)
{}
~RGWIndexCompletionManager() {
stop();
}
void create_completion(const rgw_obj& obj,
RGWModifyOp op, string& tag,
rgw_bucket_entry_ver& ver,
const cls_rgw_obj_key& key,
rgw_bucket_dir_entry_meta& dir_meta,
list<cls_rgw_obj_key> *remove_objs, bool log_op,
uint16_t bilog_op,
rgw_zone_set *zones_trace,
complete_op_data **result);
bool handle_completion(completion_t cb, complete_op_data *arg);
CephContext* ctx() {
return store->ctx();
}
};
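// librados completion callback: ownership of the completion record returns
// to us here; it is freed immediately if the manager is done with it, but
// must be kept alive when handle_completion() re-queues it for retry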
static void obj_complete_cb(completion_t cb, void *arg)
{
complete_op_data *completion = reinterpret_cast<complete_op_data*>(arg);
completion->lock.lock();
if (completion->stopped) {
completion->lock.unlock(); /* can drop lock, no one else is referencing us */
delete completion;
return;
}
bool need_delete = completion->manager->handle_completion(cb, completion);
completion->lock.unlock();
if (need_delete) {
delete completion;
}
}
void RGWIndexCompletionManager::process()
{
DoutPrefix dpp(store->ctx(), dout_subsys, "rgw index completion thread: ");
while(!_stop) {
std::vector<complete_op_data*> comps;
{
std::unique_lock l{retry_completions_lock};
cond.wait(l, [this](){return _stop || !retry_completions.empty();});
if (_stop) {
return;
}
retry_completions.swap(comps);
}
for (auto c : comps) {
std::unique_ptr<complete_op_data> up{c};
ldpp_dout(&dpp, 20) << __func__ << "(): handling completion for key=" << c->key << dendl;
RGWRados::BucketShard bs(store);
RGWBucketInfo bucket_info;
int r = bs.init(c->obj.bucket, c->obj, &bucket_info, &dpp, null_yield);
if (r < 0) {
ldpp_dout(&dpp, 0) << "ERROR: " << __func__ << "(): failed to initialize BucketShard, obj=" << c->obj << " r=" << r << dendl;
/* not much to do */
continue;
}
r = store->guard_reshard(&dpp, &bs, c->obj, bucket_info,
[&](RGWRados::BucketShard *bs) -> int {
const bool bitx = ctx()->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx(bitx, &dpp, 10) <<
"ENTERING " << __func__ << ": bucket-shard=" << bs <<
" obj=" << c->obj << " tag=" << c->tag <<
" op=" << c->op << ", remove_objs=" << c->remove_objs << dendl_bitx;
ldout_bitx(bitx, &dpp, 25) <<
"BACKTRACE: " << __func__ << ": " << ClibBackTrace(1) << dendl_bitx;
librados::ObjectWriteOperation o;
o.assert_exists();
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_complete_op(o, c->op, c->tag, c->ver, c->key, c->dir_meta, &c->remove_objs,
c->log_op, c->bilog_op, &c->zones_trace);
int ret = bs->bucket_obj.operate(&dpp, &o, null_yield);
ldout_bitx(bitx, &dpp, 10) <<
"EXITING " << __func__ << ": ret=" << dendl_bitx;
return ret;
}, null_yield);
if (r < 0) {
ldpp_dout(&dpp, 0) << "ERROR: " << __func__ << "(): bucket index completion failed, obj=" << c->obj << " r=" << r << dendl;
/* ignoring error, can't do anything about it */
continue;
}
// This null_yield can stay, for now, since we're in our own thread
add_datalog_entry(&dpp, store->svc.datalog_rados, bucket_info,
bs.shard_id, null_yield);
}
}
}
void RGWIndexCompletionManager::create_completion(const rgw_obj& obj,
RGWModifyOp op, string& tag,
rgw_bucket_entry_ver& ver,
const cls_rgw_obj_key& key,
rgw_bucket_dir_entry_meta& dir_meta,
list<cls_rgw_obj_key> *remove_objs, bool log_op,
uint16_t bilog_op,
rgw_zone_set *zones_trace,
complete_op_data **result)
{
complete_op_data *entry = new complete_op_data;
int shard_id = next_shard();
entry->manager_shard_id = shard_id;
entry->manager = this;
entry->obj = obj;
entry->op = op;
entry->tag = tag;
entry->ver = ver;
entry->key = key;
entry->dir_meta = dir_meta;
entry->log_op = log_op;
entry->bilog_op = bilog_op;
if (remove_objs) {
for (auto iter = remove_objs->begin(); iter != remove_objs->end(); ++iter) {
entry->remove_objs.push_back(*iter);
}
}
if (zones_trace) {
entry->zones_trace = *zones_trace;
} else {
entry->zones_trace.insert(store->svc.zone->get_zone().id, obj.bucket.get_key());
}
*result = entry;
entry->rados_completion = librados::Rados::aio_create_completion(entry, obj_complete_cb);
std::lock_guard l{locks[shard_id]};
const auto ok = completions[shard_id].insert(entry).second;
ceph_assert(ok);
}
void RGWIndexCompletionManager::add_completion(complete_op_data *completion) {
{
std::lock_guard l{retry_completions_lock};
retry_completions.push_back(completion);
}
cond.notify_all();
}
bool RGWIndexCompletionManager::handle_completion(completion_t cb, complete_op_data *arg)
{
int shard_id = arg->manager_shard_id;
{
std::lock_guard l{locks[shard_id]};
auto& comps = completions[shard_id];
auto iter = comps.find(arg);
if (iter == comps.end()) {
ldout(arg->manager->ctx(), 0) << __func__ << "(): cannot find completion for obj=" << arg->key << dendl;
return true;
}
comps.erase(iter);
}
int r = rados_aio_get_return_value(cb);
if (r != -ERR_BUSY_RESHARDING) {
ldout(arg->manager->ctx(), 20) << __func__ << "(): completion " <<
(r == 0 ? "ok" : "failed with " + to_string(r)) <<
" for obj=" << arg->key << dendl;
return true;
}
add_completion(arg);
ldout(arg->manager->ctx(), 20) << __func__ << "(): async completion added for obj=" << arg->key << dendl;
return false;
}
void RGWRados::finalize()
{
/* Before joining any sync threads, drain outstanding requests &
* mark the async_processor as going_down() */
if (svc.rados) {
svc.rados->stop_processor();
}
if (run_sync_thread) {
std::lock_guard l{meta_sync_thread_lock};
meta_sync_processor_thread->stop();
std::lock_guard dl{data_sync_thread_lock};
for (auto iter : data_sync_processor_threads) {
RGWDataSyncProcessorThread *thread = iter.second;
thread->stop();
}
if (sync_log_trimmer) {
sync_log_trimmer->stop();
}
}
if (run_sync_thread) {
delete meta_sync_processor_thread;
meta_sync_processor_thread = NULL;
std::lock_guard dl{data_sync_thread_lock};
for (auto iter : data_sync_processor_threads) {
RGWDataSyncProcessorThread *thread = iter.second;
delete thread;
}
data_sync_processor_threads.clear();
delete sync_log_trimmer;
sync_log_trimmer = nullptr;
bucket_trim = boost::none;
}
if (meta_notifier) {
meta_notifier->stop();
delete meta_notifier;
}
if (data_notifier) {
data_notifier->stop();
delete data_notifier;
}
delete sync_tracer;
delete lc;
lc = NULL;
delete gc;
gc = NULL;
delete obj_expirer;
obj_expirer = NULL;
RGWQuotaHandler::free_handler(quota_handler);
if (cr_registry) {
cr_registry->put();
}
svc.shutdown();
delete binfo_cache;
delete obj_tombstone_cache;
delete topic_cache;
if (d3n_data_cache)
delete d3n_data_cache;
if (reshard_wait.get()) {
reshard_wait->stop();
reshard_wait.reset();
}
if (run_reshard_thread) {
reshard->stop_processor();
}
delete reshard;
delete index_completion_manager;
if (run_notification_thread) {
rgw::notify::shutdown();
}
}
/**
* Initialize the RADOS instance and prepare to do other ops
* Returns 0 on success, -ERR# on failure.
*/
int RGWRados::init_rados()
{
int ret = 0;
ret = rados.init_with_context(cct);
if (ret < 0) {
return ret;
}
ret = rados.connect();
if (ret < 0) {
return ret;
}
auto crs = std::unique_ptr<RGWCoroutinesManagerRegistry>{
new RGWCoroutinesManagerRegistry(cct)};
ret = crs->hook_to_admin_command("cr dump");
if (ret < 0) {
return ret;
}
cr_registry = crs.release();
if (use_datacache) {
d3n_data_cache = new D3nDataCache();
d3n_data_cache->init(cct);
}
return ret;
}
int RGWRados::register_to_service_map(const DoutPrefixProvider *dpp, const string& daemon_type, const map<string, string>& meta)
{
string name = cct->_conf->name.get_id();
if (name.compare(0, 4, "rgw.") == 0) {
name = name.substr(4);
}
map<string,string> metadata = meta;
metadata["num_handles"] = "1"s;
metadata["zonegroup_id"] = svc.zone->get_zonegroup().get_id();
metadata["zonegroup_name"] = svc.zone->get_zonegroup().get_name();
metadata["zone_name"] = svc.zone->zone_name();
metadata["zone_id"] = svc.zone->zone_id().id;
metadata["realm_name"] = svc.zone->get_realm().get_name();
metadata["realm_id"] = svc.zone->get_realm().get_id();
metadata["id"] = name;
int ret = rados.service_daemon_register(
daemon_type,
stringify(rados.get_instance_id()),
metadata);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: service_daemon_register() returned ret=" << ret << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
int RGWRados::update_service_map(const DoutPrefixProvider *dpp, std::map<std::string, std::string>&& status)
{
int ret = rados.service_daemon_update_status(move(status));
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: service_daemon_update_status() returned ret=" << ret << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
/**
* Initialize the RADOS instance and prepare to do other ops
* Returns 0 on success, -ERR# on failure.
*/
int RGWRados::init_complete(const DoutPrefixProvider *dpp, optional_yield y)
{
int ret;
/*
* create sync module instance even if we don't run sync thread, might need it for radosgw-admin
*/
sync_module = svc.sync_modules->get_sync_module();
ret = open_root_pool_ctx(dpp);
if (ret < 0)
return ret;
ret = open_gc_pool_ctx(dpp);
if (ret < 0)
return ret;
ret = open_lc_pool_ctx(dpp);
if (ret < 0)
return ret;
ret = open_objexp_pool_ctx(dpp);
if (ret < 0)
return ret;
ret = open_reshard_pool_ctx(dpp);
if (ret < 0)
return ret;
ret = open_notif_pool_ctx(dpp);
if (ret < 0)
return ret;
pools_initialized = true;
if (use_gc) {
gc = new RGWGC();
gc->initialize(cct, this, y);
} else {
ldpp_dout(dpp, 5) << "note: GC not initialized" << dendl;
}
obj_expirer = new RGWObjectExpirer(this->driver);
if (use_gc_thread && use_gc) {
gc->start_processor();
obj_expirer->start_processor();
}
auto& current_period = svc.zone->get_current_period();
auto& zonegroup = svc.zone->get_zonegroup();
auto& zone_params = svc.zone->get_zone_params();
auto& zone = svc.zone->get_zone();
/* no point of running sync thread if we don't have a master zone configured
or there is no rest_master_conn */
if (!svc.zone->need_to_sync()) {
run_sync_thread = false;
}
if (svc.zone->is_meta_master()) {
auto md_log = svc.mdlog->get_log(current_period.get_id());
meta_notifier = new RGWMetaNotifier(this, md_log);
meta_notifier->start();
}
/* init it anyway, might run sync through radosgw-admin explicitly */
sync_tracer = new RGWSyncTraceManager(cct, cct->_conf->rgw_sync_trace_history_size);
sync_tracer->init(this);
ret = sync_tracer->hook_to_admin_command();
if (ret < 0) {
return ret;
}
if (run_sync_thread) {
for (const auto &pt: zonegroup.placement_targets) {
if (zone_params.placement_pools.find(pt.second.name)
== zone_params.placement_pools.end()){
ldpp_dout(dpp, 0) << "WARNING: This zone does not contain the placement target "
<< pt.second.name << " present in zonegroup" << dendl;
}
}
auto async_processor = svc.rados->get_async_processor();
std::lock_guard l{meta_sync_thread_lock};
meta_sync_processor_thread = new RGWMetaSyncProcessorThread(this->driver, async_processor);
ret = meta_sync_processor_thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize meta sync thread" << dendl;
return ret;
}
meta_sync_processor_thread->start();
// configure the bucket trim manager
rgw::BucketTrimConfig config;
rgw::configure_bucket_trim(cct, config);
bucket_trim.emplace(this->driver, config);
ret = bucket_trim->init();
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start bucket trim manager" << dendl;
return ret;
}
svc.datalog_rados->set_observer(&*bucket_trim);
std::lock_guard dl{data_sync_thread_lock};
for (auto source_zone : svc.zone->get_data_sync_source_zones()) {
ldpp_dout(dpp, 5) << "starting data sync thread for zone " << source_zone->name << dendl;
auto *thread = new RGWDataSyncProcessorThread(this->driver, svc.rados->get_async_processor(), source_zone);
ret = thread->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize data sync thread" << dendl;
return ret;
}
thread->start();
data_sync_processor_threads[rgw_zone_id(source_zone->id)] = thread;
}
auto interval = cct->_conf->rgw_sync_log_trim_interval;
if (interval > 0) {
sync_log_trimmer = new RGWSyncLogTrimThread(this->driver, &*bucket_trim, interval);
ret = sync_log_trimmer->init(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize sync log trim thread" << dendl;
return ret;
}
sync_log_trimmer->start();
}
}
if (cct->_conf->rgw_data_notify_interval_msec) {
data_notifier = new RGWDataNotifier(this);
data_notifier->start();
}
binfo_cache = new RGWChainedCacheImpl<bucket_info_entry>;
binfo_cache->init(svc.cache);
topic_cache = new RGWChainedCacheImpl<pubsub_bucket_topics_entry>;
topic_cache->init(svc.cache);
lc = new RGWLC();
lc->initialize(cct, this->driver);
if (use_lc_thread)
lc->start_processor();
quota_handler = RGWQuotaHandler::generate_handler(dpp, this->driver, quota_threads);
bucket_index_max_shards = (cct->_conf->rgw_override_bucket_index_max_shards ? cct->_conf->rgw_override_bucket_index_max_shards :
zone.bucket_index_max_shards);
if (bucket_index_max_shards > get_max_bucket_shards()) {
bucket_index_max_shards = get_max_bucket_shards();
ldpp_dout(dpp, 1) << __func__ << " bucket index max shards is too large, reset to value: "
<< get_max_bucket_shards() << dendl;
}
ldpp_dout(dpp, 20) << __func__ << " bucket index max shards: " << bucket_index_max_shards << dendl;
bool need_tombstone_cache = !svc.zone->get_zone_data_notify_to_map().empty(); /* have zones syncing from us */
if (need_tombstone_cache) {
obj_tombstone_cache = new tombstone_cache_t(cct->_conf->rgw_obj_tombstone_cache_size);
}
reshard_wait = std::make_shared<RGWReshardWait>();
reshard = new RGWReshard(this->driver);
// disable reshard thread based on zone/zonegroup support
run_reshard_thread = run_reshard_thread && svc.zone->can_reshard();
if (run_reshard_thread) {
reshard->start_processor();
}
index_completion_manager = new RGWIndexCompletionManager(this);
if (run_notification_thread) {
ret = rgw::notify::init(cct, driver, dpp);
if (ret < 0 ) {
ldpp_dout(dpp, 1) << "ERROR: failed to initialize notification manager" << dendl;
}
}
return ret;
}
int RGWRados::init_svc(bool raw, const DoutPrefixProvider *dpp)
{
if (raw) {
return svc.init_raw(cct, use_cache, null_yield, dpp);
}
return svc.init(cct, use_cache, run_sync_thread, null_yield, dpp);
}
int RGWRados::init_ctl(const DoutPrefixProvider *dpp)
{
return ctl.init(&svc, driver, dpp);
}
/**
* Initialize the RADOS instance and prepare to do other ops
* Returns 0 on success, -ERR# on failure.
*/
int RGWRados::init_begin(const DoutPrefixProvider *dpp)
{
int ret;
inject_notify_timeout_probability =
cct->_conf.get_val<double>("rgw_inject_notify_timeout_probability");
max_notify_retries = cct->_conf.get_val<uint64_t>("rgw_max_notify_retries");
ret = init_svc(false, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to init services (ret=" << cpp_strerror(-ret) << ")" << dendl;
return ret;
}
ret = init_ctl(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to init ctls (ret=" << cpp_strerror(-ret) << ")" << dendl;
return ret;
}
host_id = svc.zone_utils->gen_host_id();
return init_rados();
}
/**
* Open the pool used as root for this gateway
* Returns: 0 on success, -ERR# otherwise.
*/
int RGWRados::open_root_pool_ctx(const DoutPrefixProvider *dpp)
{
return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().domain_root, root_pool_ctx, true, true);
}
int RGWRados::open_gc_pool_ctx(const DoutPrefixProvider *dpp)
{
return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().gc_pool, gc_pool_ctx, true, true);
}
int RGWRados::open_lc_pool_ctx(const DoutPrefixProvider *dpp)
{
return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().lc_pool, lc_pool_ctx, true, true);
}
int RGWRados::open_objexp_pool_ctx(const DoutPrefixProvider *dpp)
{
return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, objexp_pool_ctx, true, true);
}
int RGWRados::open_reshard_pool_ctx(const DoutPrefixProvider *dpp)
{
return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().reshard_pool, reshard_pool_ctx, true, true);
}
int RGWRados::open_notif_pool_ctx(const DoutPrefixProvider *dpp)
{
return rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().notif_pool, notif_pool_ctx, true, true);
}
int RGWRados::open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
bool mostly_omap, bool bulk)
{
constexpr bool create = true; // create the pool if it doesn't exist
return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create, mostly_omap, bulk);
}
/**** logs ****/
struct log_list_state {
string prefix;
librados::IoCtx io_ctx;
librados::NObjectIterator obit;
};
int RGWRados::log_list_init(const DoutPrefixProvider *dpp, const string& prefix, RGWAccessHandle *handle)
{
log_list_state *state = new log_list_state;
int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx);
if (r < 0) {
delete state;
return r;
}
try {
state->prefix = prefix;
state->obit = state->io_ctx.nobjects_begin();
*handle = (RGWAccessHandle)state;
return 0;
} catch (const std::system_error& e) {
r = -e.code().value();
ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what()
<< ", returning " << r << dendl;
return r;
} catch (const std::exception& e) {
ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what()
<< ", returning -5" << dendl;
return -EIO;
}
}
int RGWRados::log_list_next(RGWAccessHandle handle, string *name)
{
log_list_state *state = static_cast<log_list_state *>(handle);
while (true) {
if (state->obit == state->io_ctx.nobjects_end()) {
delete state;
return -ENOENT;
}
if (state->prefix.length() &&
state->obit->get_oid().find(state->prefix) != 0) {
state->obit++;
continue;
}
*name = state->obit->get_oid();
state->obit++;
break;
}
return 0;
}
int RGWRados::log_remove(const DoutPrefixProvider *dpp, const string& name)
{
librados::IoCtx io_ctx;
int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, io_ctx);
if (r < 0)
return r;
return io_ctx.remove(name);
}
struct log_show_state {
librados::IoCtx io_ctx;
bufferlist bl;
bufferlist::const_iterator p;
string name;
uint64_t pos;
bool eof;
log_show_state() : pos(0), eof(false) {}
};
int RGWRados::log_show_init(const DoutPrefixProvider *dpp, const string& name, RGWAccessHandle *handle)
{
log_show_state *state = new log_show_state;
int r = rgw_init_ioctx(dpp, get_rados_handle(), svc.zone->get_zone_params().log_pool, state->io_ctx);
if (r < 0) {
delete state;
return r;
}
state->name = name;
*handle = (RGWAccessHandle)state;
return 0;
}
int RGWRados::log_show_next(const DoutPrefixProvider *dpp, RGWAccessHandle handle, rgw_log_entry *entry)
{
log_show_state *state = static_cast<log_show_state *>(handle);
off_t off = state->p.get_off();
ldpp_dout(dpp, 10) << "log_show_next pos " << state->pos << " bl " << state->bl.length()
<< " off " << off
<< " eof " << (int)state->eof
<< dendl;
// read some?
unsigned chunk = 1024*1024;
if ((state->bl.length() - off) < chunk/2 && !state->eof) {
bufferlist more;
int r = state->io_ctx.read(state->name, more, chunk, state->pos);
if (r < 0)
return r;
state->pos += r;
bufferlist old;
try {
old.substr_of(state->bl, off, state->bl.length() - off);
} catch (buffer::error& err) {
return -EINVAL;
}
state->bl = std::move(old);
state->bl.claim_append(more);
state->p = state->bl.cbegin();
if ((unsigned)r < chunk)
state->eof = true;
ldpp_dout(dpp, 10) << " read " << r << dendl;
}
if (state->p.end())
return 0; // end of file
try {
decode(*entry, state->p);
}
catch (const buffer::error &e) {
return -EINVAL;
}
return 1;
}
/**
* usage_log_hash: get usage log key hash, based on name and index
*
* Get the usage object name. Since a user may have more than 1
* object holding that info (multiple shards), we use index to
* specify that shard number. Once index exceeds max shards it
* wraps.
* If name is not being set, results for all users will be returned
* and index will wrap only after total shards number.
*
* @param cct [in] ceph context
* @param name [in] user name
* @param hash [out] hash value
* @param index [in] shard index number
*/
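/* Worked example (hypothetical config values): with rgw_usage_max_shards=32
 * and rgw_usage_max_user_shards=1, every record for user "alice" lands in
 * the single shard "usage.<h>", where h = ceph_str_hash_linux("alice") % 32;
 * with an empty name, the index walks all 32 shards before wrapping.
 */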
static void usage_log_hash(CephContext *cct, const string& name, string& hash, uint32_t index)
{
uint32_t val = index;
if (!name.empty()) {
int max_user_shards = cct->_conf->rgw_usage_max_user_shards;
val %= max_user_shards;
val += ceph_str_hash_linux(name.c_str(), name.size());
}
char buf[17];
int max_shards = cct->_conf->rgw_usage_max_shards;
snprintf(buf, sizeof(buf), RGW_USAGE_OBJ_PREFIX "%u", (unsigned)(val % max_shards));
hash = buf;
}
int RGWRados::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y)
{
uint32_t index = 0;
map<string, rgw_usage_log_info> log_objs;
string hash;
string last_user;
/* restructure usage map, zone by object hash */
map<rgw_user_bucket, RGWUsageBatch>::iterator iter;
for (iter = usage_info.begin(); iter != usage_info.end(); ++iter) {
const rgw_user_bucket& ub = iter->first;
RGWUsageBatch& info = iter->second;
if (ub.user.empty()) {
ldpp_dout(dpp, 0) << "WARNING: RGWRados::log_usage(): user name empty (bucket=" << ub.bucket << "), skipping" << dendl;
continue;
}
if (ub.user != last_user) {
      /* index *should* be random, but in most cases
         rgw_usage_max_user_shards won't exceed 1, so simply
         incrementing it avoids wasting extra cycles */
usage_log_hash(cct, ub.user, hash, index++);
}
last_user = ub.user;
vector<rgw_usage_log_entry>& v = log_objs[hash].entries;
for (auto miter = info.m.begin(); miter != info.m.end(); ++miter) {
v.push_back(miter->second);
}
}
map<string, rgw_usage_log_info>::iterator liter;
for (liter = log_objs.begin(); liter != log_objs.end(); ++liter) {
int r = cls_obj_usage_log_add(dpp, liter->first, liter->second, y);
if (r < 0)
return r;
}
return 0;
}
int RGWRados::read_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket,
rgw_usage_log_entry>& usage)
{
uint32_t num = max_entries;
string hash, first_hash;
string user_str = user.to_str();
usage_log_hash(cct, user_str, first_hash, 0);
if (usage_iter.index) {
usage_log_hash(cct, user_str, hash, usage_iter.index);
} else {
hash = first_hash;
}
usage.clear();
do {
map<rgw_user_bucket, rgw_usage_log_entry> ret_usage;
map<rgw_user_bucket, rgw_usage_log_entry>::iterator iter;
int ret = cls_obj_usage_log_read(dpp, hash, user_str, bucket_name, start_epoch, end_epoch, num,
usage_iter.read_iter, ret_usage, is_truncated);
if (ret == -ENOENT)
goto next;
if (ret < 0)
return ret;
num -= ret_usage.size();
for (iter = ret_usage.begin(); iter != ret_usage.end(); ++iter) {
usage[iter->first].aggregate(iter->second);
}
next:
if (!*is_truncated) {
usage_iter.read_iter.clear();
usage_log_hash(cct, user_str, hash, ++usage_iter.index);
}
} while (num && !*is_truncated && hash != first_hash);
return 0;
}
int RGWRados::trim_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
uint32_t index = 0;
string hash, first_hash;
string user_str = user.to_str();
usage_log_hash(cct, user_str, first_hash, index);
hash = first_hash;
do {
int ret = cls_obj_usage_log_trim(dpp, hash, user_str, bucket_name, start_epoch, end_epoch, y);
if (ret < 0 && ret != -ENOENT)
return ret;
usage_log_hash(cct, user_str, hash, ++index);
} while (hash != first_hash);
return 0;
}
int RGWRados::clear_usage(const DoutPrefixProvider *dpp, optional_yield y)
{
auto max_shards = cct->_conf->rgw_usage_max_shards;
int ret=0;
for (unsigned i=0; i < max_shards; i++){
string oid = RGW_USAGE_OBJ_PREFIX + to_string(i);
ret = cls_obj_usage_log_clear(dpp, oid, y);
if (ret < 0){
ldpp_dout(dpp,0) << "usage clear on oid="<< oid << "failed with ret=" << ret << dendl;
return ret;
}
}
return ret;
}
int RGWRados::decode_policy(const DoutPrefixProvider *dpp,
ceph::buffer::list& bl,
ACLOwner *owner)
{
auto i = bl.cbegin();
RGWAccessControlPolicy policy(cct);
try {
policy.decode_owner(i);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
*owner = policy.get_owner();
return 0;
}
int RGWRados::Bucket::update_bucket_id(const string& new_bucket_id, const DoutPrefixProvider *dpp, optional_yield y)
{
rgw_bucket bucket = bucket_info.bucket;
bucket.update_bucket_id(new_bucket_id);
bucket_info.objv_tracker.clear();
int ret = store->get_bucket_instance_info(bucket, bucket_info, nullptr, nullptr, y, dpp);
if (ret < 0) {
return ret;
}
return 0;
}
/**
* Get ordered listing of the objects in a bucket.
*
* max_p: maximum number of results to return
* bucket: bucket to list contents of
* prefix: only return results that match this prefix
 * delim: delimiter; results whose names contain this string after the
 *     prefix are not returned individually. Instead, the matching portion
 *     of their name (through the delimiter) is inserted in common_prefixes
 *     with a "true" mark.
* marker: if filled in, begin the listing with this object.
* end_marker: if filled in, end the listing with this object.
* result: the objects are put in here.
* common_prefixes: if delim is filled in, any matching prefixes are
* placed here.
* is_truncated: if number of objects in the bucket is bigger than
* max, then truncated.
*/
int RGWRados::Bucket::List::list_objects_ordered(
const DoutPrefixProvider *dpp,
int64_t max_p,
std::vector<rgw_bucket_dir_entry> *result,
std::map<std::string, bool> *common_prefixes,
bool *is_truncated,
optional_yield y)
{
RGWRados *store = target->get_store();
CephContext *cct = store->ctx();
int shard_id = target->get_shard_id();
const auto& current_index = target->get_bucket_info().layout.current_index;
int count = 0;
bool truncated = true;
bool cls_filtered = false;
const int64_t max = // protect against memory issues and negative vals
std::min(bucket_list_objects_absolute_max, std::max(int64_t(0), max_p));
int read_ahead = std::max(cct->_conf->rgw_list_bucket_min_readahead, max);
result->clear();
// use a local marker; either the marker will have a previous entry
// or it will be empty; either way it's OK to copy
rgw_obj_key marker_obj(params.marker.name,
params.marker.instance,
params.ns.empty() ? params.marker.ns : params.ns);
rgw_obj_index_key cur_marker;
marker_obj.get_index_key(&cur_marker);
rgw_obj_key end_marker_obj(params.end_marker.name,
params.end_marker.instance,
params.ns.empty() ? params.end_marker.ns : params.ns);
rgw_obj_index_key cur_end_marker;
end_marker_obj.get_index_key(&cur_end_marker);
const bool cur_end_marker_valid = !params.end_marker.empty();
rgw_obj_key prefix_obj(params.prefix);
prefix_obj.set_ns(params.ns);
std::string cur_prefix = prefix_obj.get_index_key_name();
std::string after_delim_s; /* needed in !params.delim.empty() AND later */
if (!params.delim.empty()) {
after_delim_s = cls_rgw_after_delim(params.delim);
/* if marker points at a common prefix, fast forward it into its
* upper bound string */
int delim_pos = cur_marker.name.find(params.delim, cur_prefix.size());
if (delim_pos >= 0) {
string s = cur_marker.name.substr(0, delim_pos);
s.append(after_delim_s);
cur_marker = s;
}
}
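  // e.g. (hypothetical) prefix="p/", delim="/", marker="p/a/obj": the block
  // above rewrites the marker to "p/a" + after_delim_s, so listing resumes
  // past the entire "p/a/" common prefix instead of walking its contents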
  // we'll stop after this many attempts as long as we've returned at
  // least one entry; otherwise we'll keep making attempts until we can
  // return at least one entry
constexpr uint16_t SOFT_MAX_ATTEMPTS = 8;
rgw_obj_index_key prev_marker;
for (uint16_t attempt = 1; /* empty */; ++attempt) {
ldpp_dout(dpp, 20) << __func__ <<
": starting attempt " << attempt << dendl;
if (attempt > 1 && !(prev_marker < cur_marker)) {
// we've failed to make forward progress
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" marker failed to make forward progress; attempt=" << attempt <<
", prev_marker=" << prev_marker <<
", cur_marker=" << cur_marker << dendl;
break;
}
prev_marker = cur_marker;
ent_map_t ent_map;
ent_map.reserve(read_ahead);
int r = store->cls_bucket_list_ordered(dpp,
target->get_bucket_info(),
current_index,
shard_id,
cur_marker,
cur_prefix,
params.delim,
read_ahead + 1 - count,
params.list_versions,
attempt,
ent_map,
&truncated,
&cls_filtered,
&cur_marker,
y,
params.force_check_filter);
if (r < 0) {
return r;
}
for (auto eiter = ent_map.begin(); eiter != ent_map.end(); ++eiter) {
rgw_bucket_dir_entry& entry = eiter->second;
rgw_obj_index_key index_key = entry.key;
rgw_obj_key obj(index_key);
ldpp_dout(dpp, 20) << __func__ <<
": considering entry " << entry.key << dendl;
/* note that parse_raw_oid() here will not set the correct
* object's instance, as rgw_obj_index_key encodes that
* separately. We don't need to set the instance because it's
* not needed for the checks here and we end up using the raw
* entry for the return vector
*/
bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj);
if (!valid) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" could not parse object name: " << obj.name << dendl;
continue;
}
bool matched_ns = (obj.ns == params.ns);
if (!params.list_versions && !entry.is_visible()) {
ldpp_dout(dpp, 10) << __func__ <<
": skipping not visible entry \"" << entry.key << "\"" << dendl;
continue;
}
if (params.enforce_ns && !matched_ns) {
if (!params.ns.empty()) {
/* we've iterated past the namespace we're searching -- done now */
truncated = false;
ldpp_dout(dpp, 10) << __func__ <<
": finished due to getting past requested namespace \"" <<
params.ns << "\"" << dendl;
goto done;
}
/* we're skipping past namespaced objects */
ldpp_dout(dpp, 20) << __func__ <<
": skipping past namespaced objects, including \"" << entry.key <<
"\"" << dendl;
continue;
}
if (cur_end_marker_valid && cur_end_marker <= index_key) {
truncated = false;
ldpp_dout(dpp, 10) << __func__ <<
": finished due to gitting end marker of \"" << cur_end_marker <<
"\" with \"" << entry.key << "\"" << dendl;
goto done;
}
if (count < max) {
params.marker = index_key;
next_marker = index_key;
}
if (params.access_list_filter &&
! params.access_list_filter->filter(obj.name, index_key.name)) {
ldpp_dout(dpp, 20) << __func__ <<
": skipping past namespaced objects, including \"" << entry.key <<
"\"" << dendl;
continue;
}
if (params.prefix.size() &&
0 != obj.name.compare(0, params.prefix.size(), params.prefix)) {
ldpp_dout(dpp, 20) << __func__ <<
": skipping object \"" << entry.key <<
"\" that doesn't match prefix \"" << params.prefix << "\"" << dendl;
continue;
}
if (!params.delim.empty()) {
const int delim_pos = obj.name.find(params.delim, params.prefix.size());
if (delim_pos >= 0) {
// run either the code where delimiter filtering is done a)
// in the OSD/CLS or b) here.
if (cls_filtered) {
          // NOTE: this branch is for newer versions of the OSD that
          // filter on the CLS side; such an OSD should only find one
          // delimiter, at the end of the name, if it finds any after
          // the prefix
if (delim_pos !=
int(obj.name.length() - params.delim.length())) {
ldpp_dout(dpp, 0) << "WARNING: " << __func__ <<
" found delimiter in place other than the end of "
"the prefix; obj.name=" << obj.name <<
", prefix=" << params.prefix << dendl;
}
if (common_prefixes) {
if (count >= max) {
truncated = true;
ldpp_dout(dpp, 10) << __func__ <<
": stopping early with common prefix \"" << entry.key <<
"\" because requested number (" << max <<
") reached (cls filtered)" << dendl;
goto done;
}
(*common_prefixes)[obj.name] = true;
count++;
}
ldpp_dout(dpp, 20) << __func__ <<
": finished entry with common prefix \"" << entry.key <<
"\" so continuing loop (cls filtered)" << dendl;
continue;
} else {
// NOTE: this condition is for older versions of the OSD
// that do not filter on the CLS side, so the following code
// must do the filtering; once we reach version 16 of ceph,
// this code can be removed along with the conditional that
// can lead this way
/* extract key -with trailing delimiter- for CommonPrefix */
string prefix_key =
obj.name.substr(0, delim_pos + params.delim.length());
if (common_prefixes &&
common_prefixes->find(prefix_key) == common_prefixes->end()) {
if (count >= max) {
truncated = true;
ldpp_dout(dpp, 10) << __func__ <<
": stopping early with common prefix \"" << entry.key <<
"\" because requested number (" << max <<
") reached (not cls filtered)" << dendl;
goto done;
}
next_marker = prefix_key;
(*common_prefixes)[prefix_key] = true;
count++;
}
ldpp_dout(dpp, 20) << __func__ <<
": finished entry with common prefix \"" << entry.key <<
"\" so continuing loop (not cls filtered)" << dendl;
continue;
} // if we're running an older OSD version
} // if a delimiter was found after prefix
} // if a delimiter was passed in
if (count >= max) {
truncated = true;
ldpp_dout(dpp, 10) << __func__ <<
": stopping early with entry \"" << entry.key <<
"\" because requested number (" << max <<
") reached" << dendl;
goto done;
}
ldpp_dout(dpp, 20) << __func__ <<
": adding entry " << entry.key << " to result" << dendl;
result->emplace_back(std::move(entry));
count++;
} // eiter for loop
// NOTE: the following conditional is needed by older versions of
// the OSD that don't do delimiter filtering on the CLS side; once
// we reach version 16 of ceph, the following conditional and the
// code within can be removed
if (!cls_filtered && !params.delim.empty()) {
int marker_delim_pos =
cur_marker.name.find(params.delim, cur_prefix.size());
if (marker_delim_pos >= 0) {
std::string skip_after_delim =
cur_marker.name.substr(0, marker_delim_pos);
skip_after_delim.append(after_delim_s);
ldpp_dout(dpp, 20) << __func__ <<
": skip_after_delim=" << skip_after_delim << dendl;
if (skip_after_delim > cur_marker.name) {
cur_marker = skip_after_delim;
ldpp_dout(dpp, 20) << __func__ <<
": setting cur_marker=" << cur_marker.name <<
"[" << cur_marker.instance << "]" << dendl;
}
}
} // if older osd didn't do delimiter filtering
ldpp_dout(dpp, 10) << __func__ <<
": end of outer loop, truncated=" << truncated <<
", count=" << count << ", attempt=" << attempt << dendl;
if (!truncated || count >= (max + 1) / 2) {
// if we finished listing, or if we're returning at least half the
// requested entries, that's enough; S3 and swift protocols allow
// returning fewer than max entries
ldpp_dout(dpp, 10) << __func__ <<
": exiting attempt loop because we reached end (" << truncated <<
") or we're returning half the requested entries (" << count <<
" of " << max << ")" << dendl;
break;
} else if (attempt > SOFT_MAX_ATTEMPTS && count >= 1) {
      // if we've made more than SOFT_MAX_ATTEMPTS attempts and have
      // some, but very few, results, return with what we have
ldpp_dout(dpp, 10) << __func__ <<
": exiting attempt loop because we made " << attempt <<
" attempts and we're returning " << count << " entries" << dendl;
break;
}
} // for (uint16_t attempt...
done:
if (is_truncated) {
*is_truncated = truncated;
}
return 0;
} // list_objects_ordered
/**
* Get listing of the objects in a bucket and allow the results to be out
* of order.
*
 * Even though there are key differences from the ordered counterpart,
 * the parameters are the same to maintain some compatibility.
*
* max: maximum number of results to return
* bucket: bucket to list contents of
* prefix: only return results that match this prefix
 * delim: should not be set; if it is, an error should already have been
 *     indicated to the caller
* marker: if filled in, begin the listing with this object.
* end_marker: if filled in, end the listing with this object.
* result: the objects are put in here.
* common_prefixes: this is never filled with an unordered list; the param
* is maintained for compatibility
* is_truncated: if number of objects in the bucket is bigger than max, then
* truncated.
*/
int RGWRados::Bucket::List::list_objects_unordered(const DoutPrefixProvider *dpp,
int64_t max_p,
std::vector<rgw_bucket_dir_entry>* result,
std::map<std::string, bool>* common_prefixes,
bool* is_truncated,
optional_yield y)
{
RGWRados *store = target->get_store();
int shard_id = target->get_shard_id();
const auto& current_index = target->get_bucket_info().layout.current_index;
int count = 0;
bool truncated = true;
const int64_t max = // protect against memory issues and negative vals
std::min(bucket_list_objects_absolute_max, std::max(int64_t(0), max_p));
// read a few extra in each call to cls_bucket_list_unordered in
// case some are filtered out due to namespace matching, versioning,
// filtering, etc.
const int64_t max_read_ahead = 100;
const uint32_t read_ahead = uint32_t(max + std::min(max, max_read_ahead));
result->clear();
// use a local marker; either the marker will have a previous entry
// or it will be empty; either way it's OK to copy
rgw_obj_key marker_obj(params.marker.name,
params.marker.instance,
params.ns.empty() ? params.marker.ns : params.ns);
rgw_obj_index_key cur_marker;
marker_obj.get_index_key(&cur_marker);
rgw_obj_key end_marker_obj(params.end_marker.name,
params.end_marker.instance,
params.ns.empty() ? params.end_marker.ns : params.ns);
rgw_obj_index_key cur_end_marker;
end_marker_obj.get_index_key(&cur_end_marker);
const bool cur_end_marker_valid = !params.end_marker.empty();
rgw_obj_key prefix_obj(params.prefix);
prefix_obj.set_ns(params.ns);
std::string cur_prefix = prefix_obj.get_index_key_name();
while (truncated && count <= max) {
std::vector<rgw_bucket_dir_entry> ent_list;
ent_list.reserve(read_ahead);
int r = store->cls_bucket_list_unordered(dpp,
target->get_bucket_info(),
current_index,
shard_id,
cur_marker,
cur_prefix,
read_ahead,
params.list_versions,
ent_list,
&truncated,
&cur_marker,
y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" cls_bucket_list_unordered returned " << r << " for " <<
target->get_bucket_info().bucket << dendl;
return r;
}
// NB: while regions of ent_list will be sorted, we have no
// guarantee that all items will be sorted since they can cross
// shard boundaries
for (auto& entry : ent_list) {
rgw_obj_index_key index_key = entry.key;
rgw_obj_key obj(index_key);
if (count < max) {
params.marker.set(index_key);
next_marker.set(index_key);
}
/* note that parse_raw_oid() here will not set the correct
* object's instance, as rgw_obj_index_key encodes that
* separately. We don't need to set the instance because it's
* not needed for the checks here and we end up using the raw
* entry for the return vector
*/
bool valid = rgw_obj_key::parse_raw_oid(index_key.name, &obj);
if (!valid) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" could not parse object name: " << obj.name << dendl;
continue;
}
if (!params.list_versions && !entry.is_visible()) {
ldpp_dout(dpp, 20) << __func__ <<
": skippping \"" << index_key <<
"\" because not listing versions and entry not visibile" << dendl;
continue;
}
if (params.enforce_ns && obj.ns != params.ns) {
ldpp_dout(dpp, 20) << __func__ <<
": skippping \"" << index_key <<
"\" because namespace does not match" << dendl;
continue;
}
if (cur_end_marker_valid && cur_end_marker <= index_key) {
// we're not guaranteed items will come in order, so we have
// to loop through all
ldpp_dout(dpp, 20) << __func__ <<
": skippping \"" << index_key <<
"\" because after end_marker" << dendl;
continue;
}
if (params.access_list_filter &&
!params.access_list_filter->filter(obj.name, index_key.name)) {
ldpp_dout(dpp, 20) << __func__ <<
": skippping \"" << index_key <<
"\" because doesn't match filter" << dendl;
continue;
}
if (params.prefix.size() &&
(0 != obj.name.compare(0, params.prefix.size(), params.prefix))) {
ldpp_dout(dpp, 20) << __func__ <<
": skippping \"" << index_key <<
"\" because doesn't match prefix" << dendl;
continue;
}
if (count >= max) {
truncated = true;
goto done;
}
result->emplace_back(std::move(entry));
count++;
} // for (auto& entry : ent_list)
} // while (truncated && count <= max)
done:
if (is_truncated) {
*is_truncated = truncated;
}
return 0;
} // list_objects_unordered
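/*
 * Illustrative caller sketch (comment only, not compiled): names such as
 * `store`, `bucket_info`, `dpp` and `y` are hypothetical stand-ins for
 * whatever the caller already has in scope.
 *
 *   RGWRados::Bucket target(store, bucket_info);
 *   RGWRados::Bucket::List list_op(&target);
 *   list_op.params.prefix = "logs/";   // prefix/namespace filtering only
 *   // note: params.delim must stay empty for unordered listings
 *   std::vector<rgw_bucket_dir_entry> entries;
 *   bool truncated = false;
 *   int r = list_op.list_objects_unordered(dpp, 1000, &entries,
 *                                          nullptr, &truncated, y);
 *   // on success, `entries` holds up to 1000 matches in shard order,
 *   // and params.marker was advanced for the next page
 */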
/**
* create a rados pool, associated meta info
* returns 0 on success, -ERR# otherwise.
*/
int RGWRados::create_pool(const DoutPrefixProvider *dpp, const rgw_pool& pool)
{
librados::IoCtx io_ctx;
constexpr bool create = true;
return rgw_init_ioctx(dpp, get_rados_handle(), pool, io_ctx, create);
}
void RGWRados::create_bucket_id(string *bucket_id)
{
uint64_t iid = instance_id();
uint64_t bid = next_bucket_id();
char buf[svc.zone->get_zone_params().get_id().size() + 48];
snprintf(buf, sizeof(buf), "%s.%" PRIu64 ".%" PRIu64,
svc.zone->get_zone_params().get_id().c_str(), iid, bid);
*bucket_id = buf;
}
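/* For illustration (hypothetical values): with zone id "9f3a-zone",
 * instance id 4133 and bucket counter 27, the generated id would be
 * "9f3a-zone.4133.27" -- zone id, instance id and a per-instance counter
 * joined by dots. */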
int RGWRados::create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket,
const string& zonegroup_id,
const rgw_placement_rule& placement_rule,
const string& swift_ver_location,
const RGWQuotaInfo * pquota_info,
map<std::string, bufferlist>& attrs,
RGWBucketInfo& info,
obj_version *pobjv,
obj_version *pep_objv,
real_time creation_time,
rgw_bucket *pmaster_bucket,
uint32_t *pmaster_num_shards,
optional_yield y,
const DoutPrefixProvider *dpp,
bool exclusive)
{
#define MAX_CREATE_RETRIES 20 /* need to bound retries */
rgw_placement_rule selected_placement_rule;
RGWZonePlacementInfo rule_info;
for (int i = 0; i < MAX_CREATE_RETRIES; i++) {
int ret = 0;
ret = svc.zone->select_bucket_placement(dpp, owner, zonegroup_id, placement_rule,
&selected_placement_rule, &rule_info, y);
if (ret < 0)
return ret;
if (!pmaster_bucket) {
create_bucket_id(&bucket.marker);
bucket.bucket_id = bucket.marker;
} else {
bucket.marker = pmaster_bucket->marker;
bucket.bucket_id = pmaster_bucket->bucket_id;
}
RGWObjVersionTracker& objv_tracker = info.objv_tracker;
objv_tracker.read_version.clear();
if (pobjv) {
objv_tracker.write_version = *pobjv;
} else {
objv_tracker.generate_new_write_ver(cct);
}
info.bucket = bucket;
info.owner = owner.user_id;
info.zonegroup = zonegroup_id;
info.placement_rule = selected_placement_rule;
info.swift_ver_location = swift_ver_location;
info.swift_versioning = (!swift_ver_location.empty());
init_default_bucket_layout(cct, info.layout, svc.zone->get_zone(),
pmaster_num_shards ?
std::optional{*pmaster_num_shards} :
std::nullopt,
rule_info.index_type);
info.requester_pays = false;
if (real_clock::is_zero(creation_time)) {
info.creation_time = ceph::real_clock::now();
} else {
info.creation_time = creation_time;
}
if (pquota_info) {
info.quota = *pquota_info;
}
int r = svc.bi->init_index(dpp, info, info.layout.current_index);
if (r < 0) {
return r;
}
ret = put_linked_bucket_info(info, exclusive, ceph::real_time(), pep_objv, &attrs, true, dpp, y);
if (ret == -ECANCELED) {
ret = -EEXIST;
}
if (ret == -EEXIST) {
/* we need to reread the info and return it, caller will have a use for it */
RGWBucketInfo orig_info;
r = get_bucket_info(&svc, bucket.tenant, bucket.name, orig_info, NULL, y, NULL);
if (r < 0) {
if (r == -ENOENT) {
continue;
}
ldpp_dout(dpp, 0) << "get_bucket_info returned " << r << dendl;
return r;
}
/* only remove it if it's a different bucket instance */
if (orig_info.bucket.bucket_id != bucket.bucket_id) {
int r = svc.bi->clean_index(dpp, info, info.layout.current_index);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not remove bucket index (r=" << r << ")" << dendl;
}
r = ctl.bucket->remove_bucket_instance_info(info.bucket, info, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): failed to remove bucket instance info: bucket instance=" << info.bucket.get_key() << ": r=" << r << dendl;
/* continue anyway */
}
}
info = std::move(orig_info);
/* ret == -EEXIST here */
}
return ret;
}
/* this is highly unlikely */
ldpp_dout(dpp, 0) << "ERROR: could not create bucket, continuously raced with bucket creation and removal" << dendl;
return -ENOENT;
}
bool RGWRados::obj_to_raw(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj *raw_obj)
{
get_obj_bucket_and_oid_loc(obj, raw_obj->oid, raw_obj->loc);
return get_obj_data_pool(placement_rule, obj, &raw_obj->pool);
}
std::string RGWRados::get_cluster_fsid(const DoutPrefixProvider *dpp, optional_yield y)
{
return svc.rados->cluster_fsid();
}
int RGWRados::get_obj_head_ioctx(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
librados::IoCtx *ioctx)
{
std::string oid, key;
get_obj_bucket_and_oid_loc(obj, oid, key);
rgw_pool pool;
if (!get_obj_data_pool(bucket_info.placement_rule, obj, &pool)) {
ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj <<
", probably misconfiguration" << dendl;
return -EIO;
}
int r = open_pool_ctx(dpp, pool, *ioctx, false, true);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: unable to open data-pool=" << pool.to_str() <<
" for obj=" << obj << " with error-code=" << r << dendl;
return r;
}
ioctx->locator_set_key(key);
return 0;
}
int RGWRados::get_obj_head_ref(const DoutPrefixProvider *dpp,
const rgw_placement_rule& target_placement_rule,
const rgw_obj& obj,
rgw_rados_ref *ref)
{
get_obj_bucket_and_oid_loc(obj, ref->obj.oid, ref->obj.loc);
rgw_pool pool;
if (!get_obj_data_pool(target_placement_rule, obj, &pool)) {
ldpp_dout(dpp, 0) << "ERROR: cannot get data pool for obj=" << obj << ", probably misconfiguration" << dendl;
return -EIO;
}
ref->pool = svc.rados->pool(pool);
int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams()
.set_mostly_omap(false));
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed opening data pool (pool=" << pool << "); r=" << r << dendl;
return r;
}
ref->pool.ioctx().locator_set_key(ref->obj.loc);
return 0;
}
int RGWRados::get_obj_head_ref(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
rgw_rados_ref *ref)
{
return get_obj_head_ref(dpp, bucket_info.placement_rule, obj, ref);
}
int RGWRados::get_raw_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref)
{
ref->obj = obj;
if (ref->obj.oid.empty()) {
ref->obj.oid = obj.pool.to_str();
ref->obj.pool = svc.zone->get_zone_params().domain_root;
}
ref->pool = svc.rados->pool(obj.pool);
int r = ref->pool.open(dpp, RGWSI_RADOS::OpenParams()
.set_mostly_omap(false));
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed opening pool (pool=" << obj.pool << "); r=" << r << dendl;
return r;
}
ref->pool.ioctx().locator_set_key(ref->obj.loc);
return 0;
}
int RGWRados::get_system_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref)
{
return get_raw_obj_ref(dpp, obj, ref);
}
/*
* fixes an issue where head objects were supposed to have a locator created, but ended
* up without one
*/
int RGWRados::fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key, optional_yield y)
{
const rgw_bucket& bucket = bucket_info.bucket;
string oid;
string locator;
rgw_obj obj(bucket, key);
get_obj_bucket_and_oid_loc(obj, oid, locator);
if (locator.empty()) {
ldpp_dout(dpp, 20) << "object does not have a locator, nothing to fix" << dendl;
return 0;
}
librados::IoCtx ioctx;
int ret = get_obj_head_ioctx(dpp, bucket_info, obj, &ioctx);
if (ret < 0) {
cerr << "ERROR: get_obj_head_ioctx() returned ret=" << ret << std::endl;
return ret;
}
ioctx.locator_set_key(string()); /* override locator for this object, use empty locator */
uint64_t size;
bufferlist data;
struct timespec mtime_ts;
map<string, bufferlist> attrs;
librados::ObjectReadOperation op;
op.getxattrs(&attrs, NULL);
op.stat2(&size, &mtime_ts, NULL);
#define HEAD_SIZE (512 * 1024)
op.read(0, HEAD_SIZE, &data, NULL);
ret = rgw_rados_operate(dpp, ioctx, oid, &op, &data, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: rgw_rados_operate(oid=" << oid << ") returned ret=" << ret << dendl;
return ret;
}
if (size > HEAD_SIZE) {
ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") > HEAD_SIZE (" << HEAD_SIZE << ")" << dendl;
return -EIO;
}
if (size != data.length()) {
ldpp_dout(dpp, -1) << "ERROR: returned object size (" << size << ") != data.length() (" << data.length() << ")" << dendl;
return -EIO;
}
if (copy_obj) {
librados::ObjectWriteOperation wop;
wop.mtime2(&mtime_ts);
map<string, bufferlist>::iterator iter;
for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
wop.setxattr(iter->first.c_str(), iter->second);
}
wop.write(0, data);
ioctx.locator_set_key(locator);
rgw_rados_operate(dpp, ioctx, oid, &wop, y);
}
if (remove_bad) {
ioctx.locator_set_key(string());
ret = ioctx.remove(oid);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to remove original bad object" << dendl;
return ret;
}
}
return 0;
}
int RGWRados::move_rados_obj(const DoutPrefixProvider *dpp,
librados::IoCtx& src_ioctx,
const string& src_oid, const string& src_locator,
librados::IoCtx& dst_ioctx,
const string& dst_oid, const string& dst_locator, optional_yield y)
{
#define COPY_BUF_SIZE (4 * 1024 * 1024)
bool done = false;
uint64_t chunk_size = COPY_BUF_SIZE;
uint64_t ofs = 0;
int ret = 0;
real_time mtime;
struct timespec mtime_ts;
uint64_t size;
if (src_oid == dst_oid && src_locator == dst_locator) {
return 0;
}
src_ioctx.locator_set_key(src_locator);
dst_ioctx.locator_set_key(dst_locator);
do {
bufferlist data;
ObjectReadOperation rop;
ObjectWriteOperation wop;
if (ofs == 0) {
rop.stat2(&size, &mtime_ts, NULL);
mtime = real_clock::from_timespec(mtime_ts);
}
rop.read(ofs, chunk_size, &data, NULL);
ret = rgw_rados_operate(dpp, src_ioctx, src_oid, &rop, &data, y);
if (ret < 0) {
goto done_err;
}
if (data.length() == 0) {
break;
}
if (ofs == 0) {
wop.create(true); /* make it exclusive */
wop.mtime2(&mtime_ts);
mtime = real_clock::from_timespec(mtime_ts);
}
wop.write(ofs, data);
ret = rgw_rados_operate(dpp, dst_ioctx, dst_oid, &wop, y);
if (ret < 0) {
goto done_err;
}
ofs += data.length();
done = data.length() != chunk_size;
} while (!done);
if (ofs != size) {
ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": copying " << src_oid << " -> " << dst_oid
<< ": expected " << size << " bytes to copy, ended up with " << ofs << dendl;
ret = -EIO;
goto done_err;
}
src_ioctx.remove(src_oid);
return 0;
done_err:
// TODO: clean up dst_oid if we created it
ldpp_dout(dpp, -1) << "ERROR: failed to copy " << src_oid << " -> " << dst_oid << dendl;
return ret;
}
/*
 * fixes an issue where tail objects were supposed to have a locator created, but ended
 * up without one
 */
int RGWRados::fix_tail_obj_locator(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info, rgw_obj_key& key,
bool fix, bool *need_fix, optional_yield y)
{
const rgw_bucket& bucket = bucket_info.bucket;
rgw_obj obj(bucket, key);
if (need_fix) {
*need_fix = false;
}
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
RGWObjState *astate = nullptr;
RGWObjManifest* manifest = nullptr;
RGWObjectCtx rctx(this->driver);
r = get_obj_state(dpp, &rctx, bucket_info, obj, &astate, &manifest, false, y);
if (r < 0)
return r;
if (manifest) {
RGWObjManifest::obj_iterator miter;
for (miter = manifest->obj_begin(dpp); miter != manifest->obj_end(dpp); ++miter) {
rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(this);
rgw_obj loc;
string oid;
string locator;
RGWSI_Tier_RADOS::raw_obj_to_obj(manifest->get_tail_placement().bucket, raw_loc, &loc);
if (loc.key.ns.empty()) {
/* continue, we're only interested in tail objects */
continue;
}
auto& ioctx = ref.pool.ioctx();
get_obj_bucket_and_oid_loc(loc, oid, locator);
ref.pool.ioctx().locator_set_key(locator);
ldpp_dout(dpp, 20) << __func__ << ": key=" << key << " oid=" << oid << " locator=" << locator << dendl;
r = ioctx.stat(oid, NULL, NULL);
if (r != -ENOENT) {
continue;
}
string bad_loc;
prepend_bucket_marker(bucket, loc.key.name, bad_loc);
/* create a new ioctx with the bad locator */
librados::IoCtx src_ioctx;
src_ioctx.dup(ioctx);
src_ioctx.locator_set_key(bad_loc);
r = src_ioctx.stat(oid, NULL, NULL);
if (r != 0) {
/* cannot find a broken part */
continue;
}
ldpp_dout(dpp, 20) << __func__ << ": found bad object part: " << loc << dendl;
if (need_fix) {
*need_fix = true;
}
if (fix) {
r = move_rados_obj(dpp, src_ioctx, oid, bad_loc, ioctx, oid, locator, y);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: copy_rados_obj() on oid=" << oid << " returned r=" << r << dendl;
}
}
}
}
return 0;
}
int RGWRados::BucketShard::init(const rgw_bucket& _bucket,
const rgw_obj& obj,
RGWBucketInfo* bucket_info_out,
const DoutPrefixProvider *dpp, optional_yield y)
{
bucket = _bucket;
RGWBucketInfo bucket_info;
RGWBucketInfo* bucket_info_p =
bucket_info_out ? bucket_info_out : &bucket_info;
int ret = store->get_bucket_instance_info(bucket, *bucket_info_p, NULL, NULL, y, dpp);
if (ret < 0) {
return ret;
}
string oid;
ret = store->svc.bi_rados->open_bucket_index_shard(dpp, *bucket_info_p, obj.get_hash_object(), &bucket_obj, &shard_id);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj.get_raw_obj() << dendl;
return 0;
}
int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
const rgw_obj& obj, optional_yield y)
{
bucket = bucket_info.bucket;
int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info,
obj.get_hash_object(),
&bucket_obj,
&shard_id);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl;
return 0;
}
int RGWRados::BucketShard::init(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& index,
int sid, optional_yield y)
{
bucket = bucket_info.bucket;
shard_id = sid;
int ret = store->svc.bi_rados->open_bucket_index_shard(dpp, bucket_info, index,
shard_id, &bucket_obj);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: open_bucket_index_shard() returned ret=" << ret << dendl;
return ret;
}
ldpp_dout(dpp, 20) << " bucket index object: " << bucket_obj << dendl;
return 0;
}
/* Execute @handler on last item in bucket listing for bucket specified
* in @bucket_info. @obj_prefix and @obj_delim narrow down the listing
 * to objects matching these criteria. */
int RGWRados::on_last_entry_in_listing(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const std::string& obj_prefix,
const std::string& obj_delim,
std::function<int(const rgw_bucket_dir_entry&)> handler, optional_yield y)
{
RGWRados::Bucket target(this, bucket_info);
RGWRados::Bucket::List list_op(&target);
list_op.params.prefix = obj_prefix;
list_op.params.delim = obj_delim;
ldpp_dout(dpp, 20) << "iterating listing for bucket=" << bucket_info.bucket.name
<< ", obj_prefix=" << obj_prefix
<< ", obj_delim=" << obj_delim
<< dendl;
bool is_truncated = false;
boost::optional<rgw_bucket_dir_entry> last_entry;
/* We need to rewind to the last object in a listing. */
do {
/* List bucket entries in chunks. */
static constexpr int MAX_LIST_OBJS = 100;
std::vector<rgw_bucket_dir_entry> entries(MAX_LIST_OBJS);
int ret = list_op.list_objects(dpp, MAX_LIST_OBJS, &entries, nullptr,
&is_truncated, y);
if (ret < 0) {
return ret;
} else if (!entries.empty()) {
last_entry = entries.back();
}
} while (is_truncated);
if (last_entry) {
return handler(*last_entry);
}
/* Empty listing - no items we can run handler on. */
return 0;
}
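/*
 * Illustrative usage sketch (comment only, not compiled); `bucket_info`,
 * `dpp` and `y` are hypothetical stand-ins:
 *
 *   int r = on_last_entry_in_listing(dpp, bucket_info,
 *                                    "005photo",    // hex-length-prefixed name,
 *                                                   // as swift versioning uses
 *                                    std::string(), // no delimiter
 *                                    [&](const rgw_bucket_dir_entry& e) {
 *                                      // e is the lexicographically last
 *                                      // entry matching the prefix
 *                                      return 0;
 *                                    }, y);
 */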
bool RGWRados::swift_versioning_enabled(const RGWBucketInfo& bucket_info) const
{
return bucket_info.has_swift_versioning() &&
bucket_info.swift_ver_location.size();
}
int RGWRados::swift_versioning_copy(RGWObjectCtx& obj_ctx,
const rgw_user& user,
RGWBucketInfo& bucket_info,
const rgw_obj& obj,
const DoutPrefixProvider *dpp,
optional_yield y)
{
if (! swift_versioning_enabled(bucket_info)) {
return 0;
}
obj_ctx.set_atomic(obj);
RGWObjState * state = nullptr;
RGWObjManifest *manifest = nullptr;
int r = get_obj_state(dpp, &obj_ctx, bucket_info, obj, &state, &manifest, false, y);
if (r < 0) {
return r;
}
if (!state->exists) {
return 0;
}
const string& src_name = obj.get_oid();
char buf[src_name.size() + 32];
struct timespec ts = ceph::real_clock::to_timespec(state->mtime);
snprintf(buf, sizeof(buf), "%03x%s/%lld.%06ld", (int)src_name.size(),
src_name.c_str(), (long long)ts.tv_sec, ts.tv_nsec / 1000);
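  /* e.g. (illustrative) source "photo.jpg" (9 chars -> "009") archived at
   * t=1389058200.123456 yields "009photo.jpg/1389058200.123456"; the hex
   * length prefix keeps names containing '/' unambiguous */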
RGWBucketInfo dest_bucket_info;
r = get_bucket_info(&svc, bucket_info.bucket.tenant, bucket_info.swift_ver_location, dest_bucket_info, NULL, y, NULL);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to read dest bucket info: r=" << r << dendl;
if (r == -ENOENT) {
return -ERR_PRECONDITION_FAILED;
}
return r;
}
if (dest_bucket_info.owner != bucket_info.owner) {
return -ERR_PRECONDITION_FAILED;
}
rgw_obj dest_obj(dest_bucket_info.bucket, buf);
if (dest_bucket_info.versioning_enabled()){
gen_rand_obj_instance_name(&dest_obj);
}
obj_ctx.set_atomic(dest_obj);
rgw_zone_id no_zone;
r = copy_obj(obj_ctx,
user,
NULL, /* req_info *info */
no_zone,
dest_obj,
obj,
dest_bucket_info,
bucket_info,
bucket_info.placement_rule,
NULL, /* time_t *src_mtime */
NULL, /* time_t *mtime */
NULL, /* const time_t *mod_ptr */
NULL, /* const time_t *unmod_ptr */
false, /* bool high_precision_time */
NULL, /* const char *if_match */
NULL, /* const char *if_nomatch */
RGWRados::ATTRSMOD_NONE,
true, /* bool copy_if_newer */
state->attrset,
RGWObjCategory::Main,
0, /* uint64_t olh_epoch */
real_time(), /* time_t delete_at */
NULL, /* string *version_id */
NULL, /* string *ptag */
NULL, /* string *petag */
NULL, /* void (*progress_cb)(off_t, void *) */
NULL, /* void *progress_data */
dpp,
y);
if (r == -ECANCELED || r == -ENOENT) {
/* Has already been overwritten, meaning another rgw process already
* copied it out */
return 0;
}
return r;
}
int RGWRados::swift_versioning_restore(RGWObjectCtx& obj_ctx,
const rgw_user& user,
RGWBucketInfo& bucket_info,
rgw_obj& obj,
bool& restored,
const DoutPrefixProvider *dpp, optional_yield y)
{
if (! swift_versioning_enabled(bucket_info)) {
return 0;
}
/* Bucket info of the bucket that stores previous versions of our object. */
RGWBucketInfo archive_binfo;
int ret = get_bucket_info(&svc, bucket_info.bucket.tenant,
bucket_info.swift_ver_location,
archive_binfo, nullptr, y, nullptr);
if (ret < 0) {
return ret;
}
  /* Abort the operation if the bucket storing our archive belongs to someone
   * else. This is a limitation in comparison to Swift, as we aren't taking
   * ACLs into consideration. For now we can live with that.
   *
   * TODO: delegate this check to an upper layer and compare with ACLs. */
if (bucket_info.owner != archive_binfo.owner) {
return -EPERM;
}
/* This code will be executed on latest version of the object. */
const auto handler = [&](const rgw_bucket_dir_entry& entry) -> int {
rgw_zone_id no_zone;
/* We don't support object versioning of Swift API on those buckets that
* are already versioned using the S3 mechanism. This affects also bucket
* storing archived objects. Otherwise the delete operation would create
* a deletion marker. */
if (archive_binfo.versioned()) {
restored = false;
return -ERR_PRECONDITION_FAILED;
}
    /* We are requesting ATTRSMOD_NONE, so the attrs parameter is
     * irrelevant and may be safely left empty. */
std::map<std::string, ceph::bufferlist> no_attrs;
rgw_obj archive_obj(archive_binfo.bucket, entry.key);
if (bucket_info.versioning_enabled()){
gen_rand_obj_instance_name(&obj);
}
obj_ctx.set_atomic(archive_obj);
obj_ctx.set_atomic(obj);
int ret = copy_obj(obj_ctx,
user,
nullptr, /* req_info *info */
no_zone,
obj, /* dest obj */
archive_obj, /* src obj */
bucket_info, /* dest bucket info */
archive_binfo, /* src bucket info */
bucket_info.placement_rule, /* placement_rule */
nullptr, /* time_t *src_mtime */
nullptr, /* time_t *mtime */
nullptr, /* const time_t *mod_ptr */
nullptr, /* const time_t *unmod_ptr */
false, /* bool high_precision_time */
nullptr, /* const char *if_match */
nullptr, /* const char *if_nomatch */
RGWRados::ATTRSMOD_NONE,
true, /* bool copy_if_newer */
no_attrs,
RGWObjCategory::Main,
0, /* uint64_t olh_epoch */
real_time(), /* time_t delete_at */
nullptr, /* string *version_id */
nullptr, /* string *ptag */
nullptr, /* string *petag */
nullptr, /* void (*progress_cb)(off_t, void *) */
nullptr, /* void *progress_data */
dpp,
y);
if (ret == -ECANCELED || ret == -ENOENT) {
/* Has already been overwritten, meaning another rgw process already
* copied it out */
return 0;
} else if (ret < 0) {
return ret;
} else {
restored = true;
}
/* Need to remove the archived copy. */
ret = delete_obj(dpp, obj_ctx, archive_binfo, archive_obj,
archive_binfo.versioning_status(), y);
return ret;
};
const std::string& obj_name = obj.get_oid();
const auto prefix = boost::str(boost::format("%03x%s") % obj_name.size()
% obj_name);
return on_last_entry_in_listing(dpp, archive_binfo, prefix, std::string(),
handler, y);
}
int RGWRados::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp,
uint64_t size, uint64_t accounted_size,
map<string, bufferlist>& attrs,
bool assume_noent, bool modify_tail,
void *_index_op, optional_yield y)
{
RGWRados::Bucket::UpdateIndex *index_op = static_cast<RGWRados::Bucket::UpdateIndex *>(_index_op);
RGWRados *store = target->get_store();
ObjectWriteOperation op;
#ifdef WITH_LTTNG
const req_state* s = get_req_state();
string req_id;
if (!s) {
// fake req_id
req_id = store->svc.zone_utils->unique_id(store->driver->get_new_req_id());
} else {
req_id = s->req_id;
}
#endif
RGWObjState *state;
RGWObjManifest *manifest = nullptr;
int r = target->get_state(dpp, &state, &manifest, false, y, assume_noent);
if (r < 0)
return r;
rgw_obj& obj = target->get_obj();
if (obj.get_oid().empty()) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): cannot write object with empty name" << dendl;
return -EIO;
}
rgw_rados_ref ref;
r = store->get_obj_head_ref(dpp, target->get_meta_placement_rule(), obj, &ref);
if (r < 0)
return r;
bool is_olh = state->is_olh;
bool reset_obj = (meta.flags & PUT_OBJ_CREATE) != 0;
const string *ptag = meta.ptag;
if (!ptag && !index_op->get_optag()->empty()) {
ptag = index_op->get_optag();
}
r = target->prepare_atomic_modification(dpp, op, reset_obj, ptag, meta.if_match, meta.if_nomatch, false, modify_tail, y);
if (r < 0)
return r;
if (real_clock::is_zero(meta.set_mtime)) {
meta.set_mtime = real_clock::now();
}
if (target->get_bucket_info().obj_lock_enabled() && target->get_bucket_info().obj_lock.has_rule() && meta.flags == PUT_OBJ_CREATE) {
auto iter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
if (iter == attrs.end()) {
real_time lock_until_date = target->get_bucket_info().obj_lock.get_lock_until_date(meta.set_mtime);
string mode = target->get_bucket_info().obj_lock.get_mode();
RGWObjectRetention obj_retention(mode, lock_until_date);
bufferlist bl;
obj_retention.encode(bl);
op.setxattr(RGW_ATTR_OBJECT_RETENTION, bl);
}
}
if (state->is_olh) {
op.setxattr(RGW_ATTR_OLH_ID_TAG, state->olh_tag);
}
struct timespec mtime_ts = real_clock::to_timespec(meta.set_mtime);
op.mtime2(&mtime_ts);
if (meta.data) {
    /* if we want to overwrite the data, we also want to overwrite the
       xattrs; write_full() below replaces the entire object */
op.write_full(*meta.data);
if (state->compressed) {
uint32_t alloc_hint_flags = librados::ALLOC_HINT_FLAG_INCOMPRESSIBLE;
op.set_alloc_hint2(0, 0, alloc_hint_flags);
}
}
string etag;
string content_type;
bufferlist acl_bl;
string storage_class;
map<string, bufferlist>::iterator iter;
if (meta.rmattrs) {
for (iter = meta.rmattrs->begin(); iter != meta.rmattrs->end(); ++iter) {
const string& name = iter->first;
op.rmxattr(name.c_str());
}
}
if (meta.manifest) {
storage_class = meta.manifest->get_tail_placement().placement_rule.storage_class;
/* remove existing manifest attr */
iter = attrs.find(RGW_ATTR_MANIFEST);
if (iter != attrs.end())
attrs.erase(iter);
bufferlist bl;
encode(*meta.manifest, bl);
op.setxattr(RGW_ATTR_MANIFEST, bl);
}
for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
const string& name = iter->first;
bufferlist& bl = iter->second;
if (!bl.length())
continue;
op.setxattr(name.c_str(), bl);
if (name.compare(RGW_ATTR_ETAG) == 0) {
etag = rgw_bl_str(bl);
} else if (name.compare(RGW_ATTR_CONTENT_TYPE) == 0) {
content_type = rgw_bl_str(bl);
} else if (name.compare(RGW_ATTR_ACL) == 0) {
acl_bl = bl;
}
}
if (attrs.find(RGW_ATTR_PG_VER) == attrs.end()) {
cls_rgw_obj_store_pg_ver(op, RGW_ATTR_PG_VER);
}
if (attrs.find(RGW_ATTR_SOURCE_ZONE) == attrs.end()) {
bufferlist bl;
encode(store->svc.zone->get_zone_short_id(), bl);
op.setxattr(RGW_ATTR_SOURCE_ZONE, bl);
}
if (!storage_class.empty()) {
bufferlist bl;
bl.append(storage_class);
op.setxattr(RGW_ATTR_STORAGE_CLASS, bl);
}
if (!op.size())
return 0;
uint64_t epoch;
int64_t poolid;
bool orig_exists;
uint64_t orig_size;
  if (!reset_obj) { // multipart upload: the head object is immutable
orig_exists = false;
orig_size = 0;
} else {
orig_exists = state->exists;
orig_size = state->accounted_size;
}
bool versioned_target = (meta.olh_epoch && *meta.olh_epoch > 0) ||
!obj.key.instance.empty();
bool versioned_op = (target->versioning_enabled() || is_olh || versioned_target);
if (versioned_op) {
index_op->set_bilog_flags(RGW_BILOG_FLAG_VERSIONED_OP);
}
if (!index_op->is_prepared()) {
tracepoint(rgw_rados, prepare_enter, req_id.c_str());
r = index_op->prepare(dpp, CLS_RGW_OP_ADD, &state->write_tag, y);
tracepoint(rgw_rados, prepare_exit, req_id.c_str());
if (r < 0)
return r;
}
auto& ioctx = ref.pool.ioctx();
tracepoint(rgw_rados, operate_enter, req_id.c_str());
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
tracepoint(rgw_rados, operate_exit, req_id.c_str());
  if (r < 0) { /* we can expect to get -ECANCELED if the object was replaced
                  under us, -ENOENT if it was removed, or -EEXIST if it did
                  not exist before and now it does */
if (r == -EEXIST && assume_noent) {
target->invalidate_state();
return r;
}
goto done_cancel;
}
epoch = ioctx.get_last_version();
poolid = ioctx.get_id();
r = target->complete_atomic_modification(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned r=" << r << dendl;
}
tracepoint(rgw_rados, complete_enter, req_id.c_str());
r = index_op->complete(dpp, poolid, epoch, size, accounted_size,
meta.set_mtime, etag, content_type,
storage_class, &acl_bl,
meta.category, meta.remove_objs, y,
meta.user_data, meta.appendable);
tracepoint(rgw_rados, complete_exit, req_id.c_str());
if (r < 0)
goto done_cancel;
if (meta.mtime) {
*meta.mtime = meta.set_mtime;
}
/* note that index_op was using state so we couldn't invalidate it earlier */
target->invalidate_state();
state = NULL;
if (versioned_op && meta.olh_epoch) {
r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), obj, false, NULL, *meta.olh_epoch, real_time(), false, y, meta.zones_trace);
if (r < 0) {
return r;
}
}
if (!real_clock::is_zero(meta.delete_at)) {
rgw_obj_index_key obj_key;
obj.key.get_index_key(&obj_key);
r = store->obj_expirer->hint_add(dpp, meta.delete_at, obj.bucket.tenant, obj.bucket.name,
obj.bucket.bucket_id, obj_key);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: objexp_hint_add() returned r=" << r << ", object will not get removed" << dendl;
/* ignoring error, nothing we can do at this point */
}
}
meta.canceled = false;
/* update quota cache */
if (meta.completeMultipart){
store->quota_handler->update_stats(meta.owner, obj.bucket, (orig_exists ? 0 : 1),
0, orig_size);
}
else {
store->quota_handler->update_stats(meta.owner, obj.bucket, (orig_exists ? 0 : 1),
accounted_size, orig_size);
}
return 0;
done_cancel:
int ret = index_op->cancel(dpp, meta.remove_objs, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
}
meta.canceled = true;
  /* we lost in a race. There are a few options:
   * - existing object was rewritten (ECANCELED)
   * - non-existing object was created (EEXIST)
   * - object was removed (ENOENT)
   * absent preconditions, we should treat these as success
   */
if (meta.if_match == NULL && meta.if_nomatch == NULL) {
if (r == -ECANCELED || r == -ENOENT || r == -EEXIST) {
r = 0;
}
} else {
if (meta.if_match != NULL) {
// only overwrite existing object
if (strcmp(meta.if_match, "*") == 0) {
if (r == -ENOENT) {
r = -ERR_PRECONDITION_FAILED;
} else if (r == -ECANCELED) {
r = 0;
}
}
}
if (meta.if_nomatch != NULL) {
// only create a new object
if (strcmp(meta.if_nomatch, "*") == 0) {
if (r == -EEXIST) {
r = -ERR_PRECONDITION_FAILED;
} else if (r == -ENOENT) {
r = 0;
}
}
}
}
return r;
}
int RGWRados::Object::Write::write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size,
map<string, bufferlist>& attrs, optional_yield y)
{
RGWBucketInfo& bucket_info = target->get_bucket_info();
RGWRados::Bucket bop(target->get_store(), bucket_info);
RGWRados::Bucket::UpdateIndex index_op(&bop, target->get_obj());
index_op.set_zones_trace(meta.zones_trace);
bool assume_noent = (meta.if_match == NULL && meta.if_nomatch == NULL);
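  /* with no preconditions we first try the optimistic path that assumes the
   * object does not yet exist; if that write races with an existing object
   * (-EEXIST), fall through to the regular path below */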
int r;
if (assume_noent) {
r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y);
if (r == -EEXIST) {
assume_noent = false;
}
}
if (!assume_noent) {
r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail, (void *)&index_op, y);
}
return r;
}
class RGWRadosPutObj : public RGWHTTPStreamRWRequest::ReceiveCB
{
const DoutPrefixProvider *dpp;
CephContext* cct;
rgw_obj obj;
rgw::sal::DataProcessor *filter;
boost::optional<RGWPutObj_Compress>& compressor;
bool try_etag_verify;
rgw::putobj::etag_verifier_ptr etag_verifier;
boost::optional<rgw::putobj::ChunkProcessor> buffering;
CompressorRef& plugin;
rgw::sal::ObjectProcessor *processor;
void (*progress_cb)(off_t, void *);
void *progress_data;
bufferlist extra_data_bl, manifest_bl;
std::optional<RGWCompressionInfo> compression_info;
uint64_t extra_data_left{0};
bool need_to_process_attrs{true};
uint64_t data_len{0};
map<string, bufferlist> src_attrs;
uint64_t ofs{0};
uint64_t lofs{0}; /* logical ofs */
std::function<int(map<string, bufferlist>&)> attrs_handler;
public:
RGWRadosPutObj(const DoutPrefixProvider *dpp,
CephContext* cct,
CompressorRef& plugin,
boost::optional<RGWPutObj_Compress>& compressor,
rgw::sal::ObjectProcessor *p,
void (*_progress_cb)(off_t, void *),
void *_progress_data,
std::function<int(map<string, bufferlist>&)> _attrs_handler) :
dpp(dpp),
cct(cct),
filter(p),
compressor(compressor),
try_etag_verify(cct->_conf->rgw_sync_obj_etag_verify),
plugin(plugin),
processor(p),
progress_cb(_progress_cb),
progress_data(_progress_data),
attrs_handler(_attrs_handler) {}
int process_attrs(void) {
bool encrypted = false;
if (extra_data_bl.length()) {
JSONParser jp;
if (!jp.parse(extra_data_bl.c_str(), extra_data_bl.length())) {
ldpp_dout(dpp, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl;
return -EIO;
}
JSONDecoder::decode_json("attrs", src_attrs, &jp);
encrypted = src_attrs.count(RGW_ATTR_CRYPT_MODE);
if (encrypted) {
// we won't have access to the decrypted data for checksumming
try_etag_verify = false;
}
// if the object is both compressed and encrypted, it was transferred
// in its encrypted+compressed form. we need to preserve the original
// RGW_ATTR_COMPRESSION instead of falling back to default compression
// settings
auto iter = src_attrs.find(RGW_ATTR_COMPRESSION);
if (iter != src_attrs.end() && !encrypted) {
const bufferlist bl = std::move(iter->second);
src_attrs.erase(iter); // don't preserve source compression info
if (try_etag_verify) {
// if we're trying to verify etags, we need to convert compressed
// ranges in the manifest back into logical multipart part offsets
RGWCompressionInfo info;
bool compressed = false;
int r = rgw_compression_info_from_attr(bl, compressed, info);
if (r < 0) {
ldpp_dout(dpp, 4) << "failed to decode compression info, "
"disabling etag verification" << dendl;
try_etag_verify = false;
} else if (compressed) {
compression_info = std::move(info);
}
}
}
/* We need the manifest to recompute the ETag for verification */
iter = src_attrs.find(RGW_ATTR_MANIFEST);
if (iter != src_attrs.end()) {
manifest_bl = std::move(iter->second);
src_attrs.erase(iter);
}
// filter out olh attributes
iter = src_attrs.lower_bound(RGW_ATTR_OLH_PREFIX);
while (iter != src_attrs.end()) {
if (!boost::algorithm::starts_with(iter->first, RGW_ATTR_OLH_PREFIX)) {
break;
}
iter = src_attrs.erase(iter);
}
}
int ret = attrs_handler(src_attrs);
if (ret < 0) {
return ret;
}
// do not compress if object is encrypted
if (plugin && !encrypted) {
compressor = boost::in_place(cct, plugin, filter);
// add a filter that buffers data so we don't try to compress tiny blocks.
// libcurl reads in 16k at a time, and we need at least 64k to get a good
// compression ratio
constexpr unsigned buffer_size = 512 * 1024;
buffering = boost::in_place(&*compressor, buffer_size);
filter = &*buffering;
}
if (try_etag_verify) {
ret = rgw::putobj::create_etag_verifier(dpp, cct, filter, manifest_bl,
compression_info,
etag_verifier);
if (ret < 0) {
ldpp_dout(dpp, 4) << "failed to initial etag verifier, "
"disabling etag verification" << dendl;
} else {
filter = etag_verifier.get();
}
}
need_to_process_attrs = false;
return 0;
}
int handle_data(bufferlist& bl, bool *pause) override {
if (progress_cb) {
progress_cb(data_len, progress_data);
}
if (extra_data_left) {
uint64_t extra_len = bl.length();
if (extra_len > extra_data_left)
extra_len = extra_data_left;
bufferlist extra;
bl.splice(0, extra_len, &extra);
extra_data_bl.append(extra);
extra_data_left -= extra_len;
if (extra_data_left == 0) {
int res = process_attrs();
if (res < 0)
return res;
}
ofs += extra_len;
if (bl.length() == 0) {
return 0;
}
}
if (need_to_process_attrs) {
/* need to call process_attrs() even if we don't get any attrs,
* need it to call attrs_handler().
*/
int res = process_attrs();
if (res < 0) {
return res;
}
}
ceph_assert(uint64_t(ofs) >= extra_data_len);
uint64_t size = bl.length();
ofs += size;
const uint64_t lofs = data_len;
data_len += size;
return filter->process(std::move(bl), lofs);
}
int flush() {
return filter->process({}, data_len);
}
bufferlist& get_extra_data() { return extra_data_bl; }
map<string, bufferlist>& get_attrs() { return src_attrs; }
void set_extra_data_len(uint64_t len) override {
extra_data_left = len;
RGWHTTPStreamRWRequest::ReceiveCB::set_extra_data_len(len);
}
uint64_t get_data_len() {
return data_len;
}
std::string get_verifier_etag() {
if (etag_verifier) {
etag_verifier->calculate_etag();
return etag_verifier->get_calculated_etag();
} else {
return "";
}
}
};
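/*
 * Note on the filter chain built by process_attrs() above (assuming both
 * compression and etag verification end up enabled): handle_data() pushes
 * bytes through
 *
 *   etag_verifier -> buffering -> compressor -> processor
 *
 * i.e. `filter` is rebound to each new head of the chain as it is added,
 * and the object processor is always the tail.
 */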
/*
* prepare attrset depending on attrs_mod.
*/
static void set_copy_attrs(map<string, bufferlist>& src_attrs,
map<string, bufferlist>& attrs,
RGWRados::AttrsMod attrs_mod)
{
switch (attrs_mod) {
case RGWRados::ATTRSMOD_NONE:
attrs = src_attrs;
break;
case RGWRados::ATTRSMOD_REPLACE:
if (!attrs[RGW_ATTR_ETAG].length()) {
attrs[RGW_ATTR_ETAG] = src_attrs[RGW_ATTR_ETAG];
}
if (!attrs[RGW_ATTR_TAIL_TAG].length()) {
auto ttiter = src_attrs.find(RGW_ATTR_TAIL_TAG);
if (ttiter != src_attrs.end()) {
attrs[RGW_ATTR_TAIL_TAG] = src_attrs[RGW_ATTR_TAIL_TAG];
}
}
break;
case RGWRados::ATTRSMOD_MERGE:
for (map<string, bufferlist>::iterator it = src_attrs.begin(); it != src_attrs.end(); ++it) {
if (attrs.find(it->first) == attrs.end()) {
attrs[it->first] = it->second;
}
}
break;
}
}
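/*
 * Illustrative behavior sketch (comment only): given
 *   src_attrs = { ETAG: "abc", CONTENT_TYPE: "image/png" }
 *   attrs     = { CONTENT_TYPE: "text/plain" }
 * then after set_copy_attrs():
 *   ATTRSMOD_NONE    -> attrs == src_attrs (input attrs discarded)
 *   ATTRSMOD_REPLACE -> attrs keeps CONTENT_TYPE "text/plain" but inherits
 *                       the source ETAG (and TAIL_TAG) when absent
 *   ATTRSMOD_MERGE   -> attrs keeps "text/plain" and gains ETAG "abc"
 */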
int RGWRados::rewrite_obj(RGWBucketInfo& dest_bucket_info, const rgw_obj& obj, const DoutPrefixProvider *dpp, optional_yield y)
{
RGWObjectCtx rctx(this->driver);
rgw::sal::Attrs attrset;
uint64_t obj_size;
ceph::real_time mtime;
RGWRados::Object op_target(this, dest_bucket_info, rctx, obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrset;
read_op.params.obj_size = &obj_size;
read_op.params.lastmod = &mtime;
int ret = read_op.prepare(y, dpp);
if (ret < 0)
return ret;
attrset.erase(RGW_ATTR_ID_TAG);
attrset.erase(RGW_ATTR_TAIL_TAG);
attrset.erase(RGW_ATTR_STORAGE_CLASS);
return copy_obj_data(rctx, dest_bucket_info, dest_bucket_info.placement_rule,
read_op, obj_size - 1, obj, NULL, mtime,
attrset, 0, real_time(), NULL, dpp, y);
}
int RGWRados::reindex_obj(const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
const DoutPrefixProvider* dpp,
optional_yield y)
{
if (bucket_info.versioned()) {
ldpp_dout(dpp, 10) << "WARNING: " << __func__ <<
": cannot process versioned bucket \"" <<
bucket_info.bucket.get_key() << "\"" <<
dendl;
return -ENOTSUP;
}
Bucket target(this, bucket_info);
RGWRados::Bucket::UpdateIndex update_idx(&target, obj);
const std::string* no_write_tag = nullptr;
int ret = update_idx.prepare(dpp, RGWModifyOp::CLS_RGW_OP_ADD, no_write_tag, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": update index prepare for \"" << obj << "\" returned: " <<
cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
struct obj_time_weight {
real_time mtime;
uint32_t zone_short_id;
uint64_t pg_ver;
bool high_precision;
obj_time_weight() : zone_short_id(0), pg_ver(0), high_precision(false) {}
bool compare_low_precision(const obj_time_weight& rhs) {
struct timespec l = ceph::real_clock::to_timespec(mtime);
struct timespec r = ceph::real_clock::to_timespec(rhs.mtime);
l.tv_nsec = 0;
r.tv_nsec = 0;
if (l > r) {
return false;
}
if (l < r) {
return true;
}
if (!zone_short_id || !rhs.zone_short_id) {
/* don't compare zone ids, if one wasn't provided */
return false;
}
if (zone_short_id != rhs.zone_short_id) {
return (zone_short_id < rhs.zone_short_id);
}
return (pg_ver < rhs.pg_ver);
}
bool operator<(const obj_time_weight& rhs) {
if (!high_precision || !rhs.high_precision) {
return compare_low_precision(rhs);
}
if (mtime > rhs.mtime) {
return false;
}
if (mtime < rhs.mtime) {
return true;
}
if (!zone_short_id || !rhs.zone_short_id) {
/* don't compare zone ids, if one wasn't provided */
return false;
}
if (zone_short_id != rhs.zone_short_id) {
return (zone_short_id < rhs.zone_short_id);
}
return (pg_ver < rhs.pg_ver);
}
void init(const real_time& _mtime, uint32_t _short_id, uint64_t _pg_ver) {
mtime = _mtime;
zone_short_id = _short_id;
pg_ver = _pg_ver;
}
void init(RGWObjState *state) {
mtime = state->mtime;
zone_short_id = state->zone_short_id;
pg_ver = state->pg_ver;
}
};
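/*
 * Comparison sketch (illustrative): two writes whose mtimes differ only
 * below one second compare equal under compare_low_precision(), so the tie
 * falls through to zone_short_id and then pg_ver; only when both sides set
 * high_precision does the full-resolution mtime decide first.
 */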
inline ostream& operator<<(ostream& out, const obj_time_weight &o) {
out << o.mtime;
if (o.zone_short_id != 0 || o.pg_ver != 0) {
out << "[zid=" << o.zone_short_id << ", pgv=" << o.pg_ver << "]";
}
return out;
}
class RGWGetExtraDataCB : public RGWHTTPStreamRWRequest::ReceiveCB {
bufferlist extra_data;
public:
RGWGetExtraDataCB() {}
int handle_data(bufferlist& bl, bool *pause) override {
int bl_len = (int)bl.length();
if (extra_data.length() < extra_data_len) {
off_t max = extra_data_len - extra_data.length();
if (max > bl_len) {
max = bl_len;
}
bl.splice(0, max, &extra_data);
}
return bl_len;
}
bufferlist& get_extra_data() {
return extra_data;
}
};
int RGWRados::stat_remote_obj(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& src_obj,
const RGWBucketInfo *src_bucket_info,
real_time *src_mtime,
uint64_t *psize,
const real_time *mod_ptr,
const real_time *unmod_ptr,
bool high_precision_time,
const char *if_match,
const char *if_nomatch,
map<string, bufferlist> *pattrs,
map<string, string> *pheaders,
string *version_id,
string *ptag,
string *petag, optional_yield y)
{
/* source is in a different zonegroup, copy from there */
RGWRESTStreamRWRequest *in_stream_req;
string tag;
map<string, bufferlist> src_attrs;
append_rand_alpha(cct, tag, tag, 32);
obj_time_weight set_mtime_weight;
set_mtime_weight.high_precision = high_precision_time;
RGWRESTConn *conn;
if (source_zone.empty()) {
if (!src_bucket_info || src_bucket_info->zonegroup.empty()) {
/* source is in the master zonegroup */
conn = svc.zone->get_master_conn();
} else {
auto& zonegroup_conn_map = svc.zone->get_zonegroup_conn_map();
map<string, RGWRESTConn *>::iterator iter = zonegroup_conn_map.find(src_bucket_info->zonegroup);
if (iter == zonegroup_conn_map.end()) {
ldpp_dout(dpp, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
}
} else {
auto& zone_conn_map = svc.zone->get_zone_conn_map();
auto iter = zone_conn_map.find(source_zone);
if (iter == zone_conn_map.end()) {
ldpp_dout(dpp, 0) << "could not find zone connection to zone: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
}
RGWGetExtraDataCB cb;
map<string, string> req_headers;
real_time set_mtime;
const real_time *pmod = mod_ptr;
obj_time_weight dest_mtime_weight;
constexpr bool prepend_meta = true;
constexpr bool get_op = true;
constexpr bool rgwx_stat = true;
constexpr bool sync_manifest = true;
constexpr bool skip_decrypt = true;
constexpr bool sync_cloudtiered = true;
int ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr,
dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver,
prepend_meta, get_op, rgwx_stat,
sync_manifest, skip_decrypt, nullptr, sync_cloudtiered,
true, &cb, &in_stream_req);
if (ret < 0) {
return ret;
}
ret = conn->complete_request(in_stream_req, nullptr, &set_mtime, psize,
nullptr, pheaders, y);
if (ret < 0) {
return ret;
}
bufferlist& extra_data_bl = cb.get_extra_data();
if (extra_data_bl.length()) {
JSONParser jp;
if (!jp.parse(extra_data_bl.c_str(), extra_data_bl.length())) {
ldpp_dout(dpp, 0) << "failed to parse response extra data. len=" << extra_data_bl.length() << " data=" << extra_data_bl.c_str() << dendl;
return -EIO;
}
JSONDecoder::decode_json("attrs", src_attrs, &jp);
src_attrs.erase(RGW_ATTR_MANIFEST); // not interested in original object layout
}
if (src_mtime) {
*src_mtime = set_mtime;
}
if (petag) {
map<string, bufferlist>::iterator iter = src_attrs.find(RGW_ATTR_ETAG);
if (iter != src_attrs.end()) {
bufferlist& etagbl = iter->second;
*petag = etagbl.to_str();
while (petag->size() > 0 && (*petag)[petag->size() - 1] == '\0') {
*petag = petag->substr(0, petag->size() - 1);
}
}
}
if (pattrs) {
*pattrs = std::move(src_attrs);
}
return 0;
}
int RGWFetchObjFilter_Default::filter(CephContext *cct,
const rgw_obj_key& source_key,
const RGWBucketInfo& dest_bucket_info,
std::optional<rgw_placement_rule> dest_placement_rule,
const map<string, bufferlist>& obj_attrs,
std::optional<rgw_user> *poverride_owner,
const rgw_placement_rule **prule)
{
const rgw_placement_rule *ptail_rule = (dest_placement_rule ? &(*dest_placement_rule) : nullptr);
if (!ptail_rule) {
auto iter = obj_attrs.find(RGW_ATTR_STORAGE_CLASS);
if (iter != obj_attrs.end()) {
dest_rule.storage_class = iter->second.to_str();
dest_rule.inherit_from(dest_bucket_info.placement_rule);
ptail_rule = &dest_rule;
} else {
ptail_rule = &dest_bucket_info.placement_rule;
}
}
*prule = ptail_rule;
return 0;
}
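/*
 * Resolution order sketch (illustrative): an explicit dest_placement_rule
 * wins outright; otherwise a source RGW_ATTR_STORAGE_CLASS attr is honored
 * on top of the destination bucket's placement rule; failing both, the
 * destination bucket's own placement rule is used as-is.
 */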
int RGWRados::fetch_remote_obj(RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& dest_obj,
const rgw_obj& src_obj,
RGWBucketInfo& dest_bucket_info,
RGWBucketInfo *src_bucket_info,
std::optional<rgw_placement_rule> dest_placement_rule,
real_time *src_mtime,
real_time *mtime,
const real_time *mod_ptr,
const real_time *unmod_ptr,
bool high_precision_time,
const char *if_match,
const char *if_nomatch,
AttrsMod attrs_mod,
bool copy_if_newer,
rgw::sal::Attrs& attrs,
RGWObjCategory category,
std::optional<uint64_t> olh_epoch,
real_time delete_at,
string *ptag,
string *petag,
void (*progress_cb)(off_t, void *),
void *progress_data,
const DoutPrefixProvider *dpp,
RGWFetchObjFilter *filter, optional_yield y,
bool stat_follow_olh,
const rgw_obj& stat_dest_obj,
const rgw_zone_set_entry& source_trace_entry,
rgw_zone_set *zones_trace,
std::optional<uint64_t>* bytes_transferred)
{
/* source is in a different zonegroup, copy from there */
RGWRESTStreamRWRequest *in_stream_req;
string tag;
int i;
append_rand_alpha(cct, tag, tag, 32);
obj_time_weight set_mtime_weight;
set_mtime_weight.high_precision = high_precision_time;
int ret;
rgw::BlockingAioThrottle aio(cct->_conf->rgw_put_obj_min_window_size);
using namespace rgw::putobj;
AtomicObjectProcessor processor(&aio, this, dest_bucket_info, nullptr,
user_id, obj_ctx, dest_obj, olh_epoch,
tag, dpp, y);
RGWRESTConn *conn;
auto& zone_conn_map = svc.zone->get_zone_conn_map();
auto& zonegroup_conn_map = svc.zone->get_zonegroup_conn_map();
if (source_zone.empty()) {
if (!src_bucket_info || src_bucket_info->zonegroup.empty()) {
/* source is in the master zonegroup */
conn = svc.zone->get_master_conn();
} else {
map<string, RGWRESTConn *>::iterator iter = zonegroup_conn_map.find(src_bucket_info->zonegroup);
if (iter == zonegroup_conn_map.end()) {
ldpp_dout(dpp, 0) << "could not find zonegroup connection to zonegroup: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
}
} else {
auto iter = zone_conn_map.find(source_zone);
if (iter == zone_conn_map.end()) {
ldpp_dout(dpp, 0) << "could not find zone connection to zone: " << source_zone << dendl;
return -ENOENT;
}
conn = iter->second;
}
boost::optional<RGWPutObj_Compress> compressor;
CompressorRef plugin;
RGWFetchObjFilter_Default source_filter;
if (!filter) {
filter = &source_filter;
}
std::optional<rgw_user> override_owner;
RGWRadosPutObj cb(dpp, cct, plugin, compressor, &processor, progress_cb, progress_data,
[&](map<string, bufferlist>& obj_attrs) {
const rgw_placement_rule *ptail_rule;
int ret = filter->filter(cct,
src_obj.key,
dest_bucket_info,
dest_placement_rule,
obj_attrs,
&override_owner,
&ptail_rule);
if (ret < 0) {
ldpp_dout(dpp, 5) << "Aborting fetch: source object filter returned ret=" << ret << dendl;
return ret;
}
processor.set_tail_placement(*ptail_rule);
const auto& compression_type = svc.zone->get_zone_params().get_compression_type(*ptail_rule);
if (compression_type != "none") {
plugin = Compressor::create(cct, compression_type);
if (!plugin) {
ldpp_dout(dpp, 1) << "Cannot load plugin for compression type "
<< compression_type << dendl;
}
}
ret = processor.prepare(y);
if (ret < 0) {
return ret;
}
return 0;
});
string etag;
real_time set_mtime;
uint64_t expected_size = 0;
RGWObjState *dest_state = NULL;
RGWObjManifest *manifest = nullptr;
const real_time *pmod = mod_ptr;
obj_time_weight dest_mtime_weight;
rgw_zone_set_entry dst_zone_trace(svc.zone->get_zone().id, dest_bucket_info.bucket.get_key());
if (copy_if_newer) {
/* need to get mtime for destination */
ret = get_obj_state(dpp, &obj_ctx, dest_bucket_info, stat_dest_obj, &dest_state, &manifest, stat_follow_olh, y);
if (ret < 0)
goto set_err_state;
if (!real_clock::is_zero(dest_state->mtime)) {
dest_mtime_weight.init(dest_state);
pmod = &dest_mtime_weight.mtime;
}
}
static constexpr bool prepend_meta = true;
static constexpr bool get_op = true;
static constexpr bool rgwx_stat = false;
static constexpr bool sync_manifest = true;
static constexpr bool skip_decrypt = true;
static constexpr bool sync_cloudtiered = true;
ret = conn->get_obj(dpp, user_id, info, src_obj, pmod, unmod_ptr,
dest_mtime_weight.zone_short_id, dest_mtime_weight.pg_ver,
prepend_meta, get_op, rgwx_stat,
sync_manifest, skip_decrypt, &dst_zone_trace,
sync_cloudtiered, true,
&cb, &in_stream_req);
if (ret < 0) {
goto set_err_state;
}
ret = conn->complete_request(in_stream_req, &etag, &set_mtime,
&expected_size, nullptr, nullptr, y);
if (ret < 0) {
goto set_err_state;
}
ret = cb.flush();
if (ret < 0) {
goto set_err_state;
}
if (cb.get_data_len() != expected_size) {
ret = -EIO;
ldpp_dout(dpp, 0) << "ERROR: object truncated during fetching, expected "
<< expected_size << " bytes but received " << cb.get_data_len() << dendl;
goto set_err_state;
}
if (compressor && compressor->is_compressed()) {
bufferlist tmp;
RGWCompressionInfo cs_info;
cs_info.compression_type = plugin->get_type_name();
cs_info.orig_size = cb.get_data_len();
cs_info.compressor_message = compressor->get_compressor_message();
    cs_info.blocks = std::move(compressor->get_compression_blocks());
encode(cs_info, tmp);
cb.get_attrs()[RGW_ATTR_COMPRESSION] = tmp;
}
if (override_owner) {
processor.set_owner(*override_owner);
auto& obj_attrs = cb.get_attrs();
RGWUserInfo owner_info;
if (ctl.user->get_info_by_uid(dpp, *override_owner, &owner_info, y) < 0) {
ldpp_dout(dpp, 10) << "owner info does not exist" << dendl;
return -EINVAL;
}
RGWAccessControlPolicy acl;
auto aiter = obj_attrs.find(RGW_ATTR_ACL);
if (aiter == obj_attrs.end()) {
ldpp_dout(dpp, 0) << "WARNING: " << __func__ << "(): object doesn't have ACL attribute, setting default ACLs" << dendl;
acl.create_default(owner_info.user_id, owner_info.display_name);
} else {
auto iter = aiter->second.cbegin();
try {
acl.decode(iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
}
ACLOwner new_owner;
new_owner.set_id(*override_owner);
new_owner.set_name(owner_info.display_name);
acl.set_owner(new_owner);
bufferlist bl;
acl.encode(bl);
obj_attrs[RGW_ATTR_ACL] = std::move(bl);
}
if (source_zone.empty()) { /* need to preserve expiration if copy in the same zonegroup */
cb.get_attrs().erase(RGW_ATTR_DELETE_AT);
} else {
map<string, bufferlist>::iterator iter = cb.get_attrs().find(RGW_ATTR_DELETE_AT);
if (iter != cb.get_attrs().end()) {
try {
decode(delete_at, iter->second);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode delete_at field in intra zone copy" << dendl;
}
}
}
if (src_mtime) {
*src_mtime = set_mtime;
}
if (petag) {
const auto iter = cb.get_attrs().find(RGW_ATTR_ETAG);
if (iter != cb.get_attrs().end()) {
*petag = iter->second.to_str();
}
}
//erase the append attr
cb.get_attrs().erase(RGW_ATTR_APPEND_PART_NUM);
{ // add x-amz-replication-status=REPLICA
auto& bl = cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_STATUS];
bl.clear(); // overwrite source's status
bl.append("REPLICA");
}
{ // update replication trace
std::vector<rgw_zone_set_entry> trace;
if (auto i = cb.get_attrs().find(RGW_ATTR_OBJ_REPLICATION_TRACE);
i != cb.get_attrs().end()) {
try {
decode(trace, i->second);
} catch (const buffer::error&) {}
}
// add the source entry to the end
trace.push_back(source_trace_entry);
bufferlist bl;
encode(trace, bl);
cb.get_attrs()[RGW_ATTR_OBJ_REPLICATION_TRACE] = std::move(bl);
}
if (source_zone.empty()) {
set_copy_attrs(cb.get_attrs(), attrs, attrs_mod);
} else {
attrs = cb.get_attrs();
}
if (copy_if_newer) {
uint64_t pg_ver = 0;
auto i = attrs.find(RGW_ATTR_PG_VER);
if (i != attrs.end() && i->second.length() > 0) {
auto iter = i->second.cbegin();
try {
decode(pg_ver, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode pg ver attribute, ignoring" << dendl;
/* non critical error */
}
}
set_mtime_weight.init(set_mtime, svc.zone->get_zone_short_id(), pg_ver);
}
  /* Perform ETag verification if we have computed the object's MD5 sum at our end */
if (const auto& verifier_etag = cb.get_verifier_etag();
!verifier_etag.empty()) {
string trimmed_etag = etag;
/* Remove the leading and trailing double quotes from etag */
trimmed_etag.erase(std::remove(trimmed_etag.begin(), trimmed_etag.end(),'\"'),
trimmed_etag.end());
if (verifier_etag != trimmed_etag) {
ret = -EIO;
ldpp_dout(dpp, 0) << "ERROR: source and destination objects don't match. Expected etag:"
<< trimmed_etag << " Computed etag:" << verifier_etag << dendl;
goto set_err_state;
}
}
#define MAX_COMPLETE_RETRY 100
for (i = 0; i < MAX_COMPLETE_RETRY; i++) {
bool canceled = false;
ret = processor.complete(cb.get_data_len(), etag, mtime, set_mtime,
attrs, delete_at, nullptr, nullptr, nullptr,
zones_trace, &canceled, y);
if (ret < 0) {
goto set_err_state;
}
if (copy_if_newer && canceled) {
ldpp_dout(dpp, 20) << "raced with another write of obj: " << dest_obj << dendl;
obj_ctx.invalidate(dest_obj); /* object was overwritten */
ret = get_obj_state(dpp, &obj_ctx, dest_bucket_info, stat_dest_obj, &dest_state, &manifest, stat_follow_olh, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": get_err_state() returned ret=" << ret << dendl;
goto set_err_state;
}
dest_mtime_weight.init(dest_state);
dest_mtime_weight.high_precision = high_precision_time;
if (!dest_state->exists ||
dest_mtime_weight < set_mtime_weight) {
ldpp_dout(dpp, 20) << "retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl;
continue;
} else {
ldpp_dout(dpp, 20) << "not retrying writing object mtime=" << set_mtime << " dest_state->mtime=" << dest_state->mtime << " dest_state->exists=" << dest_state->exists << dendl;
}
}
break;
}
if (i == MAX_COMPLETE_RETRY) {
ldpp_dout(dpp, 0) << "ERROR: retried object completion too many times, something is wrong!" << dendl;
ret = -EIO;
goto set_err_state;
}
if (bytes_transferred) {
*bytes_transferred = cb.get_data_len();
}
return 0;
set_err_state:
if (copy_if_newer && ret == -ERR_NOT_MODIFIED) {
// we may have already fetched during sync of OP_ADD, but were waiting
// for OP_LINK_OLH to call set_olh() with a real olh_epoch
if (olh_epoch && *olh_epoch > 0) {
constexpr bool log_data_change = true;
ret = set_olh(dpp, obj_ctx, dest_bucket_info, dest_obj, false, nullptr,
*olh_epoch, real_time(), false, y, zones_trace, log_data_change);
} else {
// we already have the latest copy
ret = 0;
}
}
return ret;
}
int RGWRados::copy_obj_to_remote_dest(const DoutPrefixProvider *dpp,
RGWObjState *astate,
map<string, bufferlist>& src_attrs,
RGWRados::Object::Read& read_op,
const rgw_user& user_id,
const rgw_obj& dest_obj,
real_time *mtime, optional_yield y)
{
string etag;
RGWRESTStreamS3PutObj *out_stream_req;
auto rest_master_conn = svc.zone->get_master_conn();
int ret = rest_master_conn->put_obj_async_init(dpp, user_id, dest_obj, src_attrs, &out_stream_req);
if (ret < 0) {
return ret;
}
out_stream_req->set_send_length(astate->size);
ret = RGWHTTP::send(out_stream_req);
if (ret < 0) {
delete out_stream_req;
return ret;
}
ret = read_op.iterate(dpp, 0, astate->size - 1, out_stream_req->get_out_cb(), y);
if (ret < 0) {
delete out_stream_req;
return ret;
}
ret = rest_master_conn->complete_request(out_stream_req, etag, mtime, y);
if (ret < 0)
return ret;
return 0;
}
/**
* Copy an object.
* dest_obj: the object to copy into
* src_obj: the object to copy from
* attrs: usage depends on attrs_mod parameter
* attrs_mod: the modification mode of the attrs, may have the following values:
* ATTRSMOD_NONE - the attributes of the source object will be
* copied without modifications, attrs parameter is ignored;
* ATTRSMOD_REPLACE - new object will have the attributes provided by attrs
* parameter, source object attributes are not copied;
* ATTRSMOD_MERGE - any conflicting meta keys on the source object's attributes
* are overwritten by values contained in attrs parameter.
 * Returns: 0 on success, -ERR# otherwise.
*/
int RGWRados::copy_obj(RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& dest_obj,
const rgw_obj& src_obj,
RGWBucketInfo& dest_bucket_info,
RGWBucketInfo& src_bucket_info,
const rgw_placement_rule& dest_placement,
real_time *src_mtime,
real_time *mtime,
const real_time *mod_ptr,
const real_time *unmod_ptr,
bool high_precision_time,
const char *if_match,
const char *if_nomatch,
AttrsMod attrs_mod,
bool copy_if_newer,
rgw::sal::Attrs& attrs,
RGWObjCategory category,
uint64_t olh_epoch,
real_time delete_at,
string *version_id,
string *ptag,
string *petag,
void (*progress_cb)(off_t, void *),
void *progress_data,
const DoutPrefixProvider *dpp,
optional_yield y)
{
int ret;
uint64_t obj_size;
rgw_obj shadow_obj = dest_obj;
string shadow_oid;
bool remote_src;
bool remote_dest;
bool stat_follow_olh = false;
rgw_obj stat_dest_obj = dest_obj;
append_rand_alpha(cct, dest_obj.get_oid(), shadow_oid, 32);
shadow_obj.init_ns(dest_obj.bucket, shadow_oid, shadow_ns);
auto& zonegroup = svc.zone->get_zonegroup();
remote_dest = !zonegroup.equals(dest_bucket_info.zonegroup);
remote_src = !zonegroup.equals(src_bucket_info.zonegroup);
if (remote_src && remote_dest) {
ldpp_dout(dpp, 0) << "ERROR: can't copy object when both src and dest buckets are remote" << dendl;
return -EINVAL;
}
ldpp_dout(dpp, 5) << "Copy object " << src_obj.bucket << ":" << src_obj.get_oid() << " => " << dest_obj.bucket << ":" << dest_obj.get_oid() << dendl;
if (remote_src || !source_zone.empty()) {
rgw_zone_set_entry source_trace_entry{source_zone.id, std::nullopt};
return fetch_remote_obj(obj_ctx, user_id, info, source_zone,
dest_obj, src_obj, dest_bucket_info, &src_bucket_info,
dest_placement, src_mtime, mtime, mod_ptr,
unmod_ptr, high_precision_time,
if_match, if_nomatch, attrs_mod, copy_if_newer, attrs, category,
olh_epoch, delete_at, ptag, petag, progress_cb, progress_data, dpp,
nullptr /* filter */, y, stat_follow_olh, stat_dest_obj, source_trace_entry);
}
map<string, bufferlist> src_attrs;
RGWRados::Object src_op_target(this, src_bucket_info, obj_ctx, src_obj);
RGWRados::Object::Read read_op(&src_op_target);
read_op.conds.mod_ptr = mod_ptr;
read_op.conds.unmod_ptr = unmod_ptr;
read_op.conds.high_precision_time = high_precision_time;
read_op.conds.if_match = if_match;
read_op.conds.if_nomatch = if_nomatch;
read_op.params.attrs = &src_attrs;
read_op.params.lastmod = src_mtime;
read_op.params.obj_size = &obj_size;
ret = read_op.prepare(y, dpp);
if (ret < 0) {
return ret;
}
if (src_attrs.count(RGW_ATTR_CRYPT_MODE)) {
    // The current implementation does not follow the S3 spec and may even
    // silently corrupt data when copying multipart objects across pools,
    // so reject COPY operations on encrypted objects until this is fully
    // functional.
ldpp_dout(dpp, 0) << "ERROR: copy op for encrypted object " << src_obj
<< " has not been implemented." << dendl;
return -ERR_NOT_IMPLEMENTED;
}
src_attrs[RGW_ATTR_ACL] = attrs[RGW_ATTR_ACL];
src_attrs.erase(RGW_ATTR_DELETE_AT);
src_attrs.erase(RGW_ATTR_OBJECT_RETENTION);
src_attrs.erase(RGW_ATTR_OBJECT_LEGAL_HOLD);
map<string, bufferlist>::iterator rt = attrs.find(RGW_ATTR_OBJECT_RETENTION);
if (rt != attrs.end())
src_attrs[RGW_ATTR_OBJECT_RETENTION] = rt->second;
map<string, bufferlist>::iterator lh = attrs.find(RGW_ATTR_OBJECT_LEGAL_HOLD);
if (lh != attrs.end())
src_attrs[RGW_ATTR_OBJECT_LEGAL_HOLD] = lh->second;
set_copy_attrs(src_attrs, attrs, attrs_mod);
attrs.erase(RGW_ATTR_ID_TAG);
attrs.erase(RGW_ATTR_PG_VER);
attrs.erase(RGW_ATTR_SOURCE_ZONE);
map<string, bufferlist>::iterator cmp = src_attrs.find(RGW_ATTR_COMPRESSION);
if (cmp != src_attrs.end())
attrs[RGW_ATTR_COMPRESSION] = cmp->second;
RGWObjManifest manifest;
RGWObjState *astate = NULL;
RGWObjManifest *amanifest = nullptr;
ret = get_obj_state(dpp, &obj_ctx, src_bucket_info, src_obj, &astate, &amanifest, y);
if (ret < 0) {
return ret;
}
vector<rgw_raw_obj> ref_objs;
if (remote_dest) {
/* dest is in a different zonegroup, copy it there */
return copy_obj_to_remote_dest(dpp, astate, attrs, read_op, user_id, dest_obj, mtime, y);
}
uint64_t max_chunk_size;
ret = get_max_chunk_size(dest_bucket_info.placement_rule, dest_obj, &max_chunk_size, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to get max_chunk_size() for bucket " << dest_obj.bucket << dendl;
return ret;
}
rgw_pool src_pool;
rgw_pool dest_pool;
const rgw_placement_rule *src_rule{nullptr};
if (amanifest) {
src_rule = &amanifest->get_tail_placement().placement_rule;
ldpp_dout(dpp, 20) << __func__ << "(): manifest src_rule=" << src_rule->to_str() << dendl;
}
if (!src_rule || src_rule->empty()) {
src_rule = &src_bucket_info.placement_rule;
}
if (!get_obj_data_pool(*src_rule, src_obj, &src_pool)) {
ldpp_dout(dpp, 0) << "ERROR: failed to locate data pool for " << src_obj << dendl;
return -EIO;
}
if (!get_obj_data_pool(dest_placement, dest_obj, &dest_pool)) {
ldpp_dout(dpp, 0) << "ERROR: failed to locate data pool for " << dest_obj << dendl;
return -EIO;
}
ldpp_dout(dpp, 20) << __func__ << "(): src_rule=" << src_rule->to_str() << " src_pool=" << src_pool
<< " dest_rule=" << dest_placement.to_str() << " dest_pool=" << dest_pool << dendl;
bool copy_data = (!amanifest) ||
(*src_rule != dest_placement) ||
(src_pool != dest_pool);
bool copy_first = false;
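  /* an object with no tail needs a full data copy; if the head holds data
   * that fits in one chunk, only the head is copied and the tail is
   * refcounted, while a larger head also forces a full copy */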
if (amanifest) {
if (!amanifest->has_tail()) {
copy_data = true;
} else {
uint64_t head_size = amanifest->get_head_size();
if (head_size > 0) {
if (head_size > max_chunk_size) {
copy_data = true;
} else {
copy_first = true;
}
}
}
}
if (petag) {
const auto iter = attrs.find(RGW_ATTR_ETAG);
if (iter != attrs.end()) {
*petag = iter->second.to_str();
}
}
if (copy_data) { /* refcounting tail wouldn't work here, just copy the data */
attrs.erase(RGW_ATTR_TAIL_TAG);
return copy_obj_data(obj_ctx, dest_bucket_info, dest_placement, read_op, obj_size - 1, dest_obj,
mtime, real_time(), attrs, olh_epoch, delete_at, petag, dpp, y);
}
  /* copy_data is always true when there is no manifest, and we returned
   * above in that case, so amanifest is guaranteed non-NULL here */
RGWObjManifest::obj_iterator miter = amanifest->obj_begin(dpp);
if (copy_first) { // we need to copy first chunk, not increase refcount
++miter;
}
bufferlist first_chunk;
const bool copy_itself = (dest_obj == src_obj);
RGWObjManifest *pmanifest;
ldpp_dout(dpp, 20) << "dest_obj=" << dest_obj << " src_obj=" << src_obj << " copy_itself=" << (int)copy_itself << dendl;
RGWRados::Object dest_op_target(this, dest_bucket_info, obj_ctx, dest_obj);
RGWRados::Object::Write write_op(&dest_op_target);
string tag;
if (ptag) {
tag = *ptag;
}
if (tag.empty()) {
append_rand_alpha(cct, tag, tag, 32);
}
std::unique_ptr<rgw::Aio> aio;
rgw::AioResultList all_results;
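  /* source and destination are distinct objects: take an extra refcount,
   * tagged with this operation's tag, on every tail rados object so both
   * heads can share the same tail; IOs go through the aio throttle */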
if (!copy_itself) {
aio = rgw::make_throttle(cct->_conf->rgw_max_copy_obj_concurrent_io, y);
attrs.erase(RGW_ATTR_TAIL_TAG);
manifest = *amanifest;
const rgw_bucket_placement& tail_placement = manifest.get_tail_placement();
if (tail_placement.bucket.name.empty()) {
manifest.set_tail_placement(tail_placement.placement_rule, src_obj.bucket);
}
string ref_tag;
for (; miter != amanifest->obj_end(dpp); ++miter) {
ObjectWriteOperation op;
ref_tag = tag + '\0';
cls_refcount_get(op, ref_tag, true);
auto obj = svc.rados->obj(miter.get_location().get_raw_obj(this));
ret = obj.open(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed to open rados context for " << obj << dendl;
goto done_ret;
}
static constexpr uint64_t cost = 1; // 1 throttle unit per request
static constexpr uint64_t id = 0; // ids unused
auto& ref = obj.get_ref();
rgw::AioResultList completed = aio->get(ref.obj, rgw::Aio::librados_op(ref.pool.ioctx(), std::move(op), y), cost, id);
ret = rgw::check_for_errors(completed);
all_results.splice(all_results.end(), completed);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to copy obj=" << obj << ", the error code = " << ret << dendl;
goto done_ret;
}
}
rgw::AioResultList completed = aio->drain();
ret = rgw::check_for_errors(completed);
all_results.splice(all_results.end(), completed);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to drain ios, the error code = " << ret <<dendl;
goto done_ret;
}
pmanifest = &manifest;
} else {
pmanifest = amanifest;
/* don't send the object's tail for garbage collection */
astate->keep_tail = true;
}
if (copy_first) {
ret = read_op.read(0, max_chunk_size, first_chunk, y, dpp);
if (ret < 0) {
goto done_ret;
}
pmanifest->set_head(dest_bucket_info.placement_rule, dest_obj, first_chunk.length());
} else {
pmanifest->set_head(dest_bucket_info.placement_rule, dest_obj, 0);
}
write_op.meta.data = &first_chunk;
write_op.meta.manifest = pmanifest;
write_op.meta.ptag = &tag;
write_op.meta.owner = dest_bucket_info.owner;
write_op.meta.mtime = mtime;
write_op.meta.flags = PUT_OBJ_CREATE;
write_op.meta.category = category;
write_op.meta.olh_epoch = olh_epoch;
write_op.meta.delete_at = delete_at;
write_op.meta.modify_tail = !copy_itself;
ret = write_op.write_meta(dpp, obj_size, astate->accounted_size, attrs, y);
if (ret < 0) {
goto done_ret;
}
return 0;
done_ret:
if (!copy_itself) {
/* wait all pending op done */
rgw::AioResultList completed = aio->drain();
all_results.splice(all_results.end(), completed);
/* rollback reference */
string ref_tag = tag + '\0';
int ret2 = 0;
for (auto& r : all_results) {
if (r.result < 0) {
continue; // skip errors
}
auto obj = svc.rados->obj(r.obj);
ret2 = obj.open(dpp);
if (ret2 < 0) {
continue;
}
auto& ref = obj.get_ref();
ObjectWriteOperation op;
cls_refcount_put(op, ref_tag, true);
static constexpr uint64_t cost = 1; // 1 throttle unit per request
static constexpr uint64_t id = 0; // ids unused
rgw::AioResultList completed = aio->get(ref.obj, rgw::Aio::librados_op(ref.pool.ioctx(), std::move(op), y), cost, id);
ret2 = rgw::check_for_errors(completed);
if (ret2 < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup after error failed to drop reference on obj=" << r.obj << dendl;
}
}
completed = aio->drain();
ret2 = rgw::check_for_errors(completed);
if (ret2 < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to drain rollback ios, the error code = " << ret2 <<dendl;
}
}
return ret;
}
int RGWRados::copy_obj_data(RGWObjectCtx& obj_ctx,
RGWBucketInfo& dest_bucket_info,
const rgw_placement_rule& dest_placement,
RGWRados::Object::Read& read_op, off_t end,
const rgw_obj& dest_obj,
real_time *mtime,
real_time set_mtime,
rgw::sal::Attrs& attrs,
uint64_t olh_epoch,
real_time delete_at,
string *petag,
const DoutPrefixProvider *dpp,
optional_yield y)
{
string tag;
append_rand_alpha(cct, tag, tag, 32);
auto aio = rgw::make_throttle(cct->_conf->rgw_put_obj_min_window_size, y);
using namespace rgw::putobj;
AtomicObjectProcessor processor(aio.get(), this, dest_bucket_info,
&dest_placement, dest_bucket_info.owner,
obj_ctx, dest_obj, olh_epoch, tag, dpp, y);
int ret = processor.prepare(y);
if (ret < 0)
return ret;
off_t ofs = 0;
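  /* stream the source range by range; read() returns the number of bytes
   * actually read, which may be less than requested */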
do {
bufferlist bl;
ret = read_op.read(ofs, end, bl, y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: fail to read object data, ret = " << ret << dendl;
return ret;
}
uint64_t read_len = ret;
ret = processor.process(std::move(bl), ofs);
if (ret < 0) {
return ret;
}
ofs += read_len;
} while (ofs <= end);
// flush
ret = processor.process({}, ofs);
if (ret < 0) {
return ret;
}
string etag;
auto iter = attrs.find(RGW_ATTR_ETAG);
if (iter != attrs.end()) {
bufferlist& bl = iter->second;
etag = bl.to_str();
if (petag) {
*petag = etag;
}
}
uint64_t accounted_size;
{
bool compressed{false};
RGWCompressionInfo cs_info;
ret = rgw_compression_info_from_attrset(attrs, compressed, cs_info);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to read compression info" << dendl;
return ret;
}
// pass original size if compressed
accounted_size = compressed ? cs_info.orig_size : ofs;
}
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
nullptr, nullptr, nullptr, nullptr, nullptr, y);
}
int RGWRados::transition_obj(RGWObjectCtx& obj_ctx,
RGWBucketInfo& bucket_info,
const rgw_obj& obj,
const rgw_placement_rule& placement_rule,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider *dpp,
optional_yield y)
{
rgw::sal::Attrs attrs;
real_time read_mtime;
uint64_t obj_size;
obj_ctx.set_atomic(obj);
RGWRados::Object op_target(this, bucket_info, obj_ctx, obj);
RGWRados::Object::Read read_op(&op_target);
read_op.params.attrs = &attrs;
read_op.params.lastmod = &read_mtime;
read_op.params.obj_size = &obj_size;
int ret = read_op.prepare(y, dpp);
if (ret < 0) {
return ret;
}
if (read_mtime != mtime) {
/* raced */
ldpp_dout(dpp, 0) << __func__ << " ERROR: failed to transition obj(" << obj.key << ") read_mtime = " << read_mtime << " doesn't match mtime = " << mtime << dendl;
return -ECANCELED;
}
attrs.erase(RGW_ATTR_ID_TAG);
attrs.erase(RGW_ATTR_TAIL_TAG);
ret = copy_obj_data(obj_ctx,
bucket_info,
placement_rule,
read_op,
obj_size - 1,
obj,
nullptr /* pmtime */,
mtime,
attrs,
olh_epoch,
real_time(),
nullptr /* petag */,
dpp,
y);
if (ret < 0) {
return ret;
}
return 0;
}
int RGWRados::check_bucket_empty(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, optional_yield y)
{
constexpr uint NUM_ENTRIES = 1000u;
rgw_obj_index_key marker;
string prefix;
bool is_truncated;
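  // page through unordered listings; ordering is irrelevant for an
  // emptiness check and unordered listing avoids a cross-shard merge sort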
do {
std::vector<rgw_bucket_dir_entry> ent_list;
ent_list.reserve(NUM_ENTRIES);
int r = cls_bucket_list_unordered(dpp,
bucket_info,
bucket_info.layout.current_index,
RGW_NO_SHARD,
marker,
prefix,
NUM_ENTRIES,
true,
ent_list,
&is_truncated,
&marker,
y);
if (r < 0) {
return r;
}
string ns;
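    // only entries in the plain namespace count as objects; entries in
    // special namespaces (e.g. multipart uploads) don't block deletion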
for (auto const& dirent : ent_list) {
rgw_obj_key obj;
if (rgw_obj_key::oid_to_key_in_ns(dirent.key.name, &obj, ns)) {
return -ENOTEMPTY;
}
}
} while (is_truncated);
return 0;
}
/**
* Delete a bucket.
* bucket: the name of the bucket to delete
* Returns 0 on success, -ERR# otherwise.
*/
int RGWRados::delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp, bool check_empty)
{
const rgw_bucket& bucket = bucket_info.bucket;
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, bucket_info.layout.current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
if (check_empty) {
r = check_bucket_empty(dpp, bucket_info, y);
if (r < 0) {
return r;
}
}
bool remove_ep = true;
if (objv_tracker.read_version.empty()) {
RGWBucketEntryPoint ep;
r = ctl.bucket->read_bucket_entrypoint_info(bucket_info.bucket,
&ep,
null_yield,
dpp,
RGWBucketCtl::Bucket::GetParams()
.set_objv_tracker(&objv_tracker));
if (r < 0 ||
(!bucket_info.bucket.bucket_id.empty() &&
ep.bucket.bucket_id != bucket_info.bucket.bucket_id)) {
if (r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: read_bucket_entrypoint_info() bucket=" << bucket_info.bucket << " returned error: r=" << r << dendl;
/* we have no idea what caused the error, will not try to remove it */
}
/*
* either failed to read bucket entrypoint, or it points to a different bucket instance than
* requested
*/
remove_ep = false;
}
}
if (remove_ep) {
r = ctl.bucket->remove_bucket_entrypoint_info(bucket_info.bucket, y, dpp,
RGWBucketCtl::Bucket::RemoveParams()
.set_objv_tracker(&objv_tracker));
if (r < 0)
return r;
}
/* if the bucket is not synced we can remove the meta file */
if (!svc.zone->is_syncing_bucket_meta(bucket)) {
RGWObjVersionTracker objv_tracker;
r = ctl.bucket->remove_bucket_instance_info(bucket, bucket_info, y, dpp);
if (r < 0) {
return r;
}
/* remove bucket index objects asynchronously by best effort */
(void) CLSRGWIssueBucketIndexClean(index_pool.ioctx(),
bucket_objs,
cct->_conf->rgw_bucket_index_max_aio)();
}
return 0;
}
int RGWRados::set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp, optional_yield y)
{
RGWBucketInfo info;
map<string, bufferlist> attrs;
int r;
if (bucket.bucket_id.empty()) {
r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, y, dpp, &attrs);
} else {
r = get_bucket_instance_info(bucket, info, nullptr, &attrs, y, dpp);
}
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
return r;
}
info.owner = owner.get_id();
r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << dendl;
return r;
}
return 0;
}
int RGWRados::set_buckets_enabled(vector<rgw_bucket>& buckets, bool enabled, const DoutPrefixProvider *dpp, optional_yield y)
{
int ret = 0;
vector<rgw_bucket>::iterator iter;
for (iter = buckets.begin(); iter != buckets.end(); ++iter) {
rgw_bucket& bucket = *iter;
if (enabled) {
ldpp_dout(dpp, 20) << "enabling bucket name=" << bucket.name << dendl;
} else {
ldpp_dout(dpp, 20) << "disabling bucket name=" << bucket.name << dendl;
}
RGWBucketInfo info;
map<string, bufferlist> attrs;
int r = get_bucket_info(&svc, bucket.tenant, bucket.name, info, NULL, y, dpp, &attrs);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: get_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
continue;
}
if (enabled) {
info.flags &= ~BUCKET_SUSPENDED;
} else {
info.flags |= BUCKET_SUSPENDED;
}
r = put_bucket_instance_info(info, false, real_time(), &attrs, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "NOTICE: put_bucket_info on bucket=" << bucket.name << " returned err=" << r << ", skipping bucket" << dendl;
ret = r;
continue;
}
}
return ret;
}
int RGWRados::bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended, optional_yield y)
{
RGWBucketInfo bucket_info;
int ret = get_bucket_info(&svc, bucket.tenant, bucket.name, bucket_info, NULL, y, dpp);
if (ret < 0) {
return ret;
}
*suspended = ((bucket_info.flags & BUCKET_SUSPENDED) != 0);
return 0;
}
int RGWRados::Object::complete_atomic_modification(const DoutPrefixProvider *dpp, optional_yield y)
{
  if ((!manifest) || state->keep_tail)
return 0;
cls_rgw_obj_chain chain;
store->update_gc_chain(dpp, obj, *manifest, &chain);
if (chain.empty()) {
return 0;
}
string tag = (state->tail_tag.length() > 0 ? state->tail_tag.to_str() : state->obj_tag.to_str());
if (store->gc == nullptr) {
ldpp_dout(dpp, 0) << "deleting objects inline since gc isn't initialized" << dendl;
    // delete objects inline in case gc hasn't been initialized; prevents crashes
store->delete_objs_inline(dpp, chain, tag);
} else {
auto [ret, leftover_chain] = store->gc->send_split_chain(chain, tag, y); // do it synchronously
if (ret < 0 && leftover_chain) {
      // delete objects inline if sending the chain to gc fails
store->delete_objs_inline(dpp, *leftover_chain, tag);
}
}
return 0;
}
void RGWRados::update_gc_chain(const DoutPrefixProvider *dpp, rgw_obj head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain)
{
RGWObjManifest::obj_iterator iter;
rgw_raw_obj raw_head;
obj_to_raw(manifest.get_head_placement_rule(), head_obj, &raw_head);
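  // walk the manifest and queue every tail rados object for GC; the head
  // is skipped since it is deleted separately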
for (iter = manifest.obj_begin(dpp); iter != manifest.obj_end(dpp); ++iter) {
const rgw_raw_obj& mobj = iter.get_location().get_raw_obj(this);
if (mobj == raw_head)
continue;
cls_rgw_obj_key key(mobj.oid);
chain->push_obj(mobj.pool.to_str(), key, mobj.loc);
}
}
std::tuple<int, std::optional<cls_rgw_obj_chain>> RGWRados::send_chain_to_gc(cls_rgw_obj_chain& chain, const string& tag, optional_yield y)
{
if (chain.empty()) {
return {0, std::nullopt};
}
return gc->send_split_chain(chain, tag, y);
}
void RGWRados::delete_objs_inline(const DoutPrefixProvider *dpp, cls_rgw_obj_chain& chain, const string& tag)
{
string last_pool;
std::unique_ptr<IoCtx> ctx(new IoCtx);
int ret = 0;
for (auto liter = chain.objs.begin(); liter != chain.objs.end(); ++liter) {
cls_rgw_obj& obj = *liter;
if (obj.pool != last_pool) {
ctx.reset(new IoCtx);
ret = rgw_init_ioctx(dpp, get_rados_handle(), obj.pool, *ctx);
if (ret < 0) {
last_pool = "";
ldpp_dout(dpp, 0) << "ERROR: failed to create ioctx pool=" <<
obj.pool << dendl;
continue;
}
last_pool = obj.pool;
}
ctx->locator_set_key(obj.loc);
const string& oid = obj.key.name; /* just stored raw oid there */
ldpp_dout(dpp, 5) << "delete_objs_inline: removing " << obj.pool <<
":" << obj.key.name << dendl;
ObjectWriteOperation op;
cls_refcount_put(op, tag, true);
ret = ctx->operate(oid, &op);
if (ret < 0) {
ldpp_dout(dpp, 5) << "delete_objs_inline: refcount put returned error " << ret << dendl;
}
}
}
static void accumulate_raw_stats(const rgw_bucket_dir_header& header,
map<RGWObjCategory, RGWStorageStats>& stats)
{
for (const auto& pair : header.stats) {
const RGWObjCategory category = static_cast<RGWObjCategory>(pair.first);
const rgw_bucket_category_stats& header_stats = pair.second;
RGWStorageStats& s = stats[category];
s.category = category;
s.size += header_stats.total_size;
s.size_rounded += header_stats.total_size_rounded;
s.size_utilized += header_stats.actual_size;
s.num_objects += header_stats.num_entries;
}
}
int RGWRados::bucket_check_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
map<RGWObjCategory, RGWStorageStats> *existing_stats,
map<RGWObjCategory, RGWStorageStats> *calculated_stats)
{
RGWSI_RADOS::Pool index_pool;
  // key - bucket index shard id
  // value - bucket index object id (oid) for that shard
map<int, string> oids;
int ret = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, bucket_info.layout.current_index, &index_pool, &oids, nullptr);
if (ret < 0) {
return ret;
}
// declare and pre-populate
map<int, struct rgw_cls_check_index_ret> bucket_objs_ret;
for (auto& iter : oids) {
bucket_objs_ret.emplace(iter.first, rgw_cls_check_index_ret());
}
ret = CLSRGWIssueBucketCheck(index_pool.ioctx(), oids, bucket_objs_ret, cct->_conf->rgw_bucket_index_max_aio)();
if (ret < 0) {
return ret;
}
// aggregate results (from different shards if there are any)
for (const auto& iter : bucket_objs_ret) {
accumulate_raw_stats(iter.second.existing_header, *existing_stats);
accumulate_raw_stats(iter.second.calculated_header, *calculated_stats);
}
return 0;
}
int RGWRados::bucket_rebuild_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, bucket_info.layout.current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0) {
return r;
}
return CLSRGWIssueBucketRebuild(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)();
}
int RGWRados::bucket_set_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, bucket_info.layout.current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": unable to open bucket index, r=" << r << " (" <<
cpp_strerror(-r) << ")" << dendl;
return r;
}
r = CLSRGWIssueSetBucketResharding(index_pool.ioctx(), bucket_objs, entry, cct->_conf->rgw_bucket_index_max_aio)();
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": unable to issue set bucket resharding, r=" << r << " (" <<
cpp_strerror(-r) << ")" << dendl;
}
return r;
}
int RGWRados::defer_gc(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y)
{
std::string oid, key;
get_obj_bucket_and_oid_loc(obj, oid, key);
if (!rctx)
return 0;
RGWObjState *state = NULL;
RGWObjManifest *manifest = nullptr;
int r = get_obj_state(dpp, rctx, bucket_info, obj, &state, &manifest, false, y);
if (r < 0)
return r;
if (!state->is_atomic) {
ldpp_dout(dpp, 20) << "state for obj=" << obj << " is not atomic, not deferring gc operation" << dendl;
return -EINVAL;
}
string tag;
if (state->tail_tag.length() > 0) {
tag = state->tail_tag.c_str();
} else if (state->obj_tag.length() > 0) {
tag = state->obj_tag.c_str();
} else {
ldpp_dout(dpp, 20) << "state->obj_tag is empty, not deferring gc operation" << dendl;
return -EINVAL;
}
ldpp_dout(dpp, 0) << "defer chain tag=" << tag << dendl;
cls_rgw_obj_chain chain;
update_gc_chain(dpp, state->obj, *manifest, &chain);
return gc->async_defer_chain(tag, chain);
}
void RGWRados::remove_rgw_head_obj(ObjectWriteOperation& op)
{
list<string> prefixes;
prefixes.push_back(RGW_ATTR_OLH_PREFIX);
cls_rgw_remove_obj(op, prefixes);
}
void RGWRados::cls_obj_check_prefix_exist(ObjectOperation& op, const string& prefix, bool fail_if_exist)
{
cls_rgw_obj_check_attrs_prefix(op, prefix, fail_if_exist);
}
void RGWRados::cls_obj_check_mtime(ObjectOperation& op, const real_time& mtime, bool high_precision_time, RGWCheckMTimeType type)
{
cls_rgw_obj_check_mtime(op, mtime, high_precision_time, type);
}
struct tombstone_entry {
ceph::real_time mtime;
uint32_t zone_short_id;
uint64_t pg_ver;
tombstone_entry() = default;
explicit tombstone_entry(const RGWObjState& state)
: mtime(state.mtime), zone_short_id(state.zone_short_id),
pg_ver(state.pg_ver) {}
};
/**
* Delete an object.
* bucket: name of the bucket storing the object
* obj: name of the object to delete
* Returns: 0 on success, -ERR# otherwise.
*/
int RGWRados::Object::Delete::delete_obj(optional_yield y, const DoutPrefixProvider *dpp)
{
RGWRados *store = target->get_store();
const rgw_obj& src_obj = target->get_obj();
const string& instance = src_obj.key.instance;
rgw_obj obj = target->get_obj();
if (instance == "null") {
obj.key.instance.clear();
}
bool explicit_marker_version = (!params.marker_version_id.empty());
if (params.versioning_status & BUCKET_VERSIONED || explicit_marker_version) {
if (instance.empty() || explicit_marker_version) {
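      // no instance given (or an explicit marker was requested): create a
      // delete marker through the OLH instead of removing any object data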
rgw_obj marker = obj;
marker.key.instance.clear();
if (!params.marker_version_id.empty()) {
if (params.marker_version_id != "null") {
marker.key.set_instance(params.marker_version_id);
}
} else if ((params.versioning_status & BUCKET_VERSIONS_SUSPENDED) == 0) {
store->gen_rand_obj_instance_name(&marker);
}
result.version_id = marker.key.instance;
if (result.version_id.empty())
result.version_id = "null";
result.delete_marker = true;
struct rgw_bucket_dir_entry_meta meta;
meta.owner = params.obj_owner.get_id().to_str();
meta.owner_display_name = params.obj_owner.get_display_name();
if (real_clock::is_zero(params.mtime)) {
meta.mtime = real_clock::now();
} else {
meta.mtime = params.mtime;
}
int r = store->set_olh(dpp, target->get_ctx(), target->get_bucket_info(), marker, true, &meta, params.olh_epoch, params.unmod_since, params.high_precision_time, y, params.zones_trace);
if (r < 0) {
return r;
}
} else {
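      // a specific version was requested: unlink that instance from the OLH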
rgw_bucket_dir_entry dirent;
int r = store->bi_get_instance(dpp, target->get_bucket_info(), obj, &dirent, y);
if (r < 0) {
return r;
}
result.delete_marker = dirent.is_delete_marker();
r = store->unlink_obj_instance(dpp, target->get_ctx(), target->get_bucket_info(), obj, params.olh_epoch, y, params.zones_trace);
if (r < 0) {
return r;
}
result.version_id = instance;
}
BucketShard *bs = nullptr;
int r = target->get_bucket_shard(&bs, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 5) << "failed to get BucketShard object: r=" << r << dendl;
return r;
}
add_datalog_entry(dpp, store->svc.datalog_rados,
target->get_bucket_info(), bs->shard_id, y);
return 0;
}
rgw_rados_ref ref;
int r = store->get_obj_head_ref(dpp, target->get_bucket_info(), obj, &ref);
if (r < 0) {
return r;
}
RGWObjState *state;
RGWObjManifest *manifest = nullptr;
r = target->get_state(dpp, &state, &manifest, false, y);
if (r < 0)
return r;
ObjectWriteOperation op;
if (!real_clock::is_zero(params.unmod_since)) {
struct timespec ctime = ceph::real_clock::to_timespec(state->mtime);
struct timespec unmod = ceph::real_clock::to_timespec(params.unmod_since);
if (!params.high_precision_time) {
ctime.tv_nsec = 0;
unmod.tv_nsec = 0;
}
ldpp_dout(dpp, 10) << "If-UnModified-Since: " << params.unmod_since << " Last-Modified: " << ctime << dendl;
if (ctime > unmod) {
return -ERR_PRECONDITION_FAILED;
}
/* only delete object if mtime is less than or equal to params.unmod_since */
store->cls_obj_check_mtime(op, params.unmod_since, params.high_precision_time, CLS_RGW_CHECK_TIME_MTIME_LE);
}
uint64_t obj_accounted_size = state->accounted_size;
  if (params.abortmp) {
obj_accounted_size = params.parts_accounted_size;
}
if (!real_clock::is_zero(params.expiration_time)) {
bufferlist bl;
real_time delete_at;
if (state->get_attr(RGW_ATTR_DELETE_AT, bl)) {
try {
auto iter = bl.cbegin();
decode(delete_at, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: couldn't decode RGW_ATTR_DELETE_AT" << dendl;
return -EIO;
}
if (params.expiration_time != delete_at) {
return -ERR_PRECONDITION_FAILED;
}
} else {
return -ERR_PRECONDITION_FAILED;
}
}
if (!state->exists) {
target->invalidate_state();
return -ENOENT;
}
r = target->prepare_atomic_modification(dpp, op, false, NULL, NULL, NULL, true, false, y);
if (r < 0)
return r;
RGWBucketInfo& bucket_info = target->get_bucket_info();
RGWRados::Bucket bop(store, bucket_info);
RGWRados::Bucket::UpdateIndex index_op(&bop, obj);
index_op.set_zones_trace(params.zones_trace);
index_op.set_bilog_flags(params.bilog_flags);
r = index_op.prepare(dpp, CLS_RGW_OP_DEL, &state->write_tag, y);
if (r < 0)
return r;
store->remove_rgw_head_obj(op);
auto& ioctx = ref.pool.ioctx();
r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, y);
/* raced with another operation, object state is indeterminate */
const bool need_invalidate = (r == -ECANCELED);
int64_t poolid = ioctx.get_id();
if (r >= 0) {
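    // remember the deletion in the tombstone cache so a later stat can
    // still report the deleted object's mtime, zone and pg version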
tombstone_cache_t *obj_tombstone_cache = store->get_tombstone_cache();
if (obj_tombstone_cache) {
tombstone_entry entry{*state};
obj_tombstone_cache->add(obj, entry);
}
r = index_op.complete_del(dpp, poolid, ioctx.get_last_version(), state->mtime, params.remove_objs, y);
int ret = target->complete_atomic_modification(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: complete_atomic_modification returned ret=" << ret << dendl;
}
/* other than that, no need to propagate error */
} else {
int ret = index_op.cancel(dpp, params.remove_objs, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: index_op.cancel() returned ret=" << ret << dendl;
}
}
if (need_invalidate) {
target->invalidate_state();
}
if (r < 0)
return r;
/* update quota cache */
store->quota_handler->update_stats(params.bucket_owner, obj.bucket, -1, 0, obj_accounted_size);
return 0;
}
int RGWRados::delete_obj(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
                         int versioning_status, // versioning flags defined in enum RGWBucketFlags
                         optional_yield y,
uint16_t bilog_flags,
const real_time& expiration_time,
rgw_zone_set *zones_trace)
{
RGWRados::Object del_target(this, bucket_info, obj_ctx, obj);
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = bucket_info.owner;
del_op.params.versioning_status = versioning_status;
del_op.params.bilog_flags = bilog_flags;
del_op.params.expiration_time = expiration_time;
del_op.params.zones_trace = zones_trace;
return del_op.delete_obj(y, dpp);
}
int RGWRados::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, optional_yield y)
{
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
ObjectWriteOperation op;
op.remove();
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
if (r < 0)
return r;
return 0;
}
int RGWRados::delete_obj_index(const rgw_obj& obj, ceph::real_time mtime,
const DoutPrefixProvider *dpp, optional_yield y)
{
std::string oid, key;
get_obj_bucket_and_oid_loc(obj, oid, key);
RGWBucketInfo bucket_info;
int ret = get_bucket_instance_info(obj.bucket, bucket_info, NULL, NULL, y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "() get_bucket_instance_info(bucket=" << obj.bucket << ") returned ret=" << ret << dendl;
return ret;
}
RGWRados::Bucket bop(this, bucket_info);
RGWRados::Bucket::UpdateIndex index_op(&bop, obj);
return index_op.complete_del(dpp, -1 /* pool */, 0, mtime, nullptr, y);
}
static void generate_fake_tag(const DoutPrefixProvider *dpp, RGWRados* store, map<string, bufferlist>& attrset, RGWObjManifest& manifest, bufferlist& manifest_bl, bufferlist& tag_bl)
{
string tag;
RGWObjManifest::obj_iterator mi = manifest.obj_begin(dpp);
if (mi != manifest.obj_end(dpp)) {
if (manifest.has_tail()) // first object usually points at the head, let's skip to a more unique part
++mi;
tag = mi.get_location().get_raw_obj(store).oid;
tag.append("_");
}
unsigned char md5[CEPH_CRYPTO_MD5_DIGESTSIZE];
char md5_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1];
MD5 hash;
// Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
hash.Update((const unsigned char *)manifest_bl.c_str(), manifest_bl.length());
map<string, bufferlist>::iterator iter = attrset.find(RGW_ATTR_ETAG);
if (iter != attrset.end()) {
bufferlist& bl = iter->second;
hash.Update((const unsigned char *)bl.c_str(), bl.length());
}
hash.Final(md5);
buf_to_hex(md5, CEPH_CRYPTO_MD5_DIGESTSIZE, md5_str);
tag.append(md5_str);
ldpp_dout(dpp, 10) << "generate_fake_tag new tag=" << tag << dendl;
tag_bl.append(tag.c_str(), tag.size() + 1);
}
static bool is_olh(map<string, bufferlist>& attrs)
{
map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_OLH_INFO);
return (iter != attrs.end());
}
static bool has_olh_tag(map<string, bufferlist>& attrs)
{
map<string, bufferlist>::iterator iter = attrs.find(RGW_ATTR_OLH_ID_TAG);
return (iter != attrs.end());
}
int RGWRados::get_olh_target_state(const DoutPrefixProvider *dpp, RGWObjectCtx&
obj_ctx, RGWBucketInfo& bucket_info,
const rgw_obj& obj, RGWObjState *olh_state,
RGWObjState **target_state,
RGWObjManifest **target_manifest, optional_yield y)
{
ceph_assert(olh_state->is_olh);
rgw_obj target;
int r = RGWRados::follow_olh(dpp, bucket_info, obj_ctx, olh_state, obj, &target, y); /* might return -EAGAIN */
if (r < 0) {
return r;
}
r = get_obj_state(dpp, &obj_ctx, bucket_info, target, target_state,
target_manifest, false, y);
if (r < 0) {
return r;
}
return 0;
}
int RGWRados::get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx,
RGWBucketInfo& bucket_info, const rgw_obj& obj,
RGWObjState **state, RGWObjManifest** manifest,
bool follow_olh, optional_yield y, bool assume_noent)
{
if (obj.empty()) {
return -EINVAL;
}
bool need_follow_olh = follow_olh && obj.key.instance.empty();
*manifest = nullptr;
RGWObjStateManifest *sm = rctx->get_state(obj);
RGWObjState *s = &(sm->state);
ldpp_dout(dpp, 20) << "get_obj_state: rctx=" << (void *)rctx << " obj=" << obj << " state=" << (void *)s << " s->prefetch_data=" << s->prefetch_data << dendl;
*state = s;
if (sm->manifest) {
*manifest = &(*sm->manifest);
}
if (s->has_attrs) {
if (s->is_olh && need_follow_olh) {
return get_olh_target_state(dpp, *rctx, bucket_info, obj, s, state, manifest, y);
}
return 0;
}
s->obj = obj;
rgw_raw_obj raw_obj;
obj_to_raw(bucket_info.placement_rule, obj, &raw_obj);
int r = -ENOENT;
if (!assume_noent) {
r = RGWRados::raw_obj_stat(dpp, raw_obj, &s->size, &s->mtime, &s->epoch, &s->attrset, (s->prefetch_data ? &s->data : NULL), NULL, y);
}
if (r == -ENOENT) {
s->exists = false;
s->has_attrs = true;
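    /* a recently deleted object may still be in the tombstone cache;
     * recover its mtime/zone/pg_ver so conditional copies (copy_if_newer)
     * can compare against the deleted version */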
tombstone_entry entry;
if (obj_tombstone_cache && obj_tombstone_cache->find(obj, entry)) {
s->mtime = entry.mtime;
s->zone_short_id = entry.zone_short_id;
s->pg_ver = entry.pg_ver;
ldpp_dout(dpp, 20) << __func__ << "(): found obj in tombstone cache: obj=" << obj
<< " mtime=" << s->mtime << " pgv=" << s->pg_ver << dendl;
} else {
s->mtime = real_time();
}
return 0;
}
if (r < 0)
return r;
s->exists = true;
s->has_attrs = true;
s->accounted_size = s->size;
auto iter = s->attrset.find(RGW_ATTR_ETAG);
if (iter != s->attrset.end()) {
/* get rid of extra null character at the end of the etag, as we used to store it like that */
bufferlist& bletag = iter->second;
if (bletag.length() > 0 && bletag[bletag.length() - 1] == '\0') {
bufferlist newbl;
bletag.splice(0, bletag.length() - 1, &newbl);
bletag = std::move(newbl);
}
}
iter = s->attrset.find(RGW_ATTR_COMPRESSION);
const bool compressed = (iter != s->attrset.end());
if (compressed) {
// use uncompressed size for accounted_size
try {
RGWCompressionInfo info;
auto p = iter->second.cbegin();
decode(info, p);
s->accounted_size = info.orig_size;
} catch (buffer::error&) {
ldpp_dout(dpp, 0) << "ERROR: could not decode compression info for object: " << obj << dendl;
return -EIO;
}
}
if (iter = s->attrset.find(RGW_ATTR_SHADOW_OBJ); iter != s->attrset.end()) {
const bufferlist& bl = iter->second;
auto it = bl.begin();
it.copy(bl.length(), s->shadow_obj);
s->shadow_obj[bl.length()] = '\0';
}
if (iter = s->attrset.find(RGW_ATTR_ID_TAG); iter != s->attrset.end()) {
s->obj_tag = iter->second;
}
if (iter = s->attrset.find(RGW_ATTR_TAIL_TAG); iter != s->attrset.end()) {
s->tail_tag = iter->second;
}
if (iter = s->attrset.find(RGW_ATTR_MANIFEST); iter != s->attrset.end()) {
bufferlist manifest_bl = iter->second;
auto miter = manifest_bl.cbegin();
try {
sm->manifest.emplace();
decode(*sm->manifest, miter);
sm->manifest->set_head(bucket_info.placement_rule, obj, s->size); /* patch manifest to reflect the head we just read, some manifests might be
broken due to old bugs */
s->size = sm->manifest->get_obj_size();
if (!compressed)
s->accounted_size = s->size;
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl;
return -EIO;
}
*manifest = &(*sm->manifest);
ldpp_dout(dpp, 10) << "manifest: total_size = " << sm->manifest->get_obj_size() << dendl;
    if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 20>() &&
sm->manifest->has_explicit_objs()) {
RGWObjManifest::obj_iterator mi;
for (mi = sm->manifest->obj_begin(dpp); mi != sm->manifest->obj_end(dpp); ++mi) {
ldpp_dout(dpp, 20) << "manifest: ofs=" << mi.get_ofs() << " loc=" << mi.get_location().get_raw_obj(this) << dendl;
}
}
if (!s->obj_tag.length()) {
      /*
       * Something's wrong: an object with a manifest should always have a
       * tag. Synthesize one from the manifest; it will be unique.
       */
generate_fake_tag(dpp, this, s->attrset, *sm->manifest, manifest_bl, s->obj_tag);
s->fake_tag = true;
}
}
if (iter = s->attrset.find(RGW_ATTR_PG_VER); iter != s->attrset.end()) {
const bufferlist& pg_ver_bl = iter->second;
if (pg_ver_bl.length()) {
auto pgbl = pg_ver_bl.cbegin();
try {
decode(s->pg_ver, pgbl);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: couldn't decode pg ver attr for object " << s->obj << ", non-critical error, ignoring" << dendl;
}
}
}
if (iter = s->attrset.find(RGW_ATTR_SOURCE_ZONE); iter != s->attrset.end()) {
const bufferlist& zone_short_id_bl = iter->second;
if (zone_short_id_bl.length()) {
auto zbl = zone_short_id_bl.cbegin();
try {
decode(s->zone_short_id, zbl);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: couldn't decode zone short id attr for object " << s->obj << ", non-critical error, ignoring" << dendl;
}
}
}
if (s->obj_tag.length()) {
ldpp_dout(dpp, 20) << "get_obj_state: setting s->obj_tag to " << s->obj_tag.c_str() << dendl;
} else {
ldpp_dout(dpp, 20) << "get_obj_state: s->obj_tag was set empty" << dendl;
}
  /* an object might not be an olh yet, but could have an olh id tag, so we
   * should set it anyway if it exists, and not only if is_olh() returns true
   */
if (iter = s->attrset.find(RGW_ATTR_OLH_ID_TAG); iter != s->attrset.end()) {
s->olh_tag = iter->second;
}
if (is_olh(s->attrset)) {
s->is_olh = true;
ldpp_dout(dpp, 20) << __func__ << ": setting s->olh_tag to " << string(s->olh_tag.c_str(), s->olh_tag.length()) << dendl;
if (need_follow_olh) {
return get_olh_target_state(dpp, *rctx, bucket_info, obj, s, state, manifest, y);
} else if (obj.key.have_null_instance() && !sm->manifest) {
// read null version, and the head object only have olh info
s->exists = false;
return -ENOENT;
}
}
return 0;
}
int RGWRados::get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWObjState **state, RGWObjManifest** manifest,
bool follow_olh, optional_yield y, bool assume_noent)
{
int ret;
do {
ret = get_obj_state_impl(dpp, rctx, bucket_info, obj, state, manifest, follow_olh, y, assume_noent);
} while (ret == -EAGAIN);
return ret;
}
int RGWRados::Object::get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y)
{
RGWObjState *astate;
int r = get_state(dpp, &astate, pmanifest, true, y);
if (r < 0) {
return r;
}
return 0;
}
int RGWRados::Object::Read::get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest, optional_yield y)
{
RGWObjState *state;
RGWObjManifest *manifest = nullptr;
int r = source->get_state(dpp, &state, &manifest, true, y);
if (r < 0)
return r;
if (!state->exists)
return -ENOENT;
if (!state->get_attr(name, dest))
return -ENODATA;
return 0;
}
int RGWRados::Object::Stat::stat_async(const DoutPrefixProvider *dpp)
{
RGWObjectCtx& ctx = source->get_ctx();
rgw_obj& obj = source->get_obj();
RGWRados *store = source->get_store();
RGWObjStateManifest *sm = ctx.get_state(obj);
result.obj = obj;
if (sm->state.has_attrs) {
state.ret = 0;
result.size = sm->state.size;
result.mtime = ceph::real_clock::to_timespec(sm->state.mtime);
result.attrs = sm->state.attrset;
result.manifest = sm->manifest;
return 0;
}
string oid;
string loc;
get_obj_bucket_and_oid_loc(obj, oid, loc);
int r = store->get_obj_head_ioctx(dpp, source->get_bucket_info(), obj, &state.io_ctx);
if (r < 0) {
return r;
}
librados::ObjectReadOperation op;
op.stat2(&result.size, &result.mtime, NULL);
op.getxattrs(&result.attrs, NULL);
state.completion = librados::Rados::aio_create_completion(nullptr, nullptr);
state.io_ctx.locator_set_key(loc);
r = state.io_ctx.aio_operate(oid, state.completion, &op, NULL);
if (r < 0) {
ldpp_dout(dpp, 5) << __func__
<< ": ERROR: aio_operate() returned ret=" << r
<< dendl;
return r;
}
return 0;
}
int RGWRados::Object::Stat::wait(const DoutPrefixProvider *dpp)
{
if (!state.completion) {
return state.ret;
}
state.completion->wait_for_complete();
state.ret = state.completion->get_return_value();
state.completion->release();
if (state.ret != 0) {
return state.ret;
}
return finish(dpp);
}
int RGWRados::Object::Stat::finish(const DoutPrefixProvider *dpp)
{
map<string, bufferlist>::iterator iter = result.attrs.find(RGW_ATTR_MANIFEST);
if (iter != result.attrs.end()) {
bufferlist& bl = iter->second;
auto biter = bl.cbegin();
try {
result.manifest.emplace();
decode(*result.manifest, biter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << ": failed to decode manifest" << dendl;
return -EIO;
}
}
return 0;
}
int RGWRados::append_atomic_test(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx,
RGWBucketInfo& bucket_info, const rgw_obj& obj,
ObjectOperation& op, RGWObjState **pstate,
RGWObjManifest** pmanifest, optional_yield y)
{
if (!rctx)
return 0;
int r = get_obj_state(dpp, rctx, bucket_info, obj, pstate, pmanifest, false, y);
if (r < 0)
return r;
return append_atomic_test(dpp, *pstate, op);
}
int RGWRados::append_atomic_test(const DoutPrefixProvider *dpp,
const RGWObjState* state,
librados::ObjectOperation& op)
{
if (!state->is_atomic) {
ldpp_dout(dpp, 20) << "state for obj=" << state->obj << " is not atomic, not appending atomic test" << dendl;
return 0;
}
if (state->obj_tag.length() > 0 && !state->fake_tag) {// check for backward compatibility
op.cmpxattr(RGW_ATTR_ID_TAG, LIBRADOS_CMPXATTR_OP_EQ, state->obj_tag);
} else {
ldpp_dout(dpp, 20) << "state->obj_tag is empty, not appending atomic test" << dendl;
}
return 0;
}
int RGWRados::Object::get_state(const DoutPrefixProvider *dpp, RGWObjState **pstate, RGWObjManifest **pmanifest, bool follow_olh, optional_yield y, bool assume_noent)
{
return store->get_obj_state(dpp, &ctx, bucket_info, obj, pstate, pmanifest, follow_olh, y, assume_noent);
}
void RGWRados::Object::invalidate_state()
{
ctx.invalidate(obj);
}
int RGWRados::Object::prepare_atomic_modification(const DoutPrefixProvider *dpp,
ObjectWriteOperation& op, bool reset_obj, const string *ptag,
const char *if_match, const char *if_nomatch, bool removal_op,
bool modify_tail, optional_yield y)
{
int r = get_state(dpp, &state, &manifest, false, y);
if (r < 0)
return r;
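  /* guard the write with a cmpxattr on the ID tag whenever there is
   * something real to compare against (a manifest, a stored tag, or
   * conditional headers); fake tags were synthesized locally and never
   * written, so they cannot be used as a guard */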
bool need_guard = ((manifest) || (state->obj_tag.length() != 0) ||
if_match != NULL || if_nomatch != NULL) &&
(!state->fake_tag);
if (!state->is_atomic) {
ldpp_dout(dpp, 20) << "prepare_atomic_modification: state is not atomic. state=" << (void *)state << dendl;
if (reset_obj) {
op.create(false);
store->remove_rgw_head_obj(op); // we're not dropping reference here, actually removing object
}
return 0;
}
if (need_guard) {
    /* first verify that the object wasn't replaced under us */
if (if_nomatch == NULL || strcmp(if_nomatch, "*") != 0) {
op.cmpxattr(RGW_ATTR_ID_TAG, LIBRADOS_CMPXATTR_OP_EQ, state->obj_tag);
// FIXME: need to add FAIL_NOTEXIST_OK for racing deletion
}
if (if_match) {
if (strcmp(if_match, "*") == 0) {
        // check that the object exists
if (!state->exists) {
return -ERR_PRECONDITION_FAILED;
}
} else {
bufferlist bl;
if (!state->get_attr(RGW_ATTR_ETAG, bl) ||
strncmp(if_match, bl.c_str(), bl.length()) != 0) {
return -ERR_PRECONDITION_FAILED;
}
}
}
if (if_nomatch) {
if (strcmp(if_nomatch, "*") == 0) {
        // check that the object does NOT exist
if (state->exists) {
return -ERR_PRECONDITION_FAILED;
}
} else {
bufferlist bl;
if (!state->get_attr(RGW_ATTR_ETAG, bl) ||
strncmp(if_nomatch, bl.c_str(), bl.length()) == 0) {
return -ERR_PRECONDITION_FAILED;
}
}
}
}
if (reset_obj) {
if (state->exists) {
op.create(false);
store->remove_rgw_head_obj(op);
} else {
op.create(true);
}
}
if (removal_op) {
/* the object is being removed, no need to update its tag */
return 0;
}
if (ptag) {
state->write_tag = *ptag;
} else {
append_rand_alpha(store->ctx(), state->write_tag, state->write_tag, 32);
}
bufferlist bl;
bl.append(state->write_tag.c_str(), state->write_tag.size() + 1);
ldpp_dout(dpp, 10) << "setting object write_tag=" << state->write_tag << dendl;
op.setxattr(RGW_ATTR_ID_TAG, bl);
if (modify_tail) {
op.setxattr(RGW_ATTR_TAIL_TAG, bl);
}
return 0;
}
/**
* Set an attr on an object.
* bucket: name of the bucket holding the object
* obj: name of the object to set the attr on
* name: the attr to set
* bl: the contents of the attr
* Returns: 0 on success, -ERR# otherwise.
*/
int RGWRados::set_attr(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, const char *name, bufferlist& bl, optional_yield y)
{
map<string, bufferlist> attrs;
attrs[name] = bl;
return set_attrs(dpp, rctx, bucket_info, obj, attrs, NULL, y);
}
int RGWRados::set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& src_obj,
map<string, bufferlist>& attrs,
map<string, bufferlist>* rmattrs,
optional_yield y)
{
rgw_obj obj = src_obj;
if (obj.key.instance == "null") {
obj.key.instance.clear();
}
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
ObjectWriteOperation op;
RGWObjState *state = NULL;
RGWObjManifest *manifest = nullptr;
r = append_atomic_test(dpp, rctx, bucket_info, obj, op, &state, &manifest, y);
if (r < 0)
return r;
  // ensure the null version object exists
if (src_obj.key.instance == "null" && !manifest) {
return -ENOENT;
}
map<string, bufferlist>::iterator iter;
if (rmattrs) {
for (iter = rmattrs->begin(); iter != rmattrs->end(); ++iter) {
const string& name = iter->first;
op.rmxattr(name.c_str());
}
}
const rgw_bucket& bucket = obj.bucket;
for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
const string& name = iter->first;
bufferlist& bl = iter->second;
if (!bl.length())
continue;
op.setxattr(name.c_str(), bl);
if (name.compare(RGW_ATTR_DELETE_AT) == 0) {
real_time ts;
try {
decode(ts, bl);
rgw_obj_index_key obj_key;
obj.key.get_index_key(&obj_key);
obj_expirer->hint_add(dpp, ts, bucket.tenant, bucket.name, bucket.bucket_id, obj_key);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode " RGW_ATTR_DELETE_AT << " attr" << dendl;
}
}
}
if (!op.size())
return 0;
bufferlist bl;
RGWRados::Bucket bop(this, bucket_info);
RGWRados::Bucket::UpdateIndex index_op(&bop, obj);
if (state) {
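    // we have cached object state, so run the full index prepare/complete
    // transaction and let the bucket index (and bilog) reflect the change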
string tag;
append_rand_alpha(cct, tag, tag, 32);
state->write_tag = tag;
r = index_op.prepare(dpp, CLS_RGW_OP_ADD, &state->write_tag, y);
if (r < 0)
return r;
bl.append(tag.c_str(), tag.size() + 1);
op.setxattr(RGW_ATTR_ID_TAG, bl);
}
/* As per https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html,
* the only way for users to modify object metadata is to make a copy of the object and
* set the metadata.
* Hence do not update mtime for any other attr changes */
real_time mtime = state->mtime;
struct timespec mtime_ts = real_clock::to_timespec(mtime);
op.mtime2(&mtime_ts);
auto& ioctx = ref.pool.ioctx();
r = rgw_rados_operate(dpp, ioctx, ref.obj.oid, &op, y);
if (state) {
if (r >= 0) {
bufferlist acl_bl;
if (iter = attrs.find(RGW_ATTR_ACL); iter != attrs.end()) {
acl_bl = iter->second;
}
std::string etag;
if (iter = attrs.find(RGW_ATTR_ETAG); iter != attrs.end()) {
etag = rgw_bl_str(iter->second);
}
std::string content_type;
if (iter = attrs.find(RGW_ATTR_CONTENT_TYPE); iter != attrs.end()) {
content_type = rgw_bl_str(iter->second);
}
string storage_class;
if (iter = attrs.find(RGW_ATTR_STORAGE_CLASS); iter != attrs.end()) {
storage_class = rgw_bl_str(iter->second);
}
uint64_t epoch = ioctx.get_last_version();
int64_t poolid = ioctx.get_id();
r = index_op.complete(dpp, poolid, epoch, state->size, state->accounted_size,
mtime, etag, content_type, storage_class, &acl_bl,
RGWObjCategory::Main, nullptr, y);
} else {
int ret = index_op.cancel(dpp, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: complete_update_index_cancel() returned ret=" << ret << dendl;
}
}
}
if (r < 0)
return r;
if (state) {
state->obj_tag.swap(bl);
if (rmattrs) {
for (iter = rmattrs->begin(); iter != rmattrs->end(); ++iter) {
state->attrset.erase(iter->first);
}
}
for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
state->attrset[iter->first] = iter->second;
}
auto iter = state->attrset.find(RGW_ATTR_ID_TAG);
if (iter != state->attrset.end()) {
iter->second = state->obj_tag;
}
}
return 0;
}
int RGWRados::Object::Read::prepare(optional_yield y, const DoutPrefixProvider *dpp)
{
RGWRados *store = source->get_store();
CephContext *cct = store->ctx();
bufferlist etag;
map<string, bufferlist>::iterator iter;
RGWObjState *astate;
RGWObjManifest *manifest = nullptr;
int r = source->get_state(dpp, &astate, &manifest, true, y);
if (r < 0)
return r;
if (!astate->exists) {
return -ENOENT;
}
const RGWBucketInfo& bucket_info = source->get_bucket_info();
state.obj = astate->obj;
store->obj_to_raw(bucket_info.placement_rule, state.obj, &state.head_obj);
state.cur_pool = state.head_obj.pool;
state.cur_ioctx = &state.io_ctxs[state.cur_pool];
r = store->get_obj_head_ioctx(dpp, bucket_info, state.obj, state.cur_ioctx);
if (r < 0) {
return r;
}
if (params.target_obj) {
*params.target_obj = state.obj;
}
if (params.attrs) {
*params.attrs = astate->attrset;
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (iter = params.attrs->begin(); iter != params.attrs->end(); ++iter) {
ldpp_dout(dpp, 20) << "Read xattr rgw_rados: " << iter->first << dendl;
}
}
}
  /* Convert all times to GMT to make them comparable */
if (conds.mod_ptr || conds.unmod_ptr) {
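    /* comparisons go through obj_time_weight, which breaks mtime ties
     * using the zone short id and pg version so multisite copies order
     * consistently */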
obj_time_weight src_weight;
src_weight.init(astate);
src_weight.high_precision = conds.high_precision_time;
obj_time_weight dest_weight;
dest_weight.high_precision = conds.high_precision_time;
if (conds.mod_ptr && !conds.if_nomatch) {
dest_weight.init(*conds.mod_ptr, conds.mod_zone_id, conds.mod_pg_ver);
ldpp_dout(dpp, 10) << "If-Modified-Since: " << dest_weight << " Last-Modified: " << src_weight << dendl;
if (!(dest_weight < src_weight)) {
return -ERR_NOT_MODIFIED;
}
}
if (conds.unmod_ptr && !conds.if_match) {
dest_weight.init(*conds.unmod_ptr, conds.mod_zone_id, conds.mod_pg_ver);
ldpp_dout(dpp, 10) << "If-UnModified-Since: " << dest_weight << " Last-Modified: " << src_weight << dendl;
if (dest_weight < src_weight) {
return -ERR_PRECONDITION_FAILED;
}
}
}
if (conds.if_match || conds.if_nomatch) {
r = get_attr(dpp, RGW_ATTR_ETAG, etag, y);
if (r < 0)
return r;
if (conds.if_match) {
string if_match_str = rgw_string_unquote(conds.if_match);
ldpp_dout(dpp, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-Match: " << if_match_str << dendl;
if (if_match_str.compare(0, etag.length(), etag.c_str(), etag.length()) != 0) {
return -ERR_PRECONDITION_FAILED;
}
}
if (conds.if_nomatch) {
string if_nomatch_str = rgw_string_unquote(conds.if_nomatch);
ldpp_dout(dpp, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-NoMatch: " << if_nomatch_str << dendl;
if (if_nomatch_str.compare(0, etag.length(), etag.c_str(), etag.length()) == 0) {
return -ERR_NOT_MODIFIED;
}
}
}
if (params.obj_size)
*params.obj_size = astate->size;
if (params.lastmod)
*params.lastmod = astate->mtime;
return 0;
}
int RGWRados::Object::Read::range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end)
{
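  // a negative ofs selects a suffix range, i.e. the last |ofs| bytes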
if (ofs < 0) {
ofs += obj_size;
if (ofs < 0)
ofs = 0;
end = obj_size - 1;
} else if (end < 0) {
end = obj_size - 1;
}
if (obj_size > 0) {
if (ofs >= (off_t)obj_size) {
return -ERANGE;
}
if (end >= (off_t)obj_size) {
end = obj_size - 1;
}
}
return 0;
}
int RGWRados::Bucket::UpdateIndex::guard_reshard(const DoutPrefixProvider *dpp, const rgw_obj& obj_instance, BucketShard **pbs, std::function<int(BucketShard *)> call, optional_yield y)
{
RGWRados *store = target->get_store();
BucketShard *bs = nullptr;
int r;
#define NUM_RESHARD_RETRIES 10
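  /* retry the shard callback while the index returns ERR_BUSY_RESHARDING,
   * blocking until the reshard settles; once a reshard completes we reset
   * the retry budget and invalidate the cached shard mapping */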
for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
int ret = get_bucket_shard(&bs, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to get BucketShard object. obj=" <<
obj_instance.key << ". ret=" << ret << dendl;
return ret;
}
r = call(bs);
if (r != -ERR_BUSY_RESHARDING) {
break;
}
ldpp_dout(dpp, 10) <<
"NOTICE: resharding operation on bucket index detected, blocking. obj=" <<
obj_instance.key << dendl;
r = store->block_while_resharding(bs, obj_instance, target->bucket_info, y, dpp);
if (r == -ERR_BUSY_RESHARDING) {
ldpp_dout(dpp, 10) << __func__ <<
" NOTICE: block_while_resharding() still busy. obj=" <<
obj_instance.key << dendl;
continue;
} else if (r < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: block_while_resharding() failed. obj=" <<
obj_instance.key << ". ret=" << cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 20) << "reshard completion identified. obj=" << obj_instance.key << dendl;
i = 0; /* resharding is finished, make sure we can retry */
invalidate_bs();
} // for loop
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: bucket shard callback failed. obj=" <<
obj_instance.key << ". ret=" << cpp_strerror(-r) << dendl;
return r;
}
if (pbs) {
*pbs = bs;
}
return 0;
}
int RGWRados::Bucket::UpdateIndex::prepare(const DoutPrefixProvider *dpp, RGWModifyOp op, const string *write_tag, optional_yield y)
{
if (blind) {
return 0;
}
RGWRados *store = target->get_store();
if (write_tag && write_tag->length()) {
optag = string(write_tag->c_str(), write_tag->length());
} else {
if (optag.empty()) {
append_rand_alpha(store->ctx(), optag, optag, 32);
}
}
int r = guard_reshard(dpp, obj, nullptr, [&](BucketShard *bs) -> int {
return store->cls_obj_prepare_op(dpp, *bs, op, optag, obj, bilog_flags, y, zones_trace);
}, y);
if (r < 0) {
return r;
}
prepared = true;
return 0;
}
int RGWRados::Bucket::UpdateIndex::complete(const DoutPrefixProvider *dpp, int64_t poolid, uint64_t epoch,
uint64_t size, uint64_t accounted_size,
ceph::real_time& ut, const string& etag,
const string& content_type, const string& storage_class,
bufferlist *acl_bl,
RGWObjCategory category,
list<rgw_obj_index_key> *remove_objs,
optional_yield y,
const string *user_data,
bool appendable)
{
if (blind) {
return 0;
}
RGWRados *store = target->get_store();
BucketShard *bs = nullptr;
int ret = get_bucket_shard(&bs, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
}
rgw_bucket_dir_entry ent;
obj.key.get_index_key(&ent.key);
ent.meta.size = size;
ent.meta.accounted_size = accounted_size;
ent.meta.mtime = ut;
ent.meta.etag = etag;
ent.meta.storage_class = storage_class;
if (user_data)
ent.meta.user_data = *user_data;
ACLOwner owner;
if (acl_bl && acl_bl->length()) {
int ret = store->decode_policy(dpp, *acl_bl, &owner);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not decode policy ret=" << ret << dendl;
}
}
ent.meta.owner = owner.get_id().to_str();
ent.meta.owner_display_name = owner.get_display_name();
ent.meta.content_type = content_type;
ent.meta.appendable = appendable;
ret = store->cls_obj_complete_add(*bs, obj, optag, poolid, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
add_datalog_entry(dpp, store->svc.datalog_rados,
target->bucket_info, bs->shard_id, y);
return ret;
}
int RGWRados::Bucket::UpdateIndex::complete_del(const DoutPrefixProvider *dpp,
int64_t poolid, uint64_t epoch,
real_time& removed_mtime,
list<rgw_obj_index_key> *remove_objs,
optional_yield y)
{
if (blind) {
return 0;
}
RGWRados *store = target->get_store();
BucketShard *bs = nullptr;
int ret = get_bucket_shard(&bs, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "failed to get BucketShard object: ret=" << ret << dendl;
return ret;
}
ret = store->cls_obj_complete_del(*bs, optag, poolid, epoch, obj, removed_mtime, remove_objs, bilog_flags, zones_trace);
add_datalog_entry(dpp, store->svc.datalog_rados,
target->bucket_info, bs->shard_id, y);
return ret;
}
int RGWRados::Bucket::UpdateIndex::cancel(const DoutPrefixProvider *dpp,
list<rgw_obj_index_key> *remove_objs,
optional_yield y)
{
if (blind) {
return 0;
}
RGWRados *store = target->get_store();
BucketShard *bs;
int ret = guard_reshard(dpp, obj, &bs, [&](BucketShard *bs) -> int {
return store->cls_obj_complete_cancel(*bs, optag, obj, remove_objs, bilog_flags, zones_trace);
}, y);
  /*
   * We need to update the data log regardless, so that anyone following this
   * bucket shard's log advances their internal markers. Otherwise followers
   * end up staying behind, with no way to tell that they're fully caught up.
   */
add_datalog_entry(dpp, store->svc.datalog_rados,
target->bucket_info, bs->shard_id, y);
return ret;
}
/*
* Read up through index `end` inclusive. Number of bytes read is up
* to `end - ofs + 1`.
*/
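/*
 * A single call may return fewer bytes than `end - ofs + 1`, because the
 * read is clamped to both the containing manifest stripe and the pool's
 * max_chunk_size. A minimal caller sketch (illustrative; read_op and
 * total_len are assumed to be set up by the caller):
 *
 *   int64_t ofs = 0, end = total_len - 1;
 *   while (ofs <= end) {
 *     bufferlist bl;                       // fresh buffer per call
 *     int r = read_op.read(ofs, end, bl, y, dpp);
 *     if (r < 0) return r;
 *     if (r == 0) break;                   // nothing left to read
 *     // consume bl ...
 *     ofs += r;                            // r == bytes returned in bl
 *   }
 */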
int RGWRados::Object::Read::read(int64_t ofs, int64_t end,
bufferlist& bl, optional_yield y,
const DoutPrefixProvider *dpp)
{
RGWRados *store = source->get_store();
rgw_raw_obj read_obj;
uint64_t read_ofs = ofs;
uint64_t len, read_len;
bool reading_from_head = true;
ObjectReadOperation op;
bool merge_bl = false;
bufferlist *pbl = &bl;
bufferlist read_bl;
uint64_t max_chunk_size;
RGWObjState *astate;
RGWObjManifest *manifest = nullptr;
int r = source->get_state(dpp, &astate, &manifest, true, y);
if (r < 0)
return r;
if (astate->size == 0) {
end = 0;
} else if (end >= (int64_t)astate->size) {
end = astate->size - 1;
}
if (end < 0)
len = 0;
else
len = end - ofs + 1;
if (manifest && manifest->has_tail()) {
/* now get the relevant object part */
RGWObjManifest::obj_iterator iter = manifest->obj_find(dpp, ofs);
uint64_t stripe_ofs = iter.get_stripe_ofs();
read_obj = iter.get_location().get_raw_obj(store);
len = std::min(len, iter.get_stripe_size() - (ofs - stripe_ofs));
read_ofs = iter.location_ofs() + (ofs - stripe_ofs);
reading_from_head = (read_obj == state.head_obj);
} else {
read_obj = state.head_obj;
}
r = store->get_max_chunk_size(read_obj.pool, &max_chunk_size, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to get max_chunk_size() for pool " << read_obj.pool << dendl;
return r;
}
if (len > max_chunk_size)
len = max_chunk_size;
read_len = len;
if (reading_from_head) {
/* only when reading from the head object do we need to do the atomic test */
r = store->append_atomic_test(dpp, &source->get_ctx(), source->get_bucket_info(), state.obj, op, &astate, &manifest, y);
if (r < 0)
return r;
if (astate && astate->prefetch_data) {
if (!ofs && astate->data.length() >= len) {
bl = astate->data;
return bl.length();
}
if (ofs < astate->data.length()) {
unsigned copy_len = std::min((uint64_t)astate->data.length() - ofs, len);
astate->data.begin(ofs).copy(copy_len, bl);
read_len -= copy_len;
read_ofs += copy_len;
if (!read_len)
return bl.length();
merge_bl = true;
pbl = &read_bl;
}
}
}
ldpp_dout(dpp, 20) << "rados->read obj-ofs=" << ofs << " read_ofs=" << read_ofs << " read_len=" << read_len << dendl;
op.read(read_ofs, read_len, pbl, NULL);
if (state.cur_pool != read_obj.pool) {
auto iter = state.io_ctxs.find(read_obj.pool);
if (iter == state.io_ctxs.end()) {
state.cur_ioctx = &state.io_ctxs[read_obj.pool];
r = store->open_pool_ctx(dpp, read_obj.pool, *state.cur_ioctx, false, true);
if (r < 0) {
ldpp_dout(dpp, 20) << "ERROR: failed to open pool context for pool=" << read_obj.pool << " r=" << r << dendl;
return r;
}
} else {
state.cur_ioctx = &iter->second;
}
state.cur_pool = read_obj.pool;
}
state.cur_ioctx->locator_set_key(read_obj.loc);
r = state.cur_ioctx->operate(read_obj.oid, &op, NULL);
ldpp_dout(dpp, 20) << "rados->read r=" << r << " bl.length=" << bl.length() << dendl;
if (r < 0) {
return r;
}
if (merge_bl) {
bl.append(read_bl);
}
return bl.length();
}
int get_obj_data::flush(rgw::AioResultList&& results) {
int r = rgw::check_for_errors(results);
if (r < 0) {
return r;
}
std::list<bufferlist> bl_list;
auto cmp = [](const auto& lhs, const auto& rhs) { return lhs.id < rhs.id; };
results.sort(cmp); // merge() requires results to be sorted first
completed.merge(results, cmp); // merge results in sorted order
while (!completed.empty() && completed.front().id == offset) {
auto bl = std::move(completed.front().data);
bl_list.push_back(bl);
offset += bl.length();
int r = client_cb->handle_data(bl, 0, bl.length());
if (r < 0) {
return r;
}
if (rgwrados->get_use_datacache()) {
const std::lock_guard l(d3n_get_data.d3n_lock);
auto oid = completed.front().obj.oid;
if (bl.length() <= g_conf()->rgw_get_obj_max_req_size && !d3n_bypass_cache_write) {
lsubdout(g_ceph_context, rgw_datacache, 10) << "D3nDataCache: " << __func__ << "(): bl.length <= rgw_get_obj_max_req_size (default 4MB) - write to datacache, bl.length=" << bl.length() << dendl;
rgwrados->d3n_data_cache->put(bl, bl.length(), oid);
} else {
lsubdout(g_ceph_context, rgw_datacache, 10) << "D3nDataCache: " << __func__ << "(): not writing to datacache - bl.length > rgw_get_obj_max_req_size (default 4MB), bl.length=" << bl.length() << " or d3n_bypass_cache_write=" << d3n_bypass_cache_write << dendl;
}
}
completed.pop_front_and_dispose(std::default_delete<rgw::AioResultEntry>{});
}
return 0;
}
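/*
 * Illustrative trace of the in-order flush above. Completions are keyed by
 * logical object offset (`id`), so out-of-order arrivals are buffered in
 * `completed` until the gap at `offset` is filled:
 *
 *   offset = 0; completions arrive as id=4M (len 4M), then id=0 (len 4M)
 *   - flush(id=4M): completed = {4M}; front().id != offset -> nothing sent
 *   - flush(id=0):  completed = {0, 4M} -> hand [0,4M) then [4M,8M) to the
 *                   client callback; offset advances to 8M
 */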
static int _get_obj_iterate_cb(const DoutPrefixProvider *dpp,
const rgw_raw_obj& read_obj, off_t obj_ofs,
off_t read_ofs, off_t len, bool is_head_obj,
RGWObjState *astate, void *arg)
{
struct get_obj_data* d = static_cast<struct get_obj_data*>(arg);
return d->rgwrados->get_obj_iterate_cb(dpp, read_obj, obj_ofs, read_ofs, len,
is_head_obj, astate, arg);
}
int RGWRados::get_obj_iterate_cb(const DoutPrefixProvider *dpp,
const rgw_raw_obj& read_obj, off_t obj_ofs,
off_t read_ofs, off_t len, bool is_head_obj,
RGWObjState *astate, void *arg)
{
ObjectReadOperation op;
struct get_obj_data* d = static_cast<struct get_obj_data*>(arg);
string oid, key;
if (is_head_obj) {
/* only when reading from the head object do we need to do the atomic test */
int r = append_atomic_test(dpp, astate, op);
if (r < 0)
return r;
if (astate &&
obj_ofs < astate->data.length()) {
unsigned chunk_len = std::min((uint64_t)astate->data.length() - obj_ofs, (uint64_t)len);
r = d->client_cb->handle_data(astate->data, obj_ofs, chunk_len);
if (r < 0)
return r;
len -= chunk_len;
d->offset += chunk_len;
read_ofs += chunk_len;
obj_ofs += chunk_len;
if (!len)
return 0;
}
}
auto obj = d->rgwrados->svc.rados->obj(read_obj);
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 4) << "failed to open rados context for " << read_obj << dendl;
return r;
}
ldpp_dout(dpp, 20) << "rados->get_obj_iterate_cb oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl;
op.read(read_ofs, len, nullptr, nullptr);
const uint64_t cost = len;
const uint64_t id = obj_ofs; // use logical object offset for sorting replies
auto& ref = obj.get_ref();
auto completed = d->aio->get(ref.obj, rgw::Aio::librados_op(ref.pool.ioctx(), std::move(op), d->yield), cost, id);
return d->flush(std::move(completed));
}
int RGWRados::Object::Read::iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb,
optional_yield y)
{
RGWRados *store = source->get_store();
CephContext *cct = store->ctx();
const uint64_t chunk_size = cct->_conf->rgw_get_obj_max_req_size;
const uint64_t window_size = cct->_conf->rgw_get_obj_window_size;
auto aio = rgw::make_throttle(window_size, y);
get_obj_data data(store, cb, &*aio, ofs, y);
int r = store->iterate_obj(dpp, source->get_ctx(), source->get_bucket_info(), state.obj,
ofs, end, chunk_size, _get_obj_iterate_cb, &data, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "iterate_obj() failed with " << r << dendl;
data.cancel(); // drain completions without writing back to client
return r;
}
return data.drain();
}
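/*
 * Sizing sketch (assumes the default config values; check your cluster's
 * settings): with rgw_get_obj_max_req_size = 4 MiB per rados read and
 * rgw_get_obj_window_size = 16 MiB of in-flight data, the throttle above
 * admits roughly window_size / chunk_size = 4 concurrent reads per GET.
 */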
int RGWRados::iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx,
RGWBucketInfo& bucket_info, const rgw_obj& obj,
off_t ofs, off_t end, uint64_t max_chunk_size,
iterate_obj_cb cb, void *arg, optional_yield y)
{
rgw_raw_obj head_obj;
rgw_raw_obj read_obj;
uint64_t read_ofs = ofs;
uint64_t len;
bool reading_from_head = true;
RGWObjState *astate = NULL;
RGWObjManifest *manifest = nullptr;
obj_to_raw(bucket_info.placement_rule, obj, &head_obj);
int r = get_obj_state(dpp, &obj_ctx, bucket_info, obj, &astate, &manifest, false, y);
if (r < 0) {
return r;
}
if (end < 0)
len = 0;
else
len = end - ofs + 1;
if (manifest) {
/* now get the relevant object stripe */
RGWObjManifest::obj_iterator iter = manifest->obj_find(dpp, ofs);
RGWObjManifest::obj_iterator obj_end = manifest->obj_end(dpp);
for (; iter != obj_end && ofs <= end; ++iter) {
off_t stripe_ofs = iter.get_stripe_ofs();
off_t next_stripe_ofs = stripe_ofs + iter.get_stripe_size();
while (ofs < next_stripe_ofs && ofs <= end) {
read_obj = iter.get_location().get_raw_obj(this);
uint64_t read_len = std::min(len, iter.get_stripe_size() - (ofs - stripe_ofs));
read_ofs = iter.location_ofs() + (ofs - stripe_ofs);
if (read_len > max_chunk_size) {
read_len = max_chunk_size;
}
reading_from_head = (read_obj == head_obj);
r = cb(dpp, read_obj, ofs, read_ofs, read_len, reading_from_head, astate, arg);
if (r < 0) {
return r;
}
len -= read_len;
ofs += read_len;
}
}
} else {
while (ofs <= end) {
read_obj = head_obj;
uint64_t read_len = std::min(len, max_chunk_size);
r = cb(dpp, read_obj, ofs, ofs, read_len, reading_from_head, astate, arg);
if (r < 0) {
return r;
}
len -= read_len;
ofs += read_len;
}
}
return 0;
}
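/*
 * Stripe-walk example (illustrative, assuming hypothetical 4 MiB stripes
 * and max_chunk_size = 1 MiB): a request for [3M, 9M) is issued as
 *
 *   stripe 0 [0,4M):   cb(read_ofs=3M, len=1M)                -> ofs = 4M
 *   stripe 1 [4M,8M):  cb(0,1M) cb(1M,1M) cb(2M,1M) cb(3M,1M) -> ofs = 8M
 *   stripe 2 [8M,12M): cb(0,1M)                               -> ofs = 9M
 *
 * i.e. each callback is clamped to both the current stripe boundary and
 * max_chunk_size, exactly as in the loops above (read_ofs shown relative
 * to the stripe's location offset).
 */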
int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectWriteOperation *op, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, y);
}
int RGWRados::obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, ObjectReadOperation *op, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
bufferlist outbl;
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, op, &outbl, y);
}
int RGWRados::olh_init_modification_impl(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, string *op_tag, optional_yield y)
{
ObjectWriteOperation op;
ceph_assert(olh_obj.key.instance.empty());
bool has_tag = (state.exists && has_olh_tag(state.attrset));
if (!state.exists) {
op.create(true);
} else {
op.assert_exists();
struct timespec mtime_ts = real_clock::to_timespec(state.mtime);
op.mtime2(&mtime_ts);
}
  /*
   * 3 possible cases: the olh object doesn't exist, it exists as an olh, or it exists as a
   * regular object. If it exists as a regular object we'll need to transform it into an olh.
   * We do that in two steps: first change its tag and set the olh pending attrs. Once the
   * write is done we'll need to truncate it, remove extra attrs, and send it to garbage
   * collection. The bucket index olh log will reflect that.
   *
   * We need to generate separate olh and obj tags, as the olh can be colocated with the
   * object data. obj_tag is used for the object data instance, olh_tag for the olh instance.
   */
if (has_tag) {
/* guard against racing writes */
bucket_index_guard_olh_op(dpp, state, op);
}
if (!has_tag) {
/* obj tag */
string obj_tag = gen_rand_alphanumeric_lower(cct, 32);
bufferlist bl;
bl.append(obj_tag.c_str(), obj_tag.size());
op.setxattr(RGW_ATTR_ID_TAG, bl);
state.attrset[RGW_ATTR_ID_TAG] = bl;
state.obj_tag = bl;
/* olh tag */
string olh_tag = gen_rand_alphanumeric_lower(cct, 32);
bufferlist olh_bl;
olh_bl.append(olh_tag.c_str(), olh_tag.size());
op.setxattr(RGW_ATTR_OLH_ID_TAG, olh_bl);
state.attrset[RGW_ATTR_OLH_ID_TAG] = olh_bl;
state.olh_tag = olh_bl;
state.is_olh = true;
bufferlist verbl;
op.setxattr(RGW_ATTR_OLH_VER, verbl);
}
bufferlist bl;
RGWOLHPendingInfo pending_info;
pending_info.time = real_clock::now();
encode(pending_info, bl);
#define OLH_PENDING_TAG_LEN 32
  /* the tag starts with the current epoch time, so that entries are sorted by time */
char buf[32];
utime_t ut(pending_info.time);
snprintf(buf, sizeof(buf), "%016llx", (unsigned long long)ut.sec());
*op_tag = buf;
string s = gen_rand_alphanumeric_lower(cct, OLH_PENDING_TAG_LEN - op_tag->size());
op_tag->append(s);
string attr_name = RGW_ATTR_OLH_PENDING_PREFIX;
attr_name.append(*op_tag);
op.setxattr(attr_name.c_str(), bl);
int ret = obj_operate(dpp, bucket_info, olh_obj, &op, y);
if (ret < 0) {
return ret;
}
state.exists = true;
state.attrset[attr_name] = bl;
return 0;
}
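/*
 * Example pending-attr name produced above (illustrative values; the hex
 * prefix is the op time's epoch seconds, the 16-char suffix is random):
 *
 *   RGW_ATTR_OLH_PENDING_PREFIX "00000000650a1b2c" + "x7kq..." (16 chars)
 *
 * The fixed-width hex timestamp makes pending entries sort by time, which
 * check_pending_olh_entries() relies on in order to stop scanning early.
 */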
int RGWRados::olh_init_modification(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj, string *op_tag, optional_yield y)
{
int ret;
ret = olh_init_modification_impl(dpp, bucket_info, state, obj, op_tag, y);
if (ret == -EEXIST) {
ret = -ECANCELED;
}
return ret;
}
int RGWRados::guard_reshard(const DoutPrefixProvider *dpp,
BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
std::function<int(BucketShard *)> call, optional_yield y)
{
rgw_obj obj;
const rgw_obj *pobj = &obj_instance;
int r;
for (int i = 0; i < NUM_RESHARD_RETRIES; ++i) {
r = bs->init(pobj->bucket, *pobj, nullptr /* no RGWBucketInfo */, dpp, y);
if (r < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << r << dendl;
return r;
}
r = call(bs);
if (r != -ERR_BUSY_RESHARDING) {
break;
}
ldpp_dout(dpp, 10) <<
"NOTICE: resharding operation on bucket index detected, blocking. obj=" <<
obj_instance.key << dendl;
r = block_while_resharding(bs, obj_instance, bucket_info, y, dpp);
if (r == -ERR_BUSY_RESHARDING) {
ldpp_dout(dpp, 10) << __func__ <<
" NOTICE: block_while_resharding() still busy. obj=" <<
obj_instance.key << dendl;
continue;
} else if (r < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: block_while_resharding() failed. obj=" <<
obj_instance.key << ". ret=" << cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 20) << "reshard completion identified" << dendl;
i = 0; /* resharding is finished, make sure we can retry */
} // for loop
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: bucket shard callback failed. obj=" <<
obj_instance.key << ". ret=" << cpp_strerror(-r) << dendl;
return r;
}
return 0;
}
int RGWRados::block_while_resharding(RGWRados::BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
optional_yield y,
const DoutPrefixProvider *dpp)
{
int ret = 0;
cls_rgw_bucket_instance_entry entry;
// gets loaded by fetch_new_bucket_info; can be used by
// clear_resharding
std::map<std::string, bufferlist> bucket_attrs;
  // since we want to run this recovery code from two distinct places,
  // let's just put it in a lambda so we can easily re-use; if the
  // lambda successfully refreshes the bucket info and re-initializes
  // the bucket shard it returns 0, otherwise it returns a negative
  // error code
auto fetch_new_bucket_info =
[this, bs, &obj_instance, &bucket_info, &bucket_attrs, &y, dpp](const std::string& log_tag) -> int {
int ret = get_bucket_info(&svc, bs->bucket.tenant, bs->bucket.name,
bucket_info, nullptr, y, dpp, &bucket_attrs);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to refresh bucket info after reshard at " <<
log_tag << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = bs->init(dpp, bucket_info, obj_instance, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to refresh bucket shard generation after reshard at " <<
log_tag << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
const auto gen = bucket_info.layout.logs.empty() ? -1 : bucket_info.layout.logs.back().gen;
ldpp_dout(dpp, 20) << __func__ <<
" INFO: refreshed bucket info after reshard at " <<
log_tag << ". new shard_id=" << bs->shard_id << ". gen=" << gen << dendl;
return 0;
}; // lambda fetch_new_bucket_info
constexpr int num_retries = 10;
for (int i = 1; i <= num_retries; i++) { // nb: 1-based for loop
auto& ref = bs->bucket_obj.get_ref();
ret = cls_rgw_get_bucket_resharding(ref.pool.ioctx(), ref.obj.oid, &entry);
if (ret == -ENOENT) {
ret = fetch_new_bucket_info("get_bucket_resharding_failed");
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" failed to refresh bucket info after reshard when get bucket "
"resharding failed, error: " << cpp_strerror(-ret) << dendl;
return ret;
}
} else if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to get bucket resharding : " << cpp_strerror(-ret) <<
dendl;
return ret;
}
if (!entry.resharding_in_progress()) {
ret = fetch_new_bucket_info("get_bucket_resharding_succeeded");
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" failed to refresh bucket info after reshard when get bucket "
"resharding succeeded, error: " << cpp_strerror(-ret) << dendl;
}
return ret;
}
ldpp_dout(dpp, 20) << __func__ << " NOTICE: reshard still in progress; " <<
(i < num_retries ? "retrying" : "too many retries") << dendl;
if (i == num_retries) {
break;
}
    // If the bucket is erroneously marked as resharding (e.g., after a
    // crash or other error) then fix it. If we can take the bucket
    // reshard lock then no other resharding should be taking place,
    // and we're free to clear the flags.
{
// since we expect to do this rarely, we'll do our work in a
// block and erase our work after each try
RGWObjectCtx obj_ctx(this->driver);
const rgw_bucket& b = bs->bucket;
std::string bucket_id = b.get_key();
RGWBucketReshardLock reshard_lock(this->driver, bucket_info, true);
ret = reshard_lock.lock(dpp);
if (ret == -ENOENT) {
continue;
} else if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ <<
" ERROR: failed to take reshard lock for bucket " <<
bucket_id << "; expected if resharding underway" << dendl;
// the reshard may have finished, so refresh bucket_obj to avoid
// acquiring reshard lock conflicts
ret = fetch_new_bucket_info("trying_to_refresh_bucket_info");
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to refresh bucket_obj for bucket " <<
bs->bucket.name << dendl;
return ret;
}
} else {
ldpp_dout(dpp, 10) << __func__ <<
" INFO: was able to take reshard lock for bucket " <<
bucket_id << dendl;
// the reshard may have finished, so call clear_resharding()
// with its current bucket info; ALSO this will load
// bucket_attrs for call to clear_resharding below
ret = fetch_new_bucket_info("trying_to_clear_resharding");
if (ret < 0) {
reshard_lock.unlock();
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to update bucket info before clear resharding for bucket " <<
bucket_id << dendl;
continue; // try again
}
ret = RGWBucketReshard::clear_resharding(this->driver, bucket_info, bucket_attrs, dpp, y);
reshard_lock.unlock();
if (ret == -ENOENT) {
ldpp_dout(dpp, 5) << __func__ <<
" INFO: no need to reset reshard flags; old shards apparently"
" removed after successful resharding of bucket " <<
bucket_id << dendl;
continue; // immediately test again
} else if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: failed to clear resharding flags for bucket " <<
bucket_id << ", " << cpp_strerror(-ret) << dendl;
// wait and then test again
} else {
ldpp_dout(dpp, 5) << __func__ <<
" INFO: apparently successfully cleared resharding flags for "
"bucket " << bucket_id << dendl;
        continue; // we apparently succeeded, so immediately test again
} // if clear resharding succeeded
} // if taking of lock succeeded
} // block to encapsulate recovery from incomplete reshard
ret = reshard_wait->wait(y);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: bucket is still resharding, please retry" << dendl;
return ret;
}
} // for loop
ldpp_dout(dpp, 0) << __func__ <<
" ERROR: bucket is still resharding, please retry" << dendl;
return -ERR_BUSY_RESHARDING;
}
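/*
 * Control-flow summary for block_while_resharding() (descriptive only):
 * each of the num_retries passes either (a) observes that resharding has
 * finished and returns after refreshing the bucket info, (b) recovers a
 * stale in-progress flag by taking the reshard lock and clearing it, or
 * (c) waits via reshard_wait before polling again; only after exhausting
 * all retries does the caller see -ERR_BUSY_RESHARDING.
 */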
int RGWRados::bucket_index_link_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
RGWObjState& olh_state, const rgw_obj& obj_instance,
bool delete_marker, const string& op_tag,
struct rgw_bucket_dir_entry_meta *meta,
uint64_t olh_epoch,
real_time unmod_since, bool high_precision_time,
optional_yield y,
rgw_zone_set *_zones_trace, bool log_data_change)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
rgw_zone_set zones_trace;
if (_zones_trace) {
zones_trace = *_zones_trace;
}
zones_trace.insert(svc.zone->get_zone().id, bucket_info.bucket.get_key());
BucketShard bs(this);
r = guard_reshard(dpp, &bs, obj_instance, bucket_info,
[&](BucketShard *bs) -> int {
cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), obj_instance.key.instance);
auto& ref = bs->bucket_obj.get_ref();
librados::ObjectWriteOperation op;
op.assert_exists(); // bucket index shard must exist
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_link_olh(op, key, olh_state.olh_tag,
delete_marker, op_tag, meta, olh_epoch,
unmod_since, high_precision_time,
svc.zone->need_to_log_data(), zones_trace);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
}, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_link_olh() returned r=" << r << dendl;
return r;
}
if (log_data_change) {
add_datalog_entry(dpp, svc.datalog_rados, bucket_info, bs.shard_id, y);
}
return 0;
}
void RGWRados::bucket_index_guard_olh_op(const DoutPrefixProvider *dpp, RGWObjState& olh_state, ObjectOperation& op)
{
ldpp_dout(dpp, 20) << __func__ << "(): olh_state.olh_tag=" << string(olh_state.olh_tag.c_str(), olh_state.olh_tag.length()) << dendl;
op.cmpxattr(RGW_ATTR_OLH_ID_TAG, CEPH_OSD_CMPXATTR_OP_EQ, olh_state.olh_tag);
}
int RGWRados::bucket_index_unlink_instance(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw_obj& obj_instance,
const string& op_tag, const string& olh_tag,
uint64_t olh_epoch, optional_yield y, rgw_zone_set *_zones_trace)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
rgw_zone_set zones_trace;
if (_zones_trace) {
zones_trace = *_zones_trace;
}
zones_trace.insert(svc.zone->get_zone().id, bucket_info.bucket.get_key());
BucketShard bs(this);
cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), obj_instance.key.instance);
r = guard_reshard(dpp, &bs, obj_instance, bucket_info,
[&](BucketShard *bs) -> int {
auto& ref = bs->bucket_obj.get_ref();
librados::ObjectWriteOperation op;
op.assert_exists(); // bucket index shard must exist
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_unlink_instance(op, key, op_tag,
olh_tag, olh_epoch, svc.zone->need_to_log_data(), zones_trace);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
}, y);
if (r < 0) {
    ldpp_dout(dpp, 20) << "rgw_rados_operate() after cls_rgw_bucket_unlink_instance() returned r=" << r << dendl;
return r;
}
return 0;
}
int RGWRados::bucket_index_read_olh_log(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info, RGWObjState& state,
const rgw_obj& obj_instance, uint64_t ver_marker,
std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> > *log,
bool *is_truncated, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
BucketShard bs(this);
int ret =
bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
string olh_tag(state.olh_tag.c_str(), state.olh_tag.length());
cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), string());
auto& shard_ref = bs.bucket_obj.get_ref();
ObjectReadOperation op;
rgw_cls_read_olh_log_ret log_ret;
int op_ret = 0;
cls_rgw_get_olh_log(op, key, ver_marker, olh_tag, log_ret, op_ret);
bufferlist outbl;
r = rgw_rados_operate(dpp, shard_ref.pool.ioctx(), shard_ref.obj.oid, &op, &outbl, y);
if (r < 0) {
return r;
}
if (op_ret < 0) {
ldpp_dout(dpp, 20) << "cls_rgw_get_olh_log() returned op_ret=" << op_ret << dendl;
return op_ret;
}
*log = std::move(log_ret.log);
*is_truncated = log_ret.is_truncated;
return 0;
}
// a multisite sync bug resulted in the OLH head attributes being overwritten by
// the attributes from another zone, causing link_olh() to fail endlessly due to
// olh_tag mismatch. this attempts to detect this case and reconstruct the OLH
// attributes from the bucket index. see http://tracker.ceph.com/issues/37792
int RGWRados::repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info,
const rgw_obj& obj, optional_yield y)
{
// fetch the current olh entry from the bucket index
rgw_bucket_olh_entry olh;
int r = bi_get_olh(dpp, bucket_info, obj, &olh, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "repair_olh failed to read olh entry for " << obj << dendl;
return r;
}
if (olh.tag == rgw_bl_str(state->olh_tag)) { // mismatch already resolved?
return 0;
}
ldpp_dout(dpp, 4) << "repair_olh setting olh_tag=" << olh.tag
<< " key=" << olh.key << " delete_marker=" << olh.delete_marker << dendl;
// rewrite OLH_ID_TAG and OLH_INFO from current olh
ObjectWriteOperation op;
// assert this is the same olh tag we think we're fixing
bucket_index_guard_olh_op(dpp, *state, op);
// preserve existing mtime
struct timespec mtime_ts = ceph::real_clock::to_timespec(state->mtime);
op.mtime2(&mtime_ts);
{
bufferlist bl;
bl.append(olh.tag.c_str(), olh.tag.size());
op.setxattr(RGW_ATTR_OLH_ID_TAG, bl);
}
{
RGWOLHInfo info;
info.target = rgw_obj(bucket_info.bucket, olh.key);
info.removed = olh.delete_marker;
bufferlist bl;
encode(info, bl);
op.setxattr(RGW_ATTR_OLH_INFO, bl);
}
rgw_rados_ref ref;
r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "repair_olh failed to write olh attributes with "
<< cpp_strerror(r) << dendl;
return r;
}
return 0;
}
int RGWRados::bucket_index_trim_olh_log(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWObjState& state,
const rgw_obj& obj_instance, uint64_t ver, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
BucketShard bs(this);
int ret =
bs.init(obj_instance.bucket, obj_instance, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
string olh_tag(state.olh_tag.c_str(), state.olh_tag.length());
cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), string());
ret = guard_reshard(dpp, &bs, obj_instance, bucket_info,
[&](BucketShard *pbs) -> int {
ObjectWriteOperation op;
op.assert_exists(); // bucket index shard must exist
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_trim_olh_log(op, key, ver, olh_tag);
return pbs->bucket_obj.operate(dpp, &op, y);
}, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "cls_rgw_trim_olh_log() returned r=" << ret << dendl;
return ret;
}
return 0;
}
int RGWRados::bucket_index_clear_olh(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWObjState& state,
const rgw_obj& obj_instance, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj_instance, &ref);
if (r < 0) {
return r;
}
BucketShard bs(this);
string olh_tag(state.olh_tag.c_str(), state.olh_tag.length());
cls_rgw_obj_key key(obj_instance.key.get_index_key_name(), string());
int ret = guard_reshard(dpp, &bs, obj_instance, bucket_info,
[&](BucketShard *pbs) -> int {
ObjectWriteOperation op;
op.assert_exists(); // bucket index shard must exist
auto& ref = pbs->bucket_obj.get_ref();
cls_rgw_guard_bucket_resharding(op, -ERR_BUSY_RESHARDING);
cls_rgw_clear_olh(op, key, olh_tag);
return rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
}, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "rgw_rados_operate() after cls_rgw_clear_olh() returned ret=" << ret << dendl;
return ret;
}
return 0;
}
static int decode_olh_info(const DoutPrefixProvider *dpp, CephContext* cct, const bufferlist& bl, RGWOLHInfo *olh)
{
try {
auto biter = bl.cbegin();
decode(*olh, biter);
return 0;
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode olh info" << dendl;
return -EIO;
}
}
int RGWRados::apply_olh_log(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
RGWObjState& state,
RGWBucketInfo& bucket_info,
const rgw_obj& obj,
bufferlist& olh_tag,
std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> >& log,
uint64_t *plast_ver,
optional_yield y, rgw_zone_set* zones_trace)
{
if (log.empty()) {
return 0;
}
librados::ObjectWriteOperation op;
uint64_t last_ver = log.rbegin()->first;
*plast_ver = last_ver;
map<uint64_t, vector<rgw_bucket_olh_log_entry> >::iterator iter = log.begin();
op.cmpxattr(RGW_ATTR_OLH_ID_TAG, CEPH_OSD_CMPXATTR_OP_EQ, olh_tag);
op.cmpxattr(RGW_ATTR_OLH_VER, CEPH_OSD_CMPXATTR_OP_GTE, last_ver);
bufferlist ver_bl;
string last_ver_s = to_string(last_ver);
ver_bl.append(last_ver_s.c_str(), last_ver_s.size());
op.setxattr(RGW_ATTR_OLH_VER, ver_bl);
struct timespec mtime_ts = real_clock::to_timespec(state.mtime);
op.mtime2(&mtime_ts);
bool need_to_link = false;
uint64_t link_epoch = 0;
cls_rgw_obj_key key;
bool delete_marker = false;
list<cls_rgw_obj_key> remove_instances;
bool need_to_remove = false;
// decode current epoch and instance
auto olh_ver = state.attrset.find(RGW_ATTR_OLH_VER);
if (olh_ver != state.attrset.end()) {
std::string str = olh_ver->second.to_str();
std::string err;
link_epoch = strict_strtoll(str.c_str(), 10, &err);
}
auto olh_info = state.attrset.find(RGW_ATTR_OLH_INFO);
if (olh_info != state.attrset.end()) {
RGWOLHInfo info;
int r = decode_olh_info(dpp, cct, olh_info->second, &info);
if (r < 0) {
return r;
}
info.target.key.get_index_key(&key);
delete_marker = info.removed;
}
for (iter = log.begin(); iter != log.end(); ++iter) {
vector<rgw_bucket_olh_log_entry>::iterator viter = iter->second.begin();
for (; viter != iter->second.end(); ++viter) {
rgw_bucket_olh_log_entry& entry = *viter;
ldpp_dout(dpp, 20) << "olh_log_entry: epoch=" << iter->first << " op=" << (int)entry.op
<< " key=" << entry.key.name << "[" << entry.key.instance << "] "
<< (entry.delete_marker ? "(delete)" : "") << dendl;
switch (entry.op) {
case CLS_RGW_OLH_OP_REMOVE_INSTANCE:
remove_instances.push_back(entry.key);
break;
case CLS_RGW_OLH_OP_LINK_OLH:
// only overwrite a link of the same epoch if its key sorts before
if (link_epoch < iter->first || key.instance.empty() ||
key.instance > entry.key.instance) {
ldpp_dout(dpp, 20) << "apply_olh_log applying key=" << entry.key << " epoch=" << iter->first << " delete_marker=" << entry.delete_marker
<< " over current=" << key << " epoch=" << link_epoch << " delete_marker=" << delete_marker << dendl;
need_to_link = true;
need_to_remove = false;
key = entry.key;
delete_marker = entry.delete_marker;
} else {
          ldpp_dout(dpp, 20) << "apply_olh_log skipping key=" << entry.key << " epoch=" << iter->first << " delete_marker=" << entry.delete_marker
              << " before current=" << key << " epoch=" << link_epoch << " delete_marker=" << delete_marker << dendl;
}
break;
case CLS_RGW_OLH_OP_UNLINK_OLH:
need_to_remove = true;
need_to_link = false;
break;
default:
ldpp_dout(dpp, 0) << "ERROR: apply_olh_log: invalid op: " << (int)entry.op << dendl;
return -EIO;
}
string attr_name = RGW_ATTR_OLH_PENDING_PREFIX;
attr_name.append(entry.op_tag);
op.rmxattr(attr_name.c_str());
}
}
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (r < 0) {
return r;
}
const rgw_bucket& bucket = obj.bucket;
if (need_to_link) {
rgw_obj target(bucket, key);
RGWOLHInfo info;
info.target = target;
info.removed = delete_marker;
bufferlist bl;
encode(info, bl);
op.setxattr(RGW_ATTR_OLH_INFO, bl);
}
/* first remove object instances */
for (list<cls_rgw_obj_key>::iterator liter = remove_instances.begin();
liter != remove_instances.end(); ++liter) {
cls_rgw_obj_key& key = *liter;
rgw_obj obj_instance(bucket, key);
int ret = delete_obj(dpp, obj_ctx, bucket_info, obj_instance, 0, y, RGW_BILOG_FLAG_VERSIONED_OP, ceph::real_time(), zones_trace);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: delete_obj() returned " << ret << " obj_instance=" << obj_instance << dendl;
return ret;
}
}
/* update olh object */
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
return r;
}
r = bucket_index_trim_olh_log(dpp, bucket_info, state, obj, last_ver, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: could not trim olh log, r=" << r << dendl;
return r;
}
if (need_to_remove) {
ObjectWriteOperation rm_op;
rm_op.cmpxattr(RGW_ATTR_OLH_ID_TAG, CEPH_OSD_CMPXATTR_OP_EQ, olh_tag);
rm_op.cmpxattr(RGW_ATTR_OLH_VER, CEPH_OSD_CMPXATTR_OP_EQ, last_ver);
    cls_obj_check_prefix_exist(rm_op, RGW_ATTR_OLH_PENDING_PREFIX, true); /* fail if any pending-modification attrs remain */
rm_op.remove();
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &rm_op, y);
if (r == -ECANCELED) {
return 0; /* someone else won this race */
} else {
      /*
       * only clear if the removal was successful, otherwise we might clobber pending operations on this object
       */
r = bucket_index_clear_olh(dpp, bucket_info, state, obj, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: could not clear bucket index olh entries r=" << r << dendl;
return r;
}
}
}
return 0;
}
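/*
 * Replay example (illustrative): given an olh log
 *
 *   epoch 5: LINK_OLH        key=foo[v1]
 *   epoch 6: LINK_OLH        key=foo[v2]
 *   epoch 7: REMOVE_INSTANCE key=foo[v1]
 *
 * the loop above ends with need_to_link=true and key=foo[v2], queues the
 * foo[v1] instance for deletion, sets RGW_ATTR_OLH_VER to 7, and trims the
 * olh log through epoch 7 in a single pass.
 */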
/*
* read olh log and apply it
*/
int RGWRados::update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y, rgw_zone_set *zones_trace)
{
map<uint64_t, vector<rgw_bucket_olh_log_entry> > log;
bool is_truncated;
uint64_t ver_marker = 0;
do {
int ret = bucket_index_read_olh_log(dpp, bucket_info, *state, obj, ver_marker, &log, &is_truncated, y);
if (ret < 0) {
return ret;
}
ret = apply_olh_log(dpp, obj_ctx, *state, bucket_info, obj, state->olh_tag, log, &ver_marker, y, zones_trace);
if (ret < 0) {
return ret;
}
} while (is_truncated);
return 0;
}
int RGWRados::set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx,
RGWBucketInfo& bucket_info,
const rgw_obj& target_obj, bool delete_marker,
rgw_bucket_dir_entry_meta *meta,
uint64_t olh_epoch, real_time unmod_since, bool high_precision_time,
optional_yield y, rgw_zone_set *zones_trace, bool log_data_change)
{
string op_tag;
rgw_obj olh_obj = target_obj;
olh_obj.key.instance.clear();
RGWObjState *state = NULL;
RGWObjManifest *manifest = nullptr;
int ret = 0;
int i;
#define MAX_ECANCELED_RETRY 100
for (i = 0; i < MAX_ECANCELED_RETRY; i++) {
if (ret == -ECANCELED) {
obj_ctx.invalidate(olh_obj);
}
ret = get_obj_state(dpp, &obj_ctx, bucket_info, olh_obj, &state, &manifest, false, y); /* don't follow olh */
if (ret < 0) {
return ret;
}
ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl;
if (ret == -ECANCELED) {
continue;
}
return ret;
}
ret = bucket_index_link_olh(dpp, bucket_info, *state, target_obj,
delete_marker, op_tag, meta, olh_epoch, unmod_since,
high_precision_time, y, zones_trace, log_data_change);
if (ret < 0) {
ldpp_dout(dpp, 20) << "bucket_index_link_olh() target_obj=" << target_obj << " delete_marker=" << (int)delete_marker << " returned " << ret << dendl;
if (ret == -ECANCELED) {
// the bucket index rejected the link_olh() due to olh tag mismatch;
// attempt to reconstruct olh head attributes based on the bucket index
int r2 = repair_olh(dpp, state, bucket_info, olh_obj, y);
if (r2 < 0 && r2 != -ECANCELED) {
return r2;
}
continue;
}
return ret;
}
break;
}
if (i == MAX_ECANCELED_RETRY) {
ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl;
return -EIO;
}
ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, y);
if (ret == -ECANCELED) { /* already did what we needed, no need to retry, raced with another user */
ret = 0;
}
if (ret < 0) {
ldpp_dout(dpp, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl;
return ret;
}
return 0;
}
int RGWRados::unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj,
uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace)
{
string op_tag;
rgw_obj olh_obj = target_obj;
olh_obj.key.instance.clear();
RGWObjState *state = NULL;
RGWObjManifest *manifest = NULL;
int ret = 0;
int i;
for (i = 0; i < MAX_ECANCELED_RETRY; i++) {
if (ret == -ECANCELED) {
obj_ctx.invalidate(olh_obj);
}
ret = get_obj_state(dpp, &obj_ctx, bucket_info, olh_obj, &state, &manifest, false, y); /* don't follow olh */
if (ret < 0)
return ret;
ret = olh_init_modification(dpp, bucket_info, *state, olh_obj, &op_tag, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "olh_init_modification() target_obj=" << target_obj << " returned " << ret << dendl;
if (ret == -ECANCELED) {
continue;
}
return ret;
}
string olh_tag(state->olh_tag.c_str(), state->olh_tag.length());
ret = bucket_index_unlink_instance(dpp, bucket_info, target_obj, op_tag, olh_tag, olh_epoch, y, zones_trace);
if (ret < 0) {
ldpp_dout(dpp, 20) << "bucket_index_unlink_instance() target_obj=" << target_obj << " returned " << ret << dendl;
if (ret == -ECANCELED) {
continue;
}
return ret;
}
break;
}
if (i == MAX_ECANCELED_RETRY) {
ldpp_dout(dpp, 0) << "ERROR: exceeded max ECANCELED retries, aborting (EIO)" << dendl;
return -EIO;
}
ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, y, zones_trace);
if (ret == -ECANCELED) { /* already did what we needed, no need to retry, raced with another user */
return 0;
}
if (ret < 0) {
ldpp_dout(dpp, 20) << "update_olh() target_obj=" << target_obj << " returned " << ret << dendl;
return ret;
}
return 0;
}
void RGWRados::gen_rand_obj_instance_name(rgw_obj_key *target_key)
{
#define OBJ_INSTANCE_LEN 32
char buf[OBJ_INSTANCE_LEN + 1];
  /* we don't want it to get url-escaped; no underscore in the instance
   * name due to the way we encode the raw keys */
  gen_rand_alphanumeric_no_underscore(cct, buf, OBJ_INSTANCE_LEN);
target_key->set_instance(buf);
}
void RGWRados::gen_rand_obj_instance_name(rgw_obj *target_obj)
{
gen_rand_obj_instance_name(&target_obj->key);
}
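/*
 * Example output (illustrative value): for key "photos/cat.jpg" this
 * yields a versioned key such as
 *
 *   photos/cat.jpg [instance: 7Hq3kPl0ZxYv2sMwA9cRb4dTeUgNfJ1i]
 *
 * i.e. 32 random alphanumeric characters with no underscore and nothing
 * that would be url-escaped, so the instance survives raw-key encoding.
 */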
int RGWRados::get_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh, optional_yield y)
{
map<string, bufferlist> attrset;
ObjectReadOperation op;
op.getxattrs(&attrset, NULL);
int r = obj_operate(dpp, bucket_info, obj, &op, y);
if (r < 0) {
return r;
}
auto iter = attrset.find(RGW_ATTR_OLH_INFO);
if (iter == attrset.end()) { /* not an olh */
return -EINVAL;
}
return decode_olh_info(dpp, cct, iter->second, olh);
}
void RGWRados::check_pending_olh_entries(const DoutPrefixProvider *dpp,
map<string, bufferlist>& pending_entries,
map<string, bufferlist> *rm_pending_entries)
{
map<string, bufferlist>::iterator iter = pending_entries.begin();
real_time now = real_clock::now();
while (iter != pending_entries.end()) {
auto biter = iter->second.cbegin();
RGWOLHPendingInfo pending_info;
try {
decode(pending_info, biter);
} catch (buffer::error& err) {
/* skipping bad entry, we could remove it but it might hide a bug */
ldpp_dout(dpp, 0) << "ERROR: failed to decode pending entry " << iter->first << dendl;
++iter;
continue;
}
map<string, bufferlist>::iterator cur_iter = iter;
++iter;
if (now - pending_info.time >= make_timespan(cct->_conf->rgw_olh_pending_timeout_sec)) {
(*rm_pending_entries)[cur_iter->first] = cur_iter->second;
pending_entries.erase(cur_iter);
} else {
      /* entry names are sorted by time (rounded to a second) */
break;
}
}
}
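/*
 * Pruning example (illustrative, assuming rgw_olh_pending_timeout_sec =
 * 3600): given pending entries stamped T-2h, T-90m and T-10s, the first
 * two are moved to rm_pending_entries (to be removed from the olh object)
 * and the scan stops at T-10s, since entry names sort by time.
 */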
int RGWRados::remove_olh_pending_entries(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, map<string, bufferlist>& pending_attrs, optional_yield y)
{
rgw_rados_ref ref;
int r = get_obj_head_ref(dpp, bucket_info, olh_obj, &ref);
if (r < 0) {
return r;
}
// trim no more than 1000 entries per osd op
constexpr int max_entries = 1000;
auto i = pending_attrs.begin();
while (i != pending_attrs.end()) {
ObjectWriteOperation op;
bucket_index_guard_olh_op(dpp, state, op);
for (int n = 0; n < max_entries && i != pending_attrs.end(); ++n, ++i) {
op.rmxattr(i->first.c_str());
}
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
if (r == -ENOENT || r == -ECANCELED) {
/* raced with some other change, shouldn't sweat about it */
return 0;
}
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: could not apply olh update, r=" << r << dendl;
return r;
}
}
return 0;
}
int RGWRados::follow_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjectCtx& obj_ctx, RGWObjState *state, const rgw_obj& olh_obj, rgw_obj *target, optional_yield y)
{
map<string, bufferlist> pending_entries;
rgw_filter_attrset(state->attrset, RGW_ATTR_OLH_PENDING_PREFIX, &pending_entries);
map<string, bufferlist> rm_pending_entries;
check_pending_olh_entries(dpp, pending_entries, &rm_pending_entries);
if (!rm_pending_entries.empty()) {
int ret = remove_olh_pending_entries(dpp, bucket_info, *state, olh_obj, rm_pending_entries, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "ERROR: rm_pending_entries returned ret=" << ret << dendl;
return ret;
}
}
if (!pending_entries.empty()) {
ldpp_dout(dpp, 20) << __func__ << "(): found pending entries, need to update_olh() on bucket=" << olh_obj.bucket << dendl;
int ret = update_olh(dpp, obj_ctx, state, bucket_info, olh_obj, y);
if (ret < 0) {
if (ret == -ECANCELED) {
// In this context, ECANCELED means that the OLH tag changed in either the bucket index entry or the OLH object.
// If the OLH tag changed, it indicates that a previous OLH entry was removed since this request started. We
// return ENOENT to indicate that the OLH object was removed.
ret = -ENOENT;
}
return ret;
}
}
auto iter = state->attrset.find(RGW_ATTR_OLH_INFO);
if (iter == state->attrset.end()) {
return -EINVAL;
}
RGWOLHInfo olh;
int ret = decode_olh_info(dpp, cct, iter->second, &olh);
if (ret < 0) {
return ret;
}
if (olh.removed) {
return -ENOENT;
}
*target = olh.target;
return 0;
}
int RGWRados::raw_obj_stat(const DoutPrefixProvider *dpp,
rgw_raw_obj& obj, uint64_t *psize, real_time *pmtime, uint64_t *epoch,
map<string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker, optional_yield y)
{
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
map<string, bufferlist> unfiltered_attrset;
uint64_t size = 0;
struct timespec mtime_ts;
ObjectReadOperation op;
if (objv_tracker) {
objv_tracker->prepare_op_for_read(&op);
}
if (attrs) {
op.getxattrs(&unfiltered_attrset, NULL);
}
if (psize || pmtime) {
op.stat2(&size, &mtime_ts, NULL);
}
if (first_chunk) {
op.read(0, cct->_conf->rgw_max_chunk_size, first_chunk, NULL);
}
bufferlist outbl;
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, &outbl, y);
if (epoch) {
*epoch = ref.pool.ioctx().get_last_version();
}
if (r < 0)
return r;
if (psize)
*psize = size;
if (pmtime)
*pmtime = ceph::real_clock::from_timespec(mtime_ts);
if (attrs) {
rgw_filter_attrset(unfiltered_attrset, RGW_ATTR_PREFIX, attrs);
}
return 0;
}
int RGWRados::get_bucket_stats(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id, string *bucket_ver, string *master_ver,
map<RGWObjCategory, RGWStorageStats>& stats,
string *max_marker, bool *syncstopped)
{
vector<rgw_bucket_dir_header> headers;
map<int, string> bucket_instance_ids;
int r = cls_bucket_head(dpp, bucket_info, idx_layout, shard_id, headers, &bucket_instance_ids);
if (r < 0) {
return r;
}
ceph_assert(headers.size() == bucket_instance_ids.size());
auto iter = headers.begin();
map<int, string>::iterator viter = bucket_instance_ids.begin();
BucketIndexShardsManager ver_mgr;
BucketIndexShardsManager master_ver_mgr;
BucketIndexShardsManager marker_mgr;
char buf[64];
for(; iter != headers.end(); ++iter, ++viter) {
accumulate_raw_stats(*iter, stats);
snprintf(buf, sizeof(buf), "%lu", (unsigned long)iter->ver);
ver_mgr.add(viter->first, string(buf));
snprintf(buf, sizeof(buf), "%lu", (unsigned long)iter->master_ver);
master_ver_mgr.add(viter->first, string(buf));
if (shard_id >= 0) {
*max_marker = iter->max_marker;
} else {
marker_mgr.add(viter->first, iter->max_marker);
}
if (syncstopped != NULL)
*syncstopped = iter->syncstopped;
}
ver_mgr.to_string(bucket_ver);
master_ver_mgr.to_string(master_ver);
if (shard_id < 0) {
marker_mgr.to_string(max_marker);
}
return 0;
}
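/*
 * The bucket_ver / master_ver / max_marker outputs are composite,
 * per-shard strings assembled by BucketIndexShardsManager, conceptually a
 * list of shard#value pairs (e.g. "0#12,1#9" -- an illustrative rendering
 * only; see BucketIndexShardsManager for the exact encoding).
 */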
class RGWGetBucketStatsContext : public RGWGetDirHeader_CB {
RGWGetBucketStats_CB *cb;
uint32_t pendings;
map<RGWObjCategory, RGWStorageStats> stats;
int ret_code;
bool should_cb;
ceph::mutex lock = ceph::make_mutex("RGWGetBucketStatsContext");
public:
RGWGetBucketStatsContext(RGWGetBucketStats_CB *_cb, uint32_t _pendings)
: cb(_cb), pendings(_pendings), stats(), ret_code(0), should_cb(true)
{}
void handle_response(int r, rgw_bucket_dir_header& header) override {
std::lock_guard l{lock};
if (should_cb) {
      if (r >= 0) {
accumulate_raw_stats(header, stats);
} else {
ret_code = r;
}
// Are we all done?
if (--pendings == 0) {
if (!ret_code) {
cb->set_response(&stats);
}
cb->handle_response(ret_code);
cb->put();
}
}
}
void unset_cb() {
std::lock_guard l{lock};
should_cb = false;
}
};
int RGWRados::get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int shard_id, RGWGetBucketStats_CB *ctx)
{
int num_aio = 0;
RGWGetBucketStatsContext *get_ctx = new RGWGetBucketStatsContext(ctx, bucket_info.layout.current_index.layout.normal.num_shards ? : 1);
ceph_assert(get_ctx);
int r = cls_bucket_head_async(dpp, bucket_info, idx_layout, shard_id, get_ctx, &num_aio);
if (r < 0) {
ctx->put();
if (num_aio) {
get_ctx->unset_cb();
}
}
get_ctx->put();
return r;
}
int RGWRados::get_bucket_instance_info(const string& meta_key,
RGWBucketInfo& info,
real_time *pmtime,
map<string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp)
{
rgw_bucket bucket;
rgw_bucket_parse_bucket_key(cct, meta_key, &bucket, nullptr);
return get_bucket_instance_info(bucket, info, pmtime, pattrs, y, dpp);
}
int RGWRados::get_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info,
real_time *pmtime, map<string, bufferlist> *pattrs, optional_yield y,
const DoutPrefixProvider *dpp)
{
return ctl.bucket->read_bucket_instance_info(bucket, &info,
y,
dpp,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(pmtime)
.set_attrs(pattrs));
}
int RGWRados::get_bucket_info(RGWServices *svc,
const string& tenant, const string& bucket_name,
RGWBucketInfo& info,
real_time *pmtime,
optional_yield y,
const DoutPrefixProvider *dpp, map<string, bufferlist> *pattrs)
{
rgw_bucket bucket;
bucket.tenant = tenant;
bucket.name = bucket_name;
return ctl.bucket->read_bucket_info(bucket, &info, y, dpp,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(pmtime)
.set_attrs(pattrs));
}
int RGWRados::try_refresh_bucket_info(RGWBucketInfo& info,
ceph::real_time *pmtime,
const DoutPrefixProvider *dpp, optional_yield y,
map<string, bufferlist> *pattrs)
{
rgw_bucket bucket = info.bucket;
bucket.bucket_id.clear();
auto rv = info.objv_tracker.read_version;
return ctl.bucket->read_bucket_info(bucket, &info, y, dpp,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(pmtime)
.set_attrs(pattrs)
.set_refresh_version(rv));
}
int RGWRados::put_bucket_instance_info(RGWBucketInfo& info, bool exclusive,
real_time mtime, map<string, bufferlist> *pattrs,
const DoutPrefixProvider *dpp, optional_yield y)
{
return ctl.bucket->store_bucket_instance_info(info.bucket, info, y, dpp,
RGWBucketCtl::BucketInstance::PutParams()
.set_exclusive(exclusive)
.set_mtime(mtime)
.set_attrs(pattrs));
}
int RGWRados::put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, real_time mtime, obj_version *pep_objv,
map<string, bufferlist> *pattrs, bool create_entry_point,
const DoutPrefixProvider *dpp, optional_yield y)
{
bool create_head = !info.has_instance_obj || create_entry_point;
int ret = put_bucket_instance_info(info, exclusive, mtime, pattrs, dpp, y);
if (ret < 0) {
return ret;
}
if (!create_head)
return 0; /* done! */
RGWBucketEntryPoint entry_point;
entry_point.bucket = info.bucket;
entry_point.owner = info.owner;
entry_point.creation_time = info.creation_time;
entry_point.linked = true;
RGWObjVersionTracker ot;
if (pep_objv && !pep_objv->tag.empty()) {
ot.write_version = *pep_objv;
} else {
ot.generate_new_write_ver(cct);
if (pep_objv) {
*pep_objv = ot.write_version;
}
}
ret = ctl.bucket->store_bucket_entrypoint_info(info.bucket, entry_point, y, dpp, RGWBucketCtl::Bucket::PutParams()
.set_exclusive(exclusive)
.set_objv_tracker(&ot)
.set_mtime(mtime));
if (ret < 0)
return ret;
return 0;
}
int RGWRados::update_containers_stats(map<string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp, optional_yield y)
{
map<string, RGWBucketEnt>::iterator iter;
for (iter = m.begin(); iter != m.end(); ++iter) {
RGWBucketEnt& ent = iter->second;
rgw_bucket& bucket = ent.bucket;
ent.count = 0;
ent.size = 0;
ent.size_rounded = 0;
vector<rgw_bucket_dir_header> headers;
RGWBucketInfo bucket_info;
int ret = get_bucket_instance_info(bucket, bucket_info, NULL, NULL, y, dpp);
if (ret < 0) {
return ret;
}
int r = cls_bucket_head(dpp, bucket_info, bucket_info.layout.current_index, RGW_NO_SHARD, headers);
if (r < 0)
return r;
auto hiter = headers.begin();
for (; hiter != headers.end(); ++hiter) {
RGWObjCategory category = main_category;
auto iter = (hiter->stats).find(category);
if (iter != hiter->stats.end()) {
struct rgw_bucket_category_stats& stats = iter->second;
ent.count += stats.num_entries;
ent.size += stats.total_size;
ent.size_rounded += stats.total_size_rounded;
}
}
// fill in placement_rule from the bucket instance for use in swift's
// per-storage policy statistics
ent.placement_rule = std::move(bucket_info.placement_rule);
}
return m.size();
}
int RGWRados::append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl)
{
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
librados::Rados *rad = get_rados_handle();
librados::AioCompletion *completion = rad->aio_create_completion(nullptr, nullptr);
r = ref.pool.ioctx().aio_append(ref.obj.oid, completion, bl, size);
completion->release();
return r;
}
int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, RGWPoolIterCtx& ctx)
{
librados::IoCtx& io_ctx = ctx.io_ctx;
librados::NObjectIterator& iter = ctx.iter;
int r = open_pool_ctx(dpp, pool, io_ctx, false, false);
if (r < 0)
return r;
iter = io_ctx.nobjects_begin();
return 0;
}
int RGWRados::pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& cursor, RGWPoolIterCtx& ctx)
{
librados::IoCtx& io_ctx = ctx.io_ctx;
librados::NObjectIterator& iter = ctx.iter;
int r = open_pool_ctx(dpp, pool, io_ctx, false, false);
if (r < 0)
return r;
librados::ObjectCursor oc;
if (!oc.from_str(cursor)) {
ldpp_dout(dpp, 10) << "failed to parse cursor: " << cursor << dendl;
return -EINVAL;
}
try {
iter = io_ctx.nobjects_begin(oc);
return 0;
} catch (const std::system_error& e) {
r = -e.code().value();
ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what()
<< ", returning " << r << dendl;
return r;
} catch (const std::exception& e) {
ldpp_dout(dpp, 10) << "nobjects_begin threw " << e.what()
<< ", returning -5" << dendl;
return -EIO;
}
}
string RGWRados::pool_iterate_get_cursor(RGWPoolIterCtx& ctx)
{
return ctx.iter.get_cursor().to_str();
}
static int do_pool_iterate(const DoutPrefixProvider *dpp, CephContext* cct, RGWPoolIterCtx& ctx, uint32_t num,
vector<rgw_bucket_dir_entry>& objs,
bool *is_truncated, RGWAccessListFilter *filter)
{
librados::IoCtx& io_ctx = ctx.io_ctx;
librados::NObjectIterator& iter = ctx.iter;
if (iter == io_ctx.nobjects_end())
return -ENOENT;
uint32_t i;
for (i = 0; i < num && iter != io_ctx.nobjects_end(); ++i, ++iter) {
rgw_bucket_dir_entry e;
string oid = iter->get_oid();
ldpp_dout(dpp, 20) << "RGWRados::pool_iterate: got " << oid << dendl;
// fill it in with initial values; we may correct later
if (filter && !filter->filter(oid, oid))
continue;
e.key = oid;
objs.push_back(e);
}
if (is_truncated)
*is_truncated = (iter != io_ctx.nobjects_end());
return objs.size();
}
int RGWRados::pool_iterate(const DoutPrefixProvider *dpp, RGWPoolIterCtx& ctx, uint32_t num, vector<rgw_bucket_dir_entry>& objs,
bool *is_truncated, RGWAccessListFilter *filter)
{
// catch exceptions from NObjectIterator::operator++()
try {
return do_pool_iterate(dpp, cct, ctx, num, objs, is_truncated, filter);
} catch (const std::system_error& e) {
int r = -e.code().value();
ldpp_dout(dpp, 10) << "NObjectIterator threw exception " << e.what()
<< ", returning " << r << dendl;
return r;
} catch (const std::exception& e) {
ldpp_dout(dpp, 10) << "NObjectIterator threw exception " << e.what()
<< ", returning -5" << dendl;
return -EIO;
}
}
int RGWRados::list_raw_objects_init(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& marker, RGWListRawObjsCtx *ctx)
{
if (!ctx->initialized) {
int r = pool_iterate_begin(dpp, pool, marker, ctx->iter_ctx);
if (r < 0) {
      ldpp_dout(dpp, 10) << "failed to list objects: pool_iterate_begin() returned r=" << r << dendl;
return r;
}
ctx->initialized = true;
}
return 0;
}
int RGWRados::list_raw_objects_next(const DoutPrefixProvider *dpp, const string& prefix_filter, int max,
RGWListRawObjsCtx& ctx, list<string>& oids,
bool *is_truncated)
{
if (!ctx.initialized) {
return -EINVAL;
}
RGWAccessListFilterPrefix filter(prefix_filter);
vector<rgw_bucket_dir_entry> objs;
int r = pool_iterate(dpp, ctx.iter_ctx, max, objs, is_truncated, &filter);
if (r < 0) {
    if (r != -ENOENT)
      ldpp_dout(dpp, 10) << "failed to list objects: pool_iterate() returned r=" << r << dendl;
return r;
}
vector<rgw_bucket_dir_entry>::iterator iter;
for (iter = objs.begin(); iter != objs.end(); ++iter) {
oids.push_back(iter->key.name);
}
return oids.size();
}
int RGWRados::list_raw_objects(const DoutPrefixProvider *dpp, const rgw_pool& pool, const string& prefix_filter,
int max, RGWListRawObjsCtx& ctx, list<string>& oids,
bool *is_truncated)
{
if (!ctx.initialized) {
int r = list_raw_objects_init(dpp, pool, string(), &ctx);
if (r < 0) {
return r;
}
}
return list_raw_objects_next(dpp, prefix_filter, max, ctx, oids, is_truncated);
}
string RGWRados::list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx)
{
return pool_iterate_get_cursor(ctx.iter_ctx);
}
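/*
 * Resumable listing sketch (illustrative caller pattern; store, dpp and
 * pool are assumed to be available, and the "meta." prefix is made up):
 *
 *   RGWListRawObjsCtx ctx;
 *   std::list<std::string> oids;
 *   bool truncated = true;
 *   while (truncated) {
 *     oids.clear();
 *     int r = store->list_raw_objects(dpp, pool, "meta.", 1000, ctx,
 *                                     oids, &truncated);
 *     if (r == -ENOENT) break;        // iteration exhausted / pool empty
 *     if (r < 0) return r;
 *     // process oids...; persist store->list_raw_objs_get_cursor(ctx) to
 *     // resume later via list_raw_objects_init(dpp, pool, cursor, &ctx)
 *   }
 */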
int RGWRados::bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
rgw_bucket_dir_entry *dirent, optional_yield y)
{
rgw_cls_bi_entry bi_entry;
int r = bi_get(dpp, bucket_info, obj, BIIndexType::Instance, &bi_entry, y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl;
}
if (r < 0) {
return r;
}
auto iter = bi_entry.data.cbegin();
try {
decode(*dirent, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl;
return -EIO;
}
return 0;
}
int RGWRados::bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
rgw_bucket_olh_entry *olh, optional_yield y)
{
rgw_cls_bi_entry bi_entry;
int r = bi_get(dpp, bucket_info, obj, BIIndexType::OLH, &bi_entry, y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: bi_get() returned r=" << r << dendl;
}
if (r < 0) {
return r;
}
auto iter = bi_entry.data.cbegin();
try {
decode(*olh, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode bi_entry()" << dendl;
return -EIO;
}
return 0;
}
int RGWRados::bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
BIIndexType index_type, rgw_cls_bi_entry *entry, optional_yield y)
{
BucketShard bs(this);
int ret = bs.init(dpp, bucket_info, obj, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
cls_rgw_obj_key key(obj.key.get_index_key_name(), obj.key.instance);
auto& ref = bs.bucket_obj.get_ref();
return cls_rgw_bi_get(ref.pool.ioctx(), ref.obj.oid, index_type, key, entry);
}
void RGWRados::bi_put(ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y)
{
auto& ref = bs.bucket_obj.get_ref();
cls_rgw_bi_put(op, ref.obj.oid, entry);
}
int RGWRados::bi_put(BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y)
{
auto& ref = bs.bucket_obj.get_ref();
int ret = cls_rgw_bi_put(ref.pool.ioctx(), ref.obj.oid, entry);
if (ret < 0)
return ret;
return 0;
}
int RGWRados::bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry, optional_yield y)
{
// make sure incomplete multipart uploads are hashed correctly
if (obj.key.ns == RGW_OBJ_NS_MULTIPART) {
RGWMPObj mp;
mp.from_meta(obj.key.name);
obj.index_hash_source = mp.get_key();
}
BucketShard bs(this);
int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
return bi_put(bs, entry, y);
}
int RGWRados::bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket,
const string& obj_name_filter, const string& marker, uint32_t max,
list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y)
{
rgw_obj obj(bucket, obj_name_filter);
BucketShard bs(this);
int ret = bs.init(bucket, obj, nullptr /* no RGWBucketInfo */, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
auto& ref = bs.bucket_obj.get_ref();
ret = cls_rgw_bi_list(ref.pool.ioctx(), ref.obj.oid, obj_name_filter, marker, max, entries, is_truncated);
if (ret == -ENOENT) {
*is_truncated = false;
}
if (ret < 0)
return ret;
return 0;
}
int RGWRados::bi_list(BucketShard& bs, const string& obj_name_filter, const string& marker, uint32_t max,
list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y)
{
auto& ref = bs.bucket_obj.get_ref();
int ret = cls_rgw_bi_list(ref.pool.ioctx(), ref.obj.oid, obj_name_filter, marker, max, entries, is_truncated);
if (ret < 0)
return ret;
return 0;
}
int RGWRados::bi_list(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info, int shard_id, const string& obj_name_filter, const string& marker, uint32_t max,
list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y)
{
BucketShard bs(this);
int ret = bs.init(dpp, bucket_info,
bucket_info.layout.current_index,
shard_id, y);
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.init() returned ret=" << ret << dendl;
return ret;
}
return bi_list(bs, obj_name_filter, marker, max, entries, is_truncated, y);
}
int RGWRados::bi_remove(const DoutPrefixProvider *dpp, BucketShard& bs)
{
auto& ref = bs.bucket_obj.get_ref();
int ret = ref.pool.ioctx().remove(ref.obj.oid);
if (ret == -ENOENT) {
ret = 0;
}
if (ret < 0) {
ldpp_dout(dpp, 5) << "bs.index_ctx.remove(" << bs.bucket_obj << ") returned ret=" << ret << dendl;
return ret;
}
return 0;
}
int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectWriteOperation *op, optional_yield y)
{
return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, y);
}
int RGWRados::gc_aio_operate(const string& oid, librados::AioCompletion *c,
librados::ObjectWriteOperation *op)
{
return gc_pool_ctx.aio_operate(oid, c, op);
}
int RGWRados::gc_operate(const DoutPrefixProvider *dpp, string& oid, librados::ObjectReadOperation *op, bufferlist *pbl, optional_yield y)
{
return rgw_rados_operate(dpp, gc_pool_ctx, oid, op, pbl, y);
}
int RGWRados::list_gc_objs(int *index, string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue)
{
return gc->list(index, marker, max, expired_only, result, truncated, processing_queue);
}
int RGWRados::process_gc(bool expired_only, optional_yield y)
{
return gc->process(expired_only, y);
}
int RGWRados::list_lc_progress(string& marker, uint32_t max_entries,
vector<std::unique_ptr<rgw::sal::Lifecycle::LCEntry>>& progress_map,
int& index)
{
return lc->list_lc_progress(marker, max_entries, progress_map, index);
}
int RGWRados::process_lc(const std::unique_ptr<rgw::sal::Bucket>& optional_bucket)
{
RGWLC lc;
lc.initialize(cct, this->driver);
RGWLC::LCWorker worker(&lc, cct, &lc, 0);
auto ret = lc.process(&worker, optional_bucket, true /* once */);
lc.stop_processor(); // sets down_flag, but returns immediately
return ret;
}
bool RGWRados::process_expire_objects(const DoutPrefixProvider *dpp, optional_yield y)
{
return obj_expirer->inspect_all_shards(dpp, utime_t(), ceph_clock_now(), y);
}
int RGWRados::cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, string& tag,
rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *_zones_trace)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx(bitx, dpp, 10) << "ENTERING " << __func__ << ": bucket-shard=" << bs << " obj=" << obj << " tag=" << tag << " op=" << op << dendl_bitx;
ldout_bitx(bitx, dpp, 25) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl_bitx;
rgw_zone_set zones_trace;
if (_zones_trace) {
zones_trace = *_zones_trace;
}
zones_trace.insert(svc.zone->get_zone().id, bs.bucket.get_key());
ObjectWriteOperation o;
o.assert_exists(); // bucket index shard must exist
cls_rgw_obj_key key(obj.key.get_index_key_name(), obj.key.instance);
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_prepare_op(o, op, tag, key, obj.key.get_loc(), svc.zone->need_to_log_data(), bilog_flags, zones_trace);
int ret = bs.bucket_obj.operate(dpp, &o, y);
ldout_bitx(bitx, dpp, 10) << "EXITING " << __func__ << ": ret=" << ret << dendl_bitx;
return ret;
}
int RGWRados::cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, string& tag,
int64_t pool, uint64_t epoch,
rgw_bucket_dir_entry& ent, RGWObjCategory category,
list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *_zones_trace)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx_c(bitx, cct, 10) << "ENTERING " << __func__ << ": bucket-shard=" << bs <<
" obj=" << obj << " tag=" << tag << " op=" << op <<
", remove_objs=" << (remove_objs ? *remove_objs : std::list<rgw_obj_index_key>()) << dendl_bitx;
ldout_bitx_c(bitx, cct, 25) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl_bitx;
ObjectWriteOperation o;
o.assert_exists(); // bucket index shard must exist
  rgw_bucket_dir_entry_meta dir_meta = ent.meta;
  dir_meta.category = category;
rgw_zone_set zones_trace;
if (_zones_trace) {
zones_trace = *_zones_trace;
}
zones_trace.insert(svc.zone->get_zone().id, bs.bucket.get_key());
rgw_bucket_entry_ver ver;
ver.pool = pool;
ver.epoch = epoch;
cls_rgw_obj_key key(ent.key.name, ent.key.instance);
cls_rgw_guard_bucket_resharding(o, -ERR_BUSY_RESHARDING);
cls_rgw_bucket_complete_op(o, op, tag, ver, key, dir_meta, remove_objs,
svc.zone->need_to_log_data(), bilog_flags, &zones_trace);
complete_op_data *arg;
index_completion_manager->create_completion(obj, op, tag, ver, key, dir_meta, remove_objs,
svc.zone->need_to_log_data(), bilog_flags, &zones_trace, &arg);
librados::AioCompletion *completion = arg->rados_completion;
int ret = bs.bucket_obj.aio_operate(arg->rados_completion, &o);
completion->release(); /* can't reference arg here, as it might have already been released */
ldout_bitx_c(bitx, cct, 10) << "EXITING " << __func__ << ": ret=" << ret << dendl_bitx;
return ret;
}
int RGWRados::cls_obj_complete_add(BucketShard& bs, const rgw_obj& obj, string& tag,
int64_t pool, uint64_t epoch,
rgw_bucket_dir_entry& ent, RGWObjCategory category,
list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace)
{
return cls_obj_complete_op(bs, obj, CLS_RGW_OP_ADD, tag, pool, epoch, ent, category, remove_objs, bilog_flags, zones_trace);
}
int RGWRados::cls_obj_complete_del(BucketShard& bs, string& tag,
int64_t pool, uint64_t epoch,
rgw_obj& obj,
real_time& removed_mtime,
list<rgw_obj_index_key> *remove_objs,
uint16_t bilog_flags,
rgw_zone_set *zones_trace)
{
rgw_bucket_dir_entry ent;
ent.meta.mtime = removed_mtime;
obj.key.get_index_key(&ent.key);
return cls_obj_complete_op(bs, obj, CLS_RGW_OP_DEL, tag, pool, epoch,
ent, RGWObjCategory::None, remove_objs,
bilog_flags, zones_trace);
}
int RGWRados::cls_obj_complete_cancel(BucketShard& bs, string& tag, rgw_obj& obj,
list<rgw_obj_index_key> *remove_objs,
uint16_t bilog_flags, rgw_zone_set *zones_trace)
{
rgw_bucket_dir_entry ent;
obj.key.get_index_key(&ent.key);
return cls_obj_complete_op(bs, obj, CLS_RGW_OP_CANCEL, tag,
-1 /* pool id */, 0, ent,
RGWObjCategory::None, remove_objs, bilog_flags,
zones_trace);
}
int RGWRados::cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt, bucket_info.layout.current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
return CLSRGWIssueSetTagTimeout(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio, timeout)();
}
// returns 0 if there is an error in calculation
uint32_t RGWRados::calc_ordered_bucket_list_per_shard(uint32_t num_entries,
uint32_t num_shards)
{
if (num_shards == 0) {
// we'll get a floating point exception since we divide by
// num_shards
return 0;
}
// We want to minimize the chances that when num_shards >>
// num_entries that we return much fewer than num_entries to the
// client. Given all the overhead of making a cls call to the osd,
// returning a few entries is not much more work than returning one
// entry. This minimum might be better tuned based on future
// experiments where num_shards >> num_entries. (Note: ">>" should
// be interpreted as "much greater than".)
constexpr uint32_t min_read = 8;
// The following is based on _"Balls into Bins" -- A Simple and
// Tight Analysis_ by Raab and Steger. We add 1 as a way to handle
// cases when num_shards >> num_entries (it almost serves as a
// ceiling calculation). We also assume alpha is 1.0 and extract it
// from the calculation. Future work could involve memoizing some of
// the transcendental functions to minimize repeatedly re-calling
// them with the same parameters, which we expect to be the case the
// majority of the time.
uint32_t calc_read =
1 +
static_cast<uint32_t>((num_entries / num_shards) +
sqrt((2 * num_entries) *
log(num_shards) / num_shards));
return std::max(min_read, calc_read);
}
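// Worked example (illustrative numbers only): with num_entries=1000 and
// num_shards=32, integer division gives 1000/32 = 31, the correction
// term is sqrt((2*1000) * ln(32) / 32) = sqrt(216.6) ~= 14.7, and so
// calc_read = 1 + (31 + 14) = 46 entries requested per shard. With
// num_entries=10 and num_shards=128 the formula yields calc_read = 1,
// so the min_read floor of 8 is returned instead.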
int RGWRados::cls_bucket_list_ordered(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
const int shard_id,
const rgw_obj_index_key& start_after,
const std::string& prefix,
const std::string& delimiter,
const uint32_t num_entries,
const bool list_versions,
const uint16_t expansion_factor,
ent_map_t& m,
bool* is_truncated,
bool* cls_filtered,
rgw_obj_index_key* last_entry,
optional_yield y,
RGWBucketListNameFilter force_check_filter)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
/* expansion_factor allows the number of entries to read to grow
* exponentially; this is used when earlier reads are producing too
* few results, perhaps due to filtering or to a series of
* namespaced entries */
ldout_bitx(bitx, dpp, 10) << "ENTERING " << __func__ << ": " << bucket_info.bucket <<
" start_after=\"" << start_after.to_string() <<
"\", prefix=\"" << prefix <<
", delimiter=\"" << delimiter <<
"\", shard_id=" << shard_id <<
"\", num_entries=" << num_entries <<
", shard_id=" << shard_id <<
", list_versions=" << list_versions <<
", expansion_factor=" << expansion_factor <<
", force_check_filter is " <<
(force_check_filter ? "set" : "unset") << dendl_bitx;
ldout_bitx(bitx, dpp, 25) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl_bitx;
m.clear();
RGWSI_RADOS::Pool index_pool;
  // key - oid (one for each shard, if there are any)
  // value - list result for the corresponding oid (shard); it is
  // filled in by the AIO callback
std::map<int, std::string> shard_oids;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, idx_layout,
&index_pool, &shard_oids,
nullptr);
if (r < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": open_bucket_index for " << bucket_info.bucket << " failed" << dendl;
return r;
}
const uint32_t shard_count = shard_oids.size();
if (shard_count == 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": the bucket index shard count appears to be 0, "
"which is an illegal value" << dendl;
return -ERR_INVALID_BUCKET_STATE;
}
uint32_t num_entries_per_shard;
if (expansion_factor == 0) {
num_entries_per_shard =
calc_ordered_bucket_list_per_shard(num_entries, shard_count);
} else if (expansion_factor <= 11) {
    // we'll max out the exponential multiplication factor at 1024 (1<<10)
num_entries_per_shard =
std::min(num_entries,
(uint32_t(1 << (expansion_factor - 1)) *
calc_ordered_bucket_list_per_shard(num_entries, shard_count)));
} else {
num_entries_per_shard = num_entries;
}
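  // Illustrative progression (assuming the base per-shard calculation
  // returns 8): expansion_factor 1 requests 8 entries per shard, 2
  // requests 16, and so on up to 8 * 1024 = 8192 at factor 11, each
  // capped at num_entries; factor 12 and above simply requests
  // num_entries from every shard.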
if (num_entries_per_shard == 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": unable to calculate the number of entries to read from each "
"bucket index shard" << dendl;
return -ERR_INVALID_BUCKET_STATE;
}
ldpp_dout(dpp, 10) << __func__ <<
": request from each of " << shard_count <<
" shard(s) for " << num_entries_per_shard << " entries to get " <<
num_entries << " total entries" << dendl;
auto& ioctx = index_pool.ioctx();
std::map<int, rgw_cls_list_ret> shard_list_results;
cls_rgw_obj_key start_after_key(start_after.name, start_after.instance);
r = CLSRGWIssueBucketList(ioctx, start_after_key, prefix, delimiter,
num_entries_per_shard,
list_versions, shard_oids, shard_list_results,
cct->_conf->rgw_bucket_index_max_aio)();
if (r < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": CLSRGWIssueBucketList for " << bucket_info.bucket <<
" failed" << dendl;
return r;
}
// to manage the iterators through each shard's list results
struct ShardTracker {
const size_t shard_idx;
rgw_cls_list_ret& result;
const std::string& oid_name;
RGWRados::ent_map_t::iterator cursor;
RGWRados::ent_map_t::iterator end;
// manages an iterator through a shard and provides other
// accessors
ShardTracker(size_t _shard_idx,
rgw_cls_list_ret& _result,
const std::string& _oid_name):
shard_idx(_shard_idx),
result(_result),
oid_name(_oid_name),
cursor(_result.dir.m.begin()),
end(_result.dir.m.end())
{}
inline const std::string& entry_name() const {
return cursor->first;
}
rgw_bucket_dir_entry& dir_entry() const {
return cursor->second;
}
inline bool is_truncated() const {
return result.is_truncated;
}
inline ShardTracker& advance() {
++cursor;
// return a self-reference to allow for chaining of calls, such
// as x.advance().at_end()
return *this;
}
inline bool at_end() const {
return cursor == end;
}
}; // ShardTracker
  // add the next candidate from the given shard tracker, if that
  // shard still has entries left to offer
  auto next_candidate = [] (CephContext *cct, ShardTracker& t,
                            std::multimap<std::string, size_t>& candidates,
                            size_t tracker_idx) {
    if (!t.at_end()) {
      candidates.emplace(t.entry_name(), tracker_idx);
    }
  };
// one tracker per shard requested (may not be all shards)
std::vector<ShardTracker> results_trackers;
results_trackers.reserve(shard_list_results.size());
for (auto& r : shard_list_results) {
results_trackers.emplace_back(r.first, r.second, shard_oids[r.first]);
    // if any *one* shard's result is truncated, the entire result is
    // truncated
    *is_truncated = *is_truncated || r.second.is_truncated;
    // unless *all* shards are cls_filtered, the entire result is
    // not filtered
*cls_filtered = *cls_filtered && r.second.cls_filtered;
}
// create a map to track the next candidate entry from ShardTracker
// (key=candidate, value=index into results_trackers); as we consume
// entries from shards, we replace them with the next entries in the
// shards until we run out
std::multimap<std::string, size_t> candidates;
size_t tracker_idx = 0;
std::vector<size_t> vidx;
vidx.reserve(shard_list_results.size());
for (auto& t : results_trackers) {
// it's important that the values in the map refer to the index
// into the results_trackers vector, which may not be the same
// as the shard number (i.e., when not all shards are requested)
next_candidate(cct, t, candidates, tracker_idx);
++tracker_idx;
}
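  // Illustrative trace (not from the source): with two trackers whose
  // sorted shard results are {"a","c","e"} and {"b","c","f"}, the
  // candidates multimap starts as {a->0, b->1}. "a" is consumed and
  // tracker 0 advances, inserting c->0; "b" is consumed and tracker 1
  // advances, inserting c->1. When "c" is selected, only the first
  // tracker's entry is moved into the result map; equal_range() then
  // collects *every* tracker positioned at "c" so all of them advance
  // past the key together, skipping cross-shard duplicates.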
rgw_bucket_dir_entry*
last_entry_visited = nullptr; // to set last_entry (marker)
std::map<std::string, bufferlist> updates;
uint32_t count = 0;
while (count < num_entries && !candidates.empty()) {
r = 0;
// select the next entry in lexical order (first key in map);
// again tracker_idx is not necessarily shard number, but is index
// into results_trackers vector
tracker_idx = candidates.begin()->second;
auto& tracker = results_trackers.at(tracker_idx);
const std::string& name = tracker.entry_name();
rgw_bucket_dir_entry& dirent = tracker.dir_entry();
ldpp_dout(dpp, 20) << __func__ << ": currently processing " <<
dirent.key << " from shard " << tracker.shard_idx << dendl;
const bool force_check =
force_check_filter && force_check_filter(dirent.key.name);
if ((!dirent.exists &&
!dirent.is_delete_marker() &&
!dirent.is_common_prefix()) ||
!dirent.pending_map.empty() ||
force_check) {
/* there are uncommitted ops. We need to check the current
* state, and if the tags are old we need to do clean-up as
* well. */
librados::IoCtx sub_ctx;
sub_ctx.dup(ioctx);
ldout_bitx(bitx, dpp, 20) << "INFO: " << __func__ <<
" calling check_disk_state bucket=" << bucket_info.bucket <<
" entry=" << dirent.key << dendl_bitx;
r = check_disk_state(dpp, sub_ctx, bucket_info, dirent, dirent,
updates[tracker.oid_name], y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << __func__ <<
": check_disk_state for \"" << dirent.key <<
"\" failed with r=" << r << dendl;
return r;
}
} else {
r = 0;
}
const cls_rgw_obj_key dirent_key = dirent.key;
// at this point either r >= 0 or r == -ENOENT
if (r >= 0) { // i.e., if r != -ENOENT
ldpp_dout(dpp, 10) << __func__ << ": got " <<
dirent_key << dendl;
auto [it, inserted] = m.insert_or_assign(name, std::move(dirent));
last_entry_visited = &it->second;
if (inserted) {
++count;
} else {
ldpp_dout(dpp, 0) << "WARNING: " << __func__ <<
" reassigned map value at \"" << name <<
"\", which should not happen" << dendl;
}
} else {
ldpp_dout(dpp, 10) << __func__ << ": skipping " <<
dirent.key.name << "[" << dirent.key.instance << "]" << dendl;
last_entry_visited = &tracker.dir_entry();
}
// refresh the candidates map
vidx.clear();
bool need_to_stop = false;
auto range = candidates.equal_range(name);
for (auto i = range.first; i != range.second; ++i) {
vidx.push_back(i->second);
}
candidates.erase(range.first, range.second);
for (auto idx : vidx) {
auto& tracker_match = results_trackers.at(idx);
tracker_match.advance();
next_candidate(cct, tracker_match, candidates, idx);
if (tracker_match.at_end() && tracker_match.is_truncated()) {
need_to_stop = true;
break;
}
}
if (need_to_stop) {
// once we exhaust one shard that is truncated, we need to stop,
// as we cannot be certain that one of the next entries needs to
// come from that shard; S3 and swift protocols allow returning
// fewer than what was requested
ldpp_dout(dpp, 10) << __func__ <<
": stopped accumulating results at count=" << count <<
", dirent=\"" << dirent_key <<
"\", because its shard is truncated and exhausted" << dendl;
break;
}
} // while we haven't provided requested # of result entries
// suggest updates if there are any
for (auto& miter : updates) {
if (miter.second.length()) {
ObjectWriteOperation o;
cls_rgw_suggest_changes(o, miter.second);
// we don't care if we lose suggested updates, send them off blindly
AioCompletion *c =
librados::Rados::aio_create_completion(nullptr, nullptr);
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ <<
": doing dir_suggest on " << miter.first << dendl_bitx;
ioctx.aio_operate(miter.first, c, &o);
c->release();
}
} // updates loop
// determine truncation by checking if all the returned entries are
// consumed or not
*is_truncated = false;
for (const auto& t : results_trackers) {
if (!t.at_end() || t.is_truncated()) {
*is_truncated = true;
break;
}
}
ldpp_dout(dpp, 20) << __func__ <<
": returning, count=" << count << ", is_truncated=" << *is_truncated <<
dendl;
if (*is_truncated && count < num_entries) {
ldpp_dout(dpp, 10) << __func__ <<
": requested " << num_entries << " entries but returning " <<
count << ", which is truncated" << dendl;
}
if (last_entry_visited != nullptr && last_entry) {
*last_entry = last_entry_visited->key;
ldpp_dout(dpp, 20) << __func__ <<
": returning, last_entry=" << *last_entry << dendl;
} else {
ldpp_dout(dpp, 20) << __func__ <<
": returning, last_entry NOT SET" << dendl;
}
ldout_bitx(bitx, dpp, 10) << "EXITING " << __func__ << dendl_bitx;
return 0;
} // RGWRados::cls_bucket_list_ordered
// A helper function to retrieve the hash source from an incomplete
// multipart entry by removing everything from the second-to-last
// period onward.
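// For example, an incomplete multipart meta entry named
// "mymultipartobj.2~J9eqm9Ab.meta" (with the namespace already
// stripped, and "2~J9eqm9Ab" a made-up upload id) hashes on
// "mymultipartobj": the last two dot-separated components are removed.
// Object names that themselves contain periods, e.g.
// "my.obj.2~J9eqm9Ab.meta", still resolve correctly to "my.obj".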
static int parse_index_hash_source(const std::string& oid_wo_ns, std::string *index_hash_source) {
std::size_t found = oid_wo_ns.rfind('.');
if (found == std::string::npos || found < 1) {
return -EINVAL;
}
found = oid_wo_ns.rfind('.', found - 1);
if (found == std::string::npos || found < 1) {
return -EINVAL;
}
*index_hash_source = oid_wo_ns.substr(0, found);
return 0;
}
int RGWRados::cls_bucket_list_unordered(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id,
const rgw_obj_index_key& start_after,
const std::string& prefix,
uint32_t num_entries,
bool list_versions,
std::vector<rgw_bucket_dir_entry>& ent_list,
bool *is_truncated,
rgw_obj_index_key *last_entry,
optional_yield y,
RGWBucketListNameFilter force_check_filter) {
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx(bitx, dpp, 10) << "ENTERING " << __func__ << ": " << bucket_info.bucket <<
" start_after=\"" << start_after <<
"\", prefix=\"" << prefix <<
"\", shard_id=" << shard_id <<
"\", num_entries=" << num_entries <<
", list_versions=" << list_versions <<
(force_check_filter ? "set" : "unset") << dendl_bitx;
ldout_bitx(bitx, dpp, 25) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl_bitx;
ent_list.clear();
static MultipartMetaFilter multipart_meta_filter;
*is_truncated = false;
RGWSI_RADOS::Pool index_pool;
std::map<int, std::string> oids;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, idx_layout, &index_pool, &oids, nullptr);
if (r < 0) {
return r;
}
auto& ioctx = index_pool.ioctx();
const uint32_t num_shards = oids.size();
rgw_obj_index_key marker = start_after;
uint32_t current_shard;
if (shard_id >= 0) {
current_shard = shard_id;
} else if (start_after.empty()) {
current_shard = 0u;
} else {
// at this point we have a marker (start_after) that has something
// in it, so we need to get to the bucket shard index, so we can
// start reading from there
// now convert the key (oid) to an rgw_obj_key since that will
// separate out the namespace, name, and instance
rgw_obj_key obj_key;
bool parsed = rgw_obj_key::parse_raw_oid(start_after.name, &obj_key);
if (!parsed) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" received an invalid start marker: \"" << start_after << "\"" <<
dendl;
return -EINVAL;
} else if (obj_key.name.empty()) {
// if the name is empty that means the object name came in with
// a namespace only, and therefore we need to start our scan at
// the first bucket index shard
current_shard = 0u;
} else {
// so now we have the key used to compute the bucket index shard
// and can extract the specific shard from it
if (obj_key.ns == RGW_OBJ_NS_MULTIPART) {
        // use obj_key.ns == RGW_OBJ_NS_MULTIPART rather than relying
        // on MultipartMetaFilter, since that filter only checks for a
        // ".meta" suffix and may therefore exclude multipart data
        // entries while mistakenly matching regular objects whose
        // names happen to end in ".meta"
string index_hash_source;
r = parse_index_hash_source(obj_key.name, &index_hash_source);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
" parse_index_hash_source unable to parse \"" << obj_key.name <<
"\", r=" << r << dendl;
return r;
}
current_shard = svc.bi_rados->bucket_shard_index(index_hash_source, num_shards);
} else {
current_shard = svc.bi_rados->bucket_shard_index(obj_key.name, num_shards);
}
}
}
uint32_t count = 0u;
std::map<std::string, bufferlist> updates;
rgw_obj_index_key last_added_entry;
while (count <= num_entries &&
((shard_id >= 0 && current_shard == uint32_t(shard_id)) ||
current_shard < num_shards)) {
const std::string& oid = oids[current_shard];
rgw_cls_list_ret result;
librados::ObjectReadOperation op;
const std::string empty_delimiter;
cls_rgw_bucket_list_op(op, marker, prefix, empty_delimiter,
num_entries,
list_versions, &result);
r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": error in rgw_rados_operate (bucket list op), r=" << r << dendl;
return r;
}
for (auto& entry : result.dir.m) {
rgw_bucket_dir_entry& dirent = entry.second;
bool force_check = force_check_filter &&
force_check_filter(dirent.key.name);
if ((!dirent.exists && !dirent.is_delete_marker()) ||
!dirent.pending_map.empty() ||
force_check) {
/* there are uncommitted ops. We need to check the current state,
* and if the tags are old we need to do cleanup as well. */
librados::IoCtx sub_ctx;
sub_ctx.dup(ioctx);
ldout_bitx(bitx, dpp, 20) << "INFO: " << __func__ <<
": calling check_disk_state bucket=" << bucket_info.bucket <<
" entry=" << dirent.key << dendl_bitx;
r = check_disk_state(dpp, sub_ctx, bucket_info, dirent, dirent, updates[oid], y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ <<
": error in check_disk_state, r=" << r << dendl;
return r;
}
} else {
r = 0;
}
// at this point either r >= 0 or r == -ENOENT
if (r >= 0) { // i.e., if r != -ENOENT
ldpp_dout(dpp, 10) << __func__ << ": got " <<
dirent.key << dendl;
if (count < num_entries) {
marker = last_added_entry = dirent.key; // double assign
ent_list.emplace_back(std::move(dirent));
++count;
} else {
last_added_entry = dirent.key;
*is_truncated = true;
ldpp_dout(dpp, 10) << "INFO: " << __func__ <<
": reached max entries (" << num_entries << ") to return at \"" <<
dirent.key << "\"" << dendl;
goto check_updates;
}
} else { // r == -ENOENT
        // in the case of -ENOENT, make sure we're advancing the marker
        // for a possible next bucket list operation
marker = dirent.key;
}
} // entry for loop
if (!result.is_truncated) {
// if we reached the end of the shard read next shard
++current_shard;
marker = rgw_obj_index_key();
}
} // shard loop
check_updates:
  // suggest updates if there are any
std::map<std::string, bufferlist>::iterator miter = updates.begin();
for (; miter != updates.end(); ++miter) {
if (miter->second.length()) {
ObjectWriteOperation o;
cls_rgw_suggest_changes(o, miter->second);
// we don't care if we lose suggested updates, send them off blindly
AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ <<
" doing dir_suggest on " << miter->first << dendl_bitx;
ioctx.aio_operate(miter->first, c, &o);
c->release();
}
}
if (last_entry && !ent_list.empty()) {
*last_entry = last_added_entry;
}
ldout_bitx(bitx, dpp, 10) << "EXITING " << __func__ << dendl_bitx;
return 0;
} // RGWRados::cls_bucket_list_unordered
int RGWRados::cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const string& oid,
rgw_usage_log_info& info, optional_yield y)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
ObjectWriteOperation op;
cls_rgw_usage_log_add(op, info);
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
return r;
}
int RGWRados::cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket,
uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
string& read_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage,
bool *is_truncated)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
*is_truncated = false;
r = cls_rgw_usage_log_read(ref.pool.ioctx(), ref.obj.oid, user, bucket, start_epoch, end_epoch,
max_entries, read_iter, usage, is_truncated);
return r;
}
static int cls_rgw_usage_log_trim_repeat(const DoutPrefixProvider *dpp, rgw_rados_ref ref, const string& user, const string& bucket, uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
bool done = false;
do {
librados::ObjectWriteOperation op;
cls_rgw_usage_log_trim(op, user, bucket, start_epoch, end_epoch);
int r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
if (r == -ENODATA)
done = true;
else if (r < 0)
return r;
} while (!done);
return 0;
}
int RGWRados::cls_obj_usage_log_trim(const DoutPrefixProvider *dpp, const string& oid, const string& user, const string& bucket,
uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
r = cls_rgw_usage_log_trim_repeat(dpp, ref, user, bucket, start_epoch, end_epoch, y);
return r;
}
int RGWRados::cls_obj_usage_log_clear(const DoutPrefixProvider *dpp, string& oid, optional_yield y)
{
rgw_raw_obj obj(svc.zone->get_zone_params().usage_log_pool, oid);
rgw_rados_ref ref;
int r = get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
cls_rgw_usage_log_clear(op);
r = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, y);
return r;
}
// note: this removes entries from the rados bucket index objects
// without going through CLS; this is known to be called from
// "radosgw-admin unlink" and "radosgw-admin bucket check --fix"
int RGWRados::remove_objs_from_index(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const std::list<rgw_obj_index_key>& entry_key_list)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx(bitx, dpp, 10) << "ENTERING " << __func__ << ": bucket=" << bucket_info.bucket <<
" entry_key_list.size()=" << entry_key_list.size() << dendl_bitx;
ldout_bitx(bitx, dpp, 25) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl_bitx;
const auto& current_index = bucket_info.get_current_index();
if (is_layout_indexless(current_index)) {
return -EINVAL;
}
const uint32_t num_shards = current_index.layout.normal.num_shards;
RGWSI_RADOS::Pool index_pool;
std::map<int, std::string> index_oids;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, std::nullopt,
bucket_info.layout.current_index,
&index_pool, &index_oids, nullptr);
if (r < 0) {
ldout_bitx(bitx, dpp, 0) << "ERROR: " << __func__ <<
" open_bucket_index returned " << r << dendl_bitx;
return r;
}
// split up removals by shard
std::map<int, std::set<std::string>> sharded_removals;
for (const auto& entry_key : entry_key_list) {
const rgw_obj_key obj_key(entry_key);
const uint32_t shard =
RGWSI_BucketIndex_RADOS::bucket_shard_index(obj_key, num_shards);
// entry_key already combines namespace and name, so we first have
// to break that apart before we can then combine with instance
std::string name;
std::string ns; // namespace
rgw_obj_key::parse_index_key(entry_key.name, &name, &ns);
rgw_obj_key full_key(name, entry_key.instance, ns);
std::string combined_key = full_key.get_oid();
sharded_removals[shard].insert(combined_key);
ldout_bitx(bitx, dpp, 20) << "INFO: " << __func__ <<
": removal from bucket index, bucket=" << bucket_info.bucket <<
" key=" << combined_key << " designated for shard " << shard <<
dendl_bitx;
}
for (const auto& removals : sharded_removals) {
const int shard = removals.first;
const std::string& oid = index_oids[shard];
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ <<
": removal from bucket index, bucket=" << bucket_info.bucket <<
", shard=" << shard << ", oid=" << oid << ", num_keys=" <<
removals.second.size() << dendl_bitx;
r = index_pool.ioctx().omap_rm_keys(oid, removals.second);
if (r < 0) {
ldout_bitx(bitx, dpp, 0) << "ERROR: " << __func__ <<
": omap_rm_keys returned ret=" << r <<
dendl_bitx;
return r;
}
}
ldout_bitx(bitx, dpp, 5) <<
"EXITING " << __func__ << " and returning " << r << dendl_bitx;
return r;
}
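// Hypothetical caller sketch (not part of the source): a repair tool
// that already holds bucket_info and stale keys from a listing pass
// might drop them from the index like this:
//
//   std::list<rgw_obj_index_key> stale;
//   rgw_obj_index_key k;
//   k.name = "photo.jpg"; // plain (current-version) entry
//   stale.push_back(k);
//   k.name = "doc.pdf";
//   k.instance = "Zf9Qkv"; // made-up version id for a versioned entry
//   stale.push_back(k);
//   int r = rados->remove_objs_from_index(dpp, bucket_info, stale);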
int RGWRados::check_disk_state(const DoutPrefixProvider *dpp,
librados::IoCtx io_ctx,
RGWBucketInfo& bucket_info,
rgw_bucket_dir_entry& list_state,
rgw_bucket_dir_entry& object,
bufferlist& suggested_updates,
optional_yield y)
{
const bool bitx = cct->_conf->rgw_bucket_index_transaction_instrumentation;
ldout_bitx(bitx, dpp, 10) << "ENTERING " << __func__ << ": bucket=" <<
bucket_info.bucket << " dir_entry=" << list_state.key << dendl_bitx;
uint8_t suggest_flag = (svc.zone->need_to_log_data() ? CEPH_RGW_DIR_SUGGEST_LOG_OP : 0);
std::string loc;
rgw_obj obj(bucket_info.bucket, list_state.key);
MultipartMetaFilter multipart_meta_filter;
string temp_key;
if (multipart_meta_filter.filter(list_state.key.name, temp_key)) {
obj.in_extra_data = true;
}
string oid;
get_obj_bucket_and_oid_loc(obj, oid, loc);
if (loc != list_state.locator) {
ldpp_dout(dpp, 0) << "WARNING: generated locator (" << loc << ") is different from listed locator (" << list_state.locator << ")" << dendl;
}
io_ctx.locator_set_key(list_state.locator);
RGWObjState *astate = NULL;
RGWObjManifest *manifest = nullptr;
RGWObjectCtx rctx(this->driver);
int r = get_obj_state(dpp, &rctx, bucket_info, obj, &astate, &manifest, false, y);
if (r < 0)
return r;
list_state.pending_map.clear(); // we don't need this and it inflates size
if (!list_state.is_delete_marker() && !astate->exists) {
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ << ": disk state exists" << dendl_bitx;
/* object doesn't exist right now -- hopefully because it's
* marked as !exists and got deleted */
if (list_state.exists) {
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ << ": index list state exists" << dendl_bitx;
/* FIXME: what should happen now? Work out if there are any
* non-bad ways this could happen (there probably are, but annoying
* to handle!) */
}
// encode a suggested removal of that key
list_state.ver.epoch = io_ctx.get_last_version();
list_state.ver.pool = io_ctx.get_id();
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ << ": encoding remove of " << list_state.key << " on suggested_updates" << dendl_bitx;
cls_rgw_encode_suggestion(CEPH_RGW_REMOVE | suggest_flag, list_state, suggested_updates);
return -ENOENT;
}
string etag;
string content_type;
string storage_class;
ACLOwner owner;
bool appendable = false;
object.meta.size = astate->size;
object.meta.accounted_size = astate->accounted_size;
object.meta.mtime = astate->mtime;
map<string, bufferlist>::iterator iter = astate->attrset.find(RGW_ATTR_ETAG);
if (iter != astate->attrset.end()) {
etag = rgw_bl_str(iter->second);
}
iter = astate->attrset.find(RGW_ATTR_CONTENT_TYPE);
if (iter != astate->attrset.end()) {
content_type = rgw_bl_str(iter->second);
}
iter = astate->attrset.find(RGW_ATTR_STORAGE_CLASS);
if (iter != astate->attrset.end()) {
storage_class = rgw_bl_str(iter->second);
}
iter = astate->attrset.find(RGW_ATTR_ACL);
if (iter != astate->attrset.end()) {
r = decode_policy(dpp, iter->second, &owner);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not decode policy for object: " << obj << dendl;
}
}
iter = astate->attrset.find(RGW_ATTR_APPEND_PART_NUM);
if (iter != astate->attrset.end()) {
appendable = true;
}
if (manifest) {
RGWObjManifest::obj_iterator miter;
for (miter = manifest->obj_begin(dpp); miter != manifest->obj_end(dpp); ++miter) {
const rgw_raw_obj& raw_loc = miter.get_location().get_raw_obj(this);
rgw_obj loc;
RGWSI_Tier_RADOS::raw_obj_to_obj(manifest->get_obj().bucket, raw_loc, &loc);
if (loc.key.ns == RGW_OBJ_NS_MULTIPART) {
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ << " removing manifest part from index loc=" << loc << dendl_bitx;
r = delete_obj_index(loc, astate->mtime, dpp, y);
if (r < 0) {
ldout_bitx(bitx, dpp, 0) <<
"WARNING: " << __func__ << ": delete_obj_index returned r=" << r << dendl_bitx;
}
}
}
}
object.meta.etag = etag;
object.meta.content_type = content_type;
object.meta.storage_class = storage_class;
object.meta.owner = owner.get_id().to_str();
object.meta.owner_display_name = owner.get_display_name();
object.meta.appendable = appendable;
// encode suggested updates
list_state.meta.size = object.meta.size;
list_state.meta.accounted_size = object.meta.accounted_size;
list_state.meta.mtime = object.meta.mtime;
list_state.meta.category = main_category;
list_state.meta.etag = etag;
list_state.meta.appendable = appendable;
list_state.meta.content_type = content_type;
list_state.meta.storage_class = storage_class;
librados::IoCtx head_obj_ctx; // initialize to data pool so we can get pool id
r = get_obj_head_ioctx(dpp, bucket_info, obj, &head_obj_ctx);
if (r < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" WARNING: unable to find head object data pool for \"" <<
obj << "\", not updating version pool/epoch" << dendl;
} else {
list_state.ver.pool = head_obj_ctx.get_id();
list_state.ver.epoch = astate->epoch;
}
if (astate->obj_tag.length() > 0) {
list_state.tag = astate->obj_tag.c_str();
}
list_state.meta.owner = owner.get_id().to_str();
list_state.meta.owner_display_name = owner.get_display_name();
list_state.exists = true;
ldout_bitx(bitx, dpp, 10) << "INFO: " << __func__ <<
": encoding update of " << list_state.key << " on suggested_updates" << dendl_bitx;
cls_rgw_encode_suggestion(CEPH_RGW_UPDATE | suggest_flag, list_state, suggested_updates);
ldout_bitx(bitx, dpp, 10) << "EXITING " << __func__ << dendl_bitx;
return 0;
} // RGWRados::check_disk_state
int RGWRados::cls_bucket_head(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int shard_id, vector<rgw_bucket_dir_header>& headers, map<int, string> *bucket_instance_ids)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
map<int, struct rgw_cls_list_ret> list_results;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, idx_layout, &index_pool, &oids, bucket_instance_ids);
if (r < 0) {
ldpp_dout(dpp, 20) << "cls_bucket_head: open_bucket_index() returned "
<< r << dendl;
return r;
}
r = CLSRGWIssueGetDirHeader(index_pool.ioctx(), oids, list_results, cct->_conf->rgw_bucket_index_max_aio)();
if (r < 0) {
ldpp_dout(dpp, 20) << "cls_bucket_head: CLSRGWIssueGetDirHeader() returned "
<< r << dendl;
return r;
}
map<int, struct rgw_cls_list_ret>::iterator iter = list_results.begin();
for(; iter != list_results.end(); ++iter) {
headers.push_back(std::move(iter->second.dir.header));
}
return 0;
}
int RGWRados::cls_bucket_head_async(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
int r = svc.bi_rados->open_bucket_index(dpp, bucket_info, shard_id, idx_layout, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
map<int, string>::iterator iter = bucket_objs.begin();
for (; iter != bucket_objs.end(); ++iter) {
r = cls_rgw_get_dir_header_async(index_pool.ioctx(), iter->second, static_cast<RGWGetDirHeader_CB*>(ctx->get()));
if (r < 0) {
ctx->put();
break;
} else {
(*num_aio)++;
}
}
return r;
}
int RGWRados::check_bucket_shards(const RGWBucketInfo& bucket_info,
const rgw_bucket& bucket,
uint64_t num_objs,
const DoutPrefixProvider *dpp, optional_yield y)
{
if (! cct->_conf.get_val<bool>("rgw_dynamic_resharding")) {
return 0;
}
bool need_resharding = false;
uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
const uint32_t max_dynamic_shards =
uint32_t(cct->_conf.get_val<uint64_t>("rgw_max_dynamic_shards"));
if (num_source_shards >= max_dynamic_shards) {
return 0;
}
uint32_t suggested_num_shards = 0;
const uint64_t max_objs_per_shard =
cct->_conf.get_val<uint64_t>("rgw_max_objs_per_shard");
// TODO: consider per-bucket sync policy here?
const bool is_multisite = svc.zone->need_to_log_data();
quota_handler->check_bucket_shards(dpp, max_objs_per_shard, num_source_shards,
num_objs, is_multisite, need_resharding,
&suggested_num_shards);
if (! need_resharding) {
return 0;
}
const uint32_t final_num_shards =
RGWBucketReshard::get_preferred_shards(suggested_num_shards,
max_dynamic_shards);
// final verification, so we don't reduce number of shards
if (final_num_shards <= num_source_shards) {
return 0;
}
ldpp_dout(dpp, 1) << "RGWRados::" << __func__ << " bucket " << bucket.name <<
" needs resharding; current num shards " << bucket_info.layout.current_index.layout.normal.num_shards <<
"; new num shards " << final_num_shards << " (suggested " <<
suggested_num_shards << ")" << dendl;
return add_bucket_to_reshard(dpp, bucket_info, final_num_shards, y);
}
int RGWRados::add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards, optional_yield y)
{
RGWReshard reshard(this->driver, dpp);
uint32_t num_source_shards = rgw::current_num_shards(bucket_info.layout);
new_num_shards = std::min(new_num_shards, get_max_bucket_shards());
if (new_num_shards <= num_source_shards) {
ldpp_dout(dpp, 20) << "not resharding bucket name=" << bucket_info.bucket.name << ", orig_num=" << num_source_shards << ", new_num_shards=" << new_num_shards << dendl;
return 0;
}
cls_rgw_reshard_entry entry;
entry.time = real_clock::now();
entry.tenant = bucket_info.owner.tenant;
entry.bucket_name = bucket_info.bucket.name;
entry.bucket_id = bucket_info.bucket.bucket_id;
entry.old_num_shards = num_source_shards;
entry.new_num_shards = new_num_shards;
return reshard.add(dpp, entry, y);
}
int RGWRados::check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
RGWQuota& quota,
uint64_t obj_size, optional_yield y,
bool check_size_only)
{
  // if we only check size, then num_objs will be set to 0
if(check_size_only)
return quota_handler->check_quota(dpp, bucket_owner, bucket, quota, 0, obj_size, y);
return quota_handler->check_quota(dpp, bucket_owner, bucket, quota, 1, obj_size, y);
}
int RGWRados::get_target_shard_id(const rgw::bucket_index_normal_layout& layout, const string& obj_key,
int *shard_id)
{
int r = 0;
switch (layout.hash_type) {
case rgw::BucketHashType::Mod:
if (!layout.num_shards) {
if (shard_id) {
*shard_id = -1;
}
} else {
uint32_t sid = svc.bi_rados->bucket_shard_index(obj_key, layout.num_shards);
if (shard_id) {
*shard_id = (int)sid;
}
}
break;
default:
r = -ENOTSUP;
}
return r;
}
uint64_t RGWRados::instance_id()
{
return get_rados_handle()->get_instance_id();
}
uint64_t RGWRados::next_bucket_id()
{
std::lock_guard l{bucket_id_lock};
return ++max_bucket_id;
}
librados::Rados* RGWRados::get_rados_handle()
{
return &rados;
}
int RGWRados::delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, list<librados::AioCompletion *>& handles)
{
rgw_rados_ref ref;
int ret = get_raw_obj_ref(dpp, obj, &ref);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
return ret;
}
ObjectWriteOperation op;
list<string> prefixes;
cls_rgw_remove_obj(op, prefixes);
AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
ret = ref.pool.ioctx().aio_operate(ref.obj.oid, c, &op);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: AioOperate failed with ret=" << ret << dendl;
c->release();
return ret;
}
handles.push_back(c);
return 0;
}
int RGWRados::delete_obj_aio(const DoutPrefixProvider *dpp, const rgw_obj& obj,
RGWBucketInfo& bucket_info, RGWObjState *astate,
list<librados::AioCompletion *>& handles, bool keep_index_consistent,
optional_yield y)
{
rgw_rados_ref ref;
int ret = get_obj_head_ref(dpp, bucket_info, obj, &ref);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to get obj ref with ret=" << ret << dendl;
return ret;
}
if (keep_index_consistent) {
RGWRados::Bucket bop(this, bucket_info);
RGWRados::Bucket::UpdateIndex index_op(&bop, obj);
ret = index_op.prepare(dpp, CLS_RGW_OP_DEL, &astate->write_tag, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to prepare index op with ret=" << ret << dendl;
return ret;
}
}
ObjectWriteOperation op;
list<string> prefixes;
cls_rgw_remove_obj(op, prefixes);
AioCompletion *c = librados::Rados::aio_create_completion(nullptr, nullptr);
ret = ref.pool.ioctx().aio_operate(ref.obj.oid, c, &op);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: AioOperate failed with ret=" << ret << dendl;
c->release();
return ret;
}
handles.push_back(c);
if (keep_index_consistent) {
ret = delete_obj_index(obj, astate->mtime, dpp, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to delete obj index with ret=" << ret << dendl;
return ret;
}
}
return ret;
}
void objexp_hint_entry::generate_test_instances(list<objexp_hint_entry*>& o)
{
auto it = new objexp_hint_entry;
it->tenant = "tenant1";
it->bucket_name = "bucket1";
it->bucket_id = "1234";
it->obj_key = rgw_obj_key("obj");
o.push_back(it);
o.push_back(new objexp_hint_entry);
}
void objexp_hint_entry::dump(Formatter *f) const
{
f->open_object_section("objexp_hint_entry");
encode_json("tenant", tenant, f);
encode_json("bucket_name", bucket_name, f);
encode_json("bucket_id", bucket_id, f);
encode_json("rgw_obj_key", obj_key, f);
utime_t ut(exp_time);
encode_json("exp_time", ut, f);
f->close_section();
}
void RGWOLHInfo::generate_test_instances(list<RGWOLHInfo*> &o)
{
RGWOLHInfo *olh = new RGWOLHInfo;
olh->removed = false;
o.push_back(olh);
o.push_back(new RGWOLHInfo);
}
void RGWOLHInfo::dump(Formatter *f) const
{
encode_json("target", target, f);
}
void RGWOLHPendingInfo::dump(Formatter *f) const
{
utime_t ut(time);
encode_json("time", ut, f);
}
ceph-main/src/rgw/driver/rados/rgw_rados.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <iostream>
#include <functional>
#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>
#include "include/rados/librados.hpp"
#include "include/Context.h"
#include "include/random.h"
#include "common/RefCountedObj.h"
#include "common/ceph_time.h"
#include "common/Timer.h"
#include "rgw_common.h"
#include "cls/rgw/cls_rgw_types.h"
#include "cls/version/cls_version_types.h"
#include "cls/log/cls_log_types.h"
#include "cls/timeindex/cls_timeindex_types.h"
#include "cls/otp/cls_otp_types.h"
#include "rgw_quota.h"
#include "rgw_log.h"
#include "rgw_metadata.h"
#include "rgw_meta_sync_status.h"
#include "rgw_period_puller.h"
#include "rgw_obj_manifest.h"
#include "rgw_sync_module.h"
#include "rgw_trim_bilog.h"
#include "rgw_service.h"
#include "rgw_sal.h"
#include "rgw_aio.h"
#include "rgw_d3n_cacherequest.h"
#include "services/svc_rados.h"
#include "services/svc_bi_rados.h"
#include "common/Throttle.h"
#include "common/ceph_mutex.h"
#include "rgw_cache.h"
#include "rgw_sal_fwd.h"
#include "rgw_pubsub.h"
struct D3nDataCache;
class RGWWatcher;
class ACLOwner;
class RGWGC;
class RGWMetaNotifier;
class RGWDataNotifier;
class RGWLC;
class RGWObjectExpirer;
class RGWMetaSyncProcessorThread;
class RGWDataSyncProcessorThread;
class RGWSyncLogTrimThread;
class RGWSyncTraceManager;
struct RGWZoneGroup;
struct RGWZoneParams;
class RGWReshard;
class RGWReshardWait;
struct get_obj_data;
/* flags for put_obj_meta() */
#define PUT_OBJ_CREATE 0x01
#define PUT_OBJ_EXCL 0x02
#define PUT_OBJ_CREATE_EXCL (PUT_OBJ_CREATE | PUT_OBJ_EXCL)
static inline void prepend_bucket_marker(const rgw_bucket& bucket, const std::string& orig_oid, std::string& oid)
{
if (bucket.marker.empty() || orig_oid.empty()) {
oid = orig_oid;
} else {
oid = bucket.marker;
oid.append("_");
oid.append(orig_oid);
}
}
static inline void get_obj_bucket_and_oid_loc(const rgw_obj& obj, std::string& oid, std::string& locator)
{
const rgw_bucket& bucket = obj.bucket;
prepend_bucket_marker(bucket, obj.get_oid(), oid);
const std::string& loc = obj.key.get_loc();
if (!loc.empty()) {
prepend_bucket_marker(bucket, loc, locator);
} else {
locator.clear();
}
}
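// For example (illustrative values): with bucket.marker "abc123" and
// an object key "photo.jpg" carrying no explicit locator, the head
// object maps to oid "abc123_photo.jpg" with an empty locator; if the
// key's locator were "loc1", the locator would become "abc123_loc1".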
struct RGWOLHInfo {
rgw_obj target;
bool removed;
RGWOLHInfo() : removed(false) {}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(target, bl);
encode(removed, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(target, bl);
decode(removed, bl);
DECODE_FINISH(bl);
}
static void generate_test_instances(std::list<RGWOLHInfo*>& o);
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(RGWOLHInfo)
struct RGWOLHPendingInfo {
ceph::real_time time;
RGWOLHPendingInfo() {}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(time, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(time, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(RGWOLHPendingInfo)
struct RGWUsageBatch {
std::map<ceph::real_time, rgw_usage_log_entry> m;
void insert(ceph::real_time& t, rgw_usage_log_entry& entry, bool *account) {
bool exists = m.find(t) != m.end();
*account = !exists;
m[t].aggregate(entry);
}
};
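// For instance, two usage entries that share one timestamp land in the
// same slot: the first insert() reports *account=true (a new slot was
// created) while the second reports *account=false and only aggregates.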
struct RGWCloneRangeInfo {
rgw_obj src;
off_t src_ofs;
off_t dst_ofs;
uint64_t len;
};
class RGWFetchObjFilter {
public:
virtual ~RGWFetchObjFilter() {}
virtual int filter(CephContext *cct,
const rgw_obj_key& source_key,
const RGWBucketInfo& dest_bucket_info,
std::optional<rgw_placement_rule> dest_placement_rule,
const std::map<std::string, bufferlist>& obj_attrs,
std::optional<rgw_user> *poverride_owner,
const rgw_placement_rule **prule) = 0;
};
class RGWFetchObjFilter_Default : public RGWFetchObjFilter {
protected:
rgw_placement_rule dest_rule;
public:
RGWFetchObjFilter_Default() {}
int filter(CephContext *cct,
const rgw_obj_key& source_key,
const RGWBucketInfo& dest_bucket_info,
std::optional<rgw_placement_rule> dest_placement_rule,
const std::map<std::string, bufferlist>& obj_attrs,
std::optional<rgw_user> *poverride_owner,
const rgw_placement_rule **prule) override;
};
struct RGWObjStateManifest {
RGWObjState state;
std::optional<RGWObjManifest> manifest;
};
class RGWObjectCtx {
rgw::sal::Driver* driver;
ceph::shared_mutex lock = ceph::make_shared_mutex("RGWObjectCtx");
std::map<rgw_obj, RGWObjStateManifest> objs_state;
public:
explicit RGWObjectCtx(rgw::sal::Driver* _driver) : driver(_driver) {}
RGWObjectCtx(RGWObjectCtx& _o) {
std::unique_lock wl{lock};
this->driver = _o.driver;
this->objs_state = _o.objs_state;
}
rgw::sal::Driver* get_driver() {
return driver;
}
RGWObjStateManifest *get_state(const rgw_obj& obj);
void set_compressed(const rgw_obj& obj);
void set_atomic(const rgw_obj& obj);
void set_prefetch_data(const rgw_obj& obj);
void invalidate(const rgw_obj& obj);
};
struct RGWRawObjState {
rgw_raw_obj obj;
bool has_attrs{false};
bool exists{false};
uint64_t size{0};
ceph::real_time mtime;
uint64_t epoch{0};
bufferlist obj_tag;
bool has_data{false};
bufferlist data;
bool prefetch_data{false};
uint64_t pg_ver{0};
/* important! don't forget to update copy constructor */
RGWObjVersionTracker objv_tracker;
std::map<std::string, bufferlist> attrset;
RGWRawObjState() {}
RGWRawObjState(const RGWRawObjState& rhs) : obj (rhs.obj) {
has_attrs = rhs.has_attrs;
exists = rhs.exists;
size = rhs.size;
mtime = rhs.mtime;
epoch = rhs.epoch;
if (rhs.obj_tag.length()) {
obj_tag = rhs.obj_tag;
}
has_data = rhs.has_data;
if (rhs.data.length()) {
data = rhs.data;
}
prefetch_data = rhs.prefetch_data;
pg_ver = rhs.pg_ver;
objv_tracker = rhs.objv_tracker;
}
};
struct RGWPoolIterCtx {
librados::IoCtx io_ctx;
librados::NObjectIterator iter;
};
struct RGWListRawObjsCtx {
bool initialized;
RGWPoolIterCtx iter_ctx;
RGWListRawObjsCtx() : initialized(false) {}
};
struct objexp_hint_entry {
std::string tenant;
std::string bucket_name;
std::string bucket_id;
rgw_obj_key obj_key;
ceph::real_time exp_time;
void encode(bufferlist& bl) const {
ENCODE_START(2, 1, bl);
encode(bucket_name, bl);
encode(bucket_id, bl);
encode(obj_key, bl);
encode(exp_time, bl);
encode(tenant, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
// XXX Do we want DECODE_START_LEGACY_COMPAT_LEN(2, 1, 1, bl); ?
DECODE_START(2, bl);
decode(bucket_name, bl);
decode(bucket_id, bl);
decode(obj_key, bl);
decode(exp_time, bl);
if (struct_v >= 2) {
decode(tenant, bl);
} else {
tenant.clear();
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
static void generate_test_instances(std::list<objexp_hint_entry*>& o);
};
WRITE_CLASS_ENCODER(objexp_hint_entry)
class RGWMetaSyncStatusManager;
class RGWDataSyncStatusManager;
class RGWCoroutinesManagerRegistry;
class RGWGetDirHeader_CB;
class RGWGetUserHeader_CB;
namespace rgw { namespace sal {
class RadosStore;
class MPRadosSerializer;
class LCRadosSerializer;
} }
class RGWAsyncRadosProcessor;
template <class T>
class RGWChainedCacheImpl;
struct bucket_info_entry {
RGWBucketInfo info;
real_time mtime;
std::map<std::string, bufferlist> attrs;
};
struct pubsub_bucket_topics_entry {
rgw_pubsub_bucket_topics info;
RGWObjVersionTracker objv_tracker;
real_time mtime;
};
struct tombstone_entry;
template <class K, class V>
class lru_map;
using tombstone_cache_t = lru_map<rgw_obj, tombstone_entry>;
class RGWIndexCompletionManager;
class RGWRados
{
friend class RGWGC;
friend class RGWMetaNotifier;
friend class RGWDataNotifier;
friend class RGWObjectExpirer;
friend class RGWMetaSyncProcessorThread;
friend class RGWDataSyncProcessorThread;
friend class RGWReshard;
friend class RGWBucketReshard;
friend class RGWBucketReshardLock;
friend class BucketIndexLockGuard;
friend class rgw::sal::MPRadosSerializer;
friend class rgw::sal::LCRadosSerializer;
friend class rgw::sal::RadosStore;
/** Open the pool used as root for this gateway */
int open_root_pool_ctx(const DoutPrefixProvider *dpp);
int open_gc_pool_ctx(const DoutPrefixProvider *dpp);
int open_lc_pool_ctx(const DoutPrefixProvider *dpp);
int open_objexp_pool_ctx(const DoutPrefixProvider *dpp);
int open_reshard_pool_ctx(const DoutPrefixProvider *dpp);
int open_notif_pool_ctx(const DoutPrefixProvider *dpp);
int open_pool_ctx(const DoutPrefixProvider *dpp, const rgw_pool& pool, librados::IoCtx& io_ctx,
bool mostly_omap, bool bulk);
ceph::mutex lock{ceph::make_mutex("rados_timer_lock")};
SafeTimer* timer{nullptr};
rgw::sal::RadosStore* driver{nullptr};
RGWGC* gc{nullptr};
RGWLC* lc{nullptr};
RGWObjectExpirer* obj_expirer{nullptr};
bool use_gc_thread{false};
bool use_lc_thread{false};
bool quota_threads{false};
bool run_sync_thread{false};
bool run_reshard_thread{false};
bool run_notification_thread{false};
RGWMetaNotifier* meta_notifier{nullptr};
RGWDataNotifier* data_notifier{nullptr};
RGWMetaSyncProcessorThread* meta_sync_processor_thread{nullptr};
RGWSyncTraceManager* sync_tracer{nullptr};
std::map<rgw_zone_id, RGWDataSyncProcessorThread *> data_sync_processor_threads;
boost::optional<rgw::BucketTrimManager> bucket_trim;
RGWSyncLogTrimThread* sync_log_trimmer{nullptr};
ceph::mutex meta_sync_thread_lock{ceph::make_mutex("meta_sync_thread_lock")};
ceph::mutex data_sync_thread_lock{ceph::make_mutex("data_sync_thread_lock")};
librados::IoCtx root_pool_ctx; // .rgw
double inject_notify_timeout_probability{0.0};
unsigned max_notify_retries{0};
friend class RGWWatcher;
ceph::mutex bucket_id_lock{ceph::make_mutex("rados_bucket_id")};
// This field represents the number of bucket index object shards
uint32_t bucket_index_max_shards{0};
std::string get_cluster_fsid(const DoutPrefixProvider *dpp, optional_yield y);
int get_obj_head_ref(const DoutPrefixProvider *dpp, const rgw_placement_rule& target_placement_rule, const rgw_obj& obj, rgw_rados_ref *ref);
int get_obj_head_ref(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_rados_ref *ref);
int get_system_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref);
uint64_t max_bucket_id{0};
int get_olh_target_state(const DoutPrefixProvider *dpp, RGWObjectCtx& rctx,
RGWBucketInfo& bucket_info, const rgw_obj& obj,
RGWObjState *olh_state, RGWObjState **target_state,
RGWObjManifest **target_manifest, optional_yield y);
int get_obj_state_impl(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWObjState **state, RGWObjManifest** manifest,
bool follow_olh, optional_yield y, bool assume_noent = false);
int append_atomic_test(const DoutPrefixProvider *dpp, RGWObjectCtx* rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj,
librados::ObjectOperation& op, RGWObjState **state,
RGWObjManifest** pmanifest, optional_yield y);
int update_placement_map();
int store_bucket_info(RGWBucketInfo& info, std::map<std::string, bufferlist> *pattrs, RGWObjVersionTracker *objv_tracker, bool exclusive);
void remove_rgw_head_obj(librados::ObjectWriteOperation& op);
void cls_obj_check_prefix_exist(librados::ObjectOperation& op, const std::string& prefix, bool fail_if_exist);
void cls_obj_check_mtime(librados::ObjectOperation& op, const real_time& mtime, bool high_precision_time, RGWCheckMTimeType type);
protected:
CephContext* cct{nullptr};
librados::Rados rados;
using RGWChainedCacheImpl_bucket_info_entry = RGWChainedCacheImpl<bucket_info_entry>;
RGWChainedCacheImpl_bucket_info_entry* binfo_cache{nullptr};
tombstone_cache_t* obj_tombstone_cache{nullptr};
using RGWChainedCacheImpl_bucket_topics_entry = RGWChainedCacheImpl<pubsub_bucket_topics_entry>;
RGWChainedCacheImpl_bucket_topics_entry* topic_cache{nullptr};
librados::IoCtx gc_pool_ctx; // .rgw.gc
librados::IoCtx lc_pool_ctx; // .rgw.lc
librados::IoCtx objexp_pool_ctx;
librados::IoCtx reshard_pool_ctx;
librados::IoCtx notif_pool_ctx; // .rgw.notif
bool pools_initialized{false};
RGWQuotaHandler* quota_handler{nullptr};
RGWCoroutinesManagerRegistry* cr_registry{nullptr};
RGWSyncModuleInstanceRef sync_module;
bool writeable_zone{false};
RGWIndexCompletionManager *index_completion_manager{nullptr};
bool use_cache{false};
bool use_gc{true};
bool use_datacache{false};
int get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx *ioctx);
public:
RGWRados() = default;
RGWRados& set_use_cache(bool status) {
use_cache = status;
return *this;
}
RGWRados& set_use_gc(bool status) {
use_gc = status;
return *this;
}
RGWRados& set_use_datacache(bool status) {
use_datacache = status;
return *this;
}
bool get_use_datacache() {
return use_datacache;
}
RGWLC *get_lc() {
return lc;
}
RGWGC *get_gc() {
return gc;
}
RGWRados& set_run_gc_thread(bool _use_gc_thread) {
use_gc_thread = _use_gc_thread;
return *this;
}
RGWRados& set_run_lc_thread(bool _use_lc_thread) {
use_lc_thread = _use_lc_thread;
return *this;
}
RGWRados& set_run_quota_threads(bool _run_quota_threads) {
quota_threads = _run_quota_threads;
return *this;
}
RGWRados& set_run_sync_thread(bool _run_sync_thread) {
run_sync_thread = _run_sync_thread;
return *this;
}
RGWRados& set_run_reshard_thread(bool _run_reshard_thread) {
run_reshard_thread = _run_reshard_thread;
return *this;
}
RGWRados& set_run_notification_thread(bool _run_notification_thread) {
run_notification_thread = _run_notification_thread;
return *this;
}
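  // These setters return *this so configuration can be chained before init.
  // Illustrative sketch only; the exact init sequence is driven by the
  // caller (e.g. rgw::sal::RadosStore), and `cct`, `dpp` and `y` are assumed
  // to come from the surrounding context:
  //
  //   RGWRados rados;
  //   rados.set_use_cache(true)
  //        .set_run_gc_thread(true)
  //        .set_run_lc_thread(false);
  //   int r = rados.init_begin(cct, dpp);
  //   if (r == 0) r = rados.init_complete(dpp, y);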
librados::IoCtx* get_lc_pool_ctx() {
return &lc_pool_ctx;
}
librados::IoCtx& get_notif_pool_ctx() {
return notif_pool_ctx;
}
void set_context(CephContext *_cct) {
cct = _cct;
}
void set_store(rgw::sal::RadosStore* _driver) {
driver = _driver;
}
RGWServices svc;
RGWCtl ctl;
RGWCtl* const pctl{&ctl};
/**
   * AmazonS3 errors contain a HostId string, but it is an opaque base64 blob;
   * we try to be more transparent. This has a wrapper so we can update it
   * when the zonegroup/zone are changed.
*/
std::string host_id;
RGWReshard* reshard{nullptr};
std::shared_ptr<RGWReshardWait> reshard_wait;
virtual ~RGWRados() = default;
tombstone_cache_t *get_tombstone_cache() {
return obj_tombstone_cache;
}
const RGWSyncModuleInstanceRef& get_sync_module() {
return sync_module;
}
RGWSyncTraceManager *get_sync_tracer() {
return sync_tracer;
}
int get_required_alignment(const DoutPrefixProvider *dpp, const rgw_pool& pool, uint64_t *alignment);
void get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t *max_size);
int get_max_chunk_size(const rgw_pool& pool, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment = nullptr);
int get_max_chunk_size(const rgw_placement_rule& placement_rule, const rgw_obj& obj, uint64_t *max_chunk_size, const DoutPrefixProvider *dpp, uint64_t *palignment = nullptr);
uint32_t get_max_bucket_shards() {
return RGWSI_BucketIndex_RADOS::shards_max();
}
int get_raw_obj_ref(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, rgw_rados_ref *ref);
int list_raw_objects_init(const DoutPrefixProvider *dpp, const rgw_pool& pool, const std::string& marker, RGWListRawObjsCtx *ctx);
int list_raw_objects_next(const DoutPrefixProvider *dpp, const std::string& prefix_filter, int max,
RGWListRawObjsCtx& ctx, std::list<std::string>& oids,
bool *is_truncated);
int list_raw_objects(const DoutPrefixProvider *dpp, const rgw_pool& pool, const std::string& prefix_filter, int max,
RGWListRawObjsCtx& ctx, std::list<std::string>& oids,
bool *is_truncated);
std::string list_raw_objs_get_cursor(RGWListRawObjsCtx& ctx);
CephContext *ctx() { return cct; }
/** do all necessary setup of the storage device */
int init_begin(CephContext *_cct, const DoutPrefixProvider *dpp) {
set_context(_cct);
return init_begin(dpp);
}
/** Initialize the RADOS instance and prepare to do other ops */
int init_svc(bool raw, const DoutPrefixProvider *dpp);
int init_ctl(const DoutPrefixProvider *dpp);
virtual int init_rados();
int init_begin(const DoutPrefixProvider *dpp);
int init_complete(const DoutPrefixProvider *dpp, optional_yield y);
void finalize();
int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type, const std::map<std::string, std::string>& meta);
int update_service_map(const DoutPrefixProvider *dpp, std::map<std::string, std::string>&& status);
/// list logs
int log_list_init(const DoutPrefixProvider *dpp, const std::string& prefix, RGWAccessHandle *handle);
int log_list_next(RGWAccessHandle handle, std::string *name);
/// remove log
int log_remove(const DoutPrefixProvider *dpp, const std::string& name);
/// show log
int log_show_init(const DoutPrefixProvider *dpp, const std::string& name, RGWAccessHandle *handle);
int log_show_next(const DoutPrefixProvider *dpp, RGWAccessHandle handle, rgw_log_entry *entry);
// log bandwidth info
int log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y);
int read_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const std::string& bucket_name, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool *is_truncated, RGWUsageIter& read_iter, std::map<rgw_user_bucket,
rgw_usage_log_entry>& usage);
int trim_usage(const DoutPrefixProvider *dpp, const rgw_user& user, const std::string& bucket_name, uint64_t start_epoch, uint64_t end_epoch, optional_yield y);
int clear_usage(const DoutPrefixProvider *dpp, optional_yield y);
int create_pool(const DoutPrefixProvider *dpp, const rgw_pool& pool);
void create_bucket_id(std::string *bucket_id);
bool get_obj_data_pool(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_pool *pool);
bool obj_to_raw(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj *raw_obj);
int create_bucket(const RGWUserInfo& owner, rgw_bucket& bucket,
const std::string& zonegroup_id,
const rgw_placement_rule& placement_rule,
const std::string& swift_ver_location,
const RGWQuotaInfo * pquota_info,
std::map<std::string,bufferlist>& attrs,
RGWBucketInfo& bucket_info,
obj_version *pobjv,
obj_version *pep_objv,
ceph::real_time creation_time,
rgw_bucket *master_bucket,
uint32_t *master_num_shards,
optional_yield y,
const DoutPrefixProvider *dpp,
bool exclusive = true);
RGWCoroutinesManagerRegistry *get_cr_registry() { return cr_registry; }
struct BucketShard {
RGWRados *store;
rgw_bucket bucket;
int shard_id;
RGWSI_RADOS::Obj bucket_obj;
explicit BucketShard(RGWRados *_store) : store(_store), shard_id(-1) {}
int init(const rgw_bucket& _bucket, const rgw_obj& obj,
RGWBucketInfo* out, const DoutPrefixProvider *dpp, optional_yield y);
int init(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y);
int init(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& index, int sid, optional_yield y);
friend std::ostream& operator<<(std::ostream& out, const BucketShard& bs) {
out << "BucketShard:{ bucket=" << bs.bucket <<
", shard_id=" << bs.shard_id <<
        ", bucket_obj=" << bs.bucket_obj << "}";
return out;
}
};
class Object {
RGWRados *store;
RGWBucketInfo bucket_info;
RGWObjectCtx& ctx;
rgw_obj obj;
BucketShard bs;
RGWObjState *state;
RGWObjManifest *manifest;
bool versioning_disabled;
bool bs_initialized;
const rgw_placement_rule *pmeta_placement_rule;
protected:
int get_state(const DoutPrefixProvider *dpp, RGWObjState **pstate, RGWObjManifest **pmanifest, bool follow_olh, optional_yield y, bool assume_noent = false);
void invalidate_state();
int prepare_atomic_modification(const DoutPrefixProvider *dpp, librados::ObjectWriteOperation& op, bool reset_obj, const std::string *ptag,
const char *ifmatch, const char *ifnomatch, bool removal_op, bool modify_tail, optional_yield y);
int complete_atomic_modification(const DoutPrefixProvider *dpp, optional_yield y);
public:
Object(RGWRados *_store, const RGWBucketInfo& _bucket_info, RGWObjectCtx& _ctx, const rgw_obj& _obj) : store(_store), bucket_info(_bucket_info),
ctx(_ctx), obj(_obj), bs(store),
state(NULL), manifest(nullptr), versioning_disabled(false),
bs_initialized(false),
pmeta_placement_rule(nullptr) {}
RGWRados *get_store() { return store; }
rgw_obj& get_obj() { return obj; }
RGWObjectCtx& get_ctx() { return ctx; }
RGWBucketInfo& get_bucket_info() { return bucket_info; }
int get_manifest(const DoutPrefixProvider *dpp, RGWObjManifest **pmanifest, optional_yield y);
int get_bucket_shard(BucketShard **pbs, const DoutPrefixProvider *dpp, optional_yield y) {
if (!bs_initialized) {
int r =
bs.init(bucket_info.bucket, obj, nullptr /* no RGWBucketInfo */, dpp, y);
if (r < 0) {
return r;
}
bs_initialized = true;
}
*pbs = &bs;
return 0;
}
void set_versioning_disabled(bool status) {
versioning_disabled = status;
}
bool versioning_enabled() {
return (!versioning_disabled && bucket_info.versioning_enabled());
}
void set_meta_placement_rule(const rgw_placement_rule *p) {
pmeta_placement_rule = p;
}
const rgw_placement_rule& get_meta_placement_rule() {
return pmeta_placement_rule ? *pmeta_placement_rule : bucket_info.placement_rule;
}
struct Read {
RGWRados::Object *source;
struct GetObjState {
std::map<rgw_pool, librados::IoCtx> io_ctxs;
rgw_pool cur_pool;
librados::IoCtx *cur_ioctx{nullptr};
rgw_obj obj;
rgw_raw_obj head_obj;
} state;
struct ConditionParams {
const ceph::real_time *mod_ptr;
const ceph::real_time *unmod_ptr;
bool high_precision_time;
uint32_t mod_zone_id;
uint64_t mod_pg_ver;
const char *if_match;
const char *if_nomatch;
ConditionParams() :
mod_ptr(NULL), unmod_ptr(NULL), high_precision_time(false), mod_zone_id(0), mod_pg_ver(0),
if_match(NULL), if_nomatch(NULL) {}
} conds;
struct Params {
ceph::real_time *lastmod;
uint64_t *obj_size;
std::map<std::string, bufferlist> *attrs;
rgw_obj *target_obj;
Params() : lastmod(nullptr), obj_size(nullptr), attrs(nullptr),
target_obj(nullptr) {}
} params;
explicit Read(RGWRados::Object *_source) : source(_source) {}
int prepare(optional_yield y, const DoutPrefixProvider *dpp);
static int range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end);
int read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider *dpp);
int iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb, optional_yield y);
int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest, optional_yield y);
};
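    // Typical read sequence (a sketch; error handling elided, and
    // `bucket_info`, `obj_ctx`, `obj`, `attrs`, `size` and `bl` are assumed
    // from the surrounding context). prepare() resolves the head object and
    // fills in params/conds, after which read() or iterate() fetch the data:
    //
    //   RGWRados::Object op_target(store, bucket_info, obj_ctx, obj);
    //   RGWRados::Object::Read read_op(&op_target);
    //   read_op.params.attrs = &attrs;
    //   read_op.params.obj_size = &size;
    //   int r = read_op.prepare(y, dpp);
    //   if (r >= 0) r = read_op.read(0, size - 1, bl, y, dpp);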
struct Write {
RGWRados::Object *target;
struct MetaParams {
ceph::real_time *mtime;
std::map<std::string, bufferlist>* rmattrs;
const bufferlist *data;
RGWObjManifest *manifest;
const std::string *ptag;
std::list<rgw_obj_index_key> *remove_objs;
ceph::real_time set_mtime;
rgw_user owner;
RGWObjCategory category;
int flags;
const char *if_match;
const char *if_nomatch;
std::optional<uint64_t> olh_epoch;
ceph::real_time delete_at;
bool canceled;
const std::string *user_data;
rgw_zone_set *zones_trace;
bool modify_tail;
bool completeMultipart;
bool appendable;
MetaParams() : mtime(NULL), rmattrs(NULL), data(NULL), manifest(NULL), ptag(NULL),
remove_objs(NULL), category(RGWObjCategory::Main), flags(0),
if_match(NULL), if_nomatch(NULL), canceled(false), user_data(nullptr), zones_trace(nullptr),
modify_tail(false), completeMultipart(false), appendable(false) {}
} meta;
explicit Write(RGWRados::Object *_target) : target(_target) {}
int _do_write_meta(const DoutPrefixProvider *dpp,
uint64_t size, uint64_t accounted_size,
std::map<std::string, bufferlist>& attrs,
bool modify_tail, bool assume_noent,
void *index_op, optional_yield y);
int write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size,
std::map<std::string, bufferlist>& attrs, optional_yield y);
int write_data(const char *data, uint64_t ofs, uint64_t len, bool exclusive);
const req_state* get_req_state() {
return nullptr; /* XXX dang Only used by LTTng, and it handles null anyway */
}
};
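    // Minimal write sketch (assumes an `op_target` set up as in the Read
    // example above; error handling elided). meta.canceled is set when a
    // racing write invalidates this one:
    //
    //   RGWRados::Object::Write write_op(&op_target);
    //   write_op.meta.owner = owner;
    //   write_op.meta.category = RGWObjCategory::Main;
    //   int r = write_op.write_meta(dpp, size, accounted_size, attrs, y);
    //   bool raced = write_op.meta.canceled;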
struct Delete {
RGWRados::Object *target;
struct DeleteParams {
rgw_user bucket_owner;
int versioning_status; // versioning flags defined in enum RGWBucketFlags
ACLOwner obj_owner; // needed for creation of deletion marker
uint64_t olh_epoch;
std::string marker_version_id;
uint32_t bilog_flags;
std::list<rgw_obj_index_key> *remove_objs;
ceph::real_time expiration_time;
ceph::real_time unmod_since;
ceph::real_time mtime; /* for setting delete marker mtime */
bool high_precision_time;
rgw_zone_set *zones_trace;
bool abortmp;
uint64_t parts_accounted_size;
DeleteParams() : versioning_status(0), olh_epoch(0), bilog_flags(0), remove_objs(NULL), high_precision_time(false), zones_trace(nullptr), abortmp(false), parts_accounted_size(0) {}
} params;
struct DeleteResult {
bool delete_marker;
std::string version_id;
DeleteResult() : delete_marker(false) {}
} result;
explicit Delete(RGWRados::Object *_target) : target(_target) {}
int delete_obj(optional_yield y, const DoutPrefixProvider *dpp);
};
struct Stat {
RGWRados::Object *source;
struct Result {
rgw_obj obj;
std::optional<RGWObjManifest> manifest;
uint64_t size{0};
struct timespec mtime {};
std::map<std::string, bufferlist> attrs;
} result;
struct State {
librados::IoCtx io_ctx;
librados::AioCompletion *completion;
int ret;
State() : completion(NULL), ret(0) {}
} state;
explicit Stat(RGWRados::Object *_source) : source(_source) {}
int stat_async(const DoutPrefixProvider *dpp);
int wait(const DoutPrefixProvider *dpp);
int stat();
private:
int finish(const DoutPrefixProvider *dpp);
};
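    // Stat is built for batching: issue stat_async() across many objects,
    // then reap each with wait(). A sketch, assuming a container of Stat ops:
    //
    //   for (auto& st : stat_ops) { st.stat_async(dpp); }
    //   for (auto& st : stat_ops) {
    //     if (st.wait(dpp) == 0) { /* use st.result.size, st.result.mtime */ }
    //   }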
};
class Bucket {
RGWRados *store;
RGWBucketInfo bucket_info;
rgw_bucket& bucket;
int shard_id;
public:
Bucket(RGWRados *_store, const RGWBucketInfo& _bucket_info) : store(_store), bucket_info(_bucket_info), bucket(bucket_info.bucket),
shard_id(RGW_NO_SHARD) {}
RGWRados *get_store() { return store; }
rgw_bucket& get_bucket() { return bucket; }
RGWBucketInfo& get_bucket_info() { return bucket_info; }
int update_bucket_id(const std::string& new_bucket_id, const DoutPrefixProvider *dpp, optional_yield y);
int get_shard_id() { return shard_id; }
void set_shard_id(int id) {
shard_id = id;
}
class UpdateIndex {
RGWRados::Bucket *target;
std::string optag;
rgw_obj obj;
uint16_t bilog_flags{0};
BucketShard bs;
bool bs_initialized{false};
bool blind;
bool prepared{false};
rgw_zone_set *zones_trace{nullptr};
int init_bs(const DoutPrefixProvider *dpp, optional_yield y) {
int r =
bs.init(target->get_bucket(), obj, &target->bucket_info, dpp, y);
if (r < 0) {
return r;
}
bs_initialized = true;
return 0;
}
void invalidate_bs() {
bs_initialized = false;
}
int guard_reshard(const DoutPrefixProvider *dpp, const rgw_obj& obj_instance, BucketShard **pbs, std::function<int(BucketShard *)> call, optional_yield y);
public:
UpdateIndex(RGWRados::Bucket *_target, const rgw_obj& _obj) : target(_target), obj(_obj),
bs(target->get_store()) {
blind = (target->get_bucket_info().layout.current_index.layout.type == rgw::BucketIndexType::Indexless);
}
int get_bucket_shard(BucketShard **pbs, const DoutPrefixProvider *dpp, optional_yield y) {
if (!bs_initialized) {
int r = init_bs(dpp, y);
if (r < 0) {
return r;
}
}
*pbs = &bs;
return 0;
}
void set_bilog_flags(uint16_t flags) {
bilog_flags = flags;
}
void set_zones_trace(rgw_zone_set *_zones_trace) {
zones_trace = _zones_trace;
}
int prepare(const DoutPrefixProvider *dpp, RGWModifyOp, const std::string *write_tag, optional_yield y);
int complete(const DoutPrefixProvider *dpp, int64_t poolid, uint64_t epoch, uint64_t size,
uint64_t accounted_size, ceph::real_time& ut,
const std::string& etag, const std::string& content_type,
const std::string& storage_class,
bufferlist *acl_bl, RGWObjCategory category,
std::list<rgw_obj_index_key> *remove_objs,
optional_yield y,
const std::string *user_data = nullptr,
bool appendable = false);
int complete_del(const DoutPrefixProvider *dpp,
int64_t poolid, uint64_t epoch,
ceph::real_time& removed_mtime, /* mtime of removed object */
std::list<rgw_obj_index_key> *remove_objs,
optional_yield y);
int cancel(const DoutPrefixProvider *dpp,
std::list<rgw_obj_index_key> *remove_objs,
optional_yield y);
const std::string *get_optag() { return &optag; }
bool is_prepared() { return prepared; }
}; // class UpdateIndex
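    // UpdateIndex implements a prepare/complete-or-cancel protocol around the
    // head-object write so the bucket index never points at a half-written
    // object. A sketch (arguments such as poolid/epoch/etag come from the
    // completed rados write and are assumed here):
    //
    //   RGWRados::Bucket::UpdateIndex index_op(&bucket, obj);
    //   int r = index_op.prepare(dpp, CLS_RGW_OP_ADD, &write_tag, y);
    //   // ... perform the object write ...
    //   if (r >= 0)
    //     r = index_op.complete(dpp, poolid, epoch, size, accounted_size,
    //                           ut, etag, content_type, storage_class,
    //                           &acl_bl, RGWObjCategory::Main, remove_objs, y);
    //   else
    //     index_op.cancel(dpp, remove_objs, y);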
class List {
protected:
// absolute maximum number of objects that
// list_objects_(un)ordered can return
static constexpr int64_t bucket_list_objects_absolute_max = 25000;
RGWRados::Bucket *target;
rgw_obj_key next_marker;
int list_objects_ordered(const DoutPrefixProvider *dpp,
int64_t max,
std::vector<rgw_bucket_dir_entry> *result,
std::map<std::string, bool> *common_prefixes,
bool *is_truncated,
optional_yield y);
int list_objects_unordered(const DoutPrefixProvider *dpp,
int64_t max,
std::vector<rgw_bucket_dir_entry> *result,
std::map<std::string, bool> *common_prefixes,
bool *is_truncated,
optional_yield y);
public:
struct Params {
std::string prefix;
std::string delim;
rgw_obj_key marker;
rgw_obj_key end_marker;
std::string ns;
bool enforce_ns;
RGWAccessListFilter* access_list_filter;
RGWBucketListNameFilter force_check_filter;
bool list_versions;
bool allow_unordered;
Params() :
enforce_ns(true),
access_list_filter(nullptr),
list_versions(false),
allow_unordered(false)
{}
} params;
explicit List(RGWRados::Bucket *_target) : target(_target) {}
int list_objects(const DoutPrefixProvider *dpp, int64_t max,
std::vector<rgw_bucket_dir_entry> *result,
std::map<std::string, bool> *common_prefixes,
bool *is_truncated,
optional_yield y) {
if (params.allow_unordered) {
return list_objects_unordered(dpp, max, result, common_prefixes,
is_truncated, y);
} else {
return list_objects_ordered(dpp, max, result, common_prefixes,
is_truncated, y);
}
}
rgw_obj_key& get_next_marker() {
return next_marker;
}
}; // class List
}; // class Bucket
int on_last_entry_in_listing(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const std::string& obj_prefix,
const std::string& obj_delim,
std::function<int(const rgw_bucket_dir_entry&)> handler, optional_yield y);
bool swift_versioning_enabled(const RGWBucketInfo& bucket_info) const;
int swift_versioning_copy(RGWObjectCtx& obj_ctx, /* in/out */
const rgw_user& user, /* in */
RGWBucketInfo& bucket_info, /* in */
const rgw_obj& obj, /* in */
const DoutPrefixProvider *dpp, /* in */
optional_yield y); /* in */
int swift_versioning_restore(RGWObjectCtx& obj_ctx, /* in/out */
const rgw_user& user, /* in */
RGWBucketInfo& bucket_info, /* in */
rgw_obj& obj, /* in/out */
bool& restored, /* out */
const DoutPrefixProvider *dpp, optional_yield y); /* in */
int copy_obj_to_remote_dest(const DoutPrefixProvider *dpp,
RGWObjState *astate,
std::map<std::string, bufferlist>& src_attrs,
RGWRados::Object::Read& read_op,
const rgw_user& user_id,
const rgw_obj& dest_obj,
ceph::real_time *mtime, optional_yield y);
enum AttrsMod {
ATTRSMOD_NONE = 0,
ATTRSMOD_REPLACE = 1,
ATTRSMOD_MERGE = 2
};
D3nDataCache* d3n_data_cache{nullptr};
int rewrite_obj(RGWBucketInfo& dest_bucket_info, const rgw_obj& obj, const DoutPrefixProvider *dpp, optional_yield y);
int reindex_obj(const RGWBucketInfo& dest_bucket_info,
const rgw_obj& obj,
const DoutPrefixProvider* dpp,
optional_yield y);
int stat_remote_obj(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& src_obj,
const RGWBucketInfo *src_bucket_info,
real_time *src_mtime,
uint64_t *psize,
const real_time *mod_ptr,
const real_time *unmod_ptr,
bool high_precision_time,
const char *if_match,
const char *if_nomatch,
std::map<std::string, bufferlist> *pattrs,
std::map<std::string, std::string> *pheaders,
std::string *version_id,
std::string *ptag,
std::string *petag, optional_yield y);
int fetch_remote_obj(RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& dest_obj,
const rgw_obj& src_obj,
RGWBucketInfo& dest_bucket_info,
RGWBucketInfo *src_bucket_info,
std::optional<rgw_placement_rule> dest_placement,
ceph::real_time *src_mtime,
ceph::real_time *mtime,
const ceph::real_time *mod_ptr,
const ceph::real_time *unmod_ptr,
bool high_precision_time,
const char *if_match,
const char *if_nomatch,
AttrsMod attrs_mod,
bool copy_if_newer,
rgw::sal::Attrs& attrs,
RGWObjCategory category,
std::optional<uint64_t> olh_epoch,
ceph::real_time delete_at,
std::string *ptag,
std::string *petag,
void (*progress_cb)(off_t, void *),
void *progress_data,
const DoutPrefixProvider *dpp,
RGWFetchObjFilter *filter, optional_yield y,
bool stat_follow_olh,
const rgw_obj& stat_dest_obj,
const rgw_zone_set_entry& source_trace_entry,
rgw_zone_set *zones_trace = nullptr,
                       std::optional<uint64_t>* bytes_transferred = nullptr);
/**
* Copy an object.
* dest_obj: the object to copy into
* src_obj: the object to copy from
* attrs: usage depends on attrs_mod parameter
* attrs_mod: the modification mode of the attrs, may have the following values:
* ATTRSMOD_NONE - the attributes of the source object will be
* copied without modifications, attrs parameter is ignored;
* ATTRSMOD_REPLACE - new object will have the attributes provided by attrs
* parameter, source object attributes are not copied;
* ATTRSMOD_MERGE - any conflicting meta keys on the source object's attributes
* are overwritten by values contained in attrs parameter.
* Returns: 0 on success, -ERR# otherwise.
*/
int copy_obj(RGWObjectCtx& obj_ctx,
const rgw_user& user_id,
req_info *info,
const rgw_zone_id& source_zone,
const rgw_obj& dest_obj,
const rgw_obj& src_obj,
RGWBucketInfo& dest_bucket_info,
RGWBucketInfo& src_bucket_info,
const rgw_placement_rule& dest_placement,
ceph::real_time *src_mtime,
ceph::real_time *mtime,
const ceph::real_time *mod_ptr,
const ceph::real_time *unmod_ptr,
bool high_precision_time,
const char *if_match,
const char *if_nomatch,
AttrsMod attrs_mod,
bool copy_if_newer,
std::map<std::string, bufferlist>& attrs,
RGWObjCategory category,
uint64_t olh_epoch,
ceph::real_time delete_at,
std::string *version_id,
std::string *ptag,
std::string *petag,
void (*progress_cb)(off_t, void *),
void *progress_data,
const DoutPrefixProvider *dpp,
optional_yield y);
int copy_obj_data(RGWObjectCtx& obj_ctx,
RGWBucketInfo& dest_bucket_info,
const rgw_placement_rule& dest_placement,
RGWRados::Object::Read& read_op, off_t end,
const rgw_obj& dest_obj,
ceph::real_time *mtime,
ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
uint64_t olh_epoch,
ceph::real_time delete_at,
std::string *petag,
const DoutPrefixProvider *dpp,
optional_yield y);
int transition_obj(RGWObjectCtx& obj_ctx,
RGWBucketInfo& bucket_info,
const rgw_obj& obj,
const rgw_placement_rule& placement_rule,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider *dpp,
optional_yield y);
int check_bucket_empty(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, optional_yield y);
/**
* Delete a bucket.
   * bucket_info: the bucket to delete
* Returns 0 on success, -ERR# otherwise.
*/
int delete_bucket(RGWBucketInfo& bucket_info, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp, bool check_empty = true);
void wakeup_meta_sync_shards(std::set<int>& shard_ids);
void wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& entries);
RGWMetaSyncStatusManager* get_meta_sync_manager();
RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone);
int set_bucket_owner(rgw_bucket& bucket, ACLOwner& owner, const DoutPrefixProvider *dpp, optional_yield y);
int set_buckets_enabled(std::vector<rgw_bucket>& buckets, bool enabled, const DoutPrefixProvider *dpp, optional_yield y);
int bucket_suspended(const DoutPrefixProvider *dpp, rgw_bucket& bucket, bool *suspended, optional_yield y);
/** Delete an object.*/
int delete_obj(const DoutPrefixProvider *dpp,
RGWObjectCtx& obj_ctx,
const RGWBucketInfo& bucket_info,
const rgw_obj& obj,
int versioning_status, optional_yield y, // versioning flags defined in enum RGWBucketFlags
uint16_t bilog_flags = 0,
const ceph::real_time& expiration_time = ceph::real_time(),
rgw_zone_set *zones_trace = nullptr);
int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, optional_yield y);
/** Remove an object from the bucket index */
int delete_obj_index(const rgw_obj& obj, ceph::real_time mtime,
const DoutPrefixProvider *dpp, optional_yield y);
/**
* Set an attr on an object.
* bucket: name of the bucket holding the object
* obj: name of the object to set the attr on
* name: the attr to set
* bl: the contents of the attr
* Returns: 0 on success, -ERR# otherwise.
*/
int set_attr(const DoutPrefixProvider *dpp, RGWObjectCtx* ctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, const char *name, bufferlist& bl, optional_yield y);
int set_attrs(const DoutPrefixProvider *dpp, RGWObjectCtx* ctx, RGWBucketInfo& bucket_info, const rgw_obj& obj,
std::map<std::string, bufferlist>& attrs,
std::map<std::string, bufferlist>* rmattrs,
optional_yield y);
int get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWObjState **state, RGWObjManifest** manifest,
bool follow_olh, optional_yield y, bool assume_noent = false);
int get_obj_state(const DoutPrefixProvider *dpp, RGWObjectCtx *rctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWObjState **state, RGWObjManifest** manifest, optional_yield y) {
return get_obj_state(dpp, rctx, bucket_info, obj, state, manifest, true, y);
}
using iterate_obj_cb = int (*)(const DoutPrefixProvider*, const rgw_raw_obj&, off_t, off_t,
off_t, bool, RGWObjState*, void*);
int iterate_obj(const DoutPrefixProvider *dpp, RGWObjectCtx& ctx, RGWBucketInfo& bucket_info,
const rgw_obj& obj, off_t ofs, off_t end,
uint64_t max_chunk_size, iterate_obj_cb cb, void *arg,
optional_yield y);
int append_atomic_test(const DoutPrefixProvider *dpp, const RGWObjState* astate, librados::ObjectOperation& op);
virtual int get_obj_iterate_cb(const DoutPrefixProvider *dpp,
const rgw_raw_obj& read_obj, off_t obj_ofs,
off_t read_ofs, off_t len, bool is_head_obj,
RGWObjState *astate, void *arg);
/**
* a simple object read without keeping state
*/
int raw_obj_stat(const DoutPrefixProvider *dpp,
rgw_raw_obj& obj, uint64_t *psize, ceph::real_time *pmtime, uint64_t *epoch,
std::map<std::string, bufferlist> *attrs, bufferlist *first_chunk,
RGWObjVersionTracker *objv_tracker, optional_yield y);
int obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::ObjectWriteOperation *op, optional_yield y);
int obj_operate(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::ObjectReadOperation *op, optional_yield y);
int guard_reshard(const DoutPrefixProvider *dpp,
BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
std::function<int(BucketShard *)> call, optional_yield y);
int block_while_resharding(RGWRados::BucketShard *bs,
const rgw_obj& obj_instance,
RGWBucketInfo& bucket_info,
optional_yield y,
const DoutPrefixProvider *dpp);
void bucket_index_guard_olh_op(const DoutPrefixProvider *dpp, RGWObjState& olh_state, librados::ObjectOperation& op);
int olh_init_modification(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, std::string *op_tag, optional_yield y);
int olh_init_modification_impl(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, std::string *op_tag, optional_yield y);
int bucket_index_link_olh(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info, RGWObjState& olh_state,
const rgw_obj& obj_instance, bool delete_marker,
const std::string& op_tag, struct rgw_bucket_dir_entry_meta *meta,
uint64_t olh_epoch,
ceph::real_time unmod_since, bool high_precision_time,
optional_yield y,
rgw_zone_set *zones_trace = nullptr,
bool log_data_change = false);
int bucket_index_unlink_instance(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw_obj& obj_instance,
const std::string& op_tag, const std::string& olh_tag,
uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace = nullptr);
int bucket_index_read_olh_log(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info, RGWObjState& state,
const rgw_obj& obj_instance, uint64_t ver_marker,
std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> > *log, bool *is_truncated, optional_yield y);
int bucket_index_trim_olh_log(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& obj_state, const rgw_obj& obj_instance, uint64_t ver, optional_yield y);
int bucket_index_clear_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& obj_instance, optional_yield y);
int apply_olh_log(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState& obj_state, RGWBucketInfo& bucket_info, const rgw_obj& obj,
bufferlist& obj_tag, std::map<uint64_t, std::vector<rgw_bucket_olh_log_entry> >& log,
uint64_t *plast_ver, optional_yield y, rgw_zone_set *zones_trace = nullptr);
int update_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWObjState *state, RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y, rgw_zone_set *zones_trace = nullptr);
int set_olh(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj, bool delete_marker, rgw_bucket_dir_entry_meta *meta,
uint64_t olh_epoch, ceph::real_time unmod_since, bool high_precision_time,
optional_yield y, rgw_zone_set *zones_trace = nullptr, bool log_data_change = false);
int repair_olh(const DoutPrefixProvider *dpp, RGWObjState* state, const RGWBucketInfo& bucket_info,
const rgw_obj& obj, optional_yield y);
int unlink_obj_instance(const DoutPrefixProvider *dpp, RGWObjectCtx& obj_ctx, RGWBucketInfo& bucket_info, const rgw_obj& target_obj,
uint64_t olh_epoch, optional_yield y, rgw_zone_set *zones_trace = nullptr);
void check_pending_olh_entries(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist>& pending_entries, std::map<std::string, bufferlist> *rm_pending_entries);
int remove_olh_pending_entries(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjState& state, const rgw_obj& olh_obj, std::map<std::string, bufferlist>& pending_attrs, optional_yield y);
int follow_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, RGWObjectCtx& ctx, RGWObjState *state, const rgw_obj& olh_obj, rgw_obj *target, optional_yield y);
int get_olh(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw_obj& obj, RGWOLHInfo *olh, optional_yield y);
void gen_rand_obj_instance_name(rgw_obj_key *target_key);
void gen_rand_obj_instance_name(rgw_obj *target);
int update_containers_stats(std::map<std::string, RGWBucketEnt>& m, const DoutPrefixProvider *dpp, optional_yield y);
int append_async(const DoutPrefixProvider *dpp, rgw_raw_obj& obj, size_t size, bufferlist& bl);
public:
void set_atomic(void *ctx, const rgw_obj& obj) {
RGWObjectCtx *rctx = static_cast<RGWObjectCtx *>(ctx);
rctx->set_atomic(obj);
}
void set_prefetch_data(void *ctx, const rgw_obj& obj) {
RGWObjectCtx *rctx = static_cast<RGWObjectCtx *>(ctx);
rctx->set_prefetch_data(obj);
}
void set_compressed(void *ctx, const rgw_obj& obj) {
RGWObjectCtx *rctx = static_cast<RGWObjectCtx *>(ctx);
rctx->set_compressed(obj);
}
int decode_policy(const DoutPrefixProvider *dpp, bufferlist& bl, ACLOwner *owner);
int get_bucket_stats(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int shard_id, std::string *bucket_ver, std::string *master_ver,
std::map<RGWObjCategory, RGWStorageStats>& stats, std::string *max_marker, bool* syncstopped = NULL);
int get_bucket_stats_async(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout, int shard_id, RGWGetBucketStats_CB *cb);
int put_bucket_instance_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, std::map<std::string, bufferlist> *pattrs, const DoutPrefixProvider *dpp, optional_yield y);
/* xxx dang obj_ctx -> svc */
int get_bucket_instance_info(const std::string& meta_key, RGWBucketInfo& info, ceph::real_time *pmtime, std::map<std::string, bufferlist> *pattrs, optional_yield y, const DoutPrefixProvider *dpp);
int get_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, ceph::real_time *pmtime, std::map<std::string, bufferlist> *pattrs, optional_yield y, const DoutPrefixProvider *dpp);
static void make_bucket_entry_name(const std::string& tenant_name, const std::string& bucket_name, std::string& bucket_entry);
int get_bucket_info(RGWServices *svc,
const std::string& tenant_name, const std::string& bucket_name,
RGWBucketInfo& info,
ceph::real_time *pmtime, optional_yield y,
const DoutPrefixProvider *dpp, std::map<std::string, bufferlist> *pattrs = NULL);
RGWChainedCacheImpl_bucket_topics_entry *get_topic_cache() { return topic_cache; }
  // Returns 0 on successful refresh. Returns error code if there was
  // an error or the version stored on the OSD is the same as that
  // presented in the BucketInfo structure.
int try_refresh_bucket_info(RGWBucketInfo& info,
ceph::real_time *pmtime,
const DoutPrefixProvider *dpp, optional_yield y,
std::map<std::string, bufferlist> *pattrs = nullptr);
int put_linked_bucket_info(RGWBucketInfo& info, bool exclusive, ceph::real_time mtime, obj_version *pep_objv,
std::map<std::string, bufferlist> *pattrs, bool create_entry_point,
const DoutPrefixProvider *dpp, optional_yield y);
int cls_obj_prepare_op(const DoutPrefixProvider *dpp, BucketShard& bs, RGWModifyOp op, std::string& tag, rgw_obj& obj, uint16_t bilog_flags, optional_yield y, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_op(BucketShard& bs, const rgw_obj& obj, RGWModifyOp op, std::string& tag, int64_t pool, uint64_t epoch,
rgw_bucket_dir_entry& ent, RGWObjCategory category, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_add(BucketShard& bs, const rgw_obj& obj, std::string& tag, int64_t pool, uint64_t epoch, rgw_bucket_dir_entry& ent,
RGWObjCategory category, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_del(BucketShard& bs, std::string& tag, int64_t pool, uint64_t epoch, rgw_obj& obj,
ceph::real_time& removed_mtime, std::list<rgw_obj_index_key> *remove_objs, uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
int cls_obj_complete_cancel(BucketShard& bs, std::string& tag, rgw_obj& obj,
std::list<rgw_obj_index_key> *remove_objs,
uint16_t bilog_flags, rgw_zone_set *zones_trace = nullptr);
int cls_obj_set_bucket_tag_timeout(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, uint64_t timeout);
using ent_map_t =
boost::container::flat_map<std::string, rgw_bucket_dir_entry>;
int cls_bucket_list_ordered(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
const int shard_id,
const rgw_obj_index_key& start_after,
const std::string& prefix,
const std::string& delimiter,
const uint32_t num_entries,
const bool list_versions,
const uint16_t exp_factor, // 0 means ignore
ent_map_t& m,
bool* is_truncated,
bool* cls_filtered,
rgw_obj_index_key *last_entry,
optional_yield y,
RGWBucketListNameFilter force_check_filter = {});
int cls_bucket_list_unordered(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id,
const rgw_obj_index_key& start_after,
const std::string& prefix,
uint32_t num_entries,
bool list_versions,
std::vector<rgw_bucket_dir_entry>& ent_list,
bool *is_truncated,
rgw_obj_index_key *last_entry,
optional_yield y,
RGWBucketListNameFilter force_check_filter = {});
int cls_bucket_head(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id, std::vector<rgw_bucket_dir_header>& headers,
std::map<int, std::string> *bucket_instance_ids = NULL);
int cls_bucket_head_async(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id, RGWGetDirHeader_CB *ctx, int *num_aio);
int bi_get_instance(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_dir_entry *dirent, optional_yield y);
int bi_get_olh(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, rgw_bucket_olh_entry *olh, optional_yield y);
int bi_get(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, BIIndexType index_type, rgw_cls_bi_entry *entry, optional_yield y);
void bi_put(librados::ObjectWriteOperation& op, BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y);
int bi_put(BucketShard& bs, rgw_cls_bi_entry& entry, optional_yield y);
int bi_put(const DoutPrefixProvider *dpp, rgw_bucket& bucket, rgw_obj& obj, rgw_cls_bi_entry& entry, optional_yield y);
int bi_list(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
int shard_id,
const std::string& filter_obj,
const std::string& marker,
uint32_t max,
std::list<rgw_cls_bi_entry> *entries,
bool *is_truncated, optional_yield y);
int bi_list(BucketShard& bs, const std::string& filter_obj, const std::string& marker, uint32_t max, std::list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y);
int bi_list(const DoutPrefixProvider *dpp, rgw_bucket& bucket, const std::string& obj_name, const std::string& marker, uint32_t max,
std::list<rgw_cls_bi_entry> *entries, bool *is_truncated, optional_yield y);
int bi_remove(const DoutPrefixProvider *dpp, BucketShard& bs);
int cls_obj_usage_log_add(const DoutPrefixProvider *dpp, const std::string& oid, rgw_usage_log_info& info, optional_yield y);
int cls_obj_usage_log_read(const DoutPrefixProvider *dpp, const std::string& oid, const std::string& user, const std::string& bucket, uint64_t start_epoch,
uint64_t end_epoch, uint32_t max_entries, std::string& read_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage, bool *is_truncated);
int cls_obj_usage_log_trim(const DoutPrefixProvider *dpp, const std::string& oid, const std::string& user, const std::string& bucket, uint64_t start_epoch,
uint64_t end_epoch, optional_yield y);
int cls_obj_usage_log_clear(const DoutPrefixProvider *dpp, std::string& oid, optional_yield y);
int get_target_shard_id(const rgw::bucket_index_normal_layout& layout, const std::string& obj_key, int *shard_id);
int lock_exclusive(const rgw_pool& pool, const std::string& oid, ceph::timespan& duration, rgw_zone_id& zone_id, std::string& owner_id);
int unlock(const rgw_pool& pool, const std::string& oid, rgw_zone_id& zone_id, std::string& owner_id);
void update_gc_chain(const DoutPrefixProvider *dpp, rgw_obj head_obj, RGWObjManifest& manifest, cls_rgw_obj_chain *chain);
std::tuple<int, std::optional<cls_rgw_obj_chain>> send_chain_to_gc(cls_rgw_obj_chain& chain, const std::string& tag, optional_yield y);
void delete_objs_inline(const DoutPrefixProvider *dpp, cls_rgw_obj_chain& chain, const std::string& tag);
int gc_operate(const DoutPrefixProvider *dpp, std::string& oid, librados::ObjectWriteOperation *op, optional_yield y);
int gc_aio_operate(const std::string& oid, librados::AioCompletion *c,
librados::ObjectWriteOperation *op);
int gc_operate(const DoutPrefixProvider *dpp, std::string& oid, librados::ObjectReadOperation *op, bufferlist *pbl, optional_yield y);
int list_gc_objs(int *index, std::string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue);
int process_gc(bool expired_only, optional_yield y);
bool process_expire_objects(const DoutPrefixProvider *dpp, optional_yield y);
int defer_gc(const DoutPrefixProvider *dpp, RGWObjectCtx* ctx, RGWBucketInfo& bucket_info, const rgw_obj& obj, optional_yield y);
int process_lc(const std::unique_ptr<rgw::sal::Bucket>& optional_bucket);
int list_lc_progress(std::string& marker, uint32_t max_entries,
std::vector<std::unique_ptr<rgw::sal::Lifecycle::LCEntry>>& progress_map,
int& index);
int bucket_check_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
std::map<RGWObjCategory, RGWStorageStats> *existing_stats,
std::map<RGWObjCategory, RGWStorageStats> *calculated_stats);
int bucket_rebuild_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info);
int bucket_set_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const cls_rgw_bucket_instance_entry& entry);
int remove_objs_from_index(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
const std::list<rgw_obj_index_key>& oid_list);
int move_rados_obj(const DoutPrefixProvider *dpp,
librados::IoCtx& src_ioctx,
const std::string& src_oid, const std::string& src_locator,
librados::IoCtx& dst_ioctx,
const std::string& dst_oid, const std::string& dst_locator, optional_yield y);
int fix_head_obj_locator(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, bool copy_obj, bool remove_bad, rgw_obj_key& key, optional_yield y);
int fix_tail_obj_locator(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,
rgw_obj_key& key, bool fix, bool *need_fix, optional_yield y);
int check_quota(const DoutPrefixProvider *dpp, const rgw_user& bucket_owner, rgw_bucket& bucket,
RGWQuota& quota, uint64_t obj_size,
optional_yield y, bool check_size_only = false);
int check_bucket_shards(const RGWBucketInfo& bucket_info, const rgw_bucket& bucket,
uint64_t num_objs, const DoutPrefixProvider *dpp, optional_yield y);
int add_bucket_to_reshard(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, uint32_t new_num_shards, optional_yield y);
uint64_t instance_id();
librados::Rados* get_rados_handle();
int delete_raw_obj_aio(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, std::list<librados::AioCompletion *>& handles);
int delete_obj_aio(const DoutPrefixProvider *dpp, const rgw_obj& obj, RGWBucketInfo& info, RGWObjState *astate,
std::list<librados::AioCompletion *>& handles, bool keep_index_consistent,
optional_yield y);
private:
/**
* Check the actual on-disk state of the object specified
* by list_state, and fill in the time and size of object.
* Then append any changes to suggested_updates for
* the rgw class' dir_suggest_changes function.
*
* Note that this can maul list_state; don't use it afterwards. Also
* it expects object to already be filled in from list_state; it only
* sets the size and mtime.
*
* Returns 0 on success, -ENOENT if the object doesn't exist on disk,
* and -errno on other failures. (-ENOENT is not a failure, and it
* will encode that info as a suggested update.)
*/
int check_disk_state(const DoutPrefixProvider *dpp,
librados::IoCtx io_ctx,
RGWBucketInfo& bucket_info,
rgw_bucket_dir_entry& list_state,
rgw_bucket_dir_entry& object,
bufferlist& suggested_updates,
optional_yield y);
/**
* Init pool iteration
* pool: pool to use for the ctx initialization
* ctx: context object to use for the iteration
* Returns: 0 on success, -ERR# otherwise.
*/
int pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, RGWPoolIterCtx& ctx);
/**
* Init pool iteration
* pool: pool to use
* cursor: position to start iteration
* ctx: context object to use for the iteration
* Returns: 0 on success, -ERR# otherwise.
*/
int pool_iterate_begin(const DoutPrefixProvider *dpp, const rgw_pool& pool, const std::string& cursor, RGWPoolIterCtx& ctx);
/**
* Get pool iteration position
* ctx: context object to use for the iteration
* Returns: std::string representation of position
*/
std::string pool_iterate_get_cursor(RGWPoolIterCtx& ctx);
/**
* Iterate over pool return object names, use optional filter
* ctx: iteration context, initialized with pool_iterate_begin()
* num: max number of objects to return
* objs: a vector that the results will append into
   * is_truncated: if not NULL, will hold true iff the listing was truncated
   *     (i.e. more objects remain to be iterated)
* filter: if not NULL, will be used to filter returned objects
* Returns: 0 on success, -ERR# otherwise.
*/
int pool_iterate(const DoutPrefixProvider *dpp, RGWPoolIterCtx& ctx, uint32_t num,
std::vector<rgw_bucket_dir_entry>& objs,
bool *is_truncated, RGWAccessListFilter *filter);
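  /**
   * A sketch of the intended paging pattern (these helpers are private;
   * the public list_raw_objects* wrappers above drive them the same way):
   *
   *   RGWPoolIterCtx ictx;
   *   int r = pool_iterate_begin(dpp, pool, ictx);
   *   bool truncated = true;
   *   while (r >= 0 && truncated) {
   *     std::vector<rgw_bucket_dir_entry> objs;
   *     r = pool_iterate(dpp, ictx, 100, objs, &truncated, filter);
   *     // consume objs...
   *   }
   */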
uint64_t next_bucket_id();
/**
* This is broken out to facilitate unit testing.
*/
static uint32_t calc_ordered_bucket_list_per_shard(uint32_t num_entries,
uint32_t num_shards);
};
struct get_obj_data {
RGWRados* rgwrados;
RGWGetDataCB* client_cb = nullptr;
rgw::Aio* aio;
uint64_t offset; // next offset to write to client
rgw::AioResultList completed; // completed read results, sorted by offset
optional_yield yield;
get_obj_data(RGWRados* rgwrados, RGWGetDataCB* cb, rgw::Aio* aio,
uint64_t offset, optional_yield yield)
: rgwrados(rgwrados), client_cb(cb), aio(aio), offset(offset), yield(yield) {}
~get_obj_data() {
if (rgwrados->get_use_datacache()) {
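      // the guard does no work; taking d3n_lock here (likely) serializes
      // destruction against a d3n cache writer that still holds the lock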
const std::lock_guard l(d3n_get_data.d3n_lock);
}
}
D3nGetObjData d3n_get_data;
std::atomic_bool d3n_bypass_cache_write{false};
int flush(rgw::AioResultList&& results);
void cancel() {
// wait for all completions to drain and ignore the results
aio->drain();
}
int drain() {
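    // reap completions in waves: each aio->wait() returns whatever has
    // finished so far, and flush() delivers those results to the client
    // callback in offset order until nothing is left in flight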
auto c = aio->wait();
while (!c.empty()) {
int r = flush(std::move(c));
if (r < 0) {
cancel();
return r;
}
c = aio->wait();
}
return flush(std::move(c));
}
};
ceph-main/src/rgw/driver/rados/rgw_reshard.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <limits>
#include <sstream>
#include "rgw_zone.h"
#include "driver/rados/rgw_bucket.h"
#include "rgw_reshard.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "cls/rgw/cls_rgw_client.h"
#include "cls/lock/cls_lock_client.h"
#include "common/errno.h"
#include "common/ceph_json.h"
#include "common/dout.h"
#include "services/svc_zone.h"
#include "services/svc_sys_obj.h"
#include "services/svc_tier_rados.h"
#include "services/svc_bilog_rados.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
const string reshard_oid_prefix = "reshard.";
const string reshard_lock_name = "reshard_process";
const string bucket_instance_lock_name = "bucket_instance_lock";
/* All primes up to 2000, used to attempt to make dynamic sharding use
 * a prime number of shards. Note: this list also includes 1 for when
* 1 shard is the most appropriate, even though 1 is not prime.
*/
const std::initializer_list<uint16_t> RGWBucketReshard::reshard_primes = {
1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021,
1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,
1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181,
1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259,
1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321,
1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433,
1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493,
1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579,
1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657,
1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741,
1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831,
1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913,
1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999
};
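// Dynamic resharding rounds its target up to one of these primes so that
// object-name hashes spread evenly across shards. A minimal selection sketch
// (assumption: the configured shard maximum is enforced by the caller):
//
//   static uint32_t nearest_reshard_prime(uint32_t want) {
//     for (uint16_t p : RGWBucketReshard::reshard_primes) {
//       if (p >= want) {
//         return p;
//       }
//     }
//     return 1999; // largest entry in the list
//   }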
class BucketReshardShard {
rgw::sal::RadosStore* store;
const RGWBucketInfo& bucket_info;
int shard_id;
RGWRados::BucketShard bs;
vector<rgw_cls_bi_entry> entries;
map<RGWObjCategory, rgw_bucket_category_stats> stats;
deque<librados::AioCompletion *>& aio_completions;
uint64_t max_aio_completions;
uint64_t reshard_shard_batch_size;
int wait_next_completion() {
librados::AioCompletion *c = aio_completions.front();
aio_completions.pop_front();
c->wait_for_complete();
int ret = c->get_return_value();
c->release();
if (ret < 0) {
derr << "ERROR: reshard rados operation failed: " << cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
int get_completion(librados::AioCompletion **c) {
if (aio_completions.size() >= max_aio_completions) {
int ret = wait_next_completion();
if (ret < 0) {
return ret;
}
}
*c = librados::Rados::aio_create_completion(nullptr, nullptr);
aio_completions.push_back(*c);
return 0;
}
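  // note that get_completion() doubles as a throttle: once
  // max_aio_completions are in flight, it blocks on the oldest completion
  // before handing out a new one, bounding reshard concurrency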
public:
BucketReshardShard(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore *_store, const RGWBucketInfo& _bucket_info,
const rgw::bucket_index_layout_generation& index,
int shard_id, deque<librados::AioCompletion *>& _completions) :
store(_store), bucket_info(_bucket_info), shard_id(shard_id),
bs(store->getRados()), aio_completions(_completions)
{
bs.init(dpp, bucket_info, index, shard_id, null_yield);
max_aio_completions =
store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_max_aio");
reshard_shard_batch_size =
store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_batch_size");
}
int get_shard_id() const {
return shard_id;
}
int add_entry(rgw_cls_bi_entry& entry, bool account, RGWObjCategory category,
const rgw_bucket_category_stats& entry_stats) {
entries.push_back(entry);
if (account) {
rgw_bucket_category_stats& target = stats[category];
target.num_entries += entry_stats.num_entries;
target.total_size += entry_stats.total_size;
target.total_size_rounded += entry_stats.total_size_rounded;
target.actual_size += entry_stats.actual_size;
}
if (entries.size() >= reshard_shard_batch_size) {
int ret = flush();
if (ret < 0) {
return ret;
}
}
return 0;
}
int flush() {
    if (entries.empty()) {
return 0;
}
librados::ObjectWriteOperation op;
for (auto& entry : entries) {
store->getRados()->bi_put(op, bs, entry, null_yield);
}
cls_rgw_bucket_update_stats(op, false, stats);
librados::AioCompletion *c;
int ret = get_completion(&c);
if (ret < 0) {
return ret;
}
ret = bs.bucket_obj.aio_operate(c, &op);
if (ret < 0) {
derr << "ERROR: failed to store entries in target bucket shard (bs=" << bs.bucket << "/" << bs.shard_id << ") error=" << cpp_strerror(-ret) << dendl;
return ret;
}
entries.clear();
stats.clear();
return 0;
}
int wait_all_aio() {
int ret = 0;
while (!aio_completions.empty()) {
int r = wait_next_completion();
if (r < 0) {
ret = r;
}
}
return ret;
}
}; // class BucketReshardShard
class BucketReshardManager {
rgw::sal::RadosStore *store;
deque<librados::AioCompletion *> completions;
vector<BucketReshardShard> target_shards;
public:
BucketReshardManager(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore *_store,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& target)
: store(_store)
{
const uint32_t num_shards = rgw::num_shards(target.layout.normal);
target_shards.reserve(num_shards);
for (uint32_t i = 0; i < num_shards; ++i) {
target_shards.emplace_back(dpp, store, bucket_info, target, i, completions);
}
}
~BucketReshardManager() {
for (auto& shard : target_shards) {
int ret = shard.wait_all_aio();
if (ret < 0) {
      ldout(store->ctx(), 20) << __func__ <<
        ": shard.wait_all_aio() returned ret=" << ret << dendl;
}
}
}
int add_entry(int shard_index,
rgw_cls_bi_entry& entry, bool account, RGWObjCategory category,
const rgw_bucket_category_stats& entry_stats) {
int ret = target_shards[shard_index].add_entry(entry, account, category,
entry_stats);
if (ret < 0) {
derr << "ERROR: target_shards.add_entry(" << entry.idx <<
") returned error: " << cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
int finish() {
int ret = 0;
for (auto& shard : target_shards) {
int r = shard.flush();
if (r < 0) {
derr << "ERROR: target_shards[" << shard.get_shard_id() << "].flush() returned error: " << cpp_strerror(-r) << dendl;
ret = r;
}
}
for (auto& shard : target_shards) {
int r = shard.wait_all_aio();
if (r < 0) {
derr << "ERROR: target_shards[" << shard.get_shard_id() << "].wait_all_aio() returned error: " << cpp_strerror(-r) << dendl;
ret = r;
}
}
target_shards.clear();
return ret;
}
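  // finish() is deliberately two-phase: flush every shard's pending batch
  // first so all writes are submitted, then wait on all completions, so one
  // slow shard does not stall submission for the others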
}; // class BucketReshardManager
RGWBucketReshard::RGWBucketReshard(rgw::sal::RadosStore* _store,
const RGWBucketInfo& _bucket_info,
const std::map<std::string, bufferlist>& _bucket_attrs,
RGWBucketReshardLock* _outer_reshard_lock) :
store(_store), bucket_info(_bucket_info), bucket_attrs(_bucket_attrs),
reshard_lock(store, bucket_info, true),
outer_reshard_lock(_outer_reshard_lock)
{ }
// sets reshard status of bucket index shards for the current index layout
static int set_resharding_status(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info,
cls_rgw_reshard_status status)
{
cls_rgw_bucket_instance_entry instance_entry;
instance_entry.set_status(status);
int ret = store->getRados()->bucket_set_reshard(dpp, bucket_info, instance_entry);
if (ret < 0) {
ldpp_dout(dpp, 0) << "RGWReshard::" << __func__ << " ERROR: error setting bucket resharding flag on bucket index: "
<< cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
static int remove_old_reshard_instance(rgw::sal::RadosStore* store,
const rgw_bucket& bucket,
const DoutPrefixProvider* dpp, optional_yield y)
{
RGWBucketInfo info;
int r = store->getRados()->get_bucket_instance_info(bucket, info, nullptr,
nullptr, y, dpp);
if (r < 0) {
return r;
}
// delete its shard objects (ignore errors)
store->svc()->bi->clean_index(dpp, info, info.layout.current_index);
// delete the bucket instance metadata
return store->ctl()->bucket->remove_bucket_instance_info(bucket, info, y, dpp);
}
// initialize the new bucket index shard objects
static int init_target_index(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& index,
const DoutPrefixProvider* dpp)
{
int ret = store->svc()->bi->init_index(dpp, bucket_info, index);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to initialize "
"target index shard objects: " << cpp_strerror(ret) << dendl;
return ret;
}
if (!bucket_info.datasync_flag_enabled()) {
// if bucket sync is disabled, disable it on each of the new shards too
auto log = rgw::log_layout_from_index(0, index);
ret = store->svc()->bilog_rados->log_stop(dpp, bucket_info, log, -1);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to disable "
"bucket sync on the target index shard objects: "
<< cpp_strerror(ret) << dendl;
store->svc()->bi->clean_index(dpp, bucket_info, index);
return ret;
}
}
return ret;
}
// initialize a target index layout, create its bucket index shard objects, and
// write the target layout to the bucket instance metadata
static int init_target_layout(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
ReshardFaultInjector& fault,
uint32_t new_num_shards,
const DoutPrefixProvider* dpp, optional_yield y)
{
auto prev = bucket_info.layout; // make a copy for cleanup
const auto current = prev.current_index;
// initialize a new normal target index layout generation
rgw::bucket_index_layout_generation target;
target.layout.type = rgw::BucketIndexType::Normal;
target.layout.normal.num_shards = new_num_shards;
target.gen = current.gen + 1;
if (bucket_info.reshard_status == cls_rgw_reshard_status::IN_PROGRESS) {
// backward-compatible cleanup of old reshards, where the target was in a
// different bucket instance
if (!bucket_info.new_bucket_instance_id.empty()) {
rgw_bucket new_bucket = bucket_info.bucket;
new_bucket.bucket_id = bucket_info.new_bucket_instance_id;
ldout(store->ctx(), 10) << __func__ << " removing target bucket instance "
"from a previous reshard attempt" << dendl;
// ignore errors
remove_old_reshard_instance(store, new_bucket, dpp, y);
}
bucket_info.reshard_status = cls_rgw_reshard_status::NOT_RESHARDING;
}
if (bucket_info.layout.target_index) {
// a previous reshard failed or stalled, and its reshard lock dropped
ldpp_dout(dpp, 10) << __func__ << " removing existing target index "
"objects from a previous reshard attempt" << dendl;
// delete its existing shard objects (ignore errors)
store->svc()->bi->clean_index(dpp, bucket_info, *bucket_info.layout.target_index);
// don't reuse this same generation in the new target layout, in case
// something is still trying to operate on its shard objects
target.gen = bucket_info.layout.target_index->gen + 1;
}
// create the index shard objects
int ret = init_target_index(store, bucket_info, target, dpp);
if (ret < 0) {
return ret;
}
// retry in case of racing writes to the bucket instance metadata
static constexpr auto max_retries = 10;
int tries = 0;
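  // put_bucket_instance_info() fails with -ECANCELED when the bucket
  // instance's object version changed underneath us; each retry
  // re-reads the latest metadata before re-applying the change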
do {
// update resharding state
bucket_info.layout.target_index = target;
bucket_info.layout.resharding = rgw::BucketReshardState::InProgress;
if (ret = fault.check("set_target_layout");
ret == 0) { // no fault injected, write the bucket instance metadata
ret = store->getRados()->put_bucket_instance_info(bucket_info, false,
real_time(), &bucket_attrs, dpp, y);
} else if (ret == -ECANCELED) {
fault.clear(); // clear the fault so a retry can succeed
}
if (ret == -ECANCELED) {
// racing write detected, read the latest bucket info and try again
int ret2 = store->getRados()->get_bucket_instance_info(
bucket_info.bucket, bucket_info,
nullptr, &bucket_attrs, y, dpp);
if (ret2 < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to read "
"bucket info: " << cpp_strerror(ret2) << dendl;
ret = ret2;
break;
}
// check that we're still in the reshard state we started in
if (bucket_info.layout.resharding != rgw::BucketReshardState::None ||
bucket_info.layout.current_index != current) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " raced with "
"another reshard" << dendl;
break;
}
prev = bucket_info.layout; // update the copy
}
++tries;
} while (ret == -ECANCELED && tries < max_retries);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to write "
"target index layout to bucket info: " << cpp_strerror(ret) << dendl;
bucket_info.layout = std::move(prev); // restore in-memory layout
// delete the target shard objects (ignore errors)
store->svc()->bi->clean_index(dpp, bucket_info, target);
return ret;
}
return 0;
} // init_target_layout
// delete the bucket index shards associated with the target layout and remove
// it from the bucket instance metadata
static int revert_target_layout(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
ReshardFaultInjector& fault,
const DoutPrefixProvider* dpp, optional_yield y)
{
auto prev = bucket_info.layout; // make a copy for cleanup
// remove target index shard objects
int ret = store->svc()->bi->clean_index(dpp, bucket_info, *prev.target_index);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " failed to remove "
"target index with: " << cpp_strerror(ret) << dendl;
ret = 0; // non-fatal error
}
// retry in case of racing writes to the bucket instance metadata
static constexpr auto max_retries = 10;
int tries = 0;
do {
// clear target_index and resharding state
bucket_info.layout.target_index = std::nullopt;
bucket_info.layout.resharding = rgw::BucketReshardState::None;
if (ret = fault.check("revert_target_layout");
ret == 0) { // no fault injected, revert the bucket instance metadata
ret = store->getRados()->put_bucket_instance_info(bucket_info, false,
real_time(),
&bucket_attrs, dpp, y);
} else if (ret == -ECANCELED) {
fault.clear(); // clear the fault so a retry can succeed
}
if (ret == -ECANCELED) {
// racing write detected, read the latest bucket info and try again
int ret2 = store->getRados()->get_bucket_instance_info(
bucket_info.bucket, bucket_info,
nullptr, &bucket_attrs, y, dpp);
if (ret2 < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to read "
"bucket info: " << cpp_strerror(ret2) << dendl;
ret = ret2;
break;
}
// check that we're still in the reshard state we started in
if (bucket_info.layout.resharding == rgw::BucketReshardState::None) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " raced with "
"reshard cancel" << dendl;
return -ECANCELED;
}
if (bucket_info.layout.current_index != prev.current_index ||
bucket_info.layout.target_index != prev.target_index) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " raced with "
"another reshard" << dendl;
return -ECANCELED;
}
prev = bucket_info.layout; // update the copy
}
++tries;
} while (ret == -ECANCELED && tries < max_retries);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to clear "
"target index layout in bucket info: " << cpp_strerror(ret) << dendl;
bucket_info.layout = std::move(prev); // restore in-memory layout
return ret;
}
return 0;
} // revert_target_layout
static int init_reshard(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
ReshardFaultInjector& fault,
uint32_t new_num_shards,
const DoutPrefixProvider *dpp, optional_yield y)
{
if (new_num_shards == 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " got invalid new_num_shards=0" << dendl;
return -EINVAL;
}
int ret = init_target_layout(store, bucket_info, bucket_attrs, fault, new_num_shards, dpp, y);
if (ret < 0) {
return ret;
}
if (ret = fault.check("block_writes");
ret == 0) { // no fault injected, block writes to the current index shards
ret = set_resharding_status(dpp, store, bucket_info,
cls_rgw_reshard_status::IN_PROGRESS);
}
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to pause "
"writes to the current index: " << cpp_strerror(ret) << dendl;
// clean up the target layout (ignore errors)
revert_target_layout(store, bucket_info, bucket_attrs, fault, dpp, y);
return ret;
}
return 0;
} // init_reshard
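// The overall reshard sequence: init_reshard() installs the target index
// layout and blocks writes, do_reshard() copies entries into the new
// shards, and commit_reshard() promotes the target layout to current.
// Failures after init fall back to cancel_reshard() below.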
static int cancel_reshard(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
ReshardFaultInjector& fault,
const DoutPrefixProvider *dpp, optional_yield y)
{
// unblock writes to the current index shard objects
int ret = set_resharding_status(dpp, store, bucket_info,
cls_rgw_reshard_status::NOT_RESHARDING);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " failed to unblock "
"writes to current index objects: " << cpp_strerror(ret) << dendl;
ret = 0; // non-fatal error
}
if (bucket_info.layout.target_index) {
return revert_target_layout(store, bucket_info, bucket_attrs, fault, dpp, y);
}
// there is nothing to revert
return 0;
} // cancel_reshard
static int commit_target_layout(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
ReshardFaultInjector& fault,
const DoutPrefixProvider *dpp, optional_yield y)
{
auto& layout = bucket_info.layout;
const auto next_log_gen = layout.logs.empty() ? 1 :
layout.logs.back().gen + 1;
if (!store->svc()->zone->need_to_log_data()) {
// if we're not syncing data, we can drop any existing logs
layout.logs.clear();
}
// use the new index layout as current
ceph_assert(layout.target_index);
layout.current_index = std::move(*layout.target_index);
layout.target_index = std::nullopt;
layout.resharding = rgw::BucketReshardState::None;
// add the in-index log layout
layout.logs.push_back(log_layout_from_index(next_log_gen, layout.current_index));
int ret = fault.check("commit_target_layout");
if (ret == 0) { // no fault injected, write the bucket instance metadata
ret = store->getRados()->put_bucket_instance_info(
bucket_info, false, real_time(), &bucket_attrs, dpp, y);
} else if (ret == -ECANCELED) {
fault.clear(); // clear the fault so a retry can succeed
}
return ret;
} // commit_target_layout
static int commit_reshard(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
ReshardFaultInjector& fault,
const DoutPrefixProvider *dpp, optional_yield y)
{
auto prev = bucket_info.layout; // make a copy for cleanup
// retry in case of racing writes to the bucket instance metadata
static constexpr auto max_retries = 10;
int tries = 0;
int ret = 0;
do {
ret = commit_target_layout(store, bucket_info, bucket_attrs, fault, dpp, y);
if (ret == -ECANCELED) {
// racing write detected, read the latest bucket info and try again
int ret2 = store->getRados()->get_bucket_instance_info(
bucket_info.bucket, bucket_info,
nullptr, &bucket_attrs, y, dpp);
if (ret2 < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to read "
"bucket info: " << cpp_strerror(ret2) << dendl;
ret = ret2;
break;
}
// check that we're still in the reshard state we started in
if (bucket_info.layout.resharding != rgw::BucketReshardState::InProgress) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " raced with "
"reshard cancel" << dendl;
return -ECANCELED; // whatever canceled us already did the cleanup
}
if (bucket_info.layout.current_index != prev.current_index ||
bucket_info.layout.target_index != prev.target_index) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " raced with "
"another reshard" << dendl;
return -ECANCELED; // whatever canceled us already did the cleanup
}
prev = bucket_info.layout; // update the copy
}
++tries;
} while (ret == -ECANCELED && tries < max_retries);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << " failed to commit "
"target index layout: " << cpp_strerror(ret) << dendl;
bucket_info.layout = std::move(prev); // restore in-memory layout
// unblock writes to the current index shard objects
int ret2 = set_resharding_status(dpp, store, bucket_info,
cls_rgw_reshard_status::NOT_RESHARDING);
if (ret2 < 0) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " failed to unblock "
"writes to current index objects: " << cpp_strerror(ret2) << dendl;
// non-fatal error
}
return ret;
}
if (store->svc()->zone->need_to_log_data() && !prev.logs.empty() &&
prev.current_index.layout.type == rgw::BucketIndexType::Normal) {
// write a datalog entry for each shard of the previous index. triggering
// sync on the old shards will force them to detect the end-of-log for that
// generation, and eventually transition to the next
// TODO: use a log layout to support types other than BucketLogType::InIndex
for (uint32_t shard_id = 0; shard_id < rgw::num_shards(prev.current_index.layout.normal); ++shard_id) {
// This null_yield can stay, for now, since we're in our own thread
ret = store->svc()->datalog_rados->add_entry(dpp, bucket_info, prev.logs.back(), shard_id,
null_yield);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: failed writing data log (bucket_info.bucket="
          << bucket_info.bucket << ", shard_id=" << shard_id << " of generation="
<< prev.logs.back().gen << ")" << dendl;
} // datalog error is not fatal
}
}
// check whether the old index objects are still needed for bilogs
const auto& logs = bucket_info.layout.logs;
auto log = std::find_if(logs.begin(), logs.end(),
[&prev] (const rgw::bucket_log_layout_generation& log) {
return log.layout.type == rgw::BucketLogType::InIndex
&& log.layout.in_index.gen == prev.current_index.gen;
});
if (log == logs.end()) {
// delete the index objects (ignore errors)
store->svc()->bi->clean_index(dpp, bucket_info, prev.current_index);
}
return 0;
} // commit_reshard
int RGWBucketReshard::clear_resharding(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
const DoutPrefixProvider* dpp, optional_yield y)
{
ReshardFaultInjector no_fault;
return cancel_reshard(store, bucket_info, bucket_attrs, no_fault, dpp, y);
}
int RGWBucketReshard::cancel(const DoutPrefixProvider* dpp, optional_yield y)
{
int ret = reshard_lock.lock(dpp);
if (ret < 0) {
return ret;
}
if (bucket_info.layout.resharding != rgw::BucketReshardState::InProgress) {
ldpp_dout(dpp, -1) << "ERROR: bucket is not resharding" << dendl;
ret = -EINVAL;
} else {
ret = clear_resharding(store, bucket_info, bucket_attrs, dpp, y);
}
reshard_lock.unlock();
return ret;
}
RGWBucketReshardLock::RGWBucketReshardLock(rgw::sal::RadosStore* _store,
const std::string& reshard_lock_oid,
bool _ephemeral) :
store(_store),
lock_oid(reshard_lock_oid),
ephemeral(_ephemeral),
internal_lock(reshard_lock_name)
{
const int lock_dur_secs = store->ctx()->_conf.get_val<uint64_t>(
"rgw_reshard_bucket_lock_duration");
duration = std::chrono::seconds(lock_dur_secs);
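  // a random cookie identifies this process's hold on the lock, so that
  // renew() and unlock() only act on the lock instance we acquired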
#define COOKIE_LEN 16
char cookie_buf[COOKIE_LEN + 1];
gen_rand_alphanumeric(store->ctx(), cookie_buf, sizeof(cookie_buf) - 1);
cookie_buf[COOKIE_LEN] = '\0';
internal_lock.set_cookie(cookie_buf);
internal_lock.set_duration(duration);
}
int RGWBucketReshardLock::lock(const DoutPrefixProvider *dpp) {
internal_lock.set_must_renew(false);
int ret;
if (ephemeral) {
ret = internal_lock.lock_exclusive_ephemeral(&store->getRados()->reshard_pool_ctx,
lock_oid);
} else {
ret = internal_lock.lock_exclusive(&store->getRados()->reshard_pool_ctx, lock_oid);
}
if (ret == -EBUSY) {
ldout(store->ctx(), 0) << "INFO: RGWReshardLock::" << __func__ <<
" found lock on " << lock_oid <<
" to be held by another RGW process; skipping for now" << dendl;
return ret;
} else if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: RGWReshardLock::" << __func__ <<
" failed to acquire lock on " << lock_oid << ": " <<
cpp_strerror(-ret) << dendl;
return ret;
}
reset_time(Clock::now());
return 0;
}
void RGWBucketReshardLock::unlock() {
int ret = internal_lock.unlock(&store->getRados()->reshard_pool_ctx, lock_oid);
if (ret < 0) {
ldout(store->ctx(), 0) << "WARNING: RGWBucketReshardLock::" << __func__ <<
" failed to drop lock on " << lock_oid << " ret=" << ret << dendl;
}
}
int RGWBucketReshardLock::renew(const Clock::time_point& now) {
internal_lock.set_must_renew(true);
int ret;
if (ephemeral) {
ret = internal_lock.lock_exclusive_ephemeral(&store->getRados()->reshard_pool_ctx,
lock_oid);
} else {
ret = internal_lock.lock_exclusive(&store->getRados()->reshard_pool_ctx, lock_oid);
}
if (ret < 0) { /* expired or already locked by another processor */
std::stringstream error_s;
if (-ENOENT == ret) {
error_s << "ENOENT (lock expired or never initially locked)";
} else {
error_s << ret << " (" << cpp_strerror(-ret) << ")";
}
ldout(store->ctx(), 5) << __func__ << "(): failed to renew lock on " <<
lock_oid << " with error " << error_s.str() << dendl;
return ret;
}
internal_lock.set_must_renew(false);
reset_time(now);
ldout(store->ctx(), 20) << __func__ << "(): successfully renewed lock on " <<
lock_oid << dendl;
return 0;
}
int RGWBucketReshard::do_reshard(const rgw::bucket_index_layout_generation& current,
const rgw::bucket_index_layout_generation& target,
int max_entries,
bool verbose,
ostream *out,
Formatter *formatter,
const DoutPrefixProvider *dpp, optional_yield y)
{
if (out) {
(*out) << "tenant: " << bucket_info.bucket.tenant << std::endl;
(*out) << "bucket name: " << bucket_info.bucket.name << std::endl;
}
  /* update bucket info -- in progress */
list<rgw_cls_bi_entry> entries;
if (max_entries < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": can't reshard, negative max_entries" << dendl;
return -EINVAL;
}
BucketReshardManager target_shards_mgr(dpp, store, bucket_info, target);
bool verbose_json_out = verbose && (formatter != nullptr) && (out != nullptr);
if (verbose_json_out) {
formatter->open_array_section("entries");
}
uint64_t total_entries = 0;
if (!verbose_json_out && out) {
(*out) << "total entries:";
}
const uint32_t num_source_shards = rgw::num_shards(current.layout.normal);
string marker;
for (uint32_t i = 0; i < num_source_shards; ++i) {
bool is_truncated = true;
marker.clear();
const std::string null_object_filter; // empty string since we're not filtering by object
while (is_truncated) {
entries.clear();
int ret = store->getRados()->bi_list(dpp, bucket_info, i, null_object_filter, marker, max_entries, &entries, &is_truncated, y);
if (ret == -ENOENT) {
ldpp_dout(dpp, 1) << "WARNING: " << __func__ << " failed to find shard "
<< i << ", skipping" << dendl;
// break out of the is_truncated loop and move on to the next shard
break;
} else if (ret < 0) {
derr << "ERROR: bi_list(): " << cpp_strerror(-ret) << dendl;
return ret;
}
for (auto iter = entries.begin(); iter != entries.end(); ++iter) {
rgw_cls_bi_entry& entry = *iter;
if (verbose_json_out) {
formatter->open_object_section("entry");
encode_json("shard_id", i, formatter);
encode_json("num_entry", total_entries, formatter);
encode_json("entry", entry, formatter);
}
total_entries++;
marker = entry.idx;
int target_shard_id;
cls_rgw_obj_key cls_key;
RGWObjCategory category;
rgw_bucket_category_stats stats;
bool account = entry.get_info(&cls_key, &category, &stats);
rgw_obj_key key(cls_key);
if (entry.type == BIIndexType::OLH && key.empty()) {
// bogus entry created by https://tracker.ceph.com/issues/46456
          // to fix, skip so it doesn't get included in the new bucket instance
total_entries--;
ldpp_dout(dpp, 10) << "Dropping entry with empty name, idx=" << marker << dendl;
continue;
}
rgw_obj obj(bucket_info.bucket, key);
RGWMPObj mp;
if (key.ns == RGW_OBJ_NS_MULTIPART && mp.from_meta(key.name)) {
// place the multipart .meta object on the same shard as its head object
obj.index_hash_source = mp.get_key();
}
ret = store->getRados()->get_target_shard_id(bucket_info.layout.target_index->layout.normal,
obj.get_hash_object(), &target_shard_id);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: get_target_shard_id() returned ret=" << ret << dendl;
return ret;
}
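        // a negative target_shard_id indicates an unsharded target
        // layout, which maps onto shard index 0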
int shard_index = (target_shard_id > 0 ? target_shard_id : 0);
ret = target_shards_mgr.add_entry(shard_index, entry, account,
category, stats);
if (ret < 0) {
return ret;
}
Clock::time_point now = Clock::now();
if (reshard_lock.should_renew(now)) {
          // assume outer locks have timespans at least as long as ours,
          // so we can renew them inside this conditional
if (outer_reshard_lock) {
ret = outer_reshard_lock->renew(now);
if (ret < 0) {
return ret;
}
}
ret = reshard_lock.renew(now);
if (ret < 0) {
ldpp_dout(dpp, -1) << "Error renewing bucket lock: " << ret << dendl;
return ret;
}
}
if (verbose_json_out) {
formatter->close_section();
formatter->flush(*out);
} else if (out && !(total_entries % 1000)) {
(*out) << " " << total_entries;
}
} // entries loop
}
}
if (verbose_json_out) {
formatter->close_section();
formatter->flush(*out);
} else if (out) {
(*out) << " " << total_entries << std::endl;
}
int ret = target_shards_mgr.finish();
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to reshard" << dendl;
return -EIO;
}
return 0;
} // RGWBucketReshard::do_reshard
int RGWBucketReshard::get_status(const DoutPrefixProvider *dpp, list<cls_rgw_bucket_instance_entry> *status)
{
return store->svc()->bi_rados->get_reshard_status(dpp, bucket_info, status);
}
int RGWBucketReshard::execute(int num_shards,
ReshardFaultInjector& fault,
int max_op_entries,
const DoutPrefixProvider *dpp, optional_yield y,
bool verbose, ostream *out,
Formatter *formatter,
RGWReshard* reshard_log)
{
// take a reshard lock on the bucket
int ret = reshard_lock.lock(dpp);
if (ret < 0) {
return ret;
}
// unlock when scope exits
auto unlock = make_scope_guard([this] { reshard_lock.unlock(); });
if (reshard_log) {
ret = reshard_log->update(dpp, bucket_info, y);
if (ret < 0) {
return ret;
}
}
  // prepare the target index and add its layout to the bucket info
ret = init_reshard(store, bucket_info, bucket_attrs, fault, num_shards, dpp, y);
if (ret < 0) {
return ret;
}
if (ret = fault.check("do_reshard");
ret == 0) { // no fault injected, do the reshard
ret = do_reshard(bucket_info.layout.current_index,
*bucket_info.layout.target_index,
max_op_entries, verbose, out, formatter, dpp, y);
}
if (ret < 0) {
cancel_reshard(store, bucket_info, bucket_attrs, fault, dpp, y);
ldpp_dout(dpp, 1) << __func__ << " INFO: reshard of bucket \""
<< bucket_info.bucket.name << "\" canceled due to errors" << dendl;
return ret;
}
ret = commit_reshard(store, bucket_info, bucket_attrs, fault, dpp, y);
if (ret < 0) {
return ret;
}
ldpp_dout(dpp, 1) << __func__ << " INFO: reshard of bucket \""
<< bucket_info.bucket.name << "\" completed successfully" << dendl;
return 0;
} // execute
bool RGWBucketReshard::can_reshard(const RGWBucketInfo& bucket,
const RGWSI_Zone* zone_svc)
{
return !zone_svc->need_to_log_data() ||
bucket.layout.logs.size() < max_bilog_history;
}
RGWReshard::RGWReshard(rgw::sal::RadosStore* _store, bool _verbose, ostream *_out,
Formatter *_formatter) :
store(_store), instance_lock(bucket_instance_lock_name),
verbose(_verbose), out(_out), formatter(_formatter)
{
num_logshards = store->ctx()->_conf.get_val<uint64_t>("rgw_reshard_num_logs");
}
string RGWReshard::get_logshard_key(const string& tenant,
const string& bucket_name)
{
return tenant + ":" + bucket_name;
}
#define MAX_RESHARD_LOGSHARDS_PRIME 7877
void RGWReshard::get_bucket_logshard_oid(const string& tenant, const string& bucket_name, string *oid)
{
string key = get_logshard_key(tenant, bucket_name);
uint32_t sid = ceph_str_hash_linux(key.c_str(), key.size());
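  // fold the low byte of the hash into its high bits, then reduce modulo
  // a prime before the logshard count to even out the distribution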
uint32_t sid2 = sid ^ ((sid & 0xFF) << 24);
sid = sid2 % MAX_RESHARD_LOGSHARDS_PRIME % num_logshards;
get_logshard_oid(int(sid), oid);
}
int RGWReshard::add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry, optional_yield y)
{
if (!store->svc()->zone->can_reshard()) {
ldpp_dout(dpp, 20) << __func__ << " Resharding is disabled" << dendl;
return 0;
}
string logshard_oid;
get_bucket_logshard_oid(entry.tenant, entry.bucket_name, &logshard_oid);
librados::ObjectWriteOperation op;
cls_rgw_reshard_add(op, entry);
int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to add entry to reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
return ret;
}
return 0;
}
int RGWReshard::update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, optional_yield y)
{
cls_rgw_reshard_entry entry;
entry.bucket_name = bucket_info.bucket.name;
entry.bucket_id = bucket_info.bucket.bucket_id;
entry.tenant = bucket_info.owner.tenant;
int ret = get(dpp, entry);
if (ret < 0) {
return ret;
}
ret = add(dpp, entry, y);
if (ret < 0) {
    ldpp_dout(dpp, 0) << __func__ << ": Error in updating entry bucket " << entry.bucket_name << ": " <<
cpp_strerror(-ret) << dendl;
}
return ret;
}
int RGWReshard::list(const DoutPrefixProvider *dpp, int logshard_num, string& marker, uint32_t max, std::list<cls_rgw_reshard_entry>& entries, bool *is_truncated)
{
string logshard_oid;
get_logshard_oid(logshard_num, &logshard_oid);
int ret = cls_rgw_reshard_list(store->getRados()->reshard_pool_ctx, logshard_oid, marker, max, entries, is_truncated);
if (ret == -ENOENT) {
// these shard objects aren't created until we actually write something to
// them, so treat ENOENT as a successful empty listing
*is_truncated = false;
ret = 0;
} else if (ret == -EACCES) {
ldpp_dout(dpp, -1) << "ERROR: access denied to pool " << store->svc()->zone->get_zone_params().reshard_pool
<< ". Fix the pool access permissions of your client" << dendl;
} else if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to list reshard log entries, oid="
<< logshard_oid << " marker=" << marker << " " << cpp_strerror(ret) << dendl;
}
return ret;
}
int RGWReshard::get(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry)
{
string logshard_oid;
get_bucket_logshard_oid(entry.tenant, entry.bucket_name, &logshard_oid);
int ret = cls_rgw_reshard_get(store->getRados()->reshard_pool_ctx, logshard_oid, entry);
if (ret < 0) {
if (ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: failed to get entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant <<
" bucket=" << entry.bucket_name << dendl;
}
return ret;
}
return 0;
}
int RGWReshard::remove(const DoutPrefixProvider *dpp, const cls_rgw_reshard_entry& entry, optional_yield y)
{
string logshard_oid;
get_bucket_logshard_oid(entry.tenant, entry.bucket_name, &logshard_oid);
librados::ObjectWriteOperation op;
cls_rgw_reshard_remove(op, entry);
int ret = rgw_rados_operate(dpp, store->getRados()->reshard_pool_ctx, logshard_oid, &op, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to remove entry from reshard log, oid=" << logshard_oid << " tenant=" << entry.tenant << " bucket=" << entry.bucket_name << dendl;
return ret;
}
return ret;
}
int RGWReshard::clear_bucket_resharding(const DoutPrefixProvider *dpp, const string& bucket_instance_oid, cls_rgw_reshard_entry& entry)
{
int ret = cls_rgw_clear_bucket_resharding(store->getRados()->reshard_pool_ctx, bucket_instance_oid);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to clear bucket resharding, bucket_instance_oid=" << bucket_instance_oid << dendl;
return ret;
}
return 0;
}
int RGWReshardWait::wait(optional_yield y)
{
std::unique_lock lock(mutex);
if (going_down) {
return -ECANCELED;
}
if (y) {
auto& context = y.get_io_context();
auto& yield = y.get_yield_context();
Waiter waiter(context);
waiters.push_back(waiter);
lock.unlock();
waiter.timer.expires_after(duration);
boost::system::error_code ec;
waiter.timer.async_wait(yield[ec]);
lock.lock();
waiters.erase(waiters.iterator_to(waiter));
return -ec.value();
}
cond.wait_for(lock, duration);
if (going_down) {
return -ECANCELED;
}
return 0;
}
void RGWReshardWait::stop()
{
std::scoped_lock lock(mutex);
going_down = true;
cond.notify_all();
for (auto& waiter : waiters) {
// unblock any waiters with ECANCELED
waiter.timer.cancel();
}
}
int RGWReshard::process_entry(const cls_rgw_reshard_entry& entry,
int max_entries, const DoutPrefixProvider *dpp, optional_yield y)
{
ldpp_dout(dpp, 20) << __func__ << " resharding " <<
entry.bucket_name << dendl;
rgw_bucket bucket;
RGWBucketInfo bucket_info;
std::map<std::string, bufferlist> bucket_attrs;
int ret = store->getRados()->get_bucket_info(store->svc(),
entry.tenant,
entry.bucket_name,
bucket_info, nullptr,
y, dpp,
&bucket_attrs);
if (ret < 0 || bucket_info.bucket.bucket_id != entry.bucket_id) {
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": Error in get_bucket_info for bucket " << entry.bucket_name <<
": " << cpp_strerror(-ret) << dendl;
if (ret != -ENOENT) {
// any error other than ENOENT will abort
return ret;
}
} else {
ldpp_dout(dpp, 0) << __func__ <<
": Bucket: " << entry.bucket_name <<
" already resharded by someone, skipping " << dendl;
}
// we've encountered a reshard queue entry for an apparently
// non-existent bucket; let's try to recover by cleaning up
ldpp_dout(dpp, 0) << __func__ <<
": removing reshard queue entry for a resharded or non-existent bucket" <<
entry.bucket_name << dendl;
ret = remove(dpp, entry, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": Error removing non-existent bucket " <<
entry.bucket_name << " from resharding queue: " <<
cpp_strerror(-ret) << dendl;
return ret;
}
// we cleaned up, move on to the next entry
return 0;
}
if (!RGWBucketReshard::can_reshard(bucket_info, store->svc()->zone)) {
ldpp_dout(dpp, 1) << "Bucket " << bucket_info.bucket << " is not "
"eligible for resharding until peer zones finish syncing one "
"or more of its old log generations" << dendl;
return remove(dpp, entry, y);
}
RGWBucketReshard br(store, bucket_info, bucket_attrs, nullptr);
ReshardFaultInjector f; // no fault injected
ret = br.execute(entry.new_num_shards, f, max_entries, dpp, y,
false, nullptr, nullptr, this);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
": Error during resharding bucket " << entry.bucket_name << ":" <<
cpp_strerror(-ret)<< dendl;
return ret;
}
ldpp_dout(dpp, 20) << __func__ <<
" removing reshard queue entry for bucket " << entry.bucket_name <<
dendl;
ret = remove(dpp, entry, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ << ": Error removing bucket " <<
entry.bucket_name << " from resharding queue: " <<
cpp_strerror(-ret) << dendl;
return ret;
}
return 0;
}
int RGWReshard::process_single_logshard(int logshard_num, const DoutPrefixProvider *dpp, optional_yield y)
{
string marker;
bool truncated = true;
constexpr uint32_t max_entries = 1000;
string logshard_oid;
get_logshard_oid(logshard_num, &logshard_oid);
RGWBucketReshardLock logshard_lock(store, logshard_oid, false);
int ret = logshard_lock.lock(dpp);
if (ret < 0) {
ldpp_dout(dpp, 5) << __func__ << "(): failed to acquire lock on " <<
logshard_oid << ", ret = " << ret <<dendl;
return ret;
}
do {
std::list<cls_rgw_reshard_entry> entries;
ret = list(dpp, logshard_num, marker, max_entries, entries, &truncated);
if (ret < 0) {
ldpp_dout(dpp, 10) << "cannot list all reshards in logshard oid=" <<
logshard_oid << dendl;
continue;
}
    for (auto& entry : entries) { // logshard entries
      ret = process_entry(entry, max_entries, dpp, y);
if (ret < 0) {
return ret;
}
Clock::time_point now = Clock::now();
if (logshard_lock.should_renew(now)) {
ret = logshard_lock.renew(now);
if (ret < 0) {
return ret;
}
}
entry.get_key(&marker);
} // entry for loop
} while (truncated);
logshard_lock.unlock();
return 0;
}
void RGWReshard::get_logshard_oid(int shard_num, string *logshard)
{
char buf[32];
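  // zero-pad the shard number to a fixed 10-digit width so the logshard
  // oids sort lexically in numeric order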
snprintf(buf, sizeof(buf), "%010u", (unsigned)shard_num);
string objname(reshard_oid_prefix);
*logshard = objname + buf;
}
int RGWReshard::process_all_logshards(const DoutPrefixProvider *dpp, optional_yield y)
{
int ret = 0;
for (int i = 0; i < num_logshards; i++) {
string logshard;
get_logshard_oid(i, &logshard);
ldpp_dout(dpp, 20) << "processing logshard = " << logshard << dendl;
ret = process_single_logshard(i, dpp, y);
ldpp_dout(dpp, 20) << "finish processing logshard = " << logshard << " , ret = " << ret << dendl;
}
return 0;
}
bool RGWReshard::going_down()
{
return down_flag;
}
void RGWReshard::start_processor()
{
worker = new ReshardWorker(store->ctx(), this);
worker->create("rgw_reshard");
}
void RGWReshard::stop_processor()
{
down_flag = true;
if (worker) {
worker->stop();
worker->join();
}
delete worker;
worker = nullptr;
}
void *RGWReshard::ReshardWorker::entry() {
do {
utime_t start = ceph_clock_now();
reshard->process_all_logshards(this, null_yield);
if (reshard->going_down())
break;
utime_t end = ceph_clock_now();
end -= start;
int secs = cct->_conf.get_val<uint64_t>("rgw_reshard_thread_interval");
if (secs <= end.sec())
continue; // next round
secs -= end.sec();
std::unique_lock locker{lock};
cond.wait_for(locker, std::chrono::seconds(secs));
} while (!reshard->going_down());
return NULL;
}
void RGWReshard::ReshardWorker::stop()
{
std::lock_guard l{lock};
cond.notify_all();
}
CephContext *RGWReshard::ReshardWorker::get_cct() const
{
return cct;
}
unsigned RGWReshard::ReshardWorker::get_subsys() const
{
return dout_subsys;
}
std::ostream& RGWReshard::ReshardWorker::gen_prefix(std::ostream& out) const
{
return out << "rgw reshard worker thread: ";
}
ceph-main/src/rgw/driver/rados/rgw_reshard.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <vector>
#include <initializer_list>
#include <functional>
#include <iterator>
#include <algorithm>
#include <boost/intrusive/list.hpp>
#include <boost/asio/basic_waitable_timer.hpp>
#include "include/common_fwd.h"
#include "include/rados/librados.hpp"
#include "common/ceph_time.h"
#include "common/async/yield_context.h"
#include "cls/rgw/cls_rgw_types.h"
#include "cls/lock/cls_lock_client.h"
#include "rgw_common.h"
#include "common/fault_injector.h"
class RGWReshard;
namespace rgw { namespace sal {
class RadosStore;
} }
using ReshardFaultInjector = FaultInjector<std::string_view>;
class RGWBucketReshardLock {
using Clock = ceph::coarse_mono_clock;
rgw::sal::RadosStore* store;
const std::string lock_oid;
const bool ephemeral;
rados::cls::lock::Lock internal_lock;
std::chrono::seconds duration;
Clock::time_point start_time;
Clock::time_point renew_thresh;
void reset_time(const Clock::time_point& now) {
start_time = now;
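    // schedule renewal once half of the lock duration has elapsed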
renew_thresh = start_time + duration / 2;
}
public:
RGWBucketReshardLock(rgw::sal::RadosStore* _store,
const std::string& reshard_lock_oid,
bool _ephemeral);
RGWBucketReshardLock(rgw::sal::RadosStore* _store,
const RGWBucketInfo& bucket_info,
bool _ephemeral) :
RGWBucketReshardLock(_store, bucket_info.bucket.get_key(':'), _ephemeral)
{}
int lock(const DoutPrefixProvider *dpp);
void unlock();
int renew(const Clock::time_point&);
bool should_renew(const Clock::time_point& now) const {
return now >= renew_thresh;
}
}; // class RGWBucketReshardLock
class RGWBucketReshard {
public:
using Clock = ceph::coarse_mono_clock;
private:
rgw::sal::RadosStore *store;
RGWBucketInfo bucket_info;
std::map<std::string, bufferlist> bucket_attrs;
RGWBucketReshardLock reshard_lock;
RGWBucketReshardLock* outer_reshard_lock;
  // using an initializer_list as an array in contiguous memory
  // allocated all at once
static const std::initializer_list<uint16_t> reshard_primes;
int do_reshard(const rgw::bucket_index_layout_generation& current,
const rgw::bucket_index_layout_generation& target,
int max_entries,
bool verbose,
std::ostream *os,
Formatter *formatter,
const DoutPrefixProvider *dpp, optional_yield y);
public:
// pass nullptr for the final parameter if no outer reshard lock to
// manage
RGWBucketReshard(rgw::sal::RadosStore* _store,
const RGWBucketInfo& _bucket_info,
const std::map<std::string, bufferlist>& _bucket_attrs,
RGWBucketReshardLock* _outer_reshard_lock);
int execute(int num_shards, ReshardFaultInjector& f,
int max_op_entries, const DoutPrefixProvider *dpp, optional_yield y,
bool verbose = false, std::ostream *out = nullptr,
ceph::Formatter *formatter = nullptr,
RGWReshard *reshard_log = nullptr);
int get_status(const DoutPrefixProvider *dpp, std::list<cls_rgw_bucket_instance_entry> *status);
int cancel(const DoutPrefixProvider* dpp, optional_yield y);
static int clear_resharding(rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info,
std::map<std::string, bufferlist>& bucket_attrs,
const DoutPrefixProvider* dpp, optional_yield y);
static uint32_t get_max_prime_shards() {
return *std::crbegin(reshard_primes);
}
// returns the prime in our list less than or equal to the
// parameter; the lowest value that can be returned is 1
static uint32_t get_prime_shards_less_or_equal(uint32_t requested_shards) {
auto it = std::upper_bound(reshard_primes.begin(), reshard_primes.end(),
requested_shards);
if (it == reshard_primes.begin()) {
return 1;
} else {
return *(--it);
}
}
// returns the prime in our list greater than or equal to the
// parameter; if we do not have such a prime, 0 is returned
static uint32_t get_prime_shards_greater_or_equal(
uint32_t requested_shards)
{
auto it = std::lower_bound(reshard_primes.begin(), reshard_primes.end(),
requested_shards);
if (it == reshard_primes.end()) {
return 0;
} else {
return *it;
}
}
// returns a preferred number of shards given a calculated number of
// shards based on max_dynamic_shards and the list of prime values
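  // (for example, if 11 and 13 are adjacent entries in reshard_primes,
  // a suggested count of 12 rounds up to 13, subject to the
  // max_dynamic_shards cap)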
static uint32_t get_preferred_shards(uint32_t suggested_shards,
uint32_t max_dynamic_shards) {
// use a prime if max is within our prime range, otherwise use
// specified max
const uint32_t absolute_max =
max_dynamic_shards >= get_max_prime_shards() ?
max_dynamic_shards :
get_prime_shards_less_or_equal(max_dynamic_shards);
// if we can use a prime number, use it, otherwise use suggested;
// note get_prime_shards_greater_or_equal will return 0 if no prime in
// prime range
const uint32_t prime_ish_num_shards =
std::max(get_prime_shards_greater_or_equal(suggested_shards),
suggested_shards);
// dynamic sharding cannot reshard more than defined maximum
const uint32_t final_num_shards =
std::min(prime_ish_num_shards, absolute_max);
return final_num_shards;
}
const std::map<std::string, bufferlist>& get_bucket_attrs() const {
return bucket_attrs;
}
// for multisite, the RGWBucketInfo keeps a history of old log generations
// until all peers are done with them. prevent this log history from growing
// too large by refusing to reshard the bucket until the old logs get trimmed
static constexpr size_t max_bilog_history = 4;
static bool can_reshard(const RGWBucketInfo& bucket,
const RGWSI_Zone* zone_svc);
}; // RGWBucketReshard
class RGWReshard {
public:
using Clock = ceph::coarse_mono_clock;
private:
rgw::sal::RadosStore* store;
std::string lock_name;
rados::cls::lock::Lock instance_lock;
int num_logshards;
bool verbose;
std::ostream *out;
Formatter *formatter;
void get_logshard_oid(int shard_num, std::string *shard);
protected:
class ReshardWorker : public Thread, public DoutPrefixProvider {
CephContext *cct;
RGWReshard *reshard;
ceph::mutex lock = ceph::make_mutex("ReshardWorker");
ceph::condition_variable cond;
public:
ReshardWorker(CephContext * const _cct,
RGWReshard * const _reshard)
: cct(_cct),
reshard(_reshard) {}
void *entry() override;
void stop();
CephContext *get_cct() const override;
unsigned get_subsys() const override;
std::ostream& gen_prefix(std::ostream& out) const override;
};
ReshardWorker *worker = nullptr;
std::atomic<bool> down_flag = { false };
std::string get_logshard_key(const std::string& tenant, const std::string& bucket_name);
void get_bucket_logshard_oid(const std::string& tenant, const std::string& bucket_name, std::string *oid);
public:
RGWReshard(rgw::sal::RadosStore* _store, bool _verbose = false, std::ostream *_out = nullptr, Formatter *_formatter = nullptr);
int add(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry, optional_yield y);
int update(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, optional_yield y);
int get(const DoutPrefixProvider *dpp, cls_rgw_reshard_entry& entry);
int remove(const DoutPrefixProvider *dpp, const cls_rgw_reshard_entry& entry, optional_yield y);
int list(const DoutPrefixProvider *dpp, int logshard_num, std::string& marker, uint32_t max, std::list<cls_rgw_reshard_entry>& entries, bool *is_truncated);
int clear_bucket_resharding(const DoutPrefixProvider *dpp, const std::string& bucket_instance_oid, cls_rgw_reshard_entry& entry);
/* reshard thread */
int process_entry(const cls_rgw_reshard_entry& entry, int max_entries,
const DoutPrefixProvider *dpp, optional_yield y);
int process_single_logshard(int logshard_num, const DoutPrefixProvider *dpp, optional_yield y);
int process_all_logshards(const DoutPrefixProvider *dpp, optional_yield y);
bool going_down();
void start_processor();
void stop_processor();
};
class RGWReshardWait {
public:
// the blocking wait uses std::condition_variable::wait_for(), which uses the
// std::chrono::steady_clock. use that for the async waits as well
using Clock = std::chrono::steady_clock;
private:
const ceph::timespan duration;
ceph::mutex mutex = ceph::make_mutex("RGWReshardWait::lock");
ceph::condition_variable cond;
struct Waiter : boost::intrusive::list_base_hook<> {
using Executor = boost::asio::io_context::executor_type;
using Timer = boost::asio::basic_waitable_timer<Clock,
boost::asio::wait_traits<Clock>, Executor>;
Timer timer;
explicit Waiter(boost::asio::io_context& ioc) : timer(ioc) {}
};
boost::intrusive::list<Waiter> waiters;
bool going_down{false};
public:
RGWReshardWait(ceph::timespan duration = std::chrono::seconds(5))
: duration(duration) {}
~RGWReshardWait() {
ceph_assert(going_down);
}
int wait(optional_yield y);
// unblock any threads waiting on reshard
void stop();
};
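// A minimal usage sketch (hypothetical caller, for illustration):
//   RGWReshardWait waiter;
//   int r = waiter.wait(y);  // sleeps ~duration; -ECANCELED after stop()
//   ...
//   waiter.stop();           // required before destruction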
ceph-main/src/rgw/driver/rados/rgw_rest_bucket.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_op.h"
#include "driver/rados/rgw_bucket.h"
#include "rgw_rest_bucket.h"
#include "rgw_sal.h"
#include "include/str_list.h"
#include "services/svc_sys_obj.h"
#include "services/svc_zone.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
class RGWOp_Bucket_Info : public RGWRESTOp {
public:
RGWOp_Bucket_Info() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_READ);
}
void execute(optional_yield y) override;
const char* name() const override { return "get_bucket_info"; }
};
void RGWOp_Bucket_Info::execute(optional_yield y)
{
RGWBucketAdminOpState op_state;
bool fetch_stats;
std::string bucket;
string uid_str;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_bool(s, "stats", false, &fetch_stats);
op_state.set_user_id(uid);
op_state.set_bucket_name(bucket);
op_state.set_fetch_stats(fetch_stats);
op_ret = RGWBucketAdminOp::info(driver, op_state, flusher, y, this);
}
class RGWOp_Get_Policy : public RGWRESTOp {
public:
RGWOp_Get_Policy() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_READ);
}
void execute(optional_yield y) override;
const char* name() const override { return "get_policy"; }
};
void RGWOp_Get_Policy::execute(optional_yield y)
{
RGWBucketAdminOpState op_state;
std::string bucket;
std::string object;
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_string(s, "object", object, &object);
op_state.set_bucket_name(bucket);
op_state.set_object(object);
op_ret = RGWBucketAdminOp::get_policy(driver, op_state, flusher, this, y);
}
class RGWOp_Check_Bucket_Index : public RGWRESTOp {
public:
RGWOp_Check_Bucket_Index() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "check_bucket_index"; }
};
void RGWOp_Check_Bucket_Index::execute(optional_yield y)
{
std::string bucket;
bool fix_index;
bool check_objects;
RGWBucketAdminOpState op_state;
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_bool(s, "fix", false, &fix_index);
RESTArgs::get_bool(s, "check-objects", false, &check_objects);
op_state.set_bucket_name(bucket);
op_state.set_fix_index(fix_index);
op_state.set_check_objects(check_objects);
op_ret = RGWBucketAdminOp::check_index(driver, op_state, flusher, s->yield, s);
}
class RGWOp_Bucket_Link : public RGWRESTOp {
public:
RGWOp_Bucket_Link() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "link_bucket"; }
};
void RGWOp_Bucket_Link::execute(optional_yield y)
{
std::string uid_str;
std::string bucket;
std::string bucket_id;
std::string new_bucket_name;
RGWBucketAdminOpState op_state;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_string(s, "bucket-id", bucket_id, &bucket_id);
RESTArgs::get_string(s, "new-bucket-name", new_bucket_name, &new_bucket_name);
rgw_user uid(uid_str);
op_state.set_user_id(uid);
op_state.set_bucket_name(bucket);
op_state.set_bucket_id(bucket_id);
op_state.set_new_bucket_name(new_bucket_name);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWBucketAdminOp::link(driver, op_state, s, y);
}
class RGWOp_Bucket_Unlink : public RGWRESTOp {
public:
RGWOp_Bucket_Unlink() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "unlink_bucket"; }
};
void RGWOp_Bucket_Unlink::execute(optional_yield y)
{
std::string uid_str;
std::string bucket;
RGWBucketAdminOpState op_state;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "bucket", bucket, &bucket);
op_state.set_user_id(uid);
op_state.set_bucket_name(bucket);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWBucketAdminOp::unlink(driver, op_state, s, y);
}
class RGWOp_Bucket_Remove : public RGWRESTOp {
public:
RGWOp_Bucket_Remove() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "remove_bucket"; }
};
void RGWOp_Bucket_Remove::execute(optional_yield y)
{
std::string bucket_name;
bool delete_children;
std::unique_ptr<rgw::sal::Bucket> bucket;
RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name);
RESTArgs::get_bool(s, "purge-objects", false, &delete_children);
/* FIXME We're abusing the owner of the bucket to pass the user, so that it can be forwarded to
* the master. This user is actually the OP caller, not the bucket owner. */
op_ret = driver->get_bucket(s, s->user.get(), string(), bucket_name, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "get_bucket returned ret=" << op_ret << dendl;
if (op_ret == -ENOENT) {
op_ret = -ERR_NO_SUCH_BUCKET;
}
return;
}
op_ret = bucket->remove_bucket(s, delete_children, true, &s->info, s->yield);
}
class RGWOp_Set_Bucket_Quota : public RGWRESTOp {
public:
RGWOp_Set_Bucket_Quota() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "set_bucket_quota"; }
};
#define QUOTA_INPUT_MAX_LEN 1024
void RGWOp_Set_Bucket_Quota::execute(optional_yield y)
{
bool uid_arg_existed = false;
std::string uid_str;
RESTArgs::get_string(s, "uid", uid_str, &uid_str, &uid_arg_existed);
if (! uid_arg_existed) {
op_ret = -EINVAL;
return;
}
rgw_user uid(uid_str);
bool bucket_arg_existed = false;
std::string bucket_name;
RESTArgs::get_string(s, "bucket", bucket_name, &bucket_name, &bucket_arg_existed);
if (! bucket_arg_existed) {
op_ret = -EINVAL;
return;
}
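  // quota settings may arrive either as a JSON request body or as
  // query-string parameters; a chunked request with no body falls back
  // to the query-string path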
bool use_http_params;
if (s->content_length > 0) {
use_http_params = false;
} else {
const char *encoding = s->info.env->get("HTTP_TRANSFER_ENCODING");
use_http_params = (!encoding || strcmp(encoding, "chunked") != 0);
}
RGWQuotaInfo quota;
if (!use_http_params) {
bool empty;
op_ret = get_json_input(driver->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
if (op_ret < 0) {
if (!empty)
return;
/* was probably chunked input, but no content provided, configure via http params */
use_http_params = true;
}
}
if (use_http_params) {
std::unique_ptr<rgw::sal::Bucket> bucket;
op_ret = driver->get_bucket(s, nullptr, uid.tenant, bucket_name, &bucket, s->yield);
if (op_ret < 0) {
return;
}
RGWQuotaInfo *old_quota = &bucket->get_info().quota;
int64_t old_max_size_kb = rgw_rounded_kb(old_quota->max_size);
int64_t max_size_kb;
bool has_max_size_kb = false;
RESTArgs::get_int64(s, "max-objects", old_quota->max_objects, "a.max_objects);
RESTArgs::get_int64(s, "max-size", old_quota->max_size, "a.max_size);
RESTArgs::get_int64(s, "max-size-kb", old_max_size_kb, &max_size_kb, &has_max_size_kb);
if (has_max_size_kb)
quota.max_size = max_size_kb * 1024;
RESTArgs::get_bool(s, "enabled", old_quota->enabled, "a.enabled);
}
RGWBucketAdminOpState op_state;
op_state.set_user_id(uid);
op_state.set_bucket_name(bucket_name);
op_state.set_quota(quota);
op_ret = RGWBucketAdminOp::set_quota(driver, op_state, s, y);
}
class RGWOp_Sync_Bucket : public RGWRESTOp {
public:
RGWOp_Sync_Bucket() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "sync_bucket"; }
};
void RGWOp_Sync_Bucket::execute(optional_yield y)
{
std::string bucket;
std::string tenant;
bool sync_bucket;
RGWBucketAdminOpState op_state;
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_string(s, "tenant", tenant, &tenant);
RESTArgs::get_bool(s, "sync", true, &sync_bucket);
op_state.set_bucket_name(bucket);
op_state.set_tenant(tenant);
op_state.set_sync_bucket(sync_bucket);
op_ret = RGWBucketAdminOp::sync_bucket(driver, op_state, s, y);
}
class RGWOp_Object_Remove: public RGWRESTOp {
public:
RGWOp_Object_Remove() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("buckets", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "remove_object"; }
};
void RGWOp_Object_Remove::execute(optional_yield y)
{
std::string bucket;
std::string object;
RGWBucketAdminOpState op_state;
RESTArgs::get_string(s, "bucket", bucket, &bucket);
RESTArgs::get_string(s, "object", object, &object);
op_state.set_bucket_name(bucket);
op_state.set_object(object);
op_ret = RGWBucketAdminOp::remove_object(driver, op_state, s, y);
}
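// admin /bucket dispatch: GET serves info/policy/index checks, PUT handles
// link/quota/sync, POST unlinks, and DELETE removes an object or the
// bucket itself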
RGWOp *RGWHandler_Bucket::op_get()
{
if (s->info.args.sub_resource_exists("policy"))
return new RGWOp_Get_Policy;
if (s->info.args.sub_resource_exists("index"))
return new RGWOp_Check_Bucket_Index;
return new RGWOp_Bucket_Info;
}
RGWOp *RGWHandler_Bucket::op_put()
{
if (s->info.args.sub_resource_exists("quota"))
return new RGWOp_Set_Bucket_Quota;
if (s->info.args.sub_resource_exists("sync"))
return new RGWOp_Sync_Bucket;
return new RGWOp_Bucket_Link;
}
RGWOp *RGWHandler_Bucket::op_post()
{
return new RGWOp_Bucket_Unlink;
}
RGWOp *RGWHandler_Bucket::op_delete()
{
if (s->info.args.sub_resource_exists("object"))
return new RGWOp_Object_Remove;
return new RGWOp_Bucket_Remove;
}
ceph-main/src/rgw/driver/rados/rgw_rest_bucket.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_rest.h"
#include "rgw_rest_s3.h"
class RGWHandler_Bucket : public RGWHandler_Auth_S3 {
protected:
RGWOp *op_get() override;
RGWOp *op_put() override;
RGWOp *op_post() override;
RGWOp *op_delete() override;
public:
using RGWHandler_Auth_S3::RGWHandler_Auth_S3;
~RGWHandler_Bucket() override = default;
int read_permissions(RGWOp*, optional_yield y) override {
return 0;
}
};
class RGWRESTMgr_Bucket : public RGWRESTMgr {
public:
RGWRESTMgr_Bucket() = default;
~RGWRESTMgr_Bucket() override = default;
RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
return new RGWHandler_Bucket(auth_registry);
}
};
ceph-main/src/rgw/driver/rados/rgw_rest_log.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/ceph_json.h"
#include "common/strtol.h"
#include "rgw_rest.h"
#include "rgw_op.h"
#include "rgw_rest_s3.h"
#include "rgw_rest_log.h"
#include "rgw_client_io.h"
#include "rgw_sync.h"
#include "rgw_data_sync.h"
#include "rgw_common.h"
#include "rgw_zone.h"
#include "rgw_mdlog.h"
#include "rgw_datalog_notify.h"
#include "rgw_trim_bilog.h"
#include "services/svc_zone.h"
#include "services/svc_mdlog.h"
#include "services/svc_bilog_rados.h"
#include "common/errno.h"
#include "include/ceph_assert.h"
#define dout_context g_ceph_context
#define LOG_CLASS_LIST_MAX_ENTRIES (1000)
#define dout_subsys ceph_subsys_rgw
using namespace std;
void RGWOp_MDLog_List::execute(optional_yield y) {
string period = s->info.args.get("period");
string shard = s->info.args.get("id");
string max_entries_str = s->info.args.get("max-entries");
string marker = s->info.args.get("marker"),
err;
void *handle;
unsigned shard_id, max_entries = LOG_CLASS_LIST_MAX_ENTRIES;
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
return;
}
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (!max_entries_str.empty()) {
max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl;
op_ret = -EINVAL;
return;
}
if (max_entries > LOG_CLASS_LIST_MAX_ENTRIES) {
max_entries = LOG_CLASS_LIST_MAX_ENTRIES;
}
}
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = driver->get_zone()->get_current_period_id();
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
return;
}
}
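  // the metadata log is scoped to a period; list entries from the
  // requested shard of that period's log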
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
meta_log.init_list_entries(shard_id, {}, {}, marker, &handle);
op_ret = meta_log.list_entries(this, handle, max_entries, entries,
&last_marker, &truncated);
meta_log.complete_list_entries(handle);
}
void RGWOp_MDLog_List::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
if (op_ret < 0)
return;
s->formatter->open_object_section("log_entries");
s->formatter->dump_string("marker", last_marker);
s->formatter->dump_bool("truncated", truncated);
{
s->formatter->open_array_section("entries");
for (list<cls_log_entry>::iterator iter = entries.begin();
iter != entries.end(); ++iter) {
cls_log_entry& entry = *iter;
static_cast<rgw::sal::RadosStore*>(driver)->ctl()->meta.mgr->dump_log_entry(entry, s->formatter);
flusher.flush();
}
s->formatter->close_section();
}
s->formatter->close_section();
flusher.flush();
}
void RGWOp_MDLog_Info::execute(optional_yield y) {
num_objects = s->cct->_conf->rgw_md_log_max_shards;
period = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->read_oldest_log_period(y, s);
op_ret = period.get_error();
}
void RGWOp_MDLog_Info::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
s->formatter->open_object_section("mdlog");
s->formatter->dump_unsigned("num_objects", num_objects);
if (period) {
s->formatter->dump_string("period", period.get_period().get_id());
s->formatter->dump_unsigned("realm_epoch", period.get_epoch());
}
s->formatter->close_section();
flusher.flush();
}
void RGWOp_MDLog_ShardInfo::execute(optional_yield y) {
string period = s->info.args.get("period");
string shard = s->info.args.get("id");
string err;
unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = driver->get_zone()->get_current_period_id();
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
return;
}
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
op_ret = meta_log.get_info(this, shard_id, &info);
}
void RGWOp_MDLog_ShardInfo::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
encode_json("info", info, s->formatter);
flusher.flush();
}
void RGWOp_MDLog_Delete::execute(optional_yield y) {
string marker = s->info.args.get("marker"),
period = s->info.args.get("period"),
shard = s->info.args.get("id"),
err;
unsigned shard_id;
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (s->info.args.exists("start-marker")) {
ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (s->info.args.exists("end-marker")) {
if (!s->info.args.exists("marker")) {
marker = s->info.args.get("end-marker");
} else {
ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl;
op_ret = -EINVAL;
}
}
  if (op_ret < 0) { // reject requests that failed the argument checks above
    return;
  }
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (marker.empty()) { /* bounding end */
op_ret = -EINVAL;
return;
}
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = driver->get_zone()->get_current_period_id();
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id" << dendl;
op_ret = -EINVAL;
return;
}
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
op_ret = meta_log.trim(this, shard_id, {}, {}, {}, marker);
}
void RGWOp_MDLog_Lock::execute(optional_yield y) {
string period, shard_id_str, duration_str, locker_id, zone_id;
unsigned shard_id;
op_ret = 0;
period = s->info.args.get("period");
shard_id_str = s->info.args.get("id");
duration_str = s->info.args.get("length");
locker_id = s->info.args.get("locker-id");
zone_id = s->info.args.get("zone-id");
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = driver->get_zone()->get_current_period_id();
}
if (period.empty() ||
shard_id_str.empty() ||
      duration_str.empty() ||
locker_id.empty() ||
zone_id.empty()) {
ldpp_dout(this, 5) << "Error invalid parameter list" << dendl;
op_ret = -EINVAL;
return;
}
string err;
shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl;
op_ret = -EINVAL;
return;
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
  // parse into a signed value first so that negative durations are rejected
  // instead of wrapping around to a huge unsigned value
  const long dur_l = strict_strtol(duration_str.c_str(), 10, &err);
  if (!err.empty() || dur_l <= 0) {
    ldpp_dout(this, 5) << "invalid length param " << duration_str << dendl;
    op_ret = -EINVAL;
    return;
  }
  const auto dur = static_cast<unsigned>(dur_l);
op_ret = meta_log.lock_exclusive(s, shard_id, make_timespan(dur), zone_id,
locker_id);
if (op_ret == -EBUSY)
op_ret = -ERR_LOCKED;
}
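// An illustrative lock request, assuming the POST dispatch in
// RGWHandler_Log::op_post() (all values hypothetical; "length" is the lock
// duration in seconds):
//
//   POST /admin/log?type=metadata&lock&id=7&period=<period-id>&length=30&locker-id=rgw1&zone-id=<zone-id>
//
// If another locker currently holds the shard, the op fails with -ERR_LOCKED.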
void RGWOp_MDLog_Unlock::execute(optional_yield y) {
string period, shard_id_str, locker_id, zone_id;
unsigned shard_id;
op_ret = 0;
period = s->info.args.get("period");
shard_id_str = s->info.args.get("id");
locker_id = s->info.args.get("locker-id");
zone_id = s->info.args.get("zone-id");
if (period.empty()) {
ldpp_dout(this, 5) << "Missing period id trying to use current" << dendl;
period = driver->get_zone()->get_current_period_id();
}
if (period.empty() ||
shard_id_str.empty() ||
locker_id.empty() ||
zone_id.empty()) {
ldpp_dout(this, 5) << "Error invalid parameter list" << dendl;
op_ret = -EINVAL;
return;
}
string err;
shard_id = (unsigned)strict_strtol(shard_id_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id param " << shard_id_str << dendl;
op_ret = -EINVAL;
return;
}
RGWMetadataLog meta_log{s->cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone, static_cast<rgw::sal::RadosStore*>(driver)->svc()->cls, period};
op_ret = meta_log.unlock(s, shard_id, zone_id, locker_id);
}
void RGWOp_MDLog_Notify::execute(optional_yield y) {
#define LARGE_ENOUGH_BUF (128 * 1024)
int r = 0;
bufferlist data;
std::tie(r, data) = read_all_input(s, LARGE_ENOUGH_BUF);
if (r < 0) {
op_ret = r;
return;
}
char* buf = data.c_str();
ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl;
JSONParser p;
r = p.parse(buf, data.length());
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl;
op_ret = r;
return;
}
set<int> updated_shards;
try {
decode_json_obj(updated_shards, &p);
} catch (JSONDecoder::err& err) {
ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl;
op_ret = -EINVAL;
return;
}
if (driver->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (set<int>::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) {
ldpp_dout(this, 20) << __func__ << "(): updated shard=" << *iter << dendl;
}
}
driver->wakeup_meta_sync_shards(updated_shards);
op_ret = 0;
}
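// The body decoded above is a bare JSON array of metadata shard numbers,
// e.g. (illustrative):
//
//   [0, 5, 17]
//
// Each listed shard is passed to wakeup_meta_sync_shards().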
void RGWOp_BILog_List::execute(optional_yield y) {
bool gen_specified = false;
string tenant_name = s->info.args.get("tenant"),
bucket_name = s->info.args.get("bucket"),
marker = s->info.args.get("marker"),
max_entries_str = s->info.args.get("max-entries"),
bucket_instance = s->info.args.get("bucket-instance"),
gen_str = s->info.args.get("generation", &gen_specified),
format_version_str = s->info.args.get("format-ver");
std::unique_ptr<rgw::sal::Bucket> bucket;
rgw_bucket b(rgw_bucket_key(tenant_name, bucket_name));
unsigned max_entries;
if (bucket_name.empty() && bucket_instance.empty()) {
ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
op_ret = -EINVAL;
return;
}
string err;
std::optional<uint64_t> gen;
if (gen_specified) {
gen = strict_strtoll(gen_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(s, 5) << "Error parsing generation param " << gen_str << dendl;
op_ret = -EINVAL;
return;
}
}
if (!format_version_str.empty()) {
format_ver = strict_strtoll(format_version_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(s, 5) << "Failed to parse format-ver param: " << format_ver << dendl;
op_ret = -EINVAL;
return;
}
}
int shard_id;
string bn;
op_ret = rgw_bucket_parse_bucket_instance(bucket_instance, &bn, &bucket_instance, &shard_id);
if (op_ret < 0) {
return;
}
if (!bucket_instance.empty()) {
b.name = bn;
b.bucket_id = bucket_instance;
}
op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
const auto& logs = bucket->get_info().layout.logs;
if (logs.empty()) {
ldpp_dout(s, 5) << "ERROR: bucket=" << bucket_name << " has no log layouts" << dendl;
op_ret = -ENOENT;
return;
}
auto log = std::prev(logs.end());
if (gen) {
log = std::find_if(logs.begin(), logs.end(), rgw::matches_gen(*gen));
if (log == logs.end()) {
ldpp_dout(s, 5) << "ERROR: no log layout with gen=" << *gen << dendl;
op_ret = -ENOENT;
return;
}
}
if (auto next = std::next(log); next != logs.end()) {
next_log_layout = *next; // get the next log after the current latest
}
auto& log_layout = *log; // current log layout for log listing
unsigned count = 0;
max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err);
if (!err.empty())
max_entries = LOG_CLASS_LIST_MAX_ENTRIES;
send_response();
do {
list<rgw_bi_log_entry> entries;
int ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->bilog_rados->log_list(s, bucket->get_info(), log_layout, shard_id,
marker, max_entries - count,
entries, &truncated);
if (ret < 0) {
ldpp_dout(this, 5) << "ERROR: list_bi_log_entries()" << dendl;
return;
}
count += entries.size();
send_response(entries, marker);
} while (truncated && count < max_entries);
send_response_end();
}
void RGWOp_BILog_List::send_response() {
if (sent_header)
return;
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
sent_header = true;
if (op_ret < 0)
return;
if (format_ver >= 2) {
s->formatter->open_object_section("result");
}
s->formatter->open_array_section("entries");
}
void RGWOp_BILog_List::send_response(list<rgw_bi_log_entry>& entries, string& marker)
{
for (list<rgw_bi_log_entry>::iterator iter = entries.begin(); iter != entries.end(); ++iter) {
rgw_bi_log_entry& entry = *iter;
encode_json("entry", entry, s->formatter);
marker = entry.id;
flusher.flush();
}
}
void RGWOp_BILog_List::send_response_end() {
s->formatter->close_section();
if (format_ver >= 2) {
encode_json("truncated", truncated, s->formatter);
if (next_log_layout) {
s->formatter->open_object_section("next_log");
encode_json("generation", next_log_layout->gen, s->formatter);
encode_json("num_shards", rgw::num_shards(next_log_layout->layout.in_index.layout), s->formatter);
s->formatter->close_section(); // next_log
}
s->formatter->close_section(); // result
}
flusher.flush();
}
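// An illustrative response body for format-ver >= 2 (values hypothetical;
// with format-ver < 2 only the bare "entries" array is emitted):
//
//   {
//     "entries": [ ... ],
//     "truncated": false,
//     "next_log": { "generation": 2, "num_shards": 11 }
//   }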
void RGWOp_BILog_Info::execute(optional_yield y) {
string tenant_name = s->info.args.get("tenant"),
bucket_name = s->info.args.get("bucket"),
bucket_instance = s->info.args.get("bucket-instance");
std::unique_ptr<rgw::sal::Bucket> bucket;
rgw_bucket b(rgw_bucket_key(tenant_name, bucket_name));
if (bucket_name.empty() && bucket_instance.empty()) {
ldpp_dout(this, 5) << "ERROR: neither bucket nor bucket instance specified" << dendl;
op_ret = -EINVAL;
return;
}
int shard_id;
string bn;
op_ret = rgw_bucket_parse_bucket_instance(bucket_instance, &bn, &bucket_instance, &shard_id);
if (op_ret < 0) {
return;
}
if (!bucket_instance.empty()) {
b.name = bn;
b.bucket_id = bucket_instance;
}
op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
const auto& logs = bucket->get_info().layout.logs;
if (logs.empty()) {
ldpp_dout(s, 5) << "ERROR: bucket=" << bucket_name << " has no log layouts" << dendl;
op_ret = -ENOENT;
return;
}
map<RGWObjCategory, RGWStorageStats> stats;
const auto& index = log_to_index_layout(logs.back());
int ret = bucket->read_stats(s, index, shard_id, &bucket_ver, &master_ver, stats, &max_marker, &syncstopped);
if (ret < 0 && ret != -ENOENT) {
op_ret = ret;
return;
}
oldest_gen = logs.front().gen;
latest_gen = logs.back().gen;
for (auto& log : logs) {
uint32_t num_shards = rgw::num_shards(log.layout.in_index.layout);
generations.push_back({log.gen, num_shards});
}
}
void RGWOp_BILog_Info::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
if (op_ret < 0)
return;
s->formatter->open_object_section("info");
encode_json("bucket_ver", bucket_ver, s->formatter);
encode_json("master_ver", master_ver, s->formatter);
encode_json("max_marker", max_marker, s->formatter);
encode_json("syncstopped", syncstopped, s->formatter);
encode_json("oldest_gen", oldest_gen, s->formatter);
encode_json("latest_gen", latest_gen, s->formatter);
encode_json("generations", generations, s->formatter);
s->formatter->close_section();
flusher.flush();
}
void RGWOp_BILog_Delete::execute(optional_yield y) {
bool gen_specified = false;
string tenant_name = s->info.args.get("tenant"),
bucket_name = s->info.args.get("bucket"),
start_marker = s->info.args.get("start-marker"),
end_marker = s->info.args.get("end-marker"),
bucket_instance = s->info.args.get("bucket-instance"),
gen_str = s->info.args.get("generation", &gen_specified);
std::unique_ptr<rgw::sal::Bucket> bucket;
rgw_bucket b(rgw_bucket_key(tenant_name, bucket_name));
op_ret = 0;
if ((bucket_name.empty() && bucket_instance.empty()) ||
end_marker.empty()) {
ldpp_dout(this, 5) << "ERROR: one of bucket or bucket instance, and also end-marker is mandatory" << dendl;
op_ret = -EINVAL;
return;
}
string err;
uint64_t gen = 0;
if (gen_specified) {
gen = strict_strtoll(gen_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(s, 5) << "Error parsing generation param " << gen_str << dendl;
op_ret = -EINVAL;
return;
}
}
int shard_id;
string bn;
op_ret = rgw_bucket_parse_bucket_instance(bucket_instance, &bn, &bucket_instance, &shard_id);
if (op_ret < 0) {
return;
}
if (!bucket_instance.empty()) {
b.name = bn;
b.bucket_id = bucket_instance;
}
op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 5) << "could not get bucket info for bucket=" << bucket_name << dendl;
return;
}
op_ret = bilog_trim(this, static_cast<rgw::sal::RadosStore*>(driver),
bucket->get_info(), gen, shard_id,
start_marker, end_marker);
if (op_ret < 0) {
ldpp_dout(s, 5) << "bilog_trim failed with op_ret=" << op_ret << dendl;
}
return;
}
void RGWOp_DATALog_List::execute(optional_yield y) {
string shard = s->info.args.get("id");
string max_entries_str = s->info.args.get("max-entries"),
marker = s->info.args.get("marker"),
err;
unsigned shard_id, max_entries = LOG_CLASS_LIST_MAX_ENTRIES;
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
}
s->info.args.get_bool("extra-info", &extra_info, false);
shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (!max_entries_str.empty()) {
max_entries = (unsigned)strict_strtol(max_entries_str.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing max-entries " << max_entries_str << dendl;
op_ret = -EINVAL;
return;
}
if (max_entries > LOG_CLASS_LIST_MAX_ENTRIES) {
max_entries = LOG_CLASS_LIST_MAX_ENTRIES;
}
}
// Note that last_marker is updated to be the marker of the last
// entry listed
op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->
datalog_rados->list_entries(this, shard_id, max_entries, entries,
marker, &last_marker, &truncated, y);
}
void RGWOp_DATALog_List::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
if (op_ret < 0)
return;
s->formatter->open_object_section("log_entries");
s->formatter->dump_string("marker", last_marker);
s->formatter->dump_bool("truncated", truncated);
{
s->formatter->open_array_section("entries");
for (const auto& entry : entries) {
if (!extra_info) {
encode_json("entry", entry.entry, s->formatter);
} else {
encode_json("entry", entry, s->formatter);
}
flusher.flush();
}
s->formatter->close_section();
}
s->formatter->close_section();
flusher.flush();
}
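// An illustrative response body (values hypothetical; entries are dumped
// with their extra info only when extra-info=true is passed):
//
//   {
//     "marker": "1_1578247440.354101_8.1",
//     "truncated": true,
//     "entries": [ { "entity_type": "bucket", "key": "testbucket:<instance-id>:0", ... } ]
//   }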
void RGWOp_DATALog_Info::execute(optional_yield y) {
num_objects = s->cct->_conf->rgw_data_log_num_shards;
op_ret = 0;
}
void RGWOp_DATALog_Info::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
s->formatter->open_object_section("num_objects");
s->formatter->dump_unsigned("num_objects", num_objects);
s->formatter->close_section();
flusher.flush();
}
void RGWOp_DATALog_ShardInfo::execute(optional_yield y) {
string shard = s->info.args.get("id");
string err;
unsigned shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->
datalog_rados->get_info(this, shard_id, &info, y);
}
void RGWOp_DATALog_ShardInfo::send_response() {
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
encode_json("info", info, s->formatter);
flusher.flush();
}
void RGWOp_DATALog_Notify::execute(optional_yield y) {
string source_zone = s->info.args.get("source-zone");
#define LARGE_ENOUGH_BUF (128 * 1024)
int r = 0;
bufferlist data;
std::tie(r, data) = read_all_input(s, LARGE_ENOUGH_BUF);
if (r < 0) {
op_ret = r;
return;
}
char* buf = data.c_str();
ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl;
JSONParser p;
r = p.parse(buf, data.length());
if (r < 0) {
ldpp_dout(this, 0) << "ERROR: failed to parse JSON" << dendl;
op_ret = r;
return;
}
bc::flat_map<int, bc::flat_set<rgw_data_notify_entry>> updated_shards;
try {
auto decoder = rgw_data_notify_v1_decoder{updated_shards};
decode_json_obj(decoder, &p);
} catch (JSONDecoder::err& err) {
ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl;
op_ret = -EINVAL;
return;
}
if (driver->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter = updated_shards.begin(); iter != updated_shards.end(); ++iter) {
ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
bc::flat_set<rgw_data_notify_entry>& entries = iter->second;
for (const auto& [key, gen] : entries) {
ldpp_dout(this, 20) << __func__ << "(): modified key=" << key
<< " of gen=" << gen << dendl;
}
}
}
driver->wakeup_data_sync_shards(this, source_zone, updated_shards);
op_ret = 0;
}
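// The v1 payload decoded above maps a datalog shard number to the set of
// modified bucket-shard keys, with no per-entry generation (illustrative):
//
//   { "20": [ "testbucket:<instance-id>:0" ] }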
void RGWOp_DATALog_Notify2::execute(optional_yield y) {
string source_zone = s->info.args.get("source-zone");
#define LARGE_ENOUGH_BUF (128 * 1024)
int r = 0;
bufferlist data;
std::tie(r, data) = rgw_rest_read_all_input(s, LARGE_ENOUGH_BUF);
if (r < 0) {
op_ret = r;
return;
}
char* buf = data.c_str();
  ldpp_dout(this, 20) << __func__ << "(): read data: " << buf << dendl;
JSONParser p;
r = p.parse(buf, data.length());
if (r < 0) {
ldout(s->cct, 0) << "ERROR: failed to parse JSON" << dendl;
op_ret = r;
return;
}
bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> > updated_shards;
try {
decode_json_obj(updated_shards, &p);
} catch (JSONDecoder::err& err) {
ldpp_dout(this, 0) << "ERROR: failed to decode JSON" << dendl;
op_ret = -EINVAL;
return;
}
if (driver->ctx()->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) {
for (bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >::iterator iter =
updated_shards.begin(); iter != updated_shards.end(); ++iter) {
ldpp_dout(this, 20) << __func__ << "(): updated shard=" << iter->first << dendl;
bc::flat_set<rgw_data_notify_entry>& entries = iter->second;
for (const auto& [key, gen] : entries) {
ldpp_dout(this, 20) << __func__ << "(): modified key=" << key <<
" of generation=" << gen << dendl;
}
}
}
driver->wakeup_data_sync_shards(this, source_zone, updated_shards);
op_ret = 0;
}
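// The v2 payload decoded above adds an explicit log generation per entry
// (illustrative):
//
//   { "20": [ { "key": "testbucket:<instance-id>:0", "gen": 1 } ] }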
void RGWOp_DATALog_Delete::execute(optional_yield y) {
string marker = s->info.args.get("marker"),
shard = s->info.args.get("id"),
err;
unsigned shard_id;
op_ret = 0;
if (s->info.args.exists("start-time") ||
s->info.args.exists("end-time")) {
ldpp_dout(this, 5) << "start-time and end-time are no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (s->info.args.exists("start-marker")) {
ldpp_dout(this, 5) << "start-marker is no longer accepted" << dendl;
op_ret = -EINVAL;
}
if (s->info.args.exists("end-marker")) {
if (!s->info.args.exists("marker")) {
marker = s->info.args.get("end-marker");
} else {
ldpp_dout(this, 5) << "end-marker and marker cannot both be provided" << dendl;
op_ret = -EINVAL;
}
}
  if (op_ret < 0) { // reject requests that failed the argument checks above
    return;
  }
  shard_id = (unsigned)strict_strtol(shard.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(this, 5) << "Error parsing shard_id " << shard << dendl;
op_ret = -EINVAL;
return;
}
if (marker.empty()) { /* bounding end */
op_ret = -EINVAL;
return;
}
op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->
datalog_rados->trim_entries(this, shard_id, marker, y);
}
// not in header to avoid pulling in rgw_sync.h
class RGWOp_MDLog_Status : public RGWRESTOp {
rgw_meta_sync_status status;
public:
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_READ);
}
int verify_permission(optional_yield) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override { return "get_metadata_log_status"; }
};
void RGWOp_MDLog_Status::execute(optional_yield y)
{
auto sync = static_cast<rgw::sal::RadosStore*>(driver)->getRados()->get_meta_sync_manager();
if (sync == nullptr) {
ldpp_dout(this, 1) << "no sync manager" << dendl;
op_ret = -ENOENT;
return;
}
op_ret = sync->read_sync_status(this, &status);
}
void RGWOp_MDLog_Status::send_response()
{
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
if (op_ret >= 0) {
encode_json("status", status, s->formatter);
}
flusher.flush();
}
// not in header to avoid pulling in rgw_data_sync.h
class RGWOp_BILog_Status : public RGWRESTOp {
bilog_status_v2 status;
int version = 1;
public:
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("bilog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override { return "get_bucket_index_log_status"; }
};
void RGWOp_BILog_Status::execute(optional_yield y)
{
const auto options = s->info.args.get("options");
bool merge = (options == "merge");
const auto source_zone = s->info.args.get("source-zone");
const auto source_key = s->info.args.get("source-bucket");
auto key = s->info.args.get("bucket");
op_ret = s->info.args.get_int("version", &version, 1);
if (key.empty()) {
key = source_key;
}
if (key.empty()) {
ldpp_dout(this, 4) << "no 'bucket' provided" << dendl;
op_ret = -EINVAL;
return;
}
rgw_bucket b;
int shard_id{-1}; // unused
op_ret = rgw_bucket_parse_bucket_key(s->cct, key, &b, &shard_id);
if (op_ret < 0) {
ldpp_dout(this, 4) << "invalid 'bucket' provided" << dendl;
op_ret = -EINVAL;
return;
}
// read the bucket instance info for num_shards
std::unique_ptr<rgw::sal::Bucket> bucket;
op_ret = driver->get_bucket(s, nullptr, b, &bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 4) << "failed to read bucket info: " << cpp_strerror(op_ret) << dendl;
return;
}
rgw_bucket source_bucket;
if (source_key.empty() ||
source_key == key) {
source_bucket = bucket->get_key();
} else {
op_ret = rgw_bucket_parse_bucket_key(s->cct, source_key, &source_bucket, nullptr);
if (op_ret < 0) {
ldpp_dout(this, 4) << "invalid 'source-bucket' provided (key=" << source_key << ")" << dendl;
return;
}
}
const auto& local_zone_id = driver->get_zone()->get_id();
if (!merge) {
rgw_sync_bucket_pipe pipe;
pipe.source.zone = source_zone;
pipe.source.bucket = source_bucket;
pipe.dest.zone = local_zone_id;
pipe.dest.bucket = bucket->get_key();
ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl;
op_ret = rgw_read_bucket_full_sync_status(
this,
static_cast<rgw::sal::RadosStore*>(driver),
pipe,
&status.sync_status,
s->yield);
if (op_ret < 0) {
ldpp_dout(this, -1) << "ERROR: rgw_read_bucket_full_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl;
return;
}
status.inc_status.resize(status.sync_status.shards_done_with_gen.size());
op_ret = rgw_read_bucket_inc_sync_status(
this,
static_cast<rgw::sal::RadosStore*>(driver),
pipe,
status.sync_status.incremental_gen,
&status.inc_status);
if (op_ret < 0) {
ldpp_dout(this, -1) << "ERROR: rgw_read_bucket_inc_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl;
}
return;
}
rgw_zone_id source_zone_id(source_zone);
RGWBucketSyncPolicyHandlerRef source_handler;
op_ret = driver->get_sync_policy_handler(s, source_zone_id, source_bucket, &source_handler, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "could not get bucket sync policy handler (r=" << op_ret << ")" << dendl;
return;
}
auto local_dests = source_handler->get_all_dests_in_zone(local_zone_id);
std::vector<rgw_bucket_shard_sync_info> current_status;
for (auto& entry : local_dests) {
auto pipe = entry.second;
ldpp_dout(this, 20) << "RGWOp_BILog_Status::execute(optional_yield y): getting sync status for pipe=" << pipe << dendl;
RGWBucketInfo *pinfo = &bucket->get_info();
std::optional<RGWBucketInfo> opt_dest_info;
if (!pipe.dest.bucket) {
/* Uh oh, something went wrong */
ldpp_dout(this, 20) << "ERROR: RGWOp_BILog_Status::execute(optional_yield y): BUG: pipe.dest.bucket was not initialized" << pipe << dendl;
op_ret = -EIO;
return;
}
if (*pipe.dest.bucket != pinfo->bucket) {
opt_dest_info.emplace();
std::unique_ptr<rgw::sal::Bucket> dest_bucket;
op_ret = driver->get_bucket(s, nullptr, *pipe.dest.bucket, &dest_bucket, y);
if (op_ret < 0) {
ldpp_dout(this, 4) << "failed to read target bucket info (bucket=: " << cpp_strerror(op_ret) << dendl;
return;
}
*opt_dest_info = dest_bucket->get_info();
pinfo = &(*opt_dest_info);
pipe.dest.bucket = pinfo->bucket;
}
op_ret = rgw_read_bucket_full_sync_status(
this,
static_cast<rgw::sal::RadosStore*>(driver),
pipe,
&status.sync_status,
s->yield);
if (op_ret < 0) {
ldpp_dout(this, -1) << "ERROR: rgw_read_bucket_full_sync_status() on pipe=" << pipe << " returned ret=" << op_ret << dendl;
return;
}
current_status.resize(status.sync_status.shards_done_with_gen.size());
int r = rgw_read_bucket_inc_sync_status(this, static_cast<rgw::sal::RadosStore*>(driver),
                                            pipe, status.sync_status.incremental_gen, &current_status);
if (r < 0) {
ldpp_dout(this, -1) << "ERROR: rgw_read_bucket_inc_sync_status() on pipe=" << pipe << " returned ret=" << r << dendl;
op_ret = r;
return;
}
if (status.inc_status.empty()) {
status.inc_status = std::move(current_status);
} else {
if (current_status.size() != status.inc_status.size()) {
op_ret = -EINVAL;
ldpp_dout(this, -1) << "ERROR: different number of shards for sync status of buckets "
"syncing from the same source: status.size()= "
<< status.inc_status.size()
<< " current_status.size()="
<< current_status.size() << dendl;
return;
}
auto m = status.inc_status.begin();
for (auto& cur_shard_status : current_status) {
auto& result_shard_status = *m++;
// always take the first marker, or any later marker that's smaller
if (cur_shard_status.inc_marker.position < result_shard_status.inc_marker.position) {
result_shard_status = std::move(cur_shard_status);
}
}
}
}
}
void RGWOp_BILog_Status::send_response()
{
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
if (op_ret >= 0) {
if (version < 2) {
encode_json("status", status.inc_status, s->formatter);
} else {
encode_json("status", status, s->formatter);
}
}
flusher.flush();
}
// not in header to avoid pulling in rgw_data_sync.h
class RGWOp_DATALog_Status : public RGWRESTOp {
rgw_data_sync_status status;
public:
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
  void execute(optional_yield y) override;
void send_response() override;
const char* name() const override { return "get_data_changes_log_status"; }
};
void RGWOp_DATALog_Status::execute(optional_yield y)
{
const auto source_zone = s->info.args.get("source-zone");
auto sync = driver->get_data_sync_manager(source_zone);
if (sync == nullptr) {
ldpp_dout(this, 1) << "no sync manager for source-zone " << source_zone << dendl;
op_ret = -ENOENT;
return;
}
op_ret = sync->read_sync_status(this, &status);
}
void RGWOp_DATALog_Status::send_response()
{
set_req_state_err(s, op_ret);
dump_errno(s);
end_header(s);
if (op_ret >= 0) {
encode_json("status", status, s->formatter);
}
flusher.flush();
}
RGWOp *RGWHandler_Log::op_get() {
bool exists;
string type = s->info.args.get("type", &exists);
if (!exists) {
return NULL;
}
if (type.compare("metadata") == 0) {
if (s->info.args.exists("id")) {
if (s->info.args.exists("info")) {
return new RGWOp_MDLog_ShardInfo;
} else {
return new RGWOp_MDLog_List;
}
} else if (s->info.args.exists("status")) {
return new RGWOp_MDLog_Status;
} else {
return new RGWOp_MDLog_Info;
}
} else if (type.compare("bucket-index") == 0) {
if (s->info.args.exists("info")) {
return new RGWOp_BILog_Info;
} else if (s->info.args.exists("status")) {
return new RGWOp_BILog_Status;
} else {
return new RGWOp_BILog_List;
}
} else if (type.compare("data") == 0) {
if (s->info.args.exists("id")) {
if (s->info.args.exists("info")) {
return new RGWOp_DATALog_ShardInfo;
} else {
return new RGWOp_DATALog_List;
}
} else if (s->info.args.exists("status")) {
return new RGWOp_DATALog_Status;
} else {
return new RGWOp_DATALog_Info;
}
}
return NULL;
}
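// A condensed view of the GET dispatch above (query args select the op):
//
//   type=metadata:     id -> List, id&info -> ShardInfo, status -> Status, else Info
//   type=bucket-index: info -> Info, status -> Status, else List
//   type=data:         id -> List, id&info -> ShardInfo, status -> Status, else Info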
RGWOp *RGWHandler_Log::op_delete() {
bool exists;
string type = s->info.args.get("type", &exists);
if (!exists) {
return NULL;
}
if (type.compare("metadata") == 0)
return new RGWOp_MDLog_Delete;
else if (type.compare("bucket-index") == 0)
return new RGWOp_BILog_Delete;
else if (type.compare("data") == 0)
return new RGWOp_DATALog_Delete;
return NULL;
}
RGWOp *RGWHandler_Log::op_post() {
bool exists;
string type = s->info.args.get("type", &exists);
if (!exists) {
return NULL;
}
if (type.compare("metadata") == 0) {
if (s->info.args.exists("lock"))
return new RGWOp_MDLog_Lock;
else if (s->info.args.exists("unlock"))
return new RGWOp_MDLog_Unlock;
else if (s->info.args.exists("notify"))
return new RGWOp_MDLog_Notify;
} else if (type.compare("data") == 0) {
if (s->info.args.exists("notify")) {
return new RGWOp_DATALog_Notify;
} else if (s->info.args.exists("notify2")) {
return new RGWOp_DATALog_Notify2;
}
}
return NULL;
}
| 37,272 | 28.371946 | 155 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_rest_log.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2013 eNovance SAS <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_datalog.h"
#include "rgw_rest.h"
#include "rgw_rest_s3.h"
#include "rgw_metadata.h"
#include "rgw_mdlog.h"
#include "rgw_data_sync.h"
class RGWOp_BILog_List : public RGWRESTOp {
bool sent_header;
uint32_t format_ver{0};
bool truncated{false};
std::optional<rgw::bucket_log_layout_generation> next_log_layout;
public:
RGWOp_BILog_List() : sent_header(false) {}
~RGWOp_BILog_List() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("bilog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void send_response() override;
virtual void send_response(std::list<rgw_bi_log_entry>& entries, std::string& marker);
virtual void send_response_end();
void execute(optional_yield y) override;
const char* name() const override {
return "list_bucket_index_log";
}
};
class RGWOp_BILog_Info : public RGWRESTOp {
std::string bucket_ver;
std::string master_ver;
std::string max_marker;
bool syncstopped;
uint64_t oldest_gen = 0;
uint64_t latest_gen = 0;
std::vector<store_gen_shards> generations;
public:
RGWOp_BILog_Info() : bucket_ver(), master_ver(), syncstopped(false) {}
~RGWOp_BILog_Info() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("bilog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void send_response() override;
void execute(optional_yield y) override;
const char* name() const override {
return "bucket_index_log_info";
}
};
class RGWOp_BILog_Delete : public RGWRESTOp {
public:
RGWOp_BILog_Delete() {}
~RGWOp_BILog_Delete() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("bilog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "trim_bucket_index_log";
}
};
class RGWOp_MDLog_List : public RGWRESTOp {
std::list<cls_log_entry> entries;
std::string last_marker;
bool truncated;
public:
RGWOp_MDLog_List() : truncated(false) {}
~RGWOp_MDLog_List() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override {
return "list_metadata_log";
}
};
class RGWOp_MDLog_Info : public RGWRESTOp {
unsigned num_objects;
RGWPeriodHistory::Cursor period;
public:
RGWOp_MDLog_Info() : num_objects(0) {}
~RGWOp_MDLog_Info() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override {
return "get_metadata_log_info";
}
};
class RGWOp_MDLog_ShardInfo : public RGWRESTOp {
RGWMetadataLogInfo info;
public:
RGWOp_MDLog_ShardInfo() {}
~RGWOp_MDLog_ShardInfo() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override {
return "get_metadata_log_shard_info";
}
};
class RGWOp_MDLog_Lock : public RGWRESTOp {
public:
RGWOp_MDLog_Lock() {}
~RGWOp_MDLog_Lock() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "lock_mdlog_object";
}
};
class RGWOp_MDLog_Unlock : public RGWRESTOp {
public:
RGWOp_MDLog_Unlock() {}
~RGWOp_MDLog_Unlock() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "unlock_mdlog_object";
}
};
class RGWOp_MDLog_Notify : public RGWRESTOp {
public:
RGWOp_MDLog_Notify() {}
~RGWOp_MDLog_Notify() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "mdlog_notify";
}
RGWOpType get_type() override { return RGW_OP_SYNC_MDLOG_NOTIFY; }
};
class RGWOp_MDLog_Delete : public RGWRESTOp {
public:
RGWOp_MDLog_Delete() {}
~RGWOp_MDLog_Delete() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("mdlog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "trim_metadata_log";
}
};
class RGWOp_DATALog_List : public RGWRESTOp {
std::vector<rgw_data_change_log_entry> entries;
std::string last_marker;
bool truncated;
bool extra_info;
public:
RGWOp_DATALog_List() : truncated(false), extra_info(false) {}
~RGWOp_DATALog_List() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override {
return "list_data_changes_log";
}
};
class RGWOp_DATALog_Info : public RGWRESTOp {
unsigned num_objects;
public:
RGWOp_DATALog_Info() : num_objects(0) {}
~RGWOp_DATALog_Info() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override {
return "get_data_changes_log_info";
}
};
class RGWOp_DATALog_ShardInfo : public RGWRESTOp {
RGWDataChangesLogInfo info;
public:
RGWOp_DATALog_ShardInfo() {}
~RGWOp_DATALog_ShardInfo() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_READ);
}
int verify_permission(optional_yield y) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override {
return "get_data_changes_log_shard_info";
}
};
class RGWOp_DATALog_Notify : public RGWRESTOp {
public:
RGWOp_DATALog_Notify() {}
~RGWOp_DATALog_Notify() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "datalog_notify";
}
RGWOpType get_type() override { return RGW_OP_SYNC_DATALOG_NOTIFY; }
};
class RGWOp_DATALog_Notify2 : public RGWRESTOp {
rgw_data_notify_entry data_notify;
public:
RGWOp_DATALog_Notify2() {}
~RGWOp_DATALog_Notify2() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "datalog_notify2";
}
RGWOpType get_type() override { return RGW_OP_SYNC_DATALOG_NOTIFY2; }
};
class RGWOp_DATALog_Delete : public RGWRESTOp {
public:
RGWOp_DATALog_Delete() {}
~RGWOp_DATALog_Delete() override {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("datalog", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override {
return "trim_data_changes_log";
}
};
class RGWHandler_Log : public RGWHandler_Auth_S3 {
protected:
RGWOp *op_get() override;
RGWOp *op_delete() override;
RGWOp *op_post() override;
int read_permissions(RGWOp*, optional_yield) override {
return 0;
}
public:
using RGWHandler_Auth_S3::RGWHandler_Auth_S3;
~RGWHandler_Log() override = default;
};
class RGWRESTMgr_Log : public RGWRESTMgr {
public:
RGWRESTMgr_Log() = default;
~RGWRESTMgr_Log() override = default;
RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state* const,
const rgw::auth::StrategyRegistry& auth_registry,
                               const std::string& frontend_prefix) override {
return new RGWHandler_Log(auth_registry);
}
};
| 9,229 | 26.307692 | 88 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_rest_pubsub.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include "rgw_rest_s3.h"
// s3 compliant notification handler factory
class RGWHandler_REST_PSNotifs_S3 : public RGWHandler_REST_S3 {
protected:
int init_permissions(RGWOp* op, optional_yield y) override {return 0;}
int read_permissions(RGWOp* op, optional_yield y) override {return 0;}
bool supports_quota() override {return false;}
RGWOp* op_get() override;
RGWOp* op_put() override;
RGWOp* op_delete() override;
public:
using RGWHandler_REST_S3::RGWHandler_REST_S3;
virtual ~RGWHandler_REST_PSNotifs_S3() = default;
// following are used to generate the operations when invoked by another REST handler
static RGWOp* create_get_op();
static RGWOp* create_put_op();
static RGWOp* create_delete_op();
};
// AWS compliant topics handler factory
class RGWHandler_REST_PSTopic_AWS : public RGWHandler_REST {
const rgw::auth::StrategyRegistry& auth_registry;
protected:
RGWOp* op_post() override;
public:
RGWHandler_REST_PSTopic_AWS(const rgw::auth::StrategyRegistry& _auth_registry) :
auth_registry(_auth_registry) {}
virtual ~RGWHandler_REST_PSTopic_AWS() = default;
int postauth_init(optional_yield) override { return 0; }
int authorize(const DoutPrefixProvider* dpp, optional_yield y) override;
static bool action_exists(const req_state* s);
};
| 1,405 | 35.051282 | 87 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_rest_realm.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/errno.h"
#include "rgw_rest_realm.h"
#include "rgw_rest_s3.h"
#include "rgw_rest_config.h"
#include "rgw_zone.h"
#include "rgw_sal_rados.h"
#include "services/svc_zone.h"
#include "services/svc_mdlog.h"
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
// reject 'period push' if we would have to fetch too many intermediate periods
static const uint32_t PERIOD_HISTORY_FETCH_MAX = 64;
// base period op, shared between Get and Post
class RGWOp_Period_Base : public RGWRESTOp {
protected:
RGWPeriod period;
std::ostringstream error_stream;
public:
int verify_permission(optional_yield) override { return 0; }
void send_response() override;
};
// reply with the period object on success
void RGWOp_Period_Base::send_response()
{
set_req_state_err(s, op_ret, error_stream.str());
dump_errno(s);
if (op_ret < 0) {
if (!s->err.message.empty()) {
ldpp_dout(this, 4) << "Request failed with " << op_ret
<< ": " << s->err.message << dendl;
}
end_header(s);
return;
}
encode_json("period", period, s->formatter);
end_header(s, NULL, "application/json", s->formatter->get_len());
flusher.flush();
}
// GET /admin/realm/period
class RGWOp_Period_Get : public RGWOp_Period_Base {
public:
void execute(optional_yield y) override;
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("zone", RGW_CAP_READ);
}
int verify_permission(optional_yield) override {
return check_caps(s->user->get_caps());
}
const char* name() const override { return "get_period"; }
};
void RGWOp_Period_Get::execute(optional_yield y)
{
string realm_id, realm_name, period_id;
epoch_t epoch = 0;
RESTArgs::get_string(s, "realm_id", realm_id, &realm_id);
RESTArgs::get_string(s, "realm_name", realm_name, &realm_name);
RESTArgs::get_string(s, "period_id", period_id, &period_id);
RESTArgs::get_uint32(s, "epoch", 0, &epoch);
period.set_id(period_id);
period.set_epoch(epoch);
op_ret = period.init(this, driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm_id, y, realm_name);
if (op_ret < 0)
ldpp_dout(this, 5) << "failed to read period" << dendl;
}
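// An illustrative request (ids hypothetical; omitting epoch, or passing 0,
// selects the latest epoch of the period):
//
//   GET /admin/realm/period?period_id=<period-id>&epoch=2
//
// On success RGWOp_Period_Base::send_response() returns {"period": {...}}.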
// POST /admin/realm/period
class RGWOp_Period_Post : public RGWOp_Period_Base {
public:
void execute(optional_yield y) override;
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("zone", RGW_CAP_WRITE);
}
int verify_permission(optional_yield) override {
return check_caps(s->user->get_caps());
}
const char* name() const override { return "post_period"; }
RGWOpType get_type() override { return RGW_OP_PERIOD_POST; }
};
void RGWOp_Period_Post::execute(optional_yield y)
{
auto cct = driver->ctx();
// initialize the period without reading from rados
period.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y, false);
// decode the period from input
const auto max_size = cct->_conf->rgw_max_put_param_size;
bool empty;
op_ret = get_json_input(cct, s, period, max_size, &empty);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to decode period" << dendl;
return;
}
// require period.realm_id to match our realm
if (period.get_realm() != static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_realm().get_id()) {
error_stream << "period with realm id " << period.get_realm()
<< " doesn't match current realm " << static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_realm().get_id() << std::endl;
op_ret = -EINVAL;
return;
}
// load the realm and current period from rados; there may be a more recent
// period that we haven't restarted with yet. we also don't want to modify
// the objects in use by RGWRados
RGWRealm realm(period.get_realm());
op_ret = realm.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current realm: "
<< cpp_strerror(-op_ret) << dendl;
return;
}
RGWPeriod current_period;
op_ret = current_period.init(this, cct, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, realm.get_id(), y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to read current period: "
<< cpp_strerror(-op_ret) << dendl;
return;
}
// if period id is empty, handle as 'period commit'
if (period.get_id().empty()) {
op_ret = period.commit(this, driver, realm, current_period, error_stream, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "master zone failed to commit period" << dendl;
}
return;
}
// if it's not period commit, nobody is allowed to push to the master zone
if (period.get_master_zone() == static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->get_zone_params().get_id()) {
ldpp_dout(this, 10) << "master zone rejecting period id="
<< period.get_id() << " epoch=" << period.get_epoch() << dendl;
op_ret = -EINVAL; // XXX: error code
return;
}
// write the period to rados
op_ret = period.store_info(this, false, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to store period " << period.get_id() << dendl;
return;
}
// set as latest epoch
op_ret = period.update_latest_epoch(this, period.get_epoch(), y);
if (op_ret == -EEXIST) {
// already have this epoch (or a more recent one)
ldpp_dout(this, 4) << "already have epoch >= " << period.get_epoch()
<< " for period " << period.get_id() << dendl;
op_ret = 0;
return;
}
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to set latest epoch" << dendl;
return;
}
auto period_history = static_cast<rgw::sal::RadosStore*>(driver)->svc()->mdlog->get_period_history();
// decide whether we can set_current_period() or set_latest_epoch()
if (period.get_id() != current_period.get_id()) {
auto current_epoch = current_period.get_realm_epoch();
// discard periods in the past
if (period.get_realm_epoch() < current_epoch) {
ldpp_dout(this, 10) << "discarding period " << period.get_id()
<< " with realm epoch " << period.get_realm_epoch()
<< " older than current epoch " << current_epoch << dendl;
// return success to ack that we have this period
return;
}
// discard periods too far in the future
if (period.get_realm_epoch() > current_epoch + PERIOD_HISTORY_FETCH_MAX) {
ldpp_dout(this, -1) << "discarding period " << period.get_id()
<< " with realm epoch " << period.get_realm_epoch() << " too far in "
"the future from current epoch " << current_epoch << dendl;
op_ret = -ENOENT; // XXX: error code
return;
}
// attach a copy of the period into the period history
auto cursor = period_history->attach(this, RGWPeriod{period}, y);
if (!cursor) {
// we're missing some history between the new period and current_period
op_ret = cursor.get_error();
ldpp_dout(this, -1) << "failed to collect the periods between current period "
<< current_period.get_id() << " (realm epoch " << current_epoch
<< ") and the new period " << period.get_id()
<< " (realm epoch " << period.get_realm_epoch()
<< "): " << cpp_strerror(-op_ret) << dendl;
return;
}
if (cursor.has_next()) {
// don't switch if we have a newer period in our history
ldpp_dout(this, 4) << "attached period " << period.get_id()
<< " to history, but the history contains newer periods" << dendl;
return;
}
// set as current period
op_ret = realm.set_current_period(this, period, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to update realm's current period" << dendl;
return;
}
ldpp_dout(this, 4) << "period " << period.get_id()
<< " is newer than current period " << current_period.get_id()
<< ", updating realm's current period and notifying zone" << dendl;
realm.notify_new_period(this, period, y);
return;
}
// reflect the period into our local objects
op_ret = period.reflect(this, y);
if (op_ret < 0) {
ldpp_dout(this, -1) << "failed to update local objects: "
<< cpp_strerror(-op_ret) << dendl;
return;
}
ldpp_dout(this, 4) << "period epoch " << period.get_epoch()
<< " is newer than current epoch " << current_period.get_epoch()
<< ", updating period's latest epoch and notifying zone" << dendl;
realm.notify_new_period(this, period, y);
// update the period history
period_history->insert(RGWPeriod{period});
}
class RGWHandler_Period : public RGWHandler_Auth_S3 {
protected:
using RGWHandler_Auth_S3::RGWHandler_Auth_S3;
RGWOp *op_get() override { return new RGWOp_Period_Get; }
RGWOp *op_post() override { return new RGWOp_Period_Post; }
};
class RGWRESTMgr_Period : public RGWRESTMgr {
public:
RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
return new RGWHandler_Period(auth_registry);
}
};
// GET /admin/realm
class RGWOp_Realm_Get : public RGWRESTOp {
std::unique_ptr<RGWRealm> realm;
public:
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("zone", RGW_CAP_READ);
}
int verify_permission(optional_yield) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override { return "get_realm"; }
};
void RGWOp_Realm_Get::execute(optional_yield y)
{
string id;
RESTArgs::get_string(s, "id", id, &id);
string name;
RESTArgs::get_string(s, "name", name, &name);
// read realm
realm.reset(new RGWRealm(id, name));
op_ret = realm->init(this, g_ceph_context, static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj, y);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to read realm id=" << id
<< " name=" << name << dendl;
}
void RGWOp_Realm_Get::send_response()
{
set_req_state_err(s, op_ret);
dump_errno(s);
if (op_ret < 0) {
end_header(s);
return;
}
encode_json("realm", *realm, s->formatter);
end_header(s, NULL, "application/json", s->formatter->get_len());
flusher.flush();
}
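// An illustrative exchange (ids hypothetical; either id or name selects
// the realm):
//
//   GET /admin/realm?id=<realm-id>
//
// responds with {"realm": {"id": "...", "name": "...", ...}}.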
// GET /admin/realm?list
class RGWOp_Realm_List : public RGWRESTOp {
std::string default_id;
std::list<std::string> realms;
public:
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("zone", RGW_CAP_READ);
}
int verify_permission(optional_yield) override {
return check_caps(s->user->get_caps());
}
void execute(optional_yield y) override;
void send_response() override;
const char* name() const override { return "list_realms"; }
};
void RGWOp_Realm_List::execute(optional_yield y)
{
{
// read default realm
RGWRealm realm(driver->ctx(), static_cast<rgw::sal::RadosStore*>(driver)->svc()->sysobj);
[[maybe_unused]] int ret = realm.read_default_id(this, default_id, y);
}
op_ret = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone->list_realms(this, realms);
if (op_ret < 0)
ldpp_dout(this, -1) << "failed to list realms" << dendl;
}
void RGWOp_Realm_List::send_response()
{
set_req_state_err(s, op_ret);
dump_errno(s);
if (op_ret < 0) {
end_header(s);
return;
}
s->formatter->open_object_section("realms_list");
encode_json("default_info", default_id, s->formatter);
encode_json("realms", realms, s->formatter);
s->formatter->close_section();
end_header(s, NULL, "application/json", s->formatter->get_len());
flusher.flush();
}
class RGWHandler_Realm : public RGWHandler_Auth_S3 {
protected:
using RGWHandler_Auth_S3::RGWHandler_Auth_S3;
RGWOp *op_get() override {
if (s->info.args.sub_resource_exists("list"))
return new RGWOp_Realm_List;
return new RGWOp_Realm_Get;
}
};
RGWRESTMgr_Realm::RGWRESTMgr_Realm()
{
// add the /admin/realm/period resource
register_resource("period", new RGWRESTMgr_Period);
}
RGWHandler_REST*
RGWRESTMgr_Realm::get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&)
{
return new RGWHandler_Realm(auth_registry);
}
| 12,494 | 32.143236 | 137 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_rest_realm.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_rest.h"
class RGWRESTMgr_Realm : public RGWRESTMgr {
public:
RGWRESTMgr_Realm();
RGWHandler_REST* get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override;
};
| 443 | 25.117647 | 80 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_rest_user.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/ceph_json.h"
#include "rgw_op.h"
#include "rgw_user.h"
#include "rgw_rest_user.h"
#include "rgw_sal.h"
#include "include/str_list.h"
#include "include/ceph_assert.h"
#include "services/svc_zone.h"
#include "services/svc_sys_obj.h"
#include "rgw_zone.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
int fetch_access_keys_from_master(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState &op_state, req_state *s, optional_yield y) {
bufferlist data;
JSONParser jp;
RGWUserInfo ui;
int op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, &jp, s->info, y);
if (op_ret < 0) {
ldpp_dout(dpp, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return op_ret;
}
ui.decode_json(&jp);
op_state.op_access_keys = std::move(ui.access_keys);
return 0;
}
class RGWOp_User_List : public RGWRESTOp {
public:
RGWOp_User_List() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_READ);
}
void execute(optional_yield y) override;
const char* name() const override { return "list_user"; }
};
void RGWOp_User_List::execute(optional_yield y)
{
RGWUserAdminOpState op_state(driver);
uint32_t max_entries;
std::string marker;
RESTArgs::get_uint32(s, "max-entries", 1000, &max_entries);
RESTArgs::get_string(s, "marker", marker, &marker);
op_state.max_entries = max_entries;
op_state.marker = marker;
op_ret = RGWUserAdminOp_User::list(this, driver, op_state, flusher);
}
class RGWOp_User_Info : public RGWRESTOp {
public:
RGWOp_User_Info() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_READ);
}
void execute(optional_yield y) override;
const char* name() const override { return "get_user_info"; }
};
void RGWOp_User_Info::execute(optional_yield y)
{
RGWUserAdminOpState op_state(driver);
std::string uid_str, access_key_str;
bool fetch_stats;
bool sync_stats;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
RESTArgs::get_string(s, "access-key", access_key_str, &access_key_str);
// if uid was not supplied in rest argument, error out now, otherwise we'll
// end up initializing anonymous user, for which keys.init will eventually
  // return -EACCES
if (uid_str.empty() && access_key_str.empty()){
op_ret=-EINVAL;
return;
}
rgw_user uid(uid_str);
RESTArgs::get_bool(s, "stats", false, &fetch_stats);
RESTArgs::get_bool(s, "sync", false, &sync_stats);
op_state.set_user_id(uid);
op_state.set_access_key(access_key_str);
op_state.set_fetch_stats(fetch_stats);
op_state.set_sync_stats(sync_stats);
op_ret = RGWUserAdminOp_User::info(s, driver, op_state, flusher, y);
}
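// An illustrative request (either uid or access-key must be supplied, as
// enforced above; values hypothetical):
//
//   GET /admin/user?uid=testuser&stats=true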
class RGWOp_User_Create : public RGWRESTOp {
public:
RGWOp_User_Create() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "create_user"; }
};
void RGWOp_User_Create::execute(optional_yield y)
{
std::string uid_str;
std::string display_name;
std::string email;
std::string access_key;
std::string secret_key;
std::string key_type_str;
std::string caps;
std::string tenant_name;
std::string op_mask_str;
std::string default_placement_str;
std::string placement_tags_str;
bool gen_key;
bool suspended;
bool system;
bool exclusive;
int32_t max_buckets;
const int32_t default_max_buckets =
s->cct->_conf.get_val<int64_t>("rgw_user_max_buckets");
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "display-name", display_name, &display_name);
RESTArgs::get_string(s, "email", email, &email);
RESTArgs::get_string(s, "access-key", access_key, &access_key);
RESTArgs::get_string(s, "secret-key", secret_key, &secret_key);
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
RESTArgs::get_string(s, "user-caps", caps, &caps);
RESTArgs::get_string(s, "tenant", tenant_name, &tenant_name);
RESTArgs::get_bool(s, "generate-key", true, &gen_key);
RESTArgs::get_bool(s, "suspended", false, &suspended);
RESTArgs::get_int32(s, "max-buckets", default_max_buckets, &max_buckets);
RESTArgs::get_bool(s, "system", false, &system);
RESTArgs::get_bool(s, "exclusive", false, &exclusive);
RESTArgs::get_string(s, "op-mask", op_mask_str, &op_mask_str);
RESTArgs::get_string(s, "default-placement", default_placement_str, &default_placement_str);
RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str);
if (!s->user->get_info().system && system) {
ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl;
op_ret = -EINVAL;
return;
}
if (!tenant_name.empty()) {
uid.tenant = tenant_name;
}
  // TODO: validate that required args are passed in (e.g. uid and display_name here)
op_state.set_user_id(uid);
op_state.set_display_name(display_name);
op_state.set_user_email(email);
op_state.set_caps(caps);
op_state.set_access_key(access_key);
op_state.set_secret_key(secret_key);
if (!op_mask_str.empty()) {
uint32_t op_mask;
int ret = rgw_parse_op_type_list(op_mask_str, &op_mask);
if (ret < 0) {
ldpp_dout(this, 0) << "failed to parse op_mask: " << ret << dendl;
op_ret = -EINVAL;
return;
}
op_state.set_op_mask(op_mask);
}
if (!key_type_str.empty()) {
int32_t key_type = KEY_TYPE_UNDEFINED;
if (key_type_str.compare("swift") == 0)
key_type = KEY_TYPE_SWIFT;
else if (key_type_str.compare("s3") == 0)
key_type = KEY_TYPE_S3;
op_state.set_key_type(key_type);
}
if (max_buckets != default_max_buckets) {
if (max_buckets < 0) {
max_buckets = -1;
}
op_state.set_max_buckets(max_buckets);
}
if (s->info.args.exists("suspended"))
op_state.set_suspension(suspended);
if (s->info.args.exists("system"))
op_state.set_system(system);
if (s->info.args.exists("exclusive"))
op_state.set_exclusive(exclusive);
if (!default_placement_str.empty()) {
rgw_placement_rule target_rule;
target_rule.from_str(default_placement_str);
if (!driver->valid_placement(target_rule)) {
ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
op_ret = -EINVAL;
return;
}
op_state.set_default_placement(target_rule);
}
if (!placement_tags_str.empty()) {
list<string> placement_tags_list;
get_str_list(placement_tags_str, ",", placement_tags_list);
op_state.set_placement_tags(placement_tags_list);
}
if(!(driver->is_meta_master())) {
op_ret = fetch_access_keys_from_master(this, driver, op_state, s, y);
if(op_ret < 0) {
return;
} else {
      // keys were already fetched from the master zone, so don't generate new ones
gen_key = false;
}
}
if (gen_key) {
op_state.set_generate_key();
}
op_ret = RGWUserAdminOp_User::create(s, driver, op_state, flusher, y);
}
class RGWOp_User_Modify : public RGWRESTOp {
public:
RGWOp_User_Modify() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "modify_user"; }
};
void RGWOp_User_Modify::execute(optional_yield y)
{
std::string uid_str;
std::string display_name;
std::string email;
std::string access_key;
std::string secret_key;
std::string key_type_str;
std::string op_mask_str;
std::string default_placement_str;
std::string placement_tags_str;
bool gen_key;
bool suspended;
bool system;
bool email_set;
bool quota_set;
int32_t max_buckets;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "display-name", display_name, &display_name);
RESTArgs::get_string(s, "email", email, &email, &email_set);
RESTArgs::get_string(s, "access-key", access_key, &access_key);
RESTArgs::get_string(s, "secret-key", secret_key, &secret_key);
RESTArgs::get_bool(s, "generate-key", false, &gen_key);
RESTArgs::get_bool(s, "suspended", false, &suspended);
RESTArgs::get_int32(s, "max-buckets", RGW_DEFAULT_MAX_BUCKETS, &max_buckets, "a_set);
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
RESTArgs::get_bool(s, "system", false, &system);
RESTArgs::get_string(s, "op-mask", op_mask_str, &op_mask_str);
RESTArgs::get_string(s, "default-placement", default_placement_str, &default_placement_str);
RESTArgs::get_string(s, "placement-tags", placement_tags_str, &placement_tags_str);
if (!s->user->get_info().system && system) {
ldpp_dout(this, 0) << "cannot set system flag by non-system user" << dendl;
op_ret = -EINVAL;
return;
}
op_state.set_user_id(uid);
op_state.set_display_name(display_name);
if (email_set)
op_state.set_user_email(email);
op_state.set_access_key(access_key);
op_state.set_secret_key(secret_key);
if (quota_set) {
if (max_buckets < 0 ) {
max_buckets = -1;
}
op_state.set_max_buckets(max_buckets);
}
if (!key_type_str.empty()) {
int32_t key_type = KEY_TYPE_UNDEFINED;
if (key_type_str.compare("swift") == 0)
key_type = KEY_TYPE_SWIFT;
else if (key_type_str.compare("s3") == 0)
key_type = KEY_TYPE_S3;
op_state.set_key_type(key_type);
}
if (!op_mask_str.empty()) {
uint32_t op_mask;
if (rgw_parse_op_type_list(op_mask_str, &op_mask) < 0) {
ldpp_dout(this, 0) << "failed to parse op_mask" << dendl;
op_ret = -EINVAL;
return;
}
op_state.set_op_mask(op_mask);
}
if (s->info.args.exists("suspended"))
op_state.set_suspension(suspended);
if (s->info.args.exists("system"))
op_state.set_system(system);
if (!default_placement_str.empty()) {
rgw_placement_rule target_rule;
target_rule.from_str(default_placement_str);
if (!driver->valid_placement(target_rule)) {
ldpp_dout(this, 0) << "NOTICE: invalid dest placement: " << target_rule.to_str() << dendl;
op_ret = -EINVAL;
return;
}
op_state.set_default_placement(target_rule);
}
if (!placement_tags_str.empty()) {
list<string> placement_tags_list;
get_str_list(placement_tags_str, ",", placement_tags_list);
op_state.set_placement_tags(placement_tags_list);
}
if(!(driver->is_meta_master())) {
op_ret = fetch_access_keys_from_master(this, driver, op_state, s, y);
if(op_ret < 0) {
return;
} else {
      // don't generate a new key pair if keys have already been fetched from the master zone
gen_key = false;
}
}
if (gen_key) {
op_state.set_generate_key();
}
op_ret = RGWUserAdminOp_User::modify(s, driver, op_state, flusher, y);
}
class RGWOp_User_Remove : public RGWRESTOp {
public:
RGWOp_User_Remove() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "remove_user"; }
};
void RGWOp_User_Remove::execute(optional_yield y)
{
std::string uid_str;
bool purge_data;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_bool(s, "purge-data", false, &purge_data);
// FIXME: no double checking
if (!uid.empty())
op_state.set_user_id(uid);
op_state.set_purge_data(purge_data);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWUserAdminOp_User::remove(s, driver, op_state, flusher, s->yield);
}
class RGWOp_Subuser_Create : public RGWRESTOp {
public:
RGWOp_Subuser_Create() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "create_subuser"; }
};
void RGWOp_Subuser_Create::execute(optional_yield y)
{
std::string uid_str;
std::string subuser;
std::string secret_key;
std::string access_key;
std::string perm_str;
std::string key_type_str;
bool gen_subuser = false; // FIXME placeholder
bool gen_secret;
bool gen_access;
uint32_t perm_mask = 0;
int32_t key_type = KEY_TYPE_SWIFT;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "subuser", subuser, &subuser);
RESTArgs::get_string(s, "access-key", access_key, &access_key);
RESTArgs::get_string(s, "secret-key", secret_key, &secret_key);
RESTArgs::get_string(s, "access", perm_str, &perm_str);
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
RESTArgs::get_bool(s, "generate-secret", false, &gen_secret);
RESTArgs::get_bool(s, "gen-access-key", false, &gen_access);
perm_mask = rgw_str_to_perm(perm_str.c_str());
op_state.set_perm(perm_mask);
op_state.set_user_id(uid);
op_state.set_subuser(subuser);
op_state.set_access_key(access_key);
op_state.set_secret_key(secret_key);
op_state.set_generate_subuser(gen_subuser);
if (gen_access)
op_state.set_gen_access();
if (gen_secret)
op_state.set_gen_secret();
if (!key_type_str.empty()) {
if (key_type_str.compare("swift") == 0)
key_type = KEY_TYPE_SWIFT;
else if (key_type_str.compare("s3") == 0)
key_type = KEY_TYPE_S3;
}
op_state.set_key_type(key_type);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWUserAdminOp_Subuser::create(s, driver, op_state, flusher, y);
}
class RGWOp_Subuser_Modify : public RGWRESTOp {
public:
RGWOp_Subuser_Modify() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "modify_subuser"; }
};
void RGWOp_Subuser_Modify::execute(optional_yield y)
{
std::string uid_str;
std::string subuser;
std::string secret_key;
std::string key_type_str;
std::string perm_str;
RGWUserAdminOpState op_state(driver);
uint32_t perm_mask;
int32_t key_type = KEY_TYPE_SWIFT;
bool gen_secret;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "subuser", subuser, &subuser);
RESTArgs::get_string(s, "secret-key", secret_key, &secret_key);
RESTArgs::get_string(s, "access", perm_str, &perm_str);
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
RESTArgs::get_bool(s, "generate-secret", false, &gen_secret);
perm_mask = rgw_str_to_perm(perm_str.c_str());
op_state.set_perm(perm_mask);
op_state.set_user_id(uid);
op_state.set_subuser(subuser);
if (!secret_key.empty())
op_state.set_secret_key(secret_key);
if (gen_secret)
op_state.set_gen_secret();
if (!key_type_str.empty()) {
if (key_type_str.compare("swift") == 0)
key_type = KEY_TYPE_SWIFT;
else if (key_type_str.compare("s3") == 0)
key_type = KEY_TYPE_S3;
}
op_state.set_key_type(key_type);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWUserAdminOp_Subuser::modify(s, driver, op_state, flusher, y);
}
class RGWOp_Subuser_Remove : public RGWRESTOp {
public:
RGWOp_Subuser_Remove() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "remove_subuser"; }
};
void RGWOp_Subuser_Remove::execute(optional_yield y)
{
std::string uid_str;
std::string subuser;
bool purge_keys;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "subuser", subuser, &subuser);
RESTArgs::get_bool(s, "purge-keys", true, &purge_keys);
op_state.set_user_id(uid);
op_state.set_subuser(subuser);
if (purge_keys)
op_state.set_purge_keys();
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWUserAdminOp_Subuser::remove(s, driver, op_state, flusher, y);
}
class RGWOp_Key_Create : public RGWRESTOp {
public:
RGWOp_Key_Create() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "create_access_key"; }
};
void RGWOp_Key_Create::execute(optional_yield y)
{
std::string uid_str;
std::string subuser;
std::string access_key;
std::string secret_key;
std::string key_type_str;
bool gen_key;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "subuser", subuser, &subuser);
RESTArgs::get_string(s, "access-key", access_key, &access_key);
RESTArgs::get_string(s, "secret-key", secret_key, &secret_key);
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
RESTArgs::get_bool(s, "generate-key", true, &gen_key);
op_state.set_user_id(uid);
op_state.set_subuser(subuser);
op_state.set_access_key(access_key);
op_state.set_secret_key(secret_key);
if (gen_key)
op_state.set_generate_key();
if (!key_type_str.empty()) {
int32_t key_type = KEY_TYPE_UNDEFINED;
if (key_type_str.compare("swift") == 0)
key_type = KEY_TYPE_SWIFT;
else if (key_type_str.compare("s3") == 0)
key_type = KEY_TYPE_S3;
op_state.set_key_type(key_type);
}
op_ret = RGWUserAdminOp_Key::create(s, driver, op_state, flusher, y);
}
class RGWOp_Key_Remove : public RGWRESTOp {
public:
RGWOp_Key_Remove() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "remove_access_key"; }
};
void RGWOp_Key_Remove::execute(optional_yield y)
{
std::string uid_str;
std::string subuser;
std::string access_key;
std::string key_type_str;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "subuser", subuser, &subuser);
RESTArgs::get_string(s, "access-key", access_key, &access_key);
RESTArgs::get_string(s, "key-type", key_type_str, &key_type_str);
op_state.set_user_id(uid);
op_state.set_subuser(subuser);
op_state.set_access_key(access_key);
if (!key_type_str.empty()) {
int32_t key_type = KEY_TYPE_UNDEFINED;
if (key_type_str.compare("swift") == 0)
key_type = KEY_TYPE_SWIFT;
else if (key_type_str.compare("s3") == 0)
key_type = KEY_TYPE_S3;
op_state.set_key_type(key_type);
}
op_ret = RGWUserAdminOp_Key::remove(s, driver, op_state, flusher, y);
}
class RGWOp_Caps_Add : public RGWRESTOp {
public:
RGWOp_Caps_Add() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "add_user_caps"; }
};
void RGWOp_Caps_Add::execute(optional_yield y)
{
std::string uid_str;
std::string caps;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "user-caps", caps, &caps);
op_state.set_user_id(uid);
op_state.set_caps(caps);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWUserAdminOp_Caps::add(s, driver, op_state, flusher, y);
}
class RGWOp_Caps_Remove : public RGWRESTOp {
public:
RGWOp_Caps_Remove() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "remove_user_caps"; }
};
void RGWOp_Caps_Remove::execute(optional_yield y)
{
std::string uid_str;
std::string caps;
RGWUserAdminOpState op_state(driver);
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
rgw_user uid(uid_str);
RESTArgs::get_string(s, "user-caps", caps, &caps);
op_state.set_user_id(uid);
op_state.set_caps(caps);
bufferlist data;
op_ret = driver->forward_request_to_master(s, s->user.get(), nullptr, data, nullptr, s->info, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "forward_request_to_master returned ret=" << op_ret << dendl;
return;
}
op_ret = RGWUserAdminOp_Caps::remove(s, driver, op_state, flusher, y);
}
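// Wrapper pairing the user and bucket quota so both can be read or written
// as one JSON document via dump()/decode_json() below.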
struct UserQuotas {
RGWQuota quota;
UserQuotas() {}
explicit UserQuotas(RGWUserInfo& info){
quota.bucket_quota = info.quota.bucket_quota;
quota.user_quota = info.quota.user_quota;
}
void dump(Formatter *f) const {
encode_json("bucket_quota", quota.bucket_quota, f);
encode_json("user_quota", quota.user_quota, f);
}
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("bucket_quota", quota.bucket_quota, obj);
JSONDecoder::decode_json("user_quota", quota.user_quota, obj);
}
};
class RGWOp_Quota_Info : public RGWRESTOp {
public:
RGWOp_Quota_Info() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_READ);
}
void execute(optional_yield y) override;
const char* name() const override { return "get_quota_info"; }
};
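// Sketch of how this op is reached (the uid value is illustrative, not fixed
// by this API): an admin GET on the "quota" sub-resource, e.g.
//   GET /admin/user?quota&uid=testid&quota-type=user
// leaving quota-type empty returns both the user and bucket quota.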
void RGWOp_Quota_Info::execute(optional_yield y)
{
RGWUserAdminOpState op_state(driver);
std::string uid_str;
std::string quota_type;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
RESTArgs::get_string(s, "quota-type", quota_type, "a_type);
if (uid_str.empty()) {
op_ret = -EINVAL;
return;
}
rgw_user uid(uid_str);
bool show_all = quota_type.empty();
bool show_bucket = show_all || (quota_type == "bucket");
bool show_user = show_all || (quota_type == "user");
if (!(show_all || show_bucket || show_user)) {
op_ret = -EINVAL;
return;
}
op_state.set_user_id(uid);
RGWUser user;
op_ret = user.init(s, driver, op_state, y);
if (op_ret < 0)
return;
if (!op_state.has_existing_user()) {
op_ret = -ERR_NO_SUCH_USER;
return;
}
RGWUserInfo info;
string err_msg;
op_ret = user.info(info, &err_msg);
if (op_ret < 0)
return;
flusher.start(0);
if (show_all) {
UserQuotas quotas(info);
encode_json("quota", quotas, s->formatter);
} else if (show_user) {
encode_json("user_quota", info.quota.user_quota, s->formatter);
} else {
encode_json("bucket_quota", info.quota.bucket_quota, s->formatter);
}
flusher.flush();
}
class RGWOp_Quota_Set : public RGWRESTOp {
public:
RGWOp_Quota_Set() {}
int check_caps(const RGWUserCaps& caps) override {
return caps.check_cap("users", RGW_CAP_WRITE);
}
void execute(optional_yield y) override;
const char* name() const override { return "set_quota_info"; }
};
/**
* set quota
*
* two different ways to set the quota info: as json struct in the message body or via http params.
*
* as json:
*
 * PUT /admin/user?quota&uid=<uid>[&quota-type=<type>]
*
 * where quota-type is optional and is either "user" or "bucket"
*
* if quota-type is not specified then we expect to get a structure that contains both quotas,
* otherwise we'll only get the relevant configuration.
*
* E.g., if quota type not specified:
* {
* "user_quota" : {
* "max_size_kb" : 4096,
* "max_objects" : -1,
* "enabled" : false
* },
* "bucket_quota" : {
* "max_size_kb" : 1024,
* "max_objects" : -1,
* "enabled" : true
* }
* }
*
*
* or if quota type is specified:
* {
* "max_size_kb" : 4096,
* "max_objects" : -1,
* "enabled" : false
* }
*
* Another option is not to pass any body and set the following http params:
*
*
* max-size-kb=<size>
* max-objects=<max objects>
* enabled[={true,false}]
*
 * all params are optional and default to the current settings. With this type of configuration the
* quota-type param is mandatory.
*
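 * For illustration (the uid and values below are assumptions, not defaults),
 * the http-params variant could look like:
 *
 *   PUT /admin/user?quota&uid=testid&quota-type=user&max-size-kb=4096&enabled=true
 *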
*/
void RGWOp_Quota_Set::execute(optional_yield y)
{
RGWUserAdminOpState op_state(driver);
std::string uid_str;
std::string quota_type;
RESTArgs::get_string(s, "uid", uid_str, &uid_str);
RESTArgs::get_string(s, "quota-type", quota_type, "a_type);
if (uid_str.empty()) {
op_ret = -EINVAL;
return;
}
rgw_user uid(uid_str);
bool set_all = quota_type.empty();
bool set_bucket = set_all || (quota_type == "bucket");
bool set_user = set_all || (quota_type == "user");
if (!(set_all || set_bucket || set_user)) {
ldpp_dout(this, 20) << "invalid quota type" << dendl;
op_ret = -EINVAL;
return;
}
bool use_http_params;
if (s->content_length > 0) {
use_http_params = false;
} else {
const char *encoding = s->info.env->get("HTTP_TRANSFER_ENCODING");
use_http_params = (!encoding || strcmp(encoding, "chunked") != 0);
}
if (use_http_params && set_all) {
ldpp_dout(this, 20) << "quota type was not specified, can't set all quotas via http headers" << dendl;
op_ret = -EINVAL;
return;
}
op_state.set_user_id(uid);
RGWUser user;
op_ret = user.init(s, driver, op_state, y);
if (op_ret < 0) {
ldpp_dout(this, 20) << "failed initializing user info: " << op_ret << dendl;
return;
}
if (!op_state.has_existing_user()) {
op_ret = -ERR_NO_SUCH_USER;
return;
}
#define QUOTA_INPUT_MAX_LEN 1024
if (set_all) {
UserQuotas quotas;
if ((op_ret = get_json_input(driver->ctx(), s, quotas, QUOTA_INPUT_MAX_LEN, NULL)) < 0) {
ldpp_dout(this, 20) << "failed to retrieve input" << dendl;
return;
}
op_state.set_user_quota(quotas.quota.user_quota);
op_state.set_bucket_quota(quotas.quota.bucket_quota);
} else {
RGWQuotaInfo quota;
if (!use_http_params) {
bool empty;
op_ret = get_json_input(driver->ctx(), s, quota, QUOTA_INPUT_MAX_LEN, &empty);
if (op_ret < 0) {
ldpp_dout(this, 20) << "failed to retrieve input" << dendl;
if (!empty)
return;
/* was probably chunked input, but no content provided, configure via http params */
use_http_params = true;
}
}
if (use_http_params) {
RGWUserInfo info;
string err_msg;
op_ret = user.info(info, &err_msg);
if (op_ret < 0) {
ldpp_dout(this, 20) << "failed to get user info: " << op_ret << dendl;
return;
}
RGWQuotaInfo *old_quota;
if (set_user) {
old_quota = &info.quota.user_quota;
} else {
old_quota = &info.quota.bucket_quota;
}
RESTArgs::get_int64(s, "max-objects", old_quota->max_objects, "a.max_objects);
RESTArgs::get_int64(s, "max-size", old_quota->max_size, "a.max_size);
int64_t max_size_kb;
bool has_max_size_kb = false;
RESTArgs::get_int64(s, "max-size-kb", 0, &max_size_kb, &has_max_size_kb);
if (has_max_size_kb) {
quota.max_size = max_size_kb * 1024;
}
RESTArgs::get_bool(s, "enabled", old_quota->enabled, "a.enabled);
}
if (set_user) {
op_state.set_user_quota(quota);
} else {
op_state.set_bucket_quota(quota);
}
}
string err;
op_ret = user.modify(s, op_state, y, &err);
if (op_ret < 0) {
ldpp_dout(this, 20) << "failed updating user info: " << op_ret << ": " << err << dendl;
return;
}
}
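// Admin /user dispatch: the sub-resource in the query string (quota, list,
// subuser, key, caps) selects the op; with no sub-resource the request maps
// to the plain user info/create/modify/remove ops.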
RGWOp *RGWHandler_User::op_get()
{
if (s->info.args.sub_resource_exists("quota"))
return new RGWOp_Quota_Info;
if (s->info.args.sub_resource_exists("list"))
return new RGWOp_User_List;
return new RGWOp_User_Info;
}
RGWOp *RGWHandler_User::op_put()
{
if (s->info.args.sub_resource_exists("subuser"))
return new RGWOp_Subuser_Create;
if (s->info.args.sub_resource_exists("key"))
return new RGWOp_Key_Create;
if (s->info.args.sub_resource_exists("caps"))
return new RGWOp_Caps_Add;
if (s->info.args.sub_resource_exists("quota"))
return new RGWOp_Quota_Set;
return new RGWOp_User_Create;
}
RGWOp *RGWHandler_User::op_post()
{
if (s->info.args.sub_resource_exists("subuser"))
return new RGWOp_Subuser_Modify;
return new RGWOp_User_Modify;
}
RGWOp *RGWHandler_User::op_delete()
{
if (s->info.args.sub_resource_exists("subuser"))
return new RGWOp_Subuser_Remove;
if (s->info.args.sub_resource_exists("key"))
return new RGWOp_Key_Remove;
if (s->info.args.sub_resource_exists("caps"))
return new RGWOp_Caps_Remove;
return new RGWOp_User_Remove;
}
ceph-main/src/rgw/driver/rados/rgw_rest_user.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_rest.h"
#include "rgw_rest_s3.h"
class RGWHandler_User : public RGWHandler_Auth_S3 {
protected:
RGWOp *op_get() override;
RGWOp *op_put() override;
RGWOp *op_post() override;
RGWOp *op_delete() override;
public:
using RGWHandler_Auth_S3::RGWHandler_Auth_S3;
~RGWHandler_User() override = default;
int read_permissions(RGWOp*, optional_yield) override {
return 0;
}
};
class RGWRESTMgr_User : public RGWRESTMgr {
public:
RGWRESTMgr_User() = default;
~RGWRESTMgr_User() override = default;
RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state*,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string&) override {
return new RGWHandler_User(auth_registry);
}
};
ceph-main/src/rgw/driver/rados/rgw_sal_rados.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include <stdlib.h>
#include <system_error>
#include <filesystem>
#include <unistd.h>
#include <sstream>
#include <boost/algorithm/string.hpp>
#include <boost/process.hpp>
#include "common/Clock.h"
#include "common/errno.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "rgw_bucket.h"
#include "rgw_multi.h"
#include "rgw_acl.h"
#include "rgw_acl_s3.h"
#include "rgw_aio.h"
#include "rgw_aio_throttle.h"
#include "rgw_tracer.h"
#include "rgw_zone.h"
#include "rgw_rest_conn.h"
#include "rgw_service.h"
#include "rgw_lc.h"
#include "rgw_lc_tier.h"
#include "rgw_rest_admin.h"
#include "rgw_rest_bucket.h"
#include "rgw_rest_metadata.h"
#include "rgw_rest_log.h"
#include "rgw_rest_config.h"
#include "rgw_rest_ratelimit.h"
#include "rgw_rest_realm.h"
#include "rgw_rest_user.h"
#include "services/svc_sys_obj.h"
#include "services/svc_meta.h"
#include "services/svc_meta_be_sobj.h"
#include "services/svc_cls.h"
#include "services/svc_zone.h"
#include "services/svc_tier_rados.h"
#include "services/svc_quota.h"
#include "services/svc_config_key.h"
#include "services/svc_zone_utils.h"
#include "services/svc_role_rados.h"
#include "services/svc_user.h"
#include "services/svc_sys_obj_cache.h"
#include "cls/rgw/cls_rgw_client.h"
#include "rgw_pubsub.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
static string mp_ns = RGW_OBJ_NS_MULTIPART;
namespace rgw::sal {
// default number of entries to list with each bucket listing call
// (use marker to bridge between calls)
static constexpr size_t listing_max_entries = 1000;
static std::string pubsub_oid_prefix = "pubsub.";
static int decode_policy(CephContext* cct,
bufferlist& bl,
RGWAccessControlPolicy* policy)
{
auto iter = bl.cbegin();
try {
policy->decode(iter);
} catch (buffer::error& err) {
ldout(cct, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
return -EIO;
}
if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 15>()) {
ldout(cct, 15) << __func__ << " Read AccessControlPolicy";
RGWAccessControlPolicy_S3* s3policy = static_cast<RGWAccessControlPolicy_S3 *>(policy);
s3policy->to_xml(*_dout);
*_dout << dendl;
}
return 0;
}
static int rgw_op_get_bucket_policy_from_attr(const DoutPrefixProvider* dpp,
RadosStore* store,
User* user,
Attrs& bucket_attrs,
RGWAccessControlPolicy* policy,
optional_yield y)
{
auto aiter = bucket_attrs.find(RGW_ATTR_ACL);
if (aiter != bucket_attrs.end()) {
int ret = decode_policy(store->ctx(), aiter->second, policy);
if (ret < 0)
return ret;
} else {
ldout(store->ctx(), 0) << "WARNING: couldn't find acl header for bucket, generating default" << dendl;
/* object exists, but policy is broken */
int r = user->load_user(dpp, y);
if (r < 0)
return r;
policy->create_default(user->get_id(), user->get_display_name());
}
return 0;
}
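// Wait for every outstanding aio completion, releasing each one as it
// finishes; returns 0 on success or the last error value observed.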
static int drain_aio(std::list<librados::AioCompletion*>& handles)
{
int ret = 0;
while (!handles.empty()) {
librados::AioCompletion* handle = handles.front();
handles.pop_front();
handle->wait_for_complete();
int r = handle->get_return_value();
handle->release();
if (r < 0) {
ret = r;
}
}
return ret;
}
int RadosUser::list_buckets(const DoutPrefixProvider* dpp, const std::string& marker,
const std::string& end_marker, uint64_t max, bool need_stats,
BucketList &buckets, optional_yield y)
{
RGWUserBuckets ulist;
bool is_truncated = false;
int ret;
buckets.clear();
ret = store->ctl()->user->list_buckets(dpp, info.user_id, marker, end_marker, max,
need_stats, &ulist, &is_truncated, y);
if (ret < 0)
return ret;
buckets.set_truncated(is_truncated);
for (const auto& ent : ulist.get_buckets()) {
buckets.add(std::unique_ptr<Bucket>(new RadosBucket(this->store, ent.second, this)));
}
return 0;
}
int RadosUser::create_bucket(const DoutPrefixProvider* dpp,
const rgw_bucket& b,
const std::string& zonegroup_id,
rgw_placement_rule& placement_rule,
std::string& swift_ver_location,
const RGWQuotaInfo * pquota_info,
const RGWAccessControlPolicy& policy,
Attrs& attrs,
RGWBucketInfo& info,
obj_version& ep_objv,
bool exclusive,
bool obj_lock_enabled,
bool* existed,
req_info& req_info,
std::unique_ptr<Bucket>* bucket_out,
optional_yield y)
{
int ret;
bufferlist in_data;
RGWBucketInfo master_info;
rgw_bucket* pmaster_bucket;
uint32_t* pmaster_num_shards;
real_time creation_time;
std::unique_ptr<Bucket> bucket;
obj_version objv,* pobjv = NULL;
/* If it exists, look it up; otherwise create it */
ret = store->get_bucket(dpp, this, b, &bucket, y);
if (ret < 0 && ret != -ENOENT)
return ret;
if (ret != -ENOENT) {
RGWAccessControlPolicy old_policy(store->ctx());
*existed = true;
if (swift_ver_location.empty()) {
swift_ver_location = bucket->get_info().swift_ver_location;
}
placement_rule.inherit_from(bucket->get_info().placement_rule);
// don't allow changes to the acl policy
int r = rgw_op_get_bucket_policy_from_attr(dpp, store, this, bucket->get_attrs(),
&old_policy, y);
if (r >= 0 && old_policy != policy) {
bucket_out->swap(bucket);
return -EEXIST;
}
} else {
bucket = std::unique_ptr<Bucket>(new RadosBucket(store, b, this));
*existed = false;
bucket->set_attrs(attrs);
}
if (!store->svc()->zone->is_meta_master()) {
JSONParser jp;
ret = store->forward_request_to_master(dpp, this, NULL, in_data, &jp, req_info, y);
if (ret < 0) {
return ret;
}
JSONDecoder::decode_json("entry_point_object_ver", ep_objv, &jp);
JSONDecoder::decode_json("object_ver", objv, &jp);
JSONDecoder::decode_json("bucket_info", master_info, &jp);
ldpp_dout(dpp, 20) << "parsed: objv.tag=" << objv.tag << " objv.ver=" << objv.ver << dendl;
std::time_t ctime = ceph::real_clock::to_time_t(master_info.creation_time);
ldpp_dout(dpp, 20) << "got creation time: << " << std::put_time(std::localtime(&ctime), "%F %T") << dendl;
pmaster_bucket= &master_info.bucket;
creation_time = master_info.creation_time;
pmaster_num_shards = &master_info.layout.current_index.layout.normal.num_shards;
pobjv = &objv;
if (master_info.obj_lock_enabled()) {
info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
}
} else {
pmaster_bucket = NULL;
pmaster_num_shards = NULL;
if (obj_lock_enabled)
info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
}
std::string zid = zonegroup_id;
if (zid.empty()) {
zid = store->svc()->zone->get_zonegroup().get_id();
}
if (*existed) {
rgw_placement_rule selected_placement_rule;
ret = store->svc()->zone->select_bucket_placement(dpp, this->get_info(),
zid, placement_rule,
&selected_placement_rule, nullptr, y);
if (selected_placement_rule != info.placement_rule) {
ret = -EEXIST;
bucket_out->swap(bucket);
return ret;
}
} else {
ret = store->getRados()->create_bucket(this->get_info(), bucket->get_key(),
zid, placement_rule, swift_ver_location, pquota_info,
attrs, info, pobjv, &ep_objv, creation_time,
pmaster_bucket, pmaster_num_shards, y, dpp,
exclusive);
if (ret == -EEXIST) {
*existed = true;
/* bucket already existed, might have raced with another bucket creation,
* or might be partial bucket creation that never completed. Read existing
* bucket info, verify that the reported bucket owner is the current user.
* If all is ok then update the user's list of buckets. Otherwise inform
* client about a name conflict.
*/
if (info.owner.compare(this->get_id()) != 0) {
return -EEXIST;
}
ret = 0;
} else if (ret != 0) {
return ret;
}
}
bucket->set_version(ep_objv);
bucket->get_info() = info;
RadosBucket* rbucket = static_cast<RadosBucket*>(bucket.get());
ret = rbucket->link(dpp, this, y, false);
if (ret && !*existed && ret != -EEXIST) {
/* if it exists (or previously existed), don't remove it! */
ret = rbucket->unlink(dpp, this, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: failed to unlink bucket: ret=" << ret
<< dendl;
}
} else if (ret == -EEXIST || (ret == 0 && *existed)) {
ret = -ERR_BUCKET_EXISTS;
}
bucket_out->swap(bucket);
return ret;
}
int RadosUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y)
{
return store->ctl()->user->get_attrs_by_uid(dpp, get_id(), &attrs, y, &objv_tracker);
}
int RadosUser::merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y)
{
for(auto& it : new_attrs) {
attrs[it.first] = it.second;
}
return store_user(dpp, y, false);
}
int RadosUser::read_stats(const DoutPrefixProvider *dpp,
optional_yield y, RGWStorageStats* stats,
ceph::real_time* last_stats_sync,
ceph::real_time* last_stats_update)
{
return store->ctl()->user->read_stats(dpp, get_id(), stats, y, last_stats_sync, last_stats_update);
}
int RadosUser::read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb)
{
return store->svc()->user->read_stats_async(dpp, get_id(), cb);
}
int RadosUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
return store->svc()->user->complete_flush_stats(dpp, get_id(), y);
}
int RadosUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
std::string bucket_name;
return store->getRados()->read_usage(dpp, get_id(), bucket_name, start_epoch,
end_epoch, max_entries, is_truncated,
usage_iter, usage);
}
int RadosUser::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
std::string bucket_name;
return store->getRados()->trim_usage(dpp, get_id(), bucket_name, start_epoch, end_epoch, y);
}
int RadosUser::load_user(const DoutPrefixProvider* dpp, optional_yield y)
{
return store->ctl()->user->get_info_by_uid(dpp, info.user_id, &info, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker).set_attrs(&attrs));
}
int RadosUser::store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info)
{
return store->ctl()->user->store_info(dpp, info, y,
RGWUserCtl::PutParams().set_objv_tracker(&objv_tracker)
.set_exclusive(exclusive)
.set_attrs(&attrs)
.set_old_info(old_info));
}
int RadosUser::remove_user(const DoutPrefixProvider* dpp, optional_yield y)
{
return store->ctl()->user->remove_info(dpp, info, y,
RGWUserCtl::RemoveParams().set_objv_tracker(&objv_tracker));
}
int RadosUser::verify_mfa(const std::string& mfa_str, bool* verified,
const DoutPrefixProvider* dpp, optional_yield y)
{
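  // mfa_str is expected as "<serial> <pin>" separated by a single space,
  // e.g. (illustrative values only) "totp-device1 123456"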
vector<string> params;
get_str_vec(mfa_str, " ", params);
if (params.size() != 2) {
ldpp_dout(dpp, 5) << "NOTICE: invalid mfa string provided: " << mfa_str << dendl;
return -EINVAL;
}
string& serial = params[0];
string& pin = params[1];
auto i = info.mfa_ids.find(serial);
if (i == info.mfa_ids.end()) {
ldpp_dout(dpp, 5) << "NOTICE: user does not have mfa device with serial=" << serial << dendl;
return -EACCES;
}
int ret = store->svc()->cls->mfa.check_mfa(dpp, info.user_id, serial, pin, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << "NOTICE: failed to check MFA, serial=" << serial << dendl;
return -EACCES;
}
*verified = true;
return 0;
}
RadosBucket::~RadosBucket() {}
int RadosBucket::remove_bucket(const DoutPrefixProvider* dpp,
bool delete_children,
bool forward_to_master,
req_info* req_info,
optional_yield y)
{
int ret;
// Refresh info
ret = load_bucket(dpp, y);
if (ret < 0) {
return ret;
}
ListParams params;
params.list_versions = true;
params.allow_unordered = true;
ListResults results;
do {
results.objs.clear();
ret = list(dpp, params, 1000, results, y);
if (ret < 0) {
return ret;
}
if (!results.objs.empty() && !delete_children) {
ldpp_dout(dpp, -1) << "ERROR: could not remove non-empty bucket " << info.bucket.name <<
dendl;
return -ENOTEMPTY;
}
for (const auto& obj : results.objs) {
rgw_obj_key key(obj.key);
/* xxx dang */
ret = rgw_remove_object(dpp, store, this, key, y);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
}
} while(results.is_truncated);
ret = abort_multiparts(dpp, store->ctx(), y);
if (ret < 0) {
return ret;
}
// remove lifecycle config, if any (XXX note could be made generic)
(void) store->getRados()->get_lc()->remove_bucket_config(
this, get_attrs());
ret = store->ctl()->bucket->sync_user_stats(dpp, info.owner, info, y, nullptr);
if (ret < 0) {
ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
RGWObjVersionTracker ot;
// if we deleted children above we will force delete, as any that
  // remain are detritus from a prior bug
ret = store->getRados()->delete_bucket(info, ot, y, dpp, !delete_children);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " <<
info.bucket.name << dendl;
return ret;
}
// if bucket has notification definitions associated with it
// they should be removed (note that any pending notifications on the bucket are still going to be sent)
const RGWPubSub ps(store, info.owner.tenant);
const RGWPubSub::Bucket ps_bucket(ps, this);
const auto ps_ret = ps_bucket.remove_notifications(dpp, y);
if (ps_ret < 0 && ps_ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: unable to remove notifications from bucket. ret=" << ps_ret << dendl;
}
ret = store->ctl()->bucket->unlink_bucket(info.owner, info.bucket, y, dpp, false);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: unable to remove user bucket information" << dendl;
}
if (forward_to_master) {
bufferlist in_data;
ret = store->forward_request_to_master(dpp, owner, &ot.read_version, in_data, nullptr, *req_info, y);
if (ret < 0) {
if (ret == -ENOENT) {
/* adjust error, we want to return with NoSuchBucket and not
* NoSuchKey */
ret = -ERR_NO_SUCH_BUCKET;
}
return ret;
}
}
return ret;
}
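// Delete a bucket and all of its objects without going through garbage
// collection: tail rados objects are unlinked directly via aio (throttled to
// concurrent_max in-flight ops) and each head object is deleted last.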
int RadosBucket::remove_bucket_bypass_gc(int concurrent_max, bool
keep_index_consistent,
optional_yield y, const
DoutPrefixProvider *dpp)
{
int ret;
map<RGWObjCategory, RGWStorageStats> stats;
map<string, bool> common_prefixes;
RGWObjectCtx obj_ctx(store);
CephContext *cct = store->ctx();
string bucket_ver, master_ver;
ret = load_bucket(dpp, y);
if (ret < 0)
return ret;
const auto& index = info.get_current_index();
ret = read_stats(dpp, index, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, NULL);
if (ret < 0)
return ret;
ret = abort_multiparts(dpp, cct, y);
if (ret < 0) {
return ret;
}
rgw::sal::Bucket::ListParams params;
rgw::sal::Bucket::ListResults results;
params.list_versions = true;
params.allow_unordered = true;
std::list<librados::AioCompletion*> handles;
int max_aio = concurrent_max;
results.is_truncated = true;
while (results.is_truncated) {
ret = list(dpp, params, listing_max_entries, results, y);
if (ret < 0)
return ret;
std::vector<rgw_bucket_dir_entry>::iterator it = results.objs.begin();
for (; it != results.objs.end(); ++it) {
RGWObjState *astate = NULL;
RGWObjManifest *amanifest = nullptr;
rgw_obj obj{get_key(), it->key};
ret = store->getRados()->get_obj_state(dpp, &obj_ctx, get_info(),
obj, &astate, &amanifest,
false, y);
if (ret == -ENOENT) {
ldpp_dout(dpp, 1) << "WARNING: cannot find obj state for obj " << obj << dendl;
continue;
}
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: get obj state returned with error " << ret << dendl;
return ret;
}
if (amanifest) {
RGWObjManifest& manifest = *amanifest;
RGWObjManifest::obj_iterator miter = manifest.obj_begin(dpp);
const rgw_obj head_obj = manifest.get_obj();
rgw_raw_obj raw_head_obj;
store->get_raw_obj(manifest.get_head_placement_rule(), head_obj, &raw_head_obj);
for (; miter != manifest.obj_end(dpp) && max_aio--; ++miter) {
if (!max_aio) {
ret = drain_aio(handles);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
return ret;
}
max_aio = concurrent_max;
}
rgw_raw_obj last_obj = miter.get_location().get_raw_obj(store->getRados());
if (last_obj == raw_head_obj) {
// have the head obj deleted at the end
continue;
}
ret = store->getRados()->delete_raw_obj_aio(dpp, last_obj, handles);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
}
} // for all shadow objs
ret = store->getRados()->delete_obj_aio(dpp, head_obj, get_info(), astate,
handles, keep_index_consistent, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: delete obj aio failed with " << ret << dendl;
return ret;
}
}
if (!max_aio) {
ret = drain_aio(handles);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
return ret;
}
max_aio = concurrent_max;
}
obj_ctx.invalidate(obj);
} // for all RGW objects in results
} // while is_truncated
ret = drain_aio(handles);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: could not drain handles as aio completion returned with " << ret << dendl;
return ret;
}
  ret = sync_user_stats(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
}
RGWObjVersionTracker objv_tracker;
// this function can only be run if caller wanted children to be
// deleted, so we can ignore the check for children as any that
// remain are detritus from a prior bug
ret = remove_bucket(dpp, true, false, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: could not remove bucket " << this << dendl;
return ret;
}
return ret;
}
int RadosBucket::load_bucket(const DoutPrefixProvider* dpp, optional_yield y, bool get_stats)
{
int ret;
RGWSI_MetaBackend_CtxParams bectx_params = RGWSI_MetaBackend_CtxParams_SObj();
RGWObjVersionTracker ep_ot;
if (info.bucket.bucket_id.empty()) {
ret = store->ctl()->bucket->read_bucket_info(info.bucket, &info, y, dpp,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(&mtime)
.set_attrs(&attrs)
.set_bectx_params(bectx_params),
&ep_ot);
} else {
ret = store->ctl()->bucket->read_bucket_instance_info(info.bucket, &info, y, dpp,
RGWBucketCtl::BucketInstance::GetParams()
.set_mtime(&mtime)
.set_attrs(&attrs)
.set_bectx_params(bectx_params));
}
if (ret != 0) {
return ret;
}
bucket_version = ep_ot.read_version;
if (get_stats) {
ret = store->ctl()->bucket->read_bucket_stats(info.bucket, &ent, y, dpp);
}
return ret;
}
int RadosBucket::read_stats(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, std::string* bucket_ver, std::string* master_ver,
std::map<RGWObjCategory, RGWStorageStats>& stats,
std::string* max_marker, bool* syncstopped)
{
return store->getRados()->get_bucket_stats(dpp, info, idx_layout, shard_id, bucket_ver, master_ver, stats, max_marker, syncstopped);
}
int RadosBucket::read_stats_async(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, RGWGetBucketStats_CB* ctx)
{
return store->getRados()->get_bucket_stats_async(dpp, get_info(), idx_layout, shard_id, ctx);
}
int RadosBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
return store->ctl()->bucket->sync_user_stats(dpp, owner->get_id(), info, y, &ent);
}
int RadosBucket::update_container_stats(const DoutPrefixProvider* dpp, optional_yield y)
{
int ret;
map<std::string, RGWBucketEnt> m;
m[info.bucket.name] = ent;
ret = store->getRados()->update_containers_stats(m, dpp, y);
if (!ret)
return -EEXIST;
if (ret < 0)
return ret;
map<std::string, RGWBucketEnt>::iterator iter = m.find(info.bucket.name);
if (iter == m.end())
return -EINVAL;
ent.count = iter->second.count;
ent.size = iter->second.size;
ent.size_rounded = iter->second.size_rounded;
ent.creation_time = iter->second.creation_time;
ent.placement_rule = std::move(iter->second.placement_rule);
info.creation_time = ent.creation_time;
info.placement_rule = ent.placement_rule;
return 0;
}
int RadosBucket::check_bucket_shards(const DoutPrefixProvider* dpp, optional_yield y)
{
return store->getRados()->check_bucket_shards(info, info.bucket, get_count(), dpp, y);
}
int RadosBucket::link(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint, RGWObjVersionTracker* objv)
{
RGWBucketEntryPoint ep;
ep.bucket = info.bucket;
ep.owner = new_user->get_id();
ep.creation_time = get_creation_time();
ep.linked = true;
Attrs ep_attrs;
rgw_ep_info ep_data{ep, ep_attrs};
int r = store->ctl()->bucket->link_bucket(new_user->get_id(), info.bucket,
get_creation_time(), y, dpp, update_entrypoint,
&ep_data);
if (r < 0)
return r;
if (objv)
*objv = ep_data.ep_objv;
return r;
}
int RadosBucket::unlink(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint)
{
return store->ctl()->bucket->unlink_bucket(new_user->get_id(), info.bucket, y, dpp, update_entrypoint);
}
int RadosBucket::chown(const DoutPrefixProvider* dpp, User& new_user, optional_yield y)
{
std::string obj_marker;
int r;
if (!owner) {
ldpp_dout(dpp, 0) << __func__ << " Cannot chown without an owner " << dendl;
return -EINVAL;
}
r = this->unlink(dpp, owner, y);
if (r < 0) {
return r;
}
return this->link(dpp, &new_user, y);
}
int RadosBucket::put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time _mtime, optional_yield y)
{
mtime = _mtime;
return store->getRados()->put_bucket_instance_info(info, exclusive, mtime, &attrs, dpp, y);
}
/* Make sure to call get_bucket_info() first if you need it */
bool RadosBucket::is_owner(User* user)
{
return (info.owner.compare(user->get_id()) == 0);
}
int RadosBucket::check_empty(const DoutPrefixProvider* dpp, optional_yield y)
{
return store->getRados()->check_bucket_empty(dpp, info, y);
}
int RadosBucket::check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size,
optional_yield y, bool check_size_only)
{
return store->getRados()->check_quota(dpp, info.owner, get_key(),
quota, obj_size, y, check_size_only);
}
int RadosBucket::merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y)
{
for(auto& it : new_attrs) {
attrs[it.first] = it.second;
}
return store->ctl()->bucket->set_bucket_instance_attrs(get_info(),
new_attrs, &get_info().objv_tracker, y, dpp);
}
int RadosBucket::try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y)
{
return store->getRados()->try_refresh_bucket_info(info, pmtime, dpp, y, &attrs);
}
int RadosBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
return store->getRados()->read_usage(dpp, owner->get_id(), get_name(), start_epoch,
end_epoch, max_entries, is_truncated,
usage_iter, usage);
}
int RadosBucket::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
return store->getRados()->trim_usage(dpp, owner->get_id(), get_name(), start_epoch, end_epoch, y);
}
int RadosBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink)
{
return store->getRados()->remove_objs_from_index(dpp, info, objs_to_unlink);
}
int RadosBucket::check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats)
{
return store->getRados()->bucket_check_index(dpp, info, &existing_stats, &calculated_stats);
}
int RadosBucket::rebuild_index(const DoutPrefixProvider *dpp)
{
return store->getRados()->bucket_rebuild_index(dpp, info);
}
int RadosBucket::set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout)
{
return store->getRados()->cls_obj_set_bucket_tag_timeout(dpp, info, timeout);
}
int RadosBucket::purge_instance(const DoutPrefixProvider* dpp, optional_yield y)
{
int max_shards = (info.layout.current_index.layout.normal.num_shards > 0 ? info.layout.current_index.layout.normal.num_shards : 1);
for (int i = 0; i < max_shards; i++) {
RGWRados::BucketShard bs(store->getRados());
int shard_id = (info.layout.current_index.layout.normal.num_shards > 0 ? i : -1);
int ret = bs.init(dpp, info, info.layout.current_index, shard_id, y);
if (ret < 0) {
cerr << "ERROR: bs.init(bucket=" << info.bucket << ", shard=" << shard_id
<< "): " << cpp_strerror(-ret) << std::endl;
return ret;
}
ret = store->getRados()->bi_remove(dpp, bs);
if (ret < 0) {
cerr << "ERROR: failed to remove bucket index object: "
<< cpp_strerror(-ret) << std::endl;
return ret;
}
}
return 0;
}
int RadosBucket::set_acl(const DoutPrefixProvider* dpp, RGWAccessControlPolicy &acl, optional_yield y)
{
bufferlist aclbl;
acls = acl;
acl.encode(aclbl);
map<string, bufferlist>& attrs = get_attrs();
attrs[RGW_ATTR_ACL] = aclbl;
info.owner = acl.get_owner().get_id();
int r = store->ctl()->bucket->store_bucket_instance_info(info.bucket,
info, y, dpp,
RGWBucketCtl::BucketInstance::PutParams().set_attrs(&attrs));
if (r < 0) {
cerr << "ERROR: failed to set bucket owner: " << cpp_strerror(-r) << std::endl;
return r;
}
return 0;
}
std::unique_ptr<Object> RadosBucket::get_object(const rgw_obj_key& k)
{
return std::make_unique<RadosObject>(this->store, k, this);
}
int RadosBucket::list(const DoutPrefixProvider* dpp, ListParams& params, int max, ListResults& results, optional_yield y)
{
RGWRados::Bucket target(store->getRados(), get_info());
if (params.shard_id >= 0) {
target.set_shard_id(params.shard_id);
}
RGWRados::Bucket::List list_op(&target);
list_op.params.prefix = params.prefix;
list_op.params.delim = params.delim;
list_op.params.marker = params.marker;
list_op.params.ns = params.ns;
list_op.params.end_marker = params.end_marker;
list_op.params.enforce_ns = params.enforce_ns;
list_op.params.access_list_filter = params.access_list_filter;
list_op.params.force_check_filter = params.force_check_filter;
list_op.params.list_versions = params.list_versions;
list_op.params.allow_unordered = params.allow_unordered;
int ret = list_op.list_objects(dpp, max, &results.objs, &results.common_prefixes, &results.is_truncated, y);
if (ret >= 0) {
results.next_marker = list_op.get_next_marker();
params.marker = results.next_marker;
}
return ret;
}
std::unique_ptr<MultipartUpload> RadosBucket::get_multipart_upload(
const std::string& oid,
std::optional<std::string> upload_id,
ACLOwner owner, ceph::real_time mtime)
{
return std::make_unique<RadosMultipartUpload>(this->store, this, oid, upload_id,
std::move(owner), mtime);
}
int RadosBucket::list_multiparts(const DoutPrefixProvider *dpp,
const string& prefix,
string& marker,
const string& delim,
const int& max_uploads,
vector<std::unique_ptr<MultipartUpload>>& uploads,
map<string, bool> *common_prefixes,
bool *is_truncated, optional_yield y)
{
rgw::sal::Bucket::ListParams params;
rgw::sal::Bucket::ListResults results;
MultipartMetaFilter mp_filter;
params.prefix = prefix;
params.delim = delim;
params.marker = marker;
params.ns = RGW_OBJ_NS_MULTIPART;
params.access_list_filter = &mp_filter;
int ret = list(dpp, params, max_uploads, results, y);
if (ret < 0)
return ret;
if (!results.objs.empty()) {
for (const rgw_bucket_dir_entry& dentry : results.objs) {
rgw_obj_key key(dentry.key);
ACLOwner owner(rgw_user(dentry.meta.owner));
owner.set_name(dentry.meta.owner_display_name);
uploads.push_back(this->get_multipart_upload(key.name,
std::nullopt, std::move(owner), dentry.meta.mtime));
}
}
if (common_prefixes) {
*common_prefixes = std::move(results.common_prefixes);
}
*is_truncated = results.is_truncated;
marker = params.marker.name;
return 0;
}
int RadosBucket::abort_multiparts(const DoutPrefixProvider* dpp,
CephContext* cct, optional_yield y)
{
constexpr int max = 1000;
int ret, num_deleted = 0;
vector<std::unique_ptr<MultipartUpload>> uploads;
string marker;
bool is_truncated;
const std::string empty_delim;
const std::string empty_prefix;
do {
ret = list_multiparts(dpp, empty_prefix, marker, empty_delim,
max, uploads, nullptr, &is_truncated, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR : calling list_bucket_multiparts; ret=" << ret <<
"; bucket=\"" << this << "\"" << dendl;
return ret;
}
ldpp_dout(dpp, 20) << __func__ <<
" INFO: aborting and cleaning up multipart upload(s); bucket=\"" <<
this << "\"; uploads.size()=" << uploads.size() <<
"; is_truncated=" << is_truncated << dendl;
if (!uploads.empty()) {
for (const auto& upload : uploads) {
ret = upload->abort(dpp, cct, y);
if (ret < 0) {
// we're doing a best-effort; if something cannot be found,
// log it and keep moving forward
if (ret != -ENOENT && ret != -ERR_NO_SUCH_UPLOAD) {
ldpp_dout(dpp, 0) << __func__ <<
" ERROR : failed to abort and clean-up multipart upload \"" <<
upload->get_meta() << "\"" << dendl;
return ret;
} else {
ldpp_dout(dpp, 10) << __func__ <<
" NOTE : unable to find part(s) of "
"aborted multipart upload of \"" << upload->get_meta() <<
"\" for cleaning up" << dendl;
}
}
num_deleted++;
}
if (num_deleted) {
ldpp_dout(dpp, 0) << __func__ <<
" WARNING : aborted " << num_deleted <<
" incomplete multipart uploads" << dendl;
}
}
} while (is_truncated);
return 0;
}
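// The per-bucket notification object name combines the pubsub prefix with the
// tenant, bucket name and bucket marker; e.g. with assumed values tenant
// "acme", bucket "photos" and marker "zone1.42" this yields
// "pubsub.acme.bucket.photos/zone1.42".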
std::string RadosBucket::topics_oid() const {
return pubsub_oid_prefix + get_tenant() + ".bucket." + get_name() + "/" + get_marker();
}
int RadosBucket::read_topics(rgw_pubsub_bucket_topics& notifications,
RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp)
{
// read from cache
auto cache = store->getRados()->get_topic_cache();
const std::string key = store->svc()->zone->get_zone_params().log_pool.to_str() + topics_oid();
if (auto e = cache->find(key)) {
notifications = e->info;
return 0;
}
bufferlist bl;
rgw_cache_entry_info cache_info;
const int ret = rgw_get_system_obj(store->svc()->sysobj,
store->svc()->zone->get_zone_params().log_pool,
topics_oid(),
bl,
objv_tracker, nullptr,
y, dpp, nullptr, &cache_info);
if (ret < 0) {
return ret;
}
auto iter = bl.cbegin();
try {
decode(notifications, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 20) << " failed to decode bucket notifications from oid: " << topics_oid() << ". for bucket: "
<< get_name() << ". error: " << err.what() << dendl;
return -EIO;
}
pubsub_bucket_topics_entry e;
e.info = notifications;
if (!cache->put(dpp, store->getRados()->svc.cache, key, &e, { &cache_info })) {
ldpp_dout(dpp, 10) << "couldn't put bucket topics cache entry" << dendl;
}
return 0;
}
int RadosBucket::write_topics(const rgw_pubsub_bucket_topics& notifications,
RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) {
bufferlist bl;
encode(notifications, bl);
return rgw_put_system_obj(dpp, store->svc()->sysobj,
store->svc()->zone->get_zone_params().log_pool,
topics_oid(),
bl, false, objv_tracker, real_time(), y);
}
int RadosBucket::remove_topics(RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
return rgw_delete_system_obj(dpp, store->svc()->sysobj,
store->svc()->zone->get_zone_params().log_pool,
topics_oid(),
objv_tracker, y);
}
std::unique_ptr<User> RadosStore::get_user(const rgw_user &u)
{
return std::make_unique<RadosUser>(this, u);
}
std::string RadosStore::get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y)
{
return getRados()->get_cluster_fsid(dpp, y);
}
int RadosStore::get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user)
{
RGWUserInfo uinfo;
User* u;
RGWObjVersionTracker objv_tracker;
int r = ctl()->user->get_info_by_access_key(dpp, key, &uinfo, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker));
if (r < 0)
return r;
u = new RadosUser(this, uinfo);
if (!u)
return -ENOMEM;
u->get_version_tracker() = objv_tracker;
user->reset(u);
return 0;
}
int RadosStore::get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user)
{
RGWUserInfo uinfo;
User* u;
RGWObjVersionTracker objv_tracker;
int r = ctl()->user->get_info_by_email(dpp, email, &uinfo, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker));
if (r < 0)
return r;
u = new RadosUser(this, uinfo);
if (!u)
return -ENOMEM;
u->get_version_tracker() = objv_tracker;
user->reset(u);
return 0;
}
int RadosStore::get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user)
{
RGWUserInfo uinfo;
User* u;
RGWObjVersionTracker objv_tracker;
int r = ctl()->user->get_info_by_swift(dpp, user_str, &uinfo, y, RGWUserCtl::GetParams().set_objv_tracker(&objv_tracker));
if (r < 0)
return r;
u = new RadosUser(this, uinfo);
if (!u)
return -ENOMEM;
u->get_version_tracker() = objv_tracker;
user->reset(u);
return 0;
}
std::unique_ptr<Object> RadosStore::get_object(const rgw_obj_key& k)
{
return std::make_unique<RadosObject>(this, k);
}
int RadosStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y)
{
int ret;
Bucket* bp;
bp = new RadosBucket(this, b, u);
ret = bp->load_bucket(dpp, y);
if (ret < 0) {
delete bp;
return ret;
}
bucket->reset(bp);
return 0;
}
int RadosStore::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket)
{
Bucket* bp;
bp = new RadosBucket(this, i, u);
/* Don't need to fetch the bucket info, use the provided one */
bucket->reset(bp);
return 0;
}
int RadosStore::get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr<Bucket>* bucket, optional_yield y)
{
rgw_bucket b;
b.tenant = tenant;
b.name = name;
return get_bucket(dpp, u, b, bucket, y);
}
bool RadosStore::is_meta_master()
{
return svc()->zone->is_meta_master();
}
int RadosStore::forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv,
bufferlist& in_data,
JSONParser* jp, req_info& info,
optional_yield y)
{
if (is_meta_master()) {
/* We're master, don't forward */
return 0;
}
if (!svc()->zone->get_master_conn()) {
ldpp_dout(dpp, 0) << "rest connection is invalid" << dendl;
return -EINVAL;
}
ldpp_dout(dpp, 0) << "sending request to master zonegroup" << dendl;
bufferlist response;
std::string uid_str = user->get_id().to_str();
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
int ret = svc()->zone->get_master_conn()->forward(dpp, rgw_user(uid_str), info,
objv, MAX_REST_RESPONSE,
&in_data, &response, y);
if (ret < 0)
return ret;
ldpp_dout(dpp, 20) << "response: " << response.c_str() << dendl;
if (jp && !jp->parse(response.c_str(), response.length())) {
ldpp_dout(dpp, 0) << "failed parsing response from master zonegroup" << dendl;
return -EINVAL;
}
return 0;
}
int RadosStore::forward_iam_request_to_master(const DoutPrefixProvider *dpp, const RGWAccessKey& key, obj_version* objv,
bufferlist& in_data,
RGWXMLDecoder::XMLParser* parser, req_info& info,
optional_yield y)
{
if (is_meta_master()) {
/* We're master, don't forward */
return 0;
}
if (!svc()->zone->get_master_conn()) {
ldpp_dout(dpp, 0) << "rest connection is invalid" << dendl;
return -EINVAL;
}
ldpp_dout(dpp, 0) << "sending request to master zonegroup" << dendl;
bufferlist response;
#define MAX_REST_RESPONSE (128 * 1024) // we expect a very small response
int ret = svc()->zone->get_master_conn()->forward_iam_request(dpp, key, info,
objv, MAX_REST_RESPONSE,
&in_data, &response, y);
if (ret < 0)
return ret;
ldpp_dout(dpp, 20) << "response: " << response.c_str() << dendl;
std::string r = response.c_str();
  std::string str_to_search = "&quot;";
std::string str_to_replace = "\"";
boost::replace_all(r, str_to_search, str_to_replace);
ldpp_dout(dpp, 20) << "r: " << r.c_str() << dendl;
if (parser && !parser->parse(r.c_str(), r.length(), 1)) {
ldpp_dout(dpp, 0) << "ERROR: failed to parse response from master zonegroup" << dendl;
return -EIO;
}
return 0;
}
std::string RadosStore::zone_unique_id(uint64_t unique_num)
{
return svc()->zone_utils->unique_id(unique_num);
}
std::string RadosStore::zone_unique_trans_id(const uint64_t unique_num)
{
return svc()->zone_utils->unique_trans_id(unique_num);
}
int RadosStore::get_zonegroup(const std::string& id,
std::unique_ptr<ZoneGroup>* zonegroup)
{
ZoneGroup* zg;
RGWZoneGroup rzg;
int r = svc()->zone->get_zonegroup(id, rzg);
if (r < 0)
return r;
zg = new RadosZoneGroup(this, rzg);
if (!zg)
return -ENOMEM;
zonegroup->reset(zg);
return 0;
}
int RadosStore::list_all_zones(const DoutPrefixProvider* dpp, std::list<std::string>& zone_ids)
{
return svc()->zone->list_zones(dpp, zone_ids);
}
int RadosStore::cluster_stat(RGWClusterStat& stats)
{
rados_cluster_stat_t rados_stats;
int ret;
ret = rados->get_rados_handle()->cluster_stat(rados_stats);
if (ret < 0)
return ret;
stats.kb = rados_stats.kb;
stats.kb_used = rados_stats.kb_used;
stats.kb_avail = rados_stats.kb_avail;
stats.num_objects = rados_stats.num_objects;
return ret;
}
std::unique_ptr<Lifecycle> RadosStore::get_lifecycle(void)
{
return std::make_unique<RadosLifecycle>(this);
}
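// note: the req_state is passed twice below; it also serves as the
// DoutPrefixProvider for the notification.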
std::unique_ptr<Notification> RadosStore::get_notification(
rgw::sal::Object* obj, rgw::sal::Object* src_obj, req_state* s, rgw::notify::EventType event_type, optional_yield y, const std::string* object_name)
{
return std::make_unique<RadosNotification>(s, this, obj, src_obj, s, event_type, y, object_name);
}
std::unique_ptr<Notification> RadosStore::get_notification(const DoutPrefixProvider* dpp, rgw::sal::Object* obj, rgw::sal::Object* src_obj, rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket, std::string& _user_id, std::string& _user_tenant, std::string& _req_id, optional_yield y)
{
return std::make_unique<RadosNotification>(dpp, this, obj, src_obj, event_type, _bucket, _user_id, _user_tenant, _req_id, y);
}
std::string RadosStore::topics_oid(const std::string& tenant) const {
return pubsub_oid_prefix + tenant;
}
int RadosStore::read_topics(const std::string& tenant, rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
bufferlist bl;
const int ret = rgw_get_system_obj(svc()->sysobj,
svc()->zone->get_zone_params().log_pool,
topics_oid(tenant),
bl,
objv_tracker,
nullptr, y, dpp, nullptr);
if (ret < 0) {
return ret;
}
auto iter = bl.cbegin();
try {
decode(topics, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 20) << " failed to decode topics from oid: " << topics_oid(tenant) <<
". error: " << err.what() << dendl;
return -EIO;
}
return 0;
}
int RadosStore::write_topics(const std::string& tenant, const rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
bufferlist bl;
encode(topics, bl);
return rgw_put_system_obj(dpp, svc()->sysobj,
svc()->zone->get_zone_params().log_pool,
topics_oid(tenant),
bl, false, objv_tracker, real_time(), y);
}
int RadosStore::remove_topics(const std::string& tenant, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) {
return rgw_delete_system_obj(dpp, svc()->sysobj,
svc()->zone->get_zone_params().log_pool,
topics_oid(tenant),
objv_tracker, y);
}
int RadosStore::delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, optional_yield y)
{
return rados->delete_raw_obj(dpp, obj, y);
}
void RadosStore::get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj)
{
rados->obj_to_raw(placement_rule, obj, raw_obj);
}
int RadosStore::get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_obj& obj, uint64_t* chunk_size)
{
return rados->get_max_chunk_size(obj.pool, chunk_size, dpp);
}
int RadosStore::initialize(CephContext *cct, const DoutPrefixProvider *dpp)
{
std::unique_ptr<ZoneGroup> zg =
std::make_unique<RadosZoneGroup>(this, svc()->zone->get_zonegroup());
  zone = std::make_unique<RadosZone>(this, std::move(zg));
return 0;
}
int RadosStore::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y)
{
return rados->log_usage(dpp, usage_info, y);
}
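// Append an ops-log record to an object in the log pool. The pool is
// created lazily: on ENOENT it is created and the append retried once.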
int RadosStore::log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl)
{
rgw_raw_obj obj(svc()->zone->get_zone_params().log_pool, oid);
int ret = rados->append_async(dpp, obj, bl.length(), bl);
if (ret == -ENOENT) {
ret = rados->create_pool(dpp, svc()->zone->get_zone_params().log_pool);
if (ret < 0)
return ret;
// retry
ret = rados->append_async(dpp, obj, bl.length(), bl);
}
return ret;
}
int RadosStore::register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type,
const map<std::string, std::string>& meta)
{
return rados->register_to_service_map(dpp, daemon_type, meta);
}
void RadosStore::get_quota(RGWQuota& quota)
{
quota.bucket_quota = svc()->quota->get_bucket_quota();
quota.user_quota = svc()->quota->get_user_quota();
}
void RadosStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit)
{
bucket_ratelimit = svc()->zone->get_current_period().get_config().bucket_ratelimit;
user_ratelimit = svc()->zone->get_current_period().get_config().user_ratelimit;
anon_ratelimit = svc()->zone->get_current_period().get_config().anon_ratelimit;
}
int RadosStore::set_buckets_enabled(const DoutPrefixProvider* dpp, vector<rgw_bucket>& buckets, bool enabled, optional_yield y)
{
return rados->set_buckets_enabled(buckets, enabled, dpp, y);
}
int RadosStore::get_sync_policy_handler(const DoutPrefixProvider* dpp,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef* phandler,
optional_yield y)
{
return ctl()->bucket->get_sync_policy_handler(zone, bucket, phandler, y, dpp);
}
RGWDataSyncStatusManager* RadosStore::get_data_sync_manager(const rgw_zone_id& source_zone)
{
return rados->get_data_sync_manager(source_zone);
}
int RadosStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
rgw_user uid;
std::string bucket_name;
return rados->read_usage(dpp, uid, bucket_name, start_epoch, end_epoch, max_entries,
is_truncated, usage_iter, usage);
}
int RadosStore::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y)
{
rgw_user uid;
std::string bucket_name;
return rados->trim_usage(dpp, uid, bucket_name, start_epoch, end_epoch, y);
}
int RadosStore::get_config_key_val(std::string name, bufferlist* bl)
{
return svc()->config_key->get(name, true, bl);
}
int RadosStore::meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle)
{
return ctl()->meta.mgr->list_keys_init(dpp, section, marker, phandle);
}
int RadosStore::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, list<std::string>& keys, bool* truncated)
{
return ctl()->meta.mgr->list_keys_next(dpp, handle, max, keys, truncated);
}
void RadosStore::meta_list_keys_complete(void* handle)
{
ctl()->meta.mgr->list_keys_complete(handle);
}
std::string RadosStore::meta_get_marker(void* handle)
{
return ctl()->meta.mgr->get_marker(handle);
}
int RadosStore::meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key, optional_yield y)
{
return ctl()->meta.mgr->remove(metadata_key, y, dpp);
}
void RadosStore::finalize(void)
{
if (rados)
rados->finalize();
}
void RadosStore::register_admin_apis(RGWRESTMgr* mgr)
{
mgr->register_resource("user", new RGWRESTMgr_User);
mgr->register_resource("bucket", new RGWRESTMgr_Bucket);
/*Registering resource for /admin/metadata */
mgr->register_resource("metadata", new RGWRESTMgr_Metadata);
mgr->register_resource("log", new RGWRESTMgr_Log);
/* XXX These may become global when cbodley is done with his zone work */
mgr->register_resource("config", new RGWRESTMgr_Config);
mgr->register_resource("realm", new RGWRESTMgr_Realm);
mgr->register_resource("ratelimit", new RGWRESTMgr_Ratelimit);
}
std::unique_ptr<LuaManager> RadosStore::get_lua_manager()
{
return std::make_unique<RadosLuaManager>(this);
}
std::unique_ptr<RGWRole> RadosStore::get_role(std::string name,
std::string tenant,
std::string path,
std::string trust_policy,
std::string max_session_duration_str,
std::multimap<std::string,std::string> tags)
{
return std::make_unique<RadosRole>(this, name, tenant, path, trust_policy, max_session_duration_str, tags);
}
std::unique_ptr<RGWRole> RadosStore::get_role(std::string id)
{
return std::make_unique<RadosRole>(this, id);
}
std::unique_ptr<RGWRole> RadosStore::get_role(const RGWRoleInfo& info)
{
return std::make_unique<RadosRole>(this, info);
}
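// List roles by scanning raw objects in the roles pool under the tenant's
// role-path prefix, then recover each role id from the oid suffix that
// follows role_oid_prefix.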
int RadosStore::get_roles(const DoutPrefixProvider *dpp,
optional_yield y,
const std::string& path_prefix,
const std::string& tenant,
vector<std::unique_ptr<RGWRole>>& roles)
{
auto pool = svc()->zone->get_zone_params().roles_pool;
std::string prefix;
// List all roles if path prefix is empty
if (! path_prefix.empty()) {
prefix = tenant + RGWRole::role_path_oid_prefix + path_prefix;
} else {
prefix = tenant + RGWRole::role_path_oid_prefix;
}
//Get the filtered objects
list<std::string> result;
bool is_truncated;
RGWListRawObjsCtx ctx;
do {
list<std::string> oids;
int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: "
<< prefix << ": " << cpp_strerror(-r) << dendl;
return r;
}
for (const auto& iter : oids) {
result.push_back(iter.substr(RGWRole::role_path_oid_prefix.size()));
}
} while (is_truncated);
for (const auto& it : result) {
//Find the role oid prefix from the end
size_t pos = it.rfind(RGWRole::role_oid_prefix);
if (pos == std::string::npos) {
continue;
}
// Split the result into path and info_oid + id
std::string path = it.substr(0, pos);
    /* Make sure that prefix is part of path (false results could have been
     * returned because of the role info oid + id appended to the path) */
if(path_prefix.empty() || path.find(path_prefix) != std::string::npos) {
//Get id from info oid prefix + id
std::string id = it.substr(pos + RGWRole::role_oid_prefix.length());
std::unique_ptr<rgw::sal::RGWRole> role = get_role(id);
int ret = role->read_info(dpp, y);
if (ret < 0) {
return ret;
}
roles.push_back(std::move(role));
}
}
return 0;
}
std::unique_ptr<RGWOIDCProvider> RadosStore::get_oidc_provider()
{
return std::make_unique<RadosOIDCProvider>(this);
}
int RadosStore::get_oidc_providers(const DoutPrefixProvider *dpp,
const std::string& tenant,
vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y)
{
std::string prefix = tenant + RGWOIDCProvider::oidc_url_oid_prefix;
auto pool = svc()->zone->get_zone_params().oidc_pool;
//Get the filtered objects
list<std::string> result;
bool is_truncated;
RGWListRawObjsCtx ctx;
do {
list<std::string> oids;
int r = rados->list_raw_objects(dpp, pool, prefix, 1000, ctx, oids, &is_truncated);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: listing filtered objects failed: OIDC pool: "
<< pool.name << ": " << prefix << ": " << cpp_strerror(-r) << dendl;
return r;
}
for (const auto& iter : oids) {
std::unique_ptr<rgw::sal::RGWOIDCProvider> provider = get_oidc_provider();
bufferlist bl;
r = rgw_get_system_obj(svc()->sysobj, pool, iter, bl, nullptr, nullptr, y, dpp);
if (r < 0) {
return r;
}
try {
using ceph::decode;
        auto bliter = bl.cbegin();
        decode(*provider, bliter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: "
<< pool.name << ": " << iter << dendl;
return -EIO;
}
providers.push_back(std::move(provider));
}
} while (is_truncated);
return 0;
}
std::unique_ptr<Writer> RadosStore::get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
uint64_t *cur_accounted_size)
{
RGWBucketInfo& bucket_info = obj->get_bucket()->get_info();
RGWObjectCtx& obj_ctx = static_cast<RadosObject*>(obj)->get_ctx();
auto aio = rgw::make_throttle(ctx()->_conf->rgw_put_obj_min_window_size, y);
return std::make_unique<RadosAppendWriter>(dpp, y,
bucket_info, obj_ctx, obj->get_obj(),
this, std::move(aio), owner,
ptail_placement_rule,
unique_tag, position,
cur_accounted_size);
}
std::unique_ptr<Writer> RadosStore::get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag)
{
RGWBucketInfo& bucket_info = obj->get_bucket()->get_info();
RGWObjectCtx& obj_ctx = static_cast<RadosObject*>(obj)->get_ctx();
auto aio = rgw::make_throttle(ctx()->_conf->rgw_put_obj_min_window_size, y);
return std::make_unique<RadosAtomicWriter>(dpp, y,
bucket_info, obj_ctx, obj->get_obj(),
this, std::move(aio), owner,
ptail_placement_rule,
olh_epoch, unique_tag);
}
const std::string& RadosStore::get_compression_type(const rgw_placement_rule& rule)
{
return svc()->zone->get_zone_params().get_compression_type(rule);
}
bool RadosStore::valid_placement(const rgw_placement_rule& rule)
{
return svc()->zone->get_zone_params().valid_placement(rule);
}
int RadosStore::get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, librados::IoCtx* ioctx)
{
return rados->get_obj_head_ioctx(dpp, bucket_info, obj, ioctx);
}
RadosObject::~RadosObject()
{
if (rados_ctx_owned)
delete rados_ctx;
}
int RadosObject::get_obj_state(const DoutPrefixProvider* dpp, RGWObjState **pstate, optional_yield y, bool follow_olh)
{
int ret = store->getRados()->get_obj_state(dpp, rados_ctx, bucket->get_info(), get_obj(), pstate, &manifest, follow_olh, y);
if (ret < 0) {
return ret;
}
/* Don't overwrite obj, atomic, or prefetch */
rgw_obj obj = get_obj();
bool is_atomic = state.is_atomic;
bool prefetch_data = state.prefetch_data;
state = **pstate;
state.obj = obj;
state.is_atomic = is_atomic;
state.prefetch_data = prefetch_data;
return ret;
}
int RadosObject::read_attrs(const DoutPrefixProvider* dpp, RGWRados::Object::Read &read_op, optional_yield y, rgw_obj* target_obj)
{
read_op.params.attrs = &state.attrset;
read_op.params.target_obj = target_obj;
read_op.params.obj_size = &state.size;
read_op.params.lastmod = &state.mtime;
return read_op.prepare(y, dpp);
}
int RadosObject::set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y)
{
Attrs empty;
return store->getRados()->set_attrs(dpp, rados_ctx,
bucket->get_info(),
get_obj(),
setattrs ? *setattrs : empty,
delattrs ? delattrs : nullptr,
y);
}
int RadosObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj)
{
RGWRados::Object op_target(store->getRados(), bucket->get_info(), *rados_ctx, get_obj());
RGWRados::Object::Read read_op(&op_target);
return read_attrs(dpp, read_op, y, target_obj);
}
int RadosObject::modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp)
{
rgw_obj target = get_obj();
rgw_obj save = get_obj();
int r = get_obj_attrs(y, dpp, &target);
if (r < 0) {
return r;
}
/* Temporarily set target */
state.obj = target;
set_atomic();
state.attrset[attr_name] = attr_val;
r = set_obj_attrs(dpp, &state.attrset, nullptr, y);
/* Restore target */
state.obj = save;
return r;
}
int RadosObject::delete_obj_attrs(const DoutPrefixProvider* dpp, const char* attr_name, optional_yield y)
{
Attrs rmattr;
bufferlist bl;
set_atomic();
rmattr[attr_name] = bl;
return set_obj_attrs(dpp, nullptr, &rmattr, y);
}
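// An object is expired when its RGW_ATTR_DELETE_AT attribute decodes to a
// non-zero timestamp that is not in the future.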
bool RadosObject::is_expired() {
auto iter = state.attrset.find(RGW_ATTR_DELETE_AT);
if (iter == state.attrset.end()) {
return false;
}
utime_t delete_at;
try {
auto bufit = iter->second.cbegin();
decode(delete_at, bufit);
} catch (buffer::error& err) {
ldout(store->ctx(), 0) << "ERROR: " << __func__ << ": failed to decode " RGW_ATTR_DELETE_AT " attr" << dendl;
return false;
}
return delete_at <= ceph_clock_now() && !delete_at.is_zero();
}
void RadosObject::gen_rand_obj_instance_name()
{
store->getRados()->gen_rand_obj_instance_name(&state.obj.key);
}
void RadosObject::raw_obj_to_obj(const rgw_raw_obj& raw_obj)
{
rgw_obj tobj = get_obj();
RGWSI_Tier_RADOS::raw_obj_to_obj(get_bucket()->get_key(), raw_obj, &tobj);
set_key(tobj.key);
}
void RadosObject::get_raw_obj(rgw_raw_obj* raw_obj)
{
store->getRados()->obj_to_raw((bucket->get_info()).placement_rule, get_obj(), raw_obj);
}
int RadosObject::get_torrent_info(const DoutPrefixProvider* dpp,
optional_yield y, bufferlist& bl)
{
// try to read torrent info from attr
int ret = StoreObject::get_torrent_info(dpp, y, bl);
if (ret >= 0) {
return ret;
}
// try falling back to old torrent info stored in omap
rgw_raw_obj raw_obj;
get_raw_obj(&raw_obj);
rgw_rados_ref ref;
ret = store->getRados()->get_raw_obj_ref(dpp, raw_obj, &ref);
if (ret < 0) {
return ret;
}
const std::set<std::string> keys = {"rgw.torrent"};
std::map<std::string, bufferlist> result;
librados::ObjectReadOperation op;
op.omap_get_vals_by_keys(keys, &result, nullptr);
ret = rgw_rados_operate(dpp, ref.pool.ioctx(), ref.obj.oid, &op, nullptr, y);
if (ret < 0) {
return ret;
}
if (result.empty()) { // omap key not found
return -ENOENT;
}
bl = std::move(result.begin()->second);
return 0;
}
int RadosObject::omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
const std::set<std::string>& keys,
Attrs* vals)
{
int ret;
rgw_raw_obj head_obj;
librados::IoCtx cur_ioctx;
rgw_obj obj = get_obj();
store->getRados()->obj_to_raw(bucket->get_placement_rule(), obj, &head_obj);
ret = store->get_obj_head_ioctx(dpp, bucket->get_info(), obj, &cur_ioctx);
if (ret < 0) {
return ret;
}
return cur_ioctx.omap_get_vals_by_keys(oid, keys, vals);
}
int RadosObject::omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val,
bool must_exist, optional_yield y)
{
rgw_raw_obj raw_meta_obj;
rgw_obj obj = get_obj();
store->getRados()->obj_to_raw(bucket->get_placement_rule(), obj, &raw_meta_obj);
auto sysobj = store->svc()->sysobj->get_obj(raw_meta_obj);
return sysobj.omap().set_must_exist(must_exist).set(dpp, key, val, y);
}
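// Rewrite the object's ACL so that ownership (and FULL_CONTROL) moves from
// the current owner to new_user, then persist it via RGW_ATTR_ACL.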
int RadosObject::chown(User& new_user, const DoutPrefixProvider* dpp, optional_yield y)
{
int r = get_obj_attrs(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to read object attrs " << get_name() << cpp_strerror(-r) << dendl;
return r;
}
const auto& aiter = get_attrs().find(RGW_ATTR_ACL);
if (aiter == get_attrs().end()) {
ldpp_dout(dpp, 0) << "ERROR: no acls found for object " << get_name() << dendl;
return -EINVAL;
}
bufferlist& bl = aiter->second;
RGWAccessControlPolicy policy(store->ctx());
ACLOwner owner;
auto bliter = bl.cbegin();
try {
policy.decode(bliter);
owner = policy.get_owner();
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: decode policy failed" << err.what()
<< dendl;
return -EIO;
}
//Get the ACL from the policy
RGWAccessControlList& acl = policy.get_acl();
//Remove grant that is set to old owner
acl.remove_canon_user_grant(owner.get_id());
//Create a grant and add grant
ACLGrant grant;
grant.set_canon(new_user.get_id(), new_user.get_display_name(), RGW_PERM_FULL_CONTROL);
acl.add_grant(&grant);
//Update the ACL owner to the new user
owner.set_id(new_user.get_id());
owner.set_name(new_user.get_display_name());
policy.set_owner(owner);
bl.clear();
encode(policy, bl);
set_atomic();
map<string, bufferlist> attrs;
attrs[RGW_ATTR_ACL] = bl;
r = set_obj_attrs(dpp, &attrs, nullptr, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: modify attr failed " << cpp_strerror(-r) << dendl;
return r;
}
return 0;
}
std::unique_ptr<MPSerializer> RadosObject::get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name)
{
return std::make_unique<MPRadosSerializer>(dpp, store, this, lock_name);
}
int RadosObject::transition(Bucket* bucket,
const rgw_placement_rule& placement_rule,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
optional_yield y)
{
return store->getRados()->transition_obj(*rados_ctx, bucket->get_info(), get_obj(), placement_rule, mtime, olh_epoch, dpp, y);
}
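// Transition an object to a cloud tier over the S3 REST API described by the
// placement tier config. If no target bucket is configured, a deterministic
// "rgwx-<zonegroup>-<storage-class>-cloud-bucket" name is derived.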
int RadosObject::transition_to_cloud(Bucket* bucket,
rgw::sal::PlacementTier* tier,
rgw_bucket_dir_entry& o,
std::set<std::string>& cloud_targets,
CephContext* cct,
bool update_object,
const DoutPrefixProvider* dpp,
optional_yield y)
{
/* init */
rgw::sal::RadosPlacementTier* rtier = static_cast<rgw::sal::RadosPlacementTier*>(tier);
string id = "cloudid";
string endpoint = rtier->get_rt().t.s3.endpoint;
RGWAccessKey key = rtier->get_rt().t.s3.key;
string region = rtier->get_rt().t.s3.region;
HostStyle host_style = rtier->get_rt().t.s3.host_style;
string bucket_name = rtier->get_rt().t.s3.target_path;
const rgw::sal::ZoneGroup& zonegroup = store->get_zone()->get_zonegroup();
if (bucket_name.empty()) {
bucket_name = "rgwx-" + zonegroup.get_name() + "-" + tier->get_storage_class() +
"-cloud-bucket";
boost::algorithm::to_lower(bucket_name);
}
/* Create RGW REST connection */
S3RESTConn conn(cct, id, { endpoint }, key, zonegroup.get_id(), region, host_style);
RGWLCCloudTierCtx tier_ctx(cct, dpp, o, store, bucket->get_info(),
this, conn, bucket_name,
rtier->get_rt().t.s3.target_storage_class);
tier_ctx.acl_mappings = rtier->get_rt().t.s3.acl_mappings;
tier_ctx.multipart_min_part_size = rtier->get_rt().t.s3.multipart_min_part_size;
tier_ctx.multipart_sync_threshold = rtier->get_rt().t.s3.multipart_sync_threshold;
tier_ctx.storage_class = tier->get_storage_class();
ldpp_dout(dpp, 0) << "Transitioning object(" << o.key << ") to the cloud endpoint(" << endpoint << ")" << dendl;
/* Transition object to cloud end point */
int ret = rgw_cloud_tier_transfer_object(tier_ctx, cloud_targets);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to transfer object(" << o.key << ") to the cloud endpoint(" << endpoint << ") ret=" << ret << dendl;
return ret;
}
if (update_object) {
real_time read_mtime;
std::unique_ptr<rgw::sal::Object::ReadOp> read_op(get_read_op());
read_op->params.lastmod = &read_mtime;
ret = read_op->prepare(y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: Updating tier object(" << o.key << ") failed ret=" << ret << dendl;
return ret;
}
if (read_mtime != tier_ctx.o.meta.mtime) {
/* raced */
ldpp_dout(dpp, 0) << "ERROR: Updating tier object(" << o.key << ") failed ret=" << -ECANCELED << dendl;
return -ECANCELED;
}
rgw_placement_rule target_placement;
target_placement.inherit_from(tier_ctx.bucket_info.placement_rule);
target_placement.storage_class = tier->get_storage_class();
ret = write_cloud_tier(dpp, y, tier_ctx.o.versioned_epoch,
tier, tier_ctx.is_multipart_upload,
target_placement, tier_ctx.obj);
}
return ret;
}
int RadosObject::write_cloud_tier(const DoutPrefixProvider* dpp,
optional_yield y,
uint64_t olh_epoch,
PlacementTier* tier,
bool is_multipart_upload,
rgw_placement_rule& target_placement,
Object* head_obj)
{
rgw::sal::RadosPlacementTier* rtier = static_cast<rgw::sal::RadosPlacementTier*>(tier);
map<string, bufferlist> attrs = get_attrs();
RGWRados::Object op_target(store->getRados(), bucket->get_info(), *rados_ctx, get_obj());
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.modify_tail = true;
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.category = RGWObjCategory::CloudTiered;
obj_op.meta.delete_at = real_time();
bufferlist blo;
obj_op.meta.data = &blo;
obj_op.meta.if_match = NULL;
obj_op.meta.if_nomatch = NULL;
obj_op.meta.user_data = NULL;
obj_op.meta.zones_trace = NULL;
obj_op.meta.olh_epoch = olh_epoch;
RGWObjManifest *pmanifest;
RGWObjManifest manifest;
pmanifest = &manifest;
RGWObjTier tier_config;
tier_config.name = tier->get_storage_class();
tier_config.tier_placement = rtier->get_rt();
tier_config.is_multipart_upload = is_multipart_upload;
pmanifest->set_tier_type("cloud-s3");
pmanifest->set_tier_config(tier_config);
/* check if its necessary */
pmanifest->set_head(target_placement, head_obj->get_obj(), 0);
pmanifest->set_tail_placement(target_placement, head_obj->get_obj().bucket);
pmanifest->set_obj_size(0);
obj_op.meta.manifest = pmanifest;
/* update storage class */
bufferlist bl;
bl.append(tier->get_storage_class());
attrs[RGW_ATTR_STORAGE_CLASS] = bl;
attrs.erase(RGW_ATTR_ID_TAG);
attrs.erase(RGW_ATTR_TAIL_TAG);
return obj_op.write_meta(dpp, 0, 0, attrs, y);
}
int RadosObject::get_max_chunk_size(const DoutPrefixProvider* dpp, rgw_placement_rule placement_rule, uint64_t* max_chunk_size, uint64_t* alignment)
{
return store->getRados()->get_max_chunk_size(placement_rule, get_obj(), max_chunk_size, dpp, alignment);
}
void RadosObject::get_max_aligned_size(uint64_t size, uint64_t alignment,
uint64_t* max_size)
{
store->getRados()->get_max_aligned_size(size, alignment, max_size);
}
bool RadosObject::placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2)
{
rgw_obj obj;
rgw_pool p1, p2;
obj = get_obj();
if (r1 == r2)
return true;
if (!store->getRados()->get_obj_data_pool(r1, obj, &p1)) {
return false;
}
if (!store->getRados()->get_obj_data_pool(r2, obj, &p2)) {
return false;
}
return p1 == p2;
}
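// Dump the object's head location and manifest, plus the resolved rados
// location of every stripe, clamping the final stripe to the object size.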
int RadosObject::dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f)
{
int ret;
RGWObjManifest *amanifest{nullptr};
rgw_raw_obj head_obj;
RGWRados::Object op_target(store->getRados(), bucket->get_info(), *rados_ctx, get_obj());
RGWRados::Object::Read parent_op(&op_target);
uint64_t obj_size;
parent_op.params.obj_size = &obj_size;
parent_op.params.attrs = &get_attrs();
ret = parent_op.prepare(y, dpp);
if (ret < 0) {
return ret;
}
head_obj = parent_op.state.head_obj;
ret = op_target.get_manifest(dpp, &amanifest, y);
if (ret < 0) {
return ret;
}
::encode_json("head", head_obj, f);
::encode_json("manifest", *amanifest, f);
f->open_array_section("data_location");
for (auto miter = amanifest->obj_begin(dpp); miter != amanifest->obj_end(dpp); ++miter) {
f->open_object_section("obj");
rgw_raw_obj raw_loc = miter.get_location().get_raw_obj(store->getRados());
uint64_t ofs = miter.get_ofs();
uint64_t left = amanifest->get_obj_size() - ofs;
::encode_json("ofs", miter.get_ofs(), f);
::encode_json("loc", raw_loc, f);
::encode_json("loc_ofs", miter.location_ofs(), f);
uint64_t loc_size = miter.get_stripe_size();
if (loc_size > left) {
loc_size = left;
}
::encode_json("loc_size", loc_size, f);
f->close_section();
}
f->close_section();
return 0;
}
std::unique_ptr<Object::ReadOp> RadosObject::get_read_op()
{
return std::make_unique<RadosObject::RadosReadOp>(this, rados_ctx);
}
RadosObject::RadosReadOp::RadosReadOp(RadosObject *_source, RGWObjectCtx *_rctx) :
source(_source),
rctx(_rctx),
op_target(_source->store->getRados(),
_source->get_bucket()->get_info(),
*static_cast<RGWObjectCtx *>(rctx),
_source->get_obj()),
parent_op(&op_target)
{ }
int RadosObject::RadosReadOp::prepare(optional_yield y, const DoutPrefixProvider* dpp)
{
uint64_t obj_size;
parent_op.conds.mod_ptr = params.mod_ptr;
parent_op.conds.unmod_ptr = params.unmod_ptr;
parent_op.conds.high_precision_time = params.high_precision_time;
parent_op.conds.mod_zone_id = params.mod_zone_id;
parent_op.conds.mod_pg_ver = params.mod_pg_ver;
parent_op.conds.if_match = params.if_match;
parent_op.conds.if_nomatch = params.if_nomatch;
parent_op.params.lastmod = params.lastmod;
parent_op.params.target_obj = params.target_obj;
parent_op.params.obj_size = &obj_size;
parent_op.params.attrs = &source->get_attrs();
int ret = parent_op.prepare(y, dpp);
if (ret < 0)
return ret;
source->set_key(parent_op.state.obj.key);
source->set_obj_size(obj_size);
return ret;
}
int RadosObject::RadosReadOp::read(int64_t ofs, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider* dpp)
{
return parent_op.read(ofs, end, bl, y, dpp);
}
int RadosObject::RadosReadOp::get_attr(const DoutPrefixProvider* dpp, const char* name, bufferlist& dest, optional_yield y)
{
return parent_op.get_attr(dpp, name, dest, y);
}
std::unique_ptr<Object::DeleteOp> RadosObject::get_delete_op()
{
return std::make_unique<RadosObject::RadosDeleteOp>(this);
}
RadosObject::RadosDeleteOp::RadosDeleteOp(RadosObject *_source) :
source(_source),
op_target(_source->store->getRados(),
_source->get_bucket()->get_info(),
_source->get_ctx(),
_source->get_obj()),
parent_op(&op_target)
{ }
int RadosObject::RadosDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y)
{
parent_op.params.bucket_owner = params.bucket_owner.get_id();
parent_op.params.versioning_status = params.versioning_status;
parent_op.params.obj_owner = params.obj_owner;
parent_op.params.olh_epoch = params.olh_epoch;
parent_op.params.marker_version_id = params.marker_version_id;
parent_op.params.bilog_flags = params.bilog_flags;
parent_op.params.remove_objs = params.remove_objs;
parent_op.params.expiration_time = params.expiration_time;
parent_op.params.unmod_since = params.unmod_since;
parent_op.params.mtime = params.mtime;
parent_op.params.high_precision_time = params.high_precision_time;
parent_op.params.zones_trace = params.zones_trace;
parent_op.params.abortmp = params.abortmp;
parent_op.params.parts_accounted_size = params.parts_accounted_size;
int ret = parent_op.delete_obj(y, dpp);
if (ret < 0)
return ret;
result.delete_marker = parent_op.result.delete_marker;
result.version_id = parent_op.result.version_id;
return ret;
}
int RadosObject::delete_object(const DoutPrefixProvider* dpp,
optional_yield y,
bool prevent_versioning)
{
RGWRados::Object del_target(store->getRados(), bucket->get_info(), *rados_ctx, get_obj());
RGWRados::Object::Delete del_op(&del_target);
del_op.params.bucket_owner = bucket->get_info().owner;
del_op.params.versioning_status = prevent_versioning ? 0 : bucket->get_info().versioning_status();
return del_op.delete_obj(y, dpp);
}
int RadosObject::copy_object(User* user,
req_info* info,
const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object,
rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
const rgw_placement_rule& dest_placement,
ceph::real_time* src_mtime,
ceph::real_time* mtime,
const ceph::real_time* mod_ptr,
const ceph::real_time* unmod_ptr,
bool high_precision_time,
const char* if_match,
const char* if_nomatch,
AttrsMod attrs_mod,
bool copy_if_newer,
Attrs& attrs,
RGWObjCategory category,
uint64_t olh_epoch,
boost::optional<ceph::real_time> delete_at,
std::string* version_id,
std::string* tag,
std::string* etag,
void (*progress_cb)(off_t, void *),
void* progress_data,
const DoutPrefixProvider* dpp,
optional_yield y)
{
return store->getRados()->copy_obj(*rados_ctx,
user->get_id(),
info,
source_zone,
dest_object->get_obj(),
get_obj(),
dest_bucket->get_info(),
src_bucket->get_info(),
dest_placement,
src_mtime,
mtime,
mod_ptr,
unmod_ptr,
high_precision_time,
if_match,
if_nomatch,
static_cast<RGWRados::AttrsMod>(attrs_mod),
copy_if_newer,
attrs,
category,
olh_epoch,
(delete_at ? *delete_at : real_time()),
version_id,
tag,
etag,
progress_cb,
progress_data,
dpp,
y);
}
int RadosObject::RadosReadOp::iterate(const DoutPrefixProvider* dpp, int64_t ofs, int64_t end, RGWGetDataCB* cb, optional_yield y)
{
return parent_op.iterate(dpp, ofs, end, cb, y);
}
int RadosObject::swift_versioning_restore(bool& restored,
const DoutPrefixProvider* dpp, optional_yield y)
{
rgw_obj obj = get_obj();
return store->getRados()->swift_versioning_restore(*rados_ctx,
bucket->get_owner()->get_id(),
bucket->get_info(),
obj,
restored,
dpp, y);
}
int RadosObject::swift_versioning_copy(const DoutPrefixProvider* dpp, optional_yield y)
{
return store->getRados()->swift_versioning_copy(*rados_ctx,
bucket->get_info().owner,
bucket->get_info(),
get_obj(),
dpp,
y);
}
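// Re-uploads of the same part leave tail objects under older prefixes.
// Collect those past-prefix rados objects into a GC chain (or delete them
// inline when GC is disabled) and drop their index entries.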
int RadosMultipartUpload::cleanup_part_history(const DoutPrefixProvider* dpp,
optional_yield y,
RadosMultipartPart *part,
list<rgw_obj_index_key>& remove_objs)
{
cls_rgw_obj_chain chain;
for (auto& ppfx : part->get_past_prefixes()) {
rgw_obj past_obj;
past_obj.init_ns(bucket->get_key(), ppfx + "." + std::to_string(part->info.num), mp_ns);
rgw_obj_index_key past_key;
past_obj.key.get_index_key(&past_key);
// Remove past upload part objects from index, too.
remove_objs.push_back(past_key);
RGWObjManifest manifest = part->get_manifest();
manifest.set_prefix(ppfx);
RGWObjManifest::obj_iterator miter = manifest.obj_begin(dpp);
for (; miter != manifest.obj_end(dpp); ++miter) {
rgw_raw_obj raw_part_obj = miter.get_location().get_raw_obj(store->getRados());
cls_rgw_obj_key part_key(raw_part_obj.oid);
chain.push_obj(raw_part_obj.pool.to_str(), part_key, raw_part_obj.loc);
}
}
if (store->getRados()->get_gc() == nullptr) {
    // Delete objects inline if gc hasn't been initialised (e.g. when gc bypass is specified)
store->getRados()->delete_objs_inline(dpp, chain, mp_obj.get_upload_id());
} else {
// use upload id as tag and do it synchronously
auto [ret, leftover_chain] = store->getRados()->send_chain_to_gc(chain, mp_obj.get_upload_id(), y);
if (ret < 0 && leftover_chain) {
ldpp_dout(dpp, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl;
if (ret == -ENOENT) {
return -ERR_NO_SUCH_UPLOAD;
}
      // Delete objects inline if sending the chain to gc fails
store->getRados()->delete_objs_inline(dpp, *leftover_chain, mp_obj.get_upload_id());
}
}
return 0;
}
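// Abort an in-progress upload: parts without a manifest are deleted
// directly; manifest tails are handed to GC (or deleted inline when GC is
// disabled); finally the meta object and all part index entries are removed.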
int RadosMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct, optional_yield y)
{
std::unique_ptr<rgw::sal::Object> meta_obj = get_meta_obj();
meta_obj->set_in_extra_data(true);
meta_obj->set_hash_source(mp_obj.get_key());
cls_rgw_obj_chain chain;
list<rgw_obj_index_key> remove_objs;
bool truncated;
int marker = 0;
int ret;
uint64_t parts_accounted_size = 0;
do {
ret = list_parts(dpp, cct, 1000, marker, &marker, &truncated, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": RadosMultipartUpload::list_parts returned " <<
ret << dendl;
return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
}
for (auto part_it = parts.begin();
part_it != parts.end();
++part_it) {
RadosMultipartPart* obj_part = dynamic_cast<RadosMultipartPart*>(part_it->second.get());
if (obj_part->info.manifest.empty()) {
std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(
rgw_obj_key(obj_part->oid, std::string(), RGW_OBJ_NS_MULTIPART));
obj->set_hash_source(mp_obj.get_key());
ret = obj->delete_object(dpp, y);
if (ret < 0 && ret != -ENOENT)
return ret;
} else {
auto target = meta_obj->get_obj();
store->getRados()->update_gc_chain(dpp, target, obj_part->info.manifest, &chain);
RGWObjManifest::obj_iterator oiter = obj_part->info.manifest.obj_begin(dpp);
if (oiter != obj_part->info.manifest.obj_end(dpp)) {
std::unique_ptr<rgw::sal::Object> head = bucket->get_object(rgw_obj_key());
rgw_raw_obj raw_head = oiter.get_location().get_raw_obj(store->getRados());
dynamic_cast<rgw::sal::RadosObject*>(head.get())->raw_obj_to_obj(raw_head);
rgw_obj_index_key key;
head->get_key().get_index_key(&key);
remove_objs.push_back(key);
cleanup_part_history(dpp, null_yield, obj_part, remove_objs);
}
}
parts_accounted_size += obj_part->info.accounted_size;
}
} while (truncated);
if (store->getRados()->get_gc() == nullptr) {
    // Delete objects inline if gc hasn't been initialised (e.g. when gc bypass is specified)
store->getRados()->delete_objs_inline(dpp, chain, mp_obj.get_upload_id());
} else {
/* use upload id as tag and do it synchronously */
auto [ret, leftover_chain] = store->getRados()->send_chain_to_gc(chain, mp_obj.get_upload_id(), y);
if (ret < 0 && leftover_chain) {
ldpp_dout(dpp, 5) << __func__ << ": gc->send_chain() returned " << ret << dendl;
if (ret == -ENOENT) {
return -ERR_NO_SUCH_UPLOAD;
}
      // Delete objects inline if sending the chain to gc fails
store->getRados()->delete_objs_inline(dpp, *leftover_chain, mp_obj.get_upload_id());
}
}
std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = meta_obj->get_delete_op();
del_op->params.bucket_owner = bucket->get_acl_owner();
del_op->params.versioning_status = 0;
if (!remove_objs.empty()) {
del_op->params.remove_objs = &remove_objs;
}
del_op->params.abortmp = true;
del_op->params.parts_accounted_size = parts_accounted_size;
// and also remove the metadata obj
ret = del_op->delete_obj(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": del_op.delete_obj returned " <<
ret << dendl;
}
return (ret == -ENOENT) ? -ERR_NO_SUCH_UPLOAD : ret;
}
std::unique_ptr<rgw::sal::Object> RadosMultipartUpload::get_meta_obj()
{
return bucket->get_object(rgw_obj_key(get_meta(), string(), mp_ns));
}
int RadosMultipartUpload::init(const DoutPrefixProvider *dpp, optional_yield y, ACLOwner& owner, rgw_placement_rule& dest_placement, rgw::sal::Attrs& attrs)
{
int ret;
std::string oid = mp_obj.get_key();
RGWObjectCtx obj_ctx(store);
do {
char buf[33];
string tmp_obj_name;
std::unique_ptr<rgw::sal::Object> obj;
gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
std::string upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
upload_id.append(buf);
mp_obj.init(oid, upload_id);
tmp_obj_name = mp_obj.get_meta();
obj = bucket->get_object(rgw_obj_key(tmp_obj_name, string(), mp_ns));
    // the meta object will be indexed with 0 size; it only carries upload metadata in its attrs and omap
obj->set_in_extra_data(true);
obj->set_hash_source(oid);
RGWRados::Object op_target(store->getRados(),
obj->get_bucket()->get_info(),
obj_ctx, obj->get_obj());
RGWRados::Object::Write obj_op(&op_target);
op_target.set_versioning_disabled(true); /* no versioning for multipart meta */
obj_op.meta.owner = owner.get_id();
obj_op.meta.category = RGWObjCategory::MultiMeta;
obj_op.meta.flags = PUT_OBJ_CREATE_EXCL;
obj_op.meta.mtime = &mtime;
multipart_upload_info upload_info;
upload_info.dest_placement = dest_placement;
bufferlist bl;
encode(upload_info, bl);
obj_op.meta.data = &bl;
ret = obj_op.write_meta(dpp, bl.length(), 0, attrs, y);
} while (ret == -EEXIST);
return ret;
}
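// v2 upload ids produce omap keys that sort by part number ("part.%08d"),
// allowing paginated listing; otherwise all omap entries are fetched and
// filtered client-side. On detecting unexpected ordering, listing restarts
// in the unsorted mode.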
int RadosMultipartUpload::list_parts(const DoutPrefixProvider *dpp, CephContext *cct,
int num_parts, int marker,
int *next_marker, bool *truncated, optional_yield y,
bool assume_unsorted)
{
map<string, bufferlist> parts_map;
map<string, bufferlist>::iterator iter;
rgw_obj_key key(get_meta(), std::string(), RGW_OBJ_NS_MULTIPART);
rgw_obj obj(bucket->get_key(), key);
obj.in_extra_data = true;
rgw_raw_obj raw_obj;
store->getRados()->obj_to_raw(bucket->get_placement_rule(), obj, &raw_obj);
auto sysobj = store->svc()->sysobj->get_obj(raw_obj);
bool sorted_omap = is_v2_upload_id(get_upload_id()) && !assume_unsorted;
parts.clear();
int ret;
if (sorted_omap) {
string p;
p = "part.";
char buf[32];
snprintf(buf, sizeof(buf), "%08d", marker);
p.append(buf);
ret = sysobj.omap().get_vals(dpp, p, num_parts + 1, &parts_map,
nullptr, y);
} else {
ret = sysobj.omap().get_all(dpp, &parts_map, y);
}
if (ret < 0) {
return ret;
}
int i;
int last_num = 0;
uint32_t expected_next = marker + 1;
for (i = 0, iter = parts_map.begin();
(i < num_parts || !sorted_omap) && iter != parts_map.end();
++iter, ++i) {
bufferlist& bl = iter->second;
auto bli = bl.cbegin();
std::unique_ptr<RadosMultipartPart> part = std::make_unique<RadosMultipartPart>();
try {
decode(part->info, bli);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: could not part info, caught buffer::error" <<
dendl;
return -EIO;
}
if (sorted_omap) {
if (part->info.num != expected_next) {
        /* ouch, we expected a specific part num here, but we got a
         * different one. Either a part is missing, or it could be a
         * case of mixed rgw versions working on the same upload,
         * where one gateway doesn't support correctly sorted omap
         * keys for multipart upload. Just assume the data is unsorted.
         */
return list_parts(dpp, cct, num_parts, marker, next_marker, truncated, y, true);
}
expected_next++;
}
if (sorted_omap ||
(int)part->info.num > marker) {
last_num = part->info.num;
parts[part->info.num] = std::move(part);
}
}
if (sorted_omap) {
if (truncated) {
*truncated = (iter != parts_map.end());
}
} else {
/* rebuild a map with only num_parts entries */
std::map<uint32_t, std::unique_ptr<MultipartPart>> new_parts;
std::map<uint32_t, std::unique_ptr<MultipartPart>>::iterator piter;
for (i = 0, piter = parts.begin();
i < num_parts && piter != parts.end();
++i, ++piter) {
last_num = piter->first;
new_parts[piter->first] = std::move(piter->second);
}
if (truncated) {
*truncated = (piter != parts.end());
}
parts.swap(new_parts);
}
if (next_marker) {
*next_marker = last_num;
}
return 0;
}
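// Complete a multipart upload: validate the client-supplied part etags
// against the stored parts, stitch the per-part manifests together, and
// write the head object. The final etag is the MD5 of the concatenated
// binary part MD5s, suffixed with "-<part count>".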
int RadosMultipartUpload::complete(const DoutPrefixProvider *dpp,
optional_yield y, CephContext* cct,
map<int, string>& part_etags,
list<rgw_obj_index_key>& remove_objs,
uint64_t& accounted_size, bool& compressed,
RGWCompressionInfo& cs_info, off_t& ofs,
std::string& tag, ACLOwner& owner,
uint64_t olh_epoch,
rgw::sal::Object* target_obj)
{
char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE];
char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16];
std::string etag;
bufferlist etag_bl;
MD5 hash;
// Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
bool truncated;
int ret;
int total_parts = 0;
int handled_parts = 0;
int max_parts = 1000;
int marker = 0;
uint64_t min_part_size = cct->_conf->rgw_multipart_min_part_size;
auto etags_iter = part_etags.begin();
rgw::sal::Attrs attrs = target_obj->get_attrs();
do {
ret = list_parts(dpp, cct, max_parts, marker, &marker, &truncated, y);
if (ret == -ENOENT) {
ret = -ERR_NO_SUCH_UPLOAD;
}
if (ret < 0)
return ret;
total_parts += parts.size();
if (!truncated && total_parts != (int)part_etags.size()) {
ldpp_dout(dpp, 0) << "NOTICE: total parts mismatch: have: " << total_parts
<< " expected: " << part_etags.size() << dendl;
ret = -ERR_INVALID_PART;
return ret;
}
for (auto obj_iter = parts.begin(); etags_iter != part_etags.end() && obj_iter != parts.end(); ++etags_iter, ++obj_iter, ++handled_parts) {
RadosMultipartPart* part = dynamic_cast<rgw::sal::RadosMultipartPart*>(obj_iter->second.get());
uint64_t part_size = part->get_size();
if (handled_parts < (int)part_etags.size() - 1 &&
part_size < min_part_size) {
ret = -ERR_TOO_SMALL;
return ret;
}
char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
if (etags_iter->first != (int)obj_iter->first) {
ldpp_dout(dpp, 0) << "NOTICE: parts num mismatch: next requested: "
<< etags_iter->first << " next uploaded: "
<< obj_iter->first << dendl;
ret = -ERR_INVALID_PART;
return ret;
}
string part_etag = rgw_string_unquote(etags_iter->second);
if (part_etag.compare(part->get_etag()) != 0) {
ldpp_dout(dpp, 0) << "NOTICE: etag mismatch: part: " << etags_iter->first
<< " etag: " << etags_iter->second << dendl;
ret = -ERR_INVALID_PART;
return ret;
}
hex_to_buf(part->get_etag().c_str(), petag,
CEPH_CRYPTO_MD5_DIGESTSIZE);
hash.Update((const unsigned char *)petag, sizeof(petag));
RGWUploadPartInfo& obj_part = part->info;
/* update manifest for part */
string oid = mp_obj.get_part(part->info.num);
rgw_obj src_obj;
src_obj.init_ns(bucket->get_key(), oid, mp_ns);
if (obj_part.manifest.empty()) {
ldpp_dout(dpp, 0) << "ERROR: empty manifest for object part: obj="
<< src_obj << dendl;
ret = -ERR_INVALID_PART;
return ret;
} else {
manifest.append(dpp, obj_part.manifest, store->svc()->zone->get_zonegroup(), store->svc()->zone->get_zone_params());
auto manifest_prefix = part->info.manifest.get_prefix();
if (not manifest_prefix.empty()) {
// It has an explicit prefix. Override the default one.
src_obj.init_ns(bucket->get_key(), manifest_prefix + "." + std::to_string(part->info.num), mp_ns);
}
}
bool part_compressed = (obj_part.cs_info.compression_type != "none");
if ((handled_parts > 0) &&
((part_compressed != compressed) ||
(cs_info.compression_type != obj_part.cs_info.compression_type))) {
ldpp_dout(dpp, 0) << "ERROR: compression type was changed during multipart upload ("
<< cs_info.compression_type << ">>" << obj_part.cs_info.compression_type << ")" << dendl;
ret = -ERR_INVALID_PART;
return ret;
}
if (part_compressed) {
int64_t new_ofs; // offset in compression data for new part
if (cs_info.blocks.size() > 0)
new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
else
new_ofs = 0;
for (const auto& block : obj_part.cs_info.blocks) {
compression_block cb;
cb.old_ofs = block.old_ofs + cs_info.orig_size;
cb.new_ofs = new_ofs;
cb.len = block.len;
cs_info.blocks.push_back(cb);
new_ofs = cb.new_ofs + cb.len;
}
if (!compressed)
cs_info.compression_type = obj_part.cs_info.compression_type;
cs_info.orig_size += obj_part.cs_info.orig_size;
compressed = true;
}
rgw_obj_index_key remove_key;
src_obj.key.get_index_key(&remove_key);
remove_objs.push_back(remove_key);
cleanup_part_history(dpp, y, part, remove_objs);
ofs += obj_part.size;
accounted_size += obj_part.accounted_size;
}
} while (truncated);
hash.Final((unsigned char *)final_etag);
buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2],
sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
"-%lld", (long long)part_etags.size());
etag = final_etag_str;
ldpp_dout(dpp, 10) << "calculated etag: " << etag << dendl;
etag_bl.append(etag);
attrs[RGW_ATTR_ETAG] = etag_bl;
if (compressed) {
// write compression attribute to full object
bufferlist tmp;
encode(cs_info, tmp);
attrs[RGW_ATTR_COMPRESSION] = tmp;
}
target_obj->set_atomic();
RGWRados::Object op_target(store->getRados(),
target_obj->get_bucket()->get_info(),
dynamic_cast<RadosObject*>(target_obj)->get_ctx(),
target_obj->get_obj());
RGWRados::Object::Write obj_op(&op_target);
obj_op.meta.manifest = &manifest;
obj_op.meta.remove_objs = &remove_objs;
obj_op.meta.ptag = &tag; /* use req_id as operation tag */
obj_op.meta.owner = owner.get_id();
obj_op.meta.flags = PUT_OBJ_CREATE;
obj_op.meta.modify_tail = true;
obj_op.meta.completeMultipart = true;
obj_op.meta.olh_epoch = olh_epoch;
  return obj_op.write_meta(dpp, ofs, accounted_size, attrs, y);
}
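// Return the placement rule and/or attrs of an in-progress upload. Attrs
// come from the meta object's xattrs; the placement rule is decoded from the
// multipart_upload_info blob stored in the meta object's head data.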
int RadosMultipartUpload::get_info(const DoutPrefixProvider *dpp, optional_yield y, rgw_placement_rule** rule, rgw::sal::Attrs* attrs)
{
if (!rule && !attrs) {
return 0;
}
if (rule) {
if (!placement.empty()) {
*rule = &placement;
if (!attrs) {
/* Don't need attrs, done */
return 0;
}
} else {
*rule = nullptr;
}
}
/* We need either attributes or placement, so we need a read */
std::unique_ptr<rgw::sal::Object> meta_obj;
meta_obj = get_meta_obj();
meta_obj->set_in_extra_data(true);
multipart_upload_info upload_info;
bufferlist headbl;
/* Read the obj head which contains the multipart_upload_info */
std::unique_ptr<rgw::sal::Object::ReadOp> read_op = meta_obj->get_read_op();
meta_obj->set_prefetch_data();
int ret = read_op->prepare(y, dpp);
if (ret < 0) {
if (ret == -ENOENT) {
return -ERR_NO_SUCH_UPLOAD;
}
return ret;
}
extract_span_context(meta_obj->get_attrs(), trace_ctx);
if (attrs) {
/* Attrs are filled in by prepare */
*attrs = meta_obj->get_attrs();
if (!rule || *rule != nullptr) {
/* placement was cached; don't actually read */
return 0;
}
}
/* Now read the placement from the head */
ret = read_op->read(0, store->ctx()->_conf->rgw_max_chunk_size, headbl, y, dpp);
if (ret < 0) {
if (ret == -ENOENT) {
return -ERR_NO_SUCH_UPLOAD;
}
return ret;
}
if (headbl.length() <= 0) {
return -ERR_NO_SUCH_UPLOAD;
}
/* Decode multipart_upload_info */
auto hiter = headbl.cbegin();
try {
decode(upload_info, hiter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode multipart upload info" << dendl;
return -EIO;
}
placement = upload_info.dest_placement;
*rule = &placement;
return 0;
}
std::unique_ptr<Writer> RadosMultipartUpload::get_writer(
const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str)
{
RGWBucketInfo& bucket_info = obj->get_bucket()->get_info();
RGWObjectCtx& obj_ctx = static_cast<RadosObject*>(obj)->get_ctx();
auto aio = rgw::make_throttle(store->ctx()->_conf->rgw_put_obj_min_window_size, y);
return std::make_unique<RadosMultipartWriter>(dpp, y, get_upload_id(),
bucket_info, obj_ctx,
obj->get_obj(), store, std::move(aio), owner,
ptail_placement_rule, part_num, part_num_str);
}
MPRadosSerializer::MPRadosSerializer(const DoutPrefixProvider *dpp, RadosStore* store, RadosObject* obj, const std::string& lock_name) :
lock(lock_name)
{
rgw_pool meta_pool;
rgw_raw_obj raw_obj;
obj->get_raw_obj(&raw_obj);
oid = raw_obj.oid;
store->getRados()->get_obj_data_pool(obj->get_bucket()->get_placement_rule(),
obj->get_obj(), &meta_pool);
store->getRados()->open_pool_ctx(dpp, meta_pool, ioctx, true, true);
}
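// Take the exclusive cls lock together with an existence assertion in one
// compound operation, so the lock can never be taken on a vanished object.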
int MPRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y)
{
op.assert_exists();
lock.set_duration(dur);
lock.lock_exclusive(&op);
int ret = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (! ret) {
locked = true;
}
return ret;
}
LCRadosSerializer::LCRadosSerializer(RadosStore* store, const std::string& _oid, const std::string& lock_name, const std::string& cookie) :
StoreLCSerializer(_oid),
lock(lock_name)
{
ioctx = &store->getRados()->lc_pool_ctx;
lock.set_cookie(cookie);
}
int LCRadosSerializer::try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y)
{
lock.set_duration(dur);
return lock.lock_exclusive(ioctx, oid);
}
int RadosLifecycle::get_entry(const std::string& oid, const std::string& marker,
std::unique_ptr<LCEntry>* entry)
{
cls_rgw_lc_entry cls_entry;
int ret = cls_rgw_lc_get_entry(*store->getRados()->get_lc_pool_ctx(), oid, marker, cls_entry);
if (ret)
return ret;
LCEntry* e;
e = new StoreLCEntry(cls_entry.bucket, cls_entry.start_time, cls_entry.status);
if (!e)
return -ENOMEM;
entry->reset(e);
return 0;
}
int RadosLifecycle::get_next_entry(const std::string& oid, const std::string& marker,
std::unique_ptr<LCEntry>* entry)
{
cls_rgw_lc_entry cls_entry;
int ret = cls_rgw_lc_get_next_entry(*store->getRados()->get_lc_pool_ctx(), oid, marker,
cls_entry);
if (ret)
return ret;
LCEntry* e;
e = new StoreLCEntry(cls_entry.bucket, cls_entry.start_time, cls_entry.status);
if (!e)
return -ENOMEM;
entry->reset(e);
return 0;
}
int RadosLifecycle::set_entry(const std::string& oid, LCEntry& entry)
{
cls_rgw_lc_entry cls_entry;
cls_entry.bucket = entry.get_bucket();
cls_entry.start_time = entry.get_start_time();
cls_entry.status = entry.get_status();
return cls_rgw_lc_set_entry(*store->getRados()->get_lc_pool_ctx(), oid, cls_entry);
}
int RadosLifecycle::list_entries(const std::string& oid, const std::string& marker,
uint32_t max_entries, std::vector<std::unique_ptr<LCEntry>>& entries)
{
entries.clear();
vector<cls_rgw_lc_entry> cls_entries;
int ret = cls_rgw_lc_list(*store->getRados()->get_lc_pool_ctx(), oid, marker, max_entries, cls_entries);
if (ret < 0)
return ret;
for (auto& entry : cls_entries) {
entries.push_back(std::make_unique<StoreLCEntry>(entry.bucket, oid,
entry.start_time, entry.status));
}
return ret;
}
int RadosLifecycle::rm_entry(const std::string& oid, LCEntry& entry)
{
cls_rgw_lc_entry cls_entry;
cls_entry.bucket = entry.get_bucket();
cls_entry.start_time = entry.get_start_time();
cls_entry.status = entry.get_status();
return cls_rgw_lc_rm_entry(*store->getRados()->get_lc_pool_ctx(), oid, cls_entry);
}
int RadosLifecycle::get_head(const std::string& oid, std::unique_ptr<LCHead>* head)
{
cls_rgw_lc_obj_head cls_head;
int ret = cls_rgw_lc_get_head(*store->getRados()->get_lc_pool_ctx(), oid, cls_head);
if (ret)
return ret;
LCHead* h;
h = new StoreLCHead(cls_head.start_date, cls_head.shard_rollover_date, cls_head.marker);
if (!h)
return -ENOMEM;
head->reset(h);
return 0;
}
int RadosLifecycle::put_head(const std::string& oid, LCHead& head)
{
cls_rgw_lc_obj_head cls_head;
cls_head.marker = head.get_marker();
cls_head.start_date = head.get_start_date();
cls_head.shard_rollover_date = head.get_shard_rollover_date();
return cls_rgw_lc_put_head(*store->getRados()->get_lc_pool_ctx(), oid, cls_head);
}
std::unique_ptr<LCSerializer> RadosLifecycle::get_serializer(const std::string& lock_name,
const std::string& oid,
const std::string& cookie)
{
return std::make_unique<LCRadosSerializer>(store, oid, lock_name, cookie);
}
int RadosNotification::publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags)
{
return rgw::notify::publish_reserve(dpp, event_type, res, obj_tags);
}
int RadosNotification::publish_commit(const DoutPrefixProvider* dpp, uint64_t size,
const ceph::real_time& mtime, const std::string& etag, const std::string& version)
{
return rgw::notify::publish_commit(obj, size, mtime, etag, version, event_type, res, dpp);
}
int RadosAtomicWriter::prepare(optional_yield y)
{
return processor.prepare(y);
}
int RadosAtomicWriter::process(bufferlist&& data, uint64_t offset)
{
return processor.process(std::move(data), offset);
}
int RadosAtomicWriter::complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y)
{
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
if_match, if_nomatch, user_data, zones_trace, canceled, y);
}
int RadosAppendWriter::prepare(optional_yield y)
{
return processor.prepare(y);
}
int RadosAppendWriter::process(bufferlist&& data, uint64_t offset)
{
return processor.process(std::move(data), offset);
}
int RadosAppendWriter::complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y)
{
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
if_match, if_nomatch, user_data, zones_trace, canceled, y);
}
int RadosMultipartWriter::prepare(optional_yield y)
{
return processor.prepare(y);
}
int RadosMultipartWriter::process(bufferlist&& data, uint64_t offset)
{
return processor.process(std::move(data), offset);
}
int RadosMultipartWriter::complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y)
{
return processor.complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at,
if_match, if_nomatch, user_data, zones_trace, canceled, y);
}
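// Prefer the zonegroup's own endpoint list; fall back to the master zone's
// endpoints, and finally to an empty string.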
const std::string& RadosZoneGroup::get_endpoint() const
{
if (!group.endpoints.empty()) {
return group.endpoints.front();
} else {
// use zonegroup's master zone endpoints
auto z = group.zones.find(group.master_zone);
if (z != group.zones.end() && !z->second.endpoints.empty()) {
return z->second.endpoints.front();
}
}
return empty;
}
bool RadosZoneGroup::placement_target_exists(std::string& target) const
{
return !!group.placement_targets.count(target);
}
int RadosZoneGroup::get_placement_target_names(std::set<std::string>& names) const
{
for (const auto& target : group.placement_targets) {
names.emplace(target.second.name);
}
return 0;
}
int RadosZoneGroup::get_placement_tier(const rgw_placement_rule& rule,
std::unique_ptr<PlacementTier>* tier)
{
std::map<std::string, RGWZoneGroupPlacementTarget>::const_iterator titer;
titer = group.placement_targets.find(rule.name);
if (titer == group.placement_targets.end()) {
return -ENOENT;
}
const auto& target_rule = titer->second;
std::map<std::string, RGWZoneGroupPlacementTier>::const_iterator ttier;
ttier = target_rule.tier_targets.find(rule.storage_class);
if (ttier == target_rule.tier_targets.end()) {
// not found
return -ENOENT;
}
PlacementTier* t;
t = new RadosPlacementTier(store, ttier->second);
if (!t)
return -ENOMEM;
tier->reset(t);
return 0;
}
int RadosZoneGroup::get_zone_by_id(const std::string& id, std::unique_ptr<Zone>* zone)
{
RGWZone* rz = store->svc()->zone->find_zone(id);
if (!rz)
return -ENOENT;
Zone* z = new RadosZone(store, clone(), *rz);
zone->reset(z);
return 0;
}
int RadosZoneGroup::get_zone_by_name(const std::string& name, std::unique_ptr<Zone>* zone)
{
rgw_zone_id id;
int ret = store->svc()->zone->find_zone_id_by_name(name, &id);
if (ret < 0)
return ret;
RGWZone* rz = store->svc()->zone->find_zone(id.id);
if (!rz)
return -ENOENT;
Zone* z = new RadosZone(store, clone(), *rz);
zone->reset(z);
return 0;
}
int RadosZoneGroup::list_zones(std::list<std::string>& zone_ids)
{
for (const auto& entry : group.zones)
{
zone_ids.push_back(entry.second.id);
}
return 0;
}
std::unique_ptr<Zone> RadosZone::clone()
{
if (local_zone)
return std::make_unique<RadosZone>(store, group->clone());
return std::make_unique<RadosZone>(store, group->clone(), rgw_zone);
}
const std::string& RadosZone::get_id()
{
if (local_zone)
return store->svc()->zone->zone_id().id;
return rgw_zone.id;
}
const std::string& RadosZone::get_name() const
{
if (local_zone)
return store->svc()->zone->zone_name();
return rgw_zone.name;
}
bool RadosZone::is_writeable()
{
if (local_zone)
return store->svc()->zone->zone_is_writeable();
return !rgw_zone.read_only;
}
bool RadosZone::get_redirect_endpoint(std::string* endpoint)
{
if (local_zone)
return store->svc()->zone->get_redirect_zone_endpoint(endpoint);
  *endpoint = rgw_zone.redirect_zone;
return true;
}
bool RadosZone::has_zonegroup_api(const std::string& api) const
{
return store->svc()->zone->has_zonegroup_api(api);
}
const std::string& RadosZone::get_current_period_id()
{
return store->svc()->zone->get_current_period_id();
}
const RGWAccessKey& RadosZone::get_system_key()
{
return store->svc()->zone->get_zone_params().system_key;
}
const std::string& RadosZone::get_realm_name()
{
return store->svc()->zone->get_realm().get_name();
}
const std::string& RadosZone::get_realm_id()
{
return store->svc()->zone->get_realm().get_id();
}
const std::string_view RadosZone::get_tier_type()
{
if (local_zone)
return store->svc()->zone->get_zone().tier_type;
  return rgw_zone.tier_type;
}
RGWBucketSyncPolicyHandlerRef RadosZone::get_sync_policy_handler()
{
return store->svc()->zone->get_sync_policy_handler(get_id());
}
RadosLuaManager::RadosLuaManager(RadosStore* _s) :
store(_s),
pool((store->svc() && store->svc()->zone) ? store->svc()->zone->get_zone_params().log_pool : rgw_pool())
{ }
int RadosLuaManager::get_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, std::string& script)
{
if (pool.empty()) {
ldpp_dout(dpp, 10) << "WARNING: missing pool when reading lua script " << dendl;
return 0;
}
bufferlist bl;
int r = rgw_get_system_obj(store->svc()->sysobj, pool, key, bl, nullptr, nullptr, y, dpp);
if (r < 0) {
return r;
}
auto iter = bl.cbegin();
try {
ceph::decode(script, iter);
} catch (buffer::error& err) {
return -EIO;
}
return 0;
}
int RadosLuaManager::put_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, const std::string& script)
{
if (pool.empty()) {
ldpp_dout(dpp, 10) << "WARNING: missing pool when writing lua script " << dendl;
return 0;
}
bufferlist bl;
ceph::encode(script, bl);
int r = rgw_put_system_obj(dpp, store->svc()->sysobj, pool, key, bl, false, nullptr, real_time(), y);
if (r < 0) {
return r;
}
return 0;
}
int RadosLuaManager::del_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key)
{
if (pool.empty()) {
ldpp_dout(dpp, 10) << "WARNING: missing pool when deleting lua script " << dendl;
return 0;
}
int r = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, key, nullptr, y);
if (r < 0 && r != -ENOENT) {
return r;
}
return 0;
}
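// Illustrative round trip through the three calls above (a minimal sketch;
// the key "prerequest" is hypothetical -- see rgw_lua for the real key
// scheme -- and dpp/y are assumed to be in scope):
//   std::string script = "function f() end";
//   mgr->put_script(dpp, y, "prerequest", script);  // encode + write sysobj
//   std::string out;
//   mgr->get_script(dpp, y, "prerequest", out);     // out == script on success
//   mgr->del_script(dpp, y, "prerequest");          // -ENOENT is tolerated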
const std::string PACKAGE_LIST_OBJECT_NAME = "lua_package_allowlist";
int RadosLuaManager::add_package(const DoutPrefixProvider *dpp, optional_yield y, const std::string& package_name)
{
// add package to list
const bufferlist empty_bl;
std::map<std::string, bufferlist> new_package{{package_name, empty_bl}};
librados::ObjectWriteOperation op;
op.omap_set(new_package);
auto ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
return ret;
}
return 0;
}
int RadosLuaManager::remove_package(const DoutPrefixProvider *dpp, optional_yield y, const std::string& package_name)
{
librados::ObjectWriteOperation op;
size_t pos = package_name.find(" ");
if (pos != package_name.npos) {
    // remove a specific version of the package
op.omap_rm_keys(std::set<std::string>({package_name}));
auto ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
return ret;
}
return 0;
}
// otherwise, remove any existing versions of the package
rgw::lua::packages_t packages;
auto ret = list_packages(dpp, y, packages);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
for(const auto& package : packages) {
const std::string package_no_version = package.substr(0, package.find(" "));
if (package_no_version.compare(package_name) == 0) {
op.omap_rm_keys(std::set<std::string>({package}));
ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, y);
if (ret < 0) {
return ret;
}
}
}
return 0;
}
int RadosLuaManager::list_packages(const DoutPrefixProvider *dpp, optional_yield y, rgw::lua::packages_t& packages)
{
constexpr auto max_chunk = 1024U;
std::string start_after;
bool more = true;
int rval;
while (more) {
librados::ObjectReadOperation op;
rgw::lua::packages_t packages_chunk;
op.omap_get_keys2(start_after, max_chunk, &packages_chunk, &more, &rval);
const auto ret = rgw_rados_operate(dpp, *(store->getRados()->get_lc_pool_ctx()),
PACKAGE_LIST_OBJECT_NAME, &op, nullptr, y);
if (ret < 0) {
return ret;
}
    if (!packages_chunk.empty()) {
      // advance the cursor so the next omap_get_keys2() call resumes after
      // the last key returned, instead of re-reading the first chunk forever
      start_after = *packages_chunk.rbegin();
    }
    packages.merge(packages_chunk);
}
return 0;
}
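// Illustrative usage (a sketch; dpp/y are assumed to be in scope and the
// entry shown is hypothetical). Keys are stored as "<package>" or
// "<package> <version>", e.g. "luasocket 3.0rc1-2":
//   rgw::lua::packages_t packages;
//   if (mgr->list_packages(dpp, y, packages) == 0) {
//     for (const auto& p : packages) {
//       // p is the raw omap key, possibly carrying a version suffix
//     }
//   }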
int RadosOIDCProvider::store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
std::string oid = tenant + get_url_oid_prefix() + url;
bufferlist bl;
using ceph::encode;
encode(*this, bl);
return rgw_put_system_obj(dpp, sysobj, store->svc()->zone->get_zone_params().oidc_pool, oid, bl, exclusive, nullptr, real_time(), y);
}
int RadosOIDCProvider::read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
auto& pool = store->svc()->zone->get_zone_params().oidc_pool;
std::string oid = tenant + get_url_oid_prefix() + url;
bufferlist bl;
int ret = rgw_get_system_obj(sysobj, pool, oid, bl, nullptr, nullptr, y, dpp);
if (ret < 0) {
return ret;
}
try {
using ceph::decode;
auto iter = bl.cbegin();
decode(*this, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode oidc provider info from pool: " << pool.name <<
": " << url << dendl;
return -EIO;
}
return 0;
}
int RadosOIDCProvider::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
{
auto& pool = store->svc()->zone->get_zone_params().oidc_pool;
std::string url, tenant;
auto ret = get_tenant_url_from_arn(tenant, url);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to parse arn" << dendl;
return -EINVAL;
}
if (this->tenant != tenant) {
ldpp_dout(dpp, 0) << "ERROR: tenant in arn doesn't match that of user " << this->tenant << ", "
<< tenant << ": " << dendl;
return -EINVAL;
}
// Delete url
std::string oid = tenant + get_url_oid_prefix() + url;
ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting oidc url from pool: " << pool.name << ": "
<< provider_url << ": " << cpp_strerror(-ret) << dendl;
}
return ret;
}
int RadosRole::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
using ceph::encode;
std::string oid;
oid = info.id;
bufferlist bl;
encode(this->info, bl);
if (!this->info.tags.empty()) {
bufferlist bl_tags;
encode(this->info.tags, bl_tags);
map<string, bufferlist> attrs;
attrs.emplace("tagging", bl_tags);
RGWSI_MBSObj_PutParams params(bl, &attrs, info.mtime, exclusive);
std::unique_ptr<RGWSI_MetaBackend::Context> ctx(store->svc()->role->svc.meta_be->alloc_ctx());
ctx->init(store->svc()->role->get_be_handler());
return store->svc()->role->svc.meta_be->put(ctx.get(), oid, params, &info.objv_tracker, y, dpp);
} else {
RGWSI_MBSObj_PutParams params(bl, nullptr, info.mtime, exclusive);
std::unique_ptr<RGWSI_MetaBackend::Context> ctx(store->svc()->role->svc.meta_be->alloc_ctx());
ctx->init(store->svc()->role->get_be_handler());
return store->svc()->role->svc.meta_be->put(ctx.get(), oid, params, &info.objv_tracker, y, dpp);
}
}
int RadosRole::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
RGWNameToId nameToId;
nameToId.obj_id = info.id;
std::string oid = info.tenant + get_names_oid_prefix() + info.name;
bufferlist bl;
using ceph::encode;
encode(nameToId, bl);
return rgw_put_system_obj(dpp, sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, exclusive, &info.objv_tracker, real_time(), y);
}
int RadosRole::store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
std::string oid = info.tenant + get_path_oid_prefix() + info.path + get_info_oid_prefix() + info.id;
bufferlist bl;
return rgw_put_system_obj(dpp, sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, exclusive, &info.objv_tracker, real_time(), y);
}
int RadosRole::read_id(const DoutPrefixProvider *dpp, const std::string& role_name, const std::string& tenant, std::string& role_id, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
  std::string oid = tenant + get_names_oid_prefix() + role_name;
bufferlist bl;
int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, y, dpp);
if (ret < 0) {
return ret;
}
RGWNameToId nameToId;
try {
auto iter = bl.cbegin();
using ceph::decode;
decode(nameToId, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode role from Role pool: " << role_name << dendl;
return -EIO;
}
role_id = nameToId.obj_id;
return 0;
}
int RadosRole::read_name(const DoutPrefixProvider *dpp, optional_yield y)
{
auto sysobj = store->svc()->sysobj;
std::string oid = info.tenant + get_names_oid_prefix() + info.name;
bufferlist bl;
int ret = rgw_get_system_obj(sysobj, store->svc()->zone->get_zone_params().roles_pool, oid, bl, nullptr, nullptr, y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed reading role name from Role pool: " << info.name <<
": " << cpp_strerror(-ret) << dendl;
return ret;
}
RGWNameToId nameToId;
try {
using ceph::decode;
auto iter = bl.cbegin();
decode(nameToId, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode role name from Role pool: " << info.name << dendl;
return -EIO;
}
info.id = nameToId.obj_id;
return 0;
}
int RadosRole::read_info(const DoutPrefixProvider *dpp, optional_yield y)
{
std::string oid;
oid = info.id;
ldpp_dout(dpp, 20) << "INFO: oid in read_info is: " << oid << dendl;
bufferlist bl;
RGWSI_MBSObj_GetParams params(&bl, &info.attrs, &info.mtime);
std::unique_ptr<RGWSI_MetaBackend::Context> ctx(store->svc()->role->svc.meta_be->alloc_ctx());
ctx->init(store->svc()->role->get_be_handler());
int ret = store->svc()->role->svc.meta_be->get(ctx.get(), oid, params, &info.objv_tracker, y, dpp, true);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed reading role info from Role pool: " << info.id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
try {
using ceph::decode;
auto iter = bl.cbegin();
decode(this->info, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode role info from Role pool: " << info.id << dendl;
return -EIO;
}
auto it = info.attrs.find("tagging");
if (it != info.attrs.end()) {
bufferlist bl_tags = it->second;
try {
using ceph::decode;
auto iter = bl_tags.cbegin();
decode(info.tags, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode attrs" << info.id << dendl;
return -EIO;
}
}
return 0;
}
int RadosRole::create(const DoutPrefixProvider *dpp, bool exclusive, const std::string& role_id, optional_yield y)
{
int ret;
if (! validate_input(dpp)) {
return -EINVAL;
}
if (!role_id.empty()) {
info.id = role_id;
}
/* check to see the name is not used */
ret = read_id(dpp, info.name, info.tenant, info.id, y);
if (exclusive && ret == 0) {
ldpp_dout(dpp, 0) << "ERROR: name " << info.name << " already in use for role id "
<< info.id << dendl;
return -EEXIST;
  } else if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "failed reading role id " << info.id << ": "
<< cpp_strerror(-ret) << dendl;
return ret;
}
if (info.id.empty()) {
/* create unique id */
uuid_d new_uuid;
char uuid_str[37];
new_uuid.generate_random();
new_uuid.print(uuid_str);
info.id = uuid_str;
}
  // construct the role ARN
info.arn = role_arn_prefix + info.tenant + ":role" + info.path + info.name;
// Creation time
real_clock::time_point t = real_clock::now();
struct timeval tv;
real_clock::to_timeval(t, tv);
char buf[30];
struct tm result;
gmtime_r(&tv.tv_sec, &result);
strftime(buf,30,"%Y-%m-%dT%H:%M:%S", &result);
  // zero-pad milliseconds so e.g. 5 ms renders as ".005" rather than ".5"
  sprintf(buf + strlen(buf), ".%03dZ", (int)tv.tv_usec/1000);
info.creation_date.assign(buf, strlen(buf));
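  // e.g. "2024-05-07T15:04:05.123Z" (illustrative value): ISO 8601 UTC with
  // millisecond precision, as IAM-style clients expect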
auto& pool = store->svc()->zone->get_zone_params().roles_pool;
ret = store_info(dpp, exclusive, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: storing role info in Role pool: "
<< info.id << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = store_name(dpp, exclusive, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: storing role name in Role pool: "
<< info.name << ": " << cpp_strerror(-ret) << dendl;
//Delete the role info that was stored in the previous call
std::string oid = get_info_oid_prefix() + info.id;
int info_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (info_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from Role pool: "
<< info.id << ": " << cpp_strerror(-info_ret) << dendl;
}
return ret;
}
ret = store_path(dpp, exclusive, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: storing role path in Role pool: "
<< info.path << ": " << cpp_strerror(-ret) << dendl;
//Delete the role info that was stored in the previous call
std::string oid = get_info_oid_prefix() + info.id;
int info_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (info_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role id from Role pool: "
<< info.id << ": " << cpp_strerror(-info_ret) << dendl;
}
//Delete role name that was stored in previous call
oid = info.tenant + get_names_oid_prefix() + info.name;
int name_ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (name_ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: cleanup of role name from Role pool: "
<< info.name << ": " << cpp_strerror(-name_ret) << dendl;
}
return ret;
}
return 0;
}
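// Illustrative layout written by create() above (oids are schematic; "1234",
// "mytenant" and "myrole" are hypothetical values):
//   roles pool: info prefix + "1234"                          -> RGWRoleInfo
//   roles pool: "mytenant" + names prefix + "myrole"          -> RGWNameToId{"1234"}
//   roles pool: "mytenant" + path prefix + path + info prefix + "1234" -> (empty)
// A failure in store_name() or store_path() rolls back the earlier writes.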
int RadosRole::delete_obj(const DoutPrefixProvider *dpp, optional_yield y)
{
auto& pool = store->svc()->zone->get_zone_params().roles_pool;
int ret = read_name(dpp, y);
if (ret < 0) {
return ret;
}
ret = read_info(dpp, y);
if (ret < 0) {
return ret;
}
if (! info.perm_policy_map.empty()) {
return -ERR_DELETE_CONFLICT;
}
// Delete id
std::string oid = get_info_oid_prefix() + info.id;
ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role id from Role pool: "
<< info.id << ": " << cpp_strerror(-ret) << dendl;
}
// Delete name
oid = info.tenant + get_names_oid_prefix() + info.name;
ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role name from Role pool: "
<< info.name << ": " << cpp_strerror(-ret) << dendl;
}
// Delete path
oid = info.tenant + get_path_oid_prefix() + info.path + get_info_oid_prefix() + info.id;
ret = rgw_delete_system_obj(dpp, store->svc()->sysobj, pool, oid, nullptr, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: deleting role path from Role pool: "
<< info.path << ": " << cpp_strerror(-ret) << dendl;
}
return ret;
}
} // namespace rgw::sal
extern "C" {
void* newRadosStore(void)
{
rgw::sal::RadosStore* store = new rgw::sal::RadosStore();
if (store) {
RGWRados* rados = new RGWRados();
if (!rados) {
delete store; store = nullptr;
} else {
store->setRados(rados);
rados->set_store(store);
}
}
return store;
}
}
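// Illustrative consumer sketch (hypothetical; the SAL driver loader is the
// real caller of this factory symbol):
//   void* p = newRadosStore();
//   auto* driver = static_cast<rgw::sal::RadosStore*>(p);
//   // ... driver->initialize(cct, dpp), use the store, then delete driver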
| 119,695 | 30.041494 | 291 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sal_rados.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_sal_store.h"
#include "rgw_rados.h"
#include "rgw_notify.h"
#include "rgw_oidc_provider.h"
#include "rgw_role.h"
#include "rgw_multi.h"
#include "rgw_putobj_processor.h"
#include "services/svc_tier_rados.h"
#include "cls/lock/cls_lock_client.h"
namespace rgw { namespace sal {
class RadosMultipartUpload;
class RadosPlacementTier: public StorePlacementTier {
RadosStore* store;
RGWZoneGroupPlacementTier tier;
public:
RadosPlacementTier(RadosStore* _store, const RGWZoneGroupPlacementTier& _tier) : store(_store), tier(_tier) {}
virtual ~RadosPlacementTier() = default;
virtual const std::string& get_tier_type() { return tier.tier_type; }
virtual const std::string& get_storage_class() { return tier.storage_class; }
virtual bool retain_head_object() { return tier.retain_head_object; }
RGWZoneGroupPlacementTier& get_rt() { return tier; }
};
class RadosZoneGroup : public StoreZoneGroup {
RadosStore* store;
const RGWZoneGroup group;
std::string empty;
public:
RadosZoneGroup(RadosStore* _store, const RGWZoneGroup& _group) : store(_store), group(_group) {}
virtual ~RadosZoneGroup() = default;
virtual const std::string& get_id() const override { return group.get_id(); };
virtual const std::string& get_name() const override { return group.get_name(); };
virtual int equals(const std::string& other_zonegroup) const override {
return group.equals(other_zonegroup);
};
/** Get the endpoint from zonegroup, or from master zone if not set */
virtual const std::string& get_endpoint() const override;
virtual bool placement_target_exists(std::string& target) const override;
virtual bool is_master_zonegroup() const override {
return group.is_master_zonegroup();
};
virtual const std::string& get_api_name() const override { return group.api_name; };
virtual int get_placement_target_names(std::set<std::string>& names) const override;
virtual const std::string& get_default_placement_name() const override {
return group.default_placement.name; };
virtual int get_hostnames(std::list<std::string>& names) const override {
names = group.hostnames;
return 0;
};
virtual int get_s3website_hostnames(std::list<std::string>& names) const override {
names = group.hostnames_s3website;
return 0;
};
virtual int get_zone_count() const override {
return group.zones.size();
}
virtual int get_placement_tier(const rgw_placement_rule& rule, std::unique_ptr<PlacementTier>* tier);
virtual int get_zone_by_id(const std::string& id, std::unique_ptr<Zone>* zone) override;
virtual int get_zone_by_name(const std::string& name, std::unique_ptr<Zone>* zone) override;
virtual int list_zones(std::list<std::string>& zone_ids) override;
virtual std::unique_ptr<ZoneGroup> clone() override {
return std::make_unique<RadosZoneGroup>(store, group);
}
const RGWZoneGroup& get_group() const { return group; }
};
class RadosZone : public StoreZone {
protected:
RadosStore* store;
std::unique_ptr<ZoneGroup> group;
RGWZone rgw_zone;
bool local_zone{false};
public:
RadosZone(RadosStore* _store, std::unique_ptr<ZoneGroup> _zg) : store(_store), group(std::move(_zg)), local_zone(true) {}
RadosZone(RadosStore* _store, std::unique_ptr<ZoneGroup> _zg, RGWZone& z) : store(_store), group(std::move(_zg)), rgw_zone(z) {}
~RadosZone() = default;
virtual std::unique_ptr<Zone> clone() override;
virtual ZoneGroup& get_zonegroup() override { return *(group.get()); }
virtual const std::string& get_id() override;
virtual const std::string& get_name() const override;
virtual bool is_writeable() override;
virtual bool get_redirect_endpoint(std::string* endpoint) override;
virtual bool has_zonegroup_api(const std::string& api) const override;
virtual const std::string& get_current_period_id() override;
virtual const RGWAccessKey& get_system_key() override;
virtual const std::string& get_realm_name() override;
virtual const std::string& get_realm_id() override;
virtual const std::string_view get_tier_type() override;
virtual RGWBucketSyncPolicyHandlerRef get_sync_policy_handler() override;
};
class RadosStore : public StoreDriver {
private:
RGWRados* rados;
RGWUserCtl* user_ctl;
std::unique_ptr<RadosZone> zone;
std::string topics_oid(const std::string& tenant) const;
public:
RadosStore()
: rados(nullptr) {
}
~RadosStore() {
delete rados;
}
virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) override;
virtual const std::string get_name() const override {
return "rados";
}
virtual std::string get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual std::unique_ptr<User> get_user(const rgw_user& u) override;
virtual int get_user_by_access_key(const DoutPrefixProvider* dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user) override;
virtual int get_user_by_email(const DoutPrefixProvider* dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user) override;
virtual int get_user_by_swift(const DoutPrefixProvider* dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user) override;
virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
virtual int get_bucket(const DoutPrefixProvider* dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y) override;
virtual int get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket) override;
  virtual int get_bucket(const DoutPrefixProvider* dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr<Bucket>* bucket, optional_yield y) override;
virtual bool is_meta_master() override;
virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv,
bufferlist& in_data, JSONParser* jp, req_info& info,
optional_yield y) override;
virtual int forward_iam_request_to_master(const DoutPrefixProvider *dpp, const RGWAccessKey& key, obj_version* objv,
bufferlist& in_data,
RGWXMLDecoder::XMLParser* parser, req_info& info,
optional_yield y) override;
virtual Zone* get_zone() { return zone.get(); }
virtual std::string zone_unique_id(uint64_t unique_num) override;
virtual std::string zone_unique_trans_id(const uint64_t unique_num) override;
virtual int get_zonegroup(const std::string& id, std::unique_ptr<ZoneGroup>* zonegroup) override;
virtual int list_all_zones(const DoutPrefixProvider* dpp, std::list<std::string>& zone_ids) override;
virtual int cluster_stat(RGWClusterStat& stats) override;
virtual std::unique_ptr<Lifecycle> get_lifecycle(void) override;
virtual std::unique_ptr<Notification> get_notification(rgw::sal::Object* obj, rgw::sal::Object* src_obj, req_state* s, rgw::notify::EventType event_type, optional_yield y, const std::string* object_name=nullptr) override;
virtual std::unique_ptr<Notification> get_notification(
const DoutPrefixProvider* dpp, rgw::sal::Object* obj, rgw::sal::Object* src_obj,
rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket, std::string& _user_id, std::string& _user_tenant,
std::string& _req_id, optional_yield y) override;
int read_topics(const std::string& tenant, rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
int write_topics(const std::string& tenant, const rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
int remove_topics(const std::string& tenant, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
virtual RGWLC* get_rgwlc(void) override { return rados->get_lc(); }
virtual RGWCoroutinesManagerRegistry* get_cr_registry() override { return rados->get_cr_registry(); }
virtual int log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info, optional_yield y) override;
virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) override;
virtual int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type,
const std::map<std::string, std::string>& meta) override;
virtual void get_quota(RGWQuota& quota) override;
virtual void get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) override;
virtual int set_buckets_enabled(const DoutPrefixProvider* dpp, std::vector<rgw_bucket>& buckets, bool enabled, optional_yield y) override;
virtual int get_sync_policy_handler(const DoutPrefixProvider* dpp,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef* phandler,
optional_yield y) override;
virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) override;
virtual void wakeup_meta_sync_shards(std::set<int>& shard_ids) override { rados->wakeup_meta_sync_shards(shard_ids); }
virtual void wakeup_data_sync_shards(const DoutPrefixProvider *dpp, const rgw_zone_id& source_zone, boost::container::flat_map<int, boost::container::flat_set<rgw_data_notify_entry>>& shard_ids) override { rados->wakeup_data_sync_shards(dpp, source_zone, shard_ids); }
virtual int clear_usage(const DoutPrefixProvider *dpp, optional_yield y) override { return rados->clear_usage(dpp, y); }
virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
uint32_t max_entries, bool* is_truncated,
RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y) override;
virtual int get_config_key_val(std::string name, bufferlist* bl) override;
virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) override;
virtual int meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, std::list<std::string>& keys, bool* truncated) override;
virtual void meta_list_keys_complete(void* handle) override;
virtual std::string meta_get_marker(void* handle) override;
virtual int meta_remove(const DoutPrefixProvider* dpp, std::string& metadata_key, optional_yield y) override;
virtual const RGWSyncModuleInstanceRef& get_sync_module() { return rados->get_sync_module(); }
virtual std::string get_host_id() { return rados->host_id; }
virtual std::unique_ptr<LuaManager> get_lua_manager() override;
virtual std::unique_ptr<RGWRole> get_role(std::string name,
std::string tenant,
std::string path="",
std::string trust_policy="",
std::string max_session_duration_str="",
std::multimap<std::string,std::string> tags={}) override;
virtual std::unique_ptr<RGWRole> get_role(std::string id) override;
virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override;
virtual int get_roles(const DoutPrefixProvider *dpp,
optional_yield y,
const std::string& path_prefix,
const std::string& tenant,
std::vector<std::unique_ptr<RGWRole>>& roles) override;
virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override;
virtual int get_oidc_providers(const DoutPrefixProvider *dpp,
const std::string& tenant,
std::vector<std::unique_ptr<RGWOIDCProvider>>& providers, optional_yield y) override;
virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
uint64_t *cur_accounted_size) override;
virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) override;
virtual const std::string& get_compression_type(const rgw_placement_rule& rule) override;
virtual bool valid_placement(const rgw_placement_rule& rule) override;
virtual void finalize(void) override;
virtual CephContext* ctx(void) override { return rados->ctx(); }
virtual void register_admin_apis(RGWRESTMgr* mgr) override;
/* Unique to RadosStore */
int get_obj_head_ioctx(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj,
librados::IoCtx* ioctx);
int delete_raw_obj(const DoutPrefixProvider *dpp, const rgw_raw_obj& obj, optional_yield y);
void get_raw_obj(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_raw_obj* raw_obj);
int get_raw_chunk_size(const DoutPrefixProvider* dpp, const rgw_raw_obj& obj, uint64_t* chunk_size);
void setRados(RGWRados * st) { rados = st; }
RGWRados* getRados(void) { return rados; }
RGWServices* svc() { return &rados->svc; }
const RGWServices* svc() const { return &rados->svc; }
RGWCtl* ctl() { return &rados->ctl; }
const RGWCtl* ctl() const { return &rados->ctl; }
void setUserCtl(RGWUserCtl *_ctl) { user_ctl = _ctl; }
};
class RadosUser : public StoreUser {
private:
RadosStore* store;
public:
RadosUser(RadosStore *_st, const rgw_user& _u) : StoreUser(_u), store(_st) { }
RadosUser(RadosStore *_st, const RGWUserInfo& _i) : StoreUser(_i), store(_st) { }
RadosUser(RadosStore *_st) : store(_st) { }
RadosUser(RadosUser& _o) = default;
virtual std::unique_ptr<User> clone() override {
return std::unique_ptr<User>(new RadosUser(*this));
}
int list_buckets(const DoutPrefixProvider* dpp, const std::string& marker, const std::string& end_marker,
uint64_t max, bool need_stats, BucketList& buckets,
optional_yield y) override;
virtual int create_bucket(const DoutPrefixProvider* dpp,
const rgw_bucket& b,
const std::string& zonegroup_id,
rgw_placement_rule& placement_rule,
std::string& swift_ver_location,
const RGWQuotaInfo * pquota_info,
const RGWAccessControlPolicy& policy,
Attrs& attrs,
RGWBucketInfo& info,
obj_version& ep_objv,
bool exclusive,
bool obj_lock_enabled,
bool* existed,
req_info& req_info,
std::unique_ptr<Bucket>* bucket,
optional_yield y) override;
virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y) override;
virtual int read_stats(const DoutPrefixProvider *dpp,
optional_yield y, RGWStorageStats* stats,
ceph::real_time* last_stats_sync = nullptr,
ceph::real_time* last_stats_update = nullptr) override;
virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) override;
virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y) override;
virtual int load_user(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info = nullptr) override;
virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override;
friend class RadosBucket;
};
class RadosObject : public StoreObject {
private:
RadosStore* store;
RGWAccessControlPolicy acls;
RGWObjManifest *manifest{nullptr};
RGWObjectCtx* rados_ctx;
bool rados_ctx_owned;
public:
struct RadosReadOp : public ReadOp {
private:
RadosObject* source;
RGWObjectCtx* rctx;
RGWRados::Object op_target;
RGWRados::Object::Read parent_op;
public:
RadosReadOp(RadosObject *_source, RGWObjectCtx *_rctx);
virtual int prepare(optional_yield y, const DoutPrefixProvider* dpp) override;
/*
* Both `read` and `iterate` read up through index `end`
* *inclusive*. The number of bytes that could be returned is
* `end - ofs + 1`.
*/
virtual int read(int64_t ofs, int64_t end,
bufferlist& bl, optional_yield y,
const DoutPrefixProvider* dpp) override;
virtual int iterate(const DoutPrefixProvider* dpp,
int64_t ofs, int64_t end,
RGWGetDataCB* cb, optional_yield y) override;
virtual int get_attr(const DoutPrefixProvider* dpp, const char* name, bufferlist& dest, optional_yield y) override;
};
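  // Illustrative read sketch (not part of the API; obj, y and dpp are assumed
  // to be in scope). Because `end` is inclusive, fetching the first 4 KiB
  // passes end = 4095:
  //   std::unique_ptr<ReadOp> op = obj->get_read_op();
  //   int r = op->prepare(y, dpp);
  //   bufferlist bl;
  //   if (r >= 0)
  //     r = op->read(0, 4095, bl, y, dpp);  // end - ofs + 1 = 4096 bytes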
struct RadosDeleteOp : public DeleteOp {
private:
RadosObject* source;
RGWRados::Object op_target;
RGWRados::Object::Delete parent_op;
public:
RadosDeleteOp(RadosObject* _source);
virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override;
};
RadosObject(RadosStore *_st, const rgw_obj_key& _k)
: StoreObject(_k),
store(_st),
acls(),
rados_ctx(new RGWObjectCtx(dynamic_cast<Driver*>(store))),
rados_ctx_owned(true) {
}
RadosObject(RadosStore *_st, const rgw_obj_key& _k, Bucket* _b)
: StoreObject(_k, _b),
store(_st),
acls(),
      rados_ctx(new RGWObjectCtx(dynamic_cast<Driver*>(store))),
rados_ctx_owned(true) {
}
RadosObject(RadosObject& _o) : StoreObject(_o) {
store = _o.store;
acls = _o.acls;
manifest = _o.manifest;
rados_ctx = _o.rados_ctx;
rados_ctx_owned = false;
}
virtual ~RadosObject();
virtual void invalidate() override {
StoreObject::invalidate();
rados_ctx->invalidate(get_obj());
}
virtual int delete_object(const DoutPrefixProvider* dpp,
optional_yield y, bool prevent_versioning) override;
virtual int copy_object(User* user,
req_info* info, const rgw_zone_id& source_zone,
rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
rgw::sal::Bucket* src_bucket,
const rgw_placement_rule& dest_placement,
ceph::real_time* src_mtime, ceph::real_time* mtime,
const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
bool high_precision_time,
const char* if_match, const char* if_nomatch,
AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
RGWObjCategory category, uint64_t olh_epoch,
boost::optional<ceph::real_time> delete_at,
std::string* version_id, std::string* tag, std::string* etag,
void (*progress_cb)(off_t, void *), void* progress_data,
const DoutPrefixProvider* dpp, optional_yield y) override;
virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
virtual int set_acl(const RGWAccessControlPolicy& acl) override { acls = acl; return 0; }
virtual void set_atomic() override {
rados_ctx->set_atomic(state.obj);
StoreObject::set_atomic();
}
virtual void set_prefetch_data() override {
rados_ctx->set_prefetch_data(state.obj);
StoreObject::set_prefetch_data();
}
virtual void set_compressed() override {
rados_ctx->set_compressed(state.obj);
StoreObject::set_compressed();
}
virtual int get_obj_state(const DoutPrefixProvider* dpp, RGWObjState **state, optional_yield y, bool follow_olh = true) override;
virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y) override;
virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj = NULL) override;
virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) override;
virtual int delete_obj_attrs(const DoutPrefixProvider* dpp, const char* attr_name, optional_yield y) override;
virtual bool is_expired() override;
virtual void gen_rand_obj_instance_name() override;
void get_raw_obj(rgw_raw_obj* raw_obj);
virtual std::unique_ptr<Object> clone() override {
return std::unique_ptr<Object>(new RadosObject(*this));
}
virtual std::unique_ptr<MPSerializer> get_serializer(const DoutPrefixProvider *dpp,
const std::string& lock_name) override;
virtual int transition(Bucket* bucket,
const rgw_placement_rule& placement_rule,
const real_time& mtime,
uint64_t olh_epoch,
const DoutPrefixProvider* dpp,
optional_yield y) override;
virtual int transition_to_cloud(Bucket* bucket,
rgw::sal::PlacementTier* tier,
rgw_bucket_dir_entry& o,
std::set<std::string>& cloud_targets,
CephContext* cct,
bool update_object,
const DoutPrefixProvider* dpp,
optional_yield y) override;
virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) override;
virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f) override;
/* Swift versioning */
virtual int swift_versioning_restore(bool& restored,
const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
optional_yield y) override;
/* OPs */
virtual std::unique_ptr<ReadOp> get_read_op() override;
virtual std::unique_ptr<DeleteOp> get_delete_op() override;
virtual int get_torrent_info(const DoutPrefixProvider* dpp,
optional_yield y, bufferlist& bl) override;
/* OMAP */
virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
const std::set<std::string>& keys,
Attrs* vals) override;
virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val,
bool must_exist, optional_yield y) override;
virtual int chown(User& new_user, const DoutPrefixProvider* dpp, optional_yield y) override;
/* Internal to RadosStore */
int get_max_chunk_size(const DoutPrefixProvider* dpp,
rgw_placement_rule placement_rule,
uint64_t* max_chunk_size,
uint64_t* alignment = nullptr);
void get_max_aligned_size(uint64_t size, uint64_t alignment, uint64_t* max_size);
void raw_obj_to_obj(const rgw_raw_obj& raw_obj);
int write_cloud_tier(const DoutPrefixProvider* dpp,
optional_yield y,
uint64_t olh_epoch,
rgw::sal::PlacementTier* tier,
bool is_multipart_upload,
rgw_placement_rule& target_placement,
Object* head_obj);
RGWObjManifest* get_manifest() { return manifest; }
RGWObjectCtx& get_ctx() { return *rados_ctx; }
private:
int read_attrs(const DoutPrefixProvider* dpp, RGWRados::Object::Read &read_op, optional_yield y, rgw_obj* target_obj = nullptr);
};
class RadosBucket : public StoreBucket {
private:
RadosStore* store;
RGWAccessControlPolicy acls;
std::string topics_oid() const;
public:
RadosBucket(RadosStore *_st)
: store(_st),
acls() {
}
RadosBucket(RadosStore *_st, User* _u)
: StoreBucket(_u),
store(_st),
acls() {
}
RadosBucket(RadosStore *_st, const rgw_bucket& _b)
: StoreBucket(_b),
store(_st),
acls() {
}
RadosBucket(RadosStore *_st, const RGWBucketEnt& _e)
: StoreBucket(_e),
store(_st),
acls() {
}
RadosBucket(RadosStore *_st, const RGWBucketInfo& _i)
: StoreBucket(_i),
store(_st),
acls() {
}
RadosBucket(RadosStore *_st, const rgw_bucket& _b, User* _u)
: StoreBucket(_b, _u),
store(_st),
acls() {
}
RadosBucket(RadosStore *_st, const RGWBucketEnt& _e, User* _u)
: StoreBucket(_e, _u),
store(_st),
acls() {
}
RadosBucket(RadosStore *_st, const RGWBucketInfo& _i, User* _u)
: StoreBucket(_i, _u),
store(_st),
acls() {
}
virtual ~RadosBucket();
virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
virtual int list(const DoutPrefixProvider* dpp, ListParams&, int, ListResults&, optional_yield y) override;
virtual int remove_bucket(const DoutPrefixProvider* dpp, bool delete_children, bool forward_to_master, req_info* req_info, optional_yield y) override;
  virtual int remove_bucket_bypass_gc(int concurrent_max,
                                      bool keep_index_consistent,
                                      optional_yield y,
                                      const DoutPrefixProvider *dpp) override;
virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
virtual int set_acl(const DoutPrefixProvider* dpp, RGWAccessControlPolicy& acl, optional_yield y) override;
virtual int load_bucket(const DoutPrefixProvider* dpp, optional_yield y, bool get_stats = false) override;
virtual int read_stats(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, std::string* bucket_ver, std::string* master_ver,
std::map<RGWObjCategory, RGWStorageStats>& stats,
std::string* max_marker = nullptr,
bool* syncstopped = nullptr) override;
virtual int read_stats_async(const DoutPrefixProvider *dpp,
const bucket_index_layout_generation& idx_layout,
int shard_id, RGWGetBucketStats_CB* ctx) override;
virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int update_container_stats(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_bucket_shards(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int chown(const DoutPrefixProvider* dpp, User& new_user, optional_yield y) override;
virtual int put_info(const DoutPrefixProvider* dpp, bool exclusive, ceph::real_time mtime, optional_yield y) override;
virtual bool is_owner(User* user) override;
virtual int check_empty(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& attrs, optional_yield y) override;
virtual int try_refresh_info(const DoutPrefixProvider* dpp, ceph::real_time* pmtime, optional_yield y) override;
virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
bool* is_truncated, RGWUsageIter& usage_iter,
std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, optional_yield y) override;
virtual int remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink) override;
virtual int check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
virtual int rebuild_index(const DoutPrefixProvider *dpp) override;
virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override;
virtual int purge_instance(const DoutPrefixProvider* dpp, optional_yield y) override;
virtual std::unique_ptr<Bucket> clone() override {
return std::make_unique<RadosBucket>(*this);
}
virtual std::unique_ptr<MultipartUpload> get_multipart_upload(
const std::string& oid,
std::optional<std::string> upload_id=std::nullopt,
ACLOwner owner={}, ceph::real_time mtime=real_clock::now()) override;
virtual int list_multiparts(const DoutPrefixProvider *dpp,
const std::string& prefix,
std::string& marker,
const std::string& delim,
const int& max_uploads,
std::vector<std::unique_ptr<MultipartUpload>>& uploads,
std::map<std::string, bool> *common_prefixes,
bool *is_truncated, optional_yield y) override;
virtual int abort_multiparts(const DoutPrefixProvider* dpp,
CephContext* cct, optional_yield y) override;
int read_topics(rgw_pubsub_bucket_topics& notifications, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
int write_topics(const rgw_pubsub_bucket_topics& notifications, RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
int remove_topics(RGWObjVersionTracker* objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override;
private:
int link(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint = true, RGWObjVersionTracker* objv = nullptr);
int unlink(const DoutPrefixProvider* dpp, User* new_user, optional_yield y, bool update_entrypoint = true);
friend class RadosUser;
};
class RadosMultipartPart : public StoreMultipartPart {
protected:
RGWUploadPartInfo info;
public:
RadosMultipartPart() = default;
virtual ~RadosMultipartPart() = default;
virtual uint32_t get_num() { return info.num; }
virtual uint64_t get_size() { return info.accounted_size; }
virtual const std::string& get_etag() { return info.etag; }
virtual ceph::real_time& get_mtime() { return info.modified; }
/* For RadosStore code */
RGWObjManifest& get_manifest() { return info.manifest; }
const std::set<std::string>& get_past_prefixes() const { return info.past_prefixes; }
friend class RadosMultipartUpload;
};
class RadosMultipartUpload : public StoreMultipartUpload {
RadosStore* store;
RGWMPObj mp_obj;
ACLOwner owner;
ceph::real_time mtime;
rgw_placement_rule placement;
RGWObjManifest manifest;
public:
RadosMultipartUpload(RadosStore* _store, Bucket* _bucket, const std::string& oid,
std::optional<std::string> upload_id, ACLOwner owner,
ceph::real_time _mtime)
: StoreMultipartUpload(_bucket), store(_store), mp_obj(oid, upload_id),
owner(owner), mtime(_mtime) {}
virtual ~RadosMultipartUpload() = default;
virtual const std::string& get_meta() const override { return mp_obj.get_meta(); }
virtual const std::string& get_key() const override { return mp_obj.get_key(); }
virtual const std::string& get_upload_id() const override { return mp_obj.get_upload_id(); }
virtual const ACLOwner& get_owner() const override { return owner; }
virtual ceph::real_time& get_mtime() override { return mtime; }
virtual std::unique_ptr<rgw::sal::Object> get_meta_obj() override;
virtual int init(const DoutPrefixProvider* dpp, optional_yield y, ACLOwner& owner, rgw_placement_rule& dest_placement, rgw::sal::Attrs& attrs) override;
virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct,
int num_parts, int marker,
int* next_marker, bool* truncated, optional_yield y,
bool assume_unsorted = false) override;
virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct, optional_yield y) override;
virtual int complete(const DoutPrefixProvider* dpp,
optional_yield y, CephContext* cct,
std::map<int, std::string>& part_etags,
std::list<rgw_obj_index_key>& remove_objs,
uint64_t& accounted_size, bool& compressed,
RGWCompressionInfo& cs_info, off_t& ofs,
std::string& tag, ACLOwner& owner,
uint64_t olh_epoch,
rgw::sal::Object* target_obj) override;
virtual int get_info(const DoutPrefixProvider *dpp, optional_yield y, rgw_placement_rule** rule, rgw::sal::Attrs* attrs = nullptr) override;
virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp,
optional_yield y,
rgw::sal::Object* obj,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num,
const std::string& part_num_str) override;
protected:
int cleanup_part_history(const DoutPrefixProvider* dpp,
optional_yield y,
RadosMultipartPart* part,
std::list<rgw_obj_index_key>& remove_objs);
};
class MPRadosSerializer : public StoreMPSerializer {
librados::IoCtx ioctx;
rados::cls::lock::Lock lock;
librados::ObjectWriteOperation op;
public:
MPRadosSerializer(const DoutPrefixProvider *dpp, RadosStore* store, RadosObject* obj, const std::string& lock_name);
virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override;
virtual int unlock() override {
return lock.unlock(&ioctx, oid);
}
};
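// Illustrative locking sketch (a sketch only; the lock name and 60-second
// duration are hypothetical, and dpp/y are assumed to be in scope):
//   auto s = obj->get_serializer(dpp, "RGW.multipart");
//   if (s->try_lock(dpp, utime_t(60, 0), y) == 0) {
//     // ... exclusive section backed by cls_lock on the underlying object
//     s->unlock();
//   }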
class LCRadosSerializer : public StoreLCSerializer {
librados::IoCtx* ioctx;
rados::cls::lock::Lock lock;
public:
LCRadosSerializer(RadosStore* store, const std::string& oid, const std::string& lock_name, const std::string& cookie);
virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override;
virtual int unlock() override {
return lock.unlock(ioctx, oid);
}
};
class RadosLifecycle : public StoreLifecycle {
RadosStore* store;
public:
RadosLifecycle(RadosStore* _st) : store(_st) {}
using StoreLifecycle::get_entry;
virtual int get_entry(const std::string& oid, const std::string& marker, std::unique_ptr<LCEntry>* entry) override;
virtual int get_next_entry(const std::string& oid, const std::string& marker, std::unique_ptr<LCEntry>* entry) override;
virtual int set_entry(const std::string& oid, LCEntry& entry) override;
virtual int list_entries(const std::string& oid, const std::string& marker,
uint32_t max_entries,
std::vector<std::unique_ptr<LCEntry>>& entries) override;
virtual int rm_entry(const std::string& oid, LCEntry& entry) override;
virtual int get_head(const std::string& oid, std::unique_ptr<LCHead>* head) override;
virtual int put_head(const std::string& oid, LCHead& head) override;
virtual std::unique_ptr<LCSerializer> get_serializer(const std::string& lock_name,
const std::string& oid,
const std::string& cookie) override;
};
class RadosNotification : public StoreNotification {
RadosStore* store;
/* XXX it feels incorrect to me that rgw::notify::reservation_t is
* currently RADOS-specific; instead, I think notification types such as
* reservation_t should be generally visible, whereas the internal
* notification behavior should be made portable (e.g., notification
* to non-RADOS message sinks) */
rgw::notify::reservation_t res;
public:
RadosNotification(const DoutPrefixProvider* _dpp, RadosStore* _store, Object* _obj, Object* _src_obj, req_state* _s, rgw::notify::EventType _type, optional_yield y, const std::string* object_name) :
StoreNotification(_obj, _src_obj, _type), store(_store), res(_dpp, _store, _s, _obj, _src_obj, object_name, y) { }
RadosNotification(const DoutPrefixProvider* _dpp, RadosStore* _store, Object* _obj, Object* _src_obj, rgw::notify::EventType _type, rgw::sal::Bucket* _bucket, std::string& _user_id, std::string& _user_tenant, std::string& _req_id, optional_yield y) :
StoreNotification(_obj, _src_obj, _type), store(_store), res(_dpp, _store, _obj, _src_obj, _bucket, _user_id, _user_tenant, _req_id, y) {}
~RadosNotification() = default;
rgw::notify::reservation_t& get_reservation(void) {
return res;
}
virtual int publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags = nullptr) override;
virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size,
const ceph::real_time& mtime, const std::string& etag, const std::string& version) override;
};
class RadosAtomicWriter : public StoreWriter {
protected:
rgw::sal::RadosStore* store;
std::unique_ptr<Aio> aio;
RGWObjectCtx& obj_ctx;
rgw::putobj::AtomicObjectProcessor processor;
public:
RadosAtomicWriter(const DoutPrefixProvider *dpp,
optional_yield y,
RGWBucketInfo& bucket_info,
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
RadosStore* _store, std::unique_ptr<Aio> _aio,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t olh_epoch,
const std::string& unique_tag) :
StoreWriter(dpp, y),
store(_store),
aio(std::move(_aio)),
obj_ctx(obj_ctx),
processor(&*aio, store->getRados(), bucket_info,
ptail_placement_rule, owner, obj_ctx,
obj, olh_epoch, unique_tag,
dpp, y)
{}
~RadosAtomicWriter() = default;
// prepare to start processing object data
virtual int prepare(optional_yield y) override;
// Process a bufferlist
virtual int process(bufferlist&& data, uint64_t offset) override;
// complete the operation and make its result visible to clients
virtual int complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y) override;
};
class RadosAppendWriter : public StoreWriter {
protected:
rgw::sal::RadosStore* store;
std::unique_ptr<Aio> aio;
RGWObjectCtx& obj_ctx;
rgw::putobj::AppendObjectProcessor processor;
public:
RadosAppendWriter(const DoutPrefixProvider *dpp,
optional_yield y,
RGWBucketInfo& bucket_info,
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
RadosStore* _store, std::unique_ptr<Aio> _aio,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
const std::string& unique_tag,
uint64_t position,
uint64_t *cur_accounted_size) :
StoreWriter(dpp, y),
store(_store),
aio(std::move(_aio)),
obj_ctx(obj_ctx),
processor(&*aio, store->getRados(), bucket_info,
ptail_placement_rule, owner, obj_ctx,
obj, unique_tag, position,
cur_accounted_size, dpp, y)
{}
~RadosAppendWriter() = default;
// prepare to start processing object data
virtual int prepare(optional_yield y) override;
// Process a bufferlist
virtual int process(bufferlist&& data, uint64_t offset) override;
// complete the operation and make its result visible to clients
virtual int complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y) override;
};
class RadosMultipartWriter : public StoreWriter {
protected:
rgw::sal::RadosStore* store;
std::unique_ptr<Aio> aio;
RGWObjectCtx& obj_ctx;
rgw::putobj::MultipartObjectProcessor processor;
public:
RadosMultipartWriter(const DoutPrefixProvider *dpp,
optional_yield y, const std::string& upload_id,
RGWBucketInfo& bucket_info,
RGWObjectCtx& obj_ctx,
const rgw_obj& obj,
RadosStore* _store, std::unique_ptr<Aio> _aio,
const rgw_user& owner,
const rgw_placement_rule *ptail_placement_rule,
uint64_t part_num, const std::string& part_num_str) :
StoreWriter(dpp, y),
store(_store),
aio(std::move(_aio)),
obj_ctx(obj_ctx),
processor(&*aio, store->getRados(), bucket_info,
ptail_placement_rule, owner, obj_ctx,
obj, upload_id,
part_num, part_num_str, dpp, y)
{}
~RadosMultipartWriter() = default;
// prepare to start processing object data
virtual int prepare(optional_yield y) override;
// Process a bufferlist
virtual int process(bufferlist&& data, uint64_t offset) override;
// complete the operation and make its result visible to clients
virtual int complete(size_t accounted_size, const std::string& etag,
ceph::real_time *mtime, ceph::real_time set_mtime,
std::map<std::string, bufferlist>& attrs,
ceph::real_time delete_at,
const char *if_match, const char *if_nomatch,
const std::string *user_data,
rgw_zone_set *zones_trace, bool *canceled,
optional_yield y) override;
};
class RadosLuaManager : public StoreLuaManager {
RadosStore* const store;
rgw_pool pool;
public:
RadosLuaManager(RadosStore* _s);
virtual ~RadosLuaManager() = default;
virtual int get_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, std::string& script);
virtual int put_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, const std::string& script);
virtual int del_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key);
virtual int add_package(const DoutPrefixProvider* dpp, optional_yield y, const std::string& package_name);
virtual int remove_package(const DoutPrefixProvider* dpp, optional_yield y, const std::string& package_name);
virtual int list_packages(const DoutPrefixProvider* dpp, optional_yield y, rgw::lua::packages_t& packages);
};
class RadosOIDCProvider : public RGWOIDCProvider {
RadosStore* store;
public:
RadosOIDCProvider(RadosStore* _store) : store(_store) {}
~RadosOIDCProvider() = default;
virtual int store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y) override;
virtual int read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant, optional_yield y) override;
virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override;
void encode(bufferlist& bl) const {
RGWOIDCProvider::encode(bl);
}
void decode(bufferlist::const_iterator& bl) {
RGWOIDCProvider::decode(bl);
}
};
class RadosRole : public RGWRole {
RadosStore* store;
public:
RadosRole(RadosStore* _store, std::string name,
std::string tenant,
std::string path,
std::string trust_policy,
std::string max_session_duration,
std::multimap<std::string,std::string> tags) : RGWRole(name, tenant, path, trust_policy, max_session_duration, tags), store(_store) {}
RadosRole(RadosStore* _store, std::string id) : RGWRole(id), store(_store) {}
RadosRole(RadosStore* _store, const RGWRoleInfo& info) : RGWRole(info), store(_store) {}
RadosRole(RadosStore* _store) : store(_store) {}
~RadosRole() = default;
virtual int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) override;
virtual int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) override;
virtual int store_path(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) override;
virtual int read_id(const DoutPrefixProvider *dpp, const std::string& role_name, const std::string& tenant, std::string& role_id, optional_yield y) override;
virtual int read_name(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int read_info(const DoutPrefixProvider *dpp, optional_yield y) override;
virtual int create(const DoutPrefixProvider *dpp, bool exclusive, const std::string& role_id, optional_yield y) override;
virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override;
};
}} // namespace rgw::sal
WRITE_CLASS_ENCODER(rgw::sal::RadosOIDCProvider)
| 45,173 | 45.95842 | 272 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_service.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_service.h"
#include "services/svc_finisher.h"
#include "services/svc_bi_rados.h"
#include "services/svc_bilog_rados.h"
#include "services/svc_bucket_sobj.h"
#include "services/svc_bucket_sync_sobj.h"
#include "services/svc_cls.h"
#include "services/svc_config_key_rados.h"
#include "services/svc_mdlog.h"
#include "services/svc_meta.h"
#include "services/svc_meta_be.h"
#include "services/svc_meta_be_sobj.h"
#include "services/svc_meta_be_otp.h"
#include "services/svc_notify.h"
#include "services/svc_otp.h"
#include "services/svc_rados.h"
#include "services/svc_zone.h"
#include "services/svc_zone_utils.h"
#include "services/svc_quota.h"
#include "services/svc_sync_modules.h"
#include "services/svc_sys_obj.h"
#include "services/svc_sys_obj_cache.h"
#include "services/svc_sys_obj_core.h"
#include "services/svc_user_rados.h"
#include "services/svc_role_rados.h"
#include "common/errno.h"
#include "rgw_bucket.h"
#include "rgw_datalog.h"
#include "rgw_metadata.h"
#include "rgw_otp.h"
#include "rgw_user.h"
#include "rgw_role.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
RGWServices_Def::RGWServices_Def() = default;
RGWServices_Def::~RGWServices_Def()
{
shutdown();
}
int RGWServices_Def::init(CephContext *cct,
bool have_cache,
bool raw,
bool run_sync,
optional_yield y,
const DoutPrefixProvider *dpp)
{
finisher = std::make_unique<RGWSI_Finisher>(cct);
bucket_sobj = std::make_unique<RGWSI_Bucket_SObj>(cct);
bucket_sync_sobj = std::make_unique<RGWSI_Bucket_Sync_SObj>(cct);
bi_rados = std::make_unique<RGWSI_BucketIndex_RADOS>(cct);
bilog_rados = std::make_unique<RGWSI_BILog_RADOS>(cct);
cls = std::make_unique<RGWSI_Cls>(cct);
config_key_rados = std::make_unique<RGWSI_ConfigKey_RADOS>(cct);
datalog_rados = std::make_unique<RGWDataChangesLog>(cct);
mdlog = std::make_unique<RGWSI_MDLog>(cct, run_sync);
meta = std::make_unique<RGWSI_Meta>(cct);
meta_be_sobj = std::make_unique<RGWSI_MetaBackend_SObj>(cct);
meta_be_otp = std::make_unique<RGWSI_MetaBackend_OTP>(cct);
notify = std::make_unique<RGWSI_Notify>(cct);
otp = std::make_unique<RGWSI_OTP>(cct);
rados = std::make_unique<RGWSI_RADOS>(cct);
zone = std::make_unique<RGWSI_Zone>(cct);
zone_utils = std::make_unique<RGWSI_ZoneUtils>(cct);
quota = std::make_unique<RGWSI_Quota>(cct);
sync_modules = std::make_unique<RGWSI_SyncModules>(cct);
sysobj = std::make_unique<RGWSI_SysObj>(cct);
sysobj_core = std::make_unique<RGWSI_SysObj_Core>(cct);
user_rados = std::make_unique<RGWSI_User_RADOS>(cct);
role_rados = std::make_unique<RGWSI_Role_RADOS>(cct);
if (have_cache) {
sysobj_cache = std::make_unique<RGWSI_SysObj_Cache>(dpp, cct);
}
vector<RGWSI_MetaBackend *> meta_bes{meta_be_sobj.get(), meta_be_otp.get()};
finisher->init();
bi_rados->init(zone.get(), rados.get(), bilog_rados.get(), datalog_rados.get());
bilog_rados->init(bi_rados.get());
bucket_sobj->init(zone.get(), sysobj.get(), sysobj_cache.get(),
bi_rados.get(), meta.get(), meta_be_sobj.get(),
sync_modules.get(), bucket_sync_sobj.get());
bucket_sync_sobj->init(zone.get(),
sysobj.get(),
sysobj_cache.get(),
bucket_sobj.get());
cls->init(zone.get(), rados.get());
config_key_rados->init(rados.get());
mdlog->init(rados.get(), zone.get(), sysobj.get(), cls.get());
meta->init(sysobj.get(), mdlog.get(), meta_bes);
meta_be_sobj->init(sysobj.get(), mdlog.get());
meta_be_otp->init(sysobj.get(), mdlog.get(), cls.get());
notify->init(zone.get(), rados.get(), finisher.get());
otp->init(zone.get(), meta.get(), meta_be_otp.get());
rados->init();
zone->init(sysobj.get(), rados.get(), sync_modules.get(), bucket_sync_sobj.get());
zone_utils->init(rados.get(), zone.get());
quota->init(zone.get());
sync_modules->init(zone.get());
sysobj_core->core_init(rados.get(), zone.get());
if (have_cache) {
sysobj_cache->init(rados.get(), zone.get(), notify.get());
sysobj->init(rados.get(), sysobj_cache.get());
} else {
sysobj->init(rados.get(), sysobj_core.get());
}
user_rados->init(rados.get(), zone.get(), sysobj.get(), sysobj_cache.get(),
meta.get(), meta_be_sobj.get(), sync_modules.get());
role_rados->init(zone.get(), meta.get(), meta_be_sobj.get(), sysobj.get());
can_shutdown = true;
int r = finisher->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start finisher service (" << cpp_strerror(-r) << dendl;
return r;
}
if (!raw) {
r = notify->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start notify service (" << cpp_strerror(-r) << dendl;
return r;
}
}
r = rados->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start rados service (" << cpp_strerror(-r) << dendl;
return r;
}
if (!raw) {
r = zone->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start zone service (" << cpp_strerror(-r) << dendl;
return r;
}
r = datalog_rados->start(dpp, &zone->get_zone(),
zone->get_zone_params(),
rados->get_rados_handle());
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start datalog_rados service (" << cpp_strerror(-r) << dendl;
return r;
}
r = mdlog->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start mdlog service (" << cpp_strerror(-r) << dendl;
return r;
}
r = sync_modules->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start sync modules service (" << cpp_strerror(-r) << dendl;
return r;
}
}
r = cls->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start cls service (" << cpp_strerror(-r) << dendl;
return r;
}
r = config_key_rados->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start config_key service (" << cpp_strerror(-r) << dendl;
return r;
}
r = zone_utils->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start zone_utils service (" << cpp_strerror(-r) << dendl;
return r;
}
r = quota->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start quota service (" << cpp_strerror(-r) << dendl;
return r;
}
r = sysobj_core->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj_core service (" << cpp_strerror(-r) << dendl;
return r;
}
if (have_cache) {
r = sysobj_cache->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj_cache service (" << cpp_strerror(-r) << dendl;
return r;
}
}
r = sysobj->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start sysobj service (" << cpp_strerror(-r) << dendl;
return r;
}
if (!raw) {
r = meta_be_sobj->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start meta_be_sobj service (" << cpp_strerror(-r) << dendl;
return r;
}
r = meta->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start meta service (" << cpp_strerror(-r) << dendl;
return r;
}
r = bucket_sobj->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start bucket service (" << cpp_strerror(-r) << dendl;
return r;
}
r = bucket_sync_sobj->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start bucket_sync service (" << cpp_strerror(-r) << dendl;
return r;
}
r = user_rados->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start user_rados service (" << cpp_strerror(-r) << dendl;
return r;
}
r = otp->start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start otp service (" << cpp_strerror(-r) << dendl;
return r;
}
r = role_rados->start(y, dpp);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start role_rados service (" << cpp_strerror(-r) << dendl;
return r;
}
}
/* cache or core services will be started by sysobj */
return 0;
}
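// Note on the structure of init() above: services are first constructed, then
// wired together via their init() methods, and only then started in rough
// dependency order. The wiring must complete before the first start(), since
// a service's start() may call into any of its peers.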
void RGWServices_Def::shutdown()
{
if (!can_shutdown) {
return;
}
if (has_shutdown) {
return;
}
role_rados->shutdown();
datalog_rados.reset();
user_rados->shutdown();
sync_modules->shutdown();
otp->shutdown();
notify->shutdown();
meta_be_otp->shutdown();
meta_be_sobj->shutdown();
meta->shutdown();
mdlog->shutdown();
config_key_rados->shutdown();
cls->shutdown();
bilog_rados->shutdown();
bi_rados->shutdown();
bucket_sync_sobj->shutdown();
bucket_sobj->shutdown();
finisher->shutdown();
sysobj->shutdown();
sysobj_core->shutdown();
if (sysobj_cache) {
sysobj_cache->shutdown();
}
quota->shutdown();
zone_utils->shutdown();
zone->shutdown();
rados->shutdown();
has_shutdown = true;
}
int RGWServices::do_init(CephContext *_cct, bool have_cache, bool raw, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp)
{
cct = _cct;
int r = _svc.init(cct, have_cache, raw, run_sync, y, dpp);
if (r < 0) {
return r;
}
finisher = _svc.finisher.get();
bi_rados = _svc.bi_rados.get();
bi = bi_rados;
bilog_rados = _svc.bilog_rados.get();
bucket_sobj = _svc.bucket_sobj.get();
bucket = bucket_sobj;
bucket_sync_sobj = _svc.bucket_sync_sobj.get();
bucket_sync = bucket_sync_sobj;
cls = _svc.cls.get();
config_key_rados = _svc.config_key_rados.get();
config_key = config_key_rados;
datalog_rados = _svc.datalog_rados.get();
mdlog = _svc.mdlog.get();
meta = _svc.meta.get();
meta_be_sobj = _svc.meta_be_sobj.get();
meta_be_otp = _svc.meta_be_otp.get();
notify = _svc.notify.get();
otp = _svc.otp.get();
rados = _svc.rados.get();
zone = _svc.zone.get();
zone_utils = _svc.zone_utils.get();
quota = _svc.quota.get();
sync_modules = _svc.sync_modules.get();
sysobj = _svc.sysobj.get();
cache = _svc.sysobj_cache.get();
core = _svc.sysobj_core.get();
user = _svc.user_rados.get();
role = _svc.role_rados.get();
return 0;
}
RGWServiceInstance::~RGWServiceInstance() {}
int RGWServiceInstance::start(optional_yield y, const DoutPrefixProvider *dpp)
{
if (start_state != StateInit) {
return 0;
}
  start_state = StateStarting; /* set state prior to do_start() on purpose so that circular
                                  references can call start() on each other */
int r = do_start(y, dpp);
if (r < 0) {
return r;
}
start_state = StateStarted;
return 0;
}
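/* A minimal sketch of the start() contract (illustrative only; `svc` stands
 * for any concrete RGWServiceInstance):
 *
 *   int r = svc.start(y, dpp);  // runs do_start() on the first call
 *   r = svc.start(y, dpp);      // later (or re-entrant) calls return 0
 *
 * Because start_state flips to StateStarting before do_start() runs, two
 * services that start() each other do not recurse forever.
 */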
RGWCtlDef::RGWCtlDef() {}
RGWCtlDef::~RGWCtlDef() {}
RGWCtlDef::_meta::_meta() {}
RGWCtlDef::_meta::~_meta() {}
int RGWCtlDef::init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp)
{
meta.mgr.reset(new RGWMetadataManager(svc.meta));
meta.user.reset(RGWUserMetaHandlerAllocator::alloc(svc.user));
auto sync_module = svc.sync_modules->get_sync_module();
if (sync_module) {
meta.bucket.reset(sync_module->alloc_bucket_meta_handler());
meta.bucket_instance.reset(sync_module->alloc_bucket_instance_meta_handler(driver));
} else {
meta.bucket.reset(RGWBucketMetaHandlerAllocator::alloc());
meta.bucket_instance.reset(RGWBucketInstanceMetaHandlerAllocator::alloc(driver));
}
meta.otp.reset(RGWOTPMetaHandlerAllocator::alloc());
meta.role = std::make_unique<rgw::sal::RGWRoleMetadataHandler>(driver, svc.role);
user.reset(new RGWUserCtl(svc.zone, svc.user, (RGWUserMetadataHandler *)meta.user.get()));
bucket.reset(new RGWBucketCtl(svc.zone,
svc.bucket,
svc.bucket_sync,
svc.bi, svc.user));
otp.reset(new RGWOTPCtl(svc.zone, svc.otp));
RGWBucketMetadataHandlerBase *bucket_meta_handler = static_cast<RGWBucketMetadataHandlerBase *>(meta.bucket.get());
RGWBucketInstanceMetadataHandlerBase *bi_meta_handler = static_cast<RGWBucketInstanceMetadataHandlerBase *>(meta.bucket_instance.get());
bucket_meta_handler->init(svc.bucket, bucket.get());
bi_meta_handler->init(svc.zone, svc.bucket, svc.bi);
RGWOTPMetadataHandlerBase *otp_handler = static_cast<RGWOTPMetadataHandlerBase *>(meta.otp.get());
otp_handler->init(svc.zone, svc.meta_be_otp, svc.otp);
user->init(bucket.get());
bucket->init(user.get(),
(RGWBucketMetadataHandler *)bucket_meta_handler,
(RGWBucketInstanceMetadataHandler *)bi_meta_handler,
svc.datalog_rados,
dpp);
otp->init((RGWOTPMetadataHandler *)meta.otp.get());
return 0;
}
int RGWCtl::init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp)
{
svc = _svc;
cct = svc->cct;
int r = _ctl.init(*svc, driver, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start init ctls (" << cpp_strerror(-r) << dendl;
return r;
}
meta.mgr = _ctl.meta.mgr.get();
meta.user = _ctl.meta.user.get();
meta.bucket = _ctl.meta.bucket.get();
meta.bucket_instance = _ctl.meta.bucket_instance.get();
meta.otp = _ctl.meta.otp.get();
meta.role = _ctl.meta.role.get();
user = _ctl.user.get();
bucket = _ctl.bucket.get();
otp = _ctl.otp.get();
r = meta.user->attach(meta.mgr);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start init meta.user ctl (" << cpp_strerror(-r) << dendl;
return r;
}
r = meta.bucket->attach(meta.mgr);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start init meta.bucket ctl (" << cpp_strerror(-r) << dendl;
return r;
}
r = meta.bucket_instance->attach(meta.mgr);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start init meta.bucket_instance ctl (" << cpp_strerror(-r) << dendl;
return r;
}
r = meta.otp->attach(meta.mgr);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start init otp ctl (" << cpp_strerror(-r) << dendl;
return r;
}
r = meta.role->attach(meta.mgr);
if (r < 0) {
ldout(cct, 0) << "ERROR: failed to start init otp ctl (" << cpp_strerror(-r) << dendl;
return r;
}
return 0;
}
| 14,354 | 29.09434 | 138 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_service.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <vector>
#include <memory>
#include "common/async/yield_context.h"
#include "rgw_common.h"
struct RGWServices_Def;
class RGWServiceInstance
{
friend struct RGWServices_Def;
protected:
CephContext *cct;
enum StartState {
StateInit = 0,
StateStarting = 1,
StateStarted = 2,
} start_state{StateInit};
virtual void shutdown() {}
virtual int do_start(optional_yield, const DoutPrefixProvider *dpp) {
return 0;
}
public:
RGWServiceInstance(CephContext *_cct) : cct(_cct) {}
virtual ~RGWServiceInstance();
int start(optional_yield y, const DoutPrefixProvider *dpp);
bool is_started() {
return (start_state == StateStarted);
}
CephContext *ctx() {
return cct;
}
};
class RGWSI_Finisher;
class RGWSI_Bucket;
class RGWSI_Bucket_SObj;
class RGWSI_Bucket_Sync;
class RGWSI_Bucket_Sync_SObj;
class RGWSI_BucketIndex;
class RGWSI_BucketIndex_RADOS;
class RGWSI_BILog_RADOS;
class RGWSI_Cls;
class RGWSI_ConfigKey;
class RGWSI_ConfigKey_RADOS;
class RGWSI_MDLog;
class RGWSI_Meta;
class RGWSI_MetaBackend;
class RGWSI_MetaBackend_SObj;
class RGWSI_MetaBackend_OTP;
class RGWSI_Notify;
class RGWSI_OTP;
class RGWSI_RADOS;
class RGWSI_Zone;
class RGWSI_ZoneUtils;
class RGWSI_Quota;
class RGWSI_SyncModules;
class RGWSI_SysObj;
class RGWSI_SysObj_Core;
class RGWSI_SysObj_Cache;
class RGWSI_User;
class RGWSI_User_RADOS;
class RGWDataChangesLog;
class RGWSI_Role_RADOS;
struct RGWServices_Def
{
bool can_shutdown{false};
bool has_shutdown{false};
std::unique_ptr<RGWSI_Finisher> finisher;
std::unique_ptr<RGWSI_Bucket_SObj> bucket_sobj;
std::unique_ptr<RGWSI_Bucket_Sync_SObj> bucket_sync_sobj;
std::unique_ptr<RGWSI_BucketIndex_RADOS> bi_rados;
std::unique_ptr<RGWSI_BILog_RADOS> bilog_rados;
std::unique_ptr<RGWSI_Cls> cls;
std::unique_ptr<RGWSI_ConfigKey_RADOS> config_key_rados;
std::unique_ptr<RGWSI_MDLog> mdlog;
std::unique_ptr<RGWSI_Meta> meta;
std::unique_ptr<RGWSI_MetaBackend_SObj> meta_be_sobj;
std::unique_ptr<RGWSI_MetaBackend_OTP> meta_be_otp;
std::unique_ptr<RGWSI_Notify> notify;
std::unique_ptr<RGWSI_OTP> otp;
std::unique_ptr<RGWSI_RADOS> rados;
std::unique_ptr<RGWSI_Zone> zone;
std::unique_ptr<RGWSI_ZoneUtils> zone_utils;
std::unique_ptr<RGWSI_Quota> quota;
std::unique_ptr<RGWSI_SyncModules> sync_modules;
std::unique_ptr<RGWSI_SysObj> sysobj;
std::unique_ptr<RGWSI_SysObj_Core> sysobj_core;
std::unique_ptr<RGWSI_SysObj_Cache> sysobj_cache;
std::unique_ptr<RGWSI_User_RADOS> user_rados;
std::unique_ptr<RGWDataChangesLog> datalog_rados;
std::unique_ptr<RGWSI_Role_RADOS> role_rados;
RGWServices_Def();
~RGWServices_Def();
int init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp);
void shutdown();
};
struct RGWServices
{
RGWServices_Def _svc;
CephContext *cct;
RGWSI_Finisher *finisher{nullptr};
RGWSI_Bucket *bucket{nullptr};
RGWSI_Bucket_SObj *bucket_sobj{nullptr};
RGWSI_Bucket_Sync *bucket_sync{nullptr};
RGWSI_Bucket_Sync_SObj *bucket_sync_sobj{nullptr};
RGWSI_BucketIndex *bi{nullptr};
RGWSI_BucketIndex_RADOS *bi_rados{nullptr};
RGWSI_BILog_RADOS *bilog_rados{nullptr};
RGWSI_Cls *cls{nullptr};
RGWSI_ConfigKey_RADOS *config_key_rados{nullptr};
RGWSI_ConfigKey *config_key{nullptr};
RGWDataChangesLog *datalog_rados{nullptr};
RGWSI_MDLog *mdlog{nullptr};
RGWSI_Meta *meta{nullptr};
RGWSI_MetaBackend *meta_be_sobj{nullptr};
RGWSI_MetaBackend *meta_be_otp{nullptr};
RGWSI_Notify *notify{nullptr};
RGWSI_OTP *otp{nullptr};
RGWSI_RADOS *rados{nullptr};
RGWSI_Zone *zone{nullptr};
RGWSI_ZoneUtils *zone_utils{nullptr};
RGWSI_Quota *quota{nullptr};
RGWSI_SyncModules *sync_modules{nullptr};
RGWSI_SysObj *sysobj{nullptr};
RGWSI_SysObj_Cache *cache{nullptr};
RGWSI_SysObj_Core *core{nullptr};
RGWSI_User *user{nullptr};
RGWSI_Role_RADOS *role{nullptr};
int do_init(CephContext *cct, bool have_cache, bool raw_storage, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp);
int init(CephContext *cct, bool have_cache, bool run_sync, optional_yield y, const DoutPrefixProvider *dpp) {
return do_init(cct, have_cache, false, run_sync, y, dpp);
}
int init_raw(CephContext *cct, bool have_cache, optional_yield y, const DoutPrefixProvider *dpp) {
return do_init(cct, have_cache, true, false, y, dpp);
}
void shutdown() {
_svc.shutdown();
}
};
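/* Typical boot sequence (a sketch, not taken from any particular caller):
 *
 *   RGWServices svc;
 *   int r = svc.init(cct, have_cache, run_sync, y, dpp); // full service stack
 *   // or: svc.init_raw(cct, have_cache, y, dpp);        // raw storage only
 *   ...
 *   svc.shutdown();
 */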
class RGWMetadataManager;
class RGWMetadataHandler;
class RGWUserCtl;
class RGWBucketCtl;
class RGWOTPCtl;
struct RGWCtlDef {
struct _meta {
std::unique_ptr<RGWMetadataManager> mgr;
std::unique_ptr<RGWMetadataHandler> bucket;
std::unique_ptr<RGWMetadataHandler> bucket_instance;
std::unique_ptr<RGWMetadataHandler> user;
std::unique_ptr<RGWMetadataHandler> otp;
std::unique_ptr<RGWMetadataHandler> role;
_meta();
~_meta();
} meta;
std::unique_ptr<RGWUserCtl> user;
std::unique_ptr<RGWBucketCtl> bucket;
std::unique_ptr<RGWOTPCtl> otp;
RGWCtlDef();
~RGWCtlDef();
int init(RGWServices& svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp);
};
struct RGWCtl {
CephContext *cct{nullptr};
RGWServices *svc{nullptr};
RGWCtlDef _ctl;
struct _meta {
RGWMetadataManager *mgr{nullptr};
RGWMetadataHandler *bucket{nullptr};
RGWMetadataHandler *bucket_instance{nullptr};
RGWMetadataHandler *user{nullptr};
RGWMetadataHandler *otp{nullptr};
RGWMetadataHandler *role{nullptr};
} meta;
RGWUserCtl *user{nullptr};
RGWBucketCtl *bucket{nullptr};
RGWOTPCtl *otp{nullptr};
int init(RGWServices *_svc, rgw::sal::Driver* driver, const DoutPrefixProvider *dpp);
};
| 5,845 | 26.064815 | 131 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_sync.h"
#include "rgw_rest_conn.h"
#include "rgw_cr_rados.h"
#include "rgw_cr_rest.h"
#include "services/svc_zone.h"
#include "services/svc_mdlog.h"
#include "services/svc_cls.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
#define dout_prefix (*_dout << "meta sync: ")
using namespace std;
static string mdlog_sync_status_oid = "mdlog.sync-status";
static string mdlog_sync_status_shard_prefix = "mdlog.sync-status.shard";
static string mdlog_sync_full_sync_index_prefix = "meta.full-sync.index";
static const string meta_sync_bids_oid = "meta-sync-bids";
RGWContinuousLeaseCR::~RGWContinuousLeaseCR() {}
RGWSyncErrorLogger::RGWSyncErrorLogger(rgw::sal::RadosStore* _store, const string &oid_prefix, int _num_shards) : store(_store), num_shards(_num_shards) {
for (int i = 0; i < num_shards; i++) {
oids.push_back(get_shard_oid(oid_prefix, i));
}
}
string RGWSyncErrorLogger::get_shard_oid(const string& oid_prefix, int shard_id) {
char buf[oid_prefix.size() + 16];
snprintf(buf, sizeof(buf), "%s.%d", oid_prefix.c_str(), shard_id);
return string(buf);
}
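// For example, an oid_prefix of "sync.error-log" with 4 shards yields
// "sync.error-log.0" .. "sync.error-log.3"; log_error_cr() below distributes
// entries across the shards round-robin via the atomic counter.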
RGWCoroutine *RGWSyncErrorLogger::log_error_cr(const DoutPrefixProvider *dpp, const string& source_zone, const string& section, const string& name, uint32_t error_code, const string& message) {
cls_log_entry entry;
rgw_sync_error_info info(source_zone, error_code, message);
bufferlist bl;
encode(info, bl);
store->svc()->cls->timelog.prepare_entry(entry, real_clock::now(), section, name, bl);
uint32_t shard_id = ++counter % num_shards;
return new RGWRadosTimelogAddCR(dpp, store, oids[shard_id], entry);
}
void RGWSyncBackoff::update_wait_time()
{
if (cur_wait == 0) {
cur_wait = 1;
} else {
cur_wait = (cur_wait << 1);
}
if (cur_wait >= max_secs) {
cur_wait = max_secs;
}
}
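// Worked example: with max_secs = 30 the successive waits are
// 1, 2, 4, 8, 16, 30, 30, ... seconds (doubling, clamped to max_secs).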
void RGWSyncBackoff::backoff_sleep()
{
update_wait_time();
sleep(cur_wait);
}
void RGWSyncBackoff::backoff(RGWCoroutine *op)
{
update_wait_time();
op->wait(utime_t(cur_wait, 0));
}
int RGWBackoffControlCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
// retry the operation until it succeeds
while (true) {
yield {
std::lock_guard l{lock};
cr = alloc_cr();
cr->get();
call(cr);
}
{
std::lock_guard l{lock};
cr->put();
cr = NULL;
}
if (retcode >= 0) {
break;
}
if (retcode != -EBUSY && retcode != -EAGAIN) {
ldout(cct, 0) << "ERROR: RGWBackoffControlCR called coroutine returned " << retcode << dendl;
if (exit_on_error) {
return set_cr_error(retcode);
}
}
if (reset_backoff) {
backoff.reset();
}
yield backoff.backoff(this);
}
// run an optional finisher
yield call(alloc_finisher_cr());
if (retcode < 0) {
ldout(cct, 0) << "ERROR: call to finisher_cr() failed: retcode=" << retcode << dendl;
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
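// Note: -EBUSY and -EAGAIN are treated as transient (e.g. a lease currently
// held by another gateway) and retried after backoff; any other error is
// fatal when exit_on_error is set, otherwise it too is retried.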
void rgw_mdlog_info::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("num_objects", num_shards, obj);
JSONDecoder::decode_json("period", period, obj);
JSONDecoder::decode_json("realm_epoch", realm_epoch, obj);
}
void rgw_mdlog_entry::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("id", id, obj);
JSONDecoder::decode_json("section", section, obj);
JSONDecoder::decode_json("name", name, obj);
utime_t ut;
JSONDecoder::decode_json("timestamp", ut, obj);
timestamp = ut.to_real_time();
JSONDecoder::decode_json("data", log_data, obj);
}
void rgw_mdlog_shard_data::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("marker", marker, obj);
JSONDecoder::decode_json("truncated", truncated, obj);
JSONDecoder::decode_json("entries", entries, obj);
}
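// A shard listing decoded above looks roughly like this (sketch; field names
// taken from the decoders above):
//   { "marker": "1_1555555555.000000_42.1",
//     "truncated": false,
//     "entries": [ { "id": "...", "section": "bucket", "name": "mybucket",
//                    "timestamp": "2023-01-01T00:00:00.000000Z",
//                    "data": { ... } } ] }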
int RGWShardCollectCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
while (spawn_next()) {
current_running++;
if (current_running >= max_concurrent) {
int child_ret;
yield wait_for_child();
if (collect_next(&child_ret)) {
current_running--;
child_ret = handle_result(child_ret);
if (child_ret < 0) {
status = child_ret;
}
}
}
}
while (current_running > 0) {
int child_ret;
yield wait_for_child();
if (collect_next(&child_ret)) {
current_running--;
child_ret = handle_result(child_ret);
if (child_ret < 0) {
status = child_ret;
}
}
}
if (status < 0) {
return set_cr_error(status);
}
return set_cr_done();
}
return 0;
}
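// RGWShardCollectCR implements a bounded fan-out: spawn_next() keeps spawning
// children until max_concurrent are in flight, reaping one completed child per
// new spawn; the second loop drains the remainder. handle_result() lets
// subclasses downgrade per-shard errors (e.g. -ENOENT) without failing the
// whole collection.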
class RGWReadRemoteMDLogInfoCR : public RGWShardCollectCR {
RGWMetaSyncEnv *sync_env;
const std::string& period;
int num_shards;
map<int, RGWMetadataLogInfo> *mdlog_info;
int shard_id;
#define READ_MDLOG_MAX_CONCURRENT 10
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to fetch mdlog status: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
RGWReadRemoteMDLogInfoCR(RGWMetaSyncEnv *_sync_env,
const std::string& period, int _num_shards,
map<int, RGWMetadataLogInfo> *_mdlog_info) : RGWShardCollectCR(_sync_env->cct, READ_MDLOG_MAX_CONCURRENT),
sync_env(_sync_env),
period(period), num_shards(_num_shards),
mdlog_info(_mdlog_info), shard_id(0) {}
bool spawn_next() override;
};
class RGWListRemoteMDLogCR : public RGWShardCollectCR {
RGWMetaSyncEnv *sync_env;
const std::string& period;
map<int, string> shards;
int max_entries_per_shard;
map<int, rgw_mdlog_shard_data> *result;
map<int, string>::iterator iter;
#define READ_MDLOG_MAX_CONCURRENT 10
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to list remote mdlog shard: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
RGWListRemoteMDLogCR(RGWMetaSyncEnv *_sync_env,
const std::string& period, map<int, string>& _shards,
int _max_entries_per_shard,
map<int, rgw_mdlog_shard_data> *_result) : RGWShardCollectCR(_sync_env->cct, READ_MDLOG_MAX_CONCURRENT),
sync_env(_sync_env), period(period),
max_entries_per_shard(_max_entries_per_shard),
result(_result) {
shards.swap(_shards);
iter = shards.begin();
}
bool spawn_next() override;
};
int RGWRemoteMetaLog::read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info)
{
rgw_http_param_pair pairs[] = { { "type", "metadata" },
{ NULL, NULL } };
int ret = conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog info" << dendl;
return ret;
}
ldpp_dout(dpp, 20) << "remote mdlog, num_shards=" << log_info->num_shards << dendl;
return 0;
}
int RGWRemoteMetaLog::read_master_log_shards_info(const DoutPrefixProvider *dpp, const string &master_period, map<int, RGWMetadataLogInfo> *shards_info)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
rgw_mdlog_info log_info;
int ret = read_log_info(dpp, &log_info);
if (ret < 0) {
return ret;
}
return run(dpp, new RGWReadRemoteMDLogInfoCR(&sync_env, master_period, log_info.num_shards, shards_info));
}
int RGWRemoteMetaLog::read_master_log_shards_next(const DoutPrefixProvider *dpp, const string& period, map<int, string> shard_markers, map<int, rgw_mdlog_shard_data> *result)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
return run(dpp, new RGWListRemoteMDLogCR(&sync_env, period, shard_markers, 1, result));
}
int RGWRemoteMetaLog::init()
{
conn = store->svc()->zone->get_master_conn();
int ret = http_manager.start();
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
return ret;
}
error_logger = new RGWSyncErrorLogger(store, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);
init_sync_env(&sync_env);
tn = sync_env.sync_tracer->add_node(sync_env.sync_tracer->root_node, "meta");
return 0;
}
#define CLONE_MAX_ENTRIES 100
int RGWMetaSyncStatusManager::init(const DoutPrefixProvider *dpp)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
if (!store->svc()->zone->get_master_conn()) {
ldpp_dout(dpp, -1) << "no REST connection to master zone" << dendl;
return -EIO;
}
int r = rgw_init_ioctx(dpp, store->getRados()->get_rados_handle(), store->svc()->zone->get_zone_params().log_pool, ioctx, true);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to open log pool (" << store->svc()->zone->get_zone_params().log_pool << " ret=" << r << dendl;
return r;
}
r = master_log.init();
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to init remote log, r=" << r << dendl;
return r;
}
RGWMetaSyncEnv& sync_env = master_log.get_sync_env();
rgw_meta_sync_status sync_status;
r = read_sync_status(dpp, &sync_status);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: failed to read sync status, r=" << r << dendl;
return r;
}
int num_shards = sync_status.sync_info.num_shards;
for (int i = 0; i < num_shards; i++) {
shard_objs[i] = rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.shard_obj_name(i));
}
std::unique_lock wl{ts_to_shard_lock};
for (int i = 0; i < num_shards; i++) {
clone_markers.push_back(string());
utime_shard ut;
ut.shard_id = i;
ts_to_shard[ut] = i;
}
return 0;
}
void RGWMetaSyncEnv::init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer) {
dpp = _dpp;
cct = _cct;
store = _store;
conn = _conn;
async_rados = _async_rados;
http_manager = _http_manager;
error_logger = _error_logger;
sync_tracer = _sync_tracer;
}
string RGWMetaSyncEnv::status_oid()
{
return mdlog_sync_status_oid;
}
string RGWMetaSyncEnv::shard_obj_name(int shard_id)
{
char buf[mdlog_sync_status_shard_prefix.size() + 16];
snprintf(buf, sizeof(buf), "%s.%d", mdlog_sync_status_shard_prefix.c_str(), shard_id);
return string(buf);
}
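// e.g. shard_obj_name(5) yields "mdlog.sync-status.shard.5"; these objects
// live in the zone's log pool alongside status_oid() ("mdlog.sync-status").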
class RGWAsyncReadMDLogEntries : public RGWAsyncRadosRequest {
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWMetadataLog *mdlog;
int shard_id;
int max_entries;
protected:
int _send_request(const DoutPrefixProvider *dpp) override {
real_time from_time;
real_time end_time;
void *handle;
mdlog->init_list_entries(shard_id, from_time, end_time, marker, &handle);
int ret = mdlog->list_entries(dpp, handle, max_entries, entries, &marker, &truncated);
mdlog->complete_list_entries(handle);
return ret;
}
public:
string marker;
list<cls_log_entry> entries;
bool truncated;
RGWAsyncReadMDLogEntries(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
RGWMetadataLog* mdlog, int _shard_id,
std::string _marker, int _max_entries)
: RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(_store), mdlog(mdlog),
shard_id(_shard_id), max_entries(_max_entries), marker(std::move(_marker)) {}
};
class RGWReadMDLogEntriesCR : public RGWSimpleCoroutine {
RGWMetaSyncEnv *sync_env;
RGWMetadataLog *const mdlog;
int shard_id;
string marker;
string *pmarker;
int max_entries;
list<cls_log_entry> *entries;
bool *truncated;
RGWAsyncReadMDLogEntries *req{nullptr};
public:
RGWReadMDLogEntriesCR(RGWMetaSyncEnv *_sync_env, RGWMetadataLog* mdlog,
int _shard_id, string*_marker, int _max_entries,
list<cls_log_entry> *_entries, bool *_truncated)
: RGWSimpleCoroutine(_sync_env->cct), sync_env(_sync_env), mdlog(mdlog),
shard_id(_shard_id), pmarker(_marker), max_entries(_max_entries),
entries(_entries), truncated(_truncated) {}
~RGWReadMDLogEntriesCR() override {
if (req) {
req->finish();
}
}
int send_request(const DoutPrefixProvider *dpp) override {
marker = *pmarker;
req = new RGWAsyncReadMDLogEntries(dpp, this, stack->create_completion_notifier(),
sync_env->store, mdlog, shard_id, marker,
max_entries);
sync_env->async_rados->queue(req);
return 0;
}
int request_complete() override {
*pmarker = std::move(req->marker);
*entries = std::move(req->entries);
*truncated = req->truncated;
return req->get_ret_status();
}
};
class RGWReadRemoteMDLogShardInfoCR : public RGWCoroutine {
RGWMetaSyncEnv *env;
RGWRESTReadResource *http_op;
const std::string& period;
int shard_id;
RGWMetadataLogInfo *shard_info;
public:
RGWReadRemoteMDLogShardInfoCR(RGWMetaSyncEnv *env, const std::string& period,
int _shard_id, RGWMetadataLogInfo *_shard_info)
: RGWCoroutine(env->store->ctx()), env(env), http_op(NULL),
period(period), shard_id(_shard_id), shard_info(_shard_info) {}
int operate(const DoutPrefixProvider *dpp) override {
auto store = env->store;
RGWRESTConn *conn = store->svc()->zone->get_master_conn();
reenter(this) {
yield {
char buf[16];
snprintf(buf, sizeof(buf), "%d", shard_id);
rgw_http_param_pair pairs[] = { { "type" , "metadata" },
{ "id", buf },
{ "period", period.c_str() },
{ "info" , NULL },
{ NULL, NULL } };
string p = "/admin/log/";
http_op = new RGWRESTReadResource(conn, p, pairs, NULL,
env->http_manager);
init_new_io(http_op);
int ret = http_op->aio_read(dpp);
if (ret < 0) {
ldpp_dout(env->dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return set_cr_error(ret);
}
return io_block(0);
}
yield {
int ret = http_op->wait(shard_info, null_yield);
http_op->put();
if (ret < 0) {
return set_cr_error(ret);
}
return set_cr_done();
}
}
return 0;
}
};
RGWCoroutine* create_read_remote_mdlog_shard_info_cr(RGWMetaSyncEnv *env,
const std::string& period,
int shard_id,
RGWMetadataLogInfo* info)
{
return new RGWReadRemoteMDLogShardInfoCR(env, period, shard_id, info);
}
class RGWListRemoteMDLogShardCR : public RGWSimpleCoroutine {
RGWMetaSyncEnv *sync_env;
RGWRESTReadResource *http_op;
const std::string& period;
int shard_id;
string marker;
uint32_t max_entries;
rgw_mdlog_shard_data *result;
public:
RGWListRemoteMDLogShardCR(RGWMetaSyncEnv *env, const std::string& period,
int _shard_id, const string& _marker, uint32_t _max_entries,
rgw_mdlog_shard_data *_result)
: RGWSimpleCoroutine(env->store->ctx()), sync_env(env), http_op(NULL),
period(period), shard_id(_shard_id), marker(_marker), max_entries(_max_entries), result(_result) {}
int send_request(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sync_env->conn;
char buf[32];
snprintf(buf, sizeof(buf), "%d", shard_id);
char max_entries_buf[32];
snprintf(max_entries_buf, sizeof(max_entries_buf), "%d", (int)max_entries);
const char *marker_key = (marker.empty() ? "" : "marker");
rgw_http_param_pair pairs[] = { { "type", "metadata" },
{ "id", buf },
{ "period", period.c_str() },
{ "max-entries", max_entries_buf },
{ marker_key, marker.c_str() },
{ NULL, NULL } };
string p = "/admin/log/";
http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager);
init_new_io(http_op);
int ret = http_op->aio_read(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return ret;
}
return 0;
}
int request_complete() override {
int ret = http_op->wait(result, null_yield);
http_op->put();
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to list remote mdlog shard, ret=" << ret << dendl;
return ret;
}
return 0;
}
};
RGWCoroutine* create_list_remote_mdlog_shard_cr(RGWMetaSyncEnv *env,
const std::string& period,
int shard_id,
const std::string& marker,
uint32_t max_entries,
rgw_mdlog_shard_data *result)
{
return new RGWListRemoteMDLogShardCR(env, period, shard_id, marker,
max_entries, result);
}
bool RGWReadRemoteMDLogInfoCR::spawn_next() {
if (shard_id >= num_shards) {
return false;
}
spawn(new RGWReadRemoteMDLogShardInfoCR(sync_env, period, shard_id, &(*mdlog_info)[shard_id]), false);
shard_id++;
return true;
}
bool RGWListRemoteMDLogCR::spawn_next() {
if (iter == shards.end()) {
return false;
}
spawn(new RGWListRemoteMDLogShardCR(sync_env, period, iter->first, iter->second, max_entries_per_shard, &(*result)[iter->first]), false);
++iter;
return true;
}
class RGWInitSyncStatusCoroutine : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
rgw_meta_sync_info status;
vector<RGWMetadataLogInfo> shards_info;
boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;
public:
RGWInitSyncStatusCoroutine(RGWMetaSyncEnv *_sync_env,
const rgw_meta_sync_info &status)
: RGWCoroutine(_sync_env->store->ctx()), sync_env(_sync_env),
status(status), shards_info(status.num_shards),
lease_cr(nullptr), lease_stack(nullptr) {}
~RGWInitSyncStatusCoroutine() override {
if (lease_cr) {
lease_cr->abort();
}
}
int operate(const DoutPrefixProvider *dpp) override {
int ret;
reenter(this) {
yield {
set_status("acquiring sync lock");
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
rgw::sal::RadosStore* store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this, nullptr));
lease_stack.reset(spawn(lease_cr.get(), false));
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
ldpp_dout(dpp, 5) << "failed to take lease" << dendl;
set_status("lease lock failed, early abort");
return set_cr_error(lease_cr->get_ret_status());
}
set_sleeping(true);
yield;
}
yield {
set_status("writing sync status");
rgw::sal::RadosStore* store = sync_env->store;
call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
if (retcode < 0) {
set_status("failed to write sync status");
ldpp_dout(dpp, 0) << "ERROR: failed to write sync status, retcode=" << retcode << dendl;
yield lease_cr->go_down();
return set_cr_error(retcode);
}
/* fetch current position in logs */
set_status("fetching remote log position");
yield {
for (int i = 0; i < (int)status.num_shards; i++) {
spawn(new RGWReadRemoteMDLogShardInfoCR(sync_env, status.period, i,
&shards_info[i]), false);
}
}
drain_all_but_stack(lease_stack.get()); /* the lease cr still needs to run */
yield {
set_status("updating sync status");
for (int i = 0; i < (int)status.num_shards; i++) {
rgw_meta_sync_marker marker;
RGWMetadataLogInfo& info = shards_info[i];
marker.next_step_marker = info.marker;
marker.timestamp = info.last_update;
rgw::sal::RadosStore* store = sync_env->store;
spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(dpp,
store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(i)),
marker), true);
}
}
yield {
set_status("changing sync state: build full sync maps");
status.state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
rgw::sal::RadosStore* store = sync_env->store;
call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
status));
}
set_status("drop lock lease");
yield lease_cr->go_down();
while (collect(&ret, NULL)) {
if (ret < 0) {
return set_cr_error(ret);
}
yield;
}
drain_all();
return set_cr_done();
}
return 0;
}
};
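// The coroutine above is the canonical lease pattern used throughout this
// file: acquire a continuous lease on the status object, write state and
// spawn per-shard work under the lease, then go_down() and drain children.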
class RGWReadSyncStatusMarkersCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
RGWMetaSyncEnv *env;
const int num_shards;
int shard_id{0};
map<uint32_t, rgw_meta_sync_marker>& markers;
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to read metadata sync markers: "
<< cpp_strerror(r) << dendl;
}
return r;
}
public:
RGWReadSyncStatusMarkersCR(RGWMetaSyncEnv *env, int num_shards,
map<uint32_t, rgw_meta_sync_marker>& markers)
: RGWShardCollectCR(env->cct, MAX_CONCURRENT_SHARDS),
env(env), num_shards(num_shards), markers(markers)
{}
bool spawn_next() override;
};
bool RGWReadSyncStatusMarkersCR::spawn_next()
{
if (shard_id >= num_shards) {
return false;
}
using CR = RGWSimpleRadosReadCR<rgw_meta_sync_marker>;
rgw_raw_obj obj{env->store->svc()->zone->get_zone_params().log_pool,
env->shard_obj_name(shard_id)};
spawn(new CR(env->dpp, env->store, obj, &markers[shard_id]), false);
shard_id++;
return true;
}
class RGWReadSyncStatusCoroutine : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
rgw_meta_sync_status *sync_status;
public:
RGWReadSyncStatusCoroutine(RGWMetaSyncEnv *_sync_env,
rgw_meta_sync_status *_status)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), sync_status(_status)
{}
int operate(const DoutPrefixProvider *dpp) override;
};
int RGWReadSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// read sync info
using ReadInfoCR = RGWSimpleRadosReadCR<rgw_meta_sync_info>;
yield {
bool empty_on_enoent = false; // fail on ENOENT
rgw_raw_obj obj{sync_env->store->svc()->zone->get_zone_params().log_pool,
sync_env->status_oid()};
call(new ReadInfoCR(dpp, sync_env->store, obj,
&sync_status->sync_info, empty_on_enoent));
}
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to read sync status info with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
// read shard markers
using ReadMarkersCR = RGWReadSyncStatusMarkersCR;
yield call(new ReadMarkersCR(sync_env, sync_status->sync_info.num_shards,
sync_status->sync_markers));
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to read sync status markers with "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
class RGWFetchAllMetaCR : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
int num_shards;
int ret_status;
list<string> sections;
list<string>::iterator sections_iter;
struct meta_list_result {
list<string> keys;
string marker;
uint64_t count{0};
bool truncated{false};
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("keys", keys, obj);
JSONDecoder::decode_json("marker", marker, obj);
JSONDecoder::decode_json("count", count, obj);
JSONDecoder::decode_json("truncated", truncated, obj);
}
} result;
list<string>::iterator iter;
std::unique_ptr<RGWShardedOmapCRManager> entries_index;
boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;
bool lost_lock;
bool failed;
string marker;
map<uint32_t, rgw_meta_sync_marker>& markers;
RGWSyncTraceNodeRef tn;
public:
RGWFetchAllMetaCR(RGWMetaSyncEnv *_sync_env, int _num_shards,
map<uint32_t, rgw_meta_sync_marker>& _markers,
RGWSyncTraceNodeRef& _tn_parent) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
num_shards(_num_shards),
ret_status(0), lease_cr(nullptr), lease_stack(nullptr),
lost_lock(false), failed(false), markers(_markers) {
tn = sync_env->sync_tracer->add_node(_tn_parent, "fetch_all_meta");
}
~RGWFetchAllMetaCR() override {
}
void append_section_from_set(set<string>& all_sections, const string& name) {
set<string>::iterator iter = all_sections.find(name);
if (iter != all_sections.end()) {
sections.emplace_back(std::move(*iter));
all_sections.erase(iter);
}
}
  /*
   * meta sync should go in the following order: user, bucket.instance,
   * bucket, roles, then whatever other sections exist (if any)
   */
void rearrange_sections() {
set<string> all_sections;
std::move(sections.begin(), sections.end(),
std::inserter(all_sections, all_sections.end()));
sections.clear();
append_section_from_set(all_sections, "user");
append_section_from_set(all_sections, "bucket.instance");
append_section_from_set(all_sections, "bucket");
append_section_from_set(all_sections, "roles");
std::move(all_sections.begin(), all_sections.end(),
std::back_inserter(sections));
}
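  // Example: rearrange_sections() turns {"bucket", "otp", "user",
  // "bucket.instance"} into ["user", "bucket.instance", "bucket", "otp"],
  // so users sync before their buckets and instances before entrypoints.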
int operate(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sync_env->conn;
reenter(this) {
yield {
set_status(string("acquiring lock (") + sync_env->status_oid() + ")");
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados,
sync_env->store,
rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->status_oid()),
lock_name, lock_duration, this, nullptr));
lease_stack.reset(spawn(lease_cr.get(), false));
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
ldpp_dout(dpp, 5) << "failed to take lease" << dendl;
set_status("lease lock failed, early abort");
return set_cr_error(lease_cr->get_ret_status());
}
set_sleeping(true);
yield;
}
entries_index.reset(new RGWShardedOmapCRManager(sync_env->async_rados, sync_env->store, this, num_shards,
sync_env->store->svc()->zone->get_zone_params().log_pool,
mdlog_sync_full_sync_index_prefix));
yield {
call(new RGWReadRESTResourceCR<list<string> >(cct, conn, sync_env->http_manager,
"/admin/metadata", NULL, §ions));
}
if (get_ret_status() < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata sections" << dendl;
yield entries_index->finish();
yield lease_cr->go_down();
drain_all();
return set_cr_error(get_ret_status());
}
rearrange_sections();
sections_iter = sections.begin();
for (; sections_iter != sections.end(); ++sections_iter) {
do {
yield {
#define META_FULL_SYNC_CHUNK_SIZE "1000"
string entrypoint = string("/admin/metadata/") + *sections_iter;
rgw_http_param_pair pairs[] = { { "max-entries", META_FULL_SYNC_CHUNK_SIZE },
{ "marker", result.marker.c_str() },
{ NULL, NULL } };
result.keys.clear();
call(new RGWReadRESTResourceCR<meta_list_result >(cct, conn, sync_env->http_manager,
entrypoint, pairs, &result));
}
ret_status = get_ret_status();
if (ret_status == -ENOENT) {
set_retcode(0); /* reset coroutine status so that we don't return it */
ret_status = 0;
}
if (ret_status < 0) {
tn->log(0, SSTR("ERROR: failed to fetch metadata section: " << *sections_iter));
yield entries_index->finish();
yield lease_cr->go_down();
drain_all();
return set_cr_error(ret_status);
}
iter = result.keys.begin();
for (; iter != result.keys.end(); ++iter) {
if (!lease_cr->is_locked()) {
lost_lock = true;
tn->log(1, "lease is lost, abort");
break;
}
yield; // allow entries_index consumer to make progress
tn->log(20, SSTR("list metadata: section=" << *sections_iter << " key=" << *iter));
string s = *sections_iter + ":" + *iter;
int shard_id;
rgw::sal::RadosStore* store = sync_env->store;
int ret = store->ctl()->meta.mgr->get_shard_id(*sections_iter, *iter, &shard_id);
if (ret < 0) {
tn->log(0, SSTR("ERROR: could not determine shard id for " << *sections_iter << ":" << *iter));
ret_status = ret;
break;
}
if (!entries_index->append(s, shard_id)) {
break;
}
}
} while (result.truncated);
}
yield {
if (!entries_index->finish()) {
failed = true;
}
}
if (!failed) {
for (map<uint32_t, rgw_meta_sync_marker>::iterator iter = markers.begin(); iter != markers.end(); ++iter) {
int shard_id = (int)iter->first;
rgw_meta_sync_marker& marker = iter->second;
marker.total_entries = entries_index->get_total_entries(shard_id);
spawn(new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(dpp, sync_env->store,
rgw_raw_obj(sync_env->store->svc()->zone->get_zone_params().log_pool, sync_env->shard_obj_name(shard_id)),
marker), true);
}
}
drain_all_but_stack(lease_stack.get()); /* the lease cr still needs to run */
yield lease_cr->go_down();
int ret;
while (collect(&ret, NULL)) {
if (ret < 0) {
return set_cr_error(ret);
}
yield;
}
drain_all();
if (failed) {
yield return set_cr_error(-EIO);
}
if (lost_lock) {
yield return set_cr_error(-EBUSY);
}
if (ret_status < 0) {
yield return set_cr_error(ret_status);
}
yield return set_cr_done();
}
return 0;
}
};
static string full_sync_index_shard_oid(int shard_id)
{
char buf[mdlog_sync_full_sync_index_prefix.size() + 16];
snprintf(buf, sizeof(buf), "%s.%d", mdlog_sync_full_sync_index_prefix.c_str(), shard_id);
return string(buf);
}
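// e.g. full_sync_index_shard_oid(3) yields "meta.full-sync.index.3".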
class RGWReadRemoteMetadataCR : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
RGWRESTReadResource *http_op;
string section;
string key;
bufferlist *pbl;
RGWSyncTraceNodeRef tn;
public:
RGWReadRemoteMetadataCR(RGWMetaSyncEnv *_sync_env,
const string& _section, const string& _key, bufferlist *_pbl,
const RGWSyncTraceNodeRef& _tn_parent) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
http_op(NULL),
section(_section),
key(_key),
pbl(_pbl) {
tn = sync_env->sync_tracer->add_node(_tn_parent, "read_remote_meta",
section + ":" + key);
}
int operate(const DoutPrefixProvider *dpp) override {
RGWRESTConn *conn = sync_env->conn;
reenter(this) {
yield {
string key_encode;
url_encode(key, key_encode);
rgw_http_param_pair pairs[] = { { "key" , key.c_str()},
{ NULL, NULL } };
string p = string("/admin/metadata/") + section + "/" + key_encode;
http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager);
init_new_io(http_op);
int ret = http_op->aio_read(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
return set_cr_error(ret);
}
return io_block(0);
}
yield {
int ret = http_op->wait(pbl, null_yield);
http_op->put();
if (ret < 0) {
return set_cr_error(ret);
}
return set_cr_done();
}
}
return 0;
}
};
class RGWAsyncMetaStoreEntry : public RGWAsyncRadosRequest {
rgw::sal::RadosStore* store;
string raw_key;
bufferlist bl;
const DoutPrefixProvider *dpp;
protected:
int _send_request(const DoutPrefixProvider *dpp) override {
int ret = store->ctl()->meta.mgr->put(raw_key, bl, null_yield, dpp, RGWMDLogSyncType::APPLY_ALWAYS, true);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: can't store key: " << raw_key << " ret=" << ret << dendl;
return ret;
}
return 0;
}
public:
RGWAsyncMetaStoreEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
const string& _raw_key,
bufferlist& _bl,
const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn), store(_store),
raw_key(_raw_key), bl(_bl), dpp(dpp) {}
};
class RGWMetaStoreEntryCR : public RGWSimpleCoroutine {
RGWMetaSyncEnv *sync_env;
string raw_key;
bufferlist bl;
RGWAsyncMetaStoreEntry *req;
public:
RGWMetaStoreEntryCR(RGWMetaSyncEnv *_sync_env,
const string& _raw_key,
bufferlist& _bl) : RGWSimpleCoroutine(_sync_env->cct), sync_env(_sync_env),
raw_key(_raw_key), bl(_bl), req(NULL) {
}
~RGWMetaStoreEntryCR() override {
if (req) {
req->finish();
}
}
int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncMetaStoreEntry(this, stack->create_completion_notifier(),
sync_env->store, raw_key, bl, dpp);
sync_env->async_rados->queue(req);
return 0;
}
int request_complete() override {
return req->get_ret_status();
}
};
class RGWAsyncMetaRemoveEntry : public RGWAsyncRadosRequest {
rgw::sal::RadosStore* store;
string raw_key;
const DoutPrefixProvider *dpp;
protected:
int _send_request(const DoutPrefixProvider *dpp) override {
int ret = store->ctl()->meta.mgr->remove(raw_key, null_yield, dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: can't remove key: " << raw_key << " ret=" << ret << dendl;
return ret;
}
return 0;
}
public:
RGWAsyncMetaRemoveEntry(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
const string& _raw_key, const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn), store(_store),
raw_key(_raw_key), dpp(dpp) {}
};
class RGWMetaRemoveEntryCR : public RGWSimpleCoroutine {
RGWMetaSyncEnv *sync_env;
string raw_key;
RGWAsyncMetaRemoveEntry *req;
public:
RGWMetaRemoveEntryCR(RGWMetaSyncEnv *_sync_env,
const string& _raw_key) : RGWSimpleCoroutine(_sync_env->cct), sync_env(_sync_env),
raw_key(_raw_key), req(NULL) {
}
~RGWMetaRemoveEntryCR() override {
if (req) {
req->finish();
}
}
int send_request(const DoutPrefixProvider *dpp) override {
req = new RGWAsyncMetaRemoveEntry(this, stack->create_completion_notifier(),
sync_env->store, raw_key, dpp);
sync_env->async_rados->queue(req);
return 0;
}
int request_complete() override {
int r = req->get_ret_status();
if (r == -ENOENT) {
r = 0;
}
return r;
}
};
#define META_SYNC_UPDATE_MARKER_WINDOW 10
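// With a window of 10, the marker tracker below persists the shard marker to
// RADOS roughly once per 10 completed entries rather than after every entry,
// trading a little replay on restart for far fewer marker writes.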
int RGWLastCallerWinsCR::operate(const DoutPrefixProvider *dpp) {
RGWCoroutine *call_cr;
reenter(this) {
while (cr) {
call_cr = cr;
cr = nullptr;
yield call(call_cr);
/* cr might have been modified at this point */
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: RGWLastCallerWinsCR() failed: retcode=" << retcode << dendl;
return set_cr_error(retcode);
}
}
return set_cr_done();
}
return 0;
}
class RGWMetaSyncShardMarkerTrack : public RGWSyncShardMarkerTrack<string, string> {
RGWMetaSyncEnv *sync_env;
string marker_oid;
rgw_meta_sync_marker sync_marker;
RGWSyncTraceNodeRef tn;
public:
RGWMetaSyncShardMarkerTrack(RGWMetaSyncEnv *_sync_env,
const string& _marker_oid,
const rgw_meta_sync_marker& _marker,
RGWSyncTraceNodeRef& _tn) : RGWSyncShardMarkerTrack(META_SYNC_UPDATE_MARKER_WINDOW),
sync_env(_sync_env),
marker_oid(_marker_oid),
sync_marker(_marker),
tn(_tn){}
RGWCoroutine *store_marker(const string& new_marker, uint64_t index_pos, const real_time& timestamp) override {
sync_marker.marker = new_marker;
if (index_pos > 0) {
sync_marker.pos = index_pos;
}
if (!real_clock::is_zero(timestamp)) {
sync_marker.timestamp = timestamp;
}
ldpp_dout(sync_env->dpp, 20) << __func__ << "(): updating marker marker_oid=" << marker_oid << " marker=" << new_marker << " realm_epoch=" << sync_marker.realm_epoch << dendl;
tn->log(20, SSTR("new marker=" << new_marker));
rgw::sal::RadosStore* store = sync_env->store;
return new RGWSimpleRadosWriteCR<rgw_meta_sync_marker>(sync_env->dpp, store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, marker_oid),
sync_marker);
}
RGWOrderCallCR *allocate_order_control_cr() override {
return new RGWLastCallerWinsCR(sync_env->cct);
}
};
RGWMetaSyncSingleEntryCR::RGWMetaSyncSingleEntryCR(RGWMetaSyncEnv *_sync_env,
const string& _raw_key, const string& _entry_marker,
const RGWMDLogStatus& _op_status,
RGWMetaSyncShardMarkerTrack *_marker_tracker, const RGWSyncTraceNodeRef& _tn_parent) : RGWCoroutine(_sync_env->cct),
sync_env(_sync_env),
raw_key(_raw_key), entry_marker(_entry_marker),
op_status(_op_status),
pos(0), sync_status(0),
marker_tracker(_marker_tracker), tries(0) {
error_injection = (sync_env->cct->_conf->rgw_sync_meta_inject_err_probability > 0);
tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", raw_key);
}
int RGWMetaSyncSingleEntryCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
#define NUM_TRANSIENT_ERROR_RETRIES 10
if (error_injection &&
rand() % 10000 < cct->_conf->rgw_sync_meta_inject_err_probability * 10000.0) {
return set_cr_error(-EIO);
}
if (op_status != MDLOG_STATUS_COMPLETE) {
tn->log(20, "skipping pending operation");
yield call(marker_tracker->finish(entry_marker));
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
tn->set_flag(RGW_SNS_FLAG_ACTIVE);
for (tries = 0; tries < NUM_TRANSIENT_ERROR_RETRIES; tries++) {
yield {
pos = raw_key.find(':');
section = raw_key.substr(0, pos);
key = raw_key.substr(pos + 1);
tn->log(10, SSTR("fetching remote metadata entry" << (tries == 0 ? "" : " (retry)")));
call(new RGWReadRemoteMetadataCR(sync_env, section, key, &md_bl, tn));
}
sync_status = retcode;
if (sync_status == -ENOENT) {
break;
}
if (sync_status < 0) {
if (tries < NUM_TRANSIENT_ERROR_RETRIES - 1) {
ldpp_dout(dpp, 20) << *this << ": failed to fetch remote metadata entry: " << section << ":" << key << ", will retry" << dendl;
continue;
}
tn->log(10, SSTR("failed to read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status));
log_error() << "failed to read remote metadata entry: section=" << section << " key=" << key << " status=" << sync_status << std::endl;
yield call(sync_env->error_logger->log_error_cr(dpp, sync_env->conn->get_remote_id(), section, key, -sync_status,
string("failed to read remote metadata entry: ") + cpp_strerror(-sync_status)));
return set_cr_error(sync_status);
}
break;
}
retcode = 0;
for (tries = 0; tries < NUM_TRANSIENT_ERROR_RETRIES; tries++) {
if (sync_status != -ENOENT) {
tn->log(10, SSTR("storing local metadata entry: " << section << ":" << key));
yield call(new RGWMetaStoreEntryCR(sync_env, raw_key, md_bl));
} else {
tn->log(10, SSTR("removing local metadata entry:" << section << ":" << key));
yield call(new RGWMetaRemoveEntryCR(sync_env, raw_key));
if (retcode == -ENOENT) {
retcode = 0;
break;
}
}
if ((retcode < 0) && (tries < NUM_TRANSIENT_ERROR_RETRIES - 1)) {
ldpp_dout(dpp, 20) << *this << ": failed to store metadata entry: " << section << ":" << key << ", got retcode=" << retcode << ", will retry" << dendl;
continue;
}
break;
}
sync_status = retcode;
if (sync_status == 0 && marker_tracker) {
/* update marker */
yield call(marker_tracker->finish(entry_marker));
sync_status = retcode;
}
if (sync_status < 0) {
tn->log(10, SSTR("failed, status=" << sync_status));
return set_cr_error(sync_status);
}
tn->log(10, "success");
return set_cr_done();
}
return 0;
}
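// Net effect of the two retry loops above: transient fetch/store errors are
// retried up to NUM_TRANSIENT_ERROR_RETRIES times; -ENOENT from the remote is
// treated as a deletion and replayed locally via RGWMetaRemoveEntryCR.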
class RGWCloneMetaLogCoroutine : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
RGWMetadataLog *mdlog;
const std::string& period;
int shard_id;
string marker;
bool truncated = false;
string *new_marker;
int max_entries = CLONE_MAX_ENTRIES;
RGWRESTReadResource *http_op = nullptr;
boost::intrusive_ptr<RGWMetadataLogInfoCompletion> completion;
RGWMetadataLogInfo shard_info;
rgw_mdlog_shard_data data;
public:
RGWCloneMetaLogCoroutine(RGWMetaSyncEnv *_sync_env, RGWMetadataLog* mdlog,
const std::string& period, int _id,
const string& _marker, string *_new_marker)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), mdlog(mdlog),
period(period), shard_id(_id), marker(_marker), new_marker(_new_marker) {
if (new_marker) {
*new_marker = marker;
}
}
~RGWCloneMetaLogCoroutine() override {
if (http_op) {
http_op->put();
}
if (completion) {
completion->cancel();
}
}
int operate(const DoutPrefixProvider *dpp) override;
int state_init();
int state_read_shard_status();
int state_read_shard_status_complete();
int state_send_rest_request(const DoutPrefixProvider *dpp);
int state_receive_rest_response();
int state_store_mdlog_entries();
int state_store_mdlog_entries_complete();
};
class RGWMetaSyncShardCR : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
const rgw_pool& pool;
const std::string& period; //< currently syncing period id
const epoch_t realm_epoch; //< realm_epoch of period
RGWMetadataLog* mdlog; //< log of syncing period
uint32_t shard_id;
rgw_meta_sync_marker& sync_marker;
boost::optional<rgw_meta_sync_marker> temp_marker; //< for pending updates
string marker;
string max_marker;
const std::string& period_marker; //< max marker stored in next period
RGWRadosGetOmapKeysCR::ResultPtr omapkeys;
std::set<std::string> entries;
std::set<std::string>::iterator iter;
string oid;
RGWMetaSyncShardMarkerTrack *marker_tracker = nullptr;
list<cls_log_entry> log_entries;
list<cls_log_entry>::iterator log_iter;
bool truncated = false;
string mdlog_marker;
string raw_key;
rgw_mdlog_entry mdlog_entry;
ceph::mutex inc_lock = ceph::make_mutex("RGWMetaSyncShardCR::inc_lock");
ceph::condition_variable inc_cond;
boost::asio::coroutine incremental_cr;
boost::asio::coroutine full_cr;
boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;
bool lost_lock = false;
bool lost_bid = false;
bool *reset_backoff;
// hold a reference to the cr stack while it's in the map
using StackRef = boost::intrusive_ptr<RGWCoroutinesStack>;
map<StackRef, string> stack_to_pos;
map<string, string> pos_to_prev;
bool can_adjust_marker = false;
bool done_with_period = false;
int total_entries = 0;
RGWSyncTraceNodeRef tn;
public:
RGWMetaSyncShardCR(RGWMetaSyncEnv *_sync_env, const rgw_pool& _pool,
const std::string& period, epoch_t realm_epoch,
RGWMetadataLog* mdlog, uint32_t _shard_id,
rgw_meta_sync_marker& _marker,
const std::string& period_marker, bool *_reset_backoff,
RGWSyncTraceNodeRef& _tn)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env), pool(_pool),
period(period), realm_epoch(realm_epoch), mdlog(mdlog),
shard_id(_shard_id), sync_marker(_marker),
period_marker(period_marker),
reset_backoff(_reset_backoff), tn(_tn) {
*reset_backoff = false;
}
~RGWMetaSyncShardCR() override {
delete marker_tracker;
if (lease_cr) {
lease_cr->abort();
}
}
void set_marker_tracker(RGWMetaSyncShardMarkerTrack *mt) {
delete marker_tracker;
marker_tracker = mt;
}
int operate(const DoutPrefixProvider *dpp) override {
int r;
while (true) {
switch (sync_marker.state) {
case rgw_meta_sync_marker::FullSync:
r = full_sync();
if (r < 0) {
ldpp_dout(dpp, 10) << "sync: full_sync: shard_id=" << shard_id << " r=" << r << dendl;
return set_cr_error(r);
}
return 0;
case rgw_meta_sync_marker::IncrementalSync:
r = incremental_sync();
if (r < 0) {
ldpp_dout(dpp, 10) << "sync: incremental_sync: shard_id=" << shard_id << " r=" << r << dendl;
return set_cr_error(r);
}
return 0;
} // switch
} // while (true)
/* unreachable */
return 0;
} // RGWMetaSyncShardCR::operate()
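// reap completed children and advance the in-memory sync marker to the
// oldest position that is no longer outstanding; any child error clears
// can_adjust_marker so the marker stays put and the control cr retries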
void collect_children()
{
int child_ret;
RGWCoroutinesStack *child;
while (collect_next(&child_ret, &child)) {
auto iter = stack_to_pos.find(child);
if (iter == stack_to_pos.end()) {
/* some other stack that we don't care about */
continue;
}
string& pos = iter->second;
if (child_ret < 0) {
ldpp_dout(sync_env->dpp, 0) << *this << ": child operation stack=" << child << " entry=" << pos << " returned " << child_ret << dendl;
// on any error code from RGWMetaSyncSingleEntryCR, we do not advance
// the sync status marker past this entry, and set
// can_adjust_marker=false to exit out of RGWMetaSyncShardCR.
// RGWMetaSyncShardControlCR will rerun RGWMetaSyncShardCR from the
// previous marker and retry
can_adjust_marker = false;
}
map<string, string>::iterator prev_iter = pos_to_prev.find(pos);
ceph_assert(prev_iter != pos_to_prev.end());
if (pos_to_prev.size() == 1) {
if (can_adjust_marker) {
sync_marker.marker = pos;
}
pos_to_prev.erase(prev_iter);
} else {
ceph_assert(pos_to_prev.size() > 1);
pos_to_prev.erase(prev_iter);
prev_iter = pos_to_prev.begin();
if (can_adjust_marker) {
sync_marker.marker = prev_iter->second;
}
}
ldpp_dout(sync_env->dpp, 4) << *this << ": adjusting marker pos=" << sync_marker.marker << dendl;
stack_to_pos.erase(iter);
}
}
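// full sync: drain the omap-listed keys for this shard, then persist the
// transition to IncrementalSync through temp_marker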
int full_sync() {
#define OMAP_GET_MAX_ENTRIES 100
int max_entries = OMAP_GET_MAX_ENTRIES;
reenter(&full_cr) {
set_status("full_sync");
tn->log(10, "start full sync");
oid = full_sync_index_shard_oid(shard_id);
can_adjust_marker = true;
/* grab lock */
if (!sync_env->bid_manager->is_highest_bidder(shard_id)) {
tn->log(10, "not the highest bidder");
return -EBUSY;
}
yield {
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
rgw::sal::RadosStore* store = sync_env->store;
lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
lock_name, lock_duration, this, nullptr));
lease_stack.reset(spawn(lease_cr.get(), false));
lost_lock = false;
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
drain_all();
tn->log(5, "failed to take lease");
return lease_cr->get_ret_status();
}
set_sleeping(true);
yield;
}
tn->log(10, "took lease");
/* lock succeeded; reset the backoff so a retry is not penalized by earlier failures */
*reset_backoff = true;
/* prepare marker tracker */
set_marker_tracker(new RGWMetaSyncShardMarkerTrack(sync_env,
sync_env->shard_obj_name(shard_id),
sync_marker, tn));
marker = sync_marker.marker;
total_entries = sync_marker.pos;
/* sync! */
do {
if (!lease_cr->is_locked()) {
tn->log(1, "lease is lost, abort");
lost_lock = true;
break;
}
if (!sync_env->bid_manager->is_highest_bidder(shard_id)) {
tn->log(1, "lost bid");
lost_bid = true;
break;
}
omapkeys = std::make_shared<RGWRadosGetOmapKeysCR::Result>();
yield call(new RGWRadosGetOmapKeysCR(sync_env->store, rgw_raw_obj(pool, oid),
marker, max_entries, omapkeys));
if (retcode < 0) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: " << __func__ << "(): RGWRadosGetOmapKeysCR() returned ret=" << retcode << dendl;
tn->log(0, SSTR("ERROR: failed to list omap keys, status=" << retcode));
yield lease_cr->go_down();
drain_all();
return retcode;
}
entries = std::move(omapkeys->entries);
tn->log(20, SSTR("retrieved " << entries.size() << " entries to sync"));
if (entries.size() > 0) {
tn->set_flag(RGW_SNS_FLAG_ACTIVE); /* actually have entries to sync */
}
iter = entries.begin();
for (; iter != entries.end(); ++iter) {
marker = *iter;
tn->log(20, SSTR("full sync: " << marker));
total_entries++;
if (!marker_tracker->start(marker, total_entries, real_time())) {
tn->log(0, SSTR("ERROR: cannot start syncing " << marker << ". Duplicate entry?"));
} else {
// fetch remote and write locally
yield {
RGWCoroutinesStack *stack = spawn(new RGWMetaSyncSingleEntryCR(sync_env, marker, marker, MDLOG_STATUS_COMPLETE, marker_tracker, tn), false);
// stack_to_pos holds a reference to the stack
stack_to_pos[stack] = marker;
pos_to_prev[marker] = marker;
}
// limit spawn window
while (num_spawned() > static_cast<size_t>(cct->_conf->rgw_meta_sync_spawn_window)) {
yield wait_for_child();
collect_children();
}
}
}
collect_children();
} while (omapkeys->more && can_adjust_marker);
tn->unset_flag(RGW_SNS_FLAG_ACTIVE); /* done processing entries for this pass */
while (num_spawned() > 1) {
yield wait_for_child();
collect_children();
}
if (lost_bid) {
yield call(marker_tracker->flush());
} else if (!lost_lock) {
/* update marker to reflect we're done with full sync */
if (can_adjust_marker) {
// apply updates to a temporary marker, or operate() will send us
// to incremental_sync() after we yield
temp_marker = sync_marker;
temp_marker->state = rgw_meta_sync_marker::IncrementalSync;
temp_marker->marker = std::move(temp_marker->next_step_marker);
temp_marker->next_step_marker.clear();
temp_marker->realm_epoch = realm_epoch;
ldpp_dout(sync_env->dpp, 4) << *this << ": saving marker pos=" << temp_marker->marker << " realm_epoch=" << realm_epoch << dendl;
using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_meta_sync_marker>;
yield call(new WriteMarkerCR(sync_env->dpp, sync_env->store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
*temp_marker));
}
if (retcode < 0) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to set sync marker: retcode=" << retcode << dendl;
yield lease_cr->go_down();
drain_all();
return retcode;
}
// clean up full sync index
yield {
auto oid = full_sync_index_shard_oid(shard_id);
call(new RGWRadosRemoveCR(sync_env->store, {pool, oid}));
}
}
/*
* all paths reach here: release the lease and drain remaining children, then
* either report an error (lost lock/bid, stuck marker) or apply temp_marker
*/
yield lease_cr->go_down();
lease_cr.reset();
drain_all();
if (!can_adjust_marker) {
return -EAGAIN;
}
if (lost_lock || lost_bid) {
return -EBUSY;
}
tn->log(10, "full sync complete");
// apply the sync marker update
ceph_assert(temp_marker);
sync_marker = std::move(*temp_marker);
temp_marker = boost::none;
// must not yield after this point!
}
return 0;
}
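// incremental sync: clone new remote mdlog entries and replay them in
// order, stopping at period_marker (if set) so the caller can advance to
// the next period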
int incremental_sync() {
reenter(&incremental_cr) {
set_status("incremental_sync");
tn->log(10, "start incremental sync");
can_adjust_marker = true;
/* grab lock */
if (!sync_env->bid_manager->is_highest_bidder(shard_id)) {
tn->log(10, "not the highest bidder");
return -EBUSY;
}
if (!lease_cr) { /* could have had a lease_cr lock from previous state */
yield {
uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
string lock_name = "sync_lock";
rgw::sal::RadosStore* store = sync_env->store;
lease_cr.reset( new RGWContinuousLeaseCR(sync_env->async_rados, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
lock_name, lock_duration, this, nullptr));
lease_stack.reset(spawn(lease_cr.get(), false));
lost_lock = false;
}
while (!lease_cr->is_locked()) {
if (lease_cr->is_done()) {
drain_all();
tn->log(5, "failed to take lease");
return lease_cr->get_ret_status();
}
set_sleeping(true);
yield;
}
}
tn->log(10, "took lease");
// if the period has advanced, we can't use the existing marker
if (sync_marker.realm_epoch < realm_epoch) {
ldpp_dout(sync_env->dpp, 4) << "clearing marker=" << sync_marker.marker
<< " from old realm_epoch=" << sync_marker.realm_epoch
<< " (now " << realm_epoch << ')' << dendl;
sync_marker.realm_epoch = realm_epoch;
sync_marker.marker.clear();
}
mdlog_marker = sync_marker.marker;
set_marker_tracker(new RGWMetaSyncShardMarkerTrack(sync_env,
sync_env->shard_obj_name(shard_id),
sync_marker, tn));
/*
* mdlog_marker: the remote sync marker position
* sync_marker: the local sync marker position
* max_marker: the max mdlog position that we fetched
* marker: the current position we try to sync
* period_marker: the last marker before the next period begins (optional)
*/
marker = max_marker = sync_marker.marker;
/* inc sync */
do {
if (!lease_cr->is_locked()) {
lost_lock = true;
tn->log(1, "lease is lost, abort");
break;
}
if (!sync_env->bid_manager->is_highest_bidder(shard_id)) {
tn->log(1, "lost bid");
lost_bid = true;
break;
}
#define INCREMENTAL_MAX_ENTRIES 100
ldpp_dout(sync_env->dpp, 20) << __func__ << ":" << __LINE__ << ": shard_id=" << shard_id << " mdlog_marker=" << mdlog_marker << " sync_marker.marker=" << sync_marker.marker << " period_marker=" << period_marker << " truncated=" << truncated << dendl;
if (!period_marker.empty() && period_marker <= mdlog_marker) {
tn->log(10, SSTR("finished syncing current period: mdlog_marker=" << mdlog_marker << " sync_marker=" << sync_marker.marker << " period_marker=" << period_marker));
done_with_period = true;
break;
}
if (mdlog_marker <= max_marker || !truncated) {
/* we're at the tip, try to bring more entries */
ldpp_dout(sync_env->dpp, 20) << __func__ << ":" << __LINE__ << ": syncing mdlog for shard_id=" << shard_id << dendl;
yield call(new RGWCloneMetaLogCoroutine(sync_env, mdlog,
period, shard_id,
mdlog_marker, &mdlog_marker));
}
if (retcode < 0) {
tn->log(10, SSTR(*this << ": failed to fetch more log entries, retcode=" << retcode));
yield lease_cr->go_down();
drain_all();
*reset_backoff = false; // back off and try again later
return retcode;
}
truncated = true;
*reset_backoff = true; /* if we got to this point, all systems function */
if (mdlog_marker > max_marker) {
tn->set_flag(RGW_SNS_FLAG_ACTIVE); /* actually have entries to sync */
tn->log(20, SSTR("mdlog_marker=" << mdlog_marker << " sync_marker=" << sync_marker.marker));
marker = max_marker;
yield call(new RGWReadMDLogEntriesCR(sync_env, mdlog, shard_id,
&max_marker, INCREMENTAL_MAX_ENTRIES,
&log_entries, &truncated));
if (retcode < 0) {
tn->log(10, SSTR("failed to list mdlog entries, retcode=" << retcode));
yield lease_cr->go_down();
drain_all();
*reset_backoff = false; // back off and try again later
return retcode;
}
for (log_iter = log_entries.begin(); log_iter != log_entries.end() && !done_with_period; ++log_iter) {
if (!period_marker.empty() && period_marker <= log_iter->id) {
done_with_period = true;
if (period_marker < log_iter->id) {
tn->log(10, SSTR("found key=" << log_iter->id
<< " past period_marker=" << period_marker));
break;
}
ldpp_dout(sync_env->dpp, 10) << "found key at period_marker=" << period_marker << dendl;
// sync this entry, then return control to RGWMetaSyncCR
}
if (!mdlog_entry.convert_from(*log_iter)) {
tn->log(0, SSTR("ERROR: failed to convert mdlog entry, shard_id=" << shard_id << " log_entry: " << log_iter->id << ":" << log_iter->section << ":" << log_iter->name << ":" << log_iter->timestamp << " ... skipping entry"));
continue;
}
tn->log(20, SSTR("log_entry: " << log_iter->id << ":" << log_iter->section << ":" << log_iter->name << ":" << log_iter->timestamp));
if (!marker_tracker->start(log_iter->id, 0, log_iter->timestamp.to_real_time())) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: cannot start syncing " << log_iter->id << ". Duplicate entry?" << dendl;
} else {
raw_key = log_iter->section + ":" + log_iter->name;
yield {
RGWCoroutinesStack *stack = spawn(new RGWMetaSyncSingleEntryCR(sync_env, raw_key, log_iter->id, mdlog_entry.log_data.status, marker_tracker, tn), false);
ceph_assert(stack);
// stack_to_pos holds a reference to the stack
stack_to_pos[stack] = log_iter->id;
pos_to_prev[log_iter->id] = marker;
}
// limit spawn window
while (num_spawned() > static_cast<size_t>(cct->_conf->rgw_meta_sync_spawn_window)) {
yield wait_for_child();
collect_children();
}
}
marker = log_iter->id;
}
}
collect_children();
ldpp_dout(sync_env->dpp, 20) << __func__ << ":" << __LINE__ << ": shard_id=" << shard_id << " mdlog_marker=" << mdlog_marker << " max_marker=" << max_marker << " sync_marker.marker=" << sync_marker.marker << " period_marker=" << period_marker << dendl;
if (done_with_period) {
// return control to RGWMetaSyncCR and advance to the next period
tn->log(10, SSTR(*this << ": done with period"));
break;
}
if (mdlog_marker == max_marker && can_adjust_marker) {
tn->unset_flag(RGW_SNS_FLAG_ACTIVE);
yield wait(utime_t(cct->_conf->rgw_meta_sync_poll_interval, 0));
}
} while (can_adjust_marker);
tn->unset_flag(RGW_SNS_FLAG_ACTIVE);
while (num_spawned() > 1) {
yield wait_for_child();
collect_children();
}
yield lease_cr->go_down();
drain_all();
if (lost_lock || lost_bid) {
return -EBUSY;
}
if (!can_adjust_marker) {
return -EAGAIN;
}
return set_cr_done();
}
/* TODO */
return 0;
}
};
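// retries RGWMetaSyncShardCR with backoff; between attempts,
// alloc_finisher_cr() re-reads the persisted shard marker so the retry
// resumes from the stored position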
class RGWMetaSyncShardControlCR : public RGWBackoffControlCR
{
RGWMetaSyncEnv *sync_env;
const rgw_pool& pool;
const std::string& period;
epoch_t realm_epoch;
RGWMetadataLog* mdlog;
uint32_t shard_id;
rgw_meta_sync_marker sync_marker;
const std::string period_marker;
RGWSyncTraceNodeRef tn;
static constexpr bool exit_on_error = false; // retry on all errors
public:
RGWMetaSyncShardControlCR(RGWMetaSyncEnv *_sync_env, const rgw_pool& _pool,
const std::string& period, epoch_t realm_epoch,
RGWMetadataLog* mdlog, uint32_t _shard_id,
const rgw_meta_sync_marker& _marker,
std::string&& period_marker,
RGWSyncTraceNodeRef& _tn_parent)
: RGWBackoffControlCR(_sync_env->cct, exit_on_error), sync_env(_sync_env),
pool(_pool), period(period), realm_epoch(realm_epoch), mdlog(mdlog),
shard_id(_shard_id), sync_marker(_marker),
period_marker(std::move(period_marker)) {
tn = sync_env->sync_tracer->add_node(_tn_parent, "shard",
std::to_string(shard_id));
}
RGWCoroutine *alloc_cr() override {
return new RGWMetaSyncShardCR(sync_env, pool, period, realm_epoch, mdlog,
shard_id, sync_marker, period_marker, backoff_ptr(), tn);
}
RGWCoroutine *alloc_finisher_cr() override {
rgw::sal::RadosStore* store = sync_env->store;
return new RGWSimpleRadosReadCR<rgw_meta_sync_marker>(sync_env->dpp, store,
rgw_raw_obj(pool, sync_env->shard_obj_name(shard_id)),
&sync_marker);
}
};
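// periodically rebroadcasts this gateway's shard bids (sync fairness) so
// peer gateways keep an up-to-date view; loops on the lease period interval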
class RGWMetaSyncShardNotifyCR : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
RGWSyncTraceNodeRef tn;
public:
RGWMetaSyncShardNotifyCR(RGWMetaSyncEnv *_sync_env, RGWSyncTraceNodeRef& _tn)
: RGWCoroutine(_sync_env->cct),
sync_env(_sync_env), tn(_tn) {}
int operate(const DoutPrefixProvider* dpp) override
{
reenter(this) {
for (;;) {
set_status("sync lock notification");
yield call(sync_env->bid_manager->notify_cr());
if (retcode < 0) {
tn->log(5, SSTR("ERROR: failed to notify bidding information" << retcode));
return set_cr_error(retcode);
}
set_status("sleeping");
yield wait(utime_t(cct->_conf->rgw_sync_lease_period, 0));
}
}
return 0;
}
};
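// top-level metadata sync: walks the period history one period at a time,
// spawning an RGWMetaSyncShardControlCR per shard, then persists the
// updated sync info and advances once every shard is done with the period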
class RGWMetaSyncCR : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
const rgw_pool& pool;
RGWPeriodHistory::Cursor cursor; //< sync position in period history
RGWPeriodHistory::Cursor next; //< next period in history
rgw_meta_sync_status sync_status;
RGWSyncTraceNodeRef tn;
std::mutex mutex; //< protect access to shard_crs
// TODO: it should be enough to hold a reference on the stack only, as calling
// RGWCoroutinesStack::wakeup() doesn't refer to the RGWCoroutine if it has
// already completed
using ControlCRRef = boost::intrusive_ptr<RGWMetaSyncShardControlCR>;
using StackRef = boost::intrusive_ptr<RGWCoroutinesStack>;
using RefPair = std::pair<ControlCRRef, StackRef>;
boost::intrusive_ptr<RGWCoroutinesStack> notify_stack;
map<int, RefPair> shard_crs;
int ret{0};
public:
RGWMetaSyncCR(RGWMetaSyncEnv *_sync_env, const RGWPeriodHistory::Cursor &cursor,
const rgw_meta_sync_status& _sync_status, RGWSyncTraceNodeRef& _tn)
: RGWCoroutine(_sync_env->cct), sync_env(_sync_env),
pool(sync_env->store->svc()->zone->get_zone_params().log_pool),
cursor(cursor), sync_status(_sync_status), tn(_tn) {}
~RGWMetaSyncCR() {
}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
ldpp_dout(dpp, 10) << "broadcast sync lock notify" << dendl;
notify_stack.reset(spawn(new RGWMetaSyncShardNotifyCR(sync_env, tn), false));
}
// loop through one period at a time
tn->log(1, "start");
for (;;) {
if (cursor == sync_env->store->svc()->mdlog->get_period_history()->get_current()) {
next = RGWPeriodHistory::Cursor{};
if (cursor) {
ldpp_dout(dpp, 10) << "RGWMetaSyncCR on current period="
<< cursor.get_period().get_id() << dendl;
} else {
ldpp_dout(dpp, 10) << "RGWMetaSyncCR with no period" << dendl;
}
} else {
next = cursor;
next.next();
ldpp_dout(dpp, 10) << "RGWMetaSyncCR on period="
<< cursor.get_period().get_id() << ", next="
<< next.get_period().get_id() << dendl;
}
yield {
// get the mdlog for the current period (may be empty)
auto& period_id = sync_status.sync_info.period;
auto realm_epoch = sync_status.sync_info.realm_epoch;
auto mdlog = sync_env->store->svc()->mdlog->get_log(period_id);
tn->log(1, SSTR("realm epoch=" << realm_epoch << " period id=" << period_id));
// prevent wakeup() from accessing shard_crs while we're spawning them
std::lock_guard<std::mutex> lock(mutex);
// sync this period on each shard
for (const auto& m : sync_status.sync_markers) {
uint32_t shard_id = m.first;
auto& marker = m.second;
std::string period_marker;
if (next) {
// read the maximum marker from the next period's sync status
period_marker = next.get_period().get_sync_status()[shard_id];
if (period_marker.empty()) {
// no metadata changes have occurred on this shard, skip it
ldpp_dout(dpp, 10) << "RGWMetaSyncCR: skipping shard " << shard_id
<< " with empty period marker" << dendl;
continue;
}
}
using ShardCR = RGWMetaSyncShardControlCR;
auto cr = new ShardCR(sync_env, pool, period_id, realm_epoch,
mdlog, shard_id, marker,
std::move(period_marker), tn);
auto stack = spawn(cr, false);
shard_crs[shard_id] = RefPair{cr, stack};
}
}
// wait for each shard to complete
while (ret == 0 && num_spawned() > 1) {
yield wait_for_child();
collect(&ret, nullptr);
}
drain_all_but_stack(notify_stack.get());
{
// drop shard cr refs under lock
std::lock_guard<std::mutex> lock(mutex);
shard_crs.clear();
}
if (ret < 0) {
return set_cr_error(ret);
}
// advance to the next period
ceph_assert(next);
cursor = next;
// write the updated sync info
sync_status.sync_info.period = cursor.get_period().get_id();
sync_status.sync_info.realm_epoch = cursor.get_epoch();
yield call(new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, sync_env->store,
rgw_raw_obj(pool, sync_env->status_oid()),
sync_status.sync_info));
}
notify_stack.get()->cancel();
drain_all();
}
return 0;
}
void wakeup(int shard_id) {
std::lock_guard<std::mutex> lock(mutex);
auto iter = shard_crs.find(shard_id);
if (iter == shard_crs.end()) {
return;
}
iter->second.first->wakeup();
}
};
void RGWRemoteMetaLog::init_sync_env(RGWMetaSyncEnv *env) {
env->dpp = dpp;
env->cct = store->ctx();
env->store = store;
env->conn = conn;
env->async_rados = async_rados;
env->http_manager = &http_manager;
env->error_logger = error_logger;
env->sync_tracer = store->getRados()->get_sync_tracer();
}
int RGWRemoteMetaLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
// cannot run concurrently with run_sync(), so run in a separate manager
RGWCoroutinesManager crs(store->ctx(), store->getRados()->get_cr_registry());
RGWHTTPManager http_manager(store->ctx(), crs.get_completion_mgr());
int ret = http_manager.start();
if (ret < 0) {
ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
return ret;
}
RGWMetaSyncEnv sync_env_local = sync_env;
sync_env_local.http_manager = &http_manager;
tn->log(20, "read sync status");
ret = crs.run(dpp, new RGWReadSyncStatusCoroutine(&sync_env_local, sync_status));
http_manager.stop();
return ret;
}
int RGWRemoteMetaLog::init_sync_status(const DoutPrefixProvider *dpp)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
rgw_mdlog_info mdlog_info;
int r = read_log_info(dpp, &mdlog_info);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl;
return r;
}
rgw_meta_sync_info sync_info;
sync_info.num_shards = mdlog_info.num_shards;
auto cursor = store->svc()->mdlog->get_period_history()->get_current();
if (cursor) {
sync_info.period = cursor.get_period().get_id();
sync_info.realm_epoch = cursor.get_epoch();
}
return run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_info));
}
int RGWRemoteMetaLog::store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info)
{
tn->log(20, "store sync info");
return run(dpp, new RGWSimpleRadosWriteCR<rgw_meta_sync_info>(dpp, store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, sync_env.status_oid()),
sync_info));
}
// return a cursor to the period at our sync position
static RGWPeriodHistory::Cursor get_period_at(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
const rgw_meta_sync_info& info,
optional_yield y)
{
if (info.period.empty()) {
// return an empty cursor with error=0
return RGWPeriodHistory::Cursor{};
}
// look for an existing period in our history
auto cursor = store->svc()->mdlog->get_period_history()->lookup(info.realm_epoch);
if (cursor) {
// verify that the period ids match
auto& existing = cursor.get_period().get_id();
if (existing != info.period) {
ldpp_dout(dpp, -1) << "ERROR: sync status period=" << info.period
<< " does not match period=" << existing
<< " in history at realm epoch=" << info.realm_epoch << dendl;
return RGWPeriodHistory::Cursor{-EEXIST};
}
return cursor;
}
// read the period from rados or pull it from the master
RGWPeriod period;
int r = store->svc()->mdlog->pull_period(dpp, info.period, period, y);
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed to read period id "
<< info.period << ": " << cpp_strerror(r) << dendl;
return RGWPeriodHistory::Cursor{r};
}
// attach the period to our history
cursor = store->svc()->mdlog->get_period_history()->attach(dpp, std::move(period), y);
if (!cursor) {
r = cursor.get_error();
ldpp_dout(dpp, -1) << "ERROR: failed to read period history back to "
<< info.period << ": " << cpp_strerror(r) << dendl;
}
return cursor;
}
int RGWRemoteMetaLog::run_sync(const DoutPrefixProvider *dpp, optional_yield y)
{
if (store->svc()->zone->is_meta_master()) {
return 0;
}
int r = 0;
// get shard count and oldest log period from master
rgw_mdlog_info mdlog_info;
for (;;) {
if (going_down) {
ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl;
return 0;
}
r = read_log_info(dpp, &mdlog_info);
if (r == -EIO || r == -ENOENT) {
// keep retrying if master isn't alive or hasn't initialized the log
ldpp_dout(dpp, 10) << __func__ << "(): waiting for master.." << dendl;
backoff.backoff_sleep();
continue;
}
backoff.reset();
if (r < 0) {
ldpp_dout(dpp, -1) << "ERROR: fail to fetch master log info (r=" << r << ")" << dendl;
return r;
}
break;
}
rgw_meta_sync_status sync_status;
do {
if (going_down) {
ldpp_dout(dpp, 1) << __func__ << "(): going down" << dendl;
return 0;
}
r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status));
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch sync status r=" << r << dendl;
return r;
}
if (!mdlog_info.period.empty()) {
// restart sync if the remote has a period, but:
// a) our status does not, or
// b) our sync period comes before the remote's oldest log period
if (sync_status.sync_info.period.empty() ||
sync_status.sync_info.realm_epoch < mdlog_info.realm_epoch) {
sync_status.sync_info.state = rgw_meta_sync_info::StateInit;
string reason;
if (sync_status.sync_info.period.empty()) {
reason = "period is empty";
} else {
reason = SSTR("sync_info realm epoch is behind: " << sync_status.sync_info.realm_epoch << " < " << mdlog_info.realm_epoch);
}
tn->log(1, "initialize sync (reason: " + reason + ")");
ldpp_dout(dpp, 1) << "epoch=" << sync_status.sync_info.realm_epoch
<< " in sync status comes before remote's oldest mdlog epoch="
<< mdlog_info.realm_epoch << ", restarting sync" << dendl;
}
}
if (sync_status.sync_info.state == rgw_meta_sync_info::StateInit) {
ldpp_dout(dpp, 20) << __func__ << "(): init" << dendl;
sync_status.sync_info.num_shards = mdlog_info.num_shards;
auto cursor = store->svc()->mdlog->get_period_history()->get_current();
if (cursor) {
// run full sync, then start incremental from the current period/epoch
sync_status.sync_info.period = cursor.get_period().get_id();
sync_status.sync_info.realm_epoch = cursor.get_epoch();
}
r = run(dpp, new RGWInitSyncStatusCoroutine(&sync_env, sync_status.sync_info));
if (r == -EBUSY) {
backoff.backoff_sleep();
continue;
}
backoff.reset();
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to init sync status r=" << r << dendl;
return r;
}
}
} while (sync_status.sync_info.state == rgw_meta_sync_info::StateInit);
auto num_shards = sync_status.sync_info.num_shards;
if (num_shards != mdlog_info.num_shards) {
ldpp_dout(dpp, -1) << "ERROR: can't sync, mismatch between num shards, master num_shards=" << mdlog_info.num_shards << " local num_shards=" << num_shards << dendl;
return -EINVAL;
}
// construct and start the bid manager for sync fairness
const auto& control_pool = store->svc()->zone->get_zone_params().control_pool;
auto control_obj = rgw_raw_obj{control_pool, meta_sync_bids_oid};
auto bid_manager = rgw::sync_fairness::create_rados_bid_manager(
store, control_obj, num_shards);
r = bid_manager->start();
if (r < 0) {
return r;
}
sync_env.bid_manager = bid_manager.get();
RGWPeriodHistory::Cursor cursor;
do {
r = run(dpp, new RGWReadSyncStatusCoroutine(&sync_env, &sync_status));
if (r < 0 && r != -ENOENT) {
tn->log(0, SSTR("ERROR: failed to fetch sync status r=" << r));
return r;
}
switch ((rgw_meta_sync_info::SyncState)sync_status.sync_info.state) {
case rgw_meta_sync_info::StateBuildingFullSyncMaps:
tn->log(20, "building full sync maps");
r = run(dpp, new RGWFetchAllMetaCR(&sync_env, num_shards, sync_status.sync_markers, tn));
if (r == -EBUSY || r == -EIO) {
backoff.backoff_sleep();
continue;
}
backoff.reset();
if (r < 0) {
tn->log(0, SSTR("ERROR: failed to fetch all metadata keys (r=" << r << ")"));
return r;
}
sync_status.sync_info.state = rgw_meta_sync_info::StateSync;
r = store_sync_info(dpp, sync_status.sync_info);
if (r < 0) {
tn->log(0, SSTR("ERROR: failed to update sync status (r=" << r << ")"));
return r;
}
/* fall through */
case rgw_meta_sync_info::StateSync:
tn->log(20, "sync");
// find our position in the period history (if any)
cursor = get_period_at(dpp, store, sync_status.sync_info, y);
r = cursor.get_error();
if (r < 0) {
return r;
}
meta_sync_cr = new RGWMetaSyncCR(&sync_env, cursor, sync_status, tn);
r = run(dpp, meta_sync_cr);
if (r < 0) {
tn->log(0, "ERROR: failed to fetch all metadata keys");
return r;
}
break;
default:
tn->log(0, "ERROR: bad sync state!");
return -EIO;
}
} while (!going_down);
return 0;
}
void RGWRemoteMetaLog::wakeup(int shard_id)
{
if (!meta_sync_cr) {
return;
}
meta_sync_cr->wakeup(shard_id);
}
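// state machine: read the local shard's status to find our position, fetch
// a batch of remote entries over REST, store them in the local mdlog shard,
// and repeat while the remote listing is truncated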
int RGWCloneMetaLogCoroutine::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
do {
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": init request" << dendl;
return state_init();
}
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status" << dendl;
return state_read_shard_status();
}
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": reading shard status complete" << dendl;
return state_read_shard_status_complete();
}
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": sending rest request" << dendl;
return state_send_rest_request(dpp);
}
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": receiving rest response" << dendl;
return state_receive_rest_response();
}
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries" << dendl;
return state_store_mdlog_entries();
}
} while (truncated);
yield {
ldpp_dout(dpp, 20) << __func__ << ": shard_id=" << shard_id << ": storing mdlog entries complete" << dendl;
return state_store_mdlog_entries_complete();
}
}
return 0;
}
int RGWCloneMetaLogCoroutine::state_init()
{
data = rgw_mdlog_shard_data();
return 0;
}
int RGWCloneMetaLogCoroutine::state_read_shard_status()
{
const bool add_ref = false; // default constructs with refs=1
completion.reset(new RGWMetadataLogInfoCompletion(
[this](int ret, const cls_log_header& header) {
if (ret < 0) {
if (ret != -ENOENT) {
ldpp_dout(sync_env->dpp, 1) << "ERROR: failed to read mdlog info with "
<< cpp_strerror(ret) << dendl;
}
} else {
shard_info.marker = header.max_marker;
shard_info.last_update = header.max_time.to_real_time();
}
// wake up parent stack
io_complete();
}), add_ref);
int ret = mdlog->get_info_async(sync_env->dpp, shard_id, completion.get());
if (ret < 0) {
ldpp_dout(sync_env->dpp, 0) << "ERROR: mdlog->get_info_async() returned ret=" << ret << dendl;
return set_cr_error(ret);
}
return io_block(0);
}
int RGWCloneMetaLogCoroutine::state_read_shard_status_complete()
{
completion.reset();
ldpp_dout(sync_env->dpp, 20) << "shard_id=" << shard_id << " marker=" << shard_info.marker << " last_update=" << shard_info.last_update << dendl;
marker = shard_info.marker;
return 0;
}
int RGWCloneMetaLogCoroutine::state_send_rest_request(const DoutPrefixProvider *dpp)
{
RGWRESTConn *conn = sync_env->conn;
char buf[32];
snprintf(buf, sizeof(buf), "%d", shard_id);
char max_entries_buf[32];
snprintf(max_entries_buf, sizeof(max_entries_buf), "%d", max_entries);
const char *marker_key = (marker.empty() ? "" : "marker");
rgw_http_param_pair pairs[] = { { "type", "metadata" },
{ "id", buf },
{ "period", period.c_str() },
{ "max-entries", max_entries_buf },
{ marker_key, marker.c_str() },
{ NULL, NULL } };
http_op = new RGWRESTReadResource(conn, "/admin/log", pairs, NULL, sync_env->http_manager);
init_new_io(http_op);
int ret = http_op->aio_read(dpp);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch mdlog data" << dendl;
log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
http_op->put();
http_op = NULL;
return set_cr_error(ret);
}
return io_block(0);
}
int RGWCloneMetaLogCoroutine::state_receive_rest_response()
{
int ret = http_op->wait(&data, null_yield);
if (ret < 0) {
error_stream << "http operation failed: " << http_op->to_str() << " status=" << http_op->get_http_status() << std::endl;
ldpp_dout(sync_env->dpp, 5) << "failed to wait for op, ret=" << ret << dendl;
http_op->put();
http_op = NULL;
return set_cr_error(ret);
}
http_op->put();
http_op = NULL;
ldpp_dout(sync_env->dpp, 20) << "remote mdlog, shard_id=" << shard_id << " num of shard entries: " << data.entries.size() << dendl;
truncated = ((int)data.entries.size() == max_entries);
if (data.entries.empty()) {
if (new_marker) {
*new_marker = marker;
}
return set_cr_done();
}
if (new_marker) {
*new_marker = data.entries.back().id;
}
return 0;
}
int RGWCloneMetaLogCoroutine::state_store_mdlog_entries()
{
list<cls_log_entry> dest_entries;
vector<rgw_mdlog_entry>::iterator iter;
for (iter = data.entries.begin(); iter != data.entries.end(); ++iter) {
rgw_mdlog_entry& entry = *iter;
ldpp_dout(sync_env->dpp, 20) << "entry: name=" << entry.name << dendl;
cls_log_entry dest_entry;
dest_entry.id = entry.id;
dest_entry.section = entry.section;
dest_entry.name = entry.name;
dest_entry.timestamp = utime_t(entry.timestamp);
encode(entry.log_data, dest_entry.data);
dest_entries.push_back(dest_entry);
marker = entry.id;
}
RGWAioCompletionNotifier *cn = stack->create_completion_notifier();
int ret = mdlog->store_entries_in_shard(sync_env->dpp, dest_entries, shard_id, cn->completion());
if (ret < 0) {
cn->put();
ldpp_dout(sync_env->dpp, 10) << "failed to store md log entries shard_id=" << shard_id << " ret=" << ret << dendl;
return set_cr_error(ret);
}
return io_block(0);
}
int RGWCloneMetaLogCoroutine::state_store_mdlog_entries_complete()
{
return set_cr_done();
}
void rgw_meta_sync_info::decode_json(JSONObj *obj)
{
string s;
JSONDecoder::decode_json("status", s, obj);
if (s == "init") {
state = StateInit;
} else if (s == "building-full-sync-maps") {
state = StateBuildingFullSyncMaps;
} else if (s == "sync") {
state = StateSync;
}
JSONDecoder::decode_json("num_shards", num_shards, obj);
JSONDecoder::decode_json("period", period, obj);
JSONDecoder::decode_json("realm_epoch", realm_epoch, obj);
}
void rgw_meta_sync_info::dump(Formatter *f) const
{
string s;
switch ((SyncState)state) {
case StateInit:
s = "init";
break;
case StateBuildingFullSyncMaps:
s = "building-full-sync-maps";
break;
case StateSync:
s = "sync";
break;
default:
s = "unknown";
break;
}
encode_json("status", s, f);
encode_json("num_shards", num_shards, f);
encode_json("period", period, f);
encode_json("realm_epoch", realm_epoch, f);
}
void rgw_meta_sync_marker::decode_json(JSONObj *obj)
{
int s;
JSONDecoder::decode_json("state", s, obj);
state = s;
JSONDecoder::decode_json("marker", marker, obj);
JSONDecoder::decode_json("next_step_marker", next_step_marker, obj);
JSONDecoder::decode_json("total_entries", total_entries, obj);
JSONDecoder::decode_json("pos", pos, obj);
utime_t ut;
JSONDecoder::decode_json("timestamp", ut, obj);
timestamp = ut.to_real_time();
JSONDecoder::decode_json("realm_epoch", realm_epoch, obj);
}
void rgw_meta_sync_marker::dump(Formatter *f) const
{
encode_json("state", (int)state, f);
encode_json("marker", marker, f);
encode_json("next_step_marker", next_step_marker, f);
encode_json("total_entries", total_entries, f);
encode_json("pos", pos, f);
encode_json("timestamp", utime_t(timestamp), f);
encode_json("realm_epoch", realm_epoch, f);
}
void rgw_meta_sync_status::decode_json(JSONObj *obj)
{
JSONDecoder::decode_json("info", sync_info, obj);
JSONDecoder::decode_json("markers", sync_markers, obj);
}
void rgw_meta_sync_status::dump(Formatter *f) const {
encode_json("info", sync_info, f);
encode_json("markers", sync_markers, f);
}
void rgw_sync_error_info::dump(Formatter *f) const {
encode_json("source_zone", source_zone, f);
encode_json("error_code", error_code, f);
encode_json("message", message, f);
}
| 90,697 | 33.238581 | 258 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <atomic>
#include "include/stringify.h"
#include "rgw_coroutine.h"
#include "rgw_http_client.h"
#include "rgw_metadata.h"
#include "rgw_meta_sync_status.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "rgw_sync_trace.h"
#include "rgw_mdlog.h"
#include "sync_fairness.h"
#define ERROR_LOGGER_SHARDS 32
#define RGW_SYNC_ERROR_LOG_SHARD_PREFIX "sync.error-log"
struct rgw_mdlog_info {
uint32_t num_shards;
std::string period; //< period id of the master's oldest metadata log
epoch_t realm_epoch; //< realm epoch of oldest metadata log
rgw_mdlog_info() : num_shards(0), realm_epoch(0) {}
void decode_json(JSONObj *obj);
};
struct rgw_mdlog_entry {
std::string id;
std::string section;
std::string name;
ceph::real_time timestamp;
RGWMetadataLogData log_data;
void decode_json(JSONObj *obj);
bool convert_from(cls_log_entry& le) {
id = le.id;
section = le.section;
name = le.name;
timestamp = le.timestamp.to_real_time();
try {
auto iter = le.data.cbegin();
decode(log_data, iter);
} catch (buffer::error& err) {
return false;
}
return true;
}
};
struct rgw_mdlog_shard_data {
std::string marker;
bool truncated;
std::vector<rgw_mdlog_entry> entries;
void decode_json(JSONObj *obj);
};
class RGWAsyncRadosProcessor;
class RGWMetaSyncStatusManager;
class RGWMetaSyncCR;
class RGWRESTConn;
class RGWSyncTraceManager;
class RGWSyncErrorLogger {
rgw::sal::RadosStore* store;
std::vector<std::string> oids;
int num_shards;
std::atomic<int64_t> counter = { 0 };
public:
RGWSyncErrorLogger(rgw::sal::RadosStore* _store, const std::string &oid_prefix, int _num_shards);
RGWCoroutine *log_error_cr(const DoutPrefixProvider *dpp, const std::string& source_zone, const std::string& section, const std::string& name, uint32_t error_code, const std::string& message);
static std::string get_shard_oid(const std::string& oid_prefix, int shard_id);
};
struct rgw_sync_error_info {
std::string source_zone;
uint32_t error_code;
std::string message;
rgw_sync_error_info() : error_code(0) {}
rgw_sync_error_info(const std::string& _source_zone, uint32_t _error_code, const std::string& _message) : source_zone(_source_zone), error_code(_error_code), message(_message) {}
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(source_zone, bl);
encode(error_code, bl);
encode(message, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(source_zone, bl);
decode(error_code, bl);
decode(message, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
};
WRITE_CLASS_ENCODER(rgw_sync_error_info)
#define DEFAULT_BACKOFF_MAX 30
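// helper for backoff between retries: backoff_sleep() waits for a period
// that grows on each call, capped at max_secs; reset() clears it on success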
class RGWSyncBackoff {
int cur_wait;
int max_secs;
void update_wait_time();
public:
explicit RGWSyncBackoff(int _max_secs = DEFAULT_BACKOFF_MAX) : cur_wait(0), max_secs(_max_secs) {}
void backoff_sleep();
void reset() {
cur_wait = 0;
}
void backoff(RGWCoroutine *op);
};
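// runs the coroutine returned by alloc_cr() in a retry loop with backoff.
// the child can clear the backoff through backoff_ptr() once it makes
// progress; exit_on_error controls whether a failure ends the loop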
class RGWBackoffControlCR : public RGWCoroutine
{
RGWCoroutine *cr;
ceph::mutex lock;
RGWSyncBackoff backoff;
bool reset_backoff;
bool exit_on_error;
protected:
bool *backoff_ptr() {
return &reset_backoff;
}
ceph::mutex& cr_lock() {
return lock;
}
RGWCoroutine *get_cr() {
return cr;
}
public:
RGWBackoffControlCR(CephContext *_cct, bool _exit_on_error)
: RGWCoroutine(_cct),
cr(nullptr),
lock(ceph::make_mutex("RGWBackoffControlCR::lock:" + stringify(this))),
reset_backoff(false), exit_on_error(_exit_on_error) {
}
~RGWBackoffControlCR() override {
if (cr) {
cr->put();
}
}
virtual RGWCoroutine *alloc_cr() = 0;
virtual RGWCoroutine *alloc_finisher_cr() { return NULL; }
int operate(const DoutPrefixProvider *dpp) override;
};
struct RGWMetaSyncEnv {
const DoutPrefixProvider *dpp;
CephContext *cct{nullptr};
rgw::sal::RadosStore* store{nullptr};
RGWRESTConn *conn{nullptr};
RGWAsyncRadosProcessor *async_rados{nullptr};
RGWHTTPManager *http_manager{nullptr};
RGWSyncErrorLogger *error_logger{nullptr};
RGWSyncTraceManager *sync_tracer{nullptr};
rgw::sync_fairness::BidManager* bid_manager{nullptr};
RGWMetaSyncEnv() {}
void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _store, RGWRESTConn *_conn,
RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager,
RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer);
std::string shard_obj_name(int shard_id);
std::string status_oid();
};
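// drives metadata sync from the master zone: reads remote mdlog info,
// maintains the local sync status objects, and runs RGWMetaSyncCR in
// run_sync() until going_down is set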
class RGWRemoteMetaLog : public RGWCoroutinesManager {
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWRESTConn *conn;
RGWAsyncRadosProcessor *async_rados;
RGWHTTPManager http_manager;
RGWMetaSyncStatusManager *status_manager;
RGWSyncErrorLogger *error_logger{nullptr};
RGWSyncTraceManager *sync_tracer{nullptr};
RGWMetaSyncCR *meta_sync_cr{nullptr};
RGWSyncBackoff backoff;
RGWMetaSyncEnv sync_env;
void init_sync_env(RGWMetaSyncEnv *env);
int store_sync_info(const DoutPrefixProvider *dpp, const rgw_meta_sync_info& sync_info);
std::atomic<bool> going_down = { false };
RGWSyncTraceNodeRef tn;
public:
RGWRemoteMetaLog(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* _store,
RGWAsyncRadosProcessor *async_rados,
RGWMetaSyncStatusManager *_sm)
: RGWCoroutinesManager(_store->ctx(), _store->getRados()->get_cr_registry()),
dpp(dpp), store(_store), conn(NULL), async_rados(async_rados),
http_manager(store->ctx(), completion_mgr),
status_manager(_sm) {}
~RGWRemoteMetaLog() override;
int init();
void finish();
int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info);
int read_master_log_shards_info(const DoutPrefixProvider *dpp, const std::string& master_period, std::map<int, RGWMetadataLogInfo> *shards_info);
int read_master_log_shards_next(const DoutPrefixProvider *dpp, const std::string& period, std::map<int, std::string> shard_markers, std::map<int, rgw_mdlog_shard_data> *result);
int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status);
int init_sync_status(const DoutPrefixProvider *dpp);
int run_sync(const DoutPrefixProvider *dpp, optional_yield y);
void wakeup(int shard_id);
RGWMetaSyncEnv& get_sync_env() {
return sync_env;
}
};
class RGWMetaSyncStatusManager : public DoutPrefixProvider {
rgw::sal::RadosStore* store;
librados::IoCtx ioctx;
RGWRemoteMetaLog master_log;
std::map<int, rgw_raw_obj> shard_objs;
struct utime_shard {
real_time ts;
int shard_id;
utime_shard() : shard_id(-1) {}
bool operator<(const utime_shard& rhs) const {
if (ts == rhs.ts) {
return shard_id < rhs.shard_id;
}
return ts < rhs.ts;
}
};
ceph::shared_mutex ts_to_shard_lock = ceph::make_shared_mutex("ts_to_shard_lock");
std::map<utime_shard, int> ts_to_shard;
std::vector<std::string> clone_markers;
public:
RGWMetaSyncStatusManager(rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados)
: store(_store), master_log(this, store, async_rados, this)
{}
virtual ~RGWMetaSyncStatusManager() override;
int init(const DoutPrefixProvider *dpp);
int read_sync_status(const DoutPrefixProvider *dpp, rgw_meta_sync_status *sync_status) {
return master_log.read_sync_status(dpp, sync_status);
}
int init_sync_status(const DoutPrefixProvider *dpp) { return master_log.init_sync_status(dpp); }
int read_log_info(const DoutPrefixProvider *dpp, rgw_mdlog_info *log_info) {
return master_log.read_log_info(dpp, log_info);
}
int read_master_log_shards_info(const DoutPrefixProvider *dpp, const std::string& master_period, std::map<int, RGWMetadataLogInfo> *shards_info) {
return master_log.read_master_log_shards_info(dpp, master_period, shards_info);
}
int read_master_log_shards_next(const DoutPrefixProvider *dpp, const std::string& period, std::map<int, std::string> shard_markers, std::map<int, rgw_mdlog_shard_data> *result) {
return master_log.read_master_log_shards_next(dpp, period, shard_markers, result);
}
int run(const DoutPrefixProvider *dpp, optional_yield y) { return master_log.run_sync(dpp, y); }
// implements DoutPrefixProvider
CephContext *get_cct() const override { return store->ctx(); }
unsigned get_subsys() const override;
std::ostream& gen_prefix(std::ostream& out) const override;
void wakeup(int shard_id) { return master_log.wakeup(shard_id); }
void stop() {
master_log.finish();
}
};
class RGWOrderCallCR : public RGWCoroutine
{
public:
RGWOrderCallCR(CephContext *cct) : RGWCoroutine(cct) {}
virtual void call_cr(RGWCoroutine *_cr) = 0;
};
class RGWLastCallerWinsCR : public RGWOrderCallCR
{
RGWCoroutine *cr{nullptr};
public:
explicit RGWLastCallerWinsCR(CephContext *cct) : RGWOrderCallCR(cct) {}
~RGWLastCallerWinsCR() {
if (cr) {
cr->put();
}
}
int operate(const DoutPrefixProvider *dpp) override;
void call_cr(RGWCoroutine *_cr) override {
if (cr) {
cr->put();
}
cr = _cr;
}
};
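// tracks in-flight sync positions for one shard using a bounded window:
// start()/finish() bracket each entry, and once the oldest pending entry
// completes (or enough updates accumulate) flush() persists the highest
// safe marker via the subclass's store_marker(), serialized through an
// RGWOrderCallCR so a newer flush supersedes an older one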
template <class T, class K>
class RGWSyncShardMarkerTrack {
struct marker_entry {
uint64_t pos;
real_time timestamp;
marker_entry() : pos(0) {}
marker_entry(uint64_t _p, const real_time& _ts) : pos(_p), timestamp(_ts) {}
};
typename std::map<T, marker_entry> pending;
std::map<T, marker_entry> finish_markers;
int window_size;
int updates_since_flush;
RGWOrderCallCR *order_cr{nullptr};
protected:
typename std::set<K> need_retry_set;
virtual RGWCoroutine *store_marker(const T& new_marker, uint64_t index_pos, const real_time& timestamp) = 0;
virtual RGWOrderCallCR *allocate_order_control_cr() = 0;
virtual void handle_finish(const T& marker) { }
public:
RGWSyncShardMarkerTrack(int _window_size) : window_size(_window_size), updates_since_flush(0) {}
virtual ~RGWSyncShardMarkerTrack() {
if (order_cr) {
order_cr->put();
}
}
bool start(const T& pos, int index_pos, const real_time& timestamp) {
if (pending.find(pos) != pending.end()) {
return false;
}
pending[pos] = marker_entry(index_pos, timestamp);
return true;
}
void try_update_high_marker(const T& pos, int index_pos, const real_time& timestamp) {
finish_markers[pos] = marker_entry(index_pos, timestamp);
}
RGWCoroutine *finish(const T& pos) {
if (pending.empty()) {
/* can happen, due to a bug that ended up with multiple objects with the same name and version
* -- which can happen when versioning is enabled and the version is 'null'.
*/
return NULL;
}
typename std::map<T, marker_entry>::iterator iter = pending.begin();
bool is_first = (pos == iter->first);
typename std::map<T, marker_entry>::iterator pos_iter = pending.find(pos);
if (pos_iter == pending.end()) {
/* see pending.empty() comment */
return NULL;
}
finish_markers[pos] = pos_iter->second;
pending.erase(pos);
handle_finish(pos);
updates_since_flush++;
if (is_first && (updates_since_flush >= window_size || pending.empty())) {
return flush();
}
return NULL;
}
RGWCoroutine *flush() {
if (finish_markers.empty()) {
return NULL;
}
typename std::map<T, marker_entry>::iterator i;
if (pending.empty()) {
i = finish_markers.end();
} else {
i = finish_markers.lower_bound(pending.begin()->first);
}
if (i == finish_markers.begin()) {
return NULL;
}
updates_since_flush = 0;
auto last = i;
--i;
const T& high_marker = i->first;
marker_entry& high_entry = i->second;
RGWCoroutine *cr = order(store_marker(high_marker, high_entry.pos, high_entry.timestamp));
finish_markers.erase(finish_markers.begin(), last);
return cr;
}
/*
* a key needs retry if it was being processed when another marker pointing
* to the same bucket shard arrived. Instead of processing the new marker
* immediately, we mark the key as need_retry so that when we finish
* processing the original we reprocess the same bucket shard, in case more
* entries arrived in the meantime. This closes the race between the two.
*/
bool need_retry(const K& key) {
return (need_retry_set.find(key) != need_retry_set.end());
}
void set_need_retry(const K& key) {
need_retry_set.insert(key);
}
void reset_need_retry(const K& key) {
need_retry_set.erase(key);
}
RGWCoroutine *order(RGWCoroutine *cr) {
/* either returns a new RGWLastCallerWinsCR, or updates the existing one, in which case it returns
 * nothing and the existing one will call the cr
*/
if (order_cr && order_cr->is_done()) {
order_cr->put();
order_cr = nullptr;
}
if (!order_cr) {
order_cr = allocate_order_control_cr();
order_cr->get();
order_cr->call_cr(cr);
return order_cr;
}
order_cr->call_cr(cr);
return nullptr; /* don't call it a second time */
}
};
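/*
 * usage sketch (hypothetical subclass, for illustration only -- concrete
 * trackers such as RGWMetaSyncShardMarkerTrack live in the sync code):
 *
 *   struct MyMarkerTrack : RGWSyncShardMarkerTrack<std::string, std::string> {
 *     MyMarkerTrack() : RGWSyncShardMarkerTrack(10) {} // flush ~every 10 entries
 *     RGWCoroutine *store_marker(const std::string& m, uint64_t pos,
 *                                const real_time& ts) override {
 *       return new MyWriteMarkerCR(m, pos, ts); // hypothetical cr persisting the marker
 *     }
 *     RGWOrderCallCR *allocate_order_control_cr() override {
 *       return new RGWLastCallerWinsCR(cct); // 'cct' supplied by the subclass
 *     }
 *   };
 *
 * callers bracket each entry with start(pos, ...) and finish(pos); when
 * finish() hands back a coroutine it must be called (e.g. yield call(...))
 * to persist the marker.
 */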
class RGWMetaSyncShardMarkerTrack;
class RGWMetaSyncSingleEntryCR : public RGWCoroutine {
RGWMetaSyncEnv *sync_env;
std::string raw_key;
std::string entry_marker;
RGWMDLogStatus op_status;
ssize_t pos;
std::string section;
std::string key;
int sync_status;
bufferlist md_bl;
RGWMetaSyncShardMarkerTrack *marker_tracker;
int tries;
bool error_injection;
RGWSyncTraceNodeRef tn;
public:
RGWMetaSyncSingleEntryCR(RGWMetaSyncEnv *_sync_env,
const std::string& _raw_key, const std::string& _entry_marker,
const RGWMDLogStatus& _op_status,
RGWMetaSyncShardMarkerTrack *_marker_tracker, const RGWSyncTraceNodeRef& _tn_parent);
int operate(const DoutPrefixProvider *dpp) override;
};
class RGWShardCollectCR : public RGWCoroutine {
int current_running = 0;
protected:
int max_concurrent;
int status = 0;
// called with the result of each child. error codes can be ignored by
// returning 0. if handle_result() returns a negative value, it's
// treated as an error and stored in 'status'. the last such error is
// reported to the caller with set_cr_error()
virtual int handle_result(int r) = 0;
public:
RGWShardCollectCR(CephContext *_cct, int _max_concurrent)
: RGWCoroutine(_cct), max_concurrent(_max_concurrent)
{}
virtual bool spawn_next() = 0;
int operate(const DoutPrefixProvider *dpp) override;
};
// factory functions for meta sync coroutines needed in mdlog trimming
RGWCoroutine* create_read_remote_mdlog_shard_info_cr(RGWMetaSyncEnv *env,
const std::string& period,
int shard_id,
RGWMetadataLogInfo* info);
RGWCoroutine* create_list_remote_mdlog_shard_cr(RGWMetaSyncEnv *env,
const std::string& period,
int shard_id,
const std::string& marker,
uint32_t max_entries,
rgw_mdlog_shard_data *result);
| 15,552 | 27.278182 | 194 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_counters.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/ceph_context.h"
#include "rgw_sync_counters.h"
namespace sync_counters {
PerfCountersRef build(CephContext *cct, const std::string& name)
{
PerfCountersBuilder b(cct, name, l_first, l_last);
// share these counters with ceph-mgr
b.set_prio_default(PerfCountersBuilder::PRIO_USEFUL);
b.add_u64_avg(l_fetch, "fetch_bytes", "Number of object bytes replicated");
b.add_u64_counter(l_fetch_not_modified, "fetch_not_modified", "Number of objects already replicated");
b.add_u64_counter(l_fetch_err, "fetch_errors", "Number of object replication errors");
b.add_time_avg(l_poll, "poll_latency", "Average latency of replication log requests");
b.add_u64_counter(l_poll_err, "poll_errors", "Number of replication log request errors");
auto logger = PerfCountersRef{ b.create_perf_counters(), cct };
cct->get_perfcounters_collection()->add(logger.get());
return logger;
}
} // namespace sync_counters
| 1,043 | 35 | 104 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_counters.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "common/perf_counters_collection.h"
namespace sync_counters {
enum {
l_first = 805000,
l_fetch,
l_fetch_not_modified,
l_fetch_err,
l_poll,
l_poll_err,
l_last,
};
PerfCountersRef build(CephContext *cct, const std::string& name);
} // namespace sync_counters
| 407 | 14.692308 | 70 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_error_repo.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "rgw_sync_error_repo.h"
#include "rgw_coroutine.h"
#include "rgw_sal.h"
#include "services/svc_rados.h"
#include "cls/cmpomap/client.h"
namespace rgw::error_repo {
// prefix for the binary encoding of keys. this particular value is not
// valid as the first byte of a utf8 code point, so we use this to
// differentiate the binary encoding from existing string keys for
// backward-compatibility
constexpr uint8_t binary_key_prefix = 0x80;
struct key_type {
rgw_bucket_shard bs;
std::optional<uint64_t> gen;
};
void encode(const key_type& k, bufferlist& bl, uint64_t f=0)
{
ENCODE_START(1, 1, bl);
encode(k.bs, bl);
encode(k.gen, bl);
ENCODE_FINISH(bl);
}
void decode(key_type& k, bufferlist::const_iterator& bl)
{
DECODE_START(1, bl);
decode(k.bs, bl);
decode(k.gen, bl);
DECODE_FINISH(bl);
}
std::string encode_key(const rgw_bucket_shard& bs,
std::optional<uint64_t> gen)
{
using ceph::encode;
const auto key = key_type{bs, gen};
bufferlist bl;
encode(binary_key_prefix, bl);
encode(key, bl);
return bl.to_str();
}
int decode_key(std::string encoded,
rgw_bucket_shard& bs,
std::optional<uint64_t>& gen)
{
using ceph::decode;
key_type key;
const auto bl = bufferlist::static_from_string(encoded);
auto p = bl.cbegin();
try {
uint8_t prefix;
decode(prefix, p);
if (prefix != binary_key_prefix) {
return -EINVAL;
}
decode(key, p);
} catch (const buffer::error&) {
return -EIO;
}
if (!p.end()) {
return -EIO; // buffer contained unexpected bytes
}
bs = std::move(key.bs);
gen = key.gen;
return 0;
}
ceph::real_time decode_value(const bufferlist& bl)
{
uint64_t value;
try {
using ceph::decode;
decode(value, bl);
} catch (const buffer::error&) {
value = 0; // empty buffer = 0
}
return ceph::real_clock::zero() + ceph::timespan(value);
}
int write(librados::ObjectWriteOperation& op,
const std::string& key,
ceph::real_time timestamp)
{
// overwrite the existing timestamp if value is greater
const uint64_t value = timestamp.time_since_epoch().count();
using namespace ::cls::cmpomap;
const bufferlist zero = u64_buffer(0); // compare against 0 for missing keys
return cmp_set_vals(op, Mode::U64, Op::GT, {{key, u64_buffer(value)}}, zero);
}
int remove(librados::ObjectWriteOperation& op,
const std::string& key,
ceph::real_time timestamp)
{
// remove the omap key if value >= existing
const uint64_t value = timestamp.time_since_epoch().count();
using namespace ::cls::cmpomap;
return cmp_rm_keys(op, Mode::U64, Op::GTE, {{key, u64_buffer(value)}});
}
class RGWErrorRepoWriteCR : public RGWSimpleCoroutine {
RGWSI_RADOS::Obj obj;
std::string key;
ceph::real_time timestamp;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
RGWErrorRepoWriteCR(RGWSI_RADOS* rados, const rgw_raw_obj& raw_obj,
const std::string& key, ceph::real_time timestamp)
: RGWSimpleCoroutine(rados->ctx()),
obj(rados->obj(raw_obj)),
key(key), timestamp(timestamp)
{}
int send_request(const DoutPrefixProvider *dpp) override {
librados::ObjectWriteOperation op;
int r = write(op, key, timestamp);
if (r < 0) {
return r;
}
r = obj.open(dpp);
if (r < 0) {
return r;
}
cn = stack->create_completion_notifier();
return obj.aio_operate(cn->completion(), &op);
}
int request_complete() override {
return cn->completion()->get_return_value();
}
};
RGWCoroutine* write_cr(RGWSI_RADOS* rados,
const rgw_raw_obj& obj,
const std::string& key,
ceph::real_time timestamp)
{
return new RGWErrorRepoWriteCR(rados, obj, key, timestamp);
}
class RGWErrorRepoRemoveCR : public RGWSimpleCoroutine {
RGWSI_RADOS::Obj obj;
std::string key;
ceph::real_time timestamp;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
public:
RGWErrorRepoRemoveCR(RGWSI_RADOS* rados, const rgw_raw_obj& raw_obj,
const std::string& key, ceph::real_time timestamp)
: RGWSimpleCoroutine(rados->ctx()),
obj(rados->obj(raw_obj)),
key(key), timestamp(timestamp)
{}
int send_request(const DoutPrefixProvider *dpp) override {
librados::ObjectWriteOperation op;
int r = remove(op, key, timestamp);
if (r < 0) {
return r;
}
r = obj.open(dpp);
if (r < 0) {
return r;
}
cn = stack->create_completion_notifier();
return obj.aio_operate(cn->completion(), &op);
}
int request_complete() override {
return cn->completion()->get_return_value();
}
};
RGWCoroutine* remove_cr(RGWSI_RADOS* rados,
const rgw_raw_obj& obj,
const std::string& key,
ceph::real_time timestamp)
{
return new RGWErrorRepoRemoveCR(rados, obj, key, timestamp);
}
} // namespace rgw::error_repo
| 5,425 | 25.339806 | 79 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_error_repo.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2020 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#pragma once
#include <optional>
#include "include/rados/librados_fwd.hpp"
#include "include/buffer_fwd.h"
#include "common/ceph_time.h"
class RGWSI_RADOS;
class RGWCoroutine;
struct rgw_raw_obj;
struct rgw_bucket_shard;
namespace rgw::error_repo {
// binary-encode a bucket/shard/gen and return it as a string
std::string encode_key(const rgw_bucket_shard& bs,
std::optional<uint64_t> gen);
// try to decode a key. returns -EINVAL if not in binary format
int decode_key(std::string encoded,
rgw_bucket_shard& bs,
std::optional<uint64_t>& gen);
// decode a timestamp as a uint64_t for CMPXATTR_MODE_U64
ceph::real_time decode_value(const ceph::bufferlist& bl);
// write an omap key iff the given timestamp is newer
int write(librados::ObjectWriteOperation& op,
const std::string& key,
ceph::real_time timestamp);
RGWCoroutine* write_cr(RGWSI_RADOS* rados,
const rgw_raw_obj& obj,
const std::string& key,
ceph::real_time timestamp);
// remove an omap key iff there isn't a newer timestamp
int remove(librados::ObjectWriteOperation& op,
const std::string& key,
ceph::real_time timestamp);
RGWCoroutine* remove_cr(RGWSI_RADOS* rados,
const rgw_raw_obj& obj,
const std::string& key,
ceph::real_time timestamp);
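// minimal usage sketch (illustrative, not part of the API surface; 'bs',
// 'gen' and the target rados object are assumed to come from the caller):
//
//   librados::ObjectWriteOperation op;
//   int r = rgw::error_repo::write(op, rgw::error_repo::encode_key(bs, gen),
//                                  ceph::real_clock::now());
//   // submit 'op' against the error-repo object, or use write_cr() when
//   // running inside a coroutine stack.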
} // namespace rgw::error_repo
| 1,892 | 30.55 | 70 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_common.h"
#include "rgw_coroutine.h"
#include "rgw_cr_rados.h"
#include "rgw_sync_module.h"
#include "rgw_data_sync.h"
#include "rgw_bucket.h"
#include "rgw_sync_module_log.h"
#include "rgw_sync_module_es.h"
#include "rgw_sync_module_aws.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
RGWMetadataHandler *RGWSyncModuleInstance::alloc_bucket_meta_handler()
{
return RGWBucketMetaHandlerAllocator::alloc();
}
RGWBucketInstanceMetadataHandlerBase* RGWSyncModuleInstance::alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver)
{
return RGWBucketInstanceMetaHandlerAllocator::alloc(driver);
}
RGWStatRemoteObjCBCR::RGWStatRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
src_bucket(_src_bucket), key(_key) {
}
RGWCallStatRemoteObjCR::RGWCallStatRemoteObjCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
src_bucket(_src_bucket), key(_key) {
}
int RGWCallStatRemoteObjCR::operate(const DoutPrefixProvider *dpp) {
reenter(this) {
yield {
call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->driver,
sc->source_zone,
src_bucket, key, &mtime, &size, &etag, &attrs, &headers));
}
if (retcode < 0) {
ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() returned " << retcode << dendl;
return set_cr_error(retcode);
}
ldpp_dout(dpp, 20) << "stat of remote obj: z=" << sc->source_zone
<< " b=" << src_bucket << " k=" << key
<< " size=" << size << " mtime=" << mtime << dendl;
yield {
RGWStatRemoteObjCBCR *cb = allocate_callback();
if (cb) {
cb->set_result(mtime, size, etag, std::move(attrs), std::move(headers));
call(cb);
}
}
if (retcode < 0) {
ldpp_dout(dpp, 10) << "RGWStatRemoteObjCR() callback returned " << retcode << dendl;
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
void rgw_register_sync_modules(RGWSyncModulesManager *modules_manager)
{
RGWSyncModuleRef default_module(std::make_shared<RGWDefaultSyncModule>());
modules_manager->register_module("rgw", default_module, true);
RGWSyncModuleRef archive_module(std::make_shared<RGWArchiveSyncModule>());
modules_manager->register_module("archive", archive_module);
RGWSyncModuleRef log_module(std::make_shared<RGWLogSyncModule>());
modules_manager->register_module("log", log_module);
RGWSyncModuleRef es_module(std::make_shared<RGWElasticSyncModule>());
modules_manager->register_module("elasticsearch", es_module);
RGWSyncModuleRef aws_module(std::make_shared<RGWAWSSyncModule>());
modules_manager->register_module("cloud", aws_module);
}
| 3,371 | 37.318182 | 133 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_common.h"
#include "rgw_coroutine.h"
class RGWBucketInfo;
class RGWRemoteDataLog;
struct RGWDataSyncCtx;
struct RGWDataSyncEnv;
struct rgw_bucket_entry_owner;
struct rgw_obj_key;
struct rgw_bucket_sync_pipe;
class RGWDataSyncModule {
public:
RGWDataSyncModule() {}
virtual ~RGWDataSyncModule() {}
virtual void init(RGWDataSyncCtx *sync_env, uint64_t instance_id) {}
virtual RGWCoroutine *init_sync(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc) {
return nullptr;
}
virtual RGWCoroutine *start_sync(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc) {
return nullptr;
}
virtual RGWCoroutine *sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
std::optional<uint64_t> versioned_epoch,
const rgw_zone_set_entry& my_trace_entry,
rgw_zone_set *zones_trace) = 0;
virtual RGWCoroutine *remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& bucket_info, rgw_obj_key& key, real_time& mtime,
bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) = 0;
virtual RGWCoroutine *create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& bucket_info, rgw_obj_key& key, real_time& mtime,
rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) = 0;
};
class RGWRESTMgr;
class RGWMetadataHandler;
class RGWBucketInstanceMetadataHandlerBase;
class RGWSyncModuleInstance {
public:
RGWSyncModuleInstance() {}
virtual ~RGWSyncModuleInstance() {}
virtual RGWDataSyncModule *get_data_handler() = 0;
virtual RGWRESTMgr *get_rest_filter(int dialect, RGWRESTMgr *orig) {
return orig;
}
virtual bool supports_user_writes() {
return false;
}
virtual RGWMetadataHandler *alloc_bucket_meta_handler();
virtual RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver);
  // indicates whether the sync module starts with full sync (default behavior);
  // incremental sync would follow anyway
virtual bool should_full_sync() const {
return true;
}
};
typedef std::shared_ptr<RGWSyncModuleInstance> RGWSyncModuleInstanceRef;
class JSONFormattable;
class RGWSyncModule {
public:
RGWSyncModule() {}
virtual ~RGWSyncModule() {}
virtual bool supports_writes() {
return false;
}
virtual bool supports_data_export() = 0;
virtual int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) = 0;
};
typedef std::shared_ptr<RGWSyncModule> RGWSyncModuleRef;
class RGWSyncModulesManager {
ceph::mutex lock = ceph::make_mutex("RGWSyncModulesManager");
std::map<std::string, RGWSyncModuleRef> modules;
public:
RGWSyncModulesManager() = default;
void register_module(const std::string& name, RGWSyncModuleRef& module, bool is_default = false) {
std::lock_guard l{lock};
modules[name] = module;
if (is_default) {
modules[std::string()] = module;
}
}
bool get_module(const std::string& name, RGWSyncModuleRef *module) {
std::lock_guard l{lock};
auto iter = modules.find(name);
if (iter == modules.end()) {
return false;
}
if (module != nullptr) {
*module = iter->second;
}
return true;
}
bool supports_data_export(const std::string& name) {
RGWSyncModuleRef module;
if (!get_module(name, &module)) {
return false;
}
return module->supports_data_export();
}
int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const std::string& name, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) {
RGWSyncModuleRef module;
if (!get_module(name, &module)) {
return -ENOENT;
}
return module.get()->create_instance(dpp, cct, config, instance);
}
std::vector<std::string> get_registered_module_names() const {
std::vector<std::string> names;
for (auto& i: modules) {
if (!i.first.empty()) {
names.push_back(i.first);
}
}
return names;
}
};
class RGWStatRemoteObjCBCR : public RGWCoroutine {
protected:
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
rgw_bucket src_bucket;
rgw_obj_key key;
ceph::real_time mtime;
uint64_t size = 0;
std::string etag;
std::map<std::string, bufferlist> attrs;
std::map<std::string, std::string> headers;
public:
RGWStatRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key);
~RGWStatRemoteObjCBCR() override {}
void set_result(ceph::real_time& _mtime,
uint64_t _size,
const std::string& _etag,
std::map<std::string, bufferlist>&& _attrs,
std::map<std::string, std::string>&& _headers) {
mtime = _mtime;
size = _size;
etag = _etag;
attrs = std::move(_attrs);
headers = std::move(_headers);
}
};
class RGWCallStatRemoteObjCR : public RGWCoroutine {
ceph::real_time mtime;
uint64_t size{0};
std::string etag;
std::map<std::string, bufferlist> attrs;
std::map<std::string, std::string> headers;
protected:
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
rgw_bucket src_bucket;
rgw_obj_key key;
public:
RGWCallStatRemoteObjCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key);
~RGWCallStatRemoteObjCR() override {}
int operate(const DoutPrefixProvider *dpp) override;
virtual RGWStatRemoteObjCBCR *allocate_callback() {
return nullptr;
}
};
void rgw_register_sync_modules(RGWSyncModulesManager *modules_manager);
| 5,993 | 28.382353 | 166 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_aws.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/errno.h"
#include "rgw_common.h"
#include "rgw_coroutine.h"
#include "rgw_sync_module.h"
#include "rgw_data_sync.h"
#include "rgw_sync_module_aws.h"
#include "rgw_cr_rados.h"
#include "rgw_rest_conn.h"
#include "rgw_cr_rest.h"
#include "rgw_acl.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
#define DEFAULT_MULTIPART_SYNC_PART_SIZE (32 * 1024 * 1024)
using namespace std;
static string default_target_path = "rgw-${zonegroup}-${sid}/${bucket}";
static string get_key_oid(const rgw_obj_key& key)
{
string oid = key.name;
if (!key.instance.empty() &&
!key.have_null_instance()) {
oid += string(":") + key.instance;
}
return oid;
}
static string obj_to_aws_path(const rgw_obj& obj)
{
return obj.bucket.name + "/" + get_key_oid(obj.key);
}
/*
json configuration definition:
{
"connection": {
"access_key": <access>,
"secret": <secret>,
"endpoint": <endpoint>,
"host_style": <path | virtual>,
},
"acls": [ { "type": <id | email | uri>,
"source_id": <source_id>,
"dest_id": <dest_id> } ... ], # optional, acl mappings, no mappings if does not exist
"target_path": <target_path>, # override default
   # anything below here is for non-trivial configuration
   # can be used in conjunction with the above
"default": {
"connection": {
"access_key": <access>,
"secret": <secret>,
"endpoint": <endpoint>,
"host_style" <path | virtual>,
},
"acls": [ # list of source uids and how they map into destination uids in the dest objects acls
{
"type" : <id | email | uri>, # optional, default is id
"source_id": <id>,
"dest_id": <id>
} ... ]
"target_path": "rgwx-${sid}/${bucket}" # how a bucket name is mapped to destination path,
# final object name will be target_path + "/" + obj
},
"connections": [
{
"id": <id>,
"access_key": <access>,
"secret": <secret>,
"endpoint": <endpoint>,
} ... ],
"acl_profiles": [
{
"id": <id>, # acl mappings
"acls": [ {
"type": <id | email | uri>,
"source_id": <id>,
"dest_id": <id>
} ... ]
}
],
"profiles": [
{
"source_bucket": <source>, # can specify either specific bucket name (foo), or prefix (foo*)
"target_path": <dest>, # (override default)
"connection_id": <connection_id>, # optional, if empty references default connection
"acls_id": <mappings_id>, # optional, if empty references default mappings
} ... ],
}
target path optional variables:
(evaluated at init)
     sid: sync instance id, randomly generated by the sync process on first sync initialization
zonegroup: zonegroup name
     zonegroup_id: zonegroup id
zone: zone name
     zone_id: zone id
(evaluated when syncing)
bucket: bucket name
owner: bucket owner
*/
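/*
  illustrative example configuration (not taken from the source tree;
  endpoint, credentials and user/bucket names below are placeholders):
  {
    "connection": {
      "access_key": "ACCESS_KEY_PLACEHOLDER",
      "secret": "SECRET_PLACEHOLDER",
      "endpoint": "https://s3.example.com",
      "host_style": "path"
    },
    "acls": [ { "type": "id",
                "source_id": "local-user",
                "dest_id": "remote-user" } ],
    "target_path": "rgw-${zonegroup}-${sid}/${bucket}"
  }
  with this target_path, an object "photo.jpg" in bucket "media", zonegroup
  "zg1" and sync instance id "abcd" is written to
  "rgw-zg1-abcd/media/photo.jpg" on the remote endpoint.
*/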
struct ACLMapping {
ACLGranteeTypeEnum type{ACL_TYPE_CANON_USER};
string source_id;
string dest_id;
ACLMapping() = default;
ACLMapping(ACLGranteeTypeEnum t,
const string& s,
const string& d) : type(t),
source_id(s),
dest_id(d) {}
void init(const JSONFormattable& config) {
const string& t = config["type"];
if (t == "email") {
type = ACL_TYPE_EMAIL_USER;
} else if (t == "uri") {
type = ACL_TYPE_GROUP;
} else {
type = ACL_TYPE_CANON_USER;
}
source_id = config["source_id"];
dest_id = config["dest_id"];
}
void dump_conf(CephContext *cct, JSONFormatter& jf) const {
Formatter::ObjectSection os(jf, "acl_mapping");
string s;
switch (type) {
case ACL_TYPE_EMAIL_USER:
s = "email";
break;
case ACL_TYPE_GROUP:
s = "uri";
break;
default:
s = "id";
break;
}
encode_json("type", s, &jf);
encode_json("source_id", source_id, &jf);
encode_json("dest_id", dest_id, &jf);
}
};
struct ACLMappings {
map<string, ACLMapping> acl_mappings;
void init(const JSONFormattable& config) {
for (auto& c : config.array()) {
ACLMapping m;
m.init(c);
acl_mappings.emplace(std::make_pair(m.source_id, m));
}
}
void dump_conf(CephContext *cct, JSONFormatter& jf) const {
Formatter::ArraySection os(jf, "acls");
for (auto& i : acl_mappings) {
i.second.dump_conf(cct, jf);
}
}
};
struct AWSSyncConfig_ACLProfiles {
map<string, std::shared_ptr<ACLMappings> > acl_profiles;
void init(const JSONFormattable& config) {
for (auto& c : config.array()) {
const string& profile_id = c["id"];
std::shared_ptr<ACLMappings> ap{new ACLMappings};
ap->init(c["acls"]);
acl_profiles[profile_id] = ap;
}
}
void dump_conf(CephContext *cct, JSONFormatter& jf) const {
Formatter::ArraySection section(jf, "acl_profiles");
for (auto& p : acl_profiles) {
Formatter::ObjectSection section(jf, "profile");
encode_json("id", p.first, &jf);
p.second->dump_conf(cct, jf);
}
}
bool find(const string& profile_id, ACLMappings *result) const {
auto iter = acl_profiles.find(profile_id);
if (iter == acl_profiles.end()) {
return false;
}
*result = *iter->second;
return true;
}
};
struct AWSSyncConfig_Connection {
string connection_id;
string endpoint;
RGWAccessKey key;
std::optional<string> region;
HostStyle host_style{PathStyle};
bool has_endpoint{false};
bool has_key{false};
bool has_host_style{false};
void init(const JSONFormattable& config) {
has_endpoint = config.exists("endpoint");
has_key = config.exists("access_key") || config.exists("secret");
has_host_style = config.exists("host_style");
connection_id = config["id"];
endpoint = config["endpoint"];
key = RGWAccessKey(config["access_key"], config["secret"]);
if (config.exists("region")) {
region = config["region"];
} else {
region.reset();
}
string host_style_str = config["host_style"];
if (host_style_str != "virtual") {
host_style = PathStyle;
} else {
host_style = VirtualStyle;
}
}
void dump_conf(CephContext *cct, JSONFormatter& jf) const {
Formatter::ObjectSection section(jf, "connection");
encode_json("id", connection_id, &jf);
encode_json("endpoint", endpoint, &jf);
string s = (host_style == PathStyle ? "path" : "virtual");
encode_json("region", region, &jf);
encode_json("host_style", s, &jf);
{
Formatter::ObjectSection os(jf, "key");
encode_json("access_key", key.id, &jf);
string secret = (key.key.empty() ? "" : "******");
encode_json("secret", secret, &jf);
}
}
};
static int conf_to_uint64(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, const string& key, uint64_t *pval)
{
string sval;
if (config.find(key, &sval)) {
string err;
uint64_t val = strict_strtoll(sval.c_str(), 10, &err);
if (!err.empty()) {
ldpp_dout(dpp, 0) << "ERROR: could not parse configurable value for cloud sync module: " << key << ": " << sval << dendl;
return -EINVAL;
}
*pval = val;
}
return 0;
}
struct AWSSyncConfig_S3 {
uint64_t multipart_sync_threshold{DEFAULT_MULTIPART_SYNC_PART_SIZE};
uint64_t multipart_min_part_size{DEFAULT_MULTIPART_SYNC_PART_SIZE};
int init(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config) {
int r = conf_to_uint64(dpp, cct, config, "multipart_sync_threshold", &multipart_sync_threshold);
if (r < 0) {
return r;
}
r = conf_to_uint64(dpp, cct, config, "multipart_min_part_size", &multipart_min_part_size);
if (r < 0) {
return r;
}
#define MULTIPART_MIN_POSSIBLE_PART_SIZE (5 * 1024 * 1024)
if (multipart_min_part_size < MULTIPART_MIN_POSSIBLE_PART_SIZE) {
multipart_min_part_size = MULTIPART_MIN_POSSIBLE_PART_SIZE;
}
return 0;
}
void dump_conf(CephContext *cct, JSONFormatter& jf) const {
Formatter::ObjectSection section(jf, "s3");
encode_json("multipart_sync_threshold", multipart_sync_threshold, &jf);
encode_json("multipart_min_part_size", multipart_min_part_size, &jf);
}
};
struct AWSSyncConfig_Profile {
string source_bucket;
bool prefix{false};
string target_path;
string connection_id;
string acls_id;
std::shared_ptr<AWSSyncConfig_Connection> conn_conf;
std::shared_ptr<ACLMappings> acls;
std::shared_ptr<RGWRESTConn> conn;
void init(const JSONFormattable& config) {
source_bucket = config["source_bucket"];
prefix = (!source_bucket.empty() && source_bucket[source_bucket.size() - 1] == '*');
if (prefix) {
source_bucket = source_bucket.substr(0, source_bucket.size() - 1);
}
target_path = config["target_path"];
connection_id = config["connection_id"];
acls_id = config["acls_id"];
if (config.exists("connection")) {
conn_conf = make_shared<AWSSyncConfig_Connection>();
conn_conf->init(config["connection"]);
}
if (config.exists("acls")) {
acls = make_shared<ACLMappings>();
acls->init(config["acls"]);
}
}
void dump_conf(CephContext *cct, JSONFormatter& jf, const char *section = "config") const {
Formatter::ObjectSection config(jf, section);
string sb{source_bucket};
if (prefix) {
sb.append("*");
}
encode_json("source_bucket", sb, &jf);
encode_json("target_path", target_path, &jf);
encode_json("connection_id", connection_id, &jf);
encode_json("acls_id", acls_id, &jf);
if (conn_conf.get()) {
conn_conf->dump_conf(cct, jf);
}
if (acls.get()) {
acls->dump_conf(cct, jf);
}
}
};
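/* string substitution helpers for target-path templates.  illustrative
 * example: find_and_replace("rgw-${sid}/${bucket}", "${sid}", "abcd", &s)
 * yields "rgw-abcd/${bucket}"; apply_meta_param() wraps the param name in
 * "${...}" before delegating here. */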
static void find_and_replace(const string& src, const string& find, const string& replace, string *dest)
{
string s = src;
size_t pos = s.find(find);
while (pos != string::npos) {
size_t next_ofs = pos + find.size();
s = s.substr(0, pos) + replace + s.substr(next_ofs);
pos = s.find(find, next_ofs);
}
*dest = s;
}
static void apply_meta_param(const string& src, const string& param, const string& val, string *dest)
{
string s = string("${") + param + "}";
find_and_replace(src, s, val, dest);
}
struct AWSSyncConfig {
AWSSyncConfig_Profile default_profile;
std::shared_ptr<AWSSyncConfig_Profile> root_profile;
map<string, std::shared_ptr<AWSSyncConfig_Connection> > connections;
AWSSyncConfig_ACLProfiles acl_profiles;
map<string, std::shared_ptr<AWSSyncConfig_Profile> > explicit_profiles;
AWSSyncConfig_S3 s3;
int init_profile(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& profile_conf, AWSSyncConfig_Profile& profile,
bool connection_must_exist) {
if (!profile.connection_id.empty()) {
if (profile.conn_conf) {
ldpp_dout(dpp, 0) << "ERROR: ambiguous profile connection configuration, connection_id=" << profile.connection_id << dendl;
return -EINVAL;
}
if (connections.find(profile.connection_id) == connections.end()) {
ldpp_dout(dpp, 0) << "ERROR: profile configuration reference non-existent connection_id=" << profile.connection_id << dendl;
return -EINVAL;
}
profile.conn_conf = connections[profile.connection_id];
} else if (!profile.conn_conf) {
profile.connection_id = default_profile.connection_id;
auto i = connections.find(profile.connection_id);
if (i != connections.end()) {
profile.conn_conf = i->second;
}
}
if (connection_must_exist && !profile.conn_conf) {
ldpp_dout(dpp, 0) << "ERROR: remote connection undefined for sync profile" << dendl;
return -EINVAL;
}
if (profile.conn_conf && default_profile.conn_conf) {
if (!profile.conn_conf->has_endpoint) {
profile.conn_conf->endpoint = default_profile.conn_conf->endpoint;
}
if (!profile.conn_conf->has_host_style) {
profile.conn_conf->host_style = default_profile.conn_conf->host_style;
}
if (!profile.conn_conf->has_key) {
profile.conn_conf->key = default_profile.conn_conf->key;
}
}
ACLMappings acl_mappings;
if (!profile.acls_id.empty()) {
if (!acl_profiles.find(profile.acls_id, &acl_mappings)) {
ldpp_dout(dpp, 0) << "ERROR: profile configuration reference non-existent acls id=" << profile.acls_id << dendl;
return -EINVAL;
}
profile.acls = acl_profiles.acl_profiles[profile.acls_id];
} else if (!profile.acls) {
if (default_profile.acls) {
profile.acls = default_profile.acls;
profile.acls_id = default_profile.acls_id;
}
}
if (profile.target_path.empty()) {
profile.target_path = default_profile.target_path;
}
if (profile.target_path.empty()) {
profile.target_path = default_target_path;
}
return 0;
}
int init_target(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& profile_conf, std::shared_ptr<AWSSyncConfig_Profile> *ptarget) {
std::shared_ptr<AWSSyncConfig_Profile> profile;
profile.reset(new AWSSyncConfig_Profile);
profile->init(profile_conf);
int ret = init_profile(dpp, cct, profile_conf, *profile, true);
if (ret < 0) {
return ret;
}
auto& sb = profile->source_bucket;
if (explicit_profiles.find(sb) != explicit_profiles.end()) {
ldpp_dout(dpp, 0) << "WARNING: duplicate target configuration in sync module" << dendl;
}
explicit_profiles[sb] = profile;
if (ptarget) {
*ptarget = profile;
}
return 0;
}
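  /* longest-preceding-key lookup: upper_bound() lands just past 'name', so
   * the previous entry is the greatest profile key <= name; it matches when
   * it is the bucket name itself or, for prefix profiles, a prefix of it.
   * illustrative example: with profiles "foo" (exact) and "foo-*" (prefix),
   * bucket "foo-bar" resolves to "foo-", "foo" to "foo", and "bar" falls
   * back to the root profile.  note that only the closest preceding key is
   * tested. */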
  bool do_find_profile(const rgw_bucket& bucket, std::shared_ptr<AWSSyncConfig_Profile> *result) {
const string& name = bucket.name;
auto iter = explicit_profiles.upper_bound(name);
if (iter == explicit_profiles.begin()) {
return false;
}
--iter;
if (iter->first.size() > name.size()) {
return false;
}
if (name.compare(0, iter->first.size(), iter->first) != 0) {
return false;
}
std::shared_ptr<AWSSyncConfig_Profile>& target = iter->second;
if (!target->prefix &&
name.size() != iter->first.size()) {
return false;
}
*result = target;
return true;
}
  void find_profile(const rgw_bucket& bucket, std::shared_ptr<AWSSyncConfig_Profile> *result) {
if (!do_find_profile(bucket, result)) {
*result = root_profile;
}
}
AWSSyncConfig() {}
int init(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config) {
auto& default_conf = config["default"];
if (config.exists("default")) {
default_profile.init(default_conf);
init_profile(dpp, cct, default_conf, default_profile, false);
}
for (auto& conn : config["connections"].array()) {
auto new_conn = conn;
std::shared_ptr<AWSSyncConfig_Connection> c{new AWSSyncConfig_Connection};
c->init(new_conn);
connections[new_conn["id"]] = c;
}
acl_profiles.init(config["acl_profiles"]);
int r = s3.init(dpp, cct, config["s3"]);
if (r < 0) {
return r;
}
auto new_root_conf = config;
r = init_target(dpp, cct, new_root_conf, &root_profile); /* the root profile config */
if (r < 0) {
return r;
}
    for (const auto& target_conf : config["profiles"].array()) {
int r = init_target(dpp, cct, target_conf, nullptr);
if (r < 0) {
return r;
}
}
JSONFormatter jf(true);
dump_conf(cct, jf);
stringstream ss;
jf.flush(ss);
ldpp_dout(dpp, 5) << "sync module config (parsed representation):\n" << ss.str() << dendl;
return 0;
}
void expand_target(RGWDataSyncCtx *sc, const string& sid, const string& path, string *dest) {
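    // note: update_config() below passes the same string as both 'path' and
    // '*dest', so each apply_meta_param() call sees the already-substituted
    // value rather than the original template.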
apply_meta_param(path, "sid", sid, dest);
const RGWZoneGroup& zg = sc->env->svc->zone->get_zonegroup();
apply_meta_param(path, "zonegroup", zg.get_name(), dest);
apply_meta_param(path, "zonegroup_id", zg.get_id(), dest);
const RGWZone& zone = sc->env->svc->zone->get_zone();
apply_meta_param(path, "zone", zone.name, dest);
apply_meta_param(path, "zone_id", zone.id, dest);
}
void update_config(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, const string& sid) {
expand_target(sc, sid, root_profile->target_path, &root_profile->target_path);
ldpp_dout(dpp, 20) << "updated target: (root) -> " << root_profile->target_path << dendl;
for (auto& t : explicit_profiles) {
expand_target(sc, sid, t.second->target_path, &t.second->target_path);
ldpp_dout(dpp, 20) << "updated target: " << t.first << " -> " << t.second->target_path << dendl;
}
}
void dump_conf(CephContext *cct, JSONFormatter& jf) const {
Formatter::ObjectSection config(jf, "config");
root_profile->dump_conf(cct, jf);
jf.open_array_section("connections");
for (auto c : connections) {
c.second->dump_conf(cct, jf);
}
jf.close_section();
acl_profiles.dump_conf(cct, jf);
{ // targets
Formatter::ArraySection as(jf, "profiles");
for (auto& t : explicit_profiles) {
Formatter::ObjectSection target_section(jf, "profile");
encode_json("name", t.first, &jf);
t.second->dump_conf(cct, jf);
}
}
}
string get_path(std::shared_ptr<AWSSyncConfig_Profile>& profile,
const RGWBucketInfo& bucket_info,
const rgw_obj_key& obj) {
string bucket_str;
string owner;
if (!bucket_info.owner.tenant.empty()) {
bucket_str = owner = bucket_info.owner.tenant + "-";
owner += bucket_info.owner.id;
}
bucket_str += bucket_info.bucket.name;
const string& path = profile->target_path;
string new_path;
apply_meta_param(path, "bucket", bucket_str, &new_path);
apply_meta_param(new_path, "owner", owner, &new_path);
new_path += string("/") + get_key_oid(obj);
return new_path;
}
void get_target(std::shared_ptr<AWSSyncConfig_Profile>& profile,
const RGWBucketInfo& bucket_info,
const rgw_obj_key& obj,
string *bucket_name,
string *obj_name) {
string path = get_path(profile, bucket_info, obj);
size_t pos = path.find('/');
*bucket_name = path.substr(0, pos);
*obj_name = path.substr(pos + 1);
}
void init_conns(RGWDataSyncCtx *sc, const string& id) {
auto sync_env = sc->env;
update_config(sync_env->dpp, sc, id);
auto& root_conf = root_profile->conn_conf;
root_profile->conn.reset(new S3RESTConn(sc->cct,
id,
{ root_conf->endpoint },
root_conf->key,
sync_env->svc->zone->get_zonegroup().get_id(),
root_conf->region,
root_conf->host_style));
for (auto i : explicit_profiles) {
auto& c = i.second;
c->conn.reset(new S3RESTConn(sc->cct,
id,
{ c->conn_conf->endpoint },
c->conn_conf->key,
sync_env->svc->zone->get_zonegroup().get_id(),
c->conn_conf->region,
c->conn_conf->host_style));
}
}
};
struct AWSSyncInstanceEnv {
AWSSyncConfig conf;
string id;
explicit AWSSyncInstanceEnv(AWSSyncConfig& _conf) : conf(_conf) {}
void init(RGWDataSyncCtx *sc, uint64_t instance_id) {
char buf[32];
snprintf(buf, sizeof(buf), "%llx", (unsigned long long)instance_id);
id = buf;
conf.init_conns(sc, id);
}
void get_profile(const rgw_bucket& bucket, std::shared_ptr<AWSSyncConfig_Profile> *ptarget) {
conf.find_profile(bucket, ptarget);
ceph_assert(ptarget);
}
};
static int do_decode_rest_obj(const DoutPrefixProvider *dpp, CephContext *cct, map<string, bufferlist>& attrs, map<string, string>& headers, rgw_rest_obj *info)
{
for (auto header : headers) {
const string& val = header.second;
if (header.first == "RGWX_OBJECT_SIZE") {
      info->content_len = strtoull(val.c_str(), nullptr, 10); // atoi would truncate sizes over 2 GiB
} else {
info->attrs[header.first] = val;
}
}
info->acls.set_ctx(cct);
auto aiter = attrs.find(RGW_ATTR_ACL);
if (aiter != attrs.end()) {
bufferlist& bl = aiter->second;
auto bliter = bl.cbegin();
try {
info->acls.decode(bliter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode policy off attrs" << dendl;
return -EIO;
}
} else {
ldpp_dout(dpp, 0) << "WARNING: acl attrs not provided" << dendl;
}
return 0;
}
class RGWRESTStreamGetCRF : public RGWStreamReadHTTPResourceCRF
{
RGWDataSyncCtx *sc;
RGWRESTConn *conn;
const rgw_obj& src_obj;
RGWRESTConn::get_obj_params req_params;
rgw_sync_aws_src_obj_properties src_properties;
public:
RGWRESTStreamGetCRF(CephContext *_cct,
RGWCoroutinesEnv *_env,
RGWCoroutine *_caller,
RGWDataSyncCtx *_sc,
RGWRESTConn *_conn,
const rgw_obj& _src_obj,
const rgw_sync_aws_src_obj_properties& _src_properties) : RGWStreamReadHTTPResourceCRF(_cct, _env, _caller,
_sc->env->http_manager, _src_obj.key),
sc(_sc), conn(_conn), src_obj(_src_obj),
src_properties(_src_properties) {
}
int init(const DoutPrefixProvider *dpp) override {
/* init input connection */
req_params.get_op = true;
req_params.prepend_metadata = true;
req_params.unmod_ptr = &src_properties.mtime;
req_params.etag = src_properties.etag;
req_params.mod_zone_id = src_properties.zone_short_id;
req_params.mod_pg_ver = src_properties.pg_ver;
if (range.is_set) {
req_params.range_is_set = true;
req_params.range_start = range.ofs;
req_params.range_end = range.ofs + range.size - 1;
}
RGWRESTStreamRWRequest *in_req;
int ret = conn->get_obj(dpp, src_obj, req_params, false /* send */, &in_req);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): conn->get_obj() returned ret=" << ret << dendl;
return ret;
}
set_req(in_req);
return RGWStreamReadHTTPResourceCRF::init(dpp);
}
int decode_rest_obj(const DoutPrefixProvider *dpp, map<string, string>& headers, bufferlist& extra_data) override {
map<string, bufferlist> src_attrs;
ldpp_dout(dpp, 20) << __func__ << ":" << " headers=" << headers << " extra_data.length()=" << extra_data.length() << dendl;
if (extra_data.length() > 0) {
JSONParser jp;
if (!jp.parse(extra_data.c_str(), extra_data.length())) {
ldpp_dout(dpp, 0) << "ERROR: failed to parse response extra data. len=" << extra_data.length() << " data=" << extra_data.c_str() << dendl;
return -EIO;
}
JSONDecoder::decode_json("attrs", src_attrs, &jp);
}
return do_decode_rest_obj(dpp, sc->cct, src_attrs, headers, &rest_obj);
}
bool need_extra_data() override {
return true;
}
};
static std::set<string> keep_headers = { "CONTENT_TYPE",
"CONTENT_ENCODING",
"CONTENT_DISPOSITION",
"CONTENT_LANGUAGE" };
class RGWAWSStreamPutCRF : public RGWStreamWriteHTTPResourceCRF
{
RGWDataSyncCtx *sc;
rgw_sync_aws_src_obj_properties src_properties;
std::shared_ptr<AWSSyncConfig_Profile> target;
const rgw_obj& dest_obj;
string etag;
public:
RGWAWSStreamPutCRF(CephContext *_cct,
RGWCoroutinesEnv *_env,
RGWCoroutine *_caller,
RGWDataSyncCtx *_sc,
const rgw_sync_aws_src_obj_properties& _src_properties,
std::shared_ptr<AWSSyncConfig_Profile>& _target,
const rgw_obj& _dest_obj) : RGWStreamWriteHTTPResourceCRF(_cct, _env, _caller, _sc->env->http_manager),
sc(_sc), src_properties(_src_properties), target(_target), dest_obj(_dest_obj) {
}
int init() override {
/* init output connection */
RGWRESTStreamS3PutObj *out_req{nullptr};
if (multipart.is_multipart) {
char buf[32];
snprintf(buf, sizeof(buf), "%d", multipart.part_num);
rgw_http_param_pair params[] = { { "uploadId", multipart.upload_id.c_str() },
{ "partNumber", buf },
{ nullptr, nullptr } };
target->conn->put_obj_send_init(dest_obj, params, &out_req);
} else {
target->conn->put_obj_send_init(dest_obj, nullptr, &out_req);
}
set_req(out_req);
return RGWStreamWriteHTTPResourceCRF::init();
}
static bool keep_attr(const string& h) {
return (keep_headers.find(h) != keep_headers.end() ||
boost::algorithm::starts_with(h, "X_AMZ_"));
}
static void init_send_attrs(const DoutPrefixProvider *dpp,
CephContext *cct,
const rgw_rest_obj& rest_obj,
const rgw_sync_aws_src_obj_properties& src_properties,
const AWSSyncConfig_Profile *target,
map<string, string> *attrs) {
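    // builds the x-amz-* header set for the destination PUT.  illustrative
    // example: a source READ grant for "alice", with an acl mapping
    // { type: "id", source_id: "alice", dest_id: "bob" }, ends up as the
    // request header "x-amz-grant-read: id=bob".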
auto& new_attrs = *attrs;
new_attrs.clear();
for (auto& hi : rest_obj.attrs) {
if (keep_attr(hi.first)) {
new_attrs.insert(hi);
}
}
auto acl = rest_obj.acls.get_acl();
map<int, vector<string> > access_map;
if (target->acls) {
for (auto& grant : acl.get_grant_map()) {
auto& orig_grantee = grant.first;
auto& perm = grant.second;
string grantee;
const auto& am = target->acls->acl_mappings;
auto iter = am.find(orig_grantee);
if (iter == am.end()) {
ldpp_dout(dpp, 20) << "acl_mappings: Could not find " << orig_grantee << " .. ignoring" << dendl;
continue;
}
grantee = iter->second.dest_id;
string type;
switch (iter->second.type) {
case ACL_TYPE_CANON_USER:
type = "id";
break;
case ACL_TYPE_EMAIL_USER:
type = "emailAddress";
break;
case ACL_TYPE_GROUP:
type = "uri";
break;
default:
continue;
}
string tv = type + "=" + grantee;
int flags = perm.get_permission().get_permissions();
if ((flags & RGW_PERM_FULL_CONTROL) == RGW_PERM_FULL_CONTROL) {
access_map[flags].push_back(tv);
continue;
}
for (int i = 1; i <= RGW_PERM_WRITE_ACP; i <<= 1) {
if (flags & i) {
access_map[i].push_back(tv);
}
}
}
}
for (auto aiter : access_map) {
int grant_type = aiter.first;
string header_str("x-amz-grant-");
switch (grant_type) {
case RGW_PERM_READ:
header_str.append("read");
break;
case RGW_PERM_WRITE:
header_str.append("write");
break;
case RGW_PERM_READ_ACP:
header_str.append("read-acp");
break;
case RGW_PERM_WRITE_ACP:
header_str.append("write-acp");
break;
case RGW_PERM_FULL_CONTROL:
header_str.append("full-control");
break;
}
string s;
for (auto viter : aiter.second) {
if (!s.empty()) {
s.append(", ");
}
s.append(viter);
}
ldpp_dout(dpp, 20) << "acl_mappings: set acl: " << header_str << "=" << s << dendl;
new_attrs[header_str] = s;
}
char buf[32];
snprintf(buf, sizeof(buf), "%llu", (long long)src_properties.versioned_epoch);
new_attrs["x-amz-meta-rgwx-versioned-epoch"] = buf;
utime_t ut(src_properties.mtime);
snprintf(buf, sizeof(buf), "%lld.%09lld",
(long long)ut.sec(),
(long long)ut.nsec());
new_attrs["x-amz-meta-rgwx-source-mtime"] = buf;
new_attrs["x-amz-meta-rgwx-source-etag"] = src_properties.etag;
new_attrs["x-amz-meta-rgwx-source-key"] = rest_obj.key.name;
if (!rest_obj.key.instance.empty()) {
new_attrs["x-amz-meta-rgwx-source-version-id"] = rest_obj.key.instance;
}
}
void send_ready(const DoutPrefixProvider *dpp, const rgw_rest_obj& rest_obj) override {
RGWRESTStreamS3PutObj *r = static_cast<RGWRESTStreamS3PutObj *>(req);
map<string, string> new_attrs;
if (!multipart.is_multipart) {
init_send_attrs(dpp, sc->cct, rest_obj, src_properties, target.get(), &new_attrs);
}
r->set_send_length(rest_obj.content_len);
RGWAccessControlPolicy policy;
r->send_ready(dpp, target->conn->get_key(), new_attrs, policy);
}
void handle_headers(const map<string, string>& headers) {
for (auto h : headers) {
if (h.first == "ETAG") {
etag = h.second;
}
}
}
bool get_etag(string *petag) {
if (etag.empty()) {
return false;
}
*petag = etag;
return true;
}
};
class RGWAWSStreamObjToCloudPlainCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWRESTConn *source_conn;
std::shared_ptr<AWSSyncConfig_Profile> target;
const rgw_obj& src_obj;
const rgw_obj& dest_obj;
rgw_sync_aws_src_obj_properties src_properties;
std::shared_ptr<RGWStreamReadHTTPResourceCRF> in_crf;
std::shared_ptr<RGWStreamWriteHTTPResourceCRF> out_crf;
public:
RGWAWSStreamObjToCloudPlainCR(RGWDataSyncCtx *_sc,
RGWRESTConn *_source_conn,
const rgw_obj& _src_obj,
const rgw_sync_aws_src_obj_properties& _src_properties,
std::shared_ptr<AWSSyncConfig_Profile> _target,
const rgw_obj& _dest_obj) : RGWCoroutine(_sc->cct),
sc(_sc),
source_conn(_source_conn),
target(_target),
src_obj(_src_obj),
dest_obj(_dest_obj),
src_properties(_src_properties) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* init input */
in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc,
source_conn, src_obj,
src_properties));
/* init output */
out_crf.reset(new RGWAWSStreamPutCRF(cct, get_env(), this, sc,
src_properties, target, dest_obj));
yield call(new RGWStreamSpliceCR(cct, sc->env->http_manager, in_crf, out_crf));
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWAWSStreamObjToCloudMultipartPartCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWRESTConn *source_conn;
std::shared_ptr<AWSSyncConfig_Profile> target;
const rgw_obj& src_obj;
const rgw_obj& dest_obj;
rgw_sync_aws_src_obj_properties src_properties;
string upload_id;
rgw_sync_aws_multipart_part_info part_info;
std::shared_ptr<RGWStreamReadHTTPResourceCRF> in_crf;
std::shared_ptr<RGWStreamWriteHTTPResourceCRF> out_crf;
string *petag;
public:
RGWAWSStreamObjToCloudMultipartPartCR(RGWDataSyncCtx *_sc,
RGWRESTConn *_source_conn,
const rgw_obj& _src_obj,
std::shared_ptr<AWSSyncConfig_Profile>& _target,
const rgw_obj& _dest_obj,
const rgw_sync_aws_src_obj_properties& _src_properties,
const string& _upload_id,
const rgw_sync_aws_multipart_part_info& _part_info,
string *_petag) : RGWCoroutine(_sc->cct),
sc(_sc),
source_conn(_source_conn),
target(_target),
src_obj(_src_obj),
dest_obj(_dest_obj),
src_properties(_src_properties),
upload_id(_upload_id),
part_info(_part_info),
petag(_petag) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
/* init input */
in_crf.reset(new RGWRESTStreamGetCRF(cct, get_env(), this, sc,
source_conn, src_obj,
src_properties));
in_crf->set_range(part_info.ofs, part_info.size);
/* init output */
out_crf.reset(new RGWAWSStreamPutCRF(cct, get_env(), this, sc,
src_properties, target, dest_obj));
out_crf->set_multipart(upload_id, part_info.part_num, part_info.size);
yield call(new RGWStreamSpliceCR(cct, sc->env->http_manager, in_crf, out_crf));
if (retcode < 0) {
return set_cr_error(retcode);
}
if (!(static_cast<RGWAWSStreamPutCRF *>(out_crf.get()))->get_etag(petag)) {
ldpp_dout(dpp, 0) << "ERROR: failed to get etag from PUT request" << dendl;
return set_cr_error(-EIO);
}
return set_cr_done();
}
return 0;
}
};
class RGWAWSAbortMultipartCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWRESTConn *dest_conn;
const rgw_obj& dest_obj;
string upload_id;
public:
RGWAWSAbortMultipartCR(RGWDataSyncCtx *_sc,
RGWRESTConn *_dest_conn,
const rgw_obj& _dest_obj,
const string& _upload_id) : RGWCoroutine(_sc->cct),
sc(_sc),
dest_conn(_dest_conn),
dest_obj(_dest_obj),
upload_id(_upload_id) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
rgw_http_param_pair params[] = { { "uploadId", upload_id.c_str() }, {nullptr, nullptr} };
bufferlist bl;
call(new RGWDeleteRESTResourceCR(sc->cct, dest_conn, sc->env->http_manager,
obj_to_aws_path(dest_obj), params));
}
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload for dest object=" << dest_obj << " (retcode=" << retcode << ")" << dendl;
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWAWSInitMultipartCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWRESTConn *dest_conn;
const rgw_obj& dest_obj;
uint64_t obj_size;
map<string, string> attrs;
bufferlist out_bl;
string *upload_id;
struct InitMultipartResult {
string bucket;
string key;
string upload_id;
void decode_xml(XMLObj *obj) {
RGWXMLDecoder::decode_xml("Bucket", bucket, obj);
RGWXMLDecoder::decode_xml("Key", key, obj);
RGWXMLDecoder::decode_xml("UploadId", upload_id, obj);
}
} result;
public:
RGWAWSInitMultipartCR(RGWDataSyncCtx *_sc,
RGWRESTConn *_dest_conn,
const rgw_obj& _dest_obj,
uint64_t _obj_size,
const map<string, string>& _attrs,
string *_upload_id) : RGWCoroutine(_sc->cct),
sc(_sc),
dest_conn(_dest_conn),
dest_obj(_dest_obj),
obj_size(_obj_size),
attrs(_attrs),
upload_id(_upload_id) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
rgw_http_param_pair params[] = { { "uploads", nullptr }, {nullptr, nullptr} };
bufferlist bl;
call(new RGWPostRawRESTResourceCR <bufferlist> (sc->cct, dest_conn, sc->env->http_manager,
obj_to_aws_path(dest_obj), params, &attrs, bl, &out_bl));
}
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
return set_cr_error(retcode);
}
{
/*
* If one of the following fails we cannot abort upload, as we cannot
       * extract the upload id. If one of these fails, it's very likely that
       * it's the least of our problems.
*/
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize xml parser for parsing multipart init response from server" << dendl;
return set_cr_error(-EIO);
}
if (!parser.parse(out_bl.c_str(), out_bl.length(), 1)) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: failed to parse xml: " << str << dendl;
return set_cr_error(-EIO);
}
try {
RGWXMLDecoder::decode_xml("InitiateMultipartUploadResult", result, &parser, true);
} catch (RGWXMLDecoder::err& err) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: unexpected xml: " << str << dendl;
return set_cr_error(-EIO);
}
}
ldpp_dout(dpp, 20) << "init multipart result: bucket=" << result.bucket << " key=" << result.key << " upload_id=" << result.upload_id << dendl;
*upload_id = result.upload_id;
return set_cr_done();
}
return 0;
}
};
class RGWAWSCompleteMultipartCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWRESTConn *dest_conn;
const rgw_obj& dest_obj;
bufferlist out_bl;
string upload_id;
struct CompleteMultipartReq {
map<int, rgw_sync_aws_multipart_part_info> parts;
explicit CompleteMultipartReq(const map<int, rgw_sync_aws_multipart_part_info>& _parts) : parts(_parts) {}
void dump_xml(Formatter *f) const {
for (auto p : parts) {
f->open_object_section("Part");
encode_xml("PartNumber", p.first, f);
encode_xml("ETag", p.second.etag, f);
f->close_section();
};
}
} req_enc;
struct CompleteMultipartResult {
string location;
string bucket;
string key;
string etag;
void decode_xml(XMLObj *obj) {
RGWXMLDecoder::decode_xml("Location", bucket, obj);
RGWXMLDecoder::decode_xml("Bucket", bucket, obj);
RGWXMLDecoder::decode_xml("Key", key, obj);
RGWXMLDecoder::decode_xml("ETag", etag, obj);
}
} result;
public:
RGWAWSCompleteMultipartCR(RGWDataSyncCtx *_sc,
RGWRESTConn *_dest_conn,
const rgw_obj& _dest_obj,
string _upload_id,
const map<int, rgw_sync_aws_multipart_part_info>& _parts) : RGWCoroutine(_sc->cct),
sc(_sc),
dest_conn(_dest_conn),
dest_obj(_dest_obj),
upload_id(_upload_id),
req_enc(_parts) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield {
rgw_http_param_pair params[] = { { "uploadId", upload_id.c_str() }, {nullptr, nullptr} };
stringstream ss;
XMLFormatter formatter;
encode_xml("CompleteMultipartUpload", req_enc, &formatter);
formatter.flush(ss);
bufferlist bl;
bl.append(ss.str());
call(new RGWPostRawRESTResourceCR <bufferlist> (sc->cct, dest_conn, sc->env->http_manager,
obj_to_aws_path(dest_obj), params, nullptr, bl, &out_bl));
}
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize multipart upload for dest object=" << dest_obj << dendl;
return set_cr_error(retcode);
}
{
/*
* If one of the following fails we cannot abort upload, as we cannot
       * extract the upload id. If one of these fails, it's very likely that
       * it's the least of our problems.
*/
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize xml parser for parsing multipart init response from server" << dendl;
return set_cr_error(-EIO);
}
if (!parser.parse(out_bl.c_str(), out_bl.length(), 1)) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: failed to parse xml: " << str << dendl;
return set_cr_error(-EIO);
}
try {
RGWXMLDecoder::decode_xml("CompleteMultipartUploadResult", result, &parser, true);
} catch (RGWXMLDecoder::err& err) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: unexpected xml: " << str << dendl;
return set_cr_error(-EIO);
}
}
ldpp_dout(dpp, 20) << "complete multipart result: location=" << result.location << " bucket=" << result.bucket << " key=" << result.key << " etag=" << result.etag << dendl;
return set_cr_done();
}
return 0;
}
};
class RGWAWSStreamAbortMultipartUploadCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWRESTConn *dest_conn;
const rgw_obj& dest_obj;
const rgw_raw_obj status_obj;
string upload_id;
public:
RGWAWSStreamAbortMultipartUploadCR(RGWDataSyncCtx *_sc,
RGWRESTConn *_dest_conn,
const rgw_obj& _dest_obj,
const rgw_raw_obj& _status_obj,
const string& _upload_id) : RGWCoroutine(_sc->cct), sc(_sc),
dest_conn(_dest_conn),
dest_obj(_dest_obj),
status_obj(_status_obj),
upload_id(_upload_id) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield call(new RGWAWSAbortMultipartCR(sc, dest_conn, dest_obj, upload_id));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload dest obj=" << dest_obj << " upload_id=" << upload_id << " retcode=" << retcode << dendl;
/* ignore error, best effort */
}
yield call(new RGWRadosRemoveCR(sc->env->driver, status_obj));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to remove sync status obj obj=" << status_obj << " retcode=" << retcode << dendl;
/* ignore error, best effort */
}
return set_cr_done();
}
return 0;
}
};
class RGWAWSStreamObjToCloudMultipartCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
AWSSyncConfig& conf;
RGWRESTConn *source_conn;
std::shared_ptr<AWSSyncConfig_Profile> target;
const rgw_obj& src_obj;
const rgw_obj& dest_obj;
uint64_t obj_size;
string src_etag;
rgw_sync_aws_src_obj_properties src_properties;
rgw_rest_obj rest_obj;
rgw_sync_aws_multipart_upload_info status;
map<string, string> new_attrs;
rgw_sync_aws_multipart_part_info *pcur_part_info{nullptr};
int ret_err{0};
rgw_raw_obj status_obj;
public:
RGWAWSStreamObjToCloudMultipartCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe,
AWSSyncConfig& _conf,
RGWRESTConn *_source_conn,
const rgw_obj& _src_obj,
std::shared_ptr<AWSSyncConfig_Profile>& _target,
const rgw_obj& _dest_obj,
uint64_t _obj_size,
const rgw_sync_aws_src_obj_properties& _src_properties,
const rgw_rest_obj& _rest_obj) : RGWCoroutine(_sc->cct),
sc(_sc),
sync_env(_sc->env),
conf(_conf),
source_conn(_source_conn),
target(_target),
src_obj(_src_obj),
dest_obj(_dest_obj),
obj_size(_obj_size),
src_properties(_src_properties),
rest_obj(_rest_obj),
status_obj(sync_env->svc->zone->get_zone_params().log_pool,
RGWBucketPipeSyncStatusManager::obj_status_oid(_sync_pipe, sc->source_zone, src_obj)) {
}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield call(new RGWSimpleRadosReadCR<rgw_sync_aws_multipart_upload_info>(
dpp, sync_env->driver, status_obj, &status, false));
if (retcode < 0 && retcode != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed to read sync status of object " << src_obj << " retcode=" << retcode << dendl;
return retcode;
}
if (retcode >= 0) {
/* check here that mtime and size did not change */
if (status.src_properties.mtime != src_properties.mtime || status.obj_size != obj_size ||
status.src_properties.etag != src_properties.etag) {
yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id));
retcode = -ENOENT;
}
}
if (retcode == -ENOENT) {
RGWAWSStreamPutCRF::init_send_attrs(dpp, sc->cct, rest_obj, src_properties, target.get(), &new_attrs);
        yield call(new RGWAWSInitMultipartCR(sc, target->conn.get(), dest_obj, obj_size, std::move(new_attrs), &status.upload_id)); // use the caller-supplied size; status.obj_size is only populated below
if (retcode < 0) {
return set_cr_error(retcode);
}
status.obj_size = obj_size;
status.src_properties = src_properties;
#define MULTIPART_MAX_PARTS 10000
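        /* worked example (illustrative numbers): for a 10 GiB object with the
         * default 32 MiB multipart_min_part_size, obj_size / 10000 is about
         * 1 MiB, so part_size stays at 32 MiB and num_parts becomes
         * ceil(10 GiB / 32 MiB) = 320. */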
uint64_t min_part_size = obj_size / MULTIPART_MAX_PARTS;
status.part_size = std::max(conf.s3.multipart_min_part_size, min_part_size);
status.num_parts = (obj_size + status.part_size - 1) / status.part_size;
status.cur_part = 1;
}
for (; (uint32_t)status.cur_part <= status.num_parts; ++status.cur_part) {
yield {
rgw_sync_aws_multipart_part_info& cur_part_info = status.parts[status.cur_part];
cur_part_info.part_num = status.cur_part;
cur_part_info.ofs = status.cur_ofs;
cur_part_info.size = std::min((uint64_t)status.part_size, status.obj_size - status.cur_ofs);
pcur_part_info = &cur_part_info;
status.cur_ofs += status.part_size;
call(new RGWAWSStreamObjToCloudMultipartPartCR(sc,
source_conn, src_obj,
target,
dest_obj,
status.src_properties,
status.upload_id,
cur_part_info,
&cur_part_info.etag));
}
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to sync obj=" << src_obj << ", sync via multipart upload, upload_id=" << status.upload_id << " part number " << status.cur_part << " (error: " << cpp_strerror(-retcode) << ")" << dendl;
ret_err = retcode;
yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id));
return set_cr_error(ret_err);
}
yield call(new RGWSimpleRadosWriteCR<rgw_sync_aws_multipart_upload_info>(dpp, sync_env->driver, status_obj, status));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to store multipart upload state, retcode=" << retcode << dendl;
/* continue with upload anyway */
}
ldpp_dout(dpp, 20) << "sync of object=" << src_obj << " via multipart upload, finished sending part #" << status.cur_part << " etag=" << pcur_part_info->etag << dendl;
}
yield call(new RGWAWSCompleteMultipartCR(sc, target->conn.get(), dest_obj, status.upload_id, status.parts));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to complete multipart upload of obj=" << src_obj << " (error: " << cpp_strerror(-retcode) << ")" << dendl;
ret_err = retcode;
yield call(new RGWAWSStreamAbortMultipartUploadCR(sc, target->conn.get(), dest_obj, status_obj, status.upload_id));
return set_cr_error(ret_err);
}
/* remove status obj */
yield call(new RGWRadosRemoveCR(sync_env->driver, status_obj));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to abort multipart upload obj=" << src_obj << " upload_id=" << status.upload_id << " part number " << status.cur_part << " (" << cpp_strerror(-retcode) << ")" << dendl;
/* ignore error, best effort */
}
return set_cr_done();
}
return 0;
}
};
template <class T>
int decode_attr(map<string, bufferlist>& attrs, const char *attr_name, T *result, T def_val)
{
map<string, bufferlist>::iterator iter = attrs.find(attr_name);
if (iter == attrs.end()) {
*result = def_val;
return 0;
}
bufferlist& bl = iter->second;
if (bl.length() == 0) {
*result = def_val;
return 0;
}
auto bliter = bl.cbegin();
try {
decode(*result, bliter);
} catch (buffer::error& err) {
return -EIO;
}
return 0;
}
// maybe use Fetch Remote Obj instead?
class RGWAWSHandleRemoteObjCBCR: public RGWStatRemoteObjCBCR {
rgw_bucket_sync_pipe sync_pipe;
AWSSyncInstanceEnv& instance;
uint64_t versioned_epoch{0};
RGWRESTConn *source_conn{nullptr};
std::shared_ptr<AWSSyncConfig_Profile> target;
bufferlist res;
unordered_map <string, bool> bucket_created;
rgw_rest_obj rest_obj;
int ret{0};
uint32_t src_zone_short_id{0};
uint64_t src_pg_ver{0};
bufferlist out_bl;
struct CreateBucketResult {
string code;
void decode_xml(XMLObj *obj) {
RGWXMLDecoder::decode_xml("Code", code, obj);
}
} result;
rgw_obj src_obj;
rgw_obj dest_obj;
public:
RGWAWSHandleRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe,
rgw_obj_key& _key,
AWSSyncInstanceEnv& _instance,
uint64_t _versioned_epoch) : RGWStatRemoteObjCBCR(_sc, _sync_pipe.info.source_bs.bucket, _key),
sync_pipe(_sync_pipe),
instance(_instance), versioned_epoch(_versioned_epoch)
{}
~RGWAWSHandleRemoteObjCBCR(){
}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ret = decode_attr(attrs, RGW_ATTR_PG_VER, &src_pg_ver, (uint64_t)0);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode pg ver attr, ignoring" << dendl;
} else {
ret = decode_attr(attrs, RGW_ATTR_SOURCE_ZONE, &src_zone_short_id, (uint32_t)0);
if (ret < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode source zone short_id attr, ignoring" << dendl;
src_pg_ver = 0; /* all or nothing */
}
}
ldpp_dout(dpp, 4) << "AWS: download begin: z=" << sc->source_zone
<< " b=" << src_bucket << " k=" << key << " size=" << size
<< " mtime=" << mtime << " etag=" << etag
<< " zone_short_id=" << src_zone_short_id << " pg_ver=" << src_pg_ver
<< dendl;
source_conn = sync_env->svc->zone->get_zone_conn(sc->source_zone);
if (!source_conn) {
ldpp_dout(dpp, 0) << "ERROR: cannot find http connection to zone " << sc->source_zone << dendl;
return set_cr_error(-EINVAL);
}
instance.get_profile(sync_pipe.info.source_bs.bucket, &target);
instance.conf.get_target(target, sync_pipe.dest_bucket_info, key, &dest_obj.bucket.name, &dest_obj.key.name);
if (bucket_created.find(dest_obj.bucket.name) == bucket_created.end()){
yield {
ldpp_dout(dpp, 0) << "AWS: creating bucket " << dest_obj.bucket.name << dendl;
bufferlist bl;
call(new RGWPutRawRESTResourceCR <bufferlist> (sc->cct, target->conn.get(),
sync_env->http_manager,
dest_obj.bucket.name, nullptr, bl, &out_bl));
}
if (retcode < 0 ) {
RGWXMLDecoder::XMLParser parser;
if (!parser.init()) {
ldpp_dout(dpp, 0) << "ERROR: failed to initialize xml parser for parsing multipart init response from server" << dendl;
return set_cr_error(retcode);
}
if (!parser.parse(out_bl.c_str(), out_bl.length(), 1)) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: failed to parse xml: " << str << dendl;
return set_cr_error(retcode);
}
try {
RGWXMLDecoder::decode_xml("Error", result, &parser, true);
} catch (RGWXMLDecoder::err& err) {
string str(out_bl.c_str(), out_bl.length());
ldpp_dout(dpp, 5) << "ERROR: unexpected xml: " << str << dendl;
return set_cr_error(retcode);
}
if (result.code != "BucketAlreadyOwnedByYou") {
return set_cr_error(retcode);
}
}
bucket_created[dest_obj.bucket.name] = true;
}
yield {
src_obj.bucket = src_bucket;
src_obj.key = key;
/* init output */
rgw_sync_aws_src_obj_properties src_properties;
src_properties.mtime = mtime;
src_properties.etag = etag;
src_properties.zone_short_id = src_zone_short_id;
src_properties.pg_ver = src_pg_ver;
src_properties.versioned_epoch = versioned_epoch;
if (size < instance.conf.s3.multipart_sync_threshold) {
call(new RGWAWSStreamObjToCloudPlainCR(sc, source_conn, src_obj,
src_properties,
target,
dest_obj));
} else {
rgw_rest_obj rest_obj;
rest_obj.init(key);
if (do_decode_rest_obj(dpp, sc->cct, attrs, headers, &rest_obj)) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode rest obj out of headers=" << headers << ", attrs=" << attrs << dendl;
return set_cr_error(-EINVAL);
}
call(new RGWAWSStreamObjToCloudMultipartCR(sc, sync_pipe, instance.conf, source_conn, src_obj,
target, dest_obj, size, src_properties, rest_obj));
}
}
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWAWSHandleRemoteObjCR : public RGWCallStatRemoteObjCR {
rgw_bucket_sync_pipe sync_pipe;
AWSSyncInstanceEnv& instance;
uint64_t versioned_epoch;
public:
RGWAWSHandleRemoteObjCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe, rgw_obj_key& _key,
AWSSyncInstanceEnv& _instance, uint64_t _versioned_epoch) : RGWCallStatRemoteObjCR(_sc, _sync_pipe.info.source_bs.bucket, _key),
sync_pipe(_sync_pipe),
instance(_instance), versioned_epoch(_versioned_epoch) {
}
~RGWAWSHandleRemoteObjCR() {}
RGWStatRemoteObjCBCR *allocate_callback() override {
return new RGWAWSHandleRemoteObjCBCR(sc, sync_pipe, key, instance, versioned_epoch);
}
};
class RGWAWSRemoveRemoteObjCBCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
std::shared_ptr<AWSSyncConfig_Profile> target;
rgw_bucket_sync_pipe sync_pipe;
rgw_obj_key key;
ceph::real_time mtime;
AWSSyncInstanceEnv& instance;
int ret{0};
public:
RGWAWSRemoveRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe, rgw_obj_key& _key, const ceph::real_time& _mtime,
AWSSyncInstanceEnv& _instance) : RGWCoroutine(_sc->cct), sc(_sc),
sync_pipe(_sync_pipe), key(_key),
mtime(_mtime), instance(_instance) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ldpp_dout(dpp, 0) << ": remove remote obj: z=" << sc->source_zone
<< " b=" <<sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << dendl;
yield {
instance.get_profile(sync_pipe.info.source_bs.bucket, &target);
string path = instance.conf.get_path(target, sync_pipe.dest_bucket_info, key);
ldpp_dout(dpp, 0) << "AWS: removing aws object at" << path << dendl;
call(new RGWDeleteRESTResourceCR(sc->cct, target->conn.get(),
sc->env->http_manager,
path, nullptr /* params */));
}
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWAWSDataSyncModule: public RGWDataSyncModule {
CephContext *cct;
AWSSyncInstanceEnv instance;
public:
RGWAWSDataSyncModule(CephContext *_cct, AWSSyncConfig& _conf) :
cct(_cct),
instance(_conf) {
}
void init(RGWDataSyncCtx *sc, uint64_t instance_id) override {
instance.init(sc, instance_id);
}
~RGWAWSDataSyncModule() {}
RGWCoroutine *sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
std::optional<uint64_t> versioned_epoch,
const rgw_zone_set_entry& source_trace_entry,
rgw_zone_set *zones_trace) override {
ldout(sc->cct, 0) << instance.id << ": sync_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " versioned_epoch=" << versioned_epoch.value_or(0) << dendl;
return new RGWAWSHandleRemoteObjCR(sc, sync_pipe, key, instance, versioned_epoch.value_or(0));
}
RGWCoroutine *remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime, bool versioned, uint64_t versioned_epoch,
rgw_zone_set *zones_trace) override {
ldout(sc->cct, 0) <<"rm_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
return new RGWAWSRemoveRemoteObjCBCR(sc, sync_pipe, key, mtime, instance);
}
RGWCoroutine *create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime,
rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch,
rgw_zone_set *zones_trace) override {
ldout(sc->cct, 0) <<"AWS Not implemented: create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime
<< " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
return NULL;
}
};
class RGWAWSSyncModuleInstance : public RGWSyncModuleInstance {
RGWAWSDataSyncModule data_handler;
public:
RGWAWSSyncModuleInstance(CephContext *cct, AWSSyncConfig& _conf) : data_handler(cct, _conf) {}
RGWDataSyncModule *get_data_handler() override {
return &data_handler;
}
};
int RGWAWSSyncModule::create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance){
AWSSyncConfig conf;
int r = conf.init(dpp, cct, config);
if (r < 0) {
return r;
}
instance->reset(new RGWAWSSyncModuleInstance(cct, conf));
return 0;
}
| 62,642 | 33.34375 | 231 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_aws.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_sync_module.h"
struct rgw_sync_aws_multipart_part_info {
int part_num{0};
uint64_t ofs{0};
uint64_t size{0};
std::string etag;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(part_num, bl);
encode(ofs, bl);
encode(size, bl);
encode(etag, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(part_num, bl);
decode(ofs, bl);
decode(size, bl);
decode(etag, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(rgw_sync_aws_multipart_part_info)
struct rgw_sync_aws_src_obj_properties {
ceph::real_time mtime;
std::string etag;
uint32_t zone_short_id{0};
uint64_t pg_ver{0};
uint64_t versioned_epoch{0};
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(mtime, bl);
encode(etag, bl);
encode(zone_short_id, bl);
encode(pg_ver, bl);
encode(versioned_epoch, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(mtime, bl);
decode(etag, bl);
decode(zone_short_id, bl);
decode(pg_ver, bl);
decode(versioned_epoch, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(rgw_sync_aws_src_obj_properties)
struct rgw_sync_aws_multipart_upload_info {
std::string upload_id;
uint64_t obj_size;
rgw_sync_aws_src_obj_properties src_properties;
uint32_t part_size{0};
uint32_t num_parts{0};
int cur_part{0};
uint64_t cur_ofs{0};
std::map<int, rgw_sync_aws_multipart_part_info> parts;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(upload_id, bl);
encode(obj_size, bl);
encode(src_properties, bl);
encode(part_size, bl);
encode(num_parts, bl);
encode(cur_part, bl);
encode(cur_ofs, bl);
encode(parts, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(upload_id, bl);
decode(obj_size, bl);
decode(src_properties, bl);
decode(part_size, bl);
decode(num_parts, bl);
decode(cur_part, bl);
decode(cur_ofs, bl);
decode(parts, bl);
DECODE_FINISH(bl);
}
};
WRITE_CLASS_ENCODER(rgw_sync_aws_multipart_upload_info)
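// Illustrative round-trip sketch (not part of the module): these structs use
// the standard ceph ENCODE/DECODE pattern, so resumable multipart state can
// be persisted and restored. Values below are hypothetical.
//
//   rgw_sync_aws_multipart_part_info in, out;
//   in.part_num = 1;
//   in.size = 4096;
//   bufferlist bl;
//   encode(in, bl);
//   auto p = bl.cbegin();
//   decode(out, p);   // out now mirrors in (part_num, ofs, size, etag)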
class RGWAWSSyncModule : public RGWSyncModule {
public:
RGWAWSSyncModule() {}
bool supports_data_export() override { return false;}
int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) override;
};
| 2,651 | 23.330275 | 147 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_es.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_b64.h"
#include "rgw_common.h"
#include "rgw_coroutine.h"
#include "rgw_sync_module.h"
#include "rgw_data_sync.h"
#include "rgw_sync_module_es.h"
#include "rgw_sync_module_es_rest.h"
#include "rgw_rest_conn.h"
#include "rgw_cr_rest.h"
#include "rgw_op.h"
#include "rgw_es_query.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include "include/str_list.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
using namespace std;
/*
* allowlist utility. Config string is a list of entries, where an entry is either an item,
* a prefix, or a suffix. An item would be the name of the entity that we'd look up,
* a prefix would be a string ending with an asterisk, a suffix would be a string starting
* with an asterisk. For example:
*
* bucket1, bucket2, foo*, *bar
*/
class ItemList {
bool approve_all{false};
set<string> entries;
set<string> prefixes;
set<string> suffixes;
void parse(const string& str) {
list<string> l;
get_str_list(str, ",", l);
for (auto& entry : l) {
entry = rgw_trim_whitespace(entry);
if (entry.empty()) {
continue;
}
if (entry == "*") {
approve_all = true;
return;
}
if (entry[0] == '*') {
suffixes.insert(entry.substr(1));
continue;
}
if (entry.back() == '*') {
prefixes.insert(entry.substr(0, entry.size() - 1));
continue;
}
entries.insert(entry);
}
}
public:
ItemList() {}
void init(const string& str, bool def_val) {
if (str.empty()) {
approve_all = def_val;
} else {
parse(str);
}
}
bool exists(const string& entry) {
if (approve_all) {
return true;
}
if (entries.find(entry) != entries.end()) {
return true;
}
auto i = prefixes.upper_bound(entry);
if (i != prefixes.begin()) {
--i;
if (boost::algorithm::starts_with(entry, *i)) {
return true;
}
}
for (i = suffixes.begin(); i != suffixes.end(); ++i) {
if (boost::algorithm::ends_with(entry, *i)) {
return true;
}
}
return false;
}
};
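// A minimal usage sketch for the config string documented above (names are
// illustrative only):
//
//   ItemList l;
//   l.init("bucket1, bucket2, foo*, *bar", false /* def_val */);
//   l.exists("bucket1");  // true: exact entry
//   l.exists("foobaz");   // true: matches prefix "foo*"
//   l.exists("rebar");    // true: matches suffix "*bar"
//   l.exists("other");    // false: nothing matches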
#define ES_NUM_SHARDS_MIN 5
#define ES_NUM_SHARDS_DEFAULT 16
#define ES_NUM_REPLICAS_DEFAULT 1
using ESVersion = std::pair<int,int>;
static constexpr ESVersion ES_V5{5,0};
static constexpr ESVersion ES_V7{7,0};
struct ESInfo {
std::string name;
std::string cluster_name;
std::string cluster_uuid;
ESVersion version;
void decode_json(JSONObj *obj);
std::string get_version_str(){
return std::to_string(version.first) + "." + std::to_string(version.second);
}
};
// simple wrapper structure to wrap the es version nested type
struct es_version_decoder {
ESVersion version;
int parse_version(const std::string& s) {
int major, minor;
int ret = sscanf(s.c_str(), "%d.%d", &major, &minor);
    if (ret != 2) {
      return -EINVAL;
    }
version = std::make_pair(major,minor);
return 0;
}
void decode_json(JSONObj *obj) {
std::string s;
JSONDecoder::decode_json("number",s,obj);
if (parse_version(s) < 0)
throw JSONDecoder::err("Failed to parse ElasticVersion");
}
};
void ESInfo::decode_json(JSONObj *obj)
{
JSONDecoder::decode_json("name", name, obj);
JSONDecoder::decode_json("cluster_name", cluster_name, obj);
JSONDecoder::decode_json("cluster_uuid", cluster_uuid, obj);
es_version_decoder esv;
JSONDecoder::decode_json("version", esv, obj);
version = std::move(esv.version);
}
struct ElasticConfig {
uint64_t sync_instance{0};
string id;
string index_path;
std::unique_ptr<RGWRESTConn> conn;
bool explicit_custom_meta{true};
string override_index_path;
ItemList index_buckets;
ItemList allow_owners;
uint32_t num_shards{0};
uint32_t num_replicas{0};
std::map <string,string> default_headers = {{ "Content-Type", "application/json" }};
ESInfo es_info;
void init(CephContext *cct, const JSONFormattable& config) {
string elastic_endpoint = config["endpoint"];
id = string("elastic:") + elastic_endpoint;
conn.reset(new RGWRESTConn(cct, (rgw::sal::Driver*)nullptr, id, { elastic_endpoint }, nullopt /* region */ ));
explicit_custom_meta = config["explicit_custom_meta"](true);
index_buckets.init(config["index_buckets_list"], true); /* approve all buckets by default */
allow_owners.init(config["approved_owners_list"], true); /* approve all bucket owners by default */
override_index_path = config["override_index_path"];
num_shards = config["num_shards"](ES_NUM_SHARDS_DEFAULT);
if (num_shards < ES_NUM_SHARDS_MIN) {
num_shards = ES_NUM_SHARDS_MIN;
}
num_replicas = config["num_replicas"](ES_NUM_REPLICAS_DEFAULT);
if (string user = config["username"], pw = config["password"];
!user.empty() && !pw.empty()) {
auto auth_string = user + ":" + pw;
default_headers.emplace("AUTHORIZATION", "Basic " + rgw::to_base64(auth_string));
}
}
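  // A hedged sketch of a tier config this init() would consume (keys match
  // the lookups above; endpoint and credentials are placeholders):
  //
  //   {
  //     "endpoint": "http://elastic.example.com:9200",
  //     "num_shards": 16,
  //     "num_replicas": 1,
  //     "explicit_custom_meta": "true",
  //     "index_buckets_list": "bucket1, prefix*",
  //     "approved_owners_list": "*",
  //     "username": "rgw",
  //     "password": "secret"
  //   }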
void init_instance(const RGWRealm& realm, uint64_t instance_id) {
sync_instance = instance_id;
if (!override_index_path.empty()) {
index_path = override_index_path;
return;
}
char buf[32];
snprintf(buf, sizeof(buf), "-%08x", (uint32_t)(sync_instance & 0xFFFFFFFF));
index_path = "/rgw-" + realm.get_name() + buf;
}
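  // e.g. (illustrative): realm "gold" with instance_id 0x12345678 yields
  // index_path "/rgw-gold-12345678" unless override_index_path is set.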
string get_index_path() {
return index_path;
}
map<string, string>& get_request_headers() {
return default_headers;
}
string get_obj_path(const RGWBucketInfo& bucket_info, const rgw_obj_key& key) {
if (es_info.version >= ES_V7) {
return index_path+ "/_doc/" + url_encode(bucket_info.bucket.bucket_id + ":" + key.name + ":" + (key.instance.empty() ? "null" : key.instance));
;
} else {
return index_path + "/object/" + url_encode(bucket_info.bucket.bucket_id + ":" + key.name + ":" + (key.instance.empty() ? "null" : key.instance));
}
}
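  // e.g. (hypothetical ids): bucket_id "b1", key "obj" with no instance maps
  // to "<index_path>/_doc/b1%3Aobj%3Anull" on ES >= 7 and to
  // "<index_path>/object/b1%3Aobj%3Anull" on older versions.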
bool should_handle_operation(RGWBucketInfo& bucket_info) {
return index_buckets.exists(bucket_info.bucket.name) &&
allow_owners.exists(bucket_info.owner.to_str());
}
};
using ElasticConfigRef = std::shared_ptr<ElasticConfig>;
static const char *es_type_to_str(const ESType& t) {
switch (t) {
case ESType::String: return "string";
case ESType::Text: return "text";
case ESType::Keyword: return "keyword";
case ESType::Long: return "long";
case ESType::Integer: return "integer";
case ESType::Short: return "short";
case ESType::Byte: return "byte";
case ESType::Double: return "double";
case ESType::Float: return "float";
case ESType::Half_Float: return "half_float";
case ESType::Scaled_Float: return "scaled_float";
case ESType::Date: return "date";
case ESType::Boolean: return "boolean";
case ESType::Integer_Range: return "integer_range";
case ESType::Float_Range: return "float_range";
case ESType::Double_Range: return "date_range";
case ESType::Date_Range: return "date_range";
case ESType::Geo_Point: return "geo_point";
case ESType::Ip: return "ip";
default:
return "<unknown>";
}
}
struct es_type_v2 {
ESType estype;
const char *format{nullptr};
std::optional<bool> analyzed;
es_type_v2(ESType et) : estype(et) {}
void dump(Formatter *f) const {
const char *type_str = es_type_to_str(estype);
encode_json("type", type_str, f);
if (format) {
encode_json("format", format, f);
}
auto is_analyzed = analyzed;
if (estype == ESType::String &&
!is_analyzed) {
is_analyzed = false;
}
if (is_analyzed) {
encode_json("index", (is_analyzed.value() ? "analyzed" : "not_analyzed"), f);
}
}
};
struct es_type_v5 {
ESType estype;
const char *format{nullptr};
std::optional<bool> analyzed;
std::optional<bool> index;
es_type_v5(ESType et) : estype(et) {}
void dump(Formatter *f) const {
ESType new_estype;
if (estype != ESType::String) {
new_estype = estype;
} else {
bool is_analyzed = analyzed.value_or(false);
new_estype = (is_analyzed ? ESType::Text : ESType::Keyword);
/* index = true; ... Not setting index=true, because that's the default,
* and dumping a boolean value *might* be a problem when backporting this
* because value might get quoted
*/
}
const char *type_str = es_type_to_str(new_estype);
encode_json("type", type_str, f);
if (format) {
encode_json("format", format, f);
}
if (index) {
encode_json("index", index.value(), f);
}
}
};
template <class T>
struct es_type : public T {
es_type(T t) : T(t) {}
es_type& set_format(const char *f) {
T::format = f;
return *this;
}
es_type& set_analyzed(bool a) {
T::analyzed = a;
return *this;
}
};
template <class T>
struct es_index_mappings {
ESVersion es_version;
ESType string_type {ESType::String};
es_index_mappings(ESVersion esv):es_version(esv) {
}
es_type<T> est(ESType t) const {
return es_type<T>(t);
}
void dump_custom(const char *section, ESType type, const char *format, Formatter *f) const {
f->open_object_section(section);
::encode_json("type", "nested", f);
f->open_object_section("properties");
encode_json("name", est(string_type), f);
encode_json("value", est(type).set_format(format), f);
    f->close_section(); // properties
    f->close_section(); // named section, e.g. custom-string
}
void dump(Formatter *f) const {
    if (es_version < ES_V7) // ES 7.0+ no longer accepts a mapping type wrapper
      f->open_object_section("object");
f->open_object_section("properties");
encode_json("bucket", est(string_type), f);
encode_json("name", est(string_type), f);
encode_json("instance", est(string_type), f);
encode_json("versioned_epoch", est(ESType::Long), f);
f->open_object_section("meta");
f->open_object_section("properties");
encode_json("cache_control", est(string_type), f);
encode_json("content_disposition", est(string_type), f);
encode_json("content_encoding", est(string_type), f);
encode_json("content_language", est(string_type), f);
encode_json("content_type", est(string_type), f);
encode_json("storage_class", est(string_type), f);
encode_json("etag", est(string_type), f);
encode_json("expires", est(string_type), f);
encode_json("mtime", est(ESType::Date)
.set_format("strict_date_optional_time||epoch_millis"), f);
encode_json("size", est(ESType::Long), f);
dump_custom("custom-string", string_type, nullptr, f);
dump_custom("custom-int", ESType::Long, nullptr, f);
dump_custom("custom-date", ESType::Date, "strict_date_optional_time||epoch_millis", f);
f->close_section(); // properties
f->close_section(); // meta
f->close_section(); // properties
    if (es_version < ES_V7)
      f->close_section(); // object
}
};
struct es_index_settings {
uint32_t num_replicas;
uint32_t num_shards;
es_index_settings(uint32_t _replicas, uint32_t _shards) : num_replicas(_replicas), num_shards(_shards) {}
void dump(Formatter *f) const {
encode_json("number_of_replicas", num_replicas, f);
encode_json("number_of_shards", num_shards, f);
}
};
struct es_index_config_base {
virtual ~es_index_config_base() {}
virtual void dump(Formatter *f) const = 0;
};
template <class T>
struct es_index_config : public es_index_config_base {
es_index_settings settings;
es_index_mappings<T> mappings;
es_index_config(es_index_settings& _s, ESVersion esv) : settings(_s), mappings(esv) {
}
void dump(Formatter *f) const {
encode_json("settings", settings, f);
encode_json("mappings", mappings, f);
}
};
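// For orientation, a trimmed sketch of the JSON an es_index_config dump
// produces for ES >= 7 (custom-* nested sections elided; values assume the
// defaults above):
//
//   { "settings": { "number_of_replicas": 1, "number_of_shards": 16 },
//     "mappings": { "properties": {
//         "bucket":   { "type": "keyword" },
//         "name":     { "type": "keyword" },
//         "meta": { "properties": {
//             "mtime": { "type": "date",
//                        "format": "strict_date_optional_time||epoch_millis" },
//             "size":  { "type": "long" } } } } } }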
static bool is_sys_attr(const std::string& attr_name){
static constexpr std::initializer_list<const char*> rgw_sys_attrs =
{RGW_ATTR_PG_VER,
RGW_ATTR_SOURCE_ZONE,
RGW_ATTR_ID_TAG,
RGW_ATTR_TEMPURL_KEY1,
RGW_ATTR_TEMPURL_KEY2,
RGW_ATTR_UNIX1,
RGW_ATTR_UNIX_KEY1
};
return std::find(rgw_sys_attrs.begin(), rgw_sys_attrs.end(), attr_name) != rgw_sys_attrs.end();
}
static size_t attr_len(const bufferlist& val)
{
size_t len = val.length();
if (len && val[len - 1] == '\0') {
--len;
}
return len;
}
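// e.g. an xattr value stored as "foo\0" (length 4) reports attr_len() == 3,
// so the trailing NUL never leaks into the indexed document.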
struct es_obj_metadata {
const DoutPrefixProvider *dpp;
CephContext *cct;
ElasticConfigRef es_conf;
RGWBucketInfo bucket_info;
rgw_obj_key key;
ceph::real_time mtime;
uint64_t size;
map<string, bufferlist> attrs;
uint64_t versioned_epoch;
  es_obj_metadata(const DoutPrefixProvider *_dpp, CephContext *_cct, ElasticConfigRef _es_conf,
                  const RGWBucketInfo& _bucket_info,
                  const rgw_obj_key& _key, ceph::real_time& _mtime, uint64_t _size,
                  map<string, bufferlist>& _attrs, uint64_t _versioned_epoch)
    : dpp(_dpp), cct(_cct), es_conf(_es_conf), bucket_info(_bucket_info), key(_key),
      mtime(_mtime), size(_size), attrs(std::move(_attrs)), versioned_epoch(_versioned_epoch) {}
void dump(Formatter *f) const {
map<string, string> out_attrs;
map<string, string> custom_meta;
RGWAccessControlPolicy policy;
set<string> permissions;
RGWObjTags obj_tags;
for (auto i : attrs) {
const string& attr_name = i.first;
bufferlist& val = i.second;
if (!boost::algorithm::starts_with(attr_name, RGW_ATTR_PREFIX)) {
continue;
}
if (boost::algorithm::starts_with(attr_name, RGW_ATTR_META_PREFIX)) {
custom_meta.emplace(attr_name.substr(sizeof(RGW_ATTR_META_PREFIX) - 1),
string(val.c_str(), attr_len(val)));
continue;
}
if (boost::algorithm::starts_with(attr_name, RGW_ATTR_CRYPT_PREFIX)) {
continue;
}
if (boost::algorithm::starts_with(attr_name, RGW_ATTR_OLH_PREFIX)) {
// skip versioned object olh info
continue;
}
if (attr_name == RGW_ATTR_ACL) {
try {
auto i = val.cbegin();
decode(policy, i);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode acl for " << bucket_info.bucket << "/" << key << dendl;
continue;
}
const RGWAccessControlList& acl = policy.get_acl();
permissions.insert(policy.get_owner().get_id().to_str());
for (auto acliter : acl.get_grant_map()) {
const ACLGrant& grant = acliter.second;
if (grant.get_type().get_type() == ACL_TYPE_CANON_USER &&
((uint32_t)grant.get_permission().get_permissions() & RGW_PERM_READ) != 0) {
rgw_user user;
if (grant.get_id(user)) {
permissions.insert(user.to_str());
}
}
}
} else if (attr_name == RGW_ATTR_TAGS) {
try {
auto tags_bl = val.cbegin();
decode(obj_tags, tags_bl);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode obj tags for "
<< bucket_info.bucket << "/" << key << dendl;
continue;
}
} else if (attr_name == RGW_ATTR_COMPRESSION) {
RGWCompressionInfo cs_info;
try {
auto vals_bl = val.cbegin();
decode(cs_info, vals_bl);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode compression attr for "
<< bucket_info.bucket << "/" << key << dendl;
continue;
}
out_attrs.emplace("compression",std::move(cs_info.compression_type));
} else {
if (!is_sys_attr(attr_name)) {
out_attrs.emplace(attr_name.substr(sizeof(RGW_ATTR_PREFIX) - 1),
std::string(val.c_str(), attr_len(val)));
}
}
}
::encode_json("bucket", bucket_info.bucket.name, f);
::encode_json("name", key.name, f);
string instance = key.instance;
if (instance.empty())
instance = "null";
::encode_json("instance", instance, f);
::encode_json("versioned_epoch", versioned_epoch, f);
::encode_json("owner", policy.get_owner(), f);
::encode_json("permissions", permissions, f);
f->open_object_section("meta");
::encode_json("size", size, f);
string mtime_str;
rgw_to_iso8601(mtime, &mtime_str);
::encode_json("mtime", mtime_str, f);
for (auto i : out_attrs) {
::encode_json(i.first.c_str(), i.second, f);
}
map<string, string> custom_str;
map<string, string> custom_int;
map<string, string> custom_date;
for (auto i : custom_meta) {
auto config = bucket_info.mdsearch_config.find(i.first);
if (config == bucket_info.mdsearch_config.end()) {
if (!es_conf->explicit_custom_meta) {
/* default custom meta is of type string */
custom_str[i.first] = i.second;
} else {
ldpp_dout(dpp, 20) << "custom meta entry key=" << i.first << " not found in bucket mdsearch config: " << bucket_info.mdsearch_config << dendl;
}
continue;
}
switch (config->second) {
case ESEntityTypeMap::ES_ENTITY_DATE:
custom_date[i.first] = i.second;
break;
case ESEntityTypeMap::ES_ENTITY_INT:
custom_int[i.first] = i.second;
break;
default:
custom_str[i.first] = i.second;
}
}
if (!custom_str.empty()) {
f->open_array_section("custom-string");
for (auto i : custom_str) {
f->open_object_section("entity");
::encode_json("name", i.first.c_str(), f);
::encode_json("value", i.second, f);
f->close_section();
}
f->close_section();
}
if (!custom_int.empty()) {
f->open_array_section("custom-int");
for (auto i : custom_int) {
f->open_object_section("entity");
::encode_json("name", i.first.c_str(), f);
::encode_json("value", i.second, f);
f->close_section();
}
f->close_section();
}
if (!custom_date.empty()) {
f->open_array_section("custom-date");
for (auto i : custom_date) {
/*
         * try to explicitly parse date field, otherwise elasticsearch could reject the whole doc,
* which will end up with failed sync
*/
real_time t;
int r = parse_time(i.second.c_str(), &t);
if (r < 0) {
ldpp_dout(dpp, 20) << __func__ << "(): failed to parse time (" << i.second << "), skipping encoding of custom date attribute" << dendl;
continue;
}
string time_str;
rgw_to_iso8601(t, &time_str);
f->open_object_section("entity");
::encode_json("name", i.first.c_str(), f);
::encode_json("value", time_str.c_str(), f);
f->close_section();
}
f->close_section();
}
f->close_section(); // meta
const auto& m = obj_tags.get_tags();
if (m.size() > 0){
f->open_array_section("tagging");
for (const auto &it : m) {
f->open_object_section("tag");
::encode_json("key", it.first, f);
::encode_json("value",it.second, f);
f->close_section();
}
f->close_section(); // tagging
}
}
};
class RGWElasticGetESInfoCBCR : public RGWCoroutine {
public:
RGWElasticGetESInfoCBCR(RGWDataSyncCtx *_sc,
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
conf(_conf) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch info for zone: " << sc->source_zone << dendl;
yield call(new RGWReadRESTResourceCR<ESInfo> (sync_env->cct,
conf->conn.get(),
sync_env->http_manager,
"/", nullptr /*params*/,
&(conf->default_headers),
&(conf->es_info)));
if (retcode < 0) {
ldpp_dout(dpp, 5) << conf->id << ": get elasticsearch failed: " << retcode << dendl;
return set_cr_error(retcode);
}
ldpp_dout(dpp, 5) << conf->id << ": got elastic version=" << conf->es_info.get_version_str() << dendl;
return set_cr_done();
}
return 0;
}
private:
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
ElasticConfigRef conf;
};
class RGWElasticPutIndexCBCR : public RGWCoroutine {
public:
RGWElasticPutIndexCBCR(RGWDataSyncCtx *_sc,
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
conf(_conf) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ldpp_dout(dpp, 5) << conf->id << ": put elasticsearch index for zone: " << sc->source_zone << dendl;
yield {
string path = conf->get_index_path();
es_index_settings settings(conf->num_replicas, conf->num_shards);
std::unique_ptr<es_index_config_base> index_conf;
if (conf->es_info.version >= ES_V5) {
ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version >= 5" << dendl;
index_conf.reset(new es_index_config<es_type_v5>(settings, conf->es_info.version));
} else {
ldpp_dout(dpp, 0) << "elasticsearch: index mapping: version < 5" << dendl;
index_conf.reset(new es_index_config<es_type_v2>(settings, conf->es_info.version));
}
call(new RGWPutRESTResourceCR<es_index_config_base, int, _err_response> (sc->cct,
conf->conn.get(),
sync_env->http_manager,
path, nullptr /*params*/,
&(conf->default_headers),
*index_conf, nullptr, &err_response));
}
if (retcode < 0) {
if (err_response.error.type != "index_already_exists_exception" &&
err_response.error.type != "resource_already_exists_exception") {
ldpp_dout(dpp, 0) << "elasticsearch: failed to initialize index: response.type=" << err_response.error.type << " response.reason=" << err_response.error.reason << dendl;
return set_cr_error(retcode);
}
ldpp_dout(dpp, 0) << "elasticsearch: index already exists, assuming external initialization" << dendl;
}
return set_cr_done();
}
return 0;
}
private:
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
ElasticConfigRef conf;
struct _err_response {
struct err_reason {
vector<err_reason> root_cause;
string type;
string reason;
string index;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("root_cause", root_cause, obj);
JSONDecoder::decode_json("type", type, obj);
JSONDecoder::decode_json("reason", reason, obj);
JSONDecoder::decode_json("index", index, obj);
}
} error;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("error", error, obj);
}
} err_response;
};
class RGWElasticInitConfigCBCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
ElasticConfigRef conf;
public:
RGWElasticInitConfigCBCR(RGWDataSyncCtx *_sc,
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct),
sc(_sc), sync_env(_sc->env),
conf(_conf) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
yield call(new RGWElasticGetESInfoCBCR(sc, conf));
if (retcode < 0) {
return set_cr_error(retcode);
}
yield call(new RGWElasticPutIndexCBCR(sc, conf));
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWElasticHandleRemoteObjCBCR : public RGWStatRemoteObjCBCR {
rgw_bucket_sync_pipe sync_pipe;
ElasticConfigRef conf;
uint64_t versioned_epoch;
public:
RGWElasticHandleRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe, rgw_obj_key& _key,
ElasticConfigRef _conf, uint64_t _versioned_epoch) : RGWStatRemoteObjCBCR(_sc, _sync_pipe.info.source_bs.bucket, _key),
sync_pipe(_sync_pipe), conf(_conf),
versioned_epoch(_versioned_epoch) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ldpp_dout(dpp, 10) << ": stat of remote obj: z=" << sc->source_zone
<< " b=" << sync_pipe.info.source_bs.bucket << " k=" << key
<< " size=" << size << " mtime=" << mtime << dendl;
yield {
string path = conf->get_obj_path(sync_pipe.dest_bucket_info, key);
        es_obj_metadata doc(dpp, sync_env->cct, conf, sync_pipe.dest_bucket_info, key, mtime, size, attrs, versioned_epoch);
call(new RGWPutRESTResourceCR<es_obj_metadata, int>(sync_env->cct, conf->conn.get(),
sync_env->http_manager,
path, nullptr /* params */,
&(conf->default_headers),
doc, nullptr /* result */));
}
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWElasticHandleRemoteObjCR : public RGWCallStatRemoteObjCR {
rgw_bucket_sync_pipe sync_pipe;
ElasticConfigRef conf;
uint64_t versioned_epoch;
public:
RGWElasticHandleRemoteObjCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe, rgw_obj_key& _key,
ElasticConfigRef _conf, uint64_t _versioned_epoch) : RGWCallStatRemoteObjCR(_sc, _sync_pipe.info.source_bs.bucket, _key),
sync_pipe(_sync_pipe),
conf(_conf), versioned_epoch(_versioned_epoch) {
}
~RGWElasticHandleRemoteObjCR() override {}
RGWStatRemoteObjCBCR *allocate_callback() override {
return new RGWElasticHandleRemoteObjCBCR(sc, sync_pipe, key, conf, versioned_epoch);
}
};
class RGWElasticRemoveRemoteObjCBCR : public RGWCoroutine {
RGWDataSyncCtx *sc;
RGWDataSyncEnv *sync_env;
rgw_bucket_sync_pipe sync_pipe;
rgw_obj_key key;
ceph::real_time mtime;
ElasticConfigRef conf;
public:
RGWElasticRemoveRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket_sync_pipe& _sync_pipe, rgw_obj_key& _key, const ceph::real_time& _mtime,
ElasticConfigRef _conf) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
sync_pipe(_sync_pipe), key(_key),
mtime(_mtime), conf(_conf) {}
int operate(const DoutPrefixProvider *dpp) override {
reenter(this) {
ldpp_dout(dpp, 10) << ": remove remote obj: z=" << sc->source_zone
<< " b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << dendl;
yield {
string path = conf->get_obj_path(sync_pipe.dest_bucket_info, key);
call(new RGWDeleteRESTResourceCR(sync_env->cct, conf->conn.get(),
sync_env->http_manager,
path, nullptr /* params */));
}
if (retcode < 0) {
return set_cr_error(retcode);
}
return set_cr_done();
}
return 0;
}
};
class RGWElasticDataSyncModule : public RGWDataSyncModule {
ElasticConfigRef conf;
public:
RGWElasticDataSyncModule(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config) : conf(std::make_shared<ElasticConfig>()) {
conf->init(cct, config);
}
~RGWElasticDataSyncModule() override {}
void init(RGWDataSyncCtx *sc, uint64_t instance_id) override {
conf->init_instance(sc->env->svc->zone->get_realm(), instance_id);
}
RGWCoroutine *init_sync(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc) override {
ldpp_dout(dpp, 5) << conf->id << ": init" << dendl;
return new RGWElasticInitConfigCBCR(sc, conf);
}
RGWCoroutine *start_sync(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc) override {
ldpp_dout(dpp, 5) << conf->id << ": start_sync" << dendl;
    // try to get the elasticsearch version
return new RGWElasticGetESInfoCBCR(sc, conf);
}
RGWCoroutine *sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, std::optional<uint64_t> versioned_epoch, const rgw_zone_set_entry& source_trace_entry, rgw_zone_set *zones_trace) override {
ldpp_dout(dpp, 10) << conf->id << ": sync_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " versioned_epoch=" << versioned_epoch.value_or(0) << dendl;
if (!conf->should_handle_operation(sync_pipe.dest_bucket_info)) {
ldpp_dout(dpp, 10) << conf->id << ": skipping operation (bucket not approved)" << dendl;
return nullptr;
}
return new RGWElasticHandleRemoteObjCR(sc, sync_pipe, key, conf, versioned_epoch.value_or(0));
}
RGWCoroutine *remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override {
/* versioned and versioned epoch params are useless in the elasticsearch backend case */
ldpp_dout(dpp, 10) << conf->id << ": rm_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
if (!conf->should_handle_operation(sync_pipe.dest_bucket_info)) {
ldpp_dout(dpp, 10) << conf->id << ": skipping operation (bucket not approved)" << dendl;
return nullptr;
}
return new RGWElasticRemoveRemoteObjCBCR(sc, sync_pipe, key, mtime, conf);
}
RGWCoroutine *create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime,
rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override {
ldpp_dout(dpp, 10) << conf->id << ": create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime
<< " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
ldpp_dout(dpp, 10) << conf->id << ": skipping operation (not handled)" << dendl;
return NULL;
}
RGWRESTConn *get_rest_conn() {
return conf->conn.get();
}
string get_index_path() {
return conf->get_index_path();
}
map<string, string>& get_request_headers() {
return conf->get_request_headers();
}
};
RGWElasticSyncModuleInstance::RGWElasticSyncModuleInstance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config)
{
data_handler = std::unique_ptr<RGWElasticDataSyncModule>(new RGWElasticDataSyncModule(dpp, cct, config));
}
RGWDataSyncModule *RGWElasticSyncModuleInstance::get_data_handler()
{
return data_handler.get();
}
RGWRESTConn *RGWElasticSyncModuleInstance::get_rest_conn()
{
return data_handler->get_rest_conn();
}
string RGWElasticSyncModuleInstance::get_index_path() {
return data_handler->get_index_path();
}
map<string, string>& RGWElasticSyncModuleInstance::get_request_headers() {
return data_handler->get_request_headers();
}
RGWRESTMgr *RGWElasticSyncModuleInstance::get_rest_filter(int dialect, RGWRESTMgr *orig) {
if (dialect != RGW_REST_S3) {
return orig;
}
delete orig;
return new RGWRESTMgr_MDSearch_S3();
}
int RGWElasticSyncModule::create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) {
string endpoint = config["endpoint"];
instance->reset(new RGWElasticSyncModuleInstance(dpp, cct, config));
return 0;
}
| 33,146 | 33.420561 | 254 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_es.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_sync_module.h"
enum class ESType {
/* string datatypes */
String, /* Deprecated Since 5.X+ */
Text,
Keyword,
/* Numeric Types */
Long, Integer, Short, Byte, Double, Float, Half_Float, Scaled_Float,
/* Date Type */
Date,
/* Boolean */
Boolean,
/* Binary; Must Be Base64 Encoded */
Binary,
/* Range Types */
Integer_Range, Float_Range, Long_Range, Double_Range, Date_Range,
/* A Few Specialized Types */
Geo_Point,
Ip
};
class RGWElasticSyncModule : public RGWSyncModule {
public:
RGWElasticSyncModule() {}
bool supports_data_export() override {
return false;
}
int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) override;
};
class RGWElasticDataSyncModule;
class RGWRESTConn;
class RGWElasticSyncModuleInstance : public RGWSyncModuleInstance {
std::unique_ptr<RGWElasticDataSyncModule> data_handler;
public:
RGWElasticSyncModuleInstance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config);
RGWDataSyncModule *get_data_handler() override;
RGWRESTMgr *get_rest_filter(int dialect, RGWRESTMgr *orig) override;
RGWRESTConn *get_rest_conn();
std::string get_index_path();
std::map<std::string, std::string>& get_request_headers();
bool supports_user_writes() override {
return true;
}
};
| 1,508 | 24.15 | 147 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_es_rest.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_sync_module_es.h"
#include "rgw_sync_module_es_rest.h"
#include "rgw_es_query.h"
#include "rgw_op.h"
#include "rgw_rest.h"
#include "rgw_rest_s3.h"
#include "rgw_sal_rados.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
struct es_index_obj_response {
string bucket;
rgw_obj_key key;
uint64_t versioned_epoch{0};
ACLOwner owner;
set<string> read_permissions;
struct {
uint64_t size{0};
ceph::real_time mtime;
string etag;
string content_type;
string storage_class;
map<string, string> custom_str;
map<string, int64_t> custom_int;
map<string, string> custom_date;
template <class T>
struct _custom_entry {
string name;
T value;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("name", name, obj);
JSONDecoder::decode_json("value", value, obj);
}
};
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("size", size, obj);
string mtime_str;
JSONDecoder::decode_json("mtime", mtime_str, obj);
parse_time(mtime_str.c_str(), &mtime);
JSONDecoder::decode_json("etag", etag, obj);
JSONDecoder::decode_json("content_type", content_type, obj);
JSONDecoder::decode_json("storage_class", storage_class, obj);
list<_custom_entry<string> > str_entries;
JSONDecoder::decode_json("custom-string", str_entries, obj);
for (auto& e : str_entries) {
custom_str[e.name] = e.value;
}
list<_custom_entry<int64_t> > int_entries;
JSONDecoder::decode_json("custom-int", int_entries, obj);
for (auto& e : int_entries) {
custom_int[e.name] = e.value;
}
list<_custom_entry<string> > date_entries;
JSONDecoder::decode_json("custom-date", date_entries, obj);
for (auto& e : date_entries) {
custom_date[e.name] = e.value;
}
}
} meta;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("bucket", bucket, obj);
JSONDecoder::decode_json("name", key.name, obj);
JSONDecoder::decode_json("instance", key.instance, obj);
JSONDecoder::decode_json("versioned_epoch", versioned_epoch, obj);
JSONDecoder::decode_json("permissions", read_permissions, obj);
JSONDecoder::decode_json("owner", owner, obj);
JSONDecoder::decode_json("meta", meta, obj);
}
};
struct es_search_response {
uint32_t took;
bool timed_out;
struct {
uint32_t total;
uint32_t successful;
uint32_t failed;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("total", total, obj);
JSONDecoder::decode_json("successful", successful, obj);
JSONDecoder::decode_json("failed", failed, obj);
}
} shards;
struct obj_hit {
string index;
string type;
string id;
// double score
es_index_obj_response source;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("_index", index, obj);
JSONDecoder::decode_json("_type", type, obj);
JSONDecoder::decode_json("_id", id, obj);
JSONDecoder::decode_json("_source", source, obj);
}
};
struct {
uint32_t total;
// double max_score;
list<obj_hit> hits;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("total", total, obj);
// JSONDecoder::decode_json("max_score", max_score, obj);
JSONDecoder::decode_json("hits", hits, obj);
}
} hits;
void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("took", took, obj);
JSONDecoder::decode_json("timed_out", timed_out, obj);
JSONDecoder::decode_json("_shards", shards, obj);
JSONDecoder::decode_json("hits", hits, obj);
}
};
class RGWMetadataSearchOp : public RGWOp {
RGWSyncModuleInstanceRef sync_module_ref;
RGWElasticSyncModuleInstance *es_module;
protected:
string expression;
string custom_prefix;
#define MAX_KEYS_DEFAULT 100
uint64_t max_keys{MAX_KEYS_DEFAULT};
string marker_str;
uint64_t marker{0};
string next_marker;
bool is_truncated{false};
string err;
es_search_response response;
public:
RGWMetadataSearchOp(const RGWSyncModuleInstanceRef& sync_module) : sync_module_ref(sync_module) {
es_module = static_cast<RGWElasticSyncModuleInstance *>(sync_module_ref.get());
}
int verify_permission(optional_yield) override {
return 0;
}
virtual int get_params() = 0;
void pre_exec() override;
void execute(optional_yield y) override;
const char* name() const override { return "metadata_search"; }
  RGWOpType get_type() override { return RGW_OP_METADATA_SEARCH; }
  uint32_t op_mask() override { return RGW_OP_TYPE_READ; }
};
void RGWMetadataSearchOp::pre_exec()
{
rgw_bucket_object_pre_exec(s);
}
void RGWMetadataSearchOp::execute(optional_yield y)
{
op_ret = get_params();
if (op_ret < 0)
return;
list<pair<string, string> > conds;
if (!s->user->get_info().system) {
conds.push_back(make_pair("permissions", s->user->get_id().to_str()));
}
if (!s->bucket_name.empty()) {
conds.push_back(make_pair("bucket", s->bucket_name));
}
ESQueryCompiler es_query(expression, &conds, custom_prefix);
static map<string, string, ltstr_nocase> aliases = {
{ "bucket", "bucket" }, /* forces lowercase */
{ "name", "name" },
{ "key", "name" },
{ "instance", "instance" },
{ "etag", "meta.etag" },
{ "size", "meta.size" },
{ "mtime", "meta.mtime" },
{ "lastmodified", "meta.mtime" },
{ "last_modified", "meta.mtime" },
{ "contenttype", "meta.content_type" },
{ "content_type", "meta.content_type" },
{ "storageclass", "meta.storage_class" },
{ "storage_class", "meta.storage_class" },
};
es_query.set_field_aliases(&aliases);
static map<string, ESEntityTypeMap::EntityType> generic_map = { {"bucket", ESEntityTypeMap::ES_ENTITY_STR},
{"name", ESEntityTypeMap::ES_ENTITY_STR},
{"instance", ESEntityTypeMap::ES_ENTITY_STR},
{"permissions", ESEntityTypeMap::ES_ENTITY_STR},
{"meta.etag", ESEntityTypeMap::ES_ENTITY_STR},
{"meta.content_type", ESEntityTypeMap::ES_ENTITY_STR},
{"meta.mtime", ESEntityTypeMap::ES_ENTITY_DATE},
{"meta.size", ESEntityTypeMap::ES_ENTITY_INT},
{"meta.storage_class", ESEntityTypeMap::ES_ENTITY_STR} };
ESEntityTypeMap gm(generic_map);
es_query.set_generic_type_map(&gm);
static set<string> restricted_fields = { {"permissions"} };
es_query.set_restricted_fields(&restricted_fields);
map<string, ESEntityTypeMap::EntityType> custom_map;
for (auto& i : s->bucket->get_info().mdsearch_config) {
custom_map[i.first] = (ESEntityTypeMap::EntityType)i.second;
}
ESEntityTypeMap em(custom_map);
es_query.set_custom_type_map(&em);
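  // Hedged example: with the aliases and type maps above, client expressions
  // such as "name==foo" or "size>=1024" resolve to the ES fields "name" and
  // "meta.size" respectively before being compiled into the request payload.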
bool valid = es_query.compile(&err);
if (!valid) {
ldpp_dout(this, 10) << "invalid query, failed generating request json" << dendl;
op_ret = -EINVAL;
return;
}
JSONFormatter f;
encode_json("root", es_query, &f);
RGWRESTConn *conn = es_module->get_rest_conn();
bufferlist in;
bufferlist out;
stringstream ss;
f.flush(ss);
in.append(ss.str());
string resource = es_module->get_index_path() + "/_search";
param_vec_t params;
static constexpr int BUFSIZE = 32;
char buf[BUFSIZE];
snprintf(buf, sizeof(buf), "%lld", (long long)max_keys);
params.push_back(param_pair_t("size", buf));
if (marker > 0) {
params.push_back(param_pair_t("from", marker_str.c_str()));
}
ldpp_dout(this, 20) << "sending request to elasticsearch, payload=" << string(in.c_str(), in.length()) << dendl;
auto& extra_headers = es_module->get_request_headers();
op_ret = conn->get_resource(s, resource, ¶ms, &extra_headers,
out, &in, nullptr, y);
if (op_ret < 0) {
ldpp_dout(this, 0) << "ERROR: failed to fetch resource (r=" << resource << ", ret=" << op_ret << ")" << dendl;
return;
}
ldpp_dout(this, 20) << "response: " << string(out.c_str(), out.length()) << dendl;
JSONParser jparser;
if (!jparser.parse(out.c_str(), out.length())) {
ldpp_dout(this, 0) << "ERROR: failed to parse elasticsearch response" << dendl;
op_ret = -EINVAL;
return;
}
try {
decode_json_obj(response, &jparser);
} catch (const JSONDecoder::err& e) {
ldpp_dout(this, 0) << "ERROR: failed to decode JSON input: " << e.what() << dendl;
op_ret = -EINVAL;
return;
}
}
class RGWMetadataSearch_ObjStore_S3 : public RGWMetadataSearchOp {
public:
explicit RGWMetadataSearch_ObjStore_S3(const RGWSyncModuleInstanceRef& _sync_module) : RGWMetadataSearchOp(_sync_module) {
custom_prefix = "x-amz-meta-";
}
int get_params() override {
expression = s->info.args.get("query");
bool exists;
string max_keys_str = s->info.args.get("max-keys", &exists);
#define MAX_KEYS_MAX 10000
if (exists) {
string err;
max_keys = strict_strtoll(max_keys_str.c_str(), 10, &err);
if (!err.empty()) {
return -EINVAL;
}
if (max_keys > MAX_KEYS_MAX) {
max_keys = MAX_KEYS_MAX;
}
}
marker_str = s->info.args.get("marker", &exists);
if (exists) {
string err;
marker = strict_strtoll(marker_str.c_str(), 10, &err);
if (!err.empty()) {
return -EINVAL;
}
}
uint64_t nm = marker + max_keys;
static constexpr int BUFSIZE = 32;
char buf[BUFSIZE];
snprintf(buf, sizeof(buf), "%lld", (long long)nm);
next_marker = buf;
return 0;
}
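  // Illustrative request flow (hypothetical values): a client issues
  //   GET /?query=name==foo&max-keys=20&marker=40
  // and this handler pages via elasticsearch's from/size parameters,
  // advertising NextMarker = marker + max-keys (here "60") when truncated.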
void send_response() override {
if (op_ret) {
s->err.message = err;
set_req_state_err(s, op_ret);
}
dump_errno(s);
end_header(s, this, "application/xml");
if (op_ret < 0) {
return;
}
is_truncated = (response.hits.hits.size() >= max_keys);
s->formatter->open_object_section("SearchMetadataResponse");
s->formatter->dump_string("Marker", marker_str);
s->formatter->dump_string("IsTruncated", (is_truncated ? "true" : "false"));
if (is_truncated) {
s->formatter->dump_string("NextMarker", next_marker);
}
if (s->format == RGWFormat::JSON) {
s->formatter->open_array_section("Objects");
}
for (auto& i : response.hits.hits) {
s->formatter->open_object_section("Contents");
es_index_obj_response& e = i.source;
s->formatter->dump_string("Bucket", e.bucket);
s->formatter->dump_string("Key", e.key.name);
string instance = (!e.key.instance.empty() ? e.key.instance : "null");
s->formatter->dump_string("Instance", instance.c_str());
s->formatter->dump_int("VersionedEpoch", e.versioned_epoch);
dump_time(s, "LastModified", e.meta.mtime);
s->formatter->dump_int("Size", e.meta.size);
s->formatter->dump_format("ETag", "\"%s\"", e.meta.etag.c_str());
s->formatter->dump_string("ContentType", e.meta.content_type.c_str());
s->formatter->dump_string("StorageClass", e.meta.storage_class.c_str());
dump_owner(s, e.owner.get_id(), e.owner.get_display_name());
s->formatter->open_array_section("CustomMetadata");
for (auto& m : e.meta.custom_str) {
s->formatter->open_object_section("Entry");
s->formatter->dump_string("Name", m.first.c_str());
s->formatter->dump_string("Value", m.second);
s->formatter->close_section();
}
for (auto& m : e.meta.custom_int) {
s->formatter->open_object_section("Entry");
s->formatter->dump_string("Name", m.first.c_str());
s->formatter->dump_int("Value", m.second);
s->formatter->close_section();
}
for (auto& m : e.meta.custom_date) {
s->formatter->open_object_section("Entry");
s->formatter->dump_string("Name", m.first.c_str());
s->formatter->dump_string("Value", m.second);
s->formatter->close_section();
}
s->formatter->close_section();
rgw_flush_formatter(s, s->formatter);
s->formatter->close_section();
    }
if (s->format == RGWFormat::JSON) {
s->formatter->close_section();
}
s->formatter->close_section();
rgw_flush_formatter_and_reset(s, s->formatter);
}
};
class RGWHandler_REST_MDSearch_S3 : public RGWHandler_REST_S3 {
protected:
RGWOp *op_get() override {
if (s->info.args.exists("query")) {
return new RGWMetadataSearch_ObjStore_S3(driver->get_sync_module());
}
if (!s->init_state.url_bucket.empty() &&
s->info.args.exists("mdsearch")) {
return new RGWGetBucketMetaSearch_ObjStore_S3;
}
return nullptr;
}
RGWOp *op_head() override {
return nullptr;
}
RGWOp *op_post() override {
return nullptr;
}
public:
explicit RGWHandler_REST_MDSearch_S3(const rgw::auth::StrategyRegistry& auth_registry) : RGWHandler_REST_S3(auth_registry) {}
virtual ~RGWHandler_REST_MDSearch_S3() {}
};
RGWHandler_REST* RGWRESTMgr_MDSearch_S3::get_handler(rgw::sal::Driver* driver,
req_state* const s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix)
{
int ret =
RGWHandler_REST_S3::init_from_header(driver, s,
RGWFormat::XML, true);
if (ret < 0) {
return nullptr;
}
if (!s->object->empty()) {
return nullptr;
}
RGWHandler_REST *handler = new RGWHandler_REST_MDSearch_S3(auth_registry);
ldpp_dout(s, 20) << __func__ << " handler=" << typeid(*handler).name()
<< dendl;
return handler;
}
| 14,407 | 32.585082 | 127 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_es_rest.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_rest.h"
class RGWElasticSyncModuleInstance;
class RGWRESTMgr_MDSearch_S3 : public RGWRESTMgr {
public:
explicit RGWRESTMgr_MDSearch_S3() {}
RGWHandler_REST *get_handler(rgw::sal::Driver* driver,
req_state* s,
const rgw::auth::StrategyRegistry& auth_registry,
const std::string& frontend_prefix) override;
};
| 521 | 26.473684 | 80 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_log.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_common.h"
#include "rgw_coroutine.h"
#include "rgw_cr_rados.h"
#include "rgw_sync_module.h"
#include "rgw_data_sync.h"
#include "rgw_sync_module_log.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
class RGWLogStatRemoteObjCBCR : public RGWStatRemoteObjCBCR {
public:
RGWLogStatRemoteObjCBCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key) : RGWStatRemoteObjCBCR(_sc, _src_bucket, _key) {}
int operate(const DoutPrefixProvider *dpp) override {
ldpp_dout(dpp, 0) << "SYNC_LOG: stat of remote obj: z=" << sc->source_zone
<< " b=" << src_bucket << " k=" << key << " size=" << size << " mtime=" << mtime
<< " attrs=" << attrs << dendl;
return set_cr_done();
}
};
class RGWLogStatRemoteObjCR : public RGWCallStatRemoteObjCR {
public:
RGWLogStatRemoteObjCR(RGWDataSyncCtx *_sc,
rgw_bucket& _src_bucket, rgw_obj_key& _key) : RGWCallStatRemoteObjCR(_sc, _src_bucket, _key) {
}
~RGWLogStatRemoteObjCR() override {}
RGWStatRemoteObjCBCR *allocate_callback() override {
return new RGWLogStatRemoteObjCBCR(sc, src_bucket, key);
}
};
class RGWLogDataSyncModule : public RGWDataSyncModule {
string prefix;
public:
explicit RGWLogDataSyncModule(const string& _prefix) : prefix(_prefix) {}
RGWCoroutine *sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, std::optional<uint64_t> versioned_epoch, const rgw_zone_set_entry& source_trace_entry, rgw_zone_set *zones_trace) override {
ldpp_dout(dpp, 0) << prefix << ": SYNC_LOG: sync_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " versioned_epoch=" << versioned_epoch.value_or(0) << dendl;
return new RGWLogStatRemoteObjCR(sc, sync_pipe.info.source_bs.bucket, key);
}
RGWCoroutine *remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override {
ldpp_dout(dpp, 0) << prefix << ": SYNC_LOG: rm_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
return NULL;
}
RGWCoroutine *create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime,
rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override {
ldpp_dout(dpp, 0) << prefix << ": SYNC_LOG: create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime
<< " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
return NULL;
}
};
class RGWLogSyncModuleInstance : public RGWSyncModuleInstance {
RGWLogDataSyncModule data_handler;
public:
explicit RGWLogSyncModuleInstance(const string& prefix) : data_handler(prefix) {}
RGWDataSyncModule *get_data_handler() override {
return &data_handler;
}
};
int RGWLogSyncModule::create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) {
string prefix = config["prefix"];
instance->reset(new RGWLogSyncModuleInstance(prefix));
return 0;
}
| 3,586 | 45.584416 | 254 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_module_log.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include "rgw_sync_module.h"
class RGWLogSyncModule : public RGWSyncModule {
public:
RGWLogSyncModule() {}
bool supports_data_export() override {
return false;
}
int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) override;
};
| 444 | 26.8125 | 147 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_trace.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <regex>
#include "common/debug.h"
#include "common/ceph_json.h"
#include "rgw_sync_trace.h"
#include "rgw_rados.h"
#include "rgw_worker.h"
#define dout_context g_ceph_context
static constexpr auto dout_subsys = ceph_subsys_rgw;
using namespace std;
RGWSyncTraceNode::RGWSyncTraceNode(CephContext *_cct, uint64_t _handle,
const RGWSyncTraceNodeRef& _parent,
const string& _type, const string& _id) : cct(_cct),
parent(_parent),
type(_type),
id(_id),
handle(_handle),
history(cct->_conf->rgw_sync_trace_per_node_log_size)
{
if (parent.get()) {
prefix = parent->get_prefix();
}
if (!type.empty()) {
prefix += type;
if (!id.empty()) {
prefix += "[" + id + "]";
}
prefix += ":";
}
}
void RGWSyncTraceNode::log(int level, const string& s)
{
status = s;
history.push_back(status);
/* dump output on either rgw_sync, or rgw -- but only once */
if (cct->_conf->subsys.should_gather(ceph_subsys_rgw_sync, level)) {
lsubdout(cct, rgw_sync,
ceph::dout::need_dynamic(level)) << "RGW-SYNC:" << to_str() << dendl;
} else {
lsubdout(cct, rgw,
ceph::dout::need_dynamic(level)) << "RGW-SYNC:" << to_str() << dendl;
}
}
class RGWSyncTraceServiceMapThread : public RGWRadosThread {
RGWRados *store;
RGWSyncTraceManager *manager;
uint64_t interval_msec() override {
return cct->_conf->rgw_sync_trace_servicemap_update_interval * 1000;
}
public:
RGWSyncTraceServiceMapThread(RGWRados *_store, RGWSyncTraceManager *_manager)
: RGWRadosThread(_store, "sync-trace"), store(_store), manager(_manager) {}
int process(const DoutPrefixProvider *dpp) override;
};
int RGWSyncTraceServiceMapThread::process(const DoutPrefixProvider *dpp)
{
map<string, string> status;
status["current_sync"] = manager->get_active_names();
int ret = store->update_service_map(dpp, std::move(status));
if (ret < 0) {
ldout(store->ctx(), 0) << "ERROR: update_service_map() returned ret=" << ret << dendl;
}
return 0;
}
RGWSyncTraceNodeRef RGWSyncTraceManager::add_node(const RGWSyncTraceNodeRef& parent,
const std::string& type,
const std::string& id)
{
shunique_lock wl(lock, ceph::acquire_unique);
auto handle = alloc_handle();
RGWSyncTraceNodeRef& ref = nodes[handle];
ref.reset(new RGWSyncTraceNode(cct, handle, parent, type, id));
// return a separate shared_ptr that calls finish() on the node instead of
// deleting it. the lambda capture holds a reference to the original 'ref'
auto deleter = [ref, this] (RGWSyncTraceNode *node) { finish_node(node); };
return {ref.get(), deleter};
}
bool RGWSyncTraceNode::match(const string& search_term, bool search_history)
{
try {
std::regex expr(search_term);
std::smatch m;
if (regex_search(prefix, m, expr)) {
return true;
}
    if (regex_search(status, m, expr)) {
return true;
}
if (!search_history) {
return false;
}
for (auto h : history) {
if (regex_search(h, m, expr)) {
return true;
}
}
} catch (const std::regex_error& e) {
ldout(cct, 5) << "NOTICE: sync trace: bad expression: bad regex search term" << dendl;
}
return false;
}
void RGWSyncTraceManager::init(RGWRados *store)
{
service_map_thread = new RGWSyncTraceServiceMapThread(store, this);
service_map_thread->start();
}
RGWSyncTraceManager::~RGWSyncTraceManager()
{
cct->get_admin_socket()->unregister_commands(this);
service_map_thread->stop();
delete service_map_thread;
nodes.clear();
}
int RGWSyncTraceManager::hook_to_admin_command()
{
AdminSocket *admin_socket = cct->get_admin_socket();
admin_commands = { { "sync trace show name=search,type=CephString,req=false", "sync trace show [filter_str]: show current multisite tracing information" },
{ "sync trace history name=search,type=CephString,req=false", "sync trace history [filter_str]: show history of multisite tracing information" },
{ "sync trace active name=search,type=CephString,req=false", "show active multisite sync entities information" },
{ "sync trace active_short name=search,type=CephString,req=false", "show active multisite sync entities entries" } };
for (auto cmd : admin_commands) {
int r = admin_socket->register_command(cmd[0], this,
cmd[1]);
if (r < 0) {
lderr(cct) << "ERROR: fail to register admin socket command (r=" << r << ")" << dendl;
return r;
}
}
return 0;
}
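// These commands surface through the radosgw admin socket; a typical
// invocation (socket path illustrative) looks like:
//
//   ceph daemon /var/run/ceph/ceph-client.rgw.<id>.asok sync trace active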
static void dump_node(RGWSyncTraceNode *entry, bool show_history, Formatter *f)
{
f->open_object_section("entry");
::encode_json("status", entry->to_str(), f);
if (show_history) {
f->open_array_section("history");
for (auto h : entry->get_history()) {
::encode_json("entry", h, f);
}
f->close_section();
}
f->close_section();
}
string RGWSyncTraceManager::get_active_names()
{
shunique_lock rl(lock, ceph::acquire_shared);
stringstream ss;
JSONFormatter f;
f.open_array_section("result");
for (auto n : nodes) {
auto& entry = n.second;
if (!entry->test_flags(RGW_SNS_FLAG_ACTIVE)) {
continue;
}
const string& name = entry->get_resource_name();
if (!name.empty()) {
::encode_json("entry", name, &f);
}
f.flush(ss);
}
f.close_section();
f.flush(ss);
return ss.str();
}
int RGWSyncTraceManager::call(std::string_view command, const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& ss,
bufferlist& out) {
bool show_history = (command == "sync trace history");
bool show_short = (command == "sync trace active_short");
bool show_active = (command == "sync trace active") || show_short;
string search;
auto si = cmdmap.find("search");
if (si != cmdmap.end()) {
search = boost::get<string>(si->second);
}
shunique_lock rl(lock, ceph::acquire_shared);
f->open_object_section("result");
f->open_array_section("running");
for (auto n : nodes) {
auto& entry = n.second;
if (!search.empty() && !entry->match(search, show_history)) {
continue;
}
if (show_active && !entry->test_flags(RGW_SNS_FLAG_ACTIVE)) {
continue;
}
if (show_short) {
const string& name = entry->get_resource_name();
if (!name.empty()) {
::encode_json("entry", name, f);
}
} else {
dump_node(entry.get(), show_history, f);
}
f->flush(out);
}
f->close_section();
f->open_array_section("complete");
for (auto& entry : complete_nodes) {
if (!search.empty() && !entry->match(search, show_history)) {
continue;
}
if (show_active && !entry->test_flags(RGW_SNS_FLAG_ACTIVE)) {
continue;
}
dump_node(entry.get(), show_history, f);
f->flush(out);
}
f->close_section();
f->close_section();
return 0;
}
void RGWSyncTraceManager::finish_node(RGWSyncTraceNode *node)
{
RGWSyncTraceNodeRef old_node;
{
shunique_lock wl(lock, ceph::acquire_unique);
if (!node) {
return;
}
auto iter = nodes.find(node->handle);
if (iter == nodes.end()) {
/* not found, already finished */
return;
}
if (complete_nodes.full()) {
/* take a reference to the entry that is going to be evicted,
* can't let it get evicted under lock held, otherwise
* it's a deadlock as it will call finish_node()
*/
old_node = complete_nodes.front();
}
complete_nodes.push_back(iter->second);
nodes.erase(iter);
}
}
| 8,271 | 27.426117 | 166 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_sync_trace.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <atomic>
#include "common/ceph_mutex.h"
#include "common/shunique_lock.h"
#include "common/admin_socket.h"
#include <set>
#include <ostream>
#include <string>
#include <shared_mutex>
#include <boost/circular_buffer.hpp>
#define SSTR(o) ({ \
std::stringstream ss; \
ss << o; \
ss.str(); \
})
#define RGW_SNS_FLAG_ACTIVE 1
#define RGW_SNS_FLAG_ERROR 2
class RGWRados;
class RGWSyncTraceManager;
class RGWSyncTraceNode;
class RGWSyncTraceServiceMapThread;
using RGWSyncTraceNodeRef = std::shared_ptr<RGWSyncTraceNode>;
class RGWSyncTraceNode final {
friend class RGWSyncTraceManager;
CephContext *cct;
RGWSyncTraceNodeRef parent;
uint16_t state{0};
std::string status;
ceph::mutex lock = ceph::make_mutex("RGWSyncTraceNode::lock");
std::string type;
std::string id;
std::string prefix;
std::string resource_name;
uint64_t handle;
boost::circular_buffer<std::string> history;
// private constructor, create with RGWSyncTraceManager::add_node()
RGWSyncTraceNode(CephContext *_cct, uint64_t _handle,
const RGWSyncTraceNodeRef& _parent,
const std::string& _type, const std::string& _id);
public:
void set_resource_name(const std::string& s) {
resource_name = s;
}
const std::string& get_resource_name() {
return resource_name;
}
void set_flag(uint16_t s) {
state |= s;
}
void unset_flag(uint16_t s) {
state &= ~s;
}
bool test_flags(uint16_t f) {
return (state & f) == f;
}
void log(int level, const std::string& s);
std::string to_str() {
return prefix + " " + status;
}
const std::string& get_prefix() {
return prefix;
}
  friend std::ostream& operator<<(std::ostream& os, RGWSyncTraceNode& node) {
    os << node.to_str();
    return os;
  }
boost::circular_buffer<std::string>& get_history() {
return history;
}
bool match(const std::string& search_term, bool search_history);
};
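/* Usage sketch (illustrative): nodes are created via
 * RGWSyncTraceManager::add_node() and flagged/logged like so, where
 * `manager`, `parent` and `shard_id` are assumed to exist in the caller:
 *
 *   RGWSyncTraceNodeRef node = manager.add_node(parent, "bucket", "mybucket");
 *   node->set_flag(RGW_SNS_FLAG_ACTIVE);
 *   node->log(10, SSTR("syncing shard " << shard_id));
 *   node->unset_flag(RGW_SNS_FLAG_ACTIVE);
 */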
class RGWSyncTraceManager : public AdminSocketHook {
friend class RGWSyncTraceNode;
mutable std::shared_timed_mutex lock;
using shunique_lock = ceph::shunique_lock<decltype(lock)>;
CephContext *cct;
RGWSyncTraceServiceMapThread *service_map_thread{nullptr};
std::map<uint64_t, RGWSyncTraceNodeRef> nodes;
boost::circular_buffer<RGWSyncTraceNodeRef> complete_nodes;
std::atomic<uint64_t> count = { 0 };
std::list<std::array<std::string, 3> > admin_commands;
uint64_t alloc_handle() {
return ++count;
}
void finish_node(RGWSyncTraceNode *node);
public:
RGWSyncTraceManager(CephContext *_cct, int max_lru) : cct(_cct), complete_nodes(max_lru) {}
~RGWSyncTraceManager();
void init(RGWRados *store);
const RGWSyncTraceNodeRef root_node;
RGWSyncTraceNodeRef add_node(const RGWSyncTraceNodeRef& parent,
const std::string& type,
const std::string& id = "");
int hook_to_admin_command();
int call(std::string_view command, const cmdmap_t& cmdmap,
const bufferlist&,
Formatter *f,
std::ostream& ss,
bufferlist& out) override;
std::string get_active_names();
};
| 3,267 | 22.014085 | 93 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_tools.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/errno.h"
#include "librados/librados_asio.h"
#include "include/stringify.h"
#include "rgw_tools.h"
#include "rgw_acl_s3.h"
#include "rgw_aio_throttle.h"
#include "rgw_compression.h"
#include "common/BackTrace.h"
#define dout_subsys ceph_subsys_rgw
#define READ_CHUNK_LEN (512 * 1024)
using namespace std;
int rgw_init_ioctx(const DoutPrefixProvider *dpp,
librados::Rados *rados, const rgw_pool& pool,
librados::IoCtx& ioctx, bool create,
bool mostly_omap,
bool bulk)
{
int r = rados->ioctx_create(pool.name.c_str(), ioctx);
if (r == -ENOENT && create) {
r = rados->pool_create(pool.name.c_str());
if (r == -ERANGE) {
ldpp_dout(dpp, 0)
<< __func__
<< " ERROR: librados::Rados::pool_create returned " << cpp_strerror(-r)
<< " (this can be due to a pool or placement group misconfiguration, e.g."
<< " pg_num < pgp_num or mon_max_pg_per_osd exceeded)"
<< dendl;
}
if (r < 0 && r != -EEXIST) {
return r;
}
r = rados->ioctx_create(pool.name.c_str(), ioctx);
if (r < 0) {
return r;
}
r = ioctx.application_enable(pg_pool_t::APPLICATION_NAME_RGW, false);
if (r < 0 && r != -EOPNOTSUPP) {
return r;
}
if (mostly_omap) {
// set pg_autoscale_bias
bufferlist inbl;
float bias = g_conf().get_val<double>("rgw_rados_pool_autoscale_bias");
int r = rados->mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" +
pool.name + "\", \"var\": \"pg_autoscale_bias\", \"val\": \"" +
stringify(bias) + "\"}",
inbl, NULL, NULL);
if (r < 0) {
ldpp_dout(dpp, 10) << __func__ << " warning: failed to set pg_autoscale_bias on "
<< pool.name << dendl;
}
// set recovery_priority
int p = g_conf().get_val<uint64_t>("rgw_rados_pool_recovery_priority");
r = rados->mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" +
pool.name + "\", \"var\": \"recovery_priority\": \"" +
stringify(p) + "\"}",
inbl, NULL, NULL);
if (r < 0) {
ldpp_dout(dpp, 10) << __func__ << " warning: failed to set recovery_priority on "
<< pool.name << dendl;
}
}
if (bulk) {
// set bulk
bufferlist inbl;
int r = rados->mon_command(
"{\"prefix\": \"osd pool set\", \"pool\": \"" +
pool.name + "\", \"var\": \"bulk\", \"val\": \"true\"}",
inbl, NULL, NULL);
if (r < 0) {
ldpp_dout(dpp, 10) << __func__ << " warning: failed to set 'bulk' on "
<< pool.name << dendl;
}
}
} else if (r < 0) {
return r;
}
if (!pool.ns.empty()) {
ioctx.set_namespace(pool.ns);
}
return 0;
}
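/* Usage sketch (illustrative; assumes an initialized librados::Rados handle
 * `rados` and a DoutPrefixProvider `dpp`):
 *
 *   librados::IoCtx ioctx;
 *   int r = rgw_init_ioctx(dpp, &rados, rgw_pool("default.rgw.log"), ioctx,
 *                          true, true);  // create if missing, mostly_omap
 *   if (r < 0) {
 *     ldpp_dout(dpp, 0) << "failed to open pool: " << cpp_strerror(-r) << dendl;
 *   }
 */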
map<string, bufferlist>* no_change_attrs() {
static map<string, bufferlist> no_change;
return &no_change;
}
int rgw_put_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
const rgw_pool& pool, const string& oid, bufferlist& data, bool exclusive,
RGWObjVersionTracker *objv_tracker, real_time set_mtime, optional_yield y, map<string, bufferlist> *pattrs)
{
map<string,bufferlist> no_attrs;
if (!pattrs) {
pattrs = &no_attrs;
}
rgw_raw_obj obj(pool, oid);
auto sysobj = svc_sysobj->get_obj(obj);
int ret;
if (pattrs != no_change_attrs()) {
ret = sysobj.wop()
.set_objv_tracker(objv_tracker)
.set_exclusive(exclusive)
.set_mtime(set_mtime)
.set_attrs(*pattrs)
.write(dpp, data, y);
} else {
ret = sysobj.wop()
.set_objv_tracker(objv_tracker)
.set_exclusive(exclusive)
.set_mtime(set_mtime)
.write_data(dpp, data, y);
}
return ret;
}
int rgw_stat_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
const rgw_pool& pool, const std::string& key,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime, optional_yield y,
std::map<std::string, bufferlist> *pattrs)
{
rgw_raw_obj obj(pool, key);
auto sysobj = svc_sysobj->get_obj(obj);
return sysobj.rop()
.set_attrs(pattrs)
.set_last_mod(pmtime)
.stat(y, dpp);
}
int rgw_get_system_obj(RGWSI_SysObj* svc_sysobj, const rgw_pool& pool, const string& key, bufferlist& bl,
RGWObjVersionTracker *objv_tracker, real_time *pmtime, optional_yield y,
const DoutPrefixProvider *dpp, map<string, bufferlist> *pattrs,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version> refresh_version, bool raw_attrs)
{
const rgw_raw_obj obj(pool, key);
auto sysobj = svc_sysobj->get_obj(obj);
auto rop = sysobj.rop();
return rop.set_attrs(pattrs)
.set_last_mod(pmtime)
.set_objv_tracker(objv_tracker)
.set_raw_attrs(raw_attrs)
.set_cache_info(cache_info)
.set_refresh_version(refresh_version)
.read(dpp, &bl, y);
}
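/* Usage sketch (illustrative; `svc` is an RGWSI_SysObj*, `y` an optional_yield,
 * `cct` a CephContext*): a simple write/read round trip of a system object:
 *
 *   bufferlist in, out;
 *   in.append("payload");
 *   RGWObjVersionTracker objv;
 *   objv.generate_new_write_ver(cct);
 *   int r = rgw_put_system_obj(dpp, svc, pool, "myoid", in,
 *                              true /* exclusive */, &objv,
 *                              ceph::real_clock::now(), y);
 *   if (r >= 0) {
 *     r = rgw_get_system_obj(svc, pool, "myoid", out, nullptr, nullptr, y, dpp);
 *   }
 */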
int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const string& oid,
RGWObjVersionTracker *objv_tracker, optional_yield y)
{
  auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid});
return sysobj.wop()
.set_objv_tracker(objv_tracker)
.remove(dpp, y);
}
int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectReadOperation *op, bufferlist* pbl,
optional_yield y, int flags)
{
// given a yield_context, call async_operate() to yield the coroutine instead
// of blocking
if (y) {
auto& context = y.get_io_context();
auto& yield = y.get_yield_context();
boost::system::error_code ec;
auto bl = librados::async_operate(
context, ioctx, oid, op, flags, yield[ec]);
if (pbl) {
*pbl = std::move(bl);
}
return -ec.value();
}
// work on asio threads should be asynchronous, so warn when they block
if (is_asio_thread) {
ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl;
#ifdef _BACKTRACE_LOGGING
ldpp_dout(dpp, 20) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl;
#endif
}
return ioctx.operate(oid, op, nullptr, flags);
}
int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectWriteOperation *op, optional_yield y,
int flags)
{
if (y) {
auto& context = y.get_io_context();
auto& yield = y.get_yield_context();
boost::system::error_code ec;
librados::async_operate(context, ioctx, oid, op, flags, yield[ec]);
return -ec.value();
}
if (is_asio_thread) {
ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl;
#ifdef _BACKTRACE_LOGGING
ldpp_dout(dpp, 20) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl;
#endif
}
return ioctx.operate(oid, op, flags);
}
int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl,
optional_yield y)
{
if (y) {
auto& context = y.get_io_context();
auto& yield = y.get_yield_context();
boost::system::error_code ec;
auto reply = librados::async_notify(context, ioctx, oid,
bl, timeout_ms, yield[ec]);
if (pbl) {
*pbl = std::move(reply);
}
return -ec.value();
}
if (is_asio_thread) {
ldpp_dout(dpp, 20) << "WARNING: blocking librados call" << dendl;
#ifdef _BACKTRACE_LOGGING
ldpp_dout(dpp, 20) << "BACKTRACE: " << __func__ << ": " << ClibBackTrace(0) << dendl;
#endif
}
return ioctx.notify2(oid, bl, timeout_ms, pbl);
}
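/* Usage sketch (illustrative): from coroutine context, pass the caller's
 * optional_yield so the librados call suspends instead of blocking; elsewhere
 * pass null_yield and accept the synchronous path warned about above:
 *
 *   librados::ObjectReadOperation op;
 *   bufferlist bl;
 *   op.read(0, 4096, &bl, nullptr);  // read the first 4 KiB
 *   int r = rgw_rados_operate(dpp, ioctx, "myoid", &op, nullptr, y);
 */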
void rgw_filter_attrset(map<string, bufferlist>& unfiltered_attrset, const string& check_prefix,
map<string, bufferlist> *attrset)
{
attrset->clear();
map<string, bufferlist>::iterator iter;
for (iter = unfiltered_attrset.lower_bound(check_prefix);
iter != unfiltered_attrset.end(); ++iter) {
if (!boost::algorithm::starts_with(iter->first, check_prefix))
break;
(*attrset)[iter->first] = iter->second;
}
}
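/* For example, extracting the user metadata attributes from a populated
 * attribute map `attrs` (RGW_ATTR_META_PREFIX expands to
 * "user.rgw.x-amz-meta-"):
 *
 *   map<string, bufferlist> meta_attrs;
 *   rgw_filter_attrset(attrs, RGW_ATTR_META_PREFIX, &meta_attrs);
 */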
RGWDataAccess::RGWDataAccess(rgw::sal::Driver* _driver) : driver(_driver)
{
}
int RGWDataAccess::Bucket::finish_init()
{
auto iter = attrs.find(RGW_ATTR_ACL);
if (iter == attrs.end()) {
return 0;
}
bufferlist::const_iterator bliter = iter->second.begin();
try {
policy.decode(bliter);
} catch (buffer::error& err) {
return -EIO;
}
return 0;
}
int RGWDataAccess::Bucket::init(const DoutPrefixProvider *dpp, optional_yield y)
{
std::unique_ptr<rgw::sal::Bucket> bucket;
int ret = sd->driver->get_bucket(dpp, nullptr, tenant, name, &bucket, y);
if (ret < 0) {
return ret;
}
bucket_info = bucket->get_info();
mtime = bucket->get_modification_time();
attrs = bucket->get_attrs();
return finish_init();
}
int RGWDataAccess::Bucket::init(const RGWBucketInfo& _bucket_info,
const map<string, bufferlist>& _attrs)
{
bucket_info = _bucket_info;
attrs = _attrs;
return finish_init();
}
int RGWDataAccess::Bucket::get_object(const rgw_obj_key& key,
ObjectRef *obj) {
obj->reset(new Object(sd, shared_from_this(), key));
return 0;
}
int RGWDataAccess::Object::put(bufferlist& data,
map<string, bufferlist>& attrs,
const DoutPrefixProvider *dpp,
optional_yield y)
{
rgw::sal::Driver* driver = sd->driver;
CephContext *cct = driver->ctx();
string tag;
append_rand_alpha(cct, tag, tag, 32);
RGWBucketInfo& bucket_info = bucket->bucket_info;
rgw::BlockingAioThrottle aio(driver->ctx()->_conf->rgw_put_obj_min_window_size);
std::unique_ptr<rgw::sal::Bucket> b;
driver->get_bucket(NULL, bucket_info, &b);
std::unique_ptr<rgw::sal::Object> obj = b->get_object(key);
auto& owner = bucket->policy.get_owner();
string req_id = driver->zone_unique_id(driver->get_new_req_id());
std::unique_ptr<rgw::sal::Writer> processor;
processor = driver->get_atomic_writer(dpp, y, obj.get(),
owner.get_id(),
nullptr, olh_epoch, req_id);
int ret = processor->prepare(y);
if (ret < 0)
return ret;
rgw::sal::DataProcessor *filter = processor.get();
CompressorRef plugin;
boost::optional<RGWPutObj_Compress> compressor;
const auto& compression_type = driver->get_compression_type(bucket_info.placement_rule);
if (compression_type != "none") {
plugin = Compressor::create(driver->ctx(), compression_type);
if (!plugin) {
ldpp_dout(dpp, 1) << "Cannot load plugin for compression type "
<< compression_type << dendl;
} else {
compressor.emplace(driver->ctx(), plugin, filter);
filter = &*compressor;
}
}
off_t ofs = 0;
auto obj_size = data.length();
RGWMD5Etag etag_calc;
do {
size_t read_len = std::min(data.length(), (unsigned int)cct->_conf->rgw_max_chunk_size);
bufferlist bl;
data.splice(0, read_len, &bl);
etag_calc.update(bl);
ret = filter->process(std::move(bl), ofs);
if (ret < 0)
return ret;
ofs += read_len;
} while (data.length() > 0);
ret = filter->process({}, ofs);
if (ret < 0) {
return ret;
}
bool has_etag_attr = false;
auto iter = attrs.find(RGW_ATTR_ETAG);
if (iter != attrs.end()) {
bufferlist& bl = iter->second;
etag = bl.to_str();
has_etag_attr = true;
}
if (!aclbl) {
RGWAccessControlPolicy_S3 policy(cct);
policy.create_canned(bucket->policy.get_owner(), bucket->policy.get_owner(), string()); /* default private policy */
policy.encode(aclbl.emplace());
}
if (etag.empty()) {
etag_calc.finish(&etag);
}
if (!has_etag_attr) {
bufferlist etagbl;
etagbl.append(etag);
attrs[RGW_ATTR_ETAG] = etagbl;
}
attrs[RGW_ATTR_ACL] = *aclbl;
string *puser_data = nullptr;
if (user_data) {
puser_data = &(*user_data);
}
return processor->complete(obj_size, etag,
&mtime, mtime,
attrs, delete_at,
nullptr, nullptr,
puser_data,
nullptr, nullptr, y);
}
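/* Usage sketch (illustrative) of the RGWDataAccess flow exercised above,
 * assuming `driver`, `dpp`, `y`, and the bucket coordinates come from the
 * caller:
 *
 *   RGWDataAccess da(driver);
 *   RGWDataAccess::BucketRef b;
 *   RGWDataAccess::ObjectRef o;
 *   int r = da.get_bucket(dpp, tenant, name, bucket_id, &b, y);
 *   if (r >= 0 && b->get_object(rgw_obj_key("hello.txt"), &o) == 0) {
 *     map<string, bufferlist> attrs;
 *     bufferlist data;
 *     data.append("hello");
 *     r = o->put(data, attrs, dpp, y);
 *   }
 */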
void RGWDataAccess::Object::set_policy(const RGWAccessControlPolicy& policy)
{
policy.encode(aclbl.emplace());
}
void rgw_complete_aio_completion(librados::AioCompletion* c, int r) {
auto pc = c->pc;
librados::CB_AioCompleteAndSafe cb(pc);
cb(r);
}
| 12,780 | 28.180365 | 130 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_tools.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include "include/types.h"
#include "include/ceph_hash.h"
#include "common/ceph_time.h"
#include "rgw_common.h"
#include "rgw_sal_fwd.h"
class RGWSI_SysObj;
class RGWRados;
struct RGWObjVersionTracker;
class optional_yield;
struct obj_version;
int rgw_init_ioctx(const DoutPrefixProvider *dpp,
librados::Rados *rados, const rgw_pool& pool,
librados::IoCtx& ioctx,
bool create = false,
bool mostly_omap = false,
bool bulk = false);
#define RGW_NO_SHARD -1
#define RGW_SHARDS_PRIME_0 7877
#define RGW_SHARDS_PRIME_1 65521
extern const std::string MP_META_SUFFIX;
inline int rgw_shards_max()
{
return RGW_SHARDS_PRIME_1;
}
// only called by rgw_shard_id and rgw_bucket_shard_index
static inline int rgw_shards_mod(unsigned hval, int max_shards)
{
if (max_shards <= RGW_SHARDS_PRIME_0) {
return hval % RGW_SHARDS_PRIME_0 % max_shards;
}
return hval % RGW_SHARDS_PRIME_1 % max_shards;
}
// used for logging and tagging
inline int rgw_shard_id(const std::string& key, int max_shards)
{
return rgw_shards_mod(ceph_str_hash_linux(key.c_str(), key.size()),
max_shards);
}
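// Worked example: with max_shards = 11 and a key hashing to 20000, the
// result is 20000 % 7877 % 11 = 4246 % 11 = 0. The intermediate prime
// modulus (RGW_SHARDS_PRIME_0 for shard counts up to 7877, RGW_SHARDS_PRIME_1
// above that) folds the hash into a fixed prime-sized space before reducing
// to max_shards.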
void rgw_shard_name(const std::string& prefix, unsigned max_shards, const std::string& key, std::string& name, int *shard_id);
void rgw_shard_name(const std::string& prefix, unsigned max_shards, const std::string& section, const std::string& key, std::string& name);
void rgw_shard_name(const std::string& prefix, unsigned shard_id, std::string& name);
int rgw_put_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
const rgw_pool& pool, const std::string& oid,
bufferlist& data, bool exclusive,
RGWObjVersionTracker *objv_tracker,
real_time set_mtime, optional_yield y,
std::map<std::string, bufferlist> *pattrs = nullptr);
int rgw_get_system_obj(RGWSI_SysObj* svc_sysobj, const rgw_pool& pool,
const std::string& key, bufferlist& bl,
RGWObjVersionTracker *objv_tracker, real_time *pmtime,
optional_yield y, const DoutPrefixProvider *dpp,
std::map<std::string, bufferlist> *pattrs = nullptr,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none,
bool raw_attrs=false);
int rgw_delete_system_obj(const DoutPrefixProvider *dpp,
RGWSI_SysObj *sysobj_svc, const rgw_pool& pool, const std::string& oid,
RGWObjVersionTracker *objv_tracker, optional_yield y);
int rgw_stat_system_obj(const DoutPrefixProvider *dpp, RGWSI_SysObj* svc_sysobj,
const rgw_pool& pool, const std::string& key,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime, optional_yield y,
std::map<std::string, bufferlist> *pattrs = nullptr);
const char *rgw_find_mime_by_ext(std::string& ext);
void rgw_filter_attrset(std::map<std::string, bufferlist>& unfiltered_attrset, const std::string& check_prefix,
std::map<std::string, bufferlist> *attrset);
/// indicates whether the current thread is in boost::asio::io_context::run(),
/// used to log warnings if synchronous librados calls are made
extern thread_local bool is_asio_thread;
/// perform the rados operation, using the yield context when given
int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectReadOperation *op, bufferlist* pbl,
optional_yield y, int flags = 0);
int rgw_rados_operate(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
librados::ObjectWriteOperation *op, optional_yield y,
int flags = 0);
int rgw_rados_notify(const DoutPrefixProvider *dpp, librados::IoCtx& ioctx, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms, bufferlist* pbl,
optional_yield y);
int rgw_tools_init(const DoutPrefixProvider *dpp, CephContext *cct);
void rgw_tools_cleanup();
template<class H, size_t S>
class RGWEtag
{
H hash;
public:
RGWEtag() {
if constexpr (std::is_same_v<H, MD5>) {
// Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
}
}
void update(const char *buf, size_t len) {
hash.Update((const unsigned char *)buf, len);
}
void update(bufferlist& bl) {
if (bl.length() > 0) {
update(bl.c_str(), bl.length());
}
}
void update(const std::string& s) {
if (!s.empty()) {
update(s.c_str(), s.size());
}
}
void finish(std::string *etag) {
char etag_buf[S];
char etag_buf_str[S * 2 + 16];
hash.Final((unsigned char *)etag_buf);
buf_to_hex((const unsigned char *)etag_buf, S,
etag_buf_str);
*etag = etag_buf_str;
}
};
using RGWMD5Etag = RGWEtag<MD5, CEPH_CRYPTO_MD5_DIGESTSIZE>;
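/* Usage sketch (illustrative): computing an S3-style ETag over a buffer:
 *
 *   RGWMD5Etag etag_calc;
 *   bufferlist bl;
 *   bl.append("hello world");
 *   etag_calc.update(bl);
 *   std::string etag;
 *   etag_calc.finish(&etag);  // 32 hex chars of MD5 digest
 */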
class RGWDataAccess
{
rgw::sal::Driver* driver;
public:
RGWDataAccess(rgw::sal::Driver* _driver);
class Object;
class Bucket;
using BucketRef = std::shared_ptr<Bucket>;
using ObjectRef = std::shared_ptr<Object>;
class Bucket : public std::enable_shared_from_this<Bucket> {
friend class RGWDataAccess;
friend class Object;
RGWDataAccess *sd{nullptr};
RGWBucketInfo bucket_info;
std::string tenant;
std::string name;
std::string bucket_id;
ceph::real_time mtime;
std::map<std::string, bufferlist> attrs;
RGWAccessControlPolicy policy;
int finish_init();
Bucket(RGWDataAccess *_sd,
const std::string& _tenant,
const std::string& _name,
const std::string& _bucket_id) : sd(_sd),
tenant(_tenant),
name(_name),
bucket_id(_bucket_id) {}
Bucket(RGWDataAccess *_sd) : sd(_sd) {}
int init(const DoutPrefixProvider *dpp, optional_yield y);
int init(const RGWBucketInfo& _bucket_info, const std::map<std::string, bufferlist>& _attrs);
public:
int get_object(const rgw_obj_key& key,
ObjectRef *obj);
};
class Object {
RGWDataAccess *sd{nullptr};
BucketRef bucket;
rgw_obj_key key;
ceph::real_time mtime;
std::string etag;
uint64_t olh_epoch{0};
ceph::real_time delete_at;
std::optional<std::string> user_data;
std::optional<bufferlist> aclbl;
Object(RGWDataAccess *_sd,
BucketRef&& _bucket,
const rgw_obj_key& _key) : sd(_sd),
bucket(_bucket),
key(_key) {}
public:
int put(bufferlist& data, std::map<std::string, bufferlist>& attrs, const DoutPrefixProvider *dpp, optional_yield y); /* might modify attrs */
void set_mtime(const ceph::real_time& _mtime) {
mtime = _mtime;
}
void set_etag(const std::string& _etag) {
etag = _etag;
}
void set_olh_epoch(uint64_t epoch) {
olh_epoch = epoch;
}
void set_delete_at(ceph::real_time _delete_at) {
delete_at = _delete_at;
}
void set_user_data(const std::string& _user_data) {
user_data = _user_data;
}
void set_policy(const RGWAccessControlPolicy& policy);
friend class Bucket;
};
int get_bucket(const DoutPrefixProvider *dpp,
const std::string& tenant,
		 const std::string& name,
		 const std::string& bucket_id,
BucketRef *bucket,
optional_yield y) {
bucket->reset(new Bucket(this, tenant, name, bucket_id));
return (*bucket)->init(dpp, y);
}
int get_bucket(const RGWBucketInfo& bucket_info,
const std::map<std::string, bufferlist>& attrs,
BucketRef *bucket) {
bucket->reset(new Bucket(this));
return (*bucket)->init(bucket_info, attrs);
}
friend class Bucket;
friend class Object;
};
using RGWDataAccessRef = std::shared_ptr<RGWDataAccess>;
/// Complete an AioCompletion, returning the given error value or otherwise
/// satisfying the caller. Useful for implementing complicated asynchronous
/// calls and their error handling.
void rgw_complete_aio_completion(librados::AioCompletion* c, int r);
/// This returns a static, non-NULL pointer, recognized only by
/// rgw_put_system_obj(). When supplied instead of the attributes, the
/// attributes will be unmodified.
///
/// (Currently providing nullptr will wipe all attributes.)
std::map<std::string, ceph::buffer::list>* no_change_attrs();
| 8,832 | 30.888087 | 146 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_trim_bilog.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* Author: Casey Bodley <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <mutex>
#include <boost/circular_buffer.hpp>
#include <boost/container/flat_map.hpp>
#include "include/scope_guard.h"
#include "common/bounded_key_counter.h"
#include "common/errno.h"
#include "rgw_trim_bilog.h"
#include "rgw_cr_rados.h"
#include "rgw_cr_rest.h"
#include "rgw_cr_tools.h"
#include "rgw_data_sync.h"
#include "rgw_metadata.h"
#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_sync.h"
#include "rgw_bucket.h"
#include "services/svc_zone.h"
#include "services/svc_meta.h"
#include "services/svc_bilog_rados.h"
#include <boost/asio/yield.hpp>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
#define dout_prefix (*_dout << "trim: ")
using namespace std;
using rgw::BucketTrimConfig;
using BucketChangeCounter = BoundedKeyCounter<std::string, int>;
const std::string rgw::BucketTrimStatus::oid = "bilog.trim";
using rgw::BucketTrimStatus;
// watch/notify api for gateways to coordinate about which buckets to trim
enum TrimNotifyType {
NotifyTrimCounters = 0,
NotifyTrimComplete,
};
WRITE_RAW_ENCODER(TrimNotifyType);
struct TrimNotifyHandler {
virtual ~TrimNotifyHandler() = default;
virtual void handle(bufferlist::const_iterator& input, bufferlist& output) = 0;
};
/// api to share the bucket trim counters between gateways in the same zone.
/// each gateway will process different datalog shards, so the gateway that runs
/// the trim process needs to accumulate their counters
struct TrimCounters {
/// counter for a single bucket
struct BucketCounter {
std::string bucket; //< bucket instance metadata key
int count{0};
BucketCounter() = default;
BucketCounter(const std::string& bucket, int count)
: bucket(bucket), count(count) {}
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
using Vector = std::vector<BucketCounter>;
/// request bucket trim counters from peer gateways
struct Request {
uint16_t max_buckets; //< maximum number of bucket counters to return
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
/// return the current bucket trim counters
struct Response {
Vector bucket_counters;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
/// server interface to query the hottest buckets
struct Server {
virtual ~Server() = default;
virtual void get_bucket_counters(int count, Vector& counters) = 0;
virtual void reset_bucket_counters() = 0;
};
/// notify handler
class Handler : public TrimNotifyHandler {
Server *const server;
public:
explicit Handler(Server *server) : server(server) {}
void handle(bufferlist::const_iterator& input, bufferlist& output) override;
};
};
std::ostream& operator<<(std::ostream& out, const TrimCounters::BucketCounter& rhs)
{
return out << rhs.bucket << ":" << rhs.count;
}
void TrimCounters::BucketCounter::encode(bufferlist& bl) const
{
using ceph::encode;
// no versioning to save space
encode(bucket, bl);
encode(count, bl);
}
void TrimCounters::BucketCounter::decode(bufferlist::const_iterator& p)
{
using ceph::decode;
decode(bucket, p);
decode(count, p);
}
WRITE_CLASS_ENCODER(TrimCounters::BucketCounter);
void TrimCounters::Request::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
encode(max_buckets, bl);
ENCODE_FINISH(bl);
}
void TrimCounters::Request::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
decode(max_buckets, p);
DECODE_FINISH(p);
}
WRITE_CLASS_ENCODER(TrimCounters::Request);
void TrimCounters::Response::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
encode(bucket_counters, bl);
ENCODE_FINISH(bl);
}
void TrimCounters::Response::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
decode(bucket_counters, p);
DECODE_FINISH(p);
}
WRITE_CLASS_ENCODER(TrimCounters::Response);
void TrimCounters::Handler::handle(bufferlist::const_iterator& input,
bufferlist& output)
{
Request request;
decode(request, input);
auto count = std::min<uint16_t>(request.max_buckets, 128);
Response response;
server->get_bucket_counters(count, response.bucket_counters);
encode(response, output);
}
/// api to notify peer gateways that trim has completed and their bucket change
/// counters can be reset
struct TrimComplete {
struct Request {
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
struct Response {
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& p);
};
/// server interface to reset bucket counters
using Server = TrimCounters::Server;
/// notify handler
class Handler : public TrimNotifyHandler {
Server *const server;
public:
explicit Handler(Server *server) : server(server) {}
void handle(bufferlist::const_iterator& input, bufferlist& output) override;
};
};
void TrimComplete::Request::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
ENCODE_FINISH(bl);
}
void TrimComplete::Request::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
DECODE_FINISH(p);
}
WRITE_CLASS_ENCODER(TrimComplete::Request);
void TrimComplete::Response::encode(bufferlist& bl) const
{
ENCODE_START(1, 1, bl);
ENCODE_FINISH(bl);
}
void TrimComplete::Response::decode(bufferlist::const_iterator& p)
{
DECODE_START(1, p);
DECODE_FINISH(p);
}
WRITE_CLASS_ENCODER(TrimComplete::Response);
void TrimComplete::Handler::handle(bufferlist::const_iterator& input,
bufferlist& output)
{
Request request;
decode(request, input);
server->reset_bucket_counters();
Response response;
encode(response, output);
}
/// rados watcher for bucket trim notifications
class BucketTrimWatcher : public librados::WatchCtx2 {
rgw::sal::RadosStore* const store;
const rgw_raw_obj& obj;
rgw_rados_ref ref;
uint64_t handle{0};
using HandlerPtr = std::unique_ptr<TrimNotifyHandler>;
boost::container::flat_map<TrimNotifyType, HandlerPtr> handlers;
public:
BucketTrimWatcher(rgw::sal::RadosStore* store, const rgw_raw_obj& obj,
TrimCounters::Server *counters)
: store(store), obj(obj) {
handlers.emplace(NotifyTrimCounters,
std::make_unique<TrimCounters::Handler>(counters));
handlers.emplace(NotifyTrimComplete,
std::make_unique<TrimComplete::Handler>(counters));
}
~BucketTrimWatcher() {
stop();
}
int start(const DoutPrefixProvider *dpp) {
int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
// register a watch on the realm's control object
r = ref.pool.ioctx().watch2(ref.obj.oid, &handle, this);
if (r == -ENOENT) {
constexpr bool exclusive = true;
r = ref.pool.ioctx().create(ref.obj.oid, exclusive);
if (r == -EEXIST || r == 0) {
r = ref.pool.ioctx().watch2(ref.obj.oid, &handle, this);
}
}
if (r < 0) {
ldpp_dout(dpp, -1) << "Failed to watch " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
ref.pool.ioctx().close();
return r;
}
ldpp_dout(dpp, 10) << "Watching " << ref.obj.oid << dendl;
return 0;
}
int restart() {
int r = ref.pool.ioctx().unwatch2(handle);
if (r < 0) {
lderr(store->ctx()) << "Failed to unwatch on " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
}
r = ref.pool.ioctx().watch2(ref.obj.oid, &handle, this);
if (r < 0) {
lderr(store->ctx()) << "Failed to restart watch on " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
ref.pool.ioctx().close();
}
return r;
}
void stop() {
if (handle) {
ref.pool.ioctx().unwatch2(handle);
ref.pool.ioctx().close();
}
}
/// respond to bucket trim notifications
void handle_notify(uint64_t notify_id, uint64_t cookie,
uint64_t notifier_id, bufferlist& bl) override {
if (cookie != handle) {
return;
}
bufferlist reply;
try {
auto p = bl.cbegin();
TrimNotifyType type;
decode(type, p);
auto handler = handlers.find(type);
if (handler != handlers.end()) {
handler->second->handle(p, reply);
} else {
lderr(store->ctx()) << "no handler for notify type " << type << dendl;
}
} catch (const buffer::error& e) {
lderr(store->ctx()) << "Failed to decode notification: " << e.what() << dendl;
}
ref.pool.ioctx().notify_ack(ref.obj.oid, notify_id, cookie, reply);
}
/// reestablish the watch if it gets disconnected
void handle_error(uint64_t cookie, int err) override {
if (cookie != handle) {
return;
}
if (err == -ENOTCONN) {
ldout(store->ctx(), 4) << "Disconnected watch on " << ref.obj << dendl;
restart();
}
}
};
/// Interface to communicate with the trim manager about completed operations
struct BucketTrimObserver {
virtual ~BucketTrimObserver() = default;
virtual void on_bucket_trimmed(std::string&& bucket_instance) = 0;
virtual bool trimmed_recently(const std::string_view& bucket_instance) = 0;
};
/// trim each bilog shard to the given marker, while limiting the number of
/// concurrent requests
class BucketTrimShardCollectCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* const store;
const RGWBucketInfo& bucket_info;
rgw::bucket_index_layout_generation generation;
const std::vector<std::string>& markers; //< shard markers to trim
size_t i{0}; //< index of current shard marker
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to trim bilog shard: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
BucketTrimShardCollectCR(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& generation,
const std::vector<std::string>& markers)
: RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS),
dpp(dpp), store(store), bucket_info(bucket_info),
generation(generation), markers(markers)
{}
bool spawn_next() override;
};
bool BucketTrimShardCollectCR::spawn_next()
{
while (i < markers.size()) {
const auto& marker = markers[i];
const auto shard_id = i++;
// skip empty markers
if (!marker.empty()) {
ldpp_dout(dpp, 10) << "trimming bilog shard " << shard_id
<< " of " << bucket_info.bucket << " at marker " << marker << dendl;
spawn(new RGWRadosBILogTrimCR(dpp, store, bucket_info, shard_id,
generation, std::string{}, marker),
false);
return true;
}
}
return false;
}
/// Delete a BI generation, limiting the number of requests in flight.
class BucketCleanIndexCollectCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* const store;
const RGWBucketInfo& bucket_info;
rgw::bucket_index_layout_generation index;
uint32_t shard = 0;
const uint32_t num_shards = rgw::num_shards(index);
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "clean index: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
BucketCleanIndexCollectCR(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
const RGWBucketInfo& bucket_info,
rgw::bucket_index_layout_generation index)
: RGWShardCollectCR(store->ctx(), MAX_CONCURRENT_SHARDS),
dpp(dpp), store(store), bucket_info(bucket_info),
index(index)
{}
bool spawn_next() override {
if (shard < num_shards) {
RGWRados::BucketShard bs(store->getRados());
bs.init(dpp, bucket_info, index, shard, null_yield);
spawn(new RGWRadosRemoveOidCR(store, std::move(bs.bucket_obj), nullptr),
false);
++shard;
return true;
} else {
return false;
}
}
};
/// trim the bilog of all of the given bucket instance's shards
class BucketTrimInstanceCR : public RGWCoroutine {
static constexpr auto MAX_RETRIES = 25u;
rgw::sal::RadosStore* const store;
RGWHTTPManager *const http;
BucketTrimObserver *const observer;
std::string bucket_instance;
rgw_bucket_get_sync_policy_params get_policy_params;
std::shared_ptr<rgw_bucket_get_sync_policy_result> source_policy;
rgw_bucket bucket;
const std::string& zone_id; //< my zone id
RGWBucketInfo _bucket_info;
const RGWBucketInfo *pbucket_info; //< pointer to bucket instance info to locate bucket indices
int child_ret = 0;
const DoutPrefixProvider *dpp;
public:
struct StatusShards {
uint64_t generation = 0;
std::vector<rgw_bucket_shard_sync_info> shards;
};
private:
std::vector<StatusShards> peer_status; //< sync status for each peer
std::vector<std::string> min_markers; //< min marker per shard
/// The log generation to trim
rgw::bucket_log_layout_generation totrim;
/// Generation to be cleaned/New bucket info (if any)
std::optional<std::pair<RGWBucketInfo,
rgw::bucket_log_layout_generation>> clean_info;
/// Maximum number of times to attempt to put bucket info
unsigned retries = 0;
int take_min_generation() {
// Initialize the min_generation to the bucket's current
// generation, used in case we have no peers.
auto min_generation = pbucket_info->layout.logs.back().gen;
// Determine the minimum generation
if (auto m = std::min_element(peer_status.begin(),
peer_status.end(),
[](const StatusShards& l,
const StatusShards& r) {
return l.generation < r.generation;
}); m != peer_status.end()) {
min_generation = m->generation;
}
auto& logs = pbucket_info->layout.logs;
auto log = std::find_if(logs.begin(), logs.end(),
rgw::matches_gen(min_generation));
if (log == logs.end()) {
ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
<< "ERROR: No log layout for min_generation="
<< min_generation << dendl;
return -ENOENT;
}
totrim = *log;
return 0;
}
/// If there is a generation below the minimum, prepare to clean it up.
int maybe_remove_generation() {
if (clean_info)
return 0;
if (pbucket_info->layout.logs.front().gen < totrim.gen) {
clean_info = {*pbucket_info, {}};
auto log = clean_info->first.layout.logs.cbegin();
clean_info->second = *log;
if (clean_info->first.layout.logs.size() == 1) {
ldpp_dout(dpp, -1)
<< "Critical error! Attempt to remove only log generation! "
<< "log.gen=" << log->gen << ", totrim.gen=" << totrim.gen
<< dendl;
return -EIO;
}
clean_info->first.layout.logs.erase(log);
}
return 0;
}
public:
BucketTrimInstanceCR(rgw::sal::RadosStore* store, RGWHTTPManager *http,
BucketTrimObserver *observer,
const std::string& bucket_instance,
const DoutPrefixProvider *dpp)
: RGWCoroutine(store->ctx()), store(store),
http(http), observer(observer),
bucket_instance(bucket_instance),
zone_id(store->svc()->zone->get_zone().id),
dpp(dpp) {
rgw_bucket_parse_bucket_key(cct, bucket_instance, &bucket, nullptr);
source_policy = make_shared<rgw_bucket_get_sync_policy_result>();
}
int operate(const DoutPrefixProvider *dpp) override;
};
namespace {
/// populate the status with the minimum stable marker of each shard
int take_min_status(
CephContext *cct,
const uint64_t min_generation,
std::vector<BucketTrimInstanceCR::StatusShards>::const_iterator first,
std::vector<BucketTrimInstanceCR::StatusShards>::const_iterator last,
std::vector<std::string> *status) {
for (auto peer = first; peer != last; ++peer) {
// Peers on later generations don't get a say in the matter
if (peer->generation > min_generation) {
continue;
}
if (peer->shards.size() != status->size()) {
// all peers must agree on the number of shards
return -EINVAL;
}
auto m = status->begin();
for (auto& shard : peer->shards) {
auto& marker = *m++;
// always take the first marker, or any later marker that's smaller
if (peer == first || marker > shard.inc_marker.position) {
marker = std::move(shard.inc_marker.position);
}
}
}
return 0;
}
}
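// Worked example (illustrative): with min_generation = 3 and two peers on
// generation 3 reporting inc markers {"00005", "00009"} and {"00007", "00004"},
// the per-shard minima come out as {"00005", "00004"}; a third peer already on
// generation 4 would be skipped entirely by the continue above.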
template<>
inline int parse_decode_json<BucketTrimInstanceCR::StatusShards>(
BucketTrimInstanceCR::StatusShards& s, bufferlist& bl)
{
JSONParser p;
if (!p.parse(bl.c_str(), bl.length())) {
return -EINVAL;
}
try {
bilog_status_v2 v;
decode_json_obj(v, &p);
s.generation = v.sync_status.incremental_gen;
s.shards = std::move(v.inc_status);
} catch (JSONDecoder::err& e) {
try {
// Fall back if we're talking to an old node that can't give v2
// output.
s.generation = 0;
decode_json_obj(s.shards, &p);
} catch (JSONDecoder::err& e) {
return -EINVAL;
}
}
return 0;
}
int BucketTrimInstanceCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
ldpp_dout(dpp, 4) << "starting trim on bucket=" << bucket_instance << dendl;
get_policy_params.zone = zone_id;
get_policy_params.bucket = bucket;
yield call(new RGWBucketGetSyncPolicyHandlerCR(store->svc()->rados->get_async_processor(),
store,
get_policy_params,
source_policy,
dpp));
if (retcode < 0) {
if (retcode != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed to fetch policy handler for bucket=" << bucket << dendl;
}
return set_cr_error(retcode);
}
if (auto& opt_bucket_info = source_policy->policy_handler->get_bucket_info();
opt_bucket_info) {
pbucket_info = &(*opt_bucket_info);
} else {
/* this shouldn't really happen */
return set_cr_error(-ENOENT);
}
if (pbucket_info->layout.logs.empty()) {
return set_cr_done(); // no bilogs to trim
}
// query peers for sync status
set_status("fetching sync status from relevant peers");
yield {
const auto& all_dests = source_policy->policy_handler->get_all_dests();
vector<rgw_zone_id> zids;
rgw_zone_id last_zid;
for (auto& diter : all_dests) {
const auto& zid = diter.first;
if (zid == last_zid) {
continue;
}
last_zid = zid;
zids.push_back(zid);
}
peer_status.resize(zids.size());
auto& zone_conn_map = store->svc()->zone->get_zone_conn_map();
auto p = peer_status.begin();
for (auto& zid : zids) {
// query data sync status from each sync peer
rgw_http_param_pair params[] = {
{ "type", "bucket-index" },
{ "status", nullptr },
{ "options", "merge" },
{ "bucket", bucket_instance.c_str() }, /* equal to source-bucket when `options==merge` and source-bucket
param is not provided */
{ "source-zone", zone_id.c_str() },
{ "version", "2" },
{ nullptr, nullptr }
};
auto ziter = zone_conn_map.find(zid);
if (ziter == zone_conn_map.end()) {
ldpp_dout(dpp, 0) << "WARNING: no connection to zone " << zid << ", can't trim bucket: " << bucket << dendl;
return set_cr_error(-ECANCELED);
}
using StatusCR = RGWReadRESTResourceCR<StatusShards>;
spawn(new StatusCR(cct, ziter->second, http, "/admin/log/", params, &*p),
false);
++p;
}
}
// wait for a response from each peer. all must respond to attempt trim
while (num_spawned()) {
yield wait_for_child();
collect(&child_ret, nullptr);
if (child_ret < 0) {
drain_all();
return set_cr_error(child_ret);
}
}
// Determine the minimum generation
retcode = take_min_generation();
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to find minimum generation" << dendl;
return set_cr_error(retcode);
}
retcode = maybe_remove_generation();
if (retcode < 0) {
ldpp_dout(dpp, 4) << "error removing old generation from log: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (clean_info) {
if (clean_info->second.layout.type != rgw::BucketLogType::InIndex) {
ldpp_dout(dpp, 0) << "Unable to convert log of unknown type "
<< clean_info->second.layout.type
<< " to rgw::bucket_index_layout_generation " << dendl;
return set_cr_error(-EINVAL);
}
yield call(new BucketCleanIndexCollectCR(dpp, store, clean_info->first,
clean_info->second.layout.in_index));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "failed to remove previous generation: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
while (clean_info && retries < MAX_RETRIES) {
yield call(new RGWPutBucketInstanceInfoCR(
store->svc()->rados->get_async_processor(),
store, clean_info->first, false, {},
no_change_attrs(), dpp));
// Raced, try again.
if (retcode == -ECANCELED) {
yield call(new RGWGetBucketInstanceInfoCR(
store->svc()->rados->get_async_processor(),
store, clean_info->first.bucket,
&(clean_info->first), nullptr, dpp));
if (retcode < 0) {
ldpp_dout(dpp, 0) << "failed to get bucket info: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (clean_info->first.layout.logs.front().gen ==
clean_info->second.gen) {
clean_info->first.layout.logs.erase(
clean_info->first.layout.logs.begin());
++retries;
continue;
}
// Raced, but someone else did what we needed to.
retcode = 0;
}
if (retcode < 0) {
ldpp_dout(dpp, 0) << "failed to put bucket info: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
clean_info = std::nullopt;
}
} else {
if (totrim.layout.type != rgw::BucketLogType::InIndex) {
ldpp_dout(dpp, 0) << "Unable to convert log of unknown type "
<< totrim.layout.type
<< " to rgw::bucket_index_layout_generation " << dendl;
return set_cr_error(-EINVAL);
}
// To avoid hammering the OSD too hard, either trim old
// generations OR trim the current one.
// determine the minimum marker for each shard
// initialize each shard with the maximum marker, which is only used when
// there are no peers syncing from us
min_markers.assign(std::max(1u, rgw::num_shards(totrim.layout.in_index)),
RGWSyncLogTrimCR::max_marker);
retcode = take_min_status(cct, totrim.gen, peer_status.cbegin(),
peer_status.cend(), &min_markers);
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to correlate bucket sync status from peers" << dendl;
return set_cr_error(retcode);
}
// trim shards with a ShardCollectCR
ldpp_dout(dpp, 10) << "trimming bilogs for bucket=" << pbucket_info->bucket
<< " markers=" << min_markers << ", shards=" << min_markers.size() << dendl;
set_status("trimming bilog shards");
yield call(new BucketTrimShardCollectCR(dpp, store, *pbucket_info, totrim.layout.in_index,
min_markers));
// ENODATA just means there were no keys to trim
if (retcode == -ENODATA) {
retcode = 0;
}
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to trim bilog shards: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
}
observer->on_bucket_trimmed(std::move(bucket_instance));
return set_cr_done();
}
return 0;
}
/// trim each bucket instance while limiting the number of concurrent operations
class BucketTrimInstanceCollectCR : public RGWShardCollectCR {
rgw::sal::RadosStore* const store;
RGWHTTPManager *const http;
BucketTrimObserver *const observer;
std::vector<std::string>::const_iterator bucket;
std::vector<std::string>::const_iterator end;
const DoutPrefixProvider *dpp;
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to trim bucket instance: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
BucketTrimInstanceCollectCR(rgw::sal::RadosStore* store, RGWHTTPManager *http,
BucketTrimObserver *observer,
const std::vector<std::string>& buckets,
int max_concurrent,
const DoutPrefixProvider *dpp)
: RGWShardCollectCR(store->ctx(), max_concurrent),
store(store), http(http), observer(observer),
bucket(buckets.begin()), end(buckets.end()),
dpp(dpp)
{}
bool spawn_next() override;
};
bool BucketTrimInstanceCollectCR::spawn_next()
{
if (bucket == end) {
return false;
}
spawn(new BucketTrimInstanceCR(store, http, observer, *bucket, dpp), false);
++bucket;
return true;
}
/// correlate the replies from each peer gateway into the given counter
int accumulate_peer_counters(bufferlist& bl, BucketChangeCounter& counter)
{
counter.clear();
try {
// decode notify responses
auto p = bl.cbegin();
std::map<std::pair<uint64_t, uint64_t>, bufferlist> replies;
std::set<std::pair<uint64_t, uint64_t>> timeouts;
decode(replies, p);
decode(timeouts, p);
for (auto& peer : replies) {
auto q = peer.second.cbegin();
TrimCounters::Response response;
decode(response, q);
for (const auto& b : response.bucket_counters) {
counter.insert(b.bucket, b.count);
}
}
} catch (const buffer::error& e) {
return -EIO;
}
return 0;
}
/// metadata callback has the signature bool(string&& key, string&& marker)
using MetadataListCallback = std::function<bool(std::string&&, std::string&&)>;
/// lists metadata keys, passing each to a callback until it returns false.
/// on reaching the end, it will restart at the beginning and list up to the
/// initial marker
class AsyncMetadataList : public RGWAsyncRadosRequest {
CephContext *const cct;
RGWMetadataManager *const mgr;
const std::string section;
const std::string start_marker;
MetadataListCallback callback;
int _send_request(const DoutPrefixProvider *dpp) override;
public:
AsyncMetadataList(CephContext *cct, RGWCoroutine *caller,
RGWAioCompletionNotifier *cn, RGWMetadataManager *mgr,
const std::string& section, const std::string& start_marker,
const MetadataListCallback& callback)
: RGWAsyncRadosRequest(caller, cn), cct(cct), mgr(mgr),
section(section), start_marker(start_marker), callback(callback)
{}
};
int AsyncMetadataList::_send_request(const DoutPrefixProvider *dpp)
{
void* handle = nullptr;
std::list<std::string> keys;
bool truncated{false};
std::string marker;
// start a listing at the given marker
int r = mgr->list_keys_init(dpp, section, start_marker, &handle);
if (r == -EINVAL) {
// restart with empty marker below
} else if (r < 0) {
ldpp_dout(dpp, 10) << "failed to init metadata listing: "
<< cpp_strerror(r) << dendl;
return r;
} else {
ldpp_dout(dpp, 20) << "starting metadata listing at " << start_marker << dendl;
// release the handle when scope exits
auto g = make_scope_guard([=, this] { mgr->list_keys_complete(handle); });
do {
// get the next key and marker
r = mgr->list_keys_next(dpp, handle, 1, keys, &truncated);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to list metadata: "
<< cpp_strerror(r) << dendl;
return r;
}
marker = mgr->get_marker(handle);
if (!keys.empty()) {
ceph_assert(keys.size() == 1);
auto& key = keys.front();
if (!callback(std::move(key), std::move(marker))) {
return 0;
}
}
} while (truncated);
if (start_marker.empty()) {
// already listed all keys
return 0;
}
}
// restart the listing from the beginning (empty marker)
handle = nullptr;
r = mgr->list_keys_init(dpp, section, "", &handle);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to restart metadata listing: "
<< cpp_strerror(r) << dendl;
return r;
}
ldpp_dout(dpp, 20) << "restarting metadata listing" << dendl;
// release the handle when scope exits
auto g = make_scope_guard([=, this] { mgr->list_keys_complete(handle); });
do {
// get the next key and marker
r = mgr->list_keys_next(dpp, handle, 1, keys, &truncated);
if (r < 0) {
ldpp_dout(dpp, 10) << "failed to list metadata: "
<< cpp_strerror(r) << dendl;
return r;
}
marker = mgr->get_marker(handle);
if (!keys.empty()) {
ceph_assert(keys.size() == 1);
auto& key = keys.front();
// stop at original marker
if (marker > start_marker) {
return 0;
}
if (!callback(std::move(key), std::move(marker))) {
return 0;
}
}
} while (truncated);
return 0;
}
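// Worked example (illustrative): with keys {a, b, c, d} in the section and a
// start_marker positioned after c, the callback first sees d (to the end of
// the section); the listing then restarts from the beginning and delivers
// a, b, c, stopping once the marker passes the original start_marker, i.e.
// one full circular pass over the metadata section.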
/// coroutine wrapper for AsyncMetadataList
class MetadataListCR : public RGWSimpleCoroutine {
RGWAsyncRadosProcessor *const async_rados;
RGWMetadataManager *const mgr;
const std::string& section;
const std::string& start_marker;
MetadataListCallback callback;
RGWAsyncRadosRequest *req{nullptr};
public:
MetadataListCR(CephContext *cct, RGWAsyncRadosProcessor *async_rados,
RGWMetadataManager *mgr, const std::string& section,
const std::string& start_marker,
const MetadataListCallback& callback)
: RGWSimpleCoroutine(cct), async_rados(async_rados), mgr(mgr),
section(section), start_marker(start_marker), callback(callback)
{}
~MetadataListCR() override {
request_cleanup();
}
int send_request(const DoutPrefixProvider *dpp) override {
req = new AsyncMetadataList(cct, this, stack->create_completion_notifier(),
mgr, section, start_marker, callback);
async_rados->queue(req);
return 0;
}
int request_complete() override {
return req->get_ret_status();
}
void request_cleanup() override {
if (req) {
req->finish();
req = nullptr;
}
}
};
class BucketTrimCR : public RGWCoroutine {
rgw::sal::RadosStore* const store;
RGWHTTPManager *const http;
const BucketTrimConfig& config;
BucketTrimObserver *const observer;
const rgw_raw_obj& obj;
ceph::mono_time start_time;
bufferlist notify_replies;
BucketChangeCounter counter;
std::vector<std::string> buckets; //< buckets selected for trim
BucketTrimStatus status;
RGWObjVersionTracker objv; //< version tracker for trim status object
std::string last_cold_marker; //< position for next trim marker
const DoutPrefixProvider *dpp;
static const std::string section; //< metadata section for bucket instances
public:
BucketTrimCR(rgw::sal::RadosStore* store, RGWHTTPManager *http,
const BucketTrimConfig& config, BucketTrimObserver *observer,
const rgw_raw_obj& obj, const DoutPrefixProvider *dpp)
: RGWCoroutine(store->ctx()), store(store), http(http), config(config),
observer(observer), obj(obj), counter(config.counter_size), dpp(dpp)
{}
int operate(const DoutPrefixProvider *dpp) override;
};
const std::string BucketTrimCR::section{"bucket.instance"};
int BucketTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
start_time = ceph::mono_clock::now();
if (config.buckets_per_interval) {
// query watch/notify for hot buckets
ldpp_dout(dpp, 10) << "fetching active bucket counters" << dendl;
set_status("fetching active bucket counters");
yield {
// request the top bucket counters from each peer gateway
const TrimNotifyType type = NotifyTrimCounters;
TrimCounters::Request request{32};
bufferlist bl;
encode(type, bl);
encode(request, bl);
call(new RGWRadosNotifyCR(store, obj, bl, config.notify_timeout_ms,
¬ify_replies));
}
if (retcode < 0) {
ldpp_dout(dpp, 10) << "failed to fetch peer bucket counters" << dendl;
return set_cr_error(retcode);
}
// select the hottest buckets for trim
retcode = accumulate_peer_counters(notify_replies, counter);
if (retcode < 0) {
ldout(cct, 4) << "failed to correlate peer bucket counters" << dendl;
return set_cr_error(retcode);
}
buckets.reserve(config.buckets_per_interval);
const int max_count = config.buckets_per_interval -
config.min_cold_buckets_per_interval;
counter.get_highest(max_count,
[this] (const std::string& bucket, int count) {
buckets.push_back(bucket);
});
}
if (buckets.size() < config.buckets_per_interval) {
// read BucketTrimStatus for marker position
set_status("reading trim status");
using ReadStatus = RGWSimpleRadosReadCR<BucketTrimStatus>;
yield call(new ReadStatus(dpp, store, obj, &status, true, &objv));
if (retcode < 0) {
ldpp_dout(dpp, 10) << "failed to read bilog trim status: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (status.marker == "MAX") {
status.marker.clear(); // restart at the beginning
}
ldpp_dout(dpp, 10) << "listing cold buckets from marker="
<< status.marker << dendl;
set_status("listing cold buckets for trim");
yield {
// capture a reference so 'this' remains valid in the callback
auto ref = boost::intrusive_ptr<RGWCoroutine>{this};
// list cold buckets to consider for trim
auto cb = [this, ref] (std::string&& bucket, std::string&& marker) {
// filter out keys that we trimmed recently
if (observer->trimmed_recently(bucket)) {
return true;
}
// filter out active buckets that we've already selected
auto i = std::find(buckets.begin(), buckets.end(), bucket);
if (i != buckets.end()) {
return true;
}
buckets.emplace_back(std::move(bucket));
// remember the last cold bucket spawned to update the status marker
last_cold_marker = std::move(marker);
// return true if there's room for more
return buckets.size() < config.buckets_per_interval;
};
call(new MetadataListCR(cct, store->svc()->rados->get_async_processor(),
store->ctl()->meta.mgr,
section, status.marker, cb));
}
if (retcode < 0) {
ldout(cct, 4) << "failed to list bucket instance metadata: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
}
// trim bucket instances with limited concurrency
set_status("trimming buckets");
ldpp_dout(dpp, 4) << "collected " << buckets.size() << " buckets for trim" << dendl;
yield call(new BucketTrimInstanceCollectCR(store, http, observer, buckets,
config.concurrent_buckets, dpp));
// ignore errors from individual buckets
// write updated trim status
if (!last_cold_marker.empty() && status.marker != last_cold_marker) {
set_status("writing updated trim status");
status.marker = std::move(last_cold_marker);
ldpp_dout(dpp, 20) << "writing bucket trim marker=" << status.marker << dendl;
using WriteStatus = RGWSimpleRadosWriteCR<BucketTrimStatus>;
yield call(new WriteStatus(dpp, store, obj, status, &objv));
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to write updated trim status: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
}
// notify peers that trim completed
set_status("trim completed");
yield {
const TrimNotifyType type = NotifyTrimComplete;
TrimComplete::Request request;
bufferlist bl;
encode(type, bl);
encode(request, bl);
call(new RGWRadosNotifyCR(store, obj, bl, config.notify_timeout_ms,
nullptr));
}
if (retcode < 0) {
ldout(cct, 10) << "failed to notify peers of trim completion" << dendl;
return set_cr_error(retcode);
}
ldpp_dout(dpp, 4) << "bucket index log processing completed in "
<< ceph::mono_clock::now() - start_time << dendl;
return set_cr_done();
}
return 0;
}
class BucketTrimPollCR : public RGWCoroutine {
rgw::sal::RadosStore* const store;
RGWHTTPManager *const http;
const BucketTrimConfig& config;
BucketTrimObserver *const observer;
const rgw_raw_obj& obj;
const std::string name{"trim"}; //< lock name
const std::string cookie;
const DoutPrefixProvider *dpp;
public:
BucketTrimPollCR(rgw::sal::RadosStore* store, RGWHTTPManager *http,
const BucketTrimConfig& config,
BucketTrimObserver *observer, const rgw_raw_obj& obj,
const DoutPrefixProvider *dpp)
: RGWCoroutine(store->ctx()), store(store), http(http),
config(config), observer(observer), obj(obj),
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
dpp(dpp) {}
int operate(const DoutPrefixProvider *dpp) override;
};
int BucketTrimPollCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
set_status("sleeping");
wait(utime_t{static_cast<time_t>(config.trim_interval_sec), 0});
// prevent others from trimming for our entire wait interval
set_status("acquiring trim lock");
yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie,
config.trim_interval_sec));
if (retcode < 0) {
ldout(cct, 4) << "failed to lock: " << cpp_strerror(retcode) << dendl;
continue;
}
set_status("trimming");
yield call(new BucketTrimCR(store, http, config, observer, obj, dpp));
if (retcode < 0) {
// on errors, unlock so other gateways can try
set_status("unlocking");
yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie));
}
}
}
return 0;
}
/// tracks a bounded list of events with timestamps. old events can be expired,
/// and recent events can be searched by key. expiration depends on events being
/// inserted in temporal order
template <typename T, typename Clock = ceph::coarse_mono_clock>
class RecentEventList {
public:
using clock_type = Clock;
using time_point = typename clock_type::time_point;
RecentEventList(size_t max_size, const ceph::timespan& max_duration)
: events(max_size), max_duration(max_duration)
{}
/// insert an event at the given point in time. this time must be at least as
/// recent as the last inserted event
void insert(T&& value, const time_point& now) {
// ceph_assert(events.empty() || now >= events.back().time)
events.push_back(Event{std::move(value), now});
}
/// performs a linear search for an event matching the given key, whose type
/// U can be any that provides operator==(U, T)
template <typename U>
bool lookup(const U& key) const {
for (const auto& event : events) {
if (key == event.value) {
return true;
}
}
return false;
}
/// remove events that are no longer recent compared to the given point in time
void expire_old(const time_point& now) {
const auto expired_before = now - max_duration;
while (!events.empty() && events.front().time < expired_before) {
events.pop_front();
}
}
private:
struct Event {
T value;
time_point time;
};
boost::circular_buffer<Event> events;
const ceph::timespan max_duration;
};
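/* Usage sketch (illustrative):
 *
 *   RecentEventList<std::string> recent(128, std::chrono::hours(2));
 *   recent.insert("bucket:instance", decltype(recent)::clock_type::now());
 *   bool seen = recent.lookup(std::string_view{"bucket:instance"});
 *   recent.expire_old(decltype(recent)::clock_type::now());
 */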
namespace rgw {
// read bucket trim configuration from ceph context
void configure_bucket_trim(CephContext *cct, BucketTrimConfig& config)
{
const auto& conf = cct->_conf;
config.trim_interval_sec =
conf.get_val<int64_t>("rgw_sync_log_trim_interval");
config.counter_size = 512;
config.buckets_per_interval =
conf.get_val<int64_t>("rgw_sync_log_trim_max_buckets");
config.min_cold_buckets_per_interval =
conf.get_val<int64_t>("rgw_sync_log_trim_min_cold_buckets");
config.concurrent_buckets =
conf.get_val<int64_t>("rgw_sync_log_trim_concurrent_buckets");
config.notify_timeout_ms = 10000;
config.recent_size = 128;
config.recent_duration = std::chrono::hours(2);
}
class BucketTrimManager::Impl : public TrimCounters::Server,
public BucketTrimObserver {
public:
rgw::sal::RadosStore* const store;
const BucketTrimConfig config;
const rgw_raw_obj status_obj;
/// count frequency of bucket instance entries in the data changes log
BucketChangeCounter counter;
using RecentlyTrimmedBucketList = RecentEventList<std::string>;
using clock_type = RecentlyTrimmedBucketList::clock_type;
/// track recently trimmed buckets to focus trim activity elsewhere
RecentlyTrimmedBucketList trimmed;
/// serve the bucket trim watch/notify api
BucketTrimWatcher watcher;
/// protect data shared between data sync, trim, and watch/notify threads
std::mutex mutex;
Impl(rgw::sal::RadosStore* store, const BucketTrimConfig& config)
: store(store), config(config),
status_obj(store->svc()->zone->get_zone_params().log_pool, BucketTrimStatus::oid),
counter(config.counter_size),
trimmed(config.recent_size, config.recent_duration),
watcher(store, status_obj, this)
{}
/// TrimCounters::Server interface for watch/notify api
  void get_bucket_counters(int count, TrimCounters::Vector& buckets) override {
buckets.reserve(count);
std::lock_guard<std::mutex> lock(mutex);
counter.get_highest(count, [&buckets] (const std::string& key, int count) {
buckets.emplace_back(key, count);
});
ldout(store->ctx(), 20) << "get_bucket_counters: " << buckets << dendl;
}
void reset_bucket_counters() override {
ldout(store->ctx(), 20) << "bucket trim completed" << dendl;
std::lock_guard<std::mutex> lock(mutex);
counter.clear();
trimmed.expire_old(clock_type::now());
}
/// BucketTrimObserver interface to remember successfully-trimmed buckets
void on_bucket_trimmed(std::string&& bucket_instance) override {
ldout(store->ctx(), 20) << "trimmed bucket instance " << bucket_instance << dendl;
std::lock_guard<std::mutex> lock(mutex);
trimmed.insert(std::move(bucket_instance), clock_type::now());
}
bool trimmed_recently(const std::string_view& bucket_instance) override {
std::lock_guard<std::mutex> lock(mutex);
return trimmed.lookup(bucket_instance);
}
};
BucketTrimManager::BucketTrimManager(rgw::sal::RadosStore* store,
const BucketTrimConfig& config)
: impl(new Impl(store, config))
{
}
BucketTrimManager::~BucketTrimManager() = default;
int BucketTrimManager::init()
{
return impl->watcher.start(this);
}
void BucketTrimManager::on_bucket_changed(const std::string_view& bucket)
{
std::lock_guard<std::mutex> lock(impl->mutex);
// filter recently trimmed bucket instances out of bucket change counter
if (impl->trimmed.lookup(bucket)) {
return;
}
impl->counter.insert(std::string(bucket));
}
RGWCoroutine* BucketTrimManager::create_bucket_trim_cr(RGWHTTPManager *http)
{
return new BucketTrimPollCR(impl->store, http, impl->config,
impl.get(), impl->status_obj, this);
}
RGWCoroutine* BucketTrimManager::create_admin_bucket_trim_cr(RGWHTTPManager *http)
{
// return the trim coroutine without any polling
return new BucketTrimCR(impl->store, http, impl->config,
impl.get(), impl->status_obj, this);
}
CephContext* BucketTrimManager::get_cct() const
{
return impl->store->ctx();
}
unsigned BucketTrimManager::get_subsys() const
{
return dout_subsys;
}
std::ostream& BucketTrimManager::gen_prefix(std::ostream& out) const
{
return out << "rgw bucket trim manager: ";
}
} // namespace rgw
int bilog_trim(const DoutPrefixProvider* p, rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info, uint64_t gen, int shard_id,
std::string_view start_marker, std::string_view end_marker)
{
auto& logs = bucket_info.layout.logs;
auto log = std::find_if(logs.begin(), logs.end(), rgw::matches_gen(gen));
if (log == logs.end()) {
    ldpp_dout(p, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
		    << " ERROR: no log layout with gen=" << gen << dendl;
return -ENOENT;
}
auto log_layout = *log;
auto r = store->svc()->bilog_rados->log_trim(p, bucket_info, log_layout, shard_id, start_marker, end_marker);
if (r < 0) {
    ldpp_dout(p, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
		    << " ERROR: bilog_rados->log_trim returned r=" << r << dendl;
}
return r;
}
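// Note: this entry point backs bucket index log trimming from the admin
// tooling (e.g. `radosgw-admin bilog trim --bucket=<bucket>`); the command
// wiring lives outside this file, so the invocation shown here is an
// assumption for context only.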
| 46,253 | 30.987552 | 118 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_trim_bilog.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc
*
* Author: Casey Bodley <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#pragma once
#include <memory>
#include <string_view>
#include "include/common_fwd.h"
#include "include/encoding.h"
#include "common/ceph_time.h"
#include "common/dout.h"
#include "rgw_common.h"
class RGWCoroutine;
class RGWHTTPManager;
namespace rgw {
namespace sal {
class RadosStore;
}
/// Interface to inform the trim process about which buckets are most active
struct BucketChangeObserver {
virtual ~BucketChangeObserver() = default;
virtual void on_bucket_changed(const std::string_view& bucket_instance) = 0;
};
/// Configuration for BucketTrimManager
struct BucketTrimConfig {
/// time interval in seconds between bucket trim attempts
uint32_t trim_interval_sec{0};
/// maximum number of buckets to track with BucketChangeObserver
size_t counter_size{0};
/// maximum number of buckets to process each trim interval
uint32_t buckets_per_interval{0};
/// minimum number of buckets to choose from the global bucket instance list
uint32_t min_cold_buckets_per_interval{0};
/// maximum number of buckets to process in parallel
uint32_t concurrent_buckets{0};
/// timeout in ms for bucket trim notify replies
uint64_t notify_timeout_ms{0};
/// maximum number of recently trimmed buckets to remember (should be small
/// enough for a linear search)
size_t recent_size{0};
/// maximum duration to consider a trim as 'recent' (should be some multiple
/// of the trim interval, at least)
ceph::timespan recent_duration{0};
};
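// Hedged example: one plausible way to populate the config by hand (e.g. in a
// test) instead of calling configure_bucket_trim() below; every value here is
// hypothetical.
inline BucketTrimConfig make_example_trim_config()
{
  BucketTrimConfig config;
  config.trim_interval_sec = 1200;          // attempt a trim every 20 minutes
  config.counter_size = 512;                // track up to 512 hot buckets
  config.buckets_per_interval = 16;
  config.min_cold_buckets_per_interval = 4; // always sample some cold buckets
  config.concurrent_buckets = 4;
  config.notify_timeout_ms = 10000;
  config.recent_size = 128;
  config.recent_duration = std::chrono::hours(2);
  return config;
}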
/// fill out the BucketTrimConfig from the ceph context
void configure_bucket_trim(CephContext *cct, BucketTrimConfig& config);
/// Determines the buckets on which to focus trim activity, using two sources of
/// input: the frequency of entries read from the data changes log, and a global
/// listing of the bucket.instance metadata. This allows us to trim active
/// buckets quickly, while also ensuring that all buckets will eventually trim
class BucketTrimManager : public BucketChangeObserver, public DoutPrefixProvider {
class Impl;
std::unique_ptr<Impl> impl;
public:
BucketTrimManager(sal::RadosStore *store, const BucketTrimConfig& config);
~BucketTrimManager();
int init();
/// increment a counter for the given bucket instance
void on_bucket_changed(const std::string_view& bucket_instance) override;
/// create a coroutine to run the bucket trim process every trim interval
RGWCoroutine* create_bucket_trim_cr(RGWHTTPManager *http);
/// create a coroutine to trim buckets directly via radosgw-admin
RGWCoroutine* create_admin_bucket_trim_cr(RGWHTTPManager *http);
CephContext *get_cct() const override;
unsigned get_subsys() const;
std::ostream& gen_prefix(std::ostream& out) const;
};
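// Hedged usage sketch of the wiring implied by the interfaces above, kept as
// a comment because the surrounding pieces (store, http manager, coroutine
// manager) are only forward-declared in this header:
//
//   BucketTrimConfig config;
//   configure_bucket_trim(store->ctx(), config);
//   rgw::BucketTrimManager trim(store, config);
//   if (trim.init() < 0) { /* handle error */ }
//   // data sync feeds bucket activity into the change counter:
//   trim.on_bucket_changed(bucket_instance_key);
//   // run the periodic trim coroutine on a coroutine manager:
//   crs.run(dpp, trim.create_bucket_trim_cr(&http_manager));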
/// provides persistent storage for the trim manager's current position in the
/// list of bucket instance metadata
struct BucketTrimStatus {
std::string marker; //< metadata key of current bucket instance
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(marker, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& p) {
DECODE_START(1, p);
decode(marker, p);
DECODE_FINISH(p);
}
static const std::string oid;
};
} // namespace rgw
WRITE_CLASS_ENCODER(rgw::BucketTrimStatus);
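// Hedged example: round-tripping the trim status through a bufferlist with
// the encoder generated above; the function name is hypothetical.
inline rgw::BucketTrimStatus reencode_trim_status(const rgw::BucketTrimStatus& in)
{
  bufferlist bl;
  encode(in, bl);   // free function generated by WRITE_CLASS_ENCODER
  rgw::BucketTrimStatus out;
  auto p = bl.cbegin();
  decode(out, p);   // out.marker now equals in.marker
  return out;
}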
int bilog_trim(const DoutPrefixProvider* p, rgw::sal::RadosStore* store,
RGWBucketInfo& bucket_info, uint64_t gen, int shard_id,
std::string_view start_marker, std::string_view end_marker);
| 3,920 | 31.139344 | 82 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_trim_datalog.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include <vector>
#include <string>
#include "common/errno.h"
#include "rgw_trim_datalog.h"
#include "rgw_cr_rados.h"
#include "rgw_cr_rest.h"
#include "rgw_datalog.h"
#include "rgw_data_sync.h"
#include "rgw_zone.h"
#include "rgw_bucket.h"
#include "services/svc_zone.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
#define dout_prefix (*_dout << "data trim: ")
namespace {
class DatalogTrimImplCR : public RGWSimpleCoroutine {
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
int shard;
std::string marker;
std::string* last_trim_marker;
public:
DatalogTrimImplCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, int shard,
const std::string& marker, std::string* last_trim_marker)
: RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), shard(shard),
marker(marker), last_trim_marker(last_trim_marker) {
set_description() << "Datalog trim shard=" << shard
<< " marker=" << marker;
}
int send_request(const DoutPrefixProvider *dpp) override {
set_status() << "sending request";
cn = stack->create_completion_notifier();
return store->svc()->datalog_rados->trim_entries(dpp, shard, marker,
cn->completion());
}
int request_complete() override {
int r = cn->completion()->get_return_value();
ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << "(): trim of shard=" << shard
<< " marker=" << marker << " returned r=" << r << dendl;
set_status() << "request complete; ret=" << r;
if (r != -ENODATA) {
return r;
}
// nothing left to trim, update last_trim_marker
if (*last_trim_marker < marker &&
marker != store->svc()->datalog_rados->max_marker()) {
*last_trim_marker = marker;
}
return 0;
}
};
/// return the marker that it's safe to trim up to
const std::string& get_stable_marker(const rgw_data_sync_marker& m)
{
return m.state == m.FullSync ? m.next_step_marker : m.marker;
}
/// populate the container starting with 'dest' with the minimum stable marker
/// of each shard for all of the peers in [first, last)
template <typename IterIn, typename IterOut>
void take_min_markers(IterIn first, IterIn last, IterOut dest)
{
if (first == last) {
return;
}
for (auto p = first; p != last; ++p) {
auto m = dest;
for (auto &shard : p->sync_markers) {
const auto& stable = get_stable_marker(shard.second);
if (*m > stable) {
*m = stable;
}
++m;
}
}
}
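// Hedged example: with two peers and two shards, the output is the per-shard
// minimum of the peers' stable markers. All marker strings are hypothetical.
[[maybe_unused]] static void take_min_markers_example()
{
  auto set_shard = [] (rgw_data_sync_status& s, uint32_t shard,
                       const std::string& m) {
    // set both markers so the example holds in either sync state
    s.sync_markers[shard].marker = m;
    s.sync_markers[shard].next_step_marker = m;
  };
  std::vector<rgw_data_sync_status> peers(2);
  set_shard(peers[0], 0, "1_10"); set_shard(peers[0], 1, "1_50");
  set_shard(peers[1], 0, "1_20"); set_shard(peers[1], 1, "1_40");
  std::vector<std::string> mins(2, "9"); // seeded above any real marker
  take_min_markers(peers.begin(), peers.end(), mins.begin());
  // mins is now {"1_10", "1_40"}
}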
} // anonymous namespace
class DataLogTrimCR : public RGWCoroutine {
using TrimCR = DatalogTrimImplCR;
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWHTTPManager *http;
const int num_shards;
const std::string& zone_id; //< my zone id
std::vector<rgw_data_sync_status> peer_status; //< sync status for each peer
std::vector<std::string> min_shard_markers; //< min marker per shard
std::vector<std::string>& last_trim; //< last trimmed marker per shard
int ret{0};
public:
DataLogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, std::vector<std::string>& last_trim)
: RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http),
num_shards(num_shards),
zone_id(store->svc()->zone->get_zone().id),
peer_status(store->svc()->zone->get_zone_data_notify_to_map().size()),
min_shard_markers(num_shards,
std::string(store->svc()->datalog_rados->max_marker())),
last_trim(last_trim)
{}
int operate(const DoutPrefixProvider *dpp) override;
};
int DataLogTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
ldpp_dout(dpp, 10) << "fetching sync status for zone " << zone_id << dendl;
set_status("fetching sync status");
yield {
// query data sync status from each sync peer
rgw_http_param_pair params[] = {
{ "type", "data" },
{ "status", nullptr },
{ "source-zone", zone_id.c_str() },
{ nullptr, nullptr }
};
auto p = peer_status.begin();
for (auto& c : store->svc()->zone->get_zone_data_notify_to_map()) {
ldpp_dout(dpp, 20) << "query sync status from " << c.first << dendl;
using StatusCR = RGWReadRESTResourceCR<rgw_data_sync_status>;
spawn(new StatusCR(cct, c.second, http, "/admin/log/", params, &*p),
false);
++p;
}
}
// must get a successful reply from all peers to consider trimming
ret = 0;
while (ret == 0 && num_spawned() > 0) {
yield wait_for_child();
collect_next(&ret);
}
drain_all();
if (ret < 0) {
ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl;
return set_cr_error(ret);
}
ldpp_dout(dpp, 10) << "trimming log shards" << dendl;
set_status("trimming log shards");
yield {
// determine the minimum marker for each shard
take_min_markers(peer_status.begin(), peer_status.end(),
min_shard_markers.begin());
for (int i = 0; i < num_shards; i++) {
const auto& m = min_shard_markers[i];
if (m <= last_trim[i]) {
continue;
}
ldpp_dout(dpp, 10) << "trimming log shard " << i
<< " at marker=" << m
<< " last_trim=" << last_trim[i] << dendl;
spawn(new TrimCR(dpp, store, i, m, &last_trim[i]),
true);
}
}
return set_cr_done();
}
return 0;
}
RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards,
std::vector<std::string>& markers)
{
return new DataLogTrimCR(dpp, store, http, num_shards, markers);
}
class DataLogTrimPollCR : public RGWCoroutine {
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* store;
RGWHTTPManager *http;
const int num_shards;
const utime_t interval; //< polling interval
const std::string lock_oid; //< use first data log shard for lock
const std::string lock_cookie;
std::vector<std::string> last_trim; //< last trimmed marker per shard
public:
DataLogTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: RGWCoroutine(store->ctx()), dpp(dpp), store(store), http(http),
num_shards(num_shards), interval(interval),
lock_oid(store->svc()->datalog_rados->get_oid(0, 0)),
lock_cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct)),
last_trim(num_shards)
{}
int operate(const DoutPrefixProvider *dpp) override;
};
int DataLogTrimPollCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
set_status("sleeping");
wait(interval);
// request a 'data_trim' lock that covers the entire wait interval to
// prevent other gateways from attempting to trim for the duration
set_status("acquiring trim lock");
// interval is a small number and unlikely to overflow
// coverity[store_truncates_time_t:SUPPRESS]
yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
rgw_raw_obj(store->svc()->zone->get_zone_params().log_pool, lock_oid),
"data_trim", lock_cookie,
interval.sec()));
if (retcode < 0) {
// if the lock is already held, go back to sleep and try again later
ldpp_dout(dpp, 4) << "failed to lock " << lock_oid << ", trying again in "
<< interval.sec() << "s" << dendl;
continue;
}
set_status("trimming");
yield call(new DataLogTrimCR(dpp, store, http, num_shards, last_trim));
// note that the lock is not released. this is intentional, as it avoids
// duplicating this work in other gateways
}
}
return 0;
}
RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards, utime_t interval)
{
return new DataLogTrimPollCR(dpp, store, http, num_shards, interval);
}
| 8,526 | 32.308594 | 112 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_trim_datalog.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <vector>
#include "common/dout.h"
class RGWCoroutine;
class RGWRados;
class RGWHTTPManager;
class utime_t;
namespace rgw { namespace sal {
class RadosStore;
} }
// DataLogTrimCR factory function
extern RGWCoroutine* create_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards, utime_t interval);
// factory function for datalog trim via radosgw-admin
RGWCoroutine* create_admin_data_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards,
std::vector<std::string>& markers);
| 965 | 32.310345 | 104 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_trim_mdlog.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/errno.h"
#include "rgw_trim_mdlog.h"
#include "rgw_sync.h"
#include "rgw_cr_rados.h"
#include "rgw_cr_rest.h"
#include "rgw_zone.h"
#include "services/svc_zone.h"
#include "services/svc_meta.h"
#include "services/svc_mdlog.h"
#include "services/svc_cls.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
#undef dout_prefix
#define dout_prefix (*_dout << "meta trim: ")
/// purge all log shards for the given mdlog
class PurgeLogShardsCR : public RGWShardCollectCR {
rgw::sal::RadosStore* const store;
const RGWMetadataLog* mdlog;
const int num_shards;
rgw_raw_obj obj;
int i{0};
static constexpr int max_concurrent = 16;
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to remove mdlog shard: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
PurgeLogShardsCR(rgw::sal::RadosStore* store, const RGWMetadataLog* mdlog,
const rgw_pool& pool, int num_shards)
: RGWShardCollectCR(store->ctx(), max_concurrent),
store(store), mdlog(mdlog), num_shards(num_shards), obj(pool, "")
{}
bool spawn_next() override {
if (i == num_shards) {
return false;
}
mdlog->get_shard_oid(i++, obj.oid);
spawn(new RGWRadosRemoveCR(store, obj), false);
return true;
}
};
using Cursor = RGWPeriodHistory::Cursor;
/// purge mdlogs from the oldest up to (but not including) the given realm_epoch
class PurgePeriodLogsCR : public RGWCoroutine {
struct Svc {
RGWSI_Zone *zone;
RGWSI_MDLog *mdlog;
} svc;
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* const store;
RGWMetadataManager *const metadata;
RGWObjVersionTracker objv;
Cursor cursor;
epoch_t realm_epoch;
epoch_t *last_trim_epoch; //< update last trim on success
public:
PurgePeriodLogsCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, epoch_t realm_epoch, epoch_t *last_trim)
: RGWCoroutine(store->ctx()), dpp(dpp), store(store), metadata(store->ctl()->meta.mgr),
realm_epoch(realm_epoch), last_trim_epoch(last_trim) {
svc.zone = store->svc()->zone;
svc.mdlog = store->svc()->mdlog;
}
int operate(const DoutPrefixProvider *dpp) override;
};
int PurgePeriodLogsCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// read our current oldest log period
yield call(svc.mdlog->read_oldest_log_period_cr(dpp, &cursor, &objv));
if (retcode < 0) {
return set_cr_error(retcode);
}
ceph_assert(cursor);
ldpp_dout(dpp, 20) << "oldest log realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
// trim -up to- the given realm_epoch
while (cursor.get_epoch() < realm_epoch) {
ldpp_dout(dpp, 4) << "purging log shards for realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
yield {
const auto mdlog = svc.mdlog->get_log(cursor.get_period().get_id());
const auto& pool = svc.zone->get_zone_params().log_pool;
auto num_shards = cct->_conf->rgw_md_log_max_shards;
call(new PurgeLogShardsCR(store, mdlog, pool, num_shards));
}
if (retcode < 0) {
ldpp_dout(dpp, 1) << "failed to remove log shards: "
<< cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
ldpp_dout(dpp, 10) << "removed log shards for realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
// update our mdlog history
yield call(svc.mdlog->trim_log_period_cr(dpp, cursor, &objv));
if (retcode == -ENOENT) {
// must have raced to update mdlog history. return success and allow the
// winner to continue purging
ldpp_dout(dpp, 10) << "already removed log shards for realm_epoch=" << cursor.get_epoch()
<< " period=" << cursor.get_period().get_id() << dendl;
return set_cr_done();
} else if (retcode < 0) {
ldpp_dout(dpp, 1) << "failed to remove log shards for realm_epoch="
<< cursor.get_epoch() << " period=" << cursor.get_period().get_id()
<< " with: " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (*last_trim_epoch < cursor.get_epoch()) {
*last_trim_epoch = cursor.get_epoch();
}
      ceph_assert(cursor.has_next()); // the current period should always come after the ones we purge
cursor.next();
}
return set_cr_done();
}
return 0;
}
namespace {
using connection_map = std::map<std::string, std::unique_ptr<RGWRESTConn>>;
/// construct a RGWRESTConn for each zone in the realm
template <typename Zonegroups>
connection_map make_peer_connections(rgw::sal::RadosStore* store,
const Zonegroups& zonegroups)
{
connection_map connections;
for (auto& g : zonegroups) {
for (auto& z : g.second.zones) {
std::unique_ptr<RGWRESTConn> conn{
new RGWRESTConn(store->ctx(), store, z.first.id, z.second.endpoints, g.second.api_name)};
connections.emplace(z.first.id, std::move(conn));
}
}
return connections;
}
/// return the marker that it's safe to trim up to
const std::string& get_stable_marker(const rgw_meta_sync_marker& m)
{
return m.state == m.FullSync ? m.next_step_marker : m.marker;
}
/// comparison operator for take_min_status()
bool operator<(const rgw_meta_sync_marker& lhs, const rgw_meta_sync_marker& rhs)
{
// sort by stable marker
return get_stable_marker(lhs) < get_stable_marker(rhs);
}
/// populate the status with the minimum stable marker of each shard for any
/// peer whose realm_epoch matches the minimum realm_epoch in the input
template <typename Iter>
int take_min_status(CephContext *cct, Iter first, Iter last,
rgw_meta_sync_status *status)
{
if (first == last) {
return -EINVAL;
}
const size_t num_shards = cct->_conf->rgw_md_log_max_shards;
status->sync_info.realm_epoch = std::numeric_limits<epoch_t>::max();
for (auto p = first; p != last; ++p) {
// validate peer's shard count
if (p->sync_markers.size() != num_shards) {
ldout(cct, 1) << "take_min_status got peer status with "
<< p->sync_markers.size() << " shards, expected "
<< num_shards << dendl;
return -EINVAL;
}
if (p->sync_info.realm_epoch < status->sync_info.realm_epoch) {
// earlier epoch, take its entire status
*status = std::move(*p);
} else if (p->sync_info.realm_epoch == status->sync_info.realm_epoch) {
// same epoch, take any earlier markers
auto m = status->sync_markers.begin();
for (auto& shard : p->sync_markers) {
if (shard.second < m->second) {
m->second = std::move(shard.second);
}
++m;
}
}
}
return 0;
}
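// Worked example: with rgw_md_log_max_shards=2 and hypothetical peer statuses
//   A: realm_epoch=5, stable markers {"1_30", "1_70"}
//   B: realm_epoch=4, stable markers {"1_90", "1_90"}
//   C: realm_epoch=4, stable markers {"1_80", "1_95"}
// the minimum realm_epoch is 4, so A is ignored entirely and the result is
// realm_epoch=4 with per-shard minimum markers {"1_80", "1_90"}.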
struct TrimEnv {
const DoutPrefixProvider *dpp;
rgw::sal::RadosStore* const store;
RGWHTTPManager *const http;
int num_shards;
const rgw_zone_id& zone;
Cursor current; //< cursor to current period
epoch_t last_trim_epoch{0}; //< epoch of last mdlog that was purged
TrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards)
: dpp(dpp), store(store), http(http), num_shards(num_shards),
zone(store->svc()->zone->zone_id()),
current(store->svc()->mdlog->get_period_history()->get_current())
{}
};
struct MasterTrimEnv : public TrimEnv {
connection_map connections; //< peer connections
std::vector<rgw_meta_sync_status> peer_status; //< sync status for each peer
/// last trim marker for each shard, only applies to current period's mdlog
std::vector<std::string> last_trim_markers;
MasterTrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards)
: TrimEnv(dpp, store, http, num_shards),
last_trim_markers(num_shards)
{
auto& period = current.get_period();
connections = make_peer_connections(store, period.get_map().zonegroups);
connections.erase(zone.id);
peer_status.resize(connections.size());
}
};
struct PeerTrimEnv : public TrimEnv {
/// last trim timestamp for each shard, only applies to current period's mdlog
std::vector<ceph::real_time> last_trim_timestamps;
PeerTrimEnv(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards)
: TrimEnv(dpp, store, http, num_shards),
last_trim_timestamps(num_shards)
{}
void set_num_shards(int num_shards) {
this->num_shards = num_shards;
last_trim_timestamps.resize(num_shards);
}
};
} // anonymous namespace
/// spawn a trim cr for each shard that needs it, while limiting the number
/// of concurrent shards
class MetaMasterTrimShardCollectCR : public RGWShardCollectCR {
private:
static constexpr int MAX_CONCURRENT_SHARDS = 16;
MasterTrimEnv& env;
RGWMetadataLog *mdlog;
int shard_id{0};
std::string oid;
const rgw_meta_sync_status& sync_status;
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to trim mdlog shard: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
MetaMasterTrimShardCollectCR(MasterTrimEnv& env, RGWMetadataLog *mdlog,
const rgw_meta_sync_status& sync_status)
: RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
env(env), mdlog(mdlog), sync_status(sync_status)
{}
bool spawn_next() override;
};
bool MetaMasterTrimShardCollectCR::spawn_next()
{
while (shard_id < env.num_shards) {
auto m = sync_status.sync_markers.find(shard_id);
if (m == sync_status.sync_markers.end()) {
shard_id++;
continue;
}
auto& stable = get_stable_marker(m->second);
auto& last_trim = env.last_trim_markers[shard_id];
if (stable <= last_trim) {
// already trimmed
ldpp_dout(env.dpp, 20) << "skipping log shard " << shard_id
<< " at marker=" << stable
<< " last_trim=" << last_trim
<< " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl;
shard_id++;
continue;
}
mdlog->get_shard_oid(shard_id, oid);
ldpp_dout(env.dpp, 10) << "trimming log shard " << shard_id
<< " at marker=" << stable
<< " last_trim=" << last_trim
<< " realm_epoch=" << sync_status.sync_info.realm_epoch << dendl;
spawn(new RGWSyncLogTrimCR(env.dpp, env.store, oid, stable, &last_trim), false);
shard_id++;
return true;
}
return false;
}
/// spawn rest requests to read each peer's sync status
class MetaMasterStatusCollectCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
MasterTrimEnv& env;
connection_map::iterator c;
std::vector<rgw_meta_sync_status>::iterator s;
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to fetch metadata sync status: "
<< cpp_strerror(r) << dendl;
}
return r;
}
public:
explicit MetaMasterStatusCollectCR(MasterTrimEnv& env)
: RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
env(env), c(env.connections.begin()), s(env.peer_status.begin())
{}
bool spawn_next() override {
if (c == env.connections.end()) {
return false;
}
static rgw_http_param_pair params[] = {
{ "type", "metadata" },
{ "status", nullptr },
{ nullptr, nullptr }
};
ldout(cct, 20) << "query sync status from " << c->first << dendl;
auto conn = c->second.get();
using StatusCR = RGWReadRESTResourceCR<rgw_meta_sync_status>;
spawn(new StatusCR(cct, conn, env.http, "/admin/log/", params, &*s),
false);
++c;
++s;
return true;
}
};
class MetaMasterTrimCR : public RGWCoroutine {
MasterTrimEnv& env;
rgw_meta_sync_status min_status; //< minimum sync status of all peers
int ret{0};
public:
explicit MetaMasterTrimCR(MasterTrimEnv& env)
: RGWCoroutine(env.store->ctx()), env(env)
{}
int operate(const DoutPrefixProvider *dpp) override;
};
int MetaMasterTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// TODO: detect this and fail before we spawn the trim thread?
if (env.connections.empty()) {
ldpp_dout(dpp, 4) << "no peers, exiting" << dendl;
return set_cr_done();
}
ldpp_dout(dpp, 10) << "fetching sync status for zone " << env.zone << dendl;
// query mdlog sync status from peers
yield call(new MetaMasterStatusCollectCR(env));
// must get a successful reply from all peers to consider trimming
if (ret < 0) {
ldpp_dout(dpp, 4) << "failed to fetch sync status from all peers" << dendl;
return set_cr_error(ret);
}
// determine the minimum epoch and markers
ret = take_min_status(env.store->ctx(), env.peer_status.begin(),
env.peer_status.end(), &min_status);
if (ret < 0) {
ldpp_dout(dpp, 4) << "failed to calculate min sync status from peers" << dendl;
return set_cr_error(ret);
}
yield {
auto store = env.store;
auto epoch = min_status.sync_info.realm_epoch;
ldpp_dout(dpp, 4) << "realm epoch min=" << epoch
<< " current=" << env.current.get_epoch()<< dendl;
if (epoch > env.last_trim_epoch + 1) {
// delete any prior mdlog periods
spawn(new PurgePeriodLogsCR(dpp, store, epoch, &env.last_trim_epoch), true);
} else {
ldpp_dout(dpp, 10) << "mdlogs already purged up to realm_epoch "
<< env.last_trim_epoch << dendl;
}
// if realm_epoch == current, trim mdlog based on markers
if (epoch == env.current.get_epoch()) {
auto mdlog = store->svc()->mdlog->get_log(env.current.get_period().get_id());
spawn(new MetaMasterTrimShardCollectCR(env, mdlog, min_status), true);
}
}
// ignore any errors during purge/trim because we want to hold the lock open
return set_cr_done();
}
return 0;
}
/// read the first entry of the master's mdlog shard and trim to that position
class MetaPeerTrimShardCR : public RGWCoroutine {
RGWMetaSyncEnv& env;
RGWMetadataLog *mdlog;
const std::string& period_id;
const int shard_id;
RGWMetadataLogInfo info;
ceph::real_time stable; //< safe timestamp to trim, according to master
ceph::real_time *last_trim; //< last trimmed timestamp, updated on trim
rgw_mdlog_shard_data result; //< result from master's mdlog listing
public:
MetaPeerTrimShardCR(RGWMetaSyncEnv& env, RGWMetadataLog *mdlog,
const std::string& period_id, int shard_id,
ceph::real_time *last_trim)
: RGWCoroutine(env.store->ctx()), env(env), mdlog(mdlog),
period_id(period_id), shard_id(shard_id), last_trim(last_trim)
{}
int operate(const DoutPrefixProvider *dpp) override;
};
int MetaPeerTrimShardCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
// query master's first mdlog entry for this shard
yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id,
"", 1, &result));
if (retcode < 0) {
ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard "
<< shard_id << " for period " << period_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (result.entries.empty()) {
// if there are no mdlog entries, we don't have a timestamp to compare. we
// can't just trim everything, because there could be racing updates since
// this empty reply. query the mdlog shard info to read its max timestamp,
// then retry the listing to make sure it's still empty before trimming to
      // that timestamp
ldpp_dout(dpp, 10) << "empty master mdlog shard " << shard_id
<< ", reading last timestamp from shard info" << dendl;
// read the mdlog shard info for the last timestamp
yield call(create_read_remote_mdlog_shard_info_cr(&env, period_id, shard_id, &info));
if (retcode < 0) {
ldpp_dout(dpp, 5) << "failed to read info from master's mdlog shard "
<< shard_id << " for period " << period_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
if (ceph::real_clock::is_zero(info.last_update)) {
return set_cr_done(); // nothing to trim
}
ldpp_dout(dpp, 10) << "got mdlog shard info with last update="
<< info.last_update << dendl;
// re-read the master's first mdlog entry to make sure it hasn't changed
yield call(create_list_remote_mdlog_shard_cr(&env, period_id, shard_id,
"", 1, &result));
if (retcode < 0) {
ldpp_dout(dpp, 5) << "failed to read first entry from master's mdlog shard "
<< shard_id << " for period " << period_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
// if the mdlog is still empty, trim to max marker
if (result.entries.empty()) {
stable = info.last_update;
} else {
stable = result.entries.front().timestamp;
// can only trim -up to- master's first timestamp, so subtract a second.
// (this is why we use timestamps instead of markers for the peers)
stable -= std::chrono::seconds(1);
}
} else {
stable = result.entries.front().timestamp;
stable -= std::chrono::seconds(1);
}
if (stable <= *last_trim) {
ldpp_dout(dpp, 10) << "skipping log shard " << shard_id
<< " at timestamp=" << stable
<< " last_trim=" << *last_trim << dendl;
return set_cr_done();
}
ldpp_dout(dpp, 10) << "trimming log shard " << shard_id
<< " at timestamp=" << stable
<< " last_trim=" << *last_trim << dendl;
yield {
std::string oid;
mdlog->get_shard_oid(shard_id, oid);
call(new RGWRadosTimelogTrimCR(dpp, env.store, oid, real_time{}, stable, "", ""));
}
if (retcode < 0 && retcode != -ENODATA) {
ldpp_dout(dpp, 1) << "failed to trim mdlog shard " << shard_id
<< ": " << cpp_strerror(retcode) << dendl;
return set_cr_error(retcode);
}
*last_trim = stable;
return set_cr_done();
}
return 0;
}
class MetaPeerTrimShardCollectCR : public RGWShardCollectCR {
static constexpr int MAX_CONCURRENT_SHARDS = 16;
PeerTrimEnv& env;
RGWMetadataLog *mdlog;
const std::string& period_id;
RGWMetaSyncEnv meta_env; //< for RGWListRemoteMDLogShardCR
int shard_id{0};
int handle_result(int r) override {
if (r == -ENOENT) { // ENOENT is not a fatal error
return 0;
}
if (r < 0) {
ldout(cct, 4) << "failed to trim mdlog shard: " << cpp_strerror(r) << dendl;
}
return r;
}
public:
MetaPeerTrimShardCollectCR(PeerTrimEnv& env, RGWMetadataLog *mdlog)
: RGWShardCollectCR(env.store->ctx(), MAX_CONCURRENT_SHARDS),
env(env), mdlog(mdlog), period_id(env.current.get_period().get_id())
{
meta_env.init(env.dpp, cct, env.store, env.store->svc()->zone->get_master_conn(),
env.store->svc()->rados->get_async_processor(), env.http, nullptr,
env.store->getRados()->get_sync_tracer());
}
bool spawn_next() override;
};
bool MetaPeerTrimShardCollectCR::spawn_next()
{
if (shard_id >= env.num_shards) {
return false;
}
auto& last_trim = env.last_trim_timestamps[shard_id];
spawn(new MetaPeerTrimShardCR(meta_env, mdlog, period_id, shard_id, &last_trim),
false);
shard_id++;
return true;
}
class MetaPeerTrimCR : public RGWCoroutine {
PeerTrimEnv& env;
rgw_mdlog_info mdlog_info; //< master's mdlog info
public:
explicit MetaPeerTrimCR(PeerTrimEnv& env) : RGWCoroutine(env.store->ctx()), env(env) {}
int operate(const DoutPrefixProvider *dpp) override;
};
int MetaPeerTrimCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
ldpp_dout(dpp, 10) << "fetching master mdlog info" << dendl;
yield {
// query mdlog_info from master for oldest_log_period
rgw_http_param_pair params[] = {
{ "type", "metadata" },
{ nullptr, nullptr }
};
using LogInfoCR = RGWReadRESTResourceCR<rgw_mdlog_info>;
call(new LogInfoCR(cct, env.store->svc()->zone->get_master_conn(), env.http,
"/admin/log/", params, &mdlog_info));
}
if (retcode < 0) {
ldpp_dout(dpp, 4) << "failed to read mdlog info from master" << dendl;
return set_cr_error(retcode);
}
    // use the master's shard count instead of the local mdlog configuration
env.set_num_shards(mdlog_info.num_shards);
if (mdlog_info.realm_epoch > env.last_trim_epoch + 1) {
// delete any prior mdlog periods
yield call(new PurgePeriodLogsCR(dpp, env.store, mdlog_info.realm_epoch,
&env.last_trim_epoch));
} else {
ldpp_dout(dpp, 10) << "mdlogs already purged through realm_epoch "
<< env.last_trim_epoch << dendl;
}
// if realm_epoch == current, trim mdlog based on master's markers
if (mdlog_info.realm_epoch == env.current.get_epoch()) {
yield {
auto mdlog = env.store->svc()->mdlog->get_log(env.current.get_period().get_id());
call(new MetaPeerTrimShardCollectCR(env, mdlog));
// ignore any errors during purge/trim because we want to hold the lock open
}
}
return set_cr_done();
}
return 0;
}
class MetaTrimPollCR : public RGWCoroutine {
rgw::sal::RadosStore* const store;
const utime_t interval; //< polling interval
const rgw_raw_obj obj;
const std::string name{"meta_trim"}; //< lock name
const std::string cookie;
protected:
/// allocate the coroutine to run within the lease
virtual RGWCoroutine* alloc_cr() = 0;
public:
MetaTrimPollCR(rgw::sal::RadosStore* store, utime_t interval)
: RGWCoroutine(store->ctx()), store(store), interval(interval),
obj(store->svc()->zone->get_zone_params().log_pool, RGWMetadataLogHistory::oid),
cookie(RGWSimpleRadosLockCR::gen_random_cookie(cct))
{}
int operate(const DoutPrefixProvider *dpp) override;
};
int MetaTrimPollCR::operate(const DoutPrefixProvider *dpp)
{
reenter(this) {
for (;;) {
set_status("sleeping");
wait(interval);
// prevent others from trimming for our entire wait interval
set_status("acquiring trim lock");
// interval is a small number and unlikely to overflow
// coverity[store_truncates_time_t:SUPPRESS]
yield call(new RGWSimpleRadosLockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie, interval.sec()));
if (retcode < 0) {
ldout(cct, 4) << "failed to lock: " << cpp_strerror(retcode) << dendl;
continue;
}
set_status("trimming");
yield call(alloc_cr());
if (retcode < 0) {
// on errors, unlock so other gateways can try
set_status("unlocking");
yield call(new RGWSimpleRadosUnlockCR(store->svc()->rados->get_async_processor(), store,
obj, name, cookie));
      }
      // on success the lock is intentionally left held; this avoids other
      // gateways duplicating the trim work during the wait interval
}
}
return 0;
}
class MetaMasterTrimPollCR : public MetaTrimPollCR {
MasterTrimEnv env; //< trim state to share between calls
RGWCoroutine* alloc_cr() override {
return new MetaMasterTrimCR(env);
}
public:
MetaMasterTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: MetaTrimPollCR(store, interval),
env(dpp, store, http, num_shards)
{}
};
class MetaPeerTrimPollCR : public MetaTrimPollCR {
PeerTrimEnv env; //< trim state to share between calls
RGWCoroutine* alloc_cr() override {
return new MetaPeerTrimCR(env);
}
public:
MetaPeerTrimPollCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, utime_t interval)
: MetaTrimPollCR(store, interval),
env(dpp, store, http, num_shards)
{}
};
namespace {
bool sanity_check_endpoints(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store) {
bool retval = true;
auto current = store->svc()->mdlog->get_period_history()->get_current();
const auto& period = current.get_period();
for (const auto& [_, zonegroup] : period.get_map().zonegroups) {
if (zonegroup.endpoints.empty()) {
ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " WARNING: Cluster is is misconfigured! "
<< " Zonegroup " << zonegroup.get_name()
<< " (" << zonegroup.get_id() << ") in Realm "
<< period.get_realm_name() << " ( " << period.get_realm() << ") "
<< " has no endpoints!" << dendl;
}
for (const auto& [_, zone] : zonegroup.zones) {
if (zone.endpoints.empty()) {
ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " ERROR: Cluster is is misconfigured! "
<< " Zone " << zone.name << " (" << zone.id << ") in Zonegroup "
<< zonegroup.get_name() << " ( " << zonegroup.get_id()
<< ") in Realm " << period.get_realm_name()
<< " ( " << period.get_realm() << ") "
<< " has no endpoints! Trimming is impossible." << dendl;
retval = false;
}
}
}
return retval;
}
} // anonymous namespace
RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http,
int num_shards, utime_t interval)
{
if (!sanity_check_endpoints(dpp, store)) {
ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " ERROR: Cluster is is misconfigured! Refusing to trim." << dendl;
return nullptr;
}
if (store->svc()->zone->is_meta_master()) {
return new MetaMasterTrimPollCR(dpp, store, http, num_shards, interval);
}
return new MetaPeerTrimPollCR(dpp, store, http, num_shards, interval);
}
struct MetaMasterAdminTrimCR : private MasterTrimEnv, public MetaMasterTrimCR {
MetaMasterAdminTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards)
: MasterTrimEnv(dpp, store, http, num_shards),
MetaMasterTrimCR(*static_cast<MasterTrimEnv*>(this))
{}
};
struct MetaPeerAdminTrimCR : private PeerTrimEnv, public MetaPeerTrimCR {
MetaPeerAdminTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, RGWHTTPManager *http, int num_shards)
: PeerTrimEnv(dpp, store, http, num_shards),
MetaPeerTrimCR(*static_cast<PeerTrimEnv*>(this))
{}
};
RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards)
{
if (!sanity_check_endpoints(dpp, store)) {
ldpp_dout(dpp, -1)
<< __PRETTY_FUNCTION__ << ":" << __LINE__
<< " ERROR: Cluster is is misconfigured! Refusing to trim." << dendl;
return nullptr;
}
if (store->svc()->zone->is_meta_master()) {
return new MetaMasterAdminTrimCR(dpp, store, http, num_shards);
}
return new MetaPeerAdminTrimCR(dpp, store, http, num_shards);
}
| 27,534 | 33.461827 | 121 |
cc
|
null |
ceph-main/src/rgw/driver/rados/rgw_trim_mdlog.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
class RGWCoroutine;
class DoutPrefixProvider;
class RGWRados;
class RGWHTTPManager;
class utime_t;
namespace rgw { namespace sal {
class RadosStore;
} }
// MetaLogTrimCR factory function
RGWCoroutine* create_meta_log_trim_cr(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards, utime_t interval);
// factory function for mdlog trim via radosgw-admin
RGWCoroutine* create_admin_meta_log_trim_cr(const DoutPrefixProvider *dpp,
rgw::sal::RadosStore* store,
RGWHTTPManager *http,
int num_shards);
| 908 | 33.961538 | 74 |
h
|
null |
ceph-main/src/rgw/driver/rados/rgw_user.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "common/errno.h"
#include "rgw_user.h"
#include "rgw_bucket.h"
#include "rgw_quota.h"
#include "services/svc_user.h"
#include "services/svc_meta.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
extern void op_type_to_str(uint32_t mask, char *buf, int len);
static string key_type_to_str(int key_type) {
  switch (key_type) {
  case KEY_TYPE_SWIFT:
    return "swift";
  default:
    return "s3";
  }
}
static bool char_is_unreserved_url(char c)
{
if (isalnum(c))
return true;
switch (c) {
case '-':
case '.':
case '_':
case '~':
return true;
default:
return false;
}
}
static bool validate_access_key(string& key)
{
const char *p = key.c_str();
while (*p) {
if (!char_is_unreserved_url(*p))
return false;
p++;
}
return true;
}
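// Hedged example: validate_access_key() accepts only the RFC 3986
// "unreserved" characters checked above. The values below are hypothetical.
[[maybe_unused]] static void validate_access_key_example()
{
  string ok = "AKIA1234EXAMPLE";
  string bad = "tenant:user"; // ':' is not an unreserved character
  [[maybe_unused]] bool a = validate_access_key(ok);  // true
  [[maybe_unused]] bool b = validate_access_key(bad); // false
}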
static void set_err_msg(std::string *sink, std::string msg)
{
if (sink && !msg.empty())
*sink = msg;
}
/*
* Dump either the full user info or a subset to a formatter.
*
* NOTE: It is the caller's responsibility to ensure that the
* formatter is flushed at the correct time.
*/
static void dump_subusers_info(Formatter *f, RGWUserInfo &info)
{
map<string, RGWSubUser>::iterator uiter;
f->open_array_section("subusers");
for (uiter = info.subusers.begin(); uiter != info.subusers.end(); ++uiter) {
RGWSubUser& u = uiter->second;
f->open_object_section("user");
string s;
info.user_id.to_str(s);
f->dump_format("id", "%s:%s", s.c_str(), u.name.c_str());
char buf[256];
rgw_perm_to_str(u.perm_mask, buf, sizeof(buf));
f->dump_string("permissions", buf);
f->close_section();
}
f->close_section();
}
static void dump_access_keys_info(Formatter *f, RGWUserInfo &info)
{
map<string, RGWAccessKey>::iterator kiter;
f->open_array_section("keys");
for (kiter = info.access_keys.begin(); kiter != info.access_keys.end(); ++kiter) {
RGWAccessKey& k = kiter->second;
const char *sep = (k.subuser.empty() ? "" : ":");
const char *subuser = (k.subuser.empty() ? "" : k.subuser.c_str());
f->open_object_section("key");
string s;
info.user_id.to_str(s);
f->dump_format("user", "%s%s%s", s.c_str(), sep, subuser);
f->dump_string("access_key", k.id);
f->dump_string("secret_key", k.key);
f->close_section();
}
f->close_section();
}
static void dump_swift_keys_info(Formatter *f, RGWUserInfo &info)
{
map<string, RGWAccessKey>::iterator kiter;
f->open_array_section("swift_keys");
for (kiter = info.swift_keys.begin(); kiter != info.swift_keys.end(); ++kiter) {
RGWAccessKey& k = kiter->second;
const char *sep = (k.subuser.empty() ? "" : ":");
const char *subuser = (k.subuser.empty() ? "" : k.subuser.c_str());
f->open_object_section("key");
string s;
info.user_id.to_str(s);
f->dump_format("user", "%s%s%s", s.c_str(), sep, subuser);
f->dump_string("secret_key", k.key);
f->close_section();
}
f->close_section();
}
static void dump_user_info(Formatter *f, RGWUserInfo &info,
RGWStorageStats *stats = NULL)
{
f->open_object_section("user_info");
encode_json("tenant", info.user_id.tenant, f);
encode_json("user_id", info.user_id.id, f);
encode_json("display_name", info.display_name, f);
encode_json("email", info.user_email, f);
encode_json("suspended", (int)info.suspended, f);
encode_json("max_buckets", (int)info.max_buckets, f);
dump_subusers_info(f, info);
dump_access_keys_info(f, info);
dump_swift_keys_info(f, info);
encode_json("caps", info.caps, f);
char buf[256];
op_type_to_str(info.op_mask, buf, sizeof(buf));
encode_json("op_mask", (const char *)buf, f);
encode_json("system", (bool)info.system, f);
encode_json("admin", (bool)info.admin, f);
encode_json("default_placement", info.default_placement.name, f);
encode_json("default_storage_class", info.default_placement.storage_class, f);
encode_json("placement_tags", info.placement_tags, f);
encode_json("bucket_quota", info.quota.bucket_quota, f);
encode_json("user_quota", info.quota.user_quota, f);
encode_json("temp_url_keys", info.temp_url_keys, f);
string user_source_type;
switch ((RGWIdentityType)info.type) {
case TYPE_RGW:
user_source_type = "rgw";
break;
case TYPE_KEYSTONE:
user_source_type = "keystone";
break;
case TYPE_LDAP:
user_source_type = "ldap";
break;
  case TYPE_NONE:
  default:
    user_source_type = "none";
    break;
  }
encode_json("type", user_source_type, f);
encode_json("mfa_ids", info.mfa_ids, f);
if (stats) {
encode_json("stats", *stats, f);
}
f->close_section();
}
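// Hedged sketch of the JSON shape dump_user_info() produces; field order
// follows the encode_json calls above and all values are illustrative:
//   { "tenant": "", "user_id": "alice", "display_name": "Alice", "email": "",
//     "suspended": 0, "max_buckets": 1000, "subusers": [], "keys": [],
//     "swift_keys": [], "caps": [], "op_mask": "read, write, delete",
//     "system": false, "admin": false, "default_placement": "",
//     "default_storage_class": "", "placement_tags": [],
//     "bucket_quota": {...}, "user_quota": {...}, "temp_url_keys": [],
//     "type": "rgw", "mfa_ids": [] }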
static int user_add_helper(RGWUserAdminOpState& op_state, std::string *err_msg)
{
int ret = 0;
const rgw_user& uid = op_state.get_user_id();
std::string user_email = op_state.get_user_email();
std::string display_name = op_state.get_display_name();
// fail if the user exists already
if (op_state.has_existing_user()) {
if (op_state.found_by_email) {
set_err_msg(err_msg, "email: " + user_email +
" is the email address of an existing user");
ret = -ERR_EMAIL_EXIST;
} else if (op_state.found_by_key) {
set_err_msg(err_msg, "duplicate key provided");
ret = -ERR_KEY_EXIST;
} else {
set_err_msg(err_msg, "user: " + uid.to_str() + " exists");
ret = -EEXIST;
}
return ret;
}
// fail if the user_info has already been populated
if (op_state.is_populated()) {
set_err_msg(err_msg, "cannot overwrite already populated user");
return -EEXIST;
}
// fail if the display name was not included
if (display_name.empty()) {
set_err_msg(err_msg, "no display name specified");
return -EINVAL;
}
return ret;
}
RGWAccessKeyPool::RGWAccessKeyPool(RGWUser* usr)
{
if (!usr) {
return;
}
user = usr;
driver = user->get_driver();
}
int RGWAccessKeyPool::init(RGWUserAdminOpState& op_state)
{
if (!op_state.is_initialized()) {
keys_allowed = false;
return -EINVAL;
}
const rgw_user& uid = op_state.get_user_id();
if (uid.compare(RGW_USER_ANON_ID) == 0) {
keys_allowed = false;
return -EINVAL;
}
swift_keys = op_state.get_swift_keys();
access_keys = op_state.get_access_keys();
keys_allowed = true;
return 0;
}
RGWUserAdminOpState::RGWUserAdminOpState(rgw::sal::Driver* driver)
{
user = driver->get_user(rgw_user(RGW_USER_ANON_ID));
}
void RGWUserAdminOpState::set_user_id(const rgw_user& id)
{
if (id.empty())
return;
user->get_info().user_id = id;
}
void RGWUserAdminOpState::set_subuser(std::string& _subuser)
{
if (_subuser.empty())
return;
size_t pos = _subuser.find(":");
if (pos != string::npos) {
rgw_user tmp_id;
tmp_id.from_str(_subuser.substr(0, pos));
if (tmp_id.tenant.empty()) {
user->get_info().user_id.id = tmp_id.id;
} else {
user->get_info().user_id = tmp_id;
}
subuser = _subuser.substr(pos+1);
} else {
subuser = _subuser;
}
subuser_specified = true;
}
void RGWUserAdminOpState::set_user_info(RGWUserInfo& user_info)
{
user->get_info() = user_info;
}
void RGWUserAdminOpState::set_user_version_tracker(RGWObjVersionTracker& objv_tracker)
{
user->get_version_tracker() = objv_tracker;
}
const rgw_user& RGWUserAdminOpState::get_user_id()
{
return user->get_id();
}
RGWUserInfo& RGWUserAdminOpState::get_user_info()
{
return user->get_info();
}
map<std::string, RGWAccessKey>* RGWUserAdminOpState::get_swift_keys()
{
return &user->get_info().swift_keys;
}
map<std::string, RGWAccessKey>* RGWUserAdminOpState::get_access_keys()
{
return &user->get_info().access_keys;
}
map<std::string, RGWSubUser>* RGWUserAdminOpState::get_subusers()
{
return &user->get_info().subusers;
}
RGWUserCaps *RGWUserAdminOpState::get_caps_obj()
{
return &user->get_info().caps;
}
std::string RGWUserAdminOpState::build_default_swift_kid()
{
if (user->get_id().empty() || subuser.empty())
return "";
std::string kid;
user->get_id().to_str(kid);
kid.append(":");
kid.append(subuser);
return kid;
}
std::string RGWUserAdminOpState::generate_subuser() {
if (user->get_id().empty())
return "";
std::string generated_subuser;
user->get_id().to_str(generated_subuser);
std::string rand_suffix;
int sub_buf_size = RAND_SUBUSER_LEN + 1;
char sub_buf[RAND_SUBUSER_LEN + 1];
gen_rand_alphanumeric_upper(g_ceph_context, sub_buf, sub_buf_size);
rand_suffix = sub_buf;
if (rand_suffix.empty())
return "";
generated_subuser.append(rand_suffix);
subuser = generated_subuser;
return generated_subuser;
}
/*
* Do a fairly exhaustive search for an existing key matching the parameters
* given. Also handles the case where no key type was specified and updates
* the operation state if needed.
*/
bool RGWAccessKeyPool::check_existing_key(RGWUserAdminOpState& op_state)
{
bool existing_key = false;
int key_type = op_state.get_key_type();
std::string kid = op_state.get_access_key();
std::map<std::string, RGWAccessKey>::iterator kiter;
std::string swift_kid = op_state.build_default_swift_kid();
RGWUserInfo dup_info;
if (kid.empty() && swift_kid.empty())
return false;
switch (key_type) {
case KEY_TYPE_SWIFT:
kiter = swift_keys->find(swift_kid);
existing_key = (kiter != swift_keys->end());
if (existing_key)
op_state.set_access_key(swift_kid);
break;
case KEY_TYPE_S3:
kiter = access_keys->find(kid);
existing_key = (kiter != access_keys->end());
break;
default:
kiter = access_keys->find(kid);
existing_key = (kiter != access_keys->end());
if (existing_key) {
op_state.set_key_type(KEY_TYPE_S3);
break;
}
kiter = swift_keys->find(kid);
existing_key = (kiter != swift_keys->end());
if (existing_key) {
op_state.set_key_type(KEY_TYPE_SWIFT);
break;
}
// handle the case where the access key was not provided in user:key format
if (swift_kid.empty())
return false;
kiter = swift_keys->find(swift_kid);
existing_key = (kiter != swift_keys->end());
if (existing_key) {
op_state.set_access_key(swift_kid);
op_state.set_key_type(KEY_TYPE_SWIFT);
}
}
op_state.set_existing_key(existing_key);
return existing_key;
}
int RGWAccessKeyPool::check_op(RGWUserAdminOpState& op_state,
std::string *err_msg)
{
RGWUserInfo dup_info;
if (!op_state.is_populated()) {
set_err_msg(err_msg, "user info was not populated");
return -EINVAL;
}
if (!keys_allowed) {
set_err_msg(err_msg, "keys not allowed for this user");
return -EACCES;
}
int32_t key_type = op_state.get_key_type();
// if a key type wasn't specified
if (key_type < 0) {
if (op_state.has_subuser()) {
key_type = KEY_TYPE_SWIFT;
} else {
key_type = KEY_TYPE_S3;
}
}
op_state.set_key_type(key_type);
/* see if the access key was specified */
if (key_type == KEY_TYPE_S3 && !op_state.will_gen_access() &&
op_state.get_access_key().empty()) {
set_err_msg(err_msg, "empty access key");
return -ERR_INVALID_ACCESS_KEY;
}
// don't check for secret key because we may be doing a removal
if (check_existing_key(op_state)) {
op_state.set_access_key_exist();
}
return 0;
}
// Generate a new random key
int RGWAccessKeyPool::generate_key(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state,
optional_yield y, std::string *err_msg)
{
std::string id;
std::string key;
std::pair<std::string, RGWAccessKey> key_pair;
RGWAccessKey new_key;
std::unique_ptr<rgw::sal::User> duplicate_check;
int key_type = op_state.get_key_type();
bool gen_access = op_state.will_gen_access();
bool gen_secret = op_state.will_gen_secret();
if (!keys_allowed) {
set_err_msg(err_msg, "access keys not allowed for this user");
return -EACCES;
}
if (op_state.has_existing_key()) {
set_err_msg(err_msg, "cannot create existing key");
return -ERR_KEY_EXIST;
}
if (!gen_access) {
id = op_state.get_access_key();
}
if (!id.empty()) {
switch (key_type) {
case KEY_TYPE_SWIFT:
if (driver->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) {
set_err_msg(err_msg, "existing swift key in RGW system:" + id);
return -ERR_KEY_EXIST;
}
break;
case KEY_TYPE_S3:
if (driver->get_user_by_access_key(dpp, id, y, &duplicate_check) >= 0) {
set_err_msg(err_msg, "existing S3 key in RGW system:" + id);
return -ERR_KEY_EXIST;
}
}
}
//key's subuser
if (op_state.has_subuser()) {
    //create user and subuser at the same time; the user's s3 key should not be set in this case
if (!op_state.key_type_setbycontext || (key_type == KEY_TYPE_SWIFT)) {
new_key.subuser = op_state.get_subuser();
}
}
//Secret key
if (!gen_secret) {
if (op_state.get_secret_key().empty()) {
set_err_msg(err_msg, "empty secret key");
return -ERR_INVALID_SECRET_KEY;
}
key = op_state.get_secret_key();
} else {
char secret_key_buf[SECRET_KEY_LEN + 1];
gen_rand_alphanumeric_plain(g_ceph_context, secret_key_buf, sizeof(secret_key_buf));
key = secret_key_buf;
}
// Generate the access key
if (key_type == KEY_TYPE_S3 && gen_access) {
char public_id_buf[PUBLIC_ID_LEN + 1];
do {
int id_buf_size = sizeof(public_id_buf);
gen_rand_alphanumeric_upper(g_ceph_context, public_id_buf, id_buf_size);
id = public_id_buf;
      if (!validate_access_key(id))
        continue;
      // get_user_by_access_key() returns 0 when the key already belongs to a
      // user, so keep generating until the lookup fails
    } while (!driver->get_user_by_access_key(dpp, id, y, &duplicate_check));
}
if (key_type == KEY_TYPE_SWIFT) {
id = op_state.build_default_swift_kid();
if (id.empty()) {
set_err_msg(err_msg, "empty swift access key");
return -ERR_INVALID_ACCESS_KEY;
}
// check that the access key doesn't exist
if (driver->get_user_by_swift(dpp, id, y, &duplicate_check) >= 0) {
set_err_msg(err_msg, "cannot create existing swift key");
return -ERR_KEY_EXIST;
}
}
// finally create the new key
new_key.id = id;
new_key.key = key;
key_pair.first = id;
key_pair.second = new_key;
if (key_type == KEY_TYPE_S3) {
access_keys->insert(key_pair);
} else if (key_type == KEY_TYPE_SWIFT) {
swift_keys->insert(key_pair);
}
return 0;
}
// modify an existing key
int RGWAccessKeyPool::modify_key(RGWUserAdminOpState& op_state, std::string *err_msg)
{
std::string id;
std::string key = op_state.get_secret_key();
int key_type = op_state.get_key_type();
RGWAccessKey modify_key;
pair<string, RGWAccessKey> key_pair;
map<std::string, RGWAccessKey>::iterator kiter;
switch (key_type) {
case KEY_TYPE_S3:
id = op_state.get_access_key();
if (id.empty()) {
set_err_msg(err_msg, "no access key specified");
return -ERR_INVALID_ACCESS_KEY;
}
break;
case KEY_TYPE_SWIFT:
id = op_state.build_default_swift_kid();
if (id.empty()) {
set_err_msg(err_msg, "no subuser specified");
return -EINVAL;
}
break;
default:
set_err_msg(err_msg, "invalid key type");
return -ERR_INVALID_KEY_TYPE;
}
if (!op_state.has_existing_key()) {
set_err_msg(err_msg, "key does not exist");
return -ERR_INVALID_ACCESS_KEY;
}
key_pair.first = id;
if (key_type == KEY_TYPE_SWIFT) {
modify_key.id = id;
modify_key.subuser = op_state.get_subuser();
} else if (key_type == KEY_TYPE_S3) {
kiter = access_keys->find(id);
if (kiter != access_keys->end()) {
modify_key = kiter->second;
}
}
if (op_state.will_gen_secret()) {
char secret_key_buf[SECRET_KEY_LEN + 1];
int key_buf_size = sizeof(secret_key_buf);
gen_rand_alphanumeric_plain(g_ceph_context, secret_key_buf, key_buf_size);
key = secret_key_buf;
}
if (key.empty()) {
set_err_msg(err_msg, "empty secret key");
return -ERR_INVALID_SECRET_KEY;
}
// update the access key with the new secret key
modify_key.key = key;
key_pair.second = modify_key;
if (key_type == KEY_TYPE_S3) {
(*access_keys)[id] = modify_key;
} else if (key_type == KEY_TYPE_SWIFT) {
(*swift_keys)[id] = modify_key;
}
return 0;
}
int RGWAccessKeyPool::execute_add(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state,
std::string *err_msg, bool defer_user_update,
optional_yield y)
{
int ret = 0;
std::string subprocess_msg;
int key_op = GENERATE_KEY;
// set the op
if (op_state.has_existing_key())
key_op = MODIFY_KEY;
switch (key_op) {
case GENERATE_KEY:
ret = generate_key(dpp, op_state, y, &subprocess_msg);
break;
case MODIFY_KEY:
ret = modify_key(op_state, &subprocess_msg);
break;
}
if (ret < 0) {
set_err_msg(err_msg, subprocess_msg);
return ret;
}
// store the updated info
if (!defer_user_update)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWAccessKeyPool::add(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg)
{
return add(dpp, op_state, err_msg, false, y);
}
int RGWAccessKeyPool::add(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_user_update, optional_yield y)
{
int ret;
std::string subprocess_msg;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse request, " + subprocess_msg);
return ret;
}
ret = execute_add(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to add access key, " + subprocess_msg);
return ret;
}
return 0;
}
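// look up the key in the matching (s3 or swift) key map, erase it,
// and persist the user unless the update is deferred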
int RGWAccessKeyPool::execute_remove(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state,
std::string *err_msg,
bool defer_user_update,
optional_yield y)
{
int ret = 0;
int key_type = op_state.get_key_type();
std::string id = op_state.get_access_key();
map<std::string, RGWAccessKey>::iterator kiter;
map<std::string, RGWAccessKey> *keys_map;
if (!op_state.has_existing_key()) {
set_err_msg(err_msg, "unable to find access key, with key type: " +
key_type_to_str(key_type));
return -ERR_INVALID_ACCESS_KEY;
}
if (key_type == KEY_TYPE_S3) {
keys_map = access_keys;
} else if (key_type == KEY_TYPE_SWIFT) {
keys_map = swift_keys;
} else {
keys_map = NULL;
set_err_msg(err_msg, "invalid access key");
return -ERR_INVALID_ACCESS_KEY;
}
kiter = keys_map->find(id);
if (kiter == keys_map->end()) {
set_err_msg(err_msg, "key not found");
return -ERR_INVALID_ACCESS_KEY;
}
keys_map->erase(kiter);
if (!defer_user_update)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWAccessKeyPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg)
{
return remove(dpp, op_state, err_msg, false, y);
}
int RGWAccessKeyPool::remove(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state,
std::string *err_msg, bool defer_user_update,
optional_yield y)
{
int ret;
std::string subprocess_msg;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse request, " + subprocess_msg);
return ret;
}
ret = execute_remove(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to remove access key, " + subprocess_msg);
return ret;
}
return 0;
}
// remove all keys associated with a subuser
int RGWAccessKeyPool::remove_subuser_keys(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state,
std::string *err_msg,
bool defer_user_update,
optional_yield y)
{
int ret = 0;
if (!op_state.is_populated()) {
set_err_msg(err_msg, "user info was not populated");
return -EINVAL;
}
if (!op_state.has_subuser()) {
set_err_msg(err_msg, "no subuser specified");
return -EINVAL;
}
std::string swift_kid = op_state.build_default_swift_kid();
if (swift_kid.empty()) {
set_err_msg(err_msg, "empty swift access key");
return -EINVAL;
}
map<std::string, RGWAccessKey>::iterator kiter;
map<std::string, RGWAccessKey> *keys_map;
// a subuser can have at most one swift key
keys_map = swift_keys;
kiter = keys_map->find(swift_kid);
if (kiter != keys_map->end()) {
keys_map->erase(kiter);
}
// a subuser may have multiple s3 key pairs
std::string subuser_str = op_state.get_subuser();
keys_map = access_keys;
RGWUserInfo user_info = op_state.get_user_info();
auto user_kiter = user_info.access_keys.begin();
for (; user_kiter != user_info.access_keys.end(); ++user_kiter) {
if (user_kiter->second.subuser == subuser_str) {
kiter = keys_map->find(user_kiter->first);
if (kiter != keys_map->end()) {
keys_map->erase(kiter);
}
}
}
if (!defer_user_update)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
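// RGWSubUserPool wraps the subuser map of a single user; its add,
// modify and remove operations keep the matching swift/s3 keys in
// sync through the user's RGWAccessKeyPool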
RGWSubUserPool::RGWSubUserPool(RGWUser *usr)
{
if (!usr) {
return;
}
user = usr;
subusers_allowed = true;
driver = user->get_driver();
}
int RGWSubUserPool::init(RGWUserAdminOpState& op_state)
{
if (!op_state.is_initialized()) {
subusers_allowed = false;
return -EINVAL;
}
const rgw_user& uid = op_state.get_user_id();
if (uid.compare(RGW_USER_ANON_ID) == 0) {
subusers_allowed = false;
return -EACCES;
}
subuser_map = op_state.get_subusers();
if (subuser_map == NULL) {
subusers_allowed = false;
return -EINVAL;
}
subusers_allowed = true;
return 0;
}
bool RGWSubUserPool::exists(std::string subuser)
{
if (subuser.empty())
return false;
if (!subuser_map)
return false;
if (subuser_map->count(subuser))
return true;
return false;
}
int RGWSubUserPool::check_op(RGWUserAdminOpState& op_state,
std::string *err_msg)
{
bool existing = false;
std::string subuser = op_state.get_subuser();
if (!op_state.is_populated()) {
set_err_msg(err_msg, "user info was not populated");
return -EINVAL;
}
if (!subusers_allowed) {
set_err_msg(err_msg, "subusers not allowed for this user");
return -EACCES;
}
if (subuser.empty() && !op_state.will_gen_subuser()) {
set_err_msg(err_msg, "empty subuser name");
return -EINVAL;
}
if (op_state.get_subuser_perm() == RGW_PERM_INVALID) {
set_err_msg(err_msg, "invalid subuser access");
return -EINVAL;
}
// default the key type when it is unset, or was only set by context
if ((op_state.get_key_type() < 0) || op_state.key_type_setbycontext) {
op_state.set_key_type(KEY_TYPE_SWIFT);
op_state.key_type_setbycontext = true;
}
// check if the subuser exists
if (!subuser.empty())
existing = exists(subuser);
op_state.set_existing_subuser(existing);
return 0;
}
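// create the subuser (and its key, when a key op is pending), insert
// it into the user's subuser map, and write the user record unless
// defer_user_update is set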
int RGWSubUserPool::execute_add(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state,
std::string *err_msg, bool defer_user_update,
optional_yield y)
{
int ret = 0;
std::string subprocess_msg;
RGWSubUser subuser;
std::pair<std::string, RGWSubUser> subuser_pair;
std::string subuser_str = op_state.get_subuser();
subuser_pair.first = subuser_str;
// a pending key op implies a key should be created for the subuser
if (op_state.has_key_op()) {
ret = user->keys.add(dpp, op_state, &subprocess_msg, true, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to create subuser key, " + subprocess_msg);
return ret;
}
}
// create the subuser
subuser.name = subuser_str;
if (op_state.has_subuser_perm())
subuser.perm_mask = op_state.get_subuser_perm();
// insert the subuser into user info
subuser_pair.second = subuser;
subuser_map->insert(subuser_pair);
// attempt to save the subuser
if (!defer_user_update)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWSubUserPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg)
{
return add(dpp, op_state, err_msg, false, y);
}
int RGWSubUserPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y)
{
std::string subprocess_msg;
int ret;
int32_t key_type = op_state.get_key_type();
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse request, " + subprocess_msg);
return ret;
}
if (op_state.get_access_key_exist()) {
set_err_msg(err_msg, "cannot create existing key");
return -ERR_KEY_EXIST;
}
if (key_type == KEY_TYPE_S3 && op_state.get_access_key().empty()) {
op_state.set_gen_access();
}
if (op_state.get_secret_key().empty()) {
op_state.set_gen_secret();
}
ret = execute_add(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to create subuser, " + subprocess_msg);
return ret;
}
return 0;
}
int RGWSubUserPool::execute_remove(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state,
std::string *err_msg, bool defer_user_update,
optional_yield y)
{
int ret = 0;
std::string subprocess_msg;
std::string subuser_str = op_state.get_subuser();
map<std::string, RGWSubUser>::iterator siter;
siter = subuser_map->find(subuser_str);
if (siter == subuser_map->end()){
set_err_msg(err_msg, "subuser not found: " + subuser_str);
return -ERR_NO_SUCH_SUBUSER;
}
if (!op_state.has_existing_subuser()) {
set_err_msg(err_msg, "subuser not found: " + subuser_str);
return -ERR_NO_SUCH_SUBUSER;
}
// always purge all associated keys
user->keys.remove_subuser_keys(dpp, op_state, &subprocess_msg, true, y);
// remove the subuser from the user info
subuser_map->erase(siter);
// attempt to save the subuser
if (!defer_user_update)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWSubUserPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg)
{
return remove(dpp, op_state, err_msg, false, y);
}
int RGWSubUserPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_user_update, optional_yield y)
{
std::string subprocess_msg;
int ret;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse request, " + subprocess_msg);
return ret;
}
ret = execute_remove(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to remove subuser, " + subprocess_msg);
return ret;
}
return 0;
}
int RGWSubUserPool::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_user_update, optional_yield y)
{
int ret = 0;
std::string subprocess_msg;
std::map<std::string, RGWSubUser>::iterator siter;
std::pair<std::string, RGWSubUser> subuser_pair;
std::string subuser_str = op_state.get_subuser();
RGWSubUser subuser;
if (!op_state.has_existing_subuser()) {
set_err_msg(err_msg, "subuser does not exist");
return -ERR_NO_SUCH_SUBUSER;
}
subuser_pair.first = subuser_str;
siter = subuser_map->find(subuser_str);
subuser = siter->second;
if (op_state.has_key_op()) {
ret = user->keys.add(dpp, op_state, &subprocess_msg, true, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to create subuser keys, " + subprocess_msg);
return ret;
}
}
if (op_state.has_subuser_perm())
subuser.perm_mask = op_state.get_subuser_perm();
subuser_pair.second = subuser;
subuser_map->erase(siter);
subuser_map->insert(subuser_pair);
// attempt to save the subuser
if (!defer_user_update)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWSubUserPool::modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg)
{
return RGWSubUserPool::modify(dpp, op_state, y, err_msg, false);
}
int RGWSubUserPool::modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg, bool defer_user_update)
{
std::string subprocess_msg;
int ret;
RGWSubUser subuser;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse request, " + subprocess_msg);
return ret;
}
ret = execute_modify(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to modify subuser, " + subprocess_msg);
return ret;
}
return 0;
}
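// RGWUserCapPool manages a user's admin capability set; caps are
// supplied as strings of the form "type=perms", e.g.
// "usage=read, write; users=read", parsed by RGWUserCaps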
RGWUserCapPool::RGWUserCapPool(RGWUser *usr)
{
if (!usr) {
return;
}
user = usr;
caps_allowed = true;
}
int RGWUserCapPool::init(RGWUserAdminOpState& op_state)
{
if (!op_state.is_initialized()) {
caps_allowed = false;
return -EINVAL;
}
const rgw_user& uid = op_state.get_user_id();
if (uid.compare(RGW_USER_ANON_ID) == 0) {
caps_allowed = false;
return -EACCES;
}
caps = op_state.get_caps_obj();
if (!caps) {
caps_allowed = false;
return -ERR_INVALID_CAP;
}
caps_allowed = true;
return 0;
}
int RGWUserCapPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg)
{
return add(dpp, op_state, err_msg, false, y);
}
int RGWUserCapPool::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_save, optional_yield y)
{
int ret = 0;
std::string caps_str = op_state.get_caps();
if (!op_state.is_populated()) {
set_err_msg(err_msg, "user info was not populated");
return -EINVAL;
}
if (!caps_allowed) {
set_err_msg(err_msg, "caps not allowed for this user");
return -EACCES;
}
if (caps_str.empty()) {
set_err_msg(err_msg, "empty user caps");
return -ERR_INVALID_CAP;
}
int r = caps->add_from_string(caps_str);
if (r < 0) {
set_err_msg(err_msg, "unable to add caps: " + caps_str);
return r;
}
if (!defer_save)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWUserCapPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg)
{
return remove(dpp, op_state, err_msg, false, y);
}
int RGWUserCapPool::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_save, optional_yield y)
{
int ret = 0;
std::string caps_str = op_state.get_caps();
if (!op_state.is_populated()) {
set_err_msg(err_msg, "user info was not populated");
return -EINVAL;
}
if (!caps_allowed) {
set_err_msg(err_msg, "caps not allowed for this user");
return -EACCES;
}
if (caps_str.empty()) {
set_err_msg(err_msg, "empty user caps");
return -ERR_INVALID_CAP;
}
int r = caps->remove_from_string(caps_str);
if (r < 0) {
set_err_msg(err_msg, "unable to remove caps: " + caps_str);
return r;
}
if (!defer_save)
ret = user->update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
RGWUser::RGWUser() : caps(this), keys(this), subusers(this)
{
init_default();
}
int RGWUser::init(const DoutPrefixProvider *dpp, rgw::sal::Driver* _driver,
RGWUserAdminOpState& op_state, optional_yield y)
{
init_default();
int ret = init_storage(_driver);
if (ret < 0)
return ret;
ret = init(dpp, op_state, y);
if (ret < 0)
return ret;
return 0;
}
void RGWUser::init_default()
{
// use anonymous user info as a placeholder
rgw_get_anon_user(old_info);
user_id = RGW_USER_ANON_ID;
clear_populated();
}
int RGWUser::init_storage(rgw::sal::Driver* _driver)
{
if (!_driver) {
return -EINVAL;
}
driver = _driver;
clear_populated();
/* API wrappers */
keys = RGWAccessKeyPool(this);
caps = RGWUserCapPool(this);
subusers = RGWSubUserPool(this);
return 0;
}
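// resolve the user record from whichever identifier was supplied,
// trying uid first, then email (when rgw_user_unique_email is set),
// then swift key, then s3 access key; on success both the op state
// and this object are populated with the stored user info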
int RGWUser::init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y)
{
bool found = false;
std::string swift_user;
user_id = op_state.get_user_id();
std::string user_email = op_state.get_user_email();
std::string access_key = op_state.get_access_key();
std::string subuser = op_state.get_subuser();
int key_type = op_state.get_key_type();
if (key_type == KEY_TYPE_SWIFT) {
swift_user = op_state.get_access_key();
access_key.clear();
}
std::unique_ptr<rgw::sal::User> user;
clear_populated();
if (user_id.empty() && !subuser.empty()) {
size_t pos = subuser.find(':');
if (pos != string::npos) {
user_id = subuser.substr(0, pos);
op_state.set_user_id(user_id);
}
}
if (!user_id.empty() && (user_id.compare(RGW_USER_ANON_ID) != 0)) {
user = driver->get_user(user_id);
found = (user->load_user(dpp, y) >= 0);
op_state.found_by_uid = found;
}
if (driver->ctx()->_conf.get_val<bool>("rgw_user_unique_email")) {
if (!user_email.empty() && !found) {
found = (driver->get_user_by_email(dpp, user_email, y, &user) >= 0);
op_state.found_by_email = found;
}
}
if (!swift_user.empty() && !found) {
found = (driver->get_user_by_swift(dpp, swift_user, y, &user) >= 0);
op_state.found_by_key = found;
}
if (!access_key.empty() && !found) {
found = (driver->get_user_by_access_key(dpp, access_key, y, &user) >= 0);
op_state.found_by_key = found;
}
op_state.set_existing_user(found);
if (found) {
op_state.set_user_info(user->get_info());
op_state.set_populated();
op_state.objv = user->get_version_tracker();
op_state.set_user_version_tracker(user->get_version_tracker());
old_info = user->get_info();
set_populated();
}
if (user_id.empty()) {
user_id = user->get_id();
}
op_state.set_initialized();
// this may have been called by a helper object
int ret = init_members(op_state);
if (ret < 0)
return ret;
return 0;
}
int RGWUser::init_members(RGWUserAdminOpState& op_state)
{
int ret = 0;
ret = keys.init(op_state);
if (ret < 0)
return ret;
ret = subusers.init(op_state);
if (ret < 0)
return ret;
ret = caps.init(op_state);
if (ret < 0)
return ret;
return 0;
}
int RGWUser::update(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
optional_yield y)
{
int ret;
std::string subprocess_msg;
rgw::sal::User* user = op_state.get_user();
if (!driver) {
set_err_msg(err_msg, "couldn't initialize storage");
return -EINVAL;
}
// if op_state.op_access_keys is not empty, the most recent keys have been fetched from the master zone
if(!op_state.op_access_keys.empty()) {
auto user_access_keys = op_state.get_access_keys();
*(user_access_keys) = op_state.op_access_keys;
}
RGWUserInfo *pold_info = (is_populated() ? &old_info : nullptr);
ret = user->store_user(dpp, y, false, pold_info);
op_state.objv = user->get_version_tracker();
op_state.set_user_version_tracker(user->get_version_tracker());
if (ret < 0) {
set_err_msg(err_msg, "unable to store user info");
return ret;
}
old_info = user->get_info();
set_populated();
return 0;
}
int RGWUser::check_op(RGWUserAdminOpState& op_state, std::string *err_msg)
{
int ret = 0;
const rgw_user& uid = op_state.get_user_id();
if (uid.compare(RGW_USER_ANON_ID) == 0) {
set_err_msg(err_msg, "unable to perform operations on the anonymous user");
return -EINVAL;
}
if (is_populated() && user_id.compare(uid) != 0) {
set_err_msg(err_msg, "user id mismatch, operation id: " + uid.to_str()
+ " does not match: " + user_id.to_str());
return -EINVAL;
}
ret = rgw_validate_tenant_name(uid.tenant);
if (ret) {
set_err_msg(err_msg,
"invalid tenant: only alphanumeric and _ characters are allowed");
return ret;
}
// default the key type when it is unset, or was only set by context
if ((op_state.get_key_type() < 0) || op_state.key_type_setbycontext) {
op_state.set_key_type(KEY_TYPE_S3);
op_state.key_type_setbycontext = true;
}
return 0;
}
// update swift_keys with new user id
static void rename_swift_keys(const rgw_user& user,
std::map<std::string, RGWAccessKey>& keys)
{
std::string user_id;
user.to_str(user_id);
auto modify_keys = std::move(keys);
for ([[maybe_unused]] auto& [k, key] : modify_keys) {
std::string id = user_id + ":" + key.subuser;
key.id = id;
keys[id] = std::move(key);
}
}
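// rename flow: write a stub record under the new uid (exclusive
// unless overwrite was requested), chown every bucket and its
// objects to the new user, then rewrite the full user record and
// its index objects; e.g. a swift key id "alice:sub1" becomes
// "bob:sub1" via rename_swift_keys()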
int RGWUser::execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y)
{
int ret;
bool populated = op_state.is_populated();
if (!op_state.has_existing_user() && !populated) {
set_err_msg(err_msg, "user not found");
return -ENOENT;
}
if (!populated) {
ret = init(dpp, op_state, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to retrieve user info");
return ret;
}
}
std::unique_ptr<rgw::sal::User> old_user = driver->get_user(op_state.get_user_info().user_id);
std::unique_ptr<rgw::sal::User> new_user = driver->get_user(op_state.get_new_uid());
if (old_user->get_tenant() != new_user->get_tenant()) {
set_err_msg(err_msg, "users have to be under the same tenant namespace "
+ old_user->get_tenant() + " != " + new_user->get_tenant());
return -EINVAL;
}
// create a stub user and write only the uid index and buckets object
std::unique_ptr<rgw::sal::User> user;
user = driver->get_user(new_user->get_id());
const bool exclusive = !op_state.get_overwrite_new_user(); // overwrite if requested
ret = user->store_user(dpp, y, exclusive);
if (ret == -EEXIST) {
set_err_msg(err_msg, "user name given by --new-uid already exists");
return ret;
}
if (ret < 0) {
set_err_msg(err_msg, "unable to store new user info");
return ret;
}
RGWAccessControlPolicy policy_instance;
policy_instance.create_default(new_user->get_id(), old_user->get_display_name());
// unlink buckets from the old user and link them to the new user
string marker;
CephContext *cct = driver->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
rgw::sal::BucketList buckets;
do {
ret = old_user->list_buckets(dpp, marker, "", max_buckets, false, buckets, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to list user buckets");
return ret;
}
auto& m = buckets.get_buckets();
for (auto it = m.begin(); it != m.end(); ++it) {
auto& bucket = it->second;
marker = it->first;
ret = bucket->load_bucket(dpp, y);
if (ret < 0) {
set_err_msg(err_msg, "failed to fetch bucket info for bucket=" + bucket->get_name());
return ret;
}
ret = bucket->set_acl(dpp, policy_instance, y);
if (ret < 0) {
set_err_msg(err_msg, "failed to set acl on bucket " + bucket->get_name());
return ret;
}
ret = rgw_chown_bucket_and_objects(driver, bucket.get(), new_user.get(),
std::string(), nullptr, dpp, y);
if (ret < 0) {
set_err_msg(err_msg, "failed to run bucket chown" + cpp_strerror(-ret));
return ret;
}
}
} while (buckets.is_truncated());
// update the 'stub user' with all of the other fields and rewrite all of the
// associated index objects
RGWUserInfo& user_info = op_state.get_user_info();
user_info.user_id = new_user->get_id();
op_state.objv = user->get_version_tracker();
op_state.set_user_version_tracker(user->get_version_tracker());
rename_swift_keys(new_user->get_id(), user_info.swift_keys);
return update(dpp, op_state, err_msg, y);
}
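// build a fresh RGWUserInfo from the op state (quotas, op mask,
// temp url keys, placement, ...), create any requested access key
// and caps with the user update deferred, then store everything in
// a single update() call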
int RGWUser::execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
optional_yield y)
{
const rgw_user& uid = op_state.get_user_id();
std::string user_email = op_state.get_user_email();
std::string display_name = op_state.get_display_name();
// set the user info
RGWUserInfo user_info;
user_id = uid;
user_info.user_id = user_id;
user_info.display_name = display_name;
user_info.type = TYPE_RGW;
if (!user_email.empty())
user_info.user_email = user_email;
CephContext *cct = driver->ctx();
if (op_state.max_buckets_specified) {
user_info.max_buckets = op_state.get_max_buckets();
} else {
user_info.max_buckets =
cct->_conf.get_val<int64_t>("rgw_user_max_buckets");
}
user_info.suspended = op_state.get_suspension_status();
user_info.admin = op_state.admin;
user_info.system = op_state.system;
if (op_state.op_mask_specified)
user_info.op_mask = op_state.get_op_mask();
if (op_state.has_bucket_quota()) {
user_info.quota.bucket_quota = op_state.get_bucket_quota();
} else {
rgw_apply_default_bucket_quota(user_info.quota.bucket_quota, cct->_conf);
}
if (op_state.temp_url_key_specified) {
map<int, string>::iterator iter;
for (iter = op_state.temp_url_keys.begin();
iter != op_state.temp_url_keys.end(); ++iter) {
user_info.temp_url_keys[iter->first] = iter->second;
}
}
if (op_state.has_user_quota()) {
user_info.quota.user_quota = op_state.get_user_quota();
} else {
rgw_apply_default_user_quota(user_info.quota.user_quota, cct->_conf);
}
if (op_state.default_placement_specified) {
user_info.default_placement = op_state.default_placement;
}
if (op_state.placement_tags_specified) {
user_info.placement_tags = op_state.placement_tags;
}
// update the request
op_state.set_user_info(user_info);
op_state.set_populated();
// update the helper objects
int ret = init_members(op_state);
if (ret < 0) {
set_err_msg(err_msg, "unable to initialize user");
return ret;
}
// see if we need to add an access key
std::string subprocess_msg;
bool defer_user_update = true;
if (op_state.has_key_op()) {
ret = keys.add(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to create access key, " + subprocess_msg);
return ret;
}
}
// see if we need to add some caps
if (op_state.has_caps_op()) {
ret = caps.add(dpp, op_state, &subprocess_msg, defer_user_update, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to add user capabilities, " + subprocess_msg);
return ret;
}
}
ret = update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWUser::add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg)
{
std::string subprocess_msg;
int ret = user_add_helper(op_state, &subprocess_msg);
if (ret != 0) {
set_err_msg(err_msg, "unable to parse parameters, " + subprocess_msg);
return ret;
}
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse parameters, " + subprocess_msg);
return ret;
}
ret = execute_add(dpp, op_state, &subprocess_msg, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to create user, " + subprocess_msg);
return ret;
}
return 0;
}
int RGWUser::rename(RGWUserAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg)
{
std::string subprocess_msg;
int ret;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse parameters, " + subprocess_msg);
return ret;
}
ret = execute_rename(dpp, op_state, &subprocess_msg, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to rename user, " + subprocess_msg);
return ret;
}
return 0;
}
int RGWUser::execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y)
{
int ret;
bool purge_data = op_state.will_purge_data();
rgw::sal::User* user = op_state.get_user();
if (!op_state.has_existing_user()) {
set_err_msg(err_msg, "user does not exist");
return -ENOENT;
}
rgw::sal::BucketList buckets;
string marker;
CephContext *cct = driver->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
do {
ret = user->list_buckets(dpp, marker, string(), max_buckets, false, buckets, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to read user bucket info");
return ret;
}
auto& m = buckets.get_buckets();
if (!m.empty() && !purge_data) {
set_err_msg(err_msg, "must specify purge data to remove user with buckets");
return -EEXIST; // change to code that maps to 409: conflict
}
for (auto it = m.begin(); it != m.end(); ++it) {
ret = it->second->remove_bucket(dpp, true, false, nullptr, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to delete user data");
return ret;
}
marker = it->first;
}
} while (buckets.is_truncated());
ret = user->remove_user(dpp, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to remove user from RADOS");
return ret;
}
op_state.clear_populated();
clear_populated();
return 0;
}
int RGWUser::remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg)
{
std::string subprocess_msg;
int ret;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse parameters, " + subprocess_msg);
return ret;
}
ret = execute_remove(dpp, op_state, &subprocess_msg, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to remove user, " + subprocess_msg);
return ret;
}
return 0;
}
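// apply attribute changes to a copy of the stored user info; a new
// email is checked for uniqueness, and toggling suspension also
// enables/disables all of the user's buckets in chunks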
int RGWUser::execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y)
{
bool populated = op_state.is_populated();
int ret = 0;
std::string subprocess_msg;
std::string op_email = op_state.get_user_email();
std::string display_name = op_state.get_display_name();
RGWUserInfo user_info;
std::unique_ptr<rgw::sal::User> duplicate_check;
// ensure that the user info has been populated, or can be populated
if (!op_state.has_existing_user() && !populated) {
set_err_msg(err_msg, "user not found");
return -ENOENT;
}
// if the user info hasn't been populated yet, attempt to fetch it
if (!populated) {
ret = init(dpp, op_state, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to retrieve user info");
return ret;
}
}
// ensure that we can modify the user's attributes
if (user_id.compare(RGW_USER_ANON_ID) == 0) {
set_err_msg(err_msg, "unable to modify anonymous user's info");
return -EACCES;
}
user_info = old_info;
std::string old_email = old_info.user_email;
if (!op_email.empty()) {
// make sure we are not adding a duplicate email
if (old_email != op_email) {
ret = driver->get_user_by_email(dpp, op_email, y, &duplicate_check);
if (ret >= 0 && duplicate_check->get_id().compare(user_id) != 0) {
set_err_msg(err_msg, "cannot add duplicate email");
return -ERR_EMAIL_EXIST;
}
}
user_info.user_email = op_email;
} else if (op_email.empty() && op_state.user_email_specified) {
ldpp_dout(dpp, 10) << "removing email index: " << user_info.user_email << dendl;
/* will be physically removed later when calling update() */
user_info.user_email.clear();
}
// update the remaining user info
if (!display_name.empty())
user_info.display_name = display_name;
if (op_state.max_buckets_specified)
user_info.max_buckets = op_state.get_max_buckets();
if (op_state.admin_specified)
user_info.admin = op_state.admin;
if (op_state.system_specified)
user_info.system = op_state.system;
if (op_state.temp_url_key_specified) {
map<int, string>::iterator iter;
for (iter = op_state.temp_url_keys.begin();
iter != op_state.temp_url_keys.end(); ++iter) {
user_info.temp_url_keys[iter->first] = iter->second;
}
}
if (op_state.op_mask_specified)
user_info.op_mask = op_state.get_op_mask();
if (op_state.has_bucket_quota())
user_info.quota.bucket_quota = op_state.get_bucket_quota();
if (op_state.has_user_quota())
user_info.quota.user_quota = op_state.get_user_quota();
if (op_state.has_suspension_op()) {
__u8 suspended = op_state.get_suspension_status();
user_info.suspended = suspended;
rgw::sal::BucketList buckets;
if (user_id.empty()) {
set_err_msg(err_msg, "empty user id passed...aborting");
return -EINVAL;
}
string marker;
CephContext *cct = driver->ctx();
size_t max_buckets = cct->_conf->rgw_list_buckets_max_chunk;
std::unique_ptr<rgw::sal::User> user = driver->get_user(user_id);
do {
ret = user->list_buckets(dpp, marker, string(), max_buckets, false, buckets, y);
if (ret < 0) {
set_err_msg(err_msg, "could not get buckets for uid: " + user_id.to_str());
return ret;
}
auto& m = buckets.get_buckets();
vector<rgw_bucket> bucket_names;
for (auto iter = m.begin(); iter != m.end(); ++iter) {
auto& bucket = iter->second;
bucket_names.push_back(bucket->get_key());
marker = iter->first;
}
ret = driver->set_buckets_enabled(dpp, bucket_names, !suspended, y);
if (ret < 0) {
set_err_msg(err_msg, "failed to modify bucket");
return ret;
}
} while (buckets.is_truncated());
}
if (op_state.mfa_ids_specified) {
user_info.mfa_ids = op_state.mfa_ids;
}
if (op_state.default_placement_specified) {
user_info.default_placement = op_state.default_placement;
}
if (op_state.placement_tags_specified) {
user_info.placement_tags = op_state.placement_tags;
}
op_state.set_user_info(user_info);
// if we're supposed to modify keys, do so
if (op_state.has_key_op()) {
ret = keys.add(dpp, op_state, &subprocess_msg, true, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to create or modify keys, " + subprocess_msg);
return ret;
}
}
ret = update(dpp, op_state, err_msg, y);
if (ret < 0)
return ret;
return 0;
}
int RGWUser::modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg)
{
std::string subprocess_msg;
int ret;
ret = check_op(op_state, &subprocess_msg);
if (ret < 0) {
set_err_msg(err_msg, "unable to parse parameters, " + subprocess_msg);
return ret;
}
ret = execute_modify(dpp, op_state, &subprocess_msg, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to modify user, " + subprocess_msg);
return ret;
}
return 0;
}
int RGWUser::info(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWUserInfo& fetched_info,
optional_yield y, std::string *err_msg)
{
int ret = init(dpp, op_state, y);
if (ret < 0) {
set_err_msg(err_msg, "unable to fetch user info");
return ret;
}
fetched_info = op_state.get_user_info();
return 0;
}
int RGWUser::info(RGWUserInfo& fetched_info, std::string *err_msg)
{
if (!is_populated()) {
set_err_msg(err_msg, "no user info saved");
return -EINVAL;
}
fetched_info = old_info;
return 0;
}
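// stream user metadata keys through the formatter, producing JSON
// along the lines of
//   { "keys": ["alice", "bob"], "truncated": false, "count": 2 }
// with a "marker" field appended when the listing is truncated;
// max_entries is clamped to 1000 per call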
int RGWUser::list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher)
{
Formatter *formatter = flusher.get_formatter();
void *handle = nullptr;
std::string metadata_key = "user";
if (op_state.max_entries > 1000) {
op_state.max_entries = 1000;
}
int ret = driver->meta_list_keys_init(dpp, metadata_key, op_state.marker, &handle);
if (ret < 0) {
return ret;
}
bool truncated = false;
uint64_t count = 0;
uint64_t left = 0;
flusher.start(0);
// open the result object section
formatter->open_object_section("result");
// open the user id list array section
formatter->open_array_section("keys");
do {
std::list<std::string> keys;
left = op_state.max_entries - count;
ret = driver->meta_list_keys_next(dpp, handle, left, keys, &truncated);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
if (ret != -ENOENT) {
for (std::list<std::string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) {
formatter->dump_string("key", *iter);
++count;
}
}
} while (truncated && left > 0);
// close user id list section
formatter->close_section();
formatter->dump_bool("truncated", truncated);
formatter->dump_int("count", count);
if (truncated) {
formatter->dump_string("marker", driver->meta_get_marker(handle));
}
// close result object section
formatter->close_section();
driver->meta_list_keys_complete(handle);
flusher.flush();
return 0;
}
int RGWUserAdminOp_User::list(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher)
{
RGWUser user;
int ret = user.init_storage(driver);
if (ret < 0)
return ret;
ret = user.list(dpp, op_state, flusher);
if (ret < 0)
return ret;
return 0;
}
int RGWUserAdminOp_User::info(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
std::unique_ptr<rgw::sal::User> ruser;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
Formatter *formatter = flusher.get_formatter();
ret = user.info(info, NULL);
if (ret < 0)
return ret;
ruser = driver->get_user(info.user_id);
if (op_state.sync_stats) {
ret = rgw_user_sync_all_stats(dpp, driver, ruser.get(), y);
if (ret < 0) {
return ret;
}
}
RGWStorageStats stats;
RGWStorageStats *arg_stats = NULL;
if (op_state.fetch_stats) {
int ret = ruser->read_stats(dpp, y, &stats);
if (ret < 0 && ret != -ENOENT) {
return ret;
}
arg_stats = &stats;
}
if (formatter) {
flusher.start(0);
dump_user_info(formatter, info, arg_stats);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_User::create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
Formatter *formatter = flusher.get_formatter();
ret = user.add(dpp, op_state, y, NULL);
if (ret < 0) {
if (ret == -EEXIST)
ret = -ERR_USER_EXIST;
return ret;
}
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
dump_user_info(formatter, info);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_User::modify(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
Formatter *formatter = flusher.get_formatter();
ret = user.modify(dpp, op_state, y, NULL);
if (ret < 0) {
if (ret == -ENOENT)
ret = -ERR_NO_SUCH_USER;
return ret;
}
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
dump_user_info(formatter, info);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_User::remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
ret = user.remove(dpp, op_state, y, NULL);
if (ret == -ENOENT)
ret = -ERR_NO_SUCH_USER;
return ret;
}
int RGWUserAdminOp_Subuser::create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
Formatter *formatter = flusher.get_formatter();
ret = user.subusers.add(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
dump_subusers_info(formatter, info);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_Subuser::modify(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
Formatter *formatter = flusher.get_formatter();
ret = user.subusers.modify(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
dump_subusers_info(formatter, info);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_Subuser::remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
ret = user.subusers.remove(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
return 0;
}
int RGWUserAdminOp_Key::create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
Formatter *formatter = flusher.get_formatter();
ret = user.keys.add(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
int key_type = op_state.get_key_type();
if (key_type == KEY_TYPE_SWIFT)
dump_swift_keys_info(formatter, info);
else if (key_type == KEY_TYPE_S3)
dump_access_keys_info(formatter, info);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_Key::remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher,
optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
ret = user.keys.remove(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
return 0;
}
int RGWUserAdminOp_Caps::add(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
Formatter *formatter = flusher.get_formatter();
ret = user.caps.add(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
info.caps.dump(formatter);
flusher.flush();
}
return 0;
}
int RGWUserAdminOp_Caps::remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state,
RGWFormatterFlusher& flusher, optional_yield y)
{
RGWUserInfo info;
RGWUser user;
int ret = user.init(dpp, driver, op_state, y);
if (ret < 0)
return ret;
if (!op_state.has_existing_user())
return -ERR_NO_SUCH_USER;
Formatter *formatter = flusher.get_formatter();
ret = user.caps.remove(dpp, op_state, y, NULL);
if (ret < 0)
return ret;
ret = user.info(info, NULL);
if (ret < 0)
return ret;
if (formatter) {
flusher.start(0);
info.caps.dump(formatter);
flusher.flush();
}
return 0;
}
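// glue between user records and the generic metadata backend: do_get
// and do_remove read/delete through RGWSI_User, while do_put funnels
// writes through RGWMetadataHandlerPut_User below so they honor
// version tracking and metadata-sync semantics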
class RGWUserMetadataHandler : public RGWMetadataHandler_GenericMetaBE {
public:
struct Svc {
RGWSI_User *user{nullptr};
} svc;
RGWUserMetadataHandler(RGWSI_User *user_svc) {
base_init(user_svc->ctx(), user_svc->get_be_handler());
svc.user = user_svc;
}
~RGWUserMetadataHandler() {}
string get_type() override { return "user"; }
int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override {
RGWUserCompleteInfo uci;
RGWObjVersionTracker objv_tracker;
real_time mtime;
rgw_user user = RGWSI_User::user_from_meta_key(entry);
int ret = svc.user->read_user_info(op->ctx(), user, &uci.info, &objv_tracker,
&mtime, nullptr, &uci.attrs,
y, dpp);
if (ret < 0) {
return ret;
}
RGWUserMetadataObject *mdo = new RGWUserMetadataObject(uci, objv_tracker.read_version, mtime);
*obj = mdo;
return 0;
}
RGWMetadataObject *get_meta_obj(JSONObj *jo, const obj_version& objv, const ceph::real_time& mtime) override {
RGWUserCompleteInfo uci;
try {
decode_json_obj(uci, jo);
} catch (JSONDecoder::err& e) {
return nullptr;
}
return new RGWUserMetadataObject(uci, objv, mtime);
}
int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry,
RGWMetadataObject *obj,
RGWObjVersionTracker& objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp,
RGWMDLogSyncType type, bool from_remote_zone) override;
int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp) override {
RGWUserInfo info;
rgw_user user = RGWSI_User::user_from_meta_key(entry);
int ret = svc.user->read_user_info(op->ctx(), user, &info, nullptr,
nullptr, nullptr, nullptr,
y, dpp);
if (ret < 0) {
return ret;
}
return svc.user->remove_user_info(op->ctx(), info, &objv_tracker,
y, dpp);
}
};
class RGWMetadataHandlerPut_User : public RGWMetadataHandlerPut_SObj
{
RGWUserMetadataHandler *uhandler;
RGWUserMetadataObject *uobj;
public:
RGWMetadataHandlerPut_User(RGWUserMetadataHandler *_handler,
RGWSI_MetaBackend_Handler::Op *op, string& entry,
RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker,
optional_yield y,
RGWMDLogSyncType type, bool from_remote_zone) : RGWMetadataHandlerPut_SObj(_handler, op, entry, obj, objv_tracker, y, type, from_remote_zone),
uhandler(_handler) {
uobj = static_cast<RGWUserMetadataObject *>(obj);
}
int put_checked(const DoutPrefixProvider *dpp) override;
};
int RGWUserMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry,
RGWMetadataObject *obj,
RGWObjVersionTracker& objv_tracker,
optional_yield y, const DoutPrefixProvider *dpp,
RGWMDLogSyncType type, bool from_remote_zone)
{
RGWMetadataHandlerPut_User put_op(this, op, entry, obj, objv_tracker, y, type, from_remote_zone);
return do_put_operate(&put_op, dpp);
}
int RGWMetadataHandlerPut_User::put_checked(const DoutPrefixProvider *dpp)
{
RGWUserMetadataObject *orig_obj = static_cast<RGWUserMetadataObject *>(old_obj);
RGWUserCompleteInfo& uci = uobj->get_uci();
map<string, bufferlist> *pattrs{nullptr};
if (uci.has_attrs) {
pattrs = &uci.attrs;
}
RGWUserInfo *pold_info = (orig_obj ? &orig_obj->get_uci().info : nullptr);
auto mtime = obj->get_mtime();
int ret = uhandler->svc.user->store_user_info(op->ctx(), uci.info, pold_info,
&objv_tracker, mtime,
false, pattrs, y, dpp);
if (ret < 0) {
return ret;
}
return STATUS_APPLIED;
}
RGWUserCtl::RGWUserCtl(RGWSI_Zone *zone_svc,
RGWSI_User *user_svc,
RGWUserMetadataHandler *_umhandler) : umhandler(_umhandler) {
svc.zone = zone_svc;
svc.user = user_svc;
be_handler = umhandler->get_be_handler();
}
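// small helper that reads through a std::optional<T>, falling back
// to a value-initialized T when the optional is empty; lets callers
// treat optional parameters as always-present defaults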
template <class T>
class optional_default
{
const std::optional<T>& opt;
std::optional<T> def;
const T *p;
public:
optional_default(const std::optional<T>& _o) : opt(_o) {
if (opt) {
p = &(*opt);
} else {
def = T();
p = &(*def);
}
}
const T *operator->() {
return p;
}
const T& operator*() {
return *p;
}
};
int RGWUserCtl::get_info_by_uid(const DoutPrefixProvider *dpp,
const rgw_user& uid,
RGWUserInfo *info,
optional_yield y,
const GetParams& params)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->read_user_info(op->ctx(),
uid,
info,
params.objv_tracker,
params.mtime,
params.cache_info,
params.attrs,
y,
dpp);
});
}
int RGWUserCtl::get_info_by_email(const DoutPrefixProvider *dpp,
const string& email,
RGWUserInfo *info,
optional_yield y,
const GetParams& params)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->get_user_info_by_email(op->ctx(), email,
info,
params.objv_tracker,
params.mtime,
y,
dpp);
});
}
int RGWUserCtl::get_info_by_swift(const DoutPrefixProvider *dpp,
const string& swift_name,
RGWUserInfo *info,
optional_yield y,
const GetParams& params)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->get_user_info_by_swift(op->ctx(), swift_name,
info,
params.objv_tracker,
params.mtime,
y,
dpp);
});
}
int RGWUserCtl::get_info_by_access_key(const DoutPrefixProvider *dpp,
const string& access_key,
RGWUserInfo *info,
optional_yield y,
const GetParams& params)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->get_user_info_by_access_key(op->ctx(), access_key,
info,
params.objv_tracker,
params.mtime,
y,
dpp);
});
}
int RGWUserCtl::get_attrs_by_uid(const DoutPrefixProvider *dpp,
const rgw_user& user_id,
map<string, bufferlist> *pattrs,
optional_yield y,
RGWObjVersionTracker *objv_tracker)
{
RGWUserInfo user_info;
return get_info_by_uid(dpp, user_id, &user_info, y, RGWUserCtl::GetParams()
.set_attrs(pattrs)
.set_objv_tracker(objv_tracker));
}
int RGWUserCtl::store_info(const DoutPrefixProvider *dpp,
const RGWUserInfo& info, optional_yield y,
const PutParams& params)
{
string key = RGWSI_User::get_meta_key(info.user_id);
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->store_user_info(op->ctx(), info,
params.old_info,
params.objv_tracker,
params.mtime,
params.exclusive,
params.attrs,
y,
dpp);
});
}
int RGWUserCtl::remove_info(const DoutPrefixProvider *dpp,
const RGWUserInfo& info, optional_yield y,
const RemoveParams& params)
{
string key = RGWSI_User::get_meta_key(info.user_id);
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->remove_user_info(op->ctx(), info,
params.objv_tracker,
y, dpp);
});
}
int RGWUserCtl::list_buckets(const DoutPrefixProvider *dpp,
const rgw_user& user,
const string& marker,
const string& end_marker,
uint64_t max,
bool need_stats,
RGWUserBuckets *buckets,
bool *is_truncated,
optional_yield y,
uint64_t default_max)
{
if (!max) {
max = default_max;
}
int ret = svc.user->list_buckets(dpp, user, marker, end_marker,
max, buckets, is_truncated, y);
if (ret < 0) {
return ret;
}
if (need_stats) {
map<string, RGWBucketEnt>& m = buckets->get_buckets();
ret = ctl.bucket->read_buckets_stats(m, y, dpp);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: could not get stats for buckets" << dendl;
return ret;
}
}
return 0;
}
int RGWUserCtl::read_stats(const DoutPrefixProvider *dpp,
const rgw_user& user, RGWStorageStats *stats,
optional_yield y,
ceph::real_time *last_stats_sync,
ceph::real_time *last_stats_update)
{
return be_handler->call([&](RGWSI_MetaBackend_Handler::Op *op) {
return svc.user->read_stats(dpp, op->ctx(), user, stats,
last_stats_sync, last_stats_update, y);
});
}
RGWMetadataHandler *RGWUserMetaHandlerAllocator::alloc(RGWSI_User *user_svc) {
return new RGWUserMetadataHandler(user_svc);
}
void rgw_user::dump(Formatter *f) const
{
::encode_json("user", *this, f);
}
ceph-main/src/rgw/driver/rados/rgw_user.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <string>
#include <boost/algorithm/string.hpp>
#include "include/ceph_assert.h"
#include "include/types.h"
#include "rgw_common.h"
#include "rgw_tools.h"
#include "rgw_string.h"
#include "common/Formatter.h"
#include "rgw_formats.h"
#include "rgw_metadata.h"
#include "rgw_sal_fwd.h"
#define RGW_USER_ANON_ID "anonymous"
#define SECRET_KEY_LEN 40
#define PUBLIC_ID_LEN 20
#define RAND_SUBUSER_LEN 5
#define XMLNS_AWS_S3 "http://s3.amazonaws.com/doc/2006-03-01/"
class RGWUserCtl;
class RGWBucketCtl;
class RGWUserBuckets;
class RGWGetUserStats_CB;
/**
* A string wrapper that includes encode/decode functions
* for easily accessing a UID in all forms
*/
struct RGWUID
{
rgw_user user_id;
void encode(bufferlist& bl) const {
std::string s;
user_id.to_str(s);
using ceph::encode;
encode(s, bl);
}
void decode(bufferlist::const_iterator& bl) {
std::string s;
using ceph::decode;
decode(s, bl);
user_id.from_str(s);
}
};
WRITE_CLASS_ENCODER(RGWUID)
/** Entry for bucket metadata collection */
struct bucket_meta_entry {
size_t size;
size_t size_rounded;
ceph::real_time creation_time;
uint64_t count;
};
extern int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, optional_yield y);
extern int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver, rgw::sal::User* user,
std::map<std::string, bucket_meta_entry>& buckets_usage_map, optional_yield y);
/**
* Get the anonymous (ie, unauthenticated) user info.
*/
extern void rgw_get_anon_user(RGWUserInfo& info);
extern void rgw_perm_to_str(uint32_t mask, char *buf, int len);
extern uint32_t rgw_str_to_perm(const char *str);
extern int rgw_validate_tenant_name(const std::string& t);
enum ObjectKeyType {
KEY_TYPE_SWIFT,
KEY_TYPE_S3,
KEY_TYPE_UNDEFINED
};
enum RGWKeyPoolOp {
GENERATE_KEY,
MODIFY_KEY
};
enum RGWUserId {
RGW_USER_ID,
RGW_SWIFT_USERNAME,
RGW_USER_EMAIL,
RGW_ACCESS_KEY,
};
/*
 * RGWUser and its supporting classes, created to support
 * a RESTful administrative API
 */
struct RGWUserAdminOpState {
// user attributes
std::unique_ptr<rgw::sal::User> user;
std::string user_email;
std::string display_name;
rgw_user new_user_id;
bool overwrite_new_user = false;
int32_t max_buckets{RGW_DEFAULT_MAX_BUCKETS};
__u8 suspended{0};
__u8 admin{0};
__u8 system{0};
__u8 exclusive{0};
__u8 fetch_stats{0};
__u8 sync_stats{0};
std::string caps;
RGWObjVersionTracker objv;
uint32_t op_mask{0};
std::map<int, std::string> temp_url_keys;
// subuser attributes
std::string subuser;
uint32_t perm_mask{RGW_PERM_NONE};
// key attributes
std::string id; // access key
std::string key; // secret key
// access keys fetched for a user in the middle of an op
std::map<std::string, RGWAccessKey> op_access_keys;
int32_t key_type{-1};
bool access_key_exist = false;
std::set<std::string> mfa_ids;
// operation attributes
bool existing_user{false};
bool existing_key{false};
bool existing_subuser{false};
bool existing_email{false};
bool subuser_specified{false};
bool gen_secret{false};
bool gen_access{false};
bool gen_subuser{false};
bool id_specified{false};
bool key_specified{false};
bool type_specified{false};
bool key_type_setbycontext{false}; // key type set by user or subuser context
bool purge_data{false};
bool purge_keys{false};
bool display_name_specified{false};
bool user_email_specified{false};
bool max_buckets_specified{false};
bool perm_specified{false};
bool op_mask_specified{false};
bool caps_specified{false};
bool suspension_op{false};
bool admin_specified{false};
bool system_specified{false};
bool key_op{false};
bool temp_url_key_specified{false};
bool found_by_uid{false};
bool found_by_email{false};
bool found_by_key{false};
bool mfa_ids_specified{false};
// req parameters
bool populated{false};
bool initialized{false};
bool key_params_checked{false};
bool subuser_params_checked{false};
bool user_params_checked{false};
bool bucket_quota_specified{false};
bool user_quota_specified{false};
bool bucket_ratelimit_specified{false};
bool user_ratelimit_specified{false};
RGWQuota quota;
RGWRateLimitInfo user_ratelimit;
RGWRateLimitInfo bucket_ratelimit;
// req parameters for listing user
std::string marker{""};
uint32_t max_entries{1000};
rgw_placement_rule default_placement; // user default placement
bool default_placement_specified{false};
std::list<std::string> placement_tags; // user default placement_tags
bool placement_tags_specified{false};
void set_access_key(const std::string& access_key) {
if (access_key.empty())
return;
id = access_key;
id_specified = true;
gen_access = false;
key_op = true;
}
void set_secret_key(const std::string& secret_key) {
if (secret_key.empty())
return;
key = secret_key;
key_specified = true;
gen_secret = false;
key_op = true;
}
void set_user_id(const rgw_user& id);
void set_new_user_id(const rgw_user& id) {
if (id.empty())
return;
new_user_id = id;
}
void set_overwrite_new_user(bool b) {
overwrite_new_user = b;
}
void set_user_email(std::string& email) {
/* always lowercase email address */
boost::algorithm::to_lower(email);
user_email = email;
user_email_specified = true;
}
void set_display_name(const std::string& name) {
if (name.empty())
return;
display_name = name;
display_name_specified = true;
}
void set_subuser(std::string& _subuser);
void set_caps(const std::string& _caps) {
if (_caps.empty())
return;
caps = _caps;
caps_specified = true;
}
void set_perm(uint32_t perm) {
perm_mask = perm;
perm_specified = true;
}
void set_op_mask(uint32_t mask) {
op_mask = mask;
op_mask_specified = true;
}
void set_temp_url_key(const std::string& key, int index) {
temp_url_keys[index] = key;
temp_url_key_specified = true;
}
void set_key_type(int32_t type) {
key_type = type;
type_specified = true;
}
void set_access_key_exist() {
access_key_exist = true;
}
void set_suspension(__u8 is_suspended) {
suspended = is_suspended;
suspension_op = true;
}
void set_admin(__u8 is_admin) {
admin = is_admin;
admin_specified = true;
}
void set_system(__u8 is_system) {
system = is_system;
system_specified = true;
}
void set_exclusive(__u8 is_exclusive) {
exclusive = is_exclusive;
}
void set_fetch_stats(__u8 is_fetch_stats) {
fetch_stats = is_fetch_stats;
}
void set_sync_stats(__u8 is_sync_stats) {
sync_stats = is_sync_stats;
}
void set_user_info(RGWUserInfo& user_info);
void set_user_version_tracker(RGWObjVersionTracker& objv_tracker);
void set_max_buckets(int32_t mb) {
max_buckets = mb;
max_buckets_specified = true;
}
void set_gen_access() {
gen_access = true;
key_op = true;
}
void set_gen_secret() {
gen_secret = true;
key_op = true;
}
void set_generate_key() {
if (id.empty())
gen_access = true;
if (key.empty())
gen_secret = true;
key_op = true;
}
void clear_generate_key() {
gen_access = false;
gen_secret = false;
}
void set_purge_keys() {
purge_keys = true;
key_op = true;
}
void set_bucket_quota(RGWQuotaInfo& quotas) {
quota.bucket_quota = quotas;
bucket_quota_specified = true;
}
void set_user_quota(RGWQuotaInfo& quotas) {
quota.user_quota = quotas;
user_quota_specified = true;
}
void set_bucket_ratelimit(RGWRateLimitInfo& ratelimit) {
bucket_ratelimit = ratelimit;
bucket_ratelimit_specified = true;
}
void set_user_ratelimit(RGWRateLimitInfo& ratelimit) {
user_ratelimit = ratelimit;
user_ratelimit_specified = true;
}
void set_mfa_ids(const std::set<std::string>& ids) {
mfa_ids = ids;
mfa_ids_specified = true;
}
void set_default_placement(const rgw_placement_rule& _placement) {
default_placement = _placement;
default_placement_specified = true;
}
void set_placement_tags(const std::list<std::string>& _tags) {
placement_tags = _tags;
placement_tags_specified = true;
}
bool is_populated() { return populated; }
bool is_initialized() { return initialized; }
bool has_existing_user() { return existing_user; }
bool has_existing_key() { return existing_key; }
bool has_existing_subuser() { return existing_subuser; }
bool has_existing_email() { return existing_email; }
bool has_subuser() { return subuser_specified; }
bool has_key_op() { return key_op; }
bool has_caps_op() { return caps_specified; }
bool has_suspension_op() { return suspension_op; }
bool has_subuser_perm() { return perm_specified; }
bool has_op_mask() { return op_mask_specified; }
bool will_gen_access() { return gen_access; }
bool will_gen_secret() { return gen_secret; }
bool will_gen_subuser() { return gen_subuser; }
bool will_purge_keys() { return purge_keys; }
bool will_purge_data() { return purge_data; }
bool will_generate_subuser() { return gen_subuser; }
bool has_bucket_quota() { return bucket_quota_specified; }
bool has_user_quota() { return user_quota_specified; }
void set_populated() { populated = true; }
void clear_populated() { populated = false; }
void set_initialized() { initialized = true; }
void set_existing_user(bool flag) { existing_user = flag; }
void set_existing_key(bool flag) { existing_key = flag; }
void set_existing_subuser(bool flag) { existing_subuser = flag; }
void set_existing_email(bool flag) { existing_email = flag; }
void set_purge_data(bool flag) { purge_data = flag; }
void set_generate_subuser(bool flag) { gen_subuser = flag; }
__u8 get_suspension_status() { return suspended; }
int32_t get_key_type() {return key_type; }
bool get_access_key_exist() {return access_key_exist; }
uint32_t get_subuser_perm() { return perm_mask; }
int32_t get_max_buckets() { return max_buckets; }
uint32_t get_op_mask() { return op_mask; }
RGWQuotaInfo& get_bucket_quota() { return quota.bucket_quota; }
RGWQuotaInfo& get_user_quota() { return quota.user_quota; }
std::set<std::string>& get_mfa_ids() { return mfa_ids; }
rgw::sal::User* get_user() { return user.get(); }
const rgw_user& get_user_id();
std::string get_subuser() { return subuser; }
std::string get_access_key() { return id; }
std::string get_secret_key() { return key; }
std::string get_caps() { return caps; }
std::string get_user_email() { return user_email; }
std::string get_display_name() { return display_name; }
rgw_user& get_new_uid() { return new_user_id; }
bool get_overwrite_new_user() const { return overwrite_new_user; }
std::map<int, std::string>& get_temp_url_keys() { return temp_url_keys; }
RGWUserInfo& get_user_info();
std::map<std::string, RGWAccessKey>* get_swift_keys();
std::map<std::string, RGWAccessKey>* get_access_keys();
std::map<std::string, RGWSubUser>* get_subusers();
RGWUserCaps* get_caps_obj();
std::string build_default_swift_kid();
std::string generate_subuser();
RGWUserAdminOpState(rgw::sal::Driver* driver);
};
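// Illustrative sketch only (assumed usage, not upstream code): an admin flow
// typically fills an RGWUserAdminOpState with the setters above and then
// hands it to RGWUser (declared below). Variable names here are hypothetical.
//
//   RGWUserAdminOpState op_state(driver);
//   op_state.set_generate_key();        // gen access/secret keys if id/key empty
//   op_state.set_max_buckets(2000);
//
//   RGWUser user;
//   std::string err;
//   if (user.init(dpp, driver, op_state, y) == 0) {
//     user.add(dpp, op_state, y, &err); // create the user described by op_state
//   }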
class RGWUser;
class RGWAccessKeyPool
{
RGWUser *user{nullptr};
std::map<std::string, int, ltstr_nocase> key_type_map;
rgw_user user_id;
rgw::sal::Driver* driver{nullptr};
std::map<std::string, RGWAccessKey> *swift_keys{nullptr};
std::map<std::string, RGWAccessKey> *access_keys{nullptr};
// we don't want to allow keys for the anonymous user or a null user
bool keys_allowed{false};
private:
int create_key(RGWUserAdminOpState& op_state, std::string *err_msg = NULL);
int generate_key(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg = NULL);
int modify_key(RGWUserAdminOpState& op_state, std::string *err_msg = NULL);
int check_key_owner(RGWUserAdminOpState& op_state);
bool check_existing_key(RGWUserAdminOpState& op_state);
int check_op(RGWUserAdminOpState& op_state, std::string *err_msg = NULL);
  /* API Contract Fulfillment */
int execute_add(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_save, optional_yield y);
int execute_remove(const DoutPrefixProvider *dpp,
RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_save, optional_yield y);
int remove_subuser_keys(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_save, optional_yield y);
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save,
optional_yield y);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
bool defer_save, optional_yield y);
public:
explicit RGWAccessKeyPool(RGWUser* usr);
int init(RGWUserAdminOpState& op_state);
/* API Contracted Methods */
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg = NULL);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg = NULL);
friend class RGWUser;
friend class RGWSubUserPool;
};
class RGWSubUserPool
{
RGWUser *user{nullptr};
rgw_user user_id;
rgw::sal::Driver* driver{nullptr};
bool subusers_allowed{false};
std::map<std::string, RGWSubUser> *subuser_map{nullptr};
private:
int check_op(RGWUserAdminOpState& op_state, std::string *err_msg = NULL);
/* API Contract Fulfillment */
int execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y);
int execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y);
int execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y);
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save,
optional_yield y);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save, optional_yield y);
int modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg, bool defer_save);
public:
explicit RGWSubUserPool(RGWUser *user);
bool exists(std::string subuser);
int init(RGWUserAdminOpState& op_state);
  /* API Contracted Methods */
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg = NULL);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
int modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
friend class RGWUser;
};
class RGWUserCapPool
{
RGWUserCaps *caps{nullptr};
bool caps_allowed{false};
RGWUser *user{nullptr};
private:
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save,
optional_yield y);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, bool defer_save,
optional_yield y);
public:
explicit RGWUserCapPool(RGWUser *user);
int init(RGWUserAdminOpState& op_state);
  /* API Contracted Methods */
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y,
std::string *err_msg = NULL);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
friend class RGWUser;
};
class RGWUser
{
private:
RGWUserInfo old_info;
rgw::sal::Driver* driver{nullptr};
rgw_user user_id;
bool info_stored{false};
void set_populated() { info_stored = true; }
void clear_populated() { info_stored = false; }
bool is_populated() { return info_stored; }
int check_op(RGWUserAdminOpState& req, std::string *err_msg);
int update(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y);
void clear_members();
void init_default();
/* API Contract Fulfillment */
int execute_add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg,
optional_yield y);
int execute_remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state,
std::string *err_msg, optional_yield y);
int execute_modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y);
int execute_rename(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, std::string *err_msg, optional_yield y);
public:
RGWUser();
int init(const DoutPrefixProvider *dpp, rgw::sal::Driver* storage, RGWUserAdminOpState& op_state,
optional_yield y);
int init_storage(rgw::sal::Driver* storage);
int init(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y);
int init_members(RGWUserAdminOpState& op_state);
rgw::sal::Driver* get_driver() { return driver; }
/* API Contracted Members */
RGWUserCapPool caps;
RGWAccessKeyPool keys;
RGWSubUserPool subusers;
/* API Contracted Methods */
int add(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
int remove(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
int rename(RGWUserAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
/* remove an already populated RGWUser */
int remove(std::string *err_msg = NULL);
int modify(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
/* retrieve info from an existing user in the RGW system */
int info(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWUserInfo& fetched_info, optional_yield y,
std::string *err_msg = NULL);
/* info from an already populated RGWUser */
  int info(RGWUserInfo& fetched_info, std::string *err_msg = NULL);
/* list the existing users */
int list(const DoutPrefixProvider *dpp, RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
friend class RGWAccessKeyPool;
friend class RGWSubUserPool;
friend class RGWUserCapPool;
};
/* Wrappers for admin API functionality */
class RGWUserAdminOp_User
{
public:
static int list(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher);
static int info(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int modify(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y);
static int remove(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y);
};
class RGWUserAdminOp_Subuser
{
public:
static int create(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int modify(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
};
class RGWUserAdminOp_Key
{
public:
static int create(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
};
class RGWUserAdminOp_Caps
{
public:
static int add(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
static int remove(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
RGWUserAdminOpState& op_state, RGWFormatterFlusher& flusher,
optional_yield y);
};
struct RGWUserCompleteInfo {
RGWUserInfo info;
std::map<std::string, bufferlist> attrs;
bool has_attrs{false};
void dump(Formatter * const f) const {
info.dump(f);
encode_json("attrs", attrs, f);
}
void decode_json(JSONObj *obj) {
decode_json_obj(info, obj);
has_attrs = JSONDecoder::decode_json("attrs", attrs, obj);
}
};
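// Hedged note on the serialized shape (inferred from dump() above, values
// hypothetical): the RGWUserInfo fields land at the top level and the raw
// attributes nest under an "attrs" key, e.g.
//
//   { "user_id": "alice", ..., "attrs": [ { "key": "user.rgw.idtag", "val": "..." } ] }
//
// decode_json() tolerates a missing "attrs" member and records whether it was
// present in has_attrs.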
class RGWUserMetadataObject : public RGWMetadataObject {
RGWUserCompleteInfo uci;
public:
RGWUserMetadataObject() {}
RGWUserMetadataObject(const RGWUserCompleteInfo& _uci, const obj_version& v, real_time m)
: uci(_uci) {
objv = v;
mtime = m;
}
void dump(Formatter *f) const override {
uci.dump(f);
}
RGWUserCompleteInfo& get_uci() {
return uci;
}
};
class RGWUserMetadataHandler;
class RGWUserCtl
{
struct Svc {
RGWSI_Zone *zone{nullptr};
RGWSI_User *user{nullptr};
} svc;
struct Ctl {
RGWBucketCtl *bucket{nullptr};
} ctl;
RGWUserMetadataHandler *umhandler;
RGWSI_MetaBackend_Handler *be_handler{nullptr};
public:
RGWUserCtl(RGWSI_Zone *zone_svc,
RGWSI_User *user_svc,
RGWUserMetadataHandler *_umhandler);
void init(RGWBucketCtl *bucket_ctl) {
ctl.bucket = bucket_ctl;
}
RGWBucketCtl *get_bucket_ctl() {
return ctl.bucket;
}
struct GetParams {
RGWObjVersionTracker *objv_tracker{nullptr};
ceph::real_time *mtime{nullptr};
rgw_cache_entry_info *cache_info{nullptr};
std::map<std::string, bufferlist> *attrs{nullptr};
GetParams() {}
GetParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
GetParams& set_mtime(ceph::real_time *_mtime) {
mtime = _mtime;
return *this;
}
GetParams& set_cache_info(rgw_cache_entry_info *_cache_info) {
cache_info = _cache_info;
return *this;
}
GetParams& set_attrs(std::map<std::string, bufferlist> *_attrs) {
attrs = _attrs;
return *this;
}
};
struct PutParams {
RGWUserInfo *old_info{nullptr};
RGWObjVersionTracker *objv_tracker{nullptr};
ceph::real_time mtime;
bool exclusive{false};
std::map<std::string, bufferlist> *attrs{nullptr};
PutParams() {}
PutParams& set_old_info(RGWUserInfo *_info) {
old_info = _info;
return *this;
}
PutParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
PutParams& set_mtime(const ceph::real_time& _mtime) {
mtime = _mtime;
return *this;
}
PutParams& set_exclusive(bool _exclusive) {
exclusive = _exclusive;
return *this;
}
PutParams& set_attrs(std::map<std::string, bufferlist> *_attrs) {
attrs = _attrs;
return *this;
}
};
struct RemoveParams {
RGWObjVersionTracker *objv_tracker{nullptr};
RemoveParams() {}
RemoveParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
objv_tracker = _objv_tracker;
return *this;
}
};
int get_info_by_uid(const DoutPrefixProvider *dpp,
const rgw_user& uid, RGWUserInfo *info,
optional_yield y, const GetParams& params = {});
int get_info_by_email(const DoutPrefixProvider *dpp,
const std::string& email, RGWUserInfo *info,
optional_yield y, const GetParams& params = {});
int get_info_by_swift(const DoutPrefixProvider *dpp,
const std::string& swift_name, RGWUserInfo *info,
optional_yield y, const GetParams& params = {});
int get_info_by_access_key(const DoutPrefixProvider *dpp,
const std::string& access_key, RGWUserInfo *info,
optional_yield y, const GetParams& params = {});
int get_attrs_by_uid(const DoutPrefixProvider *dpp,
const rgw_user& user_id,
std::map<std::string, bufferlist> *attrs,
optional_yield y,
RGWObjVersionTracker *objv_tracker = nullptr);
int store_info(const DoutPrefixProvider *dpp,
const RGWUserInfo& info, optional_yield y,
const PutParams& params = {});
int remove_info(const DoutPrefixProvider *dpp,
const RGWUserInfo& info, optional_yield y,
const RemoveParams& params = {});
int list_buckets(const DoutPrefixProvider *dpp,
const rgw_user& user,
const std::string& marker,
const std::string& end_marker,
uint64_t max,
bool need_stats,
RGWUserBuckets *buckets,
bool *is_truncated,
optional_yield y,
uint64_t default_max = 1000);
int read_stats(const DoutPrefixProvider *dpp,
const rgw_user& user, RGWStorageStats *stats,
optional_yield y,
ceph::real_time *last_stats_sync = nullptr, /* last time a full stats sync completed */
ceph::real_time *last_stats_update = nullptr); /* last time a stats update was done */
};
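// Illustrative sketch (assumed usage, not upstream code): the nested parameter
// structs above are meant for fluent chaining at the call site, since every
// setter returns *this. Names below are hypothetical.
//
//   RGWObjVersionTracker objv;
//   ceph::real_time mtime;
//   RGWUserInfo info;
//   int r = user_ctl->get_info_by_uid(dpp, uid, &info, y,
//               RGWUserCtl::GetParams()
//                   .set_objv_tracker(&objv)
//                   .set_mtime(&mtime));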
class RGWUserMetaHandlerAllocator {
public:
static RGWMetadataHandler *alloc(RGWSI_User *user_svc);
};
| 26,434 | 28.836343 | 140 | h |
| null | ceph-main/src/rgw/driver/rados/rgw_zone.cc |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "rgw_zone.h"
#include "rgw_realm_watcher.h"
#include "rgw_sal_config.h"
#include "rgw_sync.h"
#include "services/svc_zone.h"
#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw
using namespace std;
using namespace rgw_zone_defaults;
RGWMetaSyncStatusManager::~RGWMetaSyncStatusManager() {}
#define FIRST_EPOCH 1
struct RGWAccessKey;
/// Generate a random uuid for realm/period/zonegroup/zone ids
static std::string gen_random_uuid()
{
uuid_d uuid;
uuid.generate_random();
return uuid.to_string();
}
void RGWDefaultZoneGroupInfo::dump(Formatter *f) const {
encode_json("default_zonegroup", default_zonegroup, f);
}
void RGWDefaultZoneGroupInfo::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("default_zonegroup", default_zonegroup, obj);
  /* backward compatibility with region */
if (default_zonegroup.empty()) {
JSONDecoder::decode_json("default_region", default_zonegroup, obj);
}
}
int RGWZoneGroup::create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
{
name = default_zonegroup_name;
api_name = default_zonegroup_name;
is_master = true;
RGWZoneGroupPlacementTarget placement_target;
placement_target.name = "default-placement";
placement_targets[placement_target.name] = placement_target;
default_placement.name = "default-placement";
RGWZoneParams zone_params(default_zone_name);
int r = zone_params.init(dpp, cct, sysobj_svc, y, false);
if (r < 0) {
ldpp_dout(dpp, 0) << "create_default: error initializing zone params: " << cpp_strerror(-r) << dendl;
return r;
}
r = zone_params.create_default(dpp, y);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, 0) << "create_default: error in create_default zone params: " << cpp_strerror(-r) << dendl;
return r;
} else if (r == -EEXIST) {
ldpp_dout(dpp, 10) << "zone_params::create_default() returned -EEXIST, we raced with another default zone_params creation" << dendl;
zone_params.clear_id();
r = zone_params.init(dpp, cct, sysobj_svc, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "create_default: error in init existing zone params: " << cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 20) << "zone_params::create_default() " << zone_params.get_name() << " id " << zone_params.get_id()
<< dendl;
}
RGWZone& default_zone = zones[zone_params.get_id()];
default_zone.name = zone_params.get_name();
default_zone.id = zone_params.get_id();
master_zone = default_zone.id;
// initialize supported zone features
default_zone.supported_features.insert(rgw::zone_features::supported.begin(),
rgw::zone_features::supported.end());
// enable default zonegroup features
enabled_features.insert(rgw::zone_features::enabled.begin(),
rgw::zone_features::enabled.end());
r = create(dpp, y);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, 0) << "error storing zone group info: " << cpp_strerror(-r) << dendl;
return r;
}
if (r == -EEXIST) {
ldpp_dout(dpp, 10) << "create_default() returned -EEXIST, we raced with another zonegroup creation" << dendl;
id.clear();
r = init(dpp, cct, sysobj_svc, y);
if (r < 0) {
return r;
}
}
if (old_format) {
name = id;
}
post_process_params(dpp, y);
return 0;
}
int RGWZoneGroup::equals(const string& other_zonegroup) const
{
if (is_master && other_zonegroup.empty())
return true;
return (id == other_zonegroup);
}
int RGWZoneGroup::add_zone(const DoutPrefixProvider *dpp,
const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
const list<string>& endpoints, const string *ptier_type,
bool *psync_from_all, list<string>& sync_from, list<string>& sync_from_rm,
string *predirect_zone, std::optional<int> bucket_index_max_shards,
RGWSyncModulesManager *sync_mgr,
const rgw::zone_features::set& enable_features,
const rgw::zone_features::set& disable_features,
optional_yield y)
{
auto& zone_id = zone_params.get_id();
auto& zone_name = zone_params.get_name();
// check for duplicate zone name on insert
if (!zones.count(zone_id)) {
for (const auto& zone : zones) {
if (zone.second.name == zone_name) {
ldpp_dout(dpp, 0) << "ERROR: found existing zone name " << zone_name
<< " (" << zone.first << ") in zonegroup " << get_name() << dendl;
return -EEXIST;
}
}
}
if (is_master) {
if (*is_master) {
if (!master_zone.empty() && master_zone != zone_id) {
ldpp_dout(dpp, 0) << "NOTICE: overriding master zone: " << master_zone << dendl;
}
master_zone = zone_id;
} else if (master_zone == zone_id) {
master_zone.clear();
}
}
RGWZone& zone = zones[zone_id];
zone.name = zone_name;
zone.id = zone_id;
if (!endpoints.empty()) {
zone.endpoints = endpoints;
}
if (read_only) {
zone.read_only = *read_only;
}
if (ptier_type) {
zone.tier_type = *ptier_type;
if (!sync_mgr->get_module(*ptier_type, nullptr)) {
ldpp_dout(dpp, 0) << "ERROR: could not found sync module: " << *ptier_type
<< ", valid sync modules: "
<< sync_mgr->get_registered_module_names()
<< dendl;
return -ENOENT;
}
}
if (psync_from_all) {
zone.sync_from_all = *psync_from_all;
}
if (predirect_zone) {
zone.redirect_zone = *predirect_zone;
}
if (bucket_index_max_shards) {
zone.bucket_index_max_shards = *bucket_index_max_shards;
}
for (auto add : sync_from) {
zone.sync_from.insert(add);
}
for (auto rm : sync_from_rm) {
zone.sync_from.erase(rm);
}
zone.supported_features.insert(enable_features.begin(),
enable_features.end());
for (const auto& feature : disable_features) {
if (enabled_features.contains(feature)) {
lderr(cct) << "ERROR: Cannot disable zone feature \"" << feature
<< "\" until it's been disabled in zonegroup " << name << dendl;
return -EINVAL;
}
auto i = zone.supported_features.find(feature);
if (i == zone.supported_features.end()) {
ldout(cct, 1) << "WARNING: zone feature \"" << feature
<< "\" was not enabled in zone " << zone.name << dendl;
continue;
}
zone.supported_features.erase(i);
}
post_process_params(dpp, y);
return update(dpp,y);
}
int RGWZoneGroup::rename_zone(const DoutPrefixProvider *dpp,
const RGWZoneParams& zone_params,
optional_yield y)
{
RGWZone& zone = zones[zone_params.get_id()];
zone.name = zone_params.get_name();
return update(dpp, y);
}
void RGWZoneGroup::post_process_params(const DoutPrefixProvider *dpp, optional_yield y)
{
bool log_data = zones.size() > 1;
if (master_zone.empty()) {
auto iter = zones.begin();
if (iter != zones.end()) {
master_zone = iter->first;
}
}
for (auto& item : zones) {
RGWZone& zone = item.second;
zone.log_data = log_data;
RGWZoneParams zone_params(zone.id, zone.name);
int ret = zone_params.init(dpp, cct, sysobj_svc, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: could not read zone params for zone id=" << zone.id << " name=" << zone.name << dendl;
continue;
}
for (auto& pitem : zone_params.placement_pools) {
const string& placement_name = pitem.first;
if (placement_targets.find(placement_name) == placement_targets.end()) {
RGWZoneGroupPlacementTarget placement_target;
placement_target.name = placement_name;
placement_targets[placement_name] = placement_target;
}
}
}
if (default_placement.empty() && !placement_targets.empty()) {
default_placement.init(placement_targets.begin()->first, RGW_STORAGE_CLASS_STANDARD);
}
}
int RGWZoneGroup::remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y)
{
auto iter = zones.find(zone_id);
if (iter == zones.end()) {
ldpp_dout(dpp, 0) << "zone id " << zone_id << " is not a part of zonegroup "
<< name << dendl;
return -ENOENT;
}
zones.erase(iter);
post_process_params(dpp, y);
return update(dpp, y);
}
void RGWDefaultSystemMetaObjInfo::dump(Formatter *f) const {
encode_json("default_id", default_id, f);
}
void RGWDefaultSystemMetaObjInfo::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("default_id", default_id, obj);
}
int RGWSystemMetaObj::rename(const DoutPrefixProvider *dpp, const string& new_name, optional_yield y)
{
string new_id;
int ret = read_id(dpp, new_name, new_id, y);
if (!ret) {
return -EEXIST;
}
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "Error read_id " << new_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
string old_name = name;
name = new_name;
ret = update(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "Error storing new obj info " << new_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
ret = store_name(dpp, true, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "Error storing new name " << new_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
/* delete old name */
rgw_pool pool(get_pool(cct));
string oid = get_names_oid_prefix() + old_name;
rgw_raw_obj old_name_obj(pool, oid);
auto sysobj = sysobj_svc->get_obj(old_name_obj);
ret = sysobj.wop().remove(dpp, y);
if (ret < 0) {
ldpp_dout(dpp, 0) << "Error delete old obj name " << old_name << ": " << cpp_strerror(-ret) << dendl;
return ret;
}
return ret;
}
int RGWSystemMetaObj::read(const DoutPrefixProvider *dpp, optional_yield y)
{
int ret = read_id(dpp, name, id, y);
if (ret < 0) {
return ret;
}
return read_info(dpp, id, y);
}
int RGWZoneParams::create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format)
{
name = default_zone_name;
int r = create(dpp, y);
if (r < 0) {
return r;
}
if (old_format) {
name = id;
}
return r;
}
const string& RGWZoneParams::get_compression_type(const rgw_placement_rule& placement_rule) const
{
static const std::string NONE{"none"};
auto p = placement_pools.find(placement_rule.name);
if (p == placement_pools.end()) {
return NONE;
}
const auto& type = p->second.get_compression_type(placement_rule.get_storage_class());
return !type.empty() ? type : NONE;
}
// run an MD5 hash on the zone_id and return the first 32 bits
static uint32_t gen_short_zone_id(const std::string& zone_id)
{
unsigned char md5[CEPH_CRYPTO_MD5_DIGESTSIZE];
MD5 hash;
// Allow use of MD5 digest in FIPS mode for non-cryptographic purposes
hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
hash.Update((const unsigned char *)zone_id.c_str(), zone_id.size());
hash.Final(md5);
uint32_t short_id;
memcpy((char *)&short_id, md5, sizeof(short_id));
return std::max(short_id, 1u);
}
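// Editorial note: std::max(short_id, 1u) reserves 0 as the "unknown zone"
// sentinel that RGWPeriodMap::get_zone_short_id() returns on lookup misses.
// A hedged sketch (uuid value hypothetical):
//
//   uint32_t sid = gen_short_zone_id("9f3c6f72-...");  // first 32 bits of MD5
//   // sid != 0 always holds, so 0 can safely mean "not found"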
int RGWPeriodMap::update(const RGWZoneGroup& zonegroup, CephContext *cct)
{
if (zonegroup.is_master_zonegroup() && (!master_zonegroup.empty() && zonegroup.get_id() != master_zonegroup)) {
ldout(cct,0) << "Error updating periodmap, multiple master zonegroups configured "<< dendl;
ldout(cct,0) << "master zonegroup: " << master_zonegroup << " and " << zonegroup.get_id() <<dendl;
return -EINVAL;
}
map<string, RGWZoneGroup>::iterator iter = zonegroups.find(zonegroup.get_id());
if (iter != zonegroups.end()) {
RGWZoneGroup& old_zonegroup = iter->second;
if (!old_zonegroup.api_name.empty()) {
zonegroups_by_api.erase(old_zonegroup.api_name);
}
}
zonegroups[zonegroup.get_id()] = zonegroup;
if (!zonegroup.api_name.empty()) {
zonegroups_by_api[zonegroup.api_name] = zonegroup;
}
if (zonegroup.is_master_zonegroup()) {
master_zonegroup = zonegroup.get_id();
} else if (master_zonegroup == zonegroup.get_id()) {
master_zonegroup = "";
}
for (auto& i : zonegroup.zones) {
auto& zone = i.second;
if (short_zone_ids.find(zone.id) != short_zone_ids.end()) {
continue;
}
// calculate the zone's short id
uint32_t short_id = gen_short_zone_id(zone.id);
// search for an existing zone with the same short id
for (auto& s : short_zone_ids) {
if (s.second == short_id) {
ldout(cct, 0) << "New zone '" << zone.name << "' (" << zone.id
<< ") generates the same short_zone_id " << short_id
<< " as existing zone id " << s.first << dendl;
return -EEXIST;
}
}
short_zone_ids[zone.id] = short_id;
}
return 0;
}
uint32_t RGWPeriodMap::get_zone_short_id(const string& zone_id) const
{
auto i = short_zone_ids.find(zone_id);
if (i == short_zone_ids.end()) {
return 0;
}
return i->second;
}
bool RGWPeriodMap::find_zone_by_name(const string& zone_name,
RGWZoneGroup *zonegroup,
RGWZone *zone) const
{
for (auto& iter : zonegroups) {
auto& zg = iter.second;
for (auto& ziter : zg.zones) {
auto& z = ziter.second;
if (z.name == zone_name) {
*zonegroup = zg;
*zone = z;
return true;
}
}
}
return false;
}
namespace rgw {
int read_realm(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
std::string_view realm_id,
std::string_view realm_name,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer)
{
if (!realm_id.empty()) {
return cfgstore->read_realm_by_id(dpp, y, realm_id, info, writer);
}
if (!realm_name.empty()) {
return cfgstore->read_realm_by_name(dpp, y, realm_name, info, writer);
}
return cfgstore->read_default_realm(dpp, y, info, writer);
}
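// Resolution order above: an explicit realm_id wins, then realm_name, then the
// cluster's default realm. A hedged call sketch (names hypothetical):
//
//   RGWRealm realm;
//   int r = read_realm(dpp, y, cfgstore, "", "gold", realm, nullptr);
//   // loads the realm named "gold"; pass empty id and name for the default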
int create_realm(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, bool exclusive,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer_out)
{
if (info.name.empty()) {
ldpp_dout(dpp, -1) << __func__ << " requires a realm name" << dendl;
return -EINVAL;
}
if (info.id.empty()) {
info.id = gen_random_uuid();
}
// if the realm already has a current_period, just make sure it exists
std::optional<RGWPeriod> period;
if (!info.current_period.empty()) {
period.emplace();
int r = cfgstore->read_period(dpp, y, info.current_period,
std::nullopt, *period);
if (r < 0) {
ldpp_dout(dpp, -1) << __func__ << " failed to read realm's current_period="
<< info.current_period << " with " << cpp_strerror(r) << dendl;
return r;
}
}
// create the realm
std::unique_ptr<sal::RealmWriter> writer;
int r = cfgstore->create_realm(dpp, y, exclusive, info, &writer);
if (r < 0) {
return r;
}
if (!period) {
// initialize and exclusive-create the initial period
period.emplace();
period->id = gen_random_uuid();
period->period_map.id = period->id;
period->epoch = FIRST_EPOCH;
period->realm_id = info.id;
period->realm_name = info.name;
r = cfgstore->create_period(dpp, y, true, *period);
if (r < 0) {
ldpp_dout(dpp, -1) << __func__ << " failed to create the initial period id="
<< period->id << " for realm " << info.name
<< " with " << cpp_strerror(r) << dendl;
return r;
}
}
// update the realm's current_period
r = realm_set_current_period(dpp, y, cfgstore, *writer, info, *period);
if (r < 0) {
return r;
}
// try to set as default. may race with another create, so pass exclusive=true
// so we don't override an existing default
r = set_default_realm(dpp, y, cfgstore, info, true);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, 0) << "WARNING: failed to set realm as default: "
<< cpp_strerror(r) << dendl;
}
if (writer_out) {
*writer_out = std::move(writer);
}
return 0;
}
int set_default_realm(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWRealm& info,
bool exclusive)
{
return cfgstore->write_default_realm_id(dpp, y, exclusive, info.id);
}
int realm_set_current_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
sal::RealmWriter& writer, RGWRealm& realm,
const RGWPeriod& period)
{
// update realm epoch to match the period's
if (realm.epoch > period.realm_epoch) {
ldpp_dout(dpp, -1) << __func__ << " with old realm epoch "
<< period.realm_epoch << ", current epoch=" << realm.epoch << dendl;
return -EINVAL;
}
if (realm.epoch == period.realm_epoch && realm.current_period != period.id) {
ldpp_dout(dpp, -1) << __func__ << " with same realm epoch "
<< period.realm_epoch << ", but different period id "
<< period.id << " != " << realm.current_period << dendl;
return -EINVAL;
}
realm.epoch = period.realm_epoch;
realm.current_period = period.id;
// update the realm object
int r = writer.write(dpp, y, realm);
if (r < 0) {
ldpp_dout(dpp, -1) << __func__ << " failed to overwrite realm "
<< realm.name << " with " << cpp_strerror(r) << dendl;
return r;
}
// reflect the zonegroup and period config
(void) reflect_period(dpp, y, cfgstore, period);
return 0;
}
int reflect_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWPeriod& info)
{
// overwrite the local period config and zonegroup objects
constexpr bool exclusive = false;
int r = cfgstore->write_period_config(dpp, y, exclusive, info.realm_id,
info.period_config);
if (r < 0) {
ldpp_dout(dpp, -1) << __func__ << " failed to store period config for realm id="
<< info.realm_id << " with " << cpp_strerror(r) << dendl;
return r;
}
for (auto& [zonegroup_id, zonegroup] : info.period_map.zonegroups) {
r = cfgstore->create_zonegroup(dpp, y, exclusive, zonegroup, nullptr);
if (r < 0) {
ldpp_dout(dpp, -1) << __func__ << " failed to store zonegroup id="
<< zonegroup_id << " with " << cpp_strerror(r) << dendl;
return r;
}
if (zonegroup.is_master) {
// set master as default if no default exists
constexpr bool exclusive = true;
r = set_default_zonegroup(dpp, y, cfgstore, zonegroup, exclusive);
if (r == 0) {
ldpp_dout(dpp, 1) << "Set the period's master zonegroup "
<< zonegroup.name << " as the default" << dendl;
}
}
}
return 0;
}
std::string get_staging_period_id(std::string_view realm_id)
{
return string_cat_reserve(realm_id, ":staging");
}
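// Hedged example (realm id hypothetical):
//   get_staging_period_id("b5e4d2f0-...") == "b5e4d2f0-...:staging"
// Each realm thus has exactly one staging period id, which fork_period() below
// installs while bumping realm_epoch for the next commit.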
void fork_period(const DoutPrefixProvider* dpp, RGWPeriod& info)
{
ldpp_dout(dpp, 20) << __func__ << " realm id=" << info.realm_id
<< " period id=" << info.id << dendl;
info.predecessor_uuid = std::move(info.id);
info.id = get_staging_period_id(info.realm_id);
info.period_map.reset();
info.realm_epoch++;
}
int update_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, RGWPeriod& info)
{
// clear zone short ids of removed zones. period_map.update() will add the
// remaining zones back
info.period_map.short_zone_ids.clear();
// list all zonegroups in the realm
rgw::sal::ListResult<std::string> listing;
std::array<std::string, 1000> zonegroup_names; // list in pages of 1000
do {
int ret = cfgstore->list_zonegroup_names(dpp, y, listing.next,
zonegroup_names, listing);
    if (ret < 0) {
      ldpp_dout(dpp, 0) << "failed to list zonegroups: " << cpp_strerror(-ret) << dendl;
      return ret;
    }
for (const auto& name : listing.entries) {
RGWZoneGroup zg;
ret = cfgstore->read_zonegroup_by_name(dpp, y, name, zg, nullptr);
if (ret < 0) {
ldpp_dout(dpp, 0) << "WARNING: failed to read zonegroup "
<< name << ": " << cpp_strerror(-ret) << dendl;
continue;
}
if (zg.realm_id != info.realm_id) {
ldpp_dout(dpp, 20) << "skipping zonegroup " << zg.get_name()
<< " with realm id " << zg.realm_id
<< ", not on our realm " << info.realm_id << dendl;
continue;
}
if (zg.master_zone.empty()) {
ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name() << " should have a master zone " << dendl;
return -EINVAL;
}
if (zg.zones.find(zg.master_zone) == zg.zones.end()) {
ldpp_dout(dpp, 0) << "ERROR: zonegroup " << zg.get_name()
<< " has a non existent master zone "<< dendl;
return -EINVAL;
}
if (zg.is_master_zonegroup()) {
info.master_zonegroup = zg.get_id();
info.master_zone = zg.master_zone;
}
ret = info.period_map.update(zg, dpp->get_cct());
if (ret < 0) {
return ret;
}
} // foreach name in listing.entries
} while (!listing.next.empty());
// read the realm's current period config
int ret = cfgstore->read_period_config(dpp, y, info.realm_id,
info.period_config);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed to read period config: "
<< cpp_strerror(ret) << dendl;
return ret;
}
return 0;
}
int commit_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, sal::Driver* driver,
RGWRealm& realm, sal::RealmWriter& realm_writer,
const RGWPeriod& current_period,
RGWPeriod& info, std::ostream& error_stream,
bool force_if_stale)
{
auto zone_svc = static_cast<rgw::sal::RadosStore*>(driver)->svc()->zone; // XXX
ldpp_dout(dpp, 20) << __func__ << " realm " << realm.id
<< " period " << current_period.id << dendl;
// gateway must be in the master zone to commit
if (info.master_zone != zone_svc->get_zone_params().id) {
error_stream << "Cannot commit period on zone "
<< zone_svc->get_zone_params().id << ", it must be sent to "
"the period's master zone " << info.master_zone << '.' << std::endl;
return -EINVAL;
}
// period predecessor must match current period
if (info.predecessor_uuid != current_period.id) {
error_stream << "Period predecessor " << info.predecessor_uuid
<< " does not match current period " << current_period.id
<< ". Use 'period pull' to get the latest period from the master, "
"reapply your changes, and try again." << std::endl;
return -EINVAL;
}
// realm epoch must be 1 greater than current period
if (info.realm_epoch != current_period.realm_epoch + 1) {
error_stream << "Period's realm epoch " << info.realm_epoch
<< " does not come directly after current realm epoch "
<< current_period.realm_epoch << ". Use 'realm pull' to get the "
"latest realm and period from the master zone, reapply your changes, "
"and try again." << std::endl;
return -EINVAL;
}
// did the master zone change?
if (info.master_zone != current_period.master_zone) {
// store the current metadata sync status in the period
int r = info.update_sync_status(dpp, driver, current_period,
error_stream, force_if_stale);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update metadata sync status: "
<< cpp_strerror(-r) << dendl;
return r;
}
// create an object with a new period id
info.period_map.id = info.id = gen_random_uuid();
info.epoch = FIRST_EPOCH;
constexpr bool exclusive = true;
r = cfgstore->create_period(dpp, y, exclusive, info);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to create new period: " << cpp_strerror(-r) << dendl;
return r;
}
// set as current period
r = realm_set_current_period(dpp, y, cfgstore, realm_writer, realm, info);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update realm's current period: "
<< cpp_strerror(-r) << dendl;
return r;
}
ldpp_dout(dpp, 4) << "Promoted to master zone and committed new period "
<< info.id << dendl;
(void) cfgstore->realm_notify_new_period(dpp, y, info);
return 0;
}
// period must be based on current epoch
if (info.epoch != current_period.epoch) {
error_stream << "Period epoch " << info.epoch << " does not match "
"predecessor epoch " << current_period.epoch << ". Use "
"'period pull' to get the latest epoch from the master zone, "
"reapply your changes, and try again." << std::endl;
return -EINVAL;
}
// set period as next epoch
info.id = current_period.id;
info.epoch = current_period.epoch + 1;
info.predecessor_uuid = current_period.predecessor_uuid;
info.realm_epoch = current_period.realm_epoch;
// write the period
constexpr bool exclusive = true;
int r = cfgstore->create_period(dpp, y, exclusive, info);
if (r == -EEXIST) {
// already have this epoch (or a more recent one)
return 0;
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to store period: " << cpp_strerror(r) << dendl;
return r;
}
r = reflect_period(dpp, y, cfgstore, info);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to update local objects: " << cpp_strerror(r) << dendl;
return r;
}
ldpp_dout(dpp, 4) << "Committed new epoch " << info.epoch
<< " for period " << info.id << dendl;
(void) cfgstore->realm_notify_new_period(dpp, y, info);
return 0;
}
int read_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
std::string_view zonegroup_id,
std::string_view zonegroup_name,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer)
{
if (!zonegroup_id.empty()) {
return cfgstore->read_zonegroup_by_id(dpp, y, zonegroup_id, info, writer);
}
if (!zonegroup_name.empty()) {
return cfgstore->read_zonegroup_by_name(dpp, y, zonegroup_name, info, writer);
}
std::string realm_id;
int r = cfgstore->read_default_realm_id(dpp, y, realm_id);
if (r == -ENOENT) {
return cfgstore->read_zonegroup_by_name(dpp, y, default_zonegroup_name,
info, writer);
}
if (r < 0) {
return r;
}
return cfgstore->read_default_zonegroup(dpp, y, realm_id, info, writer);
}
int create_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, bool exclusive,
RGWZoneGroup& info)
{
if (info.name.empty()) {
ldpp_dout(dpp, -1) << __func__ << " requires a zonegroup name" << dendl;
return -EINVAL;
}
if (info.id.empty()) {
info.id = gen_random_uuid();
}
// insert the default placement target if it doesn't exist
constexpr std::string_view default_placement_name = "default-placement";
RGWZoneGroupPlacementTarget placement_target;
placement_target.name = default_placement_name;
info.placement_targets.emplace(default_placement_name, placement_target);
if (info.default_placement.name.empty()) {
info.default_placement.name = default_placement_name;
}
int r = cfgstore->create_zonegroup(dpp, y, exclusive, info, nullptr);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to create zonegroup with "
<< cpp_strerror(r) << dendl;
return r;
}
// try to set as default. may race with another create, so pass exclusive=true
// so we don't override an existing default
r = set_default_zonegroup(dpp, y, cfgstore, info, true);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, 0) << "WARNING: failed to set zonegroup as default: "
<< cpp_strerror(r) << dendl;
}
return 0;
}
static int create_default_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y,
sal::ConfigStore* cfgstore,
bool exclusive,
const RGWZoneParams& default_zone,
RGWZoneGroup& info)
{
info.name = default_zonegroup_name;
info.api_name = default_zonegroup_name;
info.is_master = true;
// enable all supported features
info.enabled_features.insert(rgw::zone_features::enabled.begin(),
rgw::zone_features::enabled.end());
// add the zone to the zonegroup
bool is_master = true;
std::list<std::string> empty_list;
rgw::zone_features::set disable_features; // empty
int r = add_zone_to_group(dpp, info, default_zone, &is_master, nullptr,
empty_list, nullptr, nullptr, empty_list,
empty_list, nullptr, std::nullopt,
info.enabled_features, disable_features);
if (r < 0) {
return r;
}
// write the zone
return create_zonegroup(dpp, y, cfgstore, exclusive, info);
}
int set_default_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWZoneGroup& info,
bool exclusive)
{
return cfgstore->write_default_zonegroup_id(
dpp, y, exclusive, info.realm_id, info.id);
}
int remove_zone_from_group(const DoutPrefixProvider* dpp,
RGWZoneGroup& zonegroup,
const rgw_zone_id& zone_id)
{
auto z = zonegroup.zones.find(zone_id);
if (z == zonegroup.zones.end()) {
return -ENOENT;
}
zonegroup.zones.erase(z);
if (zonegroup.master_zone == zone_id) {
// choose a new master zone
auto m = zonegroup.zones.begin();
if (m != zonegroup.zones.end()) {
zonegroup.master_zone = m->first;
ldpp_dout(dpp, 0) << "NOTICE: promoted " << m->second.name
<< " as new master_zone of zonegroup " << zonegroup.name << dendl;
} else {
ldpp_dout(dpp, 0) << "NOTICE: removed master_zone of zonegroup "
<< zonegroup.name << dendl;
}
}
const bool log_data = zonegroup.zones.size() > 1;
for (auto& [id, zone] : zonegroup.zones) {
zone.log_data = log_data;
}
return 0;
}
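// Editorial note: log_data is recomputed above because data changelogs are only
// needed while the zonegroup still spans more than one zone; shrinking to a
// single zone turns logging off for the remaining zone.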
// try to remove the given zone id from every zonegroup in the cluster
static int remove_zone_from_groups(const DoutPrefixProvider* dpp,
optional_yield y,
sal::ConfigStore* cfgstore,
const rgw_zone_id& zone_id)
{
std::array<std::string, 128> zonegroup_names;
sal::ListResult<std::string> listing;
do {
int r = cfgstore->list_zonegroup_names(dpp, y, listing.next,
zonegroup_names, listing);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to list zonegroups with "
<< cpp_strerror(r) << dendl;
return r;
}
for (const auto& name : listing.entries) {
RGWZoneGroup zonegroup;
std::unique_ptr<sal::ZoneGroupWriter> writer;
r = cfgstore->read_zonegroup_by_name(dpp, y, name, zonegroup, &writer);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: failed to load zonegroup " << name
<< " with " << cpp_strerror(r) << dendl;
continue;
}
r = remove_zone_from_group(dpp, zonegroup, zone_id);
if (r < 0) {
continue;
}
// write the updated zonegroup
r = writer->write(dpp, y, zonegroup);
if (r < 0) {
ldpp_dout(dpp, 0) << "WARNING: failed to write zonegroup " << name
<< " with " << cpp_strerror(r) << dendl;
continue;
}
ldpp_dout(dpp, 0) << "Removed zone from zonegroup " << name << dendl;
}
} while (!listing.next.empty());
return 0;
}
int read_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
std::string_view zone_id,
std::string_view zone_name,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer)
{
if (!zone_id.empty()) {
return cfgstore->read_zone_by_id(dpp, y, zone_id, info, writer);
}
if (!zone_name.empty()) {
return cfgstore->read_zone_by_name(dpp, y, zone_name, info, writer);
}
std::string realm_id;
int r = cfgstore->read_default_realm_id(dpp, y, realm_id);
if (r == -ENOENT) {
return cfgstore->read_zone_by_name(dpp, y, default_zone_name, info, writer);
}
if (r < 0) {
return r;
}
return cfgstore->read_default_zone(dpp, y, realm_id, info, writer);
}
extern int get_zones_pool_set(const DoutPrefixProvider *dpp, optional_yield y,
rgw::sal::ConfigStore* cfgstore,
std::string_view my_zone_id,
std::set<rgw_pool>& pools);
int create_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, bool exclusive,
RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer)
{
if (info.name.empty()) {
ldpp_dout(dpp, -1) << __func__ << " requires a zone name" << dendl;
return -EINVAL;
}
if (info.id.empty()) {
info.id = gen_random_uuid();
}
// add default placement with empty pool name
rgw_pool pool;
auto& placement = info.placement_pools["default-placement"];
placement.storage_classes.set_storage_class(
RGW_STORAGE_CLASS_STANDARD, &pool, nullptr);
// build a set of all pool names used by other zones
std::set<rgw_pool> pools;
int r = get_zones_pool_set(dpp, y, cfgstore, info.id, pools);
if (r < 0) {
return r;
}
// initialize pool names with the zone name prefix
r = init_zone_pool_names(dpp, y, pools, info);
if (r < 0) {
return r;
}
r = cfgstore->create_zone(dpp, y, exclusive, info, nullptr);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to create zone with "
<< cpp_strerror(r) << dendl;
return r;
}
// try to set as default. may race with another create, so pass exclusive=true
// so we don't override an existing default
r = set_default_zone(dpp, y, cfgstore, info, true);
if (r < 0 && r != -EEXIST) {
ldpp_dout(dpp, 0) << "WARNING: failed to set zone as default: "
<< cpp_strerror(r) << dendl;
}
return 0;
}
int set_default_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWZoneParams& info,
bool exclusive)
{
return cfgstore->write_default_zone_id(
dpp, y, exclusive, info.realm_id, info.id);
}
int delete_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWZoneParams& info,
sal::ZoneWriter& writer)
{
// remove this zone from any zonegroups that contain it
int r = remove_zone_from_groups(dpp, y, cfgstore, info.id);
if (r < 0) {
return r;
}
return writer.remove(dpp, y);
}
static int read_or_create_default_zone(const DoutPrefixProvider* dpp,
optional_yield y,
sal::ConfigStore* cfgstore,
RGWZoneParams& info)
{
int r = cfgstore->read_zone_by_name(dpp, y, default_zone_name, info, nullptr);
if (r == -ENOENT) {
info.name = default_zone_name;
constexpr bool exclusive = true;
r = create_zone(dpp, y, cfgstore, exclusive, info, nullptr);
if (r == -EEXIST) {
r = cfgstore->read_zone_by_name(dpp, y, default_zone_name, info, nullptr);
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to create default zone: "
<< cpp_strerror(r) << dendl;
return r;
}
}
return r;
}
static int read_or_create_default_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y,
sal::ConfigStore* cfgstore,
const RGWZoneParams& zone_params,
RGWZoneGroup& info)
{
int r = cfgstore->read_zonegroup_by_name(dpp, y, default_zonegroup_name,
info, nullptr);
if (r == -ENOENT) {
constexpr bool exclusive = true;
r = create_default_zonegroup(dpp, y, cfgstore, exclusive,
zone_params, info);
if (r == -EEXIST) {
r = cfgstore->read_zonegroup_by_name(dpp, y, default_zonegroup_name,
info, nullptr);
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to create default zonegroup: "
<< cpp_strerror(r) << dendl;
return r;
}
}
return r;
}
int SiteConfig::load(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore)
{
// clear existing configuration
zone = nullptr;
zonegroup = nullptr;
local_zonegroup = std::nullopt;
period = std::nullopt;
zone_params = RGWZoneParams{};
int r = 0;
// try to load a realm
realm.emplace();
std::string realm_name = dpp->get_cct()->_conf->rgw_realm;
if (!realm_name.empty()) {
r = cfgstore->read_realm_by_name(dpp, y, realm_name, *realm, nullptr);
} else {
r = cfgstore->read_default_realm(dpp, y, *realm, nullptr);
if (r == -ENOENT) { // no realm
r = 0;
realm = std::nullopt;
}
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to load realm: " << cpp_strerror(r) << dendl;
return r;
}
// try to load the local zone params
std::string zone_name = dpp->get_cct()->_conf->rgw_zone;
if (!zone_name.empty()) {
r = cfgstore->read_zone_by_name(dpp, y, zone_name, zone_params, nullptr);
} else if (realm) {
// load the realm's default zone
r = cfgstore->read_default_zone(dpp, y, realm->id, zone_params, nullptr);
} else {
// load or create the "default" zone
r = read_or_create_default_zone(dpp, y, cfgstore, zone_params);
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to load zone: " << cpp_strerror(r) << dendl;
return r;
}
if (!realm && !zone_params.realm_id.empty()) {
realm.emplace();
r = cfgstore->read_realm_by_id(dpp, y, zone_params.realm_id,
*realm, nullptr);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to load realm: " << cpp_strerror(r) << dendl;
return r;
}
}
if (realm) {
// try to load the realm's period
r = load_period_zonegroup(dpp, y, cfgstore, *realm, zone_params.id);
} else {
// fall back to a local zonegroup
r = load_local_zonegroup(dpp, y, cfgstore, zone_params.id);
}
return r;
}
int SiteConfig::load_period_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y,
sal::ConfigStore* cfgstore,
const RGWRealm& realm,
const rgw_zone_id& zone_id)
{
// load the realm's current period
period.emplace();
int r = cfgstore->read_period(dpp, y, realm.current_period,
std::nullopt, *period);
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to load current period: "
<< cpp_strerror(r) << dendl;
return r;
}
// find our zone and zonegroup in the period
for (const auto& zg : period->period_map.zonegroups) {
auto z = zg.second.zones.find(zone_id);
if (z != zg.second.zones.end()) {
zone = &z->second;
zonegroup = &zg.second;
return 0;
}
}
ldpp_dout(dpp, 0) << "ERROR: current period " << period->id
<< " does not contain zone id " << zone_id << dendl;
period = std::nullopt;
return -ENOENT;
}
int SiteConfig::load_local_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y,
sal::ConfigStore* cfgstore,
const rgw_zone_id& zone_id)
{
int r = 0;
// load the zonegroup
local_zonegroup.emplace();
std::string zonegroup_name = dpp->get_cct()->_conf->rgw_zonegroup;
if (!zonegroup_name.empty()) {
r = cfgstore->read_zonegroup_by_name(dpp, y, zonegroup_name,
*local_zonegroup, nullptr);
} else {
r = read_or_create_default_zonegroup(dpp, y, cfgstore, zone_params,
*local_zonegroup);
}
if (r < 0) {
ldpp_dout(dpp, 0) << "failed to load zonegroup: "
<< cpp_strerror(r) << dendl;
} else {
// find our zone in the zonegroup
auto z = local_zonegroup->zones.find(zone_id);
if (z != local_zonegroup->zones.end()) {
zone = &z->second;
zonegroup = &*local_zonegroup;
return 0;
}
ldpp_dout(dpp, 0) << "ERROR: zonegroup " << local_zonegroup->id
<< " does not contain zone id " << zone_id << dendl;
r = -ENOENT;
}
local_zonegroup = std::nullopt;
return r;
}
} // namespace rgw
static inline int conf_to_uint64(const JSONFormattable& config, const string& key, uint64_t *pval)
{
string sval;
if (config.find(key, &sval)) {
string err;
uint64_t val = strict_strtoll(sval.c_str(), 10, &err);
if (!err.empty()) {
return -EINVAL;
}
*pval = val;
}
return 0;
}
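// Hedged usage sketch (key and default are hypothetical): parse an optional
// numeric knob, keeping the caller's default when the key is absent and
// leaving *pval untouched on non-numeric input.
//
//   uint64_t threshold = DEFAULT_MULTIPART_SYNC_PART_SIZE;
//   if (conf_to_uint64(config, "multipart_sync_threshold", &threshold) < 0) {
//     // non-numeric value supplied; threshold still holds the default
//   }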
int RGWZoneGroupPlacementTier::update_params(const JSONFormattable& config)
{
int r = -1;
if (config.exists("retain_head_object")) {
string s = config["retain_head_object"];
if (s == "true") {
retain_head_object = true;
} else {
retain_head_object = false;
}
}
if (tier_type == "cloud-s3") {
r = t.s3.update_params(config);
}
return r;
}
int RGWZoneGroupPlacementTier::clear_params(const JSONFormattable& config)
{
if (config.exists("retain_head_object")) {
retain_head_object = false;
}
if (tier_type == "cloud-s3") {
t.s3.clear_params(config);
}
return 0;
}
int RGWZoneGroupPlacementTierS3::update_params(const JSONFormattable& config)
{
int r = -1;
if (config.exists("endpoint")) {
endpoint = config["endpoint"];
}
if (config.exists("target_path")) {
target_path = config["target_path"];
}
if (config.exists("region")) {
region = config["region"];
}
if (config.exists("host_style")) {
string s;
s = config["host_style"];
if (s != "virtual") {
host_style = PathStyle;
} else {
host_style = VirtualStyle;
}
}
if (config.exists("target_storage_class")) {
target_storage_class = config["target_storage_class"];
}
if (config.exists("access_key")) {
key.id = config["access_key"];
}
if (config.exists("secret")) {
key.key = config["secret"];
}
if (config.exists("multipart_sync_threshold")) {
r = conf_to_uint64(config, "multipart_sync_threshold", &multipart_sync_threshold);
if (r < 0) {
multipart_sync_threshold = DEFAULT_MULTIPART_SYNC_PART_SIZE;
}
}
if (config.exists("multipart_min_part_size")) {
r = conf_to_uint64(config, "multipart_min_part_size", &multipart_min_part_size);
if (r < 0) {
multipart_min_part_size = DEFAULT_MULTIPART_SYNC_PART_SIZE;
}
}
if (config.exists("acls")) {
const JSONFormattable& cc = config["acls"];
if (cc.is_array()) {
for (auto& c : cc.array()) {
RGWTierACLMapping m;
m.init(c);
if (!m.source_id.empty()) {
acl_mappings[m.source_id] = m;
}
}
} else {
RGWTierACLMapping m;
m.init(cc);
if (!m.source_id.empty()) {
acl_mappings[m.source_id] = m;
}
}
}
return 0;
}
int RGWZoneGroupPlacementTierS3::clear_params(const JSONFormattable& config)
{
if (config.exists("endpoint")) {
endpoint.clear();
}
if (config.exists("target_path")) {
target_path.clear();
}
if (config.exists("region")) {
region.clear();
}
if (config.exists("host_style")) {
/* default */
host_style = PathStyle;
}
if (config.exists("target_storage_class")) {
target_storage_class.clear();
}
if (config.exists("access_key")) {
key.id.clear();
}
if (config.exists("secret")) {
key.key.clear();
}
if (config.exists("multipart_sync_threshold")) {
multipart_sync_threshold = DEFAULT_MULTIPART_SYNC_PART_SIZE;
}
if (config.exists("multipart_min_part_size")) {
multipart_min_part_size = DEFAULT_MULTIPART_SYNC_PART_SIZE;
}
if (config.exists("acls")) {
const JSONFormattable& cc = config["acls"];
if (cc.is_array()) {
for (auto& c : cc.array()) {
RGWTierACLMapping m;
m.init(c);
acl_mappings.erase(m.source_id);
}
} else {
RGWTierACLMapping m;
m.init(cc);
acl_mappings.erase(m.source_id);
}
}
return 0;
}
void rgw_meta_sync_info::generate_test_instances(list<rgw_meta_sync_info*>& o)
{
auto info = new rgw_meta_sync_info;
info->state = rgw_meta_sync_info::StateBuildingFullSyncMaps;
info->period = "periodid";
info->realm_epoch = 5;
o.push_back(info);
o.push_back(new rgw_meta_sync_info);
}
void rgw_meta_sync_marker::generate_test_instances(list<rgw_meta_sync_marker*>& o)
{
auto marker = new rgw_meta_sync_marker;
marker->state = rgw_meta_sync_marker::IncrementalSync;
marker->marker = "01234";
marker->realm_epoch = 5;
o.push_back(marker);
o.push_back(new rgw_meta_sync_marker);
}
void rgw_meta_sync_status::generate_test_instances(list<rgw_meta_sync_status*>& o)
{
o.push_back(new rgw_meta_sync_status);
}
void RGWZoneParams::generate_test_instances(list<RGWZoneParams*> &o)
{
o.push_back(new RGWZoneParams);
o.push_back(new RGWZoneParams);
}
void RGWPeriodLatestEpochInfo::generate_test_instances(list<RGWPeriodLatestEpochInfo*> &o)
{
RGWPeriodLatestEpochInfo *z = new RGWPeriodLatestEpochInfo;
o.push_back(z);
o.push_back(new RGWPeriodLatestEpochInfo);
}
void RGWZoneGroup::generate_test_instances(list<RGWZoneGroup*>& o)
{
RGWZoneGroup *r = new RGWZoneGroup;
o.push_back(r);
o.push_back(new RGWZoneGroup);
}
void RGWPeriodLatestEpochInfo::dump(Formatter *f) const {
encode_json("latest_epoch", epoch, f);
}
void RGWPeriodLatestEpochInfo::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("latest_epoch", epoch, obj);
}
void RGWNameToId::dump(Formatter *f) const {
encode_json("obj_id", obj_id, f);
}
void RGWNameToId::decode_json(JSONObj *obj) {
JSONDecoder::decode_json("obj_id", obj_id, obj);
}
| 46,852 | 30.131561 | 136 | cc |
| null | ceph-main/src/rgw/driver/rados/rgw_zone.h |
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#pragma once
#include <ostream>
#include "rgw_zone_types.h"
#include "rgw_common.h"
#include "rgw_sal_fwd.h"
#include "rgw_sync_policy.h"
class RGWSyncModulesManager;
class RGWSI_SysObj;
class RGWSI_Zone;
class RGWSystemMetaObj {
public:
std::string id;
std::string name;
CephContext *cct{nullptr};
RGWSI_SysObj *sysobj_svc{nullptr};
RGWSI_Zone *zone_svc{nullptr};
int store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
int read_info(const DoutPrefixProvider *dpp, const std::string& obj_id, optional_yield y, bool old_format = false);
int read_id(const DoutPrefixProvider *dpp, const std::string& obj_name, std::string& obj_id, optional_yield y);
int read_default(const DoutPrefixProvider *dpp,
RGWDefaultSystemMetaObjInfo& default_info,
const std::string& oid,
optional_yield y);
/* read and use default id */
int use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
public:
RGWSystemMetaObj() {}
RGWSystemMetaObj(const std::string& _name): name(_name) {}
RGWSystemMetaObj(const std::string& _id, const std::string& _name) : id(_id), name(_name) {}
RGWSystemMetaObj(CephContext *_cct, RGWSI_SysObj *_sysobj_svc) {
reinit_instance(_cct, _sysobj_svc);
}
RGWSystemMetaObj(const std::string& _name, CephContext *_cct, RGWSI_SysObj *_sysobj_svc): name(_name) {
reinit_instance(_cct, _sysobj_svc);
}
const std::string& get_name() const { return name; }
const std::string& get_id() const { return id; }
  void set_name(const std::string& _name) { name = _name; }
  void set_id(const std::string& _id) { id = _id; }
void clear_id() { id.clear(); }
virtual ~RGWSystemMetaObj() {}
virtual void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(id, bl);
encode(name, bl);
ENCODE_FINISH(bl);
}
virtual void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(id, bl);
decode(name, bl);
DECODE_FINISH(bl);
}
void reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc);
int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc,
optional_yield y,
bool setup_obj = true, bool old_format = false);
virtual int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y,
bool old_format = false);
virtual int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false);
int delete_default();
virtual int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
int delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
int rename(const DoutPrefixProvider *dpp, const std::string& new_name, optional_yield y);
int update(const DoutPrefixProvider *dpp, optional_yield y) { return store_info(dpp, false, y);}
int update_name(const DoutPrefixProvider *dpp, optional_yield y) { return store_name(dpp, false, y);}
int read(const DoutPrefixProvider *dpp, optional_yield y);
int write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
virtual rgw_pool get_pool(CephContext *cct) const = 0;
virtual const std::string get_default_oid(bool old_format = false) const = 0;
virtual const std::string& get_names_oid_prefix() const = 0;
virtual const std::string& get_info_oid_prefix(bool old_format = false) const = 0;
virtual std::string get_predefined_id(CephContext *cct) const = 0;
virtual const std::string& get_predefined_name(CephContext *cct) const = 0;
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
};
WRITE_CLASS_ENCODER(RGWSystemMetaObj)
struct RGWZoneParams : RGWSystemMetaObj {
rgw_pool domain_root;
rgw_pool control_pool;
rgw_pool gc_pool;
rgw_pool lc_pool;
rgw_pool log_pool;
rgw_pool intent_log_pool;
rgw_pool usage_log_pool;
rgw_pool user_keys_pool;
rgw_pool user_email_pool;
rgw_pool user_swift_pool;
rgw_pool user_uid_pool;
rgw_pool roles_pool;
rgw_pool reshard_pool;
rgw_pool otp_pool;
rgw_pool oidc_pool;
rgw_pool notif_pool;
RGWAccessKey system_key;
std::map<std::string, RGWZonePlacementInfo> placement_pools;
std::string realm_id;
JSONFormattable tier_config;
RGWZoneParams() : RGWSystemMetaObj() {}
explicit RGWZoneParams(const std::string& name) : RGWSystemMetaObj(name){}
RGWZoneParams(const rgw_zone_id& id, const std::string& name) : RGWSystemMetaObj(id.id, name) {}
RGWZoneParams(const rgw_zone_id& id, const std::string& name, const std::string& _realm_id)
: RGWSystemMetaObj(id.id, name), realm_id(_realm_id) {}
virtual ~RGWZoneParams();
rgw_pool get_pool(CephContext *cct) const override;
const std::string get_default_oid(bool old_format = false) const override;
const std::string& get_names_oid_prefix() const override;
const std::string& get_info_oid_prefix(bool old_format = false) const override;
std::string get_predefined_id(CephContext *cct) const override;
const std::string& get_predefined_name(CephContext *cct) const override;
int init(const DoutPrefixProvider *dpp,
CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y,
bool setup_obj = true, bool old_format = false);
using RGWSystemMetaObj::init;
int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override;
int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override;
int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override;
int fix_pool_names(const DoutPrefixProvider *dpp, optional_yield y);
const std::string& get_compression_type(const rgw_placement_rule& placement_rule) const;
void encode(bufferlist& bl) const override {
ENCODE_START(14, 1, bl);
encode(domain_root, bl);
encode(control_pool, bl);
encode(gc_pool, bl);
encode(log_pool, bl);
encode(intent_log_pool, bl);
encode(usage_log_pool, bl);
encode(user_keys_pool, bl);
encode(user_email_pool, bl);
encode(user_swift_pool, bl);
encode(user_uid_pool, bl);
RGWSystemMetaObj::encode(bl);
encode(system_key, bl);
encode(placement_pools, bl);
rgw_pool unused_metadata_heap;
encode(unused_metadata_heap, bl);
encode(realm_id, bl);
encode(lc_pool, bl);
std::map<std::string, std::string, ltstr_nocase> old_tier_config;
encode(old_tier_config, bl);
encode(roles_pool, bl);
encode(reshard_pool, bl);
encode(otp_pool, bl);
encode(tier_config, bl);
encode(oidc_pool, bl);
encode(notif_pool, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) override {
DECODE_START(14, bl);
decode(domain_root, bl);
decode(control_pool, bl);
decode(gc_pool, bl);
decode(log_pool, bl);
decode(intent_log_pool, bl);
decode(usage_log_pool, bl);
decode(user_keys_pool, bl);
decode(user_email_pool, bl);
decode(user_swift_pool, bl);
decode(user_uid_pool, bl);
if (struct_v >= 6) {
RGWSystemMetaObj::decode(bl);
} else if (struct_v >= 2) {
decode(name, bl);
id = name;
}
if (struct_v >= 3)
decode(system_key, bl);
if (struct_v >= 4)
decode(placement_pools, bl);
if (struct_v >= 5) {
rgw_pool unused_metadata_heap;
decode(unused_metadata_heap, bl);
}
if (struct_v >= 6) {
decode(realm_id, bl);
}
if (struct_v >= 7) {
decode(lc_pool, bl);
} else {
lc_pool = log_pool.name + ":lc";
}
std::map<std::string, std::string, ltstr_nocase> old_tier_config;
if (struct_v >= 8) {
decode(old_tier_config, bl);
}
if (struct_v >= 9) {
decode(roles_pool, bl);
} else {
roles_pool = name + ".rgw.meta:roles";
}
if (struct_v >= 10) {
decode(reshard_pool, bl);
} else {
reshard_pool = log_pool.name + ":reshard";
}
if (struct_v >= 11) {
::decode(otp_pool, bl);
} else {
otp_pool = name + ".rgw.otp";
}
if (struct_v >= 12) {
::decode(tier_config, bl);
} else {
for (auto& kv : old_tier_config) {
tier_config.set(kv.first, kv.second);
}
}
if (struct_v >= 13) {
::decode(oidc_pool, bl);
} else {
oidc_pool = name + ".rgw.meta:oidc";
}
if (struct_v >= 14) {
decode(notif_pool, bl);
} else {
notif_pool = log_pool.name + ":notif";
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<RGWZoneParams*>& o);
bool get_placement(const std::string& placement_id, RGWZonePlacementInfo *placement) const {
auto iter = placement_pools.find(placement_id);
if (iter == placement_pools.end()) {
return false;
}
*placement = iter->second;
return true;
}
/*
* return data pool of the head object
*/
bool get_head_data_pool(const rgw_placement_rule& placement_rule, const rgw_obj& obj, rgw_pool* pool) const {
const rgw_data_placement_target& explicit_placement = obj.bucket.explicit_placement;
if (!explicit_placement.data_pool.empty()) {
if (!obj.in_extra_data) {
*pool = explicit_placement.data_pool;
} else {
*pool = explicit_placement.get_data_extra_pool();
}
return true;
}
if (placement_rule.empty()) {
return false;
}
auto iter = placement_pools.find(placement_rule.name);
if (iter == placement_pools.end()) {
return false;
}
if (!obj.in_extra_data) {
*pool = iter->second.get_data_pool(placement_rule.storage_class);
} else {
*pool = iter->second.get_data_extra_pool();
}
return true;
}
bool valid_placement(const rgw_placement_rule& rule) const {
auto iter = placement_pools.find(rule.name);
if (iter == placement_pools.end()) {
return false;
}
return iter->second.storage_class_exists(rule.storage_class);
}
};
WRITE_CLASS_ENCODER(RGWZoneParams)
struct RGWZoneGroup : public RGWSystemMetaObj {
std::string api_name;
std::list<std::string> endpoints;
bool is_master = false;
rgw_zone_id master_zone;
std::map<rgw_zone_id, RGWZone> zones;
std::map<std::string, RGWZoneGroupPlacementTarget> placement_targets;
rgw_placement_rule default_placement;
std::list<std::string> hostnames;
std::list<std::string> hostnames_s3website;
// TODO: Maybe convert hostnames to a map<std::string,std::list<std::string>> for
// endpoint_type->hostnames
/*
20:05 < _robbat21irssi> maybe I do something like: if (hostname_map.empty()) { populate all map keys from hostnames; };
20:05 < _robbat21irssi> but that's a later compatibility migration planning bit
20:06 < yehudasa> more like if (!hostnames.empty()) {
20:06 < yehudasa> for (std::list<std::string>::iterator iter = hostnames.begin(); iter != hostnames.end(); ++iter) {
20:06 < yehudasa> hostname_map["s3"].append(iter->second);
20:07 < yehudasa> hostname_map["s3website"].append(iter->second);
20:07 < yehudasa> s/append/push_back/g
20:08 < _robbat21irssi> inner loop over APIs
20:08 < yehudasa> yeah, probably
20:08 < _robbat21irssi> s3, s3website, swift, swift_auth, swift_website
*/
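  // A minimal sketch (added for illustration, not part of the original
  // source) of the migration the conversation above describes: seeding the
  // per-API hostname map from the flat hostnames list. The member names come
  // from this struct; the helper itself and the exact API list are assumptions.
#if 0
  void populate_api_hostname_map() {
    if (hostnames.empty() || !api_hostname_map.empty()) {
      return; // nothing to migrate, or already populated
    }
    static const char* apis[] = {"s3", "s3website", "swift",
                                 "swift_auth", "swift_website"};
    for (const auto& h : hostnames) {
      for (const char* api : apis) {
        api_hostname_map[api].push_back(h);
      }
    }
  }
#endif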
std::map<std::string, std::list<std::string> > api_hostname_map;
std::map<std::string, std::list<std::string> > api_endpoints_map;
std::string realm_id;
rgw_sync_policy_info sync_policy;
rgw::zone_features::set enabled_features;
RGWZoneGroup(): is_master(false){}
RGWZoneGroup(const std::string &id, const std::string &name):RGWSystemMetaObj(id, name) {}
explicit RGWZoneGroup(const std::string &_name):RGWSystemMetaObj(_name) {}
RGWZoneGroup(const std::string &_name, bool _is_master, CephContext *cct, RGWSI_SysObj* sysobj_svc,
const std::string& _realm_id, const std::list<std::string>& _endpoints)
: RGWSystemMetaObj(_name, cct , sysobj_svc), endpoints(_endpoints), is_master(_is_master),
realm_id(_realm_id) {}
virtual ~RGWZoneGroup();
bool is_master_zonegroup() const { return is_master;}
void update_master(const DoutPrefixProvider *dpp, bool _is_master, optional_yield y) {
is_master = _is_master;
post_process_params(dpp, y);
}
void post_process_params(const DoutPrefixProvider *dpp, optional_yield y);
void encode(bufferlist& bl) const override {
ENCODE_START(6, 1, bl);
encode(name, bl);
encode(api_name, bl);
encode(is_master, bl);
encode(endpoints, bl);
encode(master_zone, bl);
encode(zones, bl);
encode(placement_targets, bl);
encode(default_placement, bl);
encode(hostnames, bl);
encode(hostnames_s3website, bl);
RGWSystemMetaObj::encode(bl);
encode(realm_id, bl);
encode(sync_policy, bl);
encode(enabled_features, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) override {
DECODE_START(6, bl);
decode(name, bl);
decode(api_name, bl);
decode(is_master, bl);
decode(endpoints, bl);
decode(master_zone, bl);
decode(zones, bl);
decode(placement_targets, bl);
decode(default_placement, bl);
if (struct_v >= 2) {
decode(hostnames, bl);
}
if (struct_v >= 3) {
decode(hostnames_s3website, bl);
}
if (struct_v >= 4) {
RGWSystemMetaObj::decode(bl);
decode(realm_id, bl);
} else {
id = name;
}
if (struct_v >= 5) {
decode(sync_policy, bl);
}
if (struct_v >= 6) {
decode(enabled_features, bl);
}
DECODE_FINISH(bl);
}
int read_default_id(const DoutPrefixProvider *dpp, std::string& default_id, optional_yield y, bool old_format = false) override;
int set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = false) override;
int create_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format = false);
int equals(const std::string& other_zonegroup) const;
int add_zone(const DoutPrefixProvider *dpp,
const RGWZoneParams& zone_params, bool *is_master, bool *read_only,
const std::list<std::string>& endpoints, const std::string *ptier_type,
bool *psync_from_all, std::list<std::string>& sync_from,
std::list<std::string>& sync_from_rm, std::string *predirect_zone,
std::optional<int> bucket_index_max_shards, RGWSyncModulesManager *sync_mgr,
const rgw::zone_features::set& enable_features,
const rgw::zone_features::set& disable_features,
optional_yield y);
int remove_zone(const DoutPrefixProvider *dpp, const std::string& zone_id, optional_yield y);
int rename_zone(const DoutPrefixProvider *dpp, const RGWZoneParams& zone_params, optional_yield y);
rgw_pool get_pool(CephContext *cct) const override;
const std::string get_default_oid(bool old_region_format = false) const override;
const std::string& get_info_oid_prefix(bool old_region_format = false) const override;
const std::string& get_names_oid_prefix() const override;
std::string get_predefined_id(CephContext *cct) const override;
const std::string& get_predefined_name(CephContext *cct) const override;
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<RGWZoneGroup*>& o);
bool supports(std::string_view feature) const {
return enabled_features.contains(feature);
}
};
WRITE_CLASS_ENCODER(RGWZoneGroup)
struct RGWPeriodMap
{
std::string id;
std::map<std::string, RGWZoneGroup> zonegroups;
std::map<std::string, RGWZoneGroup> zonegroups_by_api;
std::map<std::string, uint32_t> short_zone_ids;
std::string master_zonegroup;
void encode(bufferlist& bl) const;
void decode(bufferlist::const_iterator& bl);
int update(const RGWZoneGroup& zonegroup, CephContext *cct);
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
void reset() {
zonegroups.clear();
zonegroups_by_api.clear();
master_zonegroup.clear();
}
uint32_t get_zone_short_id(const std::string& zone_id) const;
bool find_zone_by_id(const rgw_zone_id& zone_id,
RGWZoneGroup *zonegroup,
RGWZone *zone) const;
bool find_zone_by_name(const std::string& zone_id,
RGWZoneGroup *zonegroup,
RGWZone *zone) const;
};
WRITE_CLASS_ENCODER(RGWPeriodMap)
struct RGWPeriodConfig
{
RGWQuota quota;
RGWRateLimitInfo user_ratelimit;
RGWRateLimitInfo bucket_ratelimit;
// rate limit unauthenticated user
RGWRateLimitInfo anon_ratelimit;
void encode(bufferlist& bl) const {
ENCODE_START(2, 1, bl);
encode(quota.bucket_quota, bl);
encode(quota.user_quota, bl);
encode(bucket_ratelimit, bl);
encode(user_ratelimit, bl);
encode(anon_ratelimit, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(2, bl);
decode(quota.bucket_quota, bl);
decode(quota.user_quota, bl);
if (struct_v >= 2) {
decode(bucket_ratelimit, bl);
decode(user_ratelimit, bl);
decode(anon_ratelimit, bl);
}
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
// the period config must be stored in a local object outside of the period,
// so that it can be used in a default configuration where no realm/period
// exists
int read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
int write(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y);
static std::string get_oid(const std::string& realm_id);
static rgw_pool get_pool(CephContext *cct);
};
WRITE_CLASS_ENCODER(RGWPeriodConfig)
class RGWRealm;
class RGWPeriod;
class RGWRealm : public RGWSystemMetaObj
{
public:
std::string current_period;
  epoch_t epoch{0}; ///< realm epoch, incremented for each new period
int create_control(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
int delete_control(const DoutPrefixProvider *dpp, optional_yield y);
public:
RGWRealm() {}
RGWRealm(const std::string& _id, const std::string& _name = "") : RGWSystemMetaObj(_id, _name) {}
RGWRealm(CephContext *_cct, RGWSI_SysObj *_sysobj_svc): RGWSystemMetaObj(_cct, _sysobj_svc) {}
RGWRealm(const std::string& _name, CephContext *_cct, RGWSI_SysObj *_sysobj_svc): RGWSystemMetaObj(_name, _cct, _sysobj_svc){}
virtual ~RGWRealm() override;
void encode(bufferlist& bl) const override {
ENCODE_START(1, 1, bl);
RGWSystemMetaObj::encode(bl);
encode(current_period, bl);
encode(epoch, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) override {
DECODE_START(1, bl);
RGWSystemMetaObj::decode(bl);
decode(current_period, bl);
decode(epoch, bl);
DECODE_FINISH(bl);
}
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true) override;
int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
rgw_pool get_pool(CephContext *cct) const override;
const std::string get_default_oid(bool old_format = false) const override;
const std::string& get_names_oid_prefix() const override;
const std::string& get_info_oid_prefix(bool old_format = false) const override;
std::string get_predefined_id(CephContext *cct) const override;
const std::string& get_predefined_name(CephContext *cct) const override;
using RGWSystemMetaObj::read_id; // expose as public for radosgw-admin
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<RGWRealm*>& o);
const std::string& get_current_period() const {
return current_period;
}
int set_current_period(const DoutPrefixProvider *dpp, RGWPeriod& period, optional_yield y);
void clear_current_period_and_epoch() {
current_period.clear();
epoch = 0;
}
epoch_t get_epoch() const { return epoch; }
std::string get_control_oid() const;
/// send a notify on the realm control object
int notify_zone(const DoutPrefixProvider *dpp, bufferlist& bl, optional_yield y);
/// notify the zone of a new period
int notify_new_period(const DoutPrefixProvider *dpp, const RGWPeriod& period, optional_yield y);
int find_zone(const DoutPrefixProvider *dpp,
const rgw_zone_id& zid,
RGWPeriod *pperiod,
RGWZoneGroup *pzonegroup,
bool *pfound,
optional_yield y) const;
};
WRITE_CLASS_ENCODER(RGWRealm)
struct RGWPeriodLatestEpochInfo {
epoch_t epoch = 0;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(epoch, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(epoch, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<RGWPeriodLatestEpochInfo*>& o);
};
WRITE_CLASS_ENCODER(RGWPeriodLatestEpochInfo)
/*
* The RGWPeriod object contains the entire configuration of a
* RGWRealm, including its RGWZoneGroups and RGWZones. Consistency of
* this configuration is maintained across all zones by passing around
* the RGWPeriod object in its JSON representation.
*
* If a new configuration changes which zone is the metadata master
* zone (i.e., master zone of the master zonegroup), then a new
* RGWPeriod::id (a uuid) is generated, its RGWPeriod::realm_epoch is
* incremented, and the RGWRealm object is updated to reflect that new
* current_period id and epoch. If the configuration changes BUT which
* zone is the metadata master does NOT change, then only the
* RGWPeriod::epoch is incremented (and the RGWPeriod::id remains the
* same).
*
* When a new RGWPeriod is created with a new RGWPeriod::id (uuid), it
* is linked back to its predecessor RGWPeriod through the
* RGWPeriod::predecessor_uuid field, thus creating a "linked
* list"-like structure of RGWPeriods back to the cluster's creation.
*/
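/*
 * A short sketch (added for illustration, not part of the original header)
 * of the two versioning paths described above, using only the setters
 * declared on RGWPeriod below. The uuid helper is a hypothetical placeholder.
 */
#if 0
void bump_period(const RGWPeriod& current, bool master_zone_changed)
{
  if (master_zone_changed) {
    // new period: fresh uuid, linked back to its predecessor, and the
    // realm epoch advances; the realm then points at the new period id
    RGWPeriod next;
    next.set_id(generate_uuid()); // hypothetical uuid helper
    next.set_predecessor(current.get_id());
    next.set_realm_epoch(current.get_realm_epoch() + 1);
  } else {
    // same period id; only the period's own epoch advances
    RGWPeriod next = current;
    next.set_epoch(current.get_epoch() + 1);
  }
}
#endif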
class RGWPeriod
{
public:
  std::string id; ///< a uuid
epoch_t epoch{0};
std::string predecessor_uuid;
std::vector<std::string> sync_status;
RGWPeriodMap period_map;
RGWPeriodConfig period_config;
std::string master_zonegroup;
rgw_zone_id master_zone;
std::string realm_id;
std::string realm_name;
  epoch_t realm_epoch{1}; ///< realm epoch when period was made current
CephContext *cct{nullptr};
RGWSI_SysObj *sysobj_svc{nullptr};
int read_info(const DoutPrefixProvider *dpp, optional_yield y);
int read_latest_epoch(const DoutPrefixProvider *dpp,
RGWPeriodLatestEpochInfo& epoch_info,
optional_yield y,
RGWObjVersionTracker *objv = nullptr);
int use_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y);
int use_current_period();
const std::string get_period_oid() const;
const std::string get_period_oid_prefix() const;
// gather the metadata sync status for each shard; only for use on master zone
int update_sync_status(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
                         const RGWPeriod& current_period,
std::ostream& error_stream, bool force_if_stale);
public:
RGWPeriod() {}
explicit RGWPeriod(const std::string& period_id, epoch_t _epoch = 0)
: id(period_id), epoch(_epoch) {}
const std::string& get_id() const { return id; }
epoch_t get_epoch() const { return epoch; }
epoch_t get_realm_epoch() const { return realm_epoch; }
const std::string& get_predecessor() const { return predecessor_uuid; }
const rgw_zone_id& get_master_zone() const { return master_zone; }
const std::string& get_master_zonegroup() const { return master_zonegroup; }
const std::string& get_realm() const { return realm_id; }
const std::string& get_realm_name() const { return realm_name; }
const RGWPeriodMap& get_map() const { return period_map; }
RGWPeriodConfig& get_config() { return period_config; }
const RGWPeriodConfig& get_config() const { return period_config; }
const std::vector<std::string>& get_sync_status() const { return sync_status; }
rgw_pool get_pool(CephContext *cct) const;
const std::string& get_latest_epoch_oid() const;
const std::string& get_info_oid_prefix() const;
void set_user_quota(RGWQuotaInfo& user_quota) {
period_config.quota.user_quota = user_quota;
}
void set_bucket_quota(RGWQuotaInfo& bucket_quota) {
period_config.quota.bucket_quota = bucket_quota;
}
void set_id(const std::string& _id) {
this->id = _id;
period_map.id = _id;
}
void set_epoch(epoch_t epoch) { this->epoch = epoch; }
void set_realm_epoch(epoch_t epoch) { realm_epoch = epoch; }
void set_predecessor(const std::string& predecessor)
{
predecessor_uuid = predecessor;
}
void set_realm_id(const std::string& _realm_id) {
realm_id = _realm_id;
}
int reflect(const DoutPrefixProvider *dpp, optional_yield y);
int get_zonegroup(RGWZoneGroup& zonegroup,
const std::string& zonegroup_id) const;
bool is_single_zonegroup() const
{
return (period_map.zonegroups.size() <= 1);
}
/*
    returns true if there are several zone groups with at least one zone
*/
bool is_multi_zonegroups_with_zones() const
{
int count = 0;
for (const auto& zg: period_map.zonegroups) {
if (zg.second.zones.size() > 0) {
if (count++ > 0) {
return true;
}
}
}
return false;
}
bool find_zone(const DoutPrefixProvider *dpp,
const rgw_zone_id& zid,
RGWZoneGroup *pzonegroup,
optional_yield y) const;
int get_latest_epoch(const DoutPrefixProvider *dpp, epoch_t& epoch, optional_yield y);
int set_latest_epoch(const DoutPrefixProvider *dpp, optional_yield y,
epoch_t epoch, bool exclusive = false,
RGWObjVersionTracker *objv = nullptr);
// update latest_epoch if the given epoch is higher, else return -EEXIST
int update_latest_epoch(const DoutPrefixProvider *dpp, epoch_t epoch, optional_yield y);
int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, const std::string &period_realm_id, optional_yield y,
const std::string &period_realm_name = "", bool setup_obj = true);
int init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj = true);
int create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive = true);
int delete_obj(const DoutPrefixProvider *dpp, optional_yield y);
int store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y);
int add_zonegroup(const DoutPrefixProvider *dpp, const RGWZoneGroup& zonegroup, optional_yield y);
void fork();
int update(const DoutPrefixProvider *dpp, optional_yield y);
// commit a staging period; only for use on master zone
int commit(const DoutPrefixProvider *dpp,
rgw::sal::Driver* driver,
             RGWRealm& realm, const RGWPeriod& current_period,
std::ostream& error_stream, optional_yield y,
bool force_if_stale = false);
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(id, bl);
encode(epoch, bl);
encode(realm_epoch, bl);
encode(predecessor_uuid, bl);
encode(sync_status, bl);
encode(period_map, bl);
encode(master_zone, bl);
encode(master_zonegroup, bl);
encode(period_config, bl);
encode(realm_id, bl);
encode(realm_name, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(id, bl);
decode(epoch, bl);
decode(realm_epoch, bl);
decode(predecessor_uuid, bl);
decode(sync_status, bl);
decode(period_map, bl);
decode(master_zone, bl);
decode(master_zonegroup, bl);
decode(period_config, bl);
decode(realm_id, bl);
decode(realm_name, bl);
DECODE_FINISH(bl);
}
void dump(Formatter *f) const;
void decode_json(JSONObj *obj);
static void generate_test_instances(std::list<RGWPeriod*>& o);
static std::string get_staging_id(const std::string& realm_id) {
return realm_id + ":staging";
}
};
WRITE_CLASS_ENCODER(RGWPeriod)
namespace rgw {
/// Look up a realm by its id. If no id is given, look it up by name.
/// If no name is given, fall back to the cluster's default realm.
int read_realm(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
std::string_view realm_id,
std::string_view realm_name,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer = nullptr);
/// Create a realm and its initial period. If the info.id is empty, a
/// random uuid will be generated.
int create_realm(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, bool exclusive,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer = nullptr);
/// Set the given realm as the cluster's default realm.
int set_default_realm(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWRealm& info,
bool exclusive = false);
/// Update the current_period of an existing realm.
int realm_set_current_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
sal::RealmWriter& writer, RGWRealm& realm,
const RGWPeriod& period);
/// Overwrite the local zonegroup and period config objects with the new
/// configuration contained in the given period.
int reflect_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWPeriod& info);
/// Return the staging period id for the given realm.
std::string get_staging_period_id(std::string_view realm_id);
/// Convert the given period into a separate staging period, where
/// radosgw-admin can make changes to it without affecting the running
/// configuration.
void fork_period(const DoutPrefixProvider* dpp, RGWPeriod& info);
/// Read all zonegroups in the period's realm and add them to the period.
int update_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, RGWPeriod& info);
/// Validates the given 'staging' period and tries to commit it as the
/// realm's new current period.
int commit_period(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, sal::Driver* driver,
RGWRealm& realm, sal::RealmWriter& realm_writer,
const RGWPeriod& current_period,
RGWPeriod& info, std::ostream& error_stream,
bool force_if_stale);
/// Look up a zonegroup by its id. If no id is given, look it up by name.
/// If no name is given, fall back to the cluster's default zonegroup.
int read_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
std::string_view zonegroup_id,
std::string_view zonegroup_name,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer = nullptr);
/// Initialize and create the given zonegroup. If the given info.id is empty,
/// a random uuid will be generated. May fail with -EEXIST.
int create_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, bool exclusive,
RGWZoneGroup& info);
/// Set the given zonegroup as its realm's default zonegroup.
int set_default_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWZoneGroup& info,
bool exclusive = false);
/// Add a zone to the zonegroup, or update an existing zone entry.
int add_zone_to_group(const DoutPrefixProvider* dpp,
RGWZoneGroup& zonegroup,
const RGWZoneParams& zone_params,
const bool *pis_master, const bool *pread_only,
const std::list<std::string>& endpoints,
const std::string *ptier_type,
const bool *psync_from_all,
const std::list<std::string>& sync_from,
const std::list<std::string>& sync_from_rm,
const std::string *predirect_zone,
std::optional<int> bucket_index_max_shards,
const rgw::zone_features::set& enable_features,
const rgw::zone_features::set& disable_features);
/// Remove a zone by id from its zonegroup, promoting a new master zone if
/// necessary.
int remove_zone_from_group(const DoutPrefixProvider* dpp,
RGWZoneGroup& info,
const rgw_zone_id& zone_id);
/// Look up a zone by its id. If no id is given, look it up by name. If no name
/// is given, fall back to the realm's default zone.
int read_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
std::string_view zone_id,
std::string_view zone_name,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer = nullptr);
/// Initialize and create a new zone. If the given info.id is empty, a random
/// uuid will be generated. Pool names are initialized with the zone name as a
/// prefix. If any pool names conflict with existing zones, a random suffix is
/// added.
int create_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, bool exclusive,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer = nullptr);
/// Initialize the zone's pool names using the zone name as a prefix. If a pool
/// name conflicts with an existing zone's pool, add a unique suffix.
int init_zone_pool_names(const DoutPrefixProvider *dpp, optional_yield y,
const std::set<rgw_pool>& pools, RGWZoneParams& info);
/// Set the given zone as its realm's default zone.
int set_default_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWZoneParams& info,
bool exclusive = false);
/// Delete an existing zone and remove it from any zonegroups that contain it.
int delete_zone(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWZoneParams& info,
sal::ZoneWriter& writer);
/// Global state about the site configuration. Initialized once during
/// startup and may be reinitialized by RGWRealmReloader, but is otherwise
/// immutable at runtime.
class SiteConfig {
public:
/// Return the local zone params.
const RGWZoneParams& get_zone_params() const { return zone_params; }
/// Return the current realm configuration, if a realm is present.
const std::optional<RGWRealm>& get_realm() const { return realm; }
/// Return the current period configuration, if a period is present.
const std::optional<RGWPeriod>& get_period() const { return period; }
/// Return the zonegroup configuration.
const RGWZoneGroup& get_zonegroup() const { return *zonegroup; }
/// Return the public zone configuration.
const RGWZone& get_zone() const { return *zone; }
/// Load or reload the multisite configuration from storage. This is not
/// thread-safe, so requires careful coordination with the RGWRealmReloader.
int load(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore);
private:
int load_period_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore, const RGWRealm& realm,
const rgw_zone_id& zone_id);
int load_local_zonegroup(const DoutPrefixProvider* dpp, optional_yield y,
sal::ConfigStore* cfgstore,
const rgw_zone_id& zone_id);
RGWZoneParams zone_params;
std::optional<RGWRealm> realm;
std::optional<RGWPeriod> period;
std::optional<RGWZoneGroup> local_zonegroup;
const RGWZoneGroup* zonegroup = nullptr;
const RGWZone* zone = nullptr;
};
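/// A minimal usage sketch (an assumption added for illustration, not from
/// the original source): load the site configuration once at startup and
/// read the local zone.
#if 0
inline int init_site(const DoutPrefixProvider* dpp, optional_yield y,
                     sal::ConfigStore* cfgstore)
{
  SiteConfig site;
  int r = site.load(dpp, y, cfgstore);
  if (r < 0) {
    return r; // e.g. no realm/zone configured yet
  }
  const RGWZone& zone = site.get_zone(); // the gateway's own zone entry
  (void) zone;
  return 0;
}
#endif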
} // namespace rgw
| 36,835 | 36.511202 | 141 |
h
|
null |
ceph-main/src/rgw/driver/rados/sync_fairness.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include <algorithm>
#include <mutex>
#include <random>
#include <vector>
#include <boost/container/flat_map.hpp>
#include "include/encoding.h"
#include "include/rados/librados.hpp"
#include "rgw_sal_rados.h"
#include "rgw_cr_rados.h"
#include "sync_fairness.h"
#include <boost/asio/yield.hpp>
#define dout_subsys ceph_subsys_rgw
namespace rgw::sync_fairness {
using bid_value = uint16_t;
using bid_vector = std::vector<bid_value>; // bid per replication log shard
using notifier_id = uint64_t;
using bidder_map = boost::container::flat_map<notifier_id, bid_vector>;
struct BidRequest {
bid_vector bids;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(bids, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& p) {
DECODE_START(1, p);
decode(bids, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(BidRequest);
struct BidResponse {
bid_vector bids;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(bids, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& p) {
DECODE_START(1, p);
decode(bids, p);
DECODE_FINISH(p);
}
};
WRITE_CLASS_ENCODER(BidResponse);
static void encode_notify_request(const bid_vector& bids, bufferlist& bl)
{
BidRequest request;
request.bids = bids; // copy the vector
encode(request, bl);
}
static int apply_notify_responses(const bufferlist& bl, bidder_map& bidders)
{
  boost::container::flat_map<std::pair<uint64_t, uint64_t>, bufferlist> replies;
std::vector<std::pair<uint64_t, uint64_t>> timeouts;
try {
// decode notify responses
auto p = bl.cbegin();
using ceph::decode;
decode(replies, p);
decode(timeouts, p);
// add peers that replied
for (const auto& peer : replies) {
auto q = peer.second.cbegin();
BidResponse response;
decode(response, q);
uint64_t peer_id = peer.first.first;
bidders[peer_id] = std::move(response.bids);
}
// remove peers that timed out
for (const auto& peer : timeouts) {
uint64_t peer_id = peer.first;
bidders.erase(peer_id);
}
} catch (const buffer::error& e) {
return -EIO;
}
return 0;
}
// server interface to handle bid notifications from peers
struct Server {
virtual ~Server() = default;
virtual void on_peer_bid(uint64_t peer_id, bid_vector peer_bids,
bid_vector& my_bids) = 0;
};
// rados watcher for sync fairness notifications
class Watcher : public librados::WatchCtx2 {
const DoutPrefixProvider* dpp;
sal::RadosStore* const store;
rgw_raw_obj obj;
Server* server;
rgw_rados_ref ref;
uint64_t handle = 0;
public:
Watcher(const DoutPrefixProvider* dpp, sal::RadosStore* store,
const rgw_raw_obj& obj, Server* server)
: dpp(dpp), store(store), obj(obj), server(server)
{}
~Watcher()
{
stop();
}
int start()
{
int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
if (r < 0) {
return r;
}
// register a watch on the control object
r = ref.pool.ioctx().watch2(ref.obj.oid, &handle, this);
if (r == -ENOENT) {
constexpr bool exclusive = true;
r = ref.pool.ioctx().create(ref.obj.oid, exclusive);
if (r == -EEXIST || r == 0) {
r = ref.pool.ioctx().watch2(ref.obj.oid, &handle, this);
}
}
if (r < 0) {
ldpp_dout(dpp, -1) << "Failed to watch " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
ref.pool.ioctx().close();
return r;
}
ldpp_dout(dpp, 10) << "Watching " << ref.obj.oid << dendl;
return 0;
}
int restart()
{
int r = ref.pool.ioctx().unwatch2(handle);
if (r < 0) {
ldpp_dout(dpp, -1) << "Failed to unwatch on " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
}
r = ref.pool.ioctx().watch2(ref.obj.oid, &handle, this);
if (r < 0) {
ldpp_dout(dpp, -1) << "Failed to restart watch on " << ref.obj
<< " with " << cpp_strerror(-r) << dendl;
ref.pool.ioctx().close();
}
return r;
}
void stop()
{
if (handle) {
ref.pool.ioctx().unwatch2(handle);
ref.pool.ioctx().close();
}
}
// respond to bid notifications
void handle_notify(uint64_t notify_id, uint64_t cookie,
uint64_t notifier_id, bufferlist& bl)
{
if (cookie != handle) {
return;
}
BidRequest request;
try {
auto p = bl.cbegin();
decode(request, p);
} catch (const buffer::error& e) {
ldpp_dout(dpp, -1) << "Failed to decode notification: " << e.what() << dendl;
return;
}
BidResponse response;
server->on_peer_bid(notifier_id, std::move(request.bids), response.bids);
bufferlist reply;
encode(response, reply);
ref.pool.ioctx().notify_ack(ref.obj.oid, notify_id, cookie, reply);
}
// reestablish the watch if it gets disconnected
void handle_error(uint64_t cookie, int err)
{
if (cookie != handle) {
return;
}
if (err == -ENOTCONN) {
ldpp_dout(dpp, 4) << "Disconnected watch on " << ref.obj << dendl;
restart();
}
}
}; // Watcher
class RadosBidManager;
// RGWRadosNotifyCR wrapper coroutine
class NotifyCR : public RGWCoroutine {
rgw::sal::RadosStore* store;
RadosBidManager* mgr;
rgw_raw_obj obj;
bufferlist request;
bufferlist response;
public:
NotifyCR(rgw::sal::RadosStore* store, RadosBidManager* mgr,
const rgw_raw_obj& obj, const bid_vector& my_bids)
: RGWCoroutine(store->ctx()), store(store), mgr(mgr), obj(obj)
{
encode_notify_request(my_bids, request);
}
int operate(const DoutPrefixProvider* dpp) override;
};
class RadosBidManager : public BidManager, public Server, public DoutPrefix {
sal::RadosStore* store;
rgw_raw_obj obj;
Watcher watcher;
std::mutex mutex;
bid_vector my_bids;
bidder_map all_bids;
public:
RadosBidManager(sal::RadosStore* store, const rgw_raw_obj& watch_obj,
std::size_t num_shards)
: DoutPrefix(store->ctx(), dout_subsys, "sync fairness: "),
store(store), obj(watch_obj), watcher(this, store, watch_obj, this)
{
    // fill my_bids with a random permutation of the shard indices
    std::random_device rd;
    std::default_random_engine rng{rd()};
    my_bids.resize(num_shards);
    for (bid_value i = 0; i < num_shards; ++i) {
my_bids[i] = i;
}
std::shuffle(my_bids.begin(), my_bids.end(), rng);
}
int start() override
{
return watcher.start();
}
void on_peer_bid(uint64_t peer_id, bid_vector peer_bids,
bid_vector& my_bids) override
{
ldpp_dout(this, 10) << "received bids from peer " << peer_id << dendl;
auto lock = std::scoped_lock{mutex};
all_bids[peer_id] = std::move(peer_bids);
my_bids = this->my_bids;
}
bool is_highest_bidder(std::size_t index)
{
auto lock = std::scoped_lock{mutex};
const bid_value my_bid = my_bids.at(index); // may throw
for (const auto& peer_bids : all_bids) {
const bid_value peer_bid = peer_bids.second.at(index); // may throw
if (peer_bid > my_bid) {
return false;
}
}
return true;
}
RGWCoroutine* notify_cr()
{
auto lock = std::scoped_lock{mutex};
return new NotifyCR(store, this, obj, my_bids);
}
void notify_response(const bufferlist& bl)
{
ldpp_dout(this, 10) << "received notify response from peers" << dendl;
auto lock = std::scoped_lock{mutex};
// clear existing bids in case any peers went away. note that this may
// remove newer bids from peer notifications that raced with ours
all_bids.clear();
apply_notify_responses(bl, all_bids);
}
};
int NotifyCR::operate(const DoutPrefixProvider* dpp)
{
static constexpr uint64_t timeout_ms = 15'000;
reenter(this) {
yield call(new RGWRadosNotifyCR(store, obj, request,
timeout_ms, &response));
if (retcode < 0) {
return set_cr_error(retcode);
}
mgr->notify_response(response);
return set_cr_done();
}
return 0;
}
auto create_rados_bid_manager(sal::RadosStore* store,
const rgw_raw_obj& watch_obj,
std::size_t num_shards)
-> std::unique_ptr<BidManager>
{
return std::make_unique<RadosBidManager>(store, watch_obj, num_shards);
}
} // namespace rgw::sync_fairness
| 8,772 | 23.923295 | 83 |
cc
|
null |
ceph-main/src/rgw/driver/rados/sync_fairness.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#pragma once
#include <memory>
namespace rgw::sal { class RadosStore; }
struct rgw_raw_obj;
class RGWCoroutine;
/// watch/notify protocol to coordinate the sharing of sync locks
///
/// each gateway generates a set of random bids, and broadcasts them regularly
/// to other active gateways. in response, the peer gateways send their own set
/// of bids
///
/// sync will only lock and process log shards where it holds the highest bid
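///
/// A short sketch (added for illustration, not part of the original header)
/// of how a sync loop might consult the BidManager declared below; the
/// per-shard processing call is a hypothetical placeholder.
#if 0
void process_log_shards(rgw::sync_fairness::BidManager& bids,
                        std::size_t num_shards)
{
  for (std::size_t shard = 0; shard < num_shards; ++shard) {
    if (!bids.is_highest_bidder(shard)) {
      continue; // another gateway currently holds the highest bid here
    }
    // lock_and_process(shard); // hypothetical placeholder
  }
}
#endif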
namespace rgw::sync_fairness {
class BidManager {
public:
virtual ~BidManager() {}
/// establish a watch, creating the control object if necessary
virtual int start() = 0;
/// returns true if we're the highest bidder on the given shard index
virtual bool is_highest_bidder(std::size_t index) = 0;
/// return a coroutine that broadcasts our current bids and records the
/// bids from other peers that respond
virtual RGWCoroutine* notify_cr() = 0;
};
// rados BidManager factory
auto create_rados_bid_manager(sal::RadosStore* store,
const rgw_raw_obj& watch_obj,
std::size_t num_shards)
-> std::unique_ptr<BidManager>;
} // namespace rgw::sync_fairness
| 1,584 | 28.351852 | 79 |
h
|
null |
ceph-main/src/rgw/driver/rados/config/impl.cc
|
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "impl.h"
#include "common/async/yield_context.h"
#include "common/errno.h"
#include "rgw_string.h"
#include "rgw_zone.h"
namespace rgw::rados {
// default pool names
constexpr std::string_view default_zone_root_pool = "rgw.root";
constexpr std::string_view default_zonegroup_root_pool = "rgw.root";
constexpr std::string_view default_realm_root_pool = "rgw.root";
constexpr std::string_view default_period_root_pool = "rgw.root";
static rgw_pool default_pool(std::string_view name,
std::string_view default_name)
{
return std::string{name_or_default(name, default_name)};
}
ConfigImpl::ConfigImpl(const ceph::common::ConfigProxy& conf)
: realm_pool(default_pool(conf->rgw_realm_root_pool,
default_realm_root_pool)),
period_pool(default_pool(conf->rgw_period_root_pool,
default_period_root_pool)),
zonegroup_pool(default_pool(conf->rgw_zonegroup_root_pool,
default_zonegroup_root_pool)),
zone_pool(default_pool(conf->rgw_zone_root_pool,
default_zone_root_pool))
{
}
int ConfigImpl::read(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
bufferlist& bl, RGWObjVersionTracker* objv)
{
librados::IoCtx ioctx;
int r = rgw_init_ioctx(dpp, &rados, pool, ioctx, true, false);
if (r < 0) {
return r;
}
librados::ObjectReadOperation op;
if (objv) {
objv->prepare_op_for_read(&op);
}
op.read(0, 0, &bl, nullptr);
return rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y);
}
int ConfigImpl::write(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
Create create, const bufferlist& bl,
RGWObjVersionTracker* objv)
{
librados::IoCtx ioctx;
int r = rgw_init_ioctx(dpp, &rados, pool, ioctx, true, false);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
switch (create) {
case Create::MustNotExist: op.create(true); break;
case Create::MayExist: op.create(false); break;
case Create::MustExist: op.assert_exists(); break;
}
if (objv) {
objv->prepare_op_for_write(&op);
}
op.write_full(bl);
r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r >= 0 && objv) {
objv->apply_write();
}
return r;
}
int ConfigImpl::remove(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
RGWObjVersionTracker* objv)
{
librados::IoCtx ioctx;
int r = rgw_init_ioctx(dpp, &rados, pool, ioctx, true, false);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
if (objv) {
objv->prepare_op_for_write(&op);
}
op.remove();
r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
if (r >= 0 && objv) {
objv->apply_write();
}
return r;
}
int ConfigImpl::notify(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms)
{
librados::IoCtx ioctx;
int r = rgw_init_ioctx(dpp, &rados, pool, ioctx, true, false);
if (r < 0) {
return r;
}
return rgw_rados_notify(dpp, ioctx, oid, bl, timeout_ms, nullptr, y);
}
} // namespace rgw::rados
| 3,758 | 27.915385 | 71 |
cc
|
null |
ceph-main/src/rgw/driver/rados/config/impl.h
|
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "rgw_basic_types.h"
#include "rgw_tools.h"
#include "rgw_sal_config.h"
namespace rgw::rados {
// write options that control object creation
enum class Create {
MustNotExist, // fail with EEXIST if the object already exists
MayExist, // create if the object didn't exist, overwrite if it did
MustExist, // fail with ENOENT if the object doesn't exist
};
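// For reference (added commentary), the modes map onto librados operations
// as in the switch in ConfigImpl::write() in impl.cc:
//   Create::MustNotExist -> op.create(true)     (fails with -EEXIST)
//   Create::MayExist     -> op.create(false)
//   Create::MustExist    -> op.assert_exists()  (fails with -ENOENT)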
struct ConfigImpl {
librados::Rados rados;
const rgw_pool realm_pool;
const rgw_pool period_pool;
const rgw_pool zonegroup_pool;
const rgw_pool zone_pool;
ConfigImpl(const ceph::common::ConfigProxy& conf);
int read(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
bufferlist& bl, RGWObjVersionTracker* objv);
template <typename T>
int read(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
T& data, RGWObjVersionTracker* objv)
{
bufferlist bl;
int r = read(dpp, y, pool, oid, bl, objv);
if (r < 0) {
return r;
}
try {
auto p = bl.cbegin();
decode(data, p);
} catch (const buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from "
<< pool << ":" << oid << dendl;
return -EIO;
}
return 0;
}
int write(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid, Create create,
const bufferlist& bl, RGWObjVersionTracker* objv);
template <typename T>
int write(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid, Create create,
const T& data, RGWObjVersionTracker* objv)
{
bufferlist bl;
encode(data, bl);
return write(dpp, y, pool, oid, create, bl, objv);
}
int remove(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
RGWObjVersionTracker* objv);
int list(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& marker,
std::regular_invocable<std::string> auto filter,
std::span<std::string> entries,
sal::ListResult<std::string>& result)
{
librados::IoCtx ioctx;
int r = rgw_init_ioctx(dpp, &rados, pool, ioctx, true, false);
if (r < 0) {
return r;
}
librados::ObjectCursor oc;
if (!oc.from_str(marker)) {
ldpp_dout(dpp, 10) << "failed to parse cursor: " << marker << dendl;
return -EINVAL;
}
std::size_t count = 0;
try {
auto iter = ioctx.nobjects_begin(oc);
const auto end = ioctx.nobjects_end();
for (; count < entries.size() && iter != end; ++iter) {
std::string entry = filter(iter->get_oid());
if (!entry.empty()) {
entries[count++] = std::move(entry);
}
}
if (iter == end) {
result.next.clear();
} else {
result.next = iter.get_cursor().to_str();
}
} catch (const std::exception& e) {
ldpp_dout(dpp, 10) << "NObjectIterator exception " << e.what() << dendl;
return -EIO;
}
result.entries = entries.first(count);
return 0;
}
int notify(const DoutPrefixProvider* dpp, optional_yield y,
const rgw_pool& pool, const std::string& oid,
bufferlist& bl, uint64_t timeout_ms);
};
inline std::string_view name_or_default(std::string_view name,
std::string_view default_name)
{
if (!name.empty()) {
return name;
}
return default_name;
}
} // namespace rgw::rados
| 4,066 | 28.05 | 78 |
h
|
null |
ceph-main/src/rgw/driver/rados/config/period.cc
|
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "common/errno.h"
#include "rgw_zone.h"
#include "driver/rados/config/store.h"
#include "impl.h"
namespace rgw::rados {
// period oids
constexpr std::string_view period_info_oid_prefix = "periods.";
constexpr std::string_view period_latest_epoch_info_oid = ".latest_epoch";
constexpr std::string_view period_staging_suffix = ":staging";
static std::string period_oid(std::string_view period_id, uint32_t epoch)
{
// omit the epoch for the staging period
if (period_id.ends_with(period_staging_suffix)) {
return string_cat_reserve(period_info_oid_prefix, period_id);
}
return fmt::format("{}{}.{}", period_info_oid_prefix, period_id, epoch);
}
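// Worked example (added commentary): with realm id "r1" and period uuid "p1",
// the oids produced above are "periods.r1:staging" for the staging period and
// "periods.p1.3" for epoch 3 of a committed period.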
static std::string latest_epoch_oid(const ceph::common::ConfigProxy& conf,
std::string_view period_id)
{
return string_cat_reserve(
period_info_oid_prefix, period_id,
name_or_default(conf->rgw_period_latest_epoch_info_oid,
period_latest_epoch_info_oid));
}
static int read_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
ConfigImpl* impl, std::string_view period_id,
uint32_t& epoch, RGWObjVersionTracker* objv)
{
const auto& pool = impl->period_pool;
const auto latest_oid = latest_epoch_oid(dpp->get_cct()->_conf, period_id);
RGWPeriodLatestEpochInfo latest;
int r = impl->read(dpp, y, pool, latest_oid, latest, objv);
if (r >= 0) {
epoch = latest.epoch;
}
return r;
}
static int write_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
ConfigImpl* impl, bool exclusive,
std::string_view period_id, uint32_t epoch,
RGWObjVersionTracker* objv)
{
const auto& pool = impl->period_pool;
const auto latest_oid = latest_epoch_oid(dpp->get_cct()->_conf, period_id);
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
RGWPeriodLatestEpochInfo latest{epoch};
return impl->write(dpp, y, pool, latest_oid, create, latest, objv);
}
static int delete_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
ConfigImpl* impl, std::string_view period_id,
RGWObjVersionTracker* objv)
{
const auto& pool = impl->period_pool;
const auto latest_oid = latest_epoch_oid(dpp->get_cct()->_conf, period_id);
return impl->remove(dpp, y, pool, latest_oid, objv);
}
static int update_latest_epoch(const DoutPrefixProvider* dpp, optional_yield y,
ConfigImpl* impl, std::string_view period_id,
uint32_t epoch)
{
static constexpr int MAX_RETRIES = 20;
for (int i = 0; i < MAX_RETRIES; i++) {
uint32_t existing_epoch = 0;
RGWObjVersionTracker objv;
bool exclusive = false;
// read existing epoch
int r = read_latest_epoch(dpp, y, impl, period_id, existing_epoch, &objv);
if (r == -ENOENT) {
// use an exclusive create to set the epoch atomically
exclusive = true;
objv.generate_new_write_ver(dpp->get_cct());
ldpp_dout(dpp, 20) << "creating initial latest_epoch=" << epoch
<< " for period=" << period_id << dendl;
} else if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to read latest_epoch" << dendl;
return r;
} else if (epoch <= existing_epoch) {
r = -EEXIST; // fail with EEXIST if epoch is not newer
ldpp_dout(dpp, 10) << "found existing latest_epoch " << existing_epoch
<< " >= given epoch " << epoch << ", returning r=" << r << dendl;
return r;
} else {
ldpp_dout(dpp, 20) << "updating latest_epoch from " << existing_epoch
<< " -> " << epoch << " on period=" << period_id << dendl;
}
r = write_latest_epoch(dpp, y, impl, exclusive, period_id, epoch, &objv);
if (r == -EEXIST) {
continue; // exclusive create raced with another update, retry
} else if (r == -ECANCELED) {
continue; // write raced with a conflicting version, retry
}
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to write latest_epoch" << dendl;
return r;
}
return 0; // return success
}
return -ECANCELED; // fail after max retries
}
int RadosConfigStore::create_period(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWPeriod& info)
{
if (info.get_id().empty()) {
ldpp_dout(dpp, 0) << "period cannot have an empty id" << dendl;
return -EINVAL;
}
if (info.get_epoch() == 0) {
ldpp_dout(dpp, 0) << "period cannot have an empty epoch" << dendl;
return -EINVAL;
}
const auto& pool = impl->period_pool;
const auto info_oid = period_oid(info.get_id(), info.get_epoch());
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
RGWObjVersionTracker objv;
objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, info_oid, create, info, &objv);
if (r < 0) {
return r;
}
(void) update_latest_epoch(dpp, y, impl.get(), info.get_id(), info.get_epoch());
return 0;
}
int RadosConfigStore::read_period(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view period_id,
std::optional<uint32_t> epoch,
RGWPeriod& info)
{
int r = 0;
if (!epoch) {
epoch = 0;
r = read_latest_epoch(dpp, y, impl.get(), period_id, *epoch, nullptr);
if (r < 0) {
return r;
}
}
const auto& pool = impl->period_pool;
const auto info_oid = period_oid(period_id, *epoch);
return impl->read(dpp, y, pool, info_oid, info, nullptr);
}
int RadosConfigStore::delete_period(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view period_id)
{
const auto& pool = impl->period_pool;
// read the latest_epoch
uint32_t latest_epoch = 0;
RGWObjVersionTracker latest_objv;
int r = read_latest_epoch(dpp, y, impl.get(), period_id,
latest_epoch, &latest_objv);
if (r < 0 && r != -ENOENT) { // just delete epoch=0 on ENOENT
ldpp_dout(dpp, 0) << "failed to read latest epoch for period "
<< period_id << ": " << cpp_strerror(r) << dendl;
return r;
}
for (uint32_t epoch = 0; epoch <= latest_epoch; epoch++) {
const auto info_oid = period_oid(period_id, epoch);
r = impl->remove(dpp, y, pool, info_oid, nullptr);
if (r < 0 && r != -ENOENT) { // ignore ENOENT
ldpp_dout(dpp, 0) << "failed to delete period " << info_oid
<< ": " << cpp_strerror(r) << dendl;
return r;
}
}
return delete_latest_epoch(dpp, y, impl.get(), period_id, &latest_objv);
}
int RadosConfigStore::list_period_ids(const DoutPrefixProvider* dpp,
optional_yield y,
const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result)
{
const auto& pool = impl->period_pool;
constexpr auto prefix = [] (std::string oid) -> std::string {
if (!oid.starts_with(period_info_oid_prefix)) {
return {};
}
if (!oid.ends_with(period_latest_epoch_info_oid)) {
return {};
}
// trim the prefix and suffix
const std::size_t count = oid.size() -
period_info_oid_prefix.size() -
period_latest_epoch_info_oid.size();
return oid.substr(period_info_oid_prefix.size(), count);
};
return impl->list(dpp, y, pool, marker, prefix, entries, result);
}
} // namespace rgw::rados
| 8,201 | 34.506494 | 82 |
cc
|
null |
ceph-main/src/rgw/driver/rados/config/period_config.cc
|
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "rgw_zone.h"
#include "driver/rados/config/store.h"
#include "impl.h"
namespace rgw::rados {
// period config oids
constexpr std::string_view period_config_prefix = "period_config.";
constexpr std::string_view period_config_realm_default = "default";
std::string period_config_oid(std::string_view realm_id)
{
if (realm_id.empty()) {
realm_id = period_config_realm_default;
}
return string_cat_reserve(period_config_prefix, realm_id);
}
int RadosConfigStore::read_period_config(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWPeriodConfig& info)
{
const auto& pool = impl->period_pool;
const auto oid = period_config_oid(realm_id);
return impl->read(dpp, y, pool, oid, info, nullptr);
}
int RadosConfigStore::write_period_config(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
std::string_view realm_id,
const RGWPeriodConfig& info)
{
const auto& pool = impl->period_pool;
const auto oid = period_config_oid(realm_id);
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
return impl->write(dpp, y, pool, oid, create, info, nullptr);
}
} // namespace rgw::rados
| 1,760 | 30.446429 | 75 |
cc
|
null |
ceph-main/src/rgw/driver/rados/config/realm.cc
|
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "common/errno.h"
#include "rgw_realm_watcher.h"
#include "rgw_zone.h"
#include "driver/rados/config/store.h"
#include "impl.h"
namespace rgw::rados {
// realm oids
constexpr std::string_view realm_names_oid_prefix = "realms_names.";
constexpr std::string_view realm_info_oid_prefix = "realms.";
constexpr std::string_view realm_control_oid_suffix = ".control";
constexpr std::string_view default_realm_info_oid = "default.realm";
static std::string realm_info_oid(std::string_view realm_id)
{
return string_cat_reserve(realm_info_oid_prefix, realm_id);
}
static std::string realm_name_oid(std::string_view realm_id)
{
return string_cat_reserve(realm_names_oid_prefix, realm_id);
}
static std::string realm_control_oid(std::string_view realm_id)
{
return string_cat_reserve(realm_info_oid_prefix, realm_id,
realm_control_oid_suffix);
}
static std::string default_realm_oid(const ceph::common::ConfigProxy& conf)
{
return std::string{name_or_default(conf->rgw_default_realm_info_oid,
default_realm_info_oid)};
}
int RadosConfigStore::write_default_realm_id(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
std::string_view realm_id)
{
const auto& pool = impl->realm_pool;
const auto oid = default_realm_oid(dpp->get_cct()->_conf);
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
RGWDefaultSystemMetaObjInfo default_info;
default_info.default_id = realm_id;
return impl->write(dpp, y, pool, oid, create, default_info, nullptr);
}
int RadosConfigStore::read_default_realm_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string& realm_id)
{
const auto& pool = impl->realm_pool;
const auto oid = default_realm_oid(dpp->get_cct()->_conf);
RGWDefaultSystemMetaObjInfo default_info;
int r = impl->read(dpp, y, pool, oid, default_info, nullptr);
if (r >= 0) {
realm_id = default_info.default_id;
}
return r;
}
int RadosConfigStore::delete_default_realm_id(const DoutPrefixProvider* dpp,
optional_yield y)
{
const auto& pool = impl->realm_pool;
const auto oid = default_realm_oid(dpp->get_cct()->_conf);
return impl->remove(dpp, y, pool, oid, nullptr);
}
class RadosRealmWriter : public sal::RealmWriter {
ConfigImpl* impl;
RGWObjVersionTracker objv;
std::string realm_id;
std::string realm_name;
public:
RadosRealmWriter(ConfigImpl* impl, RGWObjVersionTracker objv,
std::string_view realm_id, std::string_view realm_name)
: impl(impl), objv(std::move(objv)),
realm_id(realm_id), realm_name(realm_name)
{
}
int write(const DoutPrefixProvider* dpp, optional_yield y,
const RGWRealm& info) override
{
if (realm_id != info.get_id() || realm_name != info.get_name()) {
return -EINVAL; // can't modify realm id or name directly
}
const auto& pool = impl->realm_pool;
const auto info_oid = realm_info_oid(info.get_id());
return impl->write(dpp, y, pool, info_oid, Create::MustExist, info, &objv);
}
int rename(const DoutPrefixProvider* dpp, optional_yield y,
RGWRealm& info, std::string_view new_name) override
{
if (realm_id != info.get_id() || realm_name != info.get_name()) {
return -EINVAL; // can't modify realm id or name directly
}
if (new_name.empty()) {
ldpp_dout(dpp, 0) << "realm cannot have an empty name" << dendl;
return -EINVAL;
}
const auto& pool = impl->realm_pool;
const auto name = RGWNameToId{info.get_id()};
const auto info_oid = realm_info_oid(info.get_id());
const auto old_oid = realm_name_oid(info.get_name());
const auto new_oid = realm_name_oid(new_name);
// link the new name
RGWObjVersionTracker new_objv;
new_objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, new_oid, Create::MustNotExist,
name, &new_objv);
if (r < 0) {
return r;
}
// write the info with updated name
info.set_name(std::string{new_name});
r = impl->write(dpp, y, pool, info_oid, Create::MustExist, info, &objv);
if (r < 0) {
// on failure, unlink the new name
(void) impl->remove(dpp, y, pool, new_oid, &new_objv);
return r;
}
// unlink the old name
(void) impl->remove(dpp, y, pool, old_oid, nullptr);
realm_name = new_name;
return 0;
}
int remove(const DoutPrefixProvider* dpp, optional_yield y) override
{
const auto& pool = impl->realm_pool;
const auto info_oid = realm_info_oid(realm_id);
int r = impl->remove(dpp, y, pool, info_oid, &objv);
if (r < 0) {
return r;
}
const auto name_oid = realm_name_oid(realm_name);
(void) impl->remove(dpp, y, pool, name_oid, nullptr);
const auto control_oid = realm_control_oid(realm_id);
(void) impl->remove(dpp, y, pool, control_oid, nullptr);
return 0;
}
}; // RadosRealmWriter
int RadosConfigStore::create_realm(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer)
{
if (info.get_id().empty()) {
ldpp_dout(dpp, 0) << "realm cannot have an empty id" << dendl;
return -EINVAL;
}
if (info.get_name().empty()) {
ldpp_dout(dpp, 0) << "realm cannot have an empty name" << dendl;
return -EINVAL;
}
const auto& pool = impl->realm_pool;
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
// write the realm info
const auto info_oid = realm_info_oid(info.get_id());
RGWObjVersionTracker objv;
objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, info_oid, create, info, &objv);
if (r < 0) {
return r;
}
// write the realm name
const auto name_oid = realm_name_oid(info.get_name());
const auto name = RGWNameToId{info.get_id()};
RGWObjVersionTracker name_objv;
name_objv.generate_new_write_ver(dpp->get_cct());
r = impl->write(dpp, y, pool, name_oid, create, name, &name_objv);
if (r < 0) {
(void) impl->remove(dpp, y, pool, info_oid, &objv);
return r;
}
// create control object for watch/notify
const auto control_oid = realm_control_oid(info.get_id());
bufferlist empty_bl;
r = impl->write(dpp, y, pool, control_oid, Create::MayExist,
empty_bl, nullptr);
if (r < 0) {
(void) impl->remove(dpp, y, pool, name_oid, &name_objv);
(void) impl->remove(dpp, y, pool, info_oid, &objv);
return r;
}
if (writer) {
*writer = std::make_unique<RadosRealmWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_realm_by_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer)
{
const auto& pool = impl->realm_pool;
const auto info_oid = realm_info_oid(realm_id);
RGWObjVersionTracker objv;
int r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosRealmWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_realm_by_name(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_name,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer)
{
const auto& pool = impl->realm_pool;
// look up realm id by name
RGWNameToId name;
const auto name_oid = realm_name_oid(realm_name);
int r = impl->read(dpp, y, pool, name_oid, name, nullptr);
if (r < 0) {
return r;
}
const auto info_oid = realm_info_oid(name.obj_id);
RGWObjVersionTracker objv;
r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosRealmWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_default_realm(const DoutPrefixProvider* dpp,
optional_yield y,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer)
{
const auto& pool = impl->realm_pool;
// read default realm id
RGWDefaultSystemMetaObjInfo default_info;
const auto default_oid = default_realm_oid(dpp->get_cct()->_conf);
int r = impl->read(dpp, y, pool, default_oid, default_info, nullptr);
if (r < 0) {
return r;
}
const auto info_oid = realm_info_oid(default_info.default_id);
RGWObjVersionTracker objv;
r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosRealmWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_realm_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_name,
std::string& realm_id)
{
const auto& pool = impl->realm_pool;
RGWNameToId name;
// look up realm id by name
const auto name_oid = realm_name_oid(realm_name);
int r = impl->read(dpp, y, pool, name_oid, name, nullptr);
if (r < 0) {
return r;
}
realm_id = std::move(name.obj_id);
return 0;
}
int RadosConfigStore::realm_notify_new_period(const DoutPrefixProvider* dpp,
optional_yield y,
const RGWPeriod& period)
{
const auto& pool = impl->realm_pool;
const auto control_oid = realm_control_oid(period.get_realm());
bufferlist bl;
using ceph::encode;
// push the period to dependent zonegroups/zones
encode(RGWRealmNotify::ZonesNeedPeriod, bl);
encode(period, bl);
// reload the gateway with the new period
encode(RGWRealmNotify::Reload, bl);
constexpr uint64_t timeout_ms = 0;
return impl->notify(dpp, y, pool, control_oid, bl, timeout_ms);
}
int RadosConfigStore::list_realm_names(const DoutPrefixProvider* dpp,
optional_yield y,
const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result)
{
const auto& pool = impl->realm_pool;
constexpr auto prefix = [] (std::string oid) -> std::string {
if (!oid.starts_with(realm_names_oid_prefix)) {
return {};
}
return oid.substr(realm_names_oid_prefix.size());
};
return impl->list(dpp, y, pool, marker, prefix, entries, result);
}
} // namespace rgw::rados
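// Illustrative pagination sketch (not part of the upstream file): collect all
// realm names by re-listing from the returned marker until it comes back
// empty. `store`, `dpp` and `y` are caller-owned handles, and the ListResult
// field names (`entries`, `next`) follow rgw_sal_config.h as an assumption.
inline int example_collect_realm_names(rgw::rados::RadosConfigStore* store,
                                       const DoutPrefixProvider* dpp,
                                       optional_yield y,
                                       std::vector<std::string>& names)
{
  std::string marker;
  std::vector<std::string> buffer(100); // page size is up to the caller
  rgw::sal::ListResult<std::string> result;
  do {
    int r = store->list_realm_names(dpp, y, marker, buffer, result);
    if (r < 0) {
      return r;
    }
    names.insert(names.end(), result.entries.begin(), result.entries.end());
    marker = result.next;
  } while (!marker.empty());
  return 0;
}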
| 11,737 | 31.158904 | 83 | cc |
| null | ceph-main/src/rgw/driver/rados/config/store.cc |
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "include/rados/librados.hpp"
#include "common/errno.h"
#include "impl.h"
#include "store.h"
namespace rgw::rados {
RadosConfigStore::RadosConfigStore(std::unique_ptr<ConfigImpl> impl)
: impl(std::move(impl))
{
}
RadosConfigStore::~RadosConfigStore() = default;
auto create_config_store(const DoutPrefixProvider* dpp)
-> std::unique_ptr<RadosConfigStore>
{
auto impl = std::make_unique<ConfigImpl>(dpp->get_cct()->_conf);
// initialize a Rados client
int r = impl->rados.init_with_context(dpp->get_cct());
if (r < 0) {
ldpp_dout(dpp, -1) << "Rados client initialization failed with "
<< cpp_strerror(-r) << dendl;
return nullptr;
}
r = impl->rados.connect();
if (r < 0) {
ldpp_dout(dpp, -1) << "Rados client connection failed with "
<< cpp_strerror(-r) << dendl;
return nullptr;
}
return std::make_unique<RadosConfigStore>(std::move(impl));
}
} // namespace rgw::rados
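// Illustrative startup sketch (not part of the upstream file): open the store
// through the factory and look up the default realm id. `dpp` and `y` are
// hypothetical caller-owned handles; -ENOENT means no default realm is set.
inline int example_open_and_read_default_realm(const DoutPrefixProvider* dpp,
                                               optional_yield y,
                                               std::string& realm_id)
{
  auto store = rgw::rados::create_config_store(dpp);
  if (!store) {
    return -EIO; // the factory already logged the librados failure
  }
  return store->read_default_realm_id(dpp, y, realm_id);
}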
| 1,282 | 23.207547 | 68 | cc |
| null | ceph-main/src/rgw/driver/rados/config/store.h |
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <list>
#include <memory>
#include <string>
#include "rgw_common.h"
#include "rgw_sal_config.h"
class DoutPrefixProvider;
class optional_yield;
namespace rgw::rados {
struct ConfigImpl;
class RadosConfigStore : public sal::ConfigStore {
public:
explicit RadosConfigStore(std::unique_ptr<ConfigImpl> impl);
virtual ~RadosConfigStore() override;
// Realm
virtual int write_default_realm_id(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
std::string_view realm_id) override;
virtual int read_default_realm_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string& realm_id) override;
virtual int delete_default_realm_id(const DoutPrefixProvider* dpp,
optional_yield y) override;
virtual int create_realm(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer) override;
virtual int read_realm_by_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer) override;
virtual int read_realm_by_name(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_name,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer) override;
virtual int read_default_realm(const DoutPrefixProvider* dpp,
optional_yield y,
RGWRealm& info,
std::unique_ptr<sal::RealmWriter>* writer) override;
virtual int read_realm_id(const DoutPrefixProvider* dpp,
optional_yield y, std::string_view realm_name,
std::string& realm_id) override;
virtual int realm_notify_new_period(const DoutPrefixProvider* dpp,
optional_yield y,
const RGWPeriod& period) override;
virtual int list_realm_names(const DoutPrefixProvider* dpp,
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
// Period
virtual int create_period(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWPeriod& info) override;
virtual int read_period(const DoutPrefixProvider* dpp,
optional_yield y, std::string_view period_id,
std::optional<uint32_t> epoch, RGWPeriod& info) override;
virtual int delete_period(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view period_id) override;
virtual int list_period_ids(const DoutPrefixProvider* dpp,
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
// ZoneGroup
virtual int write_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
std::string_view realm_id,
std::string_view zonegroup_id) override;
virtual int read_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
std::string& zonegroup_id) override;
virtual int delete_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id) override;
virtual int create_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer) override;
virtual int read_zonegroup_by_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zonegroup_id,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer) override;
virtual int read_zonegroup_by_name(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zonegroup_name,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer) override;
virtual int read_default_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer) override;
virtual int list_zonegroup_names(const DoutPrefixProvider* dpp,
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
// Zone
virtual int write_default_zone_id(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
std::string_view realm_id,
std::string_view zone_id) override;
virtual int read_default_zone_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
std::string& zone_id) override;
virtual int delete_default_zone_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id) override;
virtual int create_zone(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer) override;
virtual int read_zone_by_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zone_id,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer) override;
virtual int read_zone_by_name(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zone_name,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer) override;
virtual int read_default_zone(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer) override;
virtual int list_zone_names(const DoutPrefixProvider* dpp,
optional_yield y, const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result) override;
// PeriodConfig
virtual int read_period_config(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWPeriodConfig& info) override;
virtual int write_period_config(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
std::string_view realm_id,
const RGWPeriodConfig& info) override;
private:
std::unique_ptr<ConfigImpl> impl;
}; // RadosConfigStore
/// RadosConfigStore factory function
auto create_config_store(const DoutPrefixProvider* dpp)
-> std::unique_ptr<RadosConfigStore>;
} // namespace rgw::rados
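// Illustrative note (not part of the upstream header): callers are expected to
// program against the sal::ConfigStore interface above so the RADOS backend
// stays a construction-time detail, e.g. (with caller-owned dpp/y handles and
// a hypothetical realm_id):
//
//   std::unique_ptr<rgw::sal::ConfigStore> cfg = rgw::rados::create_config_store(dpp);
//   std::string zone_id;
//   int r = cfg->read_default_zone_id(dpp, y, realm_id, zone_id);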
| 9,218 | 49.377049 | 93 | h |
| null | ceph-main/src/rgw/driver/rados/config/zone.cc |
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "common/errno.h"
#include "rgw_zone.h"
#include "driver/rados/config/store.h"
#include "impl.h"
namespace rgw::rados {
// zone oids
constexpr std::string_view zone_info_oid_prefix = "zone_info.";
constexpr std::string_view zone_names_oid_prefix = "zone_names.";
std::string zone_info_oid(std::string_view zone_id)
{
return string_cat_reserve(zone_info_oid_prefix, zone_id);
}
std::string zone_name_oid(std::string_view zone_id)
{
return string_cat_reserve(zone_names_oid_prefix, zone_id);
}
std::string default_zone_oid(const ceph::common::ConfigProxy& conf,
std::string_view realm_id)
{
return fmt::format("{}.{}", conf->rgw_default_zone_info_oid, realm_id);
}
int RadosConfigStore::write_default_zone_id(const DoutPrefixProvider* dpp,
optional_yield y,
bool exclusive,
std::string_view realm_id,
std::string_view zone_id)
{
const auto& pool = impl->zone_pool;
const auto default_oid = default_zone_oid(dpp->get_cct()->_conf, realm_id);
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
RGWDefaultSystemMetaObjInfo default_info;
default_info.default_id = zone_id;
return impl->write(dpp, y, pool, default_oid, create, default_info, nullptr);
}
int RadosConfigStore::read_default_zone_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
std::string& zone_id)
{
const auto& pool = impl->zone_pool;
const auto default_oid = default_zone_oid(dpp->get_cct()->_conf, realm_id);
RGWDefaultSystemMetaObjInfo default_info;
int r = impl->read(dpp, y, pool, default_oid, default_info, nullptr);
if (r >= 0) {
zone_id = default_info.default_id;
}
return r;
}
int RadosConfigStore::delete_default_zone_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id)
{
const auto& pool = impl->zone_pool;
const auto default_oid = default_zone_oid(dpp->get_cct()->_conf, realm_id);
return impl->remove(dpp, y, pool, default_oid, nullptr);
}
class RadosZoneWriter : public sal::ZoneWriter {
ConfigImpl* impl;
RGWObjVersionTracker objv;
std::string zone_id;
std::string zone_name;
public:
RadosZoneWriter(ConfigImpl* impl, RGWObjVersionTracker objv,
std::string_view zone_id, std::string_view zone_name)
: impl(impl), objv(std::move(objv)),
zone_id(zone_id), zone_name(zone_name)
{
}
int write(const DoutPrefixProvider* dpp, optional_yield y,
const RGWZoneParams& info) override
{
if (zone_id != info.get_id() || zone_name != info.get_name()) {
return -EINVAL; // can't modify zone id or name directly
}
const auto& pool = impl->zone_pool;
const auto info_oid = zone_info_oid(info.get_id());
return impl->write(dpp, y, pool, info_oid, Create::MustExist, info, &objv);
}
int rename(const DoutPrefixProvider* dpp, optional_yield y,
RGWZoneParams& info, std::string_view new_name) override
{
if (zone_id != info.get_id() || zone_name != info.get_name()) {
return -EINVAL; // can't modify zone id or name directly
}
if (new_name.empty()) {
ldpp_dout(dpp, 0) << "zone cannot have an empty name" << dendl;
return -EINVAL;
}
const auto& pool = impl->zone_pool;
const auto name = RGWNameToId{info.get_id()};
const auto info_oid = zone_info_oid(info.get_id());
const auto old_oid = zone_name_oid(info.get_name());
const auto new_oid = zone_name_oid(new_name);
// link the new name
RGWObjVersionTracker new_objv;
new_objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, new_oid, Create::MustNotExist,
name, &new_objv);
if (r < 0) {
return r;
}
// write the info with updated name
info.set_name(std::string{new_name});
r = impl->write(dpp, y, pool, info_oid, Create::MustExist, info, &objv);
if (r < 0) {
// on failure, unlink the new name
(void) impl->remove(dpp, y, pool, new_oid, &new_objv);
return r;
}
// unlink the old name
(void) impl->remove(dpp, y, pool, old_oid, nullptr);
zone_name = new_name;
return 0;
}
int remove(const DoutPrefixProvider* dpp, optional_yield y) override
{
const auto& pool = impl->zone_pool;
const auto info_oid = zone_info_oid(zone_id);
int r = impl->remove(dpp, y, pool, info_oid, &objv);
if (r < 0) {
return r;
}
const auto name_oid = zone_name_oid(zone_name);
(void) impl->remove(dpp, y, pool, name_oid, nullptr);
return 0;
}
}; // RadosZoneWriter
int RadosConfigStore::create_zone(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer)
{
if (info.get_id().empty()) {
ldpp_dout(dpp, 0) << "zone cannot have an empty id" << dendl;
return -EINVAL;
}
if (info.get_name().empty()) {
ldpp_dout(dpp, 0) << "zone cannot have an empty name" << dendl;
return -EINVAL;
}
const auto& pool = impl->zone_pool;
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
// write the zone info
const auto info_oid = zone_info_oid(info.get_id());
RGWObjVersionTracker objv;
objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, info_oid, create, info, &objv);
if (r < 0) {
return r;
}
// write the zone name
const auto name_oid = zone_name_oid(info.get_name());
const auto name = RGWNameToId{info.get_id()};
RGWObjVersionTracker name_objv;
name_objv.generate_new_write_ver(dpp->get_cct());
r = impl->write(dpp, y, pool, name_oid, create, name, &name_objv);
if (r < 0) {
(void) impl->remove(dpp, y, pool, info_oid, &objv);
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_zone_by_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zone_id,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer)
{
const auto& pool = impl->zone_pool;
const auto info_oid = zone_info_oid(zone_id);
RGWObjVersionTracker objv;
int r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_zone_by_name(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zone_name,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer)
{
const auto& pool = impl->zone_pool;
// look up zone id by name
const auto name_oid = zone_name_oid(zone_name);
RGWNameToId name;
int r = impl->read(dpp, y, pool, name_oid, name, nullptr);
if (r < 0) {
return r;
}
const auto info_oid = zone_info_oid(name.obj_id);
RGWObjVersionTracker objv;
r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_default_zone(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWZoneParams& info,
std::unique_ptr<sal::ZoneWriter>* writer)
{
const auto& pool = impl->zone_pool;
// read default zone id
const auto default_oid = default_zone_oid(dpp->get_cct()->_conf, realm_id);
RGWDefaultSystemMetaObjInfo default_info;
int r = impl->read(dpp, y, pool, default_oid, default_info, nullptr);
if (r < 0) {
return r;
}
const auto info_oid = zone_info_oid(default_info.default_id);
RGWObjVersionTracker objv;
r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::list_zone_names(const DoutPrefixProvider* dpp,
optional_yield y,
const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result)
{
const auto& pool = impl->zone_pool;
constexpr auto prefix = [] (std::string oid) -> std::string {
if (!oid.starts_with(zone_names_oid_prefix)) {
return {};
}
return oid.substr(zone_names_oid_prefix.size());
};
return impl->list(dpp, y, pool, marker, prefix, entries, result);
}
} // namespace rgw::rados
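// Illustrative usage sketch (not part of the upstream file): rename a zone via
// the writer returned from the read, which carries the object version observed
// there, so a racing update is expected to fail the write (typically with
// -ECANCELED). `store`, `dpp` and `y` are hypothetical caller-owned handles.
inline int example_rename_zone(rgw::rados::RadosConfigStore* store,
                               const DoutPrefixProvider* dpp,
                               optional_yield y,
                               std::string_view old_name,
                               std::string_view new_name)
{
  RGWZoneParams info;
  std::unique_ptr<rgw::sal::ZoneWriter> writer;
  int r = store->read_zone_by_name(dpp, y, old_name, info, &writer);
  if (r < 0) {
    return r;
  }
  return writer->rename(dpp, y, info, new_name);
}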
| 9,991 | 30.923323 | 81 | cc |
| null | ceph-main/src/rgw/driver/rados/config/zonegroup.cc |
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2022 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "common/errno.h"
#include "rgw_zone.h"
#include "driver/rados/config/store.h"
#include "impl.h"
namespace rgw::rados {
// zonegroup oids
constexpr std::string_view zonegroup_names_oid_prefix = "zonegroups_names.";
constexpr std::string_view zonegroup_info_oid_prefix = "zonegroup_info.";
constexpr std::string_view default_zonegroup_info_oid = "default.zonegroup";
static std::string zonegroup_info_oid(std::string_view zonegroup_id)
{
return string_cat_reserve(zonegroup_info_oid_prefix, zonegroup_id);
}
static std::string zonegroup_name_oid(std::string_view zonegroup_id)
{
return string_cat_reserve(zonegroup_names_oid_prefix, zonegroup_id);
}
static std::string default_zonegroup_oid(const ceph::common::ConfigProxy& conf,
std::string_view realm_id)
{
const auto prefix = name_or_default(conf->rgw_default_zonegroup_info_oid,
default_zonegroup_info_oid);
return fmt::format("{}.{}", prefix, realm_id);
}
int RadosConfigStore::write_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y,
bool exclusive,
std::string_view realm_id,
std::string_view zonegroup_id)
{
const auto& pool = impl->zonegroup_pool;
const auto oid = default_zonegroup_oid(dpp->get_cct()->_conf, realm_id);
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
RGWDefaultSystemMetaObjInfo default_info;
default_info.default_id = zonegroup_id;
return impl->write(dpp, y, pool, oid, create, default_info, nullptr);
}
int RadosConfigStore::read_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
std::string& zonegroup_id)
{
const auto& pool = impl->zonegroup_pool;
const auto oid = default_zonegroup_oid(dpp->get_cct()->_conf, realm_id);
RGWDefaultSystemMetaObjInfo default_info;
int r = impl->read(dpp, y, pool, oid, default_info, nullptr);
if (r >= 0) {
zonegroup_id = default_info.default_id;
}
return r;
}
int RadosConfigStore::delete_default_zonegroup_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id)
{
const auto& pool = impl->zonegroup_pool;
const auto oid = default_zonegroup_oid(dpp->get_cct()->_conf, realm_id);
return impl->remove(dpp, y, pool, oid, nullptr);
}
class RadosZoneGroupWriter : public sal::ZoneGroupWriter {
ConfigImpl* impl;
RGWObjVersionTracker objv;
std::string zonegroup_id;
std::string zonegroup_name;
public:
RadosZoneGroupWriter(ConfigImpl* impl, RGWObjVersionTracker objv,
std::string_view zonegroup_id,
std::string_view zonegroup_name)
: impl(impl), objv(std::move(objv)),
zonegroup_id(zonegroup_id), zonegroup_name(zonegroup_name)
{
}
int write(const DoutPrefixProvider* dpp, optional_yield y,
const RGWZoneGroup& info) override
{
if (zonegroup_id != info.get_id() || zonegroup_name != info.get_name()) {
return -EINVAL; // can't modify zonegroup id or name directly
}
const auto& pool = impl->zonegroup_pool;
const auto info_oid = zonegroup_info_oid(info.get_id());
return impl->write(dpp, y, pool, info_oid, Create::MustExist, info, &objv);
}
int rename(const DoutPrefixProvider* dpp, optional_yield y,
RGWZoneGroup& info, std::string_view new_name) override
{
if (zonegroup_id != info.get_id() || zonegroup_name != info.get_name()) {
return -EINVAL; // can't modify zonegroup id or name directly
}
if (new_name.empty()) {
ldpp_dout(dpp, 0) << "zonegroup cannot have an empty name" << dendl;
return -EINVAL;
}
const auto& pool = impl->zonegroup_pool;
const auto name = RGWNameToId{info.get_id()};
const auto info_oid = zonegroup_info_oid(info.get_id());
const auto old_oid = zonegroup_name_oid(info.get_name());
const auto new_oid = zonegroup_name_oid(new_name);
// link the new name
RGWObjVersionTracker new_objv;
new_objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, new_oid, Create::MustNotExist,
name, &new_objv);
if (r < 0) {
return r;
}
// write the info with updated name
info.set_name(std::string{new_name});
r = impl->write(dpp, y, pool, info_oid, Create::MustExist, info, &objv);
if (r < 0) {
// on failure, unlink the new name
(void) impl->remove(dpp, y, pool, new_oid, &new_objv);
return r;
}
// unlink the old name
(void) impl->remove(dpp, y, pool, old_oid, nullptr);
zonegroup_name = new_name;
return 0;
}
int remove(const DoutPrefixProvider* dpp, optional_yield y) override
{
const auto& pool = impl->zonegroup_pool;
const auto info_oid = zonegroup_info_oid(zonegroup_id);
int r = impl->remove(dpp, y, pool, info_oid, &objv);
if (r < 0) {
return r;
}
const auto name_oid = zonegroup_name_oid(zonegroup_name);
(void) impl->remove(dpp, y, pool, name_oid, nullptr);
return 0;
}
}; // RadosZoneGroupWriter
int RadosConfigStore::create_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y, bool exclusive,
const RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer)
{
if (info.get_id().empty()) {
ldpp_dout(dpp, 0) << "zonegroup cannot have an empty id" << dendl;
return -EINVAL;
}
if (info.get_name().empty()) {
ldpp_dout(dpp, 0) << "zonegroup cannot have an empty name" << dendl;
return -EINVAL;
}
const auto& pool = impl->zonegroup_pool;
const auto create = exclusive ? Create::MustNotExist : Create::MayExist;
// write the zonegroup info
const auto info_oid = zonegroup_info_oid(info.get_id());
RGWObjVersionTracker objv;
objv.generate_new_write_ver(dpp->get_cct());
int r = impl->write(dpp, y, pool, info_oid, create, info, &objv);
if (r < 0) {
return r;
}
// write the zonegroup name
const auto name_oid = zonegroup_name_oid(info.get_name());
const auto name = RGWNameToId{info.get_id()};
RGWObjVersionTracker name_objv;
name_objv.generate_new_write_ver(dpp->get_cct());
r = impl->write(dpp, y, pool, name_oid, create, name, &name_objv);
if (r < 0) {
(void) impl->remove(dpp, y, pool, info_oid, &objv);
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneGroupWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_zonegroup_by_id(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zonegroup_id,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer)
{
const auto& pool = impl->zonegroup_pool;
const auto info_oid = zonegroup_info_oid(zonegroup_id);
RGWObjVersionTracker objv;
int r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneGroupWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_zonegroup_by_name(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view zonegroup_name,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer)
{
const auto& pool = impl->zonegroup_pool;
// look up zonegroup id by name
RGWNameToId name;
const auto name_oid = zonegroup_name_oid(zonegroup_name);
int r = impl->read(dpp, y, pool, name_oid, name, nullptr);
if (r < 0) {
return r;
}
const auto info_oid = zonegroup_info_oid(name.obj_id);
RGWObjVersionTracker objv;
r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneGroupWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::read_default_zonegroup(const DoutPrefixProvider* dpp,
optional_yield y,
std::string_view realm_id,
RGWZoneGroup& info,
std::unique_ptr<sal::ZoneGroupWriter>* writer)
{
const auto& pool = impl->zonegroup_pool;
// read default zonegroup id
RGWDefaultSystemMetaObjInfo default_info;
const auto default_oid = default_zonegroup_oid(dpp->get_cct()->_conf, realm_id);
int r = impl->read(dpp, y, pool, default_oid, default_info, nullptr);
if (r < 0) {
return r;
}
const auto info_oid = zonegroup_info_oid(default_info.default_id);
RGWObjVersionTracker objv;
r = impl->read(dpp, y, pool, info_oid, info, &objv);
if (r < 0) {
return r;
}
if (writer) {
*writer = std::make_unique<RadosZoneGroupWriter>(
impl.get(), std::move(objv), info.get_id(), info.get_name());
}
return 0;
}
int RadosConfigStore::list_zonegroup_names(const DoutPrefixProvider* dpp,
optional_yield y,
const std::string& marker,
std::span<std::string> entries,
sal::ListResult<std::string>& result)
{
const auto& pool = impl->zonegroup_pool;
constexpr auto prefix = [] (std::string oid) -> std::string {
if (!oid.starts_with(zonegroup_names_oid_prefix)) {
return {};
}
return oid.substr(zonegroup_names_oid_prefix.size());
};
return impl->list(dpp, y, pool, marker, prefix, entries, result);
}
} // namespace rgw::rados
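// Illustrative read-modify-write sketch (not part of the upstream file): the
// writer enforces Create::MustExist plus the version observed at read time, so
// concurrent updates cannot be silently overwritten. `mutate` is a
// hypothetical caller-supplied callback; `store`, `dpp`, `y` are caller-owned.
template <typename Mutator>
int example_update_zonegroup(rgw::rados::RadosConfigStore* store,
                             const DoutPrefixProvider* dpp,
                             optional_yield y,
                             std::string_view zonegroup_id,
                             Mutator&& mutate)
{
  RGWZoneGroup info;
  std::unique_ptr<rgw::sal::ZoneGroupWriter> writer;
  int r = store->read_zonegroup_by_id(dpp, y, zonegroup_id, info, &writer);
  if (r < 0) {
    return r;
  }
  mutate(info); // must leave id and name alone; write() rejects such changes
  return writer->write(dpp, y, info);
}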
| 10,804 | 33.193038 | 91 | cc |
| null | ceph-main/src/rgw/jwt-cpp/base.h |
#pragma once
#include <string>
#include <array>
#include <cstdint>
#include <stdexcept>
namespace jwt {
namespace alphabet {
struct base64 {
static const std::array<char, 64>& data() {
static std::array<char, 64> data = {
{'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '+', '/'}};
return data;
};
static const std::string& fill() {
static std::string fill = "=";
return fill;
}
};
struct base64url {
static const std::array<char, 64>& data() {
static std::array<char, 64> data = {
{'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P',
'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'a', 'b', 'c', 'd', 'e', 'f',
'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '_'}};
return data;
};
static const std::string& fill() {
			static std::string fill = "%3d"; // percent-encoded '=' pad
return fill;
}
};
}
class base {
public:
template<typename T>
static std::string encode(const std::string& bin) {
return encode(bin, T::data(), T::fill());
}
template<typename T>
static std::string decode(const std::string& base) {
return decode(base, T::data(), T::fill());
}
private:
static std::string encode(const std::string& bin, const std::array<char, 64>& alphabet, const std::string& fill) {
size_t size = bin.size();
std::string res;
			// encode the complete 3-byte groups first; the 1- or 2-byte tail is handled below
size_t fast_size = size - size % 3;
for (size_t i = 0; i < fast_size;) {
uint32_t octet_a = (unsigned char)bin[i++];
uint32_t octet_b = (unsigned char)bin[i++];
uint32_t octet_c = (unsigned char)bin[i++];
uint32_t triple = (octet_a << 0x10) + (octet_b << 0x08) + octet_c;
res += alphabet[(triple >> 3 * 6) & 0x3F];
res += alphabet[(triple >> 2 * 6) & 0x3F];
res += alphabet[(triple >> 1 * 6) & 0x3F];
res += alphabet[(triple >> 0 * 6) & 0x3F];
}
if (fast_size == size)
return res;
size_t mod = size % 3;
uint32_t octet_a = fast_size < size ? (unsigned char)bin[fast_size++] : 0;
uint32_t octet_b = fast_size < size ? (unsigned char)bin[fast_size++] : 0;
uint32_t octet_c = fast_size < size ? (unsigned char)bin[fast_size++] : 0;
uint32_t triple = (octet_a << 0x10) + (octet_b << 0x08) + octet_c;
switch (mod) {
case 1:
res += alphabet[(triple >> 3 * 6) & 0x3F];
res += alphabet[(triple >> 2 * 6) & 0x3F];
res += fill;
res += fill;
break;
case 2:
res += alphabet[(triple >> 3 * 6) & 0x3F];
res += alphabet[(triple >> 2 * 6) & 0x3F];
res += alphabet[(triple >> 1 * 6) & 0x3F];
res += fill;
break;
default:
break;
}
return res;
}
static std::string decode(const std::string& base, const std::array<char, 64>& alphabet, const std::string& fill) {
size_t size = base.size();
size_t fill_cnt = 0;
while (size > fill.size()) {
if (base.substr(size - fill.size(), fill.size()) == fill) {
fill_cnt++;
size -= fill.size();
if(fill_cnt > 2)
throw std::runtime_error("Invalid input");
}
else break;
}
if ((size + fill_cnt) % 4 != 0)
throw std::runtime_error("Invalid input");
size_t out_size = size / 4 * 3;
std::string res;
res.reserve(out_size);
auto get_sextet = [&](size_t offset) {
for (size_t i = 0; i < alphabet.size(); i++) {
if (alphabet[i] == base[offset])
return i;
}
throw std::runtime_error("Invalid input");
};
size_t fast_size = size - size % 4;
for (size_t i = 0; i < fast_size;) {
uint32_t sextet_a = get_sextet(i++);
uint32_t sextet_b = get_sextet(i++);
uint32_t sextet_c = get_sextet(i++);
uint32_t sextet_d = get_sextet(i++);
uint32_t triple = (sextet_a << 3 * 6)
+ (sextet_b << 2 * 6)
+ (sextet_c << 1 * 6)
+ (sextet_d << 0 * 6);
res += (triple >> 2 * 8) & 0xFF;
res += (triple >> 1 * 8) & 0xFF;
res += (triple >> 0 * 8) & 0xFF;
}
if (fill_cnt == 0)
return res;
uint32_t triple = (get_sextet(fast_size) << 3 * 6)
+ (get_sextet(fast_size + 1) << 2 * 6);
switch (fill_cnt) {
case 1:
triple |= (get_sextet(fast_size + 2) << 1 * 6);
res += (triple >> 2 * 8) & 0xFF;
res += (triple >> 1 * 8) & 0xFF;
break;
case 2:
res += (triple >> 2 * 8) & 0xFF;
break;
default:
break;
}
return res;
}
};
}
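// Illustrative round-trip sketch (not part of the upstream header): JWT
// segments use the URL-safe alphabet. encode() appends fill() when the input
// length is not a multiple of 3, and decode() throws std::runtime_error when
// the padded length is not a multiple of 4, so the round trip below holds.
inline bool example_base64url_roundtrip(const std::string& bin) {
	const std::string encoded = jwt::base::encode<jwt::alphabet::base64url>(bin);
	return jwt::base::decode<jwt::alphabet::base64url>(encoded) == bin;
}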
| 5,000 | 28.591716 | 117 | h |
| null | ceph-main/src/rgw/jwt-cpp/jwt.h |
#pragma once
#define PICOJSON_USE_INT64
#include "picojson/picojson.h"
#include "base.h"
#include <set>
#include <chrono>
#include <unordered_map>
#include <memory>
#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <openssl/pem.h>
#include <openssl/ec.h>
#include <openssl/err.h>
//If openssl version less than 1.1
#if OPENSSL_VERSION_NUMBER < 269484032
#define OPENSSL10
#endif
#ifndef JWT_CLAIM_EXPLICIT
#define JWT_CLAIM_EXPLICIT 1
#endif
namespace jwt {
using date = std::chrono::system_clock::time_point;
struct signature_verification_exception : public std::runtime_error {
signature_verification_exception()
: std::runtime_error("signature verification failed")
{}
explicit signature_verification_exception(const std::string& msg)
: std::runtime_error(msg)
{}
explicit signature_verification_exception(const char* msg)
: std::runtime_error(msg)
{}
};
struct signature_generation_exception : public std::runtime_error {
signature_generation_exception()
: std::runtime_error("signature generation failed")
{}
explicit signature_generation_exception(const std::string& msg)
: std::runtime_error(msg)
{}
explicit signature_generation_exception(const char* msg)
: std::runtime_error(msg)
{}
};
struct rsa_exception : public std::runtime_error {
explicit rsa_exception(const std::string& msg)
: std::runtime_error(msg)
{}
explicit rsa_exception(const char* msg)
: std::runtime_error(msg)
{}
};
struct ecdsa_exception : public std::runtime_error {
explicit ecdsa_exception(const std::string& msg)
: std::runtime_error(msg)
{}
explicit ecdsa_exception(const char* msg)
: std::runtime_error(msg)
{}
};
struct token_verification_exception : public std::runtime_error {
token_verification_exception()
: std::runtime_error("token verification failed")
{}
explicit token_verification_exception(const std::string& msg)
: std::runtime_error("token verification failed: " + msg)
{}
};
namespace helper {
inline
std::string extract_pubkey_from_cert(const std::string& certstr, const std::string& pw = "") {
		// TODO: Cannot find the exact version this change happened in
#if OPENSSL_VERSION_NUMBER <= 0x1000114fL
std::unique_ptr<BIO, decltype(&BIO_free_all)> certbio(BIO_new_mem_buf(const_cast<char*>(certstr.data()), certstr.size()), BIO_free_all);
#else
std::unique_ptr<BIO, decltype(&BIO_free_all)> certbio(BIO_new_mem_buf(certstr.data(), certstr.size()), BIO_free_all);
#endif
std::unique_ptr<BIO, decltype(&BIO_free_all)> keybio(BIO_new(BIO_s_mem()), BIO_free_all);
std::unique_ptr<X509, decltype(&X509_free)> cert(PEM_read_bio_X509(certbio.get(), nullptr, nullptr, const_cast<char*>(pw.c_str())), X509_free);
if (!cert) throw rsa_exception("Error loading cert into memory");
std::unique_ptr<EVP_PKEY, decltype(&EVP_PKEY_free)> key(X509_get_pubkey(cert.get()), EVP_PKEY_free);
if(!key) throw rsa_exception("Error getting public key from certificate");
if(!PEM_write_bio_PUBKEY(keybio.get(), key.get())) throw rsa_exception("Error writing public key data in PEM format");
char* ptr = nullptr;
auto len = BIO_get_mem_data(keybio.get(), &ptr);
if(len <= 0 || ptr == nullptr) throw rsa_exception("Failed to convert pubkey to pem");
std::string res(ptr, len);
return res;
}
inline
std::shared_ptr<EVP_PKEY> load_public_key_from_string(const std::string& key, const std::string& password = "") {
std::unique_ptr<BIO, decltype(&BIO_free_all)> pubkey_bio(BIO_new(BIO_s_mem()), BIO_free_all);
if(key.substr(0, 27) == "-----BEGIN CERTIFICATE-----") {
auto epkey = helper::extract_pubkey_from_cert(key, password);
if ((size_t)BIO_write(pubkey_bio.get(), epkey.data(), epkey.size()) != epkey.size())
throw rsa_exception("failed to load public key: bio_write failed");
} else {
if ((size_t)BIO_write(pubkey_bio.get(), key.data(), key.size()) != key.size())
throw rsa_exception("failed to load public key: bio_write failed");
}
std::shared_ptr<EVP_PKEY> pkey(PEM_read_bio_PUBKEY(pubkey_bio.get(), nullptr, nullptr, (void*)password.c_str()), EVP_PKEY_free);
if (!pkey)
throw rsa_exception("failed to load public key: PEM_read_bio_PUBKEY failed:" + std::string(ERR_error_string(ERR_get_error(), NULL)));
return pkey;
}
inline
std::shared_ptr<EVP_PKEY> load_private_key_from_string(const std::string& key, const std::string& password = "") {
std::unique_ptr<BIO, decltype(&BIO_free_all)> privkey_bio(BIO_new(BIO_s_mem()), BIO_free_all);
if ((size_t)BIO_write(privkey_bio.get(), key.data(), key.size()) != key.size())
throw rsa_exception("failed to load private key: bio_write failed");
std::shared_ptr<EVP_PKEY> pkey(PEM_read_bio_PrivateKey(privkey_bio.get(), nullptr, nullptr, const_cast<char*>(password.c_str())), EVP_PKEY_free);
if (!pkey)
throw rsa_exception("failed to load private key: PEM_read_bio_PrivateKey failed");
return pkey;
}
}
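	// Illustrative sketch (not part of the upstream header): a verifier handed
	// an X.509 certificate rather than a bare key can convert it to a PEM
	// public key via the helper above; rsa_exception signals malformed input.
	inline std::string example_pubkey_from_cert(const std::string& cert_pem) {
		return helper::extract_pubkey_from_cert(cert_pem);
	}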
namespace algorithm {
/**
* "none" algorithm.
*
	 * Returns an empty signature and checks if the given signature is empty.
*/
struct none {
/// Return an empty string
std::string sign(const std::string&) const {
return "";
}
		/// Check if the given signature is empty. JWTs with "none" algorithm should not contain a signature.
void verify(const std::string&, const std::string& signature) const {
if (!signature.empty())
throw signature_verification_exception();
}
/// Get algorithm name
std::string name() const {
return "none";
}
};
/**
* Base class for HMAC family of algorithms
*/
struct hmacsha {
/**
* Construct new hmac algorithm
* \param key Key to use for HMAC
* \param md Pointer to hash function
* \param name Name of the algorithm
*/
hmacsha(std::string key, const EVP_MD*(*md)(), const std::string& name)
: secret(std::move(key)), md(md), alg_name(name)
{}
/**
* Sign jwt data
* \param data The data to sign
* \return HMAC signature for the given data
* \throws signature_generation_exception
*/
std::string sign(const std::string& data) const {
std::string res;
res.resize(EVP_MAX_MD_SIZE);
unsigned int len = res.size();
if (HMAC(md(), secret.data(), secret.size(), (const unsigned char*)data.data(), data.size(), (unsigned char*)res.data(), &len) == nullptr)
throw signature_generation_exception();
res.resize(len);
return res;
}
/**
* Check if signature is valid
* \param data The data to check signature against
* \param signature Signature provided by the jwt
* \throws signature_verification_exception If the provided signature does not match
*/
void verify(const std::string& data, const std::string& signature) const {
try {
auto res = sign(data);
				bool matched = true;
				// compare every byte without an early exit so the check does not
				// leak the position of the first mismatch through timing
				for (size_t i = 0; i < std::min<size_t>(res.size(), signature.size()); i++)
if (res[i] != signature[i])
matched = false;
if (res.size() != signature.size())
matched = false;
if (!matched)
throw signature_verification_exception();
}
catch (const signature_generation_exception&) {
throw signature_verification_exception();
}
}
/**
* Returns the algorithm name provided to the constructor
		 * \return Algorithm name
*/
std::string name() const {
return alg_name;
}
private:
		/// HMAC secret
const std::string secret;
/// HMAC hash generator
const EVP_MD*(*md)();
		/// Algorithm name
const std::string alg_name;
};
/**
* Base class for RSA family of algorithms
*/
struct rsa {
/**
* Construct new rsa algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
* \param md Pointer to hash function
* \param name Name of the algorithm
*/
rsa(const std::string& public_key, const std::string& private_key, const std::string& public_key_password, const std::string& private_key_password, const EVP_MD*(*md)(), const std::string& name)
: md(md), alg_name(name)
{
if (!private_key.empty()) {
pkey = helper::load_private_key_from_string(private_key, private_key_password);
} else if(!public_key.empty()) {
pkey = helper::load_public_key_from_string(public_key, public_key_password);
} else
throw rsa_exception("at least one of public or private key need to be present");
}
/**
* Sign jwt data
* \param data The data to sign
* \return RSA signature for the given data
* \throws signature_generation_exception
*/
std::string sign(const std::string& data) const {
#ifdef OPENSSL10
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_destroy)> ctx(EVP_MD_CTX_create(), EVP_MD_CTX_destroy);
#else
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> ctx(EVP_MD_CTX_create(), EVP_MD_CTX_free);
#endif
if (!ctx)
throw signature_generation_exception("failed to create signature: could not create context");
if (!EVP_SignInit(ctx.get(), md()))
throw signature_generation_exception("failed to create signature: SignInit failed");
std::string res;
res.resize(EVP_PKEY_size(pkey.get()));
unsigned int len = 0;
if (!EVP_SignUpdate(ctx.get(), data.data(), data.size()))
throw signature_generation_exception();
if (!EVP_SignFinal(ctx.get(), (unsigned char*)res.data(), &len, pkey.get()))
throw signature_generation_exception();
res.resize(len);
return res;
}
/**
* Check if signature is valid
* \param data The data to check signature against
* \param signature Signature provided by the jwt
* \throws signature_verification_exception If the provided signature does not match
*/
void verify(const std::string& data, const std::string& signature) const {
#ifdef OPENSSL10
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_destroy)> ctx(EVP_MD_CTX_create(), EVP_MD_CTX_destroy);
#else
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> ctx(EVP_MD_CTX_create(), EVP_MD_CTX_free);
#endif
if (!ctx)
throw signature_verification_exception("failed to verify signature: could not create context");
if (!EVP_VerifyInit(ctx.get(), md()))
throw signature_verification_exception("failed to verify signature: VerifyInit failed");
if (!EVP_VerifyUpdate(ctx.get(), data.data(), data.size()))
throw signature_verification_exception("failed to verify signature: VerifyUpdate failed");
auto res = EVP_VerifyFinal(ctx.get(), (const unsigned char*)signature.data(), signature.size(), pkey.get());
if (res != 1)
throw signature_verification_exception("evp verify final failed: " + std::to_string(res) + " " + ERR_error_string(ERR_get_error(), NULL));
}
/**
* Returns the algorithm name provided to the constructor
		 * \return Algorithm name
*/
std::string name() const {
return alg_name;
}
private:
/// OpenSSL structure containing converted keys
std::shared_ptr<EVP_PKEY> pkey;
/// Hash generator
const EVP_MD*(*md)();
		/// Algorithm name
const std::string alg_name;
};
/**
* Base class for ECDSA family of algorithms
*/
struct ecdsa {
/**
* Construct new ecdsa algorithm
* \param public_key ECDSA public key in PEM format
* \param private_key ECDSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
		 * \param md Pointer to hash function
		 * \param name Name of the algorithm
		 * \param siglen Expected length of the raw r||s signature in bytes
*/
ecdsa(const std::string& public_key, const std::string& private_key, const std::string& public_key_password, const std::string& private_key_password, const EVP_MD*(*md)(), const std::string& name, size_t siglen)
: md(md), alg_name(name), signature_length(siglen)
{
if (!public_key.empty()) {
std::unique_ptr<BIO, decltype(&BIO_free_all)> pubkey_bio(BIO_new(BIO_s_mem()), BIO_free_all);
if(public_key.substr(0, 27) == "-----BEGIN CERTIFICATE-----") {
auto epkey = helper::extract_pubkey_from_cert(public_key, public_key_password);
if ((size_t)BIO_write(pubkey_bio.get(), epkey.data(), epkey.size()) != epkey.size())
throw ecdsa_exception("failed to load public key: bio_write failed");
} else {
if ((size_t)BIO_write(pubkey_bio.get(), public_key.data(), public_key.size()) != public_key.size())
throw ecdsa_exception("failed to load public key: bio_write failed");
}
pkey.reset(PEM_read_bio_EC_PUBKEY(pubkey_bio.get(), nullptr, nullptr, (void*)public_key_password.c_str()), EC_KEY_free);
if (!pkey)
throw ecdsa_exception("failed to load public key: PEM_read_bio_EC_PUBKEY failed:" + std::string(ERR_error_string(ERR_get_error(), NULL)));
size_t keysize = EC_GROUP_get_degree(EC_KEY_get0_group(pkey.get()));
if(keysize != signature_length*4 && (signature_length != 132 || keysize != 521))
throw ecdsa_exception("invalid key size");
}
if (!private_key.empty()) {
std::unique_ptr<BIO, decltype(&BIO_free_all)> privkey_bio(BIO_new(BIO_s_mem()), BIO_free_all);
if ((size_t)BIO_write(privkey_bio.get(), private_key.data(), private_key.size()) != private_key.size())
throw rsa_exception("failed to load private key: bio_write failed");
pkey.reset(PEM_read_bio_ECPrivateKey(privkey_bio.get(), nullptr, nullptr, const_cast<char*>(private_key_password.c_str())), EC_KEY_free);
if (!pkey)
throw rsa_exception("failed to load private key: PEM_read_bio_ECPrivateKey failed");
size_t keysize = EC_GROUP_get_degree(EC_KEY_get0_group(pkey.get()));
if(keysize != signature_length*4 && (signature_length != 132 || keysize != 521))
throw ecdsa_exception("invalid key size");
}
if(!pkey)
throw rsa_exception("at least one of public or private key need to be present");
if(EC_KEY_check_key(pkey.get()) == 0)
throw ecdsa_exception("failed to load key: key is invalid");
}
/**
* Sign jwt data
* \param data The data to sign
* \return ECDSA signature for the given data
* \throws signature_generation_exception
*/
std::string sign(const std::string& data) const {
const std::string hash = generate_hash(data);
std::unique_ptr<ECDSA_SIG, decltype(&ECDSA_SIG_free)>
sig(ECDSA_do_sign((const unsigned char*)hash.data(), hash.size(), pkey.get()), ECDSA_SIG_free);
if(!sig)
throw signature_generation_exception();
#ifdef OPENSSL10
auto rr = bn2raw(sig->r);
auto rs = bn2raw(sig->s);
#else
const BIGNUM *r;
const BIGNUM *s;
ECDSA_SIG_get0(sig.get(), &r, &s);
auto rr = bn2raw(r);
auto rs = bn2raw(s);
#endif
if(rr.size() > signature_length/2 || rs.size() > signature_length/2)
throw std::logic_error("bignum size exceeded expected length");
while(rr.size() != signature_length/2) rr = '\0' + rr;
while(rs.size() != signature_length/2) rs = '\0' + rs;
return rr + rs;
}
/**
* Check if signature is valid
* \param data The data to check signature against
* \param signature Signature provided by the jwt
* \throws signature_verification_exception If the provided signature does not match
*/
void verify(const std::string& data, const std::string& signature) const {
const std::string hash = generate_hash(data);
auto r = raw2bn(signature.substr(0, signature.size() / 2));
auto s = raw2bn(signature.substr(signature.size() / 2));
#ifdef OPENSSL10
ECDSA_SIG sig;
sig.r = r.get();
sig.s = s.get();
if(ECDSA_do_verify((const unsigned char*)hash.data(), hash.size(), &sig, pkey.get()) != 1)
throw signature_verification_exception("Invalid signature");
#else
std::unique_ptr<ECDSA_SIG, decltype(&ECDSA_SIG_free)> sig(ECDSA_SIG_new(), ECDSA_SIG_free);
ECDSA_SIG_set0(sig.get(), r.release(), s.release());
if(ECDSA_do_verify((const unsigned char*)hash.data(), hash.size(), sig.get(), pkey.get()) != 1)
throw signature_verification_exception("Invalid signature");
#endif
}
/**
* Returns the algorithm name provided to the constructor
		 * \return Algorithm name
*/
std::string name() const {
return alg_name;
}
private:
/**
* Convert a OpenSSL BIGNUM to a std::string
* \param bn BIGNUM to convert
* \return bignum as string
*/
#ifdef OPENSSL10
static std::string bn2raw(BIGNUM* bn)
#else
static std::string bn2raw(const BIGNUM* bn)
#endif
{
std::string res;
res.resize(BN_num_bytes(bn));
BN_bn2bin(bn, (unsigned char*)res.data());
return res;
}
/**
* Convert an std::string to a OpenSSL BIGNUM
* \param raw String to convert
* \return BIGNUM representation
*/
static std::unique_ptr<BIGNUM, decltype(&BN_free)> raw2bn(const std::string& raw) {
return std::unique_ptr<BIGNUM, decltype(&BN_free)>(BN_bin2bn((const unsigned char*)raw.data(), raw.size(), nullptr), BN_free);
}
/**
* Hash the provided data using the hash function specified in constructor
* \param data Data to hash
* \return Hash of data
*/
std::string generate_hash(const std::string& data) const {
#ifdef OPENSSL10
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_destroy)> ctx(EVP_MD_CTX_create(), &EVP_MD_CTX_destroy);
#else
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> ctx(EVP_MD_CTX_new(), EVP_MD_CTX_free);
#endif
if(EVP_DigestInit(ctx.get(), md()) == 0)
throw signature_generation_exception("EVP_DigestInit failed");
if(EVP_DigestUpdate(ctx.get(), data.data(), data.size()) == 0)
throw signature_generation_exception("EVP_DigestUpdate failed");
unsigned int len = 0;
std::string res;
res.resize(EVP_MD_CTX_size(ctx.get()));
if(EVP_DigestFinal(ctx.get(), (unsigned char*)res.data(), &len) == 0)
throw signature_generation_exception("EVP_DigestFinal failed");
res.resize(len);
return res;
}
/// OpenSSL struct containing keys
std::shared_ptr<EC_KEY> pkey;
/// Hash generator function
const EVP_MD*(*md)();
		/// Algorithm name
const std::string alg_name;
/// Length of the resulting signature
const size_t signature_length;
};
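		// Illustrative sketch (not part of the upstream header): the concrete
		// ES* wrappers reduce to parameter choices over the base above; an
		// ES256-style instance pairs EVP_sha256 with a raw 64-byte r||s
		// signature. `example_make_es256` is a hypothetical helper.
		inline ecdsa example_make_es256(const std::string& public_pem,
		                                const std::string& private_pem = "") {
			return ecdsa(public_pem, private_pem, "", "", EVP_sha256, "ES256", 64);
		}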
/**
* Base class for PSS-RSA family of algorithms
*/
struct pss {
/**
* Construct new pss algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
* \param md Pointer to hash function
* \param name Name of the algorithm
*/
pss(const std::string& public_key, const std::string& private_key, const std::string& public_key_password, const std::string& private_key_password, const EVP_MD*(*md)(), const std::string& name)
: md(md), alg_name(name)
{
if (!private_key.empty()) {
pkey = helper::load_private_key_from_string(private_key, private_key_password);
} else if(!public_key.empty()) {
pkey = helper::load_public_key_from_string(public_key, public_key_password);
} else
throw rsa_exception("at least one of public or private key need to be present");
}
/**
* Sign jwt data
* \param data The data to sign
		 * \return RSA-PSS signature for the given data
* \throws signature_generation_exception
*/
std::string sign(const std::string& data) const {
auto hash = this->generate_hash(data);
std::unique_ptr<RSA, decltype(&RSA_free)> key(EVP_PKEY_get1_RSA(pkey.get()), RSA_free);
const int size = RSA_size(key.get());
std::string padded(size, 0x00);
if (!RSA_padding_add_PKCS1_PSS_mgf1(key.get(), (unsigned char*)padded.data(), (const unsigned char*)hash.data(), md(), md(), -1))
throw signature_generation_exception("failed to create signature: RSA_padding_add_PKCS1_PSS_mgf1 failed");
std::string res(size, 0x00);
if (RSA_private_encrypt(size, (const unsigned char*)padded.data(), (unsigned char*)res.data(), key.get(), RSA_NO_PADDING) < 0)
throw signature_generation_exception("failed to create signature: RSA_private_encrypt failed");
return res;
}
/**
* Check if signature is valid
* \param data The data to check signature against
* \param signature Signature provided by the jwt
* \throws signature_verification_exception If the provided signature does not match
*/
void verify(const std::string& data, const std::string& signature) const {
auto hash = this->generate_hash(data);
std::unique_ptr<RSA, decltype(&RSA_free)> key(EVP_PKEY_get1_RSA(pkey.get()), RSA_free);
const int size = RSA_size(key.get());
std::string sig(size, 0x00);
			// note: RSA_public_decrypt() returns -1 on error rather than 0
			if(RSA_public_decrypt(signature.size(), (const unsigned char*)signature.data(), (unsigned char*)sig.data(), key.get(), RSA_NO_PADDING) < 0)
				throw signature_verification_exception("Invalid signature");
if(!RSA_verify_PKCS1_PSS_mgf1(key.get(), (const unsigned char*)hash.data(), md(), md(), (const unsigned char*)sig.data(), -1))
throw signature_verification_exception("Invalid signature");
}
/**
* Returns the algorithm name provided to the constructor
* \return Algorithmname
*/
std::string name() const {
return alg_name;
}
private:
/**
* Hash the provided data using the hash function specified in constructor
* \param data Data to hash
* \return Hash of data
*/
std::string generate_hash(const std::string& data) const {
#ifdef OPENSSL10
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_destroy)> ctx(EVP_MD_CTX_create(), &EVP_MD_CTX_destroy);
#else
std::unique_ptr<EVP_MD_CTX, decltype(&EVP_MD_CTX_free)> ctx(EVP_MD_CTX_new(), EVP_MD_CTX_free);
#endif
if(EVP_DigestInit(ctx.get(), md()) == 0)
throw signature_generation_exception("EVP_DigestInit failed");
if(EVP_DigestUpdate(ctx.get(), data.data(), data.size()) == 0)
throw signature_generation_exception("EVP_DigestUpdate failed");
unsigned int len = 0;
std::string res;
res.resize(EVP_MD_CTX_size(ctx.get()));
if(EVP_DigestFinal(ctx.get(), (unsigned char*)res.data(), &len) == 0)
throw signature_generation_exception("EVP_DigestFinal failed");
res.resize(len);
return res;
}
/// OpenSSL structure containing keys
std::shared_ptr<EVP_PKEY> pkey;
/// Hash generator function
const EVP_MD*(*md)();
		/// Algorithm name
const std::string alg_name;
};
/**
* HS256 algorithm
*/
struct hs256 : public hmacsha {
/**
* Construct new instance of algorithm
* \param key HMAC signing key
*/
explicit hs256(std::string key)
: hmacsha(std::move(key), EVP_sha256, "HS256")
{}
};
/**
* HS384 algorithm
*/
struct hs384 : public hmacsha {
/**
* Construct new instance of algorithm
* \param key HMAC signing key
*/
explicit hs384(std::string key)
: hmacsha(std::move(key), EVP_sha384, "HS384")
{}
};
/**
* HS512 algorithm
*/
struct hs512 : public hmacsha {
/**
* Construct new instance of algorithm
* \param key HMAC signing key
*/
explicit hs512(std::string key)
: hmacsha(std::move(key), EVP_sha512, "HS512")
{}
};
/**
* RS256 algorithm
*/
struct rs256 : public rsa {
/**
* Construct new instance of algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit rs256(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: rsa(public_key, private_key, public_key_password, private_key_password, EVP_sha256, "RS256")
{}
};
/**
* RS384 algorithm
*/
struct rs384 : public rsa {
/**
* Construct new instance of algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit rs384(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: rsa(public_key, private_key, public_key_password, private_key_password, EVP_sha384, "RS384")
{}
};
/**
* RS512 algorithm
*/
struct rs512 : public rsa {
/**
* Construct new instance of algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit rs512(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: rsa(public_key, private_key, public_key_password, private_key_password, EVP_sha512, "RS512")
{}
};
/**
* ES256 algorithm
*/
struct es256 : public ecdsa {
/**
* Construct new instance of algorithm
* \param public_key ECDSA public key in PEM format
* \param private_key ECDSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit es256(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: ecdsa(public_key, private_key, public_key_password, private_key_password, EVP_sha256, "ES256", 64)
{}
};
/**
* ES384 algorithm
*/
struct es384 : public ecdsa {
/**
* Construct new instance of algorithm
* \param public_key ECDSA public key in PEM format
* \param private_key ECDSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit es384(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: ecdsa(public_key, private_key, public_key_password, private_key_password, EVP_sha384, "ES384", 96)
{}
};
/**
* ES512 algorithm
*/
struct es512 : public ecdsa {
/**
* Construct new instance of algorithm
* \param public_key ECDSA public key in PEM format
* \param private_key ECDSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit es512(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: ecdsa(public_key, private_key, public_key_password, private_key_password, EVP_sha512, "ES512", 132)
{}
};
/**
* PS256 algorithm
*/
struct ps256 : public pss {
/**
* Construct new instance of algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit ps256(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: pss(public_key, private_key, public_key_password, private_key_password, EVP_sha256, "PS256")
{}
};
/**
* PS384 algorithm
*/
struct ps384 : public pss {
/**
* Construct new instance of algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit ps384(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: pss(public_key, private_key, public_key_password, private_key_password, EVP_sha384, "PS384")
{}
};
/**
* PS512 algorithm
*/
struct ps512 : public pss {
/**
* Construct new instance of algorithm
* \param public_key RSA public key in PEM format
* \param private_key RSA private key or empty string if not available. If empty, signing will always fail.
* \param public_key_password Password to decrypt public key pem.
		 * \param private_key_password Password to decrypt private key pem.
*/
explicit ps512(const std::string& public_key, const std::string& private_key = "", const std::string& public_key_password = "", const std::string& private_key_password = "")
: pss(public_key, private_key, public_key_password, private_key_password, EVP_sha512, "PS512")
{}
};
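	/**
	 * Illustrative usage sketch (not part of the original header): picking one
	 * of the algorithm types above and exercising its sign()/verify() pair
	 * directly. Assumes the two PEM strings hold a matching RSA key pair.
	 */
	inline std::string example_ps256_roundtrip(const std::string& public_pem, const std::string& private_pem, const std::string& data) {
		ps256 alg(public_pem, private_pem);
		std::string sig = alg.sign(data); // throws signature_generation_exception without a usable private key
		alg.verify(data, sig);            // throws signature_verification_exception on mismatch
		return sig;
	}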
}
/**
* Convenience wrapper for JSON value
*/
class claim {
picojson::value val;
public:
enum class type {
null,
boolean,
number,
string,
array,
object,
int64
};
claim()
: val()
{}
#if JWT_CLAIM_EXPLICIT
explicit claim(std::string s)
: val(std::move(s))
{}
explicit claim(const date& s)
: val(int64_t(std::chrono::system_clock::to_time_t(s)))
{}
explicit claim(const std::set<std::string>& s)
: val(picojson::array(s.cbegin(), s.cend()))
{}
explicit claim(const picojson::value& val)
: val(val)
{}
#else
claim(std::string s)
: val(std::move(s))
{}
claim(const date& s)
: val(int64_t(std::chrono::system_clock::to_time_t(s)))
{}
claim(const std::set<std::string>& s)
: val(picojson::array(s.cbegin(), s.cend()))
{}
claim(const picojson::value& val)
: val(val)
{}
#endif
template<typename Iterator>
claim(Iterator start, Iterator end)
: val(picojson::array())
{
auto& arr = val.get<picojson::array>();
for(; start != end; start++) {
arr.push_back(picojson::value(*start));
}
}
/**
* Get wrapped json object
* \return Wrapped json object
*/
picojson::value to_json() const {
return val;
}
/**
* Get type of contained object
* \return Type
	 * \throws std::logic_error An internal error occurred
*/
type get_type() const {
if (val.is<picojson::null>()) return type::null;
else if (val.is<bool>()) return type::boolean;
else if (val.is<int64_t>()) return type::int64;
else if (val.is<double>()) return type::number;
else if (val.is<std::string>()) return type::string;
else if (val.is<picojson::array>()) return type::array;
else if (val.is<picojson::object>()) return type::object;
else throw std::logic_error("internal error");
}
/**
* Get the contained object as a string
* \return content as string
* \throws std::bad_cast Content was not a string
*/
const std::string& as_string() const {
if (!val.is<std::string>())
throw std::bad_cast();
return val.get<std::string>();
}
/**
* Get the contained object as a date
* \return content as date
* \throws std::bad_cast Content was not a date
*/
date as_date() const {
return std::chrono::system_clock::from_time_t(as_int());
}
/**
* Get the contained object as an array
* \return content as array
* \throws std::bad_cast Content was not an array
*/
const picojson::array& as_array() const {
if (!val.is<picojson::array>())
throw std::bad_cast();
return val.get<picojson::array>();
}
/**
* Get the contained object as a set of strings
* \return content as set of strings
* \throws std::bad_cast Content was not a set
*/
const std::set<std::string> as_set() const {
std::set<std::string> res;
for(auto& e : as_array()) {
if(!e.is<std::string>())
throw std::bad_cast();
res.insert(e.get<std::string>());
}
return res;
}
/**
* Get the contained object as an integer
* \return content as int
* \throws std::bad_cast Content was not an int
*/
int64_t as_int() const {
if (!val.is<int64_t>())
throw std::bad_cast();
return val.get<int64_t>();
}
/**
* Get the contained object as a bool
* \return content as bool
* \throws std::bad_cast Content was not a bool
*/
bool as_bool() const {
if (!val.is<bool>())
throw std::bad_cast();
return val.get<bool>();
}
/**
* Get the contained object as a number
* \return content as double
* \throws std::bad_cast Content was not a number
*/
double as_number() const {
if (!val.is<double>())
throw std::bad_cast();
return val.get<double>();
}
/**
* Get the contained object as an object
* \return content as object
* \throws std::bad_cast Content was not an object
*/
const picojson::object& as_object() const {
if (!val.is<picojson::object>())
throw std::bad_cast();
return val.get<picojson::object>();
}
};
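	/**
	 * Illustrative usage sketch (hypothetical helper, not part of the library):
	 * wrapping a JSON value in a claim and reading it back through the typed
	 * accessors above.
	 */
	inline bool example_claim_usage() {
		claim c(std::string("hello"));
		if (c.get_type() != claim::type::string)
			return false;
		return c.as_string() == "hello"; // as_string() throws std::bad_cast on a type mismatch
	}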
/**
* Base class that represents a token payload.
	 * Contains convenience accessors for common claims.
*/
class payload {
protected:
std::unordered_map<std::string, claim> payload_claims;
public:
/**
* Check if issuer is present ("iss")
* \return true if present, false otherwise
*/
bool has_issuer() const noexcept { return has_payload_claim("iss"); }
/**
* Check if subject is present ("sub")
* \return true if present, false otherwise
*/
bool has_subject() const noexcept { return has_payload_claim("sub"); }
/**
* Check if audience is present ("aud")
* \return true if present, false otherwise
*/
bool has_audience() const noexcept { return has_payload_claim("aud"); }
/**
* Check if expires is present ("exp")
* \return true if present, false otherwise
*/
bool has_expires_at() const noexcept { return has_payload_claim("exp"); }
/**
* Check if not before is present ("nbf")
* \return true if present, false otherwise
*/
bool has_not_before() const noexcept { return has_payload_claim("nbf"); }
/**
* Check if issued at is present ("iat")
* \return true if present, false otherwise
*/
bool has_issued_at() const noexcept { return has_payload_claim("iat"); }
/**
* Check if token id is present ("jti")
* \return true if present, false otherwise
*/
bool has_id() const noexcept { return has_payload_claim("jti"); }
/**
* Get issuer claim
* \return issuer as string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_issuer() const { return get_payload_claim("iss").as_string(); }
/**
* Get subject claim
* \return subject as string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_subject() const { return get_payload_claim("sub").as_string(); }
/**
* Get audience claim
* \return audience as a set of strings
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a set (Should not happen in a valid token)
*/
std::set<std::string> get_audience() const {
auto aud = get_payload_claim("aud");
if(aud.get_type() == jwt::claim::type::string) return { aud.as_string()};
else return aud.as_set();
}
/**
* Get expires claim
* \return expires as a date in utc
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a date (Should not happen in a valid token)
*/
const date get_expires_at() const { return get_payload_claim("exp").as_date(); }
/**
* Get not valid before claim
* \return nbf date in utc
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a date (Should not happen in a valid token)
*/
const date get_not_before() const { return get_payload_claim("nbf").as_date(); }
/**
* Get issued at claim
* \return issued at as date in utc
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a date (Should not happen in a valid token)
*/
const date get_issued_at() const { return get_payload_claim("iat").as_date(); }
/**
* Get id claim
* \return id as string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_id() const { return get_payload_claim("jti").as_string(); }
/**
* Check if a payload claim is present
* \return true if claim was present, false otherwise
*/
bool has_payload_claim(const std::string& name) const noexcept { return payload_claims.count(name) != 0; }
/**
* Get payload claim
* \return Requested claim
* \throws std::runtime_error If claim was not present
*/
const claim& get_payload_claim(const std::string& name) const {
if (!has_payload_claim(name))
throw std::runtime_error("claim not found");
return payload_claims.at(name);
}
/**
* Get all payload claims
* \return map of claims
*/
std::unordered_map<std::string, claim> get_payload_claims() const { return payload_claims; }
};
/**
* Base class that represents a token header.
	 * Contains convenience accessors for common claims.
*/
class header {
protected:
std::unordered_map<std::string, claim> header_claims;
public:
/**
		 * Check if algorithm is present ("alg")
* \return true if present, false otherwise
*/
bool has_algorithm() const noexcept { return has_header_claim("alg"); }
/**
* Check if type is present ("typ")
* \return true if present, false otherwise
*/
bool has_type() const noexcept { return has_header_claim("typ"); }
/**
* Check if content type is present ("cty")
* \return true if present, false otherwise
*/
bool has_content_type() const noexcept { return has_header_claim("cty"); }
/**
* Check if key id is present ("kid")
* \return true if present, false otherwise
*/
bool has_key_id() const noexcept { return has_header_claim("kid"); }
/**
* Get algorithm claim
* \return algorithm as string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_algorithm() const { return get_header_claim("alg").as_string(); }
/**
* Get type claim
* \return type as a string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_type() const { return get_header_claim("typ").as_string(); }
/**
* Get content type claim
* \return content type as string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_content_type() const { return get_header_claim("cty").as_string(); }
/**
* Get key id claim
* \return key id as string
* \throws std::runtime_error If claim was not present
* \throws std::bad_cast Claim was present but not a string (Should not happen in a valid token)
*/
const std::string& get_key_id() const { return get_header_claim("kid").as_string(); }
/**
* Check if a header claim is present
* \return true if claim was present, false otherwise
*/
bool has_header_claim(const std::string& name) const noexcept { return header_claims.count(name) != 0; }
/**
* Get header claim
* \return Requested claim
* \throws std::runtime_error If claim was not present
*/
const claim& get_header_claim(const std::string& name) const {
if (!has_header_claim(name))
throw std::runtime_error("claim not found");
return header_claims.at(name);
}
/**
* Get all header claims
* \return map of claims
*/
std::unordered_map<std::string, claim> get_header_claims() const { return header_claims; }
};
/**
* Class containing all information about a decoded token
*/
class decoded_jwt : public header, public payload {
protected:
		/// Unmodified token, as passed to constructor
const std::string token;
/// Header part decoded from base64
std::string header;
/// Unmodified header part in base64
std::string header_base64;
/// Payload part decoded from base64
std::string payload;
/// Unmodified payload part in base64
std::string payload_base64;
/// Signature part decoded from base64
std::string signature;
/// Unmodified signature part in base64
std::string signature_base64;
public:
/**
* Constructor
* Parses a given token
* \param token The token to parse
* \throws std::invalid_argument Token is not in correct format
* \throws std::runtime_error Base64 decoding failed or invalid json
*/
explicit decoded_jwt(const std::string& token)
: token(token)
{
auto hdr_end = token.find('.');
if (hdr_end == std::string::npos)
throw std::invalid_argument("invalid token supplied");
auto payload_end = token.find('.', hdr_end + 1);
if (payload_end == std::string::npos)
throw std::invalid_argument("invalid token supplied");
header = header_base64 = token.substr(0, hdr_end);
payload = payload_base64 = token.substr(hdr_end + 1, payload_end - hdr_end - 1);
signature = signature_base64 = token.substr(payload_end + 1);
			// Restore base64url padding: JWT mandates it be stripped, so add it back before decoding
auto fix_padding = [](std::string& str) {
switch (str.size() % 4) {
case 1:
str += alphabet::base64url::fill();
#ifdef __has_cpp_attribute
#if __has_cpp_attribute(fallthrough)
[[fallthrough]];
#endif
#endif
case 2:
str += alphabet::base64url::fill();
#ifdef __has_cpp_attribute
#if __has_cpp_attribute(fallthrough)
[[fallthrough]];
#endif
#endif
case 3:
str += alphabet::base64url::fill();
#ifdef __has_cpp_attribute
#if __has_cpp_attribute(fallthrough)
[[fallthrough]];
#endif
#endif
default:
break;
}
};
fix_padding(header);
fix_padding(payload);
fix_padding(signature);
header = base::decode<alphabet::base64url>(header);
payload = base::decode<alphabet::base64url>(payload);
signature = base::decode<alphabet::base64url>(signature);
auto parse_claims = [](const std::string& str) {
std::unordered_map<std::string, claim> res;
picojson::value val;
if (!picojson::parse(val, str).empty())
throw std::runtime_error("Invalid json");
for (auto& e : val.get<picojson::object>()) { res.insert({ e.first, claim(e.second) }); }
return res;
};
header_claims = parse_claims(header);
payload_claims = parse_claims(payload);
}
/**
* Get token string, as passed to constructor
* \return token as passed to constructor
*/
const std::string& get_token() const noexcept { return token; }
/**
* Get header part as json string
* \return header part after base64 decoding
*/
const std::string& get_header() const noexcept { return header; }
/**
* Get payload part as json string
* \return payload part after base64 decoding
*/
const std::string& get_payload() const noexcept { return payload; }
/**
		 * Get signature part as a raw (binary) string
* \return signature part after base64 decoding
*/
const std::string& get_signature() const noexcept { return signature; }
/**
* Get header part as base64 string
* \return header part before base64 decoding
*/
const std::string& get_header_base64() const noexcept { return header_base64; }
/**
* Get payload part as base64 string
* \return payload part before base64 decoding
*/
const std::string& get_payload_base64() const noexcept { return payload_base64; }
/**
* Get signature part as base64 string
* \return signature part before base64 decoding
*/
const std::string& get_signature_base64() const noexcept { return signature_base64; }
};
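	/**
	 * Illustrative usage sketch (hypothetical helper, not part of the library):
	 * parsing a compact JWS string and peeking at its header without verifying
	 * the signature; decoded_jwt's constructor throws on malformed input.
	 */
	inline std::string example_peek_algorithm(const std::string& token) {
		decoded_jwt jwt(token);
		return jwt.has_algorithm() ? jwt.get_algorithm() : std::string();
	}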
/**
* Builder class to build and sign a new token
* Use jwt::create() to get an instance of this class.
*/
class builder {
std::unordered_map<std::string, claim> header_claims;
std::unordered_map<std::string, claim> payload_claims;
builder() {}
friend builder create();
public:
/**
* Set a header claim.
* \param id Name of the claim
* \param c Claim to add
* \return *this to allow for method chaining
*/
builder& set_header_claim(const std::string& id, claim c) { header_claims[id] = std::move(c); return *this; }
/**
* Set a payload claim.
* \param id Name of the claim
* \param c Claim to add
* \return *this to allow for method chaining
*/
builder& set_payload_claim(const std::string& id, claim c) { payload_claims[id] = std::move(c); return *this; }
/**
* Set algorithm claim
		 * You normally don't need to do this, as the algorithm claim is filled in automatically from the algorithm passed to sign().
* \param str Name of algorithm
* \return *this to allow for method chaining
*/
builder& set_algorithm(const std::string& str) { return set_header_claim("alg", claim(str)); }
/**
* Set type claim
* \param str Type to set
* \return *this to allow for method chaining
*/
builder& set_type(const std::string& str) { return set_header_claim("typ", claim(str)); }
/**
* Set content type claim
* \param str Type to set
* \return *this to allow for method chaining
*/
builder& set_content_type(const std::string& str) { return set_header_claim("cty", claim(str)); }
/**
* Set key id claim
* \param str Key id to set
* \return *this to allow for method chaining
*/
builder& set_key_id(const std::string& str) { return set_header_claim("kid", claim(str)); }
/**
* Set issuer claim
* \param str Issuer to set
* \return *this to allow for method chaining
*/
builder& set_issuer(const std::string& str) { return set_payload_claim("iss", claim(str)); }
/**
* Set subject claim
* \param str Subject to set
* \return *this to allow for method chaining
*/
builder& set_subject(const std::string& str) { return set_payload_claim("sub", claim(str)); }
/**
* Set audience claim
* \param l Audience set
* \return *this to allow for method chaining
*/
builder& set_audience(const std::set<std::string>& l) { return set_payload_claim("aud", claim(l)); }
/**
* Set audience claim
* \param aud Single audience
* \return *this to allow for method chaining
*/
builder& set_audience(const std::string& aud) { return set_payload_claim("aud", claim(aud)); }
/**
* Set expires at claim
* \param d Expires time
* \return *this to allow for method chaining
*/
builder& set_expires_at(const date& d) { return set_payload_claim("exp", claim(d)); }
/**
* Set not before claim
* \param d First valid time
* \return *this to allow for method chaining
*/
builder& set_not_before(const date& d) { return set_payload_claim("nbf", claim(d)); }
/**
* Set issued at claim
* \param d Issued at time, should be current time
* \return *this to allow for method chaining
*/
builder& set_issued_at(const date& d) { return set_payload_claim("iat", claim(d)); }
/**
* Set id claim
* \param str ID to set
* \return *this to allow for method chaining
*/
builder& set_id(const std::string& str) { return set_payload_claim("jti", claim(str)); }
/**
* Sign token and return result
* \param algo Instance of an algorithm to sign the token with
* \return Final token as a string
*/
template<typename T>
std::string sign(const T& algo) const {
picojson::object obj_header;
obj_header["alg"] = picojson::value(algo.name());
for (auto& e : header_claims) {
obj_header[e.first] = e.second.to_json();
}
picojson::object obj_payload;
for (auto& e : payload_claims) {
obj_payload.insert({ e.first, e.second.to_json() });
}
auto encode = [](const std::string& data) {
auto base = base::encode<alphabet::base64url>(data);
auto pos = base.find(alphabet::base64url::fill());
base = base.substr(0, pos);
return base;
};
std::string header = encode(picojson::value(obj_header).serialize());
std::string payload = encode(picojson::value(obj_payload).serialize());
std::string token = header + "." + payload;
return token + "." + encode(algo.sign(token));
}
};
/**
* Verifier class used to check if a decoded token contains all claims required by your application and has a valid signature.
*/
template<typename Clock>
class verifier {
struct algo_base {
virtual ~algo_base() {}
virtual void verify(const std::string& data, const std::string& sig) = 0;
};
template<typename T>
struct algo : public algo_base {
T alg;
explicit algo(T a) : alg(a) {}
virtual void verify(const std::string& data, const std::string& sig) override {
alg.verify(data, sig);
}
};
/// Required claims
std::unordered_map<std::string, claim> claims;
/// Leeway time for exp, nbf and iat
size_t default_leeway = 0;
/// Instance of clock type
Clock clock;
/// Supported algorithms
std::unordered_map<std::string, std::shared_ptr<algo_base>> algs;
public:
/**
* Constructor for building a new verifier instance
* \param c Clock instance
*/
explicit verifier(Clock c) : clock(c) {}
/**
* Set default leeway to use.
* \param leeway Default leeway to use if not specified otherwise
* \return *this to allow chaining
*/
verifier& leeway(size_t leeway) { default_leeway = leeway; return *this; }
/**
* Set leeway for expires at.
* If not specified the default leeway will be used.
* \param leeway Set leeway to use for expires at.
* \return *this to allow chaining
*/
verifier& expires_at_leeway(size_t leeway) { return with_claim("exp", claim(std::chrono::system_clock::from_time_t(leeway))); }
/**
* Set leeway for not before.
* If not specified the default leeway will be used.
* \param leeway Set leeway to use for not before.
* \return *this to allow chaining
*/
verifier& not_before_leeway(size_t leeway) { return with_claim("nbf", claim(std::chrono::system_clock::from_time_t(leeway))); }
/**
* Set leeway for issued at.
* If not specified the default leeway will be used.
* \param leeway Set leeway to use for issued at.
* \return *this to allow chaining
*/
verifier& issued_at_leeway(size_t leeway) { return with_claim("iat", claim(std::chrono::system_clock::from_time_t(leeway))); }
/**
* Set an issuer to check for.
		 * The check is case-sensitive.
* \param iss Issuer to check for.
* \return *this to allow chaining
*/
verifier& with_issuer(const std::string& iss) { return with_claim("iss", claim(iss)); }
/**
* Set a subject to check for.
		 * The check is case-sensitive.
* \param sub Subject to check for.
* \return *this to allow chaining
*/
verifier& with_subject(const std::string& sub) { return with_claim("sub", claim(sub)); }
/**
* Set an audience to check for.
* If any of the specified audiences is not present in the token the check fails.
* \param aud Audience to check for.
* \return *this to allow chaining
*/
verifier& with_audience(const std::set<std::string>& aud) { return with_claim("aud", claim(aud)); }
/**
* Set an id to check for.
		 * The check is case-sensitive.
* \param id ID to check for.
* \return *this to allow chaining
*/
verifier& with_id(const std::string& id) { return with_claim("jti", claim(id)); }
/**
* Specify a claim to check for.
* \param name Name of the claim to check for
* \param c Claim to check for
* \return *this to allow chaining
*/
verifier& with_claim(const std::string& name, claim c) { claims[name] = c; return *this; }
/**
* Add an algorithm available for checking.
* \param alg Algorithm to allow
* \return *this to allow chaining
*/
template<typename Algorithm>
verifier& allow_algorithm(Algorithm alg) {
algs[alg.name()] = std::make_shared<algo<Algorithm>>(alg);
return *this;
}
/**
* Verify the given token.
* \param jwt Token to check
* \throws token_verification_exception Verification failed
*/
void verify(const decoded_jwt& jwt) const {
const std::string data = jwt.get_header_base64() + "." + jwt.get_payload_base64();
const std::string sig = jwt.get_signature();
const std::string& algo = jwt.get_algorithm();
if (algs.count(algo) == 0)
throw token_verification_exception("wrong algorithm");
algs.at(algo)->verify(data, sig);
auto assert_claim_eq = [](const decoded_jwt& jwt, const std::string& key, const claim& c) {
if (!jwt.has_payload_claim(key))
throw token_verification_exception("decoded_jwt is missing " + key + " claim");
auto& jc = jwt.get_payload_claim(key);
if (jc.get_type() != c.get_type())
throw token_verification_exception("claim " + key + " type mismatch");
if (c.get_type() == claim::type::int64) {
if (c.as_date() != jc.as_date())
throw token_verification_exception("claim " + key + " does not match expected");
}
else if (c.get_type() == claim::type::array) {
auto s1 = c.as_set();
auto s2 = jc.as_set();
if (s1.size() != s2.size())
throw token_verification_exception("claim " + key + " does not match expected");
auto it1 = s1.cbegin();
auto it2 = s2.cbegin();
while (it1 != s1.cend() && it2 != s2.cend()) {
if (*it1++ != *it2++)
throw token_verification_exception("claim " + key + " does not match expected");
}
}
else if (c.get_type() == claim::type::string) {
if (c.as_string() != jc.as_string())
throw token_verification_exception("claim " + key + " does not match expected");
}
else throw token_verification_exception("internal error");
};
auto time = clock.now();
if (jwt.has_expires_at()) {
auto leeway = claims.count("exp") == 1 ? std::chrono::system_clock::to_time_t(claims.at("exp").as_date()) : default_leeway;
auto exp = jwt.get_expires_at();
if (time > exp + std::chrono::seconds(leeway))
throw token_verification_exception("token expired");
}
if (jwt.has_issued_at()) {
auto leeway = claims.count("iat") == 1 ? std::chrono::system_clock::to_time_t(claims.at("iat").as_date()) : default_leeway;
auto iat = jwt.get_issued_at();
if (time < iat - std::chrono::seconds(leeway))
					throw token_verification_exception("token issued in the future");
}
if (jwt.has_not_before()) {
auto leeway = claims.count("nbf") == 1 ? std::chrono::system_clock::to_time_t(claims.at("nbf").as_date()) : default_leeway;
auto nbf = jwt.get_not_before();
if (time < nbf - std::chrono::seconds(leeway))
					throw token_verification_exception("token not valid yet");
}
for (auto& c : claims)
{
if (c.first == "exp" || c.first == "iat" || c.first == "nbf") {
// Nothing to do here, already checked
}
else if (c.first == "aud") {
if (!jwt.has_audience())
throw token_verification_exception("token doesn't contain the required audience");
auto aud = jwt.get_audience();
auto expected = c.second.as_set();
for (auto& e : expected)
if (aud.count(e) == 0)
throw token_verification_exception("token doesn't contain the required audience");
}
else {
assert_claim_eq(jwt, c.first, c.second);
}
}
}
};
/**
* Create a verifier using the given clock
* \param c Clock instance to use
* \return verifier instance
*/
template<typename Clock>
verifier<Clock> verify(Clock c) {
return verifier<Clock>(c);
}
/**
* Default clock class using std::chrono::system_clock as a backend.
*/
struct default_clock {
std::chrono::system_clock::time_point now() const {
return std::chrono::system_clock::now();
}
};
/**
* Create a verifier using the default clock
* \return verifier instance
*/
inline
verifier<default_clock> verify() {
return verify<default_clock>({});
}
/**
* Return a builder instance to create a new token
*/
inline
builder create() {
return builder();
}
/**
* Decode a token
* \param token Token to decode
* \return Decoded token
* \throws std::invalid_argument Token is not in correct format
* \throws std::runtime_error Base64 decoding failed or invalid json
*/
inline
decoded_jwt decode(const std::string& token) {
return decoded_jwt(token);
}
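	/**
	 * Illustrative end-to-end sketch (hypothetical helpers, not part of the
	 * library): issuing and checking a token with the pieces above. The shared
	 * secret and issuer name are placeholders.
	 */
	inline std::string example_issue_token(const std::string& secret) {
		return create()
			.set_issuer("example-issuer")
			.set_type("JWS")
			.sign(algorithm::hs256(secret));
	}
	inline void example_check_token(const std::string& token, const std::string& secret) {
		auto decoded = decode(token);
		verify()
			.allow_algorithm(algorithm::hs256(secret))
			.with_issuer("example-issuer")
			.verify(decoded); // throws token_verification_exception on failure
	}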
}
| 57,647 | 34.673267 | 214 |
h
|
null |
ceph-main/src/rgw/picojson/picojson.h
|
/*
* Copyright 2009-2010 Cybozu Labs, Inc.
* Copyright 2011-2014 Kazuho Oku
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef picojson_h
#define picojson_h
#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cstddef>
#include <iostream>
#include <iterator>
#include <limits>
#include <map>
#include <stdexcept>
#include <string>
#include <vector>
#include <utility>
// for isnan/isinf
#if __cplusplus >= 201103L
#include <cmath>
#else
extern "C" {
#ifdef _MSC_VER
#include <float.h>
#elif defined(__INTEL_COMPILER)
#include <mathimf.h>
#else
#include <math.h>
#endif
}
#endif
#ifndef PICOJSON_USE_RVALUE_REFERENCE
#if (defined(__cpp_rvalue_references) && __cpp_rvalue_references >= 200610) || (defined(_MSC_VER) && _MSC_VER >= 1600)
#define PICOJSON_USE_RVALUE_REFERENCE 1
#else
#define PICOJSON_USE_RVALUE_REFERENCE 0
#endif
#endif // PICOJSON_USE_RVALUE_REFERENCE
#ifndef PICOJSON_NOEXCEPT
#if PICOJSON_USE_RVALUE_REFERENCE
#define PICOJSON_NOEXCEPT noexcept
#else
#define PICOJSON_NOEXCEPT throw()
#endif
#endif
// experimental support for int64_t (see README.mkdn for details)
#ifdef PICOJSON_USE_INT64
//#define __STDC_FORMAT_MACROS
#include <cerrno>
#if __cplusplus >= 201103L
#include <cinttypes>
#else
extern "C" {
#include <inttypes.h>
}
#endif
#endif
// to disable the use of localeconv(3), set PICOJSON_USE_LOCALE to 0
#ifndef PICOJSON_USE_LOCALE
#define PICOJSON_USE_LOCALE 1
#endif
#if PICOJSON_USE_LOCALE
extern "C" {
#include <locale.h>
}
#endif
#ifndef PICOJSON_ASSERT
#define PICOJSON_ASSERT(e) \
do { \
if (!(e)) \
throw std::runtime_error(#e); \
} while (0)
#endif
#ifdef _MSC_VER
#define SNPRINTF _snprintf_s
#pragma warning(push)
#pragma warning(disable : 4244) // conversion from int to char
#pragma warning(disable : 4127) // conditional expression is constant
#pragma warning(disable : 4702) // unreachable code
#else
#define SNPRINTF snprintf
#endif
namespace picojson {
enum {
null_type,
boolean_type,
number_type,
string_type,
array_type,
object_type
#ifdef PICOJSON_USE_INT64
,
int64_type
#endif
};
enum { INDENT_WIDTH = 2 };
struct null {};
class value {
public:
typedef std::vector<value> array;
typedef std::map<std::string, value> object;
union _storage {
bool boolean_;
double number_;
#ifdef PICOJSON_USE_INT64
int64_t int64_;
#endif
std::string *string_;
array *array_;
object *object_;
};
protected:
int type_;
_storage u_;
public:
value();
value(int type, bool);
explicit value(bool b);
#ifdef PICOJSON_USE_INT64
explicit value(int64_t i);
#endif
explicit value(double n);
explicit value(const std::string &s);
explicit value(const array &a);
explicit value(const object &o);
#if PICOJSON_USE_RVALUE_REFERENCE
explicit value(std::string &&s);
explicit value(array &&a);
explicit value(object &&o);
#endif
explicit value(const char *s);
value(const char *s, size_t len);
~value();
value(const value &x);
value &operator=(const value &x);
#if PICOJSON_USE_RVALUE_REFERENCE
value(value &&x) PICOJSON_NOEXCEPT;
value &operator=(value &&x) PICOJSON_NOEXCEPT;
#endif
void swap(value &x) PICOJSON_NOEXCEPT;
template <typename T> bool is() const;
template <typename T> const T &get() const;
template <typename T> T &get();
template <typename T> void set(const T &);
#if PICOJSON_USE_RVALUE_REFERENCE
template <typename T> void set(T &&);
#endif
bool evaluate_as_boolean() const;
const value &get(const size_t idx) const;
const value &get(const std::string &key) const;
value &get(const size_t idx);
value &get(const std::string &key);
bool contains(const size_t idx) const;
bool contains(const std::string &key) const;
std::string to_str() const;
template <typename Iter> void serialize(Iter os, bool prettify = false) const;
std::string serialize(bool prettify = false) const;
private:
template <typename T> value(const T *); // intentionally defined to block implicit conversion of pointer to bool
template <typename Iter> static void _indent(Iter os, int indent);
template <typename Iter> void _serialize(Iter os, int indent) const;
std::string _serialize(int indent) const;
void clear();
};
typedef value::array array;
typedef value::object object;
inline value::value() : type_(null_type), u_() {
}
inline value::value(int type, bool) : type_(type), u_() {
switch (type) {
#define INIT(p, v) \
case p##type: \
u_.p = v; \
break
INIT(boolean_, false);
INIT(number_, 0.0);
#ifdef PICOJSON_USE_INT64
INIT(int64_, 0);
#endif
INIT(string_, new std::string());
INIT(array_, new array());
INIT(object_, new object());
#undef INIT
default:
break;
}
}
inline value::value(bool b) : type_(boolean_type), u_() {
u_.boolean_ = b;
}
#ifdef PICOJSON_USE_INT64
inline value::value(int64_t i) : type_(int64_type), u_() {
u_.int64_ = i;
}
#endif
inline value::value(double n) : type_(number_type), u_() {
if (
#ifdef _MSC_VER
!_finite(n)
#elif __cplusplus >= 201103L
std::isnan(n) || std::isinf(n)
#else
isnan(n) || isinf(n)
#endif
) {
throw std::overflow_error("");
}
u_.number_ = n;
}
inline value::value(const std::string &s) : type_(string_type), u_() {
u_.string_ = new std::string(s);
}
inline value::value(const array &a) : type_(array_type), u_() {
u_.array_ = new array(a);
}
inline value::value(const object &o) : type_(object_type), u_() {
u_.object_ = new object(o);
}
#if PICOJSON_USE_RVALUE_REFERENCE
inline value::value(std::string &&s) : type_(string_type), u_() {
u_.string_ = new std::string(std::move(s));
}
inline value::value(array &&a) : type_(array_type), u_() {
u_.array_ = new array(std::move(a));
}
inline value::value(object &&o) : type_(object_type), u_() {
u_.object_ = new object(std::move(o));
}
#endif
inline value::value(const char *s) : type_(string_type), u_() {
u_.string_ = new std::string(s);
}
inline value::value(const char *s, size_t len) : type_(string_type), u_() {
u_.string_ = new std::string(s, len);
}
inline void value::clear() {
switch (type_) {
#define DEINIT(p) \
case p##type: \
delete u_.p; \
break
DEINIT(string_);
DEINIT(array_);
DEINIT(object_);
#undef DEINIT
default:
break;
}
}
inline value::~value() {
clear();
}
inline value::value(const value &x) : type_(x.type_), u_() {
switch (type_) {
#define INIT(p, v) \
case p##type: \
u_.p = v; \
break
INIT(string_, new std::string(*x.u_.string_));
INIT(array_, new array(*x.u_.array_));
INIT(object_, new object(*x.u_.object_));
#undef INIT
default:
u_ = x.u_;
break;
}
}
inline value &value::operator=(const value &x) {
if (this != &x) {
value t(x);
swap(t);
}
return *this;
}
#if PICOJSON_USE_RVALUE_REFERENCE
inline value::value(value &&x) PICOJSON_NOEXCEPT : type_(null_type), u_() {
swap(x);
}
inline value &value::operator=(value &&x) PICOJSON_NOEXCEPT {
swap(x);
return *this;
}
#endif
inline void value::swap(value &x) PICOJSON_NOEXCEPT {
std::swap(type_, x.type_);
std::swap(u_, x.u_);
}
#define IS(ctype, jtype) \
template <> inline bool value::is<ctype>() const { \
return type_ == jtype##_type; \
}
IS(null, null)
IS(bool, boolean)
#ifdef PICOJSON_USE_INT64
IS(int64_t, int64)
#endif
IS(std::string, string)
IS(array, array)
IS(object, object)
#undef IS
template <> inline bool value::is<double>() const {
return type_ == number_type
#ifdef PICOJSON_USE_INT64
|| type_ == int64_type
#endif
;
}
#define GET(ctype, var) \
template <> inline const ctype &value::get<ctype>() const { \
PICOJSON_ASSERT("type mismatch! call is<type>() before get<type>()" && is<ctype>()); \
return var; \
} \
template <> inline ctype &value::get<ctype>() { \
PICOJSON_ASSERT("type mismatch! call is<type>() before get<type>()" && is<ctype>()); \
return var; \
}
GET(bool, u_.boolean_)
GET(std::string, *u_.string_)
GET(array, *u_.array_)
GET(object, *u_.object_)
#ifdef PICOJSON_USE_INT64
GET(double,
(type_ == int64_type && (const_cast<value *>(this)->type_ = number_type, (const_cast<value *>(this)->u_.number_ = u_.int64_)),
u_.number_))
GET(int64_t, u_.int64_)
#else
GET(double, u_.number_)
#endif
#undef GET
#define SET(ctype, jtype, setter) \
template <> inline void value::set<ctype>(const ctype &_val) { \
clear(); \
type_ = jtype##_type; \
setter \
}
SET(bool, boolean, u_.boolean_ = _val;)
SET(std::string, string, u_.string_ = new std::string(_val);)
SET(array, array, u_.array_ = new array(_val);)
SET(object, object, u_.object_ = new object(_val);)
SET(double, number, u_.number_ = _val;)
#ifdef PICOJSON_USE_INT64
SET(int64_t, int64, u_.int64_ = _val;)
#endif
#undef SET
#if PICOJSON_USE_RVALUE_REFERENCE
#define MOVESET(ctype, jtype, setter) \
template <> inline void value::set<ctype>(ctype && _val) { \
clear(); \
type_ = jtype##_type; \
setter \
}
MOVESET(std::string, string, u_.string_ = new std::string(std::move(_val));)
MOVESET(array, array, u_.array_ = new array(std::move(_val));)
MOVESET(object, object, u_.object_ = new object(std::move(_val));)
#undef MOVESET
#endif
inline bool value::evaluate_as_boolean() const {
switch (type_) {
case null_type:
return false;
case boolean_type:
return u_.boolean_;
case number_type:
return u_.number_ != 0;
#ifdef PICOJSON_USE_INT64
case int64_type:
return u_.int64_ != 0;
#endif
case string_type:
return !u_.string_->empty();
default:
return true;
}
}
inline const value &value::get(const size_t idx) const {
static value s_null;
PICOJSON_ASSERT(is<array>());
return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null;
}
inline value &value::get(const size_t idx) {
static value s_null;
PICOJSON_ASSERT(is<array>());
return idx < u_.array_->size() ? (*u_.array_)[idx] : s_null;
}
inline const value &value::get(const std::string &key) const {
static value s_null;
PICOJSON_ASSERT(is<object>());
object::const_iterator i = u_.object_->find(key);
return i != u_.object_->end() ? i->second : s_null;
}
inline value &value::get(const std::string &key) {
static value s_null;
PICOJSON_ASSERT(is<object>());
object::iterator i = u_.object_->find(key);
return i != u_.object_->end() ? i->second : s_null;
}
inline bool value::contains(const size_t idx) const {
PICOJSON_ASSERT(is<array>());
return idx < u_.array_->size();
}
inline bool value::contains(const std::string &key) const {
PICOJSON_ASSERT(is<object>());
object::const_iterator i = u_.object_->find(key);
return i != u_.object_->end();
}
inline std::string value::to_str() const {
switch (type_) {
case null_type:
return "null";
case boolean_type:
return u_.boolean_ ? "true" : "false";
#ifdef PICOJSON_USE_INT64
case int64_type: {
char buf[sizeof("-9223372036854775808")];
SNPRINTF(buf, sizeof(buf), "%" PRId64, u_.int64_);
return buf;
}
#endif
case number_type: {
char buf[256];
double tmp;
SNPRINTF(buf, sizeof(buf), fabs(u_.number_) < (1ULL << 53) && modf(u_.number_, &tmp) == 0 ? "%.f" : "%.17g", u_.number_);
#if PICOJSON_USE_LOCALE
char *decimal_point = localeconv()->decimal_point;
if (strcmp(decimal_point, ".") != 0) {
size_t decimal_point_len = strlen(decimal_point);
for (char *p = buf; *p != '\0'; ++p) {
if (strncmp(p, decimal_point, decimal_point_len) == 0) {
return std::string(buf, p) + "." + (p + decimal_point_len);
}
}
}
#endif
return buf;
}
case string_type:
return *u_.string_;
case array_type:
return "array";
case object_type:
return "object";
default:
PICOJSON_ASSERT(0);
#ifdef _MSC_VER
__assume(0);
#endif
}
return std::string();
}
template <typename Iter> void copy(const std::string &s, Iter oi) {
std::copy(s.begin(), s.end(), oi);
}
template <typename Iter> struct serialize_str_char {
Iter oi;
void operator()(char c) {
switch (c) {
#define MAP(val, sym) \
case val: \
copy(sym, oi); \
break
MAP('"', "\\\"");
MAP('\\', "\\\\");
MAP('/', "\\/");
MAP('\b', "\\b");
MAP('\f', "\\f");
MAP('\n', "\\n");
MAP('\r', "\\r");
MAP('\t', "\\t");
#undef MAP
default:
if (static_cast<unsigned char>(c) < 0x20 || c == 0x7f) {
char buf[7];
SNPRINTF(buf, sizeof(buf), "\\u%04x", c & 0xff);
copy(buf, buf + 6, oi);
} else {
*oi++ = c;
}
break;
}
}
};
template <typename Iter> void serialize_str(const std::string &s, Iter oi) {
*oi++ = '"';
serialize_str_char<Iter> process_char = {oi};
std::for_each(s.begin(), s.end(), process_char);
*oi++ = '"';
}
template <typename Iter> void value::serialize(Iter oi, bool prettify) const {
return _serialize(oi, prettify ? 0 : -1);
}
inline std::string value::serialize(bool prettify) const {
return _serialize(prettify ? 0 : -1);
}
template <typename Iter> void value::_indent(Iter oi, int indent) {
*oi++ = '\n';
for (int i = 0; i < indent * INDENT_WIDTH; ++i) {
*oi++ = ' ';
}
}
template <typename Iter> void value::_serialize(Iter oi, int indent) const {
switch (type_) {
case string_type:
serialize_str(*u_.string_, oi);
break;
case array_type: {
*oi++ = '[';
if (indent != -1) {
++indent;
}
for (array::const_iterator i = u_.array_->begin(); i != u_.array_->end(); ++i) {
if (i != u_.array_->begin()) {
*oi++ = ',';
}
if (indent != -1) {
_indent(oi, indent);
}
i->_serialize(oi, indent);
}
if (indent != -1) {
--indent;
if (!u_.array_->empty()) {
_indent(oi, indent);
}
}
*oi++ = ']';
break;
}
case object_type: {
*oi++ = '{';
if (indent != -1) {
++indent;
}
for (object::const_iterator i = u_.object_->begin(); i != u_.object_->end(); ++i) {
if (i != u_.object_->begin()) {
*oi++ = ',';
}
if (indent != -1) {
_indent(oi, indent);
}
serialize_str(i->first, oi);
*oi++ = ':';
if (indent != -1) {
*oi++ = ' ';
}
i->second._serialize(oi, indent);
}
if (indent != -1) {
--indent;
if (!u_.object_->empty()) {
_indent(oi, indent);
}
}
*oi++ = '}';
break;
}
default:
copy(to_str(), oi);
break;
}
if (indent == 0) {
*oi++ = '\n';
}
}
inline std::string value::_serialize(int indent) const {
std::string s;
_serialize(std::back_inserter(s), indent);
return s;
}
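// Illustrative usage sketch (hypothetical helper, not part of upstream
// picojson): building a document from typed values and serializing it.
inline std::string example_build_and_serialize() {
  object o;
  o["name"] = value("ceph");
  o["healthy"] = value(true);
  o["count"] = value(3.0);
  return value(o).serialize(); // => {"count":3,"healthy":true,"name":"ceph"}
}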
template <typename Iter> class input {
protected:
Iter cur_, end_;
bool consumed_;
int line_;
public:
input(const Iter &first, const Iter &last) : cur_(first), end_(last), consumed_(false), line_(1) {
}
int getc() {
if (consumed_) {
if (*cur_ == '\n') {
++line_;
}
++cur_;
}
if (cur_ == end_) {
consumed_ = false;
return -1;
}
consumed_ = true;
return *cur_ & 0xff;
}
void ungetc() {
consumed_ = false;
}
Iter cur() const {
if (consumed_) {
input<Iter> *self = const_cast<input<Iter> *>(this);
self->consumed_ = false;
++self->cur_;
}
return cur_;
}
int line() const {
return line_;
}
void skip_ws() {
while (1) {
int ch = getc();
if (!(ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r')) {
ungetc();
break;
}
}
}
bool picojson_expect(const int expected) {
skip_ws();
if (getc() != expected) {
ungetc();
return false;
}
return true;
}
bool match(const std::string &pattern) {
for (std::string::const_iterator pi(pattern.begin()); pi != pattern.end(); ++pi) {
if (getc() != *pi) {
ungetc();
return false;
}
}
return true;
}
};
template <typename Iter> inline int _parse_quadhex(input<Iter> &in) {
int uni_ch = 0, hex;
for (int i = 0; i < 4; i++) {
if ((hex = in.getc()) == -1) {
return -1;
}
if ('0' <= hex && hex <= '9') {
hex -= '0';
} else if ('A' <= hex && hex <= 'F') {
hex -= 'A' - 0xa;
} else if ('a' <= hex && hex <= 'f') {
hex -= 'a' - 0xa;
} else {
in.ungetc();
return -1;
}
uni_ch = uni_ch * 16 + hex;
}
return uni_ch;
}
template <typename String, typename Iter> inline bool _parse_codepoint(String &out, input<Iter> &in) {
int uni_ch;
if ((uni_ch = _parse_quadhex(in)) == -1) {
return false;
}
if (0xd800 <= uni_ch && uni_ch <= 0xdfff) {
if (0xdc00 <= uni_ch) {
      // the second 16-bit unit of a surrogate pair appeared without the first
return false;
}
    // first 16-bit unit of a surrogate pair, get the second one
if (in.getc() != '\\' || in.getc() != 'u') {
in.ungetc();
return false;
}
int second = _parse_quadhex(in);
if (!(0xdc00 <= second && second <= 0xdfff)) {
return false;
}
uni_ch = ((uni_ch - 0xd800) << 10) | ((second - 0xdc00) & 0x3ff);
uni_ch += 0x10000;
}
if (uni_ch < 0x80) {
out.push_back(static_cast<char>(uni_ch));
} else {
if (uni_ch < 0x800) {
out.push_back(static_cast<char>(0xc0 | (uni_ch >> 6)));
} else {
if (uni_ch < 0x10000) {
out.push_back(static_cast<char>(0xe0 | (uni_ch >> 12)));
} else {
out.push_back(static_cast<char>(0xf0 | (uni_ch >> 18)));
out.push_back(static_cast<char>(0x80 | ((uni_ch >> 12) & 0x3f)));
}
out.push_back(static_cast<char>(0x80 | ((uni_ch >> 6) & 0x3f)));
}
out.push_back(static_cast<char>(0x80 | (uni_ch & 0x3f)));
}
return true;
}
template <typename String, typename Iter> inline bool _parse_string(String &out, input<Iter> &in) {
while (1) {
int ch = in.getc();
if (ch < ' ') {
in.ungetc();
return false;
} else if (ch == '"') {
return true;
} else if (ch == '\\') {
if ((ch = in.getc()) == -1) {
return false;
}
switch (ch) {
#define MAP(sym, val) \
case sym: \
out.push_back(val); \
break
MAP('"', '\"');
MAP('\\', '\\');
MAP('/', '/');
MAP('b', '\b');
MAP('f', '\f');
MAP('n', '\n');
MAP('r', '\r');
MAP('t', '\t');
#undef MAP
case 'u':
if (!_parse_codepoint(out, in)) {
return false;
}
break;
default:
return false;
}
} else {
out.push_back(static_cast<char>(ch));
}
}
return false;
}
template <typename Context, typename Iter> inline bool _parse_array(Context &ctx, input<Iter> &in) {
if (!ctx.parse_array_start()) {
return false;
}
size_t idx = 0;
if (in.picojson_expect(']')) {
return ctx.parse_array_stop(idx);
}
do {
if (!ctx.parse_array_item(in, idx)) {
return false;
}
idx++;
} while (in.picojson_expect(','));
return in.picojson_expect(']') && ctx.parse_array_stop(idx);
}
template <typename Context, typename Iter> inline bool _parse_object(Context &ctx, input<Iter> &in) {
if (!ctx.parse_object_start()) {
return false;
}
if (in.picojson_expect('}')) {
return true;
}
do {
std::string key;
if (!in.picojson_expect('"') || !_parse_string(key, in) || !in.picojson_expect(':')) {
return false;
}
if (!ctx.parse_object_item(in, key)) {
return false;
}
} while (in.picojson_expect(','));
return in.picojson_expect('}');
}
template <typename Iter> inline std::string _parse_number(input<Iter> &in) {
std::string num_str;
while (1) {
int ch = in.getc();
if (('0' <= ch && ch <= '9') || ch == '+' || ch == '-' || ch == 'e' || ch == 'E') {
num_str.push_back(static_cast<char>(ch));
} else if (ch == '.') {
#if PICOJSON_USE_LOCALE
num_str += localeconv()->decimal_point;
#else
num_str.push_back('.');
#endif
} else {
in.ungetc();
break;
}
}
return num_str;
}
template <typename Context, typename Iter> inline bool _parse(Context &ctx, input<Iter> &in) {
in.skip_ws();
int ch = in.getc();
switch (ch) {
#define IS(ch, text, op) \
case ch: \
if (in.match(text) && op) { \
return true; \
} else { \
return false; \
}
IS('n', "ull", ctx.set_null());
IS('f', "alse", ctx.set_bool(false));
IS('t', "rue", ctx.set_bool(true));
#undef IS
case '"':
return ctx.parse_string(in);
case '[':
return _parse_array(ctx, in);
case '{':
return _parse_object(ctx, in);
default:
if (('0' <= ch && ch <= '9') || ch == '-') {
double f;
char *endp;
in.ungetc();
std::string num_str(_parse_number(in));
if (num_str.empty()) {
return false;
}
#ifdef PICOJSON_USE_INT64
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtautological-type-limit-compare"
{
errno = 0;
intmax_t ival = strtoimax(num_str.c_str(), &endp, 10);
if (errno == 0 && std::numeric_limits<int64_t>::min() <= ival && ival <= std::numeric_limits<int64_t>::max() &&
endp == num_str.c_str() + num_str.size()) {
ctx.set_int64(ival);
return true;
}
}
#pragma clang diagnostic pop
#endif
f = strtod(num_str.c_str(), &endp);
if (endp == num_str.c_str() + num_str.size()) {
ctx.set_number(f);
return true;
}
return false;
}
break;
}
in.ungetc();
return false;
}
class deny_parse_context {
public:
bool set_null() {
return false;
}
bool set_bool(bool) {
return false;
}
#ifdef PICOJSON_USE_INT64
bool set_int64(int64_t) {
return false;
}
#endif
bool set_number(double) {
return false;
}
template <typename Iter> bool parse_string(input<Iter> &) {
return false;
}
bool parse_array_start() {
return false;
}
template <typename Iter> bool parse_array_item(input<Iter> &, size_t) {
return false;
}
bool parse_array_stop(size_t) {
return false;
}
bool parse_object_start() {
return false;
}
template <typename Iter> bool parse_object_item(input<Iter> &, const std::string &) {
return false;
}
};
class default_parse_context {
protected:
value *out_;
public:
default_parse_context(value *out) : out_(out) {
}
bool set_null() {
*out_ = value();
return true;
}
bool set_bool(bool b) {
*out_ = value(b);
return true;
}
#ifdef PICOJSON_USE_INT64
bool set_int64(int64_t i) {
*out_ = value(i);
return true;
}
#endif
bool set_number(double f) {
*out_ = value(f);
return true;
}
template <typename Iter> bool parse_string(input<Iter> &in) {
*out_ = value(string_type, false);
return _parse_string(out_->get<std::string>(), in);
}
bool parse_array_start() {
*out_ = value(array_type, false);
return true;
}
template <typename Iter> bool parse_array_item(input<Iter> &in, size_t) {
array &a = out_->get<array>();
a.push_back(value());
default_parse_context ctx(&a.back());
return _parse(ctx, in);
}
bool parse_array_stop(size_t) {
return true;
}
bool parse_object_start() {
*out_ = value(object_type, false);
return true;
}
template <typename Iter> bool parse_object_item(input<Iter> &in, const std::string &key) {
object &o = out_->get<object>();
default_parse_context ctx(&o[key]);
return _parse(ctx, in);
}
private:
default_parse_context(const default_parse_context &);
default_parse_context &operator=(const default_parse_context &);
};
class null_parse_context {
public:
struct dummy_str {
void push_back(int) {
}
};
public:
null_parse_context() {
}
bool set_null() {
return true;
}
bool set_bool(bool) {
return true;
}
#ifdef PICOJSON_USE_INT64
bool set_int64(int64_t) {
return true;
}
#endif
bool set_number(double) {
return true;
}
template <typename Iter> bool parse_string(input<Iter> &in) {
dummy_str s;
return _parse_string(s, in);
}
bool parse_array_start() {
return true;
}
template <typename Iter> bool parse_array_item(input<Iter> &in, size_t) {
return _parse(*this, in);
}
bool parse_array_stop(size_t) {
return true;
}
bool parse_object_start() {
return true;
}
template <typename Iter> bool parse_object_item(input<Iter> &in, const std::string &) {
return _parse(*this, in);
}
private:
null_parse_context(const null_parse_context &);
null_parse_context &operator=(const null_parse_context &);
};
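// Illustrative usage sketch (hypothetical helper, not part of upstream
// picojson): syntax-checking input without building a DOM by running the
// parser against null_parse_context.
inline bool example_validate_only(const std::string &s) {
  null_parse_context ctx;
  input<std::string::const_iterator> in(s.begin(), s.end());
  return _parse(ctx, in); // true iff s starts with a syntactically valid value
}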
// obsolete, use the version below
template <typename Iter> inline std::string parse(value &out, Iter &pos, const Iter &last) {
std::string err;
pos = parse(out, pos, last, &err);
return err;
}
template <typename Context, typename Iter> inline Iter _parse(Context &ctx, const Iter &first, const Iter &last, std::string *err) {
input<Iter> in(first, last);
if (!_parse(ctx, in) && err != NULL) {
char buf[64];
SNPRINTF(buf, sizeof(buf), "syntax error at line %d near: ", in.line());
*err = buf;
while (1) {
int ch = in.getc();
if (ch == -1 || ch == '\n') {
break;
} else if (ch >= ' ') {
err->push_back(static_cast<char>(ch));
}
}
}
return in.cur();
}
template <typename Iter> inline Iter parse(value &out, const Iter &first, const Iter &last, std::string *err) {
default_parse_context ctx(&out);
return _parse(ctx, first, last, err);
}
inline std::string parse(value &out, const std::string &s) {
std::string err;
parse(out, s.begin(), s.end(), &err);
return err;
}
inline std::string parse(value &out, std::istream &is) {
std::string err;
parse(out, std::istreambuf_iterator<char>(is.rdbuf()), std::istreambuf_iterator<char>(), &err);
return err;
}
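/* Example usage (illustrative only): parse a string into a DOM value and read
 * a field back; an empty error string signals success.
 *
 *   picojson::value v;
 *   std::string err = picojson::parse(v, "{\"shards\": 11}");
 *   if (err.empty() && v.is<picojson::object>()) {
 *     std::cout << v.get("shards") << std::endl;  // 11
 *   }
 */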
template <typename T> struct last_error_t { static std::string s; };
template <typename T> std::string last_error_t<T>::s;
inline void set_last_error(const std::string &s) {
last_error_t<bool>::s = s;
}
inline const std::string &get_last_error() {
return last_error_t<bool>::s;
}
inline bool operator==(const value &x, const value &y) {
if (x.is<null>())
return y.is<null>();
#define PICOJSON_CMP(type) \
if (x.is<type>()) \
return y.is<type>() && x.get<type>() == y.get<type>()
PICOJSON_CMP(bool);
PICOJSON_CMP(double);
PICOJSON_CMP(std::string);
PICOJSON_CMP(array);
PICOJSON_CMP(object);
#undef PICOJSON_CMP
PICOJSON_ASSERT(0);
#ifdef _MSC_VER
__assume(0);
#endif
return false;
}
inline bool operator!=(const value &x, const value &y) {
return !(x == y);
}
}
#if !PICOJSON_USE_RVALUE_REFERENCE
namespace std {
template <> inline void swap(picojson::value &x, picojson::value &y) {
x.swap(y);
}
}
#endif
inline std::istream &operator>>(std::istream &is, picojson::value &x) {
picojson::set_last_error(std::string());
const std::string err(picojson::parse(x, is));
if (!err.empty()) {
picojson::set_last_error(err);
is.setstate(std::ios::failbit);
}
return is;
}
inline std::ostream &operator<<(std::ostream &os, const picojson::value &x) {
x.serialize(std::ostream_iterator<char>(os));
return os;
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif
ceph-main/src/rgw/services/svc_bi.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
class RGWBucketInfo;
struct RGWBucketEnt;
class RGWSI_BucketIndex : public RGWServiceInstance
{
public:
RGWSI_BucketIndex(CephContext *cct) : RGWServiceInstance(cct) {}
virtual ~RGWSI_BucketIndex() {}
virtual int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout) = 0;
virtual int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout) = 0;
virtual int read_stats(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWBucketEnt *stats,
optional_yield y) = 0;
virtual int handle_overwrite(const DoutPrefixProvider *dpp,
const RGWBucketInfo& info,
const RGWBucketInfo& orig_info,
optional_yield y) = 0;
};
ceph-main/src/rgw/services/svc_bi_rados.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "svc_bi_rados.h"
#include "svc_bilog_rados.h"
#include "svc_zone.h"
#include "rgw_bucket.h"
#include "rgw_zone.h"
#include "rgw_datalog.h"
#include "cls/rgw/cls_rgw_client.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
static string dir_oid_prefix = ".dir.";
RGWSI_BucketIndex_RADOS::RGWSI_BucketIndex_RADOS(CephContext *cct) : RGWSI_BucketIndex(cct)
{
}
void RGWSI_BucketIndex_RADOS::init(RGWSI_Zone *zone_svc,
RGWSI_RADOS *rados_svc,
RGWSI_BILog_RADOS *bilog_svc,
RGWDataChangesLog *datalog_rados_svc)
{
svc.zone = zone_svc;
svc.rados = rados_svc;
svc.bilog = bilog_svc;
svc.datalog_rados = datalog_rados_svc;
}
int RGWSI_BucketIndex_RADOS::open_pool(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
RGWSI_RADOS::Pool *index_pool,
bool mostly_omap)
{
*index_pool = svc.rados->pool(pool);
return index_pool->open(dpp, RGWSI_RADOS::OpenParams()
.set_mostly_omap(mostly_omap));
}
int RGWSI_BucketIndex_RADOS::open_bucket_index_pool(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool)
{
const rgw_pool& explicit_pool = bucket_info.bucket.explicit_placement.index_pool;
if (!explicit_pool.empty()) {
return open_pool(dpp, explicit_pool, index_pool, false);
}
auto& zonegroup = svc.zone->get_zonegroup();
auto& zone_params = svc.zone->get_zone_params();
const rgw_placement_rule *rule = &bucket_info.placement_rule;
if (rule->empty()) {
rule = &zonegroup.default_placement;
}
auto iter = zone_params.placement_pools.find(rule->name);
if (iter == zone_params.placement_pools.end()) {
    ldpp_dout(dpp, 0) << "could not find placement rule " << *rule << " within zonegroup " << zonegroup.get_id() << dendl;
return -EINVAL;
}
int r = open_pool(dpp, iter->second.index_pool, index_pool, true);
if (r < 0)
return r;
return 0;
}
int RGWSI_BucketIndex_RADOS::open_bucket_index_base(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
string *bucket_oid_base)
{
const rgw_bucket& bucket = bucket_info.bucket;
int r = open_bucket_index_pool(dpp, bucket_info, index_pool);
if (r < 0)
return r;
if (bucket.bucket_id.empty()) {
ldpp_dout(dpp, 0) << "ERROR: empty bucket_id for bucket operation" << dendl;
return -EIO;
}
*bucket_oid_base = dir_oid_prefix;
bucket_oid_base->append(bucket.bucket_id);
return 0;
}
int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
string *bucket_oid)
{
const rgw_bucket& bucket = bucket_info.bucket;
int r = open_bucket_index_pool(dpp, bucket_info, index_pool);
if (r < 0) {
ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< r << dendl;
return r;
}
if (bucket.bucket_id.empty()) {
ldpp_dout(dpp, 0) << "ERROR: empty bucket id for bucket operation" << dendl;
return -EIO;
}
*bucket_oid = dir_oid_prefix;
bucket_oid->append(bucket.bucket_id);
return 0;
}
// snprintf() returns an int; returning it as char would silently truncate
static int bucket_obj_with_generation(char *buf, size_t len, const string& bucket_oid_base, uint64_t gen_id,
                                      uint32_t shard_id)
{
  return snprintf(buf, len, "%s.%" PRIu64 ".%d", bucket_oid_base.c_str(), gen_id, shard_id);
}
static int bucket_obj_without_generation(char *buf, size_t len, const string& bucket_oid_base, uint32_t shard_id)
{
  return snprintf(buf, len, "%s.%d", bucket_oid_base.c_str(), shard_id);
}
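// Illustrative result: with bucket_oid_base ".dir.<bucket_id>", shard 3 of the
// initial generation is named ".dir.<bucket_id>.3" (gen 0 is omitted for
// backward compatibility), while shard 3 of generation 2 is named
// ".dir.<bucket_id>.2.3".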
static void get_bucket_index_objects(const string& bucket_oid_base,
uint32_t num_shards, uint64_t gen_id,
map<int, string> *_bucket_objects,
int shard_id = -1)
{
auto& bucket_objects = *_bucket_objects;
if (!num_shards) {
bucket_objects[0] = bucket_oid_base;
} else {
char buf[bucket_oid_base.size() + 64];
if (shard_id < 0) {
for (uint32_t i = 0; i < num_shards; ++i) {
if (gen_id) {
bucket_obj_with_generation(buf, sizeof(buf), bucket_oid_base, gen_id, i);
} else {
bucket_obj_without_generation(buf, sizeof(buf), bucket_oid_base, i);
}
bucket_objects[i] = buf;
}
} else {
if (std::cmp_greater(shard_id, num_shards)) {
return;
} else {
if (gen_id) {
bucket_obj_with_generation(buf, sizeof(buf), bucket_oid_base, gen_id, shard_id);
} else {
// for backward compatibility, gen_id(0) will not be added in the object name
bucket_obj_without_generation(buf, sizeof(buf), bucket_oid_base, shard_id);
}
bucket_objects[shard_id] = buf;
}
}
}
}
static void get_bucket_instance_ids(const RGWBucketInfo& bucket_info,
int num_shards, int shard_id,
map<int, string> *result)
{
const rgw_bucket& bucket = bucket_info.bucket;
string plain_id = bucket.name + ":" + bucket.bucket_id;
if (!num_shards) {
(*result)[0] = plain_id;
} else {
char buf[16];
if (shard_id < 0) {
for (int i = 0; i < num_shards; ++i) {
snprintf(buf, sizeof(buf), ":%d", i);
(*result)[i] = plain_id + buf;
}
} else {
if (shard_id > num_shards) {
return;
}
snprintf(buf, sizeof(buf), ":%d", shard_id);
(*result)[shard_id] = plain_id + buf;
}
}
}
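// Illustrative output: for bucket "foo" with bucket_id "abc.123" and three
// shards, the instance ids are "foo:abc.123:0", "foo:abc.123:1" and
// "foo:abc.123:2"; with num_shards == 0 the single id is plain "foo:abc.123".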
int RGWSI_BucketIndex_RADOS::open_bucket_index(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
std::optional<int> _shard_id,
const rgw::bucket_index_layout_generation& idx_layout,
RGWSI_RADOS::Pool *index_pool,
map<int, string> *bucket_objs,
map<int, string> *bucket_instance_ids)
{
int shard_id = _shard_id.value_or(-1);
string bucket_oid_base;
int ret = open_bucket_index_base(dpp, bucket_info, index_pool, &bucket_oid_base);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< ret << dendl;
return ret;
}
get_bucket_index_objects(bucket_oid_base, idx_layout.layout.normal.num_shards,
idx_layout.gen, bucket_objs, shard_id);
if (bucket_instance_ids) {
get_bucket_instance_ids(bucket_info, idx_layout.layout.normal.num_shards,
shard_id, bucket_instance_ids);
}
return 0;
}
void RGWSI_BucketIndex_RADOS::get_bucket_index_object(
const std::string& bucket_oid_base,
const rgw::bucket_index_normal_layout& normal,
uint64_t gen_id, int shard_id,
std::string* bucket_obj)
{
if (!normal.num_shards) {
// By default with no sharding, we use the bucket oid as itself
(*bucket_obj) = bucket_oid_base;
} else {
char buf[bucket_oid_base.size() + 64];
if (gen_id) {
bucket_obj_with_generation(buf, sizeof(buf), bucket_oid_base, gen_id, shard_id);
(*bucket_obj) = buf;
ldout(cct, 10) << "bucket_obj is " << (*bucket_obj) << dendl;
} else {
// for backward compatibility, gen_id(0) will not be added in the object name
bucket_obj_without_generation(buf, sizeof(buf), bucket_oid_base, shard_id);
(*bucket_obj) = buf;
}
}
}
int RGWSI_BucketIndex_RADOS::get_bucket_index_object(
const std::string& bucket_oid_base,
const rgw::bucket_index_normal_layout& normal,
uint64_t gen_id, const std::string& obj_key,
std::string* bucket_obj, int* shard_id)
{
int r = 0;
switch (normal.hash_type) {
case rgw::BucketHashType::Mod:
if (!normal.num_shards) {
// By default with no sharding, we use the bucket oid as itself
(*bucket_obj) = bucket_oid_base;
if (shard_id) {
*shard_id = -1;
}
} else {
uint32_t sid = bucket_shard_index(obj_key, normal.num_shards);
char buf[bucket_oid_base.size() + 64];
if (gen_id) {
bucket_obj_with_generation(buf, sizeof(buf), bucket_oid_base, gen_id, sid);
} else {
bucket_obj_without_generation(buf, sizeof(buf), bucket_oid_base, sid);
}
(*bucket_obj) = buf;
if (shard_id) {
*shard_id = (int)sid;
}
}
break;
default:
r = -ENOTSUP;
}
return r;
}
int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const string& obj_key,
RGWSI_RADOS::Obj *bucket_obj,
int *shard_id)
{
string bucket_oid_base;
RGWSI_RADOS::Pool pool;
int ret = open_bucket_index_base(dpp, bucket_info, &pool, &bucket_oid_base);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< ret << dendl;
return ret;
}
string oid;
const auto& current_index = bucket_info.layout.current_index;
ret = get_bucket_index_object(bucket_oid_base, current_index.layout.normal,
current_index.gen, obj_key, &oid, shard_id);
if (ret < 0) {
ldpp_dout(dpp, 10) << "get_bucket_index_object() returned ret=" << ret << dendl;
return ret;
}
*bucket_obj = svc.rados->obj(pool, oid);
return 0;
}
int RGWSI_BucketIndex_RADOS::open_bucket_index_shard(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& index,
int shard_id,
RGWSI_RADOS::Obj *bucket_obj)
{
RGWSI_RADOS::Pool index_pool;
string bucket_oid_base;
int ret = open_bucket_index_base(dpp, bucket_info, &index_pool, &bucket_oid_base);
if (ret < 0) {
ldpp_dout(dpp, 20) << __func__ << ": open_bucket_index_pool() returned "
<< ret << dendl;
return ret;
}
string oid;
get_bucket_index_object(bucket_oid_base, index.layout.normal,
index.gen, shard_id, &oid);
*bucket_obj = svc.rados->obj(index_pool, oid);
return 0;
}
int RGWSI_BucketIndex_RADOS::cls_bucket_head(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id,
vector<rgw_bucket_dir_header> *headers,
map<int, string> *bucket_instance_ids,
optional_yield y)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
int r = open_bucket_index(dpp, bucket_info, shard_id, idx_layout, &index_pool, &oids, bucket_instance_ids);
if (r < 0)
return r;
map<int, struct rgw_cls_list_ret> list_results;
for (auto& iter : oids) {
list_results.emplace(iter.first, rgw_cls_list_ret());
}
r = CLSRGWIssueGetDirHeader(index_pool.ioctx(), oids, list_results, cct->_conf->rgw_bucket_index_max_aio)();
if (r < 0)
return r;
map<int, struct rgw_cls_list_ret>::iterator iter = list_results.begin();
for(; iter != list_results.end(); ++iter) {
headers->push_back(std::move(iter->second.dir.header));
}
return 0;
}
int RGWSI_BucketIndex_RADOS::init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout)
{
RGWSI_RADOS::Pool index_pool;
string dir_oid = dir_oid_prefix;
int r = open_bucket_index_pool(dpp, bucket_info, &index_pool);
if (r < 0) {
return r;
}
dir_oid.append(bucket_info.bucket.bucket_id);
map<int, string> bucket_objs;
get_bucket_index_objects(dir_oid, idx_layout.layout.normal.num_shards, idx_layout.gen, &bucket_objs);
return CLSRGWIssueBucketIndexInit(index_pool.ioctx(),
bucket_objs,
cct->_conf->rgw_bucket_index_max_aio)();
}
int RGWSI_BucketIndex_RADOS::clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout)
{
RGWSI_RADOS::Pool index_pool;
std::string dir_oid = dir_oid_prefix;
int r = open_bucket_index_pool(dpp, bucket_info, &index_pool);
if (r < 0) {
return r;
}
dir_oid.append(bucket_info.bucket.bucket_id);
std::map<int, std::string> bucket_objs;
get_bucket_index_objects(dir_oid, idx_layout.layout.normal.num_shards,
idx_layout.gen, &bucket_objs);
return CLSRGWIssueBucketIndexClean(index_pool.ioctx(),
bucket_objs,
cct->_conf->rgw_bucket_index_max_aio)();
}
int RGWSI_BucketIndex_RADOS::read_stats(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWBucketEnt *result,
optional_yield y)
{
vector<rgw_bucket_dir_header> headers;
result->bucket = bucket_info.bucket;
int r = cls_bucket_head(dpp, bucket_info, bucket_info.layout.current_index, RGW_NO_SHARD, &headers, nullptr, y);
if (r < 0) {
return r;
}
result->count = 0;
result->size = 0;
result->size_rounded = 0;
auto hiter = headers.begin();
for (; hiter != headers.end(); ++hiter) {
RGWObjCategory category = RGWObjCategory::Main;
auto iter = (hiter->stats).find(category);
if (iter != hiter->stats.end()) {
struct rgw_bucket_category_stats& stats = iter->second;
result->count += stats.num_entries;
result->size += stats.total_size;
result->size_rounded += stats.total_size_rounded;
}
}
  result->placement_rule = bucket_info.placement_rule; // bucket_info is const, so std::move() would copy anyway
return 0;
}
int RGWSI_BucketIndex_RADOS::get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, list<cls_rgw_bucket_instance_entry> *status)
{
map<int, string> bucket_objs;
RGWSI_RADOS::Pool index_pool;
int r = open_bucket_index(dpp, bucket_info,
std::nullopt,
bucket_info.layout.current_index,
&index_pool,
&bucket_objs,
nullptr);
if (r < 0) {
return r;
}
  for (const auto& i : bucket_objs) {
cls_rgw_bucket_instance_entry entry;
int ret = cls_rgw_get_bucket_resharding(index_pool.ioctx(), i.second, &entry);
if (ret < 0 && ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: " << __func__ << ": cls_rgw_get_bucket_resharding() returned ret=" << ret << dendl;
return ret;
}
status->push_back(entry);
}
return 0;
}
int RGWSI_BucketIndex_RADOS::handle_overwrite(const DoutPrefixProvider *dpp,
const RGWBucketInfo& info,
const RGWBucketInfo& orig_info,
optional_yield y)
{
bool new_sync_enabled = info.datasync_flag_enabled();
bool old_sync_enabled = orig_info.datasync_flag_enabled();
if (old_sync_enabled == new_sync_enabled) {
return 0; // datasync flag didn't change
}
if (info.layout.logs.empty()) {
return 0; // no bilog
}
const auto& bilog = info.layout.logs.back();
if (bilog.layout.type != rgw::BucketLogType::InIndex) {
return -ENOTSUP;
}
const int shards_num = rgw::num_shards(bilog.layout.in_index);
int ret;
if (!new_sync_enabled) {
ret = svc.bilog->log_stop(dpp, info, bilog, -1);
} else {
ret = svc.bilog->log_start(dpp, info, bilog, -1);
}
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed writing bilog (bucket=" << info.bucket << "); ret=" << ret << dendl;
return ret;
}
for (int i = 0; i < shards_num; ++i) {
ret = svc.datalog_rados->add_entry(dpp, info, bilog, i, y);
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: failed writing data log (info.bucket=" << info.bucket << ", shard_id=" << i << ")" << dendl;
} // datalog error is not fatal
}
return 0;
}
ceph-main/src/rgw/services/svc_bi_rados.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_datalog.h"
#include "rgw_service.h"
#include "rgw_tools.h"
#include "svc_bi.h"
#include "svc_rados.h"
#include "svc_tier_rados.h"
struct rgw_bucket_dir_header;
class RGWSI_BILog_RADOS;
#define RGW_NO_SHARD -1
#define RGW_SHARDS_PRIME_0 7877
#define RGW_SHARDS_PRIME_1 65521
/*
* Defined Bucket Index Namespaces
*/
#define RGW_OBJ_NS_MULTIPART "multipart"
#define RGW_OBJ_NS_SHADOW "shadow"
class RGWSI_BucketIndex_RADOS : public RGWSI_BucketIndex
{
friend class RGWSI_BILog_RADOS;
int open_pool(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
RGWSI_RADOS::Pool *index_pool,
bool mostly_omap);
int open_bucket_index_pool(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool);
int open_bucket_index_base(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
std::string *bucket_oid_base);
// return the index oid for the given shard id
void get_bucket_index_object(const std::string& bucket_oid_base,
const rgw::bucket_index_normal_layout& normal,
uint64_t gen_id, int shard_id,
std::string* bucket_obj);
// return the index oid and shard id for the given object name
int get_bucket_index_object(const std::string& bucket_oid_base,
const rgw::bucket_index_normal_layout& normal,
uint64_t gen_id, const std::string& obj_key,
std::string* bucket_obj, int* shard_id);
int cls_bucket_head(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& idx_layout,
int shard_id,
std::vector<rgw_bucket_dir_header> *headers,
std::map<int, std::string> *bucket_instance_ids,
optional_yield y);
public:
struct Svc {
RGWSI_Zone *zone{nullptr};
RGWSI_RADOS *rados{nullptr};
RGWSI_BILog_RADOS *bilog{nullptr};
RGWDataChangesLog *datalog_rados{nullptr};
} svc;
RGWSI_BucketIndex_RADOS(CephContext *cct);
void init(RGWSI_Zone *zone_svc,
RGWSI_RADOS *rados_svc,
RGWSI_BILog_RADOS *bilog_svc,
RGWDataChangesLog *datalog_rados_svc);
static int shards_max() {
return RGW_SHARDS_PRIME_1;
}
static int shard_id(const std::string& key, int max_shards) {
return rgw_shard_id(key, max_shards);
}
static uint32_t bucket_shard_index(const std::string& key,
int num_shards) {
uint32_t sid = ceph_str_hash_linux(key.c_str(), key.size());
uint32_t sid2 = sid ^ ((sid & 0xFF) << 24);
return rgw_shards_mod(sid2, num_shards);
}
static uint32_t bucket_shard_index(const rgw_obj_key& obj_key,
int num_shards)
{
std::string sharding_key;
if (obj_key.ns == RGW_OBJ_NS_MULTIPART) {
RGWMPObj mp;
mp.from_meta(obj_key.name);
sharding_key = mp.get_key();
} else {
sharding_key = obj_key.name;
}
return bucket_shard_index(sharding_key, num_shards);
}
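  // Sketch of the sharding scheme (illustrative): the key is hashed with the
  // Linux dcache string hash, the low byte is folded into the top byte
  // (sid ^ ((sid & 0xFF) << 24)) to spread similar hashes, and the result is
  // reduced modulo num_shards. Multipart entries hash the upload's meta key
  // instead of the part name, so every entry of one upload lands on the same
  // index shard.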
int init_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info,const rgw::bucket_index_layout_generation& idx_layout) override;
int clean_index(const DoutPrefixProvider *dpp, RGWBucketInfo& bucket_info, const rgw::bucket_index_layout_generation& idx_layout) override;
/* RADOS specific */
int read_stats(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWBucketEnt *stats,
optional_yield y) override;
int get_reshard_status(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
std::list<cls_rgw_bucket_instance_entry> *status);
int handle_overwrite(const DoutPrefixProvider *dpp, const RGWBucketInfo& info,
const RGWBucketInfo& orig_info,
optional_yield y) override;
int open_bucket_index_shard(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const std::string& obj_key,
RGWSI_RADOS::Obj *bucket_obj,
int *shard_id);
int open_bucket_index_shard(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_index_layout_generation& index,
int shard_id, RGWSI_RADOS::Obj *bucket_obj);
int open_bucket_index(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
RGWSI_RADOS::Pool *index_pool,
std::string *bucket_oid);
int open_bucket_index(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
std::optional<int> shard_id,
const rgw::bucket_index_layout_generation& idx_layout,
RGWSI_RADOS::Pool *index_pool,
std::map<int, std::string> *bucket_objs,
std::map<int, std::string> *bucket_instance_ids);
};
ceph-main/src/rgw/services/svc_bilog_rados.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "svc_bilog_rados.h"
#include "svc_bi_rados.h"
#include "cls/rgw/cls_rgw_client.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
RGWSI_BILog_RADOS::RGWSI_BILog_RADOS(CephContext *cct) : RGWServiceInstance(cct)
{
}
void RGWSI_BILog_RADOS::init(RGWSI_BucketIndex_RADOS *bi_rados_svc)
{
svc.bi = bi_rados_svc;
}
int RGWSI_BILog_RADOS::log_trim(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_log_layout_generation& log_layout,
int shard_id,
std::string_view start_marker,
std::string_view end_marker)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
BucketIndexShardsManager start_marker_mgr;
BucketIndexShardsManager end_marker_mgr;
const auto& current_index = rgw::log_to_index_layout(log_layout);
int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0) {
return r;
}
r = start_marker_mgr.from_string(start_marker, shard_id);
if (r < 0) {
return r;
}
r = end_marker_mgr.from_string(end_marker, shard_id);
if (r < 0) {
return r;
}
return CLSRGWIssueBILogTrim(index_pool.ioctx(), start_marker_mgr, end_marker_mgr, bucket_objs,
cct->_conf->rgw_bucket_index_max_aio)();
}
int RGWSI_BILog_RADOS::log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_log_layout_generation& log_layout, int shard_id)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
const auto& current_index = rgw::log_to_index_layout(log_layout);
int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
return CLSRGWIssueResyncBucketBILog(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)();
}
int RGWSI_BILog_RADOS::log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_log_layout_generation& log_layout, int shard_id)
{
RGWSI_RADOS::Pool index_pool;
map<int, string> bucket_objs;
const auto& current_index = rgw::log_to_index_layout(log_layout);
int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, current_index, &index_pool, &bucket_objs, nullptr);
if (r < 0)
return r;
return CLSRGWIssueBucketBILogStop(index_pool.ioctx(), bucket_objs, cct->_conf->rgw_bucket_index_max_aio)();
}
static void build_bucket_index_marker(const string& shard_id_str,
const string& shard_marker,
string *marker) {
if (marker) {
*marker = shard_id_str;
marker->append(BucketIndexShardsManager::KEY_VALUE_SEPARATOR);
marker->append(shard_marker);
}
}
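// Illustrative example: build_bucket_index_marker("2", "00000000123.45.6", &m)
// yields "2#00000000123.45.6"; a composite marker over several shards is a
// ','-joined list of such pairs, e.g. "0#00000000001.10.2,2#00000000123.45.6".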
int RGWSI_BILog_RADOS::log_list(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_log_layout_generation& log_layout,
int shard_id, string& marker, uint32_t max,
std::list<rgw_bi_log_entry>& result, bool *truncated)
{
ldpp_dout(dpp, 20) << __func__ << ": " << bucket_info.bucket << " marker " << marker << " shard_id=" << shard_id << " max " << max << dendl;
result.clear();
RGWSI_RADOS::Pool index_pool;
map<int, string> oids;
map<int, cls_rgw_bi_log_list_ret> bi_log_lists;
const auto& current_index = rgw::log_to_index_layout(log_layout);
int r = svc.bi->open_bucket_index(dpp, bucket_info, shard_id, current_index, &index_pool, &oids, nullptr);
if (r < 0)
return r;
BucketIndexShardsManager marker_mgr;
bool has_shards = (oids.size() > 1 || shard_id >= 0);
  // If there are multiple shards for the bucket index object, the marker
  // should have the pattern '{shard_id_1}#{shard_marker_1},{shard_id_2}#
  // {shard_marker_2}...'. If there is no sharding, the bi_log_list should
  // only contain one record, and the key is the bucket instance id.
r = marker_mgr.from_string(marker, shard_id);
if (r < 0)
return r;
r = CLSRGWIssueBILogList(index_pool.ioctx(), marker_mgr, max, oids, bi_log_lists, cct->_conf->rgw_bucket_index_max_aio)();
if (r < 0)
return r;
map<int, list<rgw_bi_log_entry>::iterator> vcurrents;
map<int, list<rgw_bi_log_entry>::iterator> vends;
if (truncated) {
*truncated = false;
}
map<int, cls_rgw_bi_log_list_ret>::iterator miter = bi_log_lists.begin();
for (; miter != bi_log_lists.end(); ++miter) {
int shard_id = miter->first;
vcurrents[shard_id] = miter->second.entries.begin();
vends[shard_id] = miter->second.entries.end();
if (truncated) {
*truncated = (*truncated || miter->second.truncated);
}
}
size_t total = 0;
bool has_more = true;
map<int, list<rgw_bi_log_entry>::iterator>::iterator viter;
map<int, list<rgw_bi_log_entry>::iterator>::iterator eiter;
while (total < max && has_more) {
has_more = false;
viter = vcurrents.begin();
eiter = vends.begin();
for (; total < max && viter != vcurrents.end(); ++viter, ++eiter) {
assert (eiter != vends.end());
int shard_id = viter->first;
list<rgw_bi_log_entry>::iterator& liter = viter->second;
if (liter == eiter->second){
continue;
}
rgw_bi_log_entry& entry = *(liter);
if (has_shards) {
char buf[16];
snprintf(buf, sizeof(buf), "%d", shard_id);
string tmp_id;
build_bucket_index_marker(buf, entry.id, &tmp_id);
entry.id.swap(tmp_id);
}
marker_mgr.add(shard_id, entry.id);
result.push_back(entry);
total++;
has_more = true;
++liter;
}
}
if (truncated) {
for (viter = vcurrents.begin(), eiter = vends.begin(); viter != vcurrents.end(); ++viter, ++eiter) {
assert (eiter != vends.end());
*truncated = (*truncated || (viter->second != eiter->second));
}
}
  // Refresh marker. If there are multiple shards, the output will look like
  // '{shard_oid_1}#{shard_marker_1},{shard_oid_2}#{shard_marker_2}...';
  // if there is no sharding, the simple marker (without oid) is returned.
if (has_shards) {
marker_mgr.to_string(&marker);
} else {
if (!result.empty()) {
marker = result.rbegin()->id;
}
}
return 0;
}
int RGWSI_BILog_RADOS::get_log_status(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_log_layout_generation& log_layout,
int shard_id,
map<int, string> *markers,
optional_yield y)
{
vector<rgw_bucket_dir_header> headers;
map<int, string> bucket_instance_ids;
const auto& current_index = rgw::log_to_index_layout(log_layout);
int r = svc.bi->cls_bucket_head(dpp, bucket_info, current_index, shard_id, &headers, &bucket_instance_ids, y);
if (r < 0)
return r;
ceph_assert(headers.size() == bucket_instance_ids.size());
auto iter = headers.begin();
map<int, string>::iterator viter = bucket_instance_ids.begin();
for(; iter != headers.end(); ++iter, ++viter) {
if (shard_id >= 0) {
(*markers)[shard_id] = iter->max_marker;
} else {
(*markers)[viter->first] = iter->max_marker;
}
}
return 0;
}
ceph-main/src/rgw/services/svc_bilog_rados.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_rados.h"
class RGWSI_BILog_RADOS : public RGWServiceInstance
{
public:
struct Svc {
RGWSI_BucketIndex_RADOS *bi{nullptr};
} svc;
RGWSI_BILog_RADOS(CephContext *cct);
void init(RGWSI_BucketIndex_RADOS *bi_rados_svc);
int log_start(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_log_layout_generation& log_layout, int shard_id);
int log_stop(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_log_layout_generation& log_layout, int shard_id);
int log_trim(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
const rgw::bucket_log_layout_generation& log_layout,
int shard_id,
std::string_view start_marker,
std::string_view end_marker);
int log_list(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info,
const rgw::bucket_log_layout_generation& log_layout,
int shard_id,
std::string& marker,
uint32_t max,
std::list<rgw_bi_log_entry>& result,
bool *truncated);
int get_log_status(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
const rgw::bucket_log_layout_generation& log_layout,
int shard_id,
std::map<int, std::string> *markers,
optional_yield y);
};
ceph-main/src/rgw/services/svc_bucket.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "svc_bucket.h"
#define dout_subsys ceph_subsys_rgw
std::string RGWSI_Bucket::get_entrypoint_meta_key(const rgw_bucket& bucket)
{
if (bucket.bucket_id.empty()) {
return bucket.get_key();
}
rgw_bucket b(bucket);
b.bucket_id.clear();
return b.get_key();
}
std::string RGWSI_Bucket::get_bi_meta_key(const rgw_bucket& bucket)
{
return bucket.get_key();
}
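// Illustrative examples: for bucket "foo" in tenant "acme" with bucket_id
// "abc.123", get_bi_meta_key() returns the full key "acme/foo:abc.123",
// while get_entrypoint_meta_key() drops the instance id and returns
// "acme/foo".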
ceph-main/src/rgw/services/svc_bucket.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_bucket_types.h"
class RGWSI_Bucket : public RGWServiceInstance
{
public:
RGWSI_Bucket(CephContext *cct) : RGWServiceInstance(cct) {}
virtual ~RGWSI_Bucket() {}
static std::string get_entrypoint_meta_key(const rgw_bucket& bucket);
static std::string get_bi_meta_key(const rgw_bucket& bucket);
virtual RGWSI_Bucket_BE_Handler& get_ep_be_handler() = 0;
virtual RGWSI_BucketInstance_BE_Handler& get_bi_be_handler() = 0;
virtual int read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWBucketEntryPoint *entry_point,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none) = 0;
virtual int store_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWBucketEntryPoint& info,
bool exclusive,
real_time mtime,
std::map<std::string, bufferlist> *pattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none) = 0;
virtual int read_bucket_info(RGWSI_Bucket_X_Ctx& ep_ctx,
const rgw_bucket& bucket,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo& info,
                                         std::optional<RGWBucketInfo *> orig_info, /* nullopt: orig_info was not fetched,
                                                                                      nullptr: orig_info was not found (new bucket instance) */
bool exclusive,
real_time mtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
const RGWBucketInfo& bucket_info,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx,
const rgw_bucket& bucket,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
std::map<std::string, RGWBucketEnt>& m,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
};
ceph-main/src/rgw/services/svc_bucket_sobj.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "svc_bucket_sobj.h"
#include "svc_zone.h"
#include "svc_sys_obj.h"
#include "svc_sys_obj_cache.h"
#include "svc_bi.h"
#include "svc_meta.h"
#include "svc_meta_be_sobj.h"
#include "svc_sync_modules.h"
#include "rgw_bucket.h"
#include "rgw_tools.h"
#include "rgw_zone.h"
#define dout_subsys ceph_subsys_rgw
#define RGW_BUCKET_INSTANCE_MD_PREFIX ".bucket.meta."
using namespace std;
class RGWSI_Bucket_SObj_Module : public RGWSI_MBSObj_Handler_Module {
RGWSI_Bucket_SObj::Svc& svc;
const string prefix;
public:
RGWSI_Bucket_SObj_Module(RGWSI_Bucket_SObj::Svc& _svc) : RGWSI_MBSObj_Handler_Module("bucket"),
svc(_svc) {}
void get_pool_and_oid(const string& key, rgw_pool *pool, string *oid) override {
if (pool) {
*pool = svc.zone->get_zone_params().domain_root;
}
if (oid) {
*oid = key;
}
}
const string& get_oid_prefix() override {
return prefix;
}
bool is_valid_oid(const string& oid) override {
return (!oid.empty() && oid[0] != '.');
}
string key_to_oid(const string& key) override {
return key;
}
string oid_to_key(const string& oid) override {
/* should have been called after is_valid_oid(),
* so no need to check for validity */
return oid;
}
};
class RGWSI_BucketInstance_SObj_Module : public RGWSI_MBSObj_Handler_Module {
RGWSI_Bucket_SObj::Svc& svc;
const string prefix;
public:
RGWSI_BucketInstance_SObj_Module(RGWSI_Bucket_SObj::Svc& _svc) : RGWSI_MBSObj_Handler_Module("bucket.instance"),
svc(_svc), prefix(RGW_BUCKET_INSTANCE_MD_PREFIX) {}
void get_pool_and_oid(const string& key, rgw_pool *pool, string *oid) override {
if (pool) {
*pool = svc.zone->get_zone_params().domain_root;
}
if (oid) {
*oid = key_to_oid(key);
}
}
const string& get_oid_prefix() override {
return prefix;
}
bool is_valid_oid(const string& oid) override {
return (oid.compare(0, prefix.size(), RGW_BUCKET_INSTANCE_MD_PREFIX) == 0);
}
// 'tenant/' is used in bucket instance keys for sync to avoid parsing ambiguity
// with the existing instance[:shard] format. once we parse the shard, the / is
// replaced with a : to match the [tenant:]instance format
string key_to_oid(const string& key) override {
string oid = prefix + key;
// replace tenant/ with tenant:
auto c = oid.find('/', prefix.size());
if (c != string::npos) {
oid[c] = ':';
}
return oid;
}
// convert bucket instance oids back to the tenant/ format for metadata keys.
// it's safe to parse 'tenant:' only for oids, because they won't contain the
// optional :shard at the end
string oid_to_key(const string& oid) override {
/* this should have been called after oid was checked for validity */
if (oid.size() < prefix.size()) { /* just sanity check */
return string();
}
string key = oid.substr(prefix.size());
// find first : (could be tenant:bucket or bucket:instance)
auto c = key.find(':');
if (c != string::npos) {
// if we find another :, the first one was for tenant
if (key.find(':', c + 1) != string::npos) {
key[c] = '/';
}
}
return key;
}
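  // Illustrative round trip: key "acme/foo:abc.123" becomes oid
  // ".bucket.meta.acme:foo:abc.123" (the tenant '/' turns into ':'), and
  // oid_to_key() restores the '/' because it finds two ':' characters; an
  // untenanted "foo:abc.123" stays ".bucket.meta.foo:abc.123" either way.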
/*
* hash entry for mdlog placement. Use the same hash key we'd have for the bucket entry
* point, so that the log entries end up at the same log shard, so that we process them
* in order
*/
  string get_hash_key(const string& key) override {
    string k = "bucket:";
    auto pos = key.find(':');
    if (pos == string::npos)
      k.append(key);
    else
      k.append(key.substr(0, pos));
    return k;
  }
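  // Illustrative example: instance key "foo:abc.123" (or "acme/foo:abc.123")
  // hashes as "bucket:foo" (resp. "bucket:acme/foo"), the same hash key as the
  // bucket's entrypoint entry, so both are placed on the same mdlog shard.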
};
RGWSI_Bucket_SObj::RGWSI_Bucket_SObj(CephContext *cct): RGWSI_Bucket(cct) {
}
RGWSI_Bucket_SObj::~RGWSI_Bucket_SObj() {
}
void RGWSI_Bucket_SObj::init(RGWSI_Zone *_zone_svc, RGWSI_SysObj *_sysobj_svc,
RGWSI_SysObj_Cache *_cache_svc, RGWSI_BucketIndex *_bi,
RGWSI_Meta *_meta_svc, RGWSI_MetaBackend *_meta_be_svc,
RGWSI_SyncModules *_sync_modules_svc,
RGWSI_Bucket_Sync *_bucket_sync_svc)
{
svc.bucket = this;
svc.zone = _zone_svc;
svc.sysobj = _sysobj_svc;
svc.cache = _cache_svc;
svc.bi = _bi;
svc.meta = _meta_svc;
svc.meta_be = _meta_be_svc;
svc.sync_modules = _sync_modules_svc;
svc.bucket_sync = _bucket_sync_svc;
}
int RGWSI_Bucket_SObj::do_start(optional_yield, const DoutPrefixProvider *dpp)
{
binfo_cache.reset(new RGWChainedCacheImpl<bucket_info_cache_entry>);
binfo_cache->init(svc.cache);
/* create first backend handler for bucket entrypoints */
RGWSI_MetaBackend_Handler *ep_handler;
int r = svc.meta->create_be_handler(RGWSI_MetaBackend::Type::MDBE_SOBJ, &ep_handler);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to create be handler: r=" << r << dendl;
return r;
}
ep_be_handler = ep_handler;
RGWSI_MetaBackend_Handler_SObj *ep_bh = static_cast<RGWSI_MetaBackend_Handler_SObj *>(ep_handler);
auto ep_module = new RGWSI_Bucket_SObj_Module(svc);
ep_be_module.reset(ep_module);
ep_bh->set_module(ep_module);
/* create a second backend handler for bucket instance */
RGWSI_MetaBackend_Handler *bi_handler;
r = svc.meta->create_be_handler(RGWSI_MetaBackend::Type::MDBE_SOBJ, &bi_handler);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to create be handler: r=" << r << dendl;
return r;
}
bi_be_handler = bi_handler;
RGWSI_MetaBackend_Handler_SObj *bi_bh = static_cast<RGWSI_MetaBackend_Handler_SObj *>(bi_handler);
auto bi_module = new RGWSI_BucketInstance_SObj_Module(svc);
bi_be_module.reset(bi_module);
bi_bh->set_module(bi_module);
return 0;
}
int RGWSI_Bucket_SObj::read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const string& key,
RGWBucketEntryPoint *entry_point,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime,
map<string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version> refresh_version)
{
bufferlist bl;
auto params = RGWSI_MBSObj_GetParams(&bl, pattrs, pmtime).set_cache_info(cache_info)
.set_refresh_version(refresh_version);
int ret = svc.meta_be->get_entry(ctx.get(), key, params, objv_tracker, y, dpp);
if (ret < 0) {
return ret;
}
auto iter = bl.cbegin();
try {
decode(*entry_point, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl;
return -EIO;
}
return 0;
}
int RGWSI_Bucket_SObj::store_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const string& key,
RGWBucketEntryPoint& info,
bool exclusive,
real_time mtime,
map<string, bufferlist> *pattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp)
{
bufferlist bl;
encode(info, bl);
RGWSI_MBSObj_PutParams params(bl, pattrs, mtime, exclusive);
int ret = svc.meta_be->put(ctx.get(), key, params, objv_tracker, y, dpp);
if (ret < 0) {
return ret;
}
return ret;
}
int RGWSI_Bucket_SObj::remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp)
{
RGWSI_MBSObj_RemoveParams params;
return svc.meta_be->remove(ctx.get(), key, params, objv_tracker, y, dpp);
}
int RGWSI_Bucket_SObj::read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const string& key,
RGWBucketInfo *info,
real_time *pmtime, map<string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version> refresh_version)
{
string cache_key("bi/");
cache_key.append(key);
if (auto e = binfo_cache->find(cache_key)) {
if (refresh_version &&
e->info.objv_tracker.read_version.compare(&(*refresh_version))) {
ldpp_dout(dpp, -1) << "WARNING: The bucket info cache is inconsistent. This is "
<< "a failure that should be debugged. I am a nice machine, "
<< "so I will try to recover." << dendl;
      binfo_cache->invalidate(cache_key); // must match the "bi/"-prefixed key used by find()/put()
} else {
*info = e->info;
if (pattrs)
*pattrs = e->attrs;
if (pmtime)
*pmtime = e->mtime;
return 0;
}
}
bucket_info_cache_entry e;
rgw_cache_entry_info ci;
int ret = do_read_bucket_instance_info(ctx, key,
&e.info, &e.mtime, &e.attrs,
&ci, refresh_version, y, dpp);
*info = e.info;
if (ret < 0) {
if (ret != -ENOENT) {
ldpp_dout(dpp, -1) << "ERROR: do_read_bucket_instance_info failed: " << ret << dendl;
} else {
ldpp_dout(dpp, 20) << "do_read_bucket_instance_info, bucket instance not found (key=" << key << ")" << dendl;
}
return ret;
}
if (pmtime) {
*pmtime = e.mtime;
}
if (pattrs) {
*pattrs = e.attrs;
}
if (cache_info) {
*cache_info = ci;
}
/* chain to only bucket instance and *not* bucket entrypoint */
if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&ci})) {
ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl;
}
if (refresh_version &&
refresh_version->compare(&info->objv_tracker.read_version)) {
ldpp_dout(dpp, -1) << "WARNING: The OSD has the same version I have. Something may "
<< "have gone squirrelly. An administrator may have forced a "
<< "change; otherwise there is a problem somewhere." << dendl;
}
return 0;
}
int RGWSI_Bucket_SObj::do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const string& key,
RGWBucketInfo *info,
real_time *pmtime, map<string, bufferlist> *pattrs,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp)
{
bufferlist bl;
RGWObjVersionTracker ot;
auto params = RGWSI_MBSObj_GetParams(&bl, pattrs, pmtime).set_cache_info(cache_info)
.set_refresh_version(refresh_version);
int ret = svc.meta_be->get_entry(ctx.get(), key, params, &ot, y, dpp);
if (ret < 0) {
return ret;
}
auto iter = bl.cbegin();
try {
decode(*info, iter);
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: could not decode buffer info, caught buffer::error" << dendl;
return -EIO;
}
info->objv_tracker = ot;
return 0;
}
int RGWSI_Bucket_SObj::read_bucket_info(RGWSI_Bucket_X_Ctx& ctx,
const rgw_bucket& bucket,
RGWBucketInfo *info,
real_time *pmtime,
map<string, bufferlist> *pattrs,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp)
{
rgw_cache_entry_info cache_info;
if (!bucket.bucket_id.empty()) {
return read_bucket_instance_info(ctx.bi, get_bi_meta_key(bucket),
info,
pmtime, pattrs,
y,
dpp,
&cache_info, refresh_version);
}
string bucket_entry = get_entrypoint_meta_key(bucket);
string cache_key("b/");
cache_key.append(bucket_entry);
if (auto e = binfo_cache->find(cache_key)) {
bool found_version = (bucket.bucket_id.empty() ||
bucket.bucket_id == e->info.bucket.bucket_id);
if (!found_version ||
(refresh_version &&
e->info.objv_tracker.read_version.compare(&(*refresh_version)))) {
ldpp_dout(dpp, -1) << "WARNING: The bucket info cache is inconsistent. This is "
<< "a failure that should be debugged. I am a nice machine, "
<< "so I will try to recover." << dendl;
binfo_cache->invalidate(cache_key);
} else {
*info = e->info;
if (pattrs)
*pattrs = e->attrs;
if (pmtime)
*pmtime = e->mtime;
return 0;
}
}
RGWBucketEntryPoint entry_point;
real_time ep_mtime;
RGWObjVersionTracker ot;
rgw_cache_entry_info entry_cache_info;
int ret = read_bucket_entrypoint_info(ctx.ep, bucket_entry,
&entry_point, &ot, &ep_mtime, pattrs,
y,
dpp,
&entry_cache_info, refresh_version);
if (ret < 0) {
/* only init these fields */
info->bucket = bucket;
return ret;
}
if (entry_point.has_bucket_info) {
*info = entry_point.old_bucket_info;
info->bucket.tenant = bucket.tenant;
ldpp_dout(dpp, 20) << "rgw_get_bucket_info: old bucket info, bucket=" << info->bucket << " owner " << info->owner << dendl;
return 0;
}
/* data is in the bucket instance object, we need to get attributes from there, clear everything
* that we got
*/
if (pattrs) {
pattrs->clear();
}
ldpp_dout(dpp, 20) << "rgw_get_bucket_info: bucket instance: " << entry_point.bucket << dendl;
/* read bucket instance info */
bucket_info_cache_entry e;
ret = read_bucket_instance_info(ctx.bi, get_bi_meta_key(entry_point.bucket),
&e.info, &e.mtime, &e.attrs,
y,
dpp,
&cache_info, refresh_version);
*info = e.info;
if (ret < 0) {
ldpp_dout(dpp, -1) << "ERROR: read_bucket_instance_from_oid failed: " << ret << dendl;
info->bucket = bucket;
// XXX and why return anything in case of an error anyway?
return ret;
}
if (pmtime)
*pmtime = e.mtime;
if (pattrs)
*pattrs = e.attrs;
/* chain to both bucket entry point and bucket instance */
if (!binfo_cache->put(dpp, svc.cache, cache_key, &e, {&entry_cache_info, &cache_info})) {
ldpp_dout(dpp, 20) << "couldn't put binfo cache entry, might have raced with data changes" << dendl;
}
if (refresh_version &&
refresh_version->compare(&info->objv_tracker.read_version)) {
ldpp_dout(dpp, -1) << "WARNING: The OSD has the same version I have. Something may "
<< "have gone squirrelly. An administrator may have forced a "
<< "change; otherwise there is a problem somewhere." << dendl;
}
return 0;
}
int RGWSI_Bucket_SObj::store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const string& key,
RGWBucketInfo& info,
std::optional<RGWBucketInfo *> orig_info,
bool exclusive,
real_time mtime,
map<string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp)
{
bufferlist bl;
encode(info, bl);
/*
* we might need some special handling if overwriting
*/
RGWBucketInfo shared_bucket_info;
  if (!orig_info && !exclusive) { /* if exclusive, we're going to fail when we try
                                     to overwrite, so the whole check here is moot */
/*
* we're here because orig_info wasn't passed in
* we don't have info about what was there before, so need to fetch first
*/
int r = read_bucket_instance_info(ctx,
key,
&shared_bucket_info,
nullptr, nullptr,
y,
dpp,
nullptr, boost::none);
if (r < 0) {
if (r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_bucket_instance_info() of key=" << key << " returned r=" << r << dendl;
return r;
}
} else {
orig_info = &shared_bucket_info;
}
}
if (orig_info && *orig_info && !exclusive) {
int r = svc.bi->handle_overwrite(dpp, info, *(orig_info.value()), y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): svc.bi->handle_overwrite() of key=" << key << " returned r=" << r << dendl;
return r;
}
}
RGWSI_MBSObj_PutParams params(bl, pattrs, mtime, exclusive);
int ret = svc.meta_be->put(ctx.get(), key, params, &info.objv_tracker, y, dpp);
if (ret >= 0) {
int r = svc.bucket_sync->handle_bi_update(dpp, info,
orig_info.value_or(nullptr),
y);
if (r < 0) {
return r;
}
} else if (ret == -EEXIST) {
/* well, if it's exclusive we shouldn't overwrite it, because we might race with another
* bucket operation on this specific bucket (e.g., being synced from the master), but
* since bucket instance meta object is unique for this specific bucket instance, we don't
* need to return an error.
* A scenario where we'd get -EEXIST here, is in a multi-zone config, we're not on the
* master, creating a bucket, sending bucket creation to the master, we create the bucket
* locally, while in the sync thread we sync the new bucket.
*/
ret = 0;
}
if (ret < 0) {
return ret;
}
return ret;
}
int RGWSI_Bucket_SObj::remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const string& key,
const RGWBucketInfo& info,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp)
{
RGWSI_MBSObj_RemoveParams params;
int ret = svc.meta_be->remove_entry(dpp, ctx.get(), key, params, objv_tracker, y);
if (ret < 0 &&
ret != -ENOENT) {
return ret;
}
int r = svc.bucket_sync->handle_bi_removal(dpp, info, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to update bucket instance sync index: r=" << r << dendl;
/* returning success as index is just keeping hints, so will keep extra hints,
* but bucket removal succeeded
*/
}
return 0;
}
int RGWSI_Bucket_SObj::read_bucket_stats(const RGWBucketInfo& bucket_info,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp)
{
ent->count = 0;
ent->size = 0;
ent->size_rounded = 0;
int r = svc.bi->read_stats(dpp, bucket_info, ent, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_stats returned r=" << r << dendl;
return r;
}
return 0;
}
int RGWSI_Bucket_SObj::read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx,
const rgw_bucket& bucket,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp)
{
RGWBucketInfo bucket_info;
int ret = read_bucket_info(ctx, bucket, &bucket_info, nullptr, nullptr, boost::none, y, dpp);
if (ret < 0) {
return ret;
}
return read_bucket_stats(bucket_info, ent, y, dpp);
}
int RGWSI_Bucket_SObj::read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
map<string, RGWBucketEnt>& m,
optional_yield y,
const DoutPrefixProvider *dpp)
{
map<string, RGWBucketEnt>::iterator iter;
for (iter = m.begin(); iter != m.end(); ++iter) {
RGWBucketEnt& ent = iter->second;
int r = read_bucket_stats(ctx, ent.bucket, &ent, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): read_bucket_stats returned r=" << r << dendl;
return r;
}
}
return m.size();
}
ceph-main/src/rgw/services/svc_bucket_sobj.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_meta_be.h"
#include "svc_bucket_types.h"
#include "svc_bucket.h"
#include "svc_bucket_sync.h"
class RGWSI_Zone;
class RGWSI_SysObj;
class RGWSI_SysObj_Cache;
class RGWSI_Meta;
class RGWSI_SyncModules;
struct rgw_cache_entry_info;
template <class T>
class RGWChainedCacheImpl;
class RGWSI_Bucket_SObj : public RGWSI_Bucket
{
struct bucket_info_cache_entry {
RGWBucketInfo info;
real_time mtime;
std::map<std::string, bufferlist> attrs;
};
using RGWChainedCacheImpl_bucket_info_cache_entry = RGWChainedCacheImpl<bucket_info_cache_entry>;
std::unique_ptr<RGWChainedCacheImpl_bucket_info_cache_entry> binfo_cache;
RGWSI_Bucket_BE_Handler ep_be_handler;
std::unique_ptr<RGWSI_MetaBackend::Module> ep_be_module;
RGWSI_BucketInstance_BE_Handler bi_be_handler;
std::unique_ptr<RGWSI_MetaBackend::Module> bi_be_module;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
int do_read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
rgw_cache_entry_info *cache_info,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp);
int read_bucket_stats(const RGWBucketInfo& bucket_info,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp);
public:
struct Svc {
RGWSI_Bucket_SObj *bucket{nullptr};
RGWSI_BucketIndex *bi{nullptr};
RGWSI_Zone *zone{nullptr};
RGWSI_SysObj *sysobj{nullptr};
RGWSI_SysObj_Cache *cache{nullptr};
RGWSI_Meta *meta{nullptr};
RGWSI_MetaBackend *meta_be{nullptr};
RGWSI_SyncModules *sync_modules{nullptr};
RGWSI_Bucket_Sync *bucket_sync{nullptr};
} svc;
RGWSI_Bucket_SObj(CephContext *cct);
~RGWSI_Bucket_SObj();
RGWSI_Bucket_BE_Handler& get_ep_be_handler() override {
return ep_be_handler;
}
RGWSI_BucketInstance_BE_Handler& get_bi_be_handler() override {
return bi_be_handler;
}
void init(RGWSI_Zone *_zone_svc,
RGWSI_SysObj *_sysobj_svc,
RGWSI_SysObj_Cache *_cache_svc,
RGWSI_BucketIndex *_bi,
RGWSI_Meta *_meta_svc,
RGWSI_MetaBackend *_meta_be_svc,
RGWSI_SyncModules *_sync_modules_svc,
RGWSI_Bucket_Sync *_bucket_sync_svc);
int read_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWBucketEntryPoint *entry_point,
RGWObjVersionTracker *objv_tracker,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none) override;
int store_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWBucketEntryPoint& info,
bool exclusive,
real_time mtime,
std::map<std::string, bufferlist> *pattrs,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int remove_bucket_entrypoint_info(RGWSI_Bucket_EP_Ctx& ctx,
const std::string& key,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int read_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp,
rgw_cache_entry_info *cache_info = nullptr,
boost::optional<obj_version> refresh_version = boost::none) override;
int read_bucket_info(RGWSI_Bucket_X_Ctx& ep_ctx,
const rgw_bucket& bucket,
RGWBucketInfo *info,
real_time *pmtime,
std::map<std::string, bufferlist> *pattrs,
boost::optional<obj_version> refresh_version,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
RGWBucketInfo& info,
                                 std::optional<RGWBucketInfo *> orig_info, /* nullopt: orig_info was not fetched,
                                                                              nullptr: orig_info was not found (new bucket instance) */
bool exclusive,
real_time mtime,
std::map<std::string, bufferlist> *pattrs,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int remove_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
const std::string& key,
const RGWBucketInfo& bucket_info,
RGWObjVersionTracker *objv_tracker,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int read_bucket_stats(RGWSI_Bucket_X_Ctx& ctx,
const rgw_bucket& bucket,
RGWBucketEnt *ent,
optional_yield y,
const DoutPrefixProvider *dpp) override;
int read_buckets_stats(RGWSI_Bucket_X_Ctx& ctx,
std::map<std::string, RGWBucketEnt>& m,
optional_yield y,
const DoutPrefixProvider *dpp) override;
};
ceph-main/src/rgw/services/svc_bucket_sync.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_bucket_types.h"
class RGWBucketSyncPolicyHandler;
using RGWBucketSyncPolicyHandlerRef = std::shared_ptr<RGWBucketSyncPolicyHandler>;
class RGWSI_Bucket_Sync : public RGWServiceInstance
{
public:
RGWSI_Bucket_Sync(CephContext *cct) : RGWServiceInstance(cct) {}
virtual int get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp) = 0;
virtual int handle_bi_update(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y) = 0;
virtual int handle_bi_removal(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
optional_yield y) = 0;
virtual int get_bucket_sync_hints(const DoutPrefixProvider *dpp,
const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y) = 0;
};
ceph-main/src/rgw/services/svc_bucket_sync_sobj.cc
#include "svc_bucket_sync_sobj.h"
#include "svc_zone.h"
#include "svc_sys_obj_cache.h"
#include "svc_bucket_sobj.h"
#include "rgw_bucket_sync.h"
#include "rgw_zone.h"
#include "rgw_sync_policy.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
static string bucket_sync_sources_oid_prefix = "bucket.sync-source-hints";
static string bucket_sync_targets_oid_prefix = "bucket.sync-target-hints";
class RGWSI_Bucket_Sync_SObj_HintIndexManager {
CephContext *cct;
struct {
RGWSI_Zone *zone;
RGWSI_SysObj *sysobj;
} svc;
public:
RGWSI_Bucket_Sync_SObj_HintIndexManager(RGWSI_Zone *_zone_svc,
RGWSI_SysObj *_sysobj_svc) {
svc.zone = _zone_svc;
svc.sysobj = _sysobj_svc;
cct = svc.zone->ctx();
}
rgw_raw_obj get_sources_obj(const rgw_bucket& bucket) const;
rgw_raw_obj get_dests_obj(const rgw_bucket& bucket) const;
template <typename C1, typename C2>
int update_hints(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
C1& added_dests,
C2& removed_dests,
C1& added_sources,
C2& removed_sources,
optional_yield y);
};
RGWSI_Bucket_Sync_SObj::RGWSI_Bucket_Sync_SObj(CephContext *cct) : RGWSI_Bucket_Sync(cct) {
}
RGWSI_Bucket_Sync_SObj::~RGWSI_Bucket_Sync_SObj() {
}
void RGWSI_Bucket_Sync_SObj::init(RGWSI_Zone *_zone_svc,
RGWSI_SysObj *_sysobj_svc,
RGWSI_SysObj_Cache *_cache_svc,
RGWSI_Bucket_SObj *bucket_sobj_svc)
{
svc.zone = _zone_svc;
svc.sysobj = _sysobj_svc;
svc.cache = _cache_svc;
svc.bucket_sobj = bucket_sobj_svc;
hint_index_mgr.reset(new RGWSI_Bucket_Sync_SObj_HintIndexManager(svc.zone, svc.sysobj));
}
int RGWSI_Bucket_Sync_SObj::do_start(optional_yield, const DoutPrefixProvider *dpp)
{
sync_policy_cache.reset(new RGWChainedCacheImpl<bucket_sync_policy_cache_entry>);
sync_policy_cache->init(svc.cache);
return 0;
}
void RGWSI_Bucket_Sync_SObj::get_hint_entities(RGWSI_Bucket_X_Ctx& ctx,
const std::set<rgw_zone_id>& zones,
const std::set<rgw_bucket>& buckets,
std::set<rgw_sync_bucket_entity> *hint_entities,
optional_yield y, const DoutPrefixProvider *dpp)
{
vector<rgw_bucket> hint_buckets;
hint_buckets.reserve(buckets.size());
for (auto& b : buckets) {
RGWBucketInfo hint_bucket_info;
int ret = svc.bucket_sobj->read_bucket_info(ctx, b, &hint_bucket_info,
nullptr, nullptr, boost::none,
y, dpp);
if (ret < 0) {
ldpp_dout(dpp, 20) << "could not init bucket info for hint bucket=" << b << " ... skipping" << dendl;
continue;
}
hint_buckets.emplace_back(std::move(hint_bucket_info.bucket));
}
for (auto& zone : zones) {
for (auto& b : hint_buckets) {
hint_entities->insert(rgw_sync_bucket_entity(zone, b));
}
}
}
int RGWSI_Bucket_Sync_SObj::resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx,
rgw_sync_bucket_entity& self_entity,
RGWBucketSyncPolicyHandlerRef& handler,
RGWBucketSyncPolicyHandlerRef& zone_policy_handler,
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef>& temp_map,
optional_yield y,
const DoutPrefixProvider *dpp)
{
set<rgw_zone_id> source_zones;
set<rgw_zone_id> target_zones;
zone_policy_handler->reflect(dpp, nullptr, nullptr,
nullptr, nullptr,
&source_zones,
&target_zones,
false); /* relaxed: also get all zones that we allow to sync to/from */
std::set<rgw_sync_bucket_entity> hint_entities;
get_hint_entities(ctx, source_zones, handler->get_source_hints(), &hint_entities, y, dpp);
get_hint_entities(ctx, target_zones, handler->get_target_hints(), &hint_entities, y, dpp);
std::set<rgw_sync_bucket_pipe> resolved_sources;
std::set<rgw_sync_bucket_pipe> resolved_dests;
for (auto& hint_entity : hint_entities) {
if (!hint_entity.zone ||
!hint_entity.bucket) {
continue; /* shouldn't really happen */
}
auto& zid = *hint_entity.zone;
auto& hint_bucket = *hint_entity.bucket;
RGWBucketSyncPolicyHandlerRef hint_bucket_handler;
auto iter = temp_map.find(optional_zone_bucket(zid, hint_bucket));
if (iter != temp_map.end()) {
hint_bucket_handler = iter->second;
} else {
int r = do_get_policy_handler(ctx, zid, hint_bucket, temp_map, &hint_bucket_handler, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 20) << "could not get bucket sync policy handler for hint bucket=" << hint_bucket << " ... skipping" << dendl;
continue;
}
}
hint_bucket_handler->get_pipes(&resolved_dests,
&resolved_sources,
self_entity); /* flipping resolved dests and sources as these are
relative to the remote entity */
}
handler->set_resolved_hints(std::move(resolved_sources), std::move(resolved_dests));
return 0;
}
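// Note on the flipped arguments above: a hint bucket's sync *sources*
// are pipes flowing towards it, which seen from self_entity are
// *destinations* (and vice versa), so the remote handler's pipes are
// swapped before being recorded as our resolved hints.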
int RGWSI_Bucket_Sync_SObj::do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> _bucket,
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef>& temp_map,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp)
{
if (!_bucket) {
*handler = svc.zone->get_sync_policy_handler(zone);
return 0;
}
auto bucket = *_bucket;
if (bucket.bucket_id.empty()) {
RGWBucketEntryPoint ep_info;
int ret = svc.bucket_sobj->read_bucket_entrypoint_info(ctx.ep,
RGWSI_Bucket::get_entrypoint_meta_key(bucket),
&ep_info,
nullptr, /* objv_tracker */
nullptr, /* mtime */
nullptr, /* attrs */
y,
dpp,
nullptr, /* cache_info */
boost::none /* refresh_version */);
if (ret < 0) {
if (ret != -ENOENT) {
        ldpp_dout(dpp, 0) << "ERROR: svc.bucket->read_bucket_entrypoint_info(bucket=" << bucket << ") returned r=" << ret << dendl;
}
return ret;
}
bucket = ep_info.bucket;
}
string zone_key;
string bucket_key;
if (zone && *zone != svc.zone->zone_id()) {
zone_key = zone->id;
}
bucket_key = RGWSI_Bucket::get_bi_meta_key(bucket);
string cache_key("bi/" + zone_key + "/" + bucket_key);
if (auto e = sync_policy_cache->find(cache_key)) {
*handler = e->handler;
return 0;
}
bucket_sync_policy_cache_entry e;
rgw_cache_entry_info cache_info;
RGWBucketInfo bucket_info;
map<string, bufferlist> attrs;
int r = svc.bucket_sobj->read_bucket_instance_info(ctx.bi,
bucket_key,
&bucket_info,
nullptr,
&attrs,
y,
dpp,
&cache_info);
if (r < 0) {
if (r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: svc.bucket->read_bucket_instance_info(key=" << bucket_key << ") returned r=" << r << dendl;
}
return r;
}
auto zone_policy_handler = svc.zone->get_sync_policy_handler(zone);
if (!zone_policy_handler) {
ldpp_dout(dpp, 20) << "ERROR: could not find policy handler for zone=" << zone << dendl;
return -ENOENT;
}
e.handler.reset(zone_policy_handler->alloc_child(bucket_info, std::move(attrs)));
r = e.handler->init(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "ERROR: failed to init bucket sync policy handler: r=" << r << dendl;
return r;
}
temp_map.emplace(optional_zone_bucket{zone, bucket}, e.handler);
rgw_sync_bucket_entity self_entity(zone.value_or(svc.zone->zone_id()), bucket);
r = resolve_policy_hints(ctx, self_entity,
e.handler,
zone_policy_handler,
temp_map, y, dpp);
if (r < 0) {
ldpp_dout(dpp, 20) << "ERROR: failed to resolve policy hints: bucket_key=" << bucket_key << ", r=" << r << dendl;
return r;
}
if (!sync_policy_cache->put(dpp, svc.cache, cache_key, &e, {&cache_info})) {
ldpp_dout(dpp, 20) << "couldn't put bucket_sync_policy cache entry, might have raced with data changes" << dendl;
}
*handler = e.handler;
return 0;
}
int RGWSI_Bucket_Sync_SObj::get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> _bucket,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp)
{
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef> temp_map;
return do_get_policy_handler(ctx, zone, _bucket, temp_map, handler, y, dpp);
}
static bool diff_sets(std::set<rgw_bucket>& orig_set,
std::set<rgw_bucket>& new_set,
vector<rgw_bucket> *added,
vector<rgw_bucket> *removed)
{
auto oiter = orig_set.begin();
auto niter = new_set.begin();
while (oiter != orig_set.end() &&
niter != new_set.end()) {
if (*oiter == *niter) {
++oiter;
++niter;
continue;
} else if (*oiter < *niter) {
removed->push_back(*oiter);
++oiter;
} else {
added->push_back(*niter);
++niter;
}
}
for (; oiter != orig_set.end(); ++oiter) {
removed->push_back(*oiter);
}
for (; niter != new_set.end(); ++niter) {
added->push_back(*niter);
}
return !(removed->empty() && added->empty());
}
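// Illustrative only: diff_sets() walks both ordered sets in lockstep,
// relying on std::set iteration order. A hypothetical example of the
// resulting behavior:
//
//   std::set<rgw_bucket> orig = { b1, b2, b3 };
//   std::set<rgw_bucket> cur  = { b2, b3, b4 };
//   std::vector<rgw_bucket> added, removed;
//   bool changed = diff_sets(orig, cur, &added, &removed);
//   // changed == true, added == { b4 }, removed == { b1 }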
class RGWSI_BS_SObj_HintIndexObj
{
friend class RGWSI_Bucket_Sync_SObj;
CephContext *cct;
struct {
RGWSI_SysObj *sysobj;
} svc;
rgw_raw_obj obj;
RGWSysObj sysobj;
RGWObjVersionTracker ot;
bool has_data{false};
public:
struct bi_entry {
rgw_bucket bucket;
map<rgw_bucket /* info_source */, obj_version> sources;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(bucket, bl);
encode(sources, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(bucket, bl);
decode(sources, bl);
DECODE_FINISH(bl);
}
bool add(const rgw_bucket& info_source,
const obj_version& info_source_ver) {
auto& ver = sources[info_source];
if (ver == info_source_ver) { /* already updated */
return false;
}
if (info_source_ver.tag == ver.tag &&
info_source_ver.ver < ver.ver) {
return false;
}
ver = info_source_ver;
return true;
}
bool remove(const rgw_bucket& info_source,
const obj_version& info_source_ver) {
auto iter = sources.find(info_source);
if (iter == sources.end()) {
return false;
}
auto& ver = iter->second;
if (info_source_ver.tag == ver.tag &&
info_source_ver.ver < ver.ver) {
return false;
}
sources.erase(info_source);
return true;
}
bool empty() const {
return sources.empty();
}
};
struct single_instance_info {
map<rgw_bucket, bi_entry> entries;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(entries, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(entries, bl);
DECODE_FINISH(bl);
}
bool add_entry(const rgw_bucket& info_source,
const obj_version& info_source_ver,
const rgw_bucket& bucket) {
auto& entry = entries[bucket];
if (!entry.add(info_source, info_source_ver)) {
return false;
}
entry.bucket = bucket;
return true;
}
bool remove_entry(const rgw_bucket& info_source,
const obj_version& info_source_ver,
const rgw_bucket& bucket) {
auto iter = entries.find(bucket);
if (iter == entries.end()) {
return false;
}
if (!iter->second.remove(info_source, info_source_ver)) {
return false;
}
if (iter->second.empty()) {
entries.erase(iter);
}
return true;
}
void clear() {
entries.clear();
}
bool empty() const {
return entries.empty();
}
void get_entities(std::set<rgw_bucket> *result) const {
for (auto& iter : entries) {
result->insert(iter.first);
}
}
};
struct info_map {
map<rgw_bucket, single_instance_info> instances;
void encode(bufferlist& bl) const {
ENCODE_START(1, 1, bl);
encode(instances, bl);
ENCODE_FINISH(bl);
}
void decode(bufferlist::const_iterator& bl) {
DECODE_START(1, bl);
decode(instances, bl);
DECODE_FINISH(bl);
}
bool empty() const {
return instances.empty();
}
void clear() {
instances.clear();
}
void get_entities(const rgw_bucket& bucket,
std::set<rgw_bucket> *result) const {
auto iter = instances.find(bucket);
if (iter == instances.end()) {
return;
}
iter->second.get_entities(result);
}
} info;
RGWSI_BS_SObj_HintIndexObj(RGWSI_SysObj *_sysobj_svc,
const rgw_raw_obj& _obj) : cct(_sysobj_svc->ctx()),
obj(_obj),
sysobj(_sysobj_svc->get_obj(obj))
{
svc.sysobj = _sysobj_svc;
}
template <typename C1, typename C2>
int update(const DoutPrefixProvider *dpp,
const rgw_bucket& entity,
const RGWBucketInfo& info_source,
C1 *add,
C2 *remove,
optional_yield y);
private:
template <typename C1, typename C2>
void update_entries(const rgw_bucket& info_source,
const obj_version& info_source_ver,
C1 *add,
C2 *remove,
single_instance_info *instance);
int read(const DoutPrefixProvider *dpp, optional_yield y);
int flush(const DoutPrefixProvider *dpp, optional_yield y);
void invalidate() {
has_data = false;
info.clear();
}
void get_entities(const rgw_bucket& bucket,
std::set<rgw_bucket> *result) const {
info.get_entities(bucket, result);
}
};
WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::bi_entry)
WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::single_instance_info)
WRITE_CLASS_ENCODER(RGWSI_BS_SObj_HintIndexObj::info_map)
template <typename C1, typename C2>
int RGWSI_BS_SObj_HintIndexObj::update(const DoutPrefixProvider *dpp,
const rgw_bucket& entity,
const RGWBucketInfo& info_source,
C1 *add,
C2 *remove,
optional_yield y)
{
int r = 0;
auto& info_source_ver = info_source.objv_tracker.read_version;
#define MAX_RETRIES 25
for (int i = 0; i < MAX_RETRIES; ++i) {
if (!has_data) {
r = read(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: cannot update hint index: failed to read: r=" << r << dendl;
return r;
}
}
auto& instance = info.instances[entity];
update_entries(info_source.bucket,
info_source_ver,
add, remove,
&instance);
if (instance.empty()) {
info.instances.erase(entity);
}
r = flush(dpp, y);
if (r >= 0) {
return 0;
}
if (r != -ECANCELED) {
ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: obj=" << obj << " r=" << r << dendl;
return r;
}
invalidate();
}
ldpp_dout(dpp, 0) << "ERROR: failed to flush hint index: too many retries (obj=" << obj << "), likely a bug" << dendl;
return -EIO;
}
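// The retry loop above is an optimistic read-modify-write: read the
// index object together with its version, mutate it in memory, then
// flush conditioned on that version. A concurrent writer makes the
// flush fail with -ECANCELED, after which the cached state is
// invalidated and the cycle restarts, up to MAX_RETRIES attempts.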
template <typename C1, typename C2>
void RGWSI_BS_SObj_HintIndexObj::update_entries(const rgw_bucket& info_source,
const obj_version& info_source_ver,
C1 *add,
C2 *remove,
single_instance_info *instance)
{
if (remove) {
for (auto& bucket : *remove) {
instance->remove_entry(info_source, info_source_ver, bucket);
}
}
if (add) {
for (auto& bucket : *add) {
instance->add_entry(info_source, info_source_ver, bucket);
}
}
}
int RGWSI_BS_SObj_HintIndexObj::read(const DoutPrefixProvider *dpp, optional_yield y) {
RGWObjVersionTracker _ot;
bufferlist bl;
int r = sysobj.rop()
.set_objv_tracker(&_ot) /* forcing read of current version */
.read(dpp, &bl, y);
if (r < 0 && r != -ENOENT) {
ldpp_dout(dpp, 0) << "ERROR: failed reading data (obj=" << obj << "), r=" << r << dendl;
return r;
}
ot = _ot;
if (r >= 0) {
auto iter = bl.cbegin();
try {
decode(info, iter);
has_data = true;
} catch (buffer::error& err) {
ldpp_dout(dpp, 0) << "ERROR: " << __func__ << "(): failed to decode entries, ignoring" << dendl;
info.clear();
}
} else {
info.clear();
}
return 0;
}
int RGWSI_BS_SObj_HintIndexObj::flush(const DoutPrefixProvider *dpp, optional_yield y) {
int r;
if (!info.empty()) {
bufferlist bl;
encode(info, bl);
r = sysobj.wop()
           .set_objv_tracker(&ot) /* fail with -ECANCELED if the version changed since read */
.write(dpp, bl, y);
} else { /* remove */
r = sysobj.wop()
.set_objv_tracker(&ot)
.remove(dpp, y);
}
if (r < 0) {
return r;
}
return 0;
}
rgw_raw_obj RGWSI_Bucket_Sync_SObj_HintIndexManager::get_sources_obj(const rgw_bucket& bucket) const
{
rgw_bucket b = bucket;
b.bucket_id.clear();
return rgw_raw_obj(svc.zone->get_zone_params().log_pool,
bucket_sync_sources_oid_prefix + "." + b.get_key());
}
rgw_raw_obj RGWSI_Bucket_Sync_SObj_HintIndexManager::get_dests_obj(const rgw_bucket& bucket) const
{
rgw_bucket b = bucket;
b.bucket_id.clear();
return rgw_raw_obj(svc.zone->get_zone_params().log_pool,
bucket_sync_targets_oid_prefix + "." + b.get_key());
}
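// Illustrative only: both hint indexes live in the zone's log pool and
// are keyed by the bucket with its bucket_id cleared, so every instance
// of a bucket shares the same index object. For a hypothetical
// untenanted bucket "foo" the resulting oids would be:
//
//   bucket.sync-source-hints.foo
//   bucket.sync-target-hints.foo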
template <typename C1, typename C2>
int RGWSI_Bucket_Sync_SObj_HintIndexManager::update_hints(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
C1& added_dests,
C2& removed_dests,
C1& added_sources,
C2& removed_sources,
optional_yield y)
{
C1 self_entity = { bucket_info.bucket };
if (!added_dests.empty() ||
!removed_dests.empty()) {
/* update our dests */
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
get_dests_obj(bucket_info.bucket));
int r = index.update(dpp, bucket_info.bucket,
bucket_info,
&added_dests,
&removed_dests,
y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << bucket_info.bucket << " r=" << r << dendl;
return r;
}
/* update dest buckets */
for (auto& dest_bucket : added_dests) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_sources_obj(dest_bucket));
int r = dep_index.update(dpp, dest_bucket,
bucket_info,
&self_entity,
static_cast<C2 *>(nullptr),
y);
if (r < 0) {
        ldpp_dout(dpp, 0) << "ERROR: failed to update sources index for bucket=" << dest_bucket << " r=" << r << dendl;
return r;
}
}
/* update removed dest buckets */
for (auto& dest_bucket : removed_dests) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_sources_obj(dest_bucket));
int r = dep_index.update(dpp, dest_bucket,
bucket_info,
static_cast<C1 *>(nullptr),
&self_entity,
y);
if (r < 0) {
        ldpp_dout(dpp, 0) << "ERROR: failed to update sources index for bucket=" << dest_bucket << " r=" << r << dendl;
return r;
}
}
}
if (!added_sources.empty() ||
!removed_sources.empty()) {
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
get_sources_obj(bucket_info.bucket));
/* update our sources */
int r = index.update(dpp, bucket_info.bucket,
bucket_info,
&added_sources,
&removed_sources,
y);
if (r < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to update sources index for bucket=" << bucket_info.bucket << " r=" << r << dendl;
return r;
}
    /* update added source buckets */
for (auto& source_bucket : added_sources) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_dests_obj(source_bucket));
int r = dep_index.update(dpp, source_bucket,
bucket_info,
&self_entity,
static_cast<C2 *>(nullptr),
y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl;
return r;
}
}
    /* update removed source buckets */
for (auto& source_bucket : removed_sources) {
RGWSI_BS_SObj_HintIndexObj dep_index(svc.sysobj,
get_dests_obj(source_bucket));
int r = dep_index.update(dpp, source_bucket,
bucket_info,
static_cast<C1 *>(nullptr),
&self_entity,
y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to update targets index for bucket=" << source_bucket << " r=" << r << dendl;
return r;
}
}
}
return 0;
}
int RGWSI_Bucket_Sync_SObj::handle_bi_removal(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
optional_yield y)
{
std::set<rgw_bucket> sources_set;
std::set<rgw_bucket> dests_set;
if (bucket_info.sync_policy) {
bucket_info.sync_policy->get_potential_related_buckets(bucket_info.bucket,
&sources_set,
&dests_set);
}
std::vector<rgw_bucket> removed_sources;
removed_sources.reserve(sources_set.size());
for (auto& e : sources_set) {
removed_sources.push_back(e);
}
std::vector<rgw_bucket> removed_dests;
removed_dests.reserve(dests_set.size());
for (auto& e : dests_set) {
removed_dests.push_back(e);
}
std::vector<rgw_bucket> added_sources;
std::vector<rgw_bucket> added_dests;
return hint_index_mgr->update_hints(dpp, bucket_info,
added_dests,
removed_dests,
added_sources,
removed_sources,
y);
}
int RGWSI_Bucket_Sync_SObj::handle_bi_update(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y)
{
std::set<rgw_bucket> orig_sources;
std::set<rgw_bucket> orig_dests;
if (orig_bucket_info &&
orig_bucket_info->sync_policy) {
orig_bucket_info->sync_policy->get_potential_related_buckets(bucket_info.bucket,
&orig_sources,
&orig_dests);
}
std::set<rgw_bucket> sources;
std::set<rgw_bucket> dests;
if (bucket_info.sync_policy) {
bucket_info.sync_policy->get_potential_related_buckets(bucket_info.bucket,
&sources,
&dests);
}
std::vector<rgw_bucket> removed_sources;
std::vector<rgw_bucket> added_sources;
bool found = diff_sets(orig_sources, sources, &added_sources, &removed_sources);
ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_sources=" << orig_sources << " new_sources=" << sources << dendl;
ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential sources added=" << added_sources << " removed=" << removed_sources << dendl;
std::vector<rgw_bucket> removed_dests;
std::vector<rgw_bucket> added_dests;
  /* compute the dests diff unconditionally (|= instead of ||) so that
   * removed dests are still recorded when the sources already differ */
  found |= diff_sets(orig_dests, dests, &added_dests, &removed_dests);
ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": orig_dests=" << orig_dests << " new_dests=" << dests << dendl;
ldpp_dout(dpp, 20) << __func__ << "(): bucket=" << bucket_info.bucket << ": potential dests added=" << added_dests << " removed=" << removed_dests << dendl;
if (!found) {
return 0;
}
return hint_index_mgr->update_hints(dpp, bucket_info,
dests, /* set all dests, not just the ones that were added */
removed_dests,
                                       sources, /* set all sources, not just the ones that were added */
removed_sources,
y);
}
int RGWSI_Bucket_Sync_SObj::get_bucket_sync_hints(const DoutPrefixProvider *dpp,
const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y)
{
if (!sources && !dests) {
return 0;
}
if (sources) {
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
hint_index_mgr->get_sources_obj(bucket));
int r = index.read(dpp, y);
if (r < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to read sources index for bucket=" << bucket << " r=" << r << dendl;
return r;
}
index.get_entities(bucket, sources);
if (!bucket.bucket_id.empty()) {
rgw_bucket b = bucket;
b.bucket_id.clear();
index.get_entities(b, sources);
}
}
if (dests) {
RGWSI_BS_SObj_HintIndexObj index(svc.sysobj,
hint_index_mgr->get_dests_obj(bucket));
int r = index.read(dpp, y);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to read targets index for bucket=" << bucket << " r=" << r << dendl;
return r;
}
index.get_entities(bucket, dests);
if (!bucket.bucket_id.empty()) {
rgw_bucket b = bucket;
b.bucket_id.clear();
index.get_entities(b, dests);
}
}
return 0;
}
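// Note: each index above is deliberately queried twice -- once with the
// full bucket key (including bucket_id) and once with the id cleared --
// since hint entries may have been recorded against either form.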
| 29,479 | 31.610619 | 165 |
cc
|
null |
ceph-main/src/rgw/services/svc_bucket_sync_sobj.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
#include "svc_meta_be.h"
#include "svc_bucket_sync.h"
class RGWSI_Zone;
class RGWSI_SysObj_Cache;
class RGWSI_Bucket_SObj;
template <class T>
class RGWChainedCacheImpl;
class RGWSI_Bucket_Sync_SObj_HintIndexManager;
struct rgw_sync_bucket_entity;
class RGWSI_Bucket_Sync_SObj : public RGWSI_Bucket_Sync
{
struct bucket_sync_policy_cache_entry {
std::shared_ptr<RGWBucketSyncPolicyHandler> handler;
};
std::unique_ptr<RGWChainedCacheImpl<bucket_sync_policy_cache_entry> > sync_policy_cache;
std::unique_ptr<RGWSI_Bucket_Sync_SObj_HintIndexManager> hint_index_mgr;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
struct optional_zone_bucket {
std::optional<rgw_zone_id> zone;
std::optional<rgw_bucket> bucket;
optional_zone_bucket(const std::optional<rgw_zone_id>& _zone,
const std::optional<rgw_bucket>& _bucket) : zone(_zone), bucket(_bucket) {}
bool operator<(const optional_zone_bucket& ozb) const {
if (zone < ozb.zone) {
return true;
}
if (zone > ozb.zone) {
return false;
}
return bucket < ozb.bucket;
}
};
void get_hint_entities(RGWSI_Bucket_X_Ctx& ctx,
const std::set<rgw_zone_id>& zone_names,
const std::set<rgw_bucket>& buckets,
std::set<rgw_sync_bucket_entity> *hint_entities,
optional_yield y, const DoutPrefixProvider *);
int resolve_policy_hints(RGWSI_Bucket_X_Ctx& ctx,
rgw_sync_bucket_entity& self_entity,
RGWBucketSyncPolicyHandlerRef& handler,
RGWBucketSyncPolicyHandlerRef& zone_policy_handler,
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef>& temp_map,
optional_yield y,
const DoutPrefixProvider *dpp);
int do_get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> _bucket,
std::map<optional_zone_bucket, RGWBucketSyncPolicyHandlerRef>& temp_map,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
const DoutPrefixProvider *dpp);
public:
struct Svc {
RGWSI_Zone *zone{nullptr};
RGWSI_SysObj *sysobj{nullptr};
RGWSI_SysObj_Cache *cache{nullptr};
RGWSI_Bucket_SObj *bucket_sobj{nullptr};
} svc;
RGWSI_Bucket_Sync_SObj(CephContext *cct);
~RGWSI_Bucket_Sync_SObj();
void init(RGWSI_Zone *_zone_svc,
RGWSI_SysObj *_sysobj_svc,
RGWSI_SysObj_Cache *_cache_svc,
RGWSI_Bucket_SObj *_bucket_sobj_svc);
int get_policy_handler(RGWSI_Bucket_X_Ctx& ctx,
std::optional<rgw_zone_id> zone,
std::optional<rgw_bucket> bucket,
RGWBucketSyncPolicyHandlerRef *handler,
optional_yield y,
                         const DoutPrefixProvider *dpp) override;
int handle_bi_update(const DoutPrefixProvider *dpp,
RGWBucketInfo& bucket_info,
RGWBucketInfo *orig_bucket_info,
optional_yield y) override;
int handle_bi_removal(const DoutPrefixProvider *dpp,
const RGWBucketInfo& bucket_info,
optional_yield y) override;
int get_bucket_sync_hints(const DoutPrefixProvider *dpp,
const rgw_bucket& bucket,
std::set<rgw_bucket> *sources,
std::set<rgw_bucket> *dests,
optional_yield y) override;
};
| 4,274 | 33.475806 | 100 |
h
|
null |
ceph-main/src/rgw/services/svc_bucket_types.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/ptr_wrapper.h"
#include "svc_meta_be.h"
#include "svc_meta_be_types.h"
class RGWSI_MetaBackend_Handler;
using RGWSI_Bucket_BE_Handler = ptr_wrapper<RGWSI_MetaBackend_Handler, RGWSI_META_BE_TYPES::BUCKET>;
using RGWSI_BucketInstance_BE_Handler = ptr_wrapper<RGWSI_MetaBackend_Handler, RGWSI_META_BE_TYPES::BI>;
using RGWSI_Bucket_EP_Ctx = ptr_wrapper<RGWSI_MetaBackend::Context, RGWSI_META_BE_TYPES::BUCKET>;
using RGWSI_Bucket_BI_Ctx = ptr_wrapper<RGWSI_MetaBackend::Context, RGWSI_META_BE_TYPES::BI>;
struct RGWSI_Bucket_X_Ctx {
RGWSI_Bucket_EP_Ctx ep;
RGWSI_Bucket_BI_Ctx bi;
};
| 1,037 | 25.615385 | 104 |
h
|
null |
ceph-main/src/rgw/services/svc_cls.cc
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
#include "svc_cls.h"
#include "svc_rados.h"
#include "svc_zone.h"
#include "rgw_zone.h"
#include "cls/otp/cls_otp_client.h"
#include "cls/log/cls_log_client.h"
#include "cls/lock/cls_lock_client.h"
#define dout_subsys ceph_subsys_rgw
using namespace std;
static string log_lock_name = "rgw_log_lock";
int RGWSI_Cls::do_start(optional_yield y, const DoutPrefixProvider *dpp)
{
int r = mfa.do_start(y, dpp);
if (r < 0) {
ldpp_dout(dpp, 0) << "ERROR: failed to start mfa service" << dendl;
return r;
}
return 0;
}
int RGWSI_Cls::MFA::get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional<RGWSI_RADOS::Obj> *obj)
{
string oid = get_mfa_oid(user);
rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid);
obj->emplace(rados_svc->obj(o));
int r = (*obj)->open(dpp);
if (r < 0) {
ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl;
return r;
}
return 0;
}
int RGWSI_Cls::MFA::get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref)
{
std::optional<RGWSI_RADOS::Obj> obj;
int r = get_mfa_obj(dpp, user, &obj);
if (r < 0) {
return r;
}
*ref = obj->get_ref();
return 0;
}
int RGWSI_Cls::MFA::check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& otp_id, const string& pin, optional_yield y)
{
rgw_rados_ref ref;
int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
rados::cls::otp::otp_check_t result;
r = rados::cls::otp::OTP::check(cct, ref.pool.ioctx(), ref.obj.oid, otp_id, pin, &result);
if (r < 0)
return r;
ldpp_dout(dpp, 20) << "OTP check, otp_id=" << otp_id << " result=" << (int)result.result << dendl;
return (result.result == rados::cls::otp::OTP_CHECK_SUCCESS ? 0 : -EACCES);
}
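// Illustrative only: a hypothetical call site; callers generally treat
// any non-zero return as an authentication failure:
//
//   int r = cls_svc->mfa.check_mfa(dpp, user, "totp-serial-1", pin, y);
//   if (r < 0) {
//     return r; // -EACCES on a bad pin, other codes on I/O errors
//   }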
void RGWSI_Cls::MFA::prepare_mfa_write(librados::ObjectWriteOperation *op,
RGWObjVersionTracker *objv_tracker,
const ceph::real_time& mtime)
{
RGWObjVersionTracker ot;
if (objv_tracker) {
ot = *objv_tracker;
}
if (ot.write_version.tag.empty()) {
if (ot.read_version.tag.empty()) {
ot.generate_new_write_ver(cct);
} else {
ot.write_version = ot.read_version;
ot.write_version.ver++;
}
}
ot.prepare_op_for_write(op);
struct timespec mtime_ts = real_clock::to_timespec(mtime);
op->mtime2(&mtime_ts);
}
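// Illustrative only: a minimal sketch of the conditional-write behavior
// prepared above, assuming a tracker populated by an earlier read:
//
//   RGWObjVersionTracker ot;   // read_version set by a prior read
//   librados::ObjectWriteOperation op;
//   prepare_mfa_write(&op, &ot, real_clock::now());
//   // the op now asserts the stored version still matches
//   // ot.read_version and writes a bumped version, so a racing
//   // writer fails the op instead of silently clobbering OTP state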
int RGWSI_Cls::MFA::create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config,
RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y)
{
std::optional<RGWSI_RADOS::Obj> obj;
int r = get_mfa_obj(dpp, user, &obj);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
prepare_mfa_write(&op, objv_tracker, mtime);
rados::cls::otp::OTP::create(&op, config);
r = obj->operate(dpp, &op, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "OTP create, otp_id=" << config.id << " result=" << (int)r << dendl;
return r;
}
return 0;
}
int RGWSI_Cls::MFA::remove_mfa(const DoutPrefixProvider *dpp,
const rgw_user& user, const string& id,
RGWObjVersionTracker *objv_tracker,
const ceph::real_time& mtime,
optional_yield y)
{
std::optional<RGWSI_RADOS::Obj> obj;
int r = get_mfa_obj(dpp, user, &obj);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
prepare_mfa_write(&op, objv_tracker, mtime);
rados::cls::otp::OTP::remove(&op, id);
r = obj->operate(dpp, &op, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "OTP remove, otp_id=" << id << " result=" << (int)r << dendl;
return r;
}
return 0;
}
int RGWSI_Cls::MFA::get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const string& id, rados::cls::otp::otp_info_t *result,
optional_yield y)
{
rgw_rados_ref ref;
int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
r = rados::cls::otp::OTP::get(nullptr, ref.pool.ioctx(), ref.obj.oid, id, result);
if (r < 0) {
return r;
}
return 0;
}
int RGWSI_Cls::MFA::list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, list<rados::cls::otp::otp_info_t> *result,
optional_yield y)
{
rgw_rados_ref ref;
int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
r = rados::cls::otp::OTP::get_all(nullptr, ref.pool.ioctx(), ref.obj.oid, result);
if (r < 0) {
return r;
}
return 0;
}
int RGWSI_Cls::MFA::otp_get_current_time(const DoutPrefixProvider *dpp, const rgw_user& user, ceph::real_time *result,
optional_yield y)
{
rgw_rados_ref ref;
int r = get_mfa_ref(dpp, user, &ref);
if (r < 0) {
return r;
}
r = rados::cls::otp::OTP::get_current_time(ref.pool.ioctx(), ref.obj.oid, result);
if (r < 0) {
return r;
}
return 0;
}
int RGWSI_Cls::MFA::set_mfa(const DoutPrefixProvider *dpp, const string& oid, const list<rados::cls::otp::otp_info_t>& entries,
bool reset_obj, RGWObjVersionTracker *objv_tracker,
const real_time& mtime,
optional_yield y)
{
rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid);
auto obj = rados_svc->obj(o);
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl;
return r;
}
librados::ObjectWriteOperation op;
if (reset_obj) {
op.remove();
op.set_op_flags2(LIBRADOS_OP_FLAG_FAILOK);
op.create(false);
}
prepare_mfa_write(&op, objv_tracker, mtime);
rados::cls::otp::OTP::set(&op, entries);
r = obj.operate(dpp, &op, y);
if (r < 0) {
ldpp_dout(dpp, 20) << "OTP set entries.size()=" << entries.size() << " result=" << (int)r << dendl;
return r;
}
return 0;
}
int RGWSI_Cls::MFA::list_mfa(const DoutPrefixProvider *dpp, const string& oid, list<rados::cls::otp::otp_info_t> *result,
RGWObjVersionTracker *objv_tracker, ceph::real_time *pmtime,
optional_yield y)
{
rgw_raw_obj o(zone_svc->get_zone_params().otp_pool, oid);
auto obj = rados_svc->obj(o);
int r = obj.open(dpp);
if (r < 0) {
ldpp_dout(dpp, 4) << "failed to open rados context for " << o << dendl;
return r;
}
auto& ref = obj.get_ref();
librados::ObjectReadOperation op;
struct timespec mtime_ts;
if (pmtime) {
op.stat2(nullptr, &mtime_ts, nullptr);
}
objv_tracker->prepare_op_for_read(&op);
r = rados::cls::otp::OTP::get_all(&op, ref.pool.ioctx(), ref.obj.oid, result);
if (r < 0) {
return r;
}
if (pmtime) {
*pmtime = ceph::real_clock::from_timespec(mtime_ts);
}
return 0;
}
void RGWSI_Cls::TimeLog::prepare_entry(cls_log_entry& entry,
const real_time& ut,
const string& section,
const string& key,
bufferlist& bl)
{
cls_log_add_prepare_entry(entry, utime_t(ut), section, key, bl);
}
int RGWSI_Cls::TimeLog::init_obj(const DoutPrefixProvider *dpp, const string& oid, RGWSI_RADOS::Obj& obj)
{
rgw_raw_obj o(zone_svc->get_zone_params().log_pool, oid);
obj = rados_svc->obj(o);
return obj.open(dpp);
}
int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp,
const string& oid,
const real_time& ut,
const string& section,
const string& key,
bufferlist& bl,
optional_yield y)
{
RGWSI_RADOS::Obj obj;
int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
utime_t t(ut);
cls_log_add(op, t, section, key, bl);
return obj.operate(dpp, &op, y);
}
int RGWSI_Cls::TimeLog::add(const DoutPrefixProvider *dpp,
const string& oid,
std::list<cls_log_entry>& entries,
librados::AioCompletion *completion,
bool monotonic_inc,
optional_yield y)
{
RGWSI_RADOS::Obj obj;
int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
librados::ObjectWriteOperation op;
cls_log_add(op, entries, monotonic_inc);
if (!completion) {
r = obj.operate(dpp, &op, y);
} else {
r = obj.aio_operate(completion, &op);
}
return r;
}
int RGWSI_Cls::TimeLog::list(const DoutPrefixProvider *dpp,
const string& oid,
const real_time& start_time,
const real_time& end_time,
int max_entries, std::list<cls_log_entry>& entries,
const string& marker,
string *out_marker,
bool *truncated,
optional_yield y)
{
RGWSI_RADOS::Obj obj;
int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
librados::ObjectReadOperation op;
utime_t st(start_time);
utime_t et(end_time);
cls_log_list(op, st, et, marker, max_entries, entries,
out_marker, truncated);
bufferlist obl;
int ret = obj.operate(dpp, &op, &obl, y);
if (ret < 0)
return ret;
return 0;
}
int RGWSI_Cls::TimeLog::info(const DoutPrefixProvider *dpp,
const string& oid,
cls_log_header *header,
optional_yield y)
{
RGWSI_RADOS::Obj obj;
int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
librados::ObjectReadOperation op;
cls_log_info(op, header);
bufferlist obl;
int ret = obj.operate(dpp, &op, &obl, y);
if (ret < 0)
return ret;
return 0;
}
int RGWSI_Cls::TimeLog::info_async(const DoutPrefixProvider *dpp,
RGWSI_RADOS::Obj& obj,
const string& oid,
cls_log_header *header,
librados::AioCompletion *completion)
{
int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
librados::ObjectReadOperation op;
cls_log_info(op, header);
int ret = obj.aio_operate(completion, &op, nullptr);
if (ret < 0)
return ret;
return 0;
}
int RGWSI_Cls::TimeLog::trim(const DoutPrefixProvider *dpp,
const string& oid,
const real_time& start_time,
const real_time& end_time,
const string& from_marker,
const string& to_marker,
librados::AioCompletion *completion,
optional_yield y)
{
RGWSI_RADOS::Obj obj;
int r = init_obj(dpp, oid, obj);
if (r < 0) {
return r;
}
utime_t st(start_time);
utime_t et(end_time);
librados::ObjectWriteOperation op;
cls_log_trim(op, st, et, from_marker, to_marker);
if (!completion) {
r = obj.operate(dpp, &op, y);
} else {
r = obj.aio_operate(completion, &op);
}
return r;
}
int RGWSI_Cls::Lock::lock_exclusive(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
const string& oid,
timespan& duration,
string& zone_id,
string& owner_id,
std::optional<string> lock_name)
{
auto p = rados_svc->pool(pool);
int r = p.open(dpp);
if (r < 0) {
return r;
}
  uint64_t msec = std::chrono::duration_cast<std::chrono::milliseconds>(duration).count();
  utime_t ut(msec / 1000, (msec % 1000) * 1000000); /* utime_t takes seconds and nanoseconds */
rados::cls::lock::Lock l(lock_name.value_or(log_lock_name));
l.set_duration(ut);
l.set_cookie(owner_id);
l.set_tag(zone_id);
l.set_may_renew(true);
return l.lock_exclusive(&p.ioctx(), oid);
}
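// Illustrative only: a minimal sketch of the lock/unlock pairing,
// assuming pool, oid, zone_id and owner_id from the surrounding
// context:
//
//   timespan duration = std::chrono::seconds(60);
//   int r = cls_svc->lock.lock_exclusive(dpp, pool, oid, duration,
//                                        zone_id, owner_id);
//   if (r < 0) {
//     // e.g. -EBUSY while another owner holds the lock
//     return r;
//   }
//   // ... do work under the lock, renewing as needed ...
//   cls_svc->lock.unlock(dpp, pool, oid, zone_id, owner_id);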
int RGWSI_Cls::Lock::unlock(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
const string& oid,
string& zone_id,
string& owner_id,
std::optional<string> lock_name)
{
auto p = rados_svc->pool(pool);
int r = p.open(dpp);
if (r < 0) {
return r;
}
rados::cls::lock::Lock l(lock_name.value_or(log_lock_name));
l.set_tag(zone_id);
l.set_cookie(owner_id);
return l.unlock(&p.ioctx(), oid);
}
| 12,658 | 25.427975 | 141 |
cc
|
null |
ceph-main/src/rgw/services/svc_cls.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "cls/otp/cls_otp_types.h"
#include "cls/log/cls_log_types.h"
#include "rgw_service.h"
#include "svc_rados.h"
class RGWSI_Cls : public RGWServiceInstance
{
RGWSI_Zone *zone_svc{nullptr};
RGWSI_RADOS *rados_svc{nullptr};
class ClsSubService : public RGWServiceInstance {
friend class RGWSI_Cls;
RGWSI_Cls *cls_svc{nullptr};
RGWSI_Zone *zone_svc{nullptr};
RGWSI_RADOS *rados_svc{nullptr};
void init(RGWSI_Cls *_cls_svc, RGWSI_Zone *_zone_svc, RGWSI_RADOS *_rados_svc) {
cls_svc = _cls_svc;
zone_svc = _cls_svc->zone_svc;
rados_svc = _cls_svc->rados_svc;
}
public:
ClsSubService(CephContext *cct) : RGWServiceInstance(cct) {}
};
public:
class MFA : public ClsSubService {
int get_mfa_obj(const DoutPrefixProvider *dpp, const rgw_user& user, std::optional<RGWSI_RADOS::Obj> *obj);
int get_mfa_ref(const DoutPrefixProvider *dpp, const rgw_user& user, rgw_rados_ref *ref);
void prepare_mfa_write(librados::ObjectWriteOperation *op,
RGWObjVersionTracker *objv_tracker,
const ceph::real_time& mtime);
public:
MFA(CephContext *cct): ClsSubService(cct) {}
std::string get_mfa_oid(const rgw_user& user) {
return std::string("user:") + user.to_str();
}
int check_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const std::string& otp_id, const std::string& pin, optional_yield y);
int create_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const rados::cls::otp::otp_info_t& config,
RGWObjVersionTracker *objv_tracker, const ceph::real_time& mtime, optional_yield y);
int remove_mfa(const DoutPrefixProvider *dpp,
const rgw_user& user, const std::string& id,
RGWObjVersionTracker *objv_tracker,
const ceph::real_time& mtime,
optional_yield y);
int get_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, const std::string& id, rados::cls::otp::otp_info_t *result, optional_yield y);
int list_mfa(const DoutPrefixProvider *dpp, const rgw_user& user, std::list<rados::cls::otp::otp_info_t> *result, optional_yield y);
int otp_get_current_time(const DoutPrefixProvider *dpp, const rgw_user& user, ceph::real_time *result, optional_yield y);
int set_mfa(const DoutPrefixProvider *dpp, const std::string& oid, const std::list<rados::cls::otp::otp_info_t>& entries,
bool reset_obj, RGWObjVersionTracker *objv_tracker,
const real_time& mtime, optional_yield y);
int list_mfa(const DoutPrefixProvider *dpp, const std::string& oid, std::list<rados::cls::otp::otp_info_t> *result,
RGWObjVersionTracker *objv_tracker, ceph::real_time *pmtime, optional_yield y);
} mfa;
class TimeLog : public ClsSubService {
int init_obj(const DoutPrefixProvider *dpp, const std::string& oid, RGWSI_RADOS::Obj& obj);
public:
TimeLog(CephContext *cct): ClsSubService(cct) {}
void prepare_entry(cls_log_entry& entry,
const real_time& ut,
const std::string& section,
const std::string& key,
bufferlist& bl);
int add(const DoutPrefixProvider *dpp,
const std::string& oid,
const real_time& ut,
const std::string& section,
const std::string& key,
bufferlist& bl,
optional_yield y);
int add(const DoutPrefixProvider *dpp,
const std::string& oid,
std::list<cls_log_entry>& entries,
librados::AioCompletion *completion,
bool monotonic_inc,
optional_yield y);
int list(const DoutPrefixProvider *dpp,
const std::string& oid,
const real_time& start_time,
const real_time& end_time,
int max_entries, std::list<cls_log_entry>& entries,
const std::string& marker,
std::string *out_marker,
bool *truncated,
optional_yield y);
int info(const DoutPrefixProvider *dpp,
const std::string& oid,
cls_log_header *header,
optional_yield y);
int info_async(const DoutPrefixProvider *dpp,
RGWSI_RADOS::Obj& obj,
const std::string& oid,
cls_log_header *header,
librados::AioCompletion *completion);
int trim(const DoutPrefixProvider *dpp,
const std::string& oid,
const real_time& start_time,
const real_time& end_time,
const std::string& from_marker,
const std::string& to_marker,
librados::AioCompletion *completion,
optional_yield y);
} timelog;
class Lock : public ClsSubService {
int init_obj(const std::string& oid, RGWSI_RADOS::Obj& obj);
public:
Lock(CephContext *cct): ClsSubService(cct) {}
int lock_exclusive(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
const std::string& oid,
timespan& duration,
std::string& zone_id,
std::string& owner_id,
std::optional<std::string> lock_name = std::nullopt);
int unlock(const DoutPrefixProvider *dpp,
const rgw_pool& pool,
const std::string& oid,
std::string& zone_id,
std::string& owner_id,
std::optional<std::string> lock_name = std::nullopt);
} lock;
RGWSI_Cls(CephContext *cct): RGWServiceInstance(cct), mfa(cct), timelog(cct), lock(cct) {}
void init(RGWSI_Zone *_zone_svc, RGWSI_RADOS *_rados_svc) {
rados_svc = _rados_svc;
zone_svc = _zone_svc;
mfa.init(this, zone_svc, rados_svc);
timelog.init(this, zone_svc, rados_svc);
lock.init(this, zone_svc, rados_svc);
}
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
};
| 6,338 | 36.958084 | 147 |
h
|
null |
ceph-main/src/rgw/services/svc_config_key.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "rgw_service.h"
class RGWSI_ConfigKey : public RGWServiceInstance
{
public:
RGWSI_ConfigKey(CephContext *cct) : RGWServiceInstance(cct) {}
virtual ~RGWSI_ConfigKey() {}
virtual int get(const std::string& key, bool secure, bufferlist *result) = 0;
};
| 697 | 20.8125 | 79 |
h
|
null |
ceph-main/src/rgw/services/svc_config_key_rados.cc
|
#include "svc_rados.h"
#include "svc_config_key_rados.h"
using namespace std;
RGWSI_ConfigKey_RADOS::~RGWSI_ConfigKey_RADOS(){}
int RGWSI_ConfigKey_RADOS::do_start(optional_yield, const DoutPrefixProvider *dpp)
{
maybe_insecure_mon_conn = !svc.rados->check_secure_mon_conn(dpp);
return 0;
}
void RGWSI_ConfigKey_RADOS::warn_if_insecure()
{
if (!maybe_insecure_mon_conn ||
warned_insecure.test_and_set()) {
return;
}
string s = "rgw is configured to optionally allow insecure connections to the monitors (auth_supported, ms_mon_client_mode), ssl certificates stored at the monitor configuration could leak";
svc.rados->clog_warn(s);
lderr(ctx()) << __func__ << "(): WARNING: " << s << dendl;
}
int RGWSI_ConfigKey_RADOS::get(const string& key, bool secure, bufferlist *result)
{
string cmd =
"{"
"\"prefix\": \"config-key get\", "
"\"key\": \"" + key + "\""
"}";
bufferlist inbl;
auto handle = svc.rados->handle();
int ret = handle.mon_command(cmd, inbl, result, nullptr);
if (ret < 0) {
return ret;
}
if (secure) {
warn_if_insecure();
}
return 0;
}
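// Illustrative only: the call above is the moral equivalent of running
// `ceph config-key get <key>` against the monitors. A hypothetical
// call site (the key name is made up):
//
//   bufferlist bl;
//   int r = config_key_svc->get("rgw/cert/default", true, &bl); // secure=true
//   if (r < 0) {
//     return r; // e.g. -ENOENT if the key was never stored
//   }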
| 1,132 | 21.215686 | 192 |
cc
|
null |
ceph-main/src/rgw/services/svc_config_key_rados.h
|
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2019 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include <atomic>
#include "rgw_service.h"
#include "svc_config_key.h"
class RGWSI_RADOS;
class RGWSI_ConfigKey_RADOS : public RGWSI_ConfigKey
{
bool maybe_insecure_mon_conn{false};
std::atomic_flag warned_insecure = ATOMIC_FLAG_INIT;
int do_start(optional_yield, const DoutPrefixProvider *dpp) override;
void warn_if_insecure();
public:
struct Svc {
RGWSI_RADOS *rados{nullptr};
} svc;
void init(RGWSI_RADOS *rados_svc) {
svc.rados = rados_svc;
}
RGWSI_ConfigKey_RADOS(CephContext *cct) : RGWSI_ConfigKey(cct) {}
virtual ~RGWSI_ConfigKey_RADOS() override;
int get(const std::string& key, bool secure, bufferlist *result) override;
};
| 1,107 | 19.145455 | 76 |
h
|