Search is not available for this dataset
repo
stringlengths
2
152
file
stringlengths
15
239
code
stringlengths
0
58.4M
file_length
int64
0
58.4M
avg_line_length
float64
0
1.81M
max_line_length
int64
0
12.7M
extension_type
stringclasses
364 values
null
ceph-main/src/rgw/rgw_sal_motr.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=2 sw=2 expandtab ft=cpp /* * Ceph - scalable distributed file system * * SAL implementation for the CORTX Motr backend * * Copyright (C) 2021 Seagate Technology LLC and/or its Affiliates * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <errno.h> #include <stdlib.h> #include <unistd.h> extern "C" { #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wextern-c-compat" #pragma clang diagnostic ignored "-Wdeprecated-anon-enum-enum-conversion" #include "motr/config.h" #include "lib/types.h" #include "lib/trace.h" // m0_trace_set_mmapped_buffer #include "motr/layout.h" // M0_OBJ_LAYOUT_ID #include "helpers/helpers.h" // m0_ufid_next #pragma clang diagnostic pop } #include "common/Clock.h" #include "common/errno.h" #include "rgw_compression.h" #include "rgw_sal.h" #include "rgw_sal_motr.h" #include "rgw_bucket.h" #define dout_subsys ceph_subsys_rgw using std::string; using std::map; using std::vector; using std::set; using std::list; static string mp_ns = RGW_OBJ_NS_MULTIPART; static struct m0_ufid_generator ufid_gr; namespace rgw::sal { using ::ceph::encode; using ::ceph::decode; static std::string motr_global_indices[] = { RGW_MOTR_USERS_IDX_NAME, RGW_MOTR_BUCKET_INST_IDX_NAME, RGW_MOTR_BUCKET_HD_IDX_NAME, RGW_IAM_MOTR_ACCESS_KEY, RGW_IAM_MOTR_EMAIL_KEY }; void MotrMetaCache::invalid(const DoutPrefixProvider *dpp, const string& name) { cache.invalidate_remove(dpp, name); } int MotrMetaCache::put(const DoutPrefixProvider *dpp, const string& name, const bufferlist& data) { ldpp_dout(dpp, 0) << "Put into cache: name = " << name << dendl; ObjectCacheInfo info; info.status = 0; info.data = data; info.flags = CACHE_FLAG_DATA; info.meta.mtime = ceph::real_clock::now(); info.meta.size = data.length(); 
cache.put(dpp, name, info, NULL); // Inform other rgw instances. Do nothing if it gets some error? int rc = distribute_cache(dpp, name, info, UPDATE_OBJ); if (rc < 0) ldpp_dout(dpp, 0) << "ERROR: failed to distribute cache for " << name << dendl; return 0; } int MotrMetaCache::get(const DoutPrefixProvider *dpp, const string& name, bufferlist& data) { ObjectCacheInfo info; uint32_t flags = CACHE_FLAG_DATA; int rc = cache.get(dpp, name, info, flags, NULL); if (rc == 0) { if (info.status < 0) return info.status; bufferlist& bl = info.data; bufferlist::iterator it = bl.begin(); data.clear(); it.copy_all(data); ldpp_dout(dpp, 0) << "Cache hit: name = " << name << dendl; return 0; } ldpp_dout(dpp, 0) << "Cache miss: name = " << name << ", rc = "<< rc << dendl; if(rc == -ENODATA) return -ENOENT; return rc; } int MotrMetaCache::remove(const DoutPrefixProvider *dpp, const string& name) { cache.invalidate_remove(dpp, name); ObjectCacheInfo info; int rc = distribute_cache(dpp, name, info, INVALIDATE_OBJ); if (rc < 0) { ldpp_dout(dpp, 0) << "ERROR: " <<__func__<< "(): failed to distribute cache: rc =" << rc << dendl; } ldpp_dout(dpp, 0) << "Remove from cache: name = " << name << dendl; return 0; } int MotrMetaCache::distribute_cache(const DoutPrefixProvider *dpp, const string& normal_name, ObjectCacheInfo& obj_info, int op) { return 0; } int MotrMetaCache::watch_cb(const DoutPrefixProvider *dpp, uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl) { return 0; } void MotrMetaCache::set_enabled(bool status) { cache.set_enabled(status); } // TODO: properly handle the number of key/value pairs to get in // one query. Now the POC simply tries to retrieve all `max` number of pairs // with starting key `marker`. 
// List this user's buckets by scanning the per-user info index
// ("motr.rgw.user.info.<uid>") starting at `marker`. Stops early when
// `end_marker` is reached; sets the truncated flag when exactly `max`
// entries were returned. `need_stats` is ignored (POC).
int MotrUser::list_buckets(const DoutPrefixProvider *dpp, const string& marker,
    const string& end_marker, uint64_t max, bool need_stats,
    BucketList &buckets, optional_yield y)
{
  int rc;
  vector<string> keys(max);
  vector<bufferlist> vals(max);
  bool is_truncated = false;

  ldpp_dout(dpp, 20) <<__func__<< ": list_user_buckets: marker=" << marker
                    << " end_marker=" << end_marker
                    << " max=" << max << dendl;

  // Retrieve all `max` number of pairs.
  buckets.clear();
  string user_info_iname = "motr.rgw.user.info." + info.user_id.to_str();
  // keys[0] seeds the NEXT query: iteration starts at `marker`.
  keys[0] = marker;
  rc = store->next_query_by_name(user_info_iname, keys, vals);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << "ERROR: NEXT query failed. " << rc << dendl;
    return rc;
  }

  // Process the returned pairs to add into BucketList.
  uint64_t bcount = 0;
  for (const auto& bl: vals) {
    // An empty value marks the end of valid entries.
    if (bl.length() == 0)
      break;

    RGWBucketEnt ent;
    auto iter = bl.cbegin();
    ent.decode(iter);

    std::time_t ctime = ceph::real_clock::to_time_t(ent.creation_time);
    ldpp_dout(dpp, 20) << "got creation time: << " << std::put_time(std::localtime(&ctime), "%F %T") << dendl;

    if (!end_marker.empty() &&
        end_marker.compare(ent.bucket.marker) <= 0)
      break;

    buckets.add(std::make_unique<MotrBucket>(this->store, ent, this));
    bcount++;
  }
  if (bcount == max)
    is_truncated = true;
  buckets.set_truncated(is_truncated);

  return 0;
}

// Create bucket `b` for this user, unless it already exists.
// On an existing bucket sets *existed = true and returns -EEXIST; on
// success stores the bucket info, creates the bucket and multipart
// indices, links the bucket into the user's info index and hands the
// bucket back via `bucket_out`.
int MotrUser::create_bucket(const DoutPrefixProvider* dpp,
    const rgw_bucket& b,
    const std::string& zonegroup_id,
    rgw_placement_rule& placement_rule,
    std::string& swift_ver_location,
    const RGWQuotaInfo* pquota_info,
    const RGWAccessControlPolicy& policy,
    Attrs& attrs,
    RGWBucketInfo& info,
    obj_version& ep_objv,
    bool exclusive,
    bool obj_lock_enabled,
    bool* existed,
    req_info& req_info,
    std::unique_ptr<Bucket>* bucket_out,
    optional_yield y)
{
  int ret;
  std::unique_ptr<Bucket> bucket;

  // Look up the bucket. Create it if it doesn't exist.
  ret = this->store->get_bucket(dpp, this, b, &bucket, y);
  if (ret < 0 && ret != -ENOENT)
    return ret;

  if (ret != -ENOENT) {
    *existed = true;
    // if (swift_ver_location.empty()) {
    //   swift_ver_location = bucket->get_info().swift_ver_location;
    // }
    // placement_rule.inherit_from(bucket->get_info().placement_rule);

    // TODO: ACL policy
    // // don't allow changes to the acl policy
    //RGWAccessControlPolicy old_policy(ctx());
    //int rc = rgw_op_get_bucket_policy_from_attr(
    //           dpp, this, u, bucket->get_attrs(), &old_policy, y);
    //if (rc >= 0 && old_policy != policy) {
    //  bucket_out->swap(bucket);
    //  return -EEXIST;
    //}
  } else {
    // New bucket: POC hard-codes the default placement rule.
    placement_rule.name = "default";
    placement_rule.storage_class = "STANDARD";
    bucket = std::make_unique<MotrBucket>(store, b, this);
    bucket->set_attrs(attrs);
    *existed = false;
  }

  if (!*existed){
    // TODO: how to handle zone and multi-site.
    info.placement_rule = placement_rule;
    info.bucket = b;
    info.owner = this->get_info().user_id;
    info.zonegroup = zonegroup_id;
    if (obj_lock_enabled)
      info.flags = BUCKET_VERSIONED | BUCKET_OBJ_LOCK_ENABLED;
    bucket->set_version(ep_objv);
    bucket->get_info() = info;

    // Create a new bucket: (1) Add a key/value pair in the
    // bucket instance index. (2) Create a new bucket index.
    MotrBucket* mbucket = static_cast<MotrBucket*>(bucket.get());
    // NOTE(review): `x ?: y` is the GNU omitted-middle-operand extension —
    // run each step only while the previous one returned 0.
    ret = mbucket->put_info(dpp, y, ceph::real_time())? :
          mbucket->create_bucket_index() ? :
          mbucket->create_multipart_indices();
    if (ret < 0)
      ldpp_dout(dpp, 0) << "ERROR: failed to create bucket indices! " << ret << dendl;

    // Insert the bucket entry into the user info index.
    ret = mbucket->link_user(dpp, this, y);
    if (ret < 0)
      ldpp_dout(dpp, 0) << "ERROR: failed to add bucket entry! " << ret << dendl;
  } else {
    return -EEXIST;
    // bucket->set_version(ep_objv);
    // bucket->get_info() = info;
  }

  bucket_out->swap(bucket);

  return ret;
}

// Attribute refresh — not implemented (stub).
int MotrUser::read_attrs(const DoutPrefixProvider* dpp, optional_yield y)
{
  return 0;
}

// User storage stats — not implemented (stub).
int MotrUser::read_stats(const DoutPrefixProvider *dpp,
    optional_yield y, RGWStorageStats* stats,
    ceph::real_time *last_stats_sync,
    ceph::real_time *last_stats_update)
{
  return 0;
}

/* stats - Not for first pass */
int MotrUser::read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB *cb)
{
  return 0;
}

// Stats flush — not implemented (stub).
int MotrUser::complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
  return 0;
}

// Usage log read — not implemented (stub).
int MotrUser::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch,
    uint64_t end_epoch, uint32_t max_entries,
    bool *is_truncated, RGWUsageIter& usage_iter,
    map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
  return 0;
}

// Usage log trim — not implemented (stub).
int MotrUser::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch)
{
  return 0;
}

// Load user metadata keyed by user id: first try the user cache, then
// fall back to the global Motr users index (and refill the cache).
// Fills `info`, optionally `attrs` and the version tracker, and records
// the user's access keys in access_key_tracker.
int MotrUser::load_user_from_idx(const DoutPrefixProvider *dpp,
                              MotrStore *store,
                              RGWUserInfo& info, map<string, bufferlist> *attrs,
                              RGWObjVersionTracker *objv_tr)
{
  struct MotrUserInfo muinfo;
  bufferlist bl;
  ldpp_dout(dpp, 20) << "info.user_id.id = " << info.user_id.id << dendl;
  if (store->get_user_cache()->get(dpp, info.user_id.id, bl)) {
    // Cache misses
    int rc = store->do_idx_op_by_name(RGW_MOTR_USERS_IDX_NAME,
                                      M0_IC_GET, info.user_id.to_str(), bl);
    ldpp_dout(dpp, 20) << "do_idx_op_by_name() = " << rc << dendl;
    if (rc < 0)
      return rc;

    // Put into cache.
store->get_user_cache()->put(dpp, info.user_id.id, bl);
  }

  bufferlist& blr = bl;
  auto iter = blr.cbegin();
  muinfo.decode(iter);

  info = muinfo.info;
  if (attrs)
    *attrs = muinfo.attrs;
  if (objv_tr) {
    objv_tr->read_version = muinfo.user_version;
    objv_tracker.read_version = objv_tr->read_version;
  }

  // Remember every access key the user currently has, so store_user()
  // can later detect keys that were removed.
  if (!info.access_keys.empty()) {
    for(auto key : info.access_keys) {
      access_key_tracker.insert(key.first);
    }
  }

  return 0;
}

// Load this user's metadata into the object's own fields.
int MotrUser::load_user(const DoutPrefixProvider *dpp,
                        optional_yield y)
{
  ldpp_dout(dpp, 20) << "load user: user id =   " << info.user_id.to_str() << dendl;
  return load_user_from_idx(dpp, store, info, &attrs, &objv_tracker);
}

// Create the per-user info index that lists the user's buckets.
int MotrUser::create_user_info_idx()
{
  string user_info_iname = "motr.rgw.user.info." + info.user_id.to_str();
  return store->create_motr_idx_by_name(user_info_iname);
}

// Merge `new_attrs` over the current attrs and persist the user record.
int MotrUser::merge_and_store_attrs(const DoutPrefixProvider* dpp,
                                    Attrs& new_attrs, optional_yield y)
{
  for (auto& it : new_attrs)
    attrs[it.first] = it.second;

  return store_user(dpp, y, false);
}

// Persist this user's metadata into the Motr users index.
// Bumps the object version (optimistic concurrency: returns -ECANCELED
// on a version mismatch, or the current rc when `exclusive` and the user
// already exists), stores the first access key and the email mapping,
// creates the per-user info index, and refreshes the user cache.
int MotrUser::store_user(const DoutPrefixProvider* dpp,
                         optional_yield y, bool exclusive, RGWUserInfo* old_info)
{
  bufferlist bl;
  struct MotrUserInfo muinfo;
  RGWUserInfo orig_info;
  RGWObjVersionTracker objv_tr = {};
  obj_version& obj_ver = objv_tr.read_version;

  ldpp_dout(dpp, 20) << "Store_user(): User = " << info.user_id.id << dendl;
  orig_info.user_id = info.user_id;
  // XXX: we open and close motr idx 2 times in this method:
  // 1) on load_user_from_idx() here and 2) on do_idx_op_by_name(PUT) below.
  // Maybe this can be optimised later somewhow.
  int rc = load_user_from_idx(dpp, store, orig_info, nullptr, &objv_tr);
  ldpp_dout(dpp, 10) << "Get user: rc = " << rc << dendl;

  // Check if the user already exists
  if (rc == 0 && obj_ver.ver > 0) {
    if (old_info)
      *old_info = orig_info;
    if (obj_ver.ver != objv_tracker.read_version.ver) {
      rc = -ECANCELED;
      ldpp_dout(dpp, 0) << "ERROR: User Read version mismatch" << dendl;
      goto out;
    }
    if (exclusive)
      return rc;
    obj_ver.ver++;
  } else {
    obj_ver.ver = 1;
    obj_ver.tag = "UserTAG";
  }

  // Insert the user to user info index.
  muinfo.info = info;
  muinfo.attrs = attrs;
  muinfo.user_version = obj_ver;
  muinfo.encode(bl);
  rc = store->do_idx_op_by_name(RGW_MOTR_USERS_IDX_NAME,
                                M0_IC_PUT, info.user_id.to_str(), bl);
  ldpp_dout(dpp, 10) << "Store user to motr index: rc = " << rc << dendl;
  if (rc == 0) {
    objv_tracker.read_version = obj_ver;
    objv_tracker.write_version = obj_ver;
  }

  // Store access key in access key index
  // NOTE(review): only the FIRST access key is stored here — confirm
  // whether multiple keys per user need to be supported.
  if (!info.access_keys.empty()) {
    std::string access_key;
    std::string secret_key;
    std::map<std::string, RGWAccessKey>::const_iterator iter = info.access_keys.begin();
    const RGWAccessKey& k = iter->second;
    access_key = k.id;
    secret_key = k.key;
    MotrAccessKey MGWUserKeys(access_key, secret_key, info.user_id.to_str());
    store->store_access_key(dpp, y, MGWUserKeys);
    access_key_tracker.insert(access_key);
  }

  // Check if any key need to be deleted
  if (access_key_tracker.size() != info.access_keys.size()) {
    std::string key_for_deletion;
    for (auto key : access_key_tracker) {
      if (!info.get_key(key)) {
        key_for_deletion = key;
        ldpp_dout(dpp, 0) << "Deleting access key: " << key_for_deletion << dendl;
        // NOTE(review): delete_access_key()'s return value is discarded,
        // so the `rc < 0` check below tests the stale rc from the PUT
        // above — likely a bug; also only the last key_for_deletion is
        // erased from the tracker.
        store->delete_access_key(dpp, y, key_for_deletion);
        if (rc < 0) {
          ldpp_dout(dpp, 0) << "Unable to delete access key" << rc << dendl;
        }
      }
    }
    if(rc >= 0){
      access_key_tracker.erase(key_for_deletion);
    }
  }

  // Store the email -> user id mapping.
  if (!info.user_email.empty()) {
    MotrEmailInfo MGWEmailInfo(info.user_id.to_str(), info.user_email);
    store->store_email_info(dpp, y, MGWEmailInfo);
  }

  // Create user info index to store all buckets that are belong
  // to this bucket.
  rc = create_user_info_idx();
  if (rc < 0 && rc != -EEXIST) {
    ldpp_dout(dpp, 0) << "Failed to create user info index: rc = " << rc << dendl;
    goto out;
  }

  // Put the user info into cache.
  rc = store->get_user_cache()->put(dpp, info.user_id.id, bl);

out:
  return rc;
}

// Delete this user: cache entry, access keys, email mapping, the
// per-user info index, and finally the entry in the global users index.
int MotrUser::remove_user(const DoutPrefixProvider* dpp, optional_yield y)
{
  // Remove user info from cache
  // Delete access keys for user
  // Delete user info
  // Delete user from user index
  // Delete email for user - TODO
  bufferlist bl;
  int rc;
  // Remove the user info from cache.
  store->get_user_cache()->remove(dpp, info.user_id.id);

  // Delete all access key of user
  if (!info.access_keys.empty()) {
    for(auto acc_key = info.access_keys.begin(); acc_key != info.access_keys.end(); acc_key++) {
      auto access_key = acc_key->first;
      rc = store->delete_access_key(dpp, y, access_key);
      // TODO
      // Check error code for access_key does not exist
      // Continue to next step only if delete failed because key doesn't exists
      if (rc < 0){
        ldpp_dout(dpp, 0) << "Unable to delete access key" << rc << dendl;
      }
    }
  }

  //Delete email id
  if (!info.user_email.empty()) {
    rc = store->do_idx_op_by_name(RGW_IAM_MOTR_EMAIL_KEY,
                                  M0_IC_DEL, info.user_email, bl);
    if (rc < 0 && rc != -ENOENT) {
      ldpp_dout(dpp, 0) << "Unable to delete email id " << rc << dendl;
    }
  }

  // Delete user info index
  string user_info_iname = "motr.rgw.user.info."
+ info.user_id.to_str();
  store->delete_motr_idx_by_name(user_info_iname);
  ldpp_dout(dpp, 10) << "Deleted user info index - " << user_info_iname << dendl;

  // Delete user from user index
  rc = store->do_idx_op_by_name(RGW_MOTR_USERS_IDX_NAME,
                                M0_IC_DEL, info.user_id.to_str(), bl);
  if (rc < 0){
    ldpp_dout(dpp, 0) << "Unable to delete user from user index " << rc << dendl;
    return rc;
  }

  // TODO
  // Delete email for user
  // rc = store->do_idx_op_by_name(RGW_IAM_MOTR_EMAIL_KEY,
  //                               M0_IC_DEL, info.user_email, bl);
  // if (rc < 0){
  //   ldpp_dout(dpp, 0) << "Unable to delete email for user" << rc << dendl;
  //   return rc;
  // }

  return 0;
}

// MFA verification — not implemented: always reports not verified.
int MotrUser::verify_mfa(const std::string& mfa_str, bool* verified,
                         const DoutPrefixProvider *dpp, optional_yield y)
{
  *verified = false;
  return 0;
}

// Delete this bucket and all of its backing state, step by step:
// objects (only when delete_children), multipart uploads and index,
// user stats sync, user link, bucket index, bucket instance record,
// and optionally forwards the request to the master zone.
int MotrBucket::remove_bucket(const DoutPrefixProvider *dpp, bool delete_children, bool forward_to_master, req_info* req_info, optional_yield y)
{
  int ret;

  ldpp_dout(dpp, 20) << "remove_bucket Entry=" << info.bucket.name << dendl;

  // Refresh info
  ret = load_bucket(dpp, y);
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: remove_bucket load_bucket failed rc=" << ret << dendl;
    return ret;
  }

  ListParams params;
  params.list_versions = true;
  params.allow_unordered = true;

  ListResults results;

  // 1. Check if Bucket has objects.
  // If bucket contains objects and delete_children is true, delete all objects.
  // Else throw error that bucket is not empty.
  do {
    results.objs.clear();

    // Check if bucket has objects.
    ret = list(dpp, params, 1000, results, y);
    if (ret < 0) {
      return ret;
    }

    // If result contains entries, bucket is not empty.
    if (!results.objs.empty() && !delete_children) {
      ldpp_dout(dpp, 0) << "ERROR: could not remove non-empty bucket " << info.bucket.name << dendl;
      return -ENOTEMPTY;
    }

    for (const auto& obj : results.objs) {
      rgw_obj_key key(obj.key);
      /* xxx dang */
      ret = rgw_remove_object(dpp, store, this, key);
      if (ret < 0 && ret != -ENOENT) {
        ldpp_dout(dpp, 0) << "ERROR: remove_bucket rgw_remove_object failed rc=" << ret << dendl;
        return ret;
      }
    }
  } while(results.is_truncated);

  // 2. Abort Mp uploads on the bucket.
  ret = abort_multiparts(dpp, store->ctx());
  if (ret < 0) {
    return ret;
  }

  // 3. Remove mp index??
  string bucket_multipart_iname = "motr.rgw.bucket." + info.bucket.name + ".multiparts";
  ret = store->delete_motr_idx_by_name(bucket_multipart_iname);
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: remove_bucket failed to remove multipart index rc=" << ret << dendl;
    return ret;
  }

  // 4. Sync user stats.
  ret = this->sync_user_stats(dpp, y);
  if (ret < 0) {
    ldout(store->ctx(), 1) << "WARNING: failed sync user stats before bucket delete. ret=" << ret << dendl;
  }

  // 5. Remove the bucket from user info index. (unlink user)
  ret = this->unlink_user(dpp, owner, y);
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: remove_bucket unlink_user failed rc=" << ret << dendl;
    return ret;
  }

  // 6. Remove bucket index.
  string bucket_index_iname = "motr.rgw.bucket.index." + info.bucket.name;
  ret = store->delete_motr_idx_by_name(bucket_index_iname);
  if (ret < 0) {
    // NOTE(review): message says "unlink_user" but this step deletes the
    // bucket index — looks like a copy/paste of the step-5 message.
    ldpp_dout(dpp, 0) << "ERROR: remove_bucket unlink_user failed rc=" << ret << dendl;
    return ret;
  }

  // 7. Remove bucket instance info.
  bufferlist bl;
  ret = store->get_bucket_inst_cache()->remove(dpp, info.bucket.name);
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: remove_bucket failed to remove bucket instance from cache rc="
      << ret << dendl;
    return ret;
  }

  ret = store->do_idx_op_by_name(RGW_MOTR_BUCKET_INST_IDX_NAME,
                                 M0_IC_DEL, info.bucket.name, bl);
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: remove_bucket failed to remove bucket instance rc="
      << ret << dendl;
    return ret;
  }

  // TODO :
  // 8. Remove Notifications
  // if bucket has notification definitions associated with it
  // they should be removed (note that any pending notifications on the bucket are still going to be sent)

  // 9. Forward request to master.
  if (forward_to_master) {
    bufferlist in_data;
    ret = store->forward_request_to_master(dpp, owner, &bucket_version, in_data, nullptr, *req_info, y);
    if (ret < 0) {
      if (ret == -ENOENT) {
        /* adjust error, we want to return with NoSuchBucket and not
         * NoSuchKey */
        ret = -ERR_NO_SUCH_BUCKET;
      }
      ldpp_dout(dpp, 0) << "ERROR: Forward to master failed. ret=" << ret << dendl;
      return ret;
    }
  }

  ldpp_dout(dpp, 20) << "remove_bucket Exit=" << info.bucket.name << dendl;

  return ret;
}

// GC-bypassing bucket removal — not implemented (stub).
int MotrBucket::remove_bucket_bypass_gc(int concurrent_max, bool
        keep_index_consistent,
        optional_yield y, const
        DoutPrefixProvider *dpp)
{
  return 0;
}

// Persist this bucket's info (attrs, mtime, version) into the bucket
// instance index, keyed by bucket name; refresh the instance cache on
// success. `exclusive` maps to a create-only PUT.
int MotrBucket::put_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time _mtime)
{
  bufferlist bl;
  struct MotrBucketInfo mbinfo;

  ldpp_dout(dpp, 20) << "put_info(): bucket_id=" << info.bucket.bucket_id << dendl;
  mbinfo.info = info;
  mbinfo.bucket_attrs = attrs;
  mbinfo.mtime = _mtime;
  mbinfo.bucket_version = bucket_version;
  mbinfo.encode(bl);

  // Insert bucket instance using bucket's marker (string).
int rc = store->do_idx_op_by_name(RGW_MOTR_BUCKET_INST_IDX_NAME,
                                    M0_IC_PUT, info.bucket.name, bl, !exclusive);
  if (rc == 0)
    store->get_bucket_inst_cache()->put(dpp, info.bucket.name, bl);

  return rc;
}

// Load this bucket's instance info by name: try the instance cache
// first, otherwise read the bucket instance index and refill the cache.
// The placement rule is overwritten with the hard-coded POC default.
// `get_stats` is ignored.
int MotrBucket::load_bucket(const DoutPrefixProvider *dpp, optional_yield y, bool get_stats)
{
  // Get bucket instance using bucket's name (string). or bucket id?
  bufferlist bl;
  if (store->get_bucket_inst_cache()->get(dpp, info.bucket.name, bl)) {
    // Cache misses.
    ldpp_dout(dpp, 20) << "load_bucket(): name=" << info.bucket.name << dendl;
    int rc = store->do_idx_op_by_name(RGW_MOTR_BUCKET_INST_IDX_NAME,
                                      M0_IC_GET, info.bucket.name, bl);
    ldpp_dout(dpp, 20) << "load_bucket(): rc=" << rc << dendl;
    if (rc < 0)
      return rc;
    store->get_bucket_inst_cache()->put(dpp, info.bucket.name, bl);
  }

  struct MotrBucketInfo mbinfo;
  bufferlist& blr = bl;
  auto iter = blr.cbegin();
  mbinfo.decode(iter); //Decode into MotrBucketInfo.

  info = mbinfo.info;
  ldpp_dout(dpp, 20) << "load_bucket(): bucket_id=" << info.bucket.bucket_id << dendl;
  rgw_placement_rule placement_rule;
  placement_rule.name = "default";
  placement_rule.storage_class = "STANDARD";
  info.placement_rule = placement_rule;

  attrs = mbinfo.bucket_attrs;
  mtime = mbinfo.mtime;
  bucket_version = mbinfo.bucket_version;

  return 0;
}

// Record this bucket in `new_user`'s per-user info index (an
// RGWBucketEnt keyed by bucket name), stamping a creation time if none
// is set yet.
int MotrBucket::link_user(const DoutPrefixProvider* dpp, User* new_user, optional_yield y)
{
  bufferlist bl;
  RGWBucketEnt new_bucket;
  ceph::real_time creation_time = get_creation_time();

  // RGWBucketEnt or cls_user_bucket_entry is the structure that is stored.
  new_bucket.bucket = info.bucket;
  new_bucket.size = 0;
  if (real_clock::is_zero(creation_time))
    creation_time = ceph::real_clock::now();
  new_bucket.creation_time = creation_time;
  new_bucket.encode(bl);
  std::time_t ctime = ceph::real_clock::to_time_t(new_bucket.creation_time);
  ldpp_dout(dpp, 20) << "got creation time: << " << std::put_time(std::localtime(&ctime), "%F %T") << dendl;

  // Insert the user into the user info index.
  string user_info_idx_name = "motr.rgw.user.info." + new_user->get_info().user_id.to_str();
  return store->do_idx_op_by_name(user_info_idx_name,
                                  M0_IC_PUT, info.bucket.name, bl);
}

// Remove this bucket's entry from `new_user`'s per-user info index.
int MotrBucket::unlink_user(const DoutPrefixProvider* dpp, User* new_user, optional_yield y)
{
  // Remove the user into the user info index.
  bufferlist bl;
  string user_info_idx_name = "motr.rgw.user.info." + new_user->get_info().user_id.to_str();
  return store->do_idx_op_by_name(user_info_idx_name,
                                  M0_IC_DEL, info.bucket.name, bl);
}

/* stats - Not for first pass */
int MotrBucket::read_stats(const DoutPrefixProvider *dpp,
    const bucket_index_layout_generation& idx_layout,
    int shard_id, std::string *bucket_ver, std::string *master_ver,
    std::map<RGWObjCategory, RGWStorageStats>& stats,
    std::string *max_marker, bool *syncstopped)
{
  return 0;
}

// Create the per-bucket object index.
int MotrBucket::create_bucket_index()
{
  string bucket_index_iname = "motr.rgw.bucket.index." + info.bucket.name;
  return store->create_motr_idx_by_name(bucket_index_iname);
}

// Create the per-bucket multipart-upload index.
int MotrBucket::create_multipart_indices()
{
  int rc;

  // Bucket multipart index stores in-progress multipart uploads.
  // Key is the object name + upload_id, value is a rgw_bucket_dir_entry.
  // An entry is inserted when a multipart upload is initialised (
  // MotrMultipartUpload::init()) and will be removed when the upload
  // is completed (MotrMultipartUpload::complete()).
  // MotrBucket::list_multiparts() will scan this index to return all
  // in-progress multipart uploads in the bucket.
  string bucket_multipart_iname = "motr.rgw.bucket." + info.bucket.name + ".multiparts";
  rc = store->create_motr_idx_by_name(bucket_multipart_iname);
  if (rc < 0) {
    ldout(store->cctx, 0) << "Failed to create bucket multipart index " << bucket_multipart_iname << dendl;
    return rc;
  }

  return 0;
}

// Async bucket stats — not implemented (stub).
int MotrBucket::read_stats_async(const DoutPrefixProvider *dpp,
                                 const bucket_index_layout_generation& idx_layout,
                                 int shard_id, RGWGetBucketStats_CB *ctx)
{
  return 0;
}

// User stats sync — not implemented (stub).
int MotrBucket::sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y)
{
  return 0;
}

// Container stats update — not implemented (stub).
int MotrBucket::update_container_stats(const DoutPrefixProvider *dpp)
{
  return 0;
}

// Shard check — not implemented (stub).
int MotrBucket::check_bucket_shards(const DoutPrefixProvider *dpp)
{
  return 0;
}

int MotrBucket::chown(const DoutPrefixProvider *dpp, User& new_user, optional_yield y)
{
  // TODO: update bucket with new owner
  return 0;
}

/* Make sure to call load_bucket() if you need it first */
bool MotrBucket::is_owner(User* user)
{
  return (info.owner.compare(user->get_id()) == 0);
}

int MotrBucket::check_empty(const DoutPrefixProvider *dpp, optional_yield y)
{
  /* XXX: Check if bucket contains any objects */
  return 0;
}

int MotrBucket::check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota, uint64_t obj_size,
    optional_yield y, bool check_size_only)
{
  /* Not Handled in the first pass as stats are also needed */
  return 0;
}

// Merge `new_attrs` over the current attrs and persist the bucket info.
int MotrBucket::merge_and_store_attrs(const DoutPrefixProvider *dpp, Attrs& new_attrs, optional_yield y)
{
  for (auto& it : new_attrs)
    attrs[it.first] = it.second;

  return put_info(dpp, y, ceph::real_time());
}

// Info refresh — not implemented (stub).
int MotrBucket::try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime)
{
  return 0;
}

/* XXX: usage and stats not supported in the first pass */
int MotrBucket::read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch,
    uint32_t max_entries, bool *is_truncated,
    RGWUsageIter& usage_iter,
    map<rgw_user_bucket, rgw_usage_log_entry>& usage)
{
  return 0;
}

int MotrBucket::trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t
end_epoch)
{
  return 0;
}

int MotrBucket::remove_objs_from_index(const DoutPrefixProvider *dpp, std::list<rgw_obj_index_key>& objs_to_unlink)
{
  /* XXX: CHECK: Unlike RadosStore, there is no seperate bucket index table.
   * Delete all the object in the list from the object table of this
   * bucket
   */
  return 0;
}

int MotrBucket::check_index(const DoutPrefixProvider *dpp, std::map<RGWObjCategory, RGWStorageStats>& existing_stats, std::map<RGWObjCategory, RGWStorageStats>& calculated_stats)
{
  /* XXX: stats not supported yet */
  return 0;
}

int MotrBucket::rebuild_index(const DoutPrefixProvider *dpp)
{
  /* there is no index table in dbstore. Not applicable */
  return 0;
}

int MotrBucket::set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout)
{
  /* XXX: CHECK: set tag timeout for all the bucket objects? */
  return 0;
}

int MotrBucket::purge_instance(const DoutPrefixProvider *dpp)
{
  /* XXX: CHECK: for dbstore only single instance supported.
   * Remove all the objects for that instance? Anything extra needed?
   */
  return 0;
}

// Store `acl` as the in-memory ACL and encode it into a local attrs
// copy. NOTE(review): the local `attrs` shadows the member and is never
// persisted — the encoded ACL is dropped (see TODO below).
int MotrBucket::set_acl(const DoutPrefixProvider *dpp, RGWAccessControlPolicy &acl, optional_yield y)
{
  int ret = 0;
  bufferlist aclbl;

  acls = acl;
  acl.encode(aclbl);

  Attrs attrs = get_attrs();
  attrs[RGW_ATTR_ACL] = aclbl;

  // TODO: update bucket entry with the new attrs

  return ret;
}

// Factory for objects belonging to this bucket.
std::unique_ptr<Object> MotrBucket::get_object(const rgw_obj_key& k)
{
  return std::make_unique<MotrObject>(this->store, k, this);
}

// List up to `max` entries of this bucket's object index starting at
// params.marker (or params.prefix when no marker). Empty values are
// reported as common prefixes; visible (or all, when list_versions)
// entries go into results.objs.
int MotrBucket::list(const DoutPrefixProvider *dpp, ListParams& params, int max, ListResults& results, optional_yield y)
{
  int rc;
  vector<string> keys(max);
  vector<bufferlist> vals(max);

  ldpp_dout(dpp, 20) << "bucket=" << info.bucket.name
                    << " prefix=" << params.prefix
                    << " marker=" << params.marker
                    << " max=" << max << dendl;

  // Retrieve all `max` number of pairs.
  string bucket_index_iname = "motr.rgw.bucket.index." + info.bucket.name;
  keys[0] = params.marker.empty() ? params.prefix : params.marker.get_oid();
  rc = store->next_query_by_name(bucket_index_iname, keys, vals, params.prefix,
                                 params.delim);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << "ERROR: NEXT query failed. " << rc << dendl;
    return rc;
  }

  // Process the returned pairs to add into ListResults.
  int i = 0;
  for (; i < rc; ++i) {
    if (vals[i].length() == 0) {
      results.common_prefixes[keys[i]] = true;
    } else {
      rgw_bucket_dir_entry ent;
      auto iter = vals[i].cbegin();
      ent.decode(iter);
      if (params.list_versions || ent.is_visible())
        results.objs.emplace_back(std::move(ent));
    }
  }

  if (i == max) {
    results.is_truncated = true;
    // NOTE(review): next_marker is the last key plus a trailing space —
    // presumably to resume strictly after it; confirm against the NEXT
    // query's key ordering.
    results.next_marker = keys[max - 1] + " ";
  } else {
    results.is_truncated = false;
  }

  return 0;
}

// List up to `max_uploads` in-progress multipart uploads from this
// bucket's ".multiparts" index, starting at `marker` and filtered by
// `prefix`. Updates `marker` to the last returned key. Common prefixes
// and `delim` are not handled (POC).
int MotrBucket::list_multiparts(const DoutPrefixProvider *dpp,
      const string& prefix,
      string& marker,
      const string& delim,
      const int& max_uploads,
      vector<std::unique_ptr<MultipartUpload>>& uploads,
      map<string, bool> *common_prefixes,
      bool *is_truncated)
{
  int rc;
  vector<string> key_vec(max_uploads);
  vector<bufferlist> val_vec(max_uploads);

  string bucket_multipart_iname = "motr.rgw.bucket." + this->get_name() + ".multiparts";
  key_vec[0].clear();
  key_vec[0].assign(marker.begin(), marker.end());
  rc = store->next_query_by_name(bucket_multipart_iname, key_vec, val_vec);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << "ERROR: NEXT query failed. " << rc << dendl;
    return rc;
  }

  // Process the returned pairs to add into ListResults.
  // The POC can only support listing all objects or selecting
  // with prefix.
  int ocount = 0;
  rgw_obj_key last_obj_key;
  *is_truncated = false;
  for (const auto& bl: val_vec) {
    if (bl.length() == 0)
      break;

    rgw_bucket_dir_entry ent;
    auto iter = bl.cbegin();
    ent.decode(iter);

    if (prefix.size() &&
        (0 != ent.key.name.compare(0, prefix.size(), prefix))) {
      ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ <<
        ": skippping \"" << ent.key <<
        "\" because doesn't match prefix" << dendl;
      continue;
    }

    rgw_obj_key key(ent.key);
    uploads.push_back(this->get_multipart_upload(key.name));
    last_obj_key = key;
    ocount++;
    if (ocount == max_uploads) {
      *is_truncated = true;
      break;
    }
  }
  marker = last_obj_key.name;

  // What is common prefix? We don't handle it for now.

  return 0;
}

// Abort all multipart uploads in the bucket — not implemented (stub).
int MotrBucket::abort_multiparts(const DoutPrefixProvider *dpp, CephContext *cct)
{
  return 0;
}

void MotrStore::finalize(void)
{
  // close connection with motr
  m0_client_fini(this->instance, true);
}

// Return the zonegroup endpoint: the first configured endpoint, else the
// master zone's first endpoint, else the empty string.
const std::string& MotrZoneGroup::get_endpoint() const
{
  if (!group.endpoints.empty()) {
    return group.endpoints.front();
  } else {
    // use zonegroup's master zone endpoints
    auto z = group.zones.find(group.master_zone);
    if (z != group.zones.end() && !z->second.endpoints.empty()) {
      return z->second.endpoints.front();
    }
  }
  return empty;
}

bool MotrZoneGroup::placement_target_exists(std::string& target) const
{
  return !!group.placement_targets.count(target);
}

// Collect the names of all placement targets configured for the group.
int MotrZoneGroup::get_placement_target_names(std::set<std::string>& names) const
{
  for (const auto& target : group.placement_targets) {
    names.emplace(target.second.name);
  }
  return 0;
}

// Look up the placement tier for `rule` (target by rule name, then tier
// by storage class). NOTE: definition is truncated at the end of this
// chunk — the remainder lies outside the visible source.
int MotrZoneGroup::get_placement_tier(const rgw_placement_rule& rule,
                                      std::unique_ptr<PlacementTier>* tier)
{
  std::map<std::string, RGWZoneGroupPlacementTarget>::const_iterator titer;
  titer = group.placement_targets.find(rule.name);
  if (titer == group.placement_targets.end()) {
    return -ENOENT;
  }

  const auto& target_rule = titer->second;
  std::map<std::string, RGWZoneGroupPlacementTier>::const_iterator ttier;
  ttier = target_rule.tier_targets.find(rule.storage_class);
  if (ttier ==
target_rule.tier_targets.end()) { // not found return -ENOENT; } PlacementTier* t; t = new MotrPlacementTier(store, ttier->second); if (!t) return -ENOMEM; tier->reset(t); return 0; } ZoneGroup& MotrZone::get_zonegroup() { return zonegroup; } const std::string& MotrZone::get_id() { return zone_params->get_id(); } const std::string& MotrZone::get_name() const { return zone_params->get_name(); } bool MotrZone::is_writeable() { return true; } bool MotrZone::get_redirect_endpoint(std::string* endpoint) { return false; } bool MotrZone::has_zonegroup_api(const std::string& api) const { return (zonegroup->api_name == api); } const std::string& MotrZone::get_current_period_id() { return current_period->get_id(); } std::unique_ptr<LuaManager> MotrStore::get_lua_manager() { return std::make_unique<MotrLuaManager>(this); } int MotrObject::get_obj_state(const DoutPrefixProvider* dpp, RGWObjState **_state, optional_yield y, bool follow_olh) { // Get object's metadata (those stored in rgw_bucket_dir_entry). bufferlist bl; if (this->store->get_obj_meta_cache()->get(dpp, this->get_key().get_oid(), bl)) { // Cache misses. string bucket_index_iname = "motr.rgw.bucket.index." + this->get_bucket()->get_name(); int rc = this->store->do_idx_op_by_name(bucket_index_iname, M0_IC_GET, this->get_key().get_oid(), bl); if (rc < 0) { ldpp_dout(dpp, 0) << "Failed to get object's entry from bucket index. " << dendl; return rc; } // Put into cache. this->store->get_obj_meta_cache()->put(dpp, this->get_key().get_oid(), bl); } rgw_bucket_dir_entry ent; bufferlist& blr = bl; auto iter = blr.cbegin(); ent.decode(iter); // Set object's type. this->category = ent.meta.category; // Set object state. 
  state.exists = true;
  state.size = ent.meta.size;
  state.accounted_size = ent.meta.size;
  state.mtime = ent.meta.mtime;

  state.has_attrs = true;
  bufferlist etag_bl;
  string& etag = ent.meta.etag;
  ldpp_dout(dpp, 20) <<__func__<< ": object's etag: " << ent.meta.etag << dendl;
  etag_bl.append(etag);
  state.attrset[RGW_ATTR_ETAG] = etag_bl;

  return 0;
}

MotrObject::~MotrObject() {
  this->close_mobj();
}

//  int MotrObject::read_attrs(const DoutPrefixProvider* dpp, Motr::Object::Read &read_op, optional_yield y, rgw_obj* target_obj)
//  {
//    read_op.params.attrs = &attrs;
//    read_op.params.target_obj = target_obj;
//    read_op.params.obj_size = &obj_size;
//    read_op.params.lastmod = &mtime;
//
//    return read_op.prepare(dpp);
//  }

int MotrObject::set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y)
{
  // TODO: implement
  ldpp_dout(dpp, 20) <<__func__<< ": MotrObject::set_obj_attrs()" << dendl;
  return 0;
}

// Load the object's attrs (decoded after the dir entry in the index
// value). MultiMeta (multipart-meta) objects are skipped.
int MotrObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj)
{
  if (this->category == RGWObjCategory::MultiMeta)
    return 0;

  string bname, key;
  if (target_obj) {
    bname = target_obj->bucket.name;
    key   = target_obj->key.get_oid();
  } else {
    bname = this->get_bucket()->get_name();
    key   = this->get_key().get_oid();
  }
  ldpp_dout(dpp, 20) << "MotrObject::get_obj_attrs(): " << bname << "/" << key << dendl;

  // Get object's metadata (those stored in rgw_bucket_dir_entry).
  bufferlist bl;
  if (this->store->get_obj_meta_cache()->get(dpp, key, bl)) {
    // Cache misses.
    string bucket_index_iname = "motr.rgw.bucket.index." + bname;
    int rc = this->store->do_idx_op_by_name(bucket_index_iname, M0_IC_GET, key, bl);
    if (rc < 0) {
      ldpp_dout(dpp, 0) << "Failed to get object's entry from bucket index. " << dendl;
      return rc;
    }

    // Put into cache.
    this->store->get_obj_meta_cache()->put(dpp, key, bl);
  }

  rgw_bucket_dir_entry ent;
  bufferlist& blr = bl;
  auto iter = blr.cbegin();
  ent.decode(iter);
  // The attrs map is serialized right after the dir entry.
  decode(attrs, iter);

  return 0;
}

// Read-modify-write of a single attribute; persistence depends on
// set_obj_attrs(), which is still a TODO above.
int MotrObject::modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp)
{
  rgw_obj target = get_obj();
  int r = get_obj_attrs(y, dpp, &target);
  if (r < 0) {
    return r;
  }
  set_atomic();
  attrs[attr_name] = attr_val;
  return set_obj_attrs(dpp, &attrs, nullptr, y);
}

int MotrObject::delete_obj_attrs(const DoutPrefixProvider* dpp, const char* attr_name, optional_yield y)
{
  rgw_obj target = get_obj();
  Attrs rmattr;
  bufferlist bl;

  set_atomic();
  rmattr[attr_name] = bl;
  return set_obj_attrs(dpp, nullptr, &rmattr, y);
}

bool MotrObject::is_expired() {
  return false;
}

// Taken from rgw_rados.cc
void MotrObject::gen_rand_obj_instance_name()
{
  enum {OBJ_INSTANCE_LEN = 32};
  char buf[OBJ_INSTANCE_LEN + 1];

  gen_rand_alphanumeric_no_underscore(store->ctx(), buf, OBJ_INSTANCE_LEN);
  state.obj.key.set_instance(buf);
}

int MotrObject::omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid,
    const std::set<std::string>& keys,
    Attrs* vals)
{
  return 0;
}

int MotrObject::omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val,
    bool must_exist, optional_yield y)
{
  return 0;
}

int MotrObject::chown(User& new_user, const DoutPrefixProvider* dpp, optional_yield y)
{
  return 0;
}

std::unique_ptr<MPSerializer> MotrObject::get_serializer(const DoutPrefixProvider *dpp, const std::string& lock_name)
{
  return std::make_unique<MPMotrSerializer>(dpp, store, this, lock_name);
}

int MotrObject::transition(Bucket* bucket,
    const rgw_placement_rule& placement_rule,
    const real_time& mtime,
    uint64_t olh_epoch,
    const DoutPrefixProvider* dpp,
    optional_yield y)
{
  return 0;
}

bool MotrObject::placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2)
{
  /* XXX: support single default zone and zonegroup for now */
  return
true;
}

int MotrObject::dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y, Formatter* f)
{
  return 0;
}

std::unique_ptr<Object::ReadOp> MotrObject::get_read_op()
{
  return std::make_unique<MotrObject::MotrReadOp>(this);
}

MotrObject::MotrReadOp::MotrReadOp(MotrObject *_source) :
  source(_source)
{ }

// Fetch the source object's dir entry, publish its etag/size/mtime on
// the source object, evaluate the HTTP conditional-GET headers
// (If-(Un)Modified-Since, If-Match/If-None-Match), and open the Motr
// object(s) ready for reading.
int MotrObject::MotrReadOp::prepare(optional_yield y, const DoutPrefixProvider* dpp)
{
  int rc;
  ldpp_dout(dpp, 20) <<__func__<< ": bucket=" << source->get_bucket()->get_name() << dendl;

  rgw_bucket_dir_entry ent;
  rc = source->get_bucket_dir_ent(dpp, ent);
  if (rc < 0)
    return rc;

  // Set source object's attrs. The attrs is key/value map and is used
  // in send_response_data() to set attributes, including etag.
  bufferlist etag_bl;
  string& etag = ent.meta.etag;
  ldpp_dout(dpp, 20) <<__func__<< ": object's etag: " << ent.meta.etag << dendl;
  etag_bl.append(etag.c_str(), etag.size());
  source->get_attrs().emplace(std::move(RGW_ATTR_ETAG), std::move(etag_bl));

  source->set_key(ent.key);
  source->set_obj_size(ent.meta.size);
  source->category = ent.meta.category;
  *params.lastmod = ent.meta.mtime;

  if (params.mod_ptr || params.unmod_ptr) {
    // Convert all times go GMT to make them compatible
    obj_time_weight src_weight;
    src_weight.init(*params.lastmod, params.mod_zone_id, params.mod_pg_ver);
    src_weight.high_precision = params.high_precision_time;

    obj_time_weight dest_weight;
    dest_weight.high_precision = params.high_precision_time;

    // Check if-modified-since condition
    if (params.mod_ptr && !params.if_nomatch) {
      dest_weight.init(*params.mod_ptr, params.mod_zone_id, params.mod_pg_ver);
      ldpp_dout(dpp, 10) << "If-Modified-Since: " << dest_weight << " & "
                         << "Last-Modified: " << src_weight << dendl;
      if (!(dest_weight < src_weight)) {
        return -ERR_NOT_MODIFIED;
      }
    }

    // Check if-unmodified-since condition
    if (params.unmod_ptr && !params.if_match) {
      dest_weight.init(*params.unmod_ptr, params.mod_zone_id, params.mod_pg_ver);
      ldpp_dout(dpp, 10) << "If-UnModified-Since: " << dest_weight << " & "
                         << "Last-Modified: " << src_weight << dendl;
      if (dest_weight < src_weight) {
        return -ERR_PRECONDITION_FAILED;
      }
    }
  }

  // Check if-match condition
  if (params.if_match) {
    string if_match_str = rgw_string_unquote(params.if_match);
    ldpp_dout(dpp, 10) << "ETag: " << etag << " & "
                       << "If-Match: " << if_match_str << dendl;
    if (if_match_str.compare(etag) != 0) {
      return -ERR_PRECONDITION_FAILED;
    }
  }

  // Check if-none-match condition
  if (params.if_nomatch) {
    string if_nomatch_str = rgw_string_unquote(params.if_nomatch);
    ldpp_dout(dpp, 10) << "ETag: " << etag << " & "
                       << "If-NoMatch: " << if_nomatch_str << dendl;
    if (if_nomatch_str.compare(etag) == 0) {
      return -ERR_NOT_MODIFIED;
    }
  }

  // Skip opening an empty object.
  if(source->get_obj_size() == 0)
    return 0;

  // Open the object here.
  if (source->category == RGWObjCategory::MultiMeta) {
    ldpp_dout(dpp, 20) <<__func__<< ": open obj parts..." << dendl;
    rc = source->get_part_objs(dpp, this->part_objs)? :
         source->open_part_objs(dpp, this->part_objs);
    return rc;
  } else {
    ldpp_dout(dpp, 20) <<__func__<< ": open object..." << dendl;
    return source->open_mobj(dpp);
  }
}

int MotrObject::MotrReadOp::read(int64_t off, int64_t end, bufferlist& bl, optional_yield y, const DoutPrefixProvider* dpp)
{
  ldpp_dout(dpp, 20) << "MotrReadOp::read(): sync read." << dendl;
  return 0;
}

// RGWGetObj::execute() calls ReadOp::iterate() to read object from 'off' to 'end'.
// The returned data is processed in 'cb' which is a chain of post-processing
// filters such as decompression, de-encryption and sending back data to client
// (RGWGetObj_CB::handle_dta which in turn calls RGWGetObj::get_data_cb() to
// send data back.).
//
// POC implements a simple sync version of iterate() function in which it reads
// a block of data each time and call 'cb' for post-processing.
// Dispatch the read to the multipart or the plain-object path.
int MotrObject::MotrReadOp::iterate(const DoutPrefixProvider* dpp, int64_t off, int64_t end, RGWGetDataCB* cb, optional_yield y)
{
  int rc;

  if (source->category == RGWObjCategory::MultiMeta)
    rc = source->read_multipart_obj(dpp, off, end, cb, part_objs);
  else
    rc = source->read_mobj(dpp, off, end, cb);

  return rc;
}

int MotrObject::MotrReadOp::get_attr(const DoutPrefixProvider* dpp, const char* name, bufferlist& dest, optional_yield y)
{
  //return 0;
  return -ENODATA;
}

std::unique_ptr<Object::DeleteOp> MotrObject::get_delete_op()
{
  return std::make_unique<MotrObject::MotrDeleteOp>(this);
}

MotrObject::MotrDeleteOp::MotrDeleteOp(MotrObject *_source) :
  source(_source)
{ }

// Implementation of DELETE OBJ also requires MotrObject::get_obj_state()
// to retrieve and set object's state from object's metadata.
//
// TODO:
// 1. The POC only remove the object's entry from bucket index and delete
// corresponding Motr objects. It doesn't handle the DeleteOp::params.
// Delete::delete_obj() in rgw_rados.cc shows how rados backend process the
// params.
// 2. Delete an object when its versioning is turned on.
int MotrObject::MotrDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y)
{
  ldpp_dout(dpp, 20) << "delete " << source->get_key().get_oid()
                     << " from " << source->get_bucket()->get_name() << dendl;

  rgw_bucket_dir_entry ent;
  int rc = source->get_bucket_dir_ent(dpp, ent);
  if (rc < 0) {
    return rc;
  }

  //TODO: When integrating with background GC for object deletion,
  // we should consider adding object entry to GC before deleting the metadata.
  // Delete from the cache first.
  source->store->get_obj_meta_cache()->remove(dpp, source->get_key().get_oid());

  // Delete the object's entry from the bucket index.
  bufferlist bl;
  string bucket_index_iname = "motr.rgw.bucket.index." + source->get_bucket()->get_name();
  rc = source->store->do_idx_op_by_name(bucket_index_iname,
                                        M0_IC_DEL, source->get_key().get_oid(), bl);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << "Failed to del object's entry from bucket index. " << dendl;
    return rc;
  }

  // Zero-length objects have no backing Motr object to remove.
  if (ent.meta.size == 0) {
    ldpp_dout(dpp, 0) << __func__ << ": Object size is 0, not deleting motr object." << dendl;
    return 0;
  }

  // Remove the motr objects.
  if (source->category == RGWObjCategory::MultiMeta)
    rc = source->delete_part_objs(dpp);
  else
    rc = source->delete_mobj(dpp);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << "Failed to delete the object from Motr. " << dendl;
    return rc;
  }

  //result.delete_marker = parent_op.result.delete_marker;
  //result.version_id = parent_op.result.version_id;

  return 0;
}

int MotrObject::delete_object(const DoutPrefixProvider* dpp, optional_yield y, bool prevent_versioning)
{
  MotrObject::MotrDeleteOp del_op(this);
  del_op.params.bucket_owner = bucket->get_info().owner;
  del_op.params.versioning_status = bucket->get_info().versioning_status();

  return del_op.delete_obj(dpp, y);
}

int MotrObject::copy_object(User* user,
    req_info* info, const rgw_zone_id& source_zone,
    rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
    rgw::sal::Bucket* src_bucket,
    const rgw_placement_rule& dest_placement,
    ceph::real_time* src_mtime, ceph::real_time* mtime,
    const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
    bool high_precision_time,
    const char* if_match, const char* if_nomatch,
    AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
    RGWObjCategory category, uint64_t olh_epoch,
    boost::optional<ceph::real_time> delete_at,
    std::string* version_id, std::string* tag, std::string* etag,
    void (*progress_cb)(off_t, void *), void* progress_data,
    const DoutPrefixProvider* dpp, optional_yield y)
{
  return 0;
}

int MotrObject::swift_versioning_restore(bool& restored,
    const DoutPrefixProvider* dpp)
{
  return 0;
}

int MotrObject::swift_versioning_copy(const DoutPrefixProvider* dpp, optional_yield y)
{
  return 0;
}
MotrAtomicWriter::MotrAtomicWriter(const DoutPrefixProvider *dpp,
          optional_yield y,
          rgw::sal::Object* obj,
          MotrStore* _store,
          const rgw_user& _owner,
          const rgw_placement_rule *_ptail_placement_rule,
          uint64_t _olh_epoch,
          const std::string& _unique_tag) :
        StoreWriter(dpp, y),
        store(_store),
        owner(_owner),
        ptail_placement_rule(_ptail_placement_rule),
        olh_epoch(_olh_epoch),
        unique_tag(_unique_tag),
        obj(_store, obj->get_key(), obj->get_bucket()),
        // old_obj tracks any pre-existing object under the same key.
        old_obj(_store, obj->get_key(), obj->get_bucket()) {}

static const unsigned MAX_BUFVEC_NR = 256;

// Allocate the Motr buffer/attr/extent vectors used by write(); frees
// them again via cleanup() if any allocation fails.
int MotrAtomicWriter::prepare(optional_yield y)
{
  total_data_size = 0;

  if (obj.is_opened())
    return 0;

  rgw_bucket_dir_entry ent;
  int rc = old_obj.get_bucket_dir_ent(dpp, ent);
  if (rc == 0) {
    ldpp_dout(dpp, 20) << __func__ << ": object exists." << dendl;
  }

  rc = m0_bufvec_empty_alloc(&buf, MAX_BUFVEC_NR) ?:
       m0_bufvec_alloc(&attr, MAX_BUFVEC_NR, 1) ?:
       m0_indexvec_alloc(&ext, MAX_BUFVEC_NR);
  if (rc != 0)
    this->cleanup();

  return rc;
}

// Create a new Motr object for this RGW object: allocate a fresh ufid,
// pick a layout for the expected size `sz`, and run a synchronous
// m0_entity_create op. On success meta.{layout_id,pver} are recorded.
int MotrObject::create_mobj(const DoutPrefixProvider *dpp, uint64_t sz)
{
  if (mobj != nullptr) {
    ldpp_dout(dpp, 0) <<__func__<< "ERROR: object is already opened" << dendl;
    return -EINVAL;
  }

  int rc = m0_ufid_next(&ufid_gr, 1, &meta.oid);
  if (rc != 0) {
    ldpp_dout(dpp, 0) <<__func__<< "ERROR: m0_ufid_next() failed: " << rc << dendl;
    return rc;
  }

  char fid_str[M0_FID_STR_LEN];
  snprintf(fid_str, ARRAY_SIZE(fid_str), U128X_F, U128_P(&meta.oid));
  ldpp_dout(dpp, 20) <<__func__<< ": sz=" << sz << " oid=" << fid_str << dendl;

  // Choose the layout (unit size) that suits an object of size sz.
  int64_t lid = m0_layout_find_by_objsz(store->instance, nullptr, sz);
  M0_ASSERT(lid > 0);

  M0_ASSERT(mobj == nullptr);
  mobj = new m0_obj();
  m0_obj_init(mobj, &store->container.co_realm, &meta.oid, lid);

  struct m0_op *op = nullptr;
  mobj->ob_entity.en_flags |= M0_ENF_META;
  rc = m0_entity_create(nullptr, &mobj->ob_entity, &op);
  if (rc != 0) {
    this->close_mobj();
    ldpp_dout(dpp, 0) << "ERROR: m0_entity_create() failed: " << rc << dendl;
    return rc;
  }
  ldpp_dout(dpp, 20) <<__func__<< ": call m0_op_launch()..." << dendl;
  m0_op_launch(&op, 1);
  rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?:
       m0_rc(op);
  m0_op_fini(op);
  m0_op_free(op);

  if (rc != 0) {
    this->close_mobj();
    ldpp_dout(dpp, 0) << "ERROR: failed to create motr object: " << rc << dendl;
    return rc;
  }

  meta.layout_id = mobj->ob_attr.oa_layout_id;
  meta.pver = mobj->ob_attr.oa_pver;
  ldpp_dout(dpp, 20) <<__func__<< ": lid=0x" << std::hex << meta.layout_id
                     << std::dec << " rc=" << rc << dendl;

  // TODO: add key:user+bucket+key+obj.meta.oid value:timestamp to
  // gc.queue.index. See more at github.com/Seagate/cortx-rgw/issues/7.

  return rc;
}

// Open an existing Motr object. If meta.layout_id is unknown, the
// bucket dir entry is fetched first to fill in meta; -ENOENT when the
// object has no layout recorded (i.e. does not exist).
int MotrObject::open_mobj(const DoutPrefixProvider *dpp)
{
  char fid_str[M0_FID_STR_LEN];
  snprintf(fid_str, ARRAY_SIZE(fid_str), U128X_F, U128_P(&meta.oid));
  ldpp_dout(dpp, 20) <<__func__<< ": oid=" << fid_str << dendl;

  int rc;
  if (meta.layout_id == 0) {
    rgw_bucket_dir_entry ent;
    rc = this->get_bucket_dir_ent(dpp, ent);
    if (rc < 0) {
      ldpp_dout(dpp, 0) << "ERROR: open_mobj() failed: rc=" << rc << dendl;
      return rc;
    }
  }

  if (meta.layout_id == 0)
    return -ENOENT;

  M0_ASSERT(mobj == nullptr);
  mobj = new m0_obj();
  memset(mobj, 0, sizeof *mobj);
  m0_obj_init(mobj, &store->container.co_realm, &meta.oid, store->conf.mc_layout_id);

  struct m0_op *op = nullptr;
  // Use the layout and pool version recorded at create time.
  mobj->ob_attr.oa_layout_id = meta.layout_id;
  mobj->ob_attr.oa_pver = meta.pver;
  mobj->ob_entity.en_flags |= M0_ENF_META;
  rc = m0_entity_open(&mobj->ob_entity, &op);
  if (rc != 0) {
    ldpp_dout(dpp, 0) << "ERROR: m0_entity_open() failed: rc=" << rc << dendl;
    this->close_mobj();
    return rc;
  }
  m0_op_launch(&op, 1);
  rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?:
       m0_rc(op);
  m0_op_fini(op);
  m0_op_free(op);

  if (rc < 0) {
    ldpp_dout(dpp, 10) << "ERROR: failed to open motr object: rc=" << rc << dendl;
    this->close_mobj();
    return rc;
  }

  ldpp_dout(dpp, 20) <<__func__<< ": rc=" << rc << dendl;

  return 0;
}

// Delete the backing Motr object with a synchronous m0_entity_delete op.
int MotrObject::delete_mobj(const DoutPrefixProvider *dpp)
{
  int rc;
  char
fid_str[M0_FID_STR_LEN]; snprintf(fid_str, ARRAY_SIZE(fid_str), U128X_F, U128_P(&meta.oid)); if (!meta.oid.u_hi || !meta.oid.u_lo) { ldpp_dout(dpp, 20) << __func__ << ": invalid motr object oid=" << fid_str << dendl; return -EINVAL; } ldpp_dout(dpp, 20) << __func__ << ": deleting motr object oid=" << fid_str << dendl; // Open the object. if (mobj == nullptr) { rc = this->open_mobj(dpp); if (rc < 0) return rc; } // Create an DELETE op and execute it (sync version). struct m0_op *op = nullptr; mobj->ob_entity.en_flags |= M0_ENF_META; rc = m0_entity_delete(&mobj->ob_entity, &op); if (rc != 0) { ldpp_dout(dpp, 0) << "ERROR: m0_entity_delete() failed: " << rc << dendl; return rc; } m0_op_launch(&op, 1); rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to open motr object: " << rc << dendl; return rc; } this->close_mobj(); return 0; } void MotrObject::close_mobj() { if (mobj == nullptr) return; m0_obj_fini(mobj); delete mobj; mobj = nullptr; } int MotrObject::write_mobj(const DoutPrefixProvider *dpp, bufferlist&& data, uint64_t offset) { int rc; unsigned bs, left; struct m0_op *op; char *start, *p; struct m0_bufvec buf; struct m0_bufvec attr; struct m0_indexvec ext; left = data.length(); if (left == 0) return 0; rc = m0_bufvec_empty_alloc(&buf, 1) ?: m0_bufvec_alloc(&attr, 1, 1) ?: m0_indexvec_alloc(&ext, 1); if (rc != 0) goto out; bs = this->get_optimal_bs(left); ldpp_dout(dpp, 20) <<__func__<< ": left=" << left << " bs=" << bs << dendl; start = data.c_str(); for (p = start; left > 0; left -= bs, p += bs, offset += bs) { if (left < bs) bs = this->get_optimal_bs(left); if (left < bs) { data.append_zero(bs - left); left = bs; p = data.c_str(); } buf.ov_buf[0] = p; buf.ov_vec.v_count[0] = bs; ext.iv_index[0] = offset; ext.iv_vec.v_count[0] = bs; attr.ov_vec.v_count[0] = 0; op = nullptr; rc = m0_obj_op(this->mobj, M0_OC_WRITE, &ext, &buf, &attr, 0, 0, &op); 
if (rc != 0) goto out; m0_op_launch(&op, 1); rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc != 0) goto out; } out: m0_indexvec_free(&ext); m0_bufvec_free(&attr); m0_bufvec_free2(&buf); return rc; } int MotrObject::read_mobj(const DoutPrefixProvider* dpp, int64_t off, int64_t end, RGWGetDataCB* cb) { int rc; unsigned bs, actual, left; struct m0_op *op; struct m0_bufvec buf; struct m0_bufvec attr; struct m0_indexvec ext; // make end pointer exclusive: // it's easier to work with it this way end++; ldpp_dout(dpp, 20) << "MotrObject::read_mobj(): off=" << off << " end=" << end << dendl; // As `off` may not be parity group size aligned, even using optimal // buffer block size, simply reading data from offset `off` could come // across parity group boundary. And Motr only allows page-size aligned // offset. // // The optimal size of each IO should also take into account the data // transfer size to s3 client. For example, 16MB may be nice to read // data from motr, but it could be too big for network transfer. // // TODO: We leave proper handling of offset in the future. bs = this->get_optimal_bs(end - off); ldpp_dout(dpp, 20) << "MotrObject::read_mobj(): bs=" << bs << dendl; rc = m0_bufvec_empty_alloc(&buf, 1) ? : m0_bufvec_alloc(&attr, 1, 1) ? : m0_indexvec_alloc(&ext, 1); if (rc < 0) goto out; left = end - off; for (; left > 0; off += actual) { if (left < bs) bs = this->get_optimal_bs(left); actual = bs; if (left < bs) actual = left; ldpp_dout(dpp, 20) << "MotrObject::read_mobj(): off=" << off << " actual=" << actual << dendl; bufferlist bl; buf.ov_buf[0] = bl.append_hole(bs).c_str(); buf.ov_vec.v_count[0] = bs; ext.iv_index[0] = off; ext.iv_vec.v_count[0] = bs; attr.ov_vec.v_count[0] = 0; left -= actual; // Read from Motr. 
    op = nullptr;
    rc = m0_obj_op(this->mobj, M0_OC_READ, &ext, &buf, &attr, 0, 0, &op);
    ldpp_dout(dpp, 20) << "MotrObject::read_mobj(): init read op rc=" << rc << dendl;
    if (rc != 0) {
      ldpp_dout(dpp, 0) << __func__ << ": read failed during m0_obj_op, rc=" << rc << dendl;
      goto out;
    }

    m0_op_launch(&op, 1);
    rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?:
         m0_rc(op);
    m0_op_fini(op);
    m0_op_free(op);
    if (rc != 0) {
      ldpp_dout(dpp, 0) << __func__ << ": read failed, m0_op_wait rc=" << rc << dendl;
      goto out;
    }

    // Call `cb` to process returned data.
    ldpp_dout(dpp, 20) << "MotrObject::read_mobj(): call cb to process data" << dendl;
    cb->handle_data(bl, 0, actual);
  }

out:
  m0_indexvec_free(&ext);
  m0_bufvec_free(&attr);
  m0_bufvec_free2(&buf);
  this->close_mobj();
  return rc;
}

// Fetch this object's rgw_bucket_dir_entry from the meta cache or the
// bucket index. For versioned/suspended buckets the current version is
// located by scanning the entries under this name. On success, `iter`
// is left past the dir entry so the trailing attrs + MotrObject::Meta
// can be decoded at `out`.
int MotrObject::get_bucket_dir_ent(const DoutPrefixProvider *dpp, rgw_bucket_dir_entry& ent)
{
  int rc = 0;
  string bucket_index_iname = "motr.rgw.bucket.index." + this->get_bucket()->get_name();
  int max = 1000;
  vector<string> keys(max);
  vector<bufferlist> vals(max);
  bufferlist bl;
  bufferlist::const_iterator iter;

  if (this->get_bucket()->get_info().versioning_status() == BUCKET_VERSIONED ||
      this->get_bucket()->get_info().versioning_status() == BUCKET_SUSPENDED) {

    rgw_bucket_dir_entry ent_to_check;

    // Fast path: cached entry, if it is the current version.
    if (this->store->get_obj_meta_cache()->get(dpp, this->get_name(), bl) == 0) {
      iter = bl.cbegin();
      ent_to_check.decode(iter);
      if (ent_to_check.is_current()) {
        ent = ent_to_check;
        rc = 0;
        goto out;
      }
    }

    ldpp_dout(dpp, 20) <<__func__<< ": versioned bucket!" << dendl;
    keys[0] = this->get_name();
    rc = store->next_query_by_name(bucket_index_iname, keys, vals);
    if (rc < 0) {
      ldpp_dout(dpp, 0) << __func__ << "ERROR: NEXT query failed. " << rc << dendl;
      return rc;
    }

    rc = -ENOENT;
    for (const auto& bl: vals) {
      if (bl.length() == 0)
        break;

      iter = bl.cbegin();
      ent_to_check.decode(iter);
      if (ent_to_check.is_current()) {
        ldpp_dout(dpp, 20) <<__func__<< ": found current version!" << dendl;
        ent = ent_to_check;
        rc = 0;
        this->store->get_obj_meta_cache()->put(dpp, this->get_name(), bl);
        break;
      }
    }
  } else {
    // Non-versioned bucket: direct GET by oid, filling the cache on miss.
    if (this->store->get_obj_meta_cache()->get(dpp, this->get_key().get_oid(), bl)) {
      ldpp_dout(dpp, 20) <<__func__<< ": non-versioned bucket!" << dendl;
      rc = this->store->do_idx_op_by_name(bucket_index_iname,
                                          M0_IC_GET, this->get_key().get_oid(), bl);
      if (rc < 0) {
        ldpp_dout(dpp, 0) << __func__ << "ERROR: failed to get object's entry from bucket index: rc="
                          << rc << dendl;
        return rc;
      }
      this->store->get_obj_meta_cache()->put(dpp, this->get_key().get_oid(), bl);
    }

    bufferlist& blr = bl;
    iter = blr.cbegin();
    ent.decode(iter);
  }

out:
  if (rc == 0) {
    // Skip the serialized attrs, then decode the Motr-specific metadata
    // (oid, layout id, pool version) into this->meta.
    sal::Attrs dummy;
    decode(dummy, iter);
    meta.decode(iter);
    ldpp_dout(dpp, 20) <<__func__<< ": lid=0x" << std::hex << meta.layout_id << dendl;
    char fid_str[M0_FID_STR_LEN];
    snprintf(fid_str, ARRAY_SIZE(fid_str), U128X_F, U128_P(&meta.oid));
    ldpp_dout(dpp, 70) << __func__ << ": oid=" << fid_str << dendl;
  } else
    ldpp_dout(dpp, 0) <<__func__<< ": rc=" << rc << dendl;

  return rc;
}

// Re-flag all existing versions of this object as non-current
// (FLAG_VER) in the bucket index, so a newly written version becomes
// the current one.
int MotrObject::update_version_entries(const DoutPrefixProvider *dpp)
{
  int rc;
  int max = 10;
  vector<string> keys(max);
  vector<bufferlist> vals(max);

  string bucket_index_iname = "motr.rgw.bucket.index." + this->get_bucket()->get_name();
  keys[0] = this->get_name();
  rc = store->next_query_by_name(bucket_index_iname, keys, vals);
  ldpp_dout(dpp, 20) << "get all versions, name = " << this->get_name() << "rc = " << rc << dendl;
  if (rc < 0) {
    ldpp_dout(dpp, 0) << "ERROR: NEXT query failed. " << rc << dendl;
    return rc;
  }

  // no entries returned.
  if (rc == 0)
    return 0;

  for (const auto& bl: vals) {
    if (bl.length() == 0)
      break;

    rgw_bucket_dir_entry ent;
    auto iter = bl.cbegin();
    ent.decode(iter);

    // NOTE(review): this is a prefix comparison, so names that merely
    // start with this object's name also pass — verify intended.
    if (0 != ent.key.name.compare(0, this->get_name().size(), this->get_name()))
      continue;

    if (!ent.is_current())
      continue;

    // Remove from the cache.
    store->get_obj_meta_cache()->remove(dpp, this->get_name());

    rgw::sal::Attrs attrs;
    decode(attrs, iter);
    MotrObject::Meta meta;
    meta.decode(iter);

    // Demote the entry: keep only the "is a version" flag.
    ent.flags = rgw_bucket_dir_entry::FLAG_VER;
    string key;
    if (ent.key.instance.empty())
      key = ent.key.name;
    else {
      // Versioned entries are keyed as "name[instance]".
      char buf[ent.key.name.size() + ent.key.instance.size() + 16];
      snprintf(buf, sizeof(buf), "%s[%s]", ent.key.name.c_str(), ent.key.instance.c_str());
      key = buf;
    }
    ldpp_dout(dpp, 20) << "update one version, key = " << key << dendl;

    // Re-encode entry + attrs + meta and write it back.
    bufferlist ent_bl;
    ent.encode(ent_bl);
    encode(attrs, ent_bl);
    meta.encode(ent_bl);

    rc = store->do_idx_op_by_name(bucket_index_iname, M0_IC_PUT, key, ent_bl);
    if (rc < 0)
      break;
  }

  return rc;
}

// Scan object_nnn_part_index to get all parts then open their motr objects.
// TODO: all parts are opened in the POC. But for a large object, for example
// a 5GB object will have about 300 parts (for default 15MB part). A better
// way of managing opened object may be needed.
int MotrObject::get_part_objs(const DoutPrefixProvider* dpp,
                              std::map<int, std::unique_ptr<MotrObject>>& part_objs)
{
  int rc;
  int max_parts = 1000;
  int marker = 0;
  uint64_t off = 0;
  bool truncated = false;
  std::unique_ptr<rgw::sal::MultipartUpload> upload;

  upload = this->get_bucket()->get_multipart_upload(this->get_name(), string());

  do {
    rc = upload->list_parts(dpp, store->ctx(), max_parts, marker, &marker, &truncated);
    if (rc == -ENOENT) {
      rc = -ERR_NO_SUCH_UPLOAD;
    }
    if (rc < 0)
      return rc;

    std::map<uint32_t, std::unique_ptr<MultipartPart>>& parts = upload->get_parts();
    for (auto part_iter = parts.begin(); part_iter != parts.end(); ++part_iter) {

      MultipartPart *mpart = part_iter->second.get();
      MotrMultipartPart *mmpart = static_cast<MotrMultipartPart *>(mpart);
      uint32_t part_num = mmpart->get_num();
      uint64_t part_size = mmpart->get_size();

      // Part objects are named "<bucket>.<oid>.part.<num>".
      string part_obj_name = this->get_bucket()->get_name() + "." +
                             this->get_key().get_oid() +
                             ".part." + std::to_string(part_num);
      std::unique_ptr<rgw::sal::Object> obj;
      obj = this->bucket->get_object(rgw_obj_key(part_obj_name));
      std::unique_ptr<rgw::sal::MotrObject> mobj(static_cast<rgw::sal::MotrObject *>(obj.release()));

      ldpp_dout(dpp, 20) << "get_part_objs: off = " << off << ", size = " << part_size << dendl;
      // Record where this part sits in the logical object.
      mobj->part_off = off;
      mobj->part_size = part_size;
      mobj->part_num = part_num;
      mobj->meta = mmpart->meta;

      part_objs.emplace(part_num, std::move(mobj));

      off += part_size;
    }
  } while (truncated);

  return 0;
}

// Open the Motr object backing each part collected by get_part_objs().
int MotrObject::open_part_objs(const DoutPrefixProvider* dpp,
                               std::map<int, std::unique_ptr<MotrObject>>& part_objs)
{
  //for (auto& iter: part_objs) {
  for (auto iter = part_objs.begin(); iter != part_objs.end(); ++iter) {
    MotrObject* obj = static_cast<MotrObject *>(iter->second.get());
    ldpp_dout(dpp, 20) << "open_part_objs: name = " << obj->get_name() << dendl;
    int rc = obj->open_mobj(dpp);
    if (rc < 0)
      return rc;
  }

  return 0;
}

// Delete all part objects of this (multipart) object via its upload.
int MotrObject::delete_part_objs(const DoutPrefixProvider* dpp)
{
  std::unique_ptr<rgw::sal::MultipartUpload> upload;
  upload = this->get_bucket()->get_multipart_upload(this->get_name(), string());
  std::unique_ptr<rgw::sal::MotrMultipartUpload> mupload(static_cast<rgw::sal::MotrMultipartUpload *>(upload.release()));
  return mupload->delete_parts(dpp);
}

// Read [off, end] (inclusive) of a multipart object by visiting each
// part that overlaps the range and translating to part-local offsets.
int MotrObject::read_multipart_obj(const DoutPrefixProvider* dpp,
                                   int64_t off, int64_t end, RGWGetDataCB* cb,
                                   std::map<int, std::unique_ptr<MotrObject>>& part_objs)
{
  int64_t cursor = off;

  ldpp_dout(dpp, 20) << "read_multipart_obj: off=" << off << " end=" << end << dendl;

  // Find the parts which are in the (off, end) range and
  // read data from it. Note: `end` argument is inclusive.
for (auto iter = part_objs.begin(); iter != part_objs.end(); ++iter) { MotrObject* obj = static_cast<MotrObject *>(iter->second.get()); int64_t part_off = obj->part_off; int64_t part_size = obj->part_size; int64_t part_end = obj->part_off + obj->part_size - 1; ldpp_dout(dpp, 20) << "read_multipart_obj: part_off=" << part_off << " part_end=" << part_end << dendl; if (part_end < off) continue; int64_t local_off = cursor - obj->part_off; int64_t local_end = part_end < end? part_size - 1 : end - part_off; ldpp_dout(dpp, 20) << "real_multipart_obj: name=" << obj->get_name() << " local_off=" << local_off << " local_end=" << local_end << dendl; int rc = obj->read_mobj(dpp, local_off, local_end, cb); if (rc < 0) return rc; cursor = part_end + 1; if (cursor > end) break; } return 0; } static unsigned roundup(unsigned x, unsigned by) { return ((x - 1) / by + 1) * by; } unsigned MotrObject::get_optimal_bs(unsigned len) { struct m0_pool_version *pver; pver = m0_pool_version_find(&store->instance->m0c_pools_common, &mobj->ob_attr.oa_pver); M0_ASSERT(pver != nullptr); struct m0_pdclust_attr *pa = &pver->pv_attr; uint64_t lid = M0_OBJ_LAYOUT_ID(meta.layout_id); unsigned unit_sz = m0_obj_layout_id_to_unit_size(lid); unsigned grp_sz = unit_sz * pa->pa_N; // bs should be max 4-times pool-width deep counting by 1MB units, or // 8-times deep counting by 512K units, 16-times deep by 256K units, // and so on. Several units to one target will be aggregated to make // fewer network RPCs, disk i/o operations and BE transactions. // For unit sizes of 32K or less, the depth is 128, which // makes it 32K * 128 == 4MB - the maximum amount per target when // the performance is still good on LNet (which has max 1MB frames). // TODO: it may be different on libfabric, should be re-measured. 
unsigned depth = 128 / ((unit_sz + 0x7fff) / 0x8000);
  if (depth == 0)
    depth = 1;

  // P * N / (N + K + S) - number of data units to span the pool-width
  unsigned max_bs = depth * unit_sz * pa->pa_P * pa->pa_N /
                                     (pa->pa_N + pa->pa_K + pa->pa_S);
  max_bs = roundup(max_bs, grp_sz); // multiple of group size

  if (len >= max_bs)
    return max_bs;
  else if (len <= grp_sz)
    return grp_sz;
  else
    return roundup(len, grp_sz);
}

// Release the io vectors, drop any accumulated data and close both the new
// and (possibly open) old Motr objects.
void MotrAtomicWriter::cleanup()
{
  m0_indexvec_free(&ext);
  m0_bufvec_free(&attr);
  m0_bufvec_free2(&buf);

  acc_data.clear();
  obj.close_mobj();
  old_obj.close_mobj();
}

// Fill the buffer/extent/attr vectors from the bufferlist iterator `bi`,
// consuming up to `len` bytes (capped at MAX_BUFVEC_NR segments).
// Advances acc_off and returns the number of bytes actually described.
unsigned MotrAtomicWriter::populate_bvec(unsigned len, bufferlist::iterator &bi)
{
  unsigned i, l, done = 0;
  const char *data;

  for (i = 0; i < MAX_BUFVEC_NR && len > 0; ++i) {
    l = bi.get_ptr_and_advance(len, &data);
    buf.ov_buf[i] = (char*)data;
    buf.ov_vec.v_count[i] = l;
    ext.iv_index[i] = acc_off;
    ext.iv_vec.v_count[i] = l;
    attr.ov_vec.v_count[i] = 0;  // no checksum attributes
    acc_off += l;
    len -= l;
    done += l;
  }
  buf.ov_vec.v_nr = i;
  ext.iv_vec.v_nr = i;

  return done;
}

// Flush the accumulated data (acc_data) to the Motr object in optimal-bs
// sized chunks. Creates (or opens, on -EEXIST) the object on first use.
// On any error the writer state is cleaned up and the error is returned.
int MotrAtomicWriter::write()
{
  int rc;
  unsigned bs, left;
  struct m0_op *op;
  bufferlist::iterator bi;

  left = acc_data.length();

  if (!obj.is_opened()) {
    rc = obj.create_mobj(dpp, left);
    if (rc == -EEXIST)
      rc = obj.open_mobj(dpp);
    if (rc != 0) {
      char fid_str[M0_FID_STR_LEN];
      snprintf(fid_str, ARRAY_SIZE(fid_str), U128X_F, U128_P(&obj.meta.oid));
      ldpp_dout(dpp, 0) << "ERROR: failed to create/open motr object "
                        << fid_str << " (" << obj.get_bucket()->get_name()
                        << "/" << obj.get_key().get_oid() << "): rc=" << rc
                        << dendl;
      goto err;
    }
  }

  total_data_size += left;

  bs = obj.get_optimal_bs(left);
  ldpp_dout(dpp, 20) <<__func__<< ": left=" << left << " bs=" << bs << dendl;

  bi = acc_data.begin();
  while (left > 0) {
    if (left < bs)
      bs = obj.get_optimal_bs(left);  // re-fit bs to the remaining tail
    if (left < bs) {
      // Pad the last chunk with zeroes up to bs and compact it into a
      // single contiguous buffer.
      acc_data.append_zero(bs - left);
      auto off = bi.get_off();
      bufferlist tmp;
      acc_data.splice(off, bs, &tmp);
      acc_data.clear();
      acc_data.append(tmp.c_str(), bs); // make it a single buf
      bi =
acc_data.begin();
      left = bs;
    }

    left -= this->populate_bvec(bs, bi);

    // Launch a synchronous Motr write op for this chunk.
    op = nullptr;
    rc = m0_obj_op(obj.mobj, M0_OC_WRITE, &ext, &buf, &attr, 0, 0, &op);
    if (rc != 0)
      goto err;
    m0_op_launch(&op, 1);
    rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?:
         m0_rc(op);
    m0_op_fini(op);
    m0_op_free(op);
    if (rc != 0)
      goto err;
  }

  acc_data.clear();

  return 0;

err:
  this->cleanup();
  return rc;
}

static const unsigned MAX_ACC_SIZE = 32 * 1024 * 1024;

// Accumulate enough data first to make a reasonable decision about the
// optimal unit size for a new object, or bs for existing object (32M seems
// enough for 4M units in 8+2 parity groups, a common config on wide pools),
// and then launch the write operations.
int MotrAtomicWriter::process(bufferlist&& data, uint64_t offset)
{
  if (data.length() == 0) { // last call, flush data
    int rc = 0;
    if (acc_data.length() != 0)
      rc = this->write();
    this->cleanup();
    return rc;
  }

  if (acc_data.length() == 0)
    acc_off = offset;  // remember where the accumulated run starts

  acc_data.append(std::move(data));
  if (acc_data.length() < MAX_ACC_SIZE)
    return 0;  // keep accumulating

  return this->write();
}

// Finalize an atomic PUT: flush remaining data, build the bucket-index dir
// entry (+ attrs and Motr meta) and insert it into the bucket index.
int MotrAtomicWriter::complete(size_t accounted_size, const std::string& etag,
                         ceph::real_time *mtime, ceph::real_time set_mtime,
                         std::map<std::string, bufferlist>& attrs,
                         ceph::real_time delete_at,
                         const char *if_match, const char *if_nomatch,
                         const std::string *user_data,
                         rgw_zone_set *zones_trace, bool *canceled,
                         optional_yield y)
{
  int rc = 0;

  if (acc_data.length() != 0) { // check again, just in case
    rc = this->write();
    this->cleanup();
    if (rc != 0)
      return rc;
  }

  bufferlist bl;
  rgw_bucket_dir_entry ent;

  // Set rgw_bucet_dir_entry. Some of the member of this structure may not
  // apply to motr. For example the storage_class.
  //
  // Checkout AtomicObjectProcessor::complete() in rgw_putobj_processor.cc
  // and RGWRados::Object::Write::write_meta() in rgw_rados.cc for what and
  // how to set the dir entry. Only set the basic ones for POC, no ACLs and
  // other attrs.
obj.get_key().get_index_key(&ent.key);
  ent.meta.size = total_data_size;
  ent.meta.accounted_size = total_data_size;
  ent.meta.mtime = real_clock::is_zero(set_mtime)? ceph::real_clock::now() : set_mtime;
  ent.meta.etag = etag;
  ent.meta.owner = owner.to_str();
  ent.meta.owner_display_name = obj.get_bucket()->get_owner()->get_display_name();
  bool is_versioned = obj.get_key().have_instance();
  if (is_versioned)
    ent.flags = rgw_bucket_dir_entry::FLAG_VER | rgw_bucket_dir_entry::FLAG_CURRENT;
  ldpp_dout(dpp, 20) <<__func__<< ": key=" << obj.get_key().get_oid()
                     << " etag: " << etag << " user_data=" << user_data << dendl;
  if (user_data)
    ent.meta.user_data = *user_data;
  ent.encode(bl);

  // Apply the bucket's default object-lock retention rule if the request
  // did not carry an explicit retention attribute.
  RGWBucketInfo &info = obj.get_bucket()->get_info();
  if (info.obj_lock_enabled() && info.obj_lock.has_rule()) {
    auto iter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
    if (iter == attrs.end()) {
      real_time lock_until_date = info.obj_lock.get_lock_until_date(ent.meta.mtime);
      string mode = info.obj_lock.get_mode();
      RGWObjectRetention obj_retention(mode, lock_until_date);
      bufferlist retention_bl;
      obj_retention.encode(retention_bl);
      attrs[RGW_ATTR_OBJECT_RETENTION] = retention_bl;
    }
  }
  encode(attrs, bl);
  obj.meta.encode(bl);
  ldpp_dout(dpp, 20) <<__func__<< ": lid=0x" << std::hex << obj.meta.layout_id
                     << dendl;
  if (is_versioned) {
    // get the list of all versioned objects with the same key and
    // unset their FLAG_CURRENT later, if do_idx_op_by_name() is successful.
    // Note: without distributed lock on the index - it is possible that 2
    // CURRENT entries would appear in the bucket. For example, consider the
    // following scenario when two clients are trying to add the new object
    // version concurrently:
    //   client 1: reads all the CURRENT entries
    //   client 2: updates the index and sets the new CURRENT
    //   client 1: updates the index and sets the new CURRENT
    // At the step (1) client 1 would not see the new current record from step (2),
    // so it won't update it. As a result, two CURRENT version entries will appear
    // in the bucket.
    // TODO: update the current version (unset the flag) and insert the new current
    // version can be launched in one motr op. This requires change at do_idx_op()
    // and do_idx_op_by_name().
    rc = obj.update_version_entries(dpp);
    if (rc < 0)
      return rc;
  }

  // Insert an entry into bucket index.
  string bucket_index_iname = "motr.rgw.bucket.index." + obj.get_bucket()->get_name();
  rc = store->do_idx_op_by_name(bucket_index_iname,
                                M0_IC_PUT, obj.get_key().get_oid(), bl);
  if (rc == 0)
    store->get_obj_meta_cache()->put(dpp, obj.get_key().get_oid(), bl);

  if (old_obj.get_bucket()->get_info().versioning_status() != BUCKET_VERSIONED) {
    // Delete old object data if exists.
    old_obj.delete_mobj(dpp);
  }

  // TODO: We need to handle the object leak caused by parallel object upload by
  // making use of background gc, which is currently not enabled for motr.
  return rc;
}

// Delete the Motr data objects of every uploaded part of this multipart
// upload, then drop the per-object part index.
int MotrMultipartUpload::delete_parts(const DoutPrefixProvider *dpp)
{
  int rc;
  int max_parts = 1000;
  int marker = 0;
  bool truncated = false;

  // Scan all parts and delete the corresponding motr objects.
  do {
    rc = this->list_parts(dpp, store->ctx(), max_parts, marker, &marker, &truncated);
    if (rc == -ENOENT) {
      // No parts recorded: nothing to delete, fall through to index removal.
      truncated = false;
      rc = 0;
    }
    if (rc < 0)
      return rc;

    std::map<uint32_t, std::unique_ptr<MultipartPart>>& parts = this->get_parts();
    for (auto part_iter = parts.begin(); part_iter != parts.end(); ++part_iter) {
      MultipartPart *mpart = part_iter->second.get();
      MotrMultipartPart *mmpart = static_cast<MotrMultipartPart *>(mpart);
      uint32_t part_num = mmpart->get_num();

      // Delete the part object. Note that the part object is not
      // inserted into bucket index, only the corresponding motr object
      // needs to be delete. That is why we don't call
      // MotrObject::delete_object().
      string part_obj_name = bucket->get_name() + "." +
                             mp_obj.get_key() + ".part."
+ std::to_string(part_num);
      std::unique_ptr<rgw::sal::Object> obj;
      obj = this->bucket->get_object(rgw_obj_key(part_obj_name));
      std::unique_ptr<rgw::sal::MotrObject> mobj(static_cast<rgw::sal::MotrObject *>(obj.release()));
      mobj->meta = mmpart->meta;
      rc = mobj->delete_mobj(dpp);
      if (rc < 0) {
        ldpp_dout(dpp, 0) << __func__ << ": Failed to delete object from Motr. rc=" << rc << dendl;
        return rc;
      }
    }
  } while (truncated);

  // Delete object part index.
  std::string oid = mp_obj.get_key();
  string obj_part_iname = "motr.rgw.object." + bucket->get_name() + "." + oid + ".parts";
  return store->delete_motr_idx_by_name(obj_part_iname);
}

// Abort an in-progress multipart upload: verify it exists, delete all of
// its part objects, then remove it from the bucket multipart index.
int MotrMultipartUpload::abort(const DoutPrefixProvider *dpp, CephContext *cct)
{
  int rc;
  // Check if multipart upload exists
  bufferlist bl;
  std::unique_ptr<rgw::sal::Object> meta_obj;
  meta_obj = get_meta_obj();
  string bucket_multipart_iname =
      "motr.rgw.bucket." + meta_obj->get_bucket()->get_name() + ".multiparts";
  // NOTE(review): the GET below keys by get_key().to_str() while the DEL
  // further down keys by get_key().get_oid() — presumably these produce the
  // same string for the meta object; worth confirming.
  rc = store->do_idx_op_by_name(bucket_multipart_iname,
                                  M0_IC_GET, meta_obj->get_key().to_str(), bl);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << __func__ << ": Failed to get multipart upload. rc=" << rc << dendl;
    return rc == -ENOENT ? -ERR_NO_SUCH_UPLOAD : rc;
  }

  // Scan all parts and delete the corresponding motr objects.
  rc = this->delete_parts(dpp);
  if (rc < 0)
    return rc;

  bl.clear();
  // Remove the upload from bucket multipart index.
  rc = store->do_idx_op_by_name(bucket_multipart_iname,
                                M0_IC_DEL, meta_obj->get_key().get_oid(), bl);
  return rc;
}

// Return the hidden "meta" object of this upload (lives in the multipart
// namespace, categorised as MultiMeta).
std::unique_ptr<rgw::sal::Object> MotrMultipartUpload::get_meta_obj()
{
  std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(rgw_obj_key(get_meta(), string(), mp_ns));
  std::unique_ptr<rgw::sal::MotrObject> mobj(static_cast<rgw::sal::MotrObject *>(obj.release()));
  mobj->set_category(RGWObjCategory::MultiMeta);
  return mobj;
}

// Per-upload metadata persisted in the meta object's user_data:
// currently only the destination placement rule.
struct motr_multipart_upload_info
{
  rgw_placement_rule dest_placement;

  void encode(bufferlist& bl) const
  {
    ENCODE_START(1, 1, bl);
    encode(dest_placement, bl);
    ENCODE_FINISH(bl);
  }

  void decode(bufferlist::const_iterator& bl)
  {
    DECODE_START(1, bl);
    decode(dest_placement, bl);
    DECODE_FINISH(bl);
  }
};
WRITE_CLASS_ENCODER(motr_multipart_upload_info)

// Start a new multipart upload: generate a unique upload id, create the
// meta entry in the bucket multipart index, and create the per-object
// part index. Retries with a new id while the index PUT reports -EEXIST.
int MotrMultipartUpload::init(const DoutPrefixProvider *dpp, optional_yield y,
                              ACLOwner& _owner,
                              rgw_placement_rule& dest_placement,
                              rgw::sal::Attrs& attrs)
{
  int rc;
  std::string oid = mp_obj.get_key();

  owner = _owner;

  do {
    char buf[33];
    string tmp_obj_name;
    gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
    std::string upload_id = MULTIPART_UPLOAD_ID_PREFIX; /* v2 upload id */
    upload_id.append(buf);

    mp_obj.init(oid, upload_id);
    tmp_obj_name = mp_obj.get_meta();

    std::unique_ptr<rgw::sal::Object> obj;
    obj = bucket->get_object(rgw_obj_key(tmp_obj_name, string(), mp_ns));
    // the meta object will be indexed with 0 size; it only carries metadata
    obj->set_in_extra_data(true);
    obj->set_hash_source(oid);

    motr_multipart_upload_info upload_info;
    upload_info.dest_placement = dest_placement;
    bufferlist mpbl;
    encode(upload_info, mpbl);

    // Create an initial entry in the bucket. The entry will be
    // updated when multipart upload is completed, for example,
    // size, etag etc.
bufferlist bl; rgw_bucket_dir_entry ent; obj->get_key().get_index_key(&ent.key); ent.meta.owner = owner.get_id().to_str(); ent.meta.category = RGWObjCategory::MultiMeta; ent.meta.mtime = ceph::real_clock::now(); ent.meta.user_data.assign(mpbl.c_str(), mpbl.c_str() + mpbl.length()); ent.encode(bl); // Insert an entry into bucket multipart index so it is not shown // when listing a bucket. string bucket_multipart_iname = "motr.rgw.bucket." + obj->get_bucket()->get_name() + ".multiparts"; rc = store->do_idx_op_by_name(bucket_multipart_iname, M0_IC_PUT, obj->get_key().get_oid(), bl); } while (rc == -EEXIST); if (rc < 0) return rc; // Create object part index. // TODO: add bucket as part of the name. string obj_part_iname = "motr.rgw.object." + bucket->get_name() + "." + oid + ".parts"; ldpp_dout(dpp, 20) << "MotrMultipartUpload::init(): object part index=" << obj_part_iname << dendl; rc = store->create_motr_idx_by_name(obj_part_iname); if (rc == -EEXIST) rc = 0; if (rc < 0) // TODO: clean the bucket index entry ldpp_dout(dpp, 0) << "Failed to create object multipart index " << obj_part_iname << dendl; return rc; } int MotrMultipartUpload::list_parts(const DoutPrefixProvider *dpp, CephContext *cct, int num_parts, int marker, int *next_marker, bool *truncated, bool assume_unsorted) { int rc; vector<string> key_vec(num_parts); vector<bufferlist> val_vec(num_parts); std::string oid = mp_obj.get_key(); string obj_part_iname = "motr.rgw.object." + bucket->get_name() + "." + oid + ".parts"; ldpp_dout(dpp, 20) << __func__ << ": object part index = " << obj_part_iname << dendl; key_vec[0].clear(); key_vec[0] = "part."; char buf[32]; snprintf(buf, sizeof(buf), "%08d", marker + 1); key_vec[0].append(buf); rc = store->next_query_by_name(obj_part_iname, key_vec, val_vec); if (rc < 0) { ldpp_dout(dpp, 0) << "ERROR: NEXT query failed. 
" << rc << dendl; return rc; } int last_num = 0; int part_cnt = 0; uint32_t expected_next = 0; ldpp_dout(dpp, 20) << __func__ << ": marker = " << marker << dendl; for (const auto& bl: val_vec) { if (bl.length() == 0) break; RGWUploadPartInfo info; auto iter = bl.cbegin(); info.decode(iter); rgw::sal::Attrs attrs_dummy; decode(attrs_dummy, iter); MotrObject::Meta meta; meta.decode(iter); ldpp_dout(dpp, 20) << __func__ << ": part_num=" << info.num << " part_size=" << info.size << dendl; ldpp_dout(dpp, 20) << __func__ << ": meta:oid=[" << meta.oid.u_hi << "," << meta.oid.u_lo << "], meta:pvid=[" << meta.pver.f_container << "," << meta.pver.f_key << "], meta:layout id=" << meta.layout_id << dendl; if (!expected_next) expected_next = info.num + 1; else if (expected_next && info.num != expected_next) return -EINVAL; else expected_next = info.num + 1; if ((int)info.num > marker) { last_num = info.num; parts.emplace(info.num, std::make_unique<MotrMultipartPart>(info, meta)); } part_cnt++; } // Does it have more parts? if (truncated) *truncated = part_cnt < num_parts? 
false : true; ldpp_dout(dpp, 20) << __func__ << ": truncated=" << *truncated << dendl; if (next_marker) *next_marker = last_num; return 0; } // Heavily copy from rgw_sal_rados.cc int MotrMultipartUpload::complete(const DoutPrefixProvider *dpp, optional_yield y, CephContext* cct, map<int, string>& part_etags, list<rgw_obj_index_key>& remove_objs, uint64_t& accounted_size, bool& compressed, RGWCompressionInfo& cs_info, off_t& off, std::string& tag, ACLOwner& owner, uint64_t olh_epoch, rgw::sal::Object* target_obj) { char final_etag[CEPH_CRYPTO_MD5_DIGESTSIZE]; char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 16]; std::string etag; bufferlist etag_bl; MD5 hash; // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); bool truncated; int rc; ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): enter" << dendl; int total_parts = 0; int handled_parts = 0; int max_parts = 1000; int marker = 0; uint64_t min_part_size = cct->_conf->rgw_multipart_min_part_size; auto etags_iter = part_etags.begin(); rgw::sal::Attrs attrs = target_obj->get_attrs(); do { ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): list_parts()" << dendl; rc = list_parts(dpp, cct, max_parts, marker, &marker, &truncated); if (rc == -ENOENT) { rc = -ERR_NO_SUCH_UPLOAD; } if (rc < 0) return rc; total_parts += parts.size(); if (!truncated && total_parts != (int)part_etags.size()) { ldpp_dout(dpp, 0) << "NOTICE: total parts mismatch: have: " << total_parts << " expected: " << part_etags.size() << dendl; rc = -ERR_INVALID_PART; return rc; } ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): parts.size()=" << parts.size() << dendl; for (auto obj_iter = parts.begin(); etags_iter != part_etags.end() && obj_iter != parts.end(); ++etags_iter, ++obj_iter, ++handled_parts) { MultipartPart *mpart = obj_iter->second.get(); MotrMultipartPart *mmpart = static_cast<MotrMultipartPart *>(mpart); RGWUploadPartInfo *part = &mmpart->info; uint64_t 
part_size = part->accounted_size;
      ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): part_size=" << part_size << dendl;
      // Every part except the last must meet the configured minimum size.
      if (handled_parts < (int)part_etags.size() - 1 &&
          part_size < min_part_size) {
        rc = -ERR_TOO_SMALL;
        return rc;
      }

      char petag[CEPH_CRYPTO_MD5_DIGESTSIZE];
      // The client-supplied part numbers must match the uploaded ones.
      if (etags_iter->first != (int)obj_iter->first) {
        ldpp_dout(dpp, 0) << "NOTICE: parts num mismatch: next requested: "
                          << etags_iter->first << " next uploaded: "
                          << obj_iter->first << dendl;
        rc = -ERR_INVALID_PART;
        return rc;
      }
      string part_etag = rgw_string_unquote(etags_iter->second);
      if (part_etag.compare(part->etag) != 0) {
        ldpp_dout(dpp, 0) << "NOTICE: etag mismatch: part: " << etags_iter->first
                          << " etag: " << etags_iter->second << dendl;
        rc = -ERR_INVALID_PART;
        return rc;
      }

      // Fold this part's binary etag into the whole-object MD5.
      hex_to_buf(part->etag.c_str(), petag, CEPH_CRYPTO_MD5_DIGESTSIZE);
      hash.Update((const unsigned char *)petag, sizeof(petag));
      ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): calc etag " << dendl;

      string oid = mp_obj.get_part(part->num);
      rgw_obj src_obj;
      src_obj.init_ns(bucket->get_key(), oid, mp_ns);

#if 0 // does Motr backend need it?
      /* update manifest for part */
      if (part->manifest.empty()) {
        ldpp_dout(dpp, 0) << "ERROR: empty manifest for object part: obj="
                          << src_obj << dendl;
        rc = -ERR_INVALID_PART;
        return rc;
      } else {
        manifest.append(dpp, part->manifest, store->get_zone());
      }
      ldpp_dout(dpp, 0) << "MotrMultipartUpload::complete(): manifest " << dendl;
#endif

      // All parts must agree on the compression type.
      bool part_compressed = (part->cs_info.compression_type != "none");
      if ((handled_parts > 0) &&
          ((part_compressed != compressed) ||
            (cs_info.compression_type != part->cs_info.compression_type))) {
          ldpp_dout(dpp, 0) << "ERROR: compression type was changed during multipart upload ("
                           << cs_info.compression_type << ">>" << part->cs_info.compression_type << ")" << dendl;
          rc = -ERR_INVALID_PART;
          return rc;
      }

      ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): part compression" << dendl;
      if (part_compressed) {
        // Re-base this part's compression blocks onto the whole-object
        // offsets accumulated so far.
        int64_t new_ofs; // offset in compression data for new part
        if (cs_info.blocks.size() > 0)
          new_ofs = cs_info.blocks.back().new_ofs + cs_info.blocks.back().len;
        else
          new_ofs = 0;
        for (const auto& block : part->cs_info.blocks) {
          compression_block cb;
          cb.old_ofs = block.old_ofs + cs_info.orig_size;
          cb.new_ofs = new_ofs;
          cb.len = block.len;
          cs_info.blocks.push_back(cb);
          new_ofs = cb.new_ofs + cb.len;
        }
        if (!compressed)
          cs_info.compression_type = part->cs_info.compression_type;
        cs_info.orig_size += part->cs_info.orig_size;
        compressed = true;
      }

      // We may not need to do the following as remove_objs are those
      // don't show when listing a bucket. As we store in-progress uploaded
      // object's metadata in a separate index, they are not shown when
      // listing a bucket.
      rgw_obj_index_key remove_key;
      src_obj.key.get_index_key(&remove_key);
      remove_objs.push_back(remove_key);

      off += part_size;
      accounted_size += part->accounted_size;
      ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): off=" << off
                         << ", accounted_size = " << accounted_size << dendl;
    }
  } while (truncated);
  hash.Final((unsigned char *)final_etag);

  // S3-style multipart etag: MD5-of-part-MD5s, suffixed with "-<nparts>".
  buf_to_hex((unsigned char *)final_etag, sizeof(final_etag), final_etag_str);
  snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2],
           sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2,
           "-%lld", (long long)part_etags.size());
  etag = final_etag_str;
  ldpp_dout(dpp, 20) << "calculated etag: " << etag << dendl;

  etag_bl.append(etag);
  attrs[RGW_ATTR_ETAG] = etag_bl;

  if (compressed) {
    // write compression attribute to full object
    bufferlist tmp;
    encode(cs_info, tmp);
    attrs[RGW_ATTR_COMPRESSION] = tmp;
  }

  // Read the object's the multipart_upload_info.
  // TODO: all those index name and key constructions should be implemented as
  // member functions.
  bufferlist bl;
  std::unique_ptr<rgw::sal::Object> meta_obj;
  meta_obj = get_meta_obj();
  string bucket_multipart_iname =
      "motr.rgw.bucket." + meta_obj->get_bucket()->get_name() + ".multiparts";
  rc = this->store->do_idx_op_by_name(bucket_multipart_iname,
                                      M0_IC_GET, meta_obj->get_key().get_oid(), bl);
  ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): read entry from bucket multipart index rc=" << rc << dendl;
  if (rc < 0)
    return rc;

  rgw_bucket_dir_entry ent;
  bufferlist& blr = bl;
  auto ent_iter = blr.cbegin();
  ent.decode(ent_iter);

  // Update the dir entry and insert it to the bucket index so
  // the object will be seen when listing the bucket.
bufferlist update_bl;
  target_obj->get_key().get_index_key(&ent.key);  // Change to offical name :)
  ent.meta.size = off;
  ent.meta.accounted_size = accounted_size;
  ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): obj size="
                     << ent.meta.size << " obj accounted size="
                     << ent.meta.accounted_size << dendl;
  ent.meta.mtime = ceph::real_clock::now();
  ent.meta.etag = etag;
  ent.encode(update_bl);
  encode(attrs, update_bl);
  // The real per-part Motr meta lives in the part index; the head entry
  // carries a placeholder Meta.
  MotrObject::Meta meta_dummy;
  meta_dummy.encode(update_bl);

  string bucket_index_iname = "motr.rgw.bucket.index." + meta_obj->get_bucket()->get_name();
  ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): target_obj name=" << target_obj->get_name()
                     << " target_obj oid=" << target_obj->get_oid() << dendl;
  rc = store->do_idx_op_by_name(bucket_index_iname, M0_IC_PUT,
                                target_obj->get_name(), update_bl);
  if (rc < 0)
    return rc;

  // Put into metadata cache.
  store->get_obj_meta_cache()->put(dpp, target_obj->get_name(), update_bl);

  // Now we can remove it from bucket multipart index.
  ldpp_dout(dpp, 20) << "MotrMultipartUpload::complete(): remove from bucket multipartindex " << dendl;
  return store->do_idx_op_by_name(bucket_multipart_iname,
                                  M0_IC_DEL, meta_obj->get_key().get_oid(), bl);
}

// Return the upload's placement rule and/or cached attrs (currently only
// the etag). `rule` and `attrs` are optional out-params.
int MotrMultipartUpload::get_info(const DoutPrefixProvider *dpp, optional_yield y, rgw_placement_rule** rule, rgw::sal::Attrs* attrs)
{
  if (!rule && !attrs) {
    return 0;
  }

  if (rule) {
    if (!placement.empty()) {
      *rule = &placement;  // already cached
      if (!attrs) {
        /* Don't need attrs, done */
        return 0;
      }
    } else {
      *rule = nullptr;
    }
  }

  std::unique_ptr<rgw::sal::Object> meta_obj;
  meta_obj = get_meta_obj();
  meta_obj->set_in_extra_data(true);

  // Read the object's the multipart_upload_info.
  bufferlist bl;
  string bucket_multipart_iname =
      "motr.rgw.bucket." + meta_obj->get_bucket()->get_name() + ".multiparts";
  int rc = this->store->do_idx_op_by_name(bucket_multipart_iname,
                                          M0_IC_GET, meta_obj->get_key().get_oid(), bl);
  if (rc < 0) {
    ldpp_dout(dpp, 0) << __func__ << ": Failed to get multipart info. rc=" << rc << dendl;
    return rc == -ENOENT ? -ERR_NO_SUCH_UPLOAD : rc;
  }

  rgw_bucket_dir_entry ent;
  bufferlist& blr = bl;
  auto ent_iter = blr.cbegin();
  ent.decode(ent_iter);

  if (attrs) {
    bufferlist etag_bl;
    string& etag = ent.meta.etag;
    ldpp_dout(dpp, 20) << "object's etag: " << ent.meta.etag << dendl;
    etag_bl.append(etag.c_str(), etag.size());
    attrs->emplace(std::move(RGW_ATTR_ETAG), std::move(etag_bl));
    if (!rule || *rule != nullptr) {
      /* placement was cached; don't actually read */
      return 0;
    }
  }

  /* Decode multipart_upload_info */
  motr_multipart_upload_info upload_info;
  bufferlist mpbl;
  mpbl.append(ent.meta.user_data.c_str(), ent.meta.user_data.size());
  auto mpbl_iter = mpbl.cbegin();
  upload_info.decode(mpbl_iter);
  placement = upload_info.dest_placement;
  *rule = &placement;

  return 0;
}

// Factory for the per-part writer used to upload one part of this upload.
std::unique_ptr<Writer> MotrMultipartUpload::get_writer(
				  const DoutPrefixProvider *dpp,
				  optional_yield y,
				  rgw::sal::Object* obj,
				  const rgw_user& owner,
				  const rgw_placement_rule *ptail_placement_rule,
				  uint64_t part_num,
				  const std::string& part_num_str)
{
  return std::make_unique<MotrMultipartWriter>(dpp, y, this, obj, store, owner,
				 ptail_placement_rule, part_num, part_num_str);
}

// Create (or re-open, on retry) the Motr object backing this part.
int MotrMultipartWriter::prepare(optional_yield y)
{
  string part_obj_name = head_obj->get_bucket()->get_name() + "." +
	                 head_obj->get_key().get_oid() +
	                 ".part." + std::to_string(part_num);
  ldpp_dout(dpp, 20) << "bucket=" << head_obj->get_bucket()->get_name() << "part_obj_name=" << part_obj_name << dendl;
  part_obj = std::make_unique<MotrObject>(this->store, rgw_obj_key(part_obj_name), head_obj->get_bucket());
  if (part_obj == nullptr)
    return -ENOMEM;

  // s3 client may retry uploading part, so the part may have already
  // been created.
  int rc = part_obj->create_mobj(dpp, store->cctx->_conf->rgw_max_chunk_size);
  if (rc == -EEXIST) {
    rc = part_obj->open_mobj(dpp);
    if (rc < 0)
      return rc;
  }
  return rc;
}

// Write one chunk of part data at `offset` and account its size.
int MotrMultipartWriter::process(bufferlist&& data, uint64_t offset)
{
  // NOTE(review): data.length() is read after std::move(data) — if
  // write_mobj() consumes the bufferlist this may account 0 bytes; confirm
  // write_mobj()'s ownership semantics.
  int rc = part_obj->write_mobj(dpp, std::move(data), offset);
  if (rc == 0) {
    actual_part_size += data.length();
    ldpp_dout(dpp, 20) << " write_mobj(): actual_part_size=" << actual_part_size << dendl;
  }
  return rc;
}

// Record the finished part (RGWUploadPartInfo + attrs + Motr meta) in the
// per-object part index under the key "part.NNNNNNNN".
int MotrMultipartWriter::complete(size_t accounted_size, const std::string& etag,
                       ceph::real_time *mtime, ceph::real_time set_mtime,
                       std::map<std::string, bufferlist>& attrs,
                       ceph::real_time delete_at,
                       const char *if_match, const char *if_nomatch,
                       const std::string *user_data,
                       rgw_zone_set *zones_trace, bool *canceled,
                       optional_yield y)
{
  // Should the dir entry(object metadata) be updated? For example
  // mtime.

  ldpp_dout(dpp, 20) << "MotrMultipartWriter::complete(): enter" << dendl;
  // Add an entry into object_nnn_part_index.
  bufferlist bl;
  RGWUploadPartInfo info;
  info.num = part_num;
  info.etag = etag;
  info.size = actual_part_size;
  info.accounted_size = accounted_size;
  info.modified = real_clock::now();

  bool compressed;
  int rc = rgw_compression_info_from_attrset(attrs, compressed, info.cs_info);
  ldpp_dout(dpp, 20) << "MotrMultipartWriter::complete(): compression rc=" << rc << dendl;
  if (rc < 0) {
    ldpp_dout(dpp, 1) << "cannot get compression info" << dendl;
    return rc;
  }
  encode(info, bl);
  encode(attrs, bl);
  part_obj->meta.encode(bl);

  string p = "part.";
  char buf[32];
  snprintf(buf, sizeof(buf), "%08d", (int)part_num);
  p.append(buf);
  string obj_part_iname = "motr.rgw.object." + head_obj->get_bucket()->get_name() + "." +
	                  head_obj->get_key().get_oid() + ".parts";
  ldpp_dout(dpp, 20) << "MotrMultipartWriter::complete(): object part index = " << obj_part_iname << dendl;
  rc = store->do_idx_op_by_name(obj_part_iname, M0_IC_PUT, p, bl);
  if (rc < 0) {
    return rc == -ENOENT ?
-ERR_NO_SUCH_UPLOAD : rc; } return 0; } std::unique_ptr<RGWRole> MotrStore::get_role(std::string name, std::string tenant, std::string path, std::string trust_policy, std::string max_session_duration_str, std::multimap<std::string,std::string> tags) { RGWRole* p = nullptr; return std::unique_ptr<RGWRole>(p); } std::unique_ptr<RGWRole> MotrStore::get_role(const RGWRoleInfo& info) { RGWRole* p = nullptr; return std::unique_ptr<RGWRole>(p); } std::unique_ptr<RGWRole> MotrStore::get_role(std::string id) { RGWRole* p = nullptr; return std::unique_ptr<RGWRole>(p); } int MotrStore::get_roles(const DoutPrefixProvider *dpp, optional_yield y, const std::string& path_prefix, const std::string& tenant, vector<std::unique_ptr<RGWRole>>& roles) { return 0; } std::unique_ptr<RGWOIDCProvider> MotrStore::get_oidc_provider() { RGWOIDCProvider* p = nullptr; return std::unique_ptr<RGWOIDCProvider>(p); } int MotrStore::get_oidc_providers(const DoutPrefixProvider *dpp, const std::string& tenant, vector<std::unique_ptr<RGWOIDCProvider>>& providers) { return 0; } std::unique_ptr<MultipartUpload> MotrBucket::get_multipart_upload(const std::string& oid, std::optional<std::string> upload_id, ACLOwner owner, ceph::real_time mtime) { return std::make_unique<MotrMultipartUpload>(store, this, oid, upload_id, owner, mtime); } std::unique_ptr<Writer> MotrStore::get_append_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, const std::string& unique_tag, uint64_t position, uint64_t *cur_accounted_size) { return nullptr; } std::unique_ptr<Writer> MotrStore::get_atomic_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, uint64_t olh_epoch, const std::string& unique_tag) { return std::make_unique<MotrAtomicWriter>(dpp, y, obj, this, owner, ptail_placement_rule, olh_epoch, unique_tag); } const std::string& 
MotrStore::get_compression_type(const rgw_placement_rule& rule)
{
  return zone.zone_params->get_compression_type(rule);
}

bool MotrStore::valid_placement(const rgw_placement_rule& rule)
{
  return zone.zone_params->valid_placement(rule);
}

// Build a MotrUser handle for `u` (no I/O; user info is loaded lazily).
std::unique_ptr<User> MotrStore::get_user(const rgw_user &u)
{
  ldout(cctx, 20) << "bucket's user: " << u.to_str() << dendl;
  return std::make_unique<MotrUser>(this, u);
}

// Resolve an access key to a user: look up the key in the IAM access-key
// index, then load the referenced user record.
int MotrStore::get_user_by_access_key(const DoutPrefixProvider *dpp, const std::string &key, optional_yield y, std::unique_ptr<User> *user)
{
  int rc;
  User *u;
  bufferlist bl;
  RGWUserInfo uinfo;
  MotrAccessKey access_key;

  rc = do_idx_op_by_name(RGW_IAM_MOTR_ACCESS_KEY,
                         M0_IC_GET, key, bl);
  if (rc < 0){
    ldout(cctx, 0) << "Access key not found: rc = " << rc << dendl;
    return rc;
  }

  bufferlist& blr = bl;
  auto iter = blr.cbegin();
  access_key.decode(iter);

  uinfo.user_id.from_str(access_key.user_id);
  ldout(cctx, 0) << "Loading user: " << uinfo.user_id.id << dendl;
  rc = MotrUser().load_user_from_idx(dpp, this, uinfo, nullptr, nullptr);
  if (rc < 0){
    ldout(cctx, 0) << "Failed to load user: rc = " << rc << dendl;
    return rc;
  }
  // NOTE(review): plain `new` throws on failure, so the null check below
  // is effectively dead code.
  u = new MotrUser(this, uinfo);
  if (!u)
    return -ENOMEM;

  user->reset(u);
  return 0;
}

// Resolve an email address to a user via the IAM email index.
int MotrStore::get_user_by_email(const DoutPrefixProvider *dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user)
{
  int rc;
  User *u;
  bufferlist bl;
  RGWUserInfo uinfo;
  MotrEmailInfo email_info;
  rc = do_idx_op_by_name(RGW_IAM_MOTR_EMAIL_KEY,
                         M0_IC_GET, email, bl);
  if (rc < 0){
    ldout(cctx, 0) << "Email Id not found: rc = " << rc << dendl;
    return rc;
  }
  auto iter = bl.cbegin();
  email_info.decode(iter);
  ldout(cctx, 0) << "Loading user: " << email_info.user_id << dendl;
  uinfo.user_id.from_str(email_info.user_id);
  rc = MotrUser().load_user_from_idx(dpp, this, uinfo, nullptr, nullptr);
  if (rc < 0){
    ldout(cctx, 0) << "Failed to load user: rc = " << rc << dendl;
    return rc;
  }
  u = new MotrUser(this, uinfo);
  if (!u)
    return -ENOMEM;
  user->reset(u);
  return 0;
}

int
MotrStore::get_user_by_swift(const DoutPrefixProvider *dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user)
{
  /* Swift keys and subusers are not supported for now */
  return 0;
}

// Persist an access-key -> user mapping in the IAM access-key index.
int MotrStore::store_access_key(const DoutPrefixProvider *dpp, optional_yield y, MotrAccessKey access_key)
{
  int rc;
  bufferlist bl;
  access_key.encode(bl);
  rc = do_idx_op_by_name(RGW_IAM_MOTR_ACCESS_KEY,
                         M0_IC_PUT, access_key.id, bl);
  if (rc < 0){
    ldout(cctx, 0) << "Failed to store key: rc = " << rc << dendl;
    return rc;
  }
  return rc;
}

// Remove an access key from the IAM access-key index (error is logged but
// still returned to the caller).
int MotrStore::delete_access_key(const DoutPrefixProvider *dpp, optional_yield y, std::string access_key)
{
  int rc;
  bufferlist bl;
  rc = do_idx_op_by_name(RGW_IAM_MOTR_ACCESS_KEY,
                         M0_IC_DEL, access_key, bl);
  if (rc < 0){
    ldout(cctx, 0) << "Failed to delete key: rc = " << rc << dendl;
  }
  return rc;
}

// Persist an email -> user mapping in the IAM email index.
int MotrStore::store_email_info(const DoutPrefixProvider *dpp, optional_yield y, MotrEmailInfo& email_info )
{
  int rc;
  bufferlist bl;
  email_info.encode(bl);
  rc = do_idx_op_by_name(RGW_IAM_MOTR_EMAIL_KEY,
                         M0_IC_PUT, email_info.email_id, bl);
  if (rc < 0) {
    ldout(cctx, 0) << "Failed to store the user by email as key: rc = " << rc << dendl;
  }
  return rc;
}

std::unique_ptr<Object> MotrStore::get_object(const rgw_obj_key& k)
{
  return std::make_unique<MotrObject>(this, k);
}

// Load a bucket by rgw_bucket key; the MotrBucket is only handed back if
// load_bucket() succeeds.
int MotrStore::get_bucket(const DoutPrefixProvider *dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y)
{
  int ret;
  Bucket* bp;

  bp = new MotrBucket(this, b, u);
  ret = bp->load_bucket(dpp, y);
  if (ret < 0) {
    delete bp;  // don't leak on load failure
    return ret;
  }

  bucket->reset(bp);
  return 0;
}

int MotrStore::get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket)
{
  Bucket* bp;

  bp = new MotrBucket(this, i, u);
  /* Don't need to fetch the bucket info, use the provided one */

  bucket->reset(bp);
  return 0;
}

// Convenience overload: build the rgw_bucket key from tenant + name and
// delegate to the key-based get_bucket().
int MotrStore::get_bucket(const DoutPrefixProvider *dpp, User* u, const std::string& tenant, const std::string& name, std::unique_ptr<Bucket>* bucket,
    optional_yield y)
{
  rgw_bucket b;

  b.tenant = tenant;
  b.name = name;

  return get_bucket(dpp, u, b, bucket, y);
}

// Single-site deployment: this instance is always the metadata master.
bool MotrStore::is_meta_master()
{
  return true;
}

// Multisite forwarding is not supported; these are no-ops.
int MotrStore::forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version *objv,
    bufferlist& in_data,
    JSONParser *jp, req_info& info,
    optional_yield y)
{
  return 0;
}

int MotrStore::forward_iam_request_to_master(const DoutPrefixProvider *dpp, const RGWAccessKey& key, obj_version* objv,
    bufferlist& in_data,
    RGWXMLDecoder::XMLParser* parser, req_info& info,
    optional_yield y)
{
  return 0;
}

std::string MotrStore::zone_unique_id(uint64_t unique_num)
{
  return "";
}

std::string MotrStore::zone_unique_trans_id(const uint64_t unique_num)
{
  return "";
}

int MotrStore::get_zonegroup(const std::string& id, std::unique_ptr<ZoneGroup>* group)
{
  /* XXX: for now only one zonegroup supported */
  ZoneGroup* zg;
  zg = new MotrZoneGroup(this, zone.zonegroup.get_group());

  group->reset(zg);
  return 0;
}

int MotrStore::list_all_zones(const DoutPrefixProvider* dpp, std::list<std::string>& zone_ids)
{
  zone_ids.push_back(zone.get_id());
  return 0;
}

int MotrStore::cluster_stat(RGWClusterStat& stats)
{
  return 0;
}

// Lifecycle processing is not implemented for Motr.
std::unique_ptr<Lifecycle> MotrStore::get_lifecycle(void)
{
  return 0;
}

std::unique_ptr<Notification> MotrStore::get_notification(Object* obj, Object* src_obj, req_state* s,
    rgw::notify::EventType event_type, optional_yield y, const string* object_name)
{
  return std::make_unique<MotrNotification>(obj, src_obj, event_type);
}

std::unique_ptr<Notification> MotrStore::get_notification(const DoutPrefixProvider* dpp, Object* obj,
    Object* src_obj,
    rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket,
    std::string& _user_id, std::string& _user_tenant, std::string& _req_id,
    optional_yield y)
{
  return std::make_unique<MotrNotification>(obj, src_obj, event_type);
}

// Usage logging is not implemented for Motr.
int MotrStore::log_usage(const DoutPrefixProvider *dpp, map<rgw_user_bucket, RGWUsageBatch>& usage_info)
{
  return 0;
}

int MotrStore::log_op(const
DoutPrefixProvider *dpp, string& oid, bufferlist& bl) { return 0; } int MotrStore::register_to_service_map(const DoutPrefixProvider *dpp, const string& daemon_type, const map<string, string>& meta) { return 0; } void MotrStore::get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) { return; } void MotrStore::get_quota(RGWQuota& quota) { // XXX: Not handled for the first pass return; } int MotrStore::set_buckets_enabled(const DoutPrefixProvider *dpp, vector<rgw_bucket>& buckets, bool enabled) { return 0; } int MotrStore::get_sync_policy_handler(const DoutPrefixProvider *dpp, std::optional<rgw_zone_id> zone, std::optional<rgw_bucket> bucket, RGWBucketSyncPolicyHandlerRef *phandler, optional_yield y) { return 0; } RGWDataSyncStatusManager* MotrStore::get_data_sync_manager(const rgw_zone_id& source_zone) { return 0; } int MotrStore::read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage) { return 0; } int MotrStore::trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) { return 0; } int MotrStore::get_config_key_val(string name, bufferlist *bl) { return 0; } int MotrStore::meta_list_keys_init(const DoutPrefixProvider *dpp, const string& section, const string& marker, void** phandle) { return 0; } int MotrStore::meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, list<string>& keys, bool* truncated) { return 0; } void MotrStore::meta_list_keys_complete(void* handle) { return; } std::string MotrStore::meta_get_marker(void* handle) { return ""; } int MotrStore::meta_remove(const DoutPrefixProvider *dpp, string& metadata_key, optional_yield y) { return 0; } int MotrStore::open_idx(struct m0_uint128 *id, bool create, struct m0_idx *idx) { m0_idx_init(idx, &container.co_realm, id); if (!create) return 0; // 
nothing to do more // create index or make sure it's created struct m0_op *op = nullptr; int rc = m0_entity_create(nullptr, &idx->in_entity, &op); if (rc != 0) { ldout(cctx, 0) << "ERROR: m0_entity_create() failed: " << rc << dendl; goto out; } m0_op_launch(&op, 1); rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc != 0 && rc != -EEXIST) ldout(cctx, 0) << "ERROR: index create failed: " << rc << dendl; out: return rc; } static void set_m0bufvec(struct m0_bufvec *bv, vector<uint8_t>& vec) { *bv->ov_buf = reinterpret_cast<char*>(vec.data()); *bv->ov_vec.v_count = vec.size(); } // idx must be opened with open_idx() beforehand int MotrStore::do_idx_op(struct m0_idx *idx, enum m0_idx_opcode opcode, vector<uint8_t>& key, vector<uint8_t>& val, bool update) { int rc, rc_i; struct m0_bufvec k, v, *vp = &v; uint32_t flags = 0; struct m0_op *op = nullptr; if (m0_bufvec_empty_alloc(&k, 1) != 0) { ldout(cctx, 0) << "ERROR: failed to allocate key bufvec" << dendl; return -ENOMEM; } if (opcode == M0_IC_PUT || opcode == M0_IC_GET) { rc = -ENOMEM; if (m0_bufvec_empty_alloc(&v, 1) != 0) { ldout(cctx, 0) << "ERROR: failed to allocate value bufvec" << dendl; goto out; } } set_m0bufvec(&k, key); if (opcode == M0_IC_PUT) set_m0bufvec(&v, val); if (opcode == M0_IC_DEL) vp = nullptr; if (opcode == M0_IC_PUT && update) flags |= M0_OIF_OVERWRITE; rc = m0_idx_op(idx, opcode, &k, vp, &rc_i, flags, &op); if (rc != 0) { ldout(cctx, 0) << "ERROR: failed to init index op: " << rc << dendl; goto out; } m0_op_launch(&op, 1); rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc != 0) { ldout(cctx, 0) << "ERROR: op failed: " << rc << dendl; goto out; } if (rc_i != 0) { ldout(cctx, 0) << "ERROR: idx op failed: " << rc_i << dendl; rc = rc_i; goto out; } if (opcode == M0_IC_GET) { val.resize(*v.ov_vec.v_count); memcpy(reinterpret_cast<char*>(val.data()), 
*v.ov_buf, *v.ov_vec.v_count); } out: m0_bufvec_free2(&k); if (opcode == M0_IC_GET) m0_bufvec_free(&v); // cleanup buffer after GET else if (opcode == M0_IC_PUT) m0_bufvec_free2(&v); return rc; } // Retrieve a range of key/value pairs starting from keys[0]. int MotrStore::do_idx_next_op(struct m0_idx *idx, vector<vector<uint8_t>>& keys, vector<vector<uint8_t>>& vals) { int rc; uint32_t i = 0; int nr_kvp = vals.size(); int *rcs = new int[nr_kvp]; struct m0_bufvec k, v; struct m0_op *op = nullptr; rc = m0_bufvec_empty_alloc(&k, nr_kvp)?: m0_bufvec_empty_alloc(&v, nr_kvp); if (rc != 0) { ldout(cctx, 0) << "ERROR: failed to allocate kv bufvecs" << dendl; return rc; } set_m0bufvec(&k, keys[0]); rc = m0_idx_op(idx, M0_IC_NEXT, &k, &v, rcs, 0, &op); if (rc != 0) { ldout(cctx, 0) << "ERROR: failed to init index op: " << rc << dendl; goto out; } m0_op_launch(&op, 1); rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc != 0) { ldout(cctx, 0) << "ERROR: op failed: " << rc << dendl; goto out; } for (i = 0; i < v.ov_vec.v_nr; ++i) { if (rcs[i] < 0) break; vector<uint8_t>& key = keys[i]; vector<uint8_t>& val = vals[i]; key.resize(k.ov_vec.v_count[i]); val.resize(v.ov_vec.v_count[i]); memcpy(reinterpret_cast<char*>(key.data()), k.ov_buf[i], k.ov_vec.v_count[i]); memcpy(reinterpret_cast<char*>(val.data()), v.ov_buf[i], v.ov_vec.v_count[i]); } out: k.ov_vec.v_nr = i; v.ov_vec.v_nr = i; m0_bufvec_free(&k); m0_bufvec_free(&v); // cleanup buffer after GET delete []rcs; return rc ?: i; } // Retrieve a number of key/value pairs under the prefix starting // from the marker at key_out[0]. 
int MotrStore::next_query_by_name(string idx_name, vector<string>& key_out, vector<bufferlist>& val_out, string prefix, string delim) { unsigned nr_kvp = std::min(val_out.size(), 100UL); struct m0_idx idx = {}; vector<vector<uint8_t>> keys(nr_kvp); vector<vector<uint8_t>> vals(nr_kvp); struct m0_uint128 idx_id; int i = 0, j, k = 0; index_name_to_motr_fid(idx_name, &idx_id); int rc = open_motr_idx(&idx_id, &idx); if (rc != 0) { ldout(cctx, 0) << "ERROR: next_query_by_name(): failed to open index: rc=" << rc << dendl; goto out; } // Only the first element for keys needs to be set for NEXT query. // The keys will be set will the returned keys from motr index. ldout(cctx, 20) <<__func__<< ": next_query_by_name(): index=" << idx_name << " prefix=" << prefix << " delim=" << delim << dendl; keys[0].assign(key_out[0].begin(), key_out[0].end()); for (i = 0; i < (int)val_out.size(); i += k, k = 0) { rc = do_idx_next_op(&idx, keys, vals); ldout(cctx, 20) << "do_idx_next_op() = " << rc << dendl; if (rc < 0) { ldout(cctx, 0) << "ERROR: NEXT query failed. 
" << rc << dendl; goto out; } string dir; for (j = 0, k = 0; j < rc; ++j) { string key(keys[j].begin(), keys[j].end()); size_t pos = std::string::npos; if (!delim.empty()) pos = key.find(delim, prefix.length()); if (pos != std::string::npos) { // DIR entry dir.assign(key, 0, pos + 1); if (dir.compare(0, prefix.length(), prefix) != 0) goto out; if (i + k == 0 || dir != key_out[i + k - 1]) // a new one key_out[i + k++] = dir; continue; } dir = ""; if (key.compare(0, prefix.length(), prefix) != 0) goto out; key_out[i + k] = key; bufferlist& vbl = val_out[i + k]; vbl.append(reinterpret_cast<char*>(vals[j].data()), vals[j].size()); ++k; } if (rc < (int)nr_kvp) // there are no more keys to fetch break; string next_key; if (dir != "") next_key = dir + "\xff"; // skip all dir content in 1 step else next_key = key_out[i + k - 1] + " "; ldout(cctx, 0) << "do_idx_next_op(): next_key=" << next_key << dendl; keys[0].assign(next_key.begin(), next_key.end()); } out: m0_idx_fini(&idx); return rc < 0 ? rc : i + k; } int MotrStore::delete_motr_idx_by_name(string iname) { struct m0_idx idx; struct m0_uint128 idx_id; struct m0_op *op = nullptr; ldout(cctx, 20) << "delete_motr_idx_by_name=" << iname << dendl; index_name_to_motr_fid(iname, &idx_id); m0_idx_init(&idx, &container.co_realm, &idx_id); m0_entity_open(&idx.in_entity, &op); int rc = m0_entity_delete(&idx.in_entity, &op); if (rc < 0) goto out; m0_op_launch(&op, 1); ldout(cctx, 70) << "waiting for op completion" << dendl; rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc == -ENOENT) // race deletion?? 
rc = 0; else if (rc < 0) ldout(cctx, 0) << "ERROR: index create failed: " << rc << dendl; ldout(cctx, 20) << "delete_motr_idx_by_name rc=" << rc << dendl; out: m0_idx_fini(&idx); return rc; } int MotrStore::open_motr_idx(struct m0_uint128 *id, struct m0_idx *idx) { m0_idx_init(idx, &container.co_realm, id); return 0; } // The following marcos are from dix/fid_convert.h which are not exposed. enum { M0_DIX_FID_DEVICE_ID_OFFSET = 32, M0_DIX_FID_DIX_CONTAINER_MASK = (1ULL << M0_DIX_FID_DEVICE_ID_OFFSET) - 1, }; // md5 is used here, a more robust way to convert index name to fid is // needed to avoid collision. void MotrStore::index_name_to_motr_fid(string iname, struct m0_uint128 *id) { unsigned char md5[16]; // 128/8 = 16 MD5 hash; // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); hash.Update((const unsigned char *)iname.c_str(), iname.length()); hash.Final(md5); memcpy(&id->u_hi, md5, 8); memcpy(&id->u_lo, md5 + 8, 8); ldout(cctx, 20) << "id = 0x" << std::hex << id->u_hi << ":0x" << std::hex << id->u_lo << dendl; struct m0_fid *fid = (struct m0_fid*)id; m0_fid_tset(fid, m0_dix_fid_type.ft_id, fid->f_container & M0_DIX_FID_DIX_CONTAINER_MASK, fid->f_key); ldout(cctx, 20) << "converted id = 0x" << std::hex << id->u_hi << ":0x" << std::hex << id->u_lo << dendl; } int MotrStore::do_idx_op_by_name(string idx_name, enum m0_idx_opcode opcode, string key_str, bufferlist &bl, bool update) { struct m0_idx idx; vector<uint8_t> key(key_str.begin(), key_str.end()); vector<uint8_t> val; struct m0_uint128 idx_id; index_name_to_motr_fid(idx_name, &idx_id); int rc = open_motr_idx(&idx_id, &idx); if (rc != 0) { ldout(cctx, 0) << "ERROR: failed to open index: " << rc << dendl; goto out; } if (opcode == M0_IC_PUT) val.assign(bl.c_str(), bl.c_str() + bl.length()); ldout(cctx, 20) <<__func__<< ": do_idx_op_by_name(): op=" << (opcode == M0_IC_PUT ? 
"PUT" : "GET") << " idx=" << idx_name << " key=" << key_str << dendl; rc = do_idx_op(&idx, opcode, key, val, update); if (rc == 0 && opcode == M0_IC_GET) // Append the returned value (blob) to the bufferlist. bl.append(reinterpret_cast<char*>(val.data()), val.size()); out: m0_idx_fini(&idx); return rc; } int MotrStore::create_motr_idx_by_name(string iname) { struct m0_idx idx = {}; struct m0_uint128 id; index_name_to_motr_fid(iname, &id); m0_idx_init(&idx, &container.co_realm, &id); // create index or make sure it's created struct m0_op *op = nullptr; int rc = m0_entity_create(nullptr, &idx.in_entity, &op); if (rc != 0) { ldout(cctx, 0) << "ERROR: m0_entity_create() failed: " << rc << dendl; goto out; } m0_op_launch(&op, 1); rc = m0_op_wait(op, M0_BITS(M0_OS_FAILED, M0_OS_STABLE), M0_TIME_NEVER) ?: m0_rc(op); m0_op_fini(op); m0_op_free(op); if (rc != 0 && rc != -EEXIST) ldout(cctx, 0) << "ERROR: index create failed: " << rc << dendl; out: m0_idx_fini(&idx); return rc; } // If a global index is checked (if it has been create) every time // before they're queried (put/get), which takes 2 Motr operations to // complete the query. As the global indices' name and FID are known // already when MotrStore is created, we move the check and creation // in newMotrStore(). // Similar method is used for per bucket/user index. For example, // bucket instance index is created when creating the bucket. 
// Create every global index listed in motr_global_indices, tolerating
// already-existing ones. Stops at the first real error; returns 0 otherwise.
int MotrStore::check_n_create_global_indices()
{
  int rc = 0;

  for (const auto& iname : motr_global_indices) {
    rc = create_motr_idx_by_name(iname);
    if (rc < 0 && rc != -EEXIST)
      break;
    rc = 0; // -EEXIST is fine: the index is already there
  }

  return rc;
}

// Report the Motr configuration root fid (printed as a string) as the
// cluster id.
std::string MotrStore::get_cluster_id(const DoutPrefixProvider* dpp,  optional_yield y)
{
  char id[M0_FID_STR_LEN];
  struct m0_confc *confc = m0_reqh2confc(&instance->m0c_reqh);

  m0_fid_print(id, ARRAY_SIZE(id), &confc->cc_root->co_id);
  return std::string(id);
}

// Allocate and enable the three metadata caches (object meta, user,
// bucket instance). NOTE(review): the raw `new` allocations appear to be
// owned by MotrStore for its whole lifetime — confirm they are released
// in the destructor.
int MotrStore::init_metadata_cache(const DoutPrefixProvider *dpp, CephContext *cct)
{
  this->obj_meta_cache = new MotrMetaCache(dpp, cct);
  this->get_obj_meta_cache()->set_enabled(true);

  this->user_cache = new MotrMetaCache(dpp, cct);
  this->get_user_cache()->set_enabled(true);

  this->bucket_inst_cache = new MotrMetaCache(dpp, cct);
  this->get_bucket_inst_cache()->set_enabled(true);

  return 0;
}

// Lua scripting is not supported by the Motr backend: every manager entry
// point reports -ENOENT.
int MotrLuaManager::get_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, std::string& script)
{
  return -ENOENT;
}

int MotrLuaManager::put_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, const std::string& script)
{
  return -ENOENT;
}

int MotrLuaManager::del_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key)
{
  return -ENOENT;
}

int MotrLuaManager::add_package(const DoutPrefixProvider* dpp, optional_yield y, const std::string& package_name)
{
  return -ENOENT;
}

int MotrLuaManager::remove_package(const DoutPrefixProvider* dpp, optional_yield y, const std::string& package_name)
{
  return -ENOENT;
}

int MotrLuaManager::list_packages(const DoutPrefixProvider* dpp, optional_yield y, rgw::lua::packages_t& packages)
{
  return -ENOENT;
}

} // namespace rgw::sal

extern "C" {

// Factory entry point: build a MotrStore, configure the Motr client from
// rgw config options, connect (m0_client_init), initialise the container
// and ufid generator, and create the global indices. Returns the store on
// success, nullptr on any failure (the partially-built store is deleted).
void *newMotrStore(CephContext *cct)
{
  int rc = -1;
  rgw::sal::MotrStore *store = new rgw::sal::MotrStore(cct);

  // NOTE(review): `new` throws on failure, so this check is always true;
  // kept as-is to avoid touching the error-unwind flow.
  if (store) {
    store->conf.mc_is_oostore = true;
    // XXX: these params should be taken from config settings and
    // cct somehow?
    store->instance = nullptr;
    const auto& proc_ep = g_conf().get_val<std::string>("motr_my_endpoint");
    const auto& ha_ep = g_conf().get_val<std::string>("motr_ha_endpoint");
    const auto& proc_fid = g_conf().get_val<std::string>("motr_my_fid");
    const auto& profile = g_conf().get_val<std::string>("motr_profile_fid");
    const auto& admin_proc_ep = g_conf().get_val<std::string>("motr_admin_endpoint");
    const auto& admin_proc_fid = g_conf().get_val<std::string>("motr_admin_fid");
    const int init_flags = cct->get_init_flags();
    ldout(cct, 0) << "INFO: motr my endpoint: " << proc_ep << dendl;
    ldout(cct, 0) << "INFO: motr ha endpoint: " << ha_ep << dendl;
    ldout(cct, 0) << "INFO: motr my fid: " << proc_fid << dendl;
    ldout(cct, 0) << "INFO: motr profile fid: " << profile << dendl;
    // NOTE(review): these two assignments are dead — they are
    // unconditionally overwritten by the if/else below.
    store->conf.mc_local_addr = proc_ep.c_str();
    store->conf.mc_process_fid = proc_fid.c_str();

    ldout(cct, 0) << "INFO: init flags: " << init_flags << dendl;
    ldout(cct, 0) << "INFO: motr admin endpoint: " << admin_proc_ep << dendl;
    ldout(cct, 0) << "INFO: motr admin fid: " << admin_proc_fid << dendl;

    // HACK: this is so that radosgw-admin uses a different client
    // (init_flags == 0 indicates the admin tool rather than the gateway).
    if (init_flags == 0) {
      store->conf.mc_process_fid = admin_proc_fid.c_str();
      store->conf.mc_local_addr = admin_proc_ep.c_str();
    } else {
      store->conf.mc_process_fid = proc_fid.c_str();
      store->conf.mc_local_addr = proc_ep.c_str();
    }
    store->conf.mc_ha_addr = ha_ep.c_str();
    store->conf.mc_profile = profile.c_str();

    ldout(cct, 50) << "INFO: motr profile fid: " << store->conf.mc_profile << dendl;
    ldout(cct, 50) << "INFO: ha addr: " << store->conf.mc_ha_addr << dendl;
    ldout(cct, 50) << "INFO: process fid: " << store->conf.mc_process_fid << dendl;
    ldout(cct, 50) << "INFO: motr endpoint: " << store->conf.mc_local_addr << dendl;

    store->conf.mc_tm_recv_queue_min_len = 64;
    store->conf.mc_max_rpc_msg_size = 524288;
    store->conf.mc_idx_service_id = M0_IDX_DIX;
    store->dix_conf.kc_create_meta = false;
    store->conf.mc_idx_service_conf = &store->dix_conf;

    if (!g_conf().get_val<bool>("motr_tracing_enabled")) {
      m0_trace_level_allow(M0_WARN); // allow errors and warnings in syslog anyway
      m0_trace_set_mmapped_buffer(false);
    }

    store->instance = nullptr;
    rc = m0_client_init(&store->instance, &store->conf, true);
    if (rc != 0) {
      ldout(cct, 0) << "ERROR: m0_client_init() failed: " << rc << dendl;
      goto out;
    }

    m0_container_init(&store->container, nullptr, &M0_UBER_REALM, store->instance);
    rc = store->container.co_realm.re_entity.en_sm.sm_rc;
    if (rc != 0) {
      ldout(cct, 0) << "ERROR: m0_container_init() failed: " << rc << dendl;
      goto out;
    }

    rc = m0_ufid_init(store->instance, &ufid_gr);
    if (rc != 0) {
      ldout(cct, 0) << "ERROR: m0_ufid_init() failed: " << rc << dendl;
      goto out;
    }

    // Create global indices if not yet.
    rc = store->check_n_create_global_indices();
    if (rc != 0) {
      ldout(cct, 0) << "ERROR: check_n_create_global_indices() failed: " << rc << dendl;
      goto out;
    }
  }

out:
  if (rc != 0) {
    delete store;
    return nullptr;
  }
  return store;
}

}
123,860
29.957511
178
cc
null
ceph-main/src/rgw/rgw_sal_motr.h
// vim: ts=2 sw=2 expandtab ft=cpp /* * Ceph - scalable distributed file system * * SAL implementation for the CORTX Motr backend * * Copyright (C) 2021 Seagate Technology LLC and/or its Affiliates * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once extern "C" { #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wextern-c-compat" #pragma clang diagnostic ignored "-Wdeprecated-anon-enum-enum-conversion" #include "motr/config.h" #include "motr/client.h" #pragma clang diagnostic pop } #include "rgw_sal_store.h" #include "rgw_rados.h" #include "rgw_notify.h" #include "rgw_oidc_provider.h" #include "rgw_role.h" #include "rgw_multi.h" #include "rgw_putobj_processor.h" namespace rgw::sal { class MotrStore; // Global Motr indices #define RGW_MOTR_USERS_IDX_NAME "motr.rgw.users" #define RGW_MOTR_BUCKET_INST_IDX_NAME "motr.rgw.bucket.instances" #define RGW_MOTR_BUCKET_HD_IDX_NAME "motr.rgw.bucket.headers" #define RGW_IAM_MOTR_ACCESS_KEY "motr.rgw.accesskeys" #define RGW_IAM_MOTR_EMAIL_KEY "motr.rgw.emails" //#define RGW_MOTR_BUCKET_ACL_IDX_NAME "motr.rgw.bucket.acls" // A simplified metadata cache implementation. // Note: MotrObjMetaCache doesn't handle the IO operations to Motr. A proxy // class can be added to handle cache and 'real' ops. class MotrMetaCache { protected: // MGW re-uses ObjectCache to cache object's metadata as it has already // implemented a lru cache: (1) ObjectCache internally uses a map and lru // list to manage cache entry. POC uses object name, user name or bucket // name as the key to lookup and insert an entry. (2) ObjectCache::data is // a bufferlist and can be used to store any metadata structure, such as // object's bucket dir entry, user info or bucket instance. // // Note from RGW: // The Rados Gateway stores metadata and objects in an internal cache. 
This // should be kept consistent by the OSD's relaying notify events between // multiple watching RGW processes. In the event that this notification // protocol fails, bounding the length of time that any data in the cache will // be assumed valid will ensure that any RGW instance that falls out of sync // will eventually recover. This seems to be an issue mostly for large numbers // of RGW instances under heavy use. If you would like to turn off cache expiry, // set this value to zero. // // Currently POC hasn't implemented the watch-notify menchanism yet. So the // current implementation is similar to cortx-s3server which is based on expiry // time. TODO: see comments on distribute_cache). // // Beaware: Motr object data is not cached in current POC as RGW! // RGW caches the first chunk (4MB by default). ObjectCache cache; public: // Lookup a cache entry. int get(const DoutPrefixProvider *dpp, const std::string& name, bufferlist& data); // Insert a cache entry. int put(const DoutPrefixProvider *dpp, const std::string& name, const bufferlist& data); // Called when an object is deleted. Notification should be sent to other // RGW instances. int remove(const DoutPrefixProvider *dpp, const std::string& name); // Make the local cache entry invalid. void invalid(const DoutPrefixProvider *dpp, const std::string& name); // TODO: Distribute_cache() and watch_cb() now are only place holder functions. // Checkout services/svc_sys_obj_cache.h/cc for reference. // These 2 functions are designed to notify or to act on cache notification. // It is feasible to implement the functionality using Motr's FDMI after discussing // with Hua. 
int distribute_cache(const DoutPrefixProvider *dpp, const std::string& normal_name, ObjectCacheInfo& obj_info, int op); int watch_cb(const DoutPrefixProvider *dpp, uint64_t notify_id, uint64_t cookie, uint64_t notifier_id, bufferlist& bl); void set_enabled(bool status); MotrMetaCache(const DoutPrefixProvider *dpp, CephContext *cct) { cache.set_ctx(cct); } }; struct MotrUserInfo { RGWUserInfo info; obj_version user_version; rgw::sal::Attrs attrs; void encode(bufferlist& bl) const { ENCODE_START(3, 3, bl); encode(info, bl); encode(user_version, bl); encode(attrs, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(3, bl); decode(info, bl); decode(user_version, bl); decode(attrs, bl); DECODE_FINISH(bl); } }; WRITE_CLASS_ENCODER(MotrUserInfo); struct MotrEmailInfo { std::string user_id; std::string email_id; MotrEmailInfo() {} MotrEmailInfo(std::string _user_id, std::string _email_id ) : user_id(std::move(_user_id)), email_id(std::move(_email_id)) {} void encode(bufferlist& bl) const { ENCODE_START(2, 2, bl); encode(user_id, bl); encode(email_id, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl); decode(user_id, bl); decode(email_id, bl); DECODE_FINISH(bl); } }; WRITE_CLASS_ENCODER(MotrEmailInfo); struct MotrAccessKey { std::string id; // AccessKey std::string key; // SecretKey std::string user_id; // UserID MotrAccessKey() {} MotrAccessKey(std::string _id, std::string _key, std::string _user_id) : id(std::move(_id)), key(std::move(_key)), user_id(std::move(_user_id)) {} void encode(bufferlist& bl) const { ENCODE_START(2, 2, bl); encode(id, bl); encode(key, bl); encode(user_id, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl); decode(id, bl); decode(key, bl); decode(user_id, bl); DECODE_FINISH(bl); } }; WRITE_CLASS_ENCODER(MotrAccessKey); class MotrNotification : public StoreNotification { public: 
MotrNotification(Object* _obj, Object* _src_obj, rgw::notify::EventType _type) : StoreNotification(_obj, _src_obj, _type) {} ~MotrNotification() = default; virtual int publish_reserve(const DoutPrefixProvider *dpp, RGWObjTags* obj_tags = nullptr) override { return 0;} virtual int publish_commit(const DoutPrefixProvider* dpp, uint64_t size, const ceph::real_time& mtime, const std::string& etag, const std::string& version) override { return 0; } }; class MotrUser : public StoreUser { private: MotrStore *store; struct m0_uint128 idxID = {0xe5ecb53640d4ecce, 0x6a156cd5a74aa3b8}; // MD5 of “motr.rgw.users“ struct m0_idx idx; public: std::set<std::string> access_key_tracker; MotrUser(MotrStore *_st, const rgw_user& _u) : StoreUser(_u), store(_st) { } MotrUser(MotrStore *_st, const RGWUserInfo& _i) : StoreUser(_i), store(_st) { } MotrUser(MotrStore *_st) : store(_st) { } MotrUser(MotrUser& _o) = default; MotrUser() {} virtual std::unique_ptr<User> clone() override { return std::unique_ptr<User>(new MotrUser(*this)); } int list_buckets(const DoutPrefixProvider *dpp, const std::string& marker, const std::string& end_marker, uint64_t max, bool need_stats, BucketList& buckets, optional_yield y) override; virtual int create_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, const RGWQuotaInfo* pquota_info, const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info, obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed, req_info& req_info, std::unique_ptr<Bucket>* bucket, optional_yield y) override; virtual int read_attrs(const DoutPrefixProvider* dpp, optional_yield y) override; virtual int merge_and_store_attrs(const DoutPrefixProvider* dpp, Attrs& new_attrs, optional_yield y) override; virtual int read_stats(const DoutPrefixProvider *dpp, optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync = nullptr, 
ceph::real_time *last_stats_update = nullptr) override; virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB* cb) override; virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override; virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool* is_truncated, RGWUsageIter& usage_iter, std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override; virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override; virtual int load_user(const DoutPrefixProvider* dpp, optional_yield y) override; virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info = nullptr) override; virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override; virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override; int create_user_info_idx(); int load_user_from_idx(const DoutPrefixProvider *dpp, MotrStore *store, RGWUserInfo& info, std::map<std::string, bufferlist> *attrs, RGWObjVersionTracker *objv_tr); friend class MotrBucket; }; class MotrBucket : public StoreBucket { private: MotrStore *store; RGWAccessControlPolicy acls; // RGWBucketInfo and other information that are shown when listing a bucket is // represented in struct MotrBucketInfo. The structure is encoded and stored // as the value of the global bucket instance index. // TODO: compare pros and cons of separating the bucket_attrs (ACLs, tag etc.) // into a different index. 
    // On-"disk" (index-value) representation of a bucket: the RGWBucketInfo
    // plus the version, mtime and attributes that must survive a round trip
    // through the bucket instance index.
    struct MotrBucketInfo {
      RGWBucketInfo info;

      obj_version bucket_version;
      ceph::real_time mtime;

      rgw::sal::Attrs bucket_attrs;

      void encode(bufferlist& bl) const {
        ENCODE_START(4, 4, bl);
        encode(info, bl);
        encode(bucket_version, bl);
        encode(mtime, bl);
        encode(bucket_attrs, bl); // rgw_cache.h example for a map
        ENCODE_FINISH(bl);
      }

      void decode(bufferlist::const_iterator& bl) {
        DECODE_START(4, bl);
        decode(info, bl);
        decode(bucket_version, bl);
        decode(mtime, bl);
        decode(bucket_attrs, bl);
        DECODE_FINISH(bl);
      }
    };
    WRITE_CLASS_ENCODER(MotrBucketInfo);

  public:
    MotrBucket(MotrStore *_st)
      : store(_st), acls() { }

    MotrBucket(MotrStore *_st, User* _u)
      : StoreBucket(_u), store(_st), acls() { }

    MotrBucket(MotrStore *_st, const rgw_bucket& _b)
      : StoreBucket(_b), store(_st), acls() { }

    MotrBucket(MotrStore *_st, const RGWBucketEnt& _e)
      : StoreBucket(_e), store(_st), acls() { }

    MotrBucket(MotrStore *_st, const RGWBucketInfo& _i)
      : StoreBucket(_i), store(_st), acls() { }

    MotrBucket(MotrStore *_st, const rgw_bucket& _b, User* _u)
      : StoreBucket(_b, _u), store(_st), acls() { }

    MotrBucket(MotrStore *_st, const RGWBucketEnt& _e, User* _u)
      : StoreBucket(_e, _u), store(_st), acls() { }

    MotrBucket(MotrStore *_st, const RGWBucketInfo& _i, User* _u)
      : StoreBucket(_i, _u), store(_st), acls() { }

    ~MotrBucket() { }

    virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override;
    virtual int list(const DoutPrefixProvider *dpp, ListParams&, int, ListResults&, optional_yield y) override;
    virtual int remove_bucket(const DoutPrefixProvider *dpp, bool delete_children,
        bool forward_to_master, req_info* req_info, optional_yield y) override;
    virtual int remove_bucket_bypass_gc(int concurrent_max,
        bool keep_index_consistent, optional_yield y,
        const DoutPrefixProvider *dpp) override;
    virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
    virtual int set_acl(const DoutPrefixProvider *dpp, RGWAccessControlPolicy& acl, optional_yield y) override;
    virtual int load_bucket(const DoutPrefixProvider *dpp, optional_yield y,
        bool get_stats = false) override;

    // Motr-specific helpers (not part of the SAL interface): maintain the
    // user->bucket link and the per-bucket Motr indices.
    int link_user(const DoutPrefixProvider* dpp, User* new_user, optional_yield y);
    int unlink_user(const DoutPrefixProvider* dpp, User* new_user, optional_yield y);
    int create_bucket_index();
    int create_multipart_indices();

    virtual int read_stats(const DoutPrefixProvider *dpp,
        const bucket_index_layout_generation& idx_layout, int shard_id,
        std::string *bucket_ver, std::string *master_ver,
        std::map<RGWObjCategory, RGWStorageStats>& stats,
        std::string *max_marker = nullptr, bool *syncstopped = nullptr) override;
    virtual int read_stats_async(const DoutPrefixProvider *dpp,
        const bucket_index_layout_generation& idx_layout, int shard_id,
        RGWGetBucketStats_CB* ctx) override;
    virtual int sync_user_stats(const DoutPrefixProvider *dpp, optional_yield y) override;
    virtual int update_container_stats(const DoutPrefixProvider *dpp) override;
    virtual int check_bucket_shards(const DoutPrefixProvider *dpp) override;
    virtual int chown(const DoutPrefixProvider *dpp, User& new_user, optional_yield y) override;
    virtual int put_info(const DoutPrefixProvider *dpp, bool exclusive, ceph::real_time mtime) override;
    virtual bool is_owner(User* user) override;
    virtual int check_empty(const DoutPrefixProvider *dpp, optional_yield y) override;
    virtual int check_quota(const DoutPrefixProvider *dpp, RGWQuota& quota,
        uint64_t obj_size, optional_yield y, bool check_size_only = false) override;
    virtual int merge_and_store_attrs(const DoutPrefixProvider *dpp, Attrs& attrs, optional_yield y) override;
    virtual int try_refresh_info(const DoutPrefixProvider *dpp, ceph::real_time *pmtime) override;
    virtual int read_usage(const DoutPrefixProvider *dpp,
        uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries,
        bool *is_truncated, RGWUsageIter& usage_iter,
        std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override;
    virtual int trim_usage(const DoutPrefixProvider *dpp,
        uint64_t start_epoch, uint64_t end_epoch) override;
    virtual int remove_objs_from_index(const DoutPrefixProvider *dpp,
        std::list<rgw_obj_index_key>& objs_to_unlink) override;
    virtual int check_index(const DoutPrefixProvider *dpp,
        std::map<RGWObjCategory, RGWStorageStats>& existing_stats,
        std::map<RGWObjCategory, RGWStorageStats>& calculated_stats) override;
    virtual int rebuild_index(const DoutPrefixProvider *dpp) override;
    virtual int set_tag_timeout(const DoutPrefixProvider *dpp, uint64_t timeout) override;
    virtual int purge_instance(const DoutPrefixProvider *dpp) override;
    virtual std::unique_ptr<Bucket> clone() override {
      return std::make_unique<MotrBucket>(*this);
    }
    virtual std::unique_ptr<MultipartUpload> get_multipart_upload(
        const std::string& oid,
        std::optional<std::string> upload_id=std::nullopt,
        ACLOwner owner={}, ceph::real_time mtime=real_clock::now()) override;
    virtual int list_multiparts(const DoutPrefixProvider *dpp,
        const std::string& prefix,
        std::string& marker,
        const std::string& delim,
        const int& max_uploads,
        std::vector<std::unique_ptr<MultipartUpload>>& uploads,
        std::map<std::string, bool> *common_prefixes,
        bool *is_truncated) override;
    virtual int abort_multiparts(const DoutPrefixProvider *dpp, CephContext *cct) override;

    friend class MotrStore;
};

// Thin wrapper exposing a zonegroup placement tier's fields through the
// SAL PlacementTier interface.
class MotrPlacementTier: public StorePlacementTier {
  MotrStore* store;
  RGWZoneGroupPlacementTier tier;
public:
  MotrPlacementTier(MotrStore* _store, const RGWZoneGroupPlacementTier& _tier)
    : store(_store), tier(_tier) {}
  virtual ~MotrPlacementTier() = default;

  virtual const std::string& get_tier_type() { return tier.tier_type; }
  virtual const std::string& get_storage_class() { return tier.storage_class; }
  virtual bool retain_head_object() { return tier.retain_head_object; }
  RGWZoneGroupPlacementTier& get_rt() { return tier; }
};

// Read-only view over an RGWZoneGroup held by value; multi-zone lookups
// (get_zone_by_id/name) are not supported and return -1.
class MotrZoneGroup : public StoreZoneGroup {
  MotrStore* store;
  const RGWZoneGroup group;
  std::string empty;  // returned where the interface needs an empty string
public:
  MotrZoneGroup(MotrStore* _store) : store(_store), group() {}
  MotrZoneGroup(MotrStore* _store, const
                RGWZoneGroup& _group) : store(_store), group(_group) {}
  virtual ~MotrZoneGroup() = default;

  virtual const std::string& get_id() const override { return group.get_id(); };
  virtual const std::string& get_name() const override { return group.get_name(); };
  virtual int equals(const std::string& other_zonegroup) const override {
    return group.equals(other_zonegroup);
  };
  /** Get the endpoint from zonegroup, or from master zone if not set */
  virtual const std::string& get_endpoint() const override;
  virtual bool placement_target_exists(std::string& target) const override;
  virtual bool is_master_zonegroup() const override {
    return group.is_master_zonegroup();
  };
  virtual const std::string& get_api_name() const override { return group.api_name; };
  virtual int get_placement_target_names(std::set<std::string>& names) const override;
  virtual const std::string& get_default_placement_name() const override {
    return group.default_placement.name;
  };
  virtual int get_hostnames(std::list<std::string>& names) const override {
    names = group.hostnames;
    return 0;
  };
  virtual int get_s3website_hostnames(std::list<std::string>& names) const override {
    names = group.hostnames_s3website;
    return 0;
  };
  virtual int get_zone_count() const override { return group.zones.size(); }
  virtual int get_placement_tier(const rgw_placement_rule& rule,
      std::unique_ptr<PlacementTier>* tier);
  // Zone lookup by id/name is unsupported in this backend.
  virtual int get_zone_by_id(const std::string& id, std::unique_ptr<Zone>* zone) override {
    return -1;
  }
  virtual int get_zone_by_name(const std::string& name, std::unique_ptr<Zone>* zone) override {
    return -1;
  }
  virtual int list_zones(std::list<std::string>& zone_ids) override {
    zone_ids.clear();
    return 0;
  }
  const RGWZoneGroup& get_group() { return group; }
  virtual std::unique_ptr<ZoneGroup> clone() override {
    return std::make_unique<MotrZoneGroup>(store, group);
  }
};

// Single-zone configuration hard-wired for the Motr POC: one realm, one
// period, and a "default" placement pool carrying only the STANDARD
// storage class.
class MotrZone : public StoreZone {
  protected:
    MotrStore* store;
    RGWRealm *realm{nullptr};
    MotrZoneGroup zonegroup;
    RGWZone *zone_public_config{nullptr}; /* external zone params, e.g., entrypoints, log flags, etc. */
    RGWZoneParams *zone_params{nullptr}; /* internal zone params, e.g., rados pools */
    RGWPeriod *current_period{nullptr};

  public:
    MotrZone(MotrStore* _store) : store(_store), zonegroup(_store) {
      realm = new RGWRealm();
      zone_public_config = new RGWZone();
      zone_params = new RGWZoneParams();
      current_period = new RGWPeriod();

      // XXX: only default and STANDARD supported for now
      RGWZonePlacementInfo info;
      RGWZoneStorageClasses sc;
      sc.set_storage_class("STANDARD", nullptr, nullptr);
      info.storage_classes = sc;
      zone_params->placement_pools["default"] = info;
    }
    MotrZone(MotrStore* _store, MotrZoneGroup _zg) : store(_store), zonegroup(_zg) {
      realm = new RGWRealm();
      // TODO: fetch zonegroup params (eg. id) from provisioner config.
      zonegroup.set_id("0956b174-fe14-4f97-8b50-bb7ec5e1cf62");
      zonegroup.api_name = "default";
      zone_public_config = new RGWZone();
      zone_params = new RGWZoneParams();
      current_period = new RGWPeriod();

      // XXX: only default and STANDARD supported for now
      RGWZonePlacementInfo info;
      RGWZoneStorageClasses sc;
      sc.set_storage_class("STANDARD", nullptr, nullptr);
      info.storage_classes = sc;
      zone_params->placement_pools["default"] = info;
    }
    // NOTE(review): realm, zone_public_config, zone_params and
    // current_period are new'd in the constructors but never deleted,
    // so they leak; the implicit copy operations would also double-share
    // the raw pointers. Consider std::unique_ptr members (rule of zero).
    ~MotrZone() = default;

    virtual std::unique_ptr<Zone> clone() override {
      return std::make_unique<MotrZone>(store);
    }
    virtual ZoneGroup& get_zonegroup() override;
    virtual const std::string& get_id() override;
    virtual const std::string& get_name() const override;
    virtual bool is_writeable() override;
    virtual bool get_redirect_endpoint(std::string* endpoint) override;
    virtual bool has_zonegroup_api(const std::string& api) const override;
    virtual const std::string& get_current_period_id() override;
    virtual const RGWAccessKey& get_system_key() { return zone_params->system_key; }
    virtual const std::string& get_realm_name() { return realm->get_name(); }
    virtual const std::string& get_realm_id() { return realm->get_id(); }
    virtual const std::string_view get_tier_type() { return "rgw"; }
    virtual RGWBucketSyncPolicyHandlerRef get_sync_policy_handler() { return nullptr; }

    friend class MotrStore;
};

// Lua script/package storage for the Motr backend; declarations only,
// bodies live elsewhere in this file.
class MotrLuaManager : public StoreLuaManager {
  MotrStore* store;

  public:
  MotrLuaManager(MotrStore* _s) : store(_s) {
  }
  virtual ~MotrLuaManager() = default;

  /** Get a script named with the given key from the backing store */
  virtual int get_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, std::string& script) override;
  /** Put a script named with the given key to the backing store */
  virtual int put_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key, const std::string& script) override;
  /** Delete a script named with the given key from the backing store */
  virtual int del_script(const DoutPrefixProvider* dpp, optional_yield y, const std::string& key) override;
  /** Add a lua package */
  virtual int add_package(const DoutPrefixProvider* dpp, optional_yield y, const std::string& package_name) override;
  /** Remove a lua package */
  virtual int remove_package(const DoutPrefixProvider* dpp, optional_yield y, const std::string& package_name) override;
  /** List lua packages */
  virtual int list_packages(const DoutPrefixProvider* dpp, optional_yield y, rgw::lua::packages_t& packages) override;
};

// OIDC provider persistence is not implemented for Motr: all three
// operations are success no-ops.
class MotrOIDCProvider : public RGWOIDCProvider {
  MotrStore* store;
  public:
  MotrOIDCProvider(MotrStore* _store) : store(_store) {}
  ~MotrOIDCProvider() = default;

  virtual int store_url(const DoutPrefixProvider *dpp, const std::string& url, bool exclusive, optional_yield y) override { return 0; }
  virtual int read_url(const DoutPrefixProvider *dpp, const std::string& url, const std::string& tenant) override { return 0; }
  virtual int delete_obj(const DoutPrefixProvider *dpp, optional_yield y) override { return 0;}

  void encode(bufferlist& bl) const { RGWOIDCProvider::encode(bl); }
  void decode(bufferlist::const_iterator& bl) { RGWOIDCProvider::decode(bl); }
};

class MotrObject : public StoreObject {
  private:
    MotrStore *store;
    RGWAccessControlPolicy acls;
    RGWObjCategory category;

    // If this object is part of a multipart uploaded one.
    // TODO: do it in another class? MotrPartObject : public MotrObject
    uint64_t part_off;
    uint64_t part_size;
    uint64_t part_num;

  public:
    // Motr object metadata stored in index: enough to re-open the Motr
    // object later (object id, pool version, layout id).
    struct Meta {
      struct m0_uint128 oid = {};
      struct m0_fid pver = {};
      uint64_t layout_id = 0;

      void encode(bufferlist& bl) const {
        ENCODE_START(5, 5, bl);
        encode(oid.u_hi, bl);
        encode(oid.u_lo, bl);
        encode(pver.f_container, bl);
        encode(pver.f_key, bl);
        encode(layout_id, bl);
        ENCODE_FINISH(bl);
      }

      void decode(bufferlist::const_iterator& bl) {
        DECODE_START(5, bl);
        decode(oid.u_hi, bl);
        decode(oid.u_lo, bl);
        decode(pver.f_container, bl);
        decode(pver.f_key, bl);
        decode(layout_id, bl);
        DECODE_FINISH(bl);
      }
    };

    // Open Motr object handle; non-NULL only between open/create and
    // close_mobj() (see is_opened()).
    struct m0_obj *mobj = NULL;
    Meta meta;

    struct MotrReadOp : public ReadOp {
      private:
        MotrObject* source;

        // The set of part objects if the source is
        // a multipart uploaded object.
        std::map<int, std::unique_ptr<MotrObject>> part_objs;

      public:
        MotrReadOp(MotrObject *_source);

        virtual int prepare(optional_yield y, const DoutPrefixProvider* dpp) override;

        /*
         * Both `read` and `iterate` read up through index `end`
         * *inclusive*. The number of bytes that could be returned is
         * `end - ofs + 1`.
         */
        virtual int read(int64_t off, int64_t end, bufferlist& bl,
            optional_yield y, const DoutPrefixProvider* dpp) override;
        virtual int iterate(const DoutPrefixProvider* dpp, int64_t off,
            int64_t end, RGWGetDataCB* cb, optional_yield y) override;

        virtual int get_attr(const DoutPrefixProvider* dpp, const char* name,
            bufferlist& dest, optional_yield y) override;
    };

    struct MotrDeleteOp : public DeleteOp {
      private:
        MotrObject* source;

      public:
        MotrDeleteOp(MotrObject* _source);

        virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override;
    };

    MotrObject() = default;

    MotrObject(MotrStore *_st, const rgw_obj_key& _k)
      : StoreObject(_k), store(_st), acls() {}
    MotrObject(MotrStore *_st, const rgw_obj_key& _k, Bucket* _b)
      : StoreObject(_k, _b), store(_st), acls() {}

    MotrObject(MotrObject& _o) = default;

    virtual ~MotrObject();

    virtual int delete_object(const DoutPrefixProvider* dpp,
        optional_yield y,
        bool prevent_versioning = false) override;
    virtual int copy_object(User* user,
        req_info* info, const rgw_zone_id& source_zone,
        rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket,
        rgw::sal::Bucket* src_bucket,
        const rgw_placement_rule& dest_placement,
        ceph::real_time* src_mtime, ceph::real_time* mtime,
        const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr,
        bool high_precision_time,
        const char* if_match, const char* if_nomatch,
        AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs,
        RGWObjCategory category, uint64_t olh_epoch,
        boost::optional<ceph::real_time> delete_at,
        std::string* version_id, std::string* tag, std::string* etag,
        void (*progress_cb)(off_t, void *), void* progress_data,
        const DoutPrefixProvider* dpp, optional_yield y) override;
    virtual RGWAccessControlPolicy& get_acl(void) override { return acls; }
    virtual int set_acl(const RGWAccessControlPolicy& acl) override { acls = acl; return 0; }
    virtual int get_obj_state(const DoutPrefixProvider* dpp, RGWObjState **state,
        optional_yield y, bool follow_olh = true) override;
    virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs,
        Attrs* delattrs, optional_yield y) override;
    virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp,
        rgw_obj* target_obj = NULL) override;
    virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val,
        optional_yield y, const DoutPrefixProvider* dpp) override;
    virtual int delete_obj_attrs(const DoutPrefixProvider* dpp, const char* attr_name,
        optional_yield y) override;
    virtual bool is_expired() override;
    virtual void gen_rand_obj_instance_name() override;
    virtual std::unique_ptr<Object> clone() override {
      return std::unique_ptr<Object>(new MotrObject(*this));
    }
    virtual std::unique_ptr<MPSerializer> get_serializer(const DoutPrefixProvider *dpp,
        const std::string& lock_name) override;
    virtual int transition(Bucket* bucket,
        const rgw_placement_rule& placement_rule,
        const real_time& mtime,
        uint64_t olh_epoch,
        const DoutPrefixProvider* dpp,
        optional_yield y) override;
    virtual bool placement_rules_match(rgw_placement_rule& r1, rgw_placement_rule& r2) override;
    virtual int dump_obj_layout(const DoutPrefixProvider *dpp, optional_yield y,
        Formatter* f) override;

    /* Swift versioning */
    virtual int swift_versioning_restore(bool& restored,
        const DoutPrefixProvider* dpp) override;
    virtual int swift_versioning_copy(const DoutPrefixProvider* dpp,
        optional_yield y) override;

    /* OPs */
    virtual std::unique_ptr<ReadOp> get_read_op() override;
    virtual std::unique_ptr<DeleteOp> get_delete_op() override;

    /* OMAP */
    virtual int omap_get_vals_by_keys(const DoutPrefixProvider *dpp,
        const std::string& oid,
        const std::set<std::string>& keys,
        Attrs* vals) override;
    virtual int omap_set_val_by_key(const DoutPrefixProvider *dpp,
        const std::string& key, bufferlist& val,
        bool must_exist, optional_yield y) override;
    virtual int chown(User& new_user, const DoutPrefixProvider* dpp,
        optional_yield y) override;

  private:
    //int read_attrs(const DoutPrefixProvider* dpp, Motr::Object::Read &read_op, optional_yield y, rgw_obj* target_obj = nullptr);

  public:
    // Low-level Motr object lifecycle: create/open/delete the backing
    // m0_obj and release the handle.
    bool is_opened() { return mobj != NULL; }
    int create_mobj(const DoutPrefixProvider *dpp, uint64_t sz);
    int open_mobj(const DoutPrefixProvider *dpp);
    int delete_mobj(const DoutPrefixProvider *dpp);
    void close_mobj();
    int write_mobj(const DoutPrefixProvider *dpp, bufferlist&& data, uint64_t offset);
    int read_mobj(const DoutPrefixProvider* dpp, int64_t off, int64_t end, RGWGetDataCB* cb);
    // Best block size for I/O of `len` bytes given the object's layout.
    unsigned get_optimal_bs(unsigned len);

    // Multipart support: enumerate/open/read/delete the per-part objects
    // that together make up this (logical) object.
    int get_part_objs(const DoutPrefixProvider *dpp,
        std::map<int, std::unique_ptr<MotrObject>>& part_objs);
    int open_part_objs(const DoutPrefixProvider* dpp,
        std::map<int, std::unique_ptr<MotrObject>>& part_objs);
    int read_multipart_obj(const DoutPrefixProvider* dpp,
        int64_t off, int64_t end, RGWGetDataCB* cb,
        std::map<int, std::unique_ptr<MotrObject>>& part_objs);
    int delete_part_objs(const DoutPrefixProvider* dpp);
    void set_category(RGWObjCategory _category) {category = _category;}
    int get_bucket_dir_ent(const DoutPrefixProvider *dpp, rgw_bucket_dir_entry& ent);
    int update_version_entries(const DoutPrefixProvider *dpp);
};

// A placeholder locking class for multipart upload.
// TODO: implement it using Motr object locks.
// Stub serializer: try_lock/unlock always succeed, i.e. no real mutual
// exclusion is provided yet (see TODO above).
class MPMotrSerializer : public StoreMPSerializer {

  public:
    MPMotrSerializer(const DoutPrefixProvider *dpp, MotrStore* store, MotrObject* obj, const std::string& lock_name) {}

    virtual int try_lock(const DoutPrefixProvider *dpp, utime_t dur, optional_yield y) override {return 0; }
    virtual int unlock() override { return 0;}
};

// Writer for whole (non-multipart) object uploads: accumulates incoming
// bufferlists and flushes them to the backing Motr object via write().
class MotrAtomicWriter : public StoreWriter {
  protected:
  rgw::sal::MotrStore* store;
  const rgw_user& owner;
  const rgw_placement_rule *ptail_placement_rule;
  uint64_t olh_epoch;
  const std::string& unique_tag;
  MotrObject obj;
  MotrObject old_obj;
  uint64_t total_data_size; // for total data being uploaded
  bufferlist acc_data;  // accumulated data
  uint64_t   acc_off; // accumulated data offset

  // Motr I/O vectors used to describe a write (data buffers, attributes,
  // extents).
  struct m0_bufvec buf;
  struct m0_bufvec attr;
  struct m0_indexvec ext;

  public:
  MotrAtomicWriter(const DoutPrefixProvider *dpp,
          optional_yield y,
          rgw::sal::Object* obj,
          MotrStore* _store,
          const rgw_user& _owner,
          const rgw_placement_rule *_ptail_placement_rule,
          uint64_t _olh_epoch,
          const std::string& _unique_tag);
  ~MotrAtomicWriter() = default;

  // prepare to start processing object data
  virtual int prepare(optional_yield y) override;

  // Process a bufferlist
  virtual int process(bufferlist&& data, uint64_t offset) override;

  // Flush the accumulated data to the Motr object.
  int write();

  // complete the operation and make its result visible to clients
  virtual int complete(size_t accounted_size, const std::string& etag,
                       ceph::real_time *mtime, ceph::real_time set_mtime,
                       std::map<std::string, bufferlist>& attrs,
                       ceph::real_time delete_at,
                       const char *if_match, const char *if_nomatch,
                       const std::string *user_data,
                       rgw_zone_set *zones_trace, bool *canceled,
                       optional_yield y) override;

  // Fill `buf`/`ext` from the accumulated data iterator; returns the
  // number of bytes populated.
  unsigned populate_bvec(unsigned len, bufferlist::iterator &bi);
  void cleanup();
};

// Writer for one part of a multipart upload; each part is stored in its
// own Motr object (see the design comment below).
class MotrMultipartWriter : public StoreWriter {
protected:
  rgw::sal::MotrStore* store;

  // Head object. Non-owning; the upload owns it.
  rgw::sal::Object* head_obj;

  // Part parameters.
  const uint64_t part_num;
  const std::string part_num_str;
  std::unique_ptr<MotrObject> part_obj;
  uint64_t actual_part_size = 0;

public:
  MotrMultipartWriter(const DoutPrefixProvider *dpp,
                      optional_yield y, MultipartUpload* upload,
                      rgw::sal::Object* obj,
                      MotrStore* _store,
                      const rgw_user& owner,
                      const rgw_placement_rule *ptail_placement_rule,
                      uint64_t _part_num, const std::string& part_num_str) :
      StoreWriter(dpp, y), store(_store), head_obj(obj),
      part_num(_part_num), part_num_str(part_num_str)
  {
  }
  ~MotrMultipartWriter() = default;

  // prepare to start processing object data
  virtual int prepare(optional_yield y) override;

  // Process a bufferlist
  virtual int process(bufferlist&& data, uint64_t offset) override;

  // complete the operation and make its result visible to clients
  virtual int complete(size_t accounted_size, const std::string& etag,
                       ceph::real_time *mtime, ceph::real_time set_mtime,
                       std::map<std::string, bufferlist>& attrs,
                       ceph::real_time delete_at,
                       const char *if_match, const char *if_nomatch,
                       const std::string *user_data,
                       rgw_zone_set *zones_trace, bool *canceled,
                       optional_yield y) override;
};

// The implementation of multipart upload in POC roughly follows the
// cortx-s3server's design. Parts are stored in separate Motr objects.
// s3server uses a few auxiliary Motr indices to manage multipart
// related metadata: (1) Bucket multipart index (bucket_nnn_multipart_index)
// which contains metadata that answers questions such as which objects have
// started multipart upload and its upload id. This index is created during
// bucket creation. (2) Object part index (object_nnn_part_index) which stores
// metadata of a part's details (size, pvid, oid...). This index is created in
// MotrMultipartUpload::init(). (3) Extended metadata index
// (bucket_nnn_extended_metadata): once parts have been uploaded and their
// metadata saved in the part index, the user may issue a multipart completion
// request. When processing the completion request, the parts are read from
// the object part index and for each part an entry is created in the extended
// index. The entry for the object is created in the bucket (object list)
// index. The part index is deleted and an entry removed from
// bucket_nnn_multipart_index. Like the bucket multipart index, the bucket
// part extended metadata index is created during bucket creation.
//
// The extended metadata index is used mainly due to fault tolerant
// considerations (how to handle Motr service crash when uploading an object)
// and to avoid creating too many Motr indices (I am not sure I understand
// why many Motr indices is bad.). In our POC, to keep it simple, only 2
// indices are maintained: bucket multipart index and object_nnn_part_index.
//
class MotrMultipartPart : public StoreMultipartPart {
protected:
  RGWUploadPartInfo info;

public:
  MotrObject::Meta meta;

  MotrMultipartPart(RGWUploadPartInfo _info, MotrObject::Meta _meta) :
    info(_info), meta(_meta) {}
  virtual ~MotrMultipartPart() = default;

  virtual uint32_t get_num() { return info.num; }
  virtual uint64_t get_size() { return info.accounted_size; }
  virtual const std::string& get_etag() { return info.etag; }
  virtual ceph::real_time& get_mtime() { return info.modified; }

  RGWObjManifest& get_manifest() { return info.manifest; }

  friend class MotrMultipartUpload;
};

class MotrMultipartUpload : public StoreMultipartUpload {
  MotrStore* store;
  RGWMPObj mp_obj;
  ACLOwner owner;
  ceph::real_time mtime;
  rgw_placement_rule placement;
  RGWObjManifest manifest;

public:
  MotrMultipartUpload(MotrStore* _store, Bucket* _bucket, const std::string& oid,
                      std::optional<std::string> upload_id, ACLOwner _owner,
                      ceph::real_time _mtime)
      : StoreMultipartUpload(_bucket), store(_store), mp_obj(oid, upload_id),
        owner(_owner), mtime(_mtime) {}
  virtual ~MotrMultipartUpload() = default;
  virtual const std::string& get_meta() const { return mp_obj.get_meta(); }
  virtual const std::string& get_key() const { return
mp_obj.get_key(); } virtual const std::string& get_upload_id() const { return mp_obj.get_upload_id(); } virtual const ACLOwner& get_owner() const override { return owner; } virtual ceph::real_time& get_mtime() { return mtime; } virtual std::unique_ptr<rgw::sal::Object> get_meta_obj() override; virtual int init(const DoutPrefixProvider* dpp, optional_yield y, ACLOwner& owner, rgw_placement_rule& dest_placement, rgw::sal::Attrs& attrs) override; virtual int list_parts(const DoutPrefixProvider* dpp, CephContext* cct, int num_parts, int marker, int* next_marker, bool* truncated, bool assume_unsorted = false) override; virtual int abort(const DoutPrefixProvider* dpp, CephContext* cct) override; virtual int complete(const DoutPrefixProvider* dpp, optional_yield y, CephContext* cct, std::map<int, std::string>& part_etags, std::list<rgw_obj_index_key>& remove_objs, uint64_t& accounted_size, bool& compressed, RGWCompressionInfo& cs_info, off_t& off, std::string& tag, ACLOwner& owner, uint64_t olh_epoch, rgw::sal::Object* target_obj) override; virtual int get_info(const DoutPrefixProvider *dpp, optional_yield y, rgw_placement_rule** rule, rgw::sal::Attrs* attrs = nullptr) override; virtual std::unique_ptr<Writer> get_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, uint64_t part_num, const std::string& part_num_str) override; int delete_parts(const DoutPrefixProvider *dpp); }; class MotrStore : public StoreDriver { private: MotrZone zone; RGWSyncModuleInstanceRef sync_module; MotrMetaCache* obj_meta_cache; MotrMetaCache* user_cache; MotrMetaCache* bucket_inst_cache; public: CephContext *cctx; struct m0_client *instance; struct m0_container container; struct m0_realm uber_realm; struct m0_config conf = {}; struct m0_idx_dix_config dix_conf = {}; MotrStore(CephContext *c): zone(this), cctx(c) {} ~MotrStore() { delete obj_meta_cache; delete user_cache; delete bucket_inst_cache; 
} virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) { return 0; } virtual const std::string get_name() const override { return "motr"; } virtual std::unique_ptr<User> get_user(const rgw_user& u) override; virtual std::string get_cluster_id(const DoutPrefixProvider* dpp, optional_yield y) override; virtual int get_user_by_access_key(const DoutPrefixProvider *dpp, const std::string& key, optional_yield y, std::unique_ptr<User>* user) override; virtual int get_user_by_email(const DoutPrefixProvider *dpp, const std::string& email, optional_yield y, std::unique_ptr<User>* user) override; virtual int get_user_by_swift(const DoutPrefixProvider *dpp, const std::string& user_str, optional_yield y, std::unique_ptr<User>* user) override; virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override; virtual int get_bucket(const DoutPrefixProvider *dpp, User* u, const rgw_bucket& b, std::unique_ptr<Bucket>* bucket, optional_yield y) override; virtual int get_bucket(User* u, const RGWBucketInfo& i, std::unique_ptr<Bucket>* bucket) override; virtual int get_bucket(const DoutPrefixProvider *dpp, User* u, const std::string& tenant, const std::string&name, std::unique_ptr<Bucket>* bucket, optional_yield y) override; virtual bool is_meta_master() override; virtual int forward_request_to_master(const DoutPrefixProvider *dpp, User* user, obj_version* objv, bufferlist& in_data, JSONParser *jp, req_info& info, optional_yield y) override; virtual int forward_iam_request_to_master(const DoutPrefixProvider *dpp, const RGWAccessKey& key, obj_version* objv, bufferlist& in_data, RGWXMLDecoder::XMLParser* parser, req_info& info, optional_yield y) override; virtual Zone* get_zone() { return &zone; } virtual std::string zone_unique_id(uint64_t unique_num) override; virtual std::string zone_unique_trans_id(const uint64_t unique_num) override; virtual int get_zonegroup(const std::string& id, std::unique_ptr<ZoneGroup>* zonegroup) override; virtual int 
list_all_zones(const DoutPrefixProvider* dpp, std::list<std::string>& zone_ids) override; virtual int cluster_stat(RGWClusterStat& stats) override; virtual std::unique_ptr<Lifecycle> get_lifecycle(void) override; virtual std::unique_ptr<Notification> get_notification(rgw::sal::Object* obj, rgw::sal::Object* src_obj, req_state* s, rgw::notify::EventType event_type, optional_yield y, const std::string* object_name=nullptr) override; virtual std::unique_ptr<Notification> get_notification(const DoutPrefixProvider* dpp, rgw::sal::Object* obj, rgw::sal::Object* src_obj, rgw::notify::EventType event_type, rgw::sal::Bucket* _bucket, std::string& _user_id, std::string& _user_tenant, std::string& _req_id, optional_yield y) override; virtual RGWLC* get_rgwlc(void) override { return NULL; } virtual RGWCoroutinesManagerRegistry* get_cr_registry() override { return NULL; } virtual int log_usage(const DoutPrefixProvider *dpp, std::map<rgw_user_bucket, RGWUsageBatch>& usage_info) override; virtual int log_op(const DoutPrefixProvider *dpp, std::string& oid, bufferlist& bl) override; virtual int register_to_service_map(const DoutPrefixProvider *dpp, const std::string& daemon_type, const std::map<std::string, std::string>& meta) override; virtual void get_ratelimit(RGWRateLimitInfo& bucket_ratelimit, RGWRateLimitInfo& user_ratelimit, RGWRateLimitInfo& anon_ratelimit) override; virtual void get_quota(RGWQuota& quota) override; virtual int set_buckets_enabled(const DoutPrefixProvider *dpp, std::vector<rgw_bucket>& buckets, bool enabled) override; virtual int get_sync_policy_handler(const DoutPrefixProvider *dpp, std::optional<rgw_zone_id> zone, std::optional<rgw_bucket> bucket, RGWBucketSyncPolicyHandlerRef *phandler, optional_yield y) override; virtual RGWDataSyncStatusManager* get_data_sync_manager(const rgw_zone_id& source_zone) override; virtual void wakeup_meta_sync_shards(std::set<int>& shard_ids) override { return; } virtual void wakeup_data_sync_shards(const DoutPrefixProvider 
*dpp, const rgw_zone_id& source_zone, boost::container::flat_map<int, boost::container::flat_set<rgw_data_notify_entry>>& shard_ids) override {} virtual int clear_usage(const DoutPrefixProvider *dpp) override { return 0; } virtual int read_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, std::map<rgw_user_bucket, rgw_usage_log_entry>& usage) override; virtual int trim_all_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override; virtual int get_config_key_val(std::string name, bufferlist* bl) override; virtual int meta_list_keys_init(const DoutPrefixProvider *dpp, const std::string& section, const std::string& marker, void** phandle) override; virtual int meta_list_keys_next(const DoutPrefixProvider *dpp, void* handle, int max, std::list<std::string>& keys, bool* truncated) override; virtual void meta_list_keys_complete(void* handle) override; virtual std::string meta_get_marker(void *handle) override; virtual int meta_remove(const DoutPrefixProvider *dpp, std::string& metadata_key, optional_yield y) override; virtual const RGWSyncModuleInstanceRef& get_sync_module() { return sync_module; } virtual std::string get_host_id() { return ""; } virtual std::unique_ptr<LuaManager> get_lua_manager() override; virtual std::unique_ptr<RGWRole> get_role(std::string name, std::string tenant, std::string path="", std::string trust_policy="", std::string max_session_duration_str="", std::multimap<std::string, std::string> tags={}) override; virtual std::unique_ptr<RGWRole> get_role(const RGWRoleInfo& info) override; virtual std::unique_ptr<RGWRole> get_role(std::string id) override; virtual int get_roles(const DoutPrefixProvider *dpp, optional_yield y, const std::string& path_prefix, const std::string& tenant, std::vector<std::unique_ptr<RGWRole>>& roles) override; virtual std::unique_ptr<RGWOIDCProvider> get_oidc_provider() override; virtual int 
get_oidc_providers(const DoutPrefixProvider *dpp, const std::string& tenant, std::vector<std::unique_ptr<RGWOIDCProvider>>& providers) override; virtual std::unique_ptr<Writer> get_append_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, const std::string& unique_tag, uint64_t position, uint64_t *cur_accounted_size) override; virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, uint64_t olh_epoch, const std::string& unique_tag) override; virtual const std::string& get_compression_type(const rgw_placement_rule& rule) override; virtual bool valid_placement(const rgw_placement_rule& rule) override; virtual void finalize(void) override; virtual CephContext *ctx(void) override { return cctx; } virtual void register_admin_apis(RGWRESTMgr* mgr) override { }; int open_idx(struct m0_uint128 *id, bool create, struct m0_idx *out); void close_idx(struct m0_idx *idx) { m0_idx_fini(idx); } int do_idx_op(struct m0_idx *, enum m0_idx_opcode opcode, std::vector<uint8_t>& key, std::vector<uint8_t>& val, bool update = false); int do_idx_next_op(struct m0_idx *idx, std::vector<std::vector<uint8_t>>& key_vec, std::vector<std::vector<uint8_t>>& val_vec); int next_query_by_name(std::string idx_name, std::vector<std::string>& key_str_vec, std::vector<bufferlist>& val_bl_vec, std::string prefix="", std::string delim=""); void index_name_to_motr_fid(std::string iname, struct m0_uint128 *fid); int open_motr_idx(struct m0_uint128 *id, struct m0_idx *idx); int create_motr_idx_by_name(std::string iname); int delete_motr_idx_by_name(std::string iname); int do_idx_op_by_name(std::string idx_name, enum m0_idx_opcode opcode, std::string key_str, bufferlist &bl, bool update=true); int check_n_create_global_indices(); int store_access_key(const DoutPrefixProvider 
*dpp, optional_yield y, MotrAccessKey access_key); int delete_access_key(const DoutPrefixProvider *dpp, optional_yield y, std::string access_key); int store_email_info(const DoutPrefixProvider *dpp, optional_yield y, MotrEmailInfo& email_info); int init_metadata_cache(const DoutPrefixProvider *dpp, CephContext *cct); MotrMetaCache* get_obj_meta_cache() {return obj_meta_cache;} MotrMetaCache* get_user_cache() {return user_cache;} MotrMetaCache* get_bucket_inst_cache() {return bucket_inst_cache;} }; struct obj_time_weight { real_time mtime; uint32_t zone_short_id; uint64_t pg_ver; bool high_precision; obj_time_weight() : zone_short_id(0), pg_ver(0), high_precision(false) {} bool compare_low_precision(const obj_time_weight& rhs) { struct timespec l = ceph::real_clock::to_timespec(mtime); struct timespec r = ceph::real_clock::to_timespec(rhs.mtime); l.tv_nsec = 0; r.tv_nsec = 0; if (l > r) { return false; } if (l < r) { return true; } if (!zone_short_id || !rhs.zone_short_id) { /* don't compare zone ids, if one wasn't provided */ return false; } if (zone_short_id != rhs.zone_short_id) { return (zone_short_id < rhs.zone_short_id); } return (pg_ver < rhs.pg_ver); } bool operator<(const obj_time_weight& rhs) { if (!high_precision || !rhs.high_precision) { return compare_low_precision(rhs); } if (mtime > rhs.mtime) { return false; } if (mtime < rhs.mtime) { return true; } if (!zone_short_id || !rhs.zone_short_id) { /* don't compare zone ids, if one wasn't provided */ return false; } if (zone_short_id != rhs.zone_short_id) { return (zone_short_id < rhs.zone_short_id); } return (pg_ver < rhs.pg_ver); } void init(const real_time& _mtime, uint32_t _short_id, uint64_t _pg_ver) { mtime = _mtime; zone_short_id = _short_id; pg_ver = _pg_ver; } void init(RGWObjState *state) { mtime = state->mtime; zone_short_id = state->zone_short_id; pg_ver = state->pg_ver; } }; inline std::ostream& operator<<(std::ostream& out, const obj_time_weight &o) { out << o.mtime; if (o.zone_short_id != 0 
|| o.pg_ver != 0) { out << "[zid=" << o.zone_short_id << ", pgv=" << o.pg_ver << "]"; } return out; } } // namespace rgw::sal
51,012
41.724456
210
h
null
ceph-main/src/rgw/rgw_sal_store.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "rgw_sal.h" namespace rgw { namespace sal { class StoreDriver : public Driver { public: StoreDriver() {} virtual ~StoreDriver() = default; virtual uint64_t get_new_req_id() override { return ceph::util::generate_random_number<uint64_t>(); } int read_topics(const std::string& tenant, rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return -EOPNOTSUPP;} int write_topics(const std::string& tenant, const rgw_pubsub_topics& topics, RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return -ENOENT;} int remove_topics(const std::string& tenant, RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return -ENOENT;} }; class StoreUser : public User { protected: RGWUserInfo info; RGWObjVersionTracker objv_tracker; Attrs attrs; public: StoreUser() : info() {} StoreUser(const rgw_user& _u) : info() { info.user_id = _u; } StoreUser(const RGWUserInfo& _i) : info(_i) {} StoreUser(StoreUser& _o) = default; virtual ~StoreUser() = default; virtual std::string& get_display_name() override { return info.display_name; } virtual const std::string& get_tenant() override { return info.user_id.tenant; } virtual void set_tenant(std::string& _t) override { info.user_id.tenant = _t; } virtual const std::string& get_ns() override { return info.user_id.ns; } virtual void set_ns(std::string& _ns) override { info.user_id.ns = _ns; } virtual void clear_ns() override { info.user_id.ns.clear(); } virtual const rgw_user& get_id() 
const override { return info.user_id; } virtual uint32_t get_type() const override { return info.type; } virtual int32_t get_max_buckets() const override { return info.max_buckets; } virtual void set_max_buckets(int32_t _max_buckets) override { info.max_buckets = _max_buckets; } virtual const RGWUserCaps& get_caps() const override { return info.caps; } virtual RGWObjVersionTracker& get_version_tracker() override { return objv_tracker; } virtual Attrs& get_attrs() override { return attrs; } virtual void set_attrs(Attrs& _attrs) override { attrs = _attrs; } virtual bool empty() const override { return info.user_id.id.empty(); } virtual RGWUserInfo& get_info() override { return info; } virtual void set_info(RGWQuotaInfo& _quota) override { info.quota.user_quota.max_size = _quota.max_size; info.quota.user_quota.max_objects = _quota.max_objects; } virtual void print(std::ostream& out) const override { out << info.user_id; } friend class StoreBucket; }; class StoreBucket : public Bucket { protected: RGWBucketEnt ent; RGWBucketInfo info; User* owner = nullptr; Attrs attrs; obj_version bucket_version; ceph::real_time mtime; public: StoreBucket() = default; StoreBucket(User* _u) : owner(_u) { } StoreBucket(const rgw_bucket& _b) { ent.bucket = _b; info.bucket = _b; } StoreBucket(const RGWBucketEnt& _e) : ent(_e) { info.bucket = ent.bucket; info.placement_rule = ent.placement_rule; info.creation_time = ent.creation_time; } StoreBucket(const RGWBucketInfo& _i) : info(_i) { ent.bucket = info.bucket; ent.placement_rule = info.placement_rule; ent.creation_time = info.creation_time; } StoreBucket(const rgw_bucket& _b, User* _u) : owner(_u) { ent.bucket = _b; info.bucket = _b; } StoreBucket(const RGWBucketEnt& _e, User* _u) : ent(_e), owner(_u) { info.bucket = ent.bucket; info.placement_rule = ent.placement_rule; info.creation_time = ent.creation_time; } StoreBucket(const RGWBucketInfo& _i, User* _u) : info(_i), owner(_u) { ent.bucket = info.bucket; ent.placement_rule = 
info.placement_rule; ent.creation_time = info.creation_time; } virtual ~StoreBucket() = default; virtual Attrs& get_attrs(void) override { return attrs; } virtual int set_attrs(Attrs a) override { attrs = a; return 0; } virtual void set_owner(rgw::sal::User* _owner) override { owner = _owner; } virtual void set_count(uint64_t _count) override { ent.count = _count; } virtual void set_size(uint64_t _size) override { ent.size = _size; } virtual User* get_owner(void) override { return owner; }; virtual ACLOwner get_acl_owner(void) override { return ACLOwner(info.owner); }; virtual bool empty() const override { return info.bucket.name.empty(); } virtual const std::string& get_name() const override { return info.bucket.name; } virtual const std::string& get_tenant() const override { return info.bucket.tenant; } virtual const std::string& get_marker() const override { return info.bucket.marker; } virtual const std::string& get_bucket_id() const override { return info.bucket.bucket_id; } virtual size_t get_size() const override { return ent.size; } virtual size_t get_size_rounded() const override { return ent.size_rounded; } virtual uint64_t get_count() const override { return ent.count; } virtual rgw_placement_rule& get_placement_rule() override { return info.placement_rule; } virtual ceph::real_time& get_creation_time() override { return info.creation_time; } virtual ceph::real_time& get_modification_time() override { return mtime; } virtual obj_version& get_version() override { return bucket_version; } virtual void set_version(obj_version &ver) override { bucket_version = ver; } virtual bool versioned() override { return info.versioned(); } virtual bool versioning_enabled() override { return info.versioning_enabled(); } virtual rgw_bucket& get_key() override { return info.bucket; } virtual RGWBucketInfo& get_info() override { return info; } virtual void print(std::ostream& out) const override { out << info.bucket; } virtual bool operator==(const Bucket& b) const 
override { if (typeid(*this) != typeid(b)) { return false; } const StoreBucket& sb = dynamic_cast<const StoreBucket&>(b); return (info.bucket.tenant == sb.info.bucket.tenant) && (info.bucket.name == sb.info.bucket.name) && (info.bucket.bucket_id == sb.info.bucket.bucket_id); } virtual bool operator!=(const Bucket& b) const override { if (typeid(*this) != typeid(b)) { return false; } const StoreBucket& sb = dynamic_cast<const StoreBucket&>(b); return (info.bucket.tenant != sb.info.bucket.tenant) || (info.bucket.name != sb.info.bucket.name) || (info.bucket.bucket_id != sb.info.bucket.bucket_id); } int read_topics(rgw_pubsub_bucket_topics& notifications, RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return 0;} int write_topics(const rgw_pubsub_bucket_topics& notifications, RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return 0;} int remove_topics(RGWObjVersionTracker* objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override {return 0;} friend class BucketList; protected: virtual void set_ent(RGWBucketEnt& _ent) { ent = _ent; info.bucket = ent.bucket; info.placement_rule = ent.placement_rule; } }; class StoreObject : public Object { protected: RGWObjState state; Bucket* bucket = nullptr; bool delete_marker{false}; public: StoreObject() = default; StoreObject(const rgw_obj_key& _k) { state.obj.key = _k; } StoreObject(const rgw_obj_key& _k, Bucket* _b) : bucket(_b) { state.obj.init(_b->get_key(), _k); } StoreObject(const StoreObject& _o) = default; virtual ~StoreObject() = default; virtual void set_atomic() override { state.is_atomic = true; } virtual bool is_atomic() override { return state.is_atomic; } virtual void set_prefetch_data() override { state.prefetch_data = true; } virtual bool is_prefetch_data() override { return state.prefetch_data; } virtual void set_compressed() override { state.compressed = true; } virtual bool is_compressed() override { 
return state.compressed; } virtual void invalidate() override { rgw_obj obj = state.obj; bool is_atomic = state.is_atomic; bool prefetch_data = state.prefetch_data; bool compressed = state.compressed; state = RGWObjState(); state.obj = obj; state.is_atomic = is_atomic; state.prefetch_data = prefetch_data; state.compressed = compressed; } virtual bool empty() const override { return state.obj.empty(); } virtual const std::string &get_name() const override { return state.obj.key.name; } virtual void set_obj_state(RGWObjState& _state) override { state = _state; } virtual Attrs& get_attrs(void) override { return state.attrset; } virtual const Attrs& get_attrs(void) const override { return state.attrset; } virtual int set_attrs(Attrs a) override { state.attrset = a; state.has_attrs = true; return 0; } virtual bool has_attrs(void) override { return state.has_attrs; } virtual ceph::real_time get_mtime(void) const override { return state.mtime; } virtual uint64_t get_obj_size(void) const override { return state.size; } virtual Bucket* get_bucket(void) const override { return bucket; } virtual void set_bucket(Bucket* b) override { bucket = b; state.obj.bucket = b->get_key(); } virtual std::string get_hash_source(void) override { return state.obj.index_hash_source; } virtual void set_hash_source(std::string s) override { state.obj.index_hash_source = s; } virtual std::string get_oid(void) const override { return state.obj.key.get_oid(); } virtual bool get_delete_marker(void) override { return delete_marker; } virtual bool get_in_extra_data(void) override { return state.obj.is_in_extra_data(); } virtual void set_in_extra_data(bool i) override { state.obj.set_in_extra_data(i); } int range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end); virtual void set_obj_size(uint64_t s) override { state.size = s; } virtual void set_name(const std::string& n) override { state.obj.key = n; } virtual void set_key(const rgw_obj_key& k) override { state.obj.key = k; } virtual rgw_obj 
get_obj(void) const override { return state.obj; } virtual rgw_obj_key& get_key() override { return state.obj.key; } virtual void set_instance(const std::string &i) override { state.obj.key.set_instance(i); } virtual const std::string &get_instance() const override { return state.obj.key.instance; } virtual bool have_instance(void) override { return state.obj.key.have_instance(); } virtual void clear_instance() override { state.obj.key.instance.clear(); } virtual int transition_to_cloud(Bucket* bucket, rgw::sal::PlacementTier* tier, rgw_bucket_dir_entry& o, std::set<std::string>& cloud_targets, CephContext* cct, bool update_object, const DoutPrefixProvider* dpp, optional_yield y) override { /* Return failure here, so stores which don't transition to cloud will * work with lifecycle */ return -1; } virtual int get_torrent_info(const DoutPrefixProvider* dpp, optional_yield y, bufferlist& bl) override { const auto& attrs = get_attrs(); if (auto i = attrs.find(RGW_ATTR_TORRENT); i != attrs.end()) { bl = i->second; return 0; } return -ENOENT; } virtual void print(std::ostream& out) const override { if (bucket) out << bucket << ":"; out << state.obj.key; } }; class StoreMultipartPart : public MultipartPart { protected: std::string oid; public: StoreMultipartPart() = default; virtual ~StoreMultipartPart() = default; }; class StoreMultipartUpload : public MultipartUpload { protected: Bucket* bucket; std::map<uint32_t, std::unique_ptr<MultipartPart>> parts; jspan_context trace_ctx{false, false}; public: StoreMultipartUpload(Bucket* _bucket) : bucket(_bucket) {} virtual ~StoreMultipartUpload() = default; virtual std::map<uint32_t, std::unique_ptr<MultipartPart>>& get_parts() override { return parts; } virtual const jspan_context& get_trace() override { return trace_ctx; } virtual void print(std::ostream& out) const override { out << get_meta(); if (!get_upload_id().empty()) out << ":" << get_upload_id(); } }; class StoreMPSerializer : public MPSerializer { protected: bool 
locked; std::string oid; public: StoreMPSerializer() : locked(false) {} StoreMPSerializer(std::string _oid) : locked(false), oid(_oid) {} virtual ~StoreMPSerializer() = default; virtual void clear_locked() override { locked = false; } virtual bool is_locked() override { return locked; } virtual void print(std::ostream& out) const override { out << oid; } }; class StoreLCSerializer : public LCSerializer { protected: std::string oid; public: StoreLCSerializer() {} StoreLCSerializer(std::string _oid) : oid(_oid) {} virtual ~StoreLCSerializer() = default; virtual void print(std::ostream& out) const override { out << oid; } }; class StoreLifecycle : public Lifecycle { public: struct StoreLCHead : LCHead { time_t start_date{0}; time_t shard_rollover_date{0}; std::string marker; StoreLCHead() = default; StoreLCHead(time_t _start_date, time_t _rollover_date, std::string& _marker) : start_date(_start_date), shard_rollover_date(_rollover_date), marker(_marker) {} StoreLCHead& operator=(LCHead& _h) { start_date = _h.get_start_date(); shard_rollover_date = _h.get_shard_rollover_date(); marker = _h.get_marker(); return *this; } virtual time_t& get_start_date() override { return start_date; } virtual void set_start_date(time_t _date) override { start_date = _date; } virtual std::string& get_marker() override { return marker; } virtual void set_marker(const std::string& _marker) override { marker = _marker; } virtual time_t& get_shard_rollover_date() override { return shard_rollover_date; } virtual void set_shard_rollover_date(time_t _date) override { shard_rollover_date = _date; } }; struct StoreLCEntry : LCEntry { std::string bucket; std::string oid; uint64_t start_time{0}; uint32_t status{0}; StoreLCEntry() = default; StoreLCEntry(std::string& _bucket, uint64_t _time, uint32_t _status) : bucket(_bucket), start_time(_time), status(_status) {} StoreLCEntry(std::string& _bucket, std::string _oid, uint64_t _time, uint32_t _status) : bucket(_bucket), oid(_oid), start_time(_time), 
status(_status) {} StoreLCEntry(const StoreLCEntry& _e) = default; StoreLCEntry& operator=(LCEntry& _e) { bucket = _e.get_bucket(); oid = _e.get_oid(); start_time = _e.get_start_time(); status = _e.get_status(); return *this; } virtual std::string& get_bucket() override { return bucket; } virtual void set_bucket(const std::string& _bucket) override { bucket = _bucket; } virtual std::string& get_oid() override { return oid; } virtual void set_oid(const std::string& _oid) override { oid = _oid; } virtual uint64_t get_start_time() override { return start_time; } virtual void set_start_time(uint64_t _time) override { start_time = _time; } virtual uint32_t get_status() override { return status; } virtual void set_status(uint32_t _status) override { status = _status; } virtual void print(std::ostream& out) const override { out << bucket << ":" << oid << ":" << start_time << ":" << status; } }; StoreLifecycle() = default; virtual ~StoreLifecycle() = default; virtual std::unique_ptr<LCEntry> get_entry() override { return std::make_unique<StoreLCEntry>(); } using Lifecycle::get_entry; }; class StoreNotification : public Notification { protected: Object* obj; Object* src_obj; rgw::notify::EventType event_type; public: StoreNotification(Object* _obj, Object* _src_obj, rgw::notify::EventType _type) : obj(_obj), src_obj(_src_obj), event_type(_type) {} virtual ~StoreNotification() = default; }; class StoreWriter : public Writer { protected: const DoutPrefixProvider* dpp; public: StoreWriter(const DoutPrefixProvider *_dpp, optional_yield y) : dpp(_dpp) {} virtual ~StoreWriter() = default; }; class StorePlacementTier : public PlacementTier { public: virtual ~StorePlacementTier() = default; }; class StoreZoneGroup : public ZoneGroup { public: virtual ~StoreZoneGroup() = default; }; class StoreZone : public Zone { public: virtual ~StoreZone() = default; }; class StoreLuaManager : public LuaManager { public: virtual ~StoreLuaManager() = default; }; } } // namespace rgw::sal
17,406
37.941834
163
h
null
ceph-main/src/rgw/rgw_signal.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "rgw_signal.h" #include "global/signal_handler.h" #include "common/safe_io.h" #include "common/errno.h" #include "rgw_main.h" #include "rgw_log.h" #ifdef HAVE_SYS_PRCTL_H #include <sys/prctl.h> #endif #define dout_subsys ceph_subsys_rgw #define dout_context g_ceph_context static int signal_fd[2] = {0, 0}; namespace rgw { namespace signal { void sighup_handler(int signum) { if (rgw::AppMain::ops_log_file != nullptr) { rgw::AppMain::ops_log_file->reopen(); } g_ceph_context->reopen_logs(); } /* sighup_handler */ void signal_shutdown() { int val = 0; int ret = write(signal_fd[0], (char *)&val, sizeof(val)); if (ret < 0) { derr << "ERROR: " << __func__ << ": write() returned " << cpp_strerror(errno) << dendl; } } /* signal_shutdown */ void wait_shutdown() { int val; int r = safe_read_exact(signal_fd[1], &val, sizeof(val)); if (r < 0) { derr << "safe_read_exact returned with error" << dendl; } } /* wait_shutdown */ int signal_fd_init() { return socketpair(AF_UNIX, SOCK_STREAM, 0, signal_fd); } /* signal_fd_init */ void signal_fd_finalize() { close(signal_fd[0]); close(signal_fd[1]); } /* signal_fd_finalize */ void handle_sigterm(int signum) { dout(1) << __func__ << dendl; // send a signal to make fcgi's accept(2) wake up. unfortunately the // initial signal often isn't sufficient because we race with accept's // check of the flag wet by ShutdownPending() above. if (signum != SIGUSR1) { signal_shutdown(); // safety net in case we get stuck doing an orderly shutdown. 
uint64_t secs = g_ceph_context->_conf->rgw_exit_timeout_secs; if (secs) alarm(secs); dout(1) << __func__ << " set alarm for " << secs << dendl; } } /* handle_sigterm */ }} /* namespace rgw::signal */
2,217
23.108696
72
cc
null
ceph-main/src/rgw/rgw_signal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once namespace rgw { namespace signal { void signal_shutdown(); void wait_shutdown(); int signal_fd_init(); void signal_fd_finalize(); void handle_sigterm(int signum); void handle_sigterm(int signum); void sighup_handler(int signum); } // namespace signal } // namespace rgw
699
20.875
70
h
null
ceph-main/src/rgw/rgw_string.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_string.h" static bool char_eq(char c1, char c2) { return c1 == c2; } static bool ci_char_eq(char c1, char c2) { return tolower(c1) == tolower(c2); } bool match_wildcards(std::string_view pattern, std::string_view input, uint32_t flags) { const auto eq = (flags & MATCH_CASE_INSENSITIVE) ? &ci_char_eq : &char_eq; auto it1 = pattern.begin(); auto it2 = input.begin(); while (true) { if (it1 == pattern.end()) return it2 == input.end(); if (*it1 == '*') { if (it1 + 1 == pattern.end()) return true; if (it2 == input.end() || eq(*(it1 + 1), *it2)) ++it1; else ++it2; continue; } if (it2 == input.end()) return false; if (*it1 == '?' || eq(*it1, *it2)) { ++it1; ++it2; continue; } return false; } return false; }
976
20.23913
76
cc
null
ceph-main/src/rgw/rgw_string.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <errno.h> #include <stdlib.h> #include <limits.h> #include <string_view> #include <string> #include <stdexcept> #include <boost/container/small_vector.hpp> struct ltstr_nocase { bool operator()(const std::string& s1, const std::string& s2) const { return strcasecmp(s1.c_str(), s2.c_str()) < 0; } }; static inline int stringcasecmp(const std::string& s1, const std::string& s2) { return strcasecmp(s1.c_str(), s2.c_str()); } static inline int stringcasecmp(const std::string& s1, const char *s2) { return strcasecmp(s1.c_str(), s2); } static inline int stringcasecmp(const std::string& s1, int ofs, int size, const std::string& s2) { return strncasecmp(s1.c_str() + ofs, s2.c_str(), size); } static inline int stringtoll(const std::string& s, int64_t *val) { char *end; long long result = strtoll(s.c_str(), &end, 10); if (result == LLONG_MAX) return -EINVAL; if (*end) return -EINVAL; *val = (int64_t)result; return 0; } static inline int stringtoull(const std::string& s, uint64_t *val) { char *end; unsigned long long result = strtoull(s.c_str(), &end, 10); if (result == ULLONG_MAX) return -EINVAL; if (*end) return -EINVAL; *val = (uint64_t)result; return 0; } static inline int stringtol(const std::string& s, int32_t *val) { char *end; long result = strtol(s.c_str(), &end, 10); if (result == LONG_MAX) return -EINVAL; if (*end) return -EINVAL; *val = (int32_t)result; return 0; } static inline int stringtoul(const std::string& s, uint32_t *val) { char *end; unsigned long result = strtoul(s.c_str(), &end, 10); if (result == ULONG_MAX) return -EINVAL; if (*end) return -EINVAL; *val = (uint32_t)result; return 0; } /* A converter between std::string_view and null-terminated C-strings. * It copies memory while trying to utilize the local memory instead of * issuing dynamic allocations. 
*/ template<std::size_t N = 128> static inline boost::container::small_vector<char, N> sview2cstr(const std::string_view& sv) { boost::container::small_vector<char, N> cstr; cstr.reserve(sv.size() + sizeof('\0')); cstr.assign(std::begin(sv), std::end(sv)); cstr.push_back('\0'); return cstr; } /* std::strlen() isn't guaranteed to be computable at compile-time. Although * newer GCCs actually do that, Clang doesn't. Please be aware this function * IS NOT A DROP-IN REPLACEMENT FOR STRLEN -- it returns a different result * for strings having \0 in the middle. */ template<size_t N> static inline constexpr size_t sarrlen(const char (&arr)[N]) { return N - 1; } namespace detail { // variadic sum() to add up string lengths for reserve() static inline constexpr size_t sum() { return 0; } template <typename... Args> constexpr size_t sum(size_t v, Args... args) { return v + sum(args...); } // traits for string_size() template <typename T> struct string_traits { static constexpr size_t size(const T& s) { return s.size(); } }; // specializations for char*/const char* use strlen() template <> struct string_traits<const char*> { static size_t size(const char* s) { return std::strlen(s); } }; template <> struct string_traits<char*> : string_traits<const char*> {}; // constexpr specializations for char[]/const char[] template <std::size_t N> struct string_traits<const char[N]> { static constexpr size_t size_(const char* s, size_t i) { return i < N ? (*(s + i) == '\0' ? i : size_(s, i + 1)) : throw std::invalid_argument("Unterminated string constant."); } static constexpr size_t size(const char(&s)[N]) { return size_(s, 0); } }; template <std::size_t N> struct string_traits<char[N]> : string_traits<const char[N]> {}; // helpers for string_cat_reserve() static inline void append_to(std::string& s) {} template <typename... Args> void append_to(std::string& s, const std::string_view& v, const Args&... 
args) { s.append(v.begin(), v.end()); append_to(s, args...); } // helpers for string_join_reserve() static inline void join_next(std::string& s, const std::string_view& d) {} template <typename... Args> void join_next(std::string& s, const std::string_view& d, const std::string_view& v, const Args&... args) { s.append(d.begin(), d.end()); s.append(v.begin(), v.end()); join_next(s, d, args...); } static inline void join(std::string& s, const std::string_view& d) {} template <typename... Args> void join(std::string& s, const std::string_view& d, const std::string_view& v, const Args&... args) { s.append(v.begin(), v.end()); join_next(s, d, args...); } } // namespace detail /// return the length of a c string, string literal, or string type template <typename T> constexpr size_t string_size(const T& s) { return detail::string_traits<T>::size(s); } /// concatenates the given string arguments, returning as a std::string that /// gets preallocated with reserve() template <typename... Args> std::string string_cat_reserve(const Args&... args) { size_t total_size = detail::sum(string_size(args)...); std::string result; result.reserve(total_size); detail::append_to(result, args...); return result; } /// joins the given string arguments with a delimiter, returning as a /// std::string that gets preallocated with reserve() template <typename... Args> std::string string_join_reserve(const std::string_view& delim, const Args&... args) { size_t delim_size = delim.size() * std::max<ssize_t>(0, sizeof...(args) - 1); size_t total_size = detail::sum(string_size(args)...) + delim_size; std::string result; result.reserve(total_size); detail::join(result, delim, args...); return result; } template <typename... Args> std::string string_join_reserve(char delim, const Args&... 
args) { return string_join_reserve(std::string_view{&delim, 1}, args...); } /// use case-insensitive comparison in match_wildcards() static constexpr uint32_t MATCH_CASE_INSENSITIVE = 0x01; /// attempt to match the given input string with the pattern, which may contain /// the wildcard characters * and ? extern bool match_wildcards(std::string_view pattern, std::string_view input, uint32_t flags = 0);
6,357
25.940678
96
h
null
ceph-main/src/rgw/rgw_sts.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <errno.h> #include <ctime> #include <regex> #include <boost/format.hpp> #include <boost/algorithm/string/replace.hpp> #include "common/errno.h" #include "common/Formatter.h" #include "common/ceph_json.h" #include "common/ceph_time.h" #include "auth/Crypto.h" #include "include/ceph_fs.h" #include "common/iso_8601.h" #include "include/types.h" #include "rgw_string.h" #include "rgw_b64.h" #include "rgw_common.h" #include "rgw_tools.h" #include "rgw_role.h" #include "rgw_user.h" #include "rgw_iam_policy.h" #include "rgw_sts.h" #include "rgw_sal.h" #include "rgw_sal_rados.h" #define dout_subsys ceph_subsys_rgw using namespace std; namespace STS { void Credentials::dump(Formatter *f) const { encode_json("AccessKeyId", accessKeyId , f); encode_json("Expiration", expiration , f); encode_json("SecretAccessKey", secretAccessKey , f); encode_json("SessionToken", sessionToken , f); } int Credentials::generateCredentials(const DoutPrefixProvider *dpp, CephContext* cct, const uint64_t& duration, const boost::optional<std::string>& policy, const boost::optional<std::string>& roleId, const boost::optional<std::string>& role_session, const boost::optional<std::vector<std::string>>& token_claims, const boost::optional<std::vector<std::pair<std::string,std::string>>>& session_princ_tags, boost::optional<rgw_user> user, rgw::auth::Identity* identity) { uuid_d accessKey, secretKey; char accessKeyId_str[MAX_ACCESS_KEY_LEN], secretAccessKey_str[MAX_SECRET_KEY_LEN]; //AccessKeyId gen_rand_alphanumeric_plain(cct, accessKeyId_str, sizeof(accessKeyId_str)); accessKeyId = accessKeyId_str; //SecretAccessKey gen_rand_alphanumeric_upper(cct, secretAccessKey_str, sizeof(secretAccessKey_str)); secretAccessKey = secretAccessKey_str; //Expiration real_clock::time_point t = real_clock::now(); real_clock::time_point exp = t + std::chrono::seconds(duration); expiration = 
ceph::to_iso_8601(exp); //Session Token - Encrypt using AES auto* cryptohandler = cct->get_crypto_handler(CEPH_CRYPTO_AES); if (! cryptohandler) { ldpp_dout(dpp, 0) << "ERROR: No AES cryto handler found !" << dendl; return -EINVAL; } string secret_s = cct->_conf->rgw_sts_key; buffer::ptr secret(secret_s.c_str(), secret_s.length()); int ret = 0; if (ret = cryptohandler->validate_secret(secret); ret < 0) { ldpp_dout(dpp, 0) << "ERROR: Invalid rgw sts key, please ensure its length is 16" << dendl; return ret; } string error; std::unique_ptr<CryptoKeyHandler> keyhandler(cryptohandler->get_key_handler(secret, error)); if (! keyhandler) { ldpp_dout(dpp, 0) << "ERROR: No Key handler found !" << dendl; return -EINVAL; } error.clear(); //Storing policy and roleId as part of token, so that they can be extracted // from the token itself for policy evaluation. SessionToken token; //authentication info token.access_key_id = accessKeyId; token.secret_access_key = secretAccessKey; token.expiration = expiration; token.issued_at = ceph::to_iso_8601(t); //Authorization info if (policy) token.policy = *policy; else token.policy = {}; if (roleId) token.roleId = *roleId; else token.roleId = {}; if (user) token.user = *user; else { rgw_user u({}, {}, {}); token.user = u; } if (token_claims) { token.token_claims = std::move(*token_claims); } if (identity) { token.acct_name = identity->get_acct_name(); token.perm_mask = identity->get_perm_mask(); token.is_admin = identity->is_admin_of(token.user); token.acct_type = identity->get_identity_type(); } else { token.acct_name = {}; token.perm_mask = 0; token.is_admin = 0; token.acct_type = TYPE_ROLE; token.role_session = role_session.get(); } if (session_princ_tags) { token.principal_tags = std::move(*session_princ_tags); } buffer::list input, enc_output; encode(token, input); if (ret = keyhandler->encrypt(input, enc_output, &error); ret < 0) { ldpp_dout(dpp, 0) << "ERROR: Encrypting session token returned an error !" 
<< dendl; return ret; } bufferlist encoded_op; enc_output.encode_base64(encoded_op); encoded_op.append('\0'); sessionToken = encoded_op.c_str(); return ret; } void AssumedRoleUser::dump(Formatter *f) const { encode_json("Arn", arn , f); encode_json("AssumeRoleId", assumeRoleId , f); } int AssumedRoleUser::generateAssumedRoleUser(CephContext* cct, rgw::sal::Driver* driver, const string& roleId, const rgw::ARN& roleArn, const string& roleSessionName) { string resource = std::move(roleArn.resource); boost::replace_first(resource, "role", "assumed-role"); resource.append("/"); resource.append(roleSessionName); rgw::ARN assumed_role_arn(rgw::Partition::aws, rgw::Service::sts, "", roleArn.account, resource); arn = assumed_role_arn.to_string(); //Assumeroleid = roleid:rolesessionname assumeRoleId = roleId + ":" + roleSessionName; return 0; } AssumeRoleRequestBase::AssumeRoleRequestBase( CephContext* cct, const string& duration, const string& iamPolicy, const string& roleArn, const string& roleSessionName) : cct(cct), iamPolicy(iamPolicy), roleArn(roleArn), roleSessionName(roleSessionName) { MIN_DURATION_IN_SECS = cct->_conf->rgw_sts_min_session_duration; if (duration.empty()) { this->duration = DEFAULT_DURATION_IN_SECS; } else { this->duration = strict_strtoll(duration.c_str(), 10, &this->err_msg); } } int AssumeRoleRequestBase::validate_input(const DoutPrefixProvider *dpp) const { if (!err_msg.empty()) { ldpp_dout(dpp, 0) << "ERROR: error message is empty !" << dendl; return -EINVAL; } if (duration < MIN_DURATION_IN_SECS || duration > MAX_DURATION_IN_SECS) { ldpp_dout(dpp, 0) << "ERROR: Incorrect value of duration: " << duration << dendl; return -EINVAL; } if (! iamPolicy.empty() && (iamPolicy.size() < MIN_POLICY_SIZE || iamPolicy.size() > MAX_POLICY_SIZE)) { ldpp_dout(dpp, 0) << "ERROR: Incorrect size of iamPolicy: " << iamPolicy.size() << dendl; return -ERR_PACKED_POLICY_TOO_LARGE; } if (! 
roleArn.empty() && (roleArn.size() < MIN_ROLE_ARN_SIZE || roleArn.size() > MAX_ROLE_ARN_SIZE)) { ldpp_dout(dpp, 0) << "ERROR: Incorrect size of roleArn: " << roleArn.size() << dendl; return -EINVAL; } if (! roleSessionName.empty()) { if (roleSessionName.size() < MIN_ROLE_SESSION_SIZE || roleSessionName.size() > MAX_ROLE_SESSION_SIZE) { ldpp_dout(dpp, 0) << "ERROR: Either role session name is empty or role session size is incorrect: " << roleSessionName.size() << dendl; return -EINVAL; } std::regex regex_roleSession("[A-Za-z0-9_=,.@-]+"); if (! std::regex_match(roleSessionName, regex_roleSession)) { ldpp_dout(dpp, 0) << "ERROR: Role session name is incorrect: " << roleSessionName << dendl; return -EINVAL; } } return 0; } int AssumeRoleWithWebIdentityRequest::validate_input(const DoutPrefixProvider *dpp) const { if (! providerId.empty()) { if (providerId.length() < MIN_PROVIDER_ID_LEN || providerId.length() > MAX_PROVIDER_ID_LEN) { ldpp_dout(dpp, 0) << "ERROR: Either provider id is empty or provider id length is incorrect: " << providerId.length() << dendl; return -EINVAL; } } return AssumeRoleRequestBase::validate_input(dpp); } int AssumeRoleRequest::validate_input(const DoutPrefixProvider *dpp) const { if (! externalId.empty()) { if (externalId.length() < MIN_EXTERNAL_ID_LEN || externalId.length() > MAX_EXTERNAL_ID_LEN) { ldpp_dout(dpp, 0) << "ERROR: Either external id is empty or external id length is incorrect: " << externalId.length() << dendl; return -EINVAL; } std::regex regex_externalId("[A-Za-z0-9_=,.@:/-]+"); if (! std::regex_match(externalId, regex_externalId)) { ldpp_dout(dpp, 0) << "ERROR: Invalid external Id: " << externalId << dendl; return -EINVAL; } } if (! 
serialNumber.empty()){ if (serialNumber.size() < MIN_SERIAL_NUMBER_SIZE || serialNumber.size() > MAX_SERIAL_NUMBER_SIZE) { ldpp_dout(dpp, 0) << "Either serial number is empty or serial number length is incorrect: " << serialNumber.size() << dendl; return -EINVAL; } std::regex regex_serialNumber("[A-Za-z0-9_=/:,.@-]+"); if (! std::regex_match(serialNumber, regex_serialNumber)) { ldpp_dout(dpp, 0) << "Incorrect serial number: " << serialNumber << dendl; return -EINVAL; } } if (! tokenCode.empty() && tokenCode.size() == TOKEN_CODE_SIZE) { ldpp_dout(dpp, 0) << "Either token code is empty or token code size is invalid: " << tokenCode.size() << dendl; return -EINVAL; } return AssumeRoleRequestBase::validate_input(dpp); } std::tuple<int, rgw::sal::RGWRole*> STSService::getRoleInfo(const DoutPrefixProvider *dpp, const string& arn, optional_yield y) { if (auto r_arn = rgw::ARN::parse(arn); r_arn) { auto pos = r_arn->resource.find_last_of('/'); string roleName = r_arn->resource.substr(pos + 1); std::unique_ptr<rgw::sal::RGWRole> role = driver->get_role(roleName, r_arn->account); if (int ret = role->get(dpp, y); ret < 0) { if (ret == -ENOENT) { ldpp_dout(dpp, 0) << "Role doesn't exist: " << roleName << dendl; ret = -ERR_NO_ROLE_FOUND; } return make_tuple(ret, nullptr); } else { auto path_pos = r_arn->resource.find('/'); string path; if (path_pos == pos) { path = "/"; } else { path = r_arn->resource.substr(path_pos, ((pos - path_pos) + 1)); } string r_path = role->get_path(); if (path != r_path) { ldpp_dout(dpp, 0) << "Invalid Role ARN: Path in ARN does not match with the role path: " << path << " " << r_path << dendl; return make_tuple(-EACCES, nullptr); } this->role = std::move(role); return make_tuple(0, this->role.get()); } } else { ldpp_dout(dpp, 0) << "Invalid role arn: " << arn << dendl; return make_tuple(-EINVAL, nullptr); } } AssumeRoleWithWebIdentityResponse STSService::assumeRoleWithWebIdentity(const DoutPrefixProvider *dpp, AssumeRoleWithWebIdentityRequest& req) { 
AssumeRoleWithWebIdentityResponse response; response.assumeRoleResp.packedPolicySize = 0; std::vector<string> token_claims; if (req.getProviderId().empty()) { response.providerId = req.getIss(); } response.aud = req.getAud(); response.sub = req.getSub(); token_claims.emplace_back(string("iss") + ":" + req.getIss()); token_claims.emplace_back(string("aud") + ":" + req.getAud()); token_claims.emplace_back(string("sub") + ":" + req.getSub()); //Get the role info which is being assumed boost::optional<rgw::ARN> r_arn = rgw::ARN::parse(req.getRoleARN()); if (r_arn == boost::none) { ldpp_dout(dpp, 0) << "Error in parsing role arn: " << req.getRoleARN() << dendl; response.assumeRoleResp.retCode = -EINVAL; return response; } string roleId = role->get_id(); uint64_t roleMaxSessionDuration = role->get_max_session_duration(); req.setMaxDuration(roleMaxSessionDuration); //Validate input response.assumeRoleResp.retCode = req.validate_input(dpp); if (response.assumeRoleResp.retCode < 0) { return response; } //Calculate PackedPolicySize string policy = req.getPolicy(); response.assumeRoleResp.packedPolicySize = (policy.size() / req.getMaxPolicySize()) * 100; //Generate Assumed Role User response.assumeRoleResp.retCode = response.assumeRoleResp.user.generateAssumedRoleUser(cct, driver, roleId, r_arn.get(), req.getRoleSessionName()); if (response.assumeRoleResp.retCode < 0) { return response; } //Generate Credentials //Role and Policy provide the authorization info, user id and applier info are not needed response.assumeRoleResp.retCode = response.assumeRoleResp.creds.generateCredentials(dpp, cct, req.getDuration(), req.getPolicy(), roleId, req.getRoleSessionName(), token_claims, req.getPrincipalTags(), user_id, nullptr); if (response.assumeRoleResp.retCode < 0) { return response; } response.assumeRoleResp.retCode = 0; return response; } AssumeRoleResponse STSService::assumeRole(const DoutPrefixProvider *dpp, AssumeRoleRequest& req, optional_yield y) { AssumeRoleResponse response; 
response.packedPolicySize = 0; //Get the role info which is being assumed boost::optional<rgw::ARN> r_arn = rgw::ARN::parse(req.getRoleARN()); if (r_arn == boost::none) { ldpp_dout(dpp, 0) << "Error in parsing role arn: " << req.getRoleARN() << dendl; response.retCode = -EINVAL; return response; } string roleId = role->get_id(); uint64_t roleMaxSessionDuration = role->get_max_session_duration(); req.setMaxDuration(roleMaxSessionDuration); //Validate input response.retCode = req.validate_input(dpp); if (response.retCode < 0) { return response; } //Calculate PackedPolicySize string policy = req.getPolicy(); response.packedPolicySize = (policy.size() / req.getMaxPolicySize()) * 100; //Generate Assumed Role User response.retCode = response.user.generateAssumedRoleUser(cct, driver, roleId, r_arn.get(), req.getRoleSessionName()); if (response.retCode < 0) { return response; } //Generate Credentials //Role and Policy provide the authorization info, user id and applier info are not needed response.retCode = response.creds.generateCredentials(dpp, cct, req.getDuration(), req.getPolicy(), roleId, req.getRoleSessionName(), boost::none, boost::none, user_id, nullptr); if (response.retCode < 0) { return response; } response.retCode = 0; return response; } GetSessionTokenRequest::GetSessionTokenRequest(const string& duration, const string& serialNumber, const string& tokenCode) { if (duration.empty()) { this->duration = DEFAULT_DURATION_IN_SECS; } else { this->duration = stoull(duration); } this->serialNumber = serialNumber; this->tokenCode = tokenCode; } GetSessionTokenResponse STSService::getSessionToken(const DoutPrefixProvider *dpp, GetSessionTokenRequest& req) { int ret; Credentials cred; //Generate Credentials if (ret = cred.generateCredentials(dpp, cct, req.getDuration(), boost::none, boost::none, boost::none, boost::none, boost::none, user_id, identity); ret < 0) { return make_tuple(ret, cred); } return make_tuple(0, cred); } }
16,619
34.361702
141
cc
null
ceph-main/src/rgw/rgw_sts.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include "rgw_role.h" #include "rgw_auth.h" #include "rgw_web_idp.h" namespace STS { class AssumeRoleRequestBase { protected: static constexpr uint64_t MIN_POLICY_SIZE = 1; static constexpr uint64_t MAX_POLICY_SIZE = 2048; static constexpr uint64_t DEFAULT_DURATION_IN_SECS = 3600; static constexpr uint64_t MIN_ROLE_ARN_SIZE = 2; static constexpr uint64_t MAX_ROLE_ARN_SIZE = 2048; static constexpr uint64_t MIN_ROLE_SESSION_SIZE = 2; static constexpr uint64_t MAX_ROLE_SESSION_SIZE = 64; uint64_t MIN_DURATION_IN_SECS; uint64_t MAX_DURATION_IN_SECS; CephContext* cct; uint64_t duration; std::string err_msg; std::string iamPolicy; std::string roleArn; std::string roleSessionName; public: AssumeRoleRequestBase(CephContext* cct, const std::string& duration, const std::string& iamPolicy, const std::string& roleArn, const std::string& roleSessionName); const std::string& getRoleARN() const { return roleArn; } const std::string& getRoleSessionName() const { return roleSessionName; } const std::string& getPolicy() const {return iamPolicy; } static const uint64_t& getMaxPolicySize() { return MAX_POLICY_SIZE; } void setMaxDuration(const uint64_t& maxDuration) { MAX_DURATION_IN_SECS = maxDuration; } const uint64_t& getDuration() const { return duration; } int validate_input(const DoutPrefixProvider *dpp) const; }; class AssumeRoleWithWebIdentityRequest : public AssumeRoleRequestBase { static constexpr uint64_t MIN_PROVIDER_ID_LEN = 4; static constexpr uint64_t MAX_PROVIDER_ID_LEN = 2048; std::string providerId; std::string iamPolicy; std::string iss; std::string sub; std::string aud; std::vector<std::pair<std::string,std::string>> session_princ_tags; public: AssumeRoleWithWebIdentityRequest( CephContext* cct, const std::string& duration, const std::string& providerId, const std::string& iamPolicy, const std::string& roleArn, const std::string& roleSessionName, 
const std::string& iss, const std::string& sub, const std::string& aud, std::vector<std::pair<std::string,std::string>> session_princ_tags) : AssumeRoleRequestBase(cct, duration, iamPolicy, roleArn, roleSessionName), providerId(providerId), iss(iss), sub(sub), aud(aud), session_princ_tags(session_princ_tags) {} const std::string& getProviderId() const { return providerId; } const std::string& getIss() const { return iss; } const std::string& getAud() const { return aud; } const std::string& getSub() const { return sub; } const std::vector<std::pair<std::string,std::string>>& getPrincipalTags() const { return session_princ_tags; } int validate_input(const DoutPrefixProvider *dpp) const; }; class AssumeRoleRequest : public AssumeRoleRequestBase { static constexpr uint64_t MIN_EXTERNAL_ID_LEN = 2; static constexpr uint64_t MAX_EXTERNAL_ID_LEN = 1224; static constexpr uint64_t MIN_SERIAL_NUMBER_SIZE = 9; static constexpr uint64_t MAX_SERIAL_NUMBER_SIZE = 256; static constexpr uint64_t TOKEN_CODE_SIZE = 6; std::string externalId; std::string serialNumber; std::string tokenCode; public: AssumeRoleRequest(CephContext* cct, const std::string& duration, const std::string& externalId, const std::string& iamPolicy, const std::string& roleArn, const std::string& roleSessionName, const std::string& serialNumber, const std::string& tokenCode) : AssumeRoleRequestBase(cct, duration, iamPolicy, roleArn, roleSessionName), externalId(externalId), serialNumber(serialNumber), tokenCode(tokenCode){} int validate_input(const DoutPrefixProvider *dpp) const; }; class GetSessionTokenRequest { protected: static constexpr uint64_t MIN_DURATION_IN_SECS = 900; static constexpr uint64_t DEFAULT_DURATION_IN_SECS = 3600; uint64_t duration; std::string serialNumber; std::string tokenCode; public: GetSessionTokenRequest(const std::string& duration, const std::string& serialNumber, const std::string& tokenCode); const uint64_t& getDuration() const { return duration; } static const uint64_t& 
getMinDuration() { return MIN_DURATION_IN_SECS; } }; class AssumedRoleUser { std::string arn; std::string assumeRoleId; public: int generateAssumedRoleUser( CephContext* cct, rgw::sal::Driver* driver, const std::string& roleId, const rgw::ARN& roleArn, const std::string& roleSessionName); const std::string& getARN() const { return arn; } const std::string& getAssumeRoleId() const { return assumeRoleId; } void dump(Formatter *f) const; }; struct SessionToken { std::string access_key_id; std::string secret_access_key; std::string expiration; std::string policy; std::string roleId; rgw_user user; std::string acct_name; uint32_t perm_mask; bool is_admin; uint32_t acct_type; std::string role_session; std::vector<std::string> token_claims; std::string issued_at; std::vector<std::pair<std::string,std::string>> principal_tags; SessionToken() {} void encode(bufferlist& bl) const { ENCODE_START(5, 1, bl); encode(access_key_id, bl); encode(secret_access_key, bl); encode(expiration, bl); encode(policy, bl); encode(roleId, bl); encode(user, bl); encode(acct_name, bl); encode(perm_mask, bl); encode(is_admin, bl); encode(acct_type, bl); encode(role_session, bl); encode(token_claims, bl); encode(issued_at, bl); encode(principal_tags, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(5, bl); decode(access_key_id, bl); decode(secret_access_key, bl); decode(expiration, bl); decode(policy, bl); decode(roleId, bl); decode(user, bl); decode(acct_name, bl); decode(perm_mask, bl); decode(is_admin, bl); decode(acct_type, bl); if (struct_v >= 2) { decode(role_session, bl); } if (struct_v >= 3) { decode(token_claims, bl); } if (struct_v >= 4) { decode(issued_at, bl); } if (struct_v >= 5) { decode(principal_tags, bl); } DECODE_FINISH(bl); } }; WRITE_CLASS_ENCODER(SessionToken) class Credentials { static constexpr int MAX_ACCESS_KEY_LEN = 20; static constexpr int MAX_SECRET_KEY_LEN = 40; std::string accessKeyId; std::string expiration; std::string 
secretAccessKey; std::string sessionToken; public: int generateCredentials(const DoutPrefixProvider *dpp, CephContext* cct, const uint64_t& duration, const boost::optional<std::string>& policy, const boost::optional<std::string>& roleId, const boost::optional<std::string>& role_session, const boost::optional<std::vector<std::string>>& token_claims, const boost::optional<std::vector<std::pair<std::string,std::string>>>& session_princ_tags, boost::optional<rgw_user> user, rgw::auth::Identity* identity); const std::string& getAccessKeyId() const { return accessKeyId; } const std::string& getExpiration() const { return expiration; } const std::string& getSecretAccessKey() const { return secretAccessKey; } const std::string& getSessionToken() const { return sessionToken; } void dump(Formatter *f) const; }; struct AssumeRoleResponse { int retCode; AssumedRoleUser user; Credentials creds; uint64_t packedPolicySize; }; struct AssumeRoleWithWebIdentityResponse { AssumeRoleResponse assumeRoleResp; std::string aud; std::string providerId; std::string sub; }; using AssumeRoleResponse = struct AssumeRoleResponse ; using GetSessionTokenResponse = std::tuple<int, Credentials>; using AssumeRoleWithWebIdentityResponse = struct AssumeRoleWithWebIdentityResponse; class STSService { CephContext* cct; rgw::sal::Driver* driver; rgw_user user_id; std::unique_ptr<rgw::sal::RGWRole> role; rgw::auth::Identity* identity; public: STSService() = default; STSService(CephContext* cct, rgw::sal::Driver* driver, rgw_user user_id, rgw::auth::Identity* identity) : cct(cct), driver(driver), user_id(user_id), identity(identity) {} std::tuple<int, rgw::sal::RGWRole*> getRoleInfo(const DoutPrefixProvider *dpp, const std::string& arn, optional_yield y); AssumeRoleResponse assumeRole(const DoutPrefixProvider *dpp, AssumeRoleRequest& req, optional_yield y); GetSessionTokenResponse getSessionToken(const DoutPrefixProvider *dpp, GetSessionTokenRequest& req); AssumeRoleWithWebIdentityResponse 
assumeRoleWithWebIdentity(const DoutPrefixProvider *dpp, AssumeRoleWithWebIdentityRequest& req); }; }
9,296
35.892857
132
h
null
ceph-main/src/rgw/rgw_swift_auth.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <array> #include <algorithm> #include <string_view> #include <boost/container/static_vector.hpp> #include <boost/algorithm/string/predicate.hpp> #include <boost/algorithm/string.hpp> #include "rgw_swift_auth.h" #include "rgw_rest.h" #include "common/ceph_crypto.h" #include "common/Clock.h" #include "include/random.h" #include "rgw_client_io.h" #include "rgw_http_client.h" #include "rgw_sal_rados.h" #include "include/str_list.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw #define DEFAULT_SWIFT_PREFIX "/swift" using namespace std; using namespace ceph::crypto; namespace rgw { namespace auth { namespace swift { /* TempURL: applier */ void TempURLApplier::modify_request_state(const DoutPrefixProvider* dpp, req_state* s) const /* in/out */ { bool inline_exists = false; const std::string& filename = s->info.args.get("filename"); s->info.args.get("inline", &inline_exists); if (inline_exists) { s->content_disp.override = "inline"; } else if (!filename.empty()) { std::string fenc; url_encode(filename, fenc); s->content_disp.override = "attachment; filename=\"" + fenc + "\""; } else { std::string fenc; url_encode(s->object->get_name(), fenc); s->content_disp.fallback = "attachment; filename=\"" + fenc + "\""; } ldpp_dout(dpp, 20) << "finished applying changes to req_state for TempURL: " << " content_disp override " << s->content_disp.override << " content_disp fallback " << s->content_disp.fallback << dendl; } void TempURLApplier::write_ops_log_entry(rgw_log_entry& entry) const { LocalApplier::write_ops_log_entry(entry); entry.temp_url = true; } /* TempURL: engine */ bool TempURLEngine::is_applicable(const req_state* const s) const noexcept { return s->info.args.exists("temp_url_sig") || s->info.args.exists("temp_url_expires"); } void TempURLEngine::get_owner_info(const DoutPrefixProvider* dpp, const req_state* const s, RGWUserInfo& 
owner_info, optional_yield y) const { /* We cannot use req_state::bucket_name because it isn't available * now. It will be initialized in RGWHandler_REST_SWIFT::postauth_init(). */ const string& bucket_name = s->init_state.url_bucket; /* TempURL requires that bucket and object names are specified. */ if (bucket_name.empty() || s->object->empty()) { throw -EPERM; } /* TempURL case is completely different than the Keystone auth - you may * get account name only through extraction from URL. In turn, knowledge * about account is neccessary to obtain its bucket tenant. Without that, * the access would be limited to accounts with empty tenant. */ string bucket_tenant; if (!s->account_name.empty()) { bool found = false; std::unique_ptr<rgw::sal::User> user; rgw_user uid(s->account_name); if (uid.tenant.empty()) { rgw_user tenanted_uid(uid.id, uid.id); user = driver->get_user(tenanted_uid); if (user->load_user(dpp, s->yield) >= 0) { /* Succeeded */ found = true; } } if (!found) { user = driver->get_user(uid); if (user->load_user(dpp, s->yield) < 0) { throw -EPERM; } } bucket_tenant = user->get_tenant(); } rgw_bucket b; b.tenant = std::move(bucket_tenant); b.name = std::move(bucket_name); std::unique_ptr<rgw::sal::Bucket> bucket; int ret = driver->get_bucket(dpp, nullptr, b, &bucket, s->yield); if (ret < 0) { throw ret; } ldpp_dout(dpp, 20) << "temp url user (bucket owner): " << bucket->get_info().owner << dendl; std::unique_ptr<rgw::sal::User> user; user = driver->get_user(bucket->get_info().owner); if (user->load_user(dpp, s->yield) < 0) { throw -EPERM; } owner_info = user->get_info(); } std::string TempURLEngine::convert_from_iso8601(std::string expires) const { /* Swift's TempURL allows clients to send the expiration as ISO8601- * compatible strings. Though, only plain UNIX timestamp are taken * for the HMAC calculations. We need to make the conversion. 
*/ struct tm date_t; if (!parse_iso8601(expires.c_str(), &date_t, nullptr, true)) { return expires; } else { return std::to_string(internal_timegm(&date_t)); } } bool TempURLEngine::is_expired(const std::string& expires) const { string err; const utime_t now = ceph_clock_now(); const uint64_t expiration = (uint64_t)strict_strtoll(expires.c_str(), 10, &err); if (!err.empty()) { dout(5) << "failed to parse temp_url_expires: " << err << dendl; return true; } if (expiration <= (uint64_t)now.sec()) { dout(5) << "temp url expired: " << expiration << " <= " << now.sec() << dendl; return true; } return false; } bool TempURLEngine::is_disallowed_header_present(const req_info& info) const { static const auto headers = { "HTTP_X_OBJECT_MANIFEST", }; return std::any_of(std::begin(headers), std::end(headers), [&info](const char* header) { return info.env->exists(header); }); } std::string extract_swift_subuser(const std::string& swift_user_name) { size_t pos = swift_user_name.find(':'); if (std::string::npos == pos) { return swift_user_name; } else { return swift_user_name.substr(pos + 1); } } class TempURLEngine::SignatureHelper { private: static constexpr uint32_t output_size = CEPH_CRYPTO_HMACSHA1_DIGESTSIZE * 2 + 1; unsigned char dest[CEPH_CRYPTO_HMACSHA1_DIGESTSIZE]; // 20 char dest_str[output_size]; public: SignatureHelper() = default; const char* calc(const std::string& key, const std::string_view& method, const std::string_view& path, const std::string& expires) { using ceph::crypto::HMACSHA1; using UCHARPTR = const unsigned char*; HMACSHA1 hmac((UCHARPTR) key.c_str(), key.size()); hmac.Update((UCHARPTR) method.data(), method.size()); hmac.Update((UCHARPTR) "\n", 1); hmac.Update((UCHARPTR) expires.c_str(), expires.size()); hmac.Update((UCHARPTR) "\n", 1); hmac.Update((UCHARPTR) path.data(), path.size()); hmac.Final(dest); buf_to_hex((UCHARPTR) dest, sizeof(dest), dest_str); return dest_str; } bool is_equal_to(const std::string& rhs) const { /* never allow out-of-range 
exception */ if (rhs.size() < (output_size - 1)) { return false; } return rhs.compare(0 /* pos */, output_size, dest_str) == 0; } }; /* TempURLEngine::SignatureHelper */ class TempURLEngine::PrefixableSignatureHelper : private TempURLEngine::SignatureHelper { using base_t = SignatureHelper; const std::string_view decoded_uri; const std::string_view object_name; std::string_view no_obj_uri; const boost::optional<const std::string&> prefix; public: PrefixableSignatureHelper(const std::string& _decoded_uri, const std::string& object_name, const boost::optional<const std::string&> prefix) : decoded_uri(_decoded_uri), object_name(object_name), prefix(prefix) { /* Transform: v1/acct/cont/obj - > v1/acct/cont/ * * NOTE(rzarzynski): we really want to substr() on std::string_view, * not std::string. Otherwise we would end with no_obj_uri referencing * a temporary. */ no_obj_uri = \ decoded_uri.substr(0, decoded_uri.length() - object_name.length()); } const char* calc(const std::string& key, const std::string_view& method, const std::string_view& path, const std::string& expires) { if (!prefix) { return base_t::calc(key, method, path, expires); } else { const auto prefixed_path = \ string_cat_reserve("prefix:", no_obj_uri, *prefix); return base_t::calc(key, method, prefixed_path, expires); } } bool is_equal_to(const std::string& rhs) const { bool is_auth_ok = base_t::is_equal_to(rhs); if (prefix && is_auth_ok) { const auto prefix_uri = string_cat_reserve(no_obj_uri, *prefix); is_auth_ok = boost::algorithm::starts_with(decoded_uri, prefix_uri); } return is_auth_ok; } }; /* TempURLEngine::PrefixableSignatureHelper */ TempURLEngine::result_t TempURLEngine::authenticate(const DoutPrefixProvider* dpp, const req_state* const s, optional_yield y) const { if (! is_applicable(s)) { return result_t::deny(); } /* NOTE(rzarzynski): RGWHTTPArgs::get(), in contrast to RGWEnv::get(), * never returns nullptr. If the requested parameter is absent, we will * get the empty string. 
*/ const std::string& temp_url_sig = s->info.args.get("temp_url_sig"); const std::string& temp_url_expires = \ convert_from_iso8601(s->info.args.get("temp_url_expires")); if (temp_url_sig.empty() || temp_url_expires.empty()) { return result_t::deny(); } /* Though, for prefixed tempurls we need to differentiate between empty * prefix and lack of prefix. Empty prefix means allowance for whole * container. */ const boost::optional<const std::string&> temp_url_prefix = \ s->info.args.get_optional("temp_url_prefix"); RGWUserInfo owner_info; try { get_owner_info(dpp, s, owner_info, y); } catch (...) { ldpp_dout(dpp, 5) << "cannot get user_info of account's owner" << dendl; return result_t::reject(); } if (owner_info.temp_url_keys.empty()) { ldpp_dout(dpp, 5) << "user does not have temp url key set, aborting" << dendl; return result_t::reject(); } if (is_expired(temp_url_expires)) { ldpp_dout(dpp, 5) << "temp url link expired" << dendl; return result_t::reject(-EPERM); } if (is_disallowed_header_present(s->info)) { ldout(cct, 5) << "temp url rejected due to disallowed header" << dendl; return result_t::reject(-EINVAL); } /* We need to verify two paths because of compliance with Swift, Tempest * and old versions of RadosGW. The second item will have the prefix * of Swift API entry point removed. */ /* XXX can we search this ONCE? */ const size_t pos = g_conf()->rgw_swift_url_prefix.find_last_not_of('/') + 1; const std::string_view ref_uri = s->decoded_uri; const std::array<std::string_view, 2> allowed_paths = { ref_uri, ref_uri.substr(pos + 1) }; /* Account owner calculates the signature also against a HTTP method. */ boost::container::static_vector<std::string_view, 3> allowed_methods; if (strcmp("HEAD", s->info.method) == 0) { /* HEAD requests are specially handled. 
*/ /* TODO: after getting a newer boost (with static_vector supporting * initializers lists), get back to the good notation: * allowed_methods = {"HEAD", "GET", "PUT" }; * Just for now let's use emplace_back to construct the vector. */ allowed_methods.emplace_back("HEAD"); allowed_methods.emplace_back("GET"); allowed_methods.emplace_back("PUT"); } else if (strlen(s->info.method) > 0) { allowed_methods.emplace_back(s->info.method); } /* Need to try each combination of keys, allowed path and methods. */ PrefixableSignatureHelper sig_helper { s->decoded_uri, s->object->get_name(), temp_url_prefix }; for (const auto& kv : owner_info.temp_url_keys) { const int temp_url_key_num = kv.first; const string& temp_url_key = kv.second; if (temp_url_key.empty()) { continue; } for (const auto& path : allowed_paths) { for (const auto& method : allowed_methods) { const char* const local_sig = sig_helper.calc(temp_url_key, method, path, temp_url_expires); ldpp_dout(dpp, 20) << "temp url signature [" << temp_url_key_num << "] (calculated): " << local_sig << dendl; if (sig_helper.is_equal_to(temp_url_sig)) { auto apl = apl_factory->create_apl_turl(cct, s, owner_info); return result_t::grant(std::move(apl)); } else { ldpp_dout(dpp, 5) << "temp url signature mismatch: " << local_sig << " != " << temp_url_sig << dendl; } } } } return result_t::reject(); } /* External token */ bool ExternalTokenEngine::is_applicable(const std::string& token) const noexcept { if (token.empty()) { return false; } else if (g_conf()->rgw_swift_auth_url.empty()) { return false; } else { return true; } } ExternalTokenEngine::result_t ExternalTokenEngine::authenticate(const DoutPrefixProvider* dpp, const std::string& token, const req_state* const s, optional_yield y) const { if (! 
is_applicable(token)) { return result_t::deny(); } std::string auth_url = g_conf()->rgw_swift_auth_url; if (auth_url.back() != '/') { auth_url.append("/"); } auth_url.append("token"); char url_buf[auth_url.size() + 1 + token.length() + 1]; sprintf(url_buf, "%s/%s", auth_url.c_str(), token.c_str()); RGWHTTPHeadersCollector validator(cct, "GET", url_buf, { "X-Auth-Groups", "X-Auth-Ttl" }); ldpp_dout(dpp, 10) << "rgw_swift_validate_token url=" << url_buf << dendl; int ret = validator.process(y); if (ret < 0) { throw ret; } std::string swift_user; try { std::vector<std::string> swift_groups; get_str_vec(validator.get_header_value("X-Auth-Groups"), ",", swift_groups); if (0 == swift_groups.size()) { return result_t::deny(-EPERM); } else { swift_user = std::move(swift_groups[0]); } } catch (const std::out_of_range&) { /* The X-Auth-Groups header isn't present in the response. */ return result_t::deny(-EPERM); } if (swift_user.empty()) { return result_t::deny(-EPERM); } ldpp_dout(dpp, 10) << "swift user=" << swift_user << dendl; std::unique_ptr<rgw::sal::User> user; ret = driver->get_user_by_swift(dpp, swift_user, s->yield, &user); if (ret < 0) { ldpp_dout(dpp, 0) << "NOTICE: couldn't map swift user" << dendl; throw ret; } auto apl = apl_factory->create_apl_local(cct, s, user->get_info(), extract_swift_subuser(swift_user), std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY); return result_t::grant(std::move(apl)); } static int build_token(const string& swift_user, const string& key, const uint64_t nonce, const utime_t& expiration, bufferlist& bl) { using ceph::encode; encode(swift_user, bl); encode(nonce, bl); encode(expiration, bl); bufferptr p(CEPH_CRYPTO_HMACSHA1_DIGESTSIZE); char buf[bl.length() * 2 + 1]; buf_to_hex((const unsigned char *)bl.c_str(), bl.length(), buf); dout(20) << "build_token token=" << buf << dendl; char k[CEPH_CRYPTO_HMACSHA1_DIGESTSIZE]; // FIPS zeroization audit 20191116: this memset is not intended to // wipe out a secret after use. 
memset(k, 0, sizeof(k)); const char *s = key.c_str(); for (int i = 0; i < (int)key.length(); i++, s++) { k[i % CEPH_CRYPTO_HMACSHA1_DIGESTSIZE] |= *s; } calc_hmac_sha1(k, sizeof(k), bl.c_str(), bl.length(), p.c_str()); ::ceph::crypto::zeroize_for_security(k, sizeof(k)); bl.append(p); return 0; } static int encode_token(CephContext *cct, string& swift_user, string& key, bufferlist& bl) { const auto nonce = ceph::util::generate_random_number<uint64_t>(); utime_t expiration = ceph_clock_now(); expiration += cct->_conf->rgw_swift_token_expiration; return build_token(swift_user, key, nonce, expiration, bl); } /* AUTH_rgwtk (signed token): engine */ bool SignedTokenEngine::is_applicable(const std::string& token) const noexcept { if (token.empty()) { return false; } else { return token.compare(0, 10, "AUTH_rgwtk") == 0; } } SignedTokenEngine::result_t SignedTokenEngine::authenticate(const DoutPrefixProvider* dpp, const std::string& token, const req_state* const s) const { if (! is_applicable(token)) { return result_t::deny(-EPERM); } /* Effective token string is the part after the prefix. 
*/ const std::string etoken = token.substr(strlen("AUTH_rgwtk")); const size_t etoken_len = etoken.length(); if (etoken_len & 1) { ldpp_dout(dpp, 0) << "NOTICE: failed to verify token: odd token length=" << etoken_len << dendl; throw -EINVAL; } ceph::bufferptr p(etoken_len/2); int ret = hex_to_buf(etoken.c_str(), p.c_str(), etoken_len); if (ret < 0) { throw ret; } ceph::bufferlist tok_bl; tok_bl.append(p); uint64_t nonce; utime_t expiration; std::string swift_user; try { auto iter = tok_bl.cbegin(); using ceph::decode; decode(swift_user, iter); decode(nonce, iter); decode(expiration, iter); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "NOTICE: failed to decode token" << dendl; throw -EINVAL; } const utime_t now = ceph_clock_now(); if (expiration < now) { ldpp_dout(dpp, 0) << "NOTICE: old timed out token was used now=" << now << " token.expiration=" << expiration << dendl; return result_t::deny(-EPERM); } std::unique_ptr<rgw::sal::User> user; ret = driver->get_user_by_swift(dpp, swift_user, s->yield, &user); if (ret < 0) { throw ret; } ldpp_dout(dpp, 10) << "swift_user=" << swift_user << dendl; const auto siter = user->get_info().swift_keys.find(swift_user); if (siter == std::end(user->get_info().swift_keys)) { return result_t::deny(-EPERM); } const auto swift_key = siter->second; bufferlist local_tok_bl; ret = build_token(swift_user, swift_key.key, nonce, expiration, local_tok_bl); if (ret < 0) { throw ret; } if (local_tok_bl.length() != tok_bl.length()) { ldpp_dout(dpp, 0) << "NOTICE: tokens length mismatch:" << " tok_bl.length()=" << tok_bl.length() << " local_tok_bl.length()=" << local_tok_bl.length() << dendl; return result_t::deny(-EPERM); } if (memcmp(local_tok_bl.c_str(), tok_bl.c_str(), local_tok_bl.length()) != 0) { char buf[local_tok_bl.length() * 2 + 1]; buf_to_hex(reinterpret_cast<const unsigned char *>(local_tok_bl.c_str()), local_tok_bl.length(), buf); ldpp_dout(dpp, 0) << "NOTICE: tokens mismatch tok=" << buf << dendl; return 
result_t::deny(-EPERM); } auto apl = apl_factory->create_apl_local(cct, s, user->get_info(), extract_swift_subuser(swift_user), std::nullopt, rgw::auth::LocalApplier::NO_ACCESS_KEY); return result_t::grant(std::move(apl)); } } /* namespace swift */ } /* namespace auth */ } /* namespace rgw */ void RGW_SWIFT_Auth_Get::execute(optional_yield y) { int ret = -EPERM; const char *key = s->info.env->get("HTTP_X_AUTH_KEY"); const char *user_name = s->info.env->get("HTTP_X_AUTH_USER"); s->prot_flags |= RGW_REST_SWIFT; string user_str; std::unique_ptr<rgw::sal::User> user; bufferlist bl; RGWAccessKey *swift_key; map<string, RGWAccessKey>::iterator siter; string swift_url = g_conf()->rgw_swift_url; string swift_prefix = g_conf()->rgw_swift_url_prefix; string tenant_path; /* * We did not allow an empty Swift prefix before, but we want it now. * So, we take rgw_swift_url_prefix = "/" to yield the empty prefix. * The rgw_swift_url_prefix = "" is the default and yields "/swift" * in a backwards-compatible way. 
*/ if (swift_prefix.size() == 0) { swift_prefix = DEFAULT_SWIFT_PREFIX; } else if (swift_prefix == "/") { swift_prefix.clear(); } else { if (swift_prefix[0] != '/') { swift_prefix.insert(0, "/"); } } if (swift_url.size() == 0) { bool add_port = false; auto server_port = s->info.env->get_optional("SERVER_PORT_SECURE"); const char *protocol; if (server_port) { add_port = (*server_port != "443"); protocol = "https"; } else { server_port = s->info.env->get_optional("SERVER_PORT"); if (server_port) { add_port = (*server_port != "80"); } protocol = "http"; } const char *host = s->info.env->get("HTTP_HOST"); if (!host) { dout(0) << "NOTICE: server is misconfigured, missing rgw_swift_url_prefix or rgw_swift_url, HTTP_HOST is not set" << dendl; ret = -EINVAL; goto done; } swift_url = protocol; swift_url.append("://"); swift_url.append(host); if (add_port && !strchr(host, ':')) { swift_url.append(":"); swift_url.append(*server_port); } } if (!key || !user_name) goto done; user_str = user_name; ret = driver->get_user_by_swift(s, user_str, s->yield, &user); if (ret < 0) { ret = -EACCES; goto done; } siter = user->get_info().swift_keys.find(user_str); if (siter == user->get_info().swift_keys.end()) { ret = -EPERM; goto done; } swift_key = &siter->second; if (swift_key->key.compare(key) != 0) { dout(0) << "NOTICE: RGW_SWIFT_Auth_Get::execute(): bad swift key" << dendl; ret = -EPERM; goto done; } if (!g_conf()->rgw_swift_tenant_name.empty()) { tenant_path = "/AUTH_"; tenant_path.append(g_conf()->rgw_swift_tenant_name); } else if (g_conf()->rgw_swift_account_in_url) { tenant_path = "/AUTH_"; tenant_path.append(user->get_id().to_str()); } dump_header(s, "X-Storage-Url", swift_url + swift_prefix + "/v1" + tenant_path); using rgw::auth::swift::encode_token; if ((ret = encode_token(s->cct, swift_key->id, swift_key->key, bl)) < 0) goto done; { static constexpr size_t PREFIX_LEN = sizeof("AUTH_rgwtk") - 1; char token_val[PREFIX_LEN + bl.length() * 2 + 1]; snprintf(token_val, PREFIX_LEN 
+ 1, "AUTH_rgwtk"); buf_to_hex((const unsigned char *)bl.c_str(), bl.length(), token_val + PREFIX_LEN); dump_header(s, "X-Storage-Token", token_val); dump_header(s, "X-Auth-Token", token_val); } ret = STATUS_NO_CONTENT; done: set_req_state_err(s, ret); dump_errno(s); end_header(s); } int RGWHandler_SWIFT_Auth::init(rgw::sal::Driver* driver, req_state *state, rgw::io::BasicClient *cio) { state->dialect = "swift-auth"; state->formatter = new JSONFormatter; state->format = RGWFormat::JSON; return RGWHandler::init(driver, state, cio); } int RGWHandler_SWIFT_Auth::authorize(const DoutPrefixProvider *dpp, optional_yield) { return 0; } RGWOp *RGWHandler_SWIFT_Auth::op_get() { return new RGW_SWIFT_Auth_Get; }
22,983
28.618557
129
cc
null
ceph-main/src/rgw/rgw_swift_auth.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include "rgw_common.h" #include "rgw_user.h" #include "rgw_op.h" #include "rgw_rest.h" #include "rgw_auth.h" #include "rgw_auth_keystone.h" #include "rgw_auth_filters.h" #include "rgw_sal.h" #define RGW_SWIFT_TOKEN_EXPIRATION (15 * 60) namespace rgw { namespace auth { namespace swift { /* TempURL: applier. */ class TempURLApplier : public rgw::auth::LocalApplier { public: TempURLApplier(CephContext* const cct, const RGWUserInfo& user_info) : LocalApplier(cct, user_info, LocalApplier::NO_SUBUSER, std::nullopt, LocalApplier::NO_ACCESS_KEY) { }; void modify_request_state(const DoutPrefixProvider* dpp, req_state * s) const override; /* in/out */ void write_ops_log_entry(rgw_log_entry& entry) const override; struct Factory { virtual ~Factory() {} virtual aplptr_t create_apl_turl(CephContext* cct, const req_state* s, const RGWUserInfo& user_info) const = 0; }; }; /* TempURL: engine */ class TempURLEngine : public rgw::auth::Engine { using result_t = rgw::auth::Engine::result_t; CephContext* const cct; rgw::sal::Driver* driver; const TempURLApplier::Factory* const apl_factory; /* Helper methods. */ void get_owner_info(const DoutPrefixProvider* dpp, const req_state* s, RGWUserInfo& owner_info, optional_yield y) const; std::string convert_from_iso8601(std::string expires) const; bool is_applicable(const req_state* s) const noexcept; bool is_expired(const std::string& expires) const; bool is_disallowed_header_present(const req_info& info) const; class SignatureHelper; class PrefixableSignatureHelper; public: TempURLEngine(CephContext* const cct, rgw::sal::Driver* _driver , const TempURLApplier::Factory* const apl_factory) : cct(cct), driver(_driver), apl_factory(apl_factory) { } /* Interface implementations. 
*/ const char* get_name() const noexcept override { return "rgw::auth::swift::TempURLEngine"; } result_t authenticate(const DoutPrefixProvider* dpp, const req_state* const s, optional_yield y) const override; }; /* AUTH_rgwtk */ class SignedTokenEngine : public rgw::auth::Engine { using result_t = rgw::auth::Engine::result_t; CephContext* const cct; rgw::sal::Driver* driver; const rgw::auth::TokenExtractor* const extractor; const rgw::auth::LocalApplier::Factory* const apl_factory; bool is_applicable(const std::string& token) const noexcept; using rgw::auth::Engine::authenticate; result_t authenticate(const DoutPrefixProvider* dpp, const std::string& token, const req_state* s) const; public: SignedTokenEngine(CephContext* const cct, rgw::sal::Driver* _driver, const rgw::auth::TokenExtractor* const extractor, const rgw::auth::LocalApplier::Factory* const apl_factory) : cct(cct), driver(_driver), extractor(extractor), apl_factory(apl_factory) { } const char* get_name() const noexcept override { return "rgw::auth::swift::SignedTokenEngine"; } result_t authenticate(const DoutPrefixProvider* dpp, const req_state* const s, optional_yield y) const override { return authenticate(dpp, extractor->get_token(s), s); } }; /* External token */ class ExternalTokenEngine : public rgw::auth::Engine { using result_t = rgw::auth::Engine::result_t; CephContext* const cct; rgw::sal::Driver* driver; const rgw::auth::TokenExtractor* const extractor; const rgw::auth::LocalApplier::Factory* const apl_factory; bool is_applicable(const std::string& token) const noexcept; result_t authenticate(const DoutPrefixProvider* dpp, const std::string& token, const req_state* s, optional_yield y) const; public: ExternalTokenEngine(CephContext* const cct, rgw::sal::Driver* _driver, const rgw::auth::TokenExtractor* const extractor, const rgw::auth::LocalApplier::Factory* const apl_factory) : cct(cct), driver(_driver), extractor(extractor), apl_factory(apl_factory) { } const char* get_name() const 
noexcept override { return "rgw::auth::swift::ExternalTokenEngine"; } result_t authenticate(const DoutPrefixProvider* dpp, const req_state* const s, optional_yield y) const override { return authenticate(dpp, extractor->get_token(s), s, y); } }; /* SwiftAnonymous: applier. */ class SwiftAnonymousApplier : public rgw::auth::LocalApplier { public: SwiftAnonymousApplier(CephContext* const cct, const RGWUserInfo& user_info) : LocalApplier(cct, user_info, LocalApplier::NO_SUBUSER, std::nullopt, LocalApplier::NO_ACCESS_KEY) { } bool is_admin_of(const rgw_user& uid) const {return false;} bool is_owner_of(const rgw_user& uid) const {return uid.id.compare(RGW_USER_ANON_ID) == 0;} }; class SwiftAnonymousEngine : public rgw::auth::AnonymousEngine { const rgw::auth::TokenExtractor* const extractor; bool is_applicable(const req_state* s) const noexcept override { return extractor->get_token(s).empty(); } public: SwiftAnonymousEngine(CephContext* const cct, const SwiftAnonymousApplier::Factory* const apl_factory, const rgw::auth::TokenExtractor* const extractor) : AnonymousEngine(cct, apl_factory), extractor(extractor) { } const char* get_name() const noexcept override { return "rgw::auth::swift::SwiftAnonymousEngine"; } }; class DefaultStrategy : public rgw::auth::Strategy, public rgw::auth::RemoteApplier::Factory, public rgw::auth::LocalApplier::Factory, public rgw::auth::swift::TempURLApplier::Factory { rgw::sal::Driver* driver; const ImplicitTenants& implicit_tenant_context; /* The engines. 
*/ const rgw::auth::swift::TempURLEngine tempurl_engine; const rgw::auth::swift::SignedTokenEngine signed_engine; boost::optional <const rgw::auth::keystone::TokenEngine> keystone_engine; const rgw::auth::swift::ExternalTokenEngine external_engine; const rgw::auth::swift::SwiftAnonymousEngine anon_engine; using keystone_config_t = rgw::keystone::CephCtxConfig; using keystone_cache_t = rgw::keystone::TokenCache; using aplptr_t = rgw::auth::IdentityApplier::aplptr_t; using acl_strategy_t = rgw::auth::RemoteApplier::acl_strategy_t; /* The method implements TokenExtractor for X-Auth-Token present in req_state. */ struct AuthTokenExtractor : rgw::auth::TokenExtractor { std::string get_token(const req_state* const s) const override { /* Returning a reference here would end in GCC complaining about a reference * to temporary. */ return s->info.env->get("HTTP_X_AUTH_TOKEN", ""); } } auth_token_extractor; /* The method implements TokenExtractor for X-Service-Token present in req_state. */ struct ServiceTokenExtractor : rgw::auth::TokenExtractor { std::string get_token(const req_state* const s) const override { return s->info.env->get("HTTP_X_SERVICE_TOKEN", ""); } } service_token_extractor; aplptr_t create_apl_remote(CephContext* const cct, const req_state* const s, acl_strategy_t&& extra_acl_strategy, const rgw::auth::RemoteApplier::AuthInfo &info) const override { auto apl = \ rgw::auth::add_3rdparty(driver, rgw_user(s->account_name), rgw::auth::add_sysreq(cct, driver, s, rgw::auth::RemoteApplier(cct, driver, std::move(extra_acl_strategy), info, implicit_tenant_context, rgw::auth::ImplicitTenants::IMPLICIT_TENANTS_SWIFT))); /* TODO(rzarzynski): replace with static_ptr. 
*/ return aplptr_t(new decltype(apl)(std::move(apl))); } aplptr_t create_apl_local(CephContext* const cct, const req_state* const s, const RGWUserInfo& user_info, const std::string& subuser, const std::optional<uint32_t>& perm_mask, const std::string& access_key_id) const override { auto apl = \ rgw::auth::add_3rdparty(driver, rgw_user(s->account_name), rgw::auth::add_sysreq(cct, driver, s, rgw::auth::LocalApplier(cct, user_info, subuser, perm_mask, access_key_id))); /* TODO(rzarzynski): replace with static_ptr. */ return aplptr_t(new decltype(apl)(std::move(apl))); } aplptr_t create_apl_turl(CephContext* const cct, const req_state* const s, const RGWUserInfo& user_info) const override { /* TempURL doesn't need any user account override. It's a Swift-specific * mechanism that requires account name internally, so there is no * business with delegating the responsibility outside. */ return aplptr_t(new rgw::auth::swift::TempURLApplier(cct, user_info)); } public: DefaultStrategy(CephContext* const cct, const ImplicitTenants& implicit_tenant_context, rgw::sal::Driver* _driver) : driver(_driver), implicit_tenant_context(implicit_tenant_context), tempurl_engine(cct, driver, static_cast<rgw::auth::swift::TempURLApplier::Factory*>(this)), signed_engine(cct, driver, static_cast<rgw::auth::TokenExtractor*>(&auth_token_extractor), static_cast<rgw::auth::LocalApplier::Factory*>(this)), external_engine(cct, driver, static_cast<rgw::auth::TokenExtractor*>(&auth_token_extractor), static_cast<rgw::auth::LocalApplier::Factory*>(this)), anon_engine(cct, static_cast<SwiftAnonymousApplier::Factory*>(this), static_cast<rgw::auth::TokenExtractor*>(&auth_token_extractor)) { /* When the constructor's body is being executed, all member engines * should be initialized. Thus, we can safely add them. 
*/ using Control = rgw::auth::Strategy::Control; add_engine(Control::SUFFICIENT, tempurl_engine); add_engine(Control::SUFFICIENT, signed_engine); /* The auth strategy is responsible for deciding whether a parcular * engine is disabled or not. */ if (! cct->_conf->rgw_keystone_url.empty()) { keystone_engine.emplace(cct, static_cast<rgw::auth::TokenExtractor*>(&auth_token_extractor), static_cast<rgw::auth::TokenExtractor*>(&service_token_extractor), static_cast<rgw::auth::RemoteApplier::Factory*>(this), keystone_config_t::get_instance(), keystone_cache_t::get_instance<keystone_config_t>()); add_engine(Control::SUFFICIENT, *keystone_engine); } if (! cct->_conf->rgw_swift_auth_url.empty()) { add_engine(Control::SUFFICIENT, external_engine); } add_engine(Control::SUFFICIENT, anon_engine); } const char* get_name() const noexcept override { return "rgw::auth::swift::DefaultStrategy"; } }; } /* namespace swift */ } /* namespace auth */ } /* namespace rgw */ class RGW_SWIFT_Auth_Get : public RGWOp { public: RGW_SWIFT_Auth_Get() {} ~RGW_SWIFT_Auth_Get() override {} int verify_permission(optional_yield) override { return 0; } void execute(optional_yield y) override; const char* name() const override { return "swift_auth_get"; } dmc::client_id dmclock_client() override { return dmc::client_id::auth; } }; class RGWHandler_SWIFT_Auth : public RGWHandler_REST { public: RGWHandler_SWIFT_Auth() {} ~RGWHandler_SWIFT_Auth() override {} RGWOp *op_get() override; int init(rgw::sal::Driver* driver, req_state *state, rgw::io::BasicClient *cio) override; int authorize(const DoutPrefixProvider *dpp, optional_yield y) override; int postauth_init(optional_yield) override { return 0; } int read_permissions(RGWOp *op, optional_yield) override { return 0; } virtual RGWAccessControlPolicy *alloc_policy() { return NULL; } virtual void free_policy(RGWAccessControlPolicy *policy) {} }; class RGWRESTMgr_SWIFT_Auth : public RGWRESTMgr { public: RGWRESTMgr_SWIFT_Auth() = default; 
~RGWRESTMgr_SWIFT_Auth() override = default; RGWRESTMgr *get_resource_mgr(req_state* const s, const std::string& uri, std::string* const out_uri) override { return this; } RGWHandler_REST* get_handler(rgw::sal::Driver* driver, req_state*, const rgw::auth::StrategyRegistry&, const std::string&) override { return new RGWHandler_SWIFT_Auth; } };
13,237
36.290141
114
h
null
ceph-main/src/rgw/rgw_sync.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_sync.h" #define dout_subsys ceph_subsys_rgw using namespace std; std::ostream& RGWMetaSyncStatusManager::gen_prefix(std::ostream& out) const { return out << "meta sync: "; } unsigned RGWMetaSyncStatusManager::get_subsys() const { return dout_subsys; } void RGWRemoteMetaLog::finish() { going_down = true; stop(); }
448
17.708333
76
cc
null
ceph-main/src/rgw/rgw_sync_checkpoint.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <fmt/format.h> #include "common/errno.h" #include "rgw_sync_checkpoint.h" #include "rgw_sal_rados.h" #include "rgw_bucket_sync.h" #include "rgw_data_sync.h" #include "rgw_http_errors.h" #include "cls/rgw/cls_rgw_client.h" #include "services/svc_sys_obj.h" #include "services/svc_zone.h" #include "rgw_zone.h" #define dout_subsys ceph_subsys_rgw namespace { std::string incremental_marker(const rgw_bucket_shard_sync_info& info) { return BucketIndexShardsManager::get_shard_marker(info.inc_marker.position); } bool operator<(const std::vector<rgw_bucket_shard_sync_info>& lhs, const BucketIndexShardsManager& rhs) { for (size_t i = 0; i < lhs.size(); ++i) { const auto& l = incremental_marker(lhs[i]); const auto& r = rhs.get(i, ""); if (l < r) { return true; } } return false; } bool empty(const BucketIndexShardsManager& markers, int size) { static const std::string empty_string; for (int i = 0; i < size; ++i) { const auto& m = markers.get(i, empty_string); if (!m.empty()) { return false; } } return true; } std::ostream& operator<<(std::ostream& out, const std::vector<rgw_bucket_shard_sync_info>& rhs) { const char* separator = ""; // first entry has no comma out << '['; for (auto& i : rhs) { out << std::exchange(separator, ", ") << incremental_marker(i); } return out << ']'; } std::ostream& operator<<(std::ostream& out, const BucketIndexShardsManager& rhs) { out << '['; const char* separator = ""; // first entry has no comma for (auto& [i, marker] : rhs.get()) { out << std::exchange(separator, ", ") << marker; } return out << ']'; } int bucket_source_sync_checkpoint(const 
DoutPrefixProvider* dpp, rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, const RGWBucketInfo& source_bucket_info, const rgw_sync_bucket_pipe& pipe, uint64_t latest_gen, const BucketIndexShardsManager& remote_markers, ceph::timespan retry_delay, ceph::coarse_mono_time timeout_at) { const int num_shards = remote_markers.get().size(); rgw_bucket_sync_status full_status; int r = rgw_read_bucket_full_sync_status(dpp, store, pipe, &full_status, null_yield); if (r < 0 && r != -ENOENT) { // retry on ENOENT return r; } // wait for incremental while (full_status.state != BucketSyncState::Incremental) { const auto delay_until = ceph::coarse_mono_clock::now() + retry_delay; if (delay_until > timeout_at) { lderr(store->ctx()) << "bucket checkpoint timed out waiting to reach incremental sync" << dendl; return -ETIMEDOUT; } ldout(store->ctx(), 1) << "waiting to reach incremental sync.." << dendl; std::this_thread::sleep_until(delay_until); r = rgw_read_bucket_full_sync_status(dpp, store, pipe, &full_status, null_yield); if (r < 0 && r != -ENOENT) { // retry on ENOENT return r; } } // wait for latest_gen while (full_status.incremental_gen < latest_gen) { const auto delay_until = ceph::coarse_mono_clock::now() + retry_delay; if (delay_until > timeout_at) { lderr(store->ctx()) << "bucket checkpoint timed out waiting to reach " "latest generation " << latest_gen << dendl; return -ETIMEDOUT; } ldout(store->ctx(), 1) << "waiting to reach latest gen " << latest_gen << ", on " << full_status.incremental_gen << ".." 
<< dendl; std::this_thread::sleep_until(delay_until); r = rgw_read_bucket_full_sync_status(dpp, store, pipe, &full_status, null_yield); if (r < 0 && r != -ENOENT) { // retry on ENOENT return r; } } if (full_status.incremental_gen > latest_gen) { ldpp_dout(dpp, 1) << "bucket sync caught up with source:\n" << " local gen: " << full_status.incremental_gen << '\n' << " remote gen: " << latest_gen << dendl; return 0; } if (empty(remote_markers, num_shards)) { ldpp_dout(dpp, 1) << "bucket sync caught up with empty source" << dendl; return 0; } std::vector<rgw_bucket_shard_sync_info> status; status.resize(std::max<size_t>(1, num_shards)); r = rgw_read_bucket_inc_sync_status(dpp, store, pipe, full_status.incremental_gen, &status); if (r < 0) { return r; } while (status < remote_markers) { const auto delay_until = ceph::coarse_mono_clock::now() + retry_delay; if (delay_until > timeout_at) { ldpp_dout(dpp, 0) << "bucket checkpoint timed out waiting for incremental sync to catch up" << dendl; return -ETIMEDOUT; } ldpp_dout(dpp, 1) << "waiting for incremental sync to catch up:\n" << " local status: " << status << '\n' << " remote markers: " << remote_markers << dendl; std::this_thread::sleep_until(delay_until); r = rgw_read_bucket_inc_sync_status(dpp, store, pipe, full_status.incremental_gen, &status); if (r < 0) { return r; } } ldpp_dout(dpp, 1) << "bucket sync caught up with source:\n" << " local status: " << status << '\n' << " remote markers: " << remote_markers << dendl; return 0; } int source_bilog_info(const DoutPrefixProvider *dpp, RGWSI_Zone* zone_svc, const rgw_sync_bucket_pipe& pipe, rgw_bucket_index_marker_info& info, BucketIndexShardsManager& markers, optional_yield y) { ceph_assert(pipe.source.zone); auto& zone_conn_map = zone_svc->get_zone_conn_map(); auto conn = zone_conn_map.find(pipe.source.zone->id); if (conn == zone_conn_map.end()) { return -EINVAL; } return rgw_read_remote_bilog_info(dpp, conn->second, *pipe.source.bucket, info, markers, y); } } // 
anonymous namespace int rgw_bucket_sync_checkpoint(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* store, const RGWBucketSyncPolicyHandler& policy, const RGWBucketInfo& info, std::optional<rgw_zone_id> opt_source_zone, std::optional<rgw_bucket> opt_source_bucket, ceph::timespan retry_delay, ceph::coarse_mono_time timeout_at) { struct sync_source_entry { rgw_sync_bucket_pipe pipe; uint64_t latest_gen = 0; BucketIndexShardsManager remote_markers; RGWBucketInfo source_bucket_info; }; std::list<sync_source_entry> sources; // fetch remote markers and bucket info in parallel boost::asio::io_context ioctx; for (const auto& [source_zone_id, pipe] : policy.get_all_sources()) { // filter by source zone/bucket if (opt_source_zone && *opt_source_zone != *pipe.source.zone) { continue; } if (opt_source_bucket && !opt_source_bucket->match(*pipe.source.bucket)) { continue; } auto& entry = sources.emplace_back(); entry.pipe = pipe; // fetch remote markers spawn::spawn(ioctx, [&] (yield_context yield) { auto y = optional_yield{ioctx, yield}; rgw_bucket_index_marker_info info; int r = source_bilog_info(dpp, store->svc()->zone, entry.pipe, info, entry.remote_markers, y); if (r < 0) { ldpp_dout(dpp, 0) << "failed to fetch remote bilog markers: " << cpp_strerror(r) << dendl; throw std::system_error(-r, std::system_category()); } entry.latest_gen = info.latest_gen; }); // fetch source bucket info spawn::spawn(ioctx, [&] (yield_context yield) { auto y = optional_yield{ioctx, yield}; int r = store->getRados()->get_bucket_instance_info( *entry.pipe.source.bucket, entry.source_bucket_info, nullptr, nullptr, y, dpp); if (r < 0) { ldpp_dout(dpp, 0) << "failed to read source bucket info: " << cpp_strerror(r) << dendl; throw std::system_error(-r, std::system_category()); } }); } try { ioctx.run(); } catch (const std::system_error& e) { return -e.code().value(); } // checkpoint each source sequentially for (const auto& e : sources) { int r = bucket_source_sync_checkpoint(dpp, store, info, 
e.source_bucket_info, e.pipe, e.latest_gen, e.remote_markers, retry_delay, timeout_at); if (r < 0) { ldpp_dout(dpp, 0) << "bucket sync checkpoint failed: " << cpp_strerror(r) << dendl; return r; } } ldpp_dout(dpp, 0) << "bucket checkpoint complete" << dendl; return 0; }
9,366
33.061818
107
cc
null
ceph-main/src/rgw/rgw_sync_checkpoint.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <optional> #include "common/ceph_time.h" #include "rgw_basic_types.h" class DoutPrefixProvider; namespace rgw::sal { class RadosStore; } class RGWBucketInfo; class RGWBucketSyncPolicyHandler; // poll the bucket's sync status until it's caught up against all sync sources int rgw_bucket_sync_checkpoint(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* store, const RGWBucketSyncPolicyHandler& policy, const RGWBucketInfo& info, std::optional<rgw_zone_id> opt_source_zone, std::optional<rgw_bucket> opt_source_bucket, ceph::timespan retry_delay, ceph::coarse_mono_time timeout_at);
1,239
33.444444
78
h
null
ceph-main/src/rgw/rgw_sync_policy.cc
#include "rgw_common.h" #include "rgw_sync_policy.h" #include "rgw_bucket.h" #define dout_subsys ceph_subsys_rgw using namespace std; string rgw_sync_bucket_entity::bucket_key() const { return rgw_sync_bucket_entities::bucket_key(bucket); } bool rgw_sync_pipe_filter_tag::from_str(const string& s) { if (s.empty()) { return false; } auto pos = s.find('='); if (pos == string::npos) { key = s; return true; } key = s.substr(0, pos); if (pos < s.size() - 1) { value = s.substr(pos + 1); } return true; } bool rgw_sync_pipe_filter_tag::operator==(const string& s) const { if (s.empty()) { return false; } auto pos = s.find('='); if (pos == string::npos) { return value.empty() && (s == key); } return s.compare(0, pos, s) == 0 && s.compare(pos + 1, s.size() - pos - 1, value) == 0; } void rgw_sync_pipe_filter::encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(prefix, bl); encode(tags, bl); ENCODE_FINISH(bl); } void rgw_sync_pipe_filter::decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(prefix, bl); decode(tags, bl); DECODE_FINISH(bl); } void rgw_sync_pipe_filter::set_prefix(std::optional<std::string> opt_prefix, bool prefix_rm) { if (opt_prefix) { prefix = *opt_prefix; } else if (prefix_rm) { prefix.reset(); } } void rgw_sync_pipe_filter::set_tags(std::list<std::string>& tags_add, std::list<std::string>& tags_rm) { for (auto& t : tags_rm) { rgw_sync_pipe_filter_tag tag; if (tag.from_str(t)) { tags.erase(tag); } } for (auto& t : tags_add) { rgw_sync_pipe_filter_tag tag; if (tag.from_str(t)) { tags.insert(tag); } } } bool rgw_sync_pipe_filter::is_subset_of(const rgw_sync_pipe_filter& f) const { if (f.prefix) { if (!prefix) { return false; } /* f.prefix exists, and this->prefix is either equal or bigger, * therefore this->prefix also set */ if (!boost::starts_with(*prefix, *f.prefix)) { return false; } } /* prefix is subset, now check tags. 
All our tags should exist in f.tags */ for (auto& t : tags) { if (f.tags.find(t) == f.tags.end()) { return false; } } return true; } bool rgw_sync_pipe_filter::check_tag(const string& s) const { if (tags.empty()) { /* tag filter wasn't defined */ return true; } auto iter = tags.find(rgw_sync_pipe_filter_tag(s)); return (iter != tags.end()); } bool rgw_sync_pipe_filter::check_tag(const string& k, const string& v) const { if (tags.empty()) { /* tag filter wasn't defined */ return true; } auto iter = tags.find(rgw_sync_pipe_filter_tag(k, v)); return (iter != tags.end()); } bool rgw_sync_pipe_filter::has_tags() const { return !tags.empty(); } bool rgw_sync_pipe_filter::check_tags(const std::vector<string>& _tags) const { if (tags.empty()) { return true; } for (auto& t : _tags) { if (check_tag(t)) { return true; } } return false; } bool rgw_sync_pipe_filter::check_tags(const RGWObjTags::tag_map_t& _tags) const { if (tags.empty()) { return true; } for (auto& item : _tags) { if (check_tag(item.first, item.second)) { return true; } } return false; } void rgw_sync_bucket_entity::apply_bucket(std::optional<rgw_bucket> b) { if (!b) { return; } if (!bucket || bucket->name.empty()) { bucket = b; } } void rgw_sync_bucket_entities::add_zones(const std::vector<rgw_zone_id>& new_zones) { for (auto& z : new_zones) { if (z == "*") { all_zones = true; zones.reset(); return; } if (!zones) { zones.emplace(); } zones->insert(z); all_zones = false; } } std::vector<rgw_sync_bucket_entity> rgw_sync_bucket_entities::expand() const { std::vector<rgw_sync_bucket_entity> result; rgw_bucket b = get_bucket(); if (all_zones) { rgw_sync_bucket_entity e; e.all_zones = true; e.bucket = b; result.push_back(e); return result; } if (!zones) { return result; } for (auto& z : *zones) { rgw_sync_bucket_entity e; e.all_zones = false; e.bucket = b; e.zone = z; result.push_back(e); } return result; } void rgw_sync_bucket_entities::remove_zones(const std::vector<rgw_zone_id>& rm_zones) { all_zones = false; if 
(!zones) { return; } for (auto& z : rm_zones) { zones->erase(z); } } static void set_bucket_field(std::optional<string> source, string *field) { if (!source) { return; } if (source == "*") { field->clear(); return; } *field = *source; } void rgw_sync_bucket_entities::set_bucket(std::optional<string> tenant, std::optional<string> bucket_name, std::optional<string> bucket_id) { if ((!bucket) && (tenant || bucket_name || bucket_id)) { bucket.emplace(); } if (!bucket) { return; } set_bucket_field(tenant, &bucket->tenant); set_bucket_field(bucket_name, &bucket->name); set_bucket_field(bucket_id, &bucket->bucket_id); if (bucket->tenant.empty() && bucket->name.empty() && bucket->bucket_id.empty()) { bucket.reset(); } } void rgw_sync_bucket_entities::remove_bucket(std::optional<string> tenant, std::optional<string> bucket_name, std::optional<string> bucket_id) { if (!bucket) { return; } if (tenant) { bucket->tenant.clear(); } if (bucket_name) { bucket->name.clear(); } if (bucket_id) { bucket->bucket_id.clear(); } if (bucket->tenant.empty() && bucket->name.empty() && bucket->bucket_id.empty()) { bucket.reset(); } } string rgw_sync_bucket_entities::bucket_key(std::optional<rgw_bucket> b) { if (!b) { return string("*"); } rgw_bucket _b = *b; if (_b.name.empty()) { _b.name = "*"; } return _b.get_key(); } std::vector<rgw_sync_bucket_pipe> rgw_sync_bucket_pipes::expand() const { std::vector<rgw_sync_bucket_pipe> result; auto sources = source.expand(); auto dests = dest.expand(); for (auto& s : sources) { for (auto& d : dests) { rgw_sync_bucket_pipe pipe; pipe.id = id; pipe.source = s; pipe.dest = d; pipe.params = params; result.push_back(pipe); } } return result; } void rgw_sync_bucket_pipes::get_potential_related_buckets(const rgw_bucket& bucket, std::set<rgw_bucket> *sources, std::set<rgw_bucket> *dests) const { if (dest.match_bucket(bucket)) { auto expanded_sources = source.expand(); for (auto& s : expanded_sources) { if (s.bucket && !s.bucket->name.empty()) { 
sources->insert(*s.bucket); } } } if (source.match_bucket(bucket)) { auto expanded_dests = dest.expand(); for (auto& d : expanded_dests) { if (d.bucket && !d.bucket->name.empty()) { dests->insert(*d.bucket); } } } } bool rgw_sync_data_flow_group::find_or_create_symmetrical(const string& flow_id, rgw_sync_symmetric_group **flow_group) { for (auto& group : symmetrical) { if (flow_id == group.id) { *flow_group = &group; return true; } } auto& group = symmetrical.emplace_back(); *flow_group = &group; (*flow_group)->id = flow_id; return true; } void rgw_sync_data_flow_group::remove_symmetrical(const string& flow_id, std::optional<std::vector<rgw_zone_id> > zones) { if (symmetrical.empty()) { return; } auto& groups = symmetrical; auto iter = groups.begin(); for (; iter != groups.end(); ++iter) { if (iter->id == flow_id) { if (!zones) { groups.erase(iter); if (groups.empty()) { symmetrical.clear(); } return; } break; } } if (iter == groups.end()) { return; } auto& flow_group = *iter; for (auto& z : *zones) { flow_group.zones.erase(z); } if (flow_group.zones.empty()) { groups.erase(iter); } if (groups.empty()) { symmetrical.clear(); } } bool rgw_sync_data_flow_group::find_or_create_directional(const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone, rgw_sync_directional_rule **flow_group) { for (auto& rule : directional) { if (source_zone == rule.source_zone && dest_zone == rule.dest_zone) { *flow_group = &rule; return true; } } auto& rule = directional.emplace_back(); *flow_group = &rule; rule.source_zone = source_zone; rule.dest_zone = dest_zone; return true; } void rgw_sync_data_flow_group::remove_directional(const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone) { if (directional.empty()) { return; } for (auto iter = directional.begin(); iter != directional.end(); ++iter) { auto& rule = *iter; if (source_zone == rule.source_zone && dest_zone == rule.dest_zone) { directional.erase(iter); return; } } } void rgw_sync_data_flow_group::init_default(const 
std::set<rgw_zone_id>& zones) { symmetrical.clear(); symmetrical.push_back(rgw_sync_symmetric_group("default", zones)); } bool rgw_sync_policy_group::find_pipe(const string& pipe_id, bool create, rgw_sync_bucket_pipes **pipe) { for (auto& p : pipes) { if (pipe_id == p.id) { *pipe = &p; return true; } } if (!create) { return false; } auto& p = pipes.emplace_back(); *pipe = &p; p.id = pipe_id; return true; } void rgw_sync_policy_group::remove_pipe(const string& pipe_id) { for (auto iter = pipes.begin(); iter != pipes.end(); ++iter) { if (pipe_id == iter->id) { pipes.erase(iter); return; } } } void rgw_sync_policy_group::get_potential_related_buckets(const rgw_bucket& bucket, std::set<rgw_bucket> *sources, std::set<rgw_bucket> *dests) const { for (auto& pipe : pipes) { pipe.get_potential_related_buckets(bucket, sources, dests); } } void rgw_sync_policy_info::get_potential_related_buckets(const rgw_bucket& bucket, std::set<rgw_bucket> *sources, std::set<rgw_bucket> *dests) const { for (auto& entry : groups) { auto& group = entry.second; group.get_potential_related_buckets(bucket, sources, dests); } } void rgw_sync_directional_rule::dump(Formatter *f) const { encode_json("source_zone", source_zone, f); encode_json("dest_zone", dest_zone, f); } void rgw_sync_directional_rule::decode_json(JSONObj *obj) { JSONDecoder::decode_json("source_zone", source_zone, obj); JSONDecoder::decode_json("dest_zone", dest_zone, obj); } void rgw_sync_symmetric_group::dump(Formatter *f) const { encode_json("id", id, f); encode_json("zones", zones, f); } void rgw_sync_symmetric_group::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("zones", zones, obj); } void rgw_sync_bucket_entity::dump(Formatter *f) const { encode_json("zone", zone, f); encode_json("bucket", bucket_key(), f); } void rgw_sync_bucket_entity::decode_json(JSONObj *obj) { JSONDecoder::decode_json("zone", zone, obj); string s; if (JSONDecoder::decode_json("bucket", s, obj)) { 
rgw_bucket b; int ret = rgw_bucket_parse_bucket_key(nullptr, s, &b, nullptr); if (ret >= 0) { bucket = b; } else { bucket.reset(); } } } void rgw_sync_pipe_filter_tag::dump(Formatter *f) const { encode_json("key", key, f); encode_json("value", value, f); } void rgw_sync_pipe_filter_tag::decode_json(JSONObj *obj) { JSONDecoder::decode_json("key", key, obj); JSONDecoder::decode_json("value", value, obj); } void rgw_sync_pipe_filter::dump(Formatter *f) const { encode_json("prefix", prefix, f); encode_json("tags", tags, f); } void rgw_sync_pipe_filter::decode_json(JSONObj *obj) { JSONDecoder::decode_json("prefix", prefix, obj); JSONDecoder::decode_json("tags", tags, obj); } void rgw_sync_pipe_acl_translation::dump(Formatter *f) const { encode_json("owner", owner, f); } void rgw_sync_pipe_acl_translation::decode_json(JSONObj *obj) { JSONDecoder::decode_json("owner", owner, obj); } void rgw_sync_pipe_source_params::dump(Formatter *f) const { encode_json("filter", filter, f); } void rgw_sync_pipe_source_params::decode_json(JSONObj *obj) { JSONDecoder::decode_json("filter", filter, obj); } void rgw_sync_pipe_dest_params::dump(Formatter *f) const { encode_json("acl_translation", acl_translation, f); encode_json("storage_class", storage_class, f); } void rgw_sync_pipe_dest_params::decode_json(JSONObj *obj) { JSONDecoder::decode_json("acl_translation", acl_translation, obj); JSONDecoder::decode_json("storage_class", storage_class, obj); } void rgw_sync_pipe_params::dump(Formatter *f) const { encode_json("source", source, f); encode_json("dest", dest, f); encode_json("priority", priority, f); string s; switch (mode) { case MODE_SYSTEM: s = "system"; break; default: s = "user"; } encode_json("mode", s, f); encode_json("user", user, f); } void rgw_sync_pipe_params::decode_json(JSONObj *obj) { JSONDecoder::decode_json("source", source, obj); JSONDecoder::decode_json("dest", dest, obj); JSONDecoder::decode_json("priority", priority, obj); string s; JSONDecoder::decode_json("mode", 
s, obj); if (s == "system") { mode = MODE_SYSTEM; } else { mode = MODE_USER; } JSONDecoder::decode_json("user", user, obj); } void rgw_sync_bucket_entities::dump(Formatter *f) const { encode_json("bucket", rgw_sync_bucket_entities::bucket_key(bucket), f); if (zones) { encode_json("zones", zones, f); } else if (all_zones) { set<string> z = { "*" }; encode_json("zones", z, f); } } void rgw_sync_bucket_entities::decode_json(JSONObj *obj) { string s; JSONDecoder::decode_json("bucket", s, obj); if (s == "*") { bucket.reset(); } else { rgw_bucket b; int ret = rgw_bucket_parse_bucket_key(nullptr, s, &b, nullptr); if (ret < 0) { bucket.reset(); } else { if (b.tenant == "*") { b.tenant.clear(); } if (b.name == "*") { b.name.clear(); } if (b.bucket_id == "*") { b.bucket_id.clear(); } bucket = b; } } JSONDecoder::decode_json("zones", zones, obj); if (zones && zones->size() == 1) { auto iter = zones->begin(); if (*iter == "*") { zones.reset(); all_zones = true; } } } void rgw_sync_bucket_pipe::dump(Formatter *f) const { encode_json("id", id, f); encode_json("source", source, f); encode_json("dest", dest, f); encode_json("params", params, f); } void rgw_sync_bucket_pipe::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("source", source, obj); JSONDecoder::decode_json("dest", dest, obj); JSONDecoder::decode_json("params", params, obj); } void rgw_sync_bucket_pipes::dump(Formatter *f) const { encode_json("id", id, f); encode_json("source", source, f); encode_json("dest", dest, f); encode_json("params", params, f); } void rgw_sync_bucket_pipes::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("source", source, obj); JSONDecoder::decode_json("dest", dest, obj); JSONDecoder::decode_json("params", params, obj); } void rgw_sync_data_flow_group::dump(Formatter *f) const { if (!symmetrical.empty()) { encode_json("symmetrical", symmetrical, f); } if (!directional.empty()) { 
encode_json("directional", directional, f); } } void rgw_sync_data_flow_group::decode_json(JSONObj *obj) { JSONDecoder::decode_json("symmetrical", symmetrical, obj); JSONDecoder::decode_json("directional", directional, obj); } void rgw_sync_policy_group::dump(Formatter *f) const { encode_json("id", id, f); encode_json("data_flow", data_flow, f); encode_json("pipes", pipes, f); string s; switch (status) { case rgw_sync_policy_group::Status::FORBIDDEN: s = "forbidden"; break; case rgw_sync_policy_group::Status::ALLOWED: s = "allowed"; break; case rgw_sync_policy_group::Status::ENABLED: s = "enabled"; break; default: s = "unknown"; } encode_json("status", s, f); } void rgw_sync_policy_group::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("data_flow", data_flow, obj); JSONDecoder::decode_json("pipes", pipes, obj); string s; JSONDecoder::decode_json("status", s, obj); set_status(s); } void rgw_sync_policy_info::dump(Formatter *f) const { Formatter::ArraySection section(*f, "groups"); for (auto& group : groups ) { encode_json("group", group.second, f); } } void rgw_sync_policy_info::decode_json(JSONObj *obj) { vector<rgw_sync_policy_group> groups_vec; JSONDecoder::decode_json("groups", groups_vec, obj); for (auto& group : groups_vec) { groups.emplace(std::make_pair(group.id, std::move(group))); } }
17,358
21.029188
159
cc
null
ceph-main/src/rgw/rgw_sync_policy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2018 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "rgw_basic_types.h" #include "rgw_tag.h" struct rgw_sync_symmetric_group { std::string id; std::set<rgw_zone_id> zones; rgw_sync_symmetric_group() {} rgw_sync_symmetric_group(const std::string& _id, const std::set<rgw_zone_id> _zones) : id(_id), zones(_zones) {} void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(id, bl); encode(zones, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(id, bl); decode(zones, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_sync_symmetric_group) struct rgw_sync_directional_rule { rgw_zone_id source_zone; rgw_zone_id dest_zone; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(source_zone, bl); encode(dest_zone, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(source_zone, bl); decode(dest_zone, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_sync_directional_rule) struct rgw_sync_bucket_entity { std::optional<rgw_zone_id> zone; /* define specific zones */ std::optional<rgw_bucket> bucket; /* define specific bucket */ static bool match_str(const std::string& s1, const std::string& s2) { /* empty std::string is wildcard */ return (s1.empty() || s2.empty() || s1 == s2); } bool all_zones{false}; rgw_sync_bucket_entity() {} rgw_sync_bucket_entity(const rgw_zone_id& _zone, std::optional<rgw_bucket> _bucket) : zone(_zone), 
bucket(_bucket.value_or(rgw_bucket())) {} bool specific() const { return zone && bucket; } void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(all_zones, bl); encode(zone, bl); encode(bucket, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(all_zones, bl); decode(zone, bl); decode(bucket, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); rgw_bucket get_bucket() const { return bucket.value_or(rgw_bucket()); } std::string bucket_key() const; bool match_zone(const rgw_zone_id& z) const { if (all_zones) { return true; } if (!zone) { return false; } return (*zone == z); } void apply_zone(const rgw_zone_id& z) { all_zones = false; zone = z; } static bool match_bucket_id(const std::string& bid1, const std::string& bid2) { return (bid1.empty() || bid2.empty() || (bid1 == bid2)); } bool match_bucket(std::optional<rgw_bucket> b) const { if (!b) { return true; } if (!bucket) { return true; } return (match_str(bucket->tenant, b->tenant) && match_str(bucket->name, b->name) && match_bucket_id(bucket->bucket_id, b->bucket_id)); } bool match(const rgw_sync_bucket_entity& entity) const { if (!entity.zone) { return match_bucket(entity.bucket); } return (match_zone(*entity.zone) && match_bucket(entity.bucket)); } const bool operator<(const rgw_sync_bucket_entity& e) const { if (all_zones && !e.all_zones) { return false; } if (!all_zones && e.all_zones) { return true; } if (zone < e.zone) { return true; } if (e.zone < zone) { return false; } return (bucket < e.bucket); } void apply_bucket(std::optional<rgw_bucket> _b); }; WRITE_CLASS_ENCODER(rgw_sync_bucket_entity) struct rgw_sync_pipe_filter_tag { std::string key; std::string value; rgw_sync_pipe_filter_tag() {} rgw_sync_pipe_filter_tag(const std::string& s) { from_str(s); } rgw_sync_pipe_filter_tag(const std::string& _key, const std::string& _value) : key(_key), value(_value) {} void encode(bufferlist& bl) const { 
ENCODE_START(1, 1, bl); encode(key, bl); encode(value, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(key, bl); decode(value, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool from_str(const std::string& s); bool operator<(const rgw_sync_pipe_filter_tag& t) const { if (key < t.key) { return true; } if (t.key < key) { return false; } return (value < t.value); } bool operator==(const std::string& s) const; }; WRITE_CLASS_ENCODER(rgw_sync_pipe_filter_tag) struct rgw_sync_pipe_filter { std::optional<std::string> prefix; std::set<rgw_sync_pipe_filter_tag> tags; void set_prefix(std::optional<std::string> opt_prefix, bool prefix_rm); void set_tags(std::list<std::string>& tags_add, std::list<std::string>& tags_rm); void encode(bufferlist& bl) const; void decode(bufferlist::const_iterator& bl); void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool is_subset_of(const rgw_sync_pipe_filter& f) const; bool has_tags() const; bool check_tag(const std::string& s) const; bool check_tag(const std::string& k, const std::string& v) const; bool check_tags(const std::vector<std::string>& tags) const; bool check_tags(const RGWObjTags::tag_map_t& tags) const; }; WRITE_CLASS_ENCODER(rgw_sync_pipe_filter) struct rgw_sync_pipe_acl_translation { rgw_user owner; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(owner, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(owner, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool operator==(const rgw_sync_pipe_acl_translation& aclt) const { return (owner == aclt.owner); } }; WRITE_CLASS_ENCODER(rgw_sync_pipe_acl_translation) struct rgw_sync_pipe_source_params { rgw_sync_pipe_filter filter; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(filter, bl); ENCODE_FINISH(bl); } void 
decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(filter, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_sync_pipe_source_params) struct rgw_sync_pipe_dest_params { std::optional<rgw_sync_pipe_acl_translation> acl_translation; std::optional<std::string> storage_class; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(acl_translation, bl); encode(storage_class, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(acl_translation, bl); decode(storage_class, bl); DECODE_FINISH(bl); } void set_storage_class(const std::string& sc) { storage_class = sc; } void set_owner(const rgw_user& owner) { if (owner.empty()){ acl_translation.reset(); } else { acl_translation.emplace(); acl_translation->owner = owner; } } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool operator==(const rgw_sync_pipe_dest_params& rhs) const { return (acl_translation == rhs.acl_translation && storage_class == rhs.storage_class); } }; WRITE_CLASS_ENCODER(rgw_sync_pipe_dest_params) struct rgw_sync_pipe_params { rgw_sync_pipe_source_params source; rgw_sync_pipe_dest_params dest; enum Mode { MODE_SYSTEM = 0, MODE_USER = 1, } mode{MODE_SYSTEM}; int32_t priority{0}; rgw_user user; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(source, bl); encode(dest, bl); encode(priority, bl); encode((uint8_t)mode, bl); encode(user, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(source, bl); decode(dest, bl); decode(priority, bl); uint8_t m; decode(m, bl); mode = (Mode)m; decode(user, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_sync_pipe_params) struct rgw_sync_bucket_pipe { std::string id; rgw_sync_bucket_entity source; rgw_sync_bucket_entity dest; rgw_sync_pipe_params params; bool specific() 
const { return source.specific() && dest.specific(); } void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(id, bl); encode(source, bl); encode(dest, bl); encode(params, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(id, bl); decode(source, bl); decode(dest, bl); decode(params, bl); DECODE_FINISH(bl); } const bool operator<(const rgw_sync_bucket_pipe& p) const { if (id < p.id) { return true; } if (id >p.id) { return false; } if (source < p.source) { return true; } if (p.source < source) { return false; } return (dest < p.dest); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_sync_bucket_pipe) struct rgw_sync_bucket_entities { std::optional<rgw_bucket> bucket; /* define specific bucket */ std::optional<std::set<rgw_zone_id> > zones; /* define specific zones, if not set then all zones */ bool all_zones{false}; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(bucket, bl); encode(zones, bl); encode(all_zones, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(bucket, bl); decode(zones, bl); decode(all_zones, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool match_bucket(std::optional<rgw_bucket> b) const { if (!b) { return true; } if (!bucket) { return true; } return (rgw_sync_bucket_entity::match_str(bucket->tenant, b->tenant) && rgw_sync_bucket_entity::match_str(bucket->name, b->name) && rgw_sync_bucket_entity::match_str(bucket->bucket_id, b->bucket_id)); } void add_zones(const std::vector<rgw_zone_id>& new_zones); void remove_zones(const std::vector<rgw_zone_id>& rm_zones); void set_bucket(std::optional<std::string> tenant, std::optional<std::string> bucket_name, std::optional<std::string> bucket_id); void remove_bucket(std::optional<std::string> tenant, std::optional<std::string> bucket_name, std::optional<std::string> bucket_id); bool 
match_zone(const rgw_zone_id& zone) const { if (!zones) { if (all_zones) { return true; } return false; } return (zones->find(zone) != zones->end()); } std::vector<rgw_sync_bucket_entity> expand() const; rgw_bucket get_bucket() const { return bucket.value_or(rgw_bucket()); } static std::string bucket_key(std::optional<rgw_bucket> b); void set_all_zones(bool state) { all_zones = state; if (all_zones) { zones.reset(); } } }; WRITE_CLASS_ENCODER(rgw_sync_bucket_entities) struct rgw_sync_bucket_pipes { std::string id; rgw_sync_bucket_entities source; rgw_sync_bucket_entities dest; rgw_sync_pipe_params params; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(id, bl); encode(source, bl); encode(dest, bl); encode(params, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(id, bl); decode(source, bl); decode(dest, bl); decode(params, bl); DECODE_FINISH(bl); } bool match_source(const rgw_zone_id& zone, std::optional<rgw_bucket> b) const { return (source.match_zone(zone) && source.match_bucket(b)); } bool match_dest(const rgw_zone_id& zone, std::optional<rgw_bucket> b) const { return (dest.match_zone(zone) && dest.match_bucket(b)); } bool contains_zone_bucket(const rgw_zone_id& zone, std::optional<rgw_bucket> b) const { return (match_source(zone, b) || match_dest(zone, b)); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); std::vector<rgw_sync_bucket_pipe> expand() const; void get_potential_related_buckets(const rgw_bucket& bucket, std::set<rgw_bucket> *sources, std::set<rgw_bucket> *dests) const; }; WRITE_CLASS_ENCODER(rgw_sync_bucket_pipes) std::ostream& operator<<(std::ostream& os, const rgw_sync_bucket_entity& e); std::ostream& operator<<(std::ostream& os, const rgw_sync_bucket_pipe& pipe); std::ostream& operator<<(std::ostream& os, const rgw_sync_bucket_entities& e); std::ostream& operator<<(std::ostream& os, const rgw_sync_bucket_pipes& pipe); /* * define data flow between zones. 
Symmetrical: zones sync from each other. * Directional: one zone fetches data from another. */ struct rgw_sync_data_flow_group { std::vector<rgw_sync_symmetric_group> symmetrical; std::vector<rgw_sync_directional_rule> directional; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(symmetrical, bl); encode(directional, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(symmetrical, bl); decode(directional, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool empty() const { return (symmetrical.empty() && directional.empty()); } bool find_or_create_symmetrical(const std::string& flow_id, rgw_sync_symmetric_group **flow_group); void remove_symmetrical(const std::string& flow_id, std::optional<std::vector<rgw_zone_id> > zones); bool find_or_create_directional(const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone, rgw_sync_directional_rule **flow_group); void remove_directional(const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone); void init_default(const std::set<rgw_zone_id>& zones); }; WRITE_CLASS_ENCODER(rgw_sync_data_flow_group) struct rgw_sync_policy_group { std::string id; rgw_sync_data_flow_group data_flow; /* override data flow, howver, will not be able to add new flows that don't exist at higher level */ std::vector<rgw_sync_bucket_pipes> pipes; /* if not defined then applies to all buckets (DR sync) */ enum Status { UNKNOWN = 0, /* ? 
*/ FORBIDDEN = 1, /* sync not allowed */ ALLOWED = 2, /* sync allowed */ ENABLED = 3, /* sync should happen */ } status; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(id, bl); encode(data_flow, bl); encode(pipes, bl); encode((uint32_t)status, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(id, bl); decode(data_flow, bl); decode(pipes, bl); uint32_t s; decode(s, bl); status = (Status)s; DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool set_status(const std::string& s) { if (s == "forbidden") { status = rgw_sync_policy_group::Status::FORBIDDEN; } else if (s == "allowed") { status = rgw_sync_policy_group::Status::ALLOWED; } else if (s == "enabled") { status = rgw_sync_policy_group::Status::ENABLED; } else { status = rgw_sync_policy_group::Status::UNKNOWN; return false; } return true; } bool find_pipe(const std::string& pipe_id, bool create, rgw_sync_bucket_pipes **pipe); void remove_pipe(const std::string& pipe_id); void get_potential_related_buckets(const rgw_bucket& bucket, std::set<rgw_bucket> *sources, std::set<rgw_bucket> *dests) const; }; WRITE_CLASS_ENCODER(rgw_sync_policy_group) struct rgw_sync_policy_info { std::map<std::string, rgw_sync_policy_group> groups; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(groups, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(groups, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter *f) const; void decode_json(JSONObj *obj); bool empty() const { return groups.empty(); } void get_potential_related_buckets(const rgw_bucket& bucket, std::set<rgw_bucket> *sources, std::set<rgw_bucket> *dests) const; }; WRITE_CLASS_ENCODER(rgw_sync_policy_info)
17,549
24.695461
136
h
null
ceph-main/src/rgw/rgw_tag.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <map> #include <string> #include <common/errno.h> #include <boost/algorithm/string.hpp> #include "rgw_tag.h" #include "rgw_common.h" using namespace std; void RGWObjTags::add_tag(const string& key, const string& val){ tag_map.emplace(std::make_pair(key,val)); } void RGWObjTags::emplace_tag(std::string&& key, std::string&& val){ tag_map.emplace(std::move(key), std::move(val)); } int RGWObjTags::check_and_add_tag(const string&key, const string& val){ if (tag_map.size() == max_obj_tags || key.size() > max_tag_key_size || val.size() > max_tag_val_size || key.size() == 0){ return -ERR_INVALID_TAG; } add_tag(key,val); return 0; } int RGWObjTags::set_from_string(const string& input){ if (input.empty()) { return 0; } int ret=0; vector <string> kvs; boost::split(kvs, input, boost::is_any_of("&")); for (const auto& kv: kvs){ auto p = kv.find("="); string key,val; if (p != string::npos) { ret = check_and_add_tag(url_decode(kv.substr(0,p)), url_decode(kv.substr(p+1))); } else { ret = check_and_add_tag(url_decode(kv)); } if (ret < 0) return ret; } return ret; } void RGWObjTags::dump(Formatter *f) const { f->open_object_section("tagset"); for (auto& tag: tag_map){ f->dump_string(tag.first.c_str(), tag.second); } f->close_section(); }
1,505
21.147059
71
cc
null
ceph-main/src/rgw/rgw_tag.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <string> #include <include/types.h> #include <map> class RGWObjTags { public: using tag_map_t = std::multimap <std::string, std::string>; protected: tag_map_t tag_map; uint32_t max_obj_tags{10}; static constexpr uint32_t max_tag_key_size{128}; static constexpr uint32_t max_tag_val_size{256}; public: RGWObjTags() = default; RGWObjTags(uint32_t max_obj_tags):max_obj_tags(max_obj_tags) {} void encode(bufferlist& bl) const { ENCODE_START(1,1,bl); encode(tag_map, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator &bl) { DECODE_START_LEGACY_COMPAT_LEN(1, 1, 1, bl); decode(tag_map,bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void add_tag(const std::string& key, const std::string& val=""); void emplace_tag(std::string&& key, std::string&& val); int check_and_add_tag(const std::string& key, const std::string& val=""); size_t count() const {return tag_map.size();} int set_from_string(const std::string& input); void clear() { tag_map.clear(); } bool empty() const noexcept { return tag_map.empty(); } const tag_map_t& get_tags() const {return tag_map;} tag_map_t& get_tags() {return tag_map;} }; WRITE_CLASS_ENCODER(RGWObjTags)
1,360
26.22
75
h
null
ceph-main/src/rgw/rgw_tag_s3.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <map> #include <string> #include <iostream> #include "include/types.h" #include "rgw_tag_s3.h" using namespace std; void RGWObjTagEntry_S3::decode_xml(XMLObj *obj) { RGWXMLDecoder::decode_xml("Key", key, obj, true); RGWXMLDecoder::decode_xml("Value", val, obj, true); } void RGWObjTagEntry_S3::dump_xml(Formatter *f) const { encode_xml("Key", key, f); encode_xml("Value", val, f); if (key.empty()) { throw RGWXMLDecoder::err("empty key"); } if (val.empty()) { throw RGWXMLDecoder::err("empty val"); } } void RGWObjTagSet_S3::decode_xml(XMLObj *obj) { vector<RGWObjTagEntry_S3> entries; bool mandatory{false}; RGWXMLDecoder::decode_xml("Tag", entries, obj, mandatory); for (auto& entry : entries) { const std::string& key = entry.get_key(); const std::string& val = entry.get_val(); add_tag(key,val); } } int RGWObjTagSet_S3::rebuild(RGWObjTags& dest) { int ret; for (const auto &it : tag_map){ ret = dest.check_and_add_tag(it.first, it.second); if (ret < 0) return ret; } return 0; } void RGWObjTagging_S3::decode_xml(XMLObj *obj) { RGWXMLDecoder::decode_xml("TagSet", tagset, obj, true); } void RGWObjTagSet_S3::dump_xml(Formatter *f) const { for (const auto& tag : tag_map){ Formatter::ObjectSection os(*f, "Tag"); encode_xml("Key", tag.first, f); encode_xml("Value", tag.second, f); } }
1,505
21.477612
70
cc
null
ceph-main/src/rgw/rgw_tag_s3.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <map> #include <string> #include <iostream> #include <include/types.h> #include <common/Formatter.h> #include <expat.h> #include "rgw_tag.h" #include "rgw_xml.h" class RGWObjTagEntry_S3 { std::string key; std::string val; public: RGWObjTagEntry_S3() {} RGWObjTagEntry_S3(const std::string &k, const std::string &v):key(k),val(v) {}; ~RGWObjTagEntry_S3() {} const std::string& get_key () const { return key; } const std::string& get_val () const { return val; } void dump_xml(Formatter *f) const; void decode_xml(XMLObj *obj); }; class RGWObjTagSet_S3: public RGWObjTags { public: int rebuild(RGWObjTags& dest); void dump_xml(Formatter *f) const; void decode_xml(XMLObj *obj); }; class RGWObjTagging_S3 { RGWObjTagSet_S3 tagset; public: void decode_xml(XMLObj *obj); int rebuild(RGWObjTags& dest) { return tagset.rebuild(dest); } };
1,004
19.1
81
h
null
ceph-main/src/rgw/rgw_tar.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#pragma once

#include <algorithm>
#include <array>
#include <cstring>
#include <string_view>
#include <tuple>
#include <utility>

#include <boost/optional.hpp>
#include <boost/range/adaptor/reversed.hpp>

namespace rgw {
namespace tar {

// tar archives are processed in fixed 512-byte blocks.
static constexpr size_t BLOCK_SIZE = 512;

static inline std::pair<class StatusIndicator,
                        boost::optional<class HeaderView>>
interpret_block(const StatusIndicator& status, ceph::bufferlist& bl);

// Tracks end-of-archive detection.  A tar stream terminates with two
// consecutive zero-filled blocks, so eof() becomes true only when the
// current block AND the previous one were empty.  Instances can only be
// produced by create() or by interpret_block() (the constructors are
// private).
class StatusIndicator {
  friend std::pair<class StatusIndicator,
                   boost::optional<class HeaderView>>
  interpret_block(const StatusIndicator& status, ceph::bufferlist& bl);

  bool is_empty;  // current block was all zeroes
  bool is_eof;    // current and previous blocks were both all zeroes

  StatusIndicator()
    : is_empty(false),
      is_eof(false) {
  }

  StatusIndicator(const StatusIndicator& prev_status,
                  const bool is_empty)
    : is_empty(is_empty),
      is_eof(is_empty && prev_status.empty()) {
  }

public:
  bool empty() const {
    return is_empty;
  }

  bool eof() const {
    return is_eof;
  }

  static StatusIndicator create() {
    return StatusIndicator();
  }
} /* class StatusIndicator */;

// Values are the literal characters stored in the header's typeflag byte.
enum class FileType : char {
  UNKNOWN = '\0',

  /* The tar format uses ASCII encoding. */
  NORMAL_FILE = '0',
  DIRECTORY = '5'
}; /* enum class FileType */

// Read-only, non-owning view over one 512-byte tar header block.
// The referenced buffer must outlive the view.
class HeaderView {
protected:
  /* Everything is char here (ASCII encoding), so we don't need to worry
   * about the struct padding. */
  const struct header_t {
    char filename[100];
    char __filemode[8];
    char __owner_id[8];
    char __group_id[8];
    char filesize[12];
    char lastmod[12];
    char checksum[8];
    char filetype;
    char __padding[355];
  } *header;

  static_assert(sizeof(*header) == BLOCK_SIZE,
                "The TAR header must be exactly BLOCK_SIZE length");

  /* The label is far more important from what the code really does. */
  // Convert a zero-based index into the length that includes that index.
  static size_t pos2len(const size_t pos) {
    return pos + 1;
  }

public:
  explicit HeaderView(const char (&header)[BLOCK_SIZE])
    : header(reinterpret_cast<const header_t*>(header)) {
  }

  // Map the typeflag byte to FileType; anything unrecognized is UNKNOWN.
  FileType get_filetype() const {
    switch (header->filetype) {
      case static_cast<char>(FileType::NORMAL_FILE):
        return FileType::NORMAL_FILE;
      case static_cast<char>(FileType::DIRECTORY):
        return FileType::DIRECTORY;
      default:
        return FileType::UNKNOWN;
    }
  }

  // Name field trimmed at the first NUL.  The field is not guaranteed to
  // be NUL-terminated when exactly 100 characters long, hence the min().
  std::string_view get_filename() const {
    return std::string_view(header->filename,
                            std::min(sizeof(header->filename),
                                     strlen(header->filename)));
  }

  // Decode the size field: octal digits padded with spaces and/or NULs.
  size_t get_filesize() const {
    /* The string_ref is pretty suitable here because tar encodes its
     * metadata in ASCII. */
    const std::string_view raw(header->filesize, sizeof(header->filesize));

    /* We need to find where the padding ends. */
    const auto pad_ends_at = std::min(raw.find_last_not_of('\0'),
                                      raw.find_last_not_of(' '));
    const auto trimmed = raw.substr(0,
                                    pad_ends_at == std::string_view::npos
                                      ? std::string_view::npos
                                      : pos2len(pad_ends_at));

    // Base-8 accumulation, least-significant digit last (right-to-left).
    size_t sum = 0, mul = 1;
    for (const char c : boost::adaptors::reverse(trimmed)) {
      sum += (c - '0') * mul;
      mul *= 8;
    }
    return sum;
  }
}; /* class Header */

// Classify one 512-byte block taken from 'bl': returns the updated
// end-of-archive status, plus a HeaderView when the block is non-zero
// (boost::none for an all-zero block).
static inline std::pair<StatusIndicator,
                        boost::optional<HeaderView>>
interpret_block(const StatusIndicator& status, ceph::bufferlist& bl) {
  static constexpr std::array<char, BLOCK_SIZE> zero_block = {0, };
  const char (&block)[BLOCK_SIZE] = \
    reinterpret_cast<const char (&)[BLOCK_SIZE]>(*bl.c_str());

  if (std::memcmp(zero_block.data(), block, BLOCK_SIZE) == 0) {
    return std::make_pair(StatusIndicator(status, true), boost::none);
  } else {
    return std::make_pair(StatusIndicator(status, false), HeaderView(block));
  }
}

} /* namespace tar */
} /* namespace rgw */
4,059
25.363636
78
h
null
ceph-main/src/rgw/rgw_token.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#include <errno.h>
#include <iostream>
#include <sstream>
#include <string>

#include "common/config.h"
#include "common/ceph_argparse.h"
#include "common/debug.h"
#include "global/global_init.h"
#include "include/ceph_assert.h"
#include "include/str_list.h"

#include "rgw_token.h"
#include "rgw_b64.h"

#define dout_subsys ceph_subsys_rgw

namespace {

  using namespace rgw;
  using std::get;
  using std::string;

  // Command-line state, filled in from flags and environment variables.
  RGWToken::token_type type{RGWToken::TOKEN_NONE};
  string access_key{""};
  string secret_key{""};

  Formatter* token_formatter{nullptr};

  bool verbose {false};
  bool do_encode {false};
  bool do_decode {false};

}

using namespace std;

// Print CLI usage for the radosgw-token utility.
void usage()
{
  cout << "usage: radosgw-token --encode --ttype=<token type> [options...]"
       << std::endl;
  cout << "\t(maybe exporting RGW_ACCESS_KEY_ID and RGW_SECRET_ACCESS_KEY)"
       << std::endl;
  cout << "\t <token type> := ad | ldap" << std::endl;
  cout << "\n";
  generic_client_usage();
}

// radosgw-token: build an RGWToken from access/secret credentials and
// print it as a base64-wrapped JSON document.
int main(int argc, char **argv)
{
  auto args = argv_to_vec(argc, argv);
  std::string val;

  if (args.empty()) {
    cerr << argv[0] << ": -h or --help for usage" << std::endl;
    exit(1);
  }
  if (ceph_argparse_need_usage(args)) {
    usage();
    exit(0);
  }

  auto cct = global_init(nullptr, args, CEPH_ENTITY_TYPE_CLIENT,
                         CODE_ENVIRONMENT_UTILITY, 0);
  common_init_finish(g_ceph_context);

  // Environment variables provide defaults; flags below may override them.
  char *v{nullptr};
  v = getenv("RGW_ACCESS_KEY_ID");
  if (v) {
    access_key = v;
  }

  v = getenv("RGW_SECRET_ACCESS_KEY");
  if (v) {
    secret_key = v;
  }

  for (auto arg_iter = args.begin(); arg_iter != args.end();) {
    if (ceph_argparse_witharg(args, arg_iter, &val, "--access",
                              (char*) nullptr)) {
      access_key = val;
    } else if (ceph_argparse_witharg(args, arg_iter, &val, "--secret",
                                     (char*) nullptr)) {
      secret_key = val;
    } else if (ceph_argparse_witharg(args, arg_iter, &val, "--ttype",
                                     (char*) nullptr)) {
      // Accept only the recognized token types (case-insensitive).
      for (const auto& ttype : {"ad", "ldap"}) {
        if (boost::iequals(val, ttype)) {
          type = RGWToken::to_type(val);
          break;
        }
      }
    } else if (ceph_argparse_flag(args, arg_iter, "--encode",
                                  (char*) nullptr)) {
      do_encode = true;
    } else if (ceph_argparse_flag(args, arg_iter, "--decode",
                                  (char*) nullptr)) {
      do_decode = true;
    } else if (ceph_argparse_flag(args, arg_iter, "--verbose",
                                  (char*) nullptr)) {
      verbose = true;
    } else {
      ++arg_iter;
    }
  }

  // --encode plus a valid --ttype are mandatory.
  if ((! do_encode) ||
      (type == RGWToken::TOKEN_NONE)) {
    return -EINVAL;
  }

  token_formatter = new JSONFormatter(true /* pretty */);

  RGWToken token(type, access_key, secret_key);
  if (do_encode) { // NOTE(review): always true here -- already checked above
    token.encode_json(token_formatter);
    std::ostringstream os;
    token_formatter->flush(os);
    string token_str = os.str();
    if (verbose) {
      std::cout << "expanded token: " << token_str << std::endl;
      if (do_decode) {
        // NOTE(review): --decode only takes effect together with both
        // --encode and --verbose; it round-trips the JSON for inspection.
        RGWToken token2(token_str);
        std::cout << "decoded expanded token: " << token2 << std::endl;
      }
    }
    std::cout << to_base64(token_str) << std::endl;
  }

  return 0;
}
3,512
23.227586
89
cc
null
ceph-main/src/rgw/rgw_token.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat, Inc
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#pragma once

#include <stdint.h>
#include <boost/algorithm/string.hpp>
#include <sstream>

#include "common/ceph_json.h"
#include "common/Formatter.h"
#include "rgw/rgw_b64.h"

namespace rgw {

  using std::string;

  // Credential token for external authentication engines (AD/LDAP/
  // Keystone).  Carries a type tag plus an id/key pair and knows how to
  // (de)serialize itself as ceph buffers, JSON, and base64-wrapped JSON.
  class RGWToken
  {
  public:
    static constexpr auto type_name = "RGW_TOKEN";

    enum token_type : uint32_t {
      TOKEN_NONE,
      TOKEN_AD,
      TOKEN_KEYSTONE,
      TOKEN_LDAP,
    };

    // Map a (case-insensitive) type string to its enum value;
    // unrecognized strings yield TOKEN_NONE.
    static enum token_type to_type(const string& s) {
      if (boost::iequals(s, "ad"))
        return TOKEN_AD;
      if (boost::iequals(s, "ldap"))
        return TOKEN_LDAP;
      if (boost::iequals(s, "keystone"))
        return TOKEN_KEYSTONE;
      return TOKEN_NONE;
    }

    // Inverse of to_type(); unknown types print as "none".
    static const char* from_type(enum token_type type) {
      switch (type) {
      case TOKEN_AD:
        return "ad";
      case TOKEN_LDAP:
        return "ldap";
      case TOKEN_KEYSTONE:
        return "keystone";
      default:
        return "none";
      };
    }

    token_type type;
    string id;
    string key;

    virtual uint32_t version() const { return 1; };

    // A token is usable only when it is typed and fully populated.
    bool valid() const{
      return ((type != TOKEN_NONE) &&
              (! id.empty()) &&
              (! key.empty()));
    }

    RGWToken()
      : type(TOKEN_NONE) {};

    RGWToken(enum token_type _type, const std::string& _id,
             const std::string& _key)
      : type(_type), id(_id), key(_key) {};

    // Construct by parsing a JSON document (see decode_json()).
    explicit RGWToken(const string& json) {
      JSONParser p;
      p.parse(json.c_str(), json.length());
      JSONDecoder::decode_json(RGWToken::type_name, *this, &p);
    }

    // Re-initialize from a JSON document.
    RGWToken& operator=(const std::string& json) {
      JSONParser p;
      p.parse(json.c_str(), json.length());
      JSONDecoder::decode_json(RGWToken::type_name, *this, &p);
      return *this;
    }

    // Versioned ceph buffer encoding (v1): name, version, type string,
    // id, key.
    void encode(bufferlist& bl) const {
      uint32_t ver = version();
      string typestr{from_type(type)};
      ENCODE_START(1, 1, bl);
      encode(type_name, bl);
      encode(ver, bl);
      encode(typestr, bl);
      encode(id, bl);
      encode(key, bl);
      ENCODE_FINISH(bl);
    }

    // Inverse of encode().
    void decode(bufferlist::const_iterator& bl) {
      string name;
      string typestr;
      uint32_t version;
      DECODE_START(1, bl);
      decode(name, bl);
      decode(version, bl);
      decode(typestr, bl);
      type = to_type(typestr);
      decode(id, bl);
      decode(key, bl);
      DECODE_FINISH(bl);
    }

    void dump(Formatter* f) const {
      ::encode_json("version", uint32_t(version()), f);
      ::encode_json("type", from_type(type), f);
      ::encode_json("id", id, f);
      ::encode_json("key", key, f);
    }

    // Emit {"RGW_TOKEN": {...}} via dump().
    void encode_json(Formatter* f) {
      RGWToken& token = *this;
      f->open_object_section(type_name);
      ::encode_json(type_name, token, f);
      f->close_section();
    }

    void decode_json(JSONObj* obj) {
      // NOTE(review): 'version' is decoded but not otherwise validated,
      // and the local 'type_name' shadows the static member and is unused.
      uint32_t version;
      string type_name;
      string typestr;
      JSONDecoder::decode_json("version", version, obj);
      JSONDecoder::decode_json("type", typestr, obj);
      type = to_type(typestr);
      JSONDecoder::decode_json("id", id, obj);
      JSONDecoder::decode_json("key", key, obj);
    }

    // encode_json() into 'f', then base64-encode the rendered document.
    std::string encode_json_base64(Formatter* f) {
      encode_json(f);
      std::ostringstream os;
      f->flush(os);
      return to_base64(std::move(os.str()));
    }

    friend inline std::ostream& operator<<(std::ostream& os,
                                           const RGWToken& token);

    virtual ~RGWToken() {};
  };
  WRITE_CLASS_ENCODER(RGWToken)

  // Human-readable dump; note that it prints the secret key in clear.
  inline std::ostream& operator<<(std::ostream& os, const RGWToken& token)
  {
    os << "<<RGWToken"
       << " type=" << RGWToken::from_type(token.type)
       << " id=" << token.id
       << " key=" << token.key
       << ">>";
    return os;
  }

} /* namespace rgw */
4,088
22.912281
84
h
null
ceph-main/src/rgw/rgw_tools.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include <errno.h>

#include "common/errno.h"

#include "rgw_tools.h"

#define dout_subsys ceph_subsys_rgw
#define dout_context g_ceph_context

#define READ_CHUNK_LEN (512 * 1024)

using namespace std;

// Global extension -> mime-type table, built once by rgw_tools_init().
static std::map<std::string, std::string>* ext_mime_map;

// Parse one "<mime-type> <ext> [<ext>...]" line (mime.types format) into
// ext_mime_map.  Lines with no mime token are ignored.
void parse_mime_map_line(const char *start, const char *end)
{
  char line[end - start + 1];
  strncpy(line, start, end - start);
  line[end - start] = '\0';
  char *l = line;
#define DELIMS " \t\n\r"

  while (isspace(*l))
    l++;

  char *mime = strsep(&l, DELIMS);
  if (!mime)
    return;

  char *ext;
  do {
    ext = strsep(&l, DELIMS);
    if (ext && *ext) {
      (*ext_mime_map)[ext] = mime;
    }
  } while (ext);
}

// Split a whole mime.types buffer into newline-terminated lines and
// parse each one.
void parse_mime_map(const char *buf)
{
  const char *start = buf, *end = buf;
  while (*end) {
    while (*end && *end != '\n') {
      end++;
    }
    parse_mime_map_line(start, end);
    end++;
    start = end;
  }
}

// Read the mime.types file 'ext_map' into memory and parse it.  If the
// file grows between fstat() and read, the whole operation is retried.
static int ext_mime_map_init(const DoutPrefixProvider *dpp, CephContext *cct,
                             const char *ext_map)
{
  int fd = open(ext_map, O_RDONLY);
  char *buf = NULL;
  int ret;
  if (fd < 0) {
    ret = -errno;
    ldpp_dout(dpp, 0) << __func__ << " failed to open file=" << ext_map
                  << " : " << cpp_strerror(-ret) << dendl;
    return ret;
  }

  struct stat st;
  ret = fstat(fd, &st);
  if (ret < 0) {
    ret = -errno;
    ldpp_dout(dpp, 0) << __func__ << " failed to stat file=" << ext_map
                  << " : " << cpp_strerror(-ret) << dendl;
    goto done;
  }

  buf = (char *)malloc(st.st_size + 1);
  if (!buf) {
    ret = -ENOMEM;
    ldpp_dout(dpp, 0) << __func__ << " failed to allocate buf" << dendl;
    goto done;
  }

  // Read one byte more than the stat size so a grown file is detectable.
  ret = safe_read(fd, buf, st.st_size + 1);
  if (ret != st.st_size) { // huh? file size has changed?
    // NOTE(review): a negative safe_read() error result also lands here
    // and triggers a retry -- confirm this cannot recurse forever on a
    // persistent I/O error.
    ldpp_dout(dpp, 0) << __func__ << " raced! will retry.." << dendl;
    free(buf);
    close(fd);
    return ext_mime_map_init(dpp, cct, ext_map);
  }

  buf[st.st_size] = '\0';

  parse_mime_map(buf);
  ret = 0;
done:
  free(buf);
  close(fd);
  return ret;
}

// Look up the mime type registered for the file extension 'ext';
// returns NULL when the extension is unknown.
const char *rgw_find_mime_by_ext(string& ext)
{
  map<string, string>::iterator iter = ext_mime_map->find(ext);
  if (iter == ext_mime_map->end())
    return NULL;

  return iter->second.c_str();
}

// One-time process initialization: build the extension/mime table from
// the configured rgw_mime_types_file.
int rgw_tools_init(const DoutPrefixProvider *dpp, CephContext *cct)
{
  ext_mime_map = new std::map<std::string, std::string>;
  ext_mime_map_init(dpp, cct, cct->_conf->rgw_mime_types_file.c_str());
  // ignore errors; missing mime.types is not fatal
  return 0;
}

void rgw_tools_cleanup()
{
  delete ext_mime_map;
  ext_mime_map = nullptr;
}
2,678
20.432
98
cc
null
ceph-main/src/rgw/rgw_torrent.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include "rgw_torrent.h"

#include <ctime>
#include <fmt/format.h>

#include "common/split.h"
#include "rgw_sal.h"

// Well-known bencode dictionary keys of the .torrent format.
#define ANNOUNCE           "announce"
#define ANNOUNCE_LIST      "announce-list"
#define COMMENT            "comment"
#define CREATED_BY         "created by"
#define CREATION_DATE      "creation date"
#define ENCODING           "encoding"
#define LENGTH             "length"
#define NAME               "name"
#define PIECE_LENGTH       "piece length"
#define PIECES             "pieces"
#define INFO_PIECES        "info"

//control characters
void bencode_dict(bufferlist& bl) { bl.append('d'); }
void bencode_list(bufferlist& bl) { bl.append('l'); }
void bencode_end(bufferlist& bl) { bl.append('e'); }

//key len
// Emit a bencoded string: "<length>:<bytes>".
void bencode_key(std::string_view key, bufferlist& bl)
{
  bl.append(fmt::format("{}:", key.size()));
  bl.append(key);
}

//single values
// Emit a bencoded integer: "i<value>e".
void bencode(int value, bufferlist& bl)
{
  bl.append(fmt::format("i{}", value));
  bencode_end(bl);
}

//single values
void bencode(std::string_view str, bufferlist& bl)
{
  bencode_key(str, bl);
}

//dictionary elements
void bencode(std::string_view key, int value, bufferlist& bl)
{
  bencode_key(key, bl);
  bencode(value, bl);
}

//dictionary elements
void bencode(std::string_view key, std::string_view value, bufferlist& bl)
{
  bencode_key(key, bl);
  bencode(value, bl);
}

// Assemble a complete .torrent document for 'object': tracker/comment/
// encoding fields from configuration, followed by the "info" dictionary
// that was stored on the object at upload time.
int rgw_read_torrent_file(const DoutPrefixProvider* dpp,
                          rgw::sal::Object* object,
                          ceph::bufferlist &bl,
                          optional_yield y)
{
  bufferlist infobl;
  int r = object->get_torrent_info(dpp, y, infobl);
  if (r < 0) {
    ldpp_dout(dpp, 0) << "ERROR: read_torrent_info failed: " << r << dendl;
    return r;
  }

  // add other fields from config
  auto& conf = dpp->get_cct()->_conf;

  bencode_dict(bl);

  // First tracker doubles as "announce"; all of them go in
  // "announce-list" as single-element lists.
  auto trackers = ceph::split(conf->rgw_torrent_tracker, ",");
  if (auto i = trackers.begin(); i != trackers.end()) {
    bencode_key(ANNOUNCE, bl);
    bencode_key(*i, bl);

    bencode_key(ANNOUNCE_LIST, bl);
    bencode_list(bl);
    for (; i != trackers.end(); ++i) {
      bencode_list(bl);
      bencode_key(*i, bl);
      bencode_end(bl);
    }
    bencode_end(bl);
  }

  std::string_view comment = conf->rgw_torrent_comment;
  if (!comment.empty()) {
    bencode(COMMENT, comment, bl);
  }
  std::string_view create_by = conf->rgw_torrent_createby;
  if (!create_by.empty()) {
    bencode(CREATED_BY, create_by, bl);
  }
  std::string_view encoding = conf->rgw_torrent_encoding;
  if (!encoding.empty()) {
    bencode(ENCODING, encoding, bl);
  }

  // append the info stored in the object
  bl.append(std::move(infobl));

  return 0;
}

RGWPutObj_Torrent::RGWPutObj_Torrent(rgw::sal::DataProcessor* next,
                                     size_t max_len, size_t piece_len)
  : Pipe(next), max_len(max_len), piece_len(piece_len)
{
}

// Streaming filter: SHA1-hash each piece_len-sized piece of the upload
// while the data flows through unchanged to the next processor.
// Objects at or above max_len abandon hashing and just pass data along.
int RGWPutObj_Torrent::process(bufferlist&& data, uint64_t logical_offset)
{
  if (!data.length()) { // done
    if (piece_offset) { // hash the remainder
      char out[ceph::crypto::SHA1::digest_size];
      digest.Final(reinterpret_cast<unsigned char*>(out));
      piece_hashes.append(out, sizeof(out));
      piece_count++;
    }
    return Pipe::process(std::move(data), logical_offset);
  }

  len += data.length();
  if (len >= max_len) {
    // enforce the maximum object size; stop calculating and buffering hashes
    piece_hashes.clear();
    piece_offset = 0;
    piece_count = 0;
    return Pipe::process(std::move(data), logical_offset);
  }

  auto p = data.begin();
  while (!p.end()) {
    // feed each buffer segment through sha1
    uint32_t want = piece_len - piece_offset;
    const char* buf = nullptr;
    size_t bytes = p.get_ptr_and_advance(want, &buf);
    digest.Update(reinterpret_cast<const unsigned char*>(buf), bytes);
    piece_offset += bytes;

    // record the hash digest at each piece boundary
    if (bytes == want) {
      char out[ceph::crypto::SHA1::digest_size];
      digest.Final(reinterpret_cast<unsigned char*>(out));
      digest.Restart();
      piece_hashes.append(out, sizeof(out));
      piece_count++;
      piece_offset = 0;
    }
  }

  return Pipe::process(std::move(data), logical_offset);
}

// Render the piece table collected by process() as a bencoded fragment;
// returns an empty bufferlist when the object exceeded max_len.
bufferlist RGWPutObj_Torrent::bencode_torrent(std::string_view filename) const
{
  bufferlist bl;
  if (len >= max_len) {
    return bl;
  }

  // Only encode create_date and sha1 info. Other fields will be added during
  // GetObjectTorrent by rgw_read_torrent_file()
  // issue tracked here: https://tracker.ceph.com/issues/61160
  // coverity[store_truncates_time_t:SUPPRESS]
  bencode(CREATION_DATE, std::time(nullptr), bl);

  bencode_key(INFO_PIECES, bl);
  bencode_dict(bl);
  bencode(LENGTH, len, bl);
  bencode(NAME, filename, bl);
  bencode(PIECE_LENGTH, piece_len, bl);

  bencode_key(PIECES, bl);
  bl.append(std::to_string(piece_count));
  bl.append(':');
  bl.append(piece_hashes);
  bencode_end(bl);

  return bl;
}
5,066
25.668421
78
cc
null
ceph-main/src/rgw/rgw_torrent.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include "common/ceph_crypto.h" #include "common/dout.h" #include "common/async/yield_context.h" #include "rgw_putobj.h" #include "rgw_sal_fwd.h" //control characters void bencode_dict(bufferlist& bl); void bencode_list(bufferlist& bl); void bencode_end(bufferlist& bl); //key len void bencode_key(std::string_view key, bufferlist& bl); //single values void bencode(int value, bufferlist& bl); //single values void bencode(std::string_view str, bufferlist& bl); //dictionary elements void bencode(std::string_view key, int value, bufferlist& bl); //dictionary elements void bencode(std::string_view key, std::string_view value, bufferlist& bl); // read the bencoded torrent file from the given object int rgw_read_torrent_file(const DoutPrefixProvider* dpp, rgw::sal::Object* object, ceph::bufferlist &bl, optional_yield y); // PutObj filter that builds a torrent file during upload class RGWPutObj_Torrent : public rgw::putobj::Pipe { size_t max_len = 0; size_t piece_len = 0; bufferlist piece_hashes; size_t len = 0; size_t piece_offset = 0; uint32_t piece_count = 0; ceph::crypto::SHA1 digest; public: RGWPutObj_Torrent(rgw::sal::DataProcessor* next, size_t max_len, size_t piece_len); int process(bufferlist&& data, uint64_t logical_offset) override; // after processing is complete, return the bencoded torrent file bufferlist bencode_torrent(std::string_view filename) const; };
1,639
26.79661
75
h
null
ceph-main/src/rgw/rgw_tracer.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include <string> #include "rgw_tracer.h" namespace tracing { namespace rgw { tracing::Tracer tracer; } // namespace rgw } // namespace tracing
246
16.642857
70
cc
null
ceph-main/src/rgw/rgw_tracer.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "common/tracer.h" #include "rgw_common.h" namespace tracing { namespace rgw { const auto OP = "op"; const auto BUCKET_NAME = "bucket_name"; const auto USER_ID = "user_id"; const auto OBJECT_NAME = "object_name"; const auto RETURN = "return"; const auto UPLOAD_ID = "upload_id"; const auto TYPE = "type"; const auto REQUEST = "request"; const auto MULTIPART = "multipart_upload "; extern tracing::Tracer tracer; } // namespace rgw } // namespace tracing static inline void extract_span_context(const rgw::sal::Attrs& attr, jspan_context& span_ctx) { auto trace_iter = attr.find(RGW_ATTR_TRACE); if (trace_iter != attr.end()) { try { auto trace_bl_iter = trace_iter->second.cbegin(); tracing::decode(span_ctx, trace_bl_iter); } catch (buffer::error& err) {} } }
912
25.085714
95
h
null
ceph-main/src/rgw/rgw_url.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include <string>
#include <regex>

namespace rgw {

namespace {
// capture-group positions inside the combined pattern built below:
// 1 = schema, 2 = "user:password@", 3 = user, 4 = password, 5 = host[:port]
const auto kUserGroup = 3;
const auto kPasswordGroup = 4;
const auto kHostGroup = 5;
const std::string schema_re = "([[:alpha:]]+:\\/\\/)";
const std::string user_pass_re = "(([^:\\s]+):([^@\\s]+)@)?";
const std::string host_port_re = "([[:alnum:].:-]+)";
const std::string path_re = "(/[[:print:]]*)?";
}

// Split schema://[user:password@]host[:port][/path] into host (with any
// port), user and password; user/password are empty when no userinfo is
// present. The schema match is case-insensitive. Returns false when the
// URL does not match the expected shape.
bool parse_url_authority(const std::string& url, std::string& host,
                         std::string& user, std::string& password)
{
  const std::string pattern = schema_re + user_pass_re + host_port_re + path_re;
  const std::regex matcher(pattern, std::regex::icase);
  std::smatch parts;
  if (!std::regex_match(url, parts, matcher)) {
    return false;
  }
  host = parts[kHostGroup];
  user = parts[kUserGroup];
  password = parts[kPasswordGroup];
  return true;
}

// Extract only the userinfo (user, password) from the same URL shape.
// Unlike parse_url_authority() the match here is case-sensitive,
// preserving the original behavior.
bool parse_url_userinfo(const std::string& url, std::string& user,
                        std::string& password)
{
  const std::string pattern = schema_re + user_pass_re + host_port_re + path_re;
  const std::regex matcher(pattern);
  std::smatch parts;
  if (!std::regex_match(url, parts, matcher)) {
    return false;
  }
  user = parts[kUserGroup];
  password = parts[kPasswordGroup];
  return true;
}

} // namespace rgw
1,452
28.06
111
cc
null
ceph-main/src/rgw/rgw_url.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <string>

namespace rgw {

// Parse a URL of the form:
//   http|https|amqp|amqps|kafka://[user:password@]<host>[:port]
// filling 'host' (including any port), 'user' and 'password'
// (the latter two empty when no userinfo is present).
bool parse_url_authority(const std::string& url, std::string& host,
                         std::string& user, std::string& password);

// Extract only the userinfo ('user', 'password') from the same URL form.
bool parse_url_userinfo(const std::string& url, std::string& user,
                        std::string& password);

} // namespace rgw
440
32.923077
110
h
null
ceph-main/src/rgw/rgw_usage.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <string> #include <map> #include "rgw_rados.h" #include "rgw_usage.h" #include "rgw_formats.h" #include "rgw_sal.h" using namespace std; static void dump_usage_categories_info(Formatter *formatter, const rgw_usage_log_entry& entry, map<string, bool> *categories) { formatter->open_array_section("categories"); map<string, rgw_usage_data>::const_iterator uiter; for (uiter = entry.usage_map.begin(); uiter != entry.usage_map.end(); ++uiter) { if (categories && !categories->empty() && !categories->count(uiter->first)) continue; const rgw_usage_data& usage = uiter->second; formatter->open_object_section("entry"); formatter->dump_string("category", uiter->first); formatter->dump_unsigned("bytes_sent", usage.bytes_sent); formatter->dump_unsigned("bytes_received", usage.bytes_received); formatter->dump_unsigned("ops", usage.ops); formatter->dump_unsigned("successful_ops", usage.successful_ops); formatter->close_section(); // entry } formatter->close_section(); // categories } int RGWUsage::show(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries, bool show_log_sum, map<string, bool> *categories, RGWFormatterFlusher& flusher) { uint32_t max_entries = 1000; bool is_truncated = true; RGWUsageIter usage_iter; Formatter *formatter = flusher.get_formatter(); map<rgw_user_bucket, rgw_usage_log_entry> usage; flusher.start(0); formatter->open_object_section("usage"); if (show_log_entries) { formatter->open_array_section("entries"); } string last_owner; bool user_section_open = false; map<string, rgw_usage_log_entry> summary_map; int ret; while (is_truncated) { if (bucket) { ret = bucket->read_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } else if (user) { ret = user->read_usage(dpp, start_epoch, end_epoch, max_entries, 
&is_truncated, usage_iter, usage); } else { ret = driver->read_all_usage(dpp, start_epoch, end_epoch, max_entries, &is_truncated, usage_iter, usage); } if (ret == -ENOENT) { ret = 0; is_truncated = false; } if (ret < 0) { return ret; } map<rgw_user_bucket, rgw_usage_log_entry>::iterator iter; for (iter = usage.begin(); iter != usage.end(); ++iter) { const rgw_user_bucket& ub = iter->first; const rgw_usage_log_entry& entry = iter->second; if (show_log_entries) { if (ub.user.compare(last_owner) != 0) { if (user_section_open) { formatter->close_section(); formatter->close_section(); } formatter->open_object_section("user"); formatter->dump_string("user", ub.user); formatter->open_array_section("buckets"); user_section_open = true; last_owner = ub.user; } formatter->open_object_section("bucket"); formatter->dump_string("bucket", ub.bucket); utime_t ut(entry.epoch, 0); ut.gmtime(formatter->dump_stream("time")); formatter->dump_int("epoch", entry.epoch); string owner = entry.owner.to_str(); string payer = entry.payer.to_str(); formatter->dump_string("owner", owner); if (!payer.empty() && payer != owner) { formatter->dump_string("payer", payer); } dump_usage_categories_info(formatter, entry, categories); formatter->close_section(); // bucket flusher.flush(); } summary_map[ub.user].aggregate(entry, categories); } } if (show_log_entries) { if (user_section_open) { formatter->close_section(); // buckets formatter->close_section(); //user } formatter->close_section(); // entries } if (show_log_sum) { formatter->open_array_section("summary"); map<string, rgw_usage_log_entry>::iterator siter; for (siter = summary_map.begin(); siter != summary_map.end(); ++siter) { const rgw_usage_log_entry& entry = siter->second; formatter->open_object_section("user"); formatter->dump_string("user", siter->first); dump_usage_categories_info(formatter, entry, categories); rgw_usage_data total_usage; entry.sum(total_usage, *categories); formatter->open_object_section("total"); 
encode_json("bytes_sent", total_usage.bytes_sent, formatter); encode_json("bytes_received", total_usage.bytes_received, formatter); encode_json("ops", total_usage.ops, formatter); encode_json("successful_ops", total_usage.successful_ops, formatter); formatter->close_section(); // total formatter->close_section(); // user flusher.flush(); } formatter->close_section(); // summary } formatter->close_section(); // usage flusher.flush(); return 0; } int RGWUsage::trim(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch, optional_yield y) { if (bucket) { return bucket->trim_usage(dpp, start_epoch, end_epoch, y); } else if (user) { return user->trim_usage(dpp, start_epoch, end_epoch, y); } else { return driver->trim_all_usage(dpp, start_epoch, end_epoch, y); } } int RGWUsage::clear(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y) { return driver->clear_usage(dpp, y); }
5,637
31.77907
125
cc
null
ceph-main/src/rgw/rgw_usage.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <string> #include <map> #include "common/Formatter.h" #include "common/dout.h" #include "rgw_formats.h" #include "rgw_user.h" #include "rgw_sal_fwd.h" class RGWUsage { public: static int show(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch, bool show_log_entries, bool show_log_sum, std::map<std::string, bool> *categories, RGWFormatterFlusher& flusher); static int trim(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user , rgw::sal::Bucket* bucket, uint64_t start_epoch, uint64_t end_epoch, optional_yield y); static int clear(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, optional_yield y); };
897
27.967742
94
h
null
ceph-main/src/rgw/rgw_user.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_sal_rados.h" #include "include/types.h" #include "rgw_user.h" // until everything is moved from rgw_common #include "rgw_common.h" #define dout_subsys ceph_subsys_rgw using namespace std; int rgw_user_sync_all_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, optional_yield y) { rgw::sal::BucketList user_buckets; CephContext *cct = driver->ctx(); size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; string marker; int ret; do { ret = user->list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y); if (ret < 0) { ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl; return ret; } auto& buckets = user_buckets.get_buckets(); for (auto i = buckets.begin(); i != buckets.end(); ++i) { marker = i->first; auto& bucket = i->second; ret = bucket->load_bucket(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: could not read bucket info: bucket=" << bucket << " ret=" << ret << dendl; continue; } ret = bucket->sync_user_stats(dpp, y); if (ret < 0) { ldout(cct, 0) << "ERROR: could not sync bucket stats: ret=" << ret << dendl; return ret; } ret = bucket->check_bucket_shards(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR in check_bucket_shards: " << cpp_strerror(-ret)<< dendl; } } } while (user_buckets.is_truncated()); ret = user->complete_flush_stats(dpp, y); if (ret < 0) { cerr << "ERROR: failed to complete syncing user stats: ret=" << ret << std::endl; return ret; } return 0; } int rgw_user_get_all_buckets_stats(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::User* user, map<string, bucket_meta_entry>& buckets_usage_map, optional_yield y) { CephContext *cct = driver->ctx(); size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; bool done; string marker; int ret; do { rgw::sal::BucketList buckets; ret = user->list_buckets(dpp, marker, string(), 
max_entries, false, buckets, y); if (ret < 0) { ldpp_dout(dpp, 0) << "failed to read user buckets: ret=" << ret << dendl; return ret; } auto& m = buckets.get_buckets(); for (const auto& i : m) { marker = i.first; auto& bucket_ent = i.second; ret = bucket_ent->load_bucket(dpp, y, true /* load user stats */); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: could not get bucket stats: ret=" << ret << dendl; return ret; } bucket_meta_entry entry; entry.size = bucket_ent->get_size(); entry.size_rounded = bucket_ent->get_size_rounded(); entry.creation_time = bucket_ent->get_creation_time(); entry.count = bucket_ent->get_count(); buckets_usage_map.emplace(bucket_ent->get_name(), entry); } done = (buckets.count() < max_entries); } while (!done); return 0; } int rgw_validate_tenant_name(const string& t) { struct tench { static bool is_good(char ch) { return isalnum(ch) || ch == '_'; } }; std::string::const_iterator it = std::find_if_not(t.begin(), t.end(), tench::is_good); return (it == t.end())? 0: -ERR_INVALID_TENANT_NAME; } /** * Get the anonymous (ie, unauthenticated) user info. */ void rgw_get_anon_user(RGWUserInfo& info) { info.user_id = RGW_USER_ANON_ID; info.display_name.clear(); info.access_keys.clear(); }
3,607
27.1875
111
cc
null
ceph-main/src/rgw/rgw_user_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ /* N.B., this header defines fundamental serialized types. Do not * include files which can only be compiled in radosgw or OSD * contexts (e.g., rgw_sal.h, rgw_common.h) */ #pragma once #include <string_view> #include <fmt/format.h> #include "common/dout.h" #include "common/Formatter.h" struct rgw_user { std::string tenant; std::string id; std::string ns; rgw_user() {} explicit rgw_user(const std::string& s) { from_str(s); } rgw_user(const std::string& tenant, const std::string& id, const std::string& ns="") : tenant(tenant), id(id), ns(ns) { } rgw_user(std::string&& tenant, std::string&& id, std::string&& ns="") : tenant(std::move(tenant)), id(std::move(id)), ns(std::move(ns)) { } void encode(ceph::buffer::list& bl) const { ENCODE_START(2, 1, bl); encode(tenant, bl); encode(id, bl); encode(ns, bl); ENCODE_FINISH(bl); } void decode(ceph::buffer::list::const_iterator& bl) { DECODE_START(2, bl); decode(tenant, bl); decode(id, bl); if (struct_v >= 2) { decode(ns, bl); } DECODE_FINISH(bl); } void to_str(std::string& str) const { if (!tenant.empty()) { if (!ns.empty()) { str = tenant + '$' + ns + '$' + id; } else { str = tenant + '$' + id; } } else if (!ns.empty()) { str = '$' + ns + '$' + id; } else { str = id; } } void clear() { tenant.clear(); id.clear(); ns.clear(); } bool empty() const { return id.empty(); } std::string to_str() const { std::string s; to_str(s); return s; } void from_str(const std::string& str) { size_t pos = str.find('$'); if (pos != std::string::npos) { tenant = str.substr(0, pos); std::string_view sv = str; std::string_view ns_id = sv.substr(pos + 1); 
size_t ns_pos = ns_id.find('$'); if (ns_pos != std::string::npos) { ns = std::string(ns_id.substr(0, ns_pos)); id = std::string(ns_id.substr(ns_pos + 1)); } else { ns.clear(); id = std::string(ns_id); } } else { tenant.clear(); ns.clear(); id = str; } } rgw_user& operator=(const std::string& str) { from_str(str); return *this; } int compare(const rgw_user& u) const { int r = tenant.compare(u.tenant); if (r != 0) return r; r = ns.compare(u.ns); if (r != 0) { return r; } return id.compare(u.id); } int compare(const std::string& str) const { rgw_user u(str); return compare(u); } bool operator!=(const rgw_user& rhs) const { return (compare(rhs) != 0); } bool operator==(const rgw_user& rhs) const { return (compare(rhs) == 0); } bool operator<(const rgw_user& rhs) const { if (tenant < rhs.tenant) { return true; } else if (tenant > rhs.tenant) { return false; } if (ns < rhs.ns) { return true; } else if (ns > rhs.ns) { return false; } return (id < rhs.id); } void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<rgw_user*>& o); }; WRITE_CLASS_ENCODER(rgw_user)
3,596
21.622642
86
h
null
ceph-main/src/rgw/rgw_web_idp.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#pragma once

namespace rgw {
namespace web_idp {

// Claims of interest to us, taken from a decoded web-identity token.
struct WebTokenClaims {
  // subject of the token
  std::string sub;
  // intended audience for this token
  std::string aud;
  // issuer of this token
  std::string iss;
  // human-readable id for the resource owner
  std::string user_name;
  // client id
  std::string client_id;
  // azp claim
  std::string azp;
};

}; /* namespace web_idp */
}; /* namespace rgw */
599
21.222222
83
h
null
ceph-main/src/rgw/rgw_website.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Yehuda Sadeh <[email protected]> * Copyright (C) 2015 Robin H. Johnson <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "common/debug.h" #include "common/ceph_json.h" #include "common/Formatter.h" #include "acconfig.h" #include <errno.h> #include <string> #include <list> #include "include/types.h" #include "rgw_website.h" #include "rgw_common.h" #include "rgw_xml.h" using namespace std; bool RGWBWRoutingRuleCondition::check_key_condition(const string& key) { return (key.size() >= key_prefix_equals.size() && key.compare(0, key_prefix_equals.size(), key_prefix_equals) == 0); } void RGWBWRoutingRule::apply_rule(const string& default_protocol, const string& default_hostname, const string& key, string *new_url, int *redirect_code) { RGWRedirectInfo& redirect = redirect_info.redirect; string protocol = (!redirect.protocol.empty() ? redirect.protocol : default_protocol); string hostname = (!redirect.hostname.empty() ? 
redirect.hostname : default_hostname); *new_url = protocol + "://" + hostname + "/"; if (!redirect_info.replace_key_prefix_with.empty()) { *new_url += redirect_info.replace_key_prefix_with; if (key.size() > condition.key_prefix_equals.size()) { *new_url += key.substr(condition.key_prefix_equals.size()); } } else if (!redirect_info.replace_key_with.empty()) { *new_url += redirect_info.replace_key_with; } else { *new_url += key; } if(redirect.http_redirect_code > 0) *redirect_code = redirect.http_redirect_code; } bool RGWBWRoutingRules::check_key_and_error_code_condition(const string &key, int error_code, RGWBWRoutingRule **rule) { for (list<RGWBWRoutingRule>::iterator iter = rules.begin(); iter != rules.end(); ++iter) { if (iter->check_key_condition(key) && iter->check_error_code_condition(error_code)) { *rule = &(*iter); return true; } } return false; } bool RGWBWRoutingRules::check_key_condition(const string& key, RGWBWRoutingRule **rule) { for (list<RGWBWRoutingRule>::iterator iter = rules.begin(); iter != rules.end(); ++iter) { if (iter->check_key_condition(key)) { *rule = &(*iter); return true; } } return false; } bool RGWBWRoutingRules::check_error_code_condition(const int http_error_code, RGWBWRoutingRule **rule) { for (list<RGWBWRoutingRule>::iterator iter = rules.begin(); iter != rules.end(); ++iter) { if (iter->check_error_code_condition(http_error_code)) { *rule = &(*iter); return true; } } return false; } bool RGWBucketWebsiteConf::should_redirect(const string& key, const int http_error_code, RGWBWRoutingRule *redirect) { RGWBWRoutingRule *rule; if(!redirect_all.hostname.empty()) { RGWBWRoutingRule redirect_all_rule; redirect_all_rule.redirect_info.redirect = redirect_all; redirect_all.http_redirect_code = 301; *redirect = redirect_all_rule; return true; } else if (!routing_rules.check_key_and_error_code_condition(key, http_error_code, &rule)) { return false; } *redirect = *rule; return true; } bool RGWBucketWebsiteConf::get_effective_key(const string& 
key, string *effective_key, bool is_file) const { if (index_doc_suffix.empty()) { return false; } if (key.empty()) { *effective_key = index_doc_suffix; } else if (key[key.size() - 1] == '/') { *effective_key = key + index_doc_suffix; } else if (! is_file) { *effective_key = key + "/" + index_doc_suffix; } else { *effective_key = key; } return true; } void RGWRedirectInfo::dump(Formatter *f) const { encode_json("protocol", protocol, f); encode_json("hostname", hostname, f); encode_json("http_redirect_code", (int)http_redirect_code, f); } void RGWRedirectInfo::decode_json(JSONObj *obj) { JSONDecoder::decode_json("protocol", protocol, obj); JSONDecoder::decode_json("hostname", hostname, obj); int code; JSONDecoder::decode_json("http_redirect_code", code, obj); http_redirect_code = code; } void RGWBWRedirectInfo::dump(Formatter *f) const { encode_json("redirect", redirect, f); encode_json("replace_key_prefix_with", replace_key_prefix_with, f); encode_json("replace_key_with", replace_key_with, f); } void RGWBWRedirectInfo::decode_json(JSONObj *obj) { JSONDecoder::decode_json("redirect", redirect, obj); JSONDecoder::decode_json("replace_key_prefix_with", replace_key_prefix_with, obj); JSONDecoder::decode_json("replace_key_with", replace_key_with, obj); } void RGWBWRoutingRuleCondition::dump(Formatter *f) const { encode_json("key_prefix_equals", key_prefix_equals, f); encode_json("http_error_code_returned_equals", (int)http_error_code_returned_equals, f); } void RGWBWRoutingRuleCondition::decode_json(JSONObj *obj) { JSONDecoder::decode_json("key_prefix_equals", key_prefix_equals, obj); int code; JSONDecoder::decode_json("http_error_code_returned_equals", code, obj); http_error_code_returned_equals = code; } void RGWBWRoutingRule::dump(Formatter *f) const { encode_json("condition", condition, f); encode_json("redirect_info", redirect_info, f); } void RGWBWRoutingRule::decode_json(JSONObj *obj) { JSONDecoder::decode_json("condition", condition, obj); 
JSONDecoder::decode_json("redirect_info", redirect_info, obj); } void RGWBWRoutingRules::dump(Formatter *f) const { encode_json("rules", rules, f); } void RGWBWRoutingRules::decode_json(JSONObj *obj) { JSONDecoder::decode_json("rules", rules, obj); } void RGWBucketWebsiteConf::dump(Formatter *f) const { if (!redirect_all.hostname.empty()) { encode_json("redirect_all", redirect_all, f); } else { encode_json("index_doc_suffix", index_doc_suffix, f); encode_json("error_doc", error_doc, f); encode_json("routing_rules", routing_rules, f); } } void RGWBucketWebsiteConf::decode_json(JSONObj *obj) { JSONDecoder::decode_json("redirect_all", redirect_all, obj); JSONDecoder::decode_json("index_doc_suffix", index_doc_suffix, obj); JSONDecoder::decode_json("error_doc", error_doc, obj); JSONDecoder::decode_json("routing_rules", routing_rules, obj); } void RGWBWRedirectInfo::dump_xml(Formatter *f) const { if (!redirect.protocol.empty()) { encode_xml("Protocol", redirect.protocol, f); } if (!redirect.hostname.empty()) { encode_xml("HostName", redirect.hostname, f); } if (redirect.http_redirect_code > 0) { encode_xml("HttpRedirectCode", (int)redirect.http_redirect_code, f); } if (!replace_key_prefix_with.empty()) { encode_xml("ReplaceKeyPrefixWith", replace_key_prefix_with, f); } if (!replace_key_with.empty()) { encode_xml("ReplaceKeyWith", replace_key_with, f); } } #define WEBSITE_HTTP_REDIRECT_CODE_MIN 300 #define WEBSITE_HTTP_REDIRECT_CODE_MAX 400 void RGWBWRedirectInfo::decode_xml(XMLObj *obj) { RGWXMLDecoder::decode_xml("Protocol", redirect.protocol, obj); RGWXMLDecoder::decode_xml("HostName", redirect.hostname, obj); int code = 0; bool has_http_redirect_code = RGWXMLDecoder::decode_xml("HttpRedirectCode", code, obj); if (has_http_redirect_code && !(code > WEBSITE_HTTP_REDIRECT_CODE_MIN && code < WEBSITE_HTTP_REDIRECT_CODE_MAX)) { throw RGWXMLDecoder::err("The provided HTTP redirect code is not valid. 
Valid codes are 3XX except 300."); } redirect.http_redirect_code = code; bool has_replace_key_prefix_with = RGWXMLDecoder::decode_xml("ReplaceKeyPrefixWith", replace_key_prefix_with, obj); bool has_replace_key_with = RGWXMLDecoder::decode_xml("ReplaceKeyWith", replace_key_with, obj); if (has_replace_key_prefix_with && has_replace_key_with) { throw RGWXMLDecoder::err("You can only define ReplaceKeyPrefix or ReplaceKey but not both."); } } void RGWBWRoutingRuleCondition::dump_xml(Formatter *f) const { if (!key_prefix_equals.empty()) { encode_xml("KeyPrefixEquals", key_prefix_equals, f); } if (http_error_code_returned_equals > 0) { encode_xml("HttpErrorCodeReturnedEquals", (int)http_error_code_returned_equals, f); } } #define WEBSITE_HTTP_ERROR_CODE_RETURNED_EQUALS_MIN 400 #define WEBSITE_HTTP_ERROR_CODE_RETURNED_EQUALS_MAX 600 void RGWBWRoutingRuleCondition::decode_xml(XMLObj *obj) { RGWXMLDecoder::decode_xml("KeyPrefixEquals", key_prefix_equals, obj); int code = 0; bool has_http_error_code_returned_equals = RGWXMLDecoder::decode_xml("HttpErrorCodeReturnedEquals", code, obj); if (has_http_error_code_returned_equals && !(code >= WEBSITE_HTTP_ERROR_CODE_RETURNED_EQUALS_MIN && code < WEBSITE_HTTP_ERROR_CODE_RETURNED_EQUALS_MAX)) { throw RGWXMLDecoder::err("The provided HTTP redirect code is not valid. 
Valid codes are 4XX or 5XX."); } http_error_code_returned_equals = code; } void RGWBWRoutingRule::dump_xml(Formatter *f) const { encode_xml("Condition", condition, f); encode_xml("Redirect", redirect_info, f); } void RGWBWRoutingRule::decode_xml(XMLObj *obj) { RGWXMLDecoder::decode_xml("Condition", condition, obj); RGWXMLDecoder::decode_xml("Redirect", redirect_info, obj); } static void encode_xml(const char *name, const std::list<RGWBWRoutingRule>& l, ceph::Formatter *f) { do_encode_xml("RoutingRules", l, "RoutingRule", f); } void RGWBucketWebsiteConf::dump_xml(Formatter *f) const { if (!redirect_all.hostname.empty()) { f->open_object_section("RedirectAllRequestsTo"); encode_xml("HostName", redirect_all.hostname, f); if (!redirect_all.protocol.empty()) { encode_xml("Protocol", redirect_all.protocol, f); } f->close_section(); } if (!index_doc_suffix.empty()) { f->open_object_section("IndexDocument"); encode_xml("Suffix", index_doc_suffix, f); f->close_section(); } if (!error_doc.empty()) { f->open_object_section("ErrorDocument"); encode_xml("Key", error_doc, f); f->close_section(); } if (!routing_rules.rules.empty()) { encode_xml("RoutingRules", routing_rules.rules, f); } } void decode_xml_obj(list<RGWBWRoutingRule>& l, XMLObj *obj) { do_decode_xml_obj(l, "RoutingRule", obj); } void RGWBucketWebsiteConf::decode_xml(XMLObj *obj) { XMLObj *o = obj->find_first("RedirectAllRequestsTo"); if (o) { is_redirect_all = true; RGWXMLDecoder::decode_xml("HostName", redirect_all.hostname, o, true); RGWXMLDecoder::decode_xml("Protocol", redirect_all.protocol, o); } else { o = obj->find_first("IndexDocument"); if (o) { is_set_index_doc = true; RGWXMLDecoder::decode_xml("Suffix", index_doc_suffix, o); } o = obj->find_first("ErrorDocument"); if (o) { RGWXMLDecoder::decode_xml("Key", error_doc, o); } RGWXMLDecoder::decode_xml("RoutingRules", routing_rules.rules, obj); } }
11,080
31.400585
118
cc
null
ceph-main/src/rgw/rgw_website.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Yehuda Sadeh <[email protected]> * Copyright (C) 2015 Robin H. Johnson <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <list> #include <string> #include "common/ceph_json.h" #include "rgw_xml.h" struct RGWRedirectInfo { std::string protocol; std::string hostname; uint16_t http_redirect_code = 0; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(protocol, bl); encode(hostname, bl); encode(http_redirect_code, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(protocol, bl); decode(hostname, bl); decode(http_redirect_code, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWRedirectInfo) struct RGWBWRedirectInfo { RGWRedirectInfo redirect; std::string replace_key_prefix_with; std::string replace_key_with; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(redirect, bl); encode(replace_key_prefix_with, bl); encode(replace_key_with, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(redirect, bl); decode(replace_key_prefix_with, bl); decode(replace_key_with, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void dump_xml(Formatter *f) const; void decode_json(JSONObj *obj); void decode_xml(XMLObj *obj); }; WRITE_CLASS_ENCODER(RGWBWRedirectInfo) struct RGWBWRoutingRuleCondition { std::string key_prefix_equals; uint16_t http_error_code_returned_equals = 0; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(key_prefix_equals, bl); encode(http_error_code_returned_equals, bl); 
ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(key_prefix_equals, bl); decode(http_error_code_returned_equals, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void dump_xml(Formatter *f) const; void decode_json(JSONObj *obj); void decode_xml(XMLObj *obj); bool check_key_condition(const std::string& key); bool check_error_code_condition(const int error_code) { return (uint16_t)error_code == http_error_code_returned_equals; } }; WRITE_CLASS_ENCODER(RGWBWRoutingRuleCondition) struct RGWBWRoutingRule { RGWBWRoutingRuleCondition condition; RGWBWRedirectInfo redirect_info; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(condition, bl); encode(redirect_info, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(condition, bl); decode(redirect_info, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void dump_xml(Formatter *f) const; void decode_json(JSONObj *obj); void decode_xml(XMLObj *obj); bool check_key_condition(const std::string& key) { return condition.check_key_condition(key); } bool check_error_code_condition(int error_code) { return condition.check_error_code_condition(error_code); } void apply_rule(const std::string& default_protocol, const std::string& default_hostname, const std::string& key, std::string *redirect, int *redirect_code); }; WRITE_CLASS_ENCODER(RGWBWRoutingRule) struct RGWBWRoutingRules { std::list<RGWBWRoutingRule> rules; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(rules, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(rules, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void dump_xml(Formatter *f) const; void decode_json(JSONObj *obj); bool check_key_condition(const std::string& key, RGWBWRoutingRule **rule); bool check_error_code_condition(int error_code, RGWBWRoutingRule **rule); bool check_key_and_error_code_condition(const 
std::string& key, const int error_code, RGWBWRoutingRule **rule); }; WRITE_CLASS_ENCODER(RGWBWRoutingRules) struct RGWBucketWebsiteConf { RGWRedirectInfo redirect_all; std::string index_doc_suffix; std::string error_doc; std::string subdir_marker; std::string listing_css_doc; bool listing_enabled; bool is_redirect_all; bool is_set_index_doc; RGWBWRoutingRules routing_rules; RGWBucketWebsiteConf() : listing_enabled(false) { is_redirect_all = false; is_set_index_doc = false; } void encode(bufferlist& bl) const { ENCODE_START(2, 1, bl); encode(index_doc_suffix, bl); encode(error_doc, bl); encode(routing_rules, bl); encode(redirect_all, bl); encode(subdir_marker, bl); encode(listing_css_doc, bl); encode(listing_enabled, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); decode(index_doc_suffix, bl); decode(error_doc, bl); decode(routing_rules, bl); decode(redirect_all, bl); if (struct_v >= 2) { decode(subdir_marker, bl); decode(listing_css_doc, bl); decode(listing_enabled, bl); } DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); void decode_xml(XMLObj *obj); void dump_xml(Formatter *f) const; bool should_redirect(const std::string& key, const int http_error_code, RGWBWRoutingRule *redirect); bool get_effective_key(const std::string& key, std::string *effective_key, bool is_file) const; const std::string& get_index_doc() const { return index_doc_suffix; } bool is_empty() const { return index_doc_suffix.empty() && error_doc.empty() && subdir_marker.empty() && listing_css_doc.empty() && ! listing_enabled; } }; WRITE_CLASS_ENCODER(RGWBucketWebsiteConf)
6,407
25.262295
76
h
null
ceph-main/src/rgw/rgw_worker.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <atomic> #include "common/Thread.h" #include "common/ceph_mutex.h" #include "include/common_fwd.h" class RGWRados; class RGWRadosThread { class Worker : public Thread, public DoutPrefixProvider { CephContext *cct; RGWRadosThread *processor; ceph::mutex lock = ceph::make_mutex("RGWRadosThread::Worker"); ceph::condition_variable cond; void wait() { std::unique_lock l{lock}; cond.wait(l); }; void wait_interval(const ceph::real_clock::duration& wait_time) { std::unique_lock l{lock}; cond.wait_for(l, wait_time); } public: Worker(CephContext *_cct, RGWRadosThread *_p) : cct(_cct), processor(_p) {} void *entry() override; void signal() { std::lock_guard l{lock}; cond.notify_all(); } CephContext *get_cct() const { return cct; } unsigned get_subsys() const { return ceph_subsys_rgw; } std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw rados thread: "; } }; Worker *worker; protected: CephContext *cct; RGWRados *store; std::atomic<bool> down_flag = { false }; std::string thread_name; virtual uint64_t interval_msec() = 0; virtual void stop_process() {} public: RGWRadosThread(RGWRados *_store, const std::string& thread_name = "radosgw") : worker(NULL), cct(_store->ctx()), store(_store), thread_name(thread_name) {} virtual ~RGWRadosThread() { stop(); } virtual int init(const DoutPrefixProvider *dpp) { return 0; } virtual int process(const DoutPrefixProvider *dpp) = 0; bool going_down() { return down_flag; } void start(); void stop(); void signal() { if (worker) { worker->signal(); } } };
2,124
22.097826
91
h
null
ceph-main/src/rgw/rgw_xml.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <string.h> #include <iostream> #include <map> #include <expat.h> #include "include/types.h" #include "include/utime.h" #include "rgw_xml.h" using namespace std; XMLObjIter:: XMLObjIter() { } XMLObjIter:: ~XMLObjIter() { } void XMLObjIter:: set(const XMLObjIter::map_iter_t &_cur, const XMLObjIter::map_iter_t &_end) { cur = _cur; end = _end; } XMLObj *XMLObjIter:: get_next() { XMLObj *obj = NULL; if (cur != end) { obj = cur->second; ++cur; } return obj; } bool XMLObjIter::get_name(std::string& name) const { if (cur == end) { return false; } name = cur->first; return true; } ostream& operator<<(ostream &out, const XMLObj &obj) { out << obj.obj_type << ": " << obj.data; return out; } XMLObj:: ~XMLObj() { } bool XMLObj:: xml_start(XMLObj *parent, const char *el, const char **attr) { this->parent = parent; obj_type = el; for (int i = 0; attr[i]; i += 2) { attr_map[attr[i]] = std::string(attr[i + 1]); } return true; } bool XMLObj:: xml_end(const char *el) { return true; } void XMLObj:: xml_handle_data(const char *s, int len) { data.append(s, len); } const std::string& XMLObj:: XMLObj::get_data() const { return data; } const std::string& XMLObj:: XMLObj::get_obj_type() const { return obj_type; } XMLObj *XMLObj:: XMLObj::get_parent() { return parent; } void XMLObj:: add_child(const std::string& el, XMLObj *obj) { children.insert(std::pair<std::string, XMLObj *>(el, obj)); } bool XMLObj:: get_attr(const std::string& name, std::string& attr) const { const std::map<std::string, std::string>::const_iterator iter = attr_map.find(name); if (iter == attr_map.end()) return false; attr = iter->second; return true; } XMLObjIter XMLObj:: find(const std::string& name) { XMLObjIter iter; const XMLObjIter::const_map_iter_t first = children.find(name); XMLObjIter::const_map_iter_t last; if (first != children.end()) { last = children.upper_bound(name); }else last = 
children.end(); iter.set(first, last); return iter; } XMLObjIter XMLObj::find_first() { XMLObjIter iter; const XMLObjIter::const_map_iter_t first = children.begin(); const XMLObjIter::const_map_iter_t last = children.end(); iter.set(first, last); return iter; } XMLObj *XMLObj:: find_first(const std::string& name) { const XMLObjIter::const_map_iter_t first = children.find(name); if (first != children.end()) return first->second; return nullptr; } RGWXMLParser:: RGWXMLParser() : buf(nullptr), buf_len(0), cur_obj(nullptr), success(true), init_called(false) { p = XML_ParserCreate(nullptr); } RGWXMLParser:: ~RGWXMLParser() { XML_ParserFree(p); free(buf); std::list<XMLObj *>::const_iterator iter; for (iter = allocated_objs.begin(); iter != allocated_objs.end(); ++iter) { XMLObj *obj = *iter; delete obj; } } void RGWXMLParser::call_xml_start(void* user_data, const char *el, const char **attr) { RGWXMLParser *handler = static_cast<RGWXMLParser *>(user_data); XMLObj * obj = handler->alloc_obj(el); if (!obj) { handler->unallocated_objs.push_back(XMLObj()); obj = &handler->unallocated_objs.back(); } else { handler->allocated_objs.push_back(obj); } if (!obj->xml_start(handler->cur_obj, el, attr)) { handler->success = false; return; } if (handler->cur_obj) { handler->cur_obj->add_child(el, obj); } else { handler->children.insert(std::pair<std::string, XMLObj *>(el, obj)); } handler->cur_obj = obj; handler->objs.push_back(obj); } void RGWXMLParser::call_xml_end(void* user_data, const char *el) { RGWXMLParser *handler = static_cast<RGWXMLParser *>(user_data); XMLObj *parent_obj = handler->cur_obj->get_parent(); if (!handler->cur_obj->xml_end(el)) { handler->success = false; return; } handler->cur_obj = parent_obj; } void RGWXMLParser::call_xml_handle_data(void* user_data, const char *s, int len) { RGWXMLParser *handler = static_cast<RGWXMLParser *>(user_data); handler->cur_obj->xml_handle_data(s, len); } bool RGWXMLParser::init() { if (!p) { return false; } init_called = true; 
XML_SetElementHandler(p, RGWXMLParser::call_xml_start, RGWXMLParser::call_xml_end); XML_SetCharacterDataHandler(p, RGWXMLParser::call_xml_handle_data); XML_SetUserData(p, (void *)this); return true; } bool RGWXMLParser::parse(const char *_buf, int len, int done) { ceph_assert(init_called); int pos = buf_len; char *tmp_buf; tmp_buf = (char *)realloc(buf, buf_len + len); if (tmp_buf == NULL){ free(buf); buf = NULL; return false; } else { buf = tmp_buf; } memcpy(&buf[buf_len], _buf, len); buf_len += len; success = true; if (!XML_Parse(p, &buf[pos], len, done)) { fprintf(stderr, "Parse error at line %d:\n%s\n", (int)XML_GetCurrentLineNumber(p), XML_ErrorString(XML_GetErrorCode(p))); success = false; } return success; } void decode_xml_obj(unsigned long& val, XMLObj *obj) { auto& s = obj->get_data(); const char *start = s.c_str(); char *p; errno = 0; val = strtoul(start, &p, 10); /* Check for various possible errors */ if ((errno == ERANGE && val == ULONG_MAX) || (errno != 0 && val == 0)) { throw RGWXMLDecoder::err("failed to number"); } if (p == start) { throw RGWXMLDecoder::err("failed to parse number"); } while (*p != '\0') { if (!isspace(*p)) { throw RGWXMLDecoder::err("failed to parse number"); } p++; } } void decode_xml_obj(long& val, XMLObj *obj) { const std::string s = obj->get_data(); const char *start = s.c_str(); char *p; errno = 0; val = strtol(start, &p, 10); /* Check for various possible errors */ if ((errno == ERANGE && (val == LONG_MAX || val == LONG_MIN)) || (errno != 0 && val == 0)) { throw RGWXMLDecoder::err("failed to parse number"); } if (p == start) { throw RGWXMLDecoder::err("failed to parse number"); } while (*p != '\0') { if (!isspace(*p)) { throw RGWXMLDecoder::err("failed to parse number"); } p++; } } void decode_xml_obj(long long& val, XMLObj *obj) { const std::string s = obj->get_data(); const char *start = s.c_str(); char *p; errno = 0; val = strtoll(start, &p, 10); /* Check for various possible errors */ if ((errno == ERANGE && (val == 
LLONG_MAX || val == LLONG_MIN)) || (errno != 0 && val == 0)) { throw RGWXMLDecoder::err("failed to parse number"); } if (p == start) { throw RGWXMLDecoder::err("failed to parse number"); } while (*p != '\0') { if (!isspace(*p)) { throw RGWXMLDecoder::err("failed to parse number"); } p++; } } void decode_xml_obj(unsigned long long& val, XMLObj *obj) { const std::string s = obj->get_data(); const char *start = s.c_str(); char *p; errno = 0; val = strtoull(start, &p, 10); /* Check for various possible errors */ if ((errno == ERANGE && val == ULLONG_MAX) || (errno != 0 && val == 0)) { throw RGWXMLDecoder::err("failed to parse number"); } if (p == start) { throw RGWXMLDecoder::err("failed to parse number"); } while (*p != '\0') { if (!isspace(*p)) { throw RGWXMLDecoder::err("failed to parse number"); } p++; } } void decode_xml_obj(int& val, XMLObj *obj) { long l; decode_xml_obj(l, obj); #if LONG_MAX > INT_MAX if (l > INT_MAX || l < INT_MIN) { throw RGWXMLDecoder::err("integer out of range"); } #endif val = (int)l; } void decode_xml_obj(unsigned& val, XMLObj *obj) { unsigned long l; decode_xml_obj(l, obj); #if ULONG_MAX > UINT_MAX if (l > UINT_MAX) { throw RGWXMLDecoder::err("unsigned integer out of range"); } #endif val = (unsigned)l; } void decode_xml_obj(bool& val, XMLObj *obj) { const std::string s = obj->get_data(); if (strncasecmp(s.c_str(), "true", 8) == 0) { val = true; return; } if (strncasecmp(s.c_str(), "false", 8) == 0) { val = false; return; } int i; decode_xml_obj(i, obj); val = (bool)i; } void decode_xml_obj(bufferlist& val, XMLObj *obj) { const std::string s = obj->get_data(); bufferlist bl; bl.append(s.c_str(), s.size()); try { val.decode_base64(bl); } catch (buffer::error& err) { throw RGWXMLDecoder::err("failed to decode base64"); } } void decode_xml_obj(utime_t& val, XMLObj *obj) { const std::string s = obj->get_data(); uint64_t epoch; uint64_t nsec; int r = utime_t::parse_date(s, &epoch, &nsec); if (r == 0) { val = utime_t(epoch, nsec); } else { 
throw RGWXMLDecoder::err("failed to decode utime_t"); } } void encode_xml(const char *name, const string& val, Formatter *f) { f->dump_string(name, val); } void encode_xml(const char *name, const char *val, Formatter *f) { f->dump_string(name, val); } void encode_xml(const char *name, bool val, Formatter *f) { std::string s; if (val) s = "True"; else s = "False"; f->dump_string(name, s); } void encode_xml(const char *name, int val, Formatter *f) { f->dump_int(name, val); } void encode_xml(const char *name, long val, Formatter *f) { f->dump_int(name, val); } void encode_xml(const char *name, unsigned val, Formatter *f) { f->dump_unsigned(name, val); } void encode_xml(const char *name, unsigned long val, Formatter *f) { f->dump_unsigned(name, val); } void encode_xml(const char *name, unsigned long long val, Formatter *f) { f->dump_unsigned(name, val); } void encode_xml(const char *name, long long val, Formatter *f) { f->dump_int(name, val); } void encode_xml(const char *name, const utime_t& val, Formatter *f) { val.gmtime(f->dump_stream(name)); } void encode_xml(const char *name, const bufferlist& bl, Formatter *f) { /* need to copy data from bl, as it is const bufferlist */ bufferlist src = bl; bufferlist b64; src.encode_base64(b64); const std::string s(b64.c_str(), b64.length()); encode_xml(name, s, f); }
10,001
18.884692
94
cc
null
ceph-main/src/rgw/rgw_xml.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <map> #include <stdexcept> #include <string> #include <iosfwd> #include <include/types.h> #include <common/Formatter.h> class XMLObj; class RGWXMLParser; class XMLObjIter { public: typedef std::map<std::string, XMLObj *>::iterator map_iter_t; typedef std::map<std::string, XMLObj *>::iterator const_map_iter_t; XMLObjIter(); virtual ~XMLObjIter(); void set(const XMLObjIter::const_map_iter_t &_cur, const XMLObjIter::const_map_iter_t &_end); XMLObj *get_next(); bool get_name(std::string& name) const; private: map_iter_t cur; map_iter_t end; }; /** * Represents a block of XML. * Give the class an XML blob, and it will parse the blob into * an attr_name->value map. * It shouldn't be the start point for any parsing. Look at RGWXMLParser for that. */ class XMLObj { private: XMLObj *parent; std::string obj_type; protected: std::string data; std::multimap<std::string, XMLObj *> children; std::map<std::string, std::string> attr_map; // invoked at the beginning of the XML tag, and populate any attributes bool xml_start(XMLObj *parent, const char *el, const char **attr); // callback invoked at the end of the XML tag // if objects are created while parsing, this should be overwritten in the drived class virtual bool xml_end(const char *el); // callback invoked for storing the data of the XML tag // if data manipulation is needed this could be overwritten in the drived class virtual void xml_handle_data(const char *s, int len); // get the parent object XMLObj *get_parent(); // add a child XML object void add_child(const std::string& el, XMLObj *obj); public: XMLObj() : parent(nullptr) {} virtual ~XMLObj(); // get the data (as string) const std::string& get_data() const; // get the type of the object (as string) const std::string& get_obj_type() const; bool get_attr(const std::string& name, std::string& attr) const; // return a list of sub-tags matching 
the name XMLObjIter find(const std::string& name); // return the first sub-tag XMLObjIter find_first(); // return the first sub-tags matching the name XMLObj *find_first(const std::string& name); friend std::ostream& operator<<(std::ostream &out, const XMLObj &obj); friend RGWXMLParser; }; struct XML_ParserStruct; // an XML parser is an XML object without a parent (root of the tree) // the parser could be used in 2 ways: // // (1) lazy object creation/intrusive API: usually used within the RGWXMLDecode namespace (as RGWXMLDecode::XMLParser) // the parser will parse the input and store info, but will not generate the target object. The object can be allocated outside // of the parser (stack or heap), and require to implement the decode_xml() API for the values to be populated. // note that the decode_xml() calls may throw exceptions if parsing fails // // (2) object creation while parsing: a new class needs to be derived from RGWXMLParser and implement alloc_obj() // API that should create a set of classes derived from XMLObj implementing xml_end() to create the actual target objects // // There could be a mix-and-match of the 2 types, control over that is in the alloc_obj() call // deciding for which tags objects are allocate during parsing and for which tags object allocation is external class RGWXMLParser : public XMLObj { private: XML_ParserStruct *p; char *buf; int buf_len; XMLObj *cur_obj; std::vector<XMLObj *> objs; std::list<XMLObj *> allocated_objs; std::list<XMLObj> unallocated_objs; bool success; bool init_called; // calls xml_start() on each parsed object // passed as static callback to actual parser, passes itself as user_data static void call_xml_start(void* user_data, const char *el, const char **attr); // calls xml_end() on each parsed object // passed as static callback to actual parser, passes itself as user_data static void call_xml_end(void* user_data, const char *el); // calls xml_handle_data() on each parsed object // passed as static callback 
to actual parser, passes itself as user_data static void call_xml_handle_data(void* user_data, const char *s, int len); protected: // if objects are created while parsing, this should be implemented in the derived class // and be a factory for creating the classes derived from XMLObj // note that not all sub-tags has to be constructed here, any such tag which is not // constructed will be lazily created when decode_xml() is invoked on it // // note that in case of different tags sharing the same name at different levels // this method should not be used virtual XMLObj *alloc_obj(const char *el) { return nullptr; } public: RGWXMLParser(); virtual ~RGWXMLParser() override; // initialize the parser, must be called before parsing bool init(); // parse the XML buffer (can be invoked multiple times for incremental parsing) // receives the buffer to parse, its length, and boolean indication (0,1) // whether this is the final chunk of the buffer bool parse(const char *buf, int len, int done); // get the XML blob being parsed const char *get_xml() const { return buf; } }; namespace RGWXMLDecoder { struct err : std::runtime_error { using runtime_error::runtime_error; }; typedef RGWXMLParser XMLParser; template<class T> bool decode_xml(const char *name, T& val, XMLObj* obj, bool mandatory = false); template<class T> bool decode_xml(const char *name, std::vector<T>& v, XMLObj* obj, bool mandatory = false); template<class C> bool decode_xml(const char *name, C& container, void (*cb)(C&, XMLObj *obj), XMLObj *obj, bool mandatory = false); template<class T> void decode_xml(const char *name, T& val, T& default_val, XMLObj* obj); } static inline std::ostream& operator<<(std::ostream &out, RGWXMLDecoder::err& err) { return out << err.what(); } template<class T> void decode_xml_obj(T& val, XMLObj *obj) { val.decode_xml(obj); } static inline void decode_xml_obj(std::string& val, XMLObj *obj) { val = obj->get_data(); } void decode_xml_obj(unsigned long long& val, XMLObj *obj); void 
decode_xml_obj(long long& val, XMLObj *obj); void decode_xml_obj(unsigned long& val, XMLObj *obj); void decode_xml_obj(long& val, XMLObj *obj); void decode_xml_obj(unsigned& val, XMLObj *obj); void decode_xml_obj(int& val, XMLObj *obj); void decode_xml_obj(bool& val, XMLObj *obj); void decode_xml_obj(bufferlist& val, XMLObj *obj); class utime_t; void decode_xml_obj(utime_t& val, XMLObj *obj); template<class T> void decode_xml_obj(std::optional<T>& val, XMLObj *obj) { val.emplace(); decode_xml_obj(*val, obj); } template<class T> void do_decode_xml_obj(std::list<T>& l, const std::string& name, XMLObj *obj) { l.clear(); XMLObjIter iter = obj->find(name); XMLObj *o; while ((o = iter.get_next())) { T val; decode_xml_obj(val, o); l.push_back(val); } } template<class T> bool RGWXMLDecoder::decode_xml(const char *name, T& val, XMLObj *obj, bool mandatory) { XMLObjIter iter = obj->find(name); XMLObj *o = iter.get_next(); if (!o) { if (mandatory) { std::string s = "missing mandatory field " + std::string(name); throw err(s); } val = T(); return false; } try { decode_xml_obj(val, o); } catch (const err& e) { std::string s = std::string(name) + ": "; s.append(e.what()); throw err(s); } return true; } template<class T> bool RGWXMLDecoder::decode_xml(const char *name, std::vector<T>& v, XMLObj *obj, bool mandatory) { XMLObjIter iter = obj->find(name); XMLObj *o = iter.get_next(); v.clear(); if (!o) { if (mandatory) { std::string s = "missing mandatory field " + std::string(name); throw err(s); } return false; } do { T val; try { decode_xml_obj(val, o); } catch (const err& e) { std::string s = std::string(name) + ": "; s.append(e.what()); throw err(s); } v.push_back(val); } while ((o = iter.get_next())); return true; } template<class C> bool RGWXMLDecoder::decode_xml(const char *name, C& container, void (*cb)(C&, XMLObj *), XMLObj *obj, bool mandatory) { container.clear(); XMLObjIter iter = obj->find(name); XMLObj *o = iter.get_next(); if (!o) { if (mandatory) { std::string s = 
"missing mandatory field " + std::string(name); throw err(s); } return false; } try { decode_xml_obj(container, cb, o); } catch (const err& e) { std::string s = std::string(name) + ": "; s.append(e.what()); throw err(s); } return true; } template<class T> void RGWXMLDecoder::decode_xml(const char *name, T& val, T& default_val, XMLObj *obj) { XMLObjIter iter = obj->find(name); XMLObj *o = iter.get_next(); if (!o) { val = default_val; return; } try { decode_xml_obj(val, o); } catch (const err& e) { val = default_val; std::string s = std::string(name) + ": "; s.append(e.what()); throw err(s); } } template<class T> static void encode_xml(const char *name, const T& val, ceph::Formatter *f) { f->open_object_section(name); val.dump_xml(f); f->close_section(); } template<class T> static void encode_xml(const char *name, const char *ns, const T& val, ceph::Formatter *f) { f->open_object_section_in_ns(name, ns); val.dump_xml(f); f->close_section(); } void encode_xml(const char *name, const std::string& val, ceph::Formatter *f); void encode_xml(const char *name, const char *val, ceph::Formatter *f); void encode_xml(const char *name, bool val, ceph::Formatter *f); void encode_xml(const char *name, int val, ceph::Formatter *f); void encode_xml(const char *name, unsigned val, ceph::Formatter *f); void encode_xml(const char *name, long val, ceph::Formatter *f); void encode_xml(const char *name, unsigned long val, ceph::Formatter *f); void encode_xml(const char *name, long long val, ceph::Formatter *f); void encode_xml(const char *name, const utime_t& val, ceph::Formatter *f); void encode_xml(const char *name, const bufferlist& bl, ceph::Formatter *f); void encode_xml(const char *name, long long unsigned val, ceph::Formatter *f); template<class T> static void do_encode_xml(const char *name, const std::list<T>& l, const char *entry_name, ceph::Formatter *f) { f->open_array_section(name); for (typename std::list<T>::const_iterator iter = l.begin(); iter != l.end(); ++iter) { 
encode_xml(entry_name, *iter, f); } f->close_section(); } template<class T> static void encode_xml(const char *name, const std::vector<T>& l, ceph::Formatter *f) { for (typename std::vector<T>::const_iterator iter = l.begin(); iter != l.end(); ++iter) { encode_xml(name, *iter, f); } } template<class T> static void encode_xml(const char *name, const std::optional<T>& o, ceph::Formatter *f) { if (!o) { return; } encode_xml(name, *o, f); }
10,985
28.532258
127
h
null
ceph-main/src/rgw/rgw_xml_enc.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Yehuda Sadeh <[email protected]> * Copyright (C) 2015 Robin H. Johnson <[email protected]> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "rgw_common.h" #include "rgw_xml.h" #include "common/Formatter.h" #define dout_subsys ceph_subsys_rgw using namespace std;
639
23.615385
70
cc
null
ceph-main/src/rgw/rgw_zone.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <optional> #include "common/errno.h" #include "rgw_zone.h" #include "rgw_sal_config.h" #include "rgw_sync.h" #include "services/svc_zone.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw namespace rgw_zone_defaults { static std::string default_bucket_index_pool_suffix = "rgw.buckets.index"; static std::string default_storage_extra_pool_suffix = "rgw.buckets.non-ec"; static std::string zone_info_oid_prefix = "zone_info."; std::string zone_names_oid_prefix = "zone_names."; std::string region_info_oid_prefix = "region_info."; std::string zone_group_info_oid_prefix = "zonegroup_info."; std::string default_region_info_oid = "default.region"; std::string default_zone_group_info_oid = "default.zonegroup"; std::string region_map_oid = "region_map"; std::string default_zonegroup_name = "default"; std::string default_zone_name = "default"; std::string zonegroup_names_oid_prefix = "zonegroups_names."; std::string RGW_DEFAULT_ZONE_ROOT_POOL = "rgw.root"; std::string RGW_DEFAULT_ZONEGROUP_ROOT_POOL = "rgw.root"; std::string RGW_DEFAULT_PERIOD_ROOT_POOL = "rgw.root"; std::string avail_pools = ".pools.avail"; std::string default_storage_pool_suffix = "rgw.buckets.data"; } using namespace std; using namespace rgw_zone_defaults; void encode_json_plain(const char *name, const RGWAccessKey& val, Formatter *f) { f->open_object_section(name); val.dump_plain(f); f->close_section(); } static void decode_zones(map<rgw_zone_id, RGWZone>& zones, JSONObj *o) { RGWZone z; z.decode_json(o); zones[z.id] = z; } static void decode_placement_targets(map<string, RGWZoneGroupPlacementTarget>& targets, JSONObj *o) { RGWZoneGroupPlacementTarget t; t.decode_json(o); targets[t.name] = t; } void RGWZone::generate_test_instances(list<RGWZone*> &o) { RGWZone *z = new RGWZone; o.push_back(z); o.push_back(new RGWZone); } void RGWZone::dump(Formatter *f) const { 
encode_json("id", id, f); encode_json("name", name, f); encode_json("endpoints", endpoints, f); encode_json("log_meta", log_meta, f); encode_json("log_data", log_data, f); encode_json("bucket_index_max_shards", bucket_index_max_shards, f); encode_json("read_only", read_only, f); encode_json("tier_type", tier_type, f); encode_json("sync_from_all", sync_from_all, f); encode_json("sync_from", sync_from, f); encode_json("redirect_zone", redirect_zone, f); encode_json("supported_features", supported_features, f); } void RGWZone::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("name", name, obj); if (id.empty()) { id = name; } JSONDecoder::decode_json("endpoints", endpoints, obj); JSONDecoder::decode_json("log_meta", log_meta, obj); JSONDecoder::decode_json("log_data", log_data, obj); JSONDecoder::decode_json("bucket_index_max_shards", bucket_index_max_shards, obj); JSONDecoder::decode_json("read_only", read_only, obj); JSONDecoder::decode_json("tier_type", tier_type, obj); JSONDecoder::decode_json("sync_from_all", sync_from_all, true, obj); JSONDecoder::decode_json("sync_from", sync_from, obj); JSONDecoder::decode_json("redirect_zone", redirect_zone, obj); JSONDecoder::decode_json("supported_features", supported_features, obj); } int RGWSystemMetaObj::init(const DoutPrefixProvider *dpp, CephContext *_cct, RGWSI_SysObj *_sysobj_svc, optional_yield y, bool setup_obj, bool old_format) { reinit_instance(_cct, _sysobj_svc); if (!setup_obj) return 0; if (old_format && id.empty()) { id = name; } if (id.empty()) { id = get_predefined_id(cct); } if (id.empty()) { int r; if (name.empty()) { name = get_predefined_name(cct); } if (name.empty()) { r = use_default(dpp, y, old_format); if (r < 0) { return r; } } else if (!old_format) { r = read_id(dpp, name, id, y); if (r < 0) { if (r != -ENOENT) { ldpp_dout(dpp, 0) << "error in read_id for object name: " << name << " : " << cpp_strerror(-r) << dendl; } return r; } } } return 
read_info(dpp, id, y, old_format); } RGWZoneGroup::~RGWZoneGroup() {} const string RGWZoneGroup::get_default_oid(bool old_region_format) const { if (old_region_format) { if (cct->_conf->rgw_default_region_info_oid.empty()) { return default_region_info_oid; } return cct->_conf->rgw_default_region_info_oid; } string default_oid = cct->_conf->rgw_default_zonegroup_info_oid; if (cct->_conf->rgw_default_zonegroup_info_oid.empty()) { default_oid = default_zone_group_info_oid; } default_oid += "." + realm_id; return default_oid; } const string& RGWZoneGroup::get_info_oid_prefix(bool old_region_format) const { if (old_region_format) { return region_info_oid_prefix; } return zone_group_info_oid_prefix; } const string& RGWZoneGroup::get_names_oid_prefix() const { return zonegroup_names_oid_prefix; } string RGWZoneGroup::get_predefined_id(CephContext *cct) const { return cct->_conf.get_val<string>("rgw_zonegroup_id"); } const string& RGWZoneGroup::get_predefined_name(CephContext *cct) const { return cct->_conf->rgw_zonegroup; } rgw_pool RGWZoneGroup::get_pool(CephContext *cct_) const { if (cct_->_conf->rgw_zonegroup_root_pool.empty()) { return rgw_pool(RGW_DEFAULT_ZONEGROUP_ROOT_POOL); } return rgw_pool(cct_->_conf->rgw_zonegroup_root_pool); } int RGWZoneGroup::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; int ret = realm.init(dpp, cct, sysobj_svc, y); // no default realm exist if (ret < 0) { return read_id(dpp, default_zonegroup_name, default_id, y); } realm_id = realm.get_id(); } return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format); } int RGWSystemMetaObj::use_default(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { return read_default_id(dpp, id, y, old_format); } void RGWSystemMetaObj::reinit_instance(CephContext *_cct, RGWSI_SysObj *_sysobj_svc) { cct = _cct; sysobj_svc = _sysobj_svc; zone_svc = 
_sysobj_svc->get_zone_svc(); } int RGWSystemMetaObj::read_info(const DoutPrefixProvider *dpp, const string& obj_id, optional_yield y, bool old_format) { rgw_pool pool(get_pool(cct)); bufferlist bl; string oid = get_info_oid_prefix(old_format) + obj_id; auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid}); int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { ldpp_dout(dpp, 0) << "failed reading obj info from " << pool << ":" << oid << ": " << cpp_strerror(-ret) << dendl; return ret; } using ceph::decode; try { auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; return -EIO; } return 0; } void RGWZoneGroup::decode_json(JSONObj *obj) { RGWSystemMetaObj::decode_json(obj); if (id.empty()) { derr << "old format " << dendl; JSONDecoder::decode_json("name", name, obj); id = name; } JSONDecoder::decode_json("api_name", api_name, obj); JSONDecoder::decode_json("is_master", is_master, obj); JSONDecoder::decode_json("endpoints", endpoints, obj); JSONDecoder::decode_json("hostnames", hostnames, obj); JSONDecoder::decode_json("hostnames_s3website", hostnames_s3website, obj); JSONDecoder::decode_json("master_zone", master_zone, obj); JSONDecoder::decode_json("zones", zones, decode_zones, obj); JSONDecoder::decode_json("placement_targets", placement_targets, decode_placement_targets, obj); string pr; JSONDecoder::decode_json("default_placement", pr, obj); default_placement.from_str(pr); JSONDecoder::decode_json("realm_id", realm_id, obj); JSONDecoder::decode_json("sync_policy", sync_policy, obj); JSONDecoder::decode_json("enabled_features", enabled_features, obj); } RGWZoneParams::~RGWZoneParams() {} void RGWZoneParams::decode_json(JSONObj *obj) { RGWSystemMetaObj::decode_json(obj); JSONDecoder::decode_json("domain_root", domain_root, obj); JSONDecoder::decode_json("control_pool", control_pool, obj); JSONDecoder::decode_json("gc_pool", gc_pool, obj); 
JSONDecoder::decode_json("lc_pool", lc_pool, obj); JSONDecoder::decode_json("log_pool", log_pool, obj); JSONDecoder::decode_json("intent_log_pool", intent_log_pool, obj); JSONDecoder::decode_json("roles_pool", roles_pool, obj); JSONDecoder::decode_json("reshard_pool", reshard_pool, obj); JSONDecoder::decode_json("usage_log_pool", usage_log_pool, obj); JSONDecoder::decode_json("user_keys_pool", user_keys_pool, obj); JSONDecoder::decode_json("user_email_pool", user_email_pool, obj); JSONDecoder::decode_json("user_swift_pool", user_swift_pool, obj); JSONDecoder::decode_json("user_uid_pool", user_uid_pool, obj); JSONDecoder::decode_json("otp_pool", otp_pool, obj); JSONDecoder::decode_json("system_key", system_key, obj); JSONDecoder::decode_json("placement_pools", placement_pools, obj); JSONDecoder::decode_json("tier_config", tier_config, obj); JSONDecoder::decode_json("realm_id", realm_id, obj); JSONDecoder::decode_json("notif_pool", notif_pool, obj); } void RGWZoneParams::dump(Formatter *f) const { RGWSystemMetaObj::dump(f); encode_json("domain_root", domain_root, f); encode_json("control_pool", control_pool, f); encode_json("gc_pool", gc_pool, f); encode_json("lc_pool", lc_pool, f); encode_json("log_pool", log_pool, f); encode_json("intent_log_pool", intent_log_pool, f); encode_json("usage_log_pool", usage_log_pool, f); encode_json("roles_pool", roles_pool, f); encode_json("reshard_pool", reshard_pool, f); encode_json("user_keys_pool", user_keys_pool, f); encode_json("user_email_pool", user_email_pool, f); encode_json("user_swift_pool", user_swift_pool, f); encode_json("user_uid_pool", user_uid_pool, f); encode_json("otp_pool", otp_pool, f); encode_json_plain("system_key", system_key, f); encode_json("placement_pools", placement_pools, f); encode_json("tier_config", tier_config, f); encode_json("realm_id", realm_id, f); encode_json("notif_pool", notif_pool, f); } int RGWZoneParams::init(const DoutPrefixProvider *dpp, CephContext *cct, RGWSI_SysObj *sysobj_svc, 
optional_yield y, bool setup_obj, bool old_format) { if (name.empty()) { name = cct->_conf->rgw_zone; } return RGWSystemMetaObj::init(dpp, cct, sysobj_svc, y, setup_obj, old_format); } rgw_pool RGWZoneParams::get_pool(CephContext *cct) const { if (cct->_conf->rgw_zone_root_pool.empty()) { return rgw_pool(RGW_DEFAULT_ZONE_ROOT_POOL); } return rgw_pool(cct->_conf->rgw_zone_root_pool); } const string RGWZoneParams::get_default_oid(bool old_format) const { if (old_format) { return cct->_conf->rgw_default_zone_info_oid; } return cct->_conf->rgw_default_zone_info_oid + "." + realm_id; } const string& RGWZoneParams::get_names_oid_prefix() const { return zone_names_oid_prefix; } const string& RGWZoneParams::get_info_oid_prefix(bool old_format) const { return zone_info_oid_prefix; } string RGWZoneParams::get_predefined_id(CephContext *cct) const { return cct->_conf.get_val<string>("rgw_zone_id"); } const string& RGWZoneParams::get_predefined_name(CephContext *cct) const { return cct->_conf->rgw_zone; } int RGWZoneParams::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; int ret = realm.init(dpp, cct, sysobj_svc, y); //no default realm exist if (ret < 0) { return read_id(dpp, default_zone_name, default_id, y); } realm_id = realm.get_id(); } return RGWSystemMetaObj::read_default_id(dpp, default_id, y, old_format); } int RGWZoneParams::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; return -EINVAL; } realm_id = realm.get_id(); } return RGWSystemMetaObj::set_as_default(dpp, y, exclusive); } int RGWZoneParams::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { /* check for old pools config 
*/ rgw_raw_obj obj(domain_root, avail_pools); auto sysobj = sysobj_svc->get_obj(obj); int r = sysobj.rop().stat(y, dpp); if (r < 0) { ldpp_dout(dpp, 10) << "couldn't find old data placement pools config, setting up new ones for the zone" << dendl; /* a new system, let's set new placement info */ RGWZonePlacementInfo default_placement; default_placement.index_pool = name + "." + default_bucket_index_pool_suffix; rgw_pool pool = name + "." + default_storage_pool_suffix; default_placement.storage_classes.set_storage_class(RGW_STORAGE_CLASS_STANDARD, &pool, nullptr); default_placement.data_extra_pool = name + "." + default_storage_extra_pool_suffix; placement_pools["default-placement"] = default_placement; } r = fix_pool_names(dpp, y); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: fix_pool_names returned r=" << r << dendl; return r; } r = RGWSystemMetaObj::create(dpp, y, exclusive); if (r < 0) { return r; } // try to set as default. may race with another create, so pass exclusive=true // so we don't override an existing default r = set_as_default(dpp, y, true); if (r < 0 && r != -EEXIST) { ldpp_dout(dpp, 10) << "WARNING: failed to set zone as default, r=" << r << dendl; } return 0; } rgw_pool fix_zone_pool_dup(const set<rgw_pool>& pools, const string& default_prefix, const string& default_suffix, const rgw_pool& suggested_pool) { string suggested_name = suggested_pool.to_str(); string prefix = default_prefix; string suffix = default_suffix; if (!suggested_pool.empty()) { prefix = suggested_name.substr(0, suggested_name.find(".")); suffix = suggested_name.substr(prefix.length()); } rgw_pool pool(prefix + suffix); while (pools.count(pool)) { pool = prefix + "_" + std::to_string(std::rand()) + suffix; } return pool; } void add_zone_pools(const RGWZoneParams& info, std::set<rgw_pool>& pools) { pools.insert(info.domain_root); pools.insert(info.control_pool); pools.insert(info.gc_pool); pools.insert(info.log_pool); pools.insert(info.intent_log_pool); 
pools.insert(info.usage_log_pool); pools.insert(info.user_keys_pool); pools.insert(info.user_email_pool); pools.insert(info.user_swift_pool); pools.insert(info.user_uid_pool); pools.insert(info.otp_pool); pools.insert(info.roles_pool); pools.insert(info.reshard_pool); pools.insert(info.oidc_pool); pools.insert(info.notif_pool); for (const auto& [pname, placement] : info.placement_pools) { pools.insert(placement.index_pool); for (const auto& [sname, sc] : placement.storage_classes.get_all()) { if (sc.data_pool) { pools.insert(sc.data_pool.get()); } } pools.insert(placement.data_extra_pool); } } namespace rgw { int get_zones_pool_set(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::ConfigStore* cfgstore, std::string_view my_zone_id, std::set<rgw_pool>& pools) { std::array<std::string, 128> zone_names; rgw::sal::ListResult<std::string> listing; do { int r = cfgstore->list_zone_names(dpp, y, listing.next, zone_names, listing); if (r < 0) { ldpp_dout(dpp, 0) << "failed to list zones with " << cpp_strerror(r) << dendl; return r; } for (const auto& name : listing.entries) { RGWZoneParams info; r = cfgstore->read_zone_by_name(dpp, y, name, info, nullptr); if (r < 0) { ldpp_dout(dpp, 0) << "failed to load zone " << name << " with " << cpp_strerror(r) << dendl; return r; } if (info.get_id() != my_zone_id) { add_zone_pools(info, pools); } } } while (!listing.next.empty()); return 0; } } static int get_zones_pool_set(const DoutPrefixProvider *dpp, CephContext* cct, RGWSI_SysObj* sysobj_svc, const list<string>& zone_names, const string& my_zone_id, set<rgw_pool>& pool_names, optional_yield y) { for (const auto& name : zone_names) { RGWZoneParams zone(name); int r = zone.init(dpp, cct, sysobj_svc, y); if (r < 0) { ldpp_dout(dpp, 0) << "Error: failed to load zone " << name << " with " << cpp_strerror(-r) << dendl; return r; } if (zone.get_id() != my_zone_id) { add_zone_pools(zone, pool_names); } } return 0; } int RGWZoneParams::fix_pool_names(const DoutPrefixProvider 
*dpp, optional_yield y) { list<string> zones; int r = zone_svc->list_zones(dpp, zones); if (r < 0) { ldpp_dout(dpp, 10) << "WARNING: driver->list_zones() returned r=" << r << dendl; } set<rgw_pool> pools; r = get_zones_pool_set(dpp, cct, sysobj_svc, zones, id, pools, y); if (r < 0) { ldpp_dout(dpp, 0) << "Error: get_zones_pool_names" << r << dendl; return r; } domain_root = fix_zone_pool_dup(pools, name, ".rgw.meta:root", domain_root); control_pool = fix_zone_pool_dup(pools, name, ".rgw.control", control_pool); gc_pool = fix_zone_pool_dup(pools, name ,".rgw.log:gc", gc_pool); lc_pool = fix_zone_pool_dup(pools, name ,".rgw.log:lc", lc_pool); log_pool = fix_zone_pool_dup(pools, name, ".rgw.log", log_pool); intent_log_pool = fix_zone_pool_dup(pools, name, ".rgw.log:intent", intent_log_pool); usage_log_pool = fix_zone_pool_dup(pools, name, ".rgw.log:usage", usage_log_pool); user_keys_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.keys", user_keys_pool); user_email_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.email", user_email_pool); user_swift_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.swift", user_swift_pool); user_uid_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:users.uid", user_uid_pool); roles_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:roles", roles_pool); reshard_pool = fix_zone_pool_dup(pools, name, ".rgw.log:reshard", reshard_pool); otp_pool = fix_zone_pool_dup(pools, name, ".rgw.otp", otp_pool); oidc_pool = fix_zone_pool_dup(pools, name, ".rgw.meta:oidc", oidc_pool); notif_pool = fix_zone_pool_dup(pools, name ,".rgw.log:notif", notif_pool); for(auto& iter : placement_pools) { iter.second.index_pool = fix_zone_pool_dup(pools, name, "." + default_bucket_index_pool_suffix, iter.second.index_pool); for (auto& pi : iter.second.storage_classes.get_all()) { if (pi.second.data_pool) { rgw_pool& pool = pi.second.data_pool.get(); pool = fix_zone_pool_dup(pools, name, "." 
+ default_storage_pool_suffix, pool); } } iter.second.data_extra_pool= fix_zone_pool_dup(pools, name, "." + default_storage_extra_pool_suffix, iter.second.data_extra_pool); } return 0; } int RGWPeriodConfig::read(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y) { const auto& pool = get_pool(sysobj_svc->ctx()); const auto& oid = get_oid(realm_id); bufferlist bl; auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid}); int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { return ret; } using ceph::decode; try { auto iter = bl.cbegin(); decode(*this, iter); } catch (buffer::error& err) { return -EIO; } return 0; } int RGWPeriodConfig::write(const DoutPrefixProvider *dpp, RGWSI_SysObj *sysobj_svc, const std::string& realm_id, optional_yield y) { const auto& pool = get_pool(sysobj_svc->ctx()); const auto& oid = get_oid(realm_id); bufferlist bl; using ceph::encode; encode(*this, bl); auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(false) .write(dpp, bl, y); } void RGWPeriodConfig::decode_json(JSONObj *obj) { JSONDecoder::decode_json("bucket_quota", quota.bucket_quota, obj); JSONDecoder::decode_json("user_quota", quota.user_quota, obj); JSONDecoder::decode_json("user_ratelimit", user_ratelimit, obj); JSONDecoder::decode_json("bucket_ratelimit", bucket_ratelimit, obj); JSONDecoder::decode_json("anonymous_ratelimit", anon_ratelimit, obj); } void RGWPeriodConfig::dump(Formatter *f) const { encode_json("bucket_quota", quota.bucket_quota, f); encode_json("user_quota", quota.user_quota, f); encode_json("user_ratelimit", user_ratelimit, f); encode_json("bucket_ratelimit", bucket_ratelimit, f); encode_json("anonymous_ratelimit", anon_ratelimit, f); } std::string RGWPeriodConfig::get_oid(const std::string& realm_id) { if (realm_id.empty()) { return "period_config.default"; } return "period_config." 
+ realm_id; } rgw_pool RGWPeriodConfig::get_pool(CephContext *cct) { const auto& pool_name = cct->_conf->rgw_period_root_pool; if (pool_name.empty()) { return {RGW_DEFAULT_PERIOD_ROOT_POOL}; } return {pool_name}; } int RGWSystemMetaObj::delete_obj(const DoutPrefixProvider *dpp, optional_yield y, bool old_format) { rgw_pool pool(get_pool(cct)); /* check to see if obj is the default */ RGWDefaultSystemMetaObjInfo default_info; int ret = read_default(dpp, default_info, get_default_oid(old_format), y); if (ret < 0 && ret != -ENOENT) return ret; if (default_info.default_id == id || (old_format && default_info.default_id == name)) { string oid = get_default_oid(old_format); rgw_raw_obj default_named_obj(pool, oid); auto sysobj = sysobj_svc->get_obj(default_named_obj); ret = sysobj.wop().remove(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "Error delete default obj name " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } if (!old_format) { string oid = get_names_oid_prefix() + name; rgw_raw_obj object_name(pool, oid); auto sysobj = sysobj_svc->get_obj(object_name); ret = sysobj.wop().remove(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "Error delete obj name " << name << ": " << cpp_strerror(-ret) << dendl; return ret; } } string oid = get_info_oid_prefix(old_format); if (old_format) { oid += name; } else { oid += id; } rgw_raw_obj object_id(pool, oid); auto sysobj = sysobj_svc->get_obj(object_id); ret = sysobj.wop().remove(dpp, y); if (ret < 0) { ldpp_dout(dpp, 0) << "Error delete object id " << id << ": " << cpp_strerror(-ret) << dendl; } return ret; } void RGWZoneGroup::dump(Formatter *f) const { RGWSystemMetaObj::dump(f); encode_json("api_name", api_name, f); encode_json("is_master", is_master, f); encode_json("endpoints", endpoints, f); encode_json("hostnames", hostnames, f); encode_json("hostnames_s3website", hostnames_s3website, f); encode_json("master_zone", master_zone, f); encode_json_map("zones", zones, f); /* more friendly representation */ 
encode_json_map("placement_targets", placement_targets, f); /* more friendly representation */ encode_json("default_placement", default_placement, f); encode_json("realm_id", realm_id, f); encode_json("sync_policy", sync_policy, f); encode_json("enabled_features", enabled_features, f); } void RGWZoneGroupPlacementTarget::decode_json(JSONObj *obj) { JSONDecoder::decode_json("name", name, obj); JSONDecoder::decode_json("tags", tags, obj); JSONDecoder::decode_json("storage_classes", storage_classes, obj); if (storage_classes.empty()) { storage_classes.insert(RGW_STORAGE_CLASS_STANDARD); } JSONDecoder::decode_json("tier_targets", tier_targets, obj); } void RGWZonePlacementInfo::dump(Formatter *f) const { encode_json("index_pool", index_pool, f); encode_json("storage_classes", storage_classes, f); encode_json("data_extra_pool", data_extra_pool, f); encode_json("index_type", (uint32_t)index_type, f); encode_json("inline_data", inline_data, f); /* no real need for backward compatibility of compression_type and data_pool in here, * rather not clutter the output */ } void RGWZonePlacementInfo::decode_json(JSONObj *obj) { JSONDecoder::decode_json("index_pool", index_pool, obj); JSONDecoder::decode_json("storage_classes", storage_classes, obj); JSONDecoder::decode_json("data_extra_pool", data_extra_pool, obj); uint32_t it; JSONDecoder::decode_json("index_type", it, obj); JSONDecoder::decode_json("inline_data", inline_data, obj); index_type = (rgw::BucketIndexType)it; /* backward compatibility, these are now defined in storage_classes */ string standard_compression_type; string *pcompression = nullptr; if (JSONDecoder::decode_json("compression", standard_compression_type, obj)) { pcompression = &standard_compression_type; } rgw_pool standard_data_pool; rgw_pool *ppool = nullptr; if (JSONDecoder::decode_json("data_pool", standard_data_pool, obj)) { ppool = &standard_data_pool; } if (ppool || pcompression) { storage_classes.set_storage_class(RGW_STORAGE_CLASS_STANDARD, ppool, 
pcompression); } } void RGWSystemMetaObj::dump(Formatter *f) const { encode_json("id", id , f); encode_json("name", name , f); } void RGWSystemMetaObj::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("name", name, obj); } int RGWSystemMetaObj::read_default(const DoutPrefixProvider *dpp, RGWDefaultSystemMetaObjInfo& default_info, const string& oid, optional_yield y) { using ceph::decode; auto pool = get_pool(cct); bufferlist bl; auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid)); int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) return ret; try { auto iter = bl.cbegin(); decode(default_info, iter); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "error decoding data from " << pool << ":" << oid << dendl; return -EIO; } return 0; } void RGWZoneGroupPlacementTarget::dump(Formatter *f) const { encode_json("name", name, f); encode_json("tags", tags, f); encode_json("storage_classes", storage_classes, f); if (!tier_targets.empty()) { encode_json("tier_targets", tier_targets, f); } } void RGWZoneGroupPlacementTier::decode_json(JSONObj *obj) { JSONDecoder::decode_json("tier_type", tier_type, obj); JSONDecoder::decode_json("storage_class", storage_class, obj); JSONDecoder::decode_json("retain_head_object", retain_head_object, obj); if (tier_type == "cloud-s3") { JSONDecoder::decode_json("s3", t.s3, obj); } } void RGWZoneStorageClasses::dump(Formatter *f) const { for (auto& i : m) { encode_json(i.first.c_str(), i.second, f); } } void RGWZoneStorageClasses::decode_json(JSONObj *obj) { JSONFormattable f; decode_json_obj(f, obj); for (auto& field : f.object()) { JSONObj *field_obj = obj->find_obj(field.first); assert(field_obj); decode_json_obj(m[field.first], field_obj); } standard_class = &m[RGW_STORAGE_CLASS_STANDARD]; } void RGWZoneGroupPlacementTier::dump(Formatter *f) const { encode_json("tier_type", tier_type, f); encode_json("storage_class", storage_class, f); encode_json("retain_head_object", 
retain_head_object, f); if (tier_type == "cloud-s3") { encode_json("s3", t.s3, f); } } void RGWZoneGroupPlacementTierS3::decode_json(JSONObj *obj) { JSONDecoder::decode_json("endpoint", endpoint, obj); JSONDecoder::decode_json("access_key", key.id, obj); JSONDecoder::decode_json("secret", key.key, obj); JSONDecoder::decode_json("region", region, obj); string s; JSONDecoder::decode_json("host_style", s, obj); if (s != "virtual") { host_style = PathStyle; } else { host_style = VirtualStyle; } JSONDecoder::decode_json("target_storage_class", target_storage_class, obj); JSONDecoder::decode_json("target_path", target_path, obj); JSONDecoder::decode_json("acl_mappings", acl_mappings, obj); JSONDecoder::decode_json("multipart_sync_threshold", multipart_sync_threshold, obj); JSONDecoder::decode_json("multipart_min_part_size", multipart_min_part_size, obj); } void RGWZoneStorageClass::dump(Formatter *f) const { if (data_pool) { encode_json("data_pool", data_pool.get(), f); } if (compression_type) { encode_json("compression_type", compression_type.get(), f); } } void RGWZoneStorageClass::decode_json(JSONObj *obj) { JSONDecoder::decode_json("data_pool", data_pool, obj); JSONDecoder::decode_json("compression_type", compression_type, obj); } void RGWTierACLMapping::decode_json(JSONObj *obj) { string s; JSONDecoder::decode_json("type", s, obj); if (s == "email") { type = ACL_TYPE_EMAIL_USER; } else if (s == "uri") { type = ACL_TYPE_GROUP; } else { type = ACL_TYPE_CANON_USER; } JSONDecoder::decode_json("source_id", source_id, obj); JSONDecoder::decode_json("dest_id", dest_id, obj); } void RGWZoneGroupPlacementTierS3::dump(Formatter *f) const { encode_json("endpoint", endpoint, f); encode_json("access_key", key.id, f); encode_json("secret", key.key, f); encode_json("region", region, f); string s = (host_style == PathStyle ? 
"path" : "virtual"); encode_json("host_style", s, f); encode_json("target_storage_class", target_storage_class, f); encode_json("target_path", target_path, f); encode_json("acl_mappings", acl_mappings, f); encode_json("multipart_sync_threshold", multipart_sync_threshold, f); encode_json("multipart_min_part_size", multipart_min_part_size, f); } void RGWTierACLMapping::dump(Formatter *f) const { string s; switch (type) { case ACL_TYPE_EMAIL_USER: s = "email"; break; case ACL_TYPE_GROUP: s = "uri"; break; default: s = "id"; break; } encode_json("type", s, f); encode_json("source_id", source_id, f); encode_json("dest_id", dest_id, f); } void RGWPeriodMap::dump(Formatter *f) const { encode_json("id", id, f); encode_json_map("zonegroups", zonegroups, f); encode_json("short_zone_ids", short_zone_ids, f); } static void decode_zonegroups(map<string, RGWZoneGroup>& zonegroups, JSONObj *o) { RGWZoneGroup zg; zg.decode_json(o); zonegroups[zg.get_id()] = zg; } void RGWPeriodMap::decode_json(JSONObj *obj) { JSONDecoder::decode_json("id", id, obj); JSONDecoder::decode_json("zonegroups", zonegroups, decode_zonegroups, obj); /* backward compatability with region */ if (zonegroups.empty()) { JSONDecoder::decode_json("regions", zonegroups, obj); } /* backward compatability with region */ if (master_zonegroup.empty()) { JSONDecoder::decode_json("master_region", master_zonegroup, obj); } JSONDecoder::decode_json("short_zone_ids", short_zone_ids, obj); } void RGWPeriodMap::decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); decode(id, bl); decode(zonegroups, bl); decode(master_zonegroup, bl); if (struct_v >= 2) { decode(short_zone_ids, bl); } DECODE_FINISH(bl); zonegroups_by_api.clear(); for (map<string, RGWZoneGroup>::iterator iter = zonegroups.begin(); iter != zonegroups.end(); ++iter) { RGWZoneGroup& zonegroup = iter->second; zonegroups_by_api[zonegroup.api_name] = zonegroup; if (zonegroup.is_master_zonegroup()) { master_zonegroup = zonegroup.get_id(); } } } void 
RGWPeriodMap::encode(bufferlist& bl) const { ENCODE_START(2, 1, bl); encode(id, bl); encode(zonegroups, bl); encode(master_zonegroup, bl); encode(short_zone_ids, bl); ENCODE_FINISH(bl); } int RGWSystemMetaObj::create(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { int ret; /* check to see the name is not used */ ret = read_id(dpp, name, id, y); if (exclusive && ret == 0) { ldpp_dout(dpp, 10) << "ERROR: name " << name << " already in use for obj id " << id << dendl; return -EEXIST; } else if ( ret < 0 && ret != -ENOENT) { ldpp_dout(dpp, 0) << "failed reading obj id " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } if (id.empty()) { /* create unique id */ uuid_d new_uuid; char uuid_str[37]; new_uuid.generate_random(); new_uuid.print(uuid_str); id = uuid_str; } ret = store_info(dpp, exclusive, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: storing info for " << id << ": " << cpp_strerror(-ret) << dendl; return ret; } return store_name(dpp, exclusive, y); } int RGWSystemMetaObj::read_default_id(const DoutPrefixProvider *dpp, string& default_id, optional_yield y, bool old_format) { RGWDefaultSystemMetaObjInfo default_info; int ret = read_default(dpp, default_info, get_default_oid(old_format), y); if (ret < 0) { return ret; } default_id = default_info.default_id; return 0; } int RGWSystemMetaObj::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { using ceph::encode; string oid = get_default_oid(); rgw_pool pool(get_pool(cct)); bufferlist bl; RGWDefaultSystemMetaObjInfo default_info; default_info.default_id = id; encode(default_info, bl); auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid)); int ret = sysobj.wop() .set_exclusive(exclusive) .write(dpp, bl, y); if (ret < 0) return ret; return 0; } int RGWSystemMetaObj::store_info(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); string oid = get_info_oid_prefix() + id; bufferlist bl; using ceph::encode; 
encode(*this, bl); auto sysobj = sysobj_svc->get_obj(rgw_raw_obj{pool, oid}); return sysobj.wop() .set_exclusive(exclusive) .write(dpp, bl, y); } int RGWSystemMetaObj::read_id(const DoutPrefixProvider *dpp, const string& obj_name, string& object_id, optional_yield y) { using ceph::decode; rgw_pool pool(get_pool(cct)); bufferlist bl; string oid = get_names_oid_prefix() + obj_name; auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid)); int ret = sysobj.rop().read(dpp, &bl, y); if (ret < 0) { return ret; } RGWNameToId nameToId; try { auto iter = bl.cbegin(); decode(nameToId, iter); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "ERROR: failed to decode obj from " << pool << ":" << oid << dendl; return -EIO; } object_id = nameToId.obj_id; return 0; } int RGWSystemMetaObj::store_name(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { rgw_pool pool(get_pool(cct)); string oid = get_names_oid_prefix() + name; RGWNameToId nameToId; nameToId.obj_id = id; bufferlist bl; using ceph::encode; encode(nameToId, bl); auto sysobj = sysobj_svc->get_obj(rgw_raw_obj(pool, oid)); return sysobj.wop() .set_exclusive(exclusive) .write(dpp, bl, y); } bool RGWPeriodMap::find_zone_by_id(const rgw_zone_id& zone_id, RGWZoneGroup *zonegroup, RGWZone *zone) const { for (auto& iter : zonegroups) { auto& zg = iter.second; auto ziter = zg.zones.find(zone_id); if (ziter != zg.zones.end()) { *zonegroup = zg; *zone = ziter->second; return true; } } return false; } int RGWZoneGroup::set_as_default(const DoutPrefixProvider *dpp, optional_yield y, bool exclusive) { if (realm_id.empty()) { /* try using default realm */ RGWRealm realm; int ret = realm.init(dpp, cct, sysobj_svc, y); if (ret < 0) { ldpp_dout(dpp, 10) << "could not read realm id: " << cpp_strerror(-ret) << dendl; return -EINVAL; } realm_id = realm.get_id(); } return RGWSystemMetaObj::set_as_default(dpp, y, exclusive); } int RGWSystemMetaObj::write(const DoutPrefixProvider *dpp, bool exclusive, optional_yield y) { int 
ret = store_info(dpp, exclusive, y); if (ret < 0) { ldpp_dout(dpp, 20) << __func__ << "(): store_info() returned ret=" << ret << dendl; return ret; } ret = store_name(dpp, exclusive, y); if (ret < 0) { ldpp_dout(dpp, 20) << __func__ << "(): store_name() returned ret=" << ret << dendl; return ret; } return 0; } namespace rgw { int init_zone_pool_names(const DoutPrefixProvider *dpp, optional_yield y, const std::set<rgw_pool>& pools, RGWZoneParams& info) { info.domain_root = fix_zone_pool_dup(pools, info.name, ".rgw.meta:root", info.domain_root); info.control_pool = fix_zone_pool_dup(pools, info.name, ".rgw.control", info.control_pool); info.gc_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:gc", info.gc_pool); info.lc_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:lc", info.lc_pool); info.log_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log", info.log_pool); info.intent_log_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:intent", info.intent_log_pool); info.usage_log_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:usage", info.usage_log_pool); info.user_keys_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:users.keys", info.user_keys_pool); info.user_email_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:users.email", info.user_email_pool); info.user_swift_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:users.swift", info.user_swift_pool); info.user_uid_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:users.uid", info.user_uid_pool); info.roles_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:roles", info.roles_pool); info.reshard_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:reshard", info.reshard_pool); info.otp_pool = fix_zone_pool_dup(pools, info.name, ".rgw.otp", info.otp_pool); info.oidc_pool = fix_zone_pool_dup(pools, info.name, ".rgw.meta:oidc", info.oidc_pool); info.notif_pool = fix_zone_pool_dup(pools, info.name, ".rgw.log:notif", info.notif_pool); for (auto& [pname, placement] : 
info.placement_pools) { placement.index_pool = fix_zone_pool_dup(pools, info.name, "." + default_bucket_index_pool_suffix, placement.index_pool); placement.data_extra_pool= fix_zone_pool_dup(pools, info.name, "." + default_storage_extra_pool_suffix, placement.data_extra_pool); for (auto& [sname, sc] : placement.storage_classes.get_all()) { if (sc.data_pool) { sc.data_pool = fix_zone_pool_dup(pools, info.name, "." + default_storage_pool_suffix, *sc.data_pool); } } } return 0; } int add_zone_to_group(const DoutPrefixProvider* dpp, RGWZoneGroup& zonegroup, const RGWZoneParams& zone_params, const bool *pis_master, const bool *pread_only, const std::list<std::string>& endpoints, const std::string *ptier_type, const bool *psync_from_all, const std::list<std::string>& sync_from, const std::list<std::string>& sync_from_rm, const std::string *predirect_zone, std::optional<int> bucket_index_max_shards, const rgw::zone_features::set& enable_features, const rgw::zone_features::set& disable_features) { const std::string& zone_id = zone_params.id; const std::string& zone_name = zone_params.name; if (zone_id.empty()) { ldpp_dout(dpp, -1) << __func__ << " requires a zone id" << dendl; return -EINVAL; } if (zone_name.empty()) { ldpp_dout(dpp, -1) << __func__ << " requires a zone name" << dendl; return -EINVAL; } // check for duplicate zone name on insert if (!zonegroup.zones.count(zone_id)) { for (const auto& [id, zone] : zonegroup.zones) { if (zone.name == zone_name) { ldpp_dout(dpp, 0) << "ERROR: found existing zone name " << zone_name << " (" << id << ") in zonegroup " << zonegroup.name << dendl; return -EEXIST; } } } rgw_zone_id& master_zone = zonegroup.master_zone; if (pis_master) { if (*pis_master) { if (!master_zone.empty() && master_zone != zone_id) { ldpp_dout(dpp, 0) << "NOTICE: overriding master zone: " << master_zone << dendl; } master_zone = zone_id; } else if (master_zone == zone_id) { master_zone.clear(); } } else if (master_zone.empty() && zonegroup.zones.empty()) { 
ldpp_dout(dpp, 0) << "NOTICE: promoted " << zone_name << " as new master_zone of zonegroup " << zonegroup.name << dendl; master_zone = zone_id; } // make sure the zone's placement targets are named in the zonegroup for (const auto& [name, placement] : zone_params.placement_pools) { auto target = RGWZoneGroupPlacementTarget{.name = name}; zonegroup.placement_targets.emplace(name, std::move(target)); } RGWZone& zone = zonegroup.zones[zone_params.id]; zone.id = zone_params.id; zone.name = zone_params.name; if (!endpoints.empty()) { zone.endpoints = endpoints; } if (pread_only) { zone.read_only = *pread_only; } if (ptier_type) { zone.tier_type = *ptier_type; } if (psync_from_all) { zone.sync_from_all = *psync_from_all; } if (predirect_zone) { zone.redirect_zone = *predirect_zone; } if (bucket_index_max_shards) { zone.bucket_index_max_shards = *bucket_index_max_shards; } // add/remove sync_from for (auto add : sync_from) { zone.sync_from.insert(add); } for (const auto& rm : sync_from_rm) { auto i = zone.sync_from.find(rm); if (i == zone.sync_from.end()) { ldpp_dout(dpp, 1) << "WARNING: zone \"" << rm << "\" was not in sync_from" << dendl; continue; } zone.sync_from.erase(i); } // add/remove supported features zone.supported_features.insert(enable_features.begin(), enable_features.end()); for (const auto& feature : disable_features) { if (zonegroup.enabled_features.contains(feature)) { ldpp_dout(dpp, -1) << "ERROR: Cannot disable zone feature \"" << feature << "\" until it's been disabled in zonegroup " << zonegroup.name << dendl; return -EINVAL; } auto i = zone.supported_features.find(feature); if (i == zone.supported_features.end()) { ldpp_dout(dpp, 1) << "WARNING: zone feature \"" << feature << "\" was not enabled in zone " << zone.name << dendl; continue; } zone.supported_features.erase(i); } const bool log_data = zonegroup.zones.size() > 1; for (auto& [id, zone] : zonegroup.zones) { zone.log_data = log_data; } return 0; } } // namespace rgw
43,436
30.659621
135
cc
null
ceph-main/src/rgw/rgw_zone_features.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* N.B., this header defines fundamental serialized types. Do not * include files which can only be compiled in radosgw or OSD * contexts (e.g., rgw_sal.h, rgw_common.h) */ #pragma once #include <string> #include <boost/container/flat_set.hpp> namespace rgw::zone_features { // zone feature names inline constexpr std::string_view resharding = "resharding"; inline constexpr std::string_view compress_encrypted = "compress-encrypted"; // static list of features supported by this release inline constexpr std::initializer_list<std::string_view> supported = { resharding, compress_encrypted, }; inline constexpr bool supports(std::string_view feature) { for (auto i : supported) { if (feature.compare(i) == 0) { return true; } } return false; } // static list of features enabled by default on new zonegroups inline constexpr std::initializer_list<std::string_view> enabled = { resharding, }; // enable string_view overloads for find() contains() etc struct feature_less : std::less<std::string_view> { using is_transparent = std::true_type; }; using set = boost::container::flat_set<std::string, feature_less>; } // namespace rgw::zone_features
1,293
25.958333
76
h
null
ceph-main/src/rgw/rgw_zone_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2019 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ /* N.B., this header defines fundamental serialized types. Do not * introduce changes or include files which can only be compiled in * radosgw or OSD contexts (e.g., rgw_sal.h, rgw_common.h) */ #pragma once #include <string> #include <set> #include <map> #include <list> #include <boost/optional.hpp> #include <fmt/format.h> #include "include/types.h" #include "rgw_bucket_layout.h" #include "rgw_zone_features.h" #include "rgw_pool_types.h" #include "rgw_acl_types.h" #include "rgw_placement_types.h" #include "common/Formatter.h" class JSONObj; namespace rgw_zone_defaults { extern std::string zone_names_oid_prefix; extern std::string region_info_oid_prefix; extern std::string realm_names_oid_prefix; extern std::string zone_group_info_oid_prefix; extern std::string realm_info_oid_prefix; extern std::string default_region_info_oid; extern std::string default_zone_group_info_oid; extern std::string region_map_oid; extern std::string default_realm_info_oid; extern std::string default_zonegroup_name; extern std::string default_zone_name; extern std::string zonegroup_names_oid_prefix; extern std::string RGW_DEFAULT_ZONE_ROOT_POOL; extern std::string RGW_DEFAULT_ZONEGROUP_ROOT_POOL; extern std::string RGW_DEFAULT_REALM_ROOT_POOL; extern std::string RGW_DEFAULT_PERIOD_ROOT_POOL; extern std::string avail_pools; extern std::string default_storage_pool_suffix; } /* namespace rgw_zone_defaults */ struct RGWNameToId { std::string obj_id; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(obj_id, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { 
DECODE_START(1, bl); decode(obj_id, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWNameToId) struct RGWDefaultSystemMetaObjInfo { std::string default_id; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(default_id, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(default_id, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWDefaultSystemMetaObjInfo) struct RGWZoneStorageClass { boost::optional<rgw_pool> data_pool; boost::optional<std::string> compression_type; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(data_pool, bl); encode(compression_type, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(data_pool, bl); decode(compression_type, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWZoneStorageClass) class RGWZoneStorageClasses { std::map<std::string, RGWZoneStorageClass> m; /* in memory only */ RGWZoneStorageClass *standard_class; public: RGWZoneStorageClasses() { standard_class = &m[RGW_STORAGE_CLASS_STANDARD]; } RGWZoneStorageClasses(const RGWZoneStorageClasses& rhs) { m = rhs.m; standard_class = &m[RGW_STORAGE_CLASS_STANDARD]; } RGWZoneStorageClasses& operator=(const RGWZoneStorageClasses& rhs) { m = rhs.m; standard_class = &m[RGW_STORAGE_CLASS_STANDARD]; return *this; } const RGWZoneStorageClass& get_standard() const { return *standard_class; } bool find(const std::string& sc, const RGWZoneStorageClass** pstorage_class) const { auto iter = m.find(sc); if (iter == m.end()) { return false; } *pstorage_class = &iter->second; return true; } bool exists(const std::string& sc) const { if (sc.empty()) { return true; } auto iter = m.find(sc); return (iter != m.end()); } const std::map<std::string, RGWZoneStorageClass>& get_all() const 
{ return m; } std::map<std::string, RGWZoneStorageClass>& get_all() { return m; } void set_storage_class(const std::string& sc, const rgw_pool* data_pool, const std::string* compression_type) { const std::string *psc = &sc; if (sc.empty()) { psc = &RGW_STORAGE_CLASS_STANDARD; } RGWZoneStorageClass& storage_class = m[*psc]; if (data_pool) { storage_class.data_pool = *data_pool; } if (compression_type) { storage_class.compression_type = *compression_type; } } void remove_storage_class(const std::string& sc) { if (!sc.empty()) { m.erase(sc); } } void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(m, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(m, bl); standard_class = &m[RGW_STORAGE_CLASS_STANDARD]; DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWZoneStorageClasses) struct RGWZonePlacementInfo { rgw_pool index_pool; rgw_pool data_extra_pool; /* if not set we should use data_pool */ RGWZoneStorageClasses storage_classes; rgw::BucketIndexType index_type; bool inline_data; RGWZonePlacementInfo() : index_type(rgw::BucketIndexType::Normal), inline_data(true) {} void encode(bufferlist& bl) const { ENCODE_START(8, 1, bl); encode(index_pool.to_str(), bl); rgw_pool standard_data_pool = get_data_pool(RGW_STORAGE_CLASS_STANDARD); encode(standard_data_pool.to_str(), bl); encode(data_extra_pool.to_str(), bl); encode((uint32_t)index_type, bl); std::string standard_compression_type = get_compression_type(RGW_STORAGE_CLASS_STANDARD); encode(standard_compression_type, bl); encode(storage_classes, bl); encode(inline_data, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(8, bl); std::string index_pool_str; std::string data_pool_str; decode(index_pool_str, bl); index_pool = rgw_pool(index_pool_str); decode(data_pool_str, bl); rgw_pool standard_data_pool(data_pool_str); if (struct_v >= 4) { std::string data_extra_pool_str; 
decode(data_extra_pool_str, bl); data_extra_pool = rgw_pool(data_extra_pool_str); } if (struct_v >= 5) { uint32_t it; decode(it, bl); index_type = (rgw::BucketIndexType)it; } std::string standard_compression_type; if (struct_v >= 6) { decode(standard_compression_type, bl); } if (struct_v >= 7) { decode(storage_classes, bl); } else { storage_classes.set_storage_class(RGW_STORAGE_CLASS_STANDARD, &standard_data_pool, (!standard_compression_type.empty() ? &standard_compression_type : nullptr)); } if (struct_v >= 8) { decode(inline_data, bl); } DECODE_FINISH(bl); } const rgw_pool& get_data_extra_pool() const { static rgw_pool no_pool; if (data_extra_pool.empty()) { return storage_classes.get_standard().data_pool.get_value_or(no_pool); } return data_extra_pool; } const rgw_pool& get_data_pool(const std::string& sc) const { const RGWZoneStorageClass *storage_class; static rgw_pool no_pool; if (!storage_classes.find(sc, &storage_class)) { return storage_classes.get_standard().data_pool.get_value_or(no_pool); } return storage_class->data_pool.get_value_or(no_pool); } const rgw_pool& get_standard_data_pool() const { return get_data_pool(RGW_STORAGE_CLASS_STANDARD); } const std::string& get_compression_type(const std::string& sc) const { const RGWZoneStorageClass *storage_class; static std::string no_compression; if (!storage_classes.find(sc, &storage_class)) { return no_compression; } return storage_class->compression_type.get_value_or(no_compression); } bool storage_class_exists(const std::string& sc) const { return storage_classes.exists(sc); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWZonePlacementInfo) struct RGWZone { std::string id; std::string name; std::list<std::string> endpoints; // std::vector? bool log_meta; bool log_data; bool read_only; std::string tier_type; std::string redirect_zone; /** * Represents the number of shards for the bucket index object, a value of zero * indicates there is no sharding. 
By default (no sharding, the name of the object * is '.dir.{marker}', with sharding, the name is '.dir.{marker}.{sharding_id}', * sharding_id is zero-based value. It is not recommended to set a too large value * (e.g. thousand) as it increases the cost for bucket listing. */ uint32_t bucket_index_max_shards; // pre-shard buckets on creation to enable some write-parallism by default, // delay the need to reshard as the bucket grows, and (in multisite) get some // bucket index sharding where dynamic resharding is not supported static constexpr uint32_t default_bucket_index_max_shards = 11; bool sync_from_all; std::set<std::string> sync_from; /* list of zones to sync from */ rgw::zone_features::set supported_features; RGWZone() : log_meta(false), log_data(false), read_only(false), bucket_index_max_shards(default_bucket_index_max_shards), sync_from_all(true) {} void encode(bufferlist& bl) const { ENCODE_START(8, 1, bl); encode(name, bl); encode(endpoints, bl); encode(log_meta, bl); encode(log_data, bl); encode(bucket_index_max_shards, bl); encode(id, bl); encode(read_only, bl); encode(tier_type, bl); encode(sync_from_all, bl); encode(sync_from, bl); encode(redirect_zone, bl); encode(supported_features, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(8, bl); decode(name, bl); if (struct_v < 4) { id = name; } decode(endpoints, bl); if (struct_v >= 2) { decode(log_meta, bl); decode(log_data, bl); } if (struct_v >= 3) { decode(bucket_index_max_shards, bl); } if (struct_v >= 4) { decode(id, bl); decode(read_only, bl); } if (struct_v >= 5) { decode(tier_type, bl); } if (struct_v >= 6) { decode(sync_from_all, bl); decode(sync_from, bl); } if (struct_v >= 7) { decode(redirect_zone, bl); } if (struct_v >= 8) { decode(supported_features, bl); } DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); static void generate_test_instances(std::list<RGWZone*>& o); bool is_read_only() const { return read_only; } bool 
syncs_from(const std::string& zone_name) const { return (sync_from_all || sync_from.find(zone_name) != sync_from.end()); } bool supports(std::string_view feature) const { return supported_features.contains(feature); } }; WRITE_CLASS_ENCODER(RGWZone) struct RGWDefaultZoneGroupInfo { std::string default_zonegroup; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(default_zonegroup, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(default_zonegroup, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); //todo: implement ceph-dencoder }; WRITE_CLASS_ENCODER(RGWDefaultZoneGroupInfo) struct RGWTierACLMapping { ACLGranteeTypeEnum type{ACL_TYPE_CANON_USER}; std::string source_id; std::string dest_id; RGWTierACLMapping() = default; RGWTierACLMapping(ACLGranteeTypeEnum t, const std::string& s, const std::string& d) : type(t), source_id(s), dest_id(d) {} void init(const JSONFormattable& config) { const std::string& t = config["type"]; if (t == "email") { type = ACL_TYPE_EMAIL_USER; } else if (t == "uri") { type = ACL_TYPE_GROUP; } else { type = ACL_TYPE_CANON_USER; } source_id = config["source_id"]; dest_id = config["dest_id"]; } void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode((uint32_t)type, bl); encode(source_id, bl); encode(dest_id, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); uint32_t it; decode(it, bl); type = (ACLGranteeTypeEnum)it; decode(source_id, bl); decode(dest_id, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWTierACLMapping) enum HostStyle { PathStyle = 0, VirtualStyle = 1, }; struct RGWZoneGroupPlacementTierS3 { #define DEFAULT_MULTIPART_SYNC_PART_SIZE (32 * 1024 * 1024) std::string endpoint; RGWAccessKey key; std::string region; HostStyle host_style{PathStyle}; std::string target_storage_class; /* Should below be bucket/zone 
specific?? */ std::string target_path; std::map<std::string, RGWTierACLMapping> acl_mappings; uint64_t multipart_sync_threshold{DEFAULT_MULTIPART_SYNC_PART_SIZE}; uint64_t multipart_min_part_size{DEFAULT_MULTIPART_SYNC_PART_SIZE}; int update_params(const JSONFormattable& config); int clear_params(const JSONFormattable& config); void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(endpoint, bl); encode(key, bl); encode(region, bl); encode((uint32_t)host_style, bl); // XXX kill C-style casts encode(target_storage_class, bl); encode(target_path, bl); encode(acl_mappings, bl); encode(multipart_sync_threshold, bl); encode(multipart_min_part_size, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(endpoint, bl); decode(key, bl); decode(region, bl); uint32_t it; decode(it, bl); host_style = (HostStyle)it; // XXX can't this be HostStyle(it)? decode(target_storage_class, bl); decode(target_path, bl); decode(acl_mappings, bl); decode(multipart_sync_threshold, bl); decode(multipart_min_part_size, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(RGWZoneGroupPlacementTierS3) struct RGWZoneGroupPlacementTier { std::string tier_type; std::string storage_class; bool retain_head_object = false; struct _tier { RGWZoneGroupPlacementTierS3 s3; } t; int update_params(const JSONFormattable& config); int clear_params(const JSONFormattable& config); void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(tier_type, bl); encode(storage_class, bl); encode(retain_head_object, bl); if (tier_type == "cloud-s3") { encode(t.s3, bl); } ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(tier_type, bl); decode(storage_class, bl); decode(retain_head_object, bl); if (tier_type == "cloud-s3") { decode(t.s3, bl); } DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; 
WRITE_CLASS_ENCODER(RGWZoneGroupPlacementTier)

// a zonegroup-level placement target: name, access-restriction tags,
// advertised storage classes and any attached cloud tiers
struct RGWZoneGroupPlacementTarget {
  std::string name;
  std::set<std::string> tags;
  std::set<std::string> storage_classes;
  std::map<std::string, RGWZoneGroupPlacementTier> tier_targets;

  // a target with no tags is open to everyone; otherwise the user needs
  // at least one matching placement tag
  bool user_permitted(const std::list<std::string>& user_tags) const {
    if (tags.empty()) {
      return true;
    }
    for (auto& user_tag : user_tags) {
      if (tags.count(user_tag) > 0) {
        return true;
      }
    }
    return false;
  }

  void encode(bufferlist& bl) const {
    ENCODE_START(3, 1, bl);
    encode(name, bl);
    encode(tags, bl);
    encode(storage_classes, bl);
    encode(tier_targets, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    DECODE_START(3, bl);
    decode(name, bl);
    decode(tags, bl);
    if (struct_v >= 2) {
      decode(storage_classes, bl);
    }
    // older encodings carried no storage classes; guarantee STANDARD
    if (storage_classes.empty()) {
      storage_classes.insert(RGW_STORAGE_CLASS_STANDARD);
    }
    if (struct_v >= 3) {
      decode(tier_targets, bl);
    }
    DECODE_FINISH(bl);
  }
  void dump(Formatter *f) const;
  void decode_json(JSONObj *obj);
};
WRITE_CLASS_ENCODER(RGWZoneGroupPlacementTarget)
16,654
25.605431
117
h
null
ceph-main/src/rgw/rgwam.py
#!@Python3_EXECUTABLE@
# -*- mode:python -*-
# vim: ts=4 sw=4 smarttab expandtab
#
# Processed in Makefile to add python #! line and version variable
#
#

import subprocess
import random
import string
import json
import argparse
import sys
import socket
import base64
import logging

from urllib.parse import urlparse

from ceph.rgw.rgwam_core import RGWAM, EnvArgs
from ceph.rgw.types import RGWAMEnvMgr, RGWAMException


class RGWAMCLIMgr(RGWAMEnvMgr):
    """Environment manager that shells out to local ceph CLI tools."""

    def __init__(self, common_args):
        # translate common ceph options into the argument prefix used for
        # every tool invocation
        args = []

        if common_args.conf_path:
            args += [ '-c', common_args.conf_path ]

        if common_args.ceph_name:
            args += [ '-n', common_args.ceph_name ]

        if common_args.ceph_keyring:
            args += [ '-k', common_args.ceph_keyring ]

        self.args_prefix = args

    def tool_exec(self, prog, args):
        """Run `prog` with the common prefix + args; return (cmd, rc, stdout, stderr)."""
        run_cmd = [ prog ] + self.args_prefix + args

        result = subprocess.run(run_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

        stdout = result.stdout.decode('utf-8')
        stderr = result.stderr.decode('utf-8')

        return run_cmd, result.returncode, stdout, stderr

    def apply_rgw(self, svc_id, realm_name, zone_name, port = None):
        # no orchestrator in CLI mode; radosgw is launched explicitly
        return None

    def list_daemons(self, service_name, daemon_type = None, daemon_id = None, hostname = None, refresh = True):
        # no orchestrator in CLI mode; nothing to list
        return []


class RealmCommand:
    """Dispatcher for `rgwam realm <subcommand>`."""

    def __init__(self, env, args):
        self.env = env
        self.args = args

    def parse(self):
        """Resolve the subcommand name to a bound method (or exit with usage)."""
        parser = argparse.ArgumentParser(
            usage='''rgwam realm <subcommand>

The subcommands are:
   bootstrap                     Bootstrap new realm
   new-zone-creds                Create credentials for connecting new zone
''')
        parser.add_argument('subcommand', help='Subcommand to run')
        # parse_args defaults to [1:] for args, but you need to
        # exclude the rest of the args too, or validation will fail
        args = parser.parse_args(self.args[0:1])
        sub = args.subcommand.replace('-', '_')
        if not hasattr(self, sub):
            print('Unrecognized subcommand:', args.subcommand)
            parser.print_help()
            exit(1)
        # use dispatch pattern to invoke method with same name
        return getattr(self, sub)

    def bootstrap(self):
        """Bootstrap a new realm/zonegroup/zone and optionally start radosgw."""
        parser = argparse.ArgumentParser(
            description='Bootstrap new realm',
            usage='rgwam realm bootstrap [<args>]')
        parser.add_argument('--realm')
        parser.add_argument('--zonegroup')
        parser.add_argument('--zone')
        parser.add_argument('--endpoints')
        parser.add_argument('--sys-uid')
        parser.add_argument('--uid')
        parser.add_argument('--start-radosgw', action='store_true', dest='start_radosgw', default=True)
        parser.add_argument('--no-start-radosgw', action='store_false', dest='start_radosgw')

        args = parser.parse_args(self.args[1:])

        return RGWAM(self.env).realm_bootstrap(args.realm, args.zonegroup,
                args.zone, args.endpoints, args.sys_uid, args.uid,
                args.start_radosgw)

    def new_zone_creds(self):
        """Create credentials (realm token) for connecting a new zone."""
        parser = argparse.ArgumentParser(
            description='Bootstrap new realm',
            usage='rgwam realm new-zone-creds [<args>]')
        parser.add_argument('--endpoints')
        parser.add_argument('--sys-uid')

        args = parser.parse_args(self.args[1:])

        return RGWAM(self.env).realm_new_zone_creds(args.endpoints, args.sys_uid)


class ZoneCommand:
    """Dispatcher for `rgwam zone <subcommand>`."""

    def __init__(self, env, args):
        self.env = env
        self.args = args

    def parse(self):
        """Resolve the subcommand name to a bound method (or exit with usage)."""
        parser = argparse.ArgumentParser(
            usage='''rgwam zone <subcommand>

The subcommands are:
   run                           run radosgw daemon in current zone
''')
        parser.add_argument('subcommand', help='Subcommand to run')
        # parse_args defaults to [1:] for args, but you need to
        # exclude the rest of the args too, or validation will fail
        args = parser.parse_args(self.args[0:1])
        if not hasattr(self, args.subcommand):
            print('Unrecognized subcommand:', args.subcommand)
            parser.print_help()
            exit(1)
        # use dispatch pattern to invoke method with same name
        return getattr(self, args.subcommand)

    def run(self):
        """Run a radosgw daemon in the current zone."""
        parser = argparse.ArgumentParser(
            description='Run radosgw daemon',
            usage='rgwam zone run [<args>]')
        parser.add_argument('--port')
        parser.add_argument('--log-file')
        parser.add_argument('--debug-ms')
        parser.add_argument('--debug-rgw')

        args = parser.parse_args(self.args[1:])

        return RGWAM(self.env).run_radosgw(port = args.port)

    def create(self):
        """Create a new zone and join it to an existing realm via a realm token."""
        parser = argparse.ArgumentParser(
            description='Create new zone to join existing realm',
            usage='rgwam zone create [<args>]')
        parser.add_argument('--realm-token')
        parser.add_argument('--zone')
        parser.add_argument('--zonegroup')
        parser.add_argument('--endpoints')
        parser.add_argument('--start-radosgw', action='store_true', dest='start_radosgw', default=True)
        parser.add_argument('--no-start-radosgw', action='store_false', dest='start_radosgw')

        args = parser.parse_args(self.args[1:])

        return RGWAM(self.env).zone_create(args.realm_token, args.zonegroup,
                args.zone, args.endpoints, args.start_radosgw)


class CommonArgs:
    """Plain holder for the ceph options shared by all commands."""

    def __init__(self, ns):
        self.conf_path = ns.conf_path
        self.ceph_name = ns.ceph_name
        self.ceph_keyring = ns.ceph_keyring


class TopLevelCommand:
    """Top-level `rgwam <command>` dispatcher (realm / zone)."""

    def _parse(self):
        parser = argparse.ArgumentParser(
            description='RGW assist for multisite tool',
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog='''
The commands are:
   realm bootstrap               Bootstrap new realm
   realm new-zone-creds          Create credentials to connect new zone to realm
   zone create                   Create new zone and connect it to existing realm
   zone run                      Run radosgw in current zone
''')

        parser.add_argument('command', help='command to run', default=None)
        parser.add_argument('-c', help='ceph conf path', dest='conf_path')
        parser.add_argument('-n', help='ceph user name', dest='ceph_name')
        parser.add_argument('-k', help='ceph keyring', dest='ceph_keyring')

        removed_args = []

        args = sys.argv[1:]
        if len(args) > 0:
            if hasattr(self, args[0]):
                # remove -h/--help if top command is not empty so that top level help
                # doesn't override subcommand, we'll add it later
                help_args = [ '-h', '--help' ]
                removed_args = [arg for arg in args if arg in help_args]
                args = [arg for arg in args if arg not in help_args]

        (ns, args) = parser.parse_known_args(args)

        if not hasattr(self, ns.command) or ns.command[0] == '_':
            print('Unrecognized command:', ns.command)
            parser.print_help()
            exit(1)

        # use dispatch pattern to invoke method with same name
        args += removed_args
        return (getattr(self, ns.command), CommonArgs(ns), args)

    def realm(self, env, args):
        cmd = RealmCommand(env, args).parse()
        return cmd()

    def zone(self, env, args):
        cmd = ZoneCommand(env, args).parse()
        return cmd()


def main():
    logging.basicConfig(level=logging.INFO)
    log = logging.getLogger(__name__)

    (cmd, common_args, args) = TopLevelCommand()._parse()

    env = EnvArgs(RGWAMCLIMgr(common_args))

    try:
        retval, out, err = cmd(env, args)
        if retval != 0:
            log.error('stdout: ' + out + '\nstderr: ' + err)
        sys.exit(retval)
    except RGWAMException as e:
        print('ERROR: ' + e.message)
        # BUGFIX: previously fell through to sys.exit(0), reporting success
        # to the shell even though the command failed
        sys.exit(1)

    sys.exit(0)


if __name__ == '__main__':
    main()
8,004
32.215768
123
py
null
ceph-main/src/rgw/driver/d4n/d4n_datacache.cc
#include "d4n_datacache.h" #define dout_subsys ceph_subsys_rgw #define dout_context g_ceph_context /* Base metadata and data fields should remain consistent */ std::vector<std::string> baseFields { "mtime", "object_size", "accounted_size", "epoch", "version_id", "source_zone_short_id", "bucket_count", "bucket_size", "user_quota.max_size", "user_quota.max_objects", "max_buckets", "data"}; std::vector< std::pair<std::string, std::string> > RGWD4NCache::buildObject(rgw::sal::Attrs* binary) { std::vector< std::pair<std::string, std::string> > values; rgw::sal::Attrs::iterator attrs; /* Convert to vector */ if (binary != NULL) { for (attrs = binary->begin(); attrs != binary->end(); ++attrs) { values.push_back(std::make_pair(attrs->first, attrs->second.to_str())); } } return values; } int RGWD4NCache::findClient(cpp_redis::client *client) { if (client->is_connected()) return 0; if (host == "" || port == 0) { dout(10) << "RGW D4N Cache: D4N cache endpoint was not configured correctly" << dendl; return EDESTADDRREQ; } client->connect(host, port, nullptr); if (!client->is_connected()) return ECONNREFUSED; return 0; } int RGWD4NCache::existKey(std::string key) { int result = -1; std::vector<std::string> keys; keys.push_back(key); if (!client.is_connected()) { return result; } try { client.exists(keys, [&result](cpp_redis::reply &reply) { if (reply.is_integer()) { result = reply.as_integer(); /* Returns 1 upon success */ } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) {} return result; } int RGWD4NCache::setObject(std::string oid, rgw::sal::Attrs* attrs) { /* Creating the index based on oid */ std::string key = "rgw-object:" + oid + ":cache"; std::string result; if (!client.is_connected()) { findClient(&client); } /* Every set will be treated as new */ try { std::vector< std::pair<std::string, std::string> > redisObject = buildObject(attrs); if (redisObject.empty()) { return -1; } client.hmset(key, redisObject, [&result](cpp_redis::reply 
&reply) { if (!reply.is_null()) { result = reply.as_string(); } }); client.sync_commit(std::chrono::milliseconds(1000)); if (result != "OK") { return -1; } } catch(std::exception &e) { return -1; } return 0; } int RGWD4NCache::getObject(std::string oid, rgw::sal::Attrs* newAttrs, std::vector< std::pair<std::string, std::string> >* newMetadata) { std::string result; std::string key = "rgw-object:" + oid + ":cache"; if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { int field_exist = -1; rgw::sal::Attrs::iterator it; std::vector< std::pair<std::string, std::string> > redisObject; std::vector<std::string> getFields; /* Retrieve existing fields from cache */ try { client.hgetall(key, [&getFields](cpp_redis::reply &reply) { if (reply.is_array()) { auto arr = reply.as_array(); if (!arr[0].is_null()) { for (long unsigned int i = 0; i < arr.size() - 1; i += 2) { getFields.push_back(arr[i].as_string()); } } } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) { return -1; } /* Only data exists */ if (getFields.size() == 1 && getFields[0] == "data") return 0; /* Ensure all metadata, attributes, and data has been set */ for (const auto& field : baseFields) { auto it = std::find_if(getFields.begin(), getFields.end(), [&](const auto& comp) { return comp == field; }); if (it != getFields.end()) { int index = std::distance(getFields.begin(), it); getFields.erase(getFields.begin() + index); } else { return -1; } } /* Get attributes from cache */ try { client.hmget(key, getFields, [&field_exist, &newAttrs, &getFields](cpp_redis::reply &reply) { if (reply.is_array()) { auto arr = reply.as_array(); if (!arr[0].is_null()) { field_exist = 0; for (long unsigned int i = 0; i < getFields.size(); ++i) { std::string tmp = arr[i].as_string(); buffer::list bl; bl.append(tmp); newAttrs->insert({getFields[i], bl}); } } } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) { return -1; } if (field_exist == 0) 
{ field_exist = -1; getFields.clear(); getFields.insert(getFields.begin(), baseFields.begin(), baseFields.end()); getFields.pop_back(); /* Do not query for data field */ /* Get metadata from cache */ try { client.hmget(key, getFields, [&field_exist, &newMetadata, &getFields](cpp_redis::reply &reply) { if (reply.is_array()) { auto arr = reply.as_array(); if (!arr[0].is_null()) { field_exist = 0; for (long unsigned int i = 0; i < getFields.size(); ++i) { newMetadata->push_back({getFields[i], arr[i].as_string()}); } } } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) { return -1; } } else { return -1; } } else { dout(20) << "RGW D4N Cache: Object was not retrievable." << dendl; return -2; } return 0; } int RGWD4NCache::copyObject(std::string original_oid, std::string copy_oid, rgw::sal::Attrs* attrs) { std::string result; std::vector< std::pair<std::string, std::string> > redisObject; std::string key = "rgw-object:" + original_oid + ":cache"; if (!client.is_connected()) { findClient(&client); } /* Read values from cache */ if (existKey(key)) { try { client.hgetall(key, [&redisObject](cpp_redis::reply &reply) { if (reply.is_array()) { auto arr = reply.as_array(); if (!arr[0].is_null()) { for (long unsigned int i = 0; i < arr.size() - 1; i += 2) { redisObject.push_back({arr[i].as_string(), arr[i + 1].as_string()}); } } } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) { return -1; } } else { return -2; } /* Build copy with updated values */ if (!redisObject.empty()) { rgw::sal::Attrs::iterator attr; for (attr = attrs->begin(); attr != attrs->end(); ++attr) { auto it = std::find_if(redisObject.begin(), redisObject.end(), [&](const auto& pair) { return pair.first == attr->first; }); if (it != redisObject.end()) { int index = std::distance(redisObject.begin(), it); redisObject[index] = {attr->first, attr->second.to_str()}; } else { redisObject.push_back(std::make_pair(attr->first, 
attr->second.to_str())); } } } else { return -1; } /* Set copy with new values */ key = "rgw-object:" + copy_oid + ":cache"; try { client.hmset(key, redisObject, [&result](cpp_redis::reply &reply) { if (!reply.is_null()) { result = reply.as_string(); } }); client.sync_commit(std::chrono::milliseconds(1000)); if (result != "OK") { return -1; } } catch(std::exception &e) { return -1; } return 0; } int RGWD4NCache::delObject(std::string oid) { int result = 0; std::vector<std::string> keys; std::string key = "rgw-object:" + oid + ":cache"; keys.push_back(key); if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { try { client.del(keys, [&result](cpp_redis::reply &reply) { if (reply.is_integer()) { result = reply.as_integer(); } }); client.sync_commit(std::chrono::milliseconds(1000)); return result - 1; } catch(std::exception &e) { return -1; } } else { dout(20) << "RGW D4N Cache: Object is not in cache." << dendl; return -2; } } int RGWD4NCache::updateAttr(std::string oid, rgw::sal::Attrs* attr) { std::string result; std::string key = "rgw-object:" + oid + ":cache"; if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { try { std::vector< std::pair<std::string, std::string> > redisObject; auto it = attr->begin(); redisObject.push_back({it->first, it->second.to_str()}); client.hmset(key, redisObject, [&result](cpp_redis::reply &reply) { if (!reply.is_null()) { result = reply.as_string(); } }); client.sync_commit(std::chrono::milliseconds(1000)); if (result != "OK") { return -1; } } catch(std::exception &e) { return -1; } } else { return -2; } return 0; } int RGWD4NCache::delAttrs(std::string oid, std::vector<std::string>& baseFields, std::vector<std::string>& deleteFields) { int result = 0; std::string key = "rgw-object:" + oid + ":cache"; if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { /* Find if attribute doesn't exist */ for (const auto& delField : deleteFields) { if (std::find(baseFields.begin(), 
baseFields.end(), delField) == baseFields.end()) { deleteFields.erase(std::find(deleteFields.begin(), deleteFields.end(), delField)); } } try { client.hdel(key, deleteFields, [&result](cpp_redis::reply &reply) { if (reply.is_integer()) { result = reply.as_integer(); } }); client.sync_commit(std::chrono::milliseconds(1000)); return result - 1; } catch(std::exception &e) { return -1; } } dout(20) << "RGW D4N Cache: Object is not in cache." << dendl; return -2; } int RGWD4NCache::appendData(std::string oid, buffer::list& data) { std::string result; std::string value = ""; std::string key = "rgw-object:" + oid + ":cache"; if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { try { client.hget(key, "data", [&value](cpp_redis::reply &reply) { if (!reply.is_null()) { value = reply.as_string(); } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) { return -1; } } try { /* Append to existing value or set as new value */ std::string temp = value + data.to_str(); std::vector< std::pair<std::string, std::string> > field; field.push_back({"data", temp}); client.hmset(key, field, [&result](cpp_redis::reply &reply) { if (!reply.is_null()) { result = reply.as_string(); } }); client.sync_commit(std::chrono::milliseconds(1000)); if (result != "OK") { return -1; } } catch(std::exception &e) { return -1; } return 0; } int RGWD4NCache::deleteData(std::string oid) { int result = 0; std::string key = "rgw-object:" + oid + ":cache"; std::vector<std::string> deleteField; deleteField.push_back("data"); if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { int field_exist = -1; try { client.hget(key, "data", [&field_exist](cpp_redis::reply &reply) { if (!reply.is_null()) { field_exist = 0; } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) { return -1; } if (field_exist == 0) { try { client.hdel(key, deleteField, [&result](cpp_redis::reply &reply) { if (reply.is_integer()) { result = 
reply.as_integer(); /* Returns 1 upon success */ } }); client.sync_commit(std::chrono::milliseconds(1000)); return result - 1; } catch(std::exception &e) { return -1; } } else { return -1; } } else { return 0; /* No delete was necessary */ } }
11,770
22.973523
122
cc
null
ceph-main/src/rgw/driver/d4n/d4n_datacache.h
#ifndef CEPH_RGWD4NCACHE_H #define CEPH_RGWD4NCACHE_H #include "rgw_common.h" #include <cpp_redis/cpp_redis> #include <string> #include <iostream> class RGWD4NCache { public: CephContext *cct; RGWD4NCache() {} RGWD4NCache(std::string cacheHost, int cachePort):host(cacheHost), port(cachePort) {} void init(CephContext *_cct) { cct = _cct; host = cct->_conf->rgw_d4n_host; port = cct->_conf->rgw_d4n_port; } int findClient(cpp_redis::client *client); int existKey(std::string key); int setObject(std::string oid, rgw::sal::Attrs* attrs); int getObject(std::string oid, rgw::sal::Attrs* newAttrs, std::vector< std::pair<std::string, std::string> >* newMetadata); int copyObject(std::string original_oid, std::string copy_oid, rgw::sal::Attrs* attrs); int delObject(std::string oid); int updateAttr(std::string oid, rgw::sal::Attrs* attr); int delAttrs(std::string oid, std::vector<std::string>& baseFields, std::vector<std::string>& deleteFields); int appendData(std::string oid, buffer::list& data); int deleteData(std::string oid); private: cpp_redis::client client; std::string host = ""; int port = 0; std::vector< std::pair<std::string, std::string> > buildObject(rgw::sal::Attrs* binary); }; #endif
1,308
30.926829
127
h
null
ceph-main/src/rgw/driver/d4n/d4n_directory.cc
#include "d4n_directory.h" #define dout_subsys ceph_subsys_rgw #define dout_context g_ceph_context int RGWBlockDirectory::findClient(cpp_redis::client *client) { if (client->is_connected()) return 0; if (host == "" || port == 0) { dout(10) << "RGW D4N Directory: D4N directory endpoint was not configured correctly" << dendl; return EDESTADDRREQ; } client->connect(host, port, nullptr); if (!client->is_connected()) return ECONNREFUSED; return 0; } std::string RGWBlockDirectory::buildIndex(cache_block *ptr) { return "rgw-object:" + ptr->c_obj.obj_name + ":directory"; } int RGWBlockDirectory::existKey(std::string key) { int result = -1; std::vector<std::string> keys; keys.push_back(key); if (!client.is_connected()) { return result; } try { client.exists(keys, [&result](cpp_redis::reply &reply) { if (reply.is_integer()) { result = reply.as_integer(); /* Returns 1 upon success */ } }); client.sync_commit(std::chrono::milliseconds(1000)); } catch(std::exception &e) {} return result; } int RGWBlockDirectory::setValue(cache_block *ptr) { /* Creating the index based on obj_name */ std::string key = buildIndex(ptr); if (!client.is_connected()) { findClient(&client); } std::string result; std::vector<std::string> keys; keys.push_back(key); /* Every set will be new */ if (host == "" || port == 0) { dout(10) << "RGW D4N Directory: Directory endpoint not configured correctly" << dendl; return -1; } std::string endpoint = host + ":" + std::to_string(port); std::vector<std::pair<std::string, std::string>> list; /* Creating a list of key's properties */ list.push_back(make_pair("key", key)); list.push_back(make_pair("size", std::to_string(ptr->size_in_bytes))); list.push_back(make_pair("bucket_name", ptr->c_obj.bucket_name)); list.push_back(make_pair("obj_name", ptr->c_obj.obj_name)); list.push_back(make_pair("hosts", endpoint)); try { client.hmset(key, list, [&result](cpp_redis::reply &reply) { if (!reply.is_null()) { result = reply.as_string(); } }); 
client.sync_commit(std::chrono::milliseconds(1000)); if (result != "OK") { return -1; } } catch(std::exception &e) { return -1; } return 0; } int RGWBlockDirectory::getValue(cache_block *ptr) { std::string key = buildIndex(ptr); if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { int field_exist = -1; std::string hosts; std::string size; std::string bucket_name; std::string obj_name; std::vector<std::string> fields; fields.push_back("key"); fields.push_back("hosts"); fields.push_back("size"); fields.push_back("bucket_name"); fields.push_back("obj_name"); try { client.hmget(key, fields, [&key, &hosts, &size, &bucket_name, &obj_name, &field_exist](cpp_redis::reply &reply) { if (reply.is_array()) { auto arr = reply.as_array(); if (!arr[0].is_null()) { field_exist = 0; key = arr[0].as_string(); hosts = arr[1].as_string(); size = arr[2].as_string(); bucket_name = arr[3].as_string(); obj_name = arr[4].as_string(); } } }); client.sync_commit(std::chrono::milliseconds(1000)); if (field_exist < 0) { return field_exist; } /* Currently, there can only be one host */ ptr->size_in_bytes = std::stoi(size); ptr->c_obj.bucket_name = bucket_name; ptr->c_obj.obj_name = obj_name; } catch(std::exception &e) { return -1; } } return 0; } int RGWBlockDirectory::delValue(cache_block *ptr) { int result = 0; std::vector<std::string> keys; std::string key = buildIndex(ptr); keys.push_back(key); if (!client.is_connected()) { findClient(&client); } if (existKey(key)) { try { client.del(keys, [&result](cpp_redis::reply &reply) { if (reply.is_integer()) { result = reply.as_integer(); /* Returns 1 upon success */ } }); client.sync_commit(std::chrono::milliseconds(1000)); return result - 1; } catch(std::exception &e) { return -1; } } else { dout(20) << "RGW D4N Directory: Block is not in directory." << dendl; return -2; } }
4,354
23.194444
119
cc
null
ceph-main/src/rgw/driver/d4n/d4n_directory.h
#ifndef CEPH_RGWD4NDIRECTORY_H #define CEPH_RGWD4NDIRECTORY_H #include "rgw_common.h" #include <cpp_redis/cpp_redis> #include <string> #include <iostream> struct cache_obj { std::string bucket_name; /* s3 bucket name */ std::string obj_name; /* s3 obj name */ }; struct cache_block { cache_obj c_obj; uint64_t size_in_bytes; /* block size_in_bytes */ std::vector<std::string> hosts_list; /* Currently not supported: list of hostnames <ip:port> of block locations */ }; class RGWDirectory { public: RGWDirectory() {} CephContext *cct; }; class RGWBlockDirectory: RGWDirectory { public: RGWBlockDirectory() {} RGWBlockDirectory(std::string blockHost, int blockPort):host(blockHost), port(blockPort) {} void init(CephContext *_cct) { cct = _cct; host = cct->_conf->rgw_d4n_host; port = cct->_conf->rgw_d4n_port; } int findClient(cpp_redis::client *client); int existKey(std::string key); int setValue(cache_block *ptr); int getValue(cache_block *ptr); int delValue(cache_block *ptr); std::string get_host() { return host; } int get_port() { return port; } private: cpp_redis::client client; std::string buildIndex(cache_block *ptr); std::string host = ""; int port = 0; }; #endif
1,294
22.981481
116
h
null
ceph-main/src/rgw/driver/d4n/rgw_sal_d4n.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "rgw_sal_d4n.h" #define dout_subsys ceph_subsys_rgw #define dout_context g_ceph_context namespace rgw { namespace sal { static inline Bucket* nextBucket(Bucket* t) { if (!t) return nullptr; return dynamic_cast<FilterBucket*>(t)->get_next(); } static inline Object* nextObject(Object* t) { if (!t) return nullptr; return dynamic_cast<FilterObject*>(t)->get_next(); } int D4NFilterDriver::initialize(CephContext *cct, const DoutPrefixProvider *dpp) { FilterDriver::initialize(cct, dpp); blk_dir->init(cct); d4n_cache->init(cct); return 0; } std::unique_ptr<User> D4NFilterDriver::get_user(const rgw_user &u) { std::unique_ptr<User> user = next->get_user(u); return std::make_unique<D4NFilterUser>(std::move(user), this); } std::unique_ptr<Object> D4NFilterBucket::get_object(const rgw_obj_key& k) { std::unique_ptr<Object> o = next->get_object(k); return std::make_unique<D4NFilterObject>(std::move(o), this, filter); } int D4NFilterUser::create_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, const RGWQuotaInfo * pquota_info, const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info, obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed, req_info& req_info, std::unique_ptr<Bucket>* bucket_out, optional_yield y) { std::unique_ptr<Bucket> nb; int ret; ret = next->create_bucket(dpp, b, zonegroup_id, placement_rule, swift_ver_location, pquota_info, policy, attrs, info, ep_objv, exclusive, obj_lock_enabled, existed, req_info, &nb, y); if 
(ret < 0) return ret; Bucket* fb = new D4NFilterBucket(std::move(nb), this, filter); bucket_out->reset(fb); return 0; } int D4NFilterObject::copy_object(User* user, req_info* info, const rgw_zone_id& source_zone, rgw::sal::Object* dest_object, rgw::sal::Bucket* dest_bucket, rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement, ceph::real_time* src_mtime, ceph::real_time* mtime, const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr, bool high_precision_time, const char* if_match, const char* if_nomatch, AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs, RGWObjCategory category, uint64_t olh_epoch, boost::optional<ceph::real_time> delete_at, std::string* version_id, std::string* tag, std::string* etag, void (*progress_cb)(off_t, void *), void* progress_data, const DoutPrefixProvider* dpp, optional_yield y) { /* Append additional metadata to attributes */ rgw::sal::Attrs baseAttrs = this->get_attrs(); buffer::list bl; bl.append(to_iso_8601(*mtime)); baseAttrs.insert({"mtime", bl}); bl.clear(); if (version_id != NULL) { bl.append(*version_id); baseAttrs.insert({"version_id", bl}); bl.clear(); } if (!etag->empty()) { bl.append(*etag); baseAttrs.insert({"etag", bl}); bl.clear(); } if (attrs_mod == rgw::sal::ATTRSMOD_REPLACE) { /* Replace */ rgw::sal::Attrs::iterator iter; for (const auto& pair : attrs) { iter = baseAttrs.find(pair.first); if (iter != baseAttrs.end()) { iter->second = pair.second; } else { baseAttrs.insert({pair.first, pair.second}); } } } else if (attrs_mod == rgw::sal::ATTRSMOD_MERGE) { /* Merge */ baseAttrs.insert(attrs.begin(), attrs.end()); } int copyObjReturn = filter->get_d4n_cache()->copyObject(this->get_key().get_oid(), dest_object->get_key().get_oid(), &baseAttrs); if (copyObjReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache copy object operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache copy object operation succeeded." 
<< dendl; } return next->copy_object(user, info, source_zone, nextObject(dest_object), nextBucket(dest_bucket), nextBucket(src_bucket), dest_placement, src_mtime, mtime, mod_ptr, unmod_ptr, high_precision_time, if_match, if_nomatch, attrs_mod, copy_if_newer, attrs, category, olh_epoch, delete_at, version_id, tag, etag, progress_cb, progress_data, dpp, y); } int D4NFilterObject::set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y) { if (setattrs != NULL) { /* Ensure setattrs and delattrs do not overlap */ if (delattrs != NULL) { for (const auto& attr : *delattrs) { if (std::find(setattrs->begin(), setattrs->end(), attr) != setattrs->end()) { delattrs->erase(std::find(delattrs->begin(), delattrs->end(), attr)); } } } int updateAttrsReturn = filter->get_d4n_cache()->setObject(this->get_key().get_oid(), setattrs); if (updateAttrsReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache set object attributes operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache set object attributes operation succeeded." << dendl; } } if (delattrs != NULL) { std::vector<std::string> delFields; Attrs::iterator attrs; /* Extract fields from delattrs */ for (attrs = delattrs->begin(); attrs != delattrs->end(); ++attrs) { delFields.push_back(attrs->first); } Attrs currentattrs = this->get_attrs(); std::vector<std::string> currentFields; /* Extract fields from current attrs */ for (attrs = currentattrs.begin(); attrs != currentattrs.end(); ++attrs) { currentFields.push_back(attrs->first); } int delAttrsReturn = filter->get_d4n_cache()->delAttrs(this->get_key().get_oid(), currentFields, delFields); if (delAttrsReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache delete object attributes operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache delete object attributes operation succeeded." 
<< dendl; } } return next->set_obj_attrs(dpp, setattrs, delattrs, y); } int D4NFilterObject::get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj) { rgw::sal::Attrs newAttrs; std::vector< std::pair<std::string, std::string> > newMetadata; int getAttrsReturn = filter->get_d4n_cache()->getObject(this->get_key().get_oid(), &newAttrs, &newMetadata); if (getAttrsReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache get object attributes operation failed." << dendl; return next->get_obj_attrs(y, dpp, target_obj); } else { int setAttrsReturn = this->set_attrs(newAttrs); if (setAttrsReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache get object attributes operation failed." << dendl; return next->get_obj_attrs(y, dpp, target_obj); } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache get object attributes operation succeeded." << dendl; return 0; } } } int D4NFilterObject::modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) { Attrs update; update[(std::string)attr_name] = attr_val; int updateAttrsReturn = filter->get_d4n_cache()->updateAttr(this->get_key().get_oid(), &update); if (updateAttrsReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache modify object attribute operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache modify object attribute operation succeeded." 
<< dendl; } return next->modify_obj_attrs(attr_name, attr_val, y, dpp); } int D4NFilterObject::delete_obj_attrs(const DoutPrefixProvider* dpp, const char* attr_name, optional_yield y) { std::vector<std::string> delFields; delFields.push_back((std::string)attr_name); Attrs::iterator attrs; Attrs currentattrs = this->get_attrs(); std::vector<std::string> currentFields; /* Extract fields from current attrs */ for (attrs = currentattrs.begin(); attrs != currentattrs.end(); ++attrs) { currentFields.push_back(attrs->first); } int delAttrReturn = filter->get_d4n_cache()->delAttrs(this->get_key().get_oid(), currentFields, delFields); if (delAttrReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache delete object attribute operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache delete object attribute operation succeeded." << dendl; } return next->delete_obj_attrs(dpp, attr_name, y); } std::unique_ptr<Object> D4NFilterDriver::get_object(const rgw_obj_key& k) { std::unique_ptr<Object> o = next->get_object(k); return std::make_unique<D4NFilterObject>(std::move(o), this); } std::unique_ptr<Writer> D4NFilterDriver::get_atomic_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, uint64_t olh_epoch, const std::string& unique_tag) { std::unique_ptr<Writer> writer = next->get_atomic_writer(dpp, y, nextObject(obj), owner, ptail_placement_rule, olh_epoch, unique_tag); return std::make_unique<D4NFilterWriter>(std::move(writer), this, obj, dpp, true); } std::unique_ptr<Object::ReadOp> D4NFilterObject::get_read_op() { std::unique_ptr<ReadOp> r = next->get_read_op(); return std::make_unique<D4NFilterReadOp>(std::move(r), this); } std::unique_ptr<Object::DeleteOp> D4NFilterObject::get_delete_op() { std::unique_ptr<DeleteOp> d = next->get_delete_op(); return std::make_unique<D4NFilterDeleteOp>(std::move(d), this); } int 
D4NFilterObject::D4NFilterReadOp::prepare(optional_yield y, const DoutPrefixProvider* dpp) { int getDirReturn = source->filter->get_block_dir()->getValue(source->filter->get_cache_block()); if (getDirReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Directory get operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Directory get operation succeeded." << dendl; } rgw::sal::Attrs newAttrs; std::vector< std::pair<std::string, std::string> > newMetadata; int getObjReturn = source->filter->get_d4n_cache()->getObject(source->get_key().get_oid(), &newAttrs, &newMetadata); int ret = next->prepare(y, dpp); if (getObjReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache get object operation failed." << dendl; } else { /* Set metadata locally */ RGWQuotaInfo quota_info; RGWObjState* astate; source->get_obj_state(dpp, &astate, y); for (auto it = newMetadata.begin(); it != newMetadata.end(); ++it) { if (!std::strcmp(it->first.data(), "mtime")) { parse_time(it->second.data(), &astate->mtime); } else if (!std::strcmp(it->first.data(), "object_size")) { source->set_obj_size(std::stoull(it->second)); } else if (!std::strcmp(it->first.data(), "accounted_size")) { astate->accounted_size = std::stoull(it->second); } else if (!std::strcmp(it->first.data(), "epoch")) { astate->epoch = std::stoull(it->second); } else if (!std::strcmp(it->first.data(), "version_id")) { source->set_instance(it->second); } else if (!std::strcmp(it->first.data(), "source_zone_short_id")) { astate->zone_short_id = static_cast<uint32_t>(std::stoul(it->second)); } else if (!std::strcmp(it->first.data(), "bucket_count")) { source->get_bucket()->set_count(std::stoull(it->second)); } else if (!std::strcmp(it->first.data(), "bucket_size")) { source->get_bucket()->set_size(std::stoull(it->second)); } else if (!std::strcmp(it->first.data(), "user_quota.max_size")) { quota_info.max_size = std::stoull(it->second); } else if (!std::strcmp(it->first.data(), "user_quota.max_objects")) { 
quota_info.max_objects = std::stoull(it->second); } else if (!std::strcmp(it->first.data(), "max_buckets")) { source->get_bucket()->get_owner()->set_max_buckets(std::stoull(it->second)); } } source->get_bucket()->get_owner()->set_info(quota_info); source->set_obj_state(*astate); /* Set attributes locally */ int setAttrsReturn = source->set_attrs(newAttrs); if (setAttrsReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache get object operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache get object operation succeeded." << dendl; } } return ret; } int D4NFilterObject::D4NFilterDeleteOp::delete_obj(const DoutPrefixProvider* dpp, optional_yield y) { int delDirReturn = source->filter->get_block_dir()->delValue(source->filter->get_cache_block()); if (delDirReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Directory delete operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Directory delete operation succeeded." << dendl; } int delObjReturn = source->filter->get_d4n_cache()->delObject(source->get_key().get_oid()); if (delObjReturn < 0) { ldpp_dout(dpp, 20) << "D4N Filter: Cache delete operation failed." << dendl; } else { ldpp_dout(dpp, 20) << "D4N Filter: Cache delete operation succeeded." << dendl; } return next->delete_obj(dpp, y); } int D4NFilterWriter::prepare(optional_yield y) { int delDataReturn = filter->get_d4n_cache()->deleteData(obj->get_key().get_oid()); if (delDataReturn < 0) { ldpp_dout(save_dpp, 20) << "D4N Filter: Cache delete data operation failed." << dendl; } else { ldpp_dout(save_dpp, 20) << "D4N Filter: Cache delete data operation succeeded." << dendl; } return next->prepare(y); } int D4NFilterWriter::process(bufferlist&& data, uint64_t offset) { int appendDataReturn = filter->get_d4n_cache()->appendData(obj->get_key().get_oid(), data); if (appendDataReturn < 0) { ldpp_dout(save_dpp, 20) << "D4N Filter: Cache append data operation failed." 
<< dendl; } else { ldpp_dout(save_dpp, 20) << "D4N Filter: Cache append data operation succeeded." << dendl; } return next->process(std::move(data), offset); } int D4NFilterWriter::complete(size_t accounted_size, const std::string& etag, ceph::real_time *mtime, ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs, ceph::real_time delete_at, const char *if_match, const char *if_nomatch, const std::string *user_data, rgw_zone_set *zones_trace, bool *canceled, optional_yield y) { cache_block* temp_cache_block = filter->get_cache_block(); RGWBlockDirectory* temp_block_dir = filter->get_block_dir(); temp_cache_block->hosts_list.push_back(temp_block_dir->get_host() + ":" + std::to_string(temp_block_dir->get_port())); temp_cache_block->size_in_bytes = accounted_size; temp_cache_block->c_obj.bucket_name = obj->get_bucket()->get_name(); temp_cache_block->c_obj.obj_name = obj->get_key().get_oid(); int setDirReturn = temp_block_dir->setValue(temp_cache_block); if (setDirReturn < 0) { ldpp_dout(save_dpp, 20) << "D4N Filter: Directory set operation failed." << dendl; } else { ldpp_dout(save_dpp, 20) << "D4N Filter: Directory set operation succeeded." 
<< dendl; } /* Retrieve complete set of attrs */ RGWObjState* astate; int ret = next->complete(accounted_size, etag, mtime, set_mtime, attrs, delete_at, if_match, if_nomatch, user_data, zones_trace, canceled, y); obj->get_obj_attrs(y, save_dpp, NULL); obj->get_obj_state(save_dpp, &astate, y); /* Append additional metadata to attributes */ rgw::sal::Attrs baseAttrs = obj->get_attrs(); rgw::sal::Attrs attrs_temp = baseAttrs; buffer::list bl; bl.append(to_iso_8601(obj->get_mtime())); baseAttrs.insert({"mtime", bl}); bl.clear(); bl.append(std::to_string(obj->get_obj_size())); baseAttrs.insert({"object_size", bl}); bl.clear(); bl.append(std::to_string(accounted_size)); baseAttrs.insert({"accounted_size", bl}); bl.clear(); bl.append(std::to_string(astate->epoch)); baseAttrs.insert({"epoch", bl}); bl.clear(); if (obj->have_instance()) { bl.append(obj->get_instance()); baseAttrs.insert({"version_id", bl}); bl.clear(); } else { bl.append(""); /* Empty value */ baseAttrs.insert({"version_id", bl}); bl.clear(); } auto iter = attrs_temp.find(RGW_ATTR_SOURCE_ZONE); if (iter != attrs_temp.end()) { bl.append(std::to_string(astate->zone_short_id)); baseAttrs.insert({"source_zone_short_id", bl}); bl.clear(); } else { bl.append("0"); /* Initialized to zero */ baseAttrs.insert({"source_zone_short_id", bl}); bl.clear(); } bl.append(std::to_string(obj->get_bucket()->get_count())); baseAttrs.insert({"bucket_count", bl}); bl.clear(); bl.append(std::to_string(obj->get_bucket()->get_size())); baseAttrs.insert({"bucket_size", bl}); bl.clear(); RGWUserInfo info = obj->get_bucket()->get_owner()->get_info(); bl.append(std::to_string(info.quota.user_quota.max_size)); baseAttrs.insert({"user_quota.max_size", bl}); bl.clear(); bl.append(std::to_string(info.quota.user_quota.max_objects)); baseAttrs.insert({"user_quota.max_objects", bl}); bl.clear(); bl.append(std::to_string(obj->get_bucket()->get_owner()->get_max_buckets())); baseAttrs.insert({"max_buckets", bl}); bl.clear(); 
baseAttrs.insert(attrs.begin(), attrs.end()); int setObjReturn = filter->get_d4n_cache()->setObject(obj->get_key().get_oid(), &baseAttrs); if (setObjReturn < 0) { ldpp_dout(save_dpp, 20) << "D4N Filter: Cache set operation failed." << dendl; } else { ldpp_dout(save_dpp, 20) << "D4N Filter: Cache set operation succeeded." << dendl; } return ret; } } } // namespace rgw::sal extern "C" { rgw::sal::Driver* newD4NFilter(rgw::sal::Driver* next) { rgw::sal::D4NFilterDriver* driver = new rgw::sal::D4NFilterDriver(next); return driver; } }
19,718
33.777778
185
cc
null
ceph-main/src/rgw/driver/d4n/rgw_sal_d4n.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "rgw_sal_filter.h" #include "rgw_sal.h" #include "rgw_oidc_provider.h" #include "rgw_role.h" #include "common/dout.h" #include "driver/d4n/d4n_directory.h" #include "driver/d4n/d4n_datacache.h" namespace rgw { namespace sal { class D4NFilterDriver : public FilterDriver { private: RGWBlockDirectory* blk_dir; cache_block* c_blk; RGWD4NCache* d4n_cache; public: D4NFilterDriver(Driver* _next) : FilterDriver(_next) { blk_dir = new RGWBlockDirectory(); /* Initialize directory address with cct */ c_blk = new cache_block(); d4n_cache = new RGWD4NCache(); } virtual ~D4NFilterDriver() { delete blk_dir; delete c_blk; delete d4n_cache; } virtual int initialize(CephContext *cct, const DoutPrefixProvider *dpp) override; virtual std::unique_ptr<User> get_user(const rgw_user& u) override; virtual std::unique_ptr<Object> get_object(const rgw_obj_key& k) override; virtual std::unique_ptr<Writer> get_atomic_writer(const DoutPrefixProvider *dpp, optional_yield y, rgw::sal::Object* obj, const rgw_user& owner, const rgw_placement_rule *ptail_placement_rule, uint64_t olh_epoch, const std::string& unique_tag) override; RGWBlockDirectory* get_block_dir() { return blk_dir; } cache_block* get_cache_block() { return c_blk; } RGWD4NCache* get_d4n_cache() { return d4n_cache; } }; class D4NFilterUser : public FilterUser { private: D4NFilterDriver* filter; public: D4NFilterUser(std::unique_ptr<User> _next, D4NFilterDriver* _filter) : FilterUser(std::move(_next)), filter(_filter) {} virtual ~D4NFilterUser() = default; virtual int create_bucket(const DoutPrefixProvider* dpp, const 
rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, const RGWQuotaInfo* pquota_info, const RGWAccessControlPolicy& policy, Attrs& attrs, RGWBucketInfo& info, obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed, req_info& req_info, std::unique_ptr<Bucket>* bucket, optional_yield y) override; }; class D4NFilterBucket : public FilterBucket { private: D4NFilterDriver* filter; public: D4NFilterBucket(std::unique_ptr<Bucket> _next, User* _user, D4NFilterDriver* _filter) : FilterBucket(std::move(_next), _user), filter(_filter) {} virtual ~D4NFilterBucket() = default; virtual std::unique_ptr<Object> get_object(const rgw_obj_key& key) override; }; class D4NFilterObject : public FilterObject { private: D4NFilterDriver* filter; public: struct D4NFilterReadOp : FilterReadOp { D4NFilterObject* source; D4NFilterReadOp(std::unique_ptr<ReadOp> _next, D4NFilterObject* _source) : FilterReadOp(std::move(_next)), source(_source) {} virtual ~D4NFilterReadOp() = default; virtual int prepare(optional_yield y, const DoutPrefixProvider* dpp) override; }; struct D4NFilterDeleteOp : FilterDeleteOp { D4NFilterObject* source; D4NFilterDeleteOp(std::unique_ptr<DeleteOp> _next, D4NFilterObject* _source) : FilterDeleteOp(std::move(_next)), source(_source) {} virtual ~D4NFilterDeleteOp() = default; virtual int delete_obj(const DoutPrefixProvider* dpp, optional_yield y) override; }; D4NFilterObject(std::unique_ptr<Object> _next, D4NFilterDriver* _filter) : FilterObject(std::move(_next)), filter(_filter) {} D4NFilterObject(std::unique_ptr<Object> _next, Bucket* _bucket, D4NFilterDriver* _filter) : FilterObject(std::move(_next), _bucket), filter(_filter) {} D4NFilterObject(D4NFilterObject& _o, D4NFilterDriver* _filter) : FilterObject(_o), filter(_filter) {} virtual ~D4NFilterObject() = default; virtual int copy_object(User* user, req_info* info, const rgw_zone_id& source_zone, rgw::sal::Object* dest_object, 
rgw::sal::Bucket* dest_bucket, rgw::sal::Bucket* src_bucket, const rgw_placement_rule& dest_placement, ceph::real_time* src_mtime, ceph::real_time* mtime, const ceph::real_time* mod_ptr, const ceph::real_time* unmod_ptr, bool high_precision_time, const char* if_match, const char* if_nomatch, AttrsMod attrs_mod, bool copy_if_newer, Attrs& attrs, RGWObjCategory category, uint64_t olh_epoch, boost::optional<ceph::real_time> delete_at, std::string* version_id, std::string* tag, std::string* etag, void (*progress_cb)(off_t, void *), void* progress_data, const DoutPrefixProvider* dpp, optional_yield y) override; virtual const std::string &get_name() const override { return next->get_name(); } virtual int set_obj_attrs(const DoutPrefixProvider* dpp, Attrs* setattrs, Attrs* delattrs, optional_yield y) override; virtual int get_obj_attrs(optional_yield y, const DoutPrefixProvider* dpp, rgw_obj* target_obj = NULL) override; virtual int modify_obj_attrs(const char* attr_name, bufferlist& attr_val, optional_yield y, const DoutPrefixProvider* dpp) override; virtual int delete_obj_attrs(const DoutPrefixProvider* dpp, const char* attr_name, optional_yield y) override; virtual std::unique_ptr<ReadOp> get_read_op() override; virtual std::unique_ptr<DeleteOp> get_delete_op() override; }; class D4NFilterWriter : public FilterWriter { private: D4NFilterDriver* filter; const DoutPrefixProvider* save_dpp; bool atomic; public: D4NFilterWriter(std::unique_ptr<Writer> _next, D4NFilterDriver* _filter, Object* _obj, const DoutPrefixProvider* _dpp) : FilterWriter(std::move(_next), _obj), filter(_filter), save_dpp(_dpp), atomic(false) {} D4NFilterWriter(std::unique_ptr<Writer> _next, D4NFilterDriver* _filter, Object* _obj, const DoutPrefixProvider* _dpp, bool _atomic) : FilterWriter(std::move(_next), _obj), filter(_filter), save_dpp(_dpp), atomic(_atomic) {} virtual ~D4NFilterWriter() = default; virtual int prepare(optional_yield y); virtual int process(bufferlist&& data, uint64_t offset) 
override; virtual int complete(size_t accounted_size, const std::string& etag, ceph::real_time *mtime, ceph::real_time set_mtime, std::map<std::string, bufferlist>& attrs, ceph::real_time delete_at, const char *if_match, const char *if_nomatch, const std::string *user_data, rgw_zone_set *zones_trace, bool *canceled, optional_yield y) override; bool is_atomic() { return atomic; }; const DoutPrefixProvider* dpp() { return save_dpp; } }; } } // namespace rgw::sal
7,876
38.385
136
h
null
ceph-main/src/rgw/driver/daos/README.md
# DAOS Standalone RADOS Gateway (RGW) on [DAOS](http://daos.io/) (Experimental) ## CMake Option Add below cmake option ```bash -DWITH_RADOSGW_DAOS=ON ``` ## Build ```bash cd build ninja [vstart] ``` ## Running Test cluster Edit ceph.conf to add below option ```conf [client] rgw backend store = daos ``` Restart vstart cluster or just RGW server ```bash [..] RGW=1 ../src/vstart.sh -d ``` The above configuration brings up an RGW server on DAOS. ## Creating a test user To create a `testid` user to be used for s3 operations, use the following command: ```bash local akey='0555b35654ad1656d804' local skey='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==' radosgw-admin user create --uid testid \ --access-key $akey --secret $skey \ --display-name 'M. Tester' --email [email protected] --no-mon-config ```
879
17.333333
83
md
null
ceph-main/src/rgw/driver/dbstore/README.md
# DBStore Standalone Rados Gateway (RGW) on DBStore (Experimental) ## CMake Option Add below cmake option (enabled by default) -DWITH_RADOSGW_DBSTORE=ON ## Build cd build ninja [vstart] ## Running Test cluster Edit ceph.conf to add below option [client] rgw backend store = dbstore rgw config store = dbstore Start vstart cluster MON=1 RGW=1 ../src/vstart.sh -o rgw_backend_store=dbstore -o rgw_config_store=dbstore -n -d The above vstart command brings up RGW server on dbstore. It creates default zonegroup, zone and few default users (eg., testid) to be used for s3 operations. `radosgw-admin` can be used to create and remove other users, zonegroups and zones. By default, dbstore creates .db file *'/var/lib/ceph/radosgw/dbstore-default_ns.db'* to store the data and *'/var/lib/ceph/radosgw/dbstore-config.db'* file to store the configuration. This can be configured using below options in ceph.conf [client] dbstore db dir = <path for the directory for storing the db backend store data> dbstore db name prefix = <prefix to the file names created by db backend store> dbstore config uri = <Config database URI. URIs beginning with file: refer to local files opened with SQLite.> ## DBStore Unit Tests To execute DBStore unit test cases (using Gtest framework), from build directory ninja unittest_dbstore_tests ./bin/unittest_dbstore_tests [logfile] [loglevel] (default logfile: rgw_dbstore_tests.log, loglevel: 20) ninja unittest_dbstore_mgr_tests ./bin/unittest_dbstore_mgr_tests To execute Sample test file ninja src/rgw/driver/dbstore/install ./bin/dbstore-bin [logfile] [loglevel] (default logfile: rgw_dbstore_bin.log, loglevel: 20)
1,764
30.517857
239
md
null
ceph-main/src/rgw/driver/dbstore/dbstore_main.cc
#include <stdio.h> #include <sqlite3.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include "dbstore_mgr.h" #include <dbstore.h> #include <dbstore_log.h> using namespace std; using namespace rgw::store; using DB = rgw::store::DB; struct thr_args { DB *dbs; int thr_id; }; void* process(void *arg) { struct thr_args *t_args = (struct thr_args*)arg; DB *db = t_args->dbs; int thr_id = t_args->thr_id; int ret = -1; cout<<"Entered thread:"<<thr_id<<"\n"; string user1 = "User1"; string bucketa = "rgw"; string objecta1 = "bugfixing"; string objecta2 = "zipper"; string bucketb = "gluster"; string objectb1 = "bugfixing"; string objectb2 = "delegations"; string user2 = "User2"; string bucketc = "qe"; string objectc1 = "rhhi"; string objectc2 = "cns"; DBOpParams params = {}; const DoutPrefixProvider *dpp = db->get_def_dpp(); db->InitializeParams(dpp, &params); params.op.user.uinfo.display_name = user1; params.op.user.uinfo.user_id.tenant = "tenant"; params.op.user.uinfo.user_id.id = user1; params.op.user.uinfo.suspended = 123; params.op.user.uinfo.max_buckets = 456; params.op.user.uinfo.placement_tags.push_back("tags1"); params.op.user.uinfo.placement_tags.push_back("tags2"); RGWAccessKey k1("id1", "key1"); RGWAccessKey k2("id2", "key2"); params.op.user.uinfo.access_keys.insert(make_pair("key1", k1)); params.op.user.uinfo.access_keys.insert(make_pair("key2", k2)); ret = db->ProcessOp(dpp, "InsertUser", &params); cout << "InsertUser return value: " << ret << "\n"; DBOpParams params2 = {}; params.op.user.uinfo.user_id.tenant = "tenant2"; db->InitializeParams(dpp, &params2); params2.op.user.uinfo.display_name = user1; ret = db->ProcessOp(dpp, "GetUser", &params2); cout << "GetUser return value: " << ret << "\n"; cout << "tenant: " << params2.op.user.uinfo.user_id.tenant << "\n"; cout << "suspended: " << (int)params2.op.user.uinfo.suspended << "\n"; list<string>::iterator it = params2.op.user.uinfo.placement_tags.begin(); while (it != 
params2.op.user.uinfo.placement_tags.end()) { cout << "list = " << *it << "\n"; it++; } map<string, RGWAccessKey>::iterator it2 = params2.op.user.uinfo.access_keys.begin(); while (it2 != params2.op.user.uinfo.access_keys.end()) { cout << "keys = " << it2->first << "\n"; RGWAccessKey k = it2->second; cout << "id = " << k.id << ", keys = " << k.key << "\n"; it2++; } params.op.bucket.info.bucket.name = bucketa; db->ProcessOp(dpp, "InsertBucket", &params); params.op.user.uinfo.display_name = user2; params.op.user.uinfo.user_id.id = user2; db->ProcessOp(dpp, "InsertUser", &params); params.op.bucket.info.bucket.name = bucketb; db->ProcessOp(dpp, "InsertBucket", &params); db->ProcessOp(dpp, "GetUser", &params); db->ProcessOp(dpp, "GetBucket", &params); db->ListAllUsers(dpp, &params); db->ListAllBuckets(dpp, &params); params.op.bucket.info.bucket.name = bucketb; db->ProcessOp(dpp, "RemoveBucket", &params); params.op.user.uinfo.user_id.id = user2; db->ProcessOp(dpp, "RemoveUser", &params); db->ListAllUsers(dpp, &params); db->ListAllBuckets(dpp, &params); cout<<"Exiting thread:"<<thr_id<<"\n"; return 0; } int main(int argc, char *argv[]) { string tenant = "Redhat"; string logfile = "rgw_dbstore_bin.log"; int loglevel = 20; DBStoreManager *dbsm; DB *dbs; int rc = 0, tnum = 0; void *res; pthread_attr_t attr; int num_thr = 2; pthread_t threads[num_thr]; struct thr_args t_args[num_thr]; cout << "loglevel " << loglevel << "\n"; // format: ./dbstore-bin logfile loglevel if (argc == 3) { logfile = argv[1]; loglevel = (atoi)(argv[2]); cout << "loglevel set to " << loglevel << "\n"; } vector<const char*> args; auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_DAEMON, CINIT_FLAG_NO_MON_CONFIG, 1); dbsm = new DBStoreManager(cct.get(), logfile, loglevel); dbs = dbsm->getDB(tenant, true); cout<<"No. 
of threads being created = "<<num_thr<<"\n"; /* Initialize thread creation attributes */ rc = pthread_attr_init(&attr); if (rc != 0) { cout<<" error in pthread_attr_init \n"; goto out; } for (tnum = 0; tnum < num_thr; tnum++) { t_args[tnum].dbs = dbs; t_args[tnum].thr_id = tnum; rc = pthread_create((pthread_t*)&threads[tnum], &attr, &process, &t_args[tnum]); if (rc != 0) { cout<<" error in pthread_create \n"; goto out; } cout<<"Created thread (thread-id:"<<tnum<<")\n"; } /* Destroy the thread attributes object, since it is no longer needed */ rc = pthread_attr_destroy(&attr); if (rc != 0) { cout<<"error in pthread_attr_destroy \n"; } /* Now join with each thread, and display its returned value */ for (tnum = 0; tnum < num_thr; tnum++) { rc = pthread_join(threads[tnum], &res); if (rc != 0) { cout<<"error in pthread_join \n"; } else { cout<<"Joined with thread "<<tnum<<"\n"; } } out: dbsm->destroyAllHandles(); return 0; }
5,126
24.635
86
cc
null
ceph-main/src/rgw/driver/dbstore/dbstore_mgr.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "dbstore_mgr.h" #include "common/dbstore_log.h" #include <filesystem> static constexpr auto dout_subsys = ceph_subsys_rgw; using namespace std; /* Given a tenant, find and return the DBStore handle. * If not found and 'create' set to true, create one * and return */ DB *DBStoreManager::getDB (string tenant, bool create) { map<string, DB*>::iterator iter; DB *dbs = nullptr; pair<map<string, DB*>::iterator,bool> ret; if (tenant.empty()) return default_db; if (DBStoreHandles.empty()) goto not_found; iter = DBStoreHandles.find(tenant); if (iter != DBStoreHandles.end()) return iter->second; not_found: if (!create) return nullptr; dbs = createDB(tenant); return dbs; } /* Create DBStore instance */ DB *DBStoreManager::createDB(std::string tenant) { DB *dbs = nullptr; pair<map<string, DB*>::iterator,bool> ret; const auto& db_path = g_conf().get_val<std::string>("dbstore_db_dir"); const auto& db_name = g_conf().get_val<std::string>("dbstore_db_name_prefix") + "-" + tenant; auto db_full_path = std::filesystem::path(db_path) / db_name; ldout(cct, 0) << "DB initialization full db_path("<<db_full_path<<")" << dendl; /* Create the handle */ #ifdef SQLITE_ENABLED dbs = new SQLiteDB(db_full_path.string(), cct); #else dbs = new DB(db_full_path.string(), cct); #endif /* API is DB::Initialize(string logfile, int loglevel); * If none provided, by default write in to dbstore.log file * created in current working directory with loglevel L_EVENT. * XXX: need to align these logs to ceph location */ if (dbs->Initialize("", -1) < 0) { ldout(cct, 0) << "DB initialization failed for tenant("<<tenant<<")" << dendl; delete dbs; return nullptr; } /* XXX: Do we need lock to protect this map? 
*/ ret = DBStoreHandles.insert(pair<string, DB*>(tenant, dbs)); /* * Its safe to check for already existing entry (just * incase other thread raced and created the entry) */ if (ret.second == false) { /* Entry already created by another thread */ delete dbs; dbs = ret.first->second; } return dbs; } void DBStoreManager::deleteDB(string tenant) { map<string, DB*>::iterator iter; DB *dbs = nullptr; if (tenant.empty() || DBStoreHandles.empty()) return; /* XXX: Check if we need to perform this operation under a lock */ iter = DBStoreHandles.find(tenant); if (iter == DBStoreHandles.end()) return; dbs = iter->second; DBStoreHandles.erase(iter); dbs->Destroy(dbs->get_def_dpp()); delete dbs; return; } void DBStoreManager::deleteDB(DB *dbs) { if (!dbs) return; (void)deleteDB(dbs->getDBname()); } void DBStoreManager::destroyAllHandles(){ map<string, DB*>::iterator iter; DB *dbs = nullptr; if (DBStoreHandles.empty()) return; for (iter = DBStoreHandles.begin(); iter != DBStoreHandles.end(); ++iter) { dbs = iter->second; dbs->Destroy(dbs->get_def_dpp()); delete dbs; } DBStoreHandles.clear(); return; }
3,136
21.248227
95
cc
null
ceph-main/src/rgw/driver/dbstore/dbstore_mgr.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <map> #include <cerrno> #include <cstdlib> #include <string> #include <cstdio> #include <iostream> #include <vector> #include "common/ceph_context.h" #include "common/dbstore.h" #include "sqlite/sqliteDB.h" using namespace rgw::store; using DB = rgw::store::DB; /* XXX: Should be a dbstore config option */ const static std::string default_tenant = "default_ns"; class DBStoreManager { private: std::map<std::string, DB*> DBStoreHandles; DB *default_db = nullptr; CephContext *cct; public: DBStoreManager(CephContext *_cct): DBStoreHandles() { cct = _cct; default_db = createDB(default_tenant); }; DBStoreManager(CephContext *_cct, std::string logfile, int loglevel): DBStoreHandles() { /* No ceph context. Create one with log args provided */ cct = _cct; cct->_log->set_log_file(logfile); cct->_log->reopen_log_file(); cct->_conf->subsys.set_log_level(ceph_subsys_rgw, loglevel); default_db = createDB(default_tenant); }; ~DBStoreManager() { destroyAllHandles(); }; /* XXX: TBD based on testing * 1) Lock to protect DBStoreHandles map. * 2) Refcount of each DBStore to protect from * being deleted while using it. */ DB* getDB () { return default_db; }; DB* getDB (std::string tenant, bool create); DB* createDB (std::string tenant); void deleteDB (std::string tenant); void deleteDB (DB* db); void destroyAllHandles(); };
1,524
25.754386
90
h
null
ceph-main/src/rgw/driver/dbstore/common/connection_pool.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <concepts> #include <condition_variable> #include <memory> #include <mutex> #include <boost/circular_buffer.hpp> #include "common/dout.h" namespace rgw::dbstore { template <typename Connection> class ConnectionHandle; /// A thread-safe base class that manages a fixed-size pool of generic database /// connections and supports the reclamation of ConnectionHandles. This class /// is the subset of ConnectionPool which doesn't depend on the Factory type. template <typename Connection> class ConnectionPoolBase { public: ConnectionPoolBase(std::size_t max_connections) : connections(max_connections) {} private: friend class ConnectionHandle<Connection>; // TODO: the caller may detect a connection error that prevents the connection // from being reused. allow them to indicate these errors here void put(std::unique_ptr<Connection> connection) { auto lock = std::scoped_lock{mutex}; connections.push_back(std::move(connection)); if (connections.size() == 1) { // was empty cond.notify_one(); } } protected: std::mutex mutex; std::condition_variable cond; boost::circular_buffer<std::unique_ptr<Connection>> connections; }; /// Handle to a database connection borrowed from the pool. Automatically /// returns the connection to its pool on the handle's destruction. 
template <typename Connection> class ConnectionHandle { ConnectionPoolBase<Connection>* pool = nullptr; std::unique_ptr<Connection> conn; public: ConnectionHandle() noexcept = default; ConnectionHandle(ConnectionPoolBase<Connection>* pool, std::unique_ptr<Connection> conn) noexcept : pool(pool), conn(std::move(conn)) {} ~ConnectionHandle() { if (conn) { pool->put(std::move(conn)); } } ConnectionHandle(ConnectionHandle&&) = default; ConnectionHandle& operator=(ConnectionHandle&& o) noexcept { if (conn) { pool->put(std::move(conn)); } conn = std::move(o.conn); pool = o.pool; return *this; } explicit operator bool() const noexcept { return static_cast<bool>(conn); } Connection& operator*() const noexcept { return *conn; } Connection* operator->() const noexcept { return conn.get(); } Connection* get() const noexcept { return conn.get(); } }; // factory_of concept requires the function signature: // F(const DoutPrefixProvider*) -> std::unique_ptr<T> template <typename F, typename T> concept factory_of = requires (F factory, const DoutPrefixProvider* dpp) { { factory(dpp) } -> std::same_as<std::unique_ptr<T>>; requires std::move_constructible<F>; }; /// Generic database connection pool that enforces a limit on open connections. template <typename Connection, factory_of<Connection> Factory> class ConnectionPool : public ConnectionPoolBase<Connection> { public: ConnectionPool(Factory factory, std::size_t max_connections) : ConnectionPoolBase<Connection>(max_connections), factory(std::move(factory)) {} /// Borrow a connection from the pool. If all existing connections are in use, /// use the connection factory to create another one. If we've reached the /// limit on open connections, wait on a condition variable for the next one /// returned to the pool. 
auto get(const DoutPrefixProvider* dpp) -> ConnectionHandle<Connection> { auto lock = std::unique_lock{this->mutex}; std::unique_ptr<Connection> conn; if (!this->connections.empty()) { // take an existing connection conn = std::move(this->connections.front()); this->connections.pop_front(); } else if (total < this->connections.capacity()) { // add another connection to the pool conn = factory(dpp); ++total; } else { // wait for the next put() // TODO: support optional_yield ldpp_dout(dpp, 4) << "ConnectionPool waiting on a connection" << dendl; this->cond.wait(lock, [&] { return !this->connections.empty(); }); ldpp_dout(dpp, 4) << "ConnectionPool done waiting" << dendl; conn = std::move(this->connections.front()); this->connections.pop_front(); } return {this, std::move(conn)}; } private: Factory factory; std::size_t total = 0; }; } // namespace rgw::dbstore
4,665
30.527027
80
h
null
ceph-main/src/rgw/driver/dbstore/common/dbstore.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "dbstore.h" using namespace std; namespace rgw { namespace store { map<string, class ObjectOp*> DB::objectmap = {}; map<string, class ObjectOp*> DB::getObjectMap() { return DB::objectmap; } int DB::Initialize(string logfile, int loglevel) { int ret = -1; const DoutPrefixProvider *dpp = get_def_dpp(); if (!cct) { cout << "Failed to Initialize. No ceph Context \n"; return -1; } if (loglevel > 0) { cct->_conf->subsys.set_log_level(ceph_subsys_rgw, loglevel); } if (!logfile.empty()) { cct->_log->set_log_file(logfile); cct->_log->reopen_log_file(); } db = openDB(dpp); if (!db) { ldpp_dout(dpp, 0) <<"Failed to open database " << dendl; return ret; } ret = InitializeDBOps(dpp); if (ret) { ldpp_dout(dpp, 0) <<"InitializeDBOps failed " << dendl; closeDB(dpp); db = NULL; return ret; } ldpp_dout(dpp, 0) << "DB successfully initialized - name:" \ << db_name << "" << dendl; return ret; } int DB::createGC(const DoutPrefixProvider *dpp) { int ret = 0; /* create gc thread */ gc_worker = std::make_unique<DB::GC>(dpp, this); gc_worker->create("db_gc"); return ret; } int DB::stopGC() { if (gc_worker) { gc_worker->signal_stop(); gc_worker->join(); } return 0; } int DB::Destroy(const DoutPrefixProvider *dpp) { if (!db) return 0; stopGC(); closeDB(dpp); ldpp_dout(dpp, 20)<<"DB successfully destroyed - name:" \ <<db_name << dendl; return 0; } std::shared_ptr<class DBOp> DB::getDBOp(const DoutPrefixProvider *dpp, std::string_view Op, const DBOpParams *params) { if (!Op.compare("InsertUser")) return dbops.InsertUser; if (!Op.compare("RemoveUser")) return dbops.RemoveUser; if (!Op.compare("GetUser")) return dbops.GetUser; if (!Op.compare("InsertBucket")) return dbops.InsertBucket; if (!Op.compare("UpdateBucket")) return dbops.UpdateBucket; if (!Op.compare("RemoveBucket")) return dbops.RemoveBucket; if (!Op.compare("GetBucket")) return dbops.GetBucket; if 
(!Op.compare("ListUserBuckets")) return dbops.ListUserBuckets; if (!Op.compare("InsertLCEntry")) return dbops.InsertLCEntry; if (!Op.compare("RemoveLCEntry")) return dbops.RemoveLCEntry; if (!Op.compare("GetLCEntry")) return dbops.GetLCEntry; if (!Op.compare("ListLCEntries")) return dbops.ListLCEntries; if (!Op.compare("InsertLCHead")) return dbops.InsertLCHead; if (!Op.compare("RemoveLCHead")) return dbops.RemoveLCHead; if (!Op.compare("GetLCHead")) return dbops.GetLCHead; /* Object Operations */ map<string, class ObjectOp*>::iterator iter; class ObjectOp* Ob; { const std::lock_guard<std::mutex> lk(mtx); iter = DB::objectmap.find(params->op.bucket.info.bucket.name); } if (iter == DB::objectmap.end()) { ldpp_dout(dpp, 30)<<"No objectmap found for bucket: " \ <<params->op.bucket.info.bucket.name << dendl; /* not found */ return nullptr; } Ob = iter->second; if (!Op.compare("PutObject")) return Ob->PutObject; if (!Op.compare("DeleteObject")) return Ob->DeleteObject; if (!Op.compare("GetObject")) return Ob->GetObject; if (!Op.compare("UpdateObject")) return Ob->UpdateObject; if (!Op.compare("ListBucketObjects")) return Ob->ListBucketObjects; if (!Op.compare("ListVersionedObjects")) return Ob->ListVersionedObjects; if (!Op.compare("PutObjectData")) return Ob->PutObjectData; if (!Op.compare("UpdateObjectData")) return Ob->UpdateObjectData; if (!Op.compare("GetObjectData")) return Ob->GetObjectData; if (!Op.compare("DeleteObjectData")) return Ob->DeleteObjectData; if (!Op.compare("DeleteStaleObjectData")) return Ob->DeleteStaleObjectData; return nullptr; } int DB::objectmapInsert(const DoutPrefixProvider *dpp, string bucket, class ObjectOp* ptr) { map<string, class ObjectOp*>::iterator iter; class ObjectOp *Ob; const std::lock_guard<std::mutex> lk(mtx); iter = DB::objectmap.find(bucket); if (iter != DB::objectmap.end()) { // entry already exists // return success or replace it or // return error ? 
// // return success for now & delete the newly allocated ptr ldpp_dout(dpp, 30)<<"Objectmap entry already exists for bucket("\ <<bucket<<"). Not inserted " << dendl; delete ptr; return 0; } Ob = (class ObjectOp*) ptr; Ob->InitializeObjectOps(getDBname(), dpp); DB::objectmap.insert(pair<string, class ObjectOp*>(bucket, Ob)); return 0; } int DB::objectmapDelete(const DoutPrefixProvider *dpp, string bucket) { map<string, class ObjectOp*>::iterator iter; const std::lock_guard<std::mutex> lk(mtx); iter = DB::objectmap.find(bucket); if (iter == DB::objectmap.end()) { // entry doesn't exist // return success or return error ? // return success for now ldpp_dout(dpp, 20)<<"Objectmap entry for bucket("<<bucket<<") " <<"doesnt exist to delete " << dendl; return 0; } DB::objectmap.erase(iter); return 0; } int DB::InitializeParams(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; if (!params) goto out; params->cct = cct; //reset params here params->user_table = user_table; params->bucket_table = bucket_table; params->quota_table = quota_table; params->lc_entry_table = lc_entry_table; params->lc_head_table = lc_head_table; ret = 0; out: return ret; } int DB::ProcessOp(const DoutPrefixProvider *dpp, std::string_view Op, DBOpParams *params) { int ret = -1; shared_ptr<class DBOp> db_op; db_op = getDBOp(dpp, Op, params); if (!db_op) { ldpp_dout(dpp, 0)<<"No db_op found for Op("<<Op<<")" << dendl; return ret; } ret = db_op->Execute(dpp, params); if (ret) { ldpp_dout(dpp, 0)<<"In Process op Execute failed for fop(" << Op << ")" << dendl; } else { ldpp_dout(dpp, 20)<<"Successfully processed fop(" << Op << ")" << dendl; } return ret; } int DB::get_user(const DoutPrefixProvider *dpp, const std::string& query_str, const std::string& query_str_val, RGWUserInfo& uinfo, map<string, bufferlist> *pattrs, RGWObjVersionTracker *pobjv_tracker) { int ret = 0; if (query_str.empty() || query_str_val.empty()) { ldpp_dout(dpp, 0)<<"In GetUser - Invalid query(" << query_str <<"), 
query_str_val(" << query_str_val <<")" << dendl; return -1; } DBOpParams params = {}; InitializeParams(dpp, &params); params.op.query_str = query_str; // validate query_str with UserTable entries names if (query_str == "username") { params.op.user.uinfo.display_name = query_str_val; } else if (query_str == "email") { params.op.user.uinfo.user_email = query_str_val; } else if (query_str == "access_key") { RGWAccessKey k(query_str_val, ""); map<string, RGWAccessKey> keys; keys[query_str_val] = k; params.op.user.uinfo.access_keys = keys; } else if (query_str == "user_id") { params.op.user.uinfo.user_id = uinfo.user_id; } else { ldpp_dout(dpp, 0)<<"In GetUser Invalid query string :" <<query_str.c_str()<<") " << dendl; return -1; } ret = ProcessOp(dpp, "GetUser", &params); if (ret) goto out; /* Verify if its a valid user */ if (params.op.user.uinfo.access_keys.empty() || params.op.user.uinfo.user_id.id.empty()) { ldpp_dout(dpp, 0)<<"In GetUser - No user with query(" <<query_str.c_str()<<"), user_id(" << uinfo.user_id <<") found" << dendl; return -ENOENT; } uinfo = params.op.user.uinfo; if (pattrs) { *pattrs = params.op.user.user_attrs; } if (pobjv_tracker) { pobjv_tracker->read_version = params.op.user.user_version; } out: return ret; } int DB::store_user(const DoutPrefixProvider *dpp, RGWUserInfo& uinfo, bool exclusive, map<string, bufferlist> *pattrs, RGWObjVersionTracker *pobjv, RGWUserInfo* pold_info) { DBOpParams params = {}; InitializeParams(dpp, &params); int ret = 0; /* Check if the user already exists and return the old info, caller will have a use for it */ RGWUserInfo orig_info; RGWObjVersionTracker objv_tracker = {}; obj_version& obj_ver = objv_tracker.read_version; orig_info.user_id = uinfo.user_id; ret = get_user(dpp, string("user_id"), uinfo.user_id.id, orig_info, nullptr, &objv_tracker); if (!ret && obj_ver.ver) { /* already exists. 
*/ if (pold_info) { *pold_info = orig_info; } if (pobjv && (pobjv->read_version.ver != obj_ver.ver)) { /* Object version mismatch.. return ECANCELED */ ret = -ECANCELED; ldpp_dout(dpp, 0)<<"User Read version mismatch err:(" <<ret<<") " << dendl; return ret; } if (exclusive) { // return return ret; } obj_ver.ver++; } else { obj_ver.ver = 1; obj_ver.tag = "UserTAG"; } params.op.user.user_version = obj_ver; params.op.user.uinfo = uinfo; if (pattrs) { params.op.user.user_attrs = *pattrs; } ret = ProcessOp(dpp, "InsertUser", &params); if (ret) { ldpp_dout(dpp, 0)<<"store_user failed with err:(" <<ret<<") " << dendl; goto out; } ldpp_dout(dpp, 20)<<"User creation successful - userid:(" <<uinfo.user_id<<") " << dendl; if (pobjv) { pobjv->read_version = obj_ver; pobjv->write_version = obj_ver; } out: return ret; } int DB::remove_user(const DoutPrefixProvider *dpp, RGWUserInfo& uinfo, RGWObjVersionTracker *pobjv) { DBOpParams params = {}; InitializeParams(dpp, &params); int ret = 0; RGWUserInfo orig_info; RGWObjVersionTracker objv_tracker = {}; orig_info.user_id = uinfo.user_id; ret = get_user(dpp, string("user_id"), uinfo.user_id.id, orig_info, nullptr, &objv_tracker); if (ret) { return ret; } if (!ret && objv_tracker.read_version.ver) { /* already exists. */ if (pobjv && (pobjv->read_version.ver != objv_tracker.read_version.ver)) { /* Object version mismatch.. 
return ECANCELED */ ret = -ECANCELED; ldpp_dout(dpp, 0)<<"User Read version mismatch err:(" <<ret<<") " << dendl; return ret; } } params.op.user.uinfo.user_id = uinfo.user_id; ret = ProcessOp(dpp, "RemoveUser", &params); if (ret) { ldpp_dout(dpp, 0)<<"remove_user failed with err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::get_bucket_info(const DoutPrefixProvider *dpp, const std::string& query_str, const std::string& query_str_val, RGWBucketInfo& info, rgw::sal::Attrs* pattrs, ceph::real_time* pmtime, obj_version* pbucket_version) { int ret = 0; if (query_str.empty()) { // not checking for query_str_val as the query can be to fetch // entries with null values return -1; } DBOpParams params = {}; DBOpParams params2 = {}; InitializeParams(dpp, &params); if (query_str == "name") { params.op.bucket.info.bucket.name = info.bucket.name; } else { ldpp_dout(dpp, 0)<<"In GetBucket Invalid query string :" <<query_str.c_str()<<") " << dendl; return -1; } ret = ProcessOp(dpp, "GetBucket", &params); if (ret) { ldpp_dout(dpp, 0)<<"In GetBucket failed err:(" <<ret<<") " << dendl; goto out; } if (!ret && params.op.bucket.info.bucket.marker.empty()) { return -ENOENT; } info = params.op.bucket.info; if (pattrs) { *pattrs = params.op.bucket.bucket_attrs; } if (pmtime) { *pmtime = params.op.bucket.mtime; } if (pbucket_version) { *pbucket_version = params.op.bucket.bucket_version; } out: return ret; } int DB::create_bucket(const DoutPrefixProvider *dpp, const RGWUserInfo& owner, rgw_bucket& bucket, const string& zonegroup_id, const rgw_placement_rule& placement_rule, const string& swift_ver_location, const RGWQuotaInfo * pquota_info, map<std::string, bufferlist>& attrs, RGWBucketInfo& info, obj_version *pobjv, obj_version *pep_objv, real_time creation_time, rgw_bucket *pmaster_bucket, uint32_t *pmaster_num_shards, optional_yield y, bool exclusive) { /* * XXX: Simple creation for now. 
* * Referring to RGWRados::create_bucket(), * Check if bucket already exists, select_bucket_placement, * is explicit put/remove instance info needed? - should not be ideally */ DBOpParams params = {}; InitializeParams(dpp, &params); int ret = 0; /* Check if the bucket already exists and return the old info, caller will have a use for it */ RGWBucketInfo orig_info; orig_info.bucket.name = bucket.name; ret = get_bucket_info(dpp, string("name"), "", orig_info, nullptr, nullptr, nullptr); if (!ret && !orig_info.owner.id.empty() && exclusive) { /* already exists. Return the old info */ info = std::move(orig_info); return ret; } RGWObjVersionTracker& objv_tracker = info.objv_tracker; objv_tracker.read_version.clear(); if (pobjv) { objv_tracker.write_version = *pobjv; } else { objv_tracker.generate_new_write_ver(cct); } params.op.bucket.bucket_version = objv_tracker.write_version; objv_tracker.read_version = params.op.bucket.bucket_version; uint64_t bid = next_bucket_id(); string s = getDBname() + "." 
+ std::to_string(bid); bucket.marker = bucket.bucket_id = s; info.bucket = bucket; info.owner = owner.user_id; info.zonegroup = zonegroup_id; info.placement_rule = placement_rule; info.swift_ver_location = swift_ver_location; info.swift_versioning = (!swift_ver_location.empty()); info.requester_pays = false; if (real_clock::is_zero(creation_time)) { info.creation_time = ceph::real_clock::now(); } else { info.creation_time = creation_time; } if (pquota_info) { info.quota = *pquota_info; } params.op.bucket.info = info; params.op.bucket.bucket_attrs = attrs; params.op.bucket.mtime = ceph::real_time(); params.op.user.uinfo.user_id.id = owner.user_id.id; ret = ProcessOp(dpp, "InsertBucket", &params); if (ret) { ldpp_dout(dpp, 0)<<"create_bucket failed with err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::remove_bucket(const DoutPrefixProvider *dpp, const RGWBucketInfo info) { int ret = 0; DBOpParams params = {}; InitializeParams(dpp, &params); params.op.bucket.info.bucket.name = info.bucket.name; ret = ProcessOp(dpp, "RemoveBucket", &params); if (ret) { ldpp_dout(dpp, 0)<<"In RemoveBucket failed err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::list_buckets(const DoutPrefixProvider *dpp, const std::string& query_str, rgw_user& user, const string& marker, const string& end_marker, uint64_t max, bool need_stats, RGWUserBuckets *buckets, bool *is_truncated) { int ret = 0; DBOpParams params = {}; InitializeParams(dpp, &params); params.op.user.uinfo.user_id = user; params.op.bucket.min_marker = marker; params.op.bucket.max_marker = end_marker; params.op.list_max_count = max; params.op.query_str = query_str; ret = ProcessOp(dpp, "ListUserBuckets", &params); if (ret) { ldpp_dout(dpp, 0)<<"In ListUserBuckets failed err:(" <<ret<<") " << dendl; goto out; } /* need_stats: stats are already part of entries... In case they are maintained in * separate table , maybe use "Inner Join" with stats table for the query. 
*/ if (params.op.bucket.list_entries.size() == max) *is_truncated = true; for (auto& entry : params.op.bucket.list_entries) { if (!end_marker.empty() && end_marker.compare(entry.bucket.marker) <= 0) { *is_truncated = false; break; } buckets->add(std::move(entry)); } if (query_str == "all") { // userID/OwnerID may have changed. Update it. user.id = params.op.bucket.info.owner.id; } out: return ret; } int DB::update_bucket(const DoutPrefixProvider *dpp, const std::string& query_str, RGWBucketInfo& info, bool exclusive, const rgw_user* powner_id, map<std::string, bufferlist>* pattrs, ceph::real_time* pmtime, RGWObjVersionTracker* pobjv) { int ret = 0; DBOpParams params = {}; obj_version bucket_version; RGWBucketInfo orig_info; /* Check if the bucket already exists and return the old info, caller will have a use for it */ orig_info.bucket.name = info.bucket.name; params.op.bucket.info.bucket.name = info.bucket.name; ret = get_bucket_info(dpp, string("name"), "", orig_info, nullptr, nullptr, &bucket_version); if (ret) { ldpp_dout(dpp, 0)<<"Failed to read bucket info err:(" <<ret<<") " << dendl; goto out; } if (!orig_info.owner.id.empty() && exclusive) { /* already exists. 
Return the old info */ info = std::move(orig_info); return ret; } /* Verify if the objv read_ver matches current bucket version */ if (pobjv) { if (pobjv->read_version.ver != bucket_version.ver) { ldpp_dout(dpp, 0)<<"Read version mismatch err:(" <<ret<<") " << dendl; ret = -ECANCELED; goto out; } } else { pobjv = &info.objv_tracker; } InitializeParams(dpp, &params); params.op.bucket.info.bucket.name = info.bucket.name; if (powner_id) { params.op.user.uinfo.user_id.id = powner_id->id; } else { params.op.user.uinfo.user_id.id = orig_info.owner.id; } /* Update version & mtime */ params.op.bucket.bucket_version.ver = ++(bucket_version.ver); if (pmtime) { params.op.bucket.mtime = *pmtime;; } else { params.op.bucket.mtime = ceph::real_time(); } if (query_str == "attrs") { params.op.query_str = "attrs"; params.op.bucket.bucket_attrs = *pattrs; } else if (query_str == "owner") { /* Update only owner i.e, chown. * Update creation_time too */ params.op.query_str = "owner"; params.op.bucket.info.creation_time = params.op.bucket.mtime; } else if (query_str == "info") { params.op.query_str = "info"; params.op.bucket.info = info; } else { ret = -1; ldpp_dout(dpp, 0)<<"In UpdateBucket Invalid query_str : " << query_str << dendl; goto out; } ret = ProcessOp(dpp, "UpdateBucket", &params); if (ret) { ldpp_dout(dpp, 0)<<"In UpdateBucket failed err:(" <<ret<<") " << dendl; goto out; } if (pobjv) { pobjv->read_version = params.op.bucket.bucket_version; pobjv->write_version = params.op.bucket.bucket_version; } out: return ret; } /** * Get ordered listing of the objects in a bucket. * * max_p: maximum number of results to return * bucket: bucket to list contents of * prefix: only return results that match this prefix * delim: do not include results that match this string. * Any skipped results will have the matching portion of their name * inserted in common_prefixes with a "true" mark. * marker: if filled in, begin the listing with this object. 
* end_marker: if filled in, end the listing with this object. * result: the objects are put in here. * common_prefixes: if delim is filled in, any matching prefixes are * placed here. * is_truncated: if number of objects in the bucket is bigger than * max, then truncated. */ int DB::Bucket::List::list_objects(const DoutPrefixProvider *dpp, int64_t max, vector<rgw_bucket_dir_entry> *result, map<string, bool> *common_prefixes, bool *is_truncated) { int ret = 0; DB *store = target->get_store(); int64_t count = 0; std::string prev_obj; DBOpParams db_params = {}; store->InitializeParams(dpp, &db_params); db_params.op.bucket.info = target->get_bucket_info(); /* XXX: Handle whole marker? key -> name, instance, ns? */ db_params.op.obj.min_marker = params.marker.name; db_params.op.obj.max_marker = params.end_marker.name; db_params.op.obj.prefix = params.prefix + "%"; db_params.op.list_max_count = max + 1; /* +1 for next_marker */ ret = store->ProcessOp(dpp, "ListBucketObjects", &db_params); if (ret) { ldpp_dout(dpp, 0)<<"In ListBucketObjects failed err:(" <<ret<<") " << dendl; goto out; } for (auto& entry : db_params.op.obj.list_entries) { if (!params.list_versions) { if (entry.flags & rgw_bucket_dir_entry::FLAG_DELETE_MARKER) { prev_obj = entry.key.name; // skip all non-current entries and delete_marker continue; } if (entry.key.name == prev_obj) { // non current versions..skip the entry continue; } entry.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; } else { if (entry.key.name != prev_obj) { // current version entry.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; } else { entry.flags &= ~(rgw_bucket_dir_entry::FLAG_CURRENT); entry.flags |= rgw_bucket_dir_entry::FLAG_VER; } } prev_obj = entry.key.name; if (count >= max) { *is_truncated = true; next_marker.name = entry.key.name; next_marker.instance = entry.key.instance; break; } if (!params.delim.empty()) { const std::string& objname = entry.key.name; const int delim_pos = objname.find(params.delim, params.prefix.size()); if 
(delim_pos >= 0) { /* extract key -with trailing delimiter- for CommonPrefix */ const std::string& prefix_key = objname.substr(0, delim_pos + params.delim.length()); if (common_prefixes && common_prefixes->find(prefix_key) == common_prefixes->end()) { next_marker = prefix_key; (*common_prefixes)[prefix_key] = true; count++; } continue; } } if (!params.end_marker.name.empty() && params.end_marker.name.compare(entry.key.name) <= 0) { // should not include end_marker *is_truncated = false; break; } count++; result->push_back(std::move(entry)); } out: return ret; } int DB::raw_obj::InitializeParamsfromRawObj(const DoutPrefixProvider *dpp, DBOpParams* params) { int ret = 0; if (!params) return -1; params->op.bucket.info.bucket.name = bucket_name; params->op.obj.state.obj.key.name = obj_name; params->op.obj.state.obj.key.instance = obj_instance; params->op.obj.state.obj.key.ns = obj_ns; params->op.obj.obj_id = obj_id; if (multipart_part_str != "0.0") { params->op.obj.is_multipart = true; } else { params->op.obj.is_multipart = false; } params->op.obj_data.multipart_part_str = multipart_part_str; params->op.obj_data.part_num = part_num; return ret; } int DB::Object::InitializeParamsfromObject(const DoutPrefixProvider *dpp, DBOpParams* params) { int ret = 0; string bucket = bucket_info.bucket.name; if (!params) return -1; params->op.bucket.info.bucket.name = bucket; params->op.obj.state.obj = obj; params->op.obj.obj_id = obj_id; return ret; } int DB::Object::get_object_impl(const DoutPrefixProvider *dpp, DBOpParams& params) { int ret = 0; if (params.op.obj.state.obj.key.name.empty()) { /* Initialize */ store->InitializeParams(dpp, &params); InitializeParamsfromObject(dpp, &params); } ret = store->ProcessOp(dpp, "GetObject", &params); /* pick one field check if object exists */ if (!ret && !params.op.obj.state.exists) { ldpp_dout(dpp, 0)<<"Object(bucket:" << bucket_info.bucket.name << ", Object:"<< obj.key.name << ") doesn't exist" << dendl; ret = -ENOENT; } return ret; } 
int DB::Object::obj_omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist) { int ret = 0; DBOpParams params = {}; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } params.op.obj.omap[key] = val; params.op.query_str = "omap"; params.op.obj.state.mtime = real_clock::now(); ret = store->ProcessOp(dpp, "UpdateObject", &params); if (ret) { ldpp_dout(dpp, 0)<<"In UpdateObject failed err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::Object::obj_omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set<std::string>& keys, std::map<std::string, bufferlist>* vals) { int ret = 0; DBOpParams params = {}; std::map<std::string, bufferlist> omap; if (!vals) return -1; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } omap = params.op.obj.omap; for (const auto& k : keys) { (*vals)[k] = omap[k]; } out: return ret; } int DB::Object::add_mp_part(const DoutPrefixProvider *dpp, RGWUploadPartInfo info) { int ret = 0; DBOpParams params = {}; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } params.op.obj.mp_parts.push_back(info); params.op.query_str = "mp"; params.op.obj.state.mtime = real_clock::now(); ret = store->ProcessOp(dpp, "UpdateObject", &params); if (ret) { ldpp_dout(dpp, 0)<<"In UpdateObject failed err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::Object::get_mp_parts_list(const DoutPrefixProvider *dpp, std::list<RGWUploadPartInfo>& info) { int ret = 0; DBOpParams params = {}; std::map<std::string, bufferlist> omap; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } info = params.op.obj.mp_parts; out: return ret; } /* Taken from 
rgw_rados.cc */ void DB::gen_rand_obj_instance_name(rgw_obj_key *target_key) { #define OBJ_INSTANCE_LEN 32 char buf[OBJ_INSTANCE_LEN + 1]; gen_rand_alphanumeric_no_underscore(cct, buf, OBJ_INSTANCE_LEN); /* don't want it to get url escaped, no underscore for instance name due to the way we encode the raw keys */ target_key->set_instance(buf); } int DB::Object::obj_omap_get_all(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist> *m) { int ret = 0; DBOpParams params = {}; std::map<std::string, bufferlist> omap; if (!m) return -1; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } (*m) = params.op.obj.omap; out: return ret; } int DB::Object::obj_omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t max_count, std::map<std::string, bufferlist> *m, bool* pmore) { int ret = 0; DBOpParams params = {}; std::map<std::string, bufferlist> omap; map<string, bufferlist>::iterator iter; uint64_t count = 0; if (!m) return -1; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } omap = params.op.obj.omap; for (iter = omap.begin(); iter != omap.end(); ++iter) { if (iter->first < marker) continue; if ((++count) > max_count) { *pmore = true; break; } (*m)[iter->first] = iter->second; } out: return ret; } int DB::Object::set_attrs(const DoutPrefixProvider *dpp, map<string, bufferlist>& setattrs, map<string, bufferlist>* rmattrs) { int ret = 0; DBOpParams params = {}; rgw::sal::Attrs *attrs; map<string, bufferlist>::iterator iter; RGWObjState* state; store->InitializeParams(dpp, &params); InitializeParamsfromObject(dpp, &params); ret = get_state(dpp, &state, true); if (ret && !state->exists) { ldpp_dout(dpp, 0) <<"get_state failed err:(" <<ret<<")" << dendl; goto out; } /* For now lets keep it simple..rmattrs & setattrs .. 
* XXX: Check rgw_rados::set_attrs */ params.op.obj.state = *state; attrs = &params.op.obj.state.attrset; if (rmattrs) { for (iter = rmattrs->begin(); iter != rmattrs->end(); ++iter) { (*attrs).erase(iter->first); } } for (iter = setattrs.begin(); iter != setattrs.end(); ++iter) { (*attrs)[iter->first] = iter->second; } params.op.query_str = "attrs"; /* As per https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html, * the only way for users to modify object metadata is to make a copy of the object and * set the metadata. * Hence do not update mtime for any other attr changes */ ret = store->ProcessOp(dpp, "UpdateObject", &params); if (ret) { ldpp_dout(dpp, 0)<<"In UpdateObject failed err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::Object::transition(const DoutPrefixProvider *dpp, const rgw_placement_rule& rule, const real_time& mtime, uint64_t olh_epoch) { int ret = 0; DBOpParams params = {}; map<string, bufferlist> *attrset; store->InitializeParams(dpp, &params); InitializeParamsfromObject(dpp, &params); ret = store->ProcessOp(dpp, "GetObject", &params); if (ret) { ldpp_dout(dpp, 0) <<"In GetObject failed err:(" <<ret<<")" << dendl; goto out; } /* pick one field check if object exists */ if (!params.op.obj.state.exists) { ldpp_dout(dpp, 0)<<"Object(bucket:" << bucket_info.bucket.name << ", Object:"<< obj.key.name << ") doesn't exist" << dendl; return -1; } params.op.query_str = "meta"; params.op.obj.state.mtime = real_clock::now(); params.op.obj.storage_class = rule.storage_class; attrset = &params.op.obj.state.attrset; if (!rule.storage_class.empty()) { bufferlist bl; bl.append(rule.storage_class); (*attrset)[RGW_ATTR_STORAGE_CLASS] = bl; } params.op.obj.versioned_epoch = olh_epoch; // XXX: not sure if needed /* Unlike Rados, in dbstore for now, both head and tail objects * refer to same storage class */ params.op.obj.head_placement_rule = rule; params.op.obj.tail_placement.placement_rule = rule; ret = store->ProcessOp(dpp, 
"UpdateObject", &params); if (ret) { ldpp_dout(dpp, 0)<<"In UpdateObject failed err:(" <<ret<<") " << dendl; goto out; } out: return ret; } int DB::raw_obj::read(const DoutPrefixProvider *dpp, int64_t ofs, uint64_t len, bufferlist& bl) { int ret = 0; DBOpParams params = {}; db->InitializeParams(dpp, &params); InitializeParamsfromRawObj(dpp, &params); ret = db->ProcessOp(dpp, "GetObjectData", &params); if (ret) { ldpp_dout(dpp, 0)<<"In GetObjectData failed err:(" <<ret<<")" << dendl; return ret; } /* Verify if its valid obj */ if (!params.op.obj_data.size) { ret = -ENOENT; ldpp_dout(dpp, 0)<<"In GetObjectData failed err:(" <<ret<<")" << dendl; return ret; } bufferlist& read_bl = params.op.obj_data.data; unsigned copy_len; copy_len = std::min((uint64_t)read_bl.length() - ofs, len); read_bl.begin(ofs).copy(copy_len, bl); return bl.length(); } int DB::raw_obj::write(const DoutPrefixProvider *dpp, int64_t ofs, int64_t write_ofs, uint64_t len, bufferlist& bl) { int ret = 0; DBOpParams params = {}; db->InitializeParams(dpp, &params); InitializeParamsfromRawObj(dpp, &params); /* XXX: Check for chunk_size ?? 
*/ params.op.obj_data.offset = ofs; unsigned write_len = std::min((uint64_t)bl.length() - write_ofs, len); bl.begin(write_ofs).copy(write_len, params.op.obj_data.data); params.op.obj_data.size = params.op.obj_data.data.length(); params.op.obj.state.mtime = real_clock::now(); ret = db->ProcessOp(dpp, "PutObjectData", &params); if (ret) { ldpp_dout(dpp, 0)<<"In PutObjectData failed err:(" <<ret<<")" << dendl; return ret; } return write_len; } int DB::Object::list_versioned_objects(const DoutPrefixProvider *dpp, std::list<rgw_bucket_dir_entry>& list_entries) { int ret = 0; store = get_store(); DBOpParams db_params = {}; store->InitializeParams(dpp, &db_params); InitializeParamsfromObject(dpp, &db_params); db_params.op.list_max_count = MAX_VERSIONED_OBJECTS; ret = store->ProcessOp(dpp, "ListVersionedObjects", &db_params); if (ret) { ldpp_dout(dpp, 0)<<"In ListVersionedObjects failed err:(" <<ret<<") " << dendl; } else { list_entries = db_params.op.obj.list_entries; } return ret; } int DB::Object::get_obj_state(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, bool follow_olh, RGWObjState** state) { int ret = 0; DBOpParams params = {}; RGWObjState* s; if (!obj.key.instance.empty()) { /* Versionid provided. Fetch the object */ ret = get_object_impl(dpp, params); if (ret && ret != -ENOENT) { ldpp_dout(dpp, 0) <<"get_object_impl failed err:(" <<ret<<")" << dendl; goto out; } } else { /* Instance is empty. May or may not be versioned object. 
* List all the versions and read the most recent entry */ ret = list_versioned_objects(dpp, params.op.obj.list_entries); if (params.op.obj.list_entries.size() != 0) { /* Ensure its not a delete marker */ auto& ent = params.op.obj.list_entries.front(); if (ent.flags & rgw_bucket_dir_entry::FLAG_DELETE_MARKER) { ret = -ENOENT; goto out; } store->InitializeParams(dpp, &params); InitializeParamsfromObject(dpp, &params); params.op.obj.state.obj.key = ent.key; ret = get_object_impl(dpp, params); if (ret) { ldpp_dout(dpp, 0) <<"get_object_impl of versioned object failed err:(" <<ret<<")" << dendl; goto out; } } else { ret = -ENOENT; return ret; } } s = &params.op.obj.state; /* XXX: For now use state->shadow_obj to store ObjectID string */ s->shadow_obj = params.op.obj.obj_id; *state = &obj_state; **state = *s; out: return ret; } int DB::Object::get_state(const DoutPrefixProvider *dpp, RGWObjState** pstate, bool follow_olh) { return get_obj_state(dpp, bucket_info, obj, follow_olh, pstate); } int DB::Object::Read::get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest) { RGWObjState* state; int r = source->get_state(dpp, &state, true); if (r < 0) return r; if (!state->exists) return -ENOENT; if (!state->get_attr(name, dest)) return -ENODATA; return 0; } int DB::Object::Read::prepare(const DoutPrefixProvider *dpp) { DB *store = source->get_store(); CephContext *cct = store->ctx(); bufferlist etag; map<string, bufferlist>::iterator iter; RGWObjState* astate; int r = source->get_state(dpp, &astate, true); if (r < 0) return r; if (!astate->exists) { return -ENOENT; } state.obj = astate->obj; source->obj_id = astate->shadow_obj; if (params.target_obj) { *params.target_obj = state.obj; } if (params.attrs) { *params.attrs = astate->attrset; if (cct->_conf->subsys.should_gather<ceph_subsys_rgw, 20>()) { for (iter = params.attrs->begin(); iter != params.attrs->end(); ++iter) { ldpp_dout(dpp, 20) << "Read xattr rgw_rados: " << iter->first << dendl; } } } if 
(conds.if_match || conds.if_nomatch) { r = get_attr(dpp, RGW_ATTR_ETAG, etag); if (r < 0) return r; if (conds.if_match) { string if_match_str = rgw_string_unquote(conds.if_match); ldpp_dout(dpp, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-Match: " << if_match_str << dendl; if (if_match_str.compare(0, etag.length(), etag.c_str(), etag.length()) != 0) { return -ERR_PRECONDITION_FAILED; } } if (conds.if_nomatch) { string if_nomatch_str = rgw_string_unquote(conds.if_nomatch); ldpp_dout(dpp, 10) << "ETag: " << string(etag.c_str(), etag.length()) << " " << " If-NoMatch: " << if_nomatch_str << dendl; if (if_nomatch_str.compare(0, etag.length(), etag.c_str(), etag.length()) == 0) { return -ERR_NOT_MODIFIED; } } } if (params.obj_size) *params.obj_size = astate->size; if (params.lastmod) *params.lastmod = astate->mtime; return 0; } int DB::Object::Read::range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end) { if (ofs < 0) { ofs += obj_size; if (ofs < 0) ofs = 0; end = obj_size - 1; } else if (end < 0) { end = obj_size - 1; } if (obj_size > 0) { if (ofs >= (off_t)obj_size) { return -ERANGE; } if (end >= (off_t)obj_size) { end = obj_size - 1; } } return 0; } int DB::Object::Read::read(int64_t ofs, int64_t end, bufferlist& bl, const DoutPrefixProvider *dpp) { DB *store = source->get_store(); uint64_t read_ofs = ofs; uint64_t len, read_len; bufferlist read_bl; uint64_t max_chunk_size = store->get_max_chunk_size(); RGWObjState* astate; int r = source->get_state(dpp, &astate, true); if (r < 0) return r; if (!astate || !astate->exists) { return -ENOENT; } if (astate->size == 0) { end = 0; } else if (end >= (int64_t)astate->size) { end = astate->size - 1; } if (end < 0) len = 0; else len = end - ofs + 1; if (len > max_chunk_size) { len = max_chunk_size; } int head_data_size = astate->data.length(); bool reading_from_head = (ofs < head_data_size); if (reading_from_head) { if (!ofs && astate->data.length() >= len) { bl = astate->data; return bl.length(); 
} if (ofs < astate->data.length()) { unsigned copy_len = std::min((uint64_t)head_data_size - ofs, len); astate->data.begin(ofs).copy(copy_len, bl); return bl.length(); } } /* tail object */ int part_num = (ofs / max_chunk_size); /* XXX: Handle multipart_str */ raw_obj read_obj(store, source->get_bucket_info().bucket.name, astate->obj.key.name, astate->obj.key.instance, astate->obj.key.ns, source->obj_id, "0.0", part_num); read_len = len; ldpp_dout(dpp, 20) << "dbstore->read obj-ofs=" << ofs << " read_ofs=" << read_ofs << " read_len=" << read_len << dendl; // read from non head object r = read_obj.read(dpp, read_ofs, read_len, bl); if (r < 0) { return r; } return bl.length(); } static int _get_obj_iterate_cb(const DoutPrefixProvider *dpp, const DB::raw_obj& read_obj, off_t obj_ofs, off_t len, bool is_head_obj, RGWObjState* astate, void *arg) { struct db_get_obj_data* d = static_cast<struct db_get_obj_data*>(arg); return d->store->get_obj_iterate_cb(dpp, read_obj, obj_ofs, len, is_head_obj, astate, arg); } int DB::get_obj_iterate_cb(const DoutPrefixProvider *dpp, const raw_obj& read_obj, off_t obj_ofs, off_t len, bool is_head_obj, RGWObjState* astate, void *arg) { struct db_get_obj_data* d = static_cast<struct db_get_obj_data*>(arg); bufferlist bl; int r = 0; if (is_head_obj) { bl = astate->data; } else { // read from non head object raw_obj robj = read_obj; /* read entire data. 
So pass offset as '0' & len as '-1' */ r = robj.read(dpp, 0, -1, bl); if (r <= 0) { return r; } } unsigned read_ofs = 0, read_len = 0; while (read_ofs < bl.length()) { unsigned chunk_len = std::min((uint64_t)bl.length() - read_ofs, (uint64_t)len); r = d->client_cb->handle_data(bl, read_ofs, chunk_len); if (r < 0) return r; read_ofs += chunk_len; read_len += chunk_len; ldpp_dout(dpp, 20) << "dbstore->get_obj_iterate_cb obj-ofs=" << obj_ofs << " len=" << len << " chunk_len = " << chunk_len << " read_len = " << read_len << dendl; } d->offset += read_len; return read_len; } int DB::Object::Read::iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb) { DB *store = source->get_store(); const uint64_t chunk_size = store->get_max_chunk_size(); db_get_obj_data data(store, cb, ofs); int r = source->iterate_obj(dpp, source->get_bucket_info(), state.obj, ofs, end, chunk_size, _get_obj_iterate_cb, &data); if (r < 0) { ldpp_dout(dpp, 0) << "iterate_obj() failed with " << r << dendl; return r; } return 0; } int DB::Object::iterate_obj(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, off_t ofs, off_t end, uint64_t max_chunk_size, iterate_obj_cb cb, void *arg) { DB *store = get_store(); uint64_t len; RGWObjState* astate; int r = get_state(dpp, &astate, true); if (r < 0) { return r; } if (!astate->exists) { return -ENOENT; } if (end < 0) len = 0; else len = end - ofs + 1; /* XXX: Will it really help to store all parts info in astate like manifest in Rados? 
 */
  int part_num = 0;
  /* bytes served directly from the head object's inline data */
  int head_data_size = astate->data.length();

  while (ofs <= end && (uint64_t)ofs < astate->size) {
    /* part index is derived purely from the absolute offset */
    part_num = (ofs / max_chunk_size);
    uint64_t read_len = std::min(len, max_chunk_size);

    /* XXX: Handle multipart_str */
    raw_obj read_obj(store, get_bucket_info().bucket.name, astate->obj.key.name,
        astate->obj.key.instance, astate->obj.key.ns, obj_id, "0.0", part_num);
    bool reading_from_head = (ofs < head_data_size);

    r = cb(dpp, read_obj, ofs, read_len, reading_from_head, astate, arg);
    if (r <= 0) {
      return r;
    }
    /* r refers to chunk_len (no. of bytes) handled in cb */
    len -= r;
    ofs += r;
  }

  return 0;
}

/* Prepares the writer: picks an obj_id for the object being written.
 * Versioned objects reuse their instance id; otherwise a random id is
 * generated. Always returns 0. */
int DB::Object::Write::prepare(const DoutPrefixProvider* dpp)
{
  DB *store = target->get_store();
  int ret = -1;

  /* XXX: handle assume_noent */

  obj_state.obj = target->obj;

  if (target->obj_id.empty()) {
    if (!target->obj.key.instance.empty() && (target->obj.key.instance != "null")) {
      /* versioned object. Set obj_id same as versionID/instance */
      target->obj_id = target->obj.key.instance;
    } else {
      // generate obj_id
      char buf[33];
      gen_rand_alphanumeric(store->ctx(), buf, sizeof(buf) - 1);
      target->obj_id = buf;
    }
  }

  ret = 0;
  return ret;
}

/* writes tail objects */
/* Splits 'data' (written at absolute object offset 'ofs') into
 * max_chunk_size pieces and stores each piece as a separate tail raw_obj
 * row. Returns 0 on success, negative on error. */
int DB::Object::Write::write_data(const DoutPrefixProvider* dpp,
    bufferlist& data, uint64_t ofs) {
  DB *store = target->get_store();
  /* tail objects */
  /* XXX: Split into parts each of max_chunk_size. But later make tail
   * object chunk size limit to sqlite blob limit */
  int part_num = 0;

  uint64_t max_chunk_size = store->get_max_chunk_size();

  /* tail_obj ofs should be greater than max_head_size */
  if (mp_part_str == "0.0")  { // ensure not multipart meta object
    if (ofs < store->get_max_head_size()) {
      return -1;
    }
  }

  uint64_t end = data.length();
  uint64_t write_ofs = 0;
  /* as we are writing max_chunk_size at a time in sal_dbstore DBAtomicWriter::process(),
   * maybe this while loop is not needed
   */
  while (write_ofs < end) {
    part_num = (ofs / max_chunk_size);
    /* NOTE(review): 'len' is computed from the total buffer length 'end',
     * not the remaining bytes (end - write_ofs). This relies on callers
     * passing at most max_chunk_size per call — verify. */
    uint64_t len = std::min(end, max_chunk_size);

    /* XXX: Handle multipart_str */
    raw_obj write_obj(store, target->get_bucket_info().bucket.name, obj_state.obj.key.name,
        obj_state.obj.key.instance, obj_state.obj.key.ns, target->obj_id, mp_part_str, part_num);


    ldpp_dout(dpp, 20) << "dbstore->write obj-ofs=" << ofs << " write_len=" << len << dendl;

    // write into non head object
    int r = write_obj.write(dpp, ofs, write_ofs, len, data);
    if (r < 0) {
      return r;
    }
    /* r refers to chunk_len (no.
of bytes) handled in raw_obj::write */
    len -= r;
    ofs += r;
    write_ofs += r;
  }

  return 0;
}

/* Write metadata & head object data */
/* Builds the full DBOpParams for the object (attrs, etag, acl, size,
 * versioning flags, inline head data) and issues the "PutObject" op.
 * Interface mirrors the RADOS store's _do_write_meta; assume_noent and
 * modify_tail are currently not acted upon here. */
int DB::Object::Write::_do_write_meta(const DoutPrefixProvider *dpp,
    uint64_t size, uint64_t accounted_size,
    map<string, bufferlist>& attrs,
    bool assume_noent, bool modify_tail)
{
  DB *store = target->get_store();

  RGWObjState* state = &obj_state;
  map<string, bufferlist> *attrset;
  DBOpParams params = {};
  int ret = 0;
  string etag;
  string content_type;
  bufferlist acl_bl;
  string storage_class;

  map<string, bufferlist>::iterator iter;

  store->InitializeParams(dpp, &params);
  target->InitializeParamsfromObject(dpp, &params);

  obj_state = params.op.obj.state;

  if (real_clock::is_zero(meta.set_mtime)) {
    meta.set_mtime = real_clock::now();
  }

  attrset = &state->attrset;
  if (target->bucket_info.obj_lock_enabled() && target->bucket_info.obj_lock.has_rule()) { // && meta.flags == PUT_OBJ_CREATE) {
    /* apply the bucket's default object-lock retention when the caller
     * did not supply one explicitly */
    auto iter = attrs.find(RGW_ATTR_OBJECT_RETENTION);
    if (iter == attrs.end()) {
      real_time lock_until_date = target->bucket_info.obj_lock.get_lock_until_date(meta.set_mtime);
      string mode = target->bucket_info.obj_lock.get_mode();
      RGWObjectRetention obj_retention(mode, lock_until_date);
      bufferlist bl;
      obj_retention.encode(bl);
      (*attrset)[RGW_ATTR_OBJECT_RETENTION] = bl;
    }
  }

  state->mtime = meta.set_mtime;

  if (meta.data) {
    /* if we want to overwrite the data, we also want to overwrite the
       xattrs, so just remove the object */
    params.op.obj.head_data = *meta.data;
  }

  if (meta.rmattrs) {
    for (iter = meta.rmattrs->begin(); iter != meta.rmattrs->end(); ++iter) {
      const string& name = iter->first;
      (*attrset).erase(name.c_str());
    }
  }

  if (meta.manifest) {
    storage_class = meta.manifest->get_tail_placement().placement_rule.storage_class;

    /* remove existing manifest attr */
    iter = attrs.find(RGW_ATTR_MANIFEST);
    if (iter != attrs.end())
      attrs.erase(iter);

    bufferlist bl;
    encode(*meta.manifest, bl);
    (*attrset)[RGW_ATTR_MANIFEST] = bl;
  }

  /* fold caller-supplied attrs into the stored attrset, pulling out the
   * few attrs that are also persisted as dedicated columns */
  for (iter = attrs.begin(); iter != attrs.end(); ++iter) {
    const string& name = iter->first;
    bufferlist& bl = iter->second;

    if (!bl.length())
      continue;

    (*attrset)[name.c_str()] = bl;

    if (name.compare(RGW_ATTR_ETAG) == 0) {
      etag = rgw_bl_str(bl);
      params.op.obj.etag = etag;
    } else if (name.compare(RGW_ATTR_CONTENT_TYPE) == 0) {
      content_type = rgw_bl_str(bl);
    } else if (name.compare(RGW_ATTR_ACL) == 0) {
      acl_bl = bl;
    }
  }

  if (!storage_class.empty()) {
    bufferlist bl;
    bl.append(storage_class);
    (*attrset)[RGW_ATTR_STORAGE_CLASS] = bl;
  }

  params.op.obj.state = *state ;
  params.op.obj.state.exists = true;
  params.op.obj.state.size = size;
  params.op.obj.state.accounted_size = accounted_size;
  params.op.obj.owner = target->get_bucket_info().owner.id;
  params.op.obj.category = meta.category;

  if (meta.mtime) {
    *meta.mtime = meta.set_mtime;
  }

  params.op.query_str = "meta";
  params.op.obj.obj_id = target->obj_id;

  /* Check if versioned */
  bool is_versioned = !target->obj.key.instance.empty() && (target->obj.key.instance != "null");
  params.op.obj.is_versioned = is_versioned;

  if (is_versioned && (params.op.obj.category == RGWObjCategory::Main)) {
    /* versioned object */
    params.op.obj.flags |= rgw_bucket_dir_entry::FLAG_VER;
  }

  ret = store->ProcessOp(dpp, "PutObject", &params);
  if (ret) {
    ldpp_dout(dpp, 0)<<"In PutObject failed err:(" <<ret<<")" << dendl;
    goto out;
  }

out:
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: do_write_meta returned ret=" << ret << dendl;
  }

  /* NOTE(review): 'canceled' is set to true unconditionally — also on the
   * success path. That looks suspicious (callers typically expect canceled
   * only on failure); verify against sal_dbstore callers. */
  meta.canceled = true;

  return ret;
}

/* Public entry point: writes object metadata (assume_noent is always
 * false here; see the XXX in _do_write_meta). */
int DB::Object::Write::write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size,
    map<string, bufferlist>& attrs)
{
  bool assume_noent = false;
  /* handle assume_noent */
  int r = _do_write_meta(dpp, size, accounted_size, attrs, assume_noent, meta.modify_tail);

  return r;
}

/* Deletes an object, honoring the bucket's versioning state:
 * explicit version-id deletes that version; otherwise delete markers are
 * created as appropriate (see inline case comments below). */
int DB::Object::Delete::delete_obj(const DoutPrefixProvider *dpp) {
  int ret = 0;
  DBOpParams del_params = {};
  bool versioning_enabled = ((params.versioning_status & BUCKET_VERSIONED) == BUCKET_VERSIONED);
  bool versioning_suspended = ((params.versioning_status
& BUCKET_VERSIONS_SUSPENDED) == BUCKET_VERSIONS_SUSPENDED);
  bool regular_obj = true;
  std::string versionid = target->obj.key.instance;

  ret = target->get_object_impl(dpp, del_params);
  if (ret < 0 && ret != -ENOENT) {
    ldpp_dout(dpp, 0)<<"GetObject during delete failed err:(" <<ret<<")" << dendl;
    return ret;
  }

  regular_obj = (del_params.op.obj.category == RGWObjCategory::Main);
  if (!ret) {
    if (!versionid.empty()) { // version-id is provided
      ret = delete_obj_impl(dpp, del_params);
      return ret;
    } else { // version-id is empty..
      /*
       * case: bucket_versioned
       *    create_delete_marker;
       * case: bucket_suspended
       *    delete entry
       *    create delete marker with version-id null;
       * default:
       *    just delete the entry
       */
      if (versioning_suspended && regular_obj) {
        /* NOTE(review): ret from delete_obj_impl() is immediately
         * overwritten by create_dm() — a delete failure would be masked
         * by a successful marker creation. Verify intent. */
        ret = delete_obj_impl(dpp, del_params);
        ret = create_dm(dpp, del_params);
      } else if (versioning_enabled && regular_obj) {
        ret = create_dm(dpp, del_params);
      } else {
        ret = delete_obj_impl(dpp, del_params);
      }
    }
  } else { // ret == -ENOENT
    /* case: VersionID given
     *     return -ENOENT
     * else: // may or may not be versioned object
     *     Listversionedobjects
     *     if (list_entries.empty()) {
     *         nothing to do..return ENOENT
     *     } else {
     *         read top entry
     *         if (top.flags | FLAG_DELETE_MARKER) {
     *            // nothing to do
     *            return -ENOENT;
     *         }
     *         if (bucket_versioned) {
     *            // create delete marker with new version-id
     *         } else if (bucket_suspended) {
     *            // create delete marker with version-id null
     *         }
     *         bucket cannot be in unversioned state post having versions
     *     }
     */
    if (!versionid.empty()) {
      return -ENOENT;
    }
    ret = target->list_versioned_objects(dpp, del_params.op.obj.list_entries);
    if (ret) {
      ldpp_dout(dpp, 0)<<"ListVersionedObjects failed err:(" <<ret<<")" << dendl;
      return ret;
    }
    if (del_params.op.obj.list_entries.empty()) {
      return -ENOENT;
    }
    /* inspect the newest version to decide whether a marker is needed */
    auto &ent = del_params.op.obj.list_entries.front();
    if (ent.flags & rgw_bucket_dir_entry::FLAG_DELETE_MARKER) {
      // for now do not create another delete marker..just exit
      return 0;
    }
    ret = create_dm(dpp, del_params);
  }

  return ret;
}

/* Removes the object row, then refreshes the mtime on its (now orphaned)
 * tail objects so the GC grace period restarts — see comment below. */
int DB::Object::Delete::delete_obj_impl(const DoutPrefixProvider *dpp,
    DBOpParams& del_params) {
  int ret = 0;
  DB *store = target->get_store();

  ret = store->ProcessOp(dpp, "DeleteObject", &del_params);
  if (ret) {
    ldpp_dout(dpp, 0) << "In DeleteObject failed err:(" <<ret<<")" << dendl;
    return ret;
  }

  /* Now that tail objects are associated with objectID, they are not deleted
   * as part of this DeleteObj operation. Such tail objects (with no head object
   * in *.object.table are cleaned up later by GC thread.
   *
   * To avoid races between writes/reads & GC delete, mtime is maintained for each
   * tail object. This mtime is updated when tail object is written and also when
   * its corresponding head object is deleted (like here in this case).
   */
  DBOpParams update_params = del_params;
  update_params.op.obj.state.mtime = real_clock::now();
  ret = store->ProcessOp(dpp, "UpdateObjectData", &update_params);
  if (ret) {
    ldpp_dout(dpp, 0) << "Updating tail objects mtime failed err:(" <<ret<<")" << dendl;
  }

  return ret;
}

/*
 * a) if no versionID specified,
 *  - create a delete marker with
 *    - new version/instanceID (if bucket versioned)
 *    - null versionID (if versioning suspended)
 */
int DB::Object::Delete::create_dm(const DoutPrefixProvider *dpp,
    DBOpParams& del_params) {

  DB *store = target->get_store();
  bool versioning_suspended = ((params.versioning_status & BUCKET_VERSIONS_SUSPENDED) == BUCKET_VERSIONS_SUSPENDED);
  int ret = -1;
  /* NOTE(review): olh_params, version_id and next_params appear unused in
   * this function — candidates for removal; verify before cleaning up. */
  DBOpParams olh_params = {};
  std::string version_id;
  DBOpParams next_params = del_params;

  version_id = del_params.op.obj.state.obj.key.instance;

  DBOpParams dm_params = del_params;

  // create delete marker
  store->InitializeParams(dpp, &dm_params);
  target->InitializeParamsfromObject(dpp, &dm_params);
  dm_params.op.obj.category = RGWObjCategory::None;

  if (versioning_suspended) {
    /* suspended versioning: the marker occupies the "null" version slot */
    dm_params.op.obj.state.obj.key.instance = "null";
  } else {
    /* versioned bucket: mint a fresh instance id for the marker */
    store->gen_rand_obj_instance_name(&dm_params.op.obj.state.obj.key);
    dm_params.op.obj.obj_id = dm_params.op.obj.state.obj.key.instance;
  }

  dm_params.op.obj.flags |= (rgw_bucket_dir_entry::FLAG_DELETE_MARKER);

  ret = store->ProcessOp(dpp, "PutObject", &dm_params);
  if (ret) {
    ldpp_dout(dpp, 0) << "delete_olh: failed to create delete marker - err:(" <<ret<<")" << dendl;
    return ret;
  }
  /* report the marker back to the caller */
  result.delete_marker = true;
  result.version_id = dm_params.op.obj.state.obj.key.instance;
  return ret;
}

/* Fetches the lifecycle entry for 'marker' from LC index 'oid'.
 * On success '*entry' is populated only when a row was actually found
 * (detected via a non-zero start_time). */
int DB::get_entry(const std::string& oid, const std::string& marker,
    std::unique_ptr<rgw::sal::Lifecycle::LCEntry>* entry)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_entry.index = oid;
  params.op.lc_entry.entry.set_bucket(marker);

  params.op.query_str = "get_entry";
  ret = ProcessOp(dpp, "GetLCEntry", &params);

  if (ret) {
    ldpp_dout(dpp, 0)<<"In GetLCEntry failed err:(" <<ret<<") " << dendl;
    goto out;
  }

  /* NOTE: '!t == 0' parses as '(!t) == 0', i.e. start_time != 0 —
   * correct, but easy to misread */
  if (!params.op.lc_entry.entry.get_start_time() == 0) { //ensure entry found
    rgw::sal::Lifecycle::LCEntry* e;
    e = new rgw::sal::StoreLifecycle::StoreLCEntry(params.op.lc_entry.entry);
    if (!e) {
      ret = -ENOMEM;
      goto out;
    }
    entry->reset(e);
  }

out:
  return ret;
}

/* Same as get_entry() but asks the backend for the entry *after* 'marker'
 * (query_str "get_next_entry"); the two functions are otherwise identical. */
int DB::get_next_entry(const std::string& oid, const std::string& marker,
    std::unique_ptr<rgw::sal::Lifecycle::LCEntry>* entry)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_entry.index = oid;
  params.op.lc_entry.entry.set_bucket(marker);

  params.op.query_str = "get_next_entry";
  ret = ProcessOp(dpp, "GetLCEntry", &params);

  if (ret) {
    ldpp_dout(dpp, 0)<<"In GetLCEntry failed err:(" <<ret<<") " << dendl;
    goto out;
  }

  /* see precedence note in get_entry(): this means start_time != 0 */
  if (!params.op.lc_entry.entry.get_start_time() == 0) { //ensure entry found
    rgw::sal::Lifecycle::LCEntry* e;
    e = new rgw::sal::StoreLifecycle::StoreLCEntry(params.op.lc_entry.entry);
    if (!e) {
      ret = -ENOMEM;
      goto out;
    }
    entry->reset(e);
  }

out:
  return ret;
}

/* Inserts (or replaces) a lifecycle entry in LC index 'oid'. */
int DB::set_entry(const std::string& oid, rgw::sal::Lifecycle::LCEntry& entry)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_entry.index = oid;
  params.op.lc_entry.entry = entry;

  ret = ProcessOp(dpp, "InsertLCEntry", &params);
  if (ret) {
    ldpp_dout(dpp, 0)<<"In InsertLCEntry failed err:(" <<ret<<") " << dendl;
    goto out;
  }

out:
  return ret;
}

/* Lists up to 'max_entries' lifecycle entries of index 'oid' starting
 * after 'marker'. */
int DB::list_entries(const std::string& oid, const std::string& marker,
    uint32_t max_entries, std::vector<std::unique_ptr<rgw::sal::Lifecycle::LCEntry>>& entries)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  entries.clear();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_entry.index = oid;
  params.op.lc_entry.min_marker = marker;
  params.op.list_max_count = max_entries;

  ret = ProcessOp(dpp, "ListLCEntries", &params);
  if (ret) {
    ldpp_dout(dpp, 0)<<"In ListLCEntries failed err:(" <<ret<<") " << dendl;
    goto out;
  }

  for (auto& entry : params.op.lc_entry.list_entries) {
    entries.push_back(std::make_unique<rgw::sal::StoreLifecycle::StoreLCEntry>(std::move(entry)));
  }

out:
  return ret;
}

/* Removes a lifecycle entry from LC index 'oid'. */
int DB::rm_entry(const std::string& oid, rgw::sal::Lifecycle::LCEntry& entry)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_entry.index = oid;
  params.op.lc_entry.entry = entry;

  ret = ProcessOp(dpp, "RemoveLCEntry", &params);
  if (ret) {
    ldpp_dout(dpp, 0)<<"In RemoveLCEntry failed err:(" <<ret<<") " << dendl;
    goto out;
  }

out:
  return ret;
}

/* Reads the lifecycle head record for index 'oid'. */
int DB::get_head(const std::string& oid, std::unique_ptr<rgw::sal::Lifecycle::LCHead>* head)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_head.index = oid;

  ret = ProcessOp(dpp, "GetLCHead", &params);
  if (ret) {
    ldpp_dout(dpp, 0)<<"In GetLCHead failed err:(" <<ret<<") " << dendl;
    goto out;
  }

  *head = std::make_unique<rgw::sal::StoreLifecycle::StoreLCHead>(params.op.lc_head.head);

out:
  return ret;
}

/* Stores the lifecycle head record for index 'oid'. */
int DB::put_head(const std::string& oid, rgw::sal::Lifecycle::LCHead& head)
{
  int ret = 0;
  const DoutPrefixProvider *dpp = get_def_dpp();

  DBOpParams params = {};
  InitializeParams(dpp, &params);

  params.op.lc_head.index = oid;
  params.op.lc_head.head = head;

  ret = ProcessOp(dpp, "InsertLCHead", &params);
  if (ret) {
    ldpp_dout(dpp, 0)<<"In InsertLCHead failed err:(" <<ret<<") " << dendl;
    goto out;
  }

out:
  return ret;
}

/* Deletes orphaned tail-object rows of 'bucket' whose mtime is older than
 * 'min_wait' seconds (the GC grace period; see delete_obj_impl). */
int DB::delete_stale_objs(const DoutPrefixProvider *dpp, const std::string& bucket,
    uint32_t min_wait) {
  DBOpParams params = {};
  int ret = -1;

  params.op.bucket.info.bucket.name = bucket;
  /* Verify if bucket exists.
   * XXX: This is needed for now to create objectmap of bucket
   * in SQLGetBucket
   */
  InitializeParams(dpp, &params);
  ret = ProcessOp(dpp, "GetBucket", &params);
  if (ret) {
    ldpp_dout(dpp, 0) << "In GetBucket failed err:(" <<ret<<")" << dendl;
    return ret;
  }

  ldpp_dout(dpp, 20) << " Deleting stale_objs of bucket( " << bucket <<")" << dendl;

  /* XXX: handle reads racing with delete here. Simple approach is maybe
   * to use locks or sqlite transactions.
   */
  InitializeParams(dpp, &params);
  /* only rows last touched before (now - min_wait) are eligible */
  params.op.obj.state.mtime = (real_clock::now() - make_timespan(min_wait));
  ret = ProcessOp(dpp, "DeleteStaleObjectData", &params);
  if (ret) {
    ldpp_dout(dpp, 0) << "In DeleteStaleObjectData failed err:(" <<ret<<")" << dendl;
  }

  return ret;
}

/* GC thread body: repeatedly walks all buckets (resuming from
 * bucket_marker/user_marker) and deletes stale tail objects, sleeping via
 * cv.wait_for between buckets and between passes until stop_signalled. */
void *DB::GC::entry() {
  do {
    std::unique_lock<std::mutex> lk(mtx);

    ldpp_dout(dpp, 2) << " DB GC started " << dendl;

    int max = 100;
    RGWUserBuckets buckets;
    bool is_truncated = false;

    do {
      std::string& marker = bucket_marker;
      rgw_user user;
      user.id = user_marker;
      buckets.clear();
      is_truncated = false;

      int r = db->list_buckets(dpp, "all", user, marker, string(),
          max, false, &buckets, &is_truncated);

      if (r < 0) { //do nothing? retry later ?
        break;
      }

      for (const auto& ent : buckets.get_buckets()) {
        const std::string &bname = ent.first;

        r = db->delete_stale_objs(dpp, bname, gc_obj_min_wait);
        if (r < 0) { //do nothing? skip to next entry?
          ldpp_dout(dpp, 2) << " delete_stale_objs failed for bucket( " << bname <<")" << dendl;
        }
        /* persist resume point so the next pass continues from here */
        bucket_marker = bname;
        user_marker = user.id;

        /* XXX: If using locks, unlock here and reacquire in the next iteration */
        cv.wait_for(lk, std::chrono::milliseconds(100));
        if (stop_signalled) {
          goto done;
        }
      }
    } while(is_truncated);

    bucket_marker.clear();
    cv.wait_for(lk, std::chrono::milliseconds(gc_interval*10));
  } while(! stop_signalled);

done:
  return nullptr;
}

} } // namespace rgw::store
60,134
25.714793
167
cc
null
ceph-main/src/rgw/driver/dbstore/common/dbstore.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <errno.h>
#include <stdlib.h>
#include <string>
#include <stdio.h>
#include <iostream>
#include <mutex>
#include <condition_variable>
#include "fmt/format.h"
#include <map>
#include "rgw_sal_store.h"
#include "rgw_common.h"
#include "driver/rados/rgw_bucket.h"
#include "global/global_context.h"
#include "global/global_init.h"
#include "common/ceph_context.h"
#include "rgw_obj_manifest.h"
#include "rgw_multi.h"

namespace rgw { namespace store {

class DB;

/* User record as passed to/from DB ops (info + attrs + version). */
struct DBOpUserInfo {
  RGWUserInfo uinfo = {};
  obj_version user_version;
  rgw::sal::Attrs user_attrs;
};

/* Bucket record as passed to/from DB ops, plus list-query cursors. */
struct DBOpBucketInfo {
  RGWBucketEnt ent; // maybe not needed. not used in create/get_bucket
  RGWBucketInfo info;
  RGWUser* owner = nullptr;
  rgw::sal::Attrs bucket_attrs;
  obj_version bucket_version;
  ceph::real_time mtime;
  // used for list query
  std::string min_marker;
  std::string max_marker;
  std::list<RGWBucketEnt> list_entries;
};

/* Object record as passed to/from DB ops: ACLs, dir-entry style metadata,
 * manifest-derived layout fields, omap, multipart info and inline head data. */
struct DBOpObjectInfo {
  RGWAccessControlPolicy acls;
  RGWObjState state = {};

  /* Below are taken from rgw_bucket_dir_entry */
  RGWObjCategory category;
  std::string etag;
  std::string owner;
  std::string owner_display_name;
  std::string content_type;
  std::string storage_class;
  bool appendable;
  uint64_t index_ver;
  std::string tag;
  uint16_t flags;
  uint64_t versioned_epoch;

  /* from state.manifest (RGWObjManifest) */
  std::map<uint64_t, RGWObjManifestPart> objs;
  uint64_t head_size{0};
  rgw_placement_rule head_placement_rule;
  uint64_t max_head_size{0};
  std::string obj_id;
  rgw_bucket_placement tail_placement; /* might be different than the original bucket,
                                          as object might have been copied across pools */
  std::map<uint64_t, RGWObjManifestRule> rules;
  std::string tail_instance; /* tail object's instance */

  /* Obj's omap <key,value> store */
  std::map<std::string, bufferlist> omap;

  /* Extra fields */
  bool is_multipart;
  std::list<RGWUploadPartInfo> mp_parts;

  bufferlist head_data;
  /* list-query cursors and results */
  std::string min_marker;
  std::string max_marker;
  std::string prefix;
  std::list<rgw_bucket_dir_entry> list_entries;
  /* XXX: Maybe use std::vector instead of std::list */

  /* for versioned objects */
  bool is_versioned;
  uint64_t version_num = 0;
};

/* One chunk of tail-object data (keyed by part_num / multipart string). */
struct DBOpObjectDataInfo {
  RGWObjState state;
  uint64_t part_num;
  std::string multipart_part_str;
  uint64_t offset;
  uint64_t size;
  bufferlist data{};
};

/* Lifecycle head record for one LC index. */
struct DBOpLCHeadInfo {
  std::string index;
  rgw::sal::StoreLifecycle::StoreLCHead head;
};

/* Lifecycle entry record, plus list-query cursor/results. */
struct DBOpLCEntryInfo {
  std::string index;
  rgw::sal::StoreLifecycle::StoreLCEntry entry;
  // used for list query
  std::string min_marker;
  std::list<rgw::sal::StoreLifecycle::StoreLCEntry> list_entries;
};

/* Aggregate of all per-entity op payloads; one DBOpInfo accompanies each
 * DB operation and only the relevant sub-struct is consulted. */
struct DBOpInfo {
  std::string name; // Op name
  /* Support only single access_key for now. So store
   * it separately as primary access_key_id & secret to
   * be able to query easily.
   *
   * XXX: Swift keys and subuser not supported for now */
  DBOpUserInfo user;
  std::string query_str;
  DBOpBucketInfo bucket;
  DBOpObjectInfo obj;
  DBOpObjectDataInfo obj_data;
  DBOpLCHeadInfo lc_head;
  DBOpLCEntryInfo lc_entry;
  uint64_t list_max_count;
};

/* Full parameter set handed to ProcessOp(): table names + op payload. */
struct DBOpParams {
  CephContext *cct;

  /* Tables */
  std::string user_table;
  std::string bucket_table;
  std::string object_table;

  /* Ops*/
  DBOpInfo op;

  std::string objectdata_table;
  std::string object_trigger;
  std::string object_view;
  std::string quota_table;
  std::string lc_head_table;
  std::string lc_entry_table;
  std::string obj;
};

/* Used for prepared schemas.
 * Difference with above structure is that all
 * the fields are strings here to accommodate any
 * style identifiers used by backend db. By default
 * initialized with sqlitedb style, can be overriden
 * using InitPrepareParams()
 *
 * These identifiers are used in prepare and bind statements
 * to get the right index of each param.
 */
/* Bind-parameter names (sqlite ":name" style) for the user table. */
struct DBOpUserPrepareInfo {
  static constexpr const char* user_id = ":user_id";
  static constexpr const char* tenant = ":tenant";
  static constexpr const char* ns = ":ns";
  static constexpr const char* display_name = ":display_name";
  static constexpr const char* user_email = ":user_email";
  /* Support only single access_key for now. So store
   * it separately as primary access_key_id & secret to
   * be able to query easily.
   *
   * In future, when need to support & query from multiple
   * access keys, better to maintain them in a separate table.
   */
  static constexpr const char* access_keys_id = ":access_keys_id";
  static constexpr const char* access_keys_secret = ":access_keys_secret";
  static constexpr const char* access_keys = ":access_keys";
  static constexpr const char* swift_keys = ":swift_keys";
  static constexpr const char* subusers = ":subusers";
  static constexpr const char* suspended = ":suspended";
  static constexpr const char* max_buckets = ":max_buckets";
  static constexpr const char* op_mask = ":op_mask";
  static constexpr const char* user_caps = ":user_caps";
  static constexpr const char* admin = ":admin";
  static constexpr const char* system = ":system";
  static constexpr const char* placement_name = ":placement_name";
  static constexpr const char* placement_storage_class = ":placement_storage_class";
  static constexpr const char* placement_tags = ":placement_tags";
  static constexpr const char* bucket_quota = ":bucket_quota";
  static constexpr const char* temp_url_keys = ":temp_url_keys";
  static constexpr const char* user_quota = ":user_quota";
  static constexpr const char* type = ":type";
  static constexpr const char* mfa_ids = ":mfa_ids";
  static constexpr const char* user_attrs = ":user_attrs";
  /* NOTE: identifier says "user_ver" but the bound name is ":user_vers" */
  static constexpr const char* user_ver = ":user_vers";
  static constexpr const char* user_ver_tag = ":user_ver_tag";
};

/* Bind-parameter names for the bucket table. */
struct DBOpBucketPrepareInfo {
  static constexpr const char* bucket_name = ":bucket_name";
  static constexpr const char* tenant = ":tenant";
  static constexpr const char* marker = ":marker";
  static constexpr const char* bucket_id = ":bucket_id";
  static constexpr const char* size = ":size";
  static constexpr const char* size_rounded = ":size_rounded";
  static constexpr const char* creation_time = ":creation_time";
  static constexpr const char* count = ":count";
  static constexpr const char* placement_name = ":placement_name";
  static constexpr const char* placement_storage_class = ":placement_storage_class";
  /* ownerid - maps to DBOpUserPrepareInfo */
  static constexpr const char* flags = ":flags";
  static constexpr const char* zonegroup = ":zonegroup";
  static constexpr const char* has_instance_obj = ":has_instance_obj";
  static constexpr const char* quota = ":quota";
  static constexpr const char* requester_pays = ":requester_pays";
  static constexpr const char* has_website = ":has_website";
  static constexpr const char* website_conf = ":website_conf";
  static constexpr const char* swift_versioning = ":swift_versioning";
  static constexpr const char* swift_ver_location = ":swift_ver_location";
  static constexpr const char* mdsearch_config = ":mdsearch_config";
  static constexpr const char* new_bucket_instance_id = ":new_bucket_instance_id";
  static constexpr const char* obj_lock = ":obj_lock";
  static constexpr const char* sync_policy_info_groups = ":sync_policy_info_groups";
  static constexpr const char* bucket_attrs = ":bucket_attrs";
  /* NOTE: identifier says "bucket_ver" but the bound name is ":bucket_vers" */
  static constexpr const char* bucket_ver = ":bucket_vers";
  static constexpr const char* bucket_ver_tag = ":bucket_ver_tag";
  static constexpr const char* mtime = ":mtime";
  static constexpr const char* min_marker = ":min_marker";
  static constexpr const char* max_marker = ":max_marker";
};

/* Bind-parameter names for the object table. */
struct DBOpObjectPrepareInfo {
  static constexpr const char* obj_name = ":obj_name";
  static constexpr const char* obj_instance = ":obj_instance";
  static constexpr const char* obj_ns = ":obj_ns";
  static constexpr const char* acls = ":acls";
  static constexpr const char* index_ver = ":index_ver";
  static constexpr const char* tag = ":tag";
  static constexpr const char* flags = ":flags";
  static constexpr const char* versioned_epoch = ":versioned_epoch";
  static constexpr const char* obj_category = ":obj_category";
  static constexpr const char* etag = ":etag";
  static constexpr const char* owner = ":owner";
  static constexpr const char* owner_display_name = ":owner_display_name";
  static constexpr const char* storage_class = ":storage_class";
  static constexpr const char* appendable = ":appendable";
  static constexpr const char* content_type = ":content_type";
  static constexpr const char* index_hash_source = ":index_hash_source";
  static constexpr const char* obj_size = ":obj_size";
  static constexpr const char* accounted_size = ":accounted_size";
  static constexpr const char* mtime = ":mtime";
  static constexpr const char* epoch = ":epoch";
  static constexpr const char* obj_tag = ":obj_tag";
  static constexpr const char* tail_tag = ":tail_tag";
  static constexpr const char* write_tag = ":write_tag";
  static constexpr const char* fake_tag = ":fake_tag";
  static constexpr const char* shadow_obj = ":shadow_obj";
  static constexpr const char* has_data = ":has_data";
  static constexpr const char* is_versioned = ":is_versioned";
  static constexpr const char* version_num = ":version_num";
  static constexpr const char* pg_ver = ":pg_ver";
  static constexpr const char* zone_short_id = ":zone_short_id";
  static constexpr const char* obj_version = ":obj_version";
  static constexpr const char* obj_version_tag = ":obj_version_tag";
  static constexpr const char* obj_attrs = ":obj_attrs";
  static constexpr const char* head_size = ":head_size";
  static constexpr const char* max_head_size = ":max_head_size";
  static constexpr const char* obj_id = ":obj_id";
  static constexpr const char* tail_instance = ":tail_instance";
  static constexpr const char* head_placement_rule_name = ":head_placement_rule_name";
  static constexpr const char* head_placement_storage_class = ":head_placement_storage_class";
  static constexpr const char* tail_placement_rule_name = ":tail_placement_rule_name";
  static constexpr const char* tail_placement_storage_class = ":tail_placement_storage_class";
  static constexpr const char* manifest_part_objs = ":manifest_part_objs";
  static constexpr const char* manifest_part_rules = ":manifest_part_rules";
  static constexpr const char* omap = ":omap";
  static constexpr const char* is_multipart = ":is_multipart";
  static constexpr const char* mp_parts = ":mp_parts";
  static constexpr const char* head_data = ":head_data";
  static constexpr const char* min_marker = ":min_marker";
  static constexpr const char* max_marker = ":max_marker";
  static constexpr const char* prefix = ":prefix";
  /* Below used to update mp_parts obj name
   * from meta object to src object on completion */
  static constexpr const char* new_obj_name = ":new_obj_name";
  static constexpr const char* new_obj_instance = ":new_obj_instance";
  static constexpr const char* new_obj_ns = ":new_obj_ns";
};

/* Bind-parameter names for the objectdata (tail chunk) table. */
struct DBOpObjectDataPrepareInfo {
  static constexpr const char* part_num = ":part_num";
  static constexpr const char* offset = ":offset";
  static constexpr const char* data = ":data";
  static constexpr const char* size = ":size";
  static constexpr const char* multipart_part_str = ":multipart_part_str";
};

/* Bind-parameter names for the lifecycle entry table. */
struct DBOpLCEntryPrepareInfo {
  static constexpr const char* index = ":index";
  static constexpr const char* bucket_name = ":bucket_name";
  static constexpr const char* start_time = ":start_time";
  static constexpr const char* status = ":status";
  static constexpr const char* min_marker = ":min_marker";
};

/* Bind-parameter names for the lifecycle head table. */
struct DBOpLCHeadPrepareInfo {
  static constexpr const char* index = ":index";
  static constexpr const char* start_date = ":start_date";
  static constexpr const char* marker = ":marker";
};

/* Aggregate of all bind-parameter name sets (string counterpart of DBOpInfo). */
struct DBOpPrepareInfo {
  DBOpUserPrepareInfo user;
  std::string_view query_str; // view into DBOpInfo::query_str
  DBOpBucketPrepareInfo bucket;
  DBOpObjectPrepareInfo obj;
  DBOpObjectDataPrepareInfo obj_data;
  DBOpLCHeadPrepareInfo lc_head;
  DBOpLCEntryPrepareInfo lc_entry;
  static constexpr const char* list_max_count = ":list_max_count";
};

/* Table names + bind-parameter names used when preparing statements
 * (string counterpart of DBOpParams). */
struct DBOpPrepareParams {
  /* Tables */
  std::string user_table;
  std::string bucket_table;
  std::string object_table;

  /* Ops */
  DBOpPrepareInfo op;

  std::string objectdata_table;
  std::string object_trigger;
  std::string object_view;
  std::string quota_table;
  std::string lc_head_table;
  std::string lc_entry_table;
};

/* Shared (non-bucket-specific) prepared operations of a DB instance. */
struct DBOps {
  std::shared_ptr<class InsertUserOp> InsertUser;
  std::shared_ptr<class RemoveUserOp> RemoveUser;
  std::shared_ptr<class GetUserOp> GetUser;
  std::shared_ptr<class InsertBucketOp> InsertBucket;
  std::shared_ptr<class UpdateBucketOp> UpdateBucket;
  std::shared_ptr<class RemoveBucketOp> RemoveBucket;
  std::shared_ptr<class GetBucketOp> GetBucket;
  std::shared_ptr<class ListUserBucketsOp> ListUserBuckets;
  std::shared_ptr<class InsertLCEntryOp> InsertLCEntry;
  std::shared_ptr<class RemoveLCEntryOp> RemoveLCEntry;
  std::shared_ptr<class GetLCEntryOp> GetLCEntry;
  std::shared_ptr<class ListLCEntriesOp> ListLCEntries;
  std::shared_ptr<class InsertLCHeadOp> InsertLCHead;
  std::shared_ptr<class RemoveLCHeadOp> RemoveLCHead;
  std::shared_ptr<class GetLCHeadOp> GetLCHead;
};

/* Per-bucket prepared object operations; backends override
 * InitializeObjectOps() to create the concrete ops. */
class ObjectOp {
  public:
    ObjectOp() {};
    virtual ~ObjectOp() {}

    std::shared_ptr<class PutObjectOp> PutObject;
    std::shared_ptr<class DeleteObjectOp> DeleteObject;
    std::shared_ptr<class GetObjectOp> GetObject;
    std::shared_ptr<class UpdateObjectOp> UpdateObject;
    std::shared_ptr<class ListBucketObjectsOp> ListBucketObjects;
    std::shared_ptr<class ListVersionedObjectsOp> ListVersionedObjects;
    std::shared_ptr<class PutObjectDataOp> PutObjectData;
    std::shared_ptr<class UpdateObjectDataOp> UpdateObjectData;
    std::shared_ptr<class GetObjectDataOp> GetObjectData;
    std::shared_ptr<class DeleteObjectDataOp> DeleteObjectData;
    std::shared_ptr<class DeleteStaleObjectDataOp> DeleteStaleObjectData;

    /* default no-op; concrete backends prepare their statements here */
    virtual int InitializeObjectOps(std::string db_name, const DoutPrefixProvider *dpp) { return 0; }
};

class DBOp {
  private:
static constexpr std::string_view CreateUserTableQ = /* Corresponds to rgw::sal::User * * For now only UserID is made Primary key. * If multiple tenants are stored in single .db handle, should * make both (UserID, Tenant) as Primary Key. * * XXX: * - AccessKeys, SwiftKeys, Subusers (map<>) are stored as blob. * To enable easy query, first accesskey is stored in separate fields * AccessKeysID, AccessKeysSecret. * In future, may be have separate table to store these keys and * query on that table. * - Quota stored as blob .. should be linked to quota table. */ "CREATE TABLE IF NOT EXISTS '{}' ( \ UserID TEXT NOT NULL UNIQUE, \ Tenant TEXT , \ NS TEXT , \ DisplayName TEXT , \ UserEmail TEXT , \ AccessKeysID TEXT , \ AccessKeysSecret TEXT , \ AccessKeys BLOB , \ SwiftKeys BLOB , \ SubUsers BLOB , \ Suspended INTEGER , \ MaxBuckets INTEGER , \ OpMask INTEGER , \ UserCaps BLOB , \ Admin INTEGER , \ System INTEGER , \ PlacementName TEXT , \ PlacementStorageClass TEXT , \ PlacementTags BLOB , \ BucketQuota BLOB , \ TempURLKeys BLOB , \ UserQuota BLOB , \ TYPE INTEGER , \ MfaIDs BLOB , \ AssumedRoleARN TEXT , \ UserAttrs BLOB, \ UserVersion INTEGER, \ UserVersionTag TEXT, \ PRIMARY KEY (UserID) \n);"; static constexpr std::string_view CreateBucketTableQ = /* Corresponds to rgw::sal::Bucket * * For now only BucketName is made Primary key. Since buckets should * be unique across users in rgw, OwnerID is not made part of primary key. * However it is still referenced as foreign key * * If multiple tenants are stored in single .db handle, should * make both (BucketName, Tenant) as Primary Key. Also should * reference (UserID, Tenant) as Foreign key. * * leaving below RADOS specific fields * - rgw_data_placement_target explicit_placement (struct rgw_bucket) * - rgw::BucketLayout layout (struct RGWBucketInfo) * - const static uint32_t NUM_SHARDS_BLIND_BUCKET (struct RGWBucketInfo), * should be '0' indicating no sharding. 
* - cls_rgw_reshard_status reshard_status (struct RGWBucketInfo) * * XXX: * - Quota stored as blob .. should be linked to quota table. * - WebsiteConf stored as BLOB..if required, should be split * - Storing bucket_version (struct RGWBucket), objv_tracker * (struct RGWBucketInfo) separately. Are they same? * */ "CREATE TABLE IF NOT EXISTS '{}' ( \ BucketName TEXT NOT NULL UNIQUE , \ Tenant TEXT, \ Marker TEXT, \ BucketID TEXT, \ Size INTEGER, \ SizeRounded INTEGER,\ CreationTime BLOB, \ Count INTEGER, \ PlacementName TEXT , \ PlacementStorageClass TEXT , \ OwnerID TEXT NOT NULL, \ Flags INTEGER, \ Zonegroup TEXT, \ HasInstanceObj BOOLEAN, \ Quota BLOB, \ RequesterPays BOOLEAN, \ HasWebsite BOOLEAN, \ WebsiteConf BLOB, \ SwiftVersioning BOOLEAN, \ SwiftVerLocation TEXT, \ MdsearchConfig BLOB, \ NewBucketInstanceID TEXT,\ ObjectLock BLOB, \ SyncPolicyInfoGroups BLOB, \ BucketAttrs BLOB, \ BucketVersion INTEGER, \ BucketVersionTag TEXT, \ Mtime BLOB, \ PRIMARY KEY (BucketName) \ FOREIGN KEY (OwnerID) \ REFERENCES '{}' (UserID) ON DELETE CASCADE ON UPDATE CASCADE \n);"; static constexpr std::string_view CreateObjectTableTriggerQ = "CREATE TRIGGER IF NOT EXISTS '{}' \ AFTER INSERT ON '{}' \ BEGIN \ UPDATE '{}' \ SET VersionNum = (SELECT COALESCE(max(VersionNum), 0) from '{}' where ObjName = new.ObjName) + 1 \ where ObjName = new.ObjName and ObjInstance = new.ObjInstance; \ END;"; static constexpr std::string_view CreateObjectTableQ = /* Corresponds to rgw::sal::Object * * For now only BucketName, ObjName is made Primary key. * If multiple tenants are stored in single .db handle, should * include Tenant too in the Primary Key. Also should * reference (BucketID, Tenant) as Foreign key. * * referring to * - rgw_bucket_dir_entry - following are added for now * flags, * versioned_epoch * tag * index_ver * meta.category * meta.etag * meta.storageclass * meta.appendable * meta.content_type * meta.owner * meta.owner_display_name * * - RGWObjState. 
Below are omitted from that struct * as they seem in-memory variables * * is_atomic, has_atts, exists, prefetch_data, keep_tail, * - RGWObjManifest * * Extra field added "IsMultipart" to flag multipart uploads, * HeadData to store first chunk data. */ "CREATE TABLE IF NOT EXISTS '{}' ( \ ObjName TEXT NOT NULL , \ ObjInstance TEXT, \ ObjNS TEXT, \ BucketName TEXT NOT NULL , \ ACLs BLOB, \ IndexVer INTEGER, \ Tag TEXT, \ Flags INTEGER, \ VersionedEpoch INTEGER, \ ObjCategory INTEGER, \ Etag TEXT, \ Owner TEXT, \ OwnerDisplayName TEXT, \ StorageClass TEXT, \ Appendable BOOL, \ ContentType TEXT, \ IndexHashSource TEXT, \ ObjSize INTEGER, \ AccountedSize INTEGER, \ Mtime BLOB, \ Epoch INTEGER, \ ObjTag BLOB, \ TailTag BLOB, \ WriteTag TEXT, \ FakeTag BOOL, \ ShadowObj TEXT, \ HasData BOOL, \ IsVersioned BOOL, \ VersionNum INTEGER, \ PGVer INTEGER, \ ZoneShortID INTEGER, \ ObjVersion INTEGER, \ ObjVersionTag TEXT, \ ObjAttrs BLOB, \ HeadSize INTEGER, \ MaxHeadSize INTEGER, \ ObjID TEXT NOT NULL, \ TailInstance TEXT, \ HeadPlacementRuleName TEXT, \ HeadPlacementRuleStorageClass TEXT, \ TailPlacementRuleName TEXT, \ TailPlacementStorageClass TEXT, \ ManifestPartObjs BLOB, \ ManifestPartRules BLOB, \ Omap BLOB, \ IsMultipart BOOL, \ MPPartsList BLOB, \ HeadData BLOB, \ PRIMARY KEY (ObjName, ObjInstance, BucketName), \ FOREIGN KEY (BucketName) \ REFERENCES '{}' (BucketName) ON DELETE CASCADE ON UPDATE CASCADE \n);"; static constexpr std::string_view CreateObjectDataTableQ = /* Extra field 'MultipartPartStr' added which signifies multipart * <uploadid + partnum>. For regular object, it is '0.0' * * - part: a collection of stripes that make a contiguous part of an object. A regular object will only have one part (although might have many stripes), a multipart object might have many parts. Each part has a fixed stripe size (ObjChunkSize), although the last stripe of a part might be smaller than that. 
*/ "CREATE TABLE IF NOT EXISTS '{}' ( \ ObjName TEXT NOT NULL , \ ObjInstance TEXT, \ ObjNS TEXT, \ BucketName TEXT NOT NULL , \ ObjID TEXT NOT NULL , \ MultipartPartStr TEXT, \ PartNum INTEGER NOT NULL, \ Offset INTEGER, \ Size INTEGER, \ Mtime BLOB, \ Data BLOB, \ PRIMARY KEY (ObjName, BucketName, ObjInstance, ObjID, MultipartPartStr, PartNum), \ FOREIGN KEY (BucketName) \ REFERENCES '{}' (BucketName) ON DELETE CASCADE ON UPDATE CASCADE \n);"; static constexpr std::string_view CreateObjectViewQ = /* This query creats temporary view with entries from ObjectData table which have * corresponding head object (i.e, with same ObjName, ObjInstance, ObjNS, ObjID) * in the Object table. * * GC thread can use this view to delete stale entries from the ObjectData table which * do not exist in this view. * * XXX: This view is throwing ForeignKey mismatch error, mostly may be because all the keys * of objectdata table are not referenced here. So this view is not used atm. */ "CREATE TEMP VIEW IF NOT EXISTS '{}' AS \ SELECT s.ObjName, s.ObjInstance, s.ObjID from '{}' as s INNER JOIN '{}' USING \ (ObjName, BucketName, ObjInstance, ObjID);"; static constexpr std::string_view CreateQuotaTableQ = "CREATE TABLE IF NOT EXISTS '{}' ( \ QuotaID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE , \ MaxSizeSoftThreshold INTEGER , \ MaxObjsSoftThreshold INTEGER , \ MaxSize INTEGER , \ MaxObjects INTEGER , \ Enabled Boolean , \ CheckOnRaw Boolean \n);"; static constexpr std::string_view CreateLCEntryTableQ = "CREATE TABLE IF NOT EXISTS '{}' ( \ LCIndex TEXT NOT NULL , \ BucketName TEXT NOT NULL , \ StartTime INTEGER , \ Status INTEGER , \ PRIMARY KEY (LCIndex, BucketName) \n);"; static constexpr std::string_view CreateLCHeadTableQ = "CREATE TABLE IF NOT EXISTS '{}' ( \ LCIndex TEXT NOT NULL , \ Marker TEXT , \ StartDate INTEGER , \ PRIMARY KEY (LCIndex) \n);"; static constexpr std::string_view DropQ = "DROP TABLE IF EXISTS '{}'"; static constexpr std::string_view ListAllQ = "SELECT * from '{}'"; 
public: DBOp() {} virtual ~DBOp() {} std::mutex mtx; // to protect prepared stmt static std::string CreateTableSchema(std::string_view type, const DBOpParams *params) { if (!type.compare("User")) return fmt::format(CreateUserTableQ, params->user_table); if (!type.compare("Bucket")) return fmt::format(CreateBucketTableQ, params->bucket_table, params->user_table); if (!type.compare("Object")) return fmt::format(CreateObjectTableQ, params->object_table, params->bucket_table); if (!type.compare("ObjectTrigger")) return fmt::format(CreateObjectTableTriggerQ, params->object_trigger, params->object_table, params->object_table, params->object_table); if (!type.compare("ObjectData")) return fmt::format(CreateObjectDataTableQ, params->objectdata_table, params->bucket_table); if (!type.compare("ObjectView")) return fmt::format(CreateObjectTableQ, params->object_view, params->objectdata_table, params->object_table); if (!type.compare("Quota")) return fmt::format(CreateQuotaTableQ, params->quota_table); if (!type.compare("LCHead")) return fmt::format(CreateLCHeadTableQ, params->lc_head_table); if (!type.compare("LCEntry")) return fmt::format(CreateLCEntryTableQ, params->lc_entry_table, params->bucket_table); ceph_abort_msgf("incorrect table type %.*s", type.size(), type.data()); } static std::string DeleteTableSchema(std::string_view table) { return fmt::format(DropQ, table); } static std::string ListTableSchema(std::string_view table) { return fmt::format(ListAllQ, table); } virtual int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params) { return 0; } virtual int Bind(const DoutPrefixProvider *dpp, DBOpParams *params) { return 0; } virtual int Execute(const DoutPrefixProvider *dpp, DBOpParams *params) { return 0; } }; class InsertUserOp : virtual public DBOp { private: /* For existing entires, - * (1) INSERT or REPLACE - it will delete previous entry and then * inserts new one. 
     * Since it deletes previous entries, it will
     * trigger all foreign key cascade deletes or other triggers.
     * (2) INSERT or UPDATE - this will set NULL values to unassigned
     * fields.
     * more info: https://code-examples.net/en/q/377728
     *
     * For now using INSERT or REPLACE. If required of updating existing
     * record, will use another query.
     */
    static constexpr std::string_view Query = "INSERT OR REPLACE INTO '{}'	\
                          (UserID, Tenant, NS, DisplayName, UserEmail, \
                           AccessKeysID, AccessKeysSecret, AccessKeys, SwiftKeys,\
                           SubUsers, Suspended, MaxBuckets, OpMask, UserCaps, Admin, \
                           System, PlacementName, PlacementStorageClass, PlacementTags, \
                           BucketQuota, TempURLKeys, UserQuota, Type, MfaIDs, \
                           UserAttrs, UserVersion, UserVersionTag) \
                          VALUES ({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, \
                              {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {});";

  public:
    virtual ~InsertUserOp() {}

    // Expand the query with the bind-parameter names for every user column.
    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query, params.user_table,
          params.op.user.user_id, params.op.user.tenant, params.op.user.ns,
          params.op.user.display_name, params.op.user.user_email,
          params.op.user.access_keys_id, params.op.user.access_keys_secret,
          params.op.user.access_keys, params.op.user.swift_keys,
          params.op.user.subusers, params.op.user.suspended,
          params.op.user.max_buckets, params.op.user.op_mask,
          params.op.user.user_caps, params.op.user.admin, params.op.user.system,
          params.op.user.placement_name, params.op.user.placement_storage_class,
          params.op.user.placement_tags, params.op.user.bucket_quota,
          params.op.user.temp_url_keys, params.op.user.user_quota,
          params.op.user.type, params.op.user.mfa_ids,
          params.op.user.user_attrs, params.op.user.user_ver,
          params.op.user.user_ver_tag);
    }
};

/* Deletes the user row; FK cascades remove the user's buckets. */
class RemoveUserOp: virtual public DBOp {
  private:
    static constexpr std::string_view Query =
      "DELETE from '{}' where UserID = {}";

  public:
    virtual ~RemoveUserOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query, params.user_table,
          params.op.user.user_id);
    }
};

/* Fetch a user row by id, email or access key (query_str selects which). */
class GetUserOp: virtual public DBOp {
  private:
    /* If below query columns are updated, make sure to update the indexes
     * in list_user() cbk in sqliteDB.cc */
    static constexpr std::string_view Query = "SELECT \
                          UserID, Tenant, NS, DisplayName, UserEmail, \
                          AccessKeysID, AccessKeysSecret, AccessKeys, SwiftKeys,\
                          SubUsers, Suspended, MaxBuckets, OpMask, UserCaps, Admin, \
                          System, PlacementName, PlacementStorageClass, PlacementTags, \
                          BucketQuota, TempURLKeys, UserQuota, Type, MfaIDs, AssumedRoleARN, \
                          UserAttrs, UserVersion, UserVersionTag from '{}' where UserID = {}";

    static constexpr std::string_view QueryByEmail = "SELECT \
                          UserID, Tenant, NS, DisplayName, UserEmail, \
                          AccessKeysID, AccessKeysSecret, AccessKeys, SwiftKeys,\
                          SubUsers, Suspended, MaxBuckets, OpMask, UserCaps, Admin, \
                          System, PlacementName, PlacementStorageClass, PlacementTags, \
                          BucketQuota, TempURLKeys, UserQuota, Type, MfaIDs, AssumedRoleARN, \
                          UserAttrs, UserVersion, UserVersionTag from '{}' where UserEmail = {}";

    static constexpr std::string_view QueryByAccessKeys = "SELECT \
                          UserID, Tenant, NS, DisplayName, UserEmail, \
                          AccessKeysID, AccessKeysSecret, AccessKeys, SwiftKeys,\
                          SubUsers, Suspended, MaxBuckets, OpMask, UserCaps, Admin, \
                          System, PlacementName, PlacementStorageClass, PlacementTags, \
                          BucketQuota, TempURLKeys, UserQuota, Type, MfaIDs, AssumedRoleARN, \
                          UserAttrs, UserVersion, UserVersionTag from '{}' where AccessKeysID = {}";

    static constexpr std::string_view QueryByUserID = "SELECT \
                          UserID, Tenant, NS, DisplayName, UserEmail, \
                          AccessKeysID, AccessKeysSecret, AccessKeys, SwiftKeys,\
                          SubUsers, Suspended, MaxBuckets, OpMask, UserCaps, Admin, \
                          System, PlacementName, PlacementStorageClass, PlacementTags, \
                          BucketQuota, TempURLKeys, UserQuota, Type, MfaIDs, AssumedRoleARN, \
                          UserAttrs, UserVersion, UserVersionTag \
                          from '{}' where UserID = {}";

  public:
    virtual ~GetUserOp() {}

    // Pick the lookup variant based on op.query_str.
    static std::string Schema(DBOpPrepareParams &params) {
      if (params.op.query_str ==
          "email") {
        return fmt::format(QueryByEmail, params.user_table,
            params.op.user.user_email);
      } else if (params.op.query_str == "access_key") {
        return fmt::format(QueryByAccessKeys,
            params.user_table,
            params.op.user.access_keys_id);
      } else if (params.op.query_str == "user_id") {
        return fmt::format(QueryByUserID,
            params.user_table,
            params.op.user.user_id);
      } else {
        // Default lookup is by UserID.
        return fmt::format(Query, params.user_table,
            params.op.user.user_id);
      }
    }
};

/* Insert or fully replace a bucket row. */
class InsertBucketOp: virtual public DBOp {
  private:
    static constexpr std::string_view Query =
      "INSERT OR REPLACE INTO '{}' \
      (BucketName, Tenant, Marker, BucketID, Size, SizeRounded, CreationTime, \
       Count, PlacementName, PlacementStorageClass, OwnerID, Flags, Zonegroup, \
       HasInstanceObj, Quota, RequesterPays, HasWebsite, WebsiteConf, \
       SwiftVersioning, SwiftVerLocation, \
       MdsearchConfig, NewBucketInstanceID, ObjectLock, \
       SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime) \
      VALUES ({}, {}, {}, {}, {}, {}, {}, {}, {}, \
          {}, {}, {}, {}, {}, {}, {}, {}, {}, \
          {}, {}, {}, {}, {}, {}, {}, {}, {}, {})";

  public:
    virtual ~InsertBucketOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query, params.bucket_table,
          params.op.bucket.bucket_name, params.op.bucket.tenant,
          params.op.bucket.marker, params.op.bucket.bucket_id,
          params.op.bucket.size, params.op.bucket.size_rounded,
          params.op.bucket.creation_time, params.op.bucket.count,
          params.op.bucket.placement_name, params.op.bucket.placement_storage_class,
          params.op.user.user_id,
          params.op.bucket.flags, params.op.bucket.zonegroup,
          params.op.bucket.has_instance_obj,
          params.op.bucket.quota, params.op.bucket.requester_pays,
          params.op.bucket.has_website, params.op.bucket.website_conf,
          params.op.bucket.swift_versioning, params.op.bucket.swift_ver_location,
          params.op.bucket.mdsearch_config, params.op.bucket.new_bucket_instance_id,
          params.op.bucket.obj_lock, params.op.bucket.sync_policy_info_groups,
          params.op.bucket.bucket_attrs,
          params.op.bucket.bucket_ver, params.op.bucket.bucket_ver_tag,
          params.op.bucket.mtime);
    }
};

/* Three update variants, chosen via op.query_str: "info", "attrs", "owner". */
class UpdateBucketOp: virtual public DBOp {
  private:
    // Updates Info, Mtime, Version
    static constexpr std::string_view InfoQuery =
      "UPDATE '{}' SET Tenant = {}, Marker = {}, BucketID = {}, CreationTime = {}, \
      Count = {}, PlacementName = {}, PlacementStorageClass = {}, OwnerID = {}, Flags = {}, \
      Zonegroup = {}, HasInstanceObj = {}, Quota = {}, RequesterPays = {}, HasWebsite = {}, \
      WebsiteConf = {}, SwiftVersioning = {}, SwiftVerLocation = {}, MdsearchConfig = {}, \
      NewBucketInstanceID = {}, ObjectLock = {}, SyncPolicyInfoGroups = {}, \
      BucketVersion = {}, Mtime = {} WHERE BucketName = {}";
    // Updates Attrs, OwnerID, Mtime, Version
    static constexpr std::string_view AttrsQuery =
      "UPDATE '{}' SET OwnerID = {}, BucketAttrs = {}, Mtime = {}, BucketVersion = {} \
      WHERE BucketName = {}";
    // Updates OwnerID, CreationTime, Mtime, Version
    static constexpr std::string_view OwnerQuery =
      "UPDATE '{}' SET OwnerID = {}, CreationTime = {}, Mtime = {}, BucketVersion = {} WHERE BucketName = {}";

  public:
    virtual ~UpdateBucketOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      if (params.op.query_str == "info") {
        return fmt::format(InfoQuery, params.bucket_table,
            params.op.bucket.tenant, params.op.bucket.marker, params.op.bucket.bucket_id,
            params.op.bucket.creation_time, params.op.bucket.count,
            params.op.bucket.placement_name, params.op.bucket.placement_storage_class,
            params.op.user.user_id, params.op.bucket.flags, params.op.bucket.zonegroup,
            params.op.bucket.has_instance_obj, params.op.bucket.quota,
            params.op.bucket.requester_pays, params.op.bucket.has_website,
            params.op.bucket.website_conf, params.op.bucket.swift_versioning,
            params.op.bucket.swift_ver_location, params.op.bucket.mdsearch_config,
            params.op.bucket.new_bucket_instance_id, params.op.bucket.obj_lock,
            params.op.bucket.sync_policy_info_groups,
            params.op.bucket.bucket_ver, params.op.bucket.mtime,
            params.op.bucket.bucket_name);
      }
      if (params.op.query_str == "attrs") {
        return fmt::format(AttrsQuery, params.bucket_table,
            params.op.user.user_id, params.op.bucket.bucket_attrs,
            params.op.bucket.mtime,
            params.op.bucket.bucket_ver, params.op.bucket.bucket_name);
      }
      if (params.op.query_str == "owner") {
        return fmt::format(OwnerQuery, params.bucket_table,
            params.op.user.user_id, params.op.bucket.creation_time,
            params.op.bucket.mtime,
            params.op.bucket.bucket_ver, params.op.bucket.bucket_name);
      }
      // Unknown query_str: no statement to prepare.
      return "";
    }
};

/* Deletes the bucket row; FK cascades remove its objects. */
class RemoveBucketOp: virtual public DBOp {
  private:
    static constexpr std::string_view Query =
      "DELETE from '{}' where BucketName = {}";

  public:
    virtual ~RemoveBucketOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query, params.bucket_table,
          params.op.bucket.bucket_name);
    }
};

/* Fetch one bucket row joined with its owner's user row (adds NS). */
class GetBucketOp: virtual public DBOp {
  private:
    static constexpr std::string_view Query = "SELECT  \
                          BucketName, BucketTable.Tenant, Marker, BucketID, Size, SizeRounded, CreationTime, \
                          Count, BucketTable.PlacementName, BucketTable.PlacementStorageClass, OwnerID, Flags, Zonegroup, \
                          HasInstanceObj, Quota, RequesterPays, HasWebsite, WebsiteConf, \
                          SwiftVersioning, SwiftVerLocation, \
                          MdsearchConfig, NewBucketInstanceID, ObjectLock, \
                          SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime, NS \
                          from '{}' as BucketTable INNER JOIN '{}' ON OwnerID = UserID where BucketName = {}";

  public:
    virtual ~GetBucketOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      //return fmt::format(Query, params.op.bucket.bucket_name,
      //    params.bucket_table, params.user_table);
      return fmt::format(Query,
          params.bucket_table, params.user_table,
          params.op.bucket.bucket_name);
    }
};

/* List buckets of one user (or all buckets when query_str == "all"),
 * paginated via min_marker + list_max_count. */
class ListUserBucketsOp: virtual public DBOp {
  private:
    // once we have stats also stored, may have to update this query to join
    // these two tables.
    static constexpr std::string_view Query = "SELECT  \
                          BucketName, Tenant, Marker, BucketID, Size, SizeRounded, CreationTime, \
                          Count, PlacementName, PlacementStorageClass, OwnerID, Flags, Zonegroup, \
                          HasInstanceObj, Quota, RequesterPays, HasWebsite, WebsiteConf, \
                          SwiftVersioning, SwiftVerLocation, \
                          MdsearchConfig, NewBucketInstanceID, ObjectLock, \
                          SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime \
                          FROM '{}' WHERE OwnerID = {} AND BucketName > {} ORDER BY BucketName ASC LIMIT {}";

    /* BucketNames are unique across users. Hence userid/OwnerID is not used as
     * marker or for ordering here in the below query
     */
    static constexpr std::string_view AllQuery = "SELECT  \
                          BucketName, Tenant, Marker, BucketID, Size, SizeRounded, CreationTime, \
                          Count, PlacementName, PlacementStorageClass, OwnerID, Flags, Zonegroup, \
                          HasInstanceObj, Quota, RequesterPays, HasWebsite, WebsiteConf, \
                          SwiftVersioning, SwiftVerLocation, \
                          MdsearchConfig, NewBucketInstanceID, ObjectLock, \
                          SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime \
                          FROM '{}' WHERE BucketName > {} ORDER BY BucketName ASC LIMIT {}";

  public:
    virtual ~ListUserBucketsOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      if (params.op.query_str == "all") {
        return fmt::format(AllQuery, params.bucket_table,
            params.op.bucket.min_marker,
            params.op.list_max_count);
      } else {
        return fmt::format(Query, params.bucket_table,
            params.op.user.user_id, params.op.bucket.min_marker,
            params.op.list_max_count);
      }
    }
};

/* Insert or fully replace a head-object row (all 48 columns). */
class PutObjectOp: virtual public DBOp {
  private:
    static constexpr std::string_view Query =
      "INSERT OR REPLACE INTO '{}' \
      (ObjName, ObjInstance, ObjNS, BucketName, ACLs, IndexVer, Tag, \
       Flags, VersionedEpoch, ObjCategory, Etag, Owner, OwnerDisplayName, \
       StorageClass, Appendable, ContentType, IndexHashSource, ObjSize, \
       AccountedSize, Mtime, Epoch, ObjTag, TailTag, WriteTag, FakeTag, \
       ShadowObj, HasData, IsVersioned, VersionNum, PGVer, ZoneShortID, \
       ObjVersion, ObjVersionTag, ObjAttrs, HeadSize, MaxHeadSize, \
       ObjID, TailInstance, HeadPlacementRuleName, HeadPlacementRuleStorageClass, \
       TailPlacementRuleName, TailPlacementStorageClass, \
       ManifestPartObjs, ManifestPartRules, Omap, IsMultipart, MPPartsList, \
       HeadData)     \
      VALUES ({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, \
          {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, \
          {}, {}, {}, \
          {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})";

  public:
    virtual ~PutObjectOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query,
          params.object_table, params.op.obj.obj_name,
          params.op.obj.obj_instance, params.op.obj.obj_ns,
          params.op.bucket.bucket_name, params.op.obj.acls, params.op.obj.index_ver,
          params.op.obj.tag, params.op.obj.flags, params.op.obj.versioned_epoch,
          params.op.obj.obj_category, params.op.obj.etag, params.op.obj.owner,
          params.op.obj.owner_display_name, params.op.obj.storage_class,
          params.op.obj.appendable, params.op.obj.content_type,
          params.op.obj.index_hash_source, params.op.obj.obj_size,
          params.op.obj.accounted_size, params.op.obj.mtime,
          params.op.obj.epoch, params.op.obj.obj_tag, params.op.obj.tail_tag,
          params.op.obj.write_tag, params.op.obj.fake_tag, params.op.obj.shadow_obj,
          params.op.obj.has_data, params.op.obj.is_versioned, params.op.obj.version_num,
          params.op.obj.pg_ver, params.op.obj.zone_short_id,
          params.op.obj.obj_version, params.op.obj.obj_version_tag,
          params.op.obj.obj_attrs, params.op.obj.head_size,
          params.op.obj.max_head_size, params.op.obj.obj_id,
          params.op.obj.tail_instance,
          params.op.obj.head_placement_rule_name,
          params.op.obj.head_placement_storage_class,
          params.op.obj.tail_placement_rule_name,
          params.op.obj.tail_placement_storage_class,
          params.op.obj.manifest_part_objs,
          params.op.obj.manifest_part_rules, params.op.obj.omap,
          params.op.obj.is_multipart, params.op.obj.mp_parts,
          params.op.obj.head_data);
    }
};

/* Delete one head-object row (by bucket + name + instance). */
class DeleteObjectOp: virtual public DBOp {
  private:
    static constexpr
      std::string_view Query =
      "DELETE from '{}' where BucketName = {} and ObjName = {} and ObjInstance = {}";

  public:
    virtual ~DeleteObjectOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query, params.object_table,
          params.op.bucket.bucket_name,
          params.op.obj.obj_name,
          params.op.obj.obj_instance);
    }
};

/* Fetch one head-object row (by bucket + name + instance). */
class GetObjectOp: virtual public DBOp {
  private:
    static constexpr std::string_view Query = "SELECT  \
                          ObjName, ObjInstance, ObjNS, BucketName, ACLs, IndexVer, Tag, \
                          Flags, VersionedEpoch, ObjCategory, Etag, Owner, OwnerDisplayName, \
                          StorageClass, Appendable, ContentType, IndexHashSource, ObjSize, \
                          AccountedSize, Mtime, Epoch, ObjTag, TailTag, WriteTag, FakeTag, \
                          ShadowObj, HasData, IsVersioned, VersionNum, PGVer, ZoneShortID, \
                          ObjVersion, ObjVersionTag, ObjAttrs, HeadSize, MaxHeadSize, \
                          ObjID, TailInstance, HeadPlacementRuleName, HeadPlacementRuleStorageClass, \
                          TailPlacementRuleName, TailPlacementStorageClass, \
                          ManifestPartObjs, ManifestPartRules, Omap, IsMultipart, MPPartsList, \
                          HeadData from '{}' \
                          where BucketName = {} and ObjName = {} and ObjInstance = {}";

  public:
    virtual ~GetObjectOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      return fmt::format(Query, params.object_table,
          params.op.bucket.bucket_name,
          params.op.obj.obj_name,
          params.op.obj.obj_instance);
    }
};

/* List objects of a bucket: marker + prefix filtered, latest version
 * first per name, limited by list_max_count. */
class ListBucketObjectsOp: virtual public DBOp {
  private:
    // once we have stats also stored, may have to update this query to join
    // these two tables.
    static constexpr std::string_view Query = "SELECT  \
                          ObjName, ObjInstance, ObjNS, BucketName, ACLs, IndexVer, Tag, \
                          Flags, VersionedEpoch, ObjCategory, Etag, Owner, OwnerDisplayName, \
                          StorageClass, Appendable, ContentType, IndexHashSource, ObjSize, \
                          AccountedSize, Mtime, Epoch, ObjTag, TailTag, WriteTag, FakeTag, \
                          ShadowObj, HasData, IsVersioned, VersionNum, PGVer, ZoneShortID, \
                          ObjVersion, ObjVersionTag, ObjAttrs, HeadSize, MaxHeadSize, \
                          ObjID, TailInstance, HeadPlacementRuleName, HeadPlacementRuleStorageClass, \
                          TailPlacementRuleName, TailPlacementStorageClass, \
                          ManifestPartObjs, ManifestPartRules, Omap, IsMultipart, MPPartsList, HeadData from '{}' \
                          where BucketName = {} and ObjName >= {} and ObjName LIKE {} ORDER BY ObjName ASC, VersionNum DESC LIMIT {}";

  public:
    virtual ~ListBucketObjectsOp() {}

    static std::string Schema(DBOpPrepareParams &params) {
      /* XXX: Include obj_id, delim */
      return fmt::format(Query, params.object_table,
          params.op.bucket.bucket_name,
          params.op.obj.min_marker,
          params.op.obj.prefix,
          params.op.list_max_count);
    }
};

// Default cap on versions returned per object by ListVersionedObjectsOp.
#define MAX_VERSIONED_OBJECTS 20

/* List all versions of one object, newest (highest VersionNum) first. */
class ListVersionedObjectsOp: virtual public DBOp {
  private:
    // once we have stats also stored, may have to update this query to join
    // these two tables.
static constexpr std::string_view Query = "SELECT \ ObjName, ObjInstance, ObjNS, BucketName, ACLs, IndexVer, Tag, \ Flags, VersionedEpoch, ObjCategory, Etag, Owner, OwnerDisplayName, \ StorageClass, Appendable, ContentType, IndexHashSource, ObjSize, \ AccountedSize, Mtime, Epoch, ObjTag, TailTag, WriteTag, FakeTag, \ ShadowObj, HasData, IsVersioned, VersionNum, PGVer, ZoneShortID, \ ObjVersion, ObjVersionTag, ObjAttrs, HeadSize, MaxHeadSize, \ ObjID, TailInstance, HeadPlacementRuleName, HeadPlacementRuleStorageClass, \ TailPlacementRuleName, TailPlacementStorageClass, \ ManifestPartObjs, ManifestPartRules, Omap, IsMultipart, MPPartsList, \ HeadData from '{}' \ where BucketName = {} and ObjName = {} ORDER BY VersionNum DESC LIMIT {}"; public: virtual ~ListVersionedObjectsOp() {} static std::string Schema(DBOpPrepareParams &params) { /* XXX: Include obj_id, delim */ return fmt::format(Query, params.object_table, params.op.bucket.bucket_name, params.op.obj.obj_name, params.op.list_max_count); } }; class UpdateObjectOp: virtual public DBOp { private: // Updates Omap static constexpr std::string_view OmapQuery = "UPDATE '{}' SET Omap = {}, Mtime = {} \ where BucketName = {} and ObjName = {} and ObjInstance = {}"; static constexpr std::string_view AttrsQuery = "UPDATE '{}' SET ObjAttrs = {}, Mtime = {} \ where BucketName = {} and ObjName = {} and ObjInstance = {}"; static constexpr std::string_view MPQuery = "UPDATE '{}' SET MPPartsList = {}, Mtime = {} \ where BucketName = {} and ObjName = {} and ObjInstance = {}"; static constexpr std::string_view MetaQuery = "UPDATE '{}' SET \ ObjNS = {}, ACLs = {}, IndexVer = {}, Tag = {}, Flags = {}, VersionedEpoch = {}, \ ObjCategory = {}, Etag = {}, Owner = {}, OwnerDisplayName = {}, \ StorageClass = {}, Appendable = {}, ContentType = {}, \ IndexHashSource = {}, ObjSize = {}, AccountedSize = {}, Mtime = {}, \ Epoch = {}, ObjTag = {}, TailTag = {}, WriteTag = {}, FakeTag = {}, \ ShadowObj = {}, HasData = {}, IsVersioned = {}, 
VersionNum = {}, PGVer = {}, \ ZoneShortID = {}, ObjVersion = {}, ObjVersionTag = {}, ObjAttrs = {}, \ HeadSize = {}, MaxHeadSize = {}, ObjID = {}, TailInstance = {}, \ HeadPlacementRuleName = {}, HeadPlacementRuleStorageClass = {}, \ TailPlacementRuleName = {}, TailPlacementStorageClass = {}, \ ManifestPartObjs = {}, ManifestPartRules = {}, Omap = {}, \ IsMultipart = {}, MPPartsList = {}, HeadData = {} \ WHERE ObjName = {} and ObjInstance = {} and BucketName = {}"; public: virtual ~UpdateObjectOp() {} static std::string Schema(DBOpPrepareParams &params) { if (params.op.query_str == "omap") { return fmt::format(OmapQuery, params.object_table, params.op.obj.omap, params.op.obj.mtime, params.op.bucket.bucket_name, params.op.obj.obj_name, params.op.obj.obj_instance); } if (params.op.query_str == "attrs") { return fmt::format(AttrsQuery, params.object_table, params.op.obj.obj_attrs, params.op.obj.mtime, params.op.bucket.bucket_name, params.op.obj.obj_name, params.op.obj.obj_instance); } if (params.op.query_str == "mp") { return fmt::format(MPQuery, params.object_table, params.op.obj.mp_parts, params.op.obj.mtime, params.op.bucket.bucket_name, params.op.obj.obj_name, params.op.obj.obj_instance); } if (params.op.query_str == "meta") { return fmt::format(MetaQuery, params.object_table, params.op.obj.obj_ns, params.op.obj.acls, params.op.obj.index_ver, params.op.obj.tag, params.op.obj.flags, params.op.obj.versioned_epoch, params.op.obj.obj_category, params.op.obj.etag, params.op.obj.owner, params.op.obj.owner_display_name, params.op.obj.storage_class, params.op.obj.appendable, params.op.obj.content_type, params.op.obj.index_hash_source, params.op.obj.obj_size, params.op.obj.accounted_size, params.op.obj.mtime, params.op.obj.epoch, params.op.obj.obj_tag, params.op.obj.tail_tag, params.op.obj.write_tag, params.op.obj.fake_tag, params.op.obj.shadow_obj, params.op.obj.has_data, params.op.obj.is_versioned, params.op.obj.version_num, params.op.obj.pg_ver, 
params.op.obj.zone_short_id, params.op.obj.obj_version, params.op.obj.obj_version_tag, params.op.obj.obj_attrs, params.op.obj.head_size, params.op.obj.max_head_size, params.op.obj.obj_id, params.op.obj.tail_instance, params.op.obj.head_placement_rule_name, params.op.obj.head_placement_storage_class, params.op.obj.tail_placement_rule_name, params.op.obj.tail_placement_storage_class, params.op.obj.manifest_part_objs, params.op.obj.manifest_part_rules, params.op.obj.omap, params.op.obj.is_multipart, params.op.obj.mp_parts, params.op.obj.head_data, params.op.obj.obj_name, params.op.obj.obj_instance, params.op.bucket.bucket_name); } return ""; } }; class PutObjectDataOp: virtual public DBOp { private: static constexpr std::string_view Query = "INSERT OR REPLACE INTO '{}' \ (ObjName, ObjInstance, ObjNS, BucketName, ObjID, MultipartPartStr, PartNum, Offset, Size, Mtime, Data) \ VALUES ({}, {}, {}, {}, {}, {}, {}, {}, {}, {}, {})"; public: virtual ~PutObjectDataOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.objectdata_table, params.op.obj.obj_name, params.op.obj.obj_instance, params.op.obj.obj_ns, params.op.bucket.bucket_name, params.op.obj.obj_id, params.op.obj_data.multipart_part_str, params.op.obj_data.part_num, params.op.obj_data.offset, params.op.obj_data.size, params.op.obj.mtime, params.op.obj_data.data); } }; /* XXX: Recheck if this is really needed */ class UpdateObjectDataOp: virtual public DBOp { private: static constexpr std::string_view Query = "UPDATE '{}' \ SET Mtime = {} WHERE ObjName = {} and ObjInstance = {} and \ BucketName = {} and ObjID = {}"; public: virtual ~UpdateObjectDataOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.objectdata_table, params.op.obj.mtime, params.op.obj.obj_name, params.op.obj.obj_instance, params.op.bucket.bucket_name, params.op.obj.obj_id); } }; class GetObjectDataOp: virtual public DBOp { private: static constexpr std::string_view 
Query = "SELECT \ ObjName, ObjInstance, ObjNS, BucketName, ObjID, MultipartPartStr, PartNum, Offset, Size, Mtime, Data \ from '{}' where BucketName = {} and ObjName = {} and ObjInstance = {} and ObjID = {} ORDER BY MultipartPartStr, PartNum"; public: virtual ~GetObjectDataOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.objectdata_table, params.op.bucket.bucket_name, params.op.obj.obj_name, params.op.obj.obj_instance, params.op.obj.obj_id); } }; class DeleteObjectDataOp: virtual public DBOp { private: static constexpr std::string_view Query = "DELETE from '{}' where BucketName = {} and ObjName = {} and ObjInstance = {} and ObjID = {}"; public: virtual ~DeleteObjectDataOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.objectdata_table, params.op.bucket.bucket_name, params.op.obj.obj_name, params.op.obj.obj_instance, params.op.obj.obj_id); } }; class DeleteStaleObjectDataOp: virtual public DBOp { private: static constexpr std::string_view Query = "DELETE from '{}' WHERE (ObjName, ObjInstance, ObjID) NOT IN (SELECT s.ObjName, s.ObjInstance, s.ObjID from '{}' as s INNER JOIN '{}' USING (ObjName, BucketName, ObjInstance, ObjID)) and Mtime < {}"; public: virtual ~DeleteStaleObjectDataOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.objectdata_table, params.objectdata_table, params.object_table, params.op.obj.mtime); } }; class InsertLCEntryOp: virtual public DBOp { private: static constexpr std::string_view Query = "INSERT OR REPLACE INTO '{}' \ (LCIndex, BucketName, StartTime, Status) \ VALUES ({}, {}, {}, {})"; public: virtual ~InsertLCEntryOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.lc_entry_table, params.op.lc_entry.index, params.op.lc_entry.bucket_name, params.op.lc_entry.start_time, params.op.lc_entry.status); } }; class RemoveLCEntryOp: virtual public DBOp { private: static 
constexpr std::string_view Query = "DELETE from '{}' where LCIndex = {} and BucketName = {}"; public: virtual ~RemoveLCEntryOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.lc_entry_table, params.op.lc_entry.index, params.op.lc_entry.bucket_name); } }; class GetLCEntryOp: virtual public DBOp { private: static constexpr std::string_view Query = "SELECT \ LCIndex, BucketName, StartTime, Status \ from '{}' where LCIndex = {} and BucketName = {}"; static constexpr std::string_view NextQuery = "SELECT \ LCIndex, BucketName, StartTime, Status \ from '{}' where LCIndex = {} and BucketName > {} ORDER BY BucketName ASC"; public: virtual ~GetLCEntryOp() {} static std::string Schema(DBOpPrepareParams &params) { if (params.op.query_str == "get_next_entry") { return fmt::format(NextQuery, params.lc_entry_table, params.op.lc_entry.index, params.op.lc_entry.bucket_name); } // default return fmt::format(Query, params.lc_entry_table, params.op.lc_entry.index, params.op.lc_entry.bucket_name); } }; class ListLCEntriesOp: virtual public DBOp { private: static constexpr std::string_view Query = "SELECT \ LCIndex, BucketName, StartTime, Status \ FROM '{}' WHERE LCIndex = {} AND BucketName > {} ORDER BY BucketName ASC LIMIT {}"; public: virtual ~ListLCEntriesOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.lc_entry_table, params.op.lc_entry.index, params.op.lc_entry.min_marker, params.op.list_max_count); } }; class InsertLCHeadOp: virtual public DBOp { private: static constexpr std::string_view Query = "INSERT OR REPLACE INTO '{}' \ (LCIndex, Marker, StartDate) \ VALUES ({}, {}, {})"; public: virtual ~InsertLCHeadOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.lc_head_table, params.op.lc_head.index, params.op.lc_head.marker, params.op.lc_head.start_date); } }; class RemoveLCHeadOp: virtual public DBOp { private: static constexpr std::string_view 
Query = "DELETE from '{}' where LCIndex = {}"; public: virtual ~RemoveLCHeadOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.lc_head_table, params.op.lc_head.index); } }; class GetLCHeadOp: virtual public DBOp { private: static constexpr std::string_view Query = "SELECT \ LCIndex, Marker, StartDate \ from '{}' where LCIndex = {}"; public: virtual ~GetLCHeadOp() {} static std::string Schema(DBOpPrepareParams &params) { return fmt::format(Query, params.lc_head_table, params.op.lc_head.index); } }; /* taken from rgw_rados.h::RGWOLHInfo */ struct DBOLHInfo { rgw_obj target; bool removed; DBOLHInfo() : removed(false) {} void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(target, bl); encode(removed, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(target, bl); decode(removed, bl); DECODE_FINISH(bl); } }; WRITE_CLASS_ENCODER(DBOLHInfo) class DB { private: const std::string db_name; rgw::sal::Driver* driver; const std::string user_table; const std::string bucket_table; const std::string quota_table; const std::string lc_head_table; const std::string lc_entry_table; static std::map<std::string, class ObjectOp*> objectmap; protected: void *db; CephContext *cct; const DoutPrefix dp; uint64_t max_bucket_id = 0; // XXX: default ObjStripeSize or ObjChunk size - 4M, make them configurable? uint64_t ObjHeadSize = 1024; /* 1K - default head data size */ uint64_t ObjChunkSize = (get_blob_limit() - 1000); /* 1000 to accommodate other fields */ // Below mutex is to protect objectmap and other shared // objects if any. 
std::mutex mtx; public: DB(std::string db_name, CephContext *_cct) : db_name(db_name), user_table(db_name+"_user_table"), bucket_table(db_name+"_bucket_table"), quota_table(db_name+"_quota_table"), lc_head_table(db_name+"_lc_head_table"), lc_entry_table(db_name+"_lc_entry_table"), cct(_cct), dp(_cct, ceph_subsys_rgw, "rgw DBStore backend: ") {} /* DB() {}*/ DB(CephContext *_cct) : db_name("default_db"), user_table(db_name+"_user_table"), bucket_table(db_name+"_bucket_table"), quota_table(db_name+"_quota_table"), lc_head_table(db_name+"_lc_head_table"), lc_entry_table(db_name+"_lc_entry_table"), cct(_cct), dp(_cct, ceph_subsys_rgw, "rgw DBStore backend: ") {} virtual ~DB() {} const std::string getDBname() { return db_name; } const std::string getDBfile() { return db_name + ".db"; } const std::string getUserTable() { return user_table; } const std::string getBucketTable() { return bucket_table; } const std::string getQuotaTable() { return quota_table; } const std::string getLCHeadTable() { return lc_head_table; } const std::string getLCEntryTable() { return lc_entry_table; } const std::string getObjectTable(std::string bucket) { return db_name+"_"+bucket+"_object_table"; } const std::string getObjectDataTable(std::string bucket) { return db_name+"_"+bucket+"_objectdata_table"; } const std::string getObjectView(std::string bucket) { return db_name+"_"+bucket+"_object_view"; } const std::string getObjectTrigger(std::string bucket) { return db_name+"_"+bucket+"_object_trigger"; } std::map<std::string, class ObjectOp*> getObjectMap(); struct DBOps dbops; // DB operations, make it private? 
void set_driver(rgw::sal::Driver* _driver) { driver = _driver; } void set_context(CephContext *_cct) { cct = _cct; } CephContext *ctx() { return cct; } const DoutPrefixProvider *get_def_dpp() { return &dp; } int Initialize(std::string logfile, int loglevel); int Destroy(const DoutPrefixProvider *dpp); int LockInit(const DoutPrefixProvider *dpp); int LockDestroy(const DoutPrefixProvider *dpp); int Lock(const DoutPrefixProvider *dpp); int Unlock(const DoutPrefixProvider *dpp); int InitializeParams(const DoutPrefixProvider *dpp, DBOpParams *params); int ProcessOp(const DoutPrefixProvider *dpp, std::string_view Op, DBOpParams *params); std::shared_ptr<class DBOp> getDBOp(const DoutPrefixProvider *dpp, std::string_view Op, const DBOpParams *params); int objectmapInsert(const DoutPrefixProvider *dpp, std::string bucket, class ObjectOp* ptr); int objectmapDelete(const DoutPrefixProvider *dpp, std::string bucket); virtual uint64_t get_blob_limit() { return 0; }; virtual void *openDB(const DoutPrefixProvider *dpp) { return NULL; } virtual int closeDB(const DoutPrefixProvider *dpp) { return 0; } virtual int createTables(const DoutPrefixProvider *dpp) { return 0; } virtual int InitializeDBOps(const DoutPrefixProvider *dpp) { return 0; } virtual int InitPrepareParams(const DoutPrefixProvider *dpp, DBOpPrepareParams &p_params, DBOpParams* params) = 0; virtual int createLCTables(const DoutPrefixProvider *dpp) = 0; virtual int ListAllBuckets(const DoutPrefixProvider *dpp, DBOpParams *params) = 0; virtual int ListAllUsers(const DoutPrefixProvider *dpp, DBOpParams *params) = 0; virtual int ListAllObjects(const DoutPrefixProvider *dpp, DBOpParams *params) = 0; int get_user(const DoutPrefixProvider *dpp, const std::string& query_str, const std::string& query_str_val, RGWUserInfo& uinfo, std::map<std::string, bufferlist> *pattrs, RGWObjVersionTracker *pobjv_tracker); int store_user(const DoutPrefixProvider *dpp, RGWUserInfo& uinfo, bool exclusive, std::map<std::string, bufferlist> 
*pattrs, RGWObjVersionTracker *pobjv_tracker, RGWUserInfo* pold_info); int remove_user(const DoutPrefixProvider *dpp, RGWUserInfo& uinfo, RGWObjVersionTracker *pobjv_tracker); int get_bucket_info(const DoutPrefixProvider *dpp, const std::string& query_str, const std::string& query_str_val, RGWBucketInfo& info, rgw::sal::Attrs* pattrs, ceph::real_time* pmtime, obj_version* pbucket_version); int create_bucket(const DoutPrefixProvider *dpp, const RGWUserInfo& owner, rgw_bucket& bucket, const std::string& zonegroup_id, const rgw_placement_rule& placement_rule, const std::string& swift_ver_location, const RGWQuotaInfo * pquota_info, std::map<std::string, bufferlist>& attrs, RGWBucketInfo& info, obj_version *pobjv, obj_version *pep_objv, real_time creation_time, rgw_bucket *pmaster_bucket, uint32_t *pmaster_num_shards, optional_yield y, bool exclusive); int next_bucket_id() { return ++max_bucket_id; }; int remove_bucket(const DoutPrefixProvider *dpp, const RGWBucketInfo info); int list_buckets(const DoutPrefixProvider *dpp, const std::string& query_str, rgw_user& user, const std::string& marker, const std::string& end_marker, uint64_t max, bool need_stats, RGWUserBuckets *buckets, bool *is_truncated); int update_bucket(const DoutPrefixProvider *dpp, const std::string& query_str, RGWBucketInfo& info, bool exclusive, const rgw_user* powner_id, std::map<std::string, bufferlist>* pattrs, ceph::real_time* pmtime, RGWObjVersionTracker* pobjv); uint64_t get_max_head_size() { return ObjHeadSize; } uint64_t get_max_chunk_size() { return ObjChunkSize; } void gen_rand_obj_instance_name(rgw_obj_key *target_key); // db raw obj string is of format - // "<bucketname>_<objname>_<objinstance>_<multipart-part-str>_<partnum>" static constexpr std::string_view raw_obj_oid = "{0}_{1}_{2}_{3}_{4}"; std::string to_oid(std::string_view bucket, std::string_view obj_name, std::string_view obj_instance, std::string_view obj_id, std::string_view mp_str, uint64_t partnum) { return 
fmt::format(raw_obj_oid, bucket, obj_name, obj_instance, obj_id, mp_str, partnum); } int from_oid(const std::string& oid, std::string& bucket, std::string& obj_name, std::string& obj_id, std::string& obj_instance, std::string& mp_str, uint64_t& partnum) { // TODO: use ceph::split() from common/split.h // XXX: doesn't this break if obj_name has underscores in it? std::vector<std::string> result; boost::split(result, oid, boost::is_any_of("_")); bucket = result[0]; obj_name = result[1]; obj_instance = result[2]; obj_id = result[3]; mp_str = result[4]; partnum = stoi(result[5]); return 0; } struct raw_obj { DB* db; std::string bucket_name; std::string obj_name; std::string obj_instance; std::string obj_ns; std::string obj_id; std::string multipart_part_str; uint64_t part_num; std::string obj_table; std::string obj_data_table; raw_obj(DB* _db) { db = _db; } raw_obj(DB* _db, std::string& _bname, std::string& _obj_name, std::string& _obj_instance, std::string& _obj_ns, std::string& _obj_id, std::string _mp_part_str, int _part_num) { db = _db; bucket_name = _bname; obj_name = _obj_name; obj_instance = _obj_instance; obj_ns = _obj_ns; obj_id = _obj_id; multipart_part_str = _mp_part_str; part_num = _part_num; obj_table = bucket_name+".object.table"; obj_data_table = bucket_name+".objectdata.table"; } raw_obj(DB* _db, std::string& oid) { int r; db = _db; r = db->from_oid(oid, bucket_name, obj_name, obj_instance, obj_id, multipart_part_str, part_num); if (r < 0) { multipart_part_str = "0.0"; part_num = 0; } obj_table = db->getObjectTable(bucket_name); obj_data_table = db->getObjectDataTable(bucket_name); } int InitializeParamsfromRawObj (const DoutPrefixProvider *dpp, DBOpParams* params); int read(const DoutPrefixProvider *dpp, int64_t ofs, uint64_t end, bufferlist& bl); int write(const DoutPrefixProvider *dpp, int64_t ofs, int64_t write_ofs, uint64_t len, bufferlist& bl); }; class GC : public Thread { const DoutPrefixProvider *dpp; DB *db; /* Default time interval for GC * 
XXX: Make below options configurable * * gc_interval: The time between successive gc thread runs * gc_obj_min_wait: Min. time to wait before deleting any data post its creation. * */ std::mutex mtx; std::condition_variable cv; bool stop_signalled = false; uint32_t gc_interval = 24*60*60; //sec ; default: 24*60*60 uint32_t gc_obj_min_wait = 60*60; //60*60sec default std::string bucket_marker; std::string user_marker; public: GC(const DoutPrefixProvider *_dpp, DB* _db) : dpp(_dpp), db(_db) {} void *entry() override; void signal_stop() { std::lock_guard<std::mutex> lk_guard(mtx); stop_signalled = true; cv.notify_one(); } friend class DB; }; std::unique_ptr<DB::GC> gc_worker; class Bucket { friend class DB; DB* store; RGWBucketInfo bucket_info; public: Bucket(DB *_store, const RGWBucketInfo& _binfo) : store(_store), bucket_info(_binfo) {} DB *get_store() { return store; } rgw_bucket& get_bucket() { return bucket_info.bucket; } RGWBucketInfo& get_bucket_info() { return bucket_info; } class List { protected: // absolute maximum number of objects that // list_objects_(un)ordered can return static constexpr int64_t bucket_list_objects_absolute_max = 25000; DB::Bucket *target; rgw_obj_key next_marker; public: struct Params { std::string prefix; std::string delim; rgw_obj_key marker; rgw_obj_key end_marker; std::string ns; bool enforce_ns; RGWAccessListFilter* access_list_filter; RGWBucketListNameFilter force_check_filter; bool list_versions; bool allow_unordered; Params() : enforce_ns(true), access_list_filter(nullptr), list_versions(false), allow_unordered(false) {} } params; explicit List(DB::Bucket *_target) : target(_target) {} /* XXX: Handle ordered and unordered separately. 
* For now returning only ordered entries */ int list_objects(const DoutPrefixProvider *dpp, int64_t max, std::vector<rgw_bucket_dir_entry> *result, std::map<std::string, bool> *common_prefixes, bool *is_truncated); rgw_obj_key& get_next_marker() { return next_marker; } }; }; class Object { friend class DB; DB* store; RGWBucketInfo bucket_info; rgw_obj obj; RGWObjState obj_state; std::string obj_id; bool versioning_disabled; bool bs_initialized; public: Object(DB *_store, const RGWBucketInfo& _bucket_info, const rgw_obj& _obj) : store(_store), bucket_info(_bucket_info), obj(_obj), versioning_disabled(false), bs_initialized(false) {} Object(DB *_store, const RGWBucketInfo& _bucket_info, const rgw_obj& _obj, const std::string& _obj_id) : store(_store), bucket_info(_bucket_info), obj(_obj), obj_id(_obj_id) {} struct Read { DB::Object *source; struct GetObjState { rgw_obj obj; } state; struct ConditionParams { const ceph::real_time *mod_ptr; const ceph::real_time *unmod_ptr; bool high_precision_time; uint32_t mod_zone_id; uint64_t mod_pg_ver; const char *if_match; const char *if_nomatch; ConditionParams() : mod_ptr(NULL), unmod_ptr(NULL), high_precision_time(false), mod_zone_id(0), mod_pg_ver(0), if_match(NULL), if_nomatch(NULL) {} } conds; struct Params { ceph::real_time *lastmod; uint64_t *obj_size; std::map<std::string, bufferlist> *attrs; rgw_obj *target_obj; Params() : lastmod(nullptr), obj_size(nullptr), attrs(nullptr), target_obj(nullptr) {} } params; explicit Read(DB::Object *_source) : source(_source) {} int prepare(const DoutPrefixProvider *dpp); static int range_to_ofs(uint64_t obj_size, int64_t &ofs, int64_t &end); int read(int64_t ofs, int64_t end, bufferlist& bl, const DoutPrefixProvider *dpp); int iterate(const DoutPrefixProvider *dpp, int64_t ofs, int64_t end, RGWGetDataCB *cb); int get_attr(const DoutPrefixProvider *dpp, const char *name, bufferlist& dest); }; struct Write { DB::Object *target; RGWObjState obj_state; std::string mp_part_str = "0.0"; // 
multipart num struct MetaParams { ceph::real_time *mtime; std::map<std::string, bufferlist>* rmattrs; const bufferlist *data; RGWObjManifest *manifest; const std::string *ptag; std::list<rgw_obj_index_key> *remove_objs; ceph::real_time set_mtime; rgw_user owner; RGWObjCategory category; int flags; const char *if_match; const char *if_nomatch; std::optional<uint64_t> olh_epoch; ceph::real_time delete_at; bool canceled; const std::string *user_data; rgw_zone_set *zones_trace; bool modify_tail; bool completeMultipart; bool appendable; MetaParams() : mtime(NULL), rmattrs(NULL), data(NULL), manifest(NULL), ptag(NULL), remove_objs(NULL), category(RGWObjCategory::Main), flags(0), if_match(NULL), if_nomatch(NULL), canceled(false), user_data(nullptr), zones_trace(nullptr), modify_tail(false), completeMultipart(false), appendable(false) {} } meta; explicit Write(DB::Object *_target) : target(_target) {} void set_mp_part_str(std::string _mp_part_str) { mp_part_str = _mp_part_str;} int prepare(const DoutPrefixProvider* dpp); int write_data(const DoutPrefixProvider* dpp, bufferlist& data, uint64_t ofs); int _do_write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size, std::map<std::string, bufferlist>& attrs, bool assume_noent, bool modify_tail); int write_meta(const DoutPrefixProvider *dpp, uint64_t size, uint64_t accounted_size, std::map<std::string, bufferlist>& attrs); }; struct Delete { DB::Object *target; struct DeleteParams { rgw_user bucket_owner; int versioning_status; ACLOwner obj_owner; /* needed for creation of deletion marker */ uint64_t olh_epoch; std::string marker_version_id; uint32_t bilog_flags; std::list<rgw_obj_index_key> *remove_objs; ceph::real_time expiration_time; ceph::real_time unmod_since; ceph::real_time mtime; /* for setting delete marker mtime */ bool high_precision_time; rgw_zone_set *zones_trace; bool abortmp; uint64_t parts_accounted_size; DeleteParams() : versioning_status(0), olh_epoch(0), bilog_flags(0), 
remove_objs(NULL), high_precision_time(false), zones_trace(nullptr), abortmp(false), parts_accounted_size(0) {} } params; struct DeleteResult { bool delete_marker; std::string version_id; DeleteResult() : delete_marker(false) {} } result; explicit Delete(DB::Object *_target) : target(_target) {} int delete_obj(const DoutPrefixProvider *dpp); int delete_obj_impl(const DoutPrefixProvider *dpp, DBOpParams& del_params); int create_dm(const DoutPrefixProvider *dpp, DBOpParams& del_params); }; /* XXX: the parameters may be subject to change. All we need is bucket name * & obj name,instance - keys */ int get_object_impl(const DoutPrefixProvider *dpp, DBOpParams& params); int get_obj_state(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, bool follow_olh, RGWObjState **state); int get_state(const DoutPrefixProvider *dpp, RGWObjState **pstate, bool follow_olh); int list_versioned_objects(const DoutPrefixProvider *dpp, std::list<rgw_bucket_dir_entry>& list_entries); DB *get_store() { return store; } rgw_obj& get_obj() { return obj; } RGWBucketInfo& get_bucket_info() { return bucket_info; } int InitializeParamsfromObject(const DoutPrefixProvider *dpp, DBOpParams* params); int set_attrs(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist>& setattrs, std::map<std::string, bufferlist>* rmattrs); int transition(const DoutPrefixProvider *dpp, const rgw_placement_rule& rule, const real_time& mtime, uint64_t olh_epoch); int obj_omap_set_val_by_key(const DoutPrefixProvider *dpp, const std::string& key, bufferlist& val, bool must_exist); int obj_omap_get_vals_by_keys(const DoutPrefixProvider *dpp, const std::string& oid, const std::set<std::string>& keys, std::map<std::string, bufferlist>* vals); int obj_omap_get_all(const DoutPrefixProvider *dpp, std::map<std::string, bufferlist> *m); int obj_omap_get_vals(const DoutPrefixProvider *dpp, const std::string& marker, uint64_t count, std::map<std::string, bufferlist> *m, bool* pmore); using 
iterate_obj_cb = int (*)(const DoutPrefixProvider*, const raw_obj&, off_t, off_t, bool, RGWObjState*, void*); int add_mp_part(const DoutPrefixProvider *dpp, RGWUploadPartInfo info); int get_mp_parts_list(const DoutPrefixProvider *dpp, std::list<RGWUploadPartInfo>& info); int iterate_obj(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw_obj& obj, off_t ofs, off_t end, uint64_t max_chunk_size, iterate_obj_cb cb, void *arg); }; int get_obj_iterate_cb(const DoutPrefixProvider *dpp, const raw_obj& read_obj, off_t obj_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg); int get_entry(const std::string& oid, const std::string& marker, std::unique_ptr<rgw::sal::Lifecycle::LCEntry>* entry); int get_next_entry(const std::string& oid, const std::string& marker, std::unique_ptr<rgw::sal::Lifecycle::LCEntry>* entry); int set_entry(const std::string& oid, rgw::sal::Lifecycle::LCEntry& entry); int list_entries(const std::string& oid, const std::string& marker, uint32_t max_entries, std::vector<std::unique_ptr<rgw::sal::Lifecycle::LCEntry>>& entries); int rm_entry(const std::string& oid, rgw::sal::Lifecycle::LCEntry& entry); int get_head(const std::string& oid, std::unique_ptr<rgw::sal::Lifecycle::LCHead>* head); int put_head(const std::string& oid, rgw::sal::Lifecycle::LCHead& head); int delete_stale_objs(const DoutPrefixProvider *dpp, const std::string& bucket, uint32_t min_wait); int createGC(const DoutPrefixProvider *_dpp); int stopGC(); }; struct db_get_obj_data { DB* store; RGWGetDataCB* client_cb = nullptr; uint64_t offset; // next offset to write to client db_get_obj_data(DB* db, RGWGetDataCB* cb, uint64_t offset) : store(db), client_cb(cb), offset(offset) {} ~db_get_obj_data() {} }; } } // namespace rgw::store
80,764
39.042142
205
h
null
ceph-main/src/rgw/driver/dbstore/common/dbstore_log.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <cerrno> #include <cstdlib> #include <string> #include <cstdio> #include <iostream> #include <fstream> #include "common/dout.h" #undef dout_prefix #define dout_prefix *_dout << "rgw dbstore: "
316
18.8125
70
h
null
ceph-main/src/rgw/driver/dbstore/config/sqlite.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <charconv> #include <initializer_list> #include <map> #include <fmt/format.h> #include <sqlite3.h> #include "include/buffer.h" #include "include/encoding.h" #include "common/dout.h" #include "common/random_string.h" #include "rgw_zone.h" #include "common/connection_pool.h" #include "sqlite/connection.h" #include "sqlite/error.h" #include "sqlite/statement.h" #include "sqlite_schema.h" #include "sqlite.h" #define dout_subsys ceph_subsys_rgw_dbstore namespace rgw::dbstore::config { struct Prefix : DoutPrefixPipe { std::string_view prefix; Prefix(const DoutPrefixProvider& dpp, std::string_view prefix) : DoutPrefixPipe(dpp), prefix(prefix) {} unsigned get_subsys() const override { return dout_subsys; } void add_prefix(std::ostream& out) const override { out << prefix; } }; namespace { // parameter names for prepared statement bindings static constexpr const char* P1 = ":1"; static constexpr const char* P2 = ":2"; static constexpr const char* P3 = ":3"; static constexpr const char* P4 = ":4"; static constexpr const char* P5 = ":5"; static constexpr const char* P6 = ":6"; // bind as text unless value is empty void bind_text_or_null(const DoutPrefixProvider* dpp, const sqlite::stmt_binding& stmt, const char* name, std::string_view value) { if (value.empty()) { sqlite::bind_null(dpp, stmt, name); } else { sqlite::bind_text(dpp, stmt, name, value); } } void read_text_rows(const DoutPrefixProvider* dpp, const sqlite::stmt_execution& stmt, std::span<std::string> entries, sal::ListResult<std::string>& result) { result.entries = sqlite::read_text_rows(dpp, stmt, entries); if 
(result.entries.size() < entries.size()) { // end of listing result.next.clear(); } else { result.next = result.entries.back(); } } struct RealmRow { RGWRealm info; int ver; std::string tag; }; void read_realm_row(const sqlite::stmt_execution& stmt, RealmRow& row) { row.info.id = sqlite::column_text(stmt, 0); row.info.name = sqlite::column_text(stmt, 1); row.info.current_period = sqlite::column_text(stmt, 2); row.info.epoch = sqlite::column_int(stmt, 3); row.ver = sqlite::column_int(stmt, 4); row.tag = sqlite::column_text(stmt, 5); } void read_period_row(const sqlite::stmt_execution& stmt, RGWPeriod& row) { // just read the Data column and decode everything else from that std::string data = sqlite::column_text(stmt, 3); bufferlist bl = bufferlist::static_from_string(data); auto p = bl.cbegin(); decode(row, p); } struct ZoneGroupRow { RGWZoneGroup info; int ver; std::string tag; }; void read_zonegroup_row(const sqlite::stmt_execution& stmt, ZoneGroupRow& row) { std::string data = sqlite::column_text(stmt, 3); row.ver = sqlite::column_int(stmt, 4); row.tag = sqlite::column_text(stmt, 5); bufferlist bl = bufferlist::static_from_string(data); auto p = bl.cbegin(); decode(row.info, p); } struct ZoneRow { RGWZoneParams info; int ver; std::string tag; }; void read_zone_row(const sqlite::stmt_execution& stmt, ZoneRow& row) { std::string data = sqlite::column_text(stmt, 3); row.ver = sqlite::column_int(stmt, 4); row.tag = sqlite::column_text(stmt, 5); bufferlist bl = bufferlist::static_from_string(data); auto p = bl.cbegin(); decode(row.info, p); } std::string generate_version_tag(CephContext* cct) { static constexpr auto TAG_LEN = 24; return gen_rand_alphanumeric(cct, TAG_LEN); } using SQLiteConnectionHandle = ConnectionHandle<sqlite::Connection>; using SQLiteConnectionPool = ConnectionPool< sqlite::Connection, sqlite::ConnectionFactory>; } // anonymous namespace class SQLiteImpl : public SQLiteConnectionPool { public: using SQLiteConnectionPool::SQLiteConnectionPool; }; 
SQLiteConfigStore::SQLiteConfigStore(std::unique_ptr<SQLiteImpl> impl) : impl(std::move(impl)) { } SQLiteConfigStore::~SQLiteConfigStore() = default; // Realm class SQLiteRealmWriter : public sal::RealmWriter { SQLiteImpl* impl; int ver; std::string tag; std::string realm_id; std::string realm_name; public: SQLiteRealmWriter(SQLiteImpl* impl, int ver, std::string tag, std::string_view realm_id, std::string_view realm_name) : impl(impl), ver(ver), tag(std::move(tag)), realm_id(realm_id), realm_name(realm_name) {} int write(const DoutPrefixProvider* dpp, optional_yield y, const RGWRealm& info) override { Prefix prefix{*dpp, "dbconfig:sqlite:realm_write "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after a conflict or delete } if (realm_id != info.id || realm_name != info.name) { return -EINVAL; // can't modify realm id or name directly } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["realm_upd"]; if (!stmt) { const std::string sql = fmt::format(schema::realm_update5, P1, P2, P3, P4, P5); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_text(dpp, binding, P2, info.current_period); sqlite::bind_int(dpp, binding, P3, info.epoch); sqlite::bind_int(dpp, binding, P4, ver); sqlite::bind_text(dpp, binding, P5, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch // our version is no longer consistent, so later writes would fail too impl = nullptr; return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm update failed: " << e.what() << dendl; if (e.code() == sqlite::errc::foreign_key_constraint) { return -EINVAL; // refers to nonexistent CurrentPeriod } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } ++ver; return 0; } int rename(const DoutPrefixProvider* dpp, 
optional_yield y, RGWRealm& info, std::string_view new_name) override { Prefix prefix{*dpp, "dbconfig:sqlite:realm_rename "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } if (realm_id != info.id || realm_name != info.name) { return -EINVAL; // can't modify realm id or name directly } if (new_name.empty()) { ldpp_dout(dpp, 0) << "realm cannot have an empty name" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["realm_rename"]; if (!stmt) { const std::string sql = fmt::format(schema::realm_rename4, P1, P2, P3, P4); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, realm_id); sqlite::bind_text(dpp, binding, P2, new_name); sqlite::bind_int(dpp, binding, P3, ver); sqlite::bind_text(dpp, binding, P4, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch impl = nullptr; return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm rename failed: " << e.what() << dendl; if (e.code() == sqlite::errc::unique_constraint) { return -EEXIST; // Name already taken } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info.name = std::string{new_name}; ++ver; return 0; } int remove(const DoutPrefixProvider* dpp, optional_yield y) override { Prefix prefix{*dpp, "dbconfig:sqlite:realm_remove "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["realm_del"]; if (!stmt) { const std::string sql = fmt::format(schema::realm_delete3, P1, P2, P3); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, realm_id); sqlite::bind_int(dpp, binding, P2, ver); sqlite::bind_text(dpp, 
binding, P3, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); impl = nullptr; // prevent any further writes after delete if (!::sqlite3_changes(conn->db.get())) { return -ECANCELED; // VersionNumber/Tag mismatch } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } }; // SQLiteRealmWriter int SQLiteConfigStore::write_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id) { Prefix prefix{*dpp, "dbconfig:sqlite:write_default_realm_id "}; dpp = &prefix; if (realm_id.empty()) { ldpp_dout(dpp, 0) << "requires a realm id" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["def_realm_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::default_realm_insert1, P1); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = &conn->statements["def_realm_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::default_realm_upsert1, P1); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; sqlite::bind_text(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default realm insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::primary_key_constraint) { return -EEXIST; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::read_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string& realm_id) { Prefix prefix{*dpp, "dbconfig:sqlite:read_default_realm_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["def_realm_sel"]; if (!stmt) { static 
constexpr std::string_view sql = schema::default_realm_select0; stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); realm_id = sqlite::column_text(reset, 0); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default realm select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::delete_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y) { Prefix prefix{*dpp, "dbconfig:sqlite:delete_default_realm_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["def_realm_del"]; if (!stmt) { static constexpr std::string_view sql = schema::default_realm_delete0; stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { return -ENOENT; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default realm delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::create_realm(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:create_realm "}; dpp = &prefix; if (info.id.empty()) { ldpp_dout(dpp, 0) << "realm cannot have an empty id" << dendl; return -EINVAL; } if (info.name.empty()) { ldpp_dout(dpp, 0) << "realm cannot have an empty name" << dendl; return -EINVAL; } int ver = 1; auto tag = generate_version_tag(dpp->get_cct()); try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["realm_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::realm_insert4, P1, P2, P3, P4); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = 
&conn->statements["realm_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::realm_upsert4, P1, P2, P3, P4); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_text(dpp, binding, P2, info.name); sqlite::bind_int(dpp, binding, P3, ver); sqlite::bind_text(dpp, binding, P4, tag); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::primary_key_constraint) { return -EEXIST; // ID already taken } else if (e.code() == sqlite::errc::unique_constraint) { return -EEXIST; // Name already taken } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } if (writer) { *writer = std::make_unique<SQLiteRealmWriter>( impl.get(), ver, std::move(tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_realm_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_realm_by_id "}; dpp = &prefix; if (realm_id.empty()) { ldpp_dout(dpp, 0) << "requires a realm id" << dendl; return -EINVAL; } RealmRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["realm_sel_id"]; if (!stmt) { const std::string sql = fmt::format(schema::realm_select_id1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_realm_row(reset, row); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "realm decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm select failed: " << e.what() << dendl; if 
(e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteRealmWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } static void realm_select_by_name(const DoutPrefixProvider* dpp, sqlite::Connection& conn, std::string_view realm_name, RealmRow& row) { auto& stmt = conn.statements["realm_sel_name"]; if (!stmt) { const std::string sql = fmt::format(schema::realm_select_name1, P1); stmt = sqlite::prepare_statement(dpp, conn.db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, realm_name); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_realm_row(reset, row); } int SQLiteConfigStore::read_realm_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_realm_by_name "}; dpp = &prefix; if (realm_name.empty()) { ldpp_dout(dpp, 0) << "requires a realm name" << dendl; return -EINVAL; } RealmRow row; try { auto conn = impl->get(dpp); realm_select_by_name(dpp, *conn, realm_name, row); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "realm decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteRealmWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_default_realm(const DoutPrefixProvider* dpp, optional_yield y, RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) { Prefix prefix{*dpp, 
"dbconfig:sqlite:read_default_realm "}; dpp = &prefix; RealmRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["realm_sel_def"]; if (!stmt) { static constexpr std::string_view sql = schema::realm_select_default0; stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_realm_row(reset, row); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "realm decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteRealmWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, std::string& realm_id) { Prefix prefix{*dpp, "dbconfig:sqlite:read_realm_id "}; dpp = &prefix; if (realm_name.empty()) { ldpp_dout(dpp, 0) << "requires a realm name" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); RealmRow row; realm_select_by_name(dpp, *conn, realm_name, row); realm_id = std::move(row.info.id); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "realm decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::realm_notify_new_period(const DoutPrefixProvider* dpp, optional_yield y, const RGWPeriod& period) { return -ENOTSUP; } int SQLiteConfigStore::list_realm_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, 
std::span<std::string> entries, sal::ListResult<std::string>& result) { Prefix prefix{*dpp, "dbconfig:sqlite:list_realm_names "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["realm_sel_names"]; if (!stmt) { const std::string sql = fmt::format(schema::realm_select_names2, P1, P2); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, marker); sqlite::bind_int(dpp, binding, P2, entries.size()); auto reset = sqlite::stmt_execution{stmt.get()}; read_text_rows(dpp, reset, entries, result); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "realm select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } // Period int SQLiteConfigStore::create_period(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWPeriod& info) { Prefix prefix{*dpp, "dbconfig:sqlite:create_period "}; dpp = &prefix; if (info.id.empty()) { ldpp_dout(dpp, 0) << "period cannot have an empty id" << dendl; return -EINVAL; } bufferlist bl; encode(info, bl); const auto data = std::string_view{bl.c_str(), bl.length()}; try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["period_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::period_insert4, P1, P2, P3, P4); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = &conn->statements["period_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::period_upsert4, P1, P2, P3, P4); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_int(dpp, binding, P2, info.epoch); sqlite::bind_text(dpp, binding, P3, info.realm_id); sqlite::bind_text(dpp, binding, P4, data); auto reset = sqlite::stmt_execution{stmt->get()}; 
sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "period insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::foreign_key_constraint) { return -EINVAL; // refers to nonexistent RealmID } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } static void period_select_epoch(const DoutPrefixProvider* dpp, sqlite::Connection& conn, std::string_view id, uint32_t epoch, RGWPeriod& row) { auto& stmt = conn.statements["period_sel_epoch"]; if (!stmt) { const std::string sql = fmt::format(schema::period_select_epoch2, P1, P2); stmt = sqlite::prepare_statement(dpp, conn.db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, id); sqlite::bind_int(dpp, binding, P2, epoch); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_period_row(reset, row); } static void period_select_latest(const DoutPrefixProvider* dpp, sqlite::Connection& conn, std::string_view id, RGWPeriod& row) { auto& stmt = conn.statements["period_sel_latest"]; if (!stmt) { const std::string sql = fmt::format(schema::period_select_latest1, P1); stmt = sqlite::prepare_statement(dpp, conn.db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_period_row(reset, row); } int SQLiteConfigStore::read_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id, std::optional<uint32_t> epoch, RGWPeriod& info) { Prefix prefix{*dpp, "dbconfig:sqlite:read_period "}; dpp = &prefix; if (period_id.empty()) { ldpp_dout(dpp, 0) << "requires a period id" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); if (epoch) { period_select_epoch(dpp, *conn, period_id, *epoch, info); } else { period_select_latest(dpp, *conn, period_id, info); } } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "period decode 
failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "period select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::delete_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id) { Prefix prefix{*dpp, "dbconfig:sqlite:delete_period "}; dpp = &prefix; if (period_id.empty()) { ldpp_dout(dpp, 0) << "requires a period id" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["period_del"]; if (!stmt) { const std::string sql = fmt::format(schema::period_delete1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, period_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { return -ENOENT; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "period delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::list_period_ids(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, sal::ListResult<std::string>& result) { Prefix prefix{*dpp, "dbconfig:sqlite:list_period_ids "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["period_sel_ids"]; if (!stmt) { const std::string sql = fmt::format(schema::period_select_ids2, P1, P2); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, marker); sqlite::bind_int(dpp, binding, P2, entries.size()); auto reset = sqlite::stmt_execution{stmt.get()}; read_text_rows(dpp, reset, entries, result); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << 
"period select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } // ZoneGroup class SQLiteZoneGroupWriter : public sal::ZoneGroupWriter { SQLiteImpl* impl; int ver; std::string tag; std::string zonegroup_id; std::string zonegroup_name; public: SQLiteZoneGroupWriter(SQLiteImpl* impl, int ver, std::string tag, std::string_view zonegroup_id, std::string_view zonegroup_name) : impl(impl), ver(ver), tag(std::move(tag)), zonegroup_id(zonegroup_id), zonegroup_name(zonegroup_name) {} int write(const DoutPrefixProvider* dpp, optional_yield y, const RGWZoneGroup& info) override { Prefix prefix{*dpp, "dbconfig:sqlite:zonegroup_write "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } if (zonegroup_id != info.id || zonegroup_name != info.name) { return -EINVAL; // can't modify zonegroup id or name directly } bufferlist bl; encode(info, bl); const auto data = std::string_view{bl.c_str(), bl.length()}; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_upd"]; if (!stmt) { const std::string sql = fmt::format(schema::zonegroup_update5, P1, P2, P3, P4, P5); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, info.id); bind_text_or_null(dpp, binding, P2, info.realm_id); sqlite::bind_text(dpp, binding, P3, data); sqlite::bind_int(dpp, binding, P4, ver); sqlite::bind_text(dpp, binding, P5, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch impl = nullptr; return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup update failed: " << e.what() << dendl; if (e.code() == sqlite::errc::foreign_key_constraint) { return -EINVAL; // refers to nonexistent RealmID } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 
0; } int rename(const DoutPrefixProvider* dpp, optional_yield y, RGWZoneGroup& info, std::string_view new_name) override { Prefix prefix{*dpp, "dbconfig:sqlite:zonegroup_rename "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } if (zonegroup_id != info.get_id() || zonegroup_name != info.get_name()) { return -EINVAL; // can't modify zonegroup id or name directly } if (new_name.empty()) { ldpp_dout(dpp, 0) << "zonegroup cannot have an empty name" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_rename"]; if (!stmt) { const std::string sql = fmt::format(schema::zonegroup_rename4, P1, P2, P3, P4); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_text(dpp, binding, P2, new_name); sqlite::bind_int(dpp, binding, P3, ver); sqlite::bind_text(dpp, binding, P4, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch impl = nullptr; return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup rename failed: " << e.what() << dendl; if (e.code() == sqlite::errc::unique_constraint) { return -EEXIST; // Name already taken } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info.name = std::string{new_name}; return 0; } int remove(const DoutPrefixProvider* dpp, optional_yield y) override { Prefix prefix{*dpp, "dbconfig:sqlite:zonegroup_remove "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_del"]; if (!stmt) { const std::string sql = fmt::format(schema::zonegroup_delete3, P1, P2, P3); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; 
sqlite::bind_text(dpp, binding, P1, zonegroup_id); sqlite::bind_int(dpp, binding, P2, ver); sqlite::bind_text(dpp, binding, P3, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); impl = nullptr; if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } }; // SQLiteZoneGroupWriter int SQLiteConfigStore::write_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zonegroup_id) { Prefix prefix{*dpp, "dbconfig:sqlite:write_default_zonegroup_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["def_zonegroup_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::default_zonegroup_insert2, P1, P2); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = &conn->statements["def_zonegroup_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::default_zonegroup_upsert2, P1, P2); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; bind_text_or_null(dpp, binding, P1, realm_id); sqlite::bind_text(dpp, binding, P2, zonegroup_id); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default zonegroup insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::read_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zonegroup_id) { Prefix prefix{*dpp, "dbconfig:sqlite:read_default_zonegroup_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& 
stmt = conn->statements["def_zonegroup_sel"]; if (!stmt) { const std::string sql = fmt::format(schema::default_zonegroup_select1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; bind_text_or_null(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); zonegroup_id = sqlite::column_text(reset, 0); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default zonegroup select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::delete_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) { Prefix prefix{*dpp, "dbconfig:sqlite:delete_default_zonegroup_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["def_zonegroup_del"]; if (!stmt) { const std::string sql = fmt::format(schema::default_zonegroup_delete1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; bind_text_or_null(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { return -ENOENT; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default zonegroup delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::create_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:create_zonegroup "}; dpp = &prefix; if (info.id.empty()) { ldpp_dout(dpp, 0) << "zonegroup cannot have an empty id" << dendl; return -EINVAL; } if (info.name.empty()) { ldpp_dout(dpp, 0) << "zonegroup cannot have an empty name" << 
dendl; return -EINVAL; } int ver = 1; auto tag = generate_version_tag(dpp->get_cct()); bufferlist bl; encode(info, bl); const auto data = std::string_view{bl.c_str(), bl.length()}; try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["zonegroup_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::zonegroup_insert6, P1, P2, P3, P4, P5, P6); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = &conn->statements["zonegroup_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::zonegroup_upsert6, P1, P2, P3, P4, P5, P6); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_text(dpp, binding, P2, info.name); bind_text_or_null(dpp, binding, P3, info.realm_id); sqlite::bind_text(dpp, binding, P4, data); sqlite::bind_int(dpp, binding, P5, ver); sqlite::bind_text(dpp, binding, P6, tag); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::foreign_key_constraint) { return -EINVAL; // refers to nonexistent RealmID } else if (e.code() == sqlite::errc::primary_key_constraint) { return -EEXIST; // ID already taken } else if (e.code() == sqlite::errc::unique_constraint) { return -EEXIST; // Name already taken } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } if (writer) { *writer = std::make_unique<SQLiteZoneGroupWriter>( impl.get(), ver, std::move(tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_zonegroup_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_id, RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_zonegroup_by_id "}; dpp = &prefix; if 
(zonegroup_id.empty()) { ldpp_dout(dpp, 0) << "requires a zonegroup id" << dendl; return -EINVAL; } ZoneGroupRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_sel_id"]; if (!stmt) { const std::string sql = fmt::format(schema::zonegroup_select_id1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, zonegroup_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_zonegroup_row(reset, row); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "zonegroup decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteZoneGroupWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_zonegroup_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_name, RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_zonegroup_by_name "}; dpp = &prefix; if (zonegroup_name.empty()) { ldpp_dout(dpp, 0) << "requires a zonegroup name" << dendl; return -EINVAL; } ZoneGroupRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_sel_name"]; if (!stmt) { const std::string sql = fmt::format(schema::zonegroup_select_name1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, zonegroup_name); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_zonegroup_row(reset, row); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "zonegroup 
decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteZoneGroupWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_default_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_default_zonegroup "}; dpp = &prefix; ZoneGroupRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_sel_def"]; if (!stmt) { static constexpr std::string_view sql = schema::zonegroup_select_default0; stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_zonegroup_row(reset, row); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "zonegroup decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteZoneGroupWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::list_zonegroup_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, sal::ListResult<std::string>& result) { Prefix prefix{*dpp, "dbconfig:sqlite:list_zonegroup_names "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zonegroup_sel_names"]; if (!stmt) { const 
std::string sql = fmt::format(schema::zonegroup_select_names2, P1, P2); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::bind_text(dpp, binding, P1, marker); sqlite::bind_int(dpp, binding, P2, entries.size()); read_text_rows(dpp, reset, entries, result); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zonegroup select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } // Zone class SQLiteZoneWriter : public sal::ZoneWriter { SQLiteImpl* impl; int ver; std::string tag; std::string zone_id; std::string zone_name; public: SQLiteZoneWriter(SQLiteImpl* impl, int ver, std::string tag, std::string_view zone_id, std::string_view zone_name) : impl(impl), ver(ver), tag(std::move(tag)), zone_id(zone_id), zone_name(zone_name) {} int write(const DoutPrefixProvider* dpp, optional_yield y, const RGWZoneParams& info) override { Prefix prefix{*dpp, "dbconfig:sqlite:zone_write "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } if (zone_id != info.id || zone_name != info.name) { return -EINVAL; // can't modify zone id or name directly } bufferlist bl; encode(info, bl); const auto data = std::string_view{bl.c_str(), bl.length()}; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_upd"]; if (!stmt) { const std::string sql = fmt::format(schema::zone_update5, P1, P2, P3, P4, P5); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, info.id); bind_text_or_null(dpp, binding, P2, info.realm_id); sqlite::bind_text(dpp, binding, P3, data); sqlite::bind_int(dpp, binding, P4, ver); sqlite::bind_text(dpp, binding, P5, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { // 
VersionNumber/Tag mismatch impl = nullptr; return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone update failed: " << e.what() << dendl; if (e.code() == sqlite::errc::foreign_key_constraint) { return -EINVAL; // refers to nonexistent RealmID } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } ++ver; return 0; } int rename(const DoutPrefixProvider* dpp, optional_yield y, RGWZoneParams& info, std::string_view new_name) override { Prefix prefix{*dpp, "dbconfig:sqlite:zone_rename "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after conflict or delete } if (zone_id != info.id || zone_name != info.name) { return -EINVAL; // can't modify zone id or name directly } if (new_name.empty()) { ldpp_dout(dpp, 0) << "zonegroup cannot have an empty name" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_rename"]; if (!stmt) { const std::string sql = fmt::format(schema::zone_rename4, P1, P2, P2, P3); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_text(dpp, binding, P2, new_name); sqlite::bind_int(dpp, binding, P3, ver); sqlite::bind_text(dpp, binding, P4, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch impl = nullptr; return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone rename failed: " << e.what() << dendl; if (e.code() == sqlite::errc::unique_constraint) { return -EEXIST; // Name already taken } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info.name = std::string{new_name}; ++ver; return 0; } int remove(const DoutPrefixProvider* dpp, optional_yield y) override { Prefix prefix{*dpp, "dbconfig:sqlite:zone_remove "}; dpp = &prefix; if (!impl) { return -EINVAL; // can't write after 
conflict or delete } try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_del"]; if (!stmt) { const std::string sql = fmt::format(schema::zone_delete3, P1, P2, P3); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, zone_id); sqlite::bind_int(dpp, binding, P2, ver); sqlite::bind_text(dpp, binding, P3, tag); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); impl = nullptr; if (!::sqlite3_changes(conn->db.get())) { // VersionNumber/Tag mismatch return -ECANCELED; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } }; // SQLiteZoneWriter int SQLiteConfigStore::write_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zone_id) { Prefix prefix{*dpp, "dbconfig:sqlite:write_default_zone_id "}; dpp = &prefix; if (zone_id.empty()) { ldpp_dout(dpp, 0) << "requires a zone id" << dendl; return -EINVAL; } try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["def_zone_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::default_zone_insert2, P1, P2); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = &conn->statements["def_zone_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::default_zone_upsert2, P1, P2); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; bind_text_or_null(dpp, binding, P1, realm_id); sqlite::bind_text(dpp, binding, P2, zone_id); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default zone insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) 
{ return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::read_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zone_id) { Prefix prefix{*dpp, "dbconfig:sqlite:read_default_zone_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["def_zone_sel"]; if (!stmt) { const std::string sql = fmt::format(schema::default_zone_select1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; bind_text_or_null(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); zone_id = sqlite::column_text(reset, 0); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default zone select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::delete_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) { Prefix prefix{*dpp, "dbconfig:sqlite:delete_default_zone_id "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["def_zone_del"]; if (!stmt) { const std::string sql = fmt::format(schema::default_zone_delete1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; bind_text_or_null(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval0(dpp, reset); if (!::sqlite3_changes(conn->db.get())) { return -ENOENT; } } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "default zone delete failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::create_zone(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) { Prefix 
prefix{*dpp, "dbconfig:sqlite:create_zone "}; dpp = &prefix; if (info.id.empty()) { ldpp_dout(dpp, 0) << "zone cannot have an empty id" << dendl; return -EINVAL; } if (info.name.empty()) { ldpp_dout(dpp, 0) << "zone cannot have an empty name" << dendl; return -EINVAL; } int ver = 1; auto tag = generate_version_tag(dpp->get_cct()); bufferlist bl; encode(info, bl); const auto data = std::string_view{bl.c_str(), bl.length()}; try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["zone_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::zone_insert6, P1, P2, P3, P4, P5, P6); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = &conn->statements["zone_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::zone_upsert6, P1, P2, P3, P4, P5, P6); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; sqlite::bind_text(dpp, binding, P1, info.id); sqlite::bind_text(dpp, binding, P2, info.name); bind_text_or_null(dpp, binding, P3, info.realm_id); sqlite::bind_text(dpp, binding, P4, data); sqlite::bind_int(dpp, binding, P5, ver); sqlite::bind_text(dpp, binding, P6, tag); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::foreign_key_constraint) { return -EINVAL; // refers to nonexistent RealmID } else if (e.code() == sqlite::errc::primary_key_constraint) { return -EEXIST; // ID already taken } else if (e.code() == sqlite::errc::unique_constraint) { return -EEXIST; // Name already taken } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } if (writer) { *writer = std::make_unique<SQLiteZoneWriter>( impl.get(), ver, std::move(tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_zone_by_id(const DoutPrefixProvider* dpp, 
optional_yield y, std::string_view zone_id, RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_zone_by_id "}; dpp = &prefix; if (zone_id.empty()) { ldpp_dout(dpp, 0) << "requires a zone id" << dendl; return -EINVAL; } ZoneRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_sel_id"]; if (!stmt) { const std::string sql = fmt::format(schema::zone_select_id1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, zone_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_zone_row(reset, row); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteZoneWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_zone_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_name, RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_zone_by_name "}; dpp = &prefix; if (zone_name.empty()) { ldpp_dout(dpp, 0) << "requires a zone name" << dendl; return -EINVAL; } ZoneRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_sel_name"]; if (!stmt) { const std::string sql = fmt::format(schema::zone_select_name1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, zone_name); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_zone_row(reset, row); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone select failed: " << e.what() << dendl; if 
(e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteZoneWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::read_default_zone(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) { Prefix prefix{*dpp, "dbconfig:sqlite:read_default_zone "}; dpp = &prefix; ZoneRow row; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_sel_def"]; if (!stmt) { static constexpr std::string_view sql = schema::zone_select_default0; stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); read_zone_row(reset, row); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } info = std::move(row.info); if (writer) { *writer = std::make_unique<SQLiteZoneWriter>( impl.get(), row.ver, std::move(row.tag), info.id, info.name); } return 0; } int SQLiteConfigStore::list_zone_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, sal::ListResult<std::string>& result) { Prefix prefix{*dpp, "dbconfig:sqlite:list_zone_names "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["zone_sel_names"]; if (!stmt) { const std::string sql = fmt::format(schema::zone_select_names2, P1, P2); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, marker); sqlite::bind_int(dpp, binding, P2, entries.size()); auto reset = sqlite::stmt_execution{stmt.get()}; read_text_rows(dpp, reset, 
entries, result); } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "zone select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } // PeriodConfig int SQLiteConfigStore::read_period_config(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWPeriodConfig& info) { Prefix prefix{*dpp, "dbconfig:sqlite:read_period_config "}; dpp = &prefix; try { auto conn = impl->get(dpp); auto& stmt = conn->statements["period_conf_sel"]; if (!stmt) { const std::string sql = fmt::format(schema::period_config_select1, P1); stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } auto binding = sqlite::stmt_binding{stmt.get()}; sqlite::bind_text(dpp, binding, P1, realm_id); auto reset = sqlite::stmt_execution{stmt.get()}; sqlite::eval1(dpp, reset); std::string data = sqlite::column_text(reset, 0); bufferlist bl = bufferlist::static_from_string(data); auto p = bl.cbegin(); decode(info, p); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "period config decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "period config select failed: " << e.what() << dendl; if (e.code() == sqlite::errc::done) { return -ENOENT; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } int SQLiteConfigStore::write_period_config(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, const RGWPeriodConfig& info) { Prefix prefix{*dpp, "dbconfig:sqlite:write_period_config "}; dpp = &prefix; bufferlist bl; encode(info, bl); const auto data = std::string_view{bl.c_str(), bl.length()}; try { auto conn = impl->get(dpp); sqlite::stmt_ptr* stmt = nullptr; if (exclusive) { stmt = &conn->statements["period_conf_ins"]; if (!*stmt) { const std::string sql = fmt::format(schema::period_config_insert2, P1, P2); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } else { stmt = 
&conn->statements["period_conf_ups"]; if (!*stmt) { const std::string sql = fmt::format(schema::period_config_upsert2, P1, P2); *stmt = sqlite::prepare_statement(dpp, conn->db.get(), sql); } } auto binding = sqlite::stmt_binding{stmt->get()}; sqlite::bind_text(dpp, binding, P1, realm_id); sqlite::bind_text(dpp, binding, P2, data); auto reset = sqlite::stmt_execution{stmt->get()}; sqlite::eval0(dpp, reset); } catch (const buffer::error& e) { ldpp_dout(dpp, 20) << "period config decode failed: " << e.what() << dendl; return -EIO; } catch (const sqlite::error& e) { ldpp_dout(dpp, 20) << "period config insert failed: " << e.what() << dendl; if (e.code() == sqlite::errc::primary_key_constraint) { return -EEXIST; } else if (e.code() == sqlite::errc::busy) { return -EBUSY; } return -EIO; } return 0; } namespace { int version_cb(void* user, int count, char** values, char** names) { if (count != 1) { return EINVAL; } std::string_view name = names[0]; if (name != "user_version") { return EINVAL; } std::string_view value = values[0]; auto result = std::from_chars(value.begin(), value.end(), *reinterpret_cast<uint32_t*>(user)); if (result.ec != std::errc{}) { return static_cast<int>(result.ec); } return 0; } void apply_schema_migrations(const DoutPrefixProvider* dpp, sqlite3* db) { sqlite::execute(dpp, db, "PRAGMA foreign_keys = ON", nullptr, nullptr); // initiate a transaction and read the current schema version uint32_t version = 0; sqlite::execute(dpp, db, "BEGIN; PRAGMA user_version", version_cb, &version); const uint32_t initial_version = version; ldpp_dout(dpp, 4) << "current schema version " << version << dendl; // use the version as an index into schema::migrations auto m = std::next(schema::migrations.begin(), version); for (; m != schema::migrations.end(); ++m, ++version) { try { sqlite::execute(dpp, db, m->up, nullptr, nullptr); } catch (const sqlite::error&) { ldpp_dout(dpp, -1) << "ERROR: schema migration failed on v" << version << ": " << m->description << dendl; 
throw; } } if (version > initial_version) { // update the user_version and commit the transaction const auto commit = fmt::format("PRAGMA user_version = {}; COMMIT", version); sqlite::execute(dpp, db, commit.c_str(), nullptr, nullptr); ldpp_dout(dpp, 4) << "upgraded database schema to version " << version << dendl; } else { // nothing to commit sqlite::execute(dpp, db, "ROLLBACK", nullptr, nullptr); } } } // anonymous namespace auto create_sqlite_store(const DoutPrefixProvider* dpp, const std::string& uri) -> std::unique_ptr<config::SQLiteConfigStore> { Prefix prefix{*dpp, "dbconfig:sqlite:create_sqlite_store "}; dpp = &prefix; // build the connection pool int flags = SQLITE_OPEN_CREATE | SQLITE_OPEN_URI | SQLITE_OPEN_READWRITE | SQLITE_OPEN_NOMUTEX; auto factory = sqlite::ConnectionFactory{uri, flags}; // sqlite does not support concurrent writers. we enforce this limitation by // using a connection pool of size=1 static constexpr size_t max_connections = 1; auto impl = std::make_unique<SQLiteImpl>(std::move(factory), max_connections); // open a connection to apply schema migrations auto conn = impl->get(dpp); apply_schema_migrations(dpp, conn->db.get()); return std::make_unique<SQLiteConfigStore>(std::move(impl)); } } // namespace rgw::dbstore::config
68,857
32.073007
92
cc
null
ceph-main/src/rgw/driver/dbstore/config/sqlite.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "rgw_sal_config.h" class DoutPrefixProvider; namespace rgw::dbstore::config { struct SQLiteImpl; class SQLiteConfigStore : public sal::ConfigStore { public: explicit SQLiteConfigStore(std::unique_ptr<SQLiteImpl> impl); ~SQLiteConfigStore() override; int write_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id) override; int read_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string& realm_id) override; int delete_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y) override; int create_realm(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) override; int read_realm_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) override; int read_realm_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) override; int read_default_realm(const DoutPrefixProvider* dpp, optional_yield y, RGWRealm& info, std::unique_ptr<sal::RealmWriter>* writer) override; int read_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, std::string& realm_id) override; int realm_notify_new_period(const DoutPrefixProvider* dpp, optional_yield y, const RGWPeriod& period) override; int list_realm_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, 
sal::ListResult<std::string>& result) override; int create_period(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWPeriod& info) override; int read_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id, std::optional<uint32_t> epoch, RGWPeriod& info) override; int delete_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id) override; int list_period_ids(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, sal::ListResult<std::string>& result) override; int write_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zonegroup_id) override; int read_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zonegroup_id) override; int delete_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) override; int create_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) override; int read_zonegroup_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_id, RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) override; int read_zonegroup_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_name, RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) override; int read_default_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneGroup& info, std::unique_ptr<sal::ZoneGroupWriter>* writer) override; int list_zonegroup_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, sal::ListResult<std::string>& result) override; int write_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, bool 
exclusive, std::string_view realm_id, std::string_view zone_id) override; int read_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zone_id) override; int delete_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) override; int create_zone(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) override; int read_zone_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_id, RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) override; int read_zone_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_name, RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) override; int read_default_zone(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneParams& info, std::unique_ptr<sal::ZoneWriter>* writer) override; int list_zone_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, sal::ListResult<std::string>& result) override; int read_period_config(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWPeriodConfig& info) override; int write_period_config(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, const RGWPeriodConfig& info) override; private: std::unique_ptr<SQLiteImpl> impl; }; // SQLiteConfigStore auto create_sqlite_store(const DoutPrefixProvider* dpp, const std::string& uri) -> std::unique_ptr<config::SQLiteConfigStore>; } // namespace rgw::dbstore::config
8,134
46.023121
85
h
null
ceph-main/src/rgw/driver/dbstore/config/sqlite_schema.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <initializer_list> namespace rgw::dbstore::config::schema { struct Migration { // human-readable description to help with debugging migration errors const char* description = nullptr; // series of sql statements to apply the schema migration const char* up = nullptr; // series of sql statements to undo the schema migration const char* down = nullptr; }; static constexpr std::initializer_list<Migration> migrations {{ .description = "create the initial ConfigStore tables", .up = R"( CREATE TABLE IF NOT EXISTS Realms ( ID TEXT PRIMARY KEY NOT NULL, Name TEXT UNIQUE NOT NULL, CurrentPeriod TEXT, Epoch INTEGER DEFAULT 0, VersionNumber INTEGER, VersionTag TEXT ); CREATE TABLE IF NOT EXISTS Periods ( ID TEXT NOT NULL, Epoch INTEGER DEFAULT 0, RealmID TEXT NOT NULL REFERENCES Realms (ID), Data TEXT NOT NULL, PRIMARY KEY (ID, Epoch) ); CREATE TABLE IF NOT EXISTS PeriodConfigs ( RealmID TEXT PRIMARY KEY NOT NULL REFERENCES Realms (ID), Data TEXT NOT NULL ); CREATE TABLE IF NOT EXISTS ZoneGroups ( ID TEXT PRIMARY KEY NOT NULL, Name TEXT UNIQUE NOT NULL, RealmID TEXT REFERENCES Realms (ID), Data TEXT NOT NULL, VersionNumber INTEGER, VersionTag TEXT ); CREATE TABLE IF NOT EXISTS Zones ( ID TEXT PRIMARY KEY NOT NULL, Name TEXT UNIQUE NOT NULL, RealmID TEXT REFERENCES Realms (ID), Data TEXT NOT NULL, VersionNumber INTEGER, VersionTag TEXT ); CREATE TABLE IF NOT EXISTS DefaultRealms ( ID TEXT, Empty TEXT PRIMARY KEY ); CREATE TABLE IF NOT EXISTS DefaultZoneGroups ( ID TEXT, RealmID TEXT PRIMARY KEY REFERENCES Realms (ID) ); CREATE TABLE IF NOT EXISTS DefaultZones ( 
ID TEXT, RealmID TEXT PRIMARY KEY REFERENCES Realms (ID) ); )", .down = R"( DROP TABLE IF EXISTS Realms; DROP TABLE IF EXISTS Periods; DROP TABLE IF EXISTS PeriodConfigs; DROP TABLE IF EXISTS ZoneGroups; DROP TABLE IF EXISTS Zones; DROP TABLE IF EXISTS DefaultRealms; DROP TABLE IF EXISTS DefaultZoneGroups; DROP TABLE IF EXISTS DefaultZones; )" } }; // DefaultRealms static constexpr const char* default_realm_insert1 = "INSERT INTO DefaultRealms (ID, Empty) VALUES ({}, '')"; static constexpr const char* default_realm_upsert1 = R"(INSERT INTO DefaultRealms (ID, Empty) VALUES ({0}, '') ON CONFLICT(Empty) DO UPDATE SET ID = {0})"; static constexpr const char* default_realm_select0 = "SELECT ID FROM DefaultRealms LIMIT 1"; static constexpr const char* default_realm_delete0 = "DELETE FROM DefaultRealms"; // Realms static constexpr const char* realm_update5 = "UPDATE Realms SET CurrentPeriod = {1}, Epoch = {2}, VersionNumber = {3} + 1 \ WHERE ID = {0} AND VersionNumber = {3} AND VersionTag = {4}"; static constexpr const char* realm_rename4 = "UPDATE Realms SET Name = {1}, VersionNumber = {2} + 1 \ WHERE ID = {0} AND VersionNumber = {2} AND VersionTag = {3}"; static constexpr const char* realm_delete3 = "DELETE FROM Realms WHERE ID = {} AND VersionNumber = {} AND VersionTag = {}"; static constexpr const char* realm_insert4 = "INSERT INTO Realms (ID, Name, VersionNumber, VersionTag) \ VALUES ({}, {}, {}, {})"; static constexpr const char* realm_upsert4 = "INSERT INTO Realms (ID, Name, VersionNumber, VersionTag) \ VALUES ({0}, {1}, {2}, {3}) \ ON CONFLICT(ID) DO UPDATE SET Name = {1}, \ VersionNumber = {2}, VersionTag = {3}"; static constexpr const char* realm_select_id1 = "SELECT * FROM Realms WHERE ID = {} LIMIT 1"; static constexpr const char* realm_select_name1 = "SELECT * FROM Realms WHERE Name = {} LIMIT 1"; static constexpr const char* realm_select_default0 = "SELECT r.* FROM Realms r \ INNER JOIN DefaultRealms d \ ON d.ID = r.ID LIMIT 1"; static constexpr const char* 
realm_select_names2 = "SELECT Name FROM Realms WHERE Name > {} \ ORDER BY Name ASC LIMIT {}"; // Periods static constexpr const char* period_insert4 = "INSERT INTO Periods (ID, Epoch, RealmID, Data) \ VALUES ({}, {}, {}, {})"; static constexpr const char* period_upsert4 = "INSERT INTO Periods (ID, Epoch, RealmID, Data) \ VALUES ({0}, {1}, {2}, {3}) \ ON CONFLICT DO UPDATE SET RealmID = {2}, Data = {3}"; static constexpr const char* period_select_epoch2 = "SELECT * FROM Periods WHERE ID = {} AND Epoch = {} LIMIT 1"; static constexpr const char* period_select_latest1 = "SELECT * FROM Periods WHERE ID = {} ORDER BY Epoch DESC LIMIT 1"; static constexpr const char* period_delete1 = "DELETE FROM Periods WHERE ID = {}"; static constexpr const char* period_select_ids2 = "SELECT ID FROM Periods WHERE ID > {} ORDER BY ID ASC LIMIT {}"; // DefaultZoneGroups static constexpr const char* default_zonegroup_insert2 = "INSERT INTO DefaultZoneGroups (RealmID, ID) VALUES ({}, {})"; static constexpr const char* default_zonegroup_upsert2 = "INSERT INTO DefaultZoneGroups (RealmID, ID) \ VALUES ({0}, {1}) \ ON CONFLICT(RealmID) DO UPDATE SET ID = {1}"; static constexpr const char* default_zonegroup_select1 = "SELECT ID FROM DefaultZoneGroups WHERE RealmID = {}"; static constexpr const char* default_zonegroup_delete1 = "DELETE FROM DefaultZoneGroups WHERE RealmID = {}"; // ZoneGroups static constexpr const char* zonegroup_update5 = "UPDATE ZoneGroups SET RealmID = {1}, Data = {2}, VersionNumber = {3} + 1 \ WHERE ID = {0} AND VersionNumber = {3} AND VersionTag = {4}"; static constexpr const char* zonegroup_rename4 = "UPDATE ZoneGroups SET Name = {1}, VersionNumber = {2} + 1 \ WHERE ID = {0} AND VersionNumber = {2} AND VersionTag = {3}"; static constexpr const char* zonegroup_delete3 = "DELETE FROM ZoneGroups WHERE ID = {} \ AND VersionNumber = {} AND VersionTag = {}"; static constexpr const char* zonegroup_insert6 = "INSERT INTO ZoneGroups (ID, Name, RealmID, Data, VersionNumber, 
VersionTag) \ VALUES ({}, {}, {}, {}, {}, {})"; static constexpr const char* zonegroup_upsert6 = "INSERT INTO ZoneGroups (ID, Name, RealmID, Data, VersionNumber, VersionTag) \ VALUES ({0}, {1}, {2}, {3}, {4}, {5}) \ ON CONFLICT (ID) DO UPDATE SET Name = {1}, RealmID = {2}, \ Data = {3}, VersionNumber = {4}, VersionTag = {5}"; static constexpr const char* zonegroup_select_id1 = "SELECT * FROM ZoneGroups WHERE ID = {} LIMIT 1"; static constexpr const char* zonegroup_select_name1 = "SELECT * FROM ZoneGroups WHERE Name = {} LIMIT 1"; static constexpr const char* zonegroup_select_default0 = "SELECT z.* FROM ZoneGroups z \ INNER JOIN DefaultZoneGroups d \ ON d.ID = z.ID LIMIT 1"; static constexpr const char* zonegroup_select_names2 = "SELECT Name FROM ZoneGroups WHERE Name > {} \ ORDER BY Name ASC LIMIT {}"; // DefaultZones static constexpr const char* default_zone_insert2 = "INSERT INTO DefaultZones (RealmID, ID) VALUES ({}, {})"; static constexpr const char* default_zone_upsert2 = "INSERT INTO DefaultZones (RealmID, ID) VALUES ({0}, {1}) \ ON CONFLICT(RealmID) DO UPDATE SET ID = {1}"; static constexpr const char* default_zone_select1 = "SELECT ID FROM DefaultZones WHERE RealmID = {}"; static constexpr const char* default_zone_delete1 = "DELETE FROM DefaultZones WHERE RealmID = {}"; // Zones static constexpr const char* zone_update5 = "UPDATE Zones SET RealmID = {1}, Data = {2}, VersionNumber = {3} + 1 \ WHERE ID = {0} AND VersionNumber = {3} AND VersionTag = {4}"; static constexpr const char* zone_rename4 = "UPDATE Zones SET Name = {1}, VersionNumber = {2} + 1 \ WHERE ID = {0} AND VersionNumber = {2} AND VersionTag = {3}"; static constexpr const char* zone_delete3 = "DELETE FROM Zones WHERE ID = {} AND VersionNumber = {} AND VersionTag = {}"; static constexpr const char* zone_insert6 = "INSERT INTO Zones (ID, Name, RealmID, Data, VersionNumber, VersionTag) \ VALUES ({}, {}, {}, {}, {}, {})"; static constexpr const char* zone_upsert6 = "INSERT INTO Zones (ID, Name, 
RealmID, Data, VersionNumber, VersionTag) \ VALUES ({0}, {1}, {2}, {3}, {4}, {5}) \ ON CONFLICT (ID) DO UPDATE SET Name = {1}, RealmID = {2}, \ Data = {3}, VersionNumber = {4}, VersionTag = {5}"; static constexpr const char* zone_select_id1 = "SELECT * FROM Zones WHERE ID = {} LIMIT 1"; static constexpr const char* zone_select_name1 = "SELECT * FROM Zones WHERE Name = {} LIMIT 1"; static constexpr const char* zone_select_default0 = "SELECT z.* FROM Zones z \ INNER JOIN DefaultZones d \ ON d.ID = z.ID LIMIT 1"; static constexpr const char* zone_select_names2 = "SELECT Name FROM Zones WHERE Name > {} \ ORDER BY Name ASC LIMIT {}"; // PeriodConfigs static constexpr const char* period_config_insert2 = "INSERT INTO PeriodConfigs (RealmID, Data) VALUES ({}, {})"; static constexpr const char* period_config_upsert2 = "INSERT INTO PeriodConfigs (RealmID, Data) VALUES ({0}, {1}) \ ON CONFLICT (RealmID) DO UPDATE SET Data = {1}"; static constexpr const char* period_config_select1 = "SELECT Data FROM PeriodConfigs WHERE RealmID = {} LIMIT 1"; } // namespace rgw::dbstore::config::schema
9,244
29.816667
78
h
null
ceph-main/src/rgw/driver/dbstore/config/store.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <stdexcept> #include <fmt/format.h> #include "store.h" #ifdef SQLITE_ENABLED #include "sqlite.h" #endif namespace rgw::dbstore { auto create_config_store(const DoutPrefixProvider* dpp, const std::string& uri) -> std::unique_ptr<sal::ConfigStore> { #ifdef SQLITE_ENABLED if (uri.starts_with("file:")) { return config::create_sqlite_store(dpp, uri); } #endif throw std::runtime_error(fmt::format("unrecognized URI {}", uri)); } } // namespace rgw::dbstore
892
21.897436
79
cc
null
ceph-main/src/rgw/driver/dbstore/config/store.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <memory> #include "rgw_sal_config.h" namespace rgw::dbstore { // ConfigStore factory auto create_config_store(const DoutPrefixProvider* dpp, const std::string& uri) -> std::unique_ptr<sal::ConfigStore>; } // namespace rgw::dbstore
671
23
79
h
null
ceph-main/src/rgw/driver/dbstore/sqlite/connection.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "common/dout.h" #include "connection.h" #include "error.h" namespace rgw::dbstore::sqlite { db_ptr open_database(const char* filename, int flags) { sqlite3* db = nullptr; const int result = ::sqlite3_open_v2(filename, &db, flags, nullptr); if (result != SQLITE_OK) { throw std::system_error(result, sqlite::error_category()); } // request extended result codes (void) ::sqlite3_extended_result_codes(db, 1); return db_ptr{db}; } } // namespace rgw::dbstore::sqlite
907
24.942857
70
cc
null
ceph-main/src/rgw/driver/dbstore/sqlite/connection.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <memory> #include <sqlite3.h> #include <fmt/format.h> #include "sqlite/statement.h" class DoutPrefixProvider; namespace rgw::dbstore::sqlite { // owning sqlite3 pointer struct db_deleter { void operator()(sqlite3* p) const { ::sqlite3_close(p); } }; using db_ptr = std::unique_ptr<sqlite3, db_deleter>; // open the database file or throw on error db_ptr open_database(const char* filename, int flags); struct Connection { db_ptr db; // map of statements, prepared on first use std::map<std::string_view, stmt_ptr> statements; explicit Connection(db_ptr db) : db(std::move(db)) {} }; // sqlite connection factory for ConnectionPool class ConnectionFactory { std::string uri; int flags; public: ConnectionFactory(std::string uri, int flags) : uri(std::move(uri)), flags(flags) {} auto operator()(const DoutPrefixProvider* dpp) -> std::unique_ptr<Connection> { auto db = open_database(uri.c_str(), flags); return std::make_unique<Connection>(std::move(db)); } }; } // namespace rgw::dbstore::sqlite
1,485
21.861538
70
h
null
ceph-main/src/rgw/driver/dbstore/sqlite/error.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "error.h" namespace rgw::dbstore::sqlite { const std::error_category& error_category() { struct category : std::error_category { const char* name() const noexcept override { return "dbstore:sqlite"; } std::string message(int ev) const override { return ::sqlite3_errstr(ev); } std::error_condition default_error_condition(int code) const noexcept override { return {code & 0xFF, category()}; } }; static category instance; return instance; } } // namespace rgw::dbstore::sqlite
952
24.078947
84
cc
null
ceph-main/src/rgw/driver/dbstore/sqlite/error.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <system_error> #include <sqlite3.h> namespace rgw::dbstore::sqlite { // error category for sqlite extended result codes: // https://www.sqlite.org/rescode.html const std::error_category& error_category(); // sqlite exception type that carries the extended error code and message class error : public std::runtime_error { std::error_code ec; public: error(const char* errmsg, std::error_code ec) : runtime_error(errmsg), ec(ec) {} error(sqlite3* db, std::error_code ec) : error(::sqlite3_errmsg(db), ec) {} error(sqlite3* db, int result) : error(db, {result, error_category()}) {} error(sqlite3* db) : error(db, ::sqlite3_extended_errcode(db)) {} std::error_code code() const { return ec; } }; // sqlite error conditions for primary and extended result codes // // 'primary' error_conditions will match 'primary' error_codes as well as any // 'extended' error_codes whose lowest 8 bits match that primary code. 
for // example, the error_condition for SQLITE_CONSTRAINT will match the error_codes // SQLITE_CONSTRAINT and SQLITE_CONSTRAINT_* enum class errc { // primary result codes ok = SQLITE_OK, busy = SQLITE_BUSY, constraint = SQLITE_CONSTRAINT, row = SQLITE_ROW, done = SQLITE_DONE, // extended result codes primary_key_constraint = SQLITE_CONSTRAINT_PRIMARYKEY, foreign_key_constraint = SQLITE_CONSTRAINT_FOREIGNKEY, unique_constraint = SQLITE_CONSTRAINT_UNIQUE, // ..add conditions as needed }; inline std::error_code make_error_code(errc e) { return {static_cast<int>(e), error_category()}; } inline std::error_condition make_error_condition(errc e) { return {static_cast<int>(e), error_category()}; } } // namespace rgw::dbstore::sqlite namespace std { // enable implicit conversions from sqlite::errc to std::error_condition template<> struct is_error_condition_enum< rgw::dbstore::sqlite::errc> : public true_type {}; } // namespace std
2,348
27.646341
80
h
null
ceph-main/src/rgw/driver/dbstore/sqlite/sqliteDB.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "sqliteDB.h" using namespace std; #define SQL_PREPARE(dpp, params, sdb, stmt, ret, Op) \ do { \ string schema; \ schema = Schema(params); \ sqlite3_prepare_v2 (*sdb, schema.c_str(), \ -1, &stmt , NULL); \ if (!stmt) { \ ldpp_dout(dpp, 0) <<"failed to prepare statement " \ <<"for Op("<<Op<<"); Errmsg -"\ <<sqlite3_errmsg(*sdb)<< dendl;\ ret = -1; \ goto out; \ } \ ldpp_dout(dpp, 20)<<"Successfully Prepared stmt for Op("<<Op \ <<") schema("<<schema<<") stmt("<<stmt<<")"<< dendl; \ ret = 0; \ } while(0); #define SQL_BIND_INDEX(dpp, stmt, index, str, sdb) \ do { \ index = sqlite3_bind_parameter_index(stmt, str); \ \ if (index <=0) { \ ldpp_dout(dpp, 0) <<"failed to fetch bind parameter"\ " index for str("<<str<<") in " \ <<"stmt("<<stmt<<"); Errmsg -" \ <<sqlite3_errmsg(*sdb)<< dendl; \ rc = -1; \ goto out; \ } \ ldpp_dout(dpp, 20)<<"Bind parameter index for str(" \ <<str<<") in stmt("<<stmt<<") is " \ <<index<< dendl; \ }while(0); #define SQL_BIND_TEXT(dpp, stmt, index, str, sdb) \ do { \ rc = sqlite3_bind_text(stmt, index, str, -1, SQLITE_TRANSIENT); \ if (rc != SQLITE_OK) { \ ldpp_dout(dpp, 0)<<"sqlite bind text failed for index(" \ <<index<<"), str("<<str<<") in stmt(" \ <<stmt<<"); Errmsg - "<<sqlite3_errmsg(*sdb) \ << dendl; \ rc = -1; \ goto out; \ } \ ldpp_dout(dpp, 20)<<"Bind parameter text for index(" \ <<index<<") in stmt("<<stmt<<") is " \ <<str<< dendl; \ }while(0); #define SQL_BIND_INT(dpp, stmt, index, num, sdb) \ do { \ rc = sqlite3_bind_int(stmt, index, num); \ \ if (rc != SQLITE_OK) { \ ldpp_dout(dpp, 0)<<"sqlite bind int failed for index(" \ <<index<<"), num("<<num<<") in stmt(" \ <<stmt<<"); Errmsg - "<<sqlite3_errmsg(*sdb) \ << dendl; \ rc = -1; \ goto out; \ } \ ldpp_dout(dpp, 20)<<"Bind parameter int for index(" \ <<index<<") in stmt("<<stmt<<") is " \ <<num<< dendl; \ }while(0); #define SQL_BIND_BLOB(dpp, stmt, index, blob, size, sdb) \ do 
{ \ rc = sqlite3_bind_blob(stmt, index, blob, size, SQLITE_TRANSIENT); \ \ if (rc != SQLITE_OK) { \ ldpp_dout(dpp, 0)<<"sqlite bind blob failed for index(" \ <<index<<"), blob("<<blob<<") in stmt(" \ <<stmt<<"); Errmsg - "<<sqlite3_errmsg(*sdb) \ << dendl; \ rc = -1; \ goto out; \ } \ }while(0); #define SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, param, sdb) \ do { \ bufferlist b; \ encode(param, b); \ SQL_BIND_BLOB(dpp, stmt, index, b.c_str(), b.length(), sdb); \ }while(0); #define SQL_READ_BLOB(dpp, stmt, index, void_ptr, len) \ do { \ void_ptr = NULL; \ void_ptr = (void *)sqlite3_column_blob(stmt, index); \ len = sqlite3_column_bytes(stmt, index); \ \ if (!void_ptr || len == 0) { \ ldpp_dout(dpp, 20)<<"Null value for blob index(" \ <<index<<") in stmt("<<stmt<<") "<< dendl; \ } \ }while(0); #define SQL_DECODE_BLOB_PARAM(dpp, stmt, index, param, sdb) \ do { \ bufferlist b; \ void *blob; \ int blob_len = 0; \ \ SQL_READ_BLOB(dpp, stmt, index, blob, blob_len); \ \ b.append(reinterpret_cast<char *>(blob), blob_len); \ \ decode(param, b); \ }while(0); #define SQL_EXECUTE(dpp, params, stmt, cbk, args...) 
\ do{ \ const std::lock_guard<std::mutex> lk(((DBOp*)(this))->mtx); \ if (!stmt) { \ ret = Prepare(dpp, params); \ } \ \ if (!stmt) { \ ldpp_dout(dpp, 0) <<"No prepared statement "<< dendl; \ goto out; \ } \ \ ret = Bind(dpp, params); \ if (ret) { \ ldpp_dout(dpp, 0) <<"Bind parameters failed for stmt(" <<stmt<<") "<< dendl; \ goto out; \ } \ \ ret = Step(dpp, params->op, stmt, cbk); \ \ Reset(dpp, stmt); \ \ if (ret) { \ ldpp_dout(dpp, 0) <<"Execution failed for stmt(" <<stmt<<")"<< dendl; \ goto out; \ } \ }while(0); int SQLiteDB::InitPrepareParams(const DoutPrefixProvider *dpp, DBOpPrepareParams &p_params, DBOpParams* params) { std::string bucket; if (!params) return -1; if (params->user_table.empty()) { params->user_table = getUserTable(); } if (params->user_table.empty()) { params->user_table = getUserTable(); } if (params->bucket_table.empty()) { params->bucket_table = getBucketTable(); } if (params->quota_table.empty()) { params->quota_table = getQuotaTable(); } if (params->lc_entry_table.empty()) { params->lc_entry_table = getLCEntryTable(); } if (params->lc_head_table.empty()) { params->lc_head_table = getLCHeadTable(); } p_params.user_table = params->user_table; p_params.bucket_table = params->bucket_table; p_params.quota_table = params->quota_table; p_params.lc_entry_table = params->lc_entry_table; p_params.lc_head_table = params->lc_head_table; p_params.op.query_str = params->op.query_str; bucket = params->op.bucket.info.bucket.name; if (!bucket.empty()) { if (params->object_table.empty()) { params->object_table = getObjectTable(bucket); } if (params->objectdata_table.empty()) { params->objectdata_table = getObjectDataTable(bucket); } if (params->object_view.empty()) { params->object_view = getObjectView(bucket); } if (params->object_trigger.empty()) { params->object_trigger = getObjectTrigger(bucket); } p_params.object_table = params->object_table; p_params.objectdata_table = params->objectdata_table; p_params.object_view = params->object_view; } 
return 0; } static int list_callback(void *None, int argc, char **argv, char **aname) { int i; for(i=0; i < argc; i++) { string arg = argv[i] ? argv[i] : "NULL"; cout<<aname[i]<<" = "<<arg<<"\n"; } return 0; } enum GetUser { UserID = 0, Tenant, NS, DisplayName, UserEmail, AccessKeysID, AccessKeysSecret, AccessKeys, SwiftKeys, SubUsers, Suspended, MaxBuckets, OpMask, UserCaps, Admin, System, PlacementName, PlacementStorageClass, PlacementTags, BucketQuota, TempURLKeys, UserQuota, TYPE, MfaIDs, AssumedRoleARN, UserAttrs, UserVersion, UserVersionTag, }; enum GetBucket { BucketName = 0, Bucket_Tenant, //Tenant Marker, BucketID, Size, SizeRounded, CreationTime, Count, Bucket_PlacementName, Bucket_PlacementStorageClass, OwnerID, Flags, Zonegroup, HasInstanceObj, Quota, RequesterPays, HasWebsite, WebsiteConf, SwiftVersioning, SwiftVerLocation, MdsearchConfig, NewBucketInstanceID, ObjectLock, SyncPolicyInfoGroups, BucketAttrs, BucketVersion, BucketVersionTag, Mtime, Bucket_User_NS }; enum GetObject { ObjName, ObjInstance, ObjNS, ObjBucketName, ACLs, IndexVer, Tag, ObjFlags, VersionedEpoch, ObjCategory, Etag, Owner, OwnerDisplayName, StorageClass, Appendable, ContentType, IndexHashSource, ObjSize, AccountedSize, ObjMtime, Epoch, ObjTag, TailTag, WriteTag, FakeTag, ShadowObj, HasData, IsVersioned, VersionNum, PGVer, ZoneShortID, ObjVersion, ObjVersionTag, ObjAttrs, HeadSize, MaxHeadSize, ObjID, TailInstance, HeadPlacementRuleName, HeadPlacementRuleStorageClass, TailPlacementRuleName, TailPlacementStorageClass, ManifestPartObjs, ManifestPartRules, Omap, IsMultipart, MPPartsList, HeadData, Versions }; enum GetObjectData { ObjDataName, ObjDataInstance, ObjDataNS, ObjDataBucketName, ObjDataID, MultipartPartStr, PartNum, Offset, ObjDataSize, ObjDataMtime, ObjData }; enum GetLCEntry { LCEntryIndex, LCEntryBucketName, LCEntryStartTime, LCEntryStatus }; enum GetLCHead { LCHeadIndex, LCHeadMarker, LCHeadStartDate }; static int list_user(const DoutPrefixProvider *dpp, DBOpInfo &op, 
sqlite3_stmt *stmt) { if (!stmt) return -1; op.user.uinfo.user_id.tenant = (const char*)sqlite3_column_text(stmt, Tenant); op.user.uinfo.user_id.id = (const char*)sqlite3_column_text(stmt, UserID); op.user.uinfo.user_id.ns = (const char*)sqlite3_column_text(stmt, NS); op.user.uinfo.display_name = (const char*)sqlite3_column_text(stmt, DisplayName); // user_name op.user.uinfo.user_email = (const char*)sqlite3_column_text(stmt, UserEmail); SQL_DECODE_BLOB_PARAM(dpp, stmt, SwiftKeys, op.user.uinfo.swift_keys, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, SubUsers, op.user.uinfo.subusers, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, AccessKeys, op.user.uinfo.access_keys, sdb); op.user.uinfo.suspended = sqlite3_column_int(stmt, Suspended); op.user.uinfo.max_buckets = sqlite3_column_int(stmt, MaxBuckets); op.user.uinfo.op_mask = sqlite3_column_int(stmt, OpMask); SQL_DECODE_BLOB_PARAM(dpp, stmt, UserCaps, op.user.uinfo.caps, sdb); op.user.uinfo.admin = sqlite3_column_int(stmt, Admin); op.user.uinfo.system = sqlite3_column_int(stmt, System); op.user.uinfo.default_placement.name = (const char*)sqlite3_column_text(stmt, PlacementName); op.user.uinfo.default_placement.storage_class = (const char*)sqlite3_column_text(stmt, PlacementStorageClass); SQL_DECODE_BLOB_PARAM(dpp, stmt, PlacementTags, op.user.uinfo.placement_tags, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, BucketQuota, op.user.uinfo.quota.bucket_quota, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, TempURLKeys, op.user.uinfo.temp_url_keys, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, UserQuota, op.user.uinfo.quota.user_quota, sdb); op.user.uinfo.type = sqlite3_column_int(stmt, TYPE); SQL_DECODE_BLOB_PARAM(dpp, stmt, MfaIDs, op.user.uinfo.mfa_ids, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, UserAttrs, op.user.user_attrs, sdb); op.user.user_version.ver = sqlite3_column_int(stmt, UserVersion); op.user.user_version.tag = (const char*)sqlite3_column_text(stmt, UserVersionTag); return 0; } static int list_bucket(const DoutPrefixProvider *dpp, DBOpInfo &op, 
sqlite3_stmt *stmt) { if (!stmt) return -1; op.bucket.ent.bucket.name = (const char*)sqlite3_column_text(stmt, BucketName); op.bucket.ent.bucket.tenant = (const char*)sqlite3_column_text(stmt, Bucket_Tenant); op.bucket.ent.bucket.marker = (const char*)sqlite3_column_text(stmt, Marker); op.bucket.ent.bucket.bucket_id = (const char*)sqlite3_column_text(stmt, BucketID); op.bucket.ent.size = sqlite3_column_int(stmt, Size); op.bucket.ent.size_rounded = sqlite3_column_int(stmt, SizeRounded); SQL_DECODE_BLOB_PARAM(dpp, stmt, CreationTime, op.bucket.ent.creation_time, sdb); op.bucket.ent.count = sqlite3_column_int(stmt, Count); op.bucket.ent.placement_rule.name = (const char*)sqlite3_column_text(stmt, Bucket_PlacementName); op.bucket.ent.placement_rule.storage_class = (const char*)sqlite3_column_text(stmt, Bucket_PlacementStorageClass); op.bucket.info.bucket = op.bucket.ent.bucket; op.bucket.info.placement_rule = op.bucket.ent.placement_rule; op.bucket.info.creation_time = op.bucket.ent.creation_time; op.bucket.info.owner.id = (const char*)sqlite3_column_text(stmt, OwnerID); op.bucket.info.owner.tenant = op.bucket.ent.bucket.tenant; if (op.name == "GetBucket") { op.bucket.info.owner.ns = (const char*)sqlite3_column_text(stmt, Bucket_User_NS); } op.bucket.info.flags = sqlite3_column_int(stmt, Flags); op.bucket.info.zonegroup = (const char*)sqlite3_column_text(stmt, Zonegroup); op.bucket.info.has_instance_obj = sqlite3_column_int(stmt, HasInstanceObj); SQL_DECODE_BLOB_PARAM(dpp, stmt, Quota, op.bucket.info.quota, sdb); op.bucket.info.requester_pays = sqlite3_column_int(stmt, RequesterPays); op.bucket.info.has_website = sqlite3_column_int(stmt, HasWebsite); SQL_DECODE_BLOB_PARAM(dpp, stmt, WebsiteConf, op.bucket.info.website_conf, sdb); op.bucket.info.swift_versioning = sqlite3_column_int(stmt, SwiftVersioning); op.bucket.info.swift_ver_location = (const char*)sqlite3_column_text(stmt, SwiftVerLocation); SQL_DECODE_BLOB_PARAM(dpp, stmt, MdsearchConfig, 
op.bucket.info.mdsearch_config, sdb); op.bucket.info.new_bucket_instance_id = (const char*)sqlite3_column_text(stmt, NewBucketInstanceID); SQL_DECODE_BLOB_PARAM(dpp, stmt, ObjectLock, op.bucket.info.obj_lock, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, SyncPolicyInfoGroups, op.bucket.info.sync_policy, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, BucketAttrs, op.bucket.bucket_attrs, sdb); op.bucket.bucket_version.ver = sqlite3_column_int(stmt, BucketVersion); op.bucket.bucket_version.tag = (const char*)sqlite3_column_text(stmt, BucketVersionTag); /* Read bucket version into info.objv_tracker.read_ver. No need * to set write_ver as its not used anywhere. Still keeping its * value same as read_ver */ op.bucket.info.objv_tracker.read_version = op.bucket.bucket_version; op.bucket.info.objv_tracker.write_version = op.bucket.bucket_version; SQL_DECODE_BLOB_PARAM(dpp, stmt, Mtime, op.bucket.mtime, sdb); op.bucket.list_entries.push_back(op.bucket.ent); return 0; } static int list_object(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt) { if (!stmt) return -1; //cout<<sqlite3_column_text(stmt, 0)<<", "; //cout<<sqlite3_column_text(stmt, 1) << "\n"; op.obj.state.exists = true; op.obj.state.obj.key.name = (const char*)sqlite3_column_text(stmt, ObjName); op.bucket.info.bucket.name = (const char*)sqlite3_column_text(stmt, ObjBucketName); op.obj.state.obj.key.instance = (const char*)sqlite3_column_text(stmt, ObjInstance); op.obj.state.obj.key.ns = (const char*)sqlite3_column_text(stmt, ObjNS); SQL_DECODE_BLOB_PARAM(dpp, stmt, ACLs, op.obj.acls, sdb); op.obj.index_ver = sqlite3_column_int(stmt, IndexVer); op.obj.tag = (const char*)sqlite3_column_text(stmt, Tag); op.obj.flags = sqlite3_column_int(stmt, ObjFlags); op.obj.versioned_epoch = sqlite3_column_int(stmt, VersionedEpoch); op.obj.category = (RGWObjCategory)sqlite3_column_int(stmt, ObjCategory); op.obj.etag = (const char*)sqlite3_column_text(stmt, Etag); op.obj.owner = (const char*)sqlite3_column_text(stmt, Owner); 
op.obj.owner_display_name = (const char*)sqlite3_column_text(stmt, OwnerDisplayName); op.obj.storage_class = (const char*)sqlite3_column_text(stmt, StorageClass); op.obj.appendable = sqlite3_column_int(stmt, Appendable); op.obj.content_type = (const char*)sqlite3_column_text(stmt, ContentType); op.obj.state.obj.index_hash_source = (const char*)sqlite3_column_text(stmt, IndexHashSource); op.obj.state.size = sqlite3_column_int(stmt, ObjSize); op.obj.state.accounted_size = sqlite3_column_int(stmt, AccountedSize); SQL_DECODE_BLOB_PARAM(dpp, stmt, ObjMtime, op.obj.state.mtime, sdb); op.obj.state.epoch = sqlite3_column_int(stmt, Epoch); SQL_DECODE_BLOB_PARAM(dpp, stmt, ObjTag, op.obj.state.obj_tag, sdb); SQL_DECODE_BLOB_PARAM(dpp, stmt, TailTag, op.obj.state.tail_tag, sdb); op.obj.state.write_tag = (const char*)sqlite3_column_text(stmt, WriteTag); op.obj.state.fake_tag = sqlite3_column_int(stmt, FakeTag); op.obj.state.shadow_obj = (const char*)sqlite3_column_text(stmt, ShadowObj); op.obj.state.has_data = sqlite3_column_int(stmt, HasData); op.obj.is_versioned = sqlite3_column_int(stmt, IsVersioned); op.obj.version_num = sqlite3_column_int(stmt, VersionNum); op.obj.state.pg_ver = sqlite3_column_int(stmt, PGVer); op.obj.state.zone_short_id = sqlite3_column_int(stmt, ZoneShortID); op.obj.state.objv_tracker.read_version.ver = sqlite3_column_int(stmt, ObjVersion); op.obj.state.objv_tracker.read_version.tag = (const char*)sqlite3_column_text(stmt, ObjVersionTag); SQL_DECODE_BLOB_PARAM(dpp, stmt, ObjAttrs, op.obj.state.attrset, sdb); op.obj.head_size = sqlite3_column_int(stmt, HeadSize); op.obj.max_head_size = sqlite3_column_int(stmt, MaxHeadSize); op.obj.obj_id = (const char*)sqlite3_column_text(stmt, ObjID); op.obj.tail_instance = (const char*)sqlite3_column_text(stmt, TailInstance); op.obj.head_placement_rule.name = (const char*)sqlite3_column_text(stmt, HeadPlacementRuleName); op.obj.head_placement_rule.storage_class = (const char*)sqlite3_column_text(stmt, 
// --- tail of the object-listing row decoder; its definition begins before
// --- this chunk, so only the remainder is visible here.
HeadPlacementRuleStorageClass);
op.obj.tail_placement.placement_rule.name = (const char*)sqlite3_column_text(stmt, TailPlacementRuleName);
op.obj.tail_placement.placement_rule.storage_class = (const char*)sqlite3_column_text(stmt, TailPlacementStorageClass);
SQL_DECODE_BLOB_PARAM(dpp, stmt, ManifestPartObjs, op.obj.objs, sdb);
SQL_DECODE_BLOB_PARAM(dpp, stmt, ManifestPartRules, op.obj.rules, sdb);
SQL_DECODE_BLOB_PARAM(dpp, stmt, Omap, op.obj.omap, sdb);
op.obj.is_multipart = sqlite3_column_int(stmt, IsMultipart);
SQL_DECODE_BLOB_PARAM(dpp, stmt, MPPartsList, op.obj.mp_parts, sdb);
SQL_DECODE_BLOB_PARAM(dpp, stmt, HeadData, op.obj.head_data, sdb);
op.obj.state.data = op.obj.head_data;

// Mirror the decoded row into an rgw_bucket_dir_entry and append it to the
// accumulated listing result.
rgw_bucket_dir_entry dent;
dent.key.name = op.obj.state.obj.key.name;
dent.key.instance = op.obj.state.obj.key.instance;
dent.tag = op.obj.tag;
dent.flags = op.obj.flags;
dent.versioned_epoch = op.obj.versioned_epoch;
dent.index_ver = op.obj.index_ver;
dent.exists = true;
dent.meta.category = op.obj.category;
dent.meta.size = op.obj.state.size;
dent.meta.accounted_size = op.obj.state.accounted_size;
dent.meta.mtime = op.obj.state.mtime;
dent.meta.etag = op.obj.etag;
dent.meta.owner = op.obj.owner;
dent.meta.owner_display_name = op.obj.owner_display_name;
dent.meta.content_type = op.obj.content_type;
dent.meta.storage_class = op.obj.storage_class;
dent.meta.appendable = op.obj.appendable;

op.obj.list_entries.push_back(dent);

return 0;
}

/*
 * Row callback: decode one ObjectData row (a single chunk/part of an
 * object's payload) from 'stmt' into op.obj / op.obj_data.
 * Returns 0 on success, -1 if 'stmt' is null.
 */
static int get_objectdata(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt)
{
  if (!stmt)
    return -1;

  op.obj.state.obj.key.name = (const char*)sqlite3_column_text(stmt, ObjName);
  op.bucket.info.bucket.name = (const char*)sqlite3_column_text(stmt, ObjBucketName);
  op.obj.state.obj.key.instance = (const char*)sqlite3_column_text(stmt, ObjInstance);
  op.obj.state.obj.key.ns = (const char*)sqlite3_column_text(stmt, ObjNS);
  op.obj.obj_id = (const char*)sqlite3_column_text(stmt, ObjDataID);
  op.obj_data.part_num = sqlite3_column_int(stmt, PartNum);
  op.obj_data.offset = sqlite3_column_int(stmt, Offset);
  op.obj_data.size = sqlite3_column_int(stmt, ObjDataSize);
  op.obj_data.multipart_part_str = (const char*)sqlite3_column_text(stmt, MultipartPartStr);
  SQL_DECODE_BLOB_PARAM(dpp, stmt, ObjDataMtime, op.obj.state.mtime, sdb);
  SQL_DECODE_BLOB_PARAM(dpp, stmt, ObjData, op.obj_data.data, sdb);

  return 0;
}

/*
 * Row callback: decode one lifecycle-entry row and append it to
 * op.lc_entry.list_entries.  Returns 0 on success, -1 if 'stmt' is null.
 */
static int list_lc_entry(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt)
{
  if (!stmt)
    return -1;

  op.lc_entry.index = (const char*)sqlite3_column_text(stmt, LCEntryIndex);
  op.lc_entry.entry.set_bucket((const char*)sqlite3_column_text(stmt, LCEntryBucketName));
  op.lc_entry.entry.set_start_time(sqlite3_column_int(stmt, LCEntryStartTime));
  op.lc_entry.entry.set_status(sqlite3_column_int(stmt, LCEntryStatus));

  op.lc_entry.list_entries.push_back(op.lc_entry.entry);

  return 0;
}

/*
 * Row callback: decode the lifecycle head row into op.lc_head.
 * Returns 0 on success, -1 if 'stmt' is null.
 */
static int list_lc_head(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt)
{
  if (!stmt)
    return -1;

  int64_t start_date;

  op.lc_head.index = (const char*)sqlite3_column_text(stmt, LCHeadIndex);
  op.lc_head.head.set_marker((const char*)sqlite3_column_text(stmt, LCHeadMarker));
  SQL_DECODE_BLOB_PARAM(dpp, stmt, LCHeadStartDate, start_date, sdb);
  op.lc_head.head.get_start_date() = start_date;

  return 0;
}

/*
 * Create the base tables (best effort; result deliberately ignored) and
 * instantiate the prepared-statement wrapper object for every user, bucket
 * and lifecycle operation.  Always returns 0.
 */
int SQLiteDB::InitializeDBOps(const DoutPrefixProvider *dpp)
{
  (void)createTables(dpp);
  dbops.InsertUser = make_shared<SQLInsertUser>(&this->db, this->getDBname(), cct);
  dbops.RemoveUser = make_shared<SQLRemoveUser>(&this->db, this->getDBname(), cct);
  dbops.GetUser = make_shared<SQLGetUser>(&this->db, this->getDBname(), cct);
  dbops.InsertBucket = make_shared<SQLInsertBucket>(&this->db, this->getDBname(), cct);
  dbops.UpdateBucket = make_shared<SQLUpdateBucket>(&this->db, this->getDBname(), cct);
  dbops.RemoveBucket = make_shared<SQLRemoveBucket>(&this->db, this->getDBname(), cct);
  dbops.GetBucket = make_shared<SQLGetBucket>(&this->db, this->getDBname(), cct);
  dbops.ListUserBuckets = make_shared<SQLListUserBuckets>(&this->db, this->getDBname(), cct);
  dbops.InsertLCEntry = make_shared<SQLInsertLCEntry>(&this->db, this->getDBname(), cct);
  dbops.RemoveLCEntry = make_shared<SQLRemoveLCEntry>(&this->db, this->getDBname(), cct);
  dbops.GetLCEntry = make_shared<SQLGetLCEntry>(&this->db, this->getDBname(), cct);
  dbops.ListLCEntries = make_shared<SQLListLCEntries>(&this->db, this->getDBname(), cct);
  dbops.InsertLCHead = make_shared<SQLInsertLCHead>(&this->db, this->getDBname(), cct);
  dbops.RemoveLCHead = make_shared<SQLRemoveLCHead>(&this->db, this->getDBname(), cct);
  dbops.GetLCHead = make_shared<SQLGetLCHead>(&this->db, this->getDBname(), cct);

  return 0;
}

/*
 * Open (or create) the sqlite database file in serialized (FULLMUTEX)
 * threading mode and enable foreign-key enforcement on the connection.
 * Returns the sqlite3 handle (as void*); 'db' is left unchanged when no
 * database file name is configured.
 */
void *SQLiteDB::openDB(const DoutPrefixProvider *dpp)
{
  string dbname;
  int rc = 0;

  dbname = getDBfile();
  if (dbname.empty()) {
    ldpp_dout(dpp, 0)<<"dbname is NULL" << dendl;
    goto out;
  }

  rc = sqlite3_open_v2(dbname.c_str(), (sqlite3**)&db,
                       SQLITE_OPEN_READWRITE | SQLITE_OPEN_CREATE | SQLITE_OPEN_FULLMUTEX,
                       NULL);
  if (rc) {
    ldpp_dout(dpp, 0) <<"Cant open "<<dbname<<"; Errmsg - "
      <<sqlite3_errmsg((sqlite3*)db) << dendl;
  } else {
    ldpp_dout(dpp, 0) <<"Opened database("<<dbname<<") successfully" << dendl;
  }

  // sqlite leaves foreign-key constraints off by default; turn them on
  // for this connection.
  exec(dpp, "PRAGMA foreign_keys=ON", NULL);

out:
  return db;
}

// Close the sqlite handle if open and clear it.  Always returns 0.
int SQLiteDB::closeDB(const DoutPrefixProvider *dpp)
{
  if (db)
    sqlite3_close((sqlite3 *)db);

  db = NULL;

  return 0;
}

// Clear all parameter bindings and reset 'stmt' for re-execution.
// Returns the sqlite3_reset() result, or -1 if 'stmt' is null.
int SQLiteDB::Reset(const DoutPrefixProvider *dpp, sqlite3_stmt *stmt)
{
  int ret = -1;

  if (!stmt) {
    return -1;
  }
  sqlite3_clear_bindings(stmt);
  ret = sqlite3_reset(stmt);

  return ret;
}

/*
 * Step 'stmt' to completion, invoking 'cbk' once for every row returned
 * (SQLITE_ROW).  Returns 0 once the statement reaches SQLITE_DONE, -1 on
 * any sqlite error.  The callback's return value is ignored.
 */
int SQLiteDB::Step(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt,
    int (*cbk)(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt))
{
  int ret = -1;

  if (!stmt) {
    return -1;
  }

again:
  ret = sqlite3_step(stmt);

  if ((ret != SQLITE_DONE) && (ret != SQLITE_ROW)) {
    ldpp_dout(dpp, 0)<<"sqlite step failed for stmt("<<stmt
      <<"); Errmsg - "<<sqlite3_errmsg((sqlite3*)db) << dendl;
    return -1;
  } else if (ret == SQLITE_ROW) {
    if (cbk) {
      (*cbk)(dpp, op, stmt);
    } else {
    }
    // keep stepping until SQLITE_DONE so every row is consumed
    goto
again; } ldpp_dout(dpp, 20)<<"sqlite step successfully executed for stmt(" \ <<stmt<<") ret = " << ret << dendl; return 0; } int SQLiteDB::exec(const DoutPrefixProvider *dpp, const char *schema, int (*callback)(void*,int,char**,char**)) { int ret = -1; char *errmsg = NULL; if (!db) goto out; ret = sqlite3_exec((sqlite3*)db, schema, callback, 0, &errmsg); if (ret != SQLITE_OK) { ldpp_dout(dpp, 0) <<"sqlite exec failed for schema("<<schema \ <<"); Errmsg - "<<errmsg << dendl; sqlite3_free(errmsg); goto out; } ret = 0; ldpp_dout(dpp, 10) <<"sqlite exec successfully processed for schema(" \ <<schema<<")" << dendl; out: return ret; } int SQLiteDB::createTables(const DoutPrefixProvider *dpp) { int ret = -1; int cu = 0, cb = 0, cq = 0; DBOpParams params = {}; params.user_table = getUserTable(); params.bucket_table = getBucketTable(); if ((cu = createUserTable(dpp, &params))) goto out; if ((cb = createBucketTable(dpp, &params))) goto out; if ((cq = createQuotaTable(dpp, &params))) goto out; ret = 0; out: if (ret) { if (cu) DeleteUserTable(dpp, &params); if (cb) DeleteBucketTable(dpp, &params); ldpp_dout(dpp, 0)<<"Creation of tables failed" << dendl; } return ret; } int SQLiteDB::createUserTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("User", params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateUserTable failed" << dendl; ldpp_dout(dpp, 20)<<"CreateUserTable suceeded" << dendl; return ret; } int SQLiteDB::createBucketTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("Bucket", params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateBucketTable failed " << dendl; ldpp_dout(dpp, 20)<<"CreateBucketTable suceeded " << dendl; return ret; } int SQLiteDB::createObjectTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("Object", 
params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateObjectTable failed " << dendl; ldpp_dout(dpp, 20)<<"CreateObjectTable suceeded " << dendl; return ret; } int SQLiteDB::createObjectTableTrigger(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("ObjectTrigger", params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateObjectTableTrigger failed " << dendl; ldpp_dout(dpp, 20)<<"CreateObjectTableTrigger suceeded " << dendl; return ret; } int SQLiteDB::createObjectView(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("ObjectView", params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateObjectView failed " << dendl; ldpp_dout(dpp, 20)<<"CreateObjectView suceeded " << dendl; return ret; } int SQLiteDB::createQuotaTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("Quota", params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateQuotaTable failed " << dendl; ldpp_dout(dpp, 20)<<"CreateQuotaTable suceeded " << dendl; return ret; } int SQLiteDB::createObjectDataTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = CreateTableSchema("ObjectData", params); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"CreateObjectDataTable failed " << dendl; ldpp_dout(dpp, 20)<<"CreateObjectDataTable suceeded " << dendl; return ret; } int SQLiteDB::createLCTables(const DoutPrefixProvider *dpp) { int ret = -1; string schema; DBOpParams params = {}; params.lc_entry_table = getLCEntryTable(); params.lc_head_table = getLCHeadTable(); params.bucket_table = getBucketTable(); schema = CreateTableSchema("LCEntry", &params); ret = exec(dpp, schema.c_str(), NULL); if (ret) { ldpp_dout(dpp, 0)<<"CreateLCEntryTable failed" << dendl; return ret; } 
ldpp_dout(dpp, 20)<<"CreateLCEntryTable suceeded" << dendl; schema = CreateTableSchema("LCHead", &params); ret = exec(dpp, schema.c_str(), NULL); if (ret) { ldpp_dout(dpp, 0)<<"CreateLCHeadTable failed" << dendl; (void)DeleteLCEntryTable(dpp, &params); } ldpp_dout(dpp, 20)<<"CreateLCHeadTable suceeded" << dendl; return ret; } int SQLiteDB::DeleteUserTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->user_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteUserTable failed " << dendl; ldpp_dout(dpp, 20)<<"DeleteUserTable suceeded " << dendl; return ret; } int SQLiteDB::DeleteBucketTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->bucket_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeletebucketTable failed " << dendl; ldpp_dout(dpp, 20)<<"DeletebucketTable suceeded " << dendl; return ret; } int SQLiteDB::DeleteObjectTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->object_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteObjectTable failed " << dendl; ldpp_dout(dpp, 20)<<"DeleteObjectTable suceeded " << dendl; return ret; } int SQLiteDB::DeleteObjectDataTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->objectdata_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteObjectDataTable failed " << dendl; ldpp_dout(dpp, 20)<<"DeleteObjectDataTable suceeded " << dendl; return ret; } int SQLiteDB::DeleteQuotaTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->quota_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteQuotaTable failed " << dendl; ldpp_dout(dpp, 
20)<<"DeleteQuotaTable suceeded " << dendl; return ret; } int SQLiteDB::DeleteLCEntryTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->lc_entry_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteLCEntryTable failed " << dendl; ldpp_dout(dpp, 20)<<"DeleteLCEntryTable suceeded " << dendl; return ret; } int SQLiteDB::DeleteLCHeadTable(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = DeleteTableSchema(params->lc_head_table); ret = exec(dpp, schema.c_str(), NULL); if (ret) ldpp_dout(dpp, 0)<<"DeleteLCHeadTable failed " << dendl; ldpp_dout(dpp, 20)<<"DeleteLCHeadTable suceeded " << dendl; return ret; } int SQLiteDB::ListAllUsers(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = ListTableSchema(params->user_table); ret = exec(dpp, schema.c_str(), &list_callback); if (ret) ldpp_dout(dpp, 0)<<"GetUsertable failed " << dendl; ldpp_dout(dpp, 20)<<"GetUserTable suceeded " << dendl; return ret; } int SQLiteDB::ListAllBuckets(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; schema = ListTableSchema(params->bucket_table); ret = exec(dpp, schema.c_str(), &list_callback); if (ret) ldpp_dout(dpp, 0)<<"Listbuckettable failed " << dendl; ldpp_dout(dpp, 20)<<"ListbucketTable suceeded " << dendl; return ret; } int SQLiteDB::ListAllObjects(const DoutPrefixProvider *dpp, DBOpParams *params) { int ret = -1; string schema; map<string, class ObjectOp*>::iterator iter; map<string, class ObjectOp*> objectmap; string bucket; objectmap = getObjectMap(); if (objectmap.empty()) ldpp_dout(dpp, 20)<<"objectmap empty " << dendl; for (iter = objectmap.begin(); iter != objectmap.end(); ++iter) { bucket = iter->first; params->object_table = getObjectTable(bucket); schema = ListTableSchema(params->object_table); ret = exec(dpp, schema.c_str(), &list_callback); if (ret) ldpp_dout(dpp, 
0)<<"ListObjecttable failed " << dendl;

    ldpp_dout(dpp, 20)<<"ListObjectTable suceeded " << dendl;
  }

  return ret;
}

/*
 * Build the prepared-statement wrapper object for every per-bucket object
 * operation.  Always returns 0.
 */
int SQLObjectOp::InitializeObjectOps(string db_name, const DoutPrefixProvider *dpp)
{
  PutObject = make_shared<SQLPutObject>(sdb, db_name, cct);
  DeleteObject = make_shared<SQLDeleteObject>(sdb, db_name, cct);
  GetObject = make_shared<SQLGetObject>(sdb, db_name, cct);
  UpdateObject = make_shared<SQLUpdateObject>(sdb, db_name, cct);
  ListBucketObjects = make_shared<SQLListBucketObjects>(sdb, db_name, cct);
  ListVersionedObjects = make_shared<SQLListVersionedObjects>(sdb, db_name, cct);
  PutObjectData = make_shared<SQLPutObjectData>(sdb, db_name, cct);
  UpdateObjectData = make_shared<SQLUpdateObjectData>(sdb, db_name, cct);
  GetObjectData = make_shared<SQLGetObjectData>(sdb, db_name, cct);
  DeleteObjectData = make_shared<SQLDeleteObjectData>(sdb, db_name, cct);
  DeleteStaleObjectData = make_shared<SQLDeleteStaleObjectData>(sdb, db_name, cct);

  return 0;
}

// Prepare the InsertUser statement against the open db handle.
int SQLInsertUser::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLInsertUser - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareInsertUser");
out:
  return ret;
}

/*
 * Bind every user field to the prepared InsertUser statement.
 * The SQL_BIND_* / SQL_ENCODE_BLOB_PARAM macros jump to 'out' on failure,
 * so the bind order here must match the prepared statement's parameters.
 */
int SQLInsertUser::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.tenant, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.tenant.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.ns, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.ns.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.display_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.display_name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_email, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_email.c_str(), sdb);

  // Only the first access key is stored in the dedicated id/secret
  // columns; the full map is serialized as a blob below.
  if (!params->op.user.uinfo.access_keys.empty()) {
    string access_key;
    string key;
    map<string, RGWAccessKey>::const_iterator it = params->op.user.uinfo.access_keys.begin();
    const RGWAccessKey& k = it->second;
    access_key = k.id;
    key = k.key;

    SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.access_keys_id, sdb);
    SQL_BIND_TEXT(dpp, stmt, index, access_key.c_str(), sdb);

    SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.access_keys_secret, sdb);
    SQL_BIND_TEXT(dpp, stmt, index, key.c_str(), sdb);
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.access_keys, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.access_keys, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.swift_keys, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.swift_keys, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.subusers, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.subusers, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.suspended, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.uinfo.suspended, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.max_buckets, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.uinfo.max_buckets, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.op_mask, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.uinfo.op_mask, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_caps, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.caps, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.admin, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.uinfo.admin, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.system, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.uinfo.system, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.placement_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.default_placement.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.placement_storage_class, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.default_placement.storage_class.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.placement_tags, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.placement_tags, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.bucket_quota, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.quota.bucket_quota, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.temp_url_keys, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.temp_url_keys, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_quota, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.quota.user_quota, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.type, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.uinfo.type, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.mfa_ids, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.uinfo.mfa_ids, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_attrs, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.user.user_attrs, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_ver, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.user.user_version.ver, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_ver_tag, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.user_version.tag.c_str(), sdb);

out:
  return rc;
}

// Execute the bound InsertUser statement (no row callback).
int SQLInsertUser::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Prepare the RemoveUser statement against the open db handle.
int SQLRemoveUser::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLRemoveUser - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareRemoveUser");
out:
  return ret;
}

// Bind the user id to the RemoveUser statement.
int SQLRemoveUser::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);

out:
  return rc;
}

// Execute the bound RemoveUser statement (no row callback).
int SQLRemoveUser::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

/*
 * Prepare a GetUser statement; a separate prepared statement is kept per
 * lookup key (email / access_key / user_id / default-by-userid).
 */
int SQLGetUser::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLGetUser - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  if (params->op.query_str == "email") {
    SQL_PREPARE(dpp, p_params, sdb, email_stmt, ret, "PrepareGetUser");
  } else if (params->op.query_str == "access_key") {
    SQL_PREPARE(dpp, p_params, sdb, ak_stmt, ret, "PrepareGetUser");
  } else if (params->op.query_str == "user_id") {
    SQL_PREPARE(dpp, p_params, sdb, userid_stmt, ret, "PrepareGetUser");
  } else { // by default by userid
    SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareGetUser");
  }

out:
  return ret;
}

// Bind the lookup key to the statement matching params->op.query_str.
int SQLGetUser::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.query_str == "email") {
    SQL_BIND_INDEX(dpp, email_stmt, index, p_params.op.user.user_email, sdb);
    SQL_BIND_TEXT(dpp, email_stmt, index, params->op.user.uinfo.user_email.c_str(), sdb);
  } else if (params->op.query_str == "access_key") {
    if (!params->op.user.uinfo.access_keys.empty()) {
      string access_key;
      map<string, RGWAccessKey>::const_iterator it = params->op.user.uinfo.access_keys.begin();
      const
RGWAccessKey& k = it->second;
      access_key = k.id;

      SQL_BIND_INDEX(dpp, ak_stmt, index, p_params.op.user.access_keys_id, sdb);
      SQL_BIND_TEXT(dpp, ak_stmt, index, access_key.c_str(), sdb);
    }
  } else if (params->op.query_str == "user_id") {
    SQL_BIND_INDEX(dpp, userid_stmt, index, p_params.op.user.user_id, sdb);
    SQL_BIND_TEXT(dpp, userid_stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);
  } else { // by default by userid
    SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_id, sdb);
    SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);
  }

out:
  return rc;
}

// Execute the GetUser statement matching the lookup key; each returned row
// is decoded via the list_user callback.
int SQLGetUser::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  if (params->op.query_str == "email") {
    SQL_EXECUTE(dpp, params, email_stmt, list_user);
  } else if (params->op.query_str == "access_key") {
    SQL_EXECUTE(dpp, params, ak_stmt, list_user);
  } else if (params->op.query_str == "user_id") {
    SQL_EXECUTE(dpp, params, userid_stmt, list_user);
  } else { // by default by userid
    SQL_EXECUTE(dpp, params, stmt, list_user);
  }

out:
  return ret;
}

// Prepare the InsertBucket statement against the open db handle.
int SQLInsertBucket::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLInsertBucket - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareInsertBucket");
out:
  return ret;
}

/*
 * Bind every bucket field to the prepared InsertBucket statement.  The
 * bind macros jump to 'out' on failure, so bind order must match the
 * prepared statement's parameters.
 */
int SQLInsertBucket::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  // user_id here is copied as OwnerID in the bucket table.
  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.user.user_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.tenant, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.tenant.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.marker, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.marker.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.bucket_id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.size, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.ent.size, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.size_rounded, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.ent.size_rounded, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.creation_time, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.info.creation_time, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.count, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.ent.count, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.placement_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.placement_rule.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.placement_storage_class, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.placement_rule.storage_class.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.flags, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.info.flags, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.zonegroup, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.zonegroup.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.has_instance_obj, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.info.has_instance_obj, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.quota, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.info.quota, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.requester_pays, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.info.requester_pays, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.has_website, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.info.has_website, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.website_conf, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.info.website_conf, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.swift_versioning, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.info.swift_versioning, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.swift_ver_location, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.swift_ver_location.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.mdsearch_config, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.info.mdsearch_config, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.new_bucket_instance_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.new_bucket_instance_id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.obj_lock, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.info.obj_lock, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.sync_policy_info_groups, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.info.sync_policy, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_attrs, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.bucket_attrs, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_ver, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.bucket.bucket_version.ver, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_ver_tag, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.bucket_version.tag.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.mtime, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.bucket.mtime, sdb);

out:
  return rc;
}

/*
 * Execute the bound InsertBucket statement, register the new bucket in the
 * in-memory object map, and create the bucket's object/object-data tables
 * plus trigger (creation results are best-effort and ignored).
 */
int SQLInsertBucket::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  class SQLObjectOp *ObPtr = NULL;
  string bucket_name = params->op.bucket.info.bucket.name;
  struct DBOpPrepareParams p_params = PrepareParams;

  // NOTE(review): raw 'new' — presumably objectmapInsert() takes ownership
  // of ObPtr; confirm it is freed when the bucket is already mapped.
  ObPtr = new SQLObjectOp(sdb, ctx());

  objectmapInsert(dpp, bucket_name, ObPtr);

  SQL_EXECUTE(dpp, params, stmt, NULL);

  /* Once Bucket is inserted created corresponding object(&data) tables
   */
  InitPrepareParams(dpp, p_params, params);

  (void)createObjectTable(dpp, params);
  (void)createObjectDataTable(dpp, params);
  (void)createObjectTableTrigger(dpp, params);
out:
  return ret;
}

/*
 * Prepare an UpdateBucket statement; a separate prepared statement is kept
 * per update flavour ("attrs" / "owner" / "info").
 */
int SQLUpdateBucket::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLUpdateBucket - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  if (params->op.query_str == "attrs") {
    SQL_PREPARE(dpp, p_params, sdb, attrs_stmt, ret, "PrepareUpdateBucket");
  } else if (params->op.query_str == "owner") {
    SQL_PREPARE(dpp, p_params, sdb, owner_stmt, ret, "PrepareUpdateBucket");
  } else if (params->op.query_str == "info") {
    SQL_PREPARE(dpp, p_params, sdb, info_stmt, ret, "PrepareUpdateBucket");
  } else {
    ldpp_dout(dpp, 0)<<"In SQLUpdateBucket invalid query_str:" << params->op.query_str << "" << dendl;
    goto out;
  }

out:
  return ret;
}

/*
 * Bind the fields for the selected UpdateBucket flavour, then the common
 * WHERE-clause / version fields shared by all three statements.
 */
int SQLUpdateBucket::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;
  // NOTE(review): this local shadows the member 'stmt' used by the other
  // SQL* classes — intentional here, but confirm when refactoring.
  sqlite3_stmt** stmt = NULL; // Prepared statement

  /* All below fields for attrs */
  if (params->op.query_str == "attrs") {
    stmt = &attrs_stmt;
  } else if
(params->op.query_str == "owner") {
    stmt = &owner_stmt;
  } else if (params->op.query_str == "info") {
    stmt = &info_stmt;
  } else {
    ldpp_dout(dpp, 0)<<"In SQLUpdateBucket invalid query_str:" << params->op.query_str << "" << dendl;
    goto out;
  }

  if (params->op.query_str == "attrs") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.bucket_attrs, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.bucket_attrs, sdb);
  } else if (params->op.query_str == "owner") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.creation_time, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.creation_time, sdb);
  } else if (params->op.query_str == "info") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.tenant, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.bucket.tenant.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.marker, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.bucket.marker.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.bucket_id, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.bucket.bucket_id.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.creation_time, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.creation_time, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.count, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.ent.count, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.placement_name, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.placement_rule.name.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.placement_storage_class, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.placement_rule.storage_class.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.flags, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.info.flags, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.zonegroup, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.zonegroup.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.has_instance_obj, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.info.has_instance_obj, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.quota, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.quota, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.requester_pays, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.info.requester_pays, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.has_website, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.info.has_website, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.website_conf, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.website_conf, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.swift_versioning, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.info.swift_versioning, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.swift_ver_location, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.swift_ver_location.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.mdsearch_config, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.mdsearch_config, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.new_bucket_instance_id, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.new_bucket_instance_id.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.obj_lock, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.obj_lock, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.sync_policy_info_groups, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.info.sync_policy, sdb);
  }

  // Common fields bound by every flavour (WHERE clause + version/mtime).
  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.user.user_id, sdb);
  SQL_BIND_TEXT(dpp, *stmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.bucket_ver, sdb);
  SQL_BIND_INT(dpp, *stmt, index, params->op.bucket.bucket_version.ver, sdb);

  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.mtime, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.bucket.mtime, sdb);

out:
  return rc;
}

// Execute the UpdateBucket statement matching params->op.query_str.
int SQLUpdateBucket::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  sqlite3_stmt** stmt = NULL; // Prepared statement

  if (params->op.query_str == "attrs") {
    stmt = &attrs_stmt;
  } else if (params->op.query_str == "owner") {
    stmt = &owner_stmt;
  } else if (params->op.query_str == "info") {
    stmt = &info_stmt;
  } else {
    ldpp_dout(dpp, 0)<<"In SQLUpdateBucket invalid query_str:" << params->op.query_str << "" << dendl;
    goto out;
  }

  SQL_EXECUTE(dpp, params, *stmt, NULL);
out:
  return ret;
}

// Prepare the RemoveBucket statement against the open db handle.
int SQLRemoveBucket::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLRemoveBucket - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareRemoveBucket");
out:
  return ret;
}

// Bind the bucket name to the RemoveBucket statement.
int SQLRemoveBucket::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

out:
  return rc;
}

// Drop the bucket from the in-memory object map, then execute the bound
// RemoveBucket statement.
int SQLRemoveBucket::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  objectmapDelete(dpp, params->op.bucket.info.bucket.name);

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Prepare the GetBucket statement against the open db handle.
int SQLGetBucket::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLGetBucket - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareGetBucket");
out:
  return ret;
}

// Bind the bucket name to the GetBucket statement.
int SQLGetBucket::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

out:
  return rc;
}

/*
 * Execute the GetBucket statement (rows decoded via list_bucket) and
 * re-register the bucket in the in-memory object map.
 */
int SQLGetBucket::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  class SQLObjectOp *ObPtr = NULL;

  params->op.name = "GetBucket";

  // NOTE(review): raw 'new' — presumably objectmapInsert() takes ownership
  // of ObPtr; confirm it is freed when the bucket is already mapped.
  ObPtr = new SQLObjectOp(sdb, ctx());

  /* For the case when the server restarts, need to reinsert objectmap*/
  objectmapInsert(dpp, params->op.bucket.info.bucket.name, ObPtr);
  SQL_EXECUTE(dpp, params, stmt, list_bucket);
out:
  return ret;
}

/*
 * Prepare a ListUserBuckets statement; query_str "all" lists every bucket,
 * otherwise listing is restricted to one owner.
 */
int SQLListUserBuckets::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLListUserBuckets - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  if (params->op.query_str == "all") {
    SQL_PREPARE(dpp, p_params, sdb, all_stmt, ret, "PrepareListUserBuckets");
  } else {
    SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareListUserBuckets");
  }

out:
  return ret;
}

// Bind owner (unless listing all), pagination marker and max count.
int SQLListUserBuckets::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;
  sqlite3_stmt** pstmt = NULL; // Prepared statement

  if (params->op.query_str == "all") {
    pstmt = &all_stmt;
  } else {
    pstmt = &stmt;
  }

  if (params->op.query_str != "all") {
    SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.user.user_id, sdb);
    SQL_BIND_TEXT(dpp, *pstmt, index, params->op.user.uinfo.user_id.id.c_str(), sdb);
  }

  SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.bucket.min_marker, sdb);
  SQL_BIND_TEXT(dpp, *pstmt, index, params->op.bucket.min_marker.c_str(), sdb);

  SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.list_max_count, sdb);
  SQL_BIND_INT(dpp, *pstmt, index, params->op.list_max_count, sdb);

out:
  return rc;
}

// Execute the appropriate ListUserBuckets statement; rows decoded via
// the list_bucket callback.
int SQLListUserBuckets::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  if (params->op.query_str == "all") {
    SQL_EXECUTE(dpp, params, all_stmt, list_bucket);
  } else {
    SQL_EXECUTE(dpp, params, stmt, list_bucket);
  }

out:
  return ret;
}

// Prepare the PutObject statement against the open db handle.
int SQLPutObject::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLPutObject - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PreparePutObject");
out:
  return ret;
}

// Bind object fields to the PutObject statement.  (Definition continues
// past the end of this chunk.)
int SQLPutObject::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  int VersionNum = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  // an empty instance key is stored as the literal string "null"
  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_ns, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.ns.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.acls, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.acls, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.index_ver, sdb);
  SQL_BIND_INT(dpp, stmt, index,
params->op.obj.index_ver, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.tag, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.tag.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.flags, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.flags, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.versioned_epoch, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.versioned_epoch, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_category, sdb); SQL_BIND_INT(dpp, stmt, index, (uint8_t)(params->op.obj.category), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.etag, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.etag.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.owner, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.owner.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.owner_display_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.owner_display_name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.storage_class, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.storage_class.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.appendable, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.appendable, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.content_type, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.content_type.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.index_hash_source, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.index_hash_source.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_size, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.size, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.accounted_size, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.accounted_size, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.mtime, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.mtime, sdb); SQL_BIND_INDEX(dpp, stmt, 
index, p_params.op.obj.epoch, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.epoch, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_tag, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.obj_tag, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.tail_tag, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.tail_tag, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.write_tag, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.write_tag.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.fake_tag, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.fake_tag, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.shadow_obj, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.shadow_obj.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.has_data, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.has_data, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.is_versioned, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.is_versioned, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.version_num, sdb); SQL_BIND_INT(dpp, stmt, index, VersionNum, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.pg_ver, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.pg_ver, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.zone_short_id, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.zone_short_id, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_version, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.state.objv_tracker.read_version.ver, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_version_tag, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.objv_tracker.read_version.tag.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_attrs, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.attrset, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.head_size, sdb); 
SQL_BIND_INT(dpp, stmt, index, params->op.obj.head_size, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.max_head_size, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.max_head_size, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_id, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.obj_id.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.tail_instance, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.tail_instance.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.head_placement_rule_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.head_placement_rule.name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.head_placement_storage_class, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.head_placement_rule.storage_class.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.tail_placement_rule_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.tail_placement.placement_rule.name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.tail_placement_storage_class, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.tail_placement.placement_rule.storage_class.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.manifest_part_objs, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.objs, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.manifest_part_rules, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.rules, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.omap, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.omap, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.is_multipart, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.obj.is_multipart, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.mp_parts, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.mp_parts, sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.head_data, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, 
params->op.obj.head_data, sdb); out: return rc; } int SQLPutObject::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; SQL_EXECUTE(dpp, params, stmt, NULL); out: return ret; } int SQLDeleteObject::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = PrepareParams; if (!*sdb) { ldpp_dout(dpp, 0)<<"In SQLDeleteObject - no db" << dendl; goto out; } InitPrepareParams(dpp, p_params, params); SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareDeleteObject"); out: return ret; } int SQLDeleteObject::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int index = -1; int rc = 0; struct DBOpPrepareParams p_params = PrepareParams; if (params->op.obj.state.obj.key.instance.empty()) { params->op.obj.state.obj.key.instance = "null"; } SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb); out: return rc; } int SQLDeleteObject::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; SQL_EXECUTE(dpp, params, stmt, NULL); out: return ret; } int SQLGetObject::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = PrepareParams; if (!*sdb) { ldpp_dout(dpp, 0)<<"In SQLGetObject - no db" << dendl; goto out; } InitPrepareParams(dpp, p_params, params); SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareGetObject"); out: return ret; } int SQLGetObject::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int index = -1; int rc = 0; struct DBOpPrepareParams p_params = PrepareParams; if 
(params->op.obj.state.obj.key.instance.empty()) { params->op.obj.state.obj.key.instance = "null"; } SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb); out: return rc; } int SQLGetObject::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; SQL_EXECUTE(dpp, params, stmt, list_object); out: return ret; } int SQLUpdateObject::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = PrepareParams; struct DBOpParams copy = *params; string bucket_name; if (!*sdb) { ldpp_dout(dpp, 0)<<"In SQLUpdateObject - no db" << dendl; goto out; } InitPrepareParams(dpp, p_params, params); if (params->op.query_str == "omap") { SQL_PREPARE(dpp, p_params, sdb, omap_stmt, ret, "PrepareUpdateObject"); } else if (params->op.query_str == "attrs") { SQL_PREPARE(dpp, p_params, sdb, attrs_stmt, ret, "PrepareUpdateObject"); } else if (params->op.query_str == "meta") { SQL_PREPARE(dpp, p_params, sdb, meta_stmt, ret, "PrepareUpdateObject"); } else if (params->op.query_str == "mp") { SQL_PREPARE(dpp, p_params, sdb, mp_stmt, ret, "PrepareUpdateObject"); } else { ldpp_dout(dpp, 0)<<"In SQLUpdateObject invalid query_str:" << params->op.query_str << dendl; goto out; } out: return ret; } int SQLUpdateObject::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int index = -1; int rc = 0; struct DBOpPrepareParams p_params = PrepareParams; sqlite3_stmt** stmt = NULL; // Prepared statement /* All below fields for attrs */ if (params->op.query_str == "omap") { stmt = &omap_stmt; } else if (params->op.query_str == 
"attrs") {
    stmt = &attrs_stmt;
  } else if (params->op.query_str == "meta") {
    stmt = &meta_stmt;
  } else if (params->op.query_str == "mp") {
    stmt = &mp_stmt;
  } else {
    ldpp_dout(dpp, 0)<<"In SQLUpdateObject invalid query_str:" << params->op.query_str << dendl;
    goto out;
  }

  // An empty version instance is stored as the literal "null".
  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  // Common binds for every variant: object key + mtime.
  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, *stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_instance, sdb);
  SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb);

  SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.mtime, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.state.mtime, sdb);

  // Variant-specific binds.
  if (params->op.query_str == "omap") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.omap, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.omap, sdb);
  }
  if (params->op.query_str == "attrs") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_attrs, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.state.attrset, sdb);
  }
  if (params->op.query_str == "mp") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.mp_parts, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.mp_parts, sdb);
  }
  // "meta" rewrites (almost) the whole row — same column set as
  // SQLPutObject::Bind, except version_num comes from params here.
  if (params->op.query_str == "meta") {
    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_ns, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.obj.key.ns.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.acls, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.acls, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.index_ver, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.index_ver, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.tag, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.tag.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.flags, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.flags, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.versioned_epoch, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.versioned_epoch, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_category, sdb);
    SQL_BIND_INT(dpp, *stmt, index, (uint8_t)(params->op.obj.category), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.etag, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.etag.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.owner, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.owner.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.owner_display_name, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.owner_display_name.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.storage_class, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.storage_class.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.appendable, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.appendable, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.content_type, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.content_type.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.index_hash_source, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.obj.index_hash_source.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_size, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.size, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.accounted_size, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.accounted_size, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.epoch, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.epoch, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_tag, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.state.obj_tag, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.tail_tag, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.state.tail_tag, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.write_tag, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.write_tag.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.fake_tag, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.fake_tag, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.shadow_obj, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.shadow_obj.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.has_data, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.has_data, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.is_versioned, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.is_versioned, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.version_num, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.version_num, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.pg_ver, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.pg_ver, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.zone_short_id, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.zone_short_id, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_version, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.state.objv_tracker.read_version.ver, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_version_tag, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.state.objv_tracker.read_version.tag.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_attrs, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.state.attrset, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.head_size, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.head_size, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.max_head_size, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.max_head_size, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.obj_id, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.obj_id.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.tail_instance, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.tail_instance.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.head_placement_rule_name, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.head_placement_rule.name.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.head_placement_storage_class, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.head_placement_rule.storage_class.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.tail_placement_rule_name, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.tail_placement.placement_rule.name.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.tail_placement_storage_class, sdb);
    SQL_BIND_TEXT(dpp, *stmt, index, params->op.obj.tail_placement.placement_rule.storage_class.c_str(), sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.manifest_part_objs, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.objs, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.manifest_part_rules, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.rules, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.omap, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.omap, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.is_multipart, sdb);
    SQL_BIND_INT(dpp, *stmt, index, params->op.obj.is_multipart, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.mp_parts, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.mp_parts, sdb);

    SQL_BIND_INDEX(dpp, *stmt, index, p_params.op.obj.head_data, sdb);
    SQL_ENCODE_BLOB_PARAM(dpp, *stmt, index, params->op.obj.head_data, sdb);
  }

out:
  return rc;
}

// Step the UPDATE-object statement for the variant named by query_str.
int SQLUpdateObject::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  sqlite3_stmt** stmt = NULL; // Prepared statement

  if (params->op.query_str == "omap") {
    stmt = &omap_stmt;
  } else if (params->op.query_str == "attrs") {
    stmt = &attrs_stmt;
  } else if (params->op.query_str == "meta") {
    stmt = &meta_stmt;
  } else if (params->op.query_str == "mp") {
    stmt = &mp_stmt;
  } else {
    ldpp_dout(dpp, 0)<<"In SQLUpdateObject invalid query_str:" << params->op.query_str << dendl;
    goto out;
  }

  SQL_EXECUTE(dpp, params, *stmt, NULL);
out:
  return ret;
}

// Compile the list-objects-in-bucket statement.
int SQLListBucketObjects::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLListBucketObjects - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareListBucketObjects");
out:
  return ret;
}

// Bind bucket, pagination marker, prefix filter and page size.
int SQLListBucketObjects::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.min_marker, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.min_marker.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.prefix, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.prefix.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.list_max_count, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.list_max_count, sdb);

out:
  return rc;
}

// Step the list-objects statement; rows go through list_object.
int SQLListBucketObjects::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, list_object);
out:
  return ret;
}

// Compile the list-versioned-objects statement.
int SQLListVersionedObjects::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLListVersionedObjects - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareListVersionedObjects");
out:
  return ret;
}

// Bind bucket, object name and page size for listing versions of one object.
int SQLListVersionedObjects::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.list_max_count, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.list_max_count, sdb);

out:
  return rc;
}

// Step the list-versioned-objects statement; rows go through list_object.
int SQLListVersionedObjects::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, list_object);
out:
  return ret;
}

// Compile the put-object-data (tail data chunk) statement.
int SQLPutObjectData::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLPutObjectData - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PreparePutObjectData");
out:
  return ret;
}

// Bind one data chunk: object key + obj_id, part number, offset,
// raw data blob, chunk size, multipart part string and mtime.
int SQLPutObjectData::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_ns, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.ns.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.obj_id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj_data.part_num, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.obj_data.part_num, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj_data.offset, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.obj_data.offset, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj_data.data, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj_data.data, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj_data.size, sdb);
  SQL_BIND_INT(dpp, stmt, index, params->op.obj_data.size, sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj_data.multipart_part_str, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj_data.multipart_part_str.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.mtime, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.mtime, sdb);

out:
  return rc;
}

// Step the put-object-data statement.
int SQLPutObjectData::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Compile the update-object-data statement.
int SQLUpdateObjectData::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLUpdateObjectData - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareUpdateObjectData");
out:
  return ret;
}

// Bind the object key, obj_id and new mtime for the data update.
int SQLUpdateObjectData::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.obj_id.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.mtime, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.mtime, sdb);

out:
  return rc;
}

// Step the update-object-data statement.
int SQLUpdateObjectData::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Compile the get-object-data statement.
int SQLGetObjectData::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLGetObjectData - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareGetObjectData");
out:
  return ret;
}

// Bind the object key + obj_id for reading data chunks.
int SQLGetObjectData::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.obj_id.c_str(), sdb);

out:
  return rc;
}

// Step the get-object-data statement; rows go through get_objectdata.
int SQLGetObjectData::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, get_objectdata);
out:
  return ret;
}

// Compile the delete-object-data statement.
int SQLDeleteObjectData::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLDeleteObjectData - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareDeleteObjectData");
out:
  return ret;
}

// Bind the object key + obj_id whose data chunks should be removed.
int SQLDeleteObjectData::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (params->op.obj.state.obj.key.instance.empty()) {
    params->op.obj.state.obj.key.instance = "null";
  }

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.bucket.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.bucket.info.bucket.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.name.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_instance, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.state.obj.key.instance.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.obj_id, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.obj.obj_id.c_str(), sdb);

out:
  return rc;
}

// Step the delete-object-data statement.
int SQLDeleteObjectData::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// (continues on the next source line with SQLDeleteStaleObjectData::Prepare)
int
SQLDeleteStaleObjectData::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = PrepareParams; if (!*sdb) { ldpp_dout(dpp, 0)<<"In SQLDeleteStaleObjectData - no db" << dendl; goto out; } InitPrepareParams(dpp, p_params, params); SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareDeleteStaleObjectData"); out: return ret; } int SQLDeleteStaleObjectData::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int index = -1; int rc = 0; struct DBOpPrepareParams p_params = PrepareParams; SQL_BIND_INDEX(dpp, stmt, index, p_params.op.obj.mtime, sdb); SQL_ENCODE_BLOB_PARAM(dpp, stmt, index, params->op.obj.state.mtime, sdb); out: return rc; } int SQLDeleteStaleObjectData::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; SQL_EXECUTE(dpp, params, stmt, NULL); out: return ret; } int SQLInsertLCEntry::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = PrepareParams; if (!*sdb) { ldpp_dout(dpp, 0)<<"In SQLInsertLCEntry - no db" << dendl; goto out; } InitPrepareParams(dpp, p_params, params); SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareInsertLCEntry"); out: return ret; } int SQLInsertLCEntry::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int index = -1; int rc = 0; struct DBOpPrepareParams p_params = PrepareParams; SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.index, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_entry.index.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.bucket_name, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_entry.entry.get_bucket().c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.status, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.lc_entry.entry.get_status(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.start_time, sdb); SQL_BIND_INT(dpp, stmt, index, 
               params->op.lc_entry.entry.get_start_time(), sdb);

out:
  return rc;
}

int SQLInsertLCEntry::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Compile the lifecycle-entry DELETE statement.
int SQLRemoveLCEntry::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLRemoveLCEntry - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareRemoveLCEntry");
out:
  return ret;
}

// The entry is keyed by (lc index, bucket name).
int SQLRemoveLCEntry::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.index, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_entry.index.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.bucket_name, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_entry.entry.get_bucket().c_str(), sdb);

out:
  return rc;
}

int SQLRemoveLCEntry::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Two prepared statements are kept: "stmt" fetches an exact entry and
// "next_stmt" fetches the entry following a marker
// (selected when query_str == "get_next_entry").
int SQLGetLCEntry::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  sqlite3_stmt** pstmt = NULL; // Prepared statement
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLGetLCEntry - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  if (params->op.query_str == "get_next_entry") {
    pstmt = &next_stmt;
  } else {
    pstmt = &stmt;
  }

  SQL_PREPARE(dpp, p_params, sdb, *pstmt, ret, "PrepareGetLCEntry");
out:
  return ret;
}

int SQLGetLCEntry::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;
  sqlite3_stmt** pstmt = NULL; // Prepared statement

  // pick the statement variant that Prepare() compiled for this query
  if (params->op.query_str ==
"get_next_entry") { pstmt = &next_stmt; } else { pstmt = &stmt; } SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.lc_entry.index, sdb); SQL_BIND_TEXT(dpp, *pstmt, index, params->op.lc_entry.index.c_str(), sdb); SQL_BIND_INDEX(dpp, *pstmt, index, p_params.op.lc_entry.bucket_name, sdb); SQL_BIND_TEXT(dpp, *pstmt, index, params->op.lc_entry.entry.get_bucket().c_str(), sdb); out: return rc; } int SQLGetLCEntry::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; sqlite3_stmt** pstmt = NULL; // Prepared statement if (params->op.query_str == "get_next_entry") { pstmt = &next_stmt; } else { pstmt = &stmt; } SQL_EXECUTE(dpp, params, *pstmt, list_lc_entry); out: return ret; } int SQLListLCEntries::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = PrepareParams; if (!*sdb) { ldpp_dout(dpp, 0)<<"In SQLListLCEntries - no db" << dendl; goto out; } InitPrepareParams(dpp, p_params, params); SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareListLCEntries"); out: return ret; } int SQLListLCEntries::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int index = -1; int rc = 0; struct DBOpPrepareParams p_params = PrepareParams; SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.index, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_entry.index.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_entry.min_marker, sdb); SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_entry.min_marker.c_str(), sdb); SQL_BIND_INDEX(dpp, stmt, index, p_params.op.list_max_count, sdb); SQL_BIND_INT(dpp, stmt, index, params->op.list_max_count, sdb); out: return rc; } int SQLListLCEntries::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; SQL_EXECUTE(dpp, params, stmt, list_lc_entry); out: return ret; } int SQLInsertLCHead::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params) { int ret = -1; struct DBOpPrepareParams p_params = 
      PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLInsertLCHead - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareInsertLCHead");
out:
  return ret;
}

// Bind head index, marker, and start_date (encoded as an int64 blob).
int SQLInsertLCHead::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_head.index, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_head.index.c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_head.marker, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_head.head.get_marker().c_str(), sdb);

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_head.start_date, sdb);
  SQL_ENCODE_BLOB_PARAM(dpp, stmt, index,
                        static_cast<int64_t>(params->op.lc_head.head.start_date), sdb);

out:
  return rc;
}

int SQLInsertLCHead::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Compile the lifecycle-head DELETE statement.
int SQLRemoveLCHead::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLRemoveLCHead - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareRemoveLCHead");
out:
  return ret;
}

// The head row is keyed by index alone.
int SQLRemoveLCHead::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_head.index, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_head.index.c_str(), sdb);

out:
  return rc;
}

int SQLRemoveLCHead::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  SQL_EXECUTE(dpp, params, stmt, NULL);
out:
  return ret;
}

// Compile the lifecycle-head SELECT statement.
int SQLGetLCHead::Prepare(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;
  struct
       DBOpPrepareParams p_params = PrepareParams;

  if (!*sdb) {
    ldpp_dout(dpp, 0)<<"In SQLGetLCHead - no db" << dendl;
    goto out;
  }

  InitPrepareParams(dpp, p_params, params);

  SQL_PREPARE(dpp, p_params, sdb, stmt, ret, "PrepareGetLCHead");
out:
  return ret;
}

// The head row is keyed by index alone.
int SQLGetLCHead::Bind(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int index = -1;
  int rc = 0;
  struct DBOpPrepareParams p_params = PrepareParams;

  SQL_BIND_INDEX(dpp, stmt, index, p_params.op.lc_head.index, sdb);
  SQL_BIND_TEXT(dpp, stmt, index, params->op.lc_head.index.c_str(), sdb);

out:
  return rc;
}

int SQLGetLCHead::Execute(const DoutPrefixProvider *dpp, struct DBOpParams *params)
{
  int ret = -1;

  // clear the params before fetching the entry
  params->op.lc_head.head = {};
  SQL_EXECUTE(dpp, params, stmt, list_lc_head);
out:
  return ret;
}
97,235
31.444444
121
cc
null
ceph-main/src/rgw/driver/dbstore/sqlite/sqliteDB.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <errno.h> #include <stdlib.h> #include <string> #include <sqlite3.h> #include "rgw/driver/dbstore/common/dbstore.h" using namespace rgw::store; class SQLiteDB : public DB, virtual public DBOp { private: sqlite3_mutex *mutex = NULL; protected: CephContext *cct; public: sqlite3_stmt *stmt = NULL; DBOpPrepareParams PrepareParams; SQLiteDB(sqlite3 *dbi, std::string db_name, CephContext *_cct) : DB(db_name, _cct), cct(_cct) { db = (void*)dbi; } SQLiteDB(std::string db_name, CephContext *_cct) : DB(db_name, _cct), cct(_cct) { } ~SQLiteDB() {} uint64_t get_blob_limit() override { return SQLITE_LIMIT_LENGTH; } void *openDB(const DoutPrefixProvider *dpp) override; int closeDB(const DoutPrefixProvider *dpp) override; int InitializeDBOps(const DoutPrefixProvider *dpp) override; int InitPrepareParams(const DoutPrefixProvider *dpp, DBOpPrepareParams &p_params, DBOpParams* params) override; int exec(const DoutPrefixProvider *dpp, const char *schema, int (*callback)(void*,int,char**,char**)); int Step(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt, int (*cbk)(const DoutPrefixProvider *dpp, DBOpInfo &op, sqlite3_stmt *stmt)); int Reset(const DoutPrefixProvider *dpp, sqlite3_stmt *stmt); /* default value matches with sqliteDB style */ int createTables(const DoutPrefixProvider *dpp) override; int createBucketTable(const DoutPrefixProvider *dpp, DBOpParams *params); int createUserTable(const DoutPrefixProvider *dpp, DBOpParams *params); int createObjectTable(const DoutPrefixProvider *dpp, DBOpParams *params); int createObjectDataTable(const DoutPrefixProvider *dpp, DBOpParams *params); int createObjectView(const DoutPrefixProvider *dpp, DBOpParams *params); int createObjectTableTrigger(const DoutPrefixProvider *dpp, DBOpParams *params); int createQuotaTable(const DoutPrefixProvider *dpp, DBOpParams *params); void populate_object_params(const 
                              DoutPrefixProvider *dpp,
                              struct DBOpPrepareParams& p_params,
                              struct DBOpParams* params, bool data);

  int createLCTables(const DoutPrefixProvider *dpp) override;

  // drop-table helpers, one per schema table
  int DeleteBucketTable(const DoutPrefixProvider *dpp, DBOpParams *params);
  int DeleteUserTable(const DoutPrefixProvider *dpp, DBOpParams *params);
  int DeleteObjectTable(const DoutPrefixProvider *dpp, DBOpParams *params);
  int DeleteObjectDataTable(const DoutPrefixProvider *dpp, DBOpParams *params);
  int DeleteQuotaTable(const DoutPrefixProvider *dpp, DBOpParams *params);
  int DeleteLCEntryTable(const DoutPrefixProvider *dpp, DBOpParams *params);
  int DeleteLCHeadTable(const DoutPrefixProvider *dpp, DBOpParams *params);

  int ListAllBuckets(const DoutPrefixProvider *dpp, DBOpParams *params) override;
  int ListAllUsers(const DoutPrefixProvider *dpp, DBOpParams *params) override;
  int ListAllObjects(const DoutPrefixProvider *dpp, DBOpParams *params) override;
};

// Holder that wires up the per-object operation implementations below.
class SQLObjectOp : public ObjectOp {
  private:
    sqlite3 **sdb = NULL;
    CephContext *cct;

  public:
    SQLObjectOp(sqlite3 **sdbi, CephContext *_cct) : sdb(sdbi), cct(_cct) {};
    ~SQLObjectOp() {}

    int InitializeObjectOps(std::string db_name, const DoutPrefixProvider *dpp);
};

// InsertUser operation; owns one prepared statement, finalized on destruction.
class SQLInsertUser : public SQLiteDB, public InsertUserOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLInsertUser(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLInsertUser() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// RemoveUser operation.
class SQLRemoveUser : public SQLiteDB, public RemoveUserOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLRemoveUser(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLRemoveUser() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// GetUser operation; separate prepared statements per lookup key.
class SQLGetUser : public SQLiteDB, public GetUserOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement
    sqlite3_stmt *email_stmt = NULL; // Prepared statement to query by useremail
    sqlite3_stmt *ak_stmt = NULL; // Prepared statement to query by access_key_id
    sqlite3_stmt *userid_stmt = NULL; // Prepared statement to query by user_id

  public:
    SQLGetUser(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLGetUser() {
      if (stmt)
        sqlite3_finalize(stmt);
      if (email_stmt)
        sqlite3_finalize(email_stmt);
      if (ak_stmt)
        sqlite3_finalize(ak_stmt);
      if (userid_stmt)
        sqlite3_finalize(userid_stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// InsertBucket operation.
class SQLInsertBucket : public SQLiteDB, public InsertBucketOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLInsertBucket(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLInsertBucket() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// UpdateBucket operation; one statement per update variant
// (info / attrs / owner).
class SQLUpdateBucket : public SQLiteDB, public UpdateBucketOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *info_stmt = NULL; // Prepared statement
    sqlite3_stmt *attrs_stmt = NULL; // Prepared statement
    sqlite3_stmt *owner_stmt = NULL; // Prepared statement

  public:
    SQLUpdateBucket(void **db, std::string db_name, CephContext *cct) :
      SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLUpdateBucket() {
      if (info_stmt)
        sqlite3_finalize(info_stmt);
      if (attrs_stmt)
        sqlite3_finalize(attrs_stmt);
      if (owner_stmt)
        sqlite3_finalize(owner_stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// RemoveBucket operation.
class SQLRemoveBucket : public SQLiteDB, public RemoveBucketOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLRemoveBucket(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLRemoveBucket() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// GetBucket operation.
class SQLGetBucket : public SQLiteDB, public GetBucketOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLGetBucket(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLGetBucket() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// ListUserBuckets operation; "all_stmt" covers the list-all-buckets variant.
class SQLListUserBuckets : public SQLiteDB, public ListUserBucketsOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement
    sqlite3_stmt *all_stmt = NULL; // Prepared statement

  public:
    SQLListUserBuckets(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLListUserBuckets() {
      if (stmt)
        sqlite3_finalize(stmt);
      if (all_stmt)
        sqlite3_finalize(all_stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// PutObject operation; constructible from a void** db handle or a sqlite3**.
class SQLPutObject : public SQLiteDB, public PutObjectOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLPutObject(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLPutObject(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLPutObject() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// DeleteObject operation.
class SQLDeleteObject : public SQLiteDB, public DeleteObjectOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLDeleteObject(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLDeleteObject(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLDeleteObject() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// GetObject operation.
class SQLGetObject : public SQLiteDB, public GetObjectOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLGetObject(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLGetObject(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLGetObject() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int
Bind(const DoutPrefixProvider *dpp, DBOpParams *params); }; class SQLUpdateObject : public SQLiteDB, public UpdateObjectOp { private: sqlite3 **sdb = NULL; sqlite3_stmt *omap_stmt = NULL; // Prepared statement sqlite3_stmt *attrs_stmt = NULL; // Prepared statement sqlite3_stmt *meta_stmt = NULL; // Prepared statement sqlite3_stmt *mp_stmt = NULL; // Prepared statement public: SQLUpdateObject(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {} SQLUpdateObject(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {} ~SQLUpdateObject() { if (omap_stmt) sqlite3_finalize(omap_stmt); if (attrs_stmt) sqlite3_finalize(attrs_stmt); if (meta_stmt) sqlite3_finalize(meta_stmt); } int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params); int Execute(const DoutPrefixProvider *dpp, DBOpParams *params); int Bind(const DoutPrefixProvider *dpp, DBOpParams *params); }; class SQLListBucketObjects : public SQLiteDB, public ListBucketObjectsOp { private: sqlite3 **sdb = NULL; sqlite3_stmt *stmt = NULL; // Prepared statement public: SQLListBucketObjects(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {} SQLListBucketObjects(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {} ~SQLListBucketObjects() { if (stmt) sqlite3_finalize(stmt); } int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params); int Execute(const DoutPrefixProvider *dpp, DBOpParams *params); int Bind(const DoutPrefixProvider *dpp, DBOpParams *params); }; class SQLListVersionedObjects : public SQLiteDB, public ListVersionedObjectsOp { private: sqlite3 **sdb = NULL; sqlite3_stmt *stmt = NULL; // Prepared statement public: SQLListVersionedObjects(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {} SQLListVersionedObjects(sqlite3 
        **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLListVersionedObjects() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// PutObjectData operation (object payload rows).
class SQLPutObjectData : public SQLiteDB, public PutObjectDataOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLPutObjectData(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLPutObjectData(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLPutObjectData() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// UpdateObjectData operation.
class SQLUpdateObjectData : public SQLiteDB, public UpdateObjectDataOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLUpdateObjectData(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLUpdateObjectData(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLUpdateObjectData() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// GetObjectData operation.
class SQLGetObjectData : public SQLiteDB, public GetObjectDataOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLGetObjectData(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLGetObjectData(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLGetObjectData() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// DeleteObjectData operation.
class SQLDeleteObjectData : public SQLiteDB, public DeleteObjectDataOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLDeleteObjectData(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLDeleteObjectData(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLDeleteObjectData() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// DeleteStaleObjectData operation (purge rows older than a cutoff mtime).
class SQLDeleteStaleObjectData : public SQLiteDB, public DeleteStaleObjectDataOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLDeleteStaleObjectData(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}

    SQLDeleteStaleObjectData(sqlite3 **sdbi, std::string db_name, CephContext *cct) : SQLiteDB(*sdbi, db_name, cct), sdb(sdbi) {}

    ~SQLDeleteStaleObjectData() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// InsertLCEntry operation (lifecycle processing state, per bucket).
class SQLInsertLCEntry : public SQLiteDB, public InsertLCEntryOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLInsertLCEntry(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLInsertLCEntry() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// RemoveLCEntry operation.
class SQLRemoveLCEntry : public SQLiteDB, public RemoveLCEntryOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLRemoveLCEntry(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLRemoveLCEntry() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// GetLCEntry operation; "next_stmt" serves the get_next_entry query variant.
class SQLGetLCEntry : public SQLiteDB, public GetLCEntryOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement
    sqlite3_stmt *next_stmt = NULL; // Prepared statement

  public:
    SQLGetLCEntry(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLGetLCEntry() {
      if (stmt)
        sqlite3_finalize(stmt);
      if (next_stmt)
        sqlite3_finalize(next_stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// ListLCEntries operation (paged listing).
class SQLListLCEntries : public SQLiteDB, public ListLCEntriesOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLListLCEntries(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLListLCEntries() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// InsertLCHead operation (lifecycle head/marker row).
class SQLInsertLCHead : public SQLiteDB,
                        public InsertLCHeadOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLInsertLCHead(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLInsertLCHead() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// RemoveLCHead operation.
class SQLRemoveLCHead : public SQLiteDB, public RemoveLCHeadOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLRemoveLCHead(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLRemoveLCHead() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};

// GetLCHead operation.
class SQLGetLCHead : public SQLiteDB, public GetLCHeadOp {
  private:
    sqlite3 **sdb = NULL;
    sqlite3_stmt *stmt = NULL; // Prepared statement

  public:
    SQLGetLCHead(void **db, std::string db_name, CephContext *cct) : SQLiteDB((sqlite3 *)(*db), db_name, cct), sdb((sqlite3 **)db) {}
    ~SQLGetLCHead() {
      if (stmt)
        sqlite3_finalize(stmt);
    }
    int Prepare(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Execute(const DoutPrefixProvider *dpp, DBOpParams *params);
    int Bind(const DoutPrefixProvider *dpp, DBOpParams *params);
};
21,389
37.75
145
h
null
ceph-main/src/rgw/driver/dbstore/sqlite/statement.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "common/dout.h" #include "error.h" #include "statement.h" #define dout_subsys ceph_subsys_rgw_dbstore namespace rgw::dbstore::sqlite { // owning pointer to arbitrary memory allocated and returned by sqlite3 struct sqlite_deleter { template <typename T> void operator()(T* p) { ::sqlite3_free(p); } }; template <typename T> using sqlite_ptr = std::unique_ptr<T, sqlite_deleter>; stmt_ptr prepare_statement(const DoutPrefixProvider* dpp, sqlite3* db, std::string_view sql) { sqlite3_stmt* stmt = nullptr; int result = ::sqlite3_prepare_v2(db, sql.data(), sql.size(), &stmt, nullptr); auto ec = std::error_code{result, sqlite::error_category()}; if (ec != sqlite::errc::ok) { const char* errmsg = ::sqlite3_errmsg(db); ldpp_dout(dpp, 1) << "preparation failed: " << errmsg << " (" << ec << ")\nstatement: " << sql << dendl; throw sqlite::error(errmsg, ec); } return stmt_ptr{stmt}; } static int bind_index(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name) { const int index = ::sqlite3_bind_parameter_index(stmt.get(), name); if (index <= 0) { ldpp_dout(dpp, 1) << "binding failed on parameter name=" << name << dendl; sqlite3* db = ::sqlite3_db_handle(stmt.get()); throw sqlite::error(db); } return index; } void bind_null(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name) { const int index = bind_index(dpp, stmt, name); int result = ::sqlite3_bind_null(stmt.get(), index); auto ec = std::error_code{result, sqlite::error_category()}; if (ec != sqlite::errc::ok) { ldpp_dout(dpp, 1) << "binding failed on parameter name=" << name << dendl; 
sqlite3* db = ::sqlite3_db_handle(stmt.get()); throw sqlite::error(db, ec); } } void bind_text(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name, std::string_view value) { const int index = bind_index(dpp, stmt, name); int result = ::sqlite3_bind_text(stmt.get(), index, value.data(), value.size(), SQLITE_STATIC); auto ec = std::error_code{result, sqlite::error_category()}; if (ec != sqlite::errc::ok) { ldpp_dout(dpp, 1) << "binding failed on parameter name=" << name << " value=" << value << dendl; sqlite3* db = ::sqlite3_db_handle(stmt.get()); throw sqlite::error(db, ec); } } void bind_int(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name, int value) { const int index = bind_index(dpp, stmt, name); int result = ::sqlite3_bind_int(stmt.get(), index, value); auto ec = std::error_code{result, sqlite::error_category()}; if (ec != sqlite::errc::ok) { ldpp_dout(dpp, 1) << "binding failed on parameter name=" << name << " value=" << value << dendl; sqlite3* db = ::sqlite3_db_handle(stmt.get()); throw sqlite::error(db, ec); } } void eval0(const DoutPrefixProvider* dpp, const stmt_execution& stmt) { sqlite_ptr<char> sql; if (dpp->get_cct()->_conf->subsys.should_gather<dout_subsys, 20>()) { sql.reset(::sqlite3_expanded_sql(stmt.get())); } const int result = ::sqlite3_step(stmt.get()); auto ec = std::error_code{result, sqlite::error_category()}; sqlite3* db = ::sqlite3_db_handle(stmt.get()); if (ec != sqlite::errc::done) { const char* errmsg = ::sqlite3_errmsg(db); ldpp_dout(dpp, 20) << "evaluation failed: " << errmsg << " (" << ec << ")\nstatement: " << sql.get() << dendl; throw sqlite::error(errmsg, ec); } ldpp_dout(dpp, 20) << "evaluation succeeded: " << sql.get() << dendl; } void eval1(const DoutPrefixProvider* dpp, const stmt_execution& stmt) { sqlite_ptr<char> sql; if (dpp->get_cct()->_conf->subsys.should_gather<dout_subsys, 20>()) { sql.reset(::sqlite3_expanded_sql(stmt.get())); } const int result = 
::sqlite3_step(stmt.get()); auto ec = std::error_code{result, sqlite::error_category()}; if (ec != sqlite::errc::row) { sqlite3* db = ::sqlite3_db_handle(stmt.get()); const char* errmsg = ::sqlite3_errmsg(db); ldpp_dout(dpp, 1) << "evaluation failed: " << errmsg << " (" << ec << ")\nstatement: " << sql.get() << dendl; throw sqlite::error(errmsg, ec); } ldpp_dout(dpp, 20) << "evaluation succeeded: " << sql.get() << dendl; } int column_int(const stmt_execution& stmt, int column) { return ::sqlite3_column_int(stmt.get(), column); } std::string column_text(const stmt_execution& stmt, int column) { const unsigned char* text = ::sqlite3_column_text(stmt.get(), column); // may be NULL if (text) { const std::size_t size = ::sqlite3_column_bytes(stmt.get(), column); return {reinterpret_cast<const char*>(text), size}; } else { return {}; } } auto read_text_rows(const DoutPrefixProvider* dpp, const stmt_execution& stmt, std::span<std::string> entries) -> std::span<std::string> { sqlite_ptr<char> sql; if (dpp->get_cct()->_conf->subsys.should_gather<dout_subsys, 20>()) { sql.reset(::sqlite3_expanded_sql(stmt.get())); } std::size_t count = 0; while (count < entries.size()) { const int result = ::sqlite3_step(stmt.get()); auto ec = std::error_code{result, sqlite::error_category()}; if (ec == sqlite::errc::done) { break; } if (ec != sqlite::errc::row) { sqlite3* db = ::sqlite3_db_handle(stmt.get()); const char* errmsg = ::sqlite3_errmsg(db); ldpp_dout(dpp, 1) << "evaluation failed: " << errmsg << " (" << ec << ")\nstatement: " << sql.get() << dendl; throw sqlite::error(errmsg, ec); } entries[count] = column_text(stmt, 0); ++count; } ldpp_dout(dpp, 20) << "statement evaluation produced " << count << " results: " << sql.get() << dendl; return entries.first(count); } void execute(const DoutPrefixProvider* dpp, sqlite3* db, const char* query, sqlite3_callback callback, void* arg) { char* errmsg = nullptr; const int result = ::sqlite3_exec(db, query, callback, arg, &errmsg); auto ec = 
std::error_code{result, sqlite::error_category()}; auto ptr = sqlite_ptr<char>{errmsg}; // free on destruction if (ec != sqlite::errc::ok) { ldpp_dout(dpp, 1) << "query execution failed: " << errmsg << " (" << ec << ")\nquery: " << query << dendl; throw sqlite::error(errmsg, ec); } ldpp_dout(dpp, 20) << "query execution succeeded: " << query << dendl; } } // namespace rgw::dbstore::sqlite
6,976
31.910377
80
cc
null
ceph-main/src/rgw/driver/dbstore/sqlite/statement.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <memory> #include <span> #include <string> #include <sqlite3.h> class DoutPrefixProvider; namespace rgw::dbstore::sqlite { // owning sqlite3_stmt pointer struct stmt_deleter { void operator()(sqlite3_stmt* p) const { ::sqlite3_finalize(p); } }; using stmt_ptr = std::unique_ptr<sqlite3_stmt, stmt_deleter>; // non-owning sqlite3_stmt pointer that clears binding state on destruction struct stmt_binding_deleter { void operator()(sqlite3_stmt* p) const { ::sqlite3_clear_bindings(p); } }; using stmt_binding = std::unique_ptr<sqlite3_stmt, stmt_binding_deleter>; // non-owning sqlite3_stmt pointer that clears execution state on destruction struct stmt_execution_deleter { void operator()(sqlite3_stmt* p) const { ::sqlite3_reset(p); } }; using stmt_execution = std::unique_ptr<sqlite3_stmt, stmt_execution_deleter>; // prepare the sql statement or throw on error stmt_ptr prepare_statement(const DoutPrefixProvider* dpp, sqlite3* db, std::string_view sql); // bind a NULL input for the given parameter name void bind_null(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name); // bind an input string for the given parameter name void bind_text(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name, std::string_view value); // bind an input integer for the given parameter name void bind_int(const DoutPrefixProvider* dpp, const stmt_binding& stmt, const char* name, int value); // evaluate a prepared statement, expecting no result rows void eval0(const DoutPrefixProvider* dpp, const stmt_execution& stmt); // evaluate a prepared 
statement, expecting a single result row void eval1(const DoutPrefixProvider* dpp, const stmt_execution& stmt); // return the given column as an integer int column_int(const stmt_execution& stmt, int column); // return the given column as text, or an empty string on NULL std::string column_text(const stmt_execution& stmt, int column); // read the text column from each result row into the given entries, and return // the sub-span of entries that contain results auto read_text_rows(const DoutPrefixProvider* dpp, const stmt_execution& stmt, std::span<std::string> entries) -> std::span<std::string>; // execute a raw query without preparing a statement. the optional callback // can be used to read results void execute(const DoutPrefixProvider* dpp, sqlite3* db, const char* query, sqlite3_callback callback, void* arg); } // namespace rgw::dbstore::sqlite
3,020
33.329545
79
h
null
ceph-main/src/rgw/driver/dbstore/tests/dbstore_mgr_tests.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

// Unit tests for DBStoreManager: database file creation, naming, tenant
// lookup, and deletion. Each test runs inside a throwaway directory under
// the system temp path.

#include "common/ceph_context.h"
#include "rgw/driver/dbstore/dbstore_mgr.h"

#include <filesystem>
#include <gtest/gtest.h>
#include <memory>

using namespace rgw;
namespace fs = std::filesystem;

// scratch directory (relative to the temp dir) recreated per test
const static std::string TEST_DIR = "rgw_dbstore_tests";

// true iff 'mainStr' ends with 'toMatch'
bool endsWith(const std::string &mainStr, const std::string &toMatch)
{
  if(mainStr.size() >= toMatch.size() &&
      mainStr.compare(mainStr.size() - toMatch.size(), toMatch.size(), toMatch) == 0)
    return true;
  else
    return false;
}

// Fixture: builds a fresh CephContext and a clean scratch directory, and
// provides helpers that derive the expected database paths/names from the
// dbstore_db_dir / dbstore_db_name_prefix config options.
class TestDBStoreManager : public ::testing::Test {
protected:
  void SetUp() override {
    ctx_ = std::make_shared<CephContext>(CEPH_ENTITY_TYPE_CLIENT);
    // some code paths read the global context directly
    g_ceph_context = ctx_.get();
    fs::current_path(fs::temp_directory_path());
    fs::create_directory(TEST_DIR);
  }

  void TearDown() override {
    fs::current_path(fs::temp_directory_path());
    fs::remove_all(TEST_DIR);
  }

  // absolute path of the per-run scratch directory
  std::string getTestDir() const {
    auto test_dir = fs::temp_directory_path() / TEST_DIR;
    return test_dir.string();
  }

  // expected on-disk path of the tenant's database file:
  // <base_dir>/<dbstore_db_dir>/<prefix>-<tenant>.db
  fs::path getDBFullPath(const std::string & base_dir,
                         const std::string & tenant) const {
    auto db_path = ctx_->_conf.get_val<std::string>("dbstore_db_dir");
    const auto& db_name = ctx_->_conf.get_val<std::string>("dbstore_db_name_prefix") + "-" + tenant + ".db";

    auto db_full_path = std::filesystem::path(db_path) / db_name;
    auto db_full_path_test = fs::path(base_dir) / db_full_path;
    return db_full_path_test;
  }

  // expected DB name (no .db suffix) as reported by DB::getDBname():
  // <base_dir>/<prefix>-<tenant>
  std::string getDBTenant(const std::string & base_dir,
                          const std::string & tenant) const {
    auto db_name = ctx_->_conf.get_val<std::string>("dbstore_db_name_prefix");
    db_name += "-" + tenant;
    auto db_full_path = fs::path(base_dir) / db_name;
    return db_full_path.string();
  }

  // convenience overload rooted at the scratch dir
  std::string getDBTenant(const std::string & tenant = default_tenant) const {
    return getDBTenant(getTestDir(), tenant);
  }

  // convenience overload rooted at the scratch dir
  fs::path getDBFullPath(const std::string & tenant) const {
    return getDBFullPath(getTestDir(), tenant);
  }

  // log file placed in the system temp dir
  fs::path getLogFilePath(const std::string & log_file) {
    return fs::temp_directory_path() / log_file;
  }

  std::shared_ptr<CephContext> getContext() const { return ctx_; }

 private:
  std::shared_ptr<CephContext> ctx_;
};

// constructing the manager creates the default tenant's database file
TEST_F(TestDBStoreManager, BasicInstantiateUsingDBDir) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  EXPECT_FALSE(fs::exists(getDBFullPath(default_tenant)));
  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());
  EXPECT_TRUE(fs::exists(getDBFullPath(default_tenant)));
}

// the dbstore_db_name_prefix option is honored in the created file name
TEST_F(TestDBStoreManager, DBNamePrefix) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());
  std::string prefix = "testprefix";
  getContext()->_conf.set_val("dbstore_db_name_prefix", prefix);

  EXPECT_FALSE(fs::exists(getDBFullPath(default_tenant)));
  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());
  EXPECT_TRUE(fs::exists(getDBFullPath(default_tenant)));

  // check that the database name contains the given prefix
  std::string expected_db_name = prefix + "-" + default_tenant + ".db";
  EXPECT_TRUE(endsWith(getDBFullPath(default_tenant), expected_db_name));
}

// the (log file, log level) constructor also creates the default database
TEST_F(TestDBStoreManager, BasicInstantiateSecondConstructor) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  EXPECT_FALSE(fs::exists(getDBFullPath(default_tenant)));
  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get(), getLogFilePath("test.log").string(), 10);
  EXPECT_TRUE(fs::exists(getDBFullPath(default_tenant)));
}

// getDB for the default tenant reports the expected DB name
TEST_F(TestDBStoreManager, TestDBName) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());
  auto db = dbstore_mgr->getDB(default_tenant, false);
  ASSERT_NE(nullptr, db);
  EXPECT_EQ(getDBTenant(), db->getDBname());
}

TEST_F(TestDBStoreManager, TestDBNameDefaultDB) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());
  // passing an empty tenant should return the default_db
  auto db = dbstore_mgr->getDB("", false);
  ASSERT_NE(nullptr, db);
  EXPECT_EQ(getDBTenant(), db->getDBname());
}

// unknown tenant without create=true yields no DB
TEST_F(TestDBStoreManager, TestDBBadTenant) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());
  auto db = dbstore_mgr->getDB("does-not-exist", false);
  ASSERT_EQ(nullptr, db);
}

// create=true provisions a database for a new tenant
TEST_F(TestDBStoreManager, TestGetNewDB) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());

  auto new_tenant_path = "new_tenant";
  auto db = dbstore_mgr->getDB(new_tenant_path, true);
  ASSERT_NE(nullptr, db);
  EXPECT_EQ(getDBTenant(new_tenant_path), db->getDBname());
}

// deleteDB removes the tenant's database from the manager
TEST_F(TestDBStoreManager, TestDelete) {
  getContext()->_conf.set_val("dbstore_db_dir", getTestDir());

  auto dbstore_mgr = std::make_shared<DBStoreManager>(getContext().get());
  dbstore_mgr->deleteDB(default_tenant);
  auto db = dbstore_mgr->getDB(default_tenant, false);
  ASSERT_EQ(nullptr, db);
}
5,319
32.670886
115
cc
null
ceph-main/src/rgw/driver/dbstore/tests/dbstore_tests.cc
#include "gtest/gtest.h" #include <iostream> #include <stdlib.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <dbstore.h> #include <sqliteDB.h> #include "rgw_common.h" using namespace std; using DB = rgw::store::DB; vector<const char*> args; namespace gtest { class Environment* env; class Environment : public ::testing::Environment { public: Environment(): tenant("default_ns"), db(nullptr), db_type("SQLite"), ret(-1) {} Environment(string tenantname, string db_typename): tenant(tenantname), db(nullptr), db_type(db_typename), ret(-1) {} virtual ~Environment() {} void SetUp() override { cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, CODE_ENVIRONMENT_DAEMON, CINIT_FLAG_NO_DEFAULT_CONFIG_FILE | CINIT_FLAG_NO_MON_CONFIG | CINIT_FLAG_NO_DAEMON_ACTIONS); if (!db_type.compare("SQLite")) { db = new SQLiteDB(tenant, cct.get()); ASSERT_TRUE(db != nullptr); ret = db->Initialize(logfile, loglevel); ASSERT_GE(ret, 0); } } void TearDown() override { if (!db) return; db->Destroy(db->get_def_dpp()); delete db; } string tenant; DB *db; string db_type; int ret; string logfile = "rgw_dbstore_tests.log"; int loglevel = 30; boost::intrusive_ptr<CephContext> cct; }; } ceph::real_time bucket_mtime = real_clock::now(); string marker1; class DBGetDataCB : public RGWGetDataCB { public: bufferlist data_bl; off_t data_ofs, data_len; int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) { data_bl = bl; data_ofs = bl_ofs; data_len = bl_len; return 0; } }; namespace { class DBStoreTest : public ::testing::Test { protected: int ret; DB *db = nullptr; string user1 = "user1"; string user_id1 = "user_id1"; string bucket1 = "bucket1"; string object1 = "object1"; string data = "Hello World"; DBOpParams GlobalParams = {}; const DoutPrefixProvider *dpp; DBStoreTest() {} void SetUp() { db = gtest::env->db; ASSERT_TRUE(db != nullptr); dpp = db->get_def_dpp(); ASSERT_TRUE(dpp != nullptr); GlobalParams.op.user.uinfo.display_name = user1; 
GlobalParams.op.user.uinfo.user_id.id = user_id1; GlobalParams.op.bucket.info.bucket.name = bucket1; GlobalParams.op.obj.state.obj.bucket = GlobalParams.op.bucket.info.bucket; GlobalParams.op.obj.state.obj.key.name = object1; GlobalParams.op.obj.state.obj.key.instance = "inst1"; GlobalParams.op.obj.obj_id = "obj_id1"; GlobalParams.op.obj_data.part_num = 0; /* As of now InitializeParams doesnt do anything * special based on fop. Hence its okay to do * global initialization once. */ ret = db->InitializeParams(dpp, &GlobalParams); ASSERT_EQ(ret, 0); } void TearDown() { } int write_object(const DoutPrefixProvider *dpp, DBOpParams params) { DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Write write_op(&op_target); map<string, bufferlist> setattrs; ret = write_op.prepare(dpp); if (ret) return ret; write_op.meta.mtime = &bucket_mtime; write_op.meta.category = RGWObjCategory::Main; write_op.meta.owner = params.op.user.uinfo.user_id; bufferlist b1 = params.op.obj.head_data; write_op.meta.data = &b1; bufferlist b2; encode("ACL", b2); setattrs[RGW_ATTR_ACL] = b2; ret = write_op.write_meta(0, params.op.obj.state.size, b1.length()+1, setattrs); return ret; } }; } TEST_F(DBStoreTest, InsertUser) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.user.uinfo.user_id.tenant = "tenant"; params.op.user.uinfo.user_email = "[email protected]"; params.op.user.uinfo.suspended = 123; params.op.user.uinfo.max_buckets = 456; params.op.user.uinfo.placement_tags.push_back("tags"); RGWAccessKey k1("id1", "key1"); RGWAccessKey k2("id2", "key2"); params.op.user.uinfo.access_keys["id1"] = k1; params.op.user.uinfo.access_keys["id2"] = k2; params.op.user.user_version.ver = 1; params.op.user.user_version.tag = "UserTAG"; ret = db->ProcessOp(dpp, "InsertUser", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, GetUser) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ProcessOp(dpp, "GetUser", &params); ASSERT_EQ(ret, 0); 
ASSERT_EQ(params.op.user.uinfo.user_id.tenant, "tenant"); ASSERT_EQ(params.op.user.uinfo.user_email, "[email protected]"); ASSERT_EQ(params.op.user.uinfo.user_id.id, "user_id1"); ASSERT_EQ(params.op.user.uinfo.suspended, 123); ASSERT_EQ(params.op.user.uinfo.max_buckets, 456); ASSERT_EQ(params.op.user.uinfo.placement_tags.back(), "tags"); RGWAccessKey k; map<string, RGWAccessKey>::iterator it2 = params.op.user.uinfo.access_keys.begin(); k = it2->second; ASSERT_EQ(k.id, "id1"); ASSERT_EQ(k.key, "key1"); it2++; k = it2->second; ASSERT_EQ(k.id, "id2"); ASSERT_EQ(k.key, "key2"); } TEST_F(DBStoreTest, GetUserQuery) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.query_str = "email"; params.op.user.uinfo.user_email = "[email protected]"; ret = db->ProcessOp(dpp, "GetUser", &params); ASSERT_EQ(ret, 0); ASSERT_EQ(params.op.user.uinfo.user_id.tenant, "tenant"); ASSERT_EQ(params.op.user.uinfo.user_email, "[email protected]"); ASSERT_EQ(params.op.user.uinfo.user_id.id, "user_id1"); ASSERT_EQ(params.op.user.uinfo.suspended, 123); ASSERT_EQ(params.op.user.uinfo.max_buckets, 456); ASSERT_EQ(params.op.user.uinfo.placement_tags.back(), "tags"); RGWAccessKey k; map<string, RGWAccessKey>::iterator it2 = params.op.user.uinfo.access_keys.begin(); k = it2->second; ASSERT_EQ(k.id, "id1"); ASSERT_EQ(k.key, "key1"); it2++; k = it2->second; ASSERT_EQ(k.id, "id2"); ASSERT_EQ(k.key, "key2"); } TEST_F(DBStoreTest, GetUserQueryByEmail) { int ret = -1; RGWUserInfo uinfo; string email = "[email protected]"; map<std::string, bufferlist> attrs; RGWObjVersionTracker objv; ret = db->get_user(dpp, "email", email, uinfo, &attrs, &objv); ASSERT_EQ(ret, 0); ASSERT_EQ(uinfo.user_id.tenant, "tenant"); ASSERT_EQ(uinfo.user_email, "[email protected]"); ASSERT_EQ(uinfo.user_id.id, "user_id1"); ASSERT_EQ(uinfo.suspended, 123); ASSERT_EQ(uinfo.max_buckets, 456); ASSERT_EQ(uinfo.placement_tags.back(), "tags"); RGWAccessKey k; map<string, RGWAccessKey>::iterator it2 = uinfo.access_keys.begin(); 
k = it2->second; ASSERT_EQ(k.id, "id1"); ASSERT_EQ(k.key, "key1"); it2++; k = it2->second; ASSERT_EQ(k.id, "id2"); ASSERT_EQ(k.key, "key2"); ASSERT_EQ(objv.read_version.ver, 1); } TEST_F(DBStoreTest, GetUserQueryByAccessKey) { int ret = -1; RGWUserInfo uinfo; string key = "id1"; ret = db->get_user(dpp, "access_key", key, uinfo, nullptr, nullptr); ASSERT_EQ(ret, 0); ASSERT_EQ(uinfo.user_id.tenant, "tenant"); ASSERT_EQ(uinfo.user_email, "[email protected]"); ASSERT_EQ(uinfo.user_id.id, "user_id1"); ASSERT_EQ(uinfo.suspended, 123); ASSERT_EQ(uinfo.max_buckets, 456); ASSERT_EQ(uinfo.placement_tags.back(), "tags"); RGWAccessKey k; map<string, RGWAccessKey>::iterator it2 = uinfo.access_keys.begin(); k = it2->second; ASSERT_EQ(k.id, "id1"); ASSERT_EQ(k.key, "key1"); it2++; k = it2->second; ASSERT_EQ(k.id, "id2"); ASSERT_EQ(k.key, "key2"); } TEST_F(DBStoreTest, StoreUser) { struct DBOpParams params = GlobalParams; int ret = -1; RGWUserInfo uinfo, old_uinfo; map<std::string, bufferlist> attrs; RGWObjVersionTracker objv_tracker; bufferlist attr1, attr2; encode("attrs1", attr1); attrs["attr1"] = attr1; encode("attrs2", attr2); attrs["attr2"] = attr2; uinfo.user_id.id = "user_id2"; uinfo.user_id.tenant = "tenant"; uinfo.user_email = "[email protected]"; uinfo.suspended = 123; uinfo.max_buckets = 456; uinfo.placement_tags.push_back("tags"); RGWAccessKey k1("id1", "key1"); RGWAccessKey k2("id2", "key2"); uinfo.access_keys["id1"] = k1; uinfo.access_keys["id2"] = k2; /* non exclusive create..should create new one */ ret = db->store_user(dpp, uinfo, false, &attrs, &objv_tracker, &old_uinfo); ASSERT_EQ(ret, 0); ASSERT_EQ(old_uinfo.user_email, ""); ASSERT_EQ(objv_tracker.read_version.ver, 1); ASSERT_EQ(objv_tracker.read_version.tag, "UserTAG"); /* invalid version number */ objv_tracker.read_version.ver = 4; ret = db->store_user(dpp, uinfo, true, &attrs, &objv_tracker, &old_uinfo); ASSERT_EQ(ret, -125); /* returns ECANCELED */ ASSERT_EQ(old_uinfo.user_id.id, uinfo.user_id.id); 
ASSERT_EQ(old_uinfo.user_email, uinfo.user_email); /* exclusive create..should not create new one */ uinfo.user_email = "[email protected]"; objv_tracker.read_version.ver = 1; ret = db->store_user(dpp, uinfo, true, &attrs, &objv_tracker, &old_uinfo); ASSERT_EQ(ret, 0); ASSERT_EQ(old_uinfo.user_email, "[email protected]"); ASSERT_EQ(objv_tracker.read_version.ver, 1); ret = db->store_user(dpp, uinfo, false, &attrs, &objv_tracker, &old_uinfo); ASSERT_EQ(ret, 0); ASSERT_EQ(old_uinfo.user_email, "[email protected]"); ASSERT_EQ(objv_tracker.read_version.ver, 2); ASSERT_EQ(objv_tracker.read_version.tag, "UserTAG"); } TEST_F(DBStoreTest, GetUserQueryByUserID) { int ret = -1; RGWUserInfo uinfo; map<std::string, bufferlist> attrs; RGWObjVersionTracker objv; uinfo.user_id.tenant = "tenant"; uinfo.user_id.id = "user_id2"; ret = db->get_user(dpp, "user_id", "user_id2", uinfo, &attrs, &objv); ASSERT_EQ(ret, 0); ASSERT_EQ(uinfo.user_id.tenant, "tenant"); ASSERT_EQ(uinfo.user_email, "[email protected]"); ASSERT_EQ(uinfo.user_id.id, "user_id2"); ASSERT_EQ(uinfo.suspended, 123); ASSERT_EQ(uinfo.max_buckets, 456); ASSERT_EQ(uinfo.placement_tags.back(), "tags"); RGWAccessKey k; map<string, RGWAccessKey>::iterator it = uinfo.access_keys.begin(); k = it->second; ASSERT_EQ(k.id, "id1"); ASSERT_EQ(k.key, "key1"); it++; k = it->second; ASSERT_EQ(k.id, "id2"); ASSERT_EQ(k.key, "key2"); ASSERT_EQ(objv.read_version.ver, 2); bufferlist k1, k2; string attr; map<std::string, bufferlist>::iterator it2 = attrs.begin(); k1 = it2->second; decode(attr, k1); ASSERT_EQ(attr, "attrs1"); it2++; k2 = it2->second; decode(attr, k2); ASSERT_EQ(attr, "attrs2"); } TEST_F(DBStoreTest, ListAllUsers) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ListAllUsers(dpp, &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, InsertBucket) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.bucket.info.bucket.name = "bucket1"; params.op.bucket.info.bucket.tenant = "tenant"; 
params.op.bucket.info.bucket.marker = "marker1"; params.op.bucket.ent.size = 1024; params.op.bucket.info.has_instance_obj = false; params.op.bucket.bucket_version.ver = 1; params.op.bucket.bucket_version.tag = "read_tag"; params.op.bucket.mtime = bucket_mtime; ret = db->ProcessOp(dpp, "InsertBucket", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, UpdateBucketAttrs) { int ret = -1; RGWBucketInfo info; map<std::string, bufferlist> attrs; RGWObjVersionTracker objv; bufferlist aclbl, aclbl2; encode("attrs1", aclbl); attrs["attr1"] = aclbl; encode("attrs2", aclbl2); attrs["attr2"] = aclbl2; info.bucket.name = "bucket1"; /* invalid version number */ objv.read_version.ver = 4; ret = db->update_bucket(dpp, "attrs", info, false, nullptr, &attrs, &bucket_mtime, &objv); ASSERT_EQ(ret, -125); /* returns ECANCELED */ /* right version number */ objv.read_version.ver = 1; ret = db->update_bucket(dpp, "attrs", info, false, nullptr, &attrs, &bucket_mtime, &objv); ASSERT_EQ(ret, 0); ASSERT_EQ(objv.read_version.ver, 2); } TEST_F(DBStoreTest, UpdateBucketInfo) { struct DBOpParams params = GlobalParams; int ret = -1; RGWBucketInfo info; params.op.bucket.info.bucket.name = "bucket1"; ret = db->ProcessOp(dpp, "GetBucket", &params); ASSERT_EQ(ret, 0); info = params.op.bucket.info; info.bucket.marker = "marker2"; ret = db->update_bucket(dpp, "info", info, false, nullptr, nullptr, &bucket_mtime, nullptr); ASSERT_EQ(ret, 0); ASSERT_EQ(info.objv_tracker.read_version.ver, 3); } TEST_F(DBStoreTest, GetBucket) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.bucket.info.bucket.name = "bucket1"; ret = db->ProcessOp(dpp, "GetBucket", &params); ASSERT_EQ(ret, 0); ASSERT_EQ(params.op.bucket.info.bucket.name, "bucket1"); ASSERT_EQ(params.op.bucket.info.bucket.tenant, "tenant"); ASSERT_EQ(params.op.bucket.info.bucket.marker, "marker2"); ASSERT_EQ(params.op.bucket.ent.size, 1024); ASSERT_EQ(params.op.bucket.ent.bucket.name, "bucket1"); ASSERT_EQ(params.op.bucket.ent.bucket.tenant, 
"tenant"); ASSERT_EQ(params.op.bucket.info.has_instance_obj, false); ASSERT_EQ(params.op.bucket.info.objv_tracker.read_version.ver, 3); ASSERT_EQ(params.op.bucket.info.objv_tracker.read_version.tag, "read_tag"); ASSERT_EQ(params.op.bucket.mtime, bucket_mtime); ASSERT_EQ(params.op.bucket.info.owner.id, "user_id1"); bufferlist k, k2; string acl; map<std::string, bufferlist>::iterator it2 = params.op.bucket.bucket_attrs.begin(); k = it2->second; decode(acl, k); ASSERT_EQ(acl, "attrs1"); it2++; k2 = it2->second; decode(acl, k2); ASSERT_EQ(acl, "attrs2"); } TEST_F(DBStoreTest, CreateBucket) { struct DBOpParams params = GlobalParams; int ret = -1; RGWBucketInfo info; RGWUserInfo owner; rgw_bucket bucket; obj_version objv; rgw_placement_rule rule; map<std::string, bufferlist> attrs; owner.user_id.id = "user_id1"; bucket.name = "bucket1"; bucket.tenant = "tenant"; objv.ver = 2; objv.tag = "write_tag"; rule.name = "rule1"; rule.storage_class = "sc1"; ret = db->create_bucket(dpp, owner, bucket, "zid", rule, "swift_ver", NULL, attrs, info, &objv, NULL, bucket_mtime, NULL, NULL, null_yield, false); ASSERT_EQ(ret, 0); bucket.name = "bucket2"; ret = db->create_bucket(dpp, owner, bucket, "zid", rule, "swift_ver", NULL, attrs, info, &objv, NULL, bucket_mtime, NULL, NULL, null_yield, false); ASSERT_EQ(ret, 0); bucket.name = "bucket3"; ret = db->create_bucket(dpp, owner, bucket, "zid", rule, "swift_ver", NULL, attrs, info, &objv, NULL, bucket_mtime, NULL, NULL, null_yield, false); ASSERT_EQ(ret, 0); bucket.name = "bucket4"; ret = db->create_bucket(dpp, owner, bucket, "zid", rule, "swift_ver", NULL, attrs, info, &objv, NULL, bucket_mtime, NULL, NULL, null_yield, false); ASSERT_EQ(ret, 0); bucket.name = "bucket5"; ret = db->create_bucket(dpp, owner, bucket, "zid", rule, "swift_ver", NULL, attrs, info, &objv, NULL, bucket_mtime, NULL, NULL, null_yield, false); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, GetBucketQueryByName) { int ret = -1; RGWBucketInfo binfo; binfo.bucket.name = 
"bucket2"; rgw::sal::Attrs attrs; ceph::real_time mtime; obj_version objv; ret = db->get_bucket_info(dpp, "name", "", binfo, &attrs, &mtime, &objv); ASSERT_EQ(ret, 0); ASSERT_EQ(binfo.bucket.name, "bucket2"); ASSERT_EQ(binfo.bucket.tenant, "tenant"); ASSERT_EQ(binfo.owner.id, "user_id1"); ASSERT_EQ(binfo.objv_tracker.read_version.ver, 2); ASSERT_EQ(binfo.objv_tracker.read_version.tag, "write_tag"); ASSERT_EQ(binfo.zonegroup, "zid"); ASSERT_EQ(binfo.creation_time, bucket_mtime); ASSERT_EQ(binfo.placement_rule.name, "rule1"); ASSERT_EQ(binfo.placement_rule.storage_class, "sc1"); ASSERT_EQ(objv.ver, 2); ASSERT_EQ(objv.tag, "write_tag"); marker1 = binfo.bucket.marker; } TEST_F(DBStoreTest, ListUserBuckets) { struct DBOpParams params = GlobalParams; int ret = -1; rgw_user owner; int max = 2; bool need_stats = true; bool is_truncated = false; RGWUserBuckets ulist; owner.id = "user_id1"; marker1 = ""; do { is_truncated = false; ret = db->list_buckets(dpp, "", owner, marker1, "", max, need_stats, &ulist, &is_truncated); ASSERT_EQ(ret, 0); cout << "marker1 :" << marker1 << "\n"; cout << "is_truncated :" << is_truncated << "\n"; for (const auto& ent: ulist.get_buckets()) { RGWBucketEnt e = ent.second; cout << "###################### \n"; cout << "ent.bucket.id : " << e.bucket.name << "\n"; cout << "ent.bucket.marker : " << e.bucket.marker << "\n"; cout << "ent.bucket.bucket_id : " << e.bucket.bucket_id << "\n"; cout << "ent.size : " << e.size << "\n"; cout << "ent.rule.name : " << e.placement_rule.name << "\n"; marker1 = e.bucket.name; } ulist.clear(); } while(is_truncated); } TEST_F(DBStoreTest, BucketChown) { int ret = -1; RGWBucketInfo info; rgw_user user; user.id = "user_id2"; info.bucket.name = "bucket5"; ret = db->update_bucket(dpp, "owner", info, false, &user, nullptr, &bucket_mtime, nullptr); ASSERT_EQ(ret, 0); ASSERT_EQ(info.objv_tracker.read_version.ver, 3); } TEST_F(DBStoreTest, ListAllBuckets) { struct DBOpParams params = GlobalParams; int ret = -1; ret = 
db->ListAllBuckets(dpp, &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, ListAllBuckets2) { struct DBOpParams params = GlobalParams; int ret = -1; rgw_user owner; int max = 2; bool need_stats = true; bool is_truncated = false; RGWUserBuckets ulist; marker1 = ""; do { is_truncated = false; ret = db->list_buckets(dpp, "all", owner, marker1, "", max, need_stats, &ulist, &is_truncated); ASSERT_EQ(ret, 0); cout << "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ \n"; cout << "ownerID : " << owner.id << "\n"; cout << "marker1 :" << marker1 << "\n"; cout << "is_truncated :" << is_truncated << "\n"; for (const auto& ent: ulist.get_buckets()) { RGWBucketEnt e = ent.second; cout << "###################### \n"; cout << "ent.bucket.id : " << e.bucket.name << "\n"; cout << "ent.bucket.marker : " << e.bucket.marker << "\n"; cout << "ent.bucket.bucket_id : " << e.bucket.bucket_id << "\n"; cout << "ent.size : " << e.size << "\n"; cout << "ent.rule.name : " << e.placement_rule.name << "\n"; marker1 = e.bucket.name; } ulist.clear(); } while(is_truncated); } TEST_F(DBStoreTest, RemoveBucketAPI) { int ret = -1; RGWBucketInfo info; info.bucket.name = "bucket5"; ret = db->remove_bucket(dpp, info); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, RemoveUserAPI) { int ret = -1; RGWUserInfo uinfo; RGWObjVersionTracker objv; uinfo.user_id.tenant = "tenant"; uinfo.user_id.id = "user_id2"; /* invalid version number...should fail */ objv.read_version.ver = 4; ret = db->remove_user(dpp, uinfo, &objv); ASSERT_EQ(ret, -125); objv.read_version.ver = 2; ret = db->remove_user(dpp, uinfo, &objv); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, PutObject) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.obj.category = RGWObjCategory::Main; params.op.obj.storage_class = "STANDARD"; bufferlist b1; encode("HELLO WORLD", b1); cout<<"XXXXXXXXX Insert b1.length " << b1.length() << "\n"; params.op.obj.head_data = b1; params.op.obj.state.size = 12; params.op.obj.state.is_olh = false; ret = 
db->ProcessOp(dpp, "PutObject", &params); ASSERT_EQ(ret, 0); /* Insert another objects */ params.op.obj.state.obj.key.name = "object2"; params.op.obj.state.obj.key.instance = "inst2"; ret = db->ProcessOp(dpp, "PutObject", &params); ASSERT_EQ(ret, 0); params.op.obj.state.obj.key.name = "object3"; params.op.obj.state.obj.key.instance = "inst3"; ret = db->ProcessOp(dpp, "PutObject", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, ListAllObjects) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ListAllObjects(dpp, &params); ASSERT_GE(ret, 0); } TEST_F(DBStoreTest, GetObject) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ProcessOp(dpp, "GetObject", &params); ASSERT_EQ(ret, 0); ASSERT_EQ(params.op.obj.category, RGWObjCategory::Main); ASSERT_EQ(params.op.obj.storage_class, "STANDARD"); string data; decode(data, params.op.obj.head_data); ASSERT_EQ(data, "HELLO WORLD"); ASSERT_EQ(params.op.obj.state.size, 12); cout << "versionNum :" << params.op.obj.version_num << "\n"; } TEST_F(DBStoreTest, GetObjectState) { struct DBOpParams params = GlobalParams; int ret = -1; RGWObjState* s; params.op.obj.state.obj.key.name = "object2"; params.op.obj.state.obj.key.instance = "inst2"; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target.get_obj_state(dpp, params.op.bucket.info, params.op.obj.state.obj, false, &s); ASSERT_EQ(ret, 0); ASSERT_EQ(s->size, 12); ASSERT_EQ(s->is_olh, false); cout << "versionNum :" << params.op.obj.version_num << "\n"; /* Recheck with get_state API */ ret = op_target.get_state(dpp, &s, false); ASSERT_EQ(ret, 0); ASSERT_EQ(s->size, 12); ASSERT_EQ(s->is_olh, false); cout << "versionNum :" << params.op.obj.version_num << "\n"; } TEST_F(DBStoreTest, ObjAttrs) { struct DBOpParams params = GlobalParams; int ret = -1; map<string, bufferlist> setattrs; map<string, bufferlist> rmattrs; map<string, bufferlist> readattrs; bufferlist b1, b2, b3; encode("ACL", b1); setattrs[RGW_ATTR_ACL] = b1; 
encode("LC", b2); setattrs[RGW_ATTR_LC] = b2; encode("ETAG", b3); setattrs[RGW_ATTR_ETAG] = b3; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); /* Set some attrs */ ret = op_target.set_attrs(dpp, setattrs, nullptr); ASSERT_EQ(ret, 0); /* read those attrs */ DB::Object::Read read_op(&op_target); read_op.params.attrs = &readattrs; ret = read_op.prepare(dpp); ASSERT_EQ(ret, 0); string val; decode(val, readattrs[RGW_ATTR_ACL]); ASSERT_EQ(val, "ACL"); decode(val, readattrs[RGW_ATTR_LC]); ASSERT_EQ(val, "LC"); decode(val, readattrs[RGW_ATTR_ETAG]); ASSERT_EQ(val, "ETAG"); /* Remove some attrs */ rmattrs[RGW_ATTR_ACL] = b1; map<string, bufferlist> empty; ret = op_target.set_attrs(dpp, empty, &rmattrs); ASSERT_EQ(ret, 0); /* read those attrs */ ret = read_op.prepare(dpp); ASSERT_EQ(ret, 0); ASSERT_EQ(readattrs.count(RGW_ATTR_ACL), 0); decode(val, readattrs[RGW_ATTR_LC]); ASSERT_EQ(val, "LC"); decode(val, readattrs[RGW_ATTR_ETAG]); ASSERT_EQ(val, "ETAG"); } TEST_F(DBStoreTest, WriteObject) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.obj.state.obj.key.name = "object3"; params.op.obj.state.obj.key.instance = "inst3"; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); bufferlist b1; encode("HELLO WORLD - Object3", b1); params.op.obj.head_data = b1; params.op.obj.state.size = 22; ret = write_object(dpp, params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, ReadObject) { struct DBOpParams params = GlobalParams; int ret = -1; map<string, bufferlist> readattrs; params.op.obj.state.obj.key.name = "object3"; params.op.obj.state.obj.key.instance = "inst3"; uint64_t obj_size; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Read read_op(&op_target); read_op.params.attrs = &readattrs; read_op.params.obj_size = &obj_size; ret = read_op.prepare(dpp); ASSERT_EQ(ret, 0); bufferlist bl; ret = read_op.read(0, 25, bl, dpp); cout<<"XXXXXXXXX Insert bl.length " << bl.length() << "\n"; 
ASSERT_EQ(ret, 25); string data; decode(data, bl); ASSERT_EQ(data, "HELLO WORLD - Object3"); ASSERT_EQ(obj_size, 22); } TEST_F(DBStoreTest, IterateObject) { struct DBOpParams params = GlobalParams; int ret = -1; map<string, bufferlist> readattrs; uint64_t obj_size; DBGetDataCB cb; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Read read_op(&op_target); read_op.params.attrs = &readattrs; read_op.params.obj_size = &obj_size; ret = read_op.prepare(dpp); ASSERT_EQ(ret, 0); bufferlist bl; ret = read_op.iterate(dpp, 0, 15, &cb); ASSERT_EQ(ret, 0); string data; decode(data, cb.data_bl); cout << "XXXXXXXXXX iterate data is " << data << ", bl_ofs = " << cb.data_ofs << ", bl_len = " << cb.data_len << "\n"; ASSERT_EQ(data, "HELLO WORLD"); ASSERT_EQ(cb.data_ofs, 0); ASSERT_EQ(cb.data_len, 15); } TEST_F(DBStoreTest, ListBucketObjects) { struct DBOpParams params = GlobalParams; int ret = -1; int max = 2; bool is_truncated = false; rgw_obj_key marker1; DB::Bucket target(db, params.op.bucket.info); DB::Bucket::List list_op(&target); vector<rgw_bucket_dir_entry> dir_list; marker1.name = ""; do { is_truncated = false; list_op.params.marker = marker1; ret = list_op.list_objects(dpp, max, &dir_list, nullptr, &is_truncated); ASSERT_EQ(ret, 0); cout << "marker1 :" << marker1.name << "\n"; cout << "is_truncated :" << is_truncated << "\n"; for (const auto& ent: dir_list) { cls_rgw_obj_key key = ent.key; cout << "###################### \n"; cout << "key.name : " << key.name << "\n"; cout << "key.instance : " << key.instance << "\n"; marker1 = list_op.get_next_marker(); } dir_list.clear(); } while(is_truncated); } TEST_F(DBStoreTest, DeleteObj) { struct DBOpParams params = GlobalParams; int ret = -1; RGWObjState *s; /* delete object2 */ params.op.obj.state.obj.key.name = "object2"; params.op.obj.state.obj.key.instance = "inst2"; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Delete delete_op(&op_target); ret = 
delete_op.delete_obj(dpp); ASSERT_EQ(ret, 0); /* Should return ENOENT */ ret = op_target.get_state(dpp, &s, false); ASSERT_EQ(ret, -2); } TEST_F(DBStoreTest, WriteVersionedObject) { struct DBOpParams params = GlobalParams; int ret = -1; std::string instances[] = {"inst1", "inst2", "inst3"}; bufferlist b1; params.op.obj.flags |= rgw_bucket_dir_entry::FLAG_CURRENT; params.op.obj.state.obj.key.name = "object1"; /* Write versioned objects */ DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Write write_op(&op_target); /* Version1 */ params.op.obj.state.obj.key.instance = instances[0]; encode("HELLO WORLD", b1); params.op.obj.head_data = b1; params.op.obj.state.size = 12; ret = write_object(dpp, params); ASSERT_EQ(ret, 0); /* Version2 */ params.op.obj.state.obj.key.instance = instances[1]; b1.clear(); encode("HELLO WORLD ABC", b1); params.op.obj.head_data = b1; params.op.obj.state.size = 16; ret = write_object(dpp, params); ASSERT_EQ(ret, 0); /* Version3 */ params.op.obj.state.obj.key.instance = instances[2]; b1.clear(); encode("HELLO WORLD A", b1); params.op.obj.head_data = b1; params.op.obj.state.size = 14; ret = write_object(dpp, params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, ListVersionedObject) { struct DBOpParams params = GlobalParams; int ret = -1; std::string instances[] = {"inst1", "inst2", "inst3"}; int i = 0; /* list versioned objects */ params.op.obj.state.obj.key.instance.clear(); params.op.list_max_count = MAX_VERSIONED_OBJECTS; ret = db->ProcessOp(dpp, "ListVersionedObjects", &params); ASSERT_EQ(ret, 0); i = 2; for (auto ent: params.op.obj.list_entries) { ASSERT_EQ(ent.key.instance, instances[i]); i--; } } TEST_F(DBStoreTest, ReadVersionedObject) { struct DBOpParams params = GlobalParams; int ret = -1; std::string instances[] = {"inst1", "inst2", "inst3"}; std::string data; /* read object.. 
should fetch latest version */ RGWObjState* s; params = GlobalParams; params.op.obj.state.obj.key.instance.clear(); DB::Object op_target2(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target2.get_obj_state(dpp, params.op.bucket.info, params.op.obj.state.obj, true, &s); ASSERT_EQ(ret, 0); ASSERT_EQ(s->obj.key.instance, instances[2]); decode(data, s->data); ASSERT_EQ(data, "HELLO WORLD A"); ASSERT_EQ(s->size, 14); /* read a particular non-current version */ params.op.obj.state.obj.key.instance = instances[1]; DB::Object op_target3(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target3.get_obj_state(dpp, params.op.bucket.info, params.op.obj.state.obj, true, &s); ASSERT_EQ(ret, 0); decode(data, s->data); ASSERT_EQ(data, "HELLO WORLD ABC"); ASSERT_EQ(s->size, 16); } TEST_F(DBStoreTest, DeleteVersionedObject) { struct DBOpParams params = GlobalParams; int ret = -1; std::string instances[] = {"inst1", "inst2", "inst3"}; std::string data; std::string dm_instance; int i = 0; /* Delete object..should create delete marker */ params.op.obj.state.obj.key.instance.clear(); DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Delete delete_op(&op_target); delete_op.params.versioning_status |= BUCKET_VERSIONED; ret = delete_op.delete_obj(dpp); ASSERT_EQ(ret, 0); /* list versioned objects */ params = GlobalParams; params.op.obj.state.obj.key.instance.clear(); params.op.list_max_count = MAX_VERSIONED_OBJECTS; ret = db->ProcessOp(dpp, "ListVersionedObjects", &params); i = 3; for (auto ent: params.op.obj.list_entries) { string is_delete_marker = (ent.flags & rgw_bucket_dir_entry::FLAG_DELETE_MARKER)? "true" : "false"; cout << "ent.name: " << ent.key.name << ". 
ent.instance: " << ent.key.instance << " is_delete_marker = " << is_delete_marker << "\n"; if (i == 3) { ASSERT_EQ(is_delete_marker, "true"); dm_instance = ent.key.instance; } else { ASSERT_EQ(is_delete_marker, "false"); ASSERT_EQ(ent.key.instance, instances[i]); } i--; } /* read object.. should return -ENOENT */ RGWObjState* s; params = GlobalParams; params.op.obj.state.obj.key.instance.clear(); DB::Object op_target2(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target2.get_obj_state(dpp, params.op.bucket.info, params.op.obj.state.obj, true, &s); ASSERT_EQ(ret, -ENOENT); /* Delete delete marker..should be able to read object now */ params.op.obj.state.obj.key.instance = dm_instance; DB::Object op_target3(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Delete delete_op2(&op_target3); delete_op2.params.versioning_status |= BUCKET_VERSIONED; ret = delete_op2.delete_obj(dpp); ASSERT_EQ(ret, 0); /* read object.. should fetch latest version */ params = GlobalParams; params.op.obj.state.obj.key.instance.clear(); DB::Object op_target4(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target4.get_obj_state(dpp, params.op.bucket.info, params.op.obj.state.obj, true, &s); ASSERT_EQ(s->obj.key.instance, instances[2]); decode(data, s->data); ASSERT_EQ(data, "HELLO WORLD A"); ASSERT_EQ(s->size, 14); /* delete latest version using version-id. 
Next version should get promoted */ params.op.obj.state.obj.key.instance = instances[2]; DB::Object op_target5(db, params.op.bucket.info, params.op.obj.state.obj); DB::Object::Delete delete_op3(&op_target5); delete_op3.params.versioning_status |= BUCKET_VERSIONED; ret = delete_op3.delete_obj(dpp); ASSERT_EQ(ret, 0); /* list versioned objects..only two versions should be present * with second version marked as CURRENT */ params = GlobalParams; params.op.obj.state.obj.key.instance.clear(); params.op.list_max_count = MAX_VERSIONED_OBJECTS; ret = db->ProcessOp(dpp, "ListVersionedObjects", &params); i = 1; for (auto ent: params.op.obj.list_entries) { if (i == 1) { dm_instance = ent.key.instance; } else { ASSERT_EQ(ent.key.instance, instances[i]); } i--; } } TEST_F(DBStoreTest, ObjectOmapSetVal) { struct DBOpParams params = GlobalParams; int ret = -1; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); string val = "part1_val"; bufferlist bl; encode(val, bl); ret = op_target.obj_omap_set_val_by_key(dpp, "part1", bl, false); ASSERT_EQ(ret, 0); val = "part2_val"; bl.clear(); encode(val, bl); ret = op_target.obj_omap_set_val_by_key(dpp, "part2", bl, false); ASSERT_EQ(ret, 0); val = "part3_val"; bl.clear(); encode(val, bl); ret = op_target.obj_omap_set_val_by_key(dpp, "part3", bl, false); ASSERT_EQ(ret, 0); val = "part4_val"; bl.clear(); encode(val, bl); ret = op_target.obj_omap_set_val_by_key(dpp, "part4", bl, false); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, ObjectOmapGetValsByKeys) { struct DBOpParams params = GlobalParams; int ret = -1; std::set<std::string> keys; std::map<std::string, bufferlist> vals; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); keys.insert("part2"); keys.insert("part4"); ret = op_target.obj_omap_get_vals_by_keys(dpp, "", keys, &vals); ASSERT_EQ(ret, 0); ASSERT_EQ(vals.size(), 2); string val; decode(val, vals["part2"]); ASSERT_EQ(val, "part2_val"); decode(val, vals["part4"]); ASSERT_EQ(val, 
"part4_val"); } TEST_F(DBStoreTest, ObjectOmapGetAll) { struct DBOpParams params = GlobalParams; int ret = -1; std::map<std::string, bufferlist> vals; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target.obj_omap_get_all(dpp, &vals); ASSERT_EQ(ret, 0); ASSERT_EQ(vals.size(), 4); string val; decode(val, vals["part1"]); ASSERT_EQ(val, "part1_val"); decode(val, vals["part2"]); ASSERT_EQ(val, "part2_val"); decode(val, vals["part3"]); ASSERT_EQ(val, "part3_val"); decode(val, vals["part4"]); ASSERT_EQ(val, "part4_val"); } TEST_F(DBStoreTest, ObjectOmapGetVals) { struct DBOpParams params = GlobalParams; int ret = -1; std::set<std::string> keys; std::map<std::string, bufferlist> vals; bool pmore; DB::Object op_target(db, params.op.bucket.info, params.op.obj.state.obj); ret = op_target.obj_omap_get_vals(dpp, "part3", 10, &vals, &pmore); ASSERT_EQ(ret, 0); ASSERT_EQ(vals.size(), 2); string val; decode(val, vals["part3"]); ASSERT_EQ(val, "part3_val"); decode(val, vals["part4"]); ASSERT_EQ(val, "part4_val"); } TEST_F(DBStoreTest, PutObjectData) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.obj_data.part_num = 1; params.op.obj_data.offset = 10; params.op.obj_data.multipart_part_str = "2"; bufferlist b1; encode("HELLO WORLD", b1); params.op.obj_data.data = b1; params.op.obj_data.size = 12; params.op.obj.state.mtime = real_clock::now(); ret = db->ProcessOp(dpp, "PutObjectData", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, UpdateObjectData) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.obj.state.mtime = bucket_mtime; ret = db->ProcessOp(dpp, "UpdateObjectData", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, GetObjectData) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.obj.state.obj.key.instance = "inst1"; params.op.obj.state.obj.key.name = "object1"; ret = db->ProcessOp(dpp, "GetObjectData", &params); ASSERT_EQ(ret, 0); ASSERT_EQ(params.op.obj_data.part_num, 1); 
ASSERT_EQ(params.op.obj_data.offset, 10); ASSERT_EQ(params.op.obj_data.multipart_part_str, "2"); ASSERT_EQ(params.op.obj.state.obj.key.instance, "inst1"); ASSERT_EQ(params.op.obj.state.obj.key.name, "object1"); ASSERT_EQ(params.op.obj.state.mtime, bucket_mtime); string data; decode(data, params.op.obj_data.data); ASSERT_EQ(data, "HELLO WORLD"); } TEST_F(DBStoreTest, DeleteObjectData) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ProcessOp(dpp, "DeleteObjectData", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, DeleteObject) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ProcessOp(dpp, "DeleteObject", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, LCTables) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->createLCTables(dpp); ASSERT_GE(ret, 0); } TEST_F(DBStoreTest, LCHead) { struct DBOpParams params = GlobalParams; int ret = -1; std::string index1 = "bucket1"; std::string index2 = "bucket2"; time_t lc_time = ceph_clock_now(); std::unique_ptr<rgw::sal::Lifecycle::LCHead> head; std::string ents[] = {"entry1", "entry2", "entry3"}; rgw::sal::StoreLifecycle::StoreLCHead head1(lc_time, 0, ents[0]); rgw::sal::StoreLifecycle::StoreLCHead head2(lc_time, 0, ents[1]); rgw::sal::StoreLifecycle::StoreLCHead head3(lc_time, 0, ents[2]); ret = db->put_head(index1, head1); ASSERT_EQ(ret, 0); ret = db->put_head(index2, head2); ASSERT_EQ(ret, 0); ret = db->get_head(index1, &head); ASSERT_EQ(ret, 0); ASSERT_EQ(head->get_marker(), "entry1"); ret = db->get_head(index2, &head); ASSERT_EQ(ret, 0); ASSERT_EQ(head->get_marker(), "entry2"); // update index1 ret = db->put_head(index1, head3); ASSERT_EQ(ret, 0); ret = db->get_head(index1, &head); ASSERT_EQ(ret, 0); ASSERT_EQ(head->get_marker(), "entry3"); } TEST_F(DBStoreTest, LCEntry) { struct DBOpParams params = GlobalParams; int ret = -1; uint64_t lc_time = ceph_clock_now(); std::string index1 = "lcindex1"; std::string index2 = "lcindex2"; typedef enum {lc_uninitial = 1, 
lc_complete} status; std::string ents[] = {"bucket1", "bucket2", "bucket3", "bucket4"}; std::unique_ptr<rgw::sal::Lifecycle::LCEntry> entry; rgw::sal::StoreLifecycle::StoreLCEntry entry1(ents[0], lc_time, lc_uninitial); rgw::sal::StoreLifecycle::StoreLCEntry entry2(ents[1], lc_time, lc_uninitial); rgw::sal::StoreLifecycle::StoreLCEntry entry3(ents[2], lc_time, lc_uninitial); rgw::sal::StoreLifecycle::StoreLCEntry entry4(ents[3], lc_time, lc_uninitial); vector<std::unique_ptr<rgw::sal::Lifecycle::LCEntry>> lc_entries; ret = db->set_entry(index1, entry1); ASSERT_EQ(ret, 0); ret = db->set_entry(index1, entry2); ASSERT_EQ(ret, 0); ret = db->set_entry(index1, entry3); ASSERT_EQ(ret, 0); ret = db->set_entry(index2, entry4); ASSERT_EQ(ret, 0); // get entry index1, entry1 ret = db->get_entry(index1, ents[0], &entry); ASSERT_EQ(ret, 0); ASSERT_EQ(entry->get_status(), lc_uninitial); ASSERT_EQ(entry->get_start_time(), lc_time); // get next entry index1, entry2 ret = db->get_next_entry(index1, ents[1], &entry); ASSERT_EQ(ret, 0); ASSERT_EQ(entry->get_bucket(), ents[2]); ASSERT_EQ(entry->get_status(), lc_uninitial); ASSERT_EQ(entry->get_start_time(), lc_time); // update entry4 to entry5 entry4.status = lc_complete; ret = db->set_entry(index2, entry4); ASSERT_EQ(ret, 0); ret = db->get_entry(index2, ents[3], &entry); ASSERT_EQ(ret, 0); ASSERT_EQ(entry->get_status(), lc_complete); // list entries ret = db->list_entries(index1, "", 5, lc_entries); ASSERT_EQ(ret, 0); for (const auto& ent: lc_entries) { cout << "###################### \n"; cout << "lc entry.bucket : " << ent->get_bucket() << "\n"; cout << "lc entry.status : " << ent->get_status() << "\n"; } // remove index1, entry3 ret = db->rm_entry(index1, entry3); ASSERT_EQ(ret, 0); // get next entry index1, entry2.. 
should be null entry.release(); ret = db->get_next_entry(index1, ents[1], &entry); ASSERT_EQ(ret, 0); ASSERT_EQ(entry.get(), nullptr); } TEST_F(DBStoreTest, RemoveBucket) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ProcessOp(dpp, "RemoveBucket", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, RemoveUser) { struct DBOpParams params = GlobalParams; int ret = -1; ret = db->ProcessOp(dpp, "RemoveUser", &params); ASSERT_EQ(ret, 0); } TEST_F(DBStoreTest, InsertTestIDUser) { struct DBOpParams params = GlobalParams; int ret = -1; params.op.user.uinfo.user_id.id = "testid"; params.op.user.uinfo.display_name = "M. Tester"; params.op.user.uinfo.user_id.tenant = "tenant"; params.op.user.uinfo.user_email = "[email protected]"; RGWAccessKey k1("0555b35654ad1656d804", "h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=="); params.op.user.uinfo.access_keys["0555b35654ad1656d804"] = k1; params.op.user.user_version.ver = 1; params.op.user.user_version.tag = "UserTAG"; ret = db->ProcessOp(dpp, "InsertUser", &params); ASSERT_EQ(ret, 0); } int main(int argc, char **argv) { int ret = -1; string c_logfile = "rgw_dbstore_tests.log"; int c_loglevel = 20; // format: ./dbstore-tests logfile loglevel if (argc == 3) { c_logfile = argv[1]; c_loglevel = (atoi)(argv[2]); cout << "logfile:" << c_logfile << ", loglevel set to " << c_loglevel << "\n"; } ::testing::InitGoogleTest(&argc, argv); gtest::env = new gtest::Environment(); gtest::env->logfile = c_logfile; gtest::env->loglevel = c_loglevel; ::testing::AddGlobalTestEnvironment(gtest::env); ret = RUN_ALL_TESTS(); return ret; }
41,547
28.300423
137
cc
null
ceph-main/src/rgw/driver/immutable_config/store.cc
// vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "rgw_zone.h" #include "store.h" namespace rgw::sal { ImmutableConfigStore::ImmutableConfigStore(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone, const RGWPeriodConfig& period_config) : zonegroup(zonegroup), zone(zone), period_config(period_config) { } // Realm int ImmutableConfigStore::write_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id) { return -EROFS; } int ImmutableConfigStore::read_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string& realm_id) { return -ENOENT; } int ImmutableConfigStore::delete_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y) { return -EROFS; } int ImmutableConfigStore::create_realm(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWRealm& info, std::unique_ptr<RealmWriter>* writer) { return -EROFS; } int ImmutableConfigStore::read_realm_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWRealm& info, std::unique_ptr<RealmWriter>* writer) { return -ENOENT; } int ImmutableConfigStore::read_realm_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, RGWRealm& info, std::unique_ptr<RealmWriter>* writer) { return -ENOENT; } int ImmutableConfigStore::read_default_realm(const DoutPrefixProvider* dpp, optional_yield y, RGWRealm& info, std::unique_ptr<RealmWriter>* writer) { return -ENOENT; } int ImmutableConfigStore::read_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, std::string& realm_id) { return -ENOENT; } int ImmutableConfigStore::realm_notify_new_period(const 
DoutPrefixProvider* dpp, optional_yield y, const RGWPeriod& period) { return -ENOTSUP; } int ImmutableConfigStore::list_realm_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) { result.next.clear(); result.entries = entries.first(0); return 0; } // Period int ImmutableConfigStore::create_period(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWPeriod& info) { return -EROFS; } int ImmutableConfigStore::read_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id, std::optional<uint32_t> epoch, RGWPeriod& info) { return -ENOENT; } int ImmutableConfigStore::delete_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id) { return -EROFS; } int ImmutableConfigStore::list_period_ids(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) { result.next.clear(); result.entries = entries.first(0); return 0; } // ZoneGroup class ImmutableZoneGroupWriter : public ZoneGroupWriter { public: int write(const DoutPrefixProvider* dpp, optional_yield y, const RGWZoneGroup& info) override { return -EROFS; } int rename(const DoutPrefixProvider* dpp, optional_yield y, RGWZoneGroup& info, std::string_view new_name) override { return -EROFS; } int remove(const DoutPrefixProvider* dpp, optional_yield y) override { return -EROFS; } }; int ImmutableConfigStore::write_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zonegroup_id) { return -EROFS; } int ImmutableConfigStore::read_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zonegroup_id) { if (!realm_id.empty()) { return -ENOENT; } zonegroup_id = zonegroup.id; return 0; } int ImmutableConfigStore::delete_default_zonegroup_id(const 
DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) { return -EROFS; } int ImmutableConfigStore::create_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) { return -EROFS; } int ImmutableConfigStore::read_zonegroup_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_id, RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) { if (zonegroup_id != zonegroup.id) { return -ENOENT; } info = zonegroup; if (writer) { *writer = std::make_unique<ImmutableZoneGroupWriter>(); } return 0; } int ImmutableConfigStore::read_zonegroup_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_name, RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) { if (zonegroup_name != zonegroup.name) { return -ENOENT; } info = zonegroup; if (writer) { *writer = std::make_unique<ImmutableZoneGroupWriter>(); } return 0; } int ImmutableConfigStore::read_default_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) { info = zonegroup; if (writer) { *writer = std::make_unique<ImmutableZoneGroupWriter>(); } return 0; } int ImmutableConfigStore::list_zonegroup_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) { if (marker < zonegroup.name) { entries[0] = zonegroup.name; result.next = zonegroup.name; result.entries = entries.first(1); } else { result.next.clear(); result.entries = entries.first(0); } return 0; } // Zone class ImmutableZoneWriter : public ZoneWriter { public: int write(const DoutPrefixProvider* dpp, optional_yield y, const RGWZoneParams& info) override { return -EROFS; } int rename(const DoutPrefixProvider* dpp, optional_yield y, RGWZoneParams& info, std::string_view new_name) override { return -EROFS; } int 
remove(const DoutPrefixProvider* dpp, optional_yield y) override { return -EROFS; } }; int ImmutableConfigStore::write_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zone_id) { return -EROFS; } int ImmutableConfigStore::read_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zone_id) { if (realm_id.empty()) { return -ENOENT; } zone_id = zone.id; return 0; } int ImmutableConfigStore::delete_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) { return -EROFS; } int ImmutableConfigStore::create_zone(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) { return -EROFS; } int ImmutableConfigStore::read_zone_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_id, RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) { if (zone_id != zone.id) { return -ENOENT; } info = zone; if (writer) { *writer = std::make_unique<ImmutableZoneWriter>(); } return 0; } int ImmutableConfigStore::read_zone_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_name, RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) { if (zone_name != zone.name) { return -ENOENT; } info = zone; if (writer) { *writer = std::make_unique<ImmutableZoneWriter>(); } return 0; } int ImmutableConfigStore::read_default_zone(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) { if (!realm_id.empty()) { return -ENOENT; } info = zone; if (writer) { *writer = std::make_unique<ImmutableZoneWriter>(); } return 0; } int ImmutableConfigStore::list_zone_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) { if (marker < zone.name) { entries[0] = 
zone.name; result.next = zone.name; result.entries = entries.first(1); } else { result.next.clear(); result.entries = entries.first(0); } return 0; } // PeriodConfig int ImmutableConfigStore::read_period_config(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWPeriodConfig& info) { if (!realm_id.empty()) { return -ENOENT; } info = period_config; return 0; } int ImmutableConfigStore::write_period_config(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, const RGWPeriodConfig& info) { return -EROFS; } /// ImmutableConfigStore factory function auto create_immutable_config_store(const DoutPrefixProvider* dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone, const RGWPeriodConfig& period_config) -> std::unique_ptr<ConfigStore> { return std::make_unique<ImmutableConfigStore>(zonegroup, zone, period_config); } } // namespace rgw::sal
13,715
31.425532
91
cc
null
ceph-main/src/rgw/driver/immutable_config/store.h
// vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "rgw_sal_config.h" namespace rgw::sal { /// A read-only ConfigStore that serves the given default zonegroup and zone. class ImmutableConfigStore : public ConfigStore { public: explicit ImmutableConfigStore(const RGWZoneGroup& zonegroup, const RGWZoneParams& zone, const RGWPeriodConfig& period_config); // Realm virtual int write_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id) override; virtual int read_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string& realm_id) override; virtual int delete_default_realm_id(const DoutPrefixProvider* dpp, optional_yield y) override; virtual int create_realm(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWRealm& info, std::unique_ptr<RealmWriter>* writer) override; virtual int read_realm_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWRealm& info, std::unique_ptr<RealmWriter>* writer) override; virtual int read_realm_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, RGWRealm& info, std::unique_ptr<RealmWriter>* writer) override; virtual int read_default_realm(const DoutPrefixProvider* dpp, optional_yield y, RGWRealm& info, std::unique_ptr<RealmWriter>* writer) override; virtual int read_realm_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_name, std::string& realm_id) override; virtual int realm_notify_new_period(const DoutPrefixProvider* dpp, optional_yield y, const RGWPeriod& period) override; virtual int list_realm_names(const DoutPrefixProvider* dpp, optional_yield y, const 
std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) override; // Period virtual int create_period(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWPeriod& info) override; virtual int read_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id, std::optional<uint32_t> epoch, RGWPeriod& info) override; virtual int delete_period(const DoutPrefixProvider* dpp, optional_yield y, std::string_view period_id) override; virtual int list_period_ids(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) override; // ZoneGroup virtual int write_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zonegroup_id) override; virtual int read_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zonegroup_id) override; virtual int delete_default_zonegroup_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) override; virtual int create_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) override; virtual int read_zonegroup_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_id, RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) override; virtual int read_zonegroup_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zonegroup_name, RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) override; virtual int read_default_zonegroup(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneGroup& info, std::unique_ptr<ZoneGroupWriter>* writer) override; virtual int list_zonegroup_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, 
std::span<std::string> entries, ListResult<std::string>& result) override; // Zone virtual int write_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, std::string_view zone_id) override; virtual int read_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, std::string& zone_id) override; virtual int delete_default_zone_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id) override; virtual int create_zone(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, const RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) override; virtual int read_zone_by_id(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_id, RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) override; virtual int read_zone_by_name(const DoutPrefixProvider* dpp, optional_yield y, std::string_view zone_name, RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) override; virtual int read_default_zone(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWZoneParams& info, std::unique_ptr<ZoneWriter>* writer) override; virtual int list_zone_names(const DoutPrefixProvider* dpp, optional_yield y, const std::string& marker, std::span<std::string> entries, ListResult<std::string>& result) override; // PeriodConfig virtual int read_period_config(const DoutPrefixProvider* dpp, optional_yield y, std::string_view realm_id, RGWPeriodConfig& info) override; virtual int write_period_config(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, std::string_view realm_id, const RGWPeriodConfig& info) override; private: const RGWZoneGroup zonegroup; const RGWZoneParams zone; const RGWPeriodConfig period_config; }; // ImmutableConfigStore /// ImmutableConfigStore factory function auto create_immutable_config_store(const DoutPrefixProvider* dpp, const RGWZoneGroup& zonegroup, const RGWZoneParams& zone, const 
RGWPeriodConfig& period_config) -> std::unique_ptr<ConfigStore>; } // namespace rgw::sal
9,431
51.110497
88
h
null
ceph-main/src/rgw/driver/json_config/store.cc
// vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <system_error> #include "include/buffer.h" #include "common/errno.h" #include "common/ceph_json.h" #include "rgw_zone.h" #include "driver/immutable_config/store.h" #include "store.h" namespace rgw::sal { namespace { struct DecodedConfig { RGWZoneGroup zonegroup; RGWZoneParams zone; RGWPeriodConfig period_config; void decode_json(JSONObj *obj) { JSONDecoder::decode_json("zonegroup", zonegroup, obj); JSONDecoder::decode_json("zone", zone, obj); JSONDecoder::decode_json("period_config", period_config, obj); } }; static void parse_config(const DoutPrefixProvider* dpp, const char* filename) { bufferlist bl; std::string errmsg; int r = bl.read_file(filename, &errmsg); if (r < 0) { ldpp_dout(dpp, 0) << "failed to read json config file '" << filename << "': " << errmsg << dendl; throw std::system_error(-r, std::system_category()); } JSONParser p; if (!p.parse(bl.c_str(), bl.length())) { ldpp_dout(dpp, 0) << "failed to parse json config file" << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } DecodedConfig config; try { decode_json_obj(config, &p); } catch (const JSONDecoder::err& e) { ldpp_dout(dpp, 0) << "failed to decode JSON input: " << e.what() << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } } void sanity_check_config(const DoutPrefixProvider* dpp, DecodedConfig& config) { if (config.zonegroup.id.empty()) { config.zonegroup.id = "default"; } if (config.zonegroup.name.empty()) { config.zonegroup.name = "default"; } if (config.zonegroup.api_name.empty()) { config.zonegroup.api_name = config.zonegroup.name; } if (config.zone.id.empty()) { config.zone.id = "default"; } if 
(config.zone.name.empty()) { config.zone.name = "default"; } // add default placement if it doesn't exist rgw_pool pool; RGWZonePlacementInfo placement; placement.storage_classes.set_storage_class( RGW_STORAGE_CLASS_STANDARD, &pool, nullptr); config.zone.placement_pools.emplace("default-placement", std::move(placement)); std::set<rgw_pool> pools; int r = rgw::init_zone_pool_names(dpp, null_yield, pools, config.zone); if (r < 0) { ldpp_dout(dpp, 0) << "failed to set default zone pool names" << dendl; throw std::system_error(-r, std::system_category()); } // verify that config.zonegroup only contains config.zone if (config.zonegroup.zones.size() > 1) { ldpp_dout(dpp, 0) << "zonegroup cannot contain multiple zones" << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } if (config.zonegroup.zones.size() == 1) { auto z = config.zonegroup.zones.begin(); if (z->first != config.zone.id) { ldpp_dout(dpp, 0) << "zonegroup contains unknown zone id=" << z->first << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } if (z->second.id != config.zone.id) { ldpp_dout(dpp, 0) << "zonegroup contains unknown zone id=" << z->second.id << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } if (z->second.name != config.zone.name) { ldpp_dout(dpp, 0) << "zonegroup contains unknown zone name=" << z->second.name << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } if (config.zonegroup.master_zone != config.zone.id) { ldpp_dout(dpp, 0) << "zonegroup contains unknown master_zone=" << config.zonegroup.master_zone << dendl; throw std::system_error(make_error_code(std::errc::invalid_argument)); } } else { // add the zone to the group const bool is_master = true; const bool read_only = false; std::list<std::string> endpoints; std::list<std::string> sync_from; std::list<std::string> sync_from_rm; rgw::zone_features::set enable_features; rgw::zone_features::set disable_features; 
enable_features.insert(rgw::zone_features::supported.begin(), rgw::zone_features::supported.end()); int r = rgw::add_zone_to_group(dpp, config.zonegroup, config.zone, &is_master, &read_only, endpoints, nullptr, nullptr, sync_from, sync_from_rm, nullptr, std::nullopt, enable_features, disable_features); if (r < 0) { ldpp_dout(dpp, 0) << "failed to add zone to zonegroup: " << cpp_strerror(r) << dendl; throw std::system_error(-r, std::system_category()); } config.zonegroup.enabled_features.insert(rgw::zone_features::enabled.begin(), rgw::zone_features::enabled.end()); } // insert the default placement target if it doesn't exist auto target = RGWZoneGroupPlacementTarget{.name = "default-placement"}; config.zonegroup.placement_targets.emplace(target.name, target); if (config.zonegroup.default_placement.name.empty()) { config.zonegroup.default_placement.name = target.name; } } } // anonymous namespace auto create_json_config_store(const DoutPrefixProvider* dpp, const std::string& filename) -> std::unique_ptr<ConfigStore> { DecodedConfig config; parse_config(dpp, filename.c_str()); sanity_check_config(dpp, config); return create_immutable_config_store(dpp, config.zonegroup, config.zone, config.period_config); } } // namespace rgw::sal
5,988
32.646067
81
cc
null
ceph-main/src/rgw/driver/json_config/store.h
// vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2022 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "driver/immutable_config/store.h" namespace rgw::sal { /// Create an immutable ConfigStore by parsing the zonegroup and zone from the /// given json filename. auto create_json_config_store(const DoutPrefixProvider* dpp, const std::string& filename) -> std::unique_ptr<ConfigStore>; } // namespace rgw::sal
707
24.285714
78
h
null
ceph-main/src/rgw/driver/rados/cls_fifo_legacy.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat <[email protected]> * Author: Adam C. Emerson * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <algorithm> #include <cstdint> #include <numeric> #include <optional> #include <string_view> #include <fmt/format.h> #include "include/rados/librados.hpp" #include "include/buffer.h" #include "common/async/yield_context.h" #include "common/random_string.h" #include "cls/fifo/cls_fifo_types.h" #include "cls/fifo/cls_fifo_ops.h" #include "cls_fifo_legacy.h" namespace rgw::cls::fifo { namespace cb = ceph::buffer; namespace fifo = rados::cls::fifo; using ceph::from_error_code; inline constexpr auto MAX_RACE_RETRIES = 10; void create_meta(lr::ObjectWriteOperation* op, std::string_view id, std::optional<fifo::objv> objv, std::optional<std::string_view> oid_prefix, bool exclusive, std::uint64_t max_part_size, std::uint64_t max_entry_size) { fifo::op::create_meta cm; cm.id = id; cm.version = objv; cm.oid_prefix = oid_prefix; cm.max_part_size = max_part_size; cm.max_entry_size = max_entry_size; cm.exclusive = exclusive; cb::list in; encode(cm, in); op->exec(fifo::op::CLASS, fifo::op::CREATE_META, in); } int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional<fifo::objv> objv, fifo::info* info, std::uint32_t* part_header_size, std::uint32_t* part_entry_overhead, uint64_t tid, optional_yield y, bool probe) { lr::ObjectReadOperation op; fifo::op::get_meta gm; gm.version = objv; cb::list in; encode(gm, in); cb::list bl; op.exec(fifo::op::CLASS, fifo::op::GET_META, in, &bl, nullptr); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::get_meta_reply reply; auto 
iter = bl.cbegin(); decode(reply, iter); if (info) *info = std::move(reply.info); if (part_header_size) *part_header_size = reply.part_header_size; if (part_entry_overhead) *part_entry_overhead = reply.part_entry_overhead; } catch (const cb::error& err) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (!(probe && (r == -ENOENT || r == -ENODATA))) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::GET_META failed r=" << r << " tid=" << tid << dendl; } return r; }; namespace { void update_meta(lr::ObjectWriteOperation* op, const fifo::objv& objv, const fifo::update& update) { fifo::op::update_meta um; um.version = objv; um.tail_part_num = update.tail_part_num(); um.head_part_num = update.head_part_num(); um.min_push_part_num = update.min_push_part_num(); um.max_push_part_num = update.max_push_part_num(); um.journal_entries_add = update.journal_entries_add(); um.journal_entries_rm = update.journal_entries_rm(); cb::list in; encode(um, in); op->exec(fifo::op::CLASS, fifo::op::UPDATE_META, in); } void part_init(lr::ObjectWriteOperation* op, fifo::data_params params) { fifo::op::init_part ip; ip.params = params; cb::list in; encode(ip, in); op->exec(fifo::op::CLASS, fifo::op::INIT_PART, in); } int push_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::deque<cb::list> data_bufs, std::uint64_t tid, optional_yield y) { lr::ObjectWriteOperation op; fifo::op::push_part pp; op.assert_exists(); pp.data_bufs = data_bufs; pp.total_len = 0; for (const auto& bl : data_bufs) pp.total_len += bl.length(); cb::list in; encode(pp, in); auto retval = 0; op.exec(fifo::op::CLASS, fifo::op::PUSH_PART, in, nullptr, &retval); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y, lr::OPERATION_RETURNVEC); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::PUSH_PART failed r=" << r 
<< " tid=" << tid << dendl; return r; } if (retval < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " error handling response retval=" << retval << " tid=" << tid << dendl; } return retval; } void push_part(lr::IoCtx& ioctx, const std::string& oid, std::deque<cb::list> data_bufs, std::uint64_t tid, lr::AioCompletion* c) { lr::ObjectWriteOperation op; fifo::op::push_part pp; pp.data_bufs = data_bufs; pp.total_len = 0; for (const auto& bl : data_bufs) pp.total_len += bl.length(); cb::list in; encode(pp, in); op.exec(fifo::op::CLASS, fifo::op::PUSH_PART, in); auto r = ioctx.aio_operate(oid, c, &op, lr::OPERATION_RETURNVEC); ceph_assert(r >= 0); } void trim_part(lr::ObjectWriteOperation* op, std::uint64_t ofs, bool exclusive) { fifo::op::trim_part tp; tp.ofs = ofs; tp.exclusive = exclusive; cb::list in; encode(tp, in); op->exec(fifo::op::CLASS, fifo::op::TRIM_PART, in); } int list_part(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::uint64_t ofs, std::uint64_t max_entries, std::vector<fifo::part_list_entry>* entries, bool* more, bool* full_part, std::uint64_t tid, optional_yield y) { lr::ObjectReadOperation op; fifo::op::list_part lp; lp.ofs = ofs; lp.max_entries = max_entries; cb::list in; encode(lp, in); cb::list bl; op.exec(fifo::op::CLASS, fifo::op::LIST_PART, in, &bl, nullptr); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { fifo::op::list_part_reply reply; auto iter = bl.cbegin(); decode(reply, iter); if (entries) *entries = std::move(reply.entries); if (more) *more = reply.more; if (full_part) *full_part = reply.full_part; } catch (const cb::error& err) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (r != -ENOENT) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::LIST_PART failed r=" << r << " tid=" << tid << dendl; } return r; } 
struct list_entry_completion : public lr::ObjectOperationCompletion { CephContext* cct; int* r_out; std::vector<fifo::part_list_entry>* entries; bool* more; bool* full_part; std::uint64_t tid; list_entry_completion(CephContext* cct, int* r_out, std::vector<fifo::part_list_entry>* entries, bool* more, bool* full_part, std::uint64_t tid) : cct(cct), r_out(r_out), entries(entries), more(more), full_part(full_part), tid(tid) {} virtual ~list_entry_completion() = default; void handle_completion(int r, bufferlist& bl) override { if (r >= 0) try { fifo::op::list_part_reply reply; auto iter = bl.cbegin(); decode(reply, iter); if (entries) *entries = std::move(reply.entries); if (more) *more = reply.more; if (full_part) *full_part = reply.full_part; } catch (const cb::error& err) { lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else if (r < 0) { lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::LIST_PART failed r=" << r << " tid=" << tid << dendl; } if (r_out) *r_out = r; } }; lr::ObjectReadOperation list_part(CephContext* cct, std::uint64_t ofs, std::uint64_t max_entries, int* r_out, std::vector<fifo::part_list_entry>* entries, bool* more, bool* full_part, std::uint64_t tid) { lr::ObjectReadOperation op; fifo::op::list_part lp; lp.ofs = ofs; lp.max_entries = max_entries; cb::list in; encode(lp, in); op.exec(fifo::op::CLASS, fifo::op::LIST_PART, in, new list_entry_completion(cct, r_out, entries, more, full_part, tid)); return op; } int get_part_info(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, fifo::part_header* header, std::uint64_t tid, optional_yield y) { lr::ObjectReadOperation op; fifo::op::get_part_info gpi; cb::list in; cb::list bl; encode(gpi, in); op.exec(fifo::op::CLASS, fifo::op::GET_PART_INFO, in, &bl, nullptr); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, nullptr, y); if (r >= 0) try { 
fifo::op::get_part_info_reply reply; auto iter = bl.cbegin(); decode(reply, iter); if (header) *header = std::move(reply.header); } catch (const cb::error& err) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; r = from_error_code(err.code()); } else { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::GET_PART_INFO failed r=" << r << " tid=" << tid << dendl; } return r; } struct partinfo_completion : public lr::ObjectOperationCompletion { CephContext* cct; int* rp; fifo::part_header* h; std::uint64_t tid; partinfo_completion(CephContext* cct, int* rp, fifo::part_header* h, std::uint64_t tid) : cct(cct), rp(rp), h(h), tid(tid) { } virtual ~partinfo_completion() = default; void handle_completion(int r, bufferlist& bl) override { if (r >= 0) try { fifo::op::get_part_info_reply reply; auto iter = bl.cbegin(); decode(reply, iter); if (h) *h = std::move(reply.header); } catch (const cb::error& err) { r = from_error_code(err.code()); lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " decode failed: " << err.what() << " tid=" << tid << dendl; } else { lderr(cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " fifo::op::GET_PART_INFO failed r=" << r << " tid=" << tid << dendl; } if (rp) { *rp = r; } } }; lr::ObjectReadOperation get_part_info(CephContext* cct, fifo::part_header* header, std::uint64_t tid, int* r = 0) { lr::ObjectReadOperation op; fifo::op::get_part_info gpi; cb::list in; cb::list bl; encode(gpi, in); op.exec(fifo::op::CLASS, fifo::op::GET_PART_INFO, in, new partinfo_completion(cct, r, header, tid)); return op; } } std::optional<marker> FIFO::to_marker(std::string_view s) { marker m; if (s.empty()) { m.num = info.tail_part_num; m.ofs = 0; return m; } auto pos = s.find(':'); if (pos == s.npos) { return std::nullopt; } auto num = s.substr(0, pos); auto ofs = s.substr(pos + 1); auto n = ceph::parse<decltype(m.num)>(num); if (!n) { return std::nullopt; } 
m.num = *n; auto o = ceph::parse<decltype(m.ofs)>(ofs); if (!o) { return std::nullopt; } m.ofs = *o; return m; } int FIFO::apply_update(const DoutPrefixProvider *dpp, fifo::info* info, const fifo::objv& objv, const fifo::update& update, std::uint64_t tid) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::unique_lock l(m); if (objv != info->version) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " version mismatch, canceling: tid=" << tid << dendl; return -ECANCELED; } info->apply_update(update); return {}; } int FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, optional_yield y) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; bool canceled = false; update_meta(&op, version, update); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r >= 0 || r == -ECANCELED) { canceled = (r == -ECANCELED); if (!canceled) { r = apply_update(dpp, &info, version, update, tid); if (r < 0) canceled = true; } if (canceled) { r = read_meta(dpp, tid, y); canceled = r < 0 ? 
false : true; } } if (pcanceled) *pcanceled = canceled; if (canceled) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled: tid=" << tid << dendl; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " returning error: r=" << r << " tid=" << tid << dendl; } return r; } struct Updater : public Completion<Updater> { FIFO* fifo; fifo::update update; fifo::objv version; bool reread = false; bool* pcanceled = nullptr; std::uint64_t tid; Updater(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid) : Completion(dpp, super), fifo(fifo), update(update), version(version), pcanceled(pcanceled) {} void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (reread) handle_reread(dpp, std::move(p), r); else handle_update(dpp, std::move(p), r); } void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling async update_meta: tid=" << tid << dendl; if (r < 0 && r != -ECANCELED) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; } bool canceled = (r == -ECANCELED); if (!canceled) { int r = fifo->apply_update(dpp, &fifo->info, version, update, tid); if (r < 0) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update failed, marking canceled: r=" << r << " tid=" << tid << dendl; canceled = true; } } if (canceled) { reread = true; fifo->read_meta(dpp, tid, call(std::move(p))); return; } if (pcanceled) *pcanceled = false; ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " completing: tid=" << tid << dendl; complete(std::move(p), 0); } void handle_reread(const DoutPrefixProvider *dpp, Ptr&& p, int r) { ldpp_dout(dpp, 20) 
<< __PRETTY_FUNCTION__ << ":" << __LINE__ << " handling async read_meta: tid=" << tid << dendl; if (r < 0 && pcanceled) { *pcanceled = false; } else if (r >= 0 && pcanceled) { *pcanceled = true; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed dispatching read_meta: r=" << r << " tid=" << tid << dendl; } else { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " completing: tid=" << tid << dendl; } complete(std::move(p), r); } }; void FIFO::_update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, lr::AioCompletion* c) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; update_meta(&op, info.version, update); auto updater = std::make_unique<Updater>(dpp, this, c, update, version, pcanceled, tid); auto r = ioctx.aio_operate(oid, Updater::call(std::move(updater)), &op); assert(r >= 0); } int FIFO::create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::uint64_t tid, optional_yield y) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; op.create(false); /* We don't need exclusivity, part_init ensures we're creating from the same journal entry. 
*/ std::unique_lock l(m); part_init(&op, info.params); auto oid = info.part_oid(part_num); l.unlock(); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " part_init failed: r=" << r << " tid=" << tid << dendl; } return r; } int FIFO::remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::uint64_t tid, optional_yield y) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; op.remove(); std::unique_lock l(m); auto oid = info.part_oid(part_num); l.unlock(); auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " remove failed: r=" << r << " tid=" << tid << dendl; } return r; } int FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::vector<fifo::journal_entry> processed; std::unique_lock l(m); auto tmpjournal = info.journal; auto new_tail = info.tail_part_num; auto new_head = info.head_part_num; auto new_max = info.max_push_part_num; l.unlock(); int r = 0; for (auto& entry : tmpjournal) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry: entry=" << entry << " tid=" << tid << dendl; switch (entry.op) { using enum fifo::journal_entry::Op; case create: r = create_part(dpp, entry.part_num, tid, y); if (entry.part_num > new_max) { new_max = entry.part_num; } break; case set_head: r = 0; if (entry.part_num > new_head) { new_head = entry.part_num; } break; case remove: r = remove_part(dpp, entry.part_num, tid, y); if (r == -ENOENT) r = 0; if (entry.part_num >= new_tail) { new_tail = entry.part_num + 1; } break; default: ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " unknown journaled op: entry=" << entry << " tid=" << tid << dendl; return -EIO; 
} if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry failed: entry=" << entry << " r=" << r << " tid=" << tid << dendl; return -r; } processed.push_back(std::move(entry)); } // Postprocess bool canceled = true; for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " postprocessing: i=" << i << " tid=" << tid << dendl; std::optional<int64_t> tail_part_num; std::optional<int64_t> head_part_num; std::optional<int64_t> max_part_num; std::unique_lock l(m); auto objv = info.version; if (new_tail > tail_part_num) tail_part_num = new_tail; if (new_head > info.head_part_num) head_part_num = new_head; if (new_max > info.max_push_part_num) max_part_num = new_max; l.unlock(); if (processed.empty() && !tail_part_num && !max_part_num) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: i=" << i << " tid=" << tid << dendl; canceled = false; break; } auto u = fifo::update().tail_part_num(tail_part_num) .head_part_num(head_part_num).max_push_part_num(max_part_num) .journal_entries_rm(processed); r = _update_meta(dpp, u, objv, &canceled, tid, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: update=" << u << " r=" << r << " tid=" << tid << dendl; break; } if (canceled) { std::vector<fifo::journal_entry> new_processed; std::unique_lock l(m); ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update canceled, retrying: i=" << i << " tid=" << tid << dendl; for (auto& e : processed) { if (info.journal.contains(e)) { new_processed.push_back(e); } } processed = std::move(new_processed); } } if (r == 0 && canceled) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; r = -ECANCELED; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed, r=: " << r << " tid=" << 
		       tid << dendl;
  }
  return r;
}

// Journal (and then create) a new part, optionally also journaling a
// set_head for it.  Synchronous variant.  If the work is already journaled,
// just processes the journal; otherwise retries the metadata update up to
// MAX_RACE_RETRIES times to resolve races with other clients.
int FIFO::_prepare_new_part(const DoutPrefixProvider *dpp,
			    std::int64_t new_part_num, bool is_head,
			    std::uint64_t tid, optional_yield y)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  std::unique_lock l(m);
  using enum fifo::journal_entry::Op;
  std::vector<fifo::journal_entry> jentries{{ create, new_part_num }};
  if (info.journal.contains({create, new_part_num}) &&
      (!is_head || info.journal.contains({set_head, new_part_num}))) {
    // Someone (possibly us, earlier) already journaled this work but it has
    // not been applied yet; just replay the journal.
    l.unlock();
    ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
		      << " new part journaled, but not processed: tid="
		      << tid << dendl;
    auto r = process_journal(dpp, tid, y);
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " process_journal failed: r=" << r
			 << " tid=" << tid << dendl;
    }
    return r;
  }
  auto version = info.version;
  if (is_head) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " needs new head: tid=" << tid << dendl;
    jentries.push_back({ set_head, new_part_num });
  }
  l.unlock();

  int r = 0;
  bool canceled = true;
  for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) {
    canceled = false;
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " updating metadata: i=" << i << " tid=" << tid << dendl;
    auto u = fifo::update{}.journal_entries_add(jentries);
    r = _update_meta(dpp, u, version, &canceled, tid, y);
    if (r >= 0 && canceled) {
      // Lost a metadata race; re-check whether the racing client already
      // did (or journaled) our work before retrying.
      std::unique_lock l(m);
      version = info.version;
      auto found = (info.journal.contains({create, new_part_num}) ||
		    info.journal.contains({set_head, new_part_num}));
      if ((info.max_push_part_num >= new_part_num &&
	   info.head_part_num >= new_part_num)) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, but journaled and processed: i=" << i
			   << " tid=" << tid << dendl;
	return 0;
      }
      if (found) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, journaled but not processed: i=" << i
			   << " tid=" << tid << dendl;
	canceled = false;
      }
      l.unlock();
    }
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " _update_meta failed: update=" << u
			 << " r=" << r << " tid=" << tid << dendl;
      return r;
    }
  }
  if (canceled) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " canceled too many times, giving up: tid=" << tid << dendl;
    return -ECANCELED;
  }
  r = process_journal(dpp, tid, y);
  if (r < 0) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " process_journal failed: r=" << r
		       << " tid=" << tid << dendl;
  }
  return r;
}

// Make new_head_part_num the head part, first creating it if it is past
// max_push_part_num.  Synchronous variant.
int FIFO::_prepare_new_head(const DoutPrefixProvider *dpp,
			    std::int64_t new_head_part_num,
			    std::uint64_t tid, optional_yield y)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  std::unique_lock l(m);
  auto max_push_part_num = info.max_push_part_num;
  auto version = info.version;
  l.unlock();

  int r = 0;
  if (max_push_part_num < new_head_part_num) {
    // Part does not exist yet; _prepare_new_part(is_head=true) journals
    // both the create and the set_head.
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " need new part: tid=" << tid << dendl;
    r = _prepare_new_part(dpp, new_head_part_num, true, tid, y);
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " _prepare_new_part failed: r=" << r
			 << " tid=" << tid << dendl;
      return r;
    }
    std::unique_lock l(m);
    if (info.max_push_part_num < new_head_part_num) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " inconsistency, push part less than head part: "
			 << " tid=" << tid << dendl;
      return -EIO;
    }
    l.unlock();
    return 0;
  }

  // Part already exists; just journal a set_head, retrying on races.
  using enum fifo::journal_entry::Op;
  fifo::journal_entry jentry;
  jentry.op = set_head;
  jentry.part_num = new_head_part_num;

  r = 0;
  bool canceled = true;
  for (auto i = 0; canceled && i < MAX_RACE_RETRIES; ++i) {
    canceled = false;
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " updating metadata: i=" << i << " tid=" << tid << dendl;
    auto u = fifo::update{}.journal_entries_add({{ jentry }});
    r = _update_meta(dpp, u, version, &canceled, tid, y);
    if (r >= 0 && canceled) {
      // Raced: see whether the other client already moved the head or at
      // least journaled the move.
      std::unique_lock l(m);
      auto found = (info.journal.contains({create, new_head_part_num}) ||
		    info.journal.contains({set_head, new_head_part_num}));
      version = info.version;
      if ((info.head_part_num >= new_head_part_num)) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, but journaled and processed: i=" << i
			   << " tid=" << tid << dendl;
	return 0;
      }
      if (found) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, journaled but not processed: i=" << i
			   << " tid=" << tid << dendl;
	canceled = false;
      }
      l.unlock();
    }
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " _update_meta failed: update=" << u
			 << " r=" << r << " tid=" << tid << dendl;
      return r;
    }
  }
  if (canceled) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " canceled too many times, giving up: tid=" << tid << dendl;
    return -ECANCELED;
  }
  r = process_journal(dpp, tid, y);
  if (r < 0) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " process_journal failed: r=" << r
		       << " tid=" << tid << dendl;
  }
  return r;
}

// Completion adapter for the asynchronous _prepare_new_part(): handles the
// result of the metadata update, retrying on races, then processes the
// journal.
struct NewPartPreparer : public Completion<NewPartPreparer> {
  FIFO* f;
  std::vector<fifo::journal_entry> jentries;
  int i = 0;                    // race-retry counter
  std::int64_t new_part_num;
  bool canceled = false;        // set by _update_meta on version mismatch
  uint64_t tid;

  NewPartPreparer(const DoutPrefixProvider *dpp, FIFO* f,
		  lr::AioCompletion* super,
		  std::vector<fifo::journal_entry> jentries,
		  std::int64_t new_part_num,
		  std::uint64_t tid)
    : Completion(dpp, super), f(f), jentries(std::move(jentries)),
      new_part_num(new_part_num), tid(tid) {}

  void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " entering: tid=" << tid << dendl;
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " _update_meta failed: r=" << r
			 << " tid=" << tid << dendl;
      complete(std::move(p), r);
      return;
    }
    if (canceled) {
      using enum fifo::journal_entry::Op;
      std::unique_lock l(f->m);
      auto found = (f->info.journal.contains({create,
					      new_part_num}) ||
		    f->info.journal.contains({set_head, new_part_num}));
      auto max_push_part_num = f->info.max_push_part_num;
      auto head_part_num = f->info.head_part_num;
      auto version = f->info.version;
      l.unlock();
      if ((max_push_part_num >= new_part_num &&
	   head_part_num >= new_part_num)) {
	// Racing client already created the part (and moved the head).
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, but journaled and processed: i=" << i
			   << " tid=" << tid << dendl;
	complete(std::move(p), 0);
	return;
      }
      if (i >= MAX_RACE_RETRIES) {
	complete(std::move(p), -ECANCELED);
	return;
      }
      if (!found) {
	// Our journal entries are not there yet; retry the metadata update.
	++i;
	f->_update_meta(dpp, fifo::update{}
			.journal_entries_add(jentries),
			version, &canceled, tid, call(std::move(p)));
	return;
      } else {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, journaled but not processed: i=" << i
			   << " tid=" << tid << dendl;
	canceled = false;
      }
      // Fall through. We still need to process the journal.
    }
    f->process_journal(dpp, tid, super());
    return;
  }
};

// Asynchronous counterpart of _prepare_new_part() above.
void FIFO::_prepare_new_part(const DoutPrefixProvider *dpp,
			     std::int64_t new_part_num, bool is_head,
			     std::uint64_t tid, lr::AioCompletion* c)
{
  std::unique_lock l(m);
  using enum fifo::journal_entry::Op;
  std::vector<fifo::journal_entry> jentries{{create, new_part_num}};
  if (info.journal.contains({create, new_part_num}) &&
      (!is_head || info.journal.contains({set_head, new_part_num}))) {
    // Work already journaled; just replay the journal.
    l.unlock();
    ldpp_dout(dpp, 5) << __PRETTY_FUNCTION__ << ":" << __LINE__
		      << " new part journaled, but not processed: tid="
		      << tid << dendl;
    process_journal(dpp, tid, c);
    return;
  }
  auto version = info.version;
  if (is_head) {
    jentries.push_back({ set_head, new_part_num });
  }
  l.unlock();

  auto n = std::make_unique<NewPartPreparer>(dpp, this, c, jentries,
					     new_part_num, tid);
  auto np = n.get();
  _update_meta(dpp, fifo::update{}.journal_entries_add(jentries), version,
	       &np->canceled, tid, NewPartPreparer::call(std::move(n)));
}

// Completion adapter for the asynchronous _prepare_new_head(): either
// verifies a freshly prepared part (newpart == true) or drives the
// set_head metadata update, retrying on races.
struct NewHeadPreparer : public Completion<NewHeadPreparer> {
  FIFO* f;
  int i = 0;                    // race-retry counter
  bool newpart;                 // true: came from _prepare_new_part
  std::int64_t new_head_part_num;
  bool canceled = false;        // set by _update_meta on version mismatch
  std::uint64_t tid;

  NewHeadPreparer(const DoutPrefixProvider *dpp, FIFO* f,
		  lr::AioCompletion* super, bool newpart,
		  std::int64_t new_head_part_num,
		  std::uint64_t tid)
    : Completion(dpp, super), f(f), newpart(newpart),
      new_head_part_num(new_head_part_num), tid(tid) {}

  void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
    if (newpart)
      handle_newpart(std::move(p), r);
    else
      handle_update(dpp, std::move(p), r);
  }

  // _prepare_new_part finished: check that max_push_part_num now covers the
  // new head; anything else is an internal inconsistency (-EIO).
  void handle_newpart(Ptr&& p, int r) {
    if (r < 0) {
      lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
		    << " _prepare_new_part failed: r=" << r
		    << " tid=" << tid << dendl;
      complete(std::move(p), r);
      return;
    }
    std::unique_lock l(f->m);
    if (f->info.max_push_part_num < new_head_part_num) {
      l.unlock();
      lderr(f->cct) << __PRETTY_FUNCTION__ << ":" << __LINE__
		    << " _prepare_new_part failed: r=" << r
		    << " tid=" << tid << dendl;
      complete(std::move(p), -EIO);
    } else {
      l.unlock();
      complete(std::move(p), 0);
    }
  }

  // set_head metadata update finished: on a race, re-check whether the
  // racing client already did our work, otherwise retry.
  void handle_update(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " entering: tid=" << tid << dendl;
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " _update_meta failed: r=" << r
			 << " tid=" << tid << dendl;
      complete(std::move(p), r);
      return;
    }
    if (canceled) {
      using enum fifo::journal_entry::Op;
      std::unique_lock l(f->m);
      auto found = (f->info.journal.contains({create, new_head_part_num }) ||
		    f->info.journal.contains({set_head, new_head_part_num }));
      auto head_part_num = f->info.head_part_num;
      auto version = f->info.version;
      l.unlock();
      if ((head_part_num >= new_head_part_num)) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, but journaled and processed: i=" << i
			   << " tid=" << tid << dendl;
	complete(std::move(p), 0);
	return;
      }
      if (i >= MAX_RACE_RETRIES) {
	complete(std::move(p), -ECANCELED);
	return;
      }
      if (!found) {
	++i;
	fifo::journal_entry jentry;
	jentry.op = set_head;
	jentry.part_num = new_head_part_num;
	f->_update_meta(dpp, fifo::update{}
			.journal_entries_add({{jentry}}), version,
			&canceled, tid, call(std::move(p)));
	return;
      } else {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " raced, journaled but not processed: i=" << i
			   << " tid=" << tid << dendl;
	canceled = false;
      }
      // Fall through. We still need to process the journal.
    }
    f->process_journal(dpp, tid, super());
    return;
  }
};

// Asynchronous counterpart of _prepare_new_head() above.
void FIFO::_prepare_new_head(const DoutPrefixProvider *dpp,
			     std::int64_t new_head_part_num,
			     std::uint64_t tid, lr::AioCompletion* c)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  std::unique_lock l(m);
  auto max_push_part_num = info.max_push_part_num;
  auto version = info.version;
  l.unlock();

  if (max_push_part_num < new_head_part_num) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " need new part: tid=" << tid << dendl;
    auto n = std::make_unique<NewHeadPreparer>(dpp, this, c, true,
					       new_head_part_num, tid);
    _prepare_new_part(dpp, new_head_part_num, true, tid,
		      NewHeadPreparer::call(std::move(n)));
  } else {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " updating head: tid=" << tid << dendl;
    auto n = std::make_unique<NewHeadPreparer>(dpp, this, c, false,
					       new_head_part_num, tid);
    auto np = n.get();
    using enum fifo::journal_entry::Op;
    fifo::journal_entry jentry;
    jentry.op = set_head;
    jentry.part_num = new_head_part_num;
    _update_meta(dpp, fifo::update{}.journal_entries_add({{jentry}}), version,
		 &np->canceled, tid, NewHeadPreparer::call(std::move(n)));
  }
}

// Append a batch of entries to the current head part.  Synchronous variant;
// the return value comes straight from push_part (number pushed, or a
// negative error such as -ERANGE when the part is full — see push()).
int FIFO::push_entries(const DoutPrefixProvider *dpp,
		       const std::deque<cb::list>& data_bufs,
		       std::uint64_t tid, optional_yield y)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  std::unique_lock l(m);
  auto head_part_num = info.head_part_num;
  const auto part_oid = info.part_oid(head_part_num);
  l.unlock();

  auto r = push_part(dpp, ioctx, part_oid, data_bufs, tid, y);
  if (r < 0) {
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_part failed: r=" << r << " tid=" << tid << dendl; } return r; } void FIFO::push_entries(const std::deque<cb::list>& data_bufs, std::uint64_t tid, lr::AioCompletion* c) { std::unique_lock l(m); auto head_part_num = info.head_part_num; const auto part_oid = info.part_oid(head_part_num); l.unlock(); push_part(ioctx, part_oid, data_bufs, tid, c); } int FIFO::trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, bool exclusive, std::uint64_t tid, optional_yield y) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; std::unique_lock l(m); const auto part_oid = info.part_oid(part_num); l.unlock(); rgw::cls::fifo::trim_part(&op, ofs, exclusive); auto r = rgw_rados_operate(dpp, ioctx, part_oid, &op, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid=" << tid << dendl; } return 0; } void FIFO::trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, bool exclusive, std::uint64_t tid, lr::AioCompletion* c) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; lr::ObjectWriteOperation op; std::unique_lock l(m); const auto part_oid = info.part_oid(part_num); l.unlock(); rgw::cls::fifo::trim_part(&op, ofs, exclusive); auto r = ioctx.aio_operate(part_oid, c, &op); ceph_assert(r >= 0); } int FIFO::open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr<FIFO>* fifo, optional_yield y, std::optional<fifo::objv> objv, bool probe) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering" << dendl; fifo::info info; std::uint32_t size; std::uint32_t over; int r = get_meta(dpp, ioctx, std::move(oid), objv, &info, &size, &over, 0, y, probe); if (r < 0) { if (!(probe && (r == -ENOENT || r == -ENODATA))) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ 
			 << ":" << __LINE__
			 << " get_meta failed: r=" << r << dendl;
    }
    return r;
  }
  std::unique_ptr<FIFO> f(new FIFO(std::move(ioctx), oid));
  f->info = info;
  f->part_header_size = size;
  f->part_entry_overhead = over;
  // If there are journal entries, process them, in case
  // someone crashed mid-transaction.
  if (!info.journal.empty()) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " processing leftover journal" << dendl;
    r = f->process_journal(dpp, 0, y);
    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " process_journal failed: r=" << r << dendl;
      return r;
    }
  }
  *fifo = std::move(f);
  return 0;
}

// Create the FIFO meta object (exclusively or not) and then open it,
// returning the handle through *fifo.
int FIFO::create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, std::string oid, std::unique_ptr<FIFO>* fifo,
		 optional_yield y, std::optional<fifo::objv> objv,
		 std::optional<std::string_view> oid_prefix,
		 bool exclusive, std::uint64_t max_part_size,
		 std::uint64_t max_entry_size)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering" << dendl;
  lr::ObjectWriteOperation op;
  create_meta(&op, oid, objv, oid_prefix, exclusive, max_part_size,
	      max_entry_size);
  auto r = rgw_rados_operate(dpp, ioctx, oid, &op, y);
  if (r < 0) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " create_meta failed: r=" << r << dendl;
    return r;
  }
  r = open(dpp, std::move(ioctx), std::move(oid), fifo, y, objv);
  return r;
}

// Re-fetch this FIFO's metadata from the OSD and install it if it is not
// older than the cached copy.  Synchronous variant.
int FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  fifo::info _info;
  std::uint32_t _phs;
  std::uint32_t _peo;

  auto r = get_meta(dpp, ioctx, oid, std::nullopt, &_info, &_phs, &_peo, tid, y);
  if (r < 0) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " get_meta failed: r=" << r
		       << " tid=" << tid << dendl;
    return r;
  }
  std::unique_lock l(m);
  // Only install the fetched metadata if it is the same version or newer
  // than what we already cache; otherwise keep our newer copy.
  if (_info.version.same_or_later(this->info.version)) {
    info = std::move(_info);
    part_header_size = _phs;
    part_entry_overhead = _peo;
  }
  return 0;
}

// Convenience overload: allocates a fresh transaction id and delegates.
int FIFO::read_meta(const DoutPrefixProvider *dpp, optional_yield y) {
  std::unique_lock l(m);
  auto tid = ++next_tid;
  l.unlock();
  return read_meta(dpp, tid, y);
}

// Completion adapter for the asynchronous read_meta(): decodes the
// GET_META reply and installs it unless the cached copy is newer.
struct Reader : public Completion<Reader> {
  FIFO* fifo;
  cb::list bl;                  // raw reply buffer filled by aio_exec
  std::uint64_t tid;
  Reader(const DoutPrefixProvider *dpp, FIFO* fifo, lr::AioCompletion* super,
	 std::uint64_t tid)
    : Completion(dpp, super), fifo(fifo), tid(tid) {}

  void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " entering: tid=" << tid << dendl;
    if (r >= 0) try {
	fifo::op::get_meta_reply reply;
	auto iter = bl.cbegin();
	decode(reply, iter);
	std::unique_lock l(fifo->m);
	if (reply.info.version.same_or_later(fifo->info.version)) {
	  fifo->info = std::move(reply.info);
	  fifo->part_header_size = reply.part_header_size;
	  fifo->part_entry_overhead = reply.part_entry_overhead;
	}
      } catch (const cb::error& err) {
	ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " failed to decode response err=" << err.what()
			   << " tid=" << tid << dendl;
	r = from_error_code(err.code());
      } else {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " read_meta failed r=" << r
			 << " tid=" << tid << dendl;
    }
    complete(std::move(p), r);
  }
};

// Re-fetch this FIFO's metadata.  Asynchronous variant.
void FIFO::read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c)
{
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  lr::ObjectReadOperation op;
  fifo::op::get_meta gm;
  cb::list in;
  encode(gm, in);
  auto reader = std::make_unique<Reader>(dpp, this, c, tid);
  auto rp = reader.get();
  auto r = ioctx.aio_exec(oid, Reader::call(std::move(reader)), fifo::op::CLASS,
			  fifo::op::GET_META, in, &rp->bl);
  assert(r >= 0);
}

// Return the cached metadata.  No lock is taken; the caller must tolerate
// concurrent updates.
const fifo::info& FIFO::meta() const {
  return info;
}

std::pair<std::uint32_t, std::uint32_t>
FIFO::get_part_layout_info() const { return {part_header_size, part_entry_overhead}; } int FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, optional_yield y) { return push(dpp, std::vector{ bl }, y); } void FIFO::push(const DoutPrefixProvider *dpp, const cb::list& bl, lr::AioCompletion* c) { push(dpp, std::vector{ bl }, c); } int FIFO::push(const DoutPrefixProvider *dpp, const std::vector<cb::list>& data_bufs, optional_yield y) { std::unique_lock l(m); auto tid = ++next_tid; auto max_entry_size = info.params.max_entry_size; auto need_new_head = info.need_new_head(); auto head_part_num = info.head_part_num; l.unlock(); ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (data_bufs.empty()) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " empty push, returning success tid=" << tid << dendl; return 0; } // Validate sizes for (const auto& bl : data_bufs) { if (bl.length() > max_entry_size) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entry bigger than max_entry_size tid=" << tid << dendl; return -E2BIG; } } int r = 0; if (need_new_head) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; r = _prepare_new_head(dpp, head_part_num + 1, tid, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _prepare_new_head failed: r=" << r << " tid=" << tid << dendl; return r; } } std::deque<cb::list> remaining(data_bufs.begin(), data_bufs.end()); std::deque<cb::list> batch; uint64_t batch_len = 0; auto retries = 0; bool canceled = true; while ((!remaining.empty() || !batch.empty()) && (retries <= MAX_RACE_RETRIES)) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " preparing push: remaining=" << remaining.size() << " batch=" << batch.size() << " retries=" << retries << " tid=" << tid << dendl; std::unique_lock l(m); head_part_num = info.head_part_num; auto max_part_size = 
info.params.max_part_size; auto overhead = part_entry_overhead; l.unlock(); while (!remaining.empty() && (remaining.front().length() + batch_len <= max_part_size)) { /* We can send entries with data_len up to max_entry_size, however, we want to also account the overhead when dealing with multiple entries. Previous check doesn't account for overhead on purpose. */ batch_len += remaining.front().length() + overhead; batch.push_back(std::move(remaining.front())); remaining.pop_front(); } ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepared push: remaining=" << remaining.size() << " batch=" << batch.size() << " retries=" << retries << " batch_len=" << batch_len << " tid=" << tid << dendl; auto r = push_entries(dpp, batch, tid, y); if (r == -ERANGE) { canceled = true; ++retries; ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; r = _prepare_new_head(dpp, head_part_num + 1, tid, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepare_new_head failed: r=" << r << " tid=" << tid << dendl; return r; } r = 0; continue; } if (r == -ENOENT) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " racing client trimmed part, rereading metadata " << "tid=" << tid << dendl; canceled = true; ++retries; r = read_meta(dpp, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; return r; } r = 0; continue; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_entries failed: r=" << r << " tid=" << tid << dendl; return r; } // Made forward progress! 
canceled = false; retries = 0; batch_len = 0; if (r == ssize(batch)) { batch.clear(); } else { batch.erase(batch.begin(), batch.begin() + r); for (const auto& b : batch) { batch_len += b.length() + part_entry_overhead; } } } if (canceled) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -ECANCELED; } return 0; } struct Pusher : public Completion<Pusher> { FIFO* f; std::deque<cb::list> remaining; std::deque<cb::list> batch; int i = 0; std::int64_t head_part_num; std::uint64_t tid; enum { pushing, new_heading, meta_reading } state = pushing; void prep_then_push(const DoutPrefixProvider *dpp, Ptr&& p, const unsigned successes) { std::unique_lock l(f->m); auto max_part_size = f->info.params.max_part_size; auto part_entry_overhead = f->part_entry_overhead; head_part_num = f->info.head_part_num; l.unlock(); ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " preparing push: remaining=" << remaining.size() << " batch=" << batch.size() << " i=" << i << " tid=" << tid << dendl; uint64_t batch_len = 0; if (successes > 0) { if (successes == batch.size()) { batch.clear(); } else { batch.erase(batch.begin(), batch.begin() + successes); for (const auto& b : batch) { batch_len += b.length() + part_entry_overhead; } } } if (batch.empty() && remaining.empty()) { complete(std::move(p), 0); return; } while (!remaining.empty() && (remaining.front().length() + batch_len <= max_part_size)) { /* We can send entries with data_len up to max_entry_size, however, we want to also account the overhead when dealing with multiple entries. Previous check doesn't account for overhead on purpose. 
*/ batch_len += remaining.front().length() + part_entry_overhead; batch.push_back(std::move(remaining.front())); remaining.pop_front(); } ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepared push: remaining=" << remaining.size() << " batch=" << batch.size() << " i=" << i << " batch_len=" << batch_len << " tid=" << tid << dendl; push(std::move(p)); } void push(Ptr&& p) { f->push_entries(batch, tid, call(std::move(p))); } void new_head(const DoutPrefixProvider *dpp, Ptr&& p) { state = new_heading; f->_prepare_new_head(dpp, head_part_num + 1, tid, call(std::move(p))); } void read_meta(const DoutPrefixProvider *dpp, Ptr&& p) { ++i; state = meta_reading; f->read_meta(dpp, tid, call(std::move(p))); } void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { switch (state) { case pushing: if (r == -ERANGE) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; new_head(dpp, std::move(p)); return; } if (r == -ENOENT) { if (i > MAX_RACE_RETRIES) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " racing client deleted part, but we're out" << " of retries: tid=" << tid << dendl; complete(std::move(p), r); } ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " racing client deleted part: tid=" << tid << dendl; read_meta(dpp, std::move(p)); return; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " push_entries failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; } i = 0; // We've made forward progress, so reset the race counter! 
prep_then_push(dpp, std::move(p), r); break; case new_heading: if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " prepare_new_head failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; } state = pushing; handle_new_head(dpp, std::move(p), r); break; case meta_reading: if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; } state = pushing; prep_then_push(dpp, std::move(p), r); break; } } void handle_new_head(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (r == -ECANCELED) { if (p->i == MAX_RACE_RETRIES) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -ECANCELED); return; } ++p->i; } else if (r) { complete(std::move(p), r); return; } if (p->batch.empty()) { prep_then_push(dpp, std::move(p), 0); return; } else { push(std::move(p)); return; } } Pusher(const DoutPrefixProvider *dpp, FIFO* f, std::deque<cb::list>&& remaining, std::int64_t head_part_num, std::uint64_t tid, lr::AioCompletion* super) : Completion(dpp, super), f(f), remaining(std::move(remaining)), head_part_num(head_part_num), tid(tid) {} }; void FIFO::push(const DoutPrefixProvider *dpp, const std::vector<cb::list>& data_bufs, lr::AioCompletion* c) { std::unique_lock l(m); auto tid = ++next_tid; auto max_entry_size = info.params.max_entry_size; auto need_new_head = info.need_new_head(); auto head_part_num = info.head_part_num; l.unlock(); ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; auto p = std::make_unique<Pusher>(dpp, this, std::deque<cb::list>(data_bufs.begin(), data_bufs.end()), head_part_num, tid, c); // Validate sizes for (const auto& bl : data_bufs) { if (bl.length() > max_entry_size) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entry bigger than 
max_entry_size tid=" << tid << dendl; Pusher::complete(std::move(p), -E2BIG); return; } } if (data_bufs.empty() ) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " empty push, returning success tid=" << tid << dendl; Pusher::complete(std::move(p), 0); return; } if (need_new_head) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " need new head tid=" << tid << dendl; p->new_head(dpp, std::move(p)); } else { p->prep_then_push(dpp, std::move(p), 0); } } int FIFO::list(const DoutPrefixProvider *dpp, int max_entries, std::optional<std::string_view> markstr, std::vector<list_entry>* presult, bool* pmore, optional_yield y) { std::unique_lock l(m); auto tid = ++next_tid; std::int64_t part_num = info.tail_part_num; l.unlock(); ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::uint64_t ofs = 0; if (markstr) { auto marker = to_marker(*markstr); if (!marker) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " invalid marker string: " << markstr << " tid= "<< tid << dendl; return -EINVAL; } part_num = marker->num; ofs = marker->ofs; } std::vector<list_entry> result; result.reserve(max_entries); bool more = false; std::vector<fifo::part_list_entry> entries; int r = 0; while (max_entries > 0) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " max_entries=" << max_entries << " tid=" << tid << dendl; bool part_more = false; bool part_full = false; std::unique_lock l(m); auto part_oid = info.part_oid(part_num); l.unlock(); r = list_part(dpp, ioctx, part_oid, ofs, max_entries, &entries, &part_more, &part_full, tid, y); if (r == -ENOENT) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " missing part, rereading metadata" << " tid= "<< tid << dendl; r = read_meta(dpp, tid, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid= "<< tid << dendl; return r; } if (part_num < 
info.tail_part_num) { /* raced with trim? restart */ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " raced with trim, restarting: tid=" << tid << dendl; max_entries += result.size(); result.clear(); std::unique_lock l(m); part_num = info.tail_part_num; l.unlock(); ofs = 0; continue; } ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " assuming part was not written yet, so end of data: " << "tid=" << tid << dendl; more = false; r = 0; break; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " list_entries failed: r=" << r << " tid= "<< tid << dendl; return r; } more = part_full || part_more; for (auto& entry : entries) { list_entry e; e.data = std::move(entry.data); e.marker = marker{part_num, entry.ofs}.to_string(); e.mtime = entry.mtime; result.push_back(std::move(e)); --max_entries; if (max_entries == 0) break; } entries.clear(); if (max_entries > 0 && part_more) { } if (!part_full) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " head part is not full, so we can assume we're done: " << "tid=" << tid << dendl; break; } if (!part_more) { ++part_num; ofs = 0; } } if (presult) *presult = std::move(result); if (pmore) *pmore = more; return 0; } int FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive, optional_yield y) { bool overshoot = false; auto marker = to_marker(markstr); if (!marker) { return -EINVAL; } auto part_num = marker->num; auto ofs = marker->ofs; std::unique_lock l(m); auto tid = ++next_tid; auto hn = info.head_part_num; const auto max_part_size = info.params.max_part_size; if (part_num > hn) { l.unlock(); auto r = read_meta(dpp, tid, y); if (r < 0) { return r; } l.lock(); auto hn = info.head_part_num; if (part_num > hn) { overshoot = true; part_num = hn; ofs = max_part_size; } } if (part_num < info.tail_part_num) { return -ENODATA; } auto pn = info.tail_part_num; l.unlock(); ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " 
entering: tid=" << tid << dendl; int r = 0; while (pn < part_num) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " pn=" << pn << " tid=" << tid << dendl; std::unique_lock l(m); l.unlock(); r = trim_part(dpp, pn, max_part_size, false, tid, y); if (r < 0 && r == -ENOENT) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid= "<< tid << dendl; return r; } ++pn; } r = trim_part(dpp, part_num, ofs, exclusive, tid, y); if (r < 0 && r != -ENOENT) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " trim_part failed: r=" << r << " tid= "<< tid << dendl; return r; } l.lock(); auto tail_part_num = info.tail_part_num; auto objv = info.version; l.unlock(); bool canceled = tail_part_num < part_num; int retries = 0; while ((tail_part_num < part_num) && canceled && (retries <= MAX_RACE_RETRIES)) { r = _update_meta(dpp, fifo::update{}.tail_part_num(part_num), objv, &canceled, tid, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " _update_meta failed: r=" << r << " tid= "<< tid << dendl; return r; } if (canceled) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled: retries=" << retries << " tid=" << tid << dendl; l.lock(); tail_part_num = info.tail_part_num; objv = info.version; l.unlock(); ++retries; } } if (canceled) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " canceled too many times, giving up: tid=" << tid << dendl; return -EIO; } return overshoot ? 
    -ENODATA : 0;
}

// State machine driving the asynchronous trim: optionally rereads metadata
// first (reread), then trims each part up to the marker's part, then the
// marker's part itself, and finally retries the tail_part_num metadata
// update on races.
struct Trimmer : public Completion<Trimmer> {
  FIFO* fifo;
  std::int64_t part_num;        // marker's part (clamped to head on reread)
  std::uint64_t ofs;            // marker's offset within part_num
  std::int64_t pn;              // next part to fully trim
  bool exclusive;
  std::uint64_t tid;
  bool update = false;          // last op was the metadata update step
  bool reread = false;          // last op was a metadata reread
  bool canceled = false;
  bool overshoot = false;       // marker was past the head
  int retries = 0;

  Trimmer(const DoutPrefixProvider *dpp, FIFO* fifo, std::int64_t part_num,
	  std::uint64_t ofs, std::int64_t pn, bool exclusive,
	  lr::AioCompletion* super, std::uint64_t tid)
    : Completion(dpp, super), fifo(fifo), part_num(part_num), ofs(ofs), pn(pn),
      exclusive(exclusive), tid(tid) {}

  void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) {
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " entering: tid=" << tid << dendl;

    if (reread) {
      // Metadata reread finished: clamp the target to the (new) head,
      // bail out if it is behind the tail, then start trimming.
      reread = false;
      if (r < 0) {
	ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " read_meta failed: r=" << r
			   << " tid=" << tid << dendl;
	complete(std::move(p), r);
	return;
      }
      std::unique_lock l(fifo->m);
      auto hn = fifo->info.head_part_num;
      const auto max_part_size = fifo->info.params.max_part_size;
      const auto tail_part_num = fifo->info.tail_part_num;
      l.unlock();
      if (part_num > hn) {
	part_num = hn;
	ofs = max_part_size;
	overshoot = true;
      }
      if (part_num < tail_part_num) {
	complete(std::move(p), -ENODATA);
	return;
      }
      pn = tail_part_num;
      if (pn < part_num) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " pn=" << pn << " tid=" << tid << dendl;
	fifo->trim_part(dpp, pn++, max_part_size, false, tid,
			call(std::move(p)));
      } else {
	update = true;
	canceled = tail_part_num < part_num;
	fifo->trim_part(dpp, part_num, ofs, exclusive, tid,
			call(std::move(p)));
      }
      return;
    }

    // -ENOENT from a trim means the part was already gone; benign.
    if (r == -ENOENT) {
      r = 0;
    }

    if (r < 0) {
      ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << (update ? " update_meta " : " trim ") << "failed: r="
			 << r << " tid=" << tid << dendl;
      complete(std::move(p), r);
      return;
    }

    if (!update) {
      // A part trim finished: trim the next full part, or move on to the
      // final (partial) trim of the marker's part.
      ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			 << " handling preceding trim callback: tid=" << tid << dendl;
      retries = 0;
      if (pn < part_num) {
	ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " pn=" << pn << " tid=" << tid << dendl;
	std::unique_lock l(fifo->m);
	const auto max_part_size = fifo->info.params.max_part_size;
	l.unlock();
	fifo->trim_part(dpp, pn++, max_part_size, false, tid,
			call(std::move(p)));
	return;
      }

      std::unique_lock l(fifo->m);
      const auto tail_part_num = fifo->info.tail_part_num;
      l.unlock();
      update = true;
      canceled = tail_part_num < part_num;
      fifo->trim_part(dpp, part_num, ofs, exclusive, tid, call(std::move(p)));
      return;
    }

    // Final step: advance tail_part_num in the metadata, retrying on races.
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " handling update-needed callback: tid=" << tid << dendl;
    std::unique_lock l(fifo->m);
    auto tail_part_num = fifo->info.tail_part_num;
    auto objv = fifo->info.version;
    l.unlock();
    if ((tail_part_num < part_num) &&
	canceled) {
      if (retries > MAX_RACE_RETRIES) {
	ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
			   << " canceled too many times, giving up: tid=" << tid << dendl;
	complete(std::move(p), -EIO);
	return;
      }
      ++retries;
      fifo->_update_meta(dpp, fifo::update{}
			 .tail_part_num(part_num), objv, &canceled,
			 tid, call(std::move(p)));
    } else {
      complete(std::move(p), overshoot ? -ENODATA : 0);
    }
  }
};

// Remove entries up to the given marker.  Asynchronous variant driven by
// Trimmer above.
void FIFO::trim(const DoutPrefixProvider *dpp, std::string_view markstr, bool exclusive,
		lr::AioCompletion* c) {
  auto marker = to_marker(markstr);
  auto realmark = marker.value_or(::rgw::cls::fifo::marker{});
  std::unique_lock l(m);
  const auto hn = info.head_part_num;
  const auto max_part_size = info.params.max_part_size;
  const auto pn = info.tail_part_num;
  const auto part_oid = info.part_oid(pn);
  auto tid = ++next_tid;
  l.unlock();
  ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		     << " entering: tid=" << tid << dendl;
  auto trimmer = std::make_unique<Trimmer>(dpp, this, realmark.num, realmark.ofs,
					   pn, exclusive, c, tid);
  if (!marker) {
    Trimmer::complete(std::move(trimmer), -EINVAL);
    return;
  }
  ++trimmer->pn;
  auto ofs = marker->ofs;
  if (marker->num > hn) {
    // Marker past the cached head: reread metadata before deciding.
    trimmer->reread = true;
    read_meta(dpp, tid, Trimmer::call(std::move(trimmer)));
    return;
  }
  if (pn < marker->num) {
    // Tail part is strictly before the marker's part: trim it fully.
    ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " pn=" << pn << " tid=" << tid << dendl;
    ofs = max_part_size;
  } else {
    trimmer->update = true;
  }
  trim_part(dpp, pn, ofs, exclusive, tid, Trimmer::call(std::move(trimmer)));
}

// Fetch the header of the given part.  Synchronous variant.
int FIFO::get_part_info(const DoutPrefixProvider *dpp, int64_t part_num,
			fifo::part_header* header,
			optional_yield y)
{
  std::unique_lock l(m);
  const auto part_oid = info.part_oid(part_num);
  auto tid = ++next_tid;
  l.unlock();
  auto r = rgw::cls::fifo::get_part_info(dpp, ioctx, part_oid, header, tid, y);
  if (r < 0) {
    ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__
		       << " get_part_info failed: r=" << r
		       << " tid=" << tid << dendl;
  }
  return r;
}

// Fetch the header of the given part.  Asynchronous variant.
void FIFO::get_part_info(int64_t part_num,
			 fifo::part_header* header,
			 lr::AioCompletion* c)
{
  std::unique_lock l(m);
  const auto part_oid = info.part_oid(part_num);
  auto tid = ++next_tid;
  l.unlock();
  auto op = rgw::cls::fifo::get_part_info(cct, header, tid);
  auto r = ioctx.aio_operate(part_oid, c, &op, nullptr);
  ceph_assert(r >= 0);
}

struct InfoGetter : Completion<InfoGetter> {
  FIFO* fifo;
fifo::part_header header; fu2::function<void(int r, fifo::part_header&&)> f; std::uint64_t tid; bool headerread = false; InfoGetter(const DoutPrefixProvider *dpp, FIFO* fifo, fu2::function<void(int r, fifo::part_header&&)> f, std::uint64_t tid, lr::AioCompletion* super) : Completion(dpp, super), fifo(fifo), f(std::move(f)), tid(tid) {} void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (!headerread) { if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " read_meta failed: r=" << r << " tid=" << tid << dendl; if (f) f(r, {}); complete(std::move(p), r); return; } auto info = fifo->meta(); auto hpn = info.head_part_num; if (hpn < 0) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " no head, returning empty partinfo r=" << r << " tid=" << tid << dendl; if (f) f(0, {}); complete(std::move(p), r); return; } headerread = true; auto op = rgw::cls::fifo::get_part_info(fifo->cct, &header, tid); std::unique_lock l(fifo->m); auto oid = fifo->info.part_oid(hpn); l.unlock(); r = fifo->ioctx.aio_operate(oid, call(std::move(p)), &op, nullptr); ceph_assert(r >= 0); return; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " get_part_info failed: r=" << r << " tid=" << tid << dendl; } if (f) f(r, std::move(header)); complete(std::move(p), r); return; } }; void FIFO::get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function<void(int r, fifo::part_header&&)> f, lr::AioCompletion* c) { std::unique_lock l(m); auto tid = ++next_tid; l.unlock(); auto ig = std::make_unique<InfoGetter>(dpp, this, std::move(f), tid, c); read_meta(dpp, tid, InfoGetter::call(std::move(ig))); } struct JournalProcessor : public Completion<JournalProcessor> { private: FIFO* const fifo; std::vector<fifo::journal_entry> processed; decltype(fifo->info.journal) journal; decltype(journal)::iterator iter; std::int64_t new_tail; std::int64_t new_head; std::int64_t new_max; int race_retries = 0; bool first_pp = true; bool 
canceled = false; std::uint64_t tid; enum { entry_callback, pp_callback, } state; void create_part(const DoutPrefixProvider *dpp, Ptr&& p, int64_t part_num) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; state = entry_callback; lr::ObjectWriteOperation op; op.create(false); /* We don't need exclusivity, part_init ensures we're creating from the same journal entry. */ std::unique_lock l(fifo->m); part_init(&op, fifo->info.params); auto oid = fifo->info.part_oid(part_num); l.unlock(); auto r = fifo->ioctx.aio_operate(oid, call(std::move(p)), &op); ceph_assert(r >= 0); return; } void remove_part(const DoutPrefixProvider *dpp, Ptr&& p, int64_t part_num) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; state = entry_callback; lr::ObjectWriteOperation op; op.remove(); std::unique_lock l(fifo->m); auto oid = fifo->info.part_oid(part_num); l.unlock(); auto r = fifo->ioctx.aio_operate(oid, call(std::move(p)), &op); ceph_assert(r >= 0); return; } void finish_je(const DoutPrefixProvider *dpp, Ptr&& p, int r, const fifo::journal_entry& entry) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " finishing entry: entry=" << entry << " tid=" << tid << dendl; using enum fifo::journal_entry::Op; if (entry.op == remove && r == -ENOENT) r = 0; if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry failed: entry=" << entry << " r=" << r << " tid=" << tid << dendl; complete(std::move(p), r); return; } else { switch (entry.op) { case unknown: case set_head: // Can't happen. Filtered out in process. 
complete(std::move(p), -EIO); return; case create: if (entry.part_num > new_max) { new_max = entry.part_num; } break; case remove: if (entry.part_num >= new_tail) { new_tail = entry.part_num + 1; } break; } processed.push_back(entry); } ++iter; process(dpp, std::move(p)); } void postprocess(const DoutPrefixProvider *dpp, Ptr&& p) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; if (processed.empty()) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } pp_run(dpp, std::move(p), 0, false); } public: JournalProcessor(const DoutPrefixProvider *dpp, FIFO* fifo, std::uint64_t tid, lr::AioCompletion* super) : Completion(dpp, super), fifo(fifo), tid(tid) { std::unique_lock l(fifo->m); journal = fifo->info.journal; iter = journal.begin(); new_tail = fifo->info.tail_part_num; new_head = fifo->info.head_part_num; new_max = fifo->info.max_push_part_num; } void pp_run(const DoutPrefixProvider *dpp, Ptr&& p, int r, bool canceled) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; std::optional<int64_t> tail_part_num; std::optional<int64_t> head_part_num; std::optional<int64_t> max_part_num; if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " failed, r=: " << r << " tid=" << tid << dendl; complete(std::move(p), r); } ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " postprocessing: race_retries=" << race_retries << " tid=" << tid << dendl; if (!first_pp && r == 0 && !canceled) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } first_pp = false; if (canceled) { if (race_retries >= MAX_RACE_RETRIES) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << 
" canceled too many times, giving up: tid=" << tid << dendl; complete(std::move(p), -ECANCELED); return; } ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " update canceled, retrying: race_retries=" << race_retries << " tid=" << tid << dendl; ++race_retries; std::vector<fifo::journal_entry> new_processed; std::unique_lock l(fifo->m); for (auto& e : processed) { if (fifo->info.journal.contains(e)) { new_processed.push_back(e); } } processed = std::move(new_processed); } std::unique_lock l(fifo->m); auto objv = fifo->info.version; if (new_tail > fifo->info.tail_part_num) { tail_part_num = new_tail; } if (new_head > fifo->info.head_part_num) { head_part_num = new_head; } if (new_max > fifo->info.max_push_part_num) { max_part_num = new_max; } l.unlock(); if (processed.empty() && !tail_part_num && !max_part_num) { /* nothing to update anymore */ ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " nothing to update any more: race_retries=" << race_retries << " tid=" << tid << dendl; complete(std::move(p), 0); return; } state = pp_callback; fifo->_update_meta(dpp, fifo::update{} .tail_part_num(tail_part_num) .head_part_num(head_part_num) .max_push_part_num(max_part_num) .journal_entries_rm(processed), objv, &this->canceled, tid, call(std::move(p))); return; } JournalProcessor(const JournalProcessor&) = delete; JournalProcessor& operator =(const JournalProcessor&) = delete; JournalProcessor(JournalProcessor&&) = delete; JournalProcessor& operator =(JournalProcessor&&) = delete; void process(const DoutPrefixProvider *dpp, Ptr&& p) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; while (iter != journal.end()) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " processing entry: entry=" << *iter << " tid=" << tid << dendl; const auto entry = *iter; switch (entry.op) { using enum fifo::journal_entry::Op; case create: create_part(dpp, std::move(p), entry.part_num); return; case 
set_head: if (entry.part_num > new_head) { new_head = entry.part_num; } processed.push_back(entry); ++iter; continue; case remove: remove_part(dpp, std::move(p), entry.part_num); return; default: ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " unknown journaled op: entry=" << entry << " tid=" << tid << dendl; complete(std::move(p), -EIO); return; } } postprocess(dpp, std::move(p)); return; } void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { ldpp_dout(dpp, 20) << __PRETTY_FUNCTION__ << ":" << __LINE__ << " entering: tid=" << tid << dendl; switch (state) { case entry_callback: finish_je(dpp, std::move(p), r, *iter); return; case pp_callback: auto c = canceled; canceled = false; pp_run(dpp, std::move(p), r, c); return; } abort(); } }; void FIFO::process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c) { auto p = std::make_unique<JournalProcessor>(dpp, this, tid, c); p->process(dpp, std::move(p)); } struct Lister : Completion<Lister> { FIFO* f; std::vector<list_entry> result; bool more = false; std::int64_t part_num; std::uint64_t ofs; int max_entries; int r_out = 0; std::vector<fifo::part_list_entry> entries; bool part_more = false; bool part_full = false; std::vector<list_entry>* entries_out; bool* more_out; std::uint64_t tid; bool read = false; void complete(Ptr&& p, int r) { if (r >= 0) { if (more_out) *more_out = more; if (entries_out) *entries_out = std::move(result); } Completion::complete(std::move(p), r); } public: Lister(const DoutPrefixProvider *dpp, FIFO* f, std::int64_t part_num, std::uint64_t ofs, int max_entries, std::vector<list_entry>* entries_out, bool* more_out, std::uint64_t tid, lr::AioCompletion* super) : Completion(dpp, super), f(f), part_num(part_num), ofs(ofs), max_entries(max_entries), entries_out(entries_out), more_out(more_out), tid(tid) { result.reserve(max_entries); } Lister(const Lister&) = delete; Lister& operator =(const Lister&) = delete; Lister(Lister&&) = delete; Lister& 
operator =(Lister&&) = delete; void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (read) handle_read(std::move(p), r); else handle_list(dpp, std::move(p), r); } void list(Ptr&& p) { if (max_entries > 0) { part_more = false; part_full = false; entries.clear(); std::unique_lock l(f->m); auto part_oid = f->info.part_oid(part_num); l.unlock(); read = false; auto op = list_part(f->cct, ofs, max_entries, &r_out, &entries, &part_more, &part_full, tid); f->ioctx.aio_operate(part_oid, call(std::move(p)), &op, nullptr); } else { complete(std::move(p), 0); } } void handle_read(Ptr&& p, int r) { read = false; if (r >= 0) r = r_out; r_out = 0; if (r < 0) { complete(std::move(p), r); return; } if (part_num < f->info.tail_part_num) { /* raced with trim? restart */ max_entries += result.size(); result.clear(); part_num = f->info.tail_part_num; ofs = 0; list(std::move(p)); return; } /* assuming part was not written yet, so end of data */ more = false; complete(std::move(p), 0); return; } void handle_list(const DoutPrefixProvider *dpp, Ptr&& p, int r) { if (r >= 0) r = r_out; r_out = 0; std::unique_lock l(f->m); auto part_oid = f->info.part_oid(part_num); l.unlock(); if (r == -ENOENT) { read = true; f->read_meta(dpp, tid, call(std::move(p))); return; } if (r < 0) { complete(std::move(p), r); return; } more = part_full || part_more; for (auto& entry : entries) { list_entry e; e.data = std::move(entry.data); e.marker = marker{part_num, entry.ofs}.to_string(); e.mtime = entry.mtime; result.push_back(std::move(e)); } max_entries -= entries.size(); entries.clear(); if (max_entries > 0 && part_more) { list(std::move(p)); return; } if (!part_full) { /* head part is not full */ complete(std::move(p), 0); return; } ++part_num; ofs = 0; list(std::move(p)); } }; void FIFO::list(const DoutPrefixProvider *dpp, int max_entries, std::optional<std::string_view> markstr, std::vector<list_entry>* out, bool* more, lr::AioCompletion* c) { std::unique_lock l(m); auto tid = ++next_tid; 
std::int64_t part_num = info.tail_part_num; l.unlock(); std::uint64_t ofs = 0; std::optional<::rgw::cls::fifo::marker> marker; if (markstr) { marker = to_marker(*markstr); if (marker) { part_num = marker->num; ofs = marker->ofs; } } auto ls = std::make_unique<Lister>(dpp, this, part_num, ofs, max_entries, out, more, tid, c); if (markstr && !marker) { auto l = ls.get(); l->complete(std::move(ls), -EINVAL); } else { ls->list(std::move(ls)); } } }
76,662
29.182283
111
cc
null
ceph-main/src/rgw/driver/rados/cls_fifo_legacy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat <[email protected]> * Author: Adam C. Emerson * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <cstdint> #include <deque> #include <map> #include <memory> #include <mutex> #include <optional> #include <string_view> #include <vector> #include <fmt/format.h> #include "include/rados/librados.hpp" #include "include/buffer.h" #include "include/function2.hpp" #include "common/async/yield_context.h" #include "cls/fifo/cls_fifo_types.h" #include "cls/fifo/cls_fifo_ops.h" #include "librados/AioCompletionImpl.h" #include "rgw_tools.h" namespace rgw::cls::fifo { namespace cb = ceph::buffer; namespace fifo = rados::cls::fifo; namespace lr = librados; inline constexpr std::uint64_t default_max_part_size = 4 * 1024 * 1024; inline constexpr std::uint64_t default_max_entry_size = 32 * 1024; void create_meta(lr::ObjectWriteOperation* op, std::string_view id, std::optional<fifo::objv> objv, std::optional<std::string_view> oid_prefix, bool exclusive = false, std::uint64_t max_part_size = default_max_part_size, std::uint64_t max_entry_size = default_max_entry_size); int get_meta(const DoutPrefixProvider *dpp, lr::IoCtx& ioctx, const std::string& oid, std::optional<fifo::objv> objv, fifo::info* info, std::uint32_t* part_header_size, std::uint32_t* part_entry_overhead, std::uint64_t tid, optional_yield y, bool probe = false); struct marker { std::int64_t num = 0; std::uint64_t ofs = 0; marker() = default; marker(std::int64_t num, std::uint64_t ofs) : num(num), ofs(ofs) {} static marker max() { return { std::numeric_limits<decltype(num)>::max(), std::numeric_limits<decltype(ofs)>::max() }; } std::string to_string() { return 
fmt::format("{:0>20}:{:0>20}", num, ofs); } }; struct list_entry { cb::list data; std::string marker; ceph::real_time mtime; }; using part_info = fifo::part_header; /// This is an implementation of FIFO using librados to facilitate /// backports. Please see /src/neorados/cls/fifo.h for full /// information. /// /// This library uses optional_yield. Please see /// /src/common/async/yield_context.h. In summary, optional_yield /// contains either a spawn::yield_context (in which case the current /// coroutine is suspended until completion) or null_yield (in which /// case the current thread is blocked until completion.) /// /// Please see the librados documentation for information on /// AioCompletion and IoCtx. class FIFO { friend struct Reader; friend struct Updater; friend struct Trimmer; friend struct InfoGetter; friend struct Pusher; friend struct NewPartPreparer; friend struct NewHeadPreparer; friend struct JournalProcessor; friend struct Lister; mutable lr::IoCtx ioctx; CephContext* cct = static_cast<CephContext*>(ioctx.cct()); const std::string oid; std::mutex m; std::uint64_t next_tid = 0; fifo::info info; std::uint32_t part_header_size = 0xdeadbeef; std::uint32_t part_entry_overhead = 0xdeadbeef; std::optional<marker> to_marker(std::string_view s); FIFO(lr::IoCtx&& ioc, std::string oid) : ioctx(std::move(ioc)), oid(oid) {} int apply_update(const DoutPrefixProvider *dpp, fifo::info* info, const fifo::objv& objv, const fifo::update& update, std::uint64_t tid); int _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, optional_yield y); void _update_meta(const DoutPrefixProvider *dpp, const fifo::update& update, fifo::objv version, bool* pcanceled, std::uint64_t tid, lr::AioCompletion* c); int create_part(const DoutPrefixProvider *dpp, int64_t part_num, std::uint64_t tid, optional_yield y); int remove_part(const DoutPrefixProvider *dpp, int64_t part_num, std::uint64_t tid, optional_yield 
y); int process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); void process_journal(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); int _prepare_new_part(const DoutPrefixProvider *dpp, std::int64_t new_part_num, bool is_head, std::uint64_t tid, optional_yield y); void _prepare_new_part(const DoutPrefixProvider *dpp, std::int64_t new_part_num, bool is_head, std::uint64_t tid, lr::AioCompletion* c); int _prepare_new_head(const DoutPrefixProvider *dpp, std::int64_t new_head_part_num, std::uint64_t tid, optional_yield y); void _prepare_new_head(const DoutPrefixProvider *dpp, std::int64_t new_head_part_num, std::uint64_t tid, lr::AioCompletion* c); int push_entries(const DoutPrefixProvider *dpp, const std::deque<cb::list>& data_bufs, std::uint64_t tid, optional_yield y); void push_entries(const std::deque<cb::list>& data_bufs, std::uint64_t tid, lr::AioCompletion* c); int trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, bool exclusive, std::uint64_t tid, optional_yield y); void trim_part(const DoutPrefixProvider *dpp, int64_t part_num, uint64_t ofs, bool exclusive, std::uint64_t tid, lr::AioCompletion* c); /// Force refresh of metadata, yielding/blocking style int read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, optional_yield y); /// Force refresh of metadata, with a librados Completion void read_meta(const DoutPrefixProvider *dpp, std::uint64_t tid, lr::AioCompletion* c); public: FIFO(const FIFO&) = delete; FIFO& operator =(const FIFO&) = delete; FIFO(FIFO&&) = delete; FIFO& operator =(FIFO&&) = delete; /// Open an existing FIFO. 
static int open(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context std::string oid, //< OID for metadata object std::unique_ptr<FIFO>* fifo, //< OUT: Pointer to FIFO object optional_yield y, //< Optional yield context /// Operation will fail if FIFO is not at this version std::optional<fifo::objv> objv = std::nullopt, /// Probing for existence, don't print errors if we /// can't find it. bool probe = false); /// Create a new or open an existing FIFO. static int create(const DoutPrefixProvider *dpp, lr::IoCtx ioctx, //< IO Context std::string oid, //< OID for metadata object std::unique_ptr<FIFO>* fifo, //< OUT: Pointer to FIFO object optional_yield y, //< Optional yield context /// Operation will fail if the FIFO exists and is /// not of this version. std::optional<fifo::objv> objv = std::nullopt, /// Prefix for all objects std::optional<std::string_view> oid_prefix = std::nullopt, /// Fail if the FIFO already exists bool exclusive = false, /// Maximum allowed size of parts std::uint64_t max_part_size = default_max_part_size, /// Maximum allowed size of entries std::uint64_t max_entry_size = default_max_entry_size); /// Force refresh of metadata, yielding/blocking style int read_meta(const DoutPrefixProvider *dpp, optional_yield y); /// Get currently known metadata const fifo::info& meta() const; /// Get partition header and entry overhead size std::pair<std::uint32_t, std::uint32_t> get_part_layout_info() const; /// Push an entry to the FIFO int push(const DoutPrefixProvider *dpp, const cb::list& bl, //< Entry to push optional_yield y //< Optional yield ); /// Push an entry to the FIFO void push(const DoutPrefixProvider *dpp, const cb::list& bl, //< Entry to push lr::AioCompletion* c //< Async Completion ); /// Push entries to the FIFO int push(const DoutPrefixProvider *dpp, const std::vector<cb::list>& data_bufs, //< Entries to push optional_yield y //< Optional yield ); /// Push entries to the FIFO void push(const DoutPrefixProvider *dpp, const 
std::vector<cb::list>& data_bufs, //< Entries to push lr::AioCompletion* c //< Async Completion ); /// List entries int list(const DoutPrefixProvider *dpp, int max_entries, //< Maximum entries to list /// Point after which to begin listing. Start at tail if null std::optional<std::string_view> markstr, std::vector<list_entry>* out, //< OUT: entries /// OUT: True if more entries in FIFO beyond the last returned bool* more, optional_yield y //< Optional yield ); void list(const DoutPrefixProvider *dpp, int max_entries, //< Maximum entries to list /// Point after which to begin listing. Start at tail if null std::optional<std::string_view> markstr, std::vector<list_entry>* out, //< OUT: entries /// OUT: True if more entries in FIFO beyond the last returned bool* more, lr::AioCompletion* c //< Async Completion ); /// Trim entries, coroutine/block style int trim(const DoutPrefixProvider *dpp, std::string_view markstr, //< Position to which to trim, inclusive bool exclusive, //< If true, do not trim the target entry //< itself, just all those before it. optional_yield y //< Optional yield ); /// Trim entries, librados AioCompletion style void trim(const DoutPrefixProvider *dpp, std::string_view markstr, //< Position to which to trim, inclusive bool exclusive, //< If true, do not trim the target entry //< itself, just all those before it. lr::AioCompletion* c //< librados AIO Completion ); /// Get part info int get_part_info(const DoutPrefixProvider *dpp, int64_t part_num, /// Part number fifo::part_header* header, //< OUT: Information optional_yield y //< Optional yield ); /// Get part info void get_part_info(int64_t part_num, //< Part number fifo::part_header* header, //< OUT: Information lr::AioCompletion* c //< AIO Completion ); /// A convenience method to fetch the part information for the FIFO /// head, using librados::AioCompletion, since /// libradio::AioCompletions compose lousily. 
void get_head_info(const DoutPrefixProvider *dpp, fu2::unique_function< //< Function to receive info void(int r, fifo::part_header&&)>, lr::AioCompletion* c //< AIO Completion ); }; template<typename T> struct Completion { private: const DoutPrefixProvider *_dpp; lr::AioCompletion* _cur = nullptr; lr::AioCompletion* _super; public: using Ptr = std::unique_ptr<T>; lr::AioCompletion* cur() const { return _cur; } lr::AioCompletion* super() const { return _super; } Completion(const DoutPrefixProvider *dpp, lr::AioCompletion* super) : _dpp(dpp), _super(super) { super->pc->get(); } ~Completion() { if (_super) { _super->pc->put(); } if (_cur) _cur->release(); _super = nullptr; _cur = nullptr; } // The only times that aio_operate can return an error are: // 1. The completion contains a null pointer. This should just // crash, and in our case it does. // 2. An attempt is made to write to a snapshot. RGW doesn't use // snapshots, so we don't care. // // So we will just assert that initiating an Aio operation succeeds // and not worry about recovering. static lr::AioCompletion* call(Ptr&& p) { p->_cur = lr::Rados::aio_create_completion(static_cast<void*>(p.get()), &cb); auto c = p->_cur; p.release(); // coverity[leaked_storage:SUPPRESS] return c; } static void complete(Ptr&& p, int r) { auto c = p->_super; p->_super = nullptr; rgw_complete_aio_completion(c, r); } static void cb(lr::completion_t, void* arg) { auto t = static_cast<T*>(arg); auto r = t->_cur->get_return_value(); t->_cur->release(); t->_cur = nullptr; t->handle(t->_dpp, Ptr(t), r); } }; }
12,040
34.83631
138
h
null
ceph-main/src/rgw/driver/rados/rgw_bucket.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_acl_s3.h" #include "rgw_tag_s3.h" #include "rgw_bucket.h" #include "rgw_op.h" #include "rgw_bucket_sync.h" #include "services/svc_zone.h" #include "services/svc_bucket.h" #include "services/svc_user.h" #include "rgw_reshard.h" // stolen from src/cls/version/cls_version.cc #define VERSION_ATTR "ceph.objclass.version" #include "cls/user/cls_user_types.h" #include "rgw_sal_rados.h" #define dout_subsys ceph_subsys_rgw // seconds for timeout during RGWBucket::check_object_index constexpr uint64_t BUCKET_TAG_QUICK_TIMEOUT = 30; using namespace std; // default number of entries to list with each bucket listing call // (use marker to bridge between calls) static constexpr size_t listing_max_entries = 1000; /* * The tenant_name is always returned on purpose. May be empty, of course. */ static void parse_bucket(const string& bucket, string *tenant_name, string *bucket_name, string *bucket_instance = nullptr /* optional */) { /* * expected format: [tenant/]bucket:bucket_instance */ int pos = bucket.find('/'); if (pos >= 0) { *tenant_name = bucket.substr(0, pos); } else { tenant_name->clear(); } string bn = bucket.substr(pos + 1); pos = bn.find (':'); if (pos < 0) { *bucket_name = std::move(bn); return; } *bucket_name = bn.substr(0, pos); if (bucket_instance) { *bucket_instance = bn.substr(pos + 1); } /* * deal with the possible tenant:bucket:bucket_instance case */ if (tenant_name->empty()) { pos = bucket_instance->find(':'); if (pos >= 0) { *tenant_name = *bucket_name; *bucket_name = bucket_instance->substr(0, pos); *bucket_instance = bucket_instance->substr(pos + 1); } } } static void dump_mulipart_index_results(list<rgw_obj_index_key>& objs_to_unlink, Formatter *f) { for (const auto& o : objs_to_unlink) { f->dump_string("object", o.name); } } void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User& user, bool fix, optional_yield y, 
const DoutPrefixProvider *dpp) { rgw::sal::BucketList user_buckets; string marker; CephContext *cct = driver->ctx(); size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; do { int ret = user.list_buckets(dpp, marker, string(), max_entries, false, user_buckets, y); if (ret < 0) { ldout(driver->ctx(), 0) << "failed to read user buckets: " << cpp_strerror(-ret) << dendl; return; } map<string, std::unique_ptr<rgw::sal::Bucket>>& buckets = user_buckets.get_buckets(); for (auto i = buckets.begin(); i != buckets.end(); ++i) { marker = i->first; auto& bucket = i->second; std::unique_ptr<rgw::sal::Bucket> actual_bucket; int r = driver->get_bucket(dpp, &user, user.get_tenant(), bucket->get_name(), &actual_bucket, y); if (r < 0) { ldout(driver->ctx(), 0) << "could not get bucket info for bucket=" << bucket << dendl; continue; } if (actual_bucket->get_name().compare(bucket->get_name()) != 0 || actual_bucket->get_tenant().compare(bucket->get_tenant()) != 0 || actual_bucket->get_marker().compare(bucket->get_marker()) != 0 || actual_bucket->get_bucket_id().compare(bucket->get_bucket_id()) != 0) { cout << "bucket info mismatch: expected " << actual_bucket << " got " << bucket << std::endl; if (fix) { cout << "fixing" << std::endl; r = actual_bucket->chown(dpp, user, y); if (r < 0) { cerr << "failed to fix bucket: " << cpp_strerror(-r) << std::endl; } } } } } while (user_buckets.is_truncated()); } // returns true if entry is in the empty namespace. 
note: function // type conforms to type RGWBucketListNameFilter bool rgw_bucket_object_check_filter(const std::string& oid) { const static std::string empty_ns; rgw_obj_key key; // thrown away but needed for parsing return rgw_obj_key::oid_to_key_in_ns(oid, &key, empty_ns); } int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, rgw_obj_key& key, optional_yield y) { if (key.instance.empty()) { key.instance = "null"; } std::unique_ptr<rgw::sal::Object> object = bucket->get_object(key); return object->delete_object(dpp, y); } static void set_err_msg(std::string *sink, std::string msg) { if (sink && !msg.empty()) *sink = msg; } int RGWBucket::init(rgw::sal::Driver* _driver, RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg) { if (!_driver) { set_err_msg(err_msg, "no storage!"); return -EINVAL; } driver = _driver; std::string bucket_name = op_state.get_bucket_name(); if (bucket_name.empty() && op_state.get_user_id().empty()) return -EINVAL; user = driver->get_user(op_state.get_user_id()); std::string tenant = user->get_tenant(); // split possible tenant/name auto pos = bucket_name.find('/'); if (pos != string::npos) { tenant = bucket_name.substr(0, pos); bucket_name = bucket_name.substr(pos + 1); } int r = driver->get_bucket(dpp, user.get(), tenant, bucket_name, &bucket, y); if (r < 0) { set_err_msg(err_msg, "failed to fetch bucket info for bucket=" + bucket_name); return r; } op_state.set_bucket(bucket->clone()); if (!rgw::sal::User::empty(user.get())) { r = user->load_user(dpp, y); if (r < 0) { set_err_msg(err_msg, "failed to fetch user info"); return r; } } op_state.display_name = user->get_display_name(); clear_failure(); return 0; } bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Driver* driver, const string& marker, const string& bucket_id, rgw_bucket* bucket_out) { void *handle = NULL; bool truncated = false; string s; int 
ret = driver->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; driver->meta_list_keys_complete(handle); return -ret; } do { list<string> keys; ret = driver->meta_list_keys_next(dpp, handle, 1000, keys, &truncated); if (ret < 0) { cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; driver->meta_list_keys_complete(handle); return -ret; } for (list<string>::iterator iter = keys.begin(); iter != keys.end(); ++iter) { s = *iter; ret = rgw_bucket_parse_bucket_key(cct, s, bucket_out, nullptr); if (ret < 0) { continue; } if (bucket_id == bucket_out->bucket_id) { driver->meta_list_keys_complete(handle); return true; } } } while (truncated); driver->meta_list_keys_complete(handle); return false; } int RGWBucket::chown(RGWBucketAdminOpState& op_state, const string& marker, optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg) { /* User passed in by rgw_admin is the new user; get the current user and set it in * the bucket */ std::unique_ptr<rgw::sal::User> old_user = driver->get_user(bucket->get_info().owner); bucket->set_owner(old_user.get()); return rgw_chown_bucket_and_objects(driver, bucket.get(), user.get(), marker, err_msg, dpp, y); } int RGWBucket::set_quota(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg) { bucket = op_state.get_bucket()->clone(); bucket->get_info().quota = op_state.quota; int r = bucket->put_info(dpp, false, real_time(), y); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing bucket instance info: " + cpp_strerror(-r)); return r; } return r; } int RGWBucket::remove_object(const DoutPrefixProvider *dpp, RGWBucketAdminOpState& op_state, optional_yield y, std::string *err_msg) { std::string object_name = op_state.get_object_name(); rgw_obj_key key(object_name); bucket = op_state.get_bucket()->clone(); int ret = rgw_remove_object(dpp, driver, bucket.get(), key, 
y); if (ret < 0) { set_err_msg(err_msg, "unable to remove object" + cpp_strerror(-ret)); return ret; } return 0; } static void dump_bucket_index(const vector<rgw_bucket_dir_entry>& objs, Formatter *f) { for (auto iter = objs.begin(); iter != objs.end(); ++iter) { f->dump_string("object", iter->key.name); } } static void dump_bucket_usage(map<RGWObjCategory, RGWStorageStats>& stats, Formatter *formatter) { map<RGWObjCategory, RGWStorageStats>::iterator iter; formatter->open_object_section("usage"); for (iter = stats.begin(); iter != stats.end(); ++iter) { RGWStorageStats& s = iter->second; formatter->open_object_section(to_string(iter->first)); s.dump(formatter); formatter->close_section(); } formatter->close_section(); } static void dump_index_check(map<RGWObjCategory, RGWStorageStats> existing_stats, map<RGWObjCategory, RGWStorageStats> calculated_stats, Formatter *formatter) { formatter->open_object_section("check_result"); formatter->open_object_section("existing_header"); dump_bucket_usage(existing_stats, formatter); formatter->close_section(); formatter->open_object_section("calculated_header"); dump_bucket_usage(calculated_stats, formatter); formatter->close_section(); formatter->close_section(); } int RGWBucket::check_bad_index_multipart(RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg) { const bool fix_index = op_state.will_fix_index(); bucket = op_state.get_bucket()->clone(); rgw::sal::Bucket::ListParams params; params.list_versions = true; params.ns = RGW_OBJ_NS_MULTIPART; std::map<std::string, bool> meta_objs; std::map<rgw_obj_index_key, std::string> all_objs; bool is_truncated; do { rgw::sal::Bucket::ListResults results; int r = bucket->list(dpp, params, listing_max_entries, results, y); if (r < 0) { set_err_msg(err_msg, "failed to list objects in bucket=" + bucket->get_name() + " err=" + cpp_strerror(-r)); return r; } is_truncated = results.is_truncated; for (const 
auto& o : results.objs) { rgw_obj_index_key key = o.key; rgw_obj obj(bucket->get_key(), key); std::string oid = obj.get_oid(); int pos = oid.find_last_of('.'); if (pos < 0) { /* obj has no suffix */ all_objs[key] = oid; } else { /* obj has suffix */ std::string name = oid.substr(0, pos); std::string suffix = oid.substr(pos + 1); if (suffix.compare("meta") == 0) { meta_objs[name] = true; } else { all_objs[key] = name; } } } } while (is_truncated); std::list<rgw_obj_index_key> objs_to_unlink; Formatter *f = flusher.get_formatter(); f->open_array_section("invalid_multipart_entries"); for (const auto& o : all_objs) { const std::string& name = o.second; if (meta_objs.find(name) == meta_objs.end()) { objs_to_unlink.push_back(o.first); } if (objs_to_unlink.size() > listing_max_entries) { if (fix_index) { // note: under rados this removes directly from rados index objects int r = bucket->remove_objs_from_index(dpp, objs_to_unlink); if (r < 0) { set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " + cpp_strerror(-r)); return r; } } dump_mulipart_index_results(objs_to_unlink, f); flusher.flush(); objs_to_unlink.clear(); } } if (fix_index) { // note: under rados this removes directly from rados index objects int r = bucket->remove_objs_from_index(dpp, objs_to_unlink); if (r < 0) { set_err_msg(err_msg, "ERROR: remove_obj_from_index() returned error: " + cpp_strerror(-r)); return r; } } dump_mulipart_index_results(objs_to_unlink, f); f->close_section(); flusher.flush(); return 0; } int RGWBucket::check_object_index(const DoutPrefixProvider *dpp, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, std::string *err_msg) { bool fix_index = op_state.will_fix_index(); if (!fix_index) { set_err_msg(err_msg, "check-objects flag requires fix index enabled"); return -EINVAL; } // use a quicker/shorter tag timeout during this process bucket->set_tag_timeout(dpp, BUCKET_TAG_QUICK_TIMEOUT); rgw::sal::Bucket::ListResults results; 
results.is_truncated = true; Formatter *formatter = flusher.get_formatter(); formatter->open_object_section("objects"); while (results.is_truncated) { rgw::sal::Bucket::ListParams params; params.marker = results.next_marker; params.force_check_filter = rgw_bucket_object_check_filter; int r = bucket->list(dpp, params, listing_max_entries, results, y); if (r == -ENOENT) { break; } else if (r < 0) { set_err_msg(err_msg, "ERROR: failed operation r=" + cpp_strerror(-r)); } dump_bucket_index(results.objs, formatter); flusher.flush(); } formatter->close_section(); // restore normal tag timeout for bucket bucket->set_tag_timeout(dpp, 0); return 0; } int RGWBucket::check_index(const DoutPrefixProvider *dpp, RGWBucketAdminOpState& op_state, map<RGWObjCategory, RGWStorageStats>& existing_stats, map<RGWObjCategory, RGWStorageStats>& calculated_stats, std::string *err_msg) { bool fix_index = op_state.will_fix_index(); int r = bucket->check_index(dpp, existing_stats, calculated_stats); if (r < 0) { set_err_msg(err_msg, "failed to check index error=" + cpp_strerror(-r)); return r; } if (fix_index) { r = bucket->rebuild_index(dpp); if (r < 0) { set_err_msg(err_msg, "failed to rebuild index err=" + cpp_strerror(-r)); return r; } } return 0; } int RGWBucket::sync(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg) { if (!driver->is_meta_master()) { set_err_msg(err_msg, "ERROR: failed to update bucket sync: only allowed on meta master zone"); return -EINVAL; } bool sync = op_state.will_sync_bucket(); if (sync) { bucket->get_info().flags &= ~BUCKET_DATASYNC_DISABLED; } else { bucket->get_info().flags |= BUCKET_DATASYNC_DISABLED; } // when writing this metadata, RGWSI_BucketIndex_RADOS::handle_overwrite() // will write the corresponding datalog and bilog entries int r = bucket->put_info(dpp, false, real_time(), y); if (r < 0) { set_err_msg(err_msg, "ERROR: failed writing bucket instance info:" + cpp_strerror(-r)); return r; } return 
0; } int RGWBucket::policy_bl_to_stream(bufferlist& bl, ostream& o) { RGWAccessControlPolicy_S3 policy(g_ceph_context); int ret = decode_bl(bl, policy); if (ret < 0) { ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; } policy.to_xml(o); return 0; } int rgw_object_get_attr(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Object* obj, const char* attr_name, bufferlist& out_bl, optional_yield y) { std::unique_ptr<rgw::sal::Object::ReadOp> rop = obj->get_read_op(); return rop->get_attr(dpp, attr_name, out_bl, y); } int RGWBucket::get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, optional_yield y, const DoutPrefixProvider *dpp) { int ret; std::string object_name = op_state.get_object_name(); bucket = op_state.get_bucket()->clone(); if (!object_name.empty()) { bufferlist bl; std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(rgw_obj_key(object_name)); ret = rgw_object_get_attr(dpp, driver, obj.get(), RGW_ATTR_ACL, bl, y); if (ret < 0){ return ret; } ret = decode_bl(bl, policy); if (ret < 0) { ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; } return ret; } map<string, bufferlist>::iterator aiter = bucket->get_attrs().find(RGW_ATTR_ACL); if (aiter == bucket->get_attrs().end()) { return -ENOENT; } ret = decode_bl(aiter->second, policy); if (ret < 0) { ldout(driver->ctx(),0) << "failed to decode RGWAccessControlPolicy" << dendl; } return ret; } int RGWBucketAdminOp::get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp, optional_yield y) { RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp); if (ret < 0) return ret; ret = bucket.get_policy(op_state, policy, y, dpp); if (ret < 0) return ret; return 0; } /* Wrappers to facilitate RESTful interface */ int RGWBucketAdminOp::get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const 
DoutPrefixProvider *dpp, optional_yield y) { RGWAccessControlPolicy policy(driver->ctx()); int ret = get_policy(driver, op_state, policy, dpp, y); if (ret < 0) return ret; Formatter *formatter = flusher.get_formatter(); flusher.start(0); formatter->open_object_section("policy"); policy.dump(formatter); formatter->close_section(); flusher.flush(); return 0; } int RGWBucketAdminOp::dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, ostream& os, const DoutPrefixProvider *dpp, optional_yield y) { RGWAccessControlPolicy_S3 policy(driver->ctx()); int ret = get_policy(driver, op_state, policy, dpp, y); if (ret < 0) return ret; policy.to_xml(os); return 0; } int RGWBucketAdminOp::unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y) { RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp); if (ret < 0) return ret; return static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->unlink_bucket(op_state.get_user_id(), op_state.get_bucket()->get_info().bucket, y, dpp, true); } int RGWBucketAdminOp::link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, string *err) { if (!op_state.is_user_op()) { set_err_msg(err, "empty user id"); return -EINVAL; } RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp, err); if (ret < 0) return ret; string bucket_id = op_state.get_bucket_id(); std::string display_name = op_state.get_user_display_name(); std::unique_ptr<rgw::sal::Bucket> loc_bucket; std::unique_ptr<rgw::sal::Bucket> old_bucket; loc_bucket = op_state.get_bucket()->clone(); if (!bucket_id.empty() && bucket_id != loc_bucket->get_bucket_id()) { set_err_msg(err, "specified bucket id does not match " + loc_bucket->get_bucket_id()); return -EINVAL; } old_bucket = loc_bucket->clone(); loc_bucket->get_key().tenant = op_state.get_user_id().tenant; if (!op_state.new_bucket_name.empty()) { auto pos = 
op_state.new_bucket_name.find('/'); if (pos != string::npos) { loc_bucket->get_key().tenant = op_state.new_bucket_name.substr(0, pos); loc_bucket->get_key().name = op_state.new_bucket_name.substr(pos + 1); } else { loc_bucket->get_key().name = op_state.new_bucket_name; } } RGWObjVersionTracker objv_tracker; RGWObjVersionTracker old_version = loc_bucket->get_info().objv_tracker; map<string, bufferlist>::iterator aiter = loc_bucket->get_attrs().find(RGW_ATTR_ACL); if (aiter == loc_bucket->get_attrs().end()) { // should never happen; only pre-argonaut buckets lacked this. ldpp_dout(dpp, 0) << "WARNING: can't bucket link because no acl on bucket=" << old_bucket << dendl; set_err_msg(err, "While crossing the Anavros you have displeased the goddess Hera." " You must sacrifice your ancient bucket " + loc_bucket->get_bucket_id()); return -EINVAL; } bufferlist& aclbl = aiter->second; RGWAccessControlPolicy policy; ACLOwner owner; try { auto iter = aclbl.cbegin(); decode(policy, iter); owner = policy.get_owner(); } catch (buffer::error& e) { set_err_msg(err, "couldn't decode policy"); return -EIO; } int r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->unlink_bucket(owner.get_id(), old_bucket->get_info().bucket, y, dpp, false); if (r < 0) { set_err_msg(err, "could not unlink policy from user " + owner.get_id().to_str()); return r; } // now update the user for the bucket... 
if (display_name.empty()) { ldpp_dout(dpp, 0) << "WARNING: user " << op_state.get_user_id() << " has no display name set" << dendl; } RGWAccessControlPolicy policy_instance; policy_instance.create_default(op_state.get_user_id(), display_name); owner = policy_instance.get_owner(); aclbl.clear(); policy_instance.encode(aclbl); bool exclusive = false; loc_bucket->get_info().owner = op_state.get_user_id(); if (*loc_bucket != *old_bucket) { loc_bucket->get_info().bucket = loc_bucket->get_key(); loc_bucket->get_info().objv_tracker.version_for_read()->ver = 0; exclusive = true; } r = loc_bucket->put_info(dpp, exclusive, ceph::real_time(), y); if (r < 0) { set_err_msg(err, "ERROR: failed writing bucket instance info: " + cpp_strerror(-r)); return r; } /* link to user */ RGWBucketEntryPoint ep; ep.bucket = loc_bucket->get_info().bucket; ep.owner = op_state.get_user_id(); ep.creation_time = loc_bucket->get_info().creation_time; ep.linked = true; rgw::sal::Attrs ep_attrs; rgw_ep_info ep_data{ep, ep_attrs}; r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->link_bucket(op_state.get_user_id(), loc_bucket->get_info().bucket, loc_bucket->get_info().creation_time, y, dpp, true, &ep_data); if (r < 0) { set_err_msg(err, "failed to relink bucket"); return r; } if (*loc_bucket != *old_bucket) { // like RGWRados::delete_bucket -- excepting no bucket_index work. 
r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->remove_bucket_entrypoint_info( old_bucket->get_key(), y, dpp, RGWBucketCtl::Bucket::RemoveParams() .set_objv_tracker(&ep_data.ep_objv)); if (r < 0) { set_err_msg(err, "failed to unlink old bucket " + old_bucket->get_tenant() + "/" + old_bucket->get_name()); return r; } r = static_cast<rgw::sal::RadosStore*>(driver)->ctl()->bucket->remove_bucket_instance_info( old_bucket->get_key(), old_bucket->get_info(), y, dpp, RGWBucketCtl::BucketInstance::RemoveParams() .set_objv_tracker(&ep_data.ep_objv)); if (r < 0) { set_err_msg(err, "failed to unlink old bucket " + old_bucket->get_tenant() + "/" + old_bucket->get_name()); return r; } } return 0; } int RGWBucketAdminOp::chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const string& marker, const DoutPrefixProvider *dpp, optional_yield y, string *err) { RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp, err); if (ret < 0) return ret; return bucket.chown(op_state, marker, y, dpp, err); } int RGWBucketAdminOp::check_index(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp) { int ret; map<RGWObjCategory, RGWStorageStats> existing_stats; map<RGWObjCategory, RGWStorageStats> calculated_stats; RGWBucket bucket; ret = bucket.init(driver, op_state, y, dpp); if (ret < 0) return ret; Formatter *formatter = flusher.get_formatter(); flusher.start(0); ret = bucket.check_bad_index_multipart(op_state, flusher, dpp, y); if (ret < 0) return ret; ret = bucket.check_object_index(dpp, op_state, flusher, y); if (ret < 0) return ret; ret = bucket.check_index(dpp, op_state, existing_stats, calculated_stats); if (ret < 0) return ret; dump_index_check(existing_stats, calculated_stats, formatter); flusher.flush(); return 0; } int RGWBucketAdminOp::remove_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, optional_yield y, const DoutPrefixProvider *dpp, bool 
bypass_gc, bool keep_index_consistent) { std::unique_ptr<rgw::sal::Bucket> bucket; std::unique_ptr<rgw::sal::User> user = driver->get_user(op_state.get_user_id()); int ret = driver->get_bucket(dpp, user.get(), user->get_tenant(), op_state.get_bucket_name(), &bucket, y); if (ret < 0) return ret; if (bypass_gc) ret = bucket->remove_bucket_bypass_gc(op_state.get_max_aio(), keep_index_consistent, y, dpp); else ret = bucket->remove_bucket(dpp, op_state.will_delete_children(), false, nullptr, y); return ret; } int RGWBucketAdminOp::remove_object(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y) { RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp); if (ret < 0) return ret; return bucket.remove_object(dpp, op_state, y); } int RGWBucketAdminOp::sync_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, string *err_msg) { RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp, err_msg); if (ret < 0) { return ret; } return bucket.sync(op_state, dpp, y, err_msg); } static int bucket_stats(rgw::sal::Driver* driver, const std::string& tenant_name, const std::string& bucket_name, Formatter *formatter, const DoutPrefixProvider *dpp, optional_yield y) { std::unique_ptr<rgw::sal::Bucket> bucket; map<RGWObjCategory, RGWStorageStats> stats; int ret = driver->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, y); if (ret < 0) { return ret; } const auto& index = bucket->get_info().get_current_index(); if (is_layout_indexless(index)) { cerr << "error, indexless buckets do not maintain stats; bucket=" << bucket->get_name() << std::endl; return -EINVAL; } std::string bucket_ver, master_ver; std::string max_marker; ret = bucket->read_stats(dpp, index, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, &max_marker); if (ret < 0) { cerr << "error getting bucket stats bucket=" << bucket->get_name() << " ret=" << ret << std::endl; return ret; } 
utime_t ut(bucket->get_modification_time()); utime_t ctime_ut(bucket->get_creation_time()); formatter->open_object_section("stats"); formatter->dump_string("bucket", bucket->get_name()); formatter->dump_int("num_shards", bucket->get_info().layout.current_index.layout.normal.num_shards); formatter->dump_string("tenant", bucket->get_tenant()); formatter->dump_string("zonegroup", bucket->get_info().zonegroup); formatter->dump_string("placement_rule", bucket->get_info().placement_rule.to_str()); ::encode_json("explicit_placement", bucket->get_key().explicit_placement, formatter); formatter->dump_string("id", bucket->get_bucket_id()); formatter->dump_string("marker", bucket->get_marker()); formatter->dump_stream("index_type") << bucket->get_info().layout.current_index.layout.type; ::encode_json("owner", bucket->get_info().owner, formatter); formatter->dump_string("ver", bucket_ver); formatter->dump_string("master_ver", master_ver); ut.gmtime(formatter->dump_stream("mtime")); ctime_ut.gmtime(formatter->dump_stream("creation_time")); formatter->dump_string("max_marker", max_marker); dump_bucket_usage(stats, formatter); encode_json("bucket_quota", bucket->get_info().quota, formatter); // bucket tags auto iter = bucket->get_attrs().find(RGW_ATTR_TAGS); if (iter != bucket->get_attrs().end()) { RGWObjTagSet_S3 tagset; bufferlist::const_iterator piter{&iter->second}; try { tagset.decode(piter); tagset.dump(formatter); } catch (buffer::error& err) { cerr << "ERROR: caught buffer:error, couldn't decode TagSet" << std::endl; } } // TODO: bucket CORS // TODO: bucket LC formatter->close_section(); return 0; } int RGWBucketAdminOp::limit_check(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::list<std::string>& user_ids, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp, bool warnings_only) { int ret = 0; const size_t max_entries = driver->ctx()->_conf->rgw_list_buckets_max_chunk; const size_t safe_max_objs_per_shard = 
driver->ctx()->_conf->rgw_safe_max_objects_per_shard; uint16_t shard_warn_pct = driver->ctx()->_conf->rgw_shard_warning_threshold; if (shard_warn_pct > 100) shard_warn_pct = 90; Formatter *formatter = flusher.get_formatter(); flusher.start(0); formatter->open_array_section("users"); for (const auto& user_id : user_ids) { formatter->open_object_section("user"); formatter->dump_string("user_id", user_id); formatter->open_array_section("buckets"); string marker; rgw::sal::BucketList buckets; do { std::unique_ptr<rgw::sal::User> user = driver->get_user(rgw_user(user_id)); ret = user->list_buckets(dpp, marker, string(), max_entries, false, buckets, y); if (ret < 0) return ret; map<string, std::unique_ptr<rgw::sal::Bucket>>& m_buckets = buckets.get_buckets(); for (const auto& iter : m_buckets) { auto& bucket = iter.second; uint64_t num_objects = 0; marker = bucket->get_name(); /* Casey's location for marker update, * as we may now not reach the end of * the loop body */ ret = bucket->load_bucket(dpp, y); if (ret < 0) continue; const auto& index = bucket->get_info().get_current_index(); if (is_layout_indexless(index)) { continue; // indexless buckets don't have stats } /* need stats for num_entries */ string bucket_ver, master_ver; std::map<RGWObjCategory, RGWStorageStats> stats; ret = bucket->read_stats(dpp, index, RGW_NO_SHARD, &bucket_ver, &master_ver, stats, nullptr); if (ret < 0) continue; for (const auto& s : stats) { num_objects += s.second.num_objects; } const uint32_t num_shards = rgw::num_shards(index.layout.normal); uint64_t objs_per_shard = (num_shards) ? 
num_objects/num_shards : num_objects; { bool warn; stringstream ss; uint64_t fill_pct = objs_per_shard * 100 / safe_max_objs_per_shard; if (fill_pct > 100) { ss << "OVER " << fill_pct << "%"; warn = true; } else if (fill_pct >= shard_warn_pct) { ss << "WARN " << fill_pct << "%"; warn = true; } else { ss << "OK"; warn = false; } if (warn || !warnings_only) { formatter->open_object_section("bucket"); formatter->dump_string("bucket", bucket->get_name()); formatter->dump_string("tenant", bucket->get_tenant()); formatter->dump_int("num_objects", num_objects); formatter->dump_int("num_shards", num_shards); formatter->dump_int("objects_per_shard", objs_per_shard); formatter->dump_string("fill_status", ss.str()); formatter->close_section(); } } } formatter->flush(cout); } while (buckets.is_truncated()); /* foreach: bucket */ formatter->close_section(); formatter->close_section(); formatter->flush(cout); } /* foreach: user_id */ formatter->close_section(); formatter->flush(cout); return ret; } /* RGWBucketAdminOp::limit_check */ int RGWBucketAdminOp::info(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp) { RGWBucket bucket; int ret = 0; const std::string& bucket_name = op_state.get_bucket_name(); if (!bucket_name.empty()) { ret = bucket.init(driver, op_state, y, dpp); if (-ENOENT == ret) return -ERR_NO_SUCH_BUCKET; else if (ret < 0) return ret; } Formatter *formatter = flusher.get_formatter(); flusher.start(0); CephContext *cct = driver->ctx(); const size_t max_entries = cct->_conf->rgw_list_buckets_max_chunk; const bool show_stats = op_state.will_fetch_stats(); const rgw_user& user_id = op_state.get_user_id(); if (op_state.is_user_op()) { formatter->open_array_section("buckets"); rgw::sal::BucketList buckets; std::unique_ptr<rgw::sal::User> user = driver->get_user(op_state.get_user_id()); std::string marker; const std::string empty_end_marker; constexpr bool no_need_stats = false; // 
set need_stats to false do { ret = user->list_buckets(dpp, marker, empty_end_marker, max_entries, no_need_stats, buckets, y); if (ret < 0) { return ret; } const std::string* marker_cursor = nullptr; map<string, std::unique_ptr<rgw::sal::Bucket>>& m = buckets.get_buckets(); for (const auto& i : m) { const std::string& obj_name = i.first; if (!bucket_name.empty() && bucket_name != obj_name) { continue; } if (show_stats) { bucket_stats(driver, user_id.tenant, obj_name, formatter, dpp, y); } else { formatter->dump_string("bucket", obj_name); } marker_cursor = &obj_name; } // for loop if (marker_cursor) { marker = *marker_cursor; } flusher.flush(); } while (buckets.is_truncated()); formatter->close_section(); } else if (!bucket_name.empty()) { ret = bucket_stats(driver, user_id.tenant, bucket_name, formatter, dpp, y); if (ret < 0) { return ret; } } else { void *handle = nullptr; bool truncated = true; formatter->open_array_section("buckets"); ret = driver->meta_list_keys_init(dpp, "bucket", string(), &handle); while (ret == 0 && truncated) { std::list<std::string> buckets; constexpr int max_keys = 1000; ret = driver->meta_list_keys_next(dpp, handle, max_keys, buckets, &truncated); for (auto& bucket_name : buckets) { if (show_stats) { bucket_stats(driver, user_id.tenant, bucket_name, formatter, dpp, y); } else { formatter->dump_string("bucket", bucket_name); } } } driver->meta_list_keys_complete(handle); formatter->close_section(); } flusher.flush(); return 0; } int RGWBucketAdminOp::set_quota(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y) { RGWBucket bucket; int ret = bucket.init(driver, op_state, y, dpp); if (ret < 0) return ret; return bucket.set_quota(op_state, dpp, y); } inline auto split_tenant(const std::string& bucket_name){ auto p = bucket_name.find('/'); if(p != std::string::npos) { return std::make_pair(bucket_name.substr(0,p), bucket_name.substr(p+1)); } return std::make_pair(std::string(), 
bucket_name); } using bucket_instance_ls = std::vector<RGWBucketInfo>; void get_stale_instances(rgw::sal::Driver* driver, const std::string& bucket_name, const vector<std::string>& lst, bucket_instance_ls& stale_instances, const DoutPrefixProvider *dpp, optional_yield y) { bucket_instance_ls other_instances; // first iterate over the entries, and pick up the done buckets; these // are guaranteed to be stale for (const auto& bucket_instance : lst){ RGWBucketInfo binfo; std::unique_ptr<rgw::sal::Bucket> bucket; rgw_bucket rbucket; rgw_bucket_parse_bucket_key(driver->ctx(), bucket_instance, &rbucket, nullptr); int r = driver->get_bucket(dpp, nullptr, rbucket, &bucket, y); if (r < 0){ // this can only happen if someone deletes us right when we're processing ldpp_dout(dpp, -1) << "Bucket instance is invalid: " << bucket_instance << cpp_strerror(-r) << dendl; continue; } binfo = bucket->get_info(); if (binfo.reshard_status == cls_rgw_reshard_status::DONE) stale_instances.emplace_back(std::move(binfo)); else { other_instances.emplace_back(std::move(binfo)); } } // Read the cur bucket info, if the bucket doesn't exist we can simply return // all the instances auto [tenant, bname] = split_tenant(bucket_name); RGWBucketInfo cur_bucket_info; std::unique_ptr<rgw::sal::Bucket> cur_bucket; int r = driver->get_bucket(dpp, nullptr, tenant, bname, &cur_bucket, y); if (r < 0) { if (r == -ENOENT) { // bucket doesn't exist, everything is stale then stale_instances.insert(std::end(stale_instances), std::make_move_iterator(other_instances.begin()), std::make_move_iterator(other_instances.end())); } else { // all bets are off if we can't read the bucket, just return the sureshot stale instances ldpp_dout(dpp, -1) << "error: reading bucket info for bucket: " << bname << cpp_strerror(-r) << dendl; } return; } // Don't process further in this round if bucket is resharding cur_bucket_info = cur_bucket->get_info(); if (cur_bucket_info.reshard_status == cls_rgw_reshard_status::IN_PROGRESS) 
return; other_instances.erase(std::remove_if(other_instances.begin(), other_instances.end(), [&cur_bucket_info](const RGWBucketInfo& b){ return (b.bucket.bucket_id == cur_bucket_info.bucket.bucket_id || b.bucket.bucket_id == cur_bucket_info.new_bucket_instance_id); }), other_instances.end()); // check if there are still instances left if (other_instances.empty()) { return; } // Now we have a bucket with instances where the reshard status is none, this // usually happens when the reshard process couldn't complete, lockdown the // bucket and walk through these instances to make sure no one else interferes // with these { RGWBucketReshardLock reshard_lock(static_cast<rgw::sal::RadosStore*>(driver), cur_bucket->get_info(), true); r = reshard_lock.lock(dpp); if (r < 0) { // most likely bucket is under reshard, return the sureshot stale instances ldpp_dout(dpp, 5) << __func__ << "failed to take reshard lock; reshard underway likey" << dendl; return; } auto sg = make_scope_guard([&reshard_lock](){ reshard_lock.unlock();} ); // this should be fast enough that we may not need to renew locks and check // exit status?, should we read the values of the instances again? 
stale_instances.insert(std::end(stale_instances), std::make_move_iterator(other_instances.begin()), std::make_move_iterator(other_instances.end())); } return; } static int process_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, std::function<void(const bucket_instance_ls&, Formatter *, rgw::sal::Driver*)> process_f, optional_yield y) { std::string marker; void *handle; Formatter *formatter = flusher.get_formatter(); static constexpr auto default_max_keys = 1000; int ret = driver->meta_list_keys_init(dpp, "bucket.instance", marker, &handle); if (ret < 0) { cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; } bool truncated; formatter->open_array_section("keys"); auto g = make_scope_guard([&driver, &handle, &formatter]() { driver->meta_list_keys_complete(handle); formatter->close_section(); // keys formatter->flush(cout); }); do { list<std::string> keys; ret = driver->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated); if (ret < 0 && ret != -ENOENT) { cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; return ret; } if (ret != -ENOENT) { // partition the list of buckets by buckets as the listing is un sorted, // since it would minimize the reads to bucket_info std::unordered_map<std::string, std::vector<std::string>> bucket_instance_map; for (auto &key: keys) { auto pos = key.find(':'); if(pos != std::string::npos) bucket_instance_map[key.substr(0,pos)].emplace_back(std::move(key)); } for (const auto& kv: bucket_instance_map) { bucket_instance_ls stale_lst; get_stale_instances(driver, kv.first, kv.second, stale_lst, dpp, y); process_f(stale_lst, formatter, driver); } } } while (truncated); return 0; } int RGWBucketAdminOp::list_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y) { auto process_f = [](const 
bucket_instance_ls& lst, Formatter *formatter, rgw::sal::Driver*){ for (const auto& binfo: lst) formatter->dump_string("key", binfo.bucket.get_key()); }; return process_stale_instances(driver, op_state, flusher, dpp, process_f, y); } int RGWBucketAdminOp::clear_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y) { auto process_f = [dpp, y](const bucket_instance_ls& lst, Formatter *formatter, rgw::sal::Driver* driver){ for (const auto &binfo: lst) { std::unique_ptr<rgw::sal::Bucket> bucket; driver->get_bucket(nullptr, binfo, &bucket); int ret = bucket->purge_instance(dpp, y); if (ret == 0){ auto md_key = "bucket.instance:" + binfo.bucket.get_key(); ret = driver->meta_remove(dpp, md_key, y); } formatter->open_object_section("delete_status"); formatter->dump_string("bucket_instance", binfo.bucket.get_key()); formatter->dump_int("status", -ret); formatter->close_section(); } }; return process_stale_instances(driver, op_state, flusher, dpp, process_f, y); } static int fix_single_bucket_lc(rgw::sal::Driver* driver, const std::string& tenant_name, const std::string& bucket_name, const DoutPrefixProvider *dpp, optional_yield y) { std::unique_ptr<rgw::sal::Bucket> bucket; int ret = driver->get_bucket(dpp, nullptr, tenant_name, bucket_name, &bucket, y); if (ret < 0) { // TODO: Should we handle the case where the bucket could've been removed between // listing and fetching? return ret; } return rgw::lc::fix_lc_shard_entry(dpp, driver, driver->get_rgwlc()->get_lc(), bucket.get()); } static void format_lc_status(Formatter* formatter, const std::string& tenant_name, const std::string& bucket_name, int status) { formatter->open_object_section("bucket_entry"); std::string entry = tenant_name.empty() ? 
bucket_name : tenant_name + "/" + bucket_name; formatter->dump_string("bucket", entry); formatter->dump_int("status", status); formatter->close_section(); // bucket_entry } static void process_single_lc_entry(rgw::sal::Driver* driver, Formatter *formatter, const std::string& tenant_name, const std::string& bucket_name, const DoutPrefixProvider *dpp, optional_yield y) { int ret = fix_single_bucket_lc(driver, tenant_name, bucket_name, dpp, y); format_lc_status(formatter, tenant_name, bucket_name, -ret); } int RGWBucketAdminOp::fix_lc_shards(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y) { std::string marker; void *handle; Formatter *formatter = flusher.get_formatter(); static constexpr auto default_max_keys = 1000; bool truncated; if (const std::string& bucket_name = op_state.get_bucket_name(); ! bucket_name.empty()) { const rgw_user user_id = op_state.get_user_id(); process_single_lc_entry(driver, formatter, user_id.tenant, bucket_name, dpp, y); formatter->flush(cout); } else { int ret = driver->meta_list_keys_init(dpp, "bucket", marker, &handle); if (ret < 0) { std::cerr << "ERROR: can't get key: " << cpp_strerror(-ret) << std::endl; return ret; } { formatter->open_array_section("lc_fix_status"); auto sg = make_scope_guard([&driver, &handle, &formatter](){ driver->meta_list_keys_complete(handle); formatter->close_section(); // lc_fix_status formatter->flush(cout); }); do { list<std::string> keys; ret = driver->meta_list_keys_next(dpp, handle, default_max_keys, keys, &truncated); if (ret < 0 && ret != -ENOENT) { std::cerr << "ERROR: lists_keys_next(): " << cpp_strerror(-ret) << std::endl; return ret; } if (ret != -ENOENT) { for (const auto &key:keys) { auto [tenant_name, bucket_name] = split_tenant(key); process_single_lc_entry(driver, formatter, tenant_name, bucket_name, dpp, y); } } formatter->flush(cout); // regularly flush every 1k entries } while (truncated); } } return 
0; } static bool has_object_expired(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, const rgw_obj_key& key, utime_t& delete_at, optional_yield y) { std::unique_ptr<rgw::sal::Object> obj = bucket->get_object(key); bufferlist delete_at_bl; int ret = rgw_object_get_attr(dpp, driver, obj.get(), RGW_ATTR_DELETE_AT, delete_at_bl, y); if (ret < 0) { return false; // no delete at attr, proceed } ret = decode_bl(delete_at_bl, delete_at); if (ret < 0) { return false; // failed to parse } if (delete_at <= ceph_clock_now() && !delete_at.is_zero()) { return true; } return false; } static int fix_bucket_obj_expiry(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, RGWFormatterFlusher& flusher, bool dry_run, optional_yield y) { if (bucket->get_key().bucket_id == bucket->get_key().marker) { ldpp_dout(dpp, -1) << "Not a resharded bucket skipping" << dendl; return 0; // not a resharded bucket, move along } Formatter *formatter = flusher.get_formatter(); formatter->open_array_section("expired_deletion_status"); auto sg = make_scope_guard([&formatter] { formatter->close_section(); formatter->flush(std::cout); }); rgw::sal::Bucket::ListParams params; rgw::sal::Bucket::ListResults results; params.list_versions = bucket->versioned(); params.allow_unordered = true; do { int ret = bucket->list(dpp, params, listing_max_entries, results, y); if (ret < 0) { ldpp_dout(dpp, -1) << "ERROR failed to list objects in the bucket" << dendl; return ret; } for (const auto& obj : results.objs) { rgw_obj_key key(obj.key); utime_t delete_at; if (has_object_expired(dpp, driver, bucket, key, delete_at, y)) { formatter->open_object_section("object_status"); formatter->dump_string("object", key.name); formatter->dump_stream("delete_at") << delete_at; if (!dry_run) { ret = rgw_remove_object(dpp, driver, bucket, key, y); formatter->dump_int("status", ret); } formatter->close_section(); // object_status } } formatter->flush(cout); // regularly 
flush every 1k entries } while (results.is_truncated); return 0; } int RGWBucketAdminOp::fix_obj_expiry(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y, bool dry_run) { RGWBucket admin_bucket; int ret = admin_bucket.init(driver, op_state, y, dpp); if (ret < 0) { ldpp_dout(dpp, -1) << "failed to initialize bucket" << dendl; return ret; } std::unique_ptr<rgw::sal::Bucket> bucket; ret = driver->get_bucket(nullptr, admin_bucket.get_bucket_info(), &bucket); if (ret < 0) { return ret; } return fix_bucket_obj_expiry(dpp, driver, bucket.get(), flusher, dry_run, y); } void RGWBucketCompleteInfo::dump(Formatter *f) const { encode_json("bucket_info", info, f); encode_json("attrs", attrs, f); } void RGWBucketCompleteInfo::decode_json(JSONObj *obj) { JSONDecoder::decode_json("bucket_info", info, obj); JSONDecoder::decode_json("attrs", attrs, obj); } class RGWBucketMetadataHandler : public RGWBucketMetadataHandlerBase { public: struct Svc { RGWSI_Bucket *bucket{nullptr}; } svc; struct Ctl { RGWBucketCtl *bucket{nullptr}; } ctl; RGWBucketMetadataHandler() {} void init(RGWSI_Bucket *bucket_svc, RGWBucketCtl *bucket_ctl) override { base_init(bucket_svc->ctx(), bucket_svc->get_ep_be_handler().get()); svc.bucket = bucket_svc; ctl.bucket = bucket_ctl; } string get_type() override { return "bucket"; } RGWMetadataObject *get_meta_obj(JSONObj *jo, const obj_version& objv, const ceph::real_time& mtime) override { RGWBucketEntryPoint be; try { decode_json_obj(be, jo); } catch (JSONDecoder::err& e) { return nullptr; } return new RGWBucketEntryMetadataObject(be, objv, mtime); } int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override { RGWObjVersionTracker ot; RGWBucketEntryPoint be; real_time mtime; map<string, bufferlist> attrs; RGWSI_Bucket_EP_Ctx ctx(op->ctx()); int ret = 
svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &ot, &mtime, &attrs, y, dpp); if (ret < 0) return ret; RGWBucketEntryMetadataObject *mdo = new RGWBucketEntryMetadataObject(be, ot.read_version, mtime, std::move(attrs)); *obj = mdo; return 0; } int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) override; int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) override { RGWBucketEntryPoint be; real_time orig_mtime; RGWSI_Bucket_EP_Ctx ctx(op->ctx()); int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &objv_tracker, &orig_mtime, nullptr, y, dpp); if (ret < 0) return ret; /* * We're unlinking the bucket but we don't want to update the entrypoint here - we're removing * it immediately and don't want to invalidate our cached objv_version or the bucket obj removal * will incorrectly fail. 
*/ ret = ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false); if (ret < 0) { ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl; } ret = svc.bucket->remove_bucket_entrypoint_info(ctx, entry, &objv_tracker, y, dpp); if (ret < 0) { ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl; } /* idempotent */ return 0; } int call(std::function<int(RGWSI_Bucket_EP_Ctx& ctx)> f) { return call(nullopt, f); } int call(std::optional<RGWSI_MetaBackend_CtxParams> bectx_params, std::function<int(RGWSI_Bucket_EP_Ctx& ctx)> f) { return be_handler->call(bectx_params, [&](RGWSI_MetaBackend_Handler::Op *op) { RGWSI_Bucket_EP_Ctx ctx(op->ctx()); return f(ctx); }); } }; class RGWMetadataHandlerPut_Bucket : public RGWMetadataHandlerPut_SObj { RGWBucketMetadataHandler *bhandler; RGWBucketEntryMetadataObject *obj; public: RGWMetadataHandlerPut_Bucket(RGWBucketMetadataHandler *_handler, RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *_obj, RGWObjVersionTracker& objv_tracker, optional_yield y, RGWMDLogSyncType type, bool from_remote_zone) : RGWMetadataHandlerPut_SObj(_handler, op, entry, obj, objv_tracker, y, type, from_remote_zone), bhandler(_handler) { obj = static_cast<RGWBucketEntryMetadataObject *>(_obj); } ~RGWMetadataHandlerPut_Bucket() {} void encode_obj(bufferlist *bl) override { obj->get_ep().encode(*bl); } int put_checked(const DoutPrefixProvider *dpp) override; int put_post(const DoutPrefixProvider *dpp) override; }; int RGWBucketMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject *obj, RGWObjVersionTracker& objv_tracker, optional_yield y, const DoutPrefixProvider *dpp, RGWMDLogSyncType type, bool from_remote_zone) { RGWMetadataHandlerPut_Bucket put_op(this, op, entry, obj, objv_tracker, y, type, from_remote_zone); return do_put_operate(&put_op, dpp); } int RGWMetadataHandlerPut_Bucket::put_checked(const DoutPrefixProvider *dpp) { 
RGWBucketEntryMetadataObject *orig_obj = static_cast<RGWBucketEntryMetadataObject *>(old_obj); if (orig_obj) { obj->set_pattrs(&orig_obj->get_attrs()); } auto& be = obj->get_ep(); auto mtime = obj->get_mtime(); auto pattrs = obj->get_pattrs(); RGWSI_Bucket_EP_Ctx ctx(op->ctx()); return bhandler->svc.bucket->store_bucket_entrypoint_info(ctx, entry, be, false, mtime, pattrs, &objv_tracker, y, dpp); } int RGWMetadataHandlerPut_Bucket::put_post(const DoutPrefixProvider *dpp) { auto& be = obj->get_ep(); int ret; /* link bucket */ if (be.linked) { ret = bhandler->ctl.bucket->link_bucket(be.owner, be.bucket, be.creation_time, y, dpp, false); } else { ret = bhandler->ctl.bucket->unlink_bucket(be.owner, be.bucket, y, dpp, false); } return ret; } static void get_md5_digest(const RGWBucketEntryPoint *be, string& md5_digest) { char md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE]; bufferlist bl; Formatter *f = new JSONFormatter(false); be->dump(f); f->flush(bl); MD5 hash; // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); hash.Update((const unsigned char *)bl.c_str(), bl.length()); hash.Final(m); buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, md5); delete f; md5_digest = md5; } #define ARCHIVE_META_ATTR RGW_ATTR_PREFIX "zone.archive.info" struct archive_meta_info { rgw_bucket orig_bucket; bool from_attrs(CephContext *cct, map<string, bufferlist>& attrs) { auto iter = attrs.find(ARCHIVE_META_ATTR); if (iter == attrs.end()) { return false; } auto bliter = iter->second.cbegin(); try { decode(bliter); } catch (buffer::error& err) { ldout(cct, 0) << "ERROR: failed to decode archive meta info" << dendl; return false; } return true; } void store_in_attrs(map<string, bufferlist>& attrs) const { encode(attrs[ARCHIVE_META_ATTR]); } void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(orig_bucket, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { 
    DECODE_START(1, bl);
    decode(orig_bucket, bl);
    DECODE_FINISH(bl);
  }
};
WRITE_CLASS_ENCODER(archive_meta_info)

// Bucket metadata handler for the archive zone: bucket deletion is turned
// into a rename so the archived data is preserved.
class RGWArchiveBucketMetadataHandler : public RGWBucketMetadataHandler {
public:
  RGWArchiveBucketMetadataHandler() {}

  // Instead of removing the bucket, rename it to "<orig>-deleted-<md5>" by
  // copying its instance + entrypoint under the new name, linking the new
  // bucket, then unlinking/removing the old entrypoint and instance.
  int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker,
                optional_yield y, const DoutPrefixProvider *dpp) override {
    auto cct = svc.bucket->ctx();

    RGWSI_Bucket_EP_Ctx ctx(op->ctx());

    ldpp_dout(dpp, 5) << "SKIP: bucket removal is not allowed on archive zone: bucket:" << entry << " ... proceeding to rename" << dendl;

    string tenant_name, bucket_name;
    parse_bucket(entry, &tenant_name, &bucket_name);
    rgw_bucket entry_bucket;
    entry_bucket.tenant = tenant_name;
    entry_bucket.name = bucket_name;

    real_time mtime;

    /* read original entrypoint */

    RGWBucketEntryPoint be;
    map<string, bufferlist> attrs;
    int ret = svc.bucket->read_bucket_entrypoint_info(ctx, entry, &be, &objv_tracker, &mtime, &attrs, y, dpp);
    if (ret < 0) {
        return ret;
    }

    string bi_meta_name = RGWSI_Bucket::get_bi_meta_key(be.bucket);

    /* read original bucket instance info */

    map<string, bufferlist> attrs_m;
    ceph::real_time orig_mtime;
    RGWBucketInfo old_bi;

    ret = ctl.bucket->read_bucket_instance_info(be.bucket, &old_bi, y, dpp, RGWBucketCtl::BucketInstance::GetParams()
                                                                    .set_mtime(&orig_mtime)
                                                                    .set_attrs(&attrs_m));
    if (ret < 0) {
        return ret;
    }

    // remember the original bucket name the first time we archive it
    archive_meta_info ami;

    if (!ami.from_attrs(svc.bucket->ctx(), attrs_m)) {
      ami.orig_bucket = old_bi.bucket;
      ami.store_in_attrs(attrs_m);
    }

    /* generate a new bucket instance.
     * We could have avoided this if we could just point a new
     * bucket entry point to the old bucket instance, however, due to limitation in the way
     * we index buckets under the user, bucket entrypoint and bucket instance of the same
     * bucket need to have the same name, so we need to copy the old bucket instance into
     * to a new entry with the new name
     */

    string new_bucket_name;

    RGWBucketInfo new_bi = old_bi;
    RGWBucketEntryPoint new_be = be;

    string md5_digest;

    get_md5_digest(&new_be, md5_digest);
    new_bucket_name = ami.orig_bucket.name + "-deleted-" + md5_digest;

    new_bi.bucket.name = new_bucket_name;
    new_bi.objv_tracker.clear();

    new_be.bucket.name = new_bucket_name;

    ret = ctl.bucket->store_bucket_instance_info(new_be.bucket, new_bi, y, dpp, RGWBucketCtl::BucketInstance::PutParams()
                                                                    .set_exclusive(false)
                                                                    .set_mtime(orig_mtime)
                                                                    .set_attrs(&attrs_m)
                                                                    .set_orig_info(&old_bi));
    if (ret < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket instance info for bucket=" << new_bi.bucket << " ret=" << ret << dendl;
      return ret;
    }

    /* store a new entrypoint */

    // NOTE(review): 'ot' is generated here but nullptr is passed below as the
    // version tracker — confirm that is intentional.
    RGWObjVersionTracker ot;
    ot.generate_new_write_ver(cct);

    ret = svc.bucket->store_bucket_entrypoint_info(ctx, RGWSI_Bucket::get_entrypoint_meta_key(new_be.bucket),
                                                   new_be, true, mtime, &attrs, nullptr, y, dpp);
    if (ret < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl;
      return ret;
    }

    /* link new bucket */

    ret = ctl.bucket->link_bucket(new_be.owner, new_be.bucket, new_be.creation_time, y, dpp, false);
    if (ret < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to link new bucket for bucket=" << new_be.bucket << " ret=" << ret << dendl;
      return ret;
    }

    /* clean up old stuff */

    ret = ctl.bucket->unlink_bucket(be.owner, entry_bucket, y, dpp, false);
    if (ret < 0) {
        ldpp_dout(dpp, -1) << "could not unlink bucket=" << entry << " owner=" << be.owner << dendl;
    }

    // if (ret == -ECANCELED) it means that there was a race here, and someone
    // wrote to the bucket entrypoint just before we removed it. The question is
    // whether it was a newly created bucket entrypoint ... in which case we
    // should ignore the error and move forward, or whether it is a higher version
    // of the same bucket instance ... in which we should retry
    ret = svc.bucket->remove_bucket_entrypoint_info(ctx,
                                                    RGWSI_Bucket::get_entrypoint_meta_key(be.bucket),
                                                    &objv_tracker,
                                                    y,
                                                    dpp);
    if (ret < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to put new bucket entrypoint for bucket=" << new_be.bucket << " ret=" << ret << dendl;
      return ret;
    }

    ret = ctl.bucket->remove_bucket_instance_info(be.bucket, old_bi, y, dpp);
    if (ret < 0) {
      ldpp_dout(dpp, -1) << "could not delete bucket=" << entry << dendl;
    }

    /* idempotent */

    return 0;
  }

  // Re-creating a previously archived ("-deleted-") entry: drop the stale
  // renamed copy first, then fall through to the normal put path.
  int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry,
             RGWMetadataObject *obj,
             RGWObjVersionTracker& objv_tracker,
             optional_yield y, const DoutPrefixProvider *dpp,
             RGWMDLogSyncType type, bool from_remote_zone) override {
    if (entry.find("-deleted-") != string::npos) {
      RGWObjVersionTracker ot;
      RGWMetadataObject *robj;
      int ret = do_get(op, entry, &robj, y, dpp);
      if (ret != -ENOENT) {
        if (ret < 0) {
          return ret;
        }
        ot.read_version = robj->get_version();
        delete robj;

        ret = do_remove(op, entry, ot, y, dpp);
        if (ret < 0) {
          return ret;
        }
      }
    }

    return RGWBucketMetadataHandler::do_put(op, entry, obj, objv_tracker,
                                            y, dpp, type, from_remote_zone);
  }

};

// Metadata handler for "bucket.instance" entries: get/put/remove
// RGWBucketInfo records (plus attrs) through the bucket service.
class RGWBucketInstanceMetadataHandler : public RGWBucketInstanceMetadataHandlerBase {
  int read_bucket_instance_entry(RGWSI_Bucket_BI_Ctx& ctx,
                                 const string& entry,
                                 RGWBucketCompleteInfo *bi,
                                 ceph::real_time *pmtime,
                                 optional_yield y,
                                 const DoutPrefixProvider *dpp) {
    return svc.bucket->read_bucket_instance_info(ctx,
                                                 entry,
                                                 &bi->info,
                                                 pmtime, &bi->attrs,
                                                 y,
                                                 dpp);
  }

public:
  struct Svc {
    RGWSI_Zone *zone{nullptr};
    RGWSI_Bucket *bucket{nullptr};
    RGWSI_BucketIndex *bi{nullptr};
  } svc;

  rgw::sal::Driver* driver;

  RGWBucketInstanceMetadataHandler(rgw::sal::Driver* driver)
    : driver(driver) {}

  void init(RGWSI_Zone *zone_svc,
            RGWSI_Bucket *bucket_svc,
            RGWSI_BucketIndex *bi_svc) override {
    base_init(bucket_svc->ctx(),
              bucket_svc->get_bi_be_handler().get());
    svc.zone = zone_svc;
    svc.bucket = bucket_svc;
    svc.bi = bi_svc;
  }

  string get_type() override { return "bucket.instance"; }

  RGWMetadataObject *get_meta_obj(JSONObj *jo, const obj_version& objv, const ceph::real_time& mtime) override {
    RGWBucketCompleteInfo bci;
    try {
      decode_json_obj(bci, jo);
    } catch (JSONDecoder::err& e) {
      // malformed JSON -> no metadata object
      return nullptr;
    }

    return new RGWBucketInstanceMetadataObject(bci, objv, mtime);
  }

  int do_get(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWMetadataObject **obj, optional_yield y, const DoutPrefixProvider *dpp) override {
    RGWBucketCompleteInfo bci;
    real_time mtime;

    RGWSI_Bucket_BI_Ctx ctx(op->ctx());

    int ret = svc.bucket->read_bucket_instance_info(ctx, entry, &bci.info, &mtime, &bci.attrs, y, dpp);
    if (ret < 0)
      return ret;

    RGWBucketInstanceMetadataObject *mdo = new RGWBucketInstanceMetadataObject(bci, bci.info.objv_tracker.read_version, mtime);

    *obj = mdo;

    return 0;
  }

  int do_put(RGWSI_MetaBackend_Handler::Op *op, string& entry,
             RGWMetadataObject *_obj,
             RGWObjVersionTracker& objv_tracker,
             optional_yield y, const DoutPrefixProvider *dpp,
             RGWMDLogSyncType sync_type, bool from_remote_zone) override;

  int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker,
                optional_yield y, const DoutPrefixProvider *dpp) override {
    RGWBucketCompleteInfo bci;

    RGWSI_Bucket_BI_Ctx ctx(op->ctx());

    int ret = read_bucket_instance_entry(ctx, entry, &bci, nullptr, y, dpp);
    if (ret < 0 && ret != -ENOENT)
      return ret;

    return svc.bucket->remove_bucket_instance_info(ctx, entry, bci.info, &bci.info.objv_tracker, y, dpp);
  }

  int call(std::function<int(RGWSI_Bucket_BI_Ctx& ctx)> f) {
    return call(nullopt, f);
  }

  int call(std::optional<RGWSI_MetaBackend_CtxParams> bectx_params,
           std::function<int(RGWSI_Bucket_BI_Ctx& ctx)> f) {
    return be_handler->call(bectx_params, [&](RGWSI_MetaBackend_Handler::Op *op) {
      RGWSI_Bucket_BI_Ctx ctx(op->ctx());
      return f(ctx);
    });
  }
};

// Put-operation state for bucket-instance metadata.
class RGWMetadataHandlerPut_BucketInstance : public RGWMetadataHandlerPut_SObj {
  CephContext *cct;
  RGWBucketInstanceMetadataHandler *bihandler;
  RGWBucketInstanceMetadataObject *obj;
public:
  RGWMetadataHandlerPut_BucketInstance(CephContext *_cct,
                                       RGWBucketInstanceMetadataHandler *_handler,
                                       RGWSI_MetaBackend_Handler::Op *_op, string& entry,
                                       RGWMetadataObject *_obj, RGWObjVersionTracker& objv_tracker,
                                       optional_yield y,
                                       RGWMDLogSyncType type, bool from_remote_zone) : RGWMetadataHandlerPut_SObj(_handler, _op, entry, _obj, objv_tracker, y, type, from_remote_zone),
                                                                                       cct(_cct), bihandler(_handler) {
    obj = static_cast<RGWBucketInstanceMetadataObject *>(_obj);

    auto& bci = obj->get_bci();
    obj->set_pattrs(&bci.attrs);
  }

  void encode_obj(bufferlist *bl) override {
    obj->get_bucket_info().encode(*bl);
  }

  int put_check(const DoutPrefixProvider *dpp) override;
  int put_checked(const DoutPrefixProvider *dpp) override;
  int put_post(const DoutPrefixProvider *dpp) override;
};

int RGWBucketInstanceMetadataHandler::do_put(RGWSI_MetaBackend_Handler::Op *op,
                                             string& entry,
                                             RGWMetadataObject *obj,
                                             RGWObjVersionTracker& objv_tracker,
                                             optional_yield y,
                                             const DoutPrefixProvider *dpp,
                                             RGWMDLogSyncType type, bool from_remote_zone)
{
  RGWMetadataHandlerPut_BucketInstance put_op(svc.bucket->ctx(), this, op, entry, obj,
                                              objv_tracker, y, type, from_remote_zone);
  return do_put_operate(&put_op, dpp);
}

// Populate a default bucket-index layout: shard count comes from the explicit
// 'shards' argument, then the rgw_override_bucket_index_max_shards conf
// option, then the zone's bucket_index_max_shards.
void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout,
                                const RGWZone& zone,
                                std::optional<uint32_t> shards,
                                std::optional<rgw::BucketIndexType> type) {
  layout.current_index.gen = 0;
  layout.current_index.layout.normal.hash_type = rgw::BucketHashType::Mod;

  layout.current_index.layout.type =
    type.value_or(rgw::BucketIndexType::Normal);

  if (shards) {
    layout.current_index.layout.normal.num_shards = *shards;
  } else if (cct->_conf->rgw_override_bucket_index_max_shards > 0) {
    layout.current_index.layout.normal.num_shards =
      cct->_conf->rgw_override_bucket_index_max_shards;
  } else {
    layout.current_index.layout.normal.num_shards =
      zone.bucket_index_max_shards;
  }

  if (layout.current_index.layout.type == rgw::BucketIndexType::Normal) {
    layout.logs.push_back(log_layout_from_index(0, layout.current_index));
  }
}

// Pre-write validation/fixup of an incoming bucket-instance record: keep the
// local index layout for remote-zone syncs, pick a placement for new buckets,
// and preserve placement (and archive-zone versioning) for existing ones.
int RGWMetadataHandlerPut_BucketInstance::put_check(const DoutPrefixProvider *dpp)
{
  int ret;

  RGWBucketCompleteInfo& bci = obj->get_bci();

  RGWBucketInstanceMetadataObject *orig_obj = static_cast<RGWBucketInstanceMetadataObject *>(old_obj);

  RGWBucketCompleteInfo *old_bci = (orig_obj ? &orig_obj->get_bci() : nullptr);

  const bool exists = (!!orig_obj);

  if (from_remote_zone) {
    // don't sync bucket layout changes
    if (!exists) {
      // replace peer's layout with default-constructed, then apply our defaults
      bci.info.layout = rgw::BucketLayout{};
      init_default_bucket_layout(cct, bci.info.layout,
                                 bihandler->svc.zone->get_zone(),
                                 std::nullopt, std::nullopt);
    } else {
      bci.info.layout = old_bci->info.layout;
    }
  }

  if (!exists || old_bci->info.bucket.bucket_id != bci.info.bucket.bucket_id) {
    /* a new bucket, we need to select a new bucket placement for it */
    string tenant_name;
    string bucket_name;
    string bucket_instance;
    parse_bucket(entry, &tenant_name, &bucket_name, &bucket_instance);

    RGWZonePlacementInfo rule_info;
    bci.info.bucket.name = bucket_name;
    bci.info.bucket.bucket_id = bucket_instance;
    bci.info.bucket.tenant = tenant_name;
    // if the sync module never writes data, don't require the zone to specify all placement targets
    if (bihandler->svc.zone->sync_module_supports_writes()) {
      ret = bihandler->svc.zone->select_bucket_location_by_rule(dpp, bci.info.placement_rule, &rule_info, y);
      if (ret < 0) {
        ldpp_dout(dpp, 0) << "ERROR: select_bucket_placement() returned " << ret << dendl;
        return ret;
      }
    }
    // NOTE(review): if sync_module_supports_writes() is false, rule_info is
    // default-constructed here — confirm that index_type default is intended.
    bci.info.layout.current_index.layout.type = rule_info.index_type;
  } else {
    /* always keep bucket versioning enabled on archive zone */
    if (bihandler->driver->get_zone()->get_tier_type() == "archive") {
      bci.info.flags = (bci.info.flags & ~BUCKET_VERSIONS_SUSPENDED) | BUCKET_VERSIONED;
    }

    /* existing bucket, keep its placement */
    bci.info.bucket.explicit_placement = old_bci->info.bucket.explicit_placement;
    bci.info.placement_rule = old_bci->info.placement_rule;
  }

  /* record the read version (if any), store the new version */
  bci.info.objv_tracker.read_version = objv_tracker.read_version;
  bci.info.objv_tracker.write_version = objv_tracker.write_version;

  return 0;
}

// Store the (validated) bucket-instance record through the bucket service.
int RGWMetadataHandlerPut_BucketInstance::put_checked(const DoutPrefixProvider *dpp)
{
  RGWBucketInstanceMetadataObject *orig_obj = static_cast<RGWBucketInstanceMetadataObject *>(old_obj);

  RGWBucketInfo *orig_info = (orig_obj ? &orig_obj->get_bucket_info() : nullptr);

  auto& info = obj->get_bucket_info();
  auto mtime = obj->get_mtime();
  auto pattrs = obj->get_pattrs();

  RGWSI_Bucket_BI_Ctx ctx(op->ctx());

  return bihandler->svc.bucket->store_bucket_instance_info(ctx,
                                                           entry,
                                                           info,
                                                           orig_info,
                                                           false,
                                                           mtime,
                                                           pattrs,
                                                           y,
                                                           dpp);
}

// After the instance was written: initialize the bucket index and apply (or
// remove) the lifecycle configuration carried in the instance attrs.
int RGWMetadataHandlerPut_BucketInstance::put_post(const DoutPrefixProvider *dpp)
{
  RGWBucketCompleteInfo& bci = obj->get_bci();

  objv_tracker = bci.info.objv_tracker;

  int ret = bihandler->svc.bi->init_index(dpp, bci.info, bci.info.layout.current_index);
  if (ret < 0) {
    return ret;
  }

  /* update lifecycle policy */
  {
    std::unique_ptr<rgw::sal::Bucket> bucket;
    ret = bihandler->driver->get_bucket(nullptr, bci.info, &bucket);
    if (ret < 0) {
      ldpp_dout(dpp, 0) << __func__ << " failed to get_bucket(...) for "
                        << bci.info.bucket.name << dendl;
      return ret;
    }

    auto lc = bihandler->driver->get_rgwlc();

    auto lc_it = bci.attrs.find(RGW_ATTR_LC);
    if (lc_it != bci.attrs.end()) {
      ldpp_dout(dpp, 20) << "set lc config for " << bci.info.bucket.name << dendl;
      ret = lc->set_bucket_config(bucket.get(), bci.attrs, nullptr);
      if (ret < 0) {
        ldpp_dout(dpp, 0) << __func__ << " failed to set lc config for "
                          << bci.info.bucket.name
                          << dendl;
        return ret;
      }

    } else {
      ldpp_dout(dpp, 20) << "remove lc config for " << bci.info.bucket.name << dendl;
      ret = lc->remove_bucket_config(bucket.get(), bci.attrs, false /* cannot merge attrs */);
      if (ret < 0) {
        ldpp_dout(dpp, 0) << __func__ << " failed to remove lc config for "
                          << bci.info.bucket.name
                          << dendl;
        return ret;
      }
    }
  } /* update lc */

  return STATUS_APPLIED;
}

// Bucket-instance metadata handler for the archive zone: instance removal is
// a no-op so archived data is never dropped.
class RGWArchiveBucketInstanceMetadataHandler : public RGWBucketInstanceMetadataHandler {
public:
  RGWArchiveBucketInstanceMetadataHandler(rgw::sal::Driver* driver)
    : RGWBucketInstanceMetadataHandler(driver) {}

  // N.B.
  // replication of lifecycle policy relies on logic in RGWBucketInstanceMetadataHandler::do_put(...), override with caution

  int do_remove(RGWSI_MetaBackend_Handler::Op *op, string& entry, RGWObjVersionTracker& objv_tracker,
                optional_yield y, const DoutPrefixProvider *dpp) override {
    ldpp_dout(dpp, 0) << "SKIP: bucket instance removal is not allowed on archive zone: bucket.instance:" << entry << dendl;
    return 0;
  }
};

// Controller gluing the zone/bucket/bucket-sync/bucket-index/user services
// together for bucket entrypoint + instance metadata operations.
RGWBucketCtl::RGWBucketCtl(RGWSI_Zone *zone_svc,
                           RGWSI_Bucket *bucket_svc,
                           RGWSI_Bucket_Sync *bucket_sync_svc,
                           RGWSI_BucketIndex *bi_svc,
                           RGWSI_User* user_svc)
  : cct(zone_svc->ctx())
{
  svc.zone = zone_svc;
  svc.bucket = bucket_svc;
  svc.bucket_sync = bucket_sync_svc;
  svc.bi = bi_svc;
  svc.user = user_svc;
}

void RGWBucketCtl::init(RGWUserCtl *user_ctl,
                        RGWBucketMetadataHandler *_bm_handler,
                        RGWBucketInstanceMetadataHandler *_bmi_handler,
                        RGWDataChangesLog *datalog,
                        const DoutPrefixProvider *dpp)
{
  ctl.user = user_ctl;

  bm_handler = _bm_handler;
  bmi_handler = _bmi_handler;

  bucket_be_handler = bm_handler->get_be_handler();
  bi_be_handler = bmi_handler->get_be_handler();

  // only log data changes for buckets that actually export data
  datalog->set_bucket_filter(
    [this](const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp) {
      return bucket_exports_data(bucket, y, dpp);
    });
}

// Run 'f' with both an entrypoint and a bucket-instance backend context.
int RGWBucketCtl::call(std::function<int(RGWSI_Bucket_X_Ctx& ctx)> f) {
  return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ep_ctx) {
    return bmi_handler->call([&](RGWSI_Bucket_BI_Ctx& bi_ctx) {
      RGWSI_Bucket_X_Ctx ctx{ep_ctx, bi_ctx};
      return f(ctx);
    });
  });
}

int RGWBucketCtl::read_bucket_entrypoint_info(const rgw_bucket& bucket,
                                              RGWBucketEntryPoint *info,
                                              optional_yield y, const DoutPrefixProvider *dpp,
                                              const Bucket::GetParams& params)
{
  return bm_handler->call(params.bectx_params, [&](RGWSI_Bucket_EP_Ctx& ctx) {
    return svc.bucket->read_bucket_entrypoint_info(ctx,
                                                   RGWSI_Bucket::get_entrypoint_meta_key(bucket),
                                                   info,
                                                   params.objv_tracker,
                                                   params.mtime,
                                                   params.attrs,
                                                   y,
                                                   dpp,
                                                   params.cache_info,
                                                   params.refresh_version);
  });
}

int
RGWBucketCtl::store_bucket_entrypoint_info(const rgw_bucket& bucket,
                                           RGWBucketEntryPoint& info,
                                           optional_yield y,
                                           const DoutPrefixProvider *dpp,
                                           const Bucket::PutParams& params)
{
  return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) {
    return svc.bucket->store_bucket_entrypoint_info(ctx,
                                                    RGWSI_Bucket::get_entrypoint_meta_key(bucket),
                                                    info,
                                                    params.exclusive,
                                                    params.mtime,
                                                    params.attrs,
                                                    params.objv_tracker,
                                                    y,
                                                    dpp);
  });
}

int RGWBucketCtl::remove_bucket_entrypoint_info(const rgw_bucket& bucket,
                                                optional_yield y,
                                                const DoutPrefixProvider *dpp,
                                                const Bucket::RemoveParams& params)
{
  return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) {
    return svc.bucket->remove_bucket_entrypoint_info(ctx,
                                                     RGWSI_Bucket::get_entrypoint_meta_key(bucket),
                                                     params.objv_tracker,
                                                     y,
                                                     dpp);
  });
}

int RGWBucketCtl::read_bucket_instance_info(const rgw_bucket& bucket,
                                            RGWBucketInfo *info,
                                            optional_yield y,
                                            const DoutPrefixProvider *dpp,
                                            const BucketInstance::GetParams& params)
{
  int ret = bmi_handler->call(params.bectx_params, [&](RGWSI_Bucket_BI_Ctx& ctx) {
    return svc.bucket->read_bucket_instance_info(ctx,
                                                 RGWSI_Bucket::get_bi_meta_key(bucket),
                                                 info,
                                                 params.mtime,
                                                 params.attrs,
                                                 y,
                                                 dpp,
                                                 params.cache_info,
                                                 params.refresh_version);
  });

  if (ret < 0) {
    return ret;
  }

  if (params.objv_tracker) {
    *params.objv_tracker = info->objv_tracker;
  }

  return 0;
}

// Resolve 'bucket' to its instance info; if no bucket_id is given, the
// entrypoint is read first to find the current instance.
int RGWBucketCtl::read_bucket_info(const rgw_bucket& bucket,
                                   RGWBucketInfo *info,
                                   optional_yield y,
                                   const DoutPrefixProvider *dpp,
                                   const BucketInstance::GetParams& params,
                                   RGWObjVersionTracker *ep_objv_tracker)
{
  const rgw_bucket *b = &bucket;

  std::optional<RGWBucketEntryPoint> ep;

  if (b->bucket_id.empty()) {
    ep.emplace();

    int r = read_bucket_entrypoint_info(*b, &(*ep), y, dpp, RGWBucketCtl::Bucket::GetParams()
                                                    .set_bectx_params(params.bectx_params)
                                                    .set_objv_tracker(ep_objv_tracker));
    if (r < 0) {
      return r;
    }

    b = &ep->bucket;
  }

  int ret = bmi_handler->call(params.bectx_params, [&](RGWSI_Bucket_BI_Ctx& ctx) {
    return svc.bucket->read_bucket_instance_info(ctx,
RGWSI_Bucket::get_bi_meta_key(*b), info, params.mtime, params.attrs, y, dpp, params.cache_info, params.refresh_version); }); if (ret < 0) { return ret; } if (params.objv_tracker) { *params.objv_tracker = info->objv_tracker; } return 0; } int RGWBucketCtl::do_store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx, const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, const DoutPrefixProvider *dpp, const BucketInstance::PutParams& params) { if (params.objv_tracker) { info.objv_tracker = *params.objv_tracker; } return svc.bucket->store_bucket_instance_info(ctx, RGWSI_Bucket::get_bi_meta_key(bucket), info, params.orig_info, params.exclusive, params.mtime, params.attrs, y, dpp); } int RGWBucketCtl::store_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, const DoutPrefixProvider *dpp, const BucketInstance::PutParams& params) { return bmi_handler->call([&](RGWSI_Bucket_BI_Ctx& ctx) { return do_store_bucket_instance_info(ctx, bucket, info, y, dpp, params); }); } int RGWBucketCtl::remove_bucket_instance_info(const rgw_bucket& bucket, RGWBucketInfo& info, optional_yield y, const DoutPrefixProvider *dpp, const BucketInstance::RemoveParams& params) { if (params.objv_tracker) { info.objv_tracker = *params.objv_tracker; } return bmi_handler->call([&](RGWSI_Bucket_BI_Ctx& ctx) { return svc.bucket->remove_bucket_instance_info(ctx, RGWSI_Bucket::get_bi_meta_key(bucket), info, &info.objv_tracker, y, dpp); }); } int RGWBucketCtl::do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx, RGWBucketInfo& info, RGWBucketInfo *orig_info, bool exclusive, real_time mtime, obj_version *pep_objv, map<string, bufferlist> *pattrs, bool create_entry_point, optional_yield y, const DoutPrefixProvider *dpp) { bool create_head = !info.has_instance_obj || create_entry_point; int ret = svc.bucket->store_bucket_instance_info(ctx.bi, RGWSI_Bucket::get_bi_meta_key(info.bucket), info, orig_info, exclusive, mtime, pattrs, y, dpp); if (ret < 0) { return ret; } if 
(!create_head) return 0; /* done! */ RGWBucketEntryPoint entry_point; entry_point.bucket = info.bucket; entry_point.owner = info.owner; entry_point.creation_time = info.creation_time; entry_point.linked = true; RGWObjVersionTracker ot; if (pep_objv && !pep_objv->tag.empty()) { ot.write_version = *pep_objv; } else { ot.generate_new_write_ver(cct); if (pep_objv) { *pep_objv = ot.write_version; } } ret = svc.bucket->store_bucket_entrypoint_info(ctx.ep, RGWSI_Bucket::get_entrypoint_meta_key(info.bucket), entry_point, exclusive, mtime, pattrs, &ot, y, dpp); if (ret < 0) return ret; return 0; } int RGWBucketCtl::convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx, const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp) { RGWBucketEntryPoint entry_point; real_time ep_mtime; RGWObjVersionTracker ot; map<string, bufferlist> attrs; RGWBucketInfo info; auto cct = svc.bucket->ctx(); ldpp_dout(dpp, 10) << "RGWRados::convert_old_bucket_info(): bucket=" << bucket << dendl; int ret = svc.bucket->read_bucket_entrypoint_info(ctx.ep, RGWSI_Bucket::get_entrypoint_meta_key(bucket), &entry_point, &ot, &ep_mtime, &attrs, y, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: get_bucket_entrypoint_info() returned " << ret << " bucket=" << bucket << dendl; return ret; } if (!entry_point.has_bucket_info) { /* already converted! 
*/ return 0; } info = entry_point.old_bucket_info; ot.generate_new_write_ver(cct); ret = do_store_linked_bucket_info(ctx, info, nullptr, false, ep_mtime, &ot.write_version, &attrs, true, y, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to put_linked_bucket_info(): " << ret << dendl; return ret; } return 0; } int RGWBucketCtl::set_bucket_instance_attrs(RGWBucketInfo& bucket_info, map<string, bufferlist>& attrs, RGWObjVersionTracker *objv_tracker, optional_yield y, const DoutPrefixProvider *dpp) { return call([&](RGWSI_Bucket_X_Ctx& ctx) { rgw_bucket& bucket = bucket_info.bucket; if (!bucket_info.has_instance_obj) { /* an old bucket object, need to convert it */ int ret = convert_old_bucket_info(ctx, bucket, y, dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed converting old bucket info: " << ret << dendl; return ret; } } return do_store_bucket_instance_info(ctx.bi, bucket, bucket_info, y, dpp, BucketInstance::PutParams().set_attrs(&attrs) .set_objv_tracker(objv_tracker) .set_orig_info(&bucket_info)); }); } int RGWBucketCtl::link_bucket(const rgw_user& user_id, const rgw_bucket& bucket, ceph::real_time creation_time, optional_yield y, const DoutPrefixProvider *dpp, bool update_entrypoint, rgw_ep_info *pinfo) { return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) { return do_link_bucket(ctx, user_id, bucket, creation_time, update_entrypoint, pinfo, y, dpp); }); } int RGWBucketCtl::do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx, const rgw_user& user_id, const rgw_bucket& bucket, ceph::real_time creation_time, bool update_entrypoint, rgw_ep_info *pinfo, optional_yield y, const DoutPrefixProvider *dpp) { int ret; RGWBucketEntryPoint ep; RGWObjVersionTracker ot; RGWObjVersionTracker& rot = (pinfo) ? 
pinfo->ep_objv : ot; map<string, bufferlist> attrs, *pattrs = nullptr; string meta_key; if (update_entrypoint) { meta_key = RGWSI_Bucket::get_entrypoint_meta_key(bucket); if (pinfo) { ep = pinfo->ep; pattrs = &pinfo->attrs; } else { ret = svc.bucket->read_bucket_entrypoint_info(ctx, meta_key, &ep, &rot, nullptr, &attrs, y, dpp); if (ret < 0 && ret != -ENOENT) { ldpp_dout(dpp, 0) << "ERROR: read_bucket_entrypoint_info() returned: " << cpp_strerror(-ret) << dendl; } pattrs = &attrs; } } ret = svc.user->add_bucket(dpp, user_id, bucket, creation_time, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: error adding bucket to user directory:" << " user=" << user_id << " bucket=" << bucket << " err=" << cpp_strerror(-ret) << dendl; goto done_err; } if (!update_entrypoint) return 0; ep.linked = true; ep.owner = user_id; ep.bucket = bucket; ret = svc.bucket->store_bucket_entrypoint_info( ctx, meta_key, ep, false, real_time(), pattrs, &rot, y, dpp); if (ret < 0) goto done_err; return 0; done_err: int r = do_unlink_bucket(ctx, user_id, bucket, true, y, dpp); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: failed unlinking bucket on error cleanup: " << cpp_strerror(-r) << dendl; } return ret; } int RGWBucketCtl::unlink_bucket(const rgw_user& user_id, const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp, bool update_entrypoint) { return bm_handler->call([&](RGWSI_Bucket_EP_Ctx& ctx) { return do_unlink_bucket(ctx, user_id, bucket, update_entrypoint, y, dpp); }); } int RGWBucketCtl::do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx, const rgw_user& user_id, const rgw_bucket& bucket, bool update_entrypoint, optional_yield y, const DoutPrefixProvider *dpp) { int ret = svc.user->remove_bucket(dpp, user_id, bucket, y); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: error removing bucket from directory: " << cpp_strerror(-ret)<< dendl; } if (!update_entrypoint) return 0; RGWBucketEntryPoint ep; RGWObjVersionTracker ot; map<string, bufferlist> attrs; string meta_key = 
RGWSI_Bucket::get_entrypoint_meta_key(bucket); ret = svc.bucket->read_bucket_entrypoint_info(ctx, meta_key, &ep, &ot, nullptr, &attrs, y, dpp); if (ret == -ENOENT) return 0; if (ret < 0) return ret; if (!ep.linked) return 0; if (ep.owner != user_id) { ldpp_dout(dpp, 0) << "bucket entry point user mismatch, can't unlink bucket: " << ep.owner << " != " << user_id << dendl; return -EINVAL; } ep.linked = false; return svc.bucket->store_bucket_entrypoint_info(ctx, meta_key, ep, false, real_time(), &attrs, &ot, y, dpp); } int RGWBucketCtl::read_bucket_stats(const rgw_bucket& bucket, RGWBucketEnt *result, optional_yield y, const DoutPrefixProvider *dpp) { return call([&](RGWSI_Bucket_X_Ctx& ctx) { return svc.bucket->read_bucket_stats(ctx, bucket, result, y, dpp); }); } int RGWBucketCtl::read_buckets_stats(map<string, RGWBucketEnt>& m, optional_yield y, const DoutPrefixProvider *dpp) { return call([&](RGWSI_Bucket_X_Ctx& ctx) { return svc.bucket->read_buckets_stats(ctx, m, y, dpp); }); } int RGWBucketCtl::sync_user_stats(const DoutPrefixProvider *dpp, const rgw_user& user_id, const RGWBucketInfo& bucket_info, optional_yield y, RGWBucketEnt* pent) { RGWBucketEnt ent; if (!pent) { pent = &ent; } int r = svc.bi->read_stats(dpp, bucket_info, pent, y); if (r < 0) { ldpp_dout(dpp, 20) << __func__ << "(): failed to read bucket stats (r=" << r << ")" << dendl; return r; } return svc.user->flush_bucket_stats(dpp, user_id, *pent, y); } int RGWBucketCtl::get_sync_policy_handler(std::optional<rgw_zone_id> zone, std::optional<rgw_bucket> bucket, RGWBucketSyncPolicyHandlerRef *phandler, optional_yield y, const DoutPrefixProvider *dpp) { int r = call([&](RGWSI_Bucket_X_Ctx& ctx) { return svc.bucket_sync->get_policy_handler(ctx, zone, bucket, phandler, y, dpp); }); if (r < 0) { ldpp_dout(dpp, 20) << __func__ << "(): failed to get policy handler for bucket=" << bucket << " (r=" << r << ")" << dendl; return r; } return 0; } int RGWBucketCtl::bucket_exports_data(const rgw_bucket& bucket, 
optional_yield y, const DoutPrefixProvider *dpp) { RGWBucketSyncPolicyHandlerRef handler; int r = get_sync_policy_handler(std::nullopt, bucket, &handler, y, dpp); if (r < 0) { return r; } return handler->bucket_exports_data(); } int RGWBucketCtl::bucket_imports_data(const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp) { RGWBucketSyncPolicyHandlerRef handler; int r = get_sync_policy_handler(std::nullopt, bucket, &handler, y, dpp); if (r < 0) { return r; } return handler->bucket_imports_data(); } RGWBucketMetadataHandlerBase* RGWBucketMetaHandlerAllocator::alloc() { return new RGWBucketMetadataHandler(); } RGWBucketInstanceMetadataHandlerBase* RGWBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver) { return new RGWBucketInstanceMetadataHandler(driver); } RGWBucketMetadataHandlerBase* RGWArchiveBucketMetaHandlerAllocator::alloc() { return new RGWArchiveBucketMetadataHandler(); } RGWBucketInstanceMetadataHandlerBase* RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(rgw::sal::Driver* driver) { return new RGWArchiveBucketInstanceMetadataHandler(driver); } void RGWBucketEntryPoint::generate_test_instances(list<RGWBucketEntryPoint*>& o) { RGWBucketEntryPoint *bp = new RGWBucketEntryPoint(); init_bucket(&bp->bucket, "tenant", "bucket", "pool", ".index.pool", "marker", "10"); bp->owner = "owner"; bp->creation_time = ceph::real_clock::from_ceph_timespec({ceph_le32(2), ceph_le32(3)}); o.push_back(bp); o.push_back(new RGWBucketEntryPoint); } void RGWBucketEntryPoint::dump(Formatter *f) const { encode_json("bucket", bucket, f); encode_json("owner", owner, f); utime_t ut(creation_time); encode_json("creation_time", ut, f); encode_json("linked", linked, f); encode_json("has_bucket_info", has_bucket_info, f); if (has_bucket_info) { encode_json("old_bucket_info", old_bucket_info, f); } } void RGWBucketEntryPoint::decode_json(JSONObj *obj) { JSONDecoder::decode_json("bucket", bucket, obj); JSONDecoder::decode_json("owner", owner, obj); utime_t 
ut; JSONDecoder::decode_json("creation_time", ut, obj); creation_time = ut.to_real_time(); JSONDecoder::decode_json("linked", linked, obj); JSONDecoder::decode_json("has_bucket_info", has_bucket_info, obj); if (has_bucket_info) { JSONDecoder::decode_json("old_bucket_info", old_bucket_info, obj); } }
98,331
33.0957
194
cc
null
ceph-main/src/rgw/driver/rados/rgw_bucket.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#pragma once

#include <string>
#include <memory>
#include <variant>

#include <boost/container/flat_map.hpp>
#include <boost/container/flat_set.hpp>

#include "include/types.h"
#include "rgw_common.h"
#include "rgw_tools.h"
#include "rgw_metadata.h"
#include "rgw/rgw_bucket.h"
#include "rgw_string.h"
#include "rgw_sal.h"

#include "common/Formatter.h"
#include "common/lru_map.h"
#include "common/ceph_time.h"
#include "rgw_formats.h"

#include "services/svc_bucket_types.h"
#include "services/svc_bucket_sync.h"

// define as static when RGWBucket implementation completes
extern void rgw_get_buckets_obj(const rgw_user& user_id, std::string& buckets_obj_id);

class RGWSI_Meta;
class RGWBucketMetadataHandler;
class RGWBucketInstanceMetadataHandler;
class RGWUserCtl;
class RGWBucketCtl;
class RGWZone;
struct RGWZoneParams;

// this is used as a filter to RGWRados::cls_bucket_list_ordered; it
// conforms to the type RGWBucketListNameFilter
extern bool rgw_bucket_object_check_filter(const std::string& oid);

// Initialize a bucket's index layout from zone defaults, with optional
// overrides for shard count and index type.
void init_default_bucket_layout(CephContext *cct, rgw::BucketLayout& layout,
				const RGWZone& zone,
				std::optional<uint32_t> shards,
				std::optional<rgw::BucketIndexType> type);

// Bucket-instance info together with its xattrs, as one serializable unit.
struct RGWBucketCompleteInfo {
  RGWBucketInfo info;
  std::map<std::string, bufferlist> attrs;

  void dump(Formatter *f) const;
  void decode_json(JSONObj *obj);
};

// Metadata object wrapping a bucket entrypoint plus its xattrs.
class RGWBucketEntryMetadataObject : public RGWMetadataObject {
  RGWBucketEntryPoint ep;
  std::map<std::string, bufferlist> attrs;
public:
  RGWBucketEntryMetadataObject(RGWBucketEntryPoint& _ep, const obj_version& v, real_time m) : ep(_ep) {
    objv = v;
    mtime = m;
    set_pattrs (&attrs);
  }
  RGWBucketEntryMetadataObject(RGWBucketEntryPoint& _ep, const obj_version& v, real_time m, std::map<std::string, bufferlist>&& _attrs) :
    ep(_ep), attrs(std::move(_attrs)) {
    objv = v;
    mtime = m;
    set_pattrs (&attrs);
  }

  void dump(Formatter *f) const override {
    ep.dump(f);
  }

  RGWBucketEntryPoint& get_ep() {
    return ep;
  }

  std::map<std::string, bufferlist>& get_attrs() {
    return attrs;
  }
};

// Metadata object wrapping a complete bucket-instance record.
class RGWBucketInstanceMetadataObject : public RGWMetadataObject {
  RGWBucketCompleteInfo info;
public:
  RGWBucketInstanceMetadataObject() {}
  RGWBucketInstanceMetadataObject(RGWBucketCompleteInfo& i, const obj_version& v, real_time m)
    : info(i) {
    objv = v;
    mtime = m;
  }

  void dump(Formatter *f) const override {
    info.dump(f);
  }

  void decode_json(JSONObj *obj) {
    info.decode_json(obj);
  }

  RGWBucketCompleteInfo& get_bci() {
    return info;
  }
  RGWBucketInfo& get_bucket_info() {
    return info.info;
  }
};

/**
 * store a list of the user's buckets, with associated functinos.
 */
class RGWUserBuckets {
  std::map<std::string, RGWBucketEnt> buckets;

public:
  RGWUserBuckets() = default;
  RGWUserBuckets(RGWUserBuckets&&) = default;

  RGWUserBuckets& operator=(const RGWUserBuckets&) = default;

  void encode(bufferlist& bl) const {
    using ceph::encode;
    encode(buckets, bl);
  }
  void decode(bufferlist::const_iterator& bl) {
    using ceph::decode;
    decode(buckets, bl);
  }
  /**
   * Check if the user owns a bucket by the given name.
   */
  bool owns(std::string& name) {
    std::map<std::string, RGWBucketEnt>::iterator iter;
    iter = buckets.find(name);
    return (iter != buckets.end());
  }

  /**
   * Add a (created) bucket to the user's bucket list.
   */
  void add(const RGWBucketEnt& bucket) {
    buckets[bucket.bucket.name] = bucket;
  }

  /**
   * Remove a bucket from the user's list by name.
   */
  void remove(const std::string& name) {
    std::map<std::string, RGWBucketEnt>::iterator iter;
    iter = buckets.find(name);
    if (iter != buckets.end()) {
      buckets.erase(iter);
    }
  }

  /**
   * Get the user's buckets as a map.
   */
  std::map<std::string, RGWBucketEnt>& get_buckets() { return buckets; }

  /**
   * Cleanup data structure
   */
  void clear() { buckets.clear(); }

  size_t count() { return buckets.size(); }
};
WRITE_CLASS_ENCODER(RGWUserBuckets)

// Abstract base for the bucket-entrypoint metadata handler.
class RGWBucketMetadataHandlerBase : public RGWMetadataHandler_GenericMetaBE {
public:
  virtual ~RGWBucketMetadataHandlerBase() {}
  virtual void init(RGWSI_Bucket *bucket_svc,
                    RGWBucketCtl *bucket_ctl) = 0;

};

// Abstract base for the bucket-instance metadata handler.
class RGWBucketInstanceMetadataHandlerBase : public RGWMetadataHandler_GenericMetaBE {
public:
  virtual ~RGWBucketInstanceMetadataHandlerBase() {}
  virtual void init(RGWSI_Zone *zone_svc,
		    RGWSI_Bucket *bucket_svc,
		    RGWSI_BucketIndex *bi_svc) = 0;
};

class RGWBucketMetaHandlerAllocator {
public:
  static RGWBucketMetadataHandlerBase *alloc();
};

class RGWBucketInstanceMetaHandlerAllocator {
public:
  static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Driver* driver);
};

class RGWArchiveBucketMetaHandlerAllocator {
public:
  static RGWBucketMetadataHandlerBase *alloc();
};

class RGWArchiveBucketInstanceMetaHandlerAllocator {
public:
  static RGWBucketInstanceMetadataHandlerBase *alloc(rgw::sal::Driver* driver);
};

extern int rgw_remove_object(const DoutPrefixProvider *dpp, rgw::sal::Driver* driver, rgw::sal::Bucket* bucket, rgw_obj_key& key, optional_yield y);

extern int rgw_object_get_attr(rgw::sal::Driver* driver, rgw::sal::Object* obj,
			       const char* attr_name, bufferlist& out_bl,
			       optional_yield y);

extern void check_bad_user_bucket_mapping(rgw::sal::Driver* driver, rgw::sal::User& user, bool fix, optional_yield y, const DoutPrefixProvider *dpp);

// Parameter bag for radosgw-admin bucket operations; setters/getters only.
struct RGWBucketAdminOpState {
  rgw_user uid;
  std::string display_name;
  std::string bucket_name;
  std::string bucket_id;
  std::string object_name;
  std::string new_bucket_name;

  bool list_buckets;
  bool stat_buckets;
  bool check_objects;
  bool fix_index;
  bool delete_child_objects;
  bool bucket_stored;
  bool sync_bucket;
  int max_aio = 0;

  std::unique_ptr<rgw::sal::Bucket> bucket;

  RGWQuotaInfo quota;
  RGWRateLimitInfo ratelimit_info;

  void set_fetch_stats(bool value) { stat_buckets = value; }
  void set_check_objects(bool value) { check_objects = value; }
  void set_fix_index(bool value) { fix_index = value; }
  void set_delete_children(bool value) { delete_child_objects = value; }
  void set_max_aio(int value) { max_aio = value; }

  void set_user_id(const rgw_user& user_id) {
    if (!user_id.empty())
      uid = user_id;
  }
  void set_tenant(const std::string& tenant_str) {
    uid.tenant = tenant_str;
  }
  void set_bucket_name(const std::string& bucket_str) {
    bucket_name = bucket_str;
  }
  void set_object(std::string& object_str) {
    object_name = object_str;
  }
  void set_new_bucket_name(std::string& new_bucket_str) {
    new_bucket_name = new_bucket_str;
  }
  void set_quota(RGWQuotaInfo& value) {
    quota = value;
  }
  void set_bucket_ratelimit(RGWRateLimitInfo& value) {
    ratelimit_info = value;
  }

  void set_sync_bucket(bool value) { sync_bucket = value; }

  rgw_user& get_user_id() { return uid; }
  std::string& get_user_display_name() { return display_name; }
  std::string& get_bucket_name() { return bucket_name; }
  std::string& get_object_name() { return object_name; }
  std::string& get_tenant() { return uid.tenant; }

  rgw::sal::Bucket* get_bucket() { return bucket.get(); }
  void set_bucket(std::unique_ptr<rgw::sal::Bucket> _bucket) {
    bucket = std::move(_bucket);
    bucket_stored = true;
  }

  void set_bucket_id(const std::string& bi) {
    bucket_id = bi;
  }
  const std::string& get_bucket_id() { return bucket_id; }

  bool will_fetch_stats() { return stat_buckets; }
  bool will_fix_index() { return fix_index; }
  bool will_delete_children() { return delete_child_objects; }
  bool will_check_objects() { return check_objects; }
  bool is_user_op() { return !uid.empty(); }
  bool is_system_op() { return uid.empty(); }
  bool has_bucket_stored() { return bucket_stored; }
  int get_max_aio() { return max_aio; }
  bool will_sync_bucket() { return sync_bucket; }

  RGWBucketAdminOpState() : list_buckets(false), stat_buckets(false), check_objects(false),
                            fix_index(false), delete_child_objects(false),
                            bucket_stored(false), sync_bucket(true) {}
};

/*
 * A simple wrapper class for administrative bucket operations
 */
class RGWBucket {
  RGWUserBuckets buckets;
  rgw::sal::Driver* driver;
  RGWAccessHandle handle;

  std::unique_ptr<rgw::sal::Bucket> bucket;
  std::unique_ptr<rgw::sal::User> user;

  bool failure;

  RGWObjVersionTracker ep_objv; // entrypoint object version

public:
  RGWBucket() : driver(NULL), handle(NULL), failure(false) {}
  int init(rgw::sal::Driver* storage, RGWBucketAdminOpState& op_state, optional_yield y,
              const DoutPrefixProvider *dpp, std::string *err_msg = NULL);

  int check_bad_index_multipart(RGWBucketAdminOpState& op_state,
				RGWFormatterFlusher& flusher,
				const DoutPrefixProvider *dpp,
				optional_yield y, std::string *err_msg = NULL);

  int check_object_index(const DoutPrefixProvider *dpp,
                         RGWBucketAdminOpState& op_state,
                         RGWFormatterFlusher& flusher,
                         optional_yield y,
                         std::string *err_msg = NULL);

  int check_index(const DoutPrefixProvider *dpp,
          RGWBucketAdminOpState& op_state,
	  std::map<RGWObjCategory, RGWStorageStats>& existing_stats,
	  std::map<RGWObjCategory, RGWStorageStats>& calculated_stats,
	  std::string *err_msg = NULL);

  int chown(RGWBucketAdminOpState& op_state, const std::string& marker,
            optional_yield y, const DoutPrefixProvider *dpp, std::string *err_msg = NULL);
  int set_quota(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);

  int remove_object(const DoutPrefixProvider *dpp, RGWBucketAdminOpState& op_state, optional_yield y, std::string *err_msg = NULL);
  int policy_bl_to_stream(bufferlist& bl, std::ostream& o);
  int get_policy(RGWBucketAdminOpState& op_state, RGWAccessControlPolicy& policy, optional_yield y, const DoutPrefixProvider *dpp);
  int sync(RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);

  void clear_failure() { failure = false; }

  const RGWBucketInfo& get_bucket_info() const { return bucket->get_info(); }
};

// Static entry points implementing radosgw-admin bucket subcommands.
class RGWBucketAdminOp {
public:
  static int get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
                  RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y);
  static int get_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
                  RGWAccessControlPolicy& policy, const DoutPrefixProvider *dpp, optional_yield y);
  static int dump_s3_policy(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
                  std::ostream& os, const DoutPrefixProvider *dpp, optional_yield y);

  static int unlink(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y);
  static int link(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);
  static int chown(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const std::string& marker, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);

  static int check_index(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
                  RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp);

  static int remove_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, optional_yield y,
			   const DoutPrefixProvider *dpp, bool bypass_gc = false, bool keep_index_consistent = true);
  static int remove_object(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y);
  static int info(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, RGWFormatterFlusher& flusher, optional_yield y, const DoutPrefixProvider *dpp);
  static int limit_check(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
			 const std::list<std::string>& user_ids,
			 RGWFormatterFlusher& flusher, optional_yield y,
                         const DoutPrefixProvider *dpp,
			 bool warnings_only = false);
  static int set_quota(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y);

  static int list_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
				  RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y);

  static int clear_stale_instances(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
				   RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y);
  static int fix_lc_shards(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
                           RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y);
  static int fix_obj_expiry(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state,
			    RGWFormatterFlusher& flusher, const DoutPrefixProvider *dpp, optional_yield y, bool dry_run = false);

  static int sync_bucket(rgw::sal::Driver* driver, RGWBucketAdminOpState& op_state, const DoutPrefixProvider *dpp, optional_yield y, std::string *err_msg = NULL);
};

// Borrowed view of an entrypoint plus its attrs and object version; the
// referenced ep/attrs must outlive this struct.
struct rgw_ep_info {
  RGWBucketEntryPoint &ep;
  std::map<std::string, buffer::list>& attrs;
  RGWObjVersionTracker ep_objv;
  rgw_ep_info(RGWBucketEntryPoint &ep, std::map<std::string, bufferlist>& attrs)
    : ep(ep), attrs(attrs) {}
};

// Controller coordinating bucket entrypoint/instance metadata operations
// across the zone, bucket, bucket-sync, bucket-index and user services.
class RGWBucketCtl {
  CephContext *cct;

  struct Svc {
    RGWSI_Zone *zone{nullptr};
    RGWSI_Bucket *bucket{nullptr};
    RGWSI_Bucket_Sync *bucket_sync{nullptr};
    RGWSI_BucketIndex *bi{nullptr};
    RGWSI_User* user = nullptr;
  } svc;

  struct Ctl {
    RGWUserCtl *user{nullptr};
  } ctl;

  RGWBucketMetadataHandler *bm_handler;
  RGWBucketInstanceMetadataHandler *bmi_handler;

  RGWSI_Bucket_BE_Handler bucket_be_handler; /* bucket backend handler */
  RGWSI_BucketInstance_BE_Handler bi_be_handler; /* bucket instance backend handler */

  int call(std::function<int(RGWSI_Bucket_X_Ctx& ctx)> f);

public:
  RGWBucketCtl(RGWSI_Zone *zone_svc,
               RGWSI_Bucket *bucket_svc,
               RGWSI_Bucket_Sync *bucket_sync_svc,
               RGWSI_BucketIndex *bi_svc,
               RGWSI_User* user_svc);

  void init(RGWUserCtl *user_ctl,
            RGWBucketMetadataHandler *_bm_handler,
            RGWBucketInstanceMetadataHandler *_bmi_handler,
            RGWDataChangesLog *datalog,
            const DoutPrefixProvider *dpp);

  // Fluent parameter bags for entrypoint read/write/remove.
  struct Bucket {
    struct GetParams {
      RGWObjVersionTracker *objv_tracker{nullptr};
      real_time *mtime{nullptr};
      std::map<std::string, bufferlist> *attrs{nullptr};
      rgw_cache_entry_info *cache_info{nullptr};
      boost::optional<obj_version> refresh_version;
      std::optional<RGWSI_MetaBackend_CtxParams> bectx_params;

      GetParams() {}

      GetParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
        objv_tracker = _objv_tracker;
        return *this;
      }

      GetParams& set_mtime(ceph::real_time *_mtime) {
        mtime = _mtime;
        return *this;
      }

      GetParams& set_attrs(std::map<std::string, bufferlist> *_attrs) {
        attrs = _attrs;
        return *this;
      }

      GetParams& set_cache_info(rgw_cache_entry_info *_cache_info) {
        cache_info = _cache_info;
        return *this;
      }

      GetParams& set_refresh_version(const obj_version& _refresh_version) {
        refresh_version = _refresh_version;
        return *this;
      }

      GetParams& set_bectx_params(std::optional<RGWSI_MetaBackend_CtxParams> _bectx_params) {
        bectx_params = _bectx_params;
        return *this;
      }
    };

    struct PutParams {
      RGWObjVersionTracker *objv_tracker{nullptr};
      ceph::real_time mtime;
      bool exclusive{false};
      std::map<std::string, bufferlist> *attrs{nullptr};

      PutParams() {}

      PutParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
        objv_tracker = _objv_tracker;
        return *this;
      }

      PutParams& set_mtime(const ceph::real_time& _mtime) {
        mtime = _mtime;
        return *this;
      }

      PutParams& set_exclusive(bool _exclusive) {
        exclusive = _exclusive;
        return *this;
      }

      PutParams& set_attrs(std::map<std::string, bufferlist> *_attrs) {
        attrs = _attrs;
        return *this;
      }
    };

    struct RemoveParams {
      RGWObjVersionTracker *objv_tracker{nullptr};

      RemoveParams() {}

      RemoveParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
        objv_tracker = _objv_tracker;
        return *this;
      }
    };
  };

  // Fluent parameter bags for bucket-instance read/write/remove.
  struct BucketInstance {
    struct GetParams {
      real_time *mtime{nullptr};
      std::map<std::string, bufferlist> *attrs{nullptr};
      rgw_cache_entry_info *cache_info{nullptr};
      boost::optional<obj_version> refresh_version;
      RGWObjVersionTracker *objv_tracker{nullptr};
      std::optional<RGWSI_MetaBackend_CtxParams> bectx_params;

      GetParams() {}

      GetParams& set_mtime(ceph::real_time *_mtime) {
        mtime = _mtime;
        return *this;
      }

      GetParams& set_attrs(std::map<std::string, bufferlist> *_attrs) {
        attrs = _attrs;
        return *this;
      }

      GetParams& set_cache_info(rgw_cache_entry_info *_cache_info) {
        cache_info = _cache_info;
        return *this;
      }

      GetParams& set_refresh_version(const obj_version& _refresh_version) {
        refresh_version = _refresh_version;
        return *this;
      }

      GetParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
        objv_tracker = _objv_tracker;
        return *this;
      }

      GetParams& set_bectx_params(std::optional<RGWSI_MetaBackend_CtxParams> _bectx_params) {
        bectx_params = _bectx_params;
        return *this;
      }
    };

    struct PutParams {
      std::optional<RGWBucketInfo *> orig_info; /* nullopt: orig_info was not fetched,
                                                   nullptr: orig_info was not found (new bucket instance */
      ceph::real_time mtime;
      bool exclusive{false};
      std::map<std::string, bufferlist> *attrs{nullptr};
      RGWObjVersionTracker *objv_tracker{nullptr};

      PutParams() {}

      PutParams& set_orig_info(RGWBucketInfo *pinfo) {
        orig_info = pinfo;
        return *this;
      }

      PutParams& set_mtime(const ceph::real_time& _mtime) {
        mtime = _mtime;
        return *this;
      }

      PutParams& set_exclusive(bool _exclusive) {
        exclusive = _exclusive;
        return *this;
      }

      PutParams& set_attrs(std::map<std::string, bufferlist> *_attrs) {
        attrs = _attrs;
        return *this;
      }

      PutParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
        objv_tracker = _objv_tracker;
        return *this;
      }
    };

    struct RemoveParams {
      RGWObjVersionTracker *objv_tracker{nullptr};

      RemoveParams() {}

      RemoveParams& set_objv_tracker(RGWObjVersionTracker *_objv_tracker) {
        objv_tracker = _objv_tracker;
        return *this;
      }
    };
  };

  /* bucket entrypoint */
  int read_bucket_entrypoint_info(const rgw_bucket& bucket,
                                  RGWBucketEntryPoint *info,
                                  optional_yield y,
                                  const DoutPrefixProvider *dpp,
                                  const Bucket::GetParams& params = {});
  int store_bucket_entrypoint_info(const rgw_bucket& bucket,
                                   RGWBucketEntryPoint& info,
                                   optional_yield y,
                                   const DoutPrefixProvider *dpp,
                                   const Bucket::PutParams& params = {});
  int remove_bucket_entrypoint_info(const rgw_bucket& bucket,
                                    optional_yield y,
                                    const DoutPrefixProvider *dpp,
                                    const Bucket::RemoveParams& params = {});

  /* bucket instance */
  int read_bucket_instance_info(const rgw_bucket& bucket,
                                RGWBucketInfo *info,
                                optional_yield y,
                                const DoutPrefixProvider *dpp,
                                const BucketInstance::GetParams& params = {});
  int store_bucket_instance_info(const rgw_bucket& bucket,
                                 RGWBucketInfo& info,
                                 optional_yield y,
                                 const DoutPrefixProvider *dpp,
                                 const BucketInstance::PutParams& params = {});
  int remove_bucket_instance_info(const rgw_bucket& bucket,
                                  RGWBucketInfo& info,
                                  optional_yield y,
                                  const DoutPrefixProvider *dpp,
                                  const BucketInstance::RemoveParams& params = {});

  /*
   * bucket_id may or may not be provided
   *
   * ep_objv_tracker might not be populated even if provided. Will only be set if entrypoint is read
   * (that is: if bucket_id is empty).
   */
  int read_bucket_info(const rgw_bucket& bucket,
                       RGWBucketInfo *info,
                       optional_yield y,
                       const DoutPrefixProvider *dpp,
                       const BucketInstance::GetParams& params = {},
                       RGWObjVersionTracker *ep_objv_tracker = nullptr);


  int set_bucket_instance_attrs(RGWBucketInfo& bucket_info,
                                std::map<std::string, bufferlist>& attrs,
                                RGWObjVersionTracker *objv_tracker,
                                optional_yield y,
                                const DoutPrefixProvider *dpp);

  /* user/bucket */
  int link_bucket(const rgw_user& user_id,
                  const rgw_bucket& bucket,
                  ceph::real_time creation_time,
		  optional_yield y,
                  const DoutPrefixProvider *dpp,
		  bool update_entrypoint = true,
		  rgw_ep_info *pinfo = nullptr);

  int unlink_bucket(const rgw_user& user_id,
                    const rgw_bucket& bucket,
		    optional_yield y,
                    const DoutPrefixProvider *dpp,
		    bool update_entrypoint = true);

  int read_buckets_stats(std::map<std::string, RGWBucketEnt>& m,
                         optional_yield y,
                         const DoutPrefixProvider *dpp);

  int read_bucket_stats(const rgw_bucket& bucket,
                        RGWBucketEnt *result,
                        optional_yield y,
                        const DoutPrefixProvider *dpp);

  /* quota related */
  int sync_user_stats(const DoutPrefixProvider *dpp,
                      const rgw_user& user_id, const RGWBucketInfo& bucket_info,
		      optional_yield y,
                      RGWBucketEnt* pent);

  /* bucket sync */
  int get_sync_policy_handler(std::optional<rgw_zone_id> zone,
                              std::optional<rgw_bucket> bucket,
                              RGWBucketSyncPolicyHandlerRef *phandler,
                              optional_yield y,
                              const DoutPrefixProvider *dpp);
  int bucket_exports_data(const rgw_bucket& bucket,
                          optional_yield y,
                          const DoutPrefixProvider *dpp);
  int bucket_imports_data(const rgw_bucket& bucket,
                          optional_yield y,
                          const DoutPrefixProvider *dpp);

private:
  int convert_old_bucket_info(RGWSI_Bucket_X_Ctx& ctx,
                              const rgw_bucket& bucket,
                              optional_yield y,
                              const DoutPrefixProvider *dpp);

  int do_store_bucket_instance_info(RGWSI_Bucket_BI_Ctx& ctx,
                                    const rgw_bucket& bucket,
                                    RGWBucketInfo& info,
                                    optional_yield y,
                                    const DoutPrefixProvider *dpp,
                                    const BucketInstance::PutParams& params);

  int do_store_linked_bucket_info(RGWSI_Bucket_X_Ctx& ctx,
                                  RGWBucketInfo& info,
                                  RGWBucketInfo *orig_info,
                                  bool exclusive, real_time mtime,
                                  obj_version *pep_objv,
                                  std::map<std::string, bufferlist> *pattrs,
                                  bool create_entry_point,
				  optional_yield,
                                  const DoutPrefixProvider *dpp);

  int do_link_bucket(RGWSI_Bucket_EP_Ctx& ctx,
                     const rgw_user& user,
                     const rgw_bucket& bucket,
                     ceph::real_time creation_time,
                     bool update_entrypoint,
                     rgw_ep_info *pinfo,
		     optional_yield y,
                     const DoutPrefixProvider *dpp);

  int do_unlink_bucket(RGWSI_Bucket_EP_Ctx& ctx,
                       const rgw_user& user_id,
                       const rgw_bucket& bucket,
                       bool update_entrypoint,
		       optional_yield y,
                       const DoutPrefixProvider *dpp);
};

bool rgw_find_bucket_by_id(const DoutPrefixProvider *dpp, CephContext *cct, rgw::sal::Driver* driver,
                           const std::string& marker, const std::string& bucket_id, rgw_bucket* bucket_out);
25,972
33.538564
183
h
null
ceph-main/src/rgw/driver/rados/rgw_bucket_sync.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_common.h" #include "rgw_bucket_sync.h" #include "rgw_data_sync.h" #include "rgw_zone.h" #include "services/svc_zone.h" #include "services/svc_bucket_sync.h" #define dout_subsys ceph_subsys_rgw using namespace std; ostream& operator<<(ostream& os, const rgw_sync_bucket_entity& e) { os << "{b=" << rgw_sync_bucket_entities::bucket_key(e.bucket) << ",z=" << e.zone.value_or(rgw_zone_id()) << ",az=" << (int)e.all_zones << "}"; return os; } ostream& operator<<(ostream& os, const rgw_sync_bucket_pipe& pipe) { os << "{s=" << pipe.source << ",d=" << pipe.dest << "}"; return os; } ostream& operator<<(ostream& os, const rgw_sync_bucket_entities& e) { os << "{b=" << rgw_sync_bucket_entities::bucket_key(e.bucket) << ",z=" << e.zones.value_or(std::set<rgw_zone_id>()) << "}"; return os; } ostream& operator<<(ostream& os, const rgw_sync_bucket_pipes& pipe) { os << "{id=" << pipe.id << ",s=" << pipe.source << ",d=" << pipe.dest << "}"; return os; } static std::vector<rgw_sync_bucket_pipe> filter_relevant_pipes(const std::vector<rgw_sync_bucket_pipes>& pipes, const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone) { std::vector<rgw_sync_bucket_pipe> relevant_pipes; for (auto& p : pipes) { if (p.source.match_zone(source_zone) && p.dest.match_zone(dest_zone)) { for (auto pipe : p.expand()) { pipe.source.apply_zone(source_zone); pipe.dest.apply_zone(dest_zone); relevant_pipes.push_back(pipe); } } } return relevant_pipes; } static bool is_wildcard_bucket(const rgw_bucket& bucket) { return bucket.name.empty(); } void rgw_sync_group_pipe_map::dump(ceph::Formatter *f) const { encode_json("zone", zone.id, f); encode_json("buckets", rgw_sync_bucket_entities::bucket_key(bucket), f); encode_json("sources", sources, f); encode_json("dests", dests, f); } template <typename CB1, typename CB2> void rgw_sync_group_pipe_map::try_add_to_pipe_map(const rgw_zone_id& source_zone, 
const rgw_zone_id& dest_zone, const std::vector<rgw_sync_bucket_pipes>& pipes, zb_pipe_map_t *pipe_map, CB1 filter_cb, CB2 call_filter_cb) { if (!filter_cb(source_zone, nullopt, dest_zone, nullopt)) { return; } auto relevant_pipes = filter_relevant_pipes(pipes, source_zone, dest_zone); for (auto& pipe : relevant_pipes) { rgw_sync_bucket_entity zb; if (!call_filter_cb(pipe, &zb)) { continue; } pipe_map->insert(make_pair(zb, pipe)); } } template <typename CB> void rgw_sync_group_pipe_map::try_add_source(const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone, const std::vector<rgw_sync_bucket_pipes>& pipes, CB filter_cb) { return try_add_to_pipe_map(source_zone, dest_zone, pipes, &sources, filter_cb, [&](const rgw_sync_bucket_pipe& pipe, rgw_sync_bucket_entity *zb) { *zb = rgw_sync_bucket_entity{source_zone, pipe.source.get_bucket()}; return filter_cb(source_zone, zb->bucket, dest_zone, pipe.dest.get_bucket()); }); } template <typename CB> void rgw_sync_group_pipe_map::try_add_dest(const rgw_zone_id& source_zone, const rgw_zone_id& dest_zone, const std::vector<rgw_sync_bucket_pipes>& pipes, CB filter_cb) { return try_add_to_pipe_map(source_zone, dest_zone, pipes, &dests, filter_cb, [&](const rgw_sync_bucket_pipe& pipe, rgw_sync_bucket_entity *zb) { *zb = rgw_sync_bucket_entity{dest_zone, pipe.dest.get_bucket()}; return filter_cb(source_zone, pipe.source.get_bucket(), dest_zone, zb->bucket); }); } using zb_pipe_map_t = rgw_sync_group_pipe_map::zb_pipe_map_t; pair<zb_pipe_map_t::const_iterator, zb_pipe_map_t::const_iterator> rgw_sync_group_pipe_map::find_pipes(const zb_pipe_map_t& m, const rgw_zone_id& zone, std::optional<rgw_bucket> b) const { if (!b) { return m.equal_range(rgw_sync_bucket_entity{zone, rgw_bucket()}); } auto zb = rgw_sync_bucket_entity{zone, *b}; auto range = m.equal_range(zb); if (range.first == range.second && !is_wildcard_bucket(*b)) { /* couldn't find the specific bucket, try to find by wildcard */ zb.bucket = rgw_bucket(); range = 
m.equal_range(zb); } return range; } template <typename CB> void rgw_sync_group_pipe_map::init(const DoutPrefixProvider *dpp, CephContext *cct, const rgw_zone_id& _zone, std::optional<rgw_bucket> _bucket, const rgw_sync_policy_group& group, rgw_sync_data_flow_group *_default_flow, std::set<rgw_zone_id> *_pall_zones, CB filter_cb) { zone = _zone; bucket = _bucket; default_flow = _default_flow; pall_zones = _pall_zones; rgw_sync_bucket_entity zb(zone, bucket); status = group.status; std::vector<rgw_sync_bucket_pipes> zone_pipes; string bucket_key = (bucket ? bucket->get_key() : "*"); /* only look at pipes that touch the specific zone and bucket */ for (auto& pipe : group.pipes) { if (pipe.contains_zone_bucket(zone, bucket)) { ldpp_dout(dpp, 20) << __func__ << "(): pipe_map (zone=" << zone << " bucket=" << bucket_key << "): adding potential pipe: " << pipe << dendl; zone_pipes.push_back(pipe); } } const rgw_sync_data_flow_group *pflow; if (!group.data_flow.empty()) { pflow = &group.data_flow; } else { if (!default_flow) { return; } pflow = default_flow; } auto& flow = *pflow; pall_zones->insert(zone); /* symmetrical */ for (auto& symmetrical_group : flow.symmetrical) { if (symmetrical_group.zones.find(zone) != symmetrical_group.zones.end()) { for (auto& z : symmetrical_group.zones) { if (z != zone) { pall_zones->insert(z); try_add_source(z, zone, zone_pipes, filter_cb); try_add_dest(zone, z, zone_pipes, filter_cb); } } } } /* directional */ for (auto& rule : flow.directional) { if (rule.source_zone == zone) { pall_zones->insert(rule.dest_zone); try_add_dest(zone, rule.dest_zone, zone_pipes, filter_cb); } else if (rule.dest_zone == zone) { pall_zones->insert(rule.source_zone); try_add_source(rule.source_zone, zone, zone_pipes, filter_cb); } } } /* * find all relevant pipes in our zone that match {dest_bucket} <- {source_zone, source_bucket} */ vector<rgw_sync_bucket_pipe> rgw_sync_group_pipe_map::find_source_pipes(const rgw_zone_id& source_zone, 
std::optional<rgw_bucket> source_bucket, std::optional<rgw_bucket> dest_bucket) const { vector<rgw_sync_bucket_pipe> result; auto range = find_pipes(sources, source_zone, source_bucket); for (auto iter = range.first; iter != range.second; ++iter) { auto pipe = iter->second; if (pipe.dest.match_bucket(dest_bucket)) { result.push_back(pipe); } } return result; } /* * find all relevant pipes in other zones that pull from a specific * source bucket in out zone {source_bucket} -> {dest_zone, dest_bucket} */ vector<rgw_sync_bucket_pipe> rgw_sync_group_pipe_map::find_dest_pipes(std::optional<rgw_bucket> source_bucket, const rgw_zone_id& dest_zone, std::optional<rgw_bucket> dest_bucket) const { vector<rgw_sync_bucket_pipe> result; auto range = find_pipes(dests, dest_zone, dest_bucket); for (auto iter = range.first; iter != range.second; ++iter) { auto pipe = iter->second; if (pipe.source.match_bucket(source_bucket)) { result.push_back(pipe); } } return result; } /* * find all relevant pipes from {source_zone, source_bucket} -> {dest_zone, dest_bucket} */ vector<rgw_sync_bucket_pipe> rgw_sync_group_pipe_map::find_pipes(const rgw_zone_id& source_zone, std::optional<rgw_bucket> source_bucket, const rgw_zone_id& dest_zone, std::optional<rgw_bucket> dest_bucket) const { if (dest_zone == zone) { return find_source_pipes(source_zone, source_bucket, dest_bucket); } if (source_zone == zone) { return find_dest_pipes(source_bucket, dest_zone, dest_bucket); } return vector<rgw_sync_bucket_pipe>(); } void RGWBucketSyncFlowManager::pipe_rules::insert(const rgw_sync_bucket_pipe& pipe) { pipes.push_back(pipe); auto ppipe = &pipes.back(); auto prefix = ppipe->params.source.filter.prefix.value_or(string()); prefix_refs.insert(make_pair(prefix, ppipe)); for (auto& t : ppipe->params.source.filter.tags) { string tag = t.key + "=" + t.value; auto titer = tag_refs.find(tag); if (titer != tag_refs.end() && ppipe->params.priority > titer->second->params.priority) { titer->second = ppipe; } else { 
tag_refs[tag] = ppipe; } } } bool RGWBucketSyncFlowManager::pipe_rules::find_basic_info_without_tags(const rgw_obj_key& key, std::optional<rgw_user> *user, std::optional<rgw_user> *acl_translation_owner, std::optional<string> *storage_class, rgw_sync_pipe_params::Mode *mode, bool *need_more_info) const { std::optional<string> owner; *need_more_info = false; if (prefix_refs.empty()) { return false; } auto end = prefix_refs.upper_bound(key.name); auto iter = end; if (iter != prefix_refs.begin()) { --iter; } if (iter == prefix_refs.end()) { return false; } if (iter != prefix_refs.begin()) { iter = prefix_refs.find(iter->first); /* prefix_refs is multimap, find first element holding that key */ } std::vector<decltype(iter)> iters; std::optional<int> priority; for (; iter != end; ++iter) { auto& prefix = iter->first; if (!boost::starts_with(key.name, prefix)) { continue; } auto& rule_params = iter->second->params; auto& filter = rule_params.source.filter; if (rule_params.priority > priority) { priority = rule_params.priority; if (!filter.has_tags()) { iters.clear(); } iters.push_back(iter); *need_more_info = filter.has_tags(); /* if highest priority filter has tags, then we can't be sure if it would be used. 
We need to first read the info from the source object */ } } if (iters.empty()) { return false; } std::optional<rgw_user> _user; std::optional<rgw_sync_pipe_acl_translation> _acl_translation; std::optional<string> _storage_class; rgw_sync_pipe_params::Mode _mode{rgw_sync_pipe_params::Mode::MODE_SYSTEM}; // make sure all params are the same by saving the first one // encountered and comparing all subsequent to it bool first_iter = true; for (auto& iter : iters) { const rgw_sync_pipe_params& rule_params = iter->second->params; if (first_iter) { _user = rule_params.user; _acl_translation = rule_params.dest.acl_translation; _storage_class = rule_params.dest.storage_class; _mode = rule_params.mode; first_iter = false; } else { // note: three of these == operators are comparing std::optional // against std::optional; as one would expect they are equal a) // if both do not contain values or b) if both do and those // contained values are the same const bool conflict = !(_user == rule_params.user && _acl_translation == rule_params.dest.acl_translation && _storage_class == rule_params.dest.storage_class && _mode == rule_params.mode); if (conflict) { *need_more_info = true; return false; } } } *user = _user; if (_acl_translation) { *acl_translation_owner = _acl_translation->owner; } *storage_class = _storage_class; *mode = _mode; return true; } bool RGWBucketSyncFlowManager::pipe_rules::find_obj_params(const rgw_obj_key& key, const RGWObjTags::tag_map_t& tags, rgw_sync_pipe_params *params) const { if (prefix_refs.empty()) { return false; } auto iter = prefix_refs.upper_bound(key.name); if (iter != prefix_refs.begin()) { --iter; } if (iter == prefix_refs.end()) { return false; } auto end = prefix_refs.upper_bound(key.name); auto max = end; std::optional<int> priority; for (; iter != end; ++iter) { /* NOTE: this is not the most efficient way to do it, * a trie data structure would be better */ auto& prefix = iter->first; if (!boost::starts_with(key.name, prefix)) { continue; } 
auto& rule_params = iter->second->params; auto& filter = rule_params.source.filter; if (!filter.check_tags(tags)) { continue; } if (rule_params.priority > priority) { priority = rule_params.priority; max = iter; } } if (max == end) { return false; } *params = max->second->params; return true; } /* * return either the current prefix for s, or the next one if s is not within a prefix */ RGWBucketSyncFlowManager::pipe_rules::prefix_map_t::const_iterator RGWBucketSyncFlowManager::pipe_rules::prefix_search(const std::string& s) const { if (prefix_refs.empty()) { return prefix_refs.end(); } auto next = prefix_refs.upper_bound(s); auto iter = next; if (iter != prefix_refs.begin()) { --iter; } if (!boost::starts_with(s, iter->first)) { return next; } return iter; } void RGWBucketSyncFlowManager::pipe_set::insert(const rgw_sync_bucket_pipe& pipe) { /* Ensure this pipe doesn't match with any disabled pipes */ for (auto p: disabled_pipe_map) { if (p.second.source.match(pipe.source) && p.second.dest.match(pipe.dest)) { return; } } pipe_map.insert(make_pair(pipe.id, pipe)); auto& rules_ref = rules[endpoints_pair(pipe)]; if (!rules_ref) { rules_ref = make_shared<RGWBucketSyncFlowManager::pipe_rules>(); } rules_ref->insert(pipe); pipe_handler h(rules_ref, pipe); handlers.insert(h); } void RGWBucketSyncFlowManager::pipe_set::remove_all() { pipe_map.clear(); disabled_pipe_map.clear(); rules.clear(); handlers.clear(); } void RGWBucketSyncFlowManager::pipe_set::disable(const rgw_sync_bucket_pipe& pipe) { /* This pipe is disabled. 
Add it to disabled pipes & remove any * matching pipes already inserted */ disabled_pipe_map.insert(make_pair(pipe.id, pipe)); for (auto iter_p = pipe_map.begin(); iter_p != pipe_map.end(); ) { auto p = iter_p++; if (p->second.source.match(pipe.source) && p->second.dest.match(pipe.dest)) { auto& rules_ref = rules[endpoints_pair(p->second)]; if (rules_ref) { pipe_handler h(rules_ref, p->second); handlers.erase(h); } rules.erase(endpoints_pair(p->second)); pipe_map.erase(p); } } } void RGWBucketSyncFlowManager::pipe_set::dump(ceph::Formatter *f) const { encode_json("pipes", pipe_map, f); } bool RGWBucketSyncFlowManager::allowed_data_flow(const rgw_zone_id& source_zone, std::optional<rgw_bucket> source_bucket, const rgw_zone_id& dest_zone, std::optional<rgw_bucket> dest_bucket, bool check_activated) const { bool found = false; bool found_activated = false; for (auto m : flow_groups) { auto& fm = m.second; auto pipes = fm.find_pipes(source_zone, source_bucket, dest_zone, dest_bucket); bool is_found = !pipes.empty(); if (is_found) { switch (fm.status) { case rgw_sync_policy_group::Status::FORBIDDEN: return false; case rgw_sync_policy_group::Status::ENABLED: found = true; found_activated = true; break; case rgw_sync_policy_group::Status::ALLOWED: found = true; break; default: break; /* unknown -- ignore */ } } } if (check_activated && found_activated) { return true; } return found; } void RGWBucketSyncFlowManager::init(const DoutPrefixProvider *dpp, const rgw_sync_policy_info& sync_policy) { std::optional<rgw_sync_data_flow_group> default_flow; if (parent) { default_flow.emplace(); default_flow->init_default(parent->all_zones); } for (auto& item : sync_policy.groups) { auto& group = item.second; auto& flow_group_map = flow_groups[group.id]; flow_group_map.init(dpp, cct, zone_id, bucket, group, (default_flow ? 
&(*default_flow) : nullptr), &all_zones, [&](const rgw_zone_id& source_zone, std::optional<rgw_bucket> source_bucket, const rgw_zone_id& dest_zone, std::optional<rgw_bucket> dest_bucket) { if (!parent) { return true; } return parent->allowed_data_flow(source_zone, source_bucket, dest_zone, dest_bucket, false); /* just check that it's not disabled */ }); } } /* * These are the semantics to be followed while resolving the policy * conflicts - * * ================================================== * zonegroup bucket Result * ================================================== * enabled enabled enabled * allowed enabled * forbidden disabled * allowed enabled enabled * allowed disabled * forbidden disabled * forbidden enabled disabled * allowed disabled * forbidden disabled * * In case multiple group policies are set to reflect for any sync pair * (<source-zone,source-bucket>, <dest-zone,dest-bucket>), the following * rules are applied in the order- * 1) Even if one policy status is FORBIDDEN, the sync will be disabled * 2) Atleast one policy should be ENABLED for the sync to be allowed. * */ void RGWBucketSyncFlowManager::reflect(const DoutPrefixProvider *dpp, std::optional<rgw_bucket> effective_bucket, RGWBucketSyncFlowManager::pipe_set *source_pipes, RGWBucketSyncFlowManager::pipe_set *dest_pipes, bool only_enabled) const { string effective_bucket_key; bool is_forbidden = false; if (effective_bucket) { effective_bucket_key = effective_bucket->get_key(); } if (parent) { parent->reflect(dpp, effective_bucket, source_pipes, dest_pipes, only_enabled); } for (auto& item : flow_groups) { auto& flow_group_map = item.second; is_forbidden = false; if (flow_group_map.status == rgw_sync_policy_group::Status::FORBIDDEN) { /* FORBIDDEN takes precedence over all the other rules. * Remove any other pipes which may allow access. 
*/ is_forbidden = true; } else if (flow_group_map.status != rgw_sync_policy_group::Status::ENABLED && (only_enabled || flow_group_map.status != rgw_sync_policy_group::Status::ALLOWED)) { /* only return enabled groups */ continue; } for (auto& entry : flow_group_map.sources) { rgw_sync_bucket_pipe pipe = entry.second; if (!pipe.dest.match_bucket(effective_bucket)) { continue; } pipe.source.apply_bucket(effective_bucket); pipe.dest.apply_bucket(effective_bucket); if (is_forbidden) { ldpp_dout(dpp, 20) << __func__ << "(): flow manager (bucket=" << effective_bucket_key << "): removing source pipe: " << pipe << dendl; source_pipes->disable(pipe); } else { ldpp_dout(dpp, 20) << __func__ << "(): flow manager (bucket=" << effective_bucket_key << "): adding source pipe: " << pipe << dendl; source_pipes->insert(pipe); } } for (auto& entry : flow_group_map.dests) { rgw_sync_bucket_pipe pipe = entry.second; if (!pipe.source.match_bucket(effective_bucket)) { continue; } pipe.source.apply_bucket(effective_bucket); pipe.dest.apply_bucket(effective_bucket); if (is_forbidden) { ldpp_dout(dpp, 20) << __func__ << "(): flow manager (bucket=" << effective_bucket_key << "): removing dest pipe: " << pipe << dendl; dest_pipes->disable(pipe); } else { ldpp_dout(dpp, 20) << __func__ << "(): flow manager (bucket=" << effective_bucket_key << "): adding dest pipe: " << pipe << dendl; dest_pipes->insert(pipe); } } } } RGWBucketSyncFlowManager::RGWBucketSyncFlowManager(CephContext *_cct, const rgw_zone_id& _zone_id, std::optional<rgw_bucket> _bucket, const RGWBucketSyncFlowManager *_parent) : cct(_cct), zone_id(_zone_id), bucket(_bucket), parent(_parent) {} void RGWSyncPolicyCompat::convert_old_sync_config(RGWSI_Zone *zone_svc, RGWSI_SyncModules *sync_modules_svc, rgw_sync_policy_info *ppolicy) { bool found = false; rgw_sync_policy_info policy; auto& group = policy.groups["default"]; auto& zonegroup = zone_svc->get_zonegroup(); for (const auto& ziter1 : zonegroup.zones) { auto& id1 = 
ziter1.first; const RGWZone& z1 = ziter1.second; for (const auto& ziter2 : zonegroup.zones) { auto& id2 = ziter2.first; const RGWZone& z2 = ziter2.second; if (id1 == id2) { continue; } if (z1.syncs_from(z2.name)) { found = true; rgw_sync_directional_rule *rule; group.data_flow.find_or_create_directional(id2, id1, &rule); } } } if (!found) { /* nothing syncs */ return; } rgw_sync_bucket_pipes pipes; pipes.id = "all"; pipes.source.all_zones = true; pipes.dest.all_zones = true; group.pipes.emplace_back(std::move(pipes)); group.status = rgw_sync_policy_group::Status::ENABLED; *ppolicy = std::move(policy); } RGWBucketSyncPolicyHandler::RGWBucketSyncPolicyHandler(RGWSI_Zone *_zone_svc, RGWSI_SyncModules *sync_modules_svc, RGWSI_Bucket_Sync *_bucket_sync_svc, std::optional<rgw_zone_id> effective_zone) : zone_svc(_zone_svc) , bucket_sync_svc(_bucket_sync_svc) { zone_id = effective_zone.value_or(zone_svc->zone_id()); flow_mgr.reset(new RGWBucketSyncFlowManager(zone_svc->ctx(), zone_id, nullopt, nullptr)); sync_policy = zone_svc->get_zonegroup().sync_policy; if (sync_policy.empty()) { RGWSyncPolicyCompat::convert_old_sync_config(zone_svc, sync_modules_svc, &sync_policy); legacy_config = true; } } RGWBucketSyncPolicyHandler::RGWBucketSyncPolicyHandler(const RGWBucketSyncPolicyHandler *_parent, const RGWBucketInfo& _bucket_info, map<string, bufferlist>&& _bucket_attrs) : parent(_parent), bucket_info(_bucket_info), bucket_attrs(std::move(_bucket_attrs)) { if (_bucket_info.sync_policy) { sync_policy = *_bucket_info.sync_policy; for (auto& entry : sync_policy.groups) { for (auto& pipe : entry.second.pipes) { if (pipe.params.mode == rgw_sync_pipe_params::MODE_USER && pipe.params.user.empty()) { pipe.params.user = _bucket_info.owner; } } } } legacy_config = parent->legacy_config; bucket = _bucket_info.bucket; zone_svc = parent->zone_svc; bucket_sync_svc = parent->bucket_sync_svc; flow_mgr.reset(new RGWBucketSyncFlowManager(zone_svc->ctx(), parent->zone_id, _bucket_info.bucket, 
parent->flow_mgr.get())); } RGWBucketSyncPolicyHandler::RGWBucketSyncPolicyHandler(const RGWBucketSyncPolicyHandler *_parent, const rgw_bucket& _bucket, std::optional<rgw_sync_policy_info> _sync_policy) : parent(_parent) { if (_sync_policy) { sync_policy = *_sync_policy; } legacy_config = parent->legacy_config; bucket = _bucket; zone_svc = parent->zone_svc; bucket_sync_svc = parent->bucket_sync_svc; flow_mgr.reset(new RGWBucketSyncFlowManager(zone_svc->ctx(), parent->zone_id, _bucket, parent->flow_mgr.get())); } RGWBucketSyncPolicyHandler *RGWBucketSyncPolicyHandler::alloc_child(const RGWBucketInfo& bucket_info, map<string, bufferlist>&& bucket_attrs) const { return new RGWBucketSyncPolicyHandler(this, bucket_info, std::move(bucket_attrs)); } RGWBucketSyncPolicyHandler *RGWBucketSyncPolicyHandler::alloc_child(const rgw_bucket& bucket, std::optional<rgw_sync_policy_info> sync_policy) const { return new RGWBucketSyncPolicyHandler(this, bucket, sync_policy); } int RGWBucketSyncPolicyHandler::init(const DoutPrefixProvider *dpp, optional_yield y) { int r = bucket_sync_svc->get_bucket_sync_hints(dpp, bucket.value_or(rgw_bucket()), &source_hints, &target_hints, y); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to initialize bucket sync policy handler: get_bucket_sync_hints() on bucket=" << bucket << " returned r=" << r << dendl; return r; } flow_mgr->init(dpp, sync_policy); reflect(dpp, &source_pipes, &target_pipes, &sources, &targets, &source_zones, &target_zones, true); return 0; } void RGWBucketSyncPolicyHandler::reflect(const DoutPrefixProvider *dpp, RGWBucketSyncFlowManager::pipe_set *psource_pipes, RGWBucketSyncFlowManager::pipe_set *ptarget_pipes, map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> *psources, map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> *ptargets, std::set<rgw_zone_id> *psource_zones, std::set<rgw_zone_id> *ptarget_zones, bool only_enabled) const { RGWBucketSyncFlowManager::pipe_set _source_pipes; RGWBucketSyncFlowManager::pipe_set 
_target_pipes; map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> _sources; map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> _targets; std::set<rgw_zone_id> _source_zones; std::set<rgw_zone_id> _target_zones; flow_mgr->reflect(dpp, bucket, &_source_pipes, &_target_pipes, only_enabled); for (auto& entry : _source_pipes.pipe_map) { auto& pipe = entry.second; if (!pipe.source.zone) { continue; } _source_zones.insert(*pipe.source.zone); _sources[*pipe.source.zone].insert(pipe); } for (auto& entry : _target_pipes.pipe_map) { auto& pipe = entry.second; if (!pipe.dest.zone) { continue; } _target_zones.insert(*pipe.dest.zone); _targets[*pipe.dest.zone].insert(pipe); } if (psource_pipes) { *psource_pipes = std::move(_source_pipes); } if (ptarget_pipes) { *ptarget_pipes = std::move(_target_pipes); } if (psources) { *psources = std::move(_sources); } if (ptargets) { *ptargets = std::move(_targets); } if (psource_zones) { *psource_zones = std::move(_source_zones); } if (ptarget_zones) { *ptarget_zones = std::move(_target_zones); } } multimap<rgw_zone_id, rgw_sync_bucket_pipe> RGWBucketSyncPolicyHandler::get_all_sources() const { multimap<rgw_zone_id, rgw_sync_bucket_pipe> m; for (auto& source_entry : sources) { auto& zone_id = source_entry.first; auto& pipes = source_entry.second.pipe_map; for (auto& entry : pipes) { auto& pipe = entry.second; m.insert(make_pair(zone_id, pipe)); } } for (auto& pipe : resolved_sources) { if (!pipe.source.zone) { continue; } m.insert(make_pair(*pipe.source.zone, pipe)); } return m; } multimap<rgw_zone_id, rgw_sync_bucket_pipe> RGWBucketSyncPolicyHandler::get_all_dests() const { multimap<rgw_zone_id, rgw_sync_bucket_pipe> m; for (auto& dest_entry : targets) { auto& zone_id = dest_entry.first; auto& pipes = dest_entry.second.pipe_map; for (auto& entry : pipes) { auto& pipe = entry.second; m.insert(make_pair(zone_id, pipe)); } } for (auto& pipe : resolved_dests) { if (!pipe.dest.zone) { continue; } m.insert(make_pair(*pipe.dest.zone, pipe)); 
} return m; } multimap<rgw_zone_id, rgw_sync_bucket_pipe> RGWBucketSyncPolicyHandler::get_all_dests_in_zone(const rgw_zone_id& zone_id) const { multimap<rgw_zone_id, rgw_sync_bucket_pipe> m; auto iter = targets.find(zone_id); if (iter != targets.end()) { auto& pipes = iter->second.pipe_map; for (auto& entry : pipes) { auto& pipe = entry.second; m.insert(make_pair(zone_id, pipe)); } } for (auto& pipe : resolved_dests) { if (!pipe.dest.zone || *pipe.dest.zone != zone_id) { continue; } m.insert(make_pair(*pipe.dest.zone, pipe)); } return m; } void RGWBucketSyncPolicyHandler::get_pipes(std::set<rgw_sync_bucket_pipe> *_sources, std::set<rgw_sync_bucket_pipe> *_targets, std::optional<rgw_sync_bucket_entity> filter_peer) { /* return raw pipes */ for (auto& entry : source_pipes.pipe_map) { auto& source_pipe = entry.second; if (!filter_peer || source_pipe.source.match(*filter_peer)) { _sources->insert(source_pipe); } } for (auto& entry : target_pipes.pipe_map) { auto& target_pipe = entry.second; if (!filter_peer || target_pipe.dest.match(*filter_peer)) { _targets->insert(target_pipe); } } } bool RGWBucketSyncPolicyHandler::bucket_exports_data() const { if (!bucket) { return false; } if (!zone_svc->sync_module_exports_data()) { return false; } if (bucket_is_sync_source()) { return true; } return (zone_svc->need_to_log_data() && bucket_info->datasync_flag_enabled()); } bool RGWBucketSyncPolicyHandler::bucket_imports_data() const { return bucket_is_sync_target(); }
34,004
32.370952
147
cc
null
ceph-main/src/rgw/driver/rados/rgw_bucket_sync.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2018 Red Hat, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#pragma once

#include "rgw_common.h"
#include "rgw_sync_policy.h"

class RGWSI_Zone;
class RGWSI_SyncModules;
class RGWSI_Bucket_Sync;

struct rgw_sync_group_pipe_map;
struct rgw_sync_bucket_pipes;
struct rgw_sync_policy_info;

/*
 * Per-group view of the sync pipes that involve a given zone (and
 * optionally a given bucket): which pipes the zone pulls from (sources)
 * and which pipes pull from it (dests).
 */
struct rgw_sync_group_pipe_map {
  rgw_zone_id zone;                 // the zone this map is built for
  std::optional<rgw_bucket> bucket; // optional bucket scope

  // group status; starts UNKNOWN until init() reads the policy group
  rgw_sync_policy_group::Status status{rgw_sync_policy_group::Status::UNKNOWN};

  using zb_pipe_map_t = std::multimap<rgw_sync_bucket_entity, rgw_sync_bucket_pipe>;

  zb_pipe_map_t sources; /* all the pipes where zone is pulling from */
  zb_pipe_map_t dests; /* all the pipes that pull from zone */

  // accumulator (owned by the caller) of every zone seen while building
  std::set<rgw_zone_id> *pall_zones{nullptr};
  rgw_sync_data_flow_group *default_flow{nullptr}; /* flow to use if policy doesn't define it,
                                                      used in the case of bucket sync policy,
                                                      not at the zonegroup level */

  void dump(ceph::Formatter *f) const;

  // add the pipes that connect source_zone -> dest_zone into *pipe_map,
  // subject to the two filter callbacks
  template <typename CB1, typename CB2>
  void try_add_to_pipe_map(const rgw_zone_id& source_zone,
                           const rgw_zone_id& dest_zone,
                           const std::vector<rgw_sync_bucket_pipes>& pipes,
                           zb_pipe_map_t *pipe_map,
                           CB1 filter_cb,
                           CB2 call_filter_cb);

  // convenience wrapper: add to `sources`
  template <typename CB>
  void try_add_source(const rgw_zone_id& source_zone,
                      const rgw_zone_id& dest_zone,
                      const std::vector<rgw_sync_bucket_pipes>& pipes,
                      CB filter_cb);

  // convenience wrapper: add to `dests`
  template <typename CB>
  void try_add_dest(const rgw_zone_id& source_zone,
                    const rgw_zone_id& dest_zone,
                    const std::vector<rgw_sync_bucket_pipes>& pipes,
                    CB filter_cb);

  // locate the [begin, end) range of pipes in `m` matching zone/bucket
  std::pair<zb_pipe_map_t::const_iterator, zb_pipe_map_t::const_iterator> find_pipes(const zb_pipe_map_t& m,
                                                                                     const rgw_zone_id& zone,
                                                                                     std::optional<rgw_bucket> b) const;

  // build the map from a sync policy group for zone/bucket
  template <typename CB>
  void init(const DoutPrefixProvider *dpp, CephContext *cct,
            const rgw_zone_id& _zone,
            std::optional<rgw_bucket> _bucket,
            const rgw_sync_policy_group& group,
            rgw_sync_data_flow_group *_default_flow,
            std::set<rgw_zone_id> *_pall_zones,
            CB filter_cb);

  /*
   * find all relevant pipes in our zone that match {dest_bucket} <- {source_zone, source_bucket}
   */
  std::vector<rgw_sync_bucket_pipe> find_source_pipes(const rgw_zone_id& source_zone,
                                                      std::optional<rgw_bucket> source_bucket,
                                                      std::optional<rgw_bucket> dest_bucket) const;

  /*
   * find all relevant pipes in other zones that pull from a specific
   * source bucket in our zone {source_bucket} -> {dest_zone, dest_bucket}
   */
  std::vector<rgw_sync_bucket_pipe> find_dest_pipes(std::optional<rgw_bucket> source_bucket,
                                                    const rgw_zone_id& dest_zone,
                                                    std::optional<rgw_bucket> dest_bucket) const;

  /*
   * find all relevant pipes from {source_zone, source_bucket} -> {dest_zone, dest_bucket}
   */
  std::vector<rgw_sync_bucket_pipe> find_pipes(const rgw_zone_id& source_zone,
                                               std::optional<rgw_bucket> source_bucket,
                                               const rgw_zone_id& dest_zone,
                                               std::optional<rgw_bucket> dest_bucket) const;
};

// Translates pre-sync-policy ("old") sync configuration into the newer
// rgw_sync_policy_info representation.
class RGWSyncPolicyCompat {
public:
  static void convert_old_sync_config(RGWSI_Zone *zone_svc,
                                      RGWSI_SyncModules *sync_modules_svc,
                                      rgw_sync_policy_info *ppolicy);
};

/*
 * Builds and answers queries about the data-flow graph for a zone
 * (and optionally a bucket): which pipes are allowed/active between
 * zones and buckets, per sync policy group.
 */
class RGWBucketSyncFlowManager {
  friend class RGWBucketSyncPolicyHandler;
public:
  // a (source, dest) endpoint pair; ordered so it can key std::map
  struct endpoints_pair {
    rgw_sync_bucket_entity source;
    rgw_sync_bucket_entity dest;

    endpoints_pair() {}
    endpoints_pair(const rgw_sync_bucket_pipe& pipe) {
      source = pipe.source;
      dest = pipe.dest;
    }

    // lexicographic ordering: source first, then dest
    bool operator<(const endpoints_pair& e) const {
      if (source < e.source) {
        return true;
      }
      if (e.source < source) {
        return false;
      }
      return (dest < e.dest);
    }
  };

  /*
   * pipe_rules: deal with a set of pipes that have common endpoints_pair
   */
  class pipe_rules {
    std::list<rgw_sync_bucket_pipe> pipes;

  public:
    using prefix_map_t = std::multimap<std::string, rgw_sync_bucket_pipe *>;

    // lookup tables into `pipes`, keyed by object tag and key prefix
    std::map<std::string, rgw_sync_bucket_pipe *> tag_refs;
    prefix_map_t prefix_refs;

    void insert(const rgw_sync_bucket_pipe& pipe);

    // resolve per-object sync params ignoring tags; sets *need_more_info
    // when tags are required to decide
    bool find_basic_info_without_tags(const rgw_obj_key& key,
                                      std::optional<rgw_user> *user,
                                      std::optional<rgw_user> *acl_translation,
                                      std::optional<std::string> *storage_class,
                                      rgw_sync_pipe_params::Mode *mode,
                                      bool *need_more_info) const;
    // resolve full per-object sync params using the object's tags
    bool find_obj_params(const rgw_obj_key& key,
                         const RGWObjTags::tag_map_t& tags,
                         rgw_sync_pipe_params *params) const;

    void scan_prefixes(std::vector<std::string> *prefixes) const;

    prefix_map_t::const_iterator prefix_begin() const {
      return prefix_refs.begin();
    }
    prefix_map_t::const_iterator prefix_search(const std::string& s) const;
    prefix_map_t::const_iterator prefix_end() const {
      return prefix_refs.end();
    }
  };

  using pipe_rules_ref = std::shared_ptr<pipe_rules>;

  /*
   * pipe_handler: extends endpoints_rule to point at the corresponding rules handler
   */
  struct pipe_handler : public endpoints_pair {
    pipe_rules_ref rules;

    pipe_handler() {}
    pipe_handler(pipe_rules_ref& _rules,
                 const rgw_sync_bucket_pipe& _pipe) : endpoints_pair(_pipe),
                                                      rules(_rules) {}

    // true when both endpoints name a specific bucket (no wildcards)
    bool specific() const {
      return source.specific() && dest.specific();
    }

    // delegate to the shared rules object; false if no rules attached
    bool find_basic_info_without_tags(const rgw_obj_key& key,
                                      std::optional<rgw_user> *user,
                                      std::optional<rgw_user> *acl_translation,
                                      std::optional<std::string> *storage_class,
                                      rgw_sync_pipe_params::Mode *mode,
                                      bool *need_more_info) const {
      if (!rules) {
        return false;
      }
      return rules->find_basic_info_without_tags(key, user, acl_translation, storage_class, mode, need_more_info);
    }

    bool find_obj_params(const rgw_obj_key& key,
                         const RGWObjTags::tag_map_t& tags,
                         rgw_sync_pipe_params *params) const {
      if (!rules) {
        return false;
      }
      return rules->find_obj_params(key, tags, params);
    }
  };

  // a collection of pipes plus their shared rule objects and handlers
  struct pipe_set {
    std::map<endpoints_pair, pipe_rules_ref> rules;
    // pipes keyed by string id; disabled ones kept separately
    std::multimap<std::string, rgw_sync_bucket_pipe> pipe_map;
    std::multimap<std::string, rgw_sync_bucket_pipe> disabled_pipe_map;

    std::set<pipe_handler> handlers;

    using iterator = std::set<pipe_handler>::iterator;

    void clear() {
      rules.clear();
      pipe_map.clear();
      disabled_pipe_map.clear();
      handlers.clear();
    }

    void insert(const rgw_sync_bucket_pipe& pipe);
    void remove_all();
    void disable(const rgw_sync_bucket_pipe& pipe);

    iterator begin() const {
      return handlers.begin();
    }

    iterator end() const {
      return handlers.end();
    }

    void dump(ceph::Formatter *f) const;
  };

private:
  CephContext *cct;

  rgw_zone_id zone_id;
  std::optional<rgw_bucket> bucket;

  // zonegroup-level manager when this one is bucket-scoped
  const RGWBucketSyncFlowManager *parent{nullptr};

  std::map<std::string, rgw_sync_group_pipe_map> flow_groups;

  std::set<rgw_zone_id> all_zones;

  bool allowed_data_flow(const rgw_zone_id& source_zone,
                         std::optional<rgw_bucket> source_bucket,
                         const rgw_zone_id& dest_zone,
                         std::optional<rgw_bucket> dest_bucket,
                         bool check_activated) const;

  /*
   * find all the matching flows on a flow map for a specific bucket
   */
  void update_flow_maps(const rgw_sync_bucket_pipes& pipe);

  void init(const DoutPrefixProvider *dpp, const rgw_sync_policy_info& sync_policy);

public:

  RGWBucketSyncFlowManager(CephContext *_cct,
                           const rgw_zone_id& _zone_id,
                           std::optional<rgw_bucket> _bucket,
                           const RGWBucketSyncFlowManager *_parent);

  // compute the effective by-source and by-dest pipe sets for a bucket
  void reflect(const DoutPrefixProvider *dpp, std::optional<rgw_bucket> effective_bucket,
               pipe_set *flow_by_source,
               pipe_set *flow_by_dest,
               bool only_enabled) const;

};

// NOTE(review): prints dest before source ("dest -> source"); confirm the
// intended arrow direction matches readers' expectations.
static inline std::ostream& operator<<(std::ostream& os, const RGWBucketSyncFlowManager::endpoints_pair& e) {
  os << e.dest << " -> " << e.source;
  return os;
}

/*
 * Answers sync-policy questions for one bucket (or zone, when no bucket is
 * set): which zones/buckets it pulls from, which it pushes to, and the
 * per-pipe parameters.  Built hierarchically: a bucket handler has the
 * zone-level handler as `parent`.
 */
class RGWBucketSyncPolicyHandler {
  bool legacy_config{false};
  const RGWBucketSyncPolicyHandler *parent{nullptr};
  RGWSI_Zone *zone_svc;
  RGWSI_Bucket_Sync *bucket_sync_svc;
  rgw_zone_id zone_id;
  std::optional<RGWBucketInfo> bucket_info;
  std::optional<std::map<std::string, bufferlist> > bucket_attrs;
  std::optional<rgw_bucket> bucket;
  std::unique_ptr<RGWBucketSyncFlowManager> flow_mgr;
  rgw_sync_policy_info sync_policy;

  RGWBucketSyncFlowManager::pipe_set source_pipes;
  RGWBucketSyncFlowManager::pipe_set target_pipes;

  std::map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> sources; /* source pipes by source zone id */
  std::map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> targets; /* target pipes by target zone id */

  std::set<rgw_zone_id> source_zones;
  std::set<rgw_zone_id> target_zones;

  std::set<rgw_bucket> source_hints;
  std::set<rgw_bucket> target_hints;
  // hints resolved into concrete pipes (set via set_resolved_hints())
  std::set<rgw_sync_bucket_pipe> resolved_sources;
  std::set<rgw_sync_bucket_pipe> resolved_dests;


  bool bucket_is_sync_source() const {
    return !targets.empty() || !resolved_dests.empty();
  }

  bool bucket_is_sync_target() const {
    return !sources.empty() || !resolved_sources.empty();
  }

  // child-handler constructors; use alloc_child() instead
  RGWBucketSyncPolicyHandler(const RGWBucketSyncPolicyHandler *_parent,
                             const RGWBucketInfo& _bucket_info,
                             std::map<std::string, bufferlist>&& _bucket_attrs);

  RGWBucketSyncPolicyHandler(const RGWBucketSyncPolicyHandler *_parent,
                             const rgw_bucket& _bucket,
                             std::optional<rgw_sync_policy_info> _sync_policy);
public:
  RGWBucketSyncPolicyHandler(RGWSI_Zone *_zone_svc,
                             RGWSI_SyncModules *sync_modules_svc,
                             RGWSI_Bucket_Sync *bucket_sync_svc,
                             std::optional<rgw_zone_id> effective_zone = std::nullopt);

  RGWBucketSyncPolicyHandler *alloc_child(const RGWBucketInfo& bucket_info,
                                          std::map<std::string, bufferlist>&& bucket_attrs) const;
  RGWBucketSyncPolicyHandler *alloc_child(const rgw_bucket& bucket,
                                          std::optional<rgw_sync_policy_info> sync_policy) const;

  int init(const DoutPrefixProvider *dpp, optional_yield y);

  // export the computed pipe sets / zone sets through optional out-params
  void reflect(const DoutPrefixProvider *dpp, RGWBucketSyncFlowManager::pipe_set *psource_pipes,
               RGWBucketSyncFlowManager::pipe_set *ptarget_pipes,
               std::map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> *psources,
               std::map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set> *ptargets,
               std::set<rgw_zone_id> *psource_zones,
               std::set<rgw_zone_id> *ptarget_zones,
               bool only_enabled) const;

  void set_resolved_hints(std::set<rgw_sync_bucket_pipe>&& _resolved_sources,
                          std::set<rgw_sync_bucket_pipe>&& _resolved_dests) {
    resolved_sources = std::move(_resolved_sources);
    resolved_dests = std::move(_resolved_dests);
  }

  const std::set<rgw_sync_bucket_pipe>& get_resolved_source_hints() {
    return resolved_sources;
  }

  const std::set<rgw_sync_bucket_pipe>& get_resolved_dest_hints() {
    return resolved_dests;
  }

  const std::set<rgw_zone_id>& get_source_zones() const {
    return source_zones;
  }

  const std::set<rgw_zone_id>& get_target_zones() const {
    return target_zones;
  }

  const std::map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set>& get_sources() {
    return sources;
  }

  std::multimap<rgw_zone_id, rgw_sync_bucket_pipe> get_all_sources() const;
  std::multimap<rgw_zone_id, rgw_sync_bucket_pipe> get_all_dests() const;
  std::multimap<rgw_zone_id, rgw_sync_bucket_pipe> get_all_dests_in_zone(const rgw_zone_id& zone_id) const;

  const std::map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set>& get_targets() {
    return targets;
  }

  const std::optional<RGWBucketInfo>& get_bucket_info() const {
    return bucket_info;
  }

  const std::optional<std::map<std::string, bufferlist> >& get_bucket_attrs() const {
    return bucket_attrs;
  }

  void get_pipes(RGWBucketSyncFlowManager::pipe_set **_sources, RGWBucketSyncFlowManager::pipe_set **_targets) { /* return raw pipes (with zone name) */
    *_sources = &source_pipes;
    *_targets = &target_pipes;
  }
  void get_pipes(std::set<rgw_sync_bucket_pipe> *sources, std::set<rgw_sync_bucket_pipe> *targets,
                 std::optional<rgw_sync_bucket_entity> filter_peer);

  const std::set<rgw_bucket>& get_source_hints() const {
    return source_hints;
  }

  const std::set<rgw_bucket>& get_target_hints() const {
    return target_hints;
  }

  bool bucket_exports_data() const;
  bool bucket_imports_data() const;

  const rgw_sync_policy_info& get_sync_policy() const {
    return sync_policy;
  }

  bool is_legacy_config() const {
    return legacy_config;
  }
};
14,707
34.270983
152
h
null
ceph-main/src/rgw/driver/rados/rgw_cr_rados.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#include "include/compat.h"
#include "rgw_sal.h"
#include "rgw_zone.h"
#include "rgw_coroutine.h"
#include "rgw_cr_rados.h"
#include "rgw_sync_counters.h"
#include "rgw_bucket.h"
#include "rgw_datalog_notify.h"
#include "rgw_cr_rest.h"
#include "rgw_rest_conn.h"
#include "rgw_rados.h"
#include "services/svc_zone.h"
#include "services/svc_zone_utils.h"
#include "services/svc_sys_obj.h"
#include "services/svc_cls.h"

#include "cls/lock/cls_lock_client.h"
#include "cls/rgw/cls_rgw_client.h"

#include <boost/asio/yield.hpp>
#include <boost/container/flat_set.hpp>

#define dout_context g_ceph_context
#define dout_subsys ceph_subsys_rgw

using namespace std;

// Work-queue enqueue: refuse new work while shutting down; takes a ref on
// the request which is dropped by handle_request()/stop().
bool RGWAsyncRadosProcessor::RGWWQ::_enqueue(RGWAsyncRadosRequest *req) {
  if (processor->is_going_down()) {
    return false;
  }
  req->get();
  processor->m_req_queue.push_back(req);
  dout(20) << "enqueued request req=" << hex << req << dec << dendl;
  _dump_queue();
  return true;
}

bool RGWAsyncRadosProcessor::RGWWQ::_empty() {
  return processor->m_req_queue.empty();
}

// Pop the oldest queued request; NULL when the queue is empty.
RGWAsyncRadosRequest *RGWAsyncRadosProcessor::RGWWQ::_dequeue() {
  if (processor->m_req_queue.empty())
    return NULL;
  RGWAsyncRadosRequest *req = processor->m_req_queue.front();
  processor->m_req_queue.pop_front();
  dout(20) << "dequeued request req=" << hex << req << dec << dendl;
  _dump_queue();
  return req;
}

// Thread-pool worker entry: run the request, then release the throttle
// slot taken in queue().
void RGWAsyncRadosProcessor::RGWWQ::_process(RGWAsyncRadosRequest *req, ThreadPool::TPHandle& handle) {
  processor->handle_request(this, req);
  processor->req_throttle.put(1);
}

// Debug helper: log the queue contents at level 20 (no-op unless that
// level is being gathered).
void RGWAsyncRadosProcessor::RGWWQ::_dump_queue() {
  if (!g_conf()->subsys.should_gather<ceph_subsys_rgw, 20>()) {
    return;
  }
  deque<RGWAsyncRadosRequest *>::iterator iter;
  if (processor->m_req_queue.empty()) {
    dout(20) << "RGWWQ: empty" << dendl;
    return;
  }
  dout(20) << "RGWWQ:" << dendl;
  for (iter = processor->m_req_queue.begin(); iter != processor->m_req_queue.end(); ++iter) {
    dout(20) << "req: " << hex << *iter << dec << dendl;
  }
}

// Throttle allows twice as many queued ops as worker threads.
RGWAsyncRadosProcessor::RGWAsyncRadosProcessor(CephContext *_cct, int num_threads)
  : cct(_cct), m_tp(cct, "RGWAsyncRadosProcessor::m_tp", "rados_async", num_threads),
    req_throttle(_cct, "rgw_async_rados_ops", num_threads * 2),
    req_wq(this,
	   ceph::make_timespan(g_conf()->rgw_op_thread_timeout),
	   ceph::make_timespan(g_conf()->rgw_op_thread_suicide_timeout),
	   &m_tp) {
}

void RGWAsyncRadosProcessor::start() {
  m_tp.start();
}

// Drain and stop the thread pool, then drop the queue's refs on any
// requests that never ran.
void RGWAsyncRadosProcessor::stop() {
  going_down = true;
  m_tp.drain(&req_wq);
  m_tp.stop();
  for (auto iter = m_req_queue.begin(); iter != m_req_queue.end(); ++iter) {
    (*iter)->put();
  }
}

// Execute one request and drop the ref taken at enqueue time.
void RGWAsyncRadosProcessor::handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req) {
  req->send_request(dpp);
  req->put();
}

// Blocks on the throttle when the pipeline is full.
void RGWAsyncRadosProcessor::queue(RGWAsyncRadosRequest *req) {
  req_throttle.get(1);
  req_wq.queue(req);
}

// Synchronous (worker-thread) read of a system object, optionally with
// its xattrs.
int RGWAsyncGetSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
  map<string, bufferlist> *pattrs = want_attrs ? &attrs : nullptr;

  auto sysobj = svc_sysobj->get_obj(obj);
  return sysobj.rop()
               .set_objv_tracker(&objv_tracker)
               .set_attrs(pattrs)
               .set_raw_attrs(raw_attrs)
               .read(dpp, &bl, null_yield);
}

RGWAsyncGetSystemObj::RGWAsyncGetSystemObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
                       RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                       bool want_attrs, bool raw_attrs)
  : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc_sysobj(_svc),
    obj(_obj), want_attrs(want_attrs), raw_attrs(raw_attrs)
{
  // copy the tracker so the async op does not touch the caller's instance
  if (_objv_tracker) {
    objv_tracker = *_objv_tracker;
  }
}

// Issue an async getxattrs; raw mode fills the caller's map directly,
// otherwise attrs are buffered and filtered in request_complete().
int RGWSimpleRadosReadAttrsCR::send_request(const DoutPrefixProvider *dpp)
{
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj
		       << ") ret=" << r << dendl;
    return r;
  }

  set_status() << "sending request";

  librados::ObjectReadOperation op;
  if (objv_tracker) {
    objv_tracker->prepare_op_for_read(&op);
  }

  if (raw_attrs && pattrs) {
    op.getxattrs(pattrs, nullptr);
  } else {
    op.getxattrs(&unfiltered_attrs, nullptr);
  }

  cn = stack->create_completion_notifier();
  return ref.pool.ioctx().aio_operate(ref.obj.oid, cn->completion(), &op,
				      nullptr);
}

int RGWSimpleRadosReadAttrsCR::request_complete()
{
  int ret = cn->completion()->get_return_value();
  set_status() << "request complete; ret=" << ret;
  // non-raw mode: keep only RGW_ATTR_PREFIX-prefixed attrs
  if (!raw_attrs && pattrs) {
    rgw_filter_attrset(unfiltered_attrs, RGW_ATTR_PREFIX, pattrs);
  }
  return ret;
}

// Synchronous write of a system object's data.
int RGWAsyncPutSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
  auto sysobj = svc->get_obj(obj);
  return sysobj.wop()
               .set_objv_tracker(&objv_tracker)
               .set_exclusive(exclusive)
               .write_data(dpp, bl, null_yield);
}

RGWAsyncPutSystemObj::RGWAsyncPutSystemObj(const DoutPrefixProvider *_dpp,
                     RGWCoroutine *caller,
                     RGWAioCompletionNotifier *cn,
                     RGWSI_SysObj *_svc,
                     RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                     bool _exclusive, bufferlist _bl)
  : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc),
    obj(_obj), exclusive(_exclusive), bl(std::move(_bl))
{
  if (_objv_tracker) {
    objv_tracker = *_objv_tracker;
  }
}

// Synchronous write of a system object's attributes only.
int RGWAsyncPutSystemObjAttrs::_send_request(const DoutPrefixProvider *dpp)
{
  auto sysobj = svc->get_obj(obj);
  return sysobj.wop()
               .set_objv_tracker(&objv_tracker)
               .set_exclusive(exclusive)
               .set_attrs(attrs)
               .write_attrs(dpp, null_yield);
}

RGWAsyncPutSystemObjAttrs::RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
                     RGWSI_SysObj *_svc,
                     RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                     map<string, bufferlist> _attrs, bool exclusive)
  : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), svc(_svc),
    obj(_obj), attrs(std::move(_attrs)), exclusive(exclusive)
{
  if (_objv_tracker) {
    objv_tracker = *_objv_tracker;
  }
}

RGWOmapAppend::RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
                             const rgw_raw_obj& _obj,
                             uint64_t _window_size)
  : RGWConsumerCR<string>(_store->ctx()), async_rados(_async_rados),
    store(_store), obj(_obj), going_down(false), num_pending_entries(0),
    window_size(_window_size), total_entries(0)
{
}

// Take (or renew) an exclusive cls lock on a rados object.
int RGWAsyncLockSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
  rgw_rados_ref ref;
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
    return r;
  }

  rados::cls::lock::Lock l(lock_name);
  utime_t duration(duration_secs, 0);
  l.set_duration(duration);
  l.set_cookie(cookie);
  l.set_may_renew(true);  // allow re-locking with the same cookie

  return l.lock_exclusive(&ref.pool.ioctx(), ref.obj.oid);
}

RGWAsyncLockSystemObj::RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
                      RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                       const string& _name, const string& _cookie, uint32_t _duration_secs) : RGWAsyncRadosRequest(caller, cn), store(_store),
                                                                                              obj(_obj),
                                                                                              lock_name(_name),
                                                                                              cookie(_cookie),
                                                                                              duration_secs(_duration_secs)
{
}

// Release a cls lock previously taken with the same name/cookie.
int RGWAsyncUnlockSystemObj::_send_request(const DoutPrefixProvider *dpp)
{
  rgw_rados_ref ref;
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
    return r;
  }

  rados::cls::lock::Lock l(lock_name);

  l.set_cookie(cookie);

  return l.unlock(&ref.pool.ioctx(), ref.obj.oid);
}

RGWAsyncUnlockSystemObj::RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
                                                 RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                                                 const string& _name, const string& _cookie) : RGWAsyncRadosRequest(caller, cn), store(_store),
  obj(_obj),
  lock_name(_name), cookie(_cookie)
{
}

RGWRadosSetOmapKeysCR::RGWRadosSetOmapKeysCR(rgw::sal::RadosStore* _store,
                      const rgw_raw_obj& _obj,
                      map<string, bufferlist>& _entries) : RGWSimpleCoroutine(_store->ctx()),
                                                store(_store),
                                                entries(_entries),
                                                obj(_obj), cn(NULL)
{
  // NOTE(review): the description inserts s.str() into itself and appends
  // "]" both before and after the key loop — the rendered description
  // looks malformed; confirm the intended format.
  stringstream& s = set_description();
  s << "set omap keys dest=" << obj << " keys=[" << s.str() << "]";
  for (auto i = entries.begin(); i != entries.end(); ++i) {
    if (i != entries.begin()) {
      s << ", ";
    }
    s << i->first;
  }
  s << "]";
}

// Async omap_set of all entries on the target object.
int RGWRadosSetOmapKeysCR::send_request(const DoutPrefixProvider *dpp)
{
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
    return r;
  }

  set_status() << "sending request";

  librados::ObjectWriteOperation op;
  op.omap_set(entries);

  cn = stack->create_completion_notifier();
  return ref.pool.ioctx().aio_operate(ref.obj.oid, cn->completion(), &op);
}

int RGWRadosSetOmapKeysCR::request_complete()
{
  int r = cn->completion()->get_return_value();

  set_status() << "request complete; ret=" << r;

  return r;
}

RGWRadosGetOmapKeysCR::RGWRadosGetOmapKeysCR(rgw::sal::RadosStore* _store,
                      const rgw_raw_obj& _obj,
                      const string& _marker,
                      int _max_entries,
                      ResultPtr _result)
  : RGWSimpleCoroutine(_store->ctx()), store(_store), obj(_obj),
    marker(_marker), max_entries(_max_entries),
    result(std::move(_result))
{
  ceph_assert(result); // must be allocated
  set_description() << "get omap keys dest=" << obj << " marker=" << marker;
}

// Async omap key listing starting after `marker`; results land in
// result->entries, with result->more indicating truncation.
int RGWRadosGetOmapKeysCR::send_request(const DoutPrefixProvider *dpp) {
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
    return r;
  }

  set_status() << "send request";

  librados::ObjectReadOperation op;
  op.omap_get_keys2(marker, max_entries, &result->entries, &result->more, nullptr);

  // the notifier keeps `result` alive until the aio completes
  cn = stack->create_completion_notifier(result);
  return result->ref.pool.ioctx().aio_operate(result->ref.obj.oid, cn->completion(), &op, NULL);
}

int RGWRadosGetOmapKeysCR::request_complete()
{
  int r = cn->completion()->get_return_value();

  set_status() << "request complete; ret=" << r;

  return r;
}

RGWRadosGetOmapValsCR::RGWRadosGetOmapValsCR(rgw::sal::RadosStore* _store,
                      const rgw_raw_obj& _obj,
                      const string& _marker,
                      int _max_entries,
                      ResultPtr _result)
  : RGWSimpleCoroutine(_store->ctx()), store(_store), obj(_obj),
    marker(_marker), max_entries(_max_entries),
    result(std::move(_result))
{
  ceph_assert(result); // must be allocated
  set_description() << "get omap keys dest=" << obj << " marker=" << marker;
}

// Same as RGWRadosGetOmapKeysCR but fetches key+value pairs.
int RGWRadosGetOmapValsCR::send_request(const DoutPrefixProvider *dpp) {
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &result->ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
    return r;
  }

  set_status() << "send request";

  librados::ObjectReadOperation op;
  op.omap_get_vals2(marker, max_entries, &result->entries, &result->more, nullptr);

  cn = stack->create_completion_notifier(result);
  return result->ref.pool.ioctx().aio_operate(result->ref.obj.oid, cn->completion(), &op, NULL);
}

int RGWRadosGetOmapValsCR::request_complete()
{
  int r = cn->completion()->get_return_value();

  set_status() << "request complete; ret=" << r;

  return r;
}

RGWRadosRemoveOmapKeysCR::RGWRadosRemoveOmapKeysCR(rgw::sal::RadosStore* _store,
                      const rgw_raw_obj& _obj,
                      const set<string>& _keys) : RGWSimpleCoroutine(_store->ctx()),
                                                store(_store),
                                                keys(_keys),
                                                obj(_obj), cn(NULL)
{
  set_description() << "remove omap keys dest=" << obj << " keys=" << keys;
}

// Async removal of a set of omap keys.
int RGWRadosRemoveOmapKeysCR::send_request(const DoutPrefixProvider *dpp) {
  int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
    return r;
  }

  set_status() << "send request";

  librados::ObjectWriteOperation op;
  op.omap_rm_keys(keys);

  cn = stack->create_completion_notifier();
  return ref.pool.ioctx().aio_operate(ref.obj.oid, cn->completion(), &op);
}

int RGWRadosRemoveOmapKeysCR::request_complete()
{
  int r = cn->completion()->get_return_value();

  set_status() << "request complete; ret=" << r;

  return r;
}

RGWRadosRemoveCR::RGWRadosRemoveCR(rgw::sal::RadosStore* store, const rgw_raw_obj& obj,
                                   RGWObjVersionTracker* objv_tracker)
  : RGWSimpleCoroutine(store->ctx()),
    store(store), obj(obj), objv_tracker(objv_tracker)
{
  set_description() << "remove dest=" << obj;
}

// Async object removal via a fresh ioctx (with locator key), optionally
// guarded by an object-version check.
int RGWRadosRemoveCR::send_request(const DoutPrefixProvider *dpp)
{
  auto rados = store->getRados()->get_rados_handle();
  int r = rados->ioctx_create(obj.pool.name.c_str(), ioctx);
  if (r < 0) {
    lderr(cct) << "ERROR: failed to open pool (" << obj.pool.name << ") ret=" << r << dendl;
    return r;
  }
  ioctx.locator_set_key(obj.loc);

  set_status() << "send request";

  librados::ObjectWriteOperation op;
  if (objv_tracker) {
    objv_tracker->prepare_op_for_write(&op);
  }
  op.remove();

  cn = stack->create_completion_notifier();
  return ioctx.aio_operate(obj.oid, cn->completion(), &op);
}

int RGWRadosRemoveCR::request_complete()
{
  int r = cn->completion()->get_return_value();

  set_status() << "request complete; ret=" << r;

  return r;
}

RGWRadosRemoveOidCR::RGWRadosRemoveOidCR(rgw::sal::RadosStore* store,
					 librados::IoCtx&& ioctx,
					 std::string_view oid,
					 RGWObjVersionTracker* objv_tracker)
  : RGWSimpleCoroutine(store->ctx()), ioctx(std::move(ioctx)),
    oid(std::string(oid)), objv_tracker(objv_tracker)
{
  set_description() << "remove dest=" << oid;
}

RGWRadosRemoveOidCR::RGWRadosRemoveOidCR(rgw::sal::RadosStore* store,
					 RGWSI_RADOS::Obj& obj,
					 RGWObjVersionTracker* objv_tracker)
  : RGWSimpleCoroutine(store->ctx()),
    ioctx(librados::IoCtx(obj.get_ref().pool.ioctx())),
    oid(obj.get_ref().obj.oid),
    objv_tracker(objv_tracker)
{
  set_description() << "remove dest=" << oid;
}

RGWRadosRemoveOidCR::RGWRadosRemoveOidCR(rgw::sal::RadosStore* store,
					 RGWSI_RADOS::Obj&& obj,
					 RGWObjVersionTracker* objv_tracker)
  : RGWSimpleCoroutine(store->ctx()),
    ioctx(std::move(obj.get_ref().pool.ioctx())),
    oid(std::move(obj.get_ref().obj.oid)),
    objv_tracker(objv_tracker)
{
  set_description() << "remove dest=" << oid;
}

// Async removal by oid on an already-open ioctx.
int RGWRadosRemoveOidCR::send_request(const DoutPrefixProvider *dpp)
{
  librados::ObjectWriteOperation op;
  if (objv_tracker) {
    objv_tracker->prepare_op_for_write(&op);
  }
  op.remove();

  cn = stack->create_completion_notifier();
  return ioctx.aio_operate(oid, cn->completion(), &op);
}

int RGWRadosRemoveOidCR::request_complete()
{
  int r = cn->completion()->get_return_value();

  set_status() << "request complete; ret=" << r;

  return r;
}

RGWSimpleRadosLockCR::RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
                      const rgw_raw_obj& _obj,
                      const string& _lock_name,
                      const string& _cookie,
                      uint32_t _duration) : RGWSimpleCoroutine(_store->ctx()),
                                                async_rados(_async_rados),
                                                store(_store),
                                                lock_name(_lock_name),
                                                cookie(_cookie),
                                                duration(_duration),
                                                obj(_obj),
                                                req(nullptr)
{
  set_description() << "rados lock dest=" << obj << " lock=" << lock_name << " cookie=" << cookie << " duration=" << duration;
}

void RGWSimpleRadosLockCR::request_cleanup()
{
  if (req) {
    req->finish();
    req = NULL;
  }
}

// Dispatch the blocking lock call to the async-rados worker pool.
int RGWSimpleRadosLockCR::send_request(const DoutPrefixProvider *dpp)
{
  set_status() << "sending request";
  req = new RGWAsyncLockSystemObj(this, stack->create_completion_notifier(),
                                 store, NULL, obj, lock_name, cookie, duration);
  async_rados->queue(req);
  return 0;
}

int RGWSimpleRadosLockCR::request_complete()
{
  set_status() << "request complete; ret=" << req->get_ret_status();
  return req->get_ret_status();
}

RGWSimpleRadosUnlockCR::RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
                      const rgw_raw_obj& _obj,
                      const string& _lock_name,
                      const string& _cookie) : RGWSimpleCoroutine(_store->ctx()),
                                                async_rados(_async_rados),
                                                store(_store),
                                                lock_name(_lock_name),
                                                cookie(_cookie),
                                                obj(_obj),
                                                req(NULL)
{
  set_description() << "rados unlock dest=" << obj << " lock=" << lock_name << " cookie=" << cookie;
}

void RGWSimpleRadosUnlockCR::request_cleanup()
{
  if (req) {
    req->finish();
    req = NULL;
  }
}

// Dispatch the blocking unlock call to the async-rados worker pool.
int RGWSimpleRadosUnlockCR::send_request(const DoutPrefixProvider *dpp)
{
  set_status() << "sending request";

  req = new RGWAsyncUnlockSystemObj(this, stack->create_completion_notifier(),
                                 store, NULL, obj, lock_name, cookie);
  async_rados->queue(req);
  return 0;
}

int RGWSimpleRadosUnlockCR::request_complete()
{
  set_status() << "request complete; ret=" << req->get_ret_status();

  return req->get_ret_status();
}

// Consumer coroutine: batches produced entries and flushes them to the
// object's omap once `window_size` entries accumulate (or at shutdown).
int RGWOmapAppend::operate(const DoutPrefixProvider *dpp) {
  reenter(this) {
    for (;;) {
      if (!has_product() && going_down) {
        set_status() << "going down";
        break;
      }
      set_status() << "waiting for product";
      yield wait_for_product();
      yield {
        string entry;
        while (consume(&entry)) {
          set_status() << "adding entry: " << entry;
          entries[entry] = bufferlist();
          if (entries.size() >= window_size) {
            break;
          }
        }
        if (entries.size() >= window_size || going_down) {
          set_status() << "flushing to omap";
          call(new RGWRadosSetOmapKeysCR(store, obj, entries));
          entries.clear();
        }
      }
      if (get_ret_status() < 0) {
        ldout(cct, 0) << "ERROR: failed to store entries in omap" << dendl;
        return set_state(RGWCoroutine_Error);
      }
    }
    /* done with coroutine */
    return set_state(RGWCoroutine_Done);
  }
  return 0;
}

// Push the locally buffered entries into the consumer side of the CR.
void RGWOmapAppend::flush_pending() {
  receive(pending_entries);
  num_pending_entries = 0;
}

// Buffer an entry; auto-flush when the pending window fills up.
// Returns false once the coroutine has finished.
bool RGWOmapAppend::append(const string& s) {
  if (is_done()) {
    return false;
  }
  ++total_entries;
  pending_entries.push_back(s);
  if (++num_pending_entries >= (int)window_size) {
    flush_pending();
  }
  return true;
}

// Begin shutdown: flush whatever is pending and wake the coroutine.
bool RGWOmapAppend::finish() {
  going_down = true;
  flush_pending();
  set_sleeping(false);
  return (!is_done());
}

// Read bucket instance info; with a bucket_id go straight to the instance
// object, otherwise resolve through the bucket entrypoint.
int RGWAsyncGetBucketInstanceInfo::_send_request(const DoutPrefixProvider *dpp)
{
  int r;
  if (!bucket.bucket_id.empty()) {
    r = store->getRados()->get_bucket_instance_info(bucket, bucket_info, nullptr, &attrs, null_yield, dpp);
  } else {
    r = store->ctl()->bucket->read_bucket_info(bucket, &bucket_info, null_yield, dpp,
                                               RGWBucketCtl::BucketInstance::GetParams().set_attrs(&attrs));
  }
  if (r < 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to get bucket instance info for "
        << bucket << dendl;
    return r;
  }

  return 0;
}

int RGWAsyncPutBucketInstanceInfo::_send_request(const DoutPrefixProvider *dpp)
{
  auto r = store->getRados()->put_bucket_instance_info(bucket_info, exclusive,
						       mtime, attrs, dpp, null_yield);
  if (r < 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to put bucket instance info for "
		      << bucket_info.bucket << dendl;
    return r;
  }

  return 0;
}

// Markers are reduced to this shard's portion up front.
RGWRadosBILogTrimCR::RGWRadosBILogTrimCR(
  const DoutPrefixProvider *dpp,
  rgw::sal::RadosStore* store,
  const RGWBucketInfo& bucket_info,
  int shard_id,
  const rgw::bucket_index_layout_generation& generation,
  const std::string& start_marker,
  const std::string& end_marker)
  : RGWSimpleCoroutine(store->ctx()), bucket_info(bucket_info),
    shard_id(shard_id), generation(generation), bs(store->getRados()),
    start_marker(BucketIndexShardsManager::get_shard_marker(start_marker)),
    end_marker(BucketIndexShardsManager::get_shard_marker(end_marker))
{
}

// Issue the cls bilog-trim op against the bucket index shard object.
int RGWRadosBILogTrimCR::send_request(const DoutPrefixProvider *dpp)
{
  int r = bs.init(dpp, bucket_info, generation, shard_id, null_yield);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "ERROR: bucket shard init failed ret=" << r << dendl;
    return r;
  }

  bufferlist in;
  cls_rgw_bi_log_trim_op call;
  call.start_marker = std::move(start_marker);
  call.end_marker = std::move(end_marker);
  encode(call, in);

  librados::ObjectWriteOperation op;
  op.exec(RGW_CLASS, RGW_BI_LOG_TRIM, in);

  cn = stack->create_completion_notifier();
  return bs.bucket_obj.aio_operate(cn->completion(), &op);
}

int RGWRadosBILogTrimCR::request_complete()
{
  int r = cn->completion()->get_return_value();
  set_status() << "request complete; ret=" << r;
  return r;
}

// Pull one object from a remote zone into the local store, then publish a
// bucket notification and bump the sync perf counters.
int RGWAsyncFetchRemoteObj::_send_request(const DoutPrefixProvider *dpp)
{
  RGWObjectCtx obj_ctx(store);

  char buf[16];
  snprintf(buf, sizeof(buf), ".%lld", (long long)store->getRados()->instance_id());

  rgw::sal::Attrs attrs;

  rgw_obj src_obj(src_bucket, key);

  rgw::sal::RadosBucket dest_bucket(store, dest_bucket_info);
  rgw::sal::RadosObject dest_obj(store, dest_key.value_or(key), &dest_bucket);

  rgw_obj stat_dest_obj;

  // when following OLH, stat the source object rather than the dest
  if (!stat_follow_olh) {
    stat_dest_obj = dest_obj.get_obj();
  } else {
    stat_dest_obj = src_obj;
  }

  std::string etag;

  std::optional<uint64_t> bytes_transferred;
  int r = store->getRados()->fetch_remote_obj(obj_ctx,
                       user_id.value_or(rgw_user()),
                       NULL, /* req_info */
                       source_zone,
                       dest_obj.get_obj(),
                       src_obj,
                       dest_bucket_info, /* dest */
                       nullptr, /* source */
                       dest_placement_rule,
                       nullptr, /* real_time* src_mtime, */
                       NULL, /* real_time* mtime, */
                       NULL, /* const real_time* mod_ptr, */
                       NULL, /* const real_time* unmod_ptr, */
                       false, /* high precision time */
                       NULL, /* const char *if_match, */
                       NULL, /* const char *if_nomatch, */
                       RGWRados::ATTRSMOD_NONE,
                       copy_if_newer,
                       attrs,
                       RGWObjCategory::Main,
                       versioned_epoch,
                       real_time(), /* delete_at */
                       NULL, /* string *ptag, */
                       &etag, /* string *petag, */
                       NULL, /* void (*progress_cb)(off_t, void *), */
                       NULL, /* void *progress_data*); */
                       dpp,
                       filter.get(),
                       null_yield,
                       stat_follow_olh,
                       stat_dest_obj,
                       source_trace_entry,
                       &zones_trace,
                       &bytes_transferred);

  if (r < 0) {
    ldpp_dout(dpp, 0) << "store->fetch_remote_obj() returned r=" << r << dendl;
    if (counters) {
      counters->inc(sync_counters::l_fetch_err, 1);
    }
  } else { // r >= 0
    if (bytes_transferred) {
      // send notification that object was successfully synced
      std::string user_id = "rgw sync";
      std::string req_id = "0";

      RGWObjTags obj_tags;
      auto iter = attrs.find(RGW_ATTR_TAGS);
      if (iter != attrs.end()) {
        try {
          auto it = iter->second.cbegin();
          obj_tags.decode(it);
        } catch (buffer::error &err) {
          ldpp_dout(dpp, 1) << "ERROR: " << __func__ << ": caught buffer::error couldn't decode TagSet " << dendl;
        }
      }

      // NOTE: we create a mutable copy of bucket.get_tenant as the get_notification function expects a std::string&, not const
      std::string tenant(dest_bucket.get_tenant());

      std::unique_ptr<rgw::sal::Notification> notify
                     = store->get_notification(dpp, &dest_obj, nullptr, rgw::notify::ObjectSyncedCreate,
                    &dest_bucket, user_id, tenant, req_id, null_yield);

      auto notify_res = static_cast<rgw::sal::RadosNotification*>(notify.get())->get_reservation();
      int ret = rgw::notify::publish_reserve(dpp, rgw::notify::ObjectSyncedCreate, notify_res, &obj_tags);
      if (ret < 0) {
        ldpp_dout(dpp, 1) << "ERROR: reserving notification failed, with error: " << ret << dendl;
        // no need to return, the sync already happened
      } else {
        ret = rgw::notify::publish_commit(&dest_obj, *bytes_transferred, ceph::real_clock::now(), etag, dest_obj.get_instance(), rgw::notify::ObjectSyncedCreate, notify_res, dpp);
        if (ret < 0) {
          ldpp_dout(dpp, 1) << "ERROR: publishing notification failed, with error: " << ret << dendl;
        }
      }
    }

    if (counters) {
      if (bytes_transferred) {
        counters->inc(sync_counters::l_fetch, *bytes_transferred);
      } else {
        counters->inc(sync_counters::l_fetch_not_modified);
      }
    }
  }
  return r;
}

// Stat an object on a remote zone (mtime/size/attrs/headers/etag).
int RGWAsyncStatRemoteObj::_send_request(const DoutPrefixProvider *dpp)
{
  RGWObjectCtx obj_ctx(store);

  string user_id;
  char buf[16];
  snprintf(buf, sizeof(buf), ".%lld", (long long)store->getRados()->instance_id());

  rgw_obj src_obj(src_bucket, key);

  int r = store->getRados()->stat_remote_obj(dpp,
                       obj_ctx,
                       rgw_user(user_id),
                       nullptr, /* req_info */
                       source_zone,
                       src_obj,
                       nullptr, /* source */
                       pmtime, /* real_time* src_mtime, */
                       psize, /* uint64_t * */
                       nullptr, /* const real_time* mod_ptr, */
                       nullptr, /* const real_time* unmod_ptr, */
                       true, /* high precision time */
                       nullptr, /* const char *if_match, */
                       nullptr, /* const char *if_nomatch, */
                       pattrs,
                       pheaders,
                       nullptr,
                       nullptr, /* string *ptag, */
                       petag, null_yield); /* string *petag, */

  if (r < 0) {
    ldpp_dout(dpp, 0) << "store->stat_remote_obj() returned r=" << r << dendl;
  }
  return r;
}

// Delete a local object as part of sync; skips the delete if a newer
// local write raced with it (del_if_older).  NOTE(review): this function
// continues beyond this view — the final assignment is completed there.
int RGWAsyncRemoveObj::_send_request(const DoutPrefixProvider *dpp)
{
  ldpp_dout(dpp, 0) << __func__ << "(): deleting obj=" << obj << dendl;

  obj->set_atomic();

  RGWObjState *state;

  int ret = obj->get_obj_state(dpp, &state, null_yield);
  if (ret < 0) {
    ldpp_dout(dpp, 20) << __func__ << "(): get_obj_state() obj=" << obj << " returned ret=" << ret << dendl;
    return ret;
  }

  /* has there been any racing object write? */
  if (del_if_older && (state->mtime > timestamp)) {
    ldpp_dout(dpp, 20) << __func__ << "(): skipping object removal obj=" << obj << " (obj mtime=" << state->mtime << ", request timestamp=" << timestamp << ")" << dendl;
    return 0;
  }

  RGWAccessControlPolicy policy;

  /* decode policy */
  map<string, bufferlist>::iterator iter = state->attrset.find(RGW_ATTR_ACL);
  if (iter != state->attrset.end()) {
    auto bliter = iter->second.cbegin();
    try {
      policy.decode(bliter);
    } catch (buffer::error& err) {
      ldpp_dout(dpp, 0) << "ERROR: could not decode policy, caught buffer::error" << dendl;
      return -EIO;
    }
  }

  std::unique_ptr<rgw::sal::Object::DeleteOp> del_op = obj->get_delete_op();
  del_op->params.bucket_owner = bucket->get_info().owner;
  del_op->params.obj_owner = policy.get_owner();
  if (del_if_older) {
    del_op->params.unmod_since = timestamp;
  }
  if (versioned) {
    del_op->params.versioning_status = BUCKET_VERSIONED;
  }
  del_op->params.olh_epoch = versioned_epoch;
  del_op->params.marker_version_id =
marker_version_id; del_op->params.obj_owner.set_id(rgw_user(owner)); del_op->params.obj_owner.set_name(owner_display_name); del_op->params.mtime = timestamp; del_op->params.high_precision_time = true; del_op->params.zones_trace = &zones_trace; ret = del_op->delete_obj(dpp, null_yield); if (ret < 0) { ldpp_dout(dpp, 20) << __func__ << "(): delete_obj() obj=" << obj << " returned ret=" << ret << dendl; } return ret; } int RGWContinuousLeaseCR::operate(const DoutPrefixProvider *dpp) { if (aborted) { caller->set_sleeping(false); return set_cr_done(); } reenter(this) { last_renew_try_time = ceph::coarse_mono_clock::now(); while (!going_down) { current_time = ceph::coarse_mono_clock::now(); yield call(new RGWSimpleRadosLockCR(async_rados, store, obj, lock_name, cookie, interval)); if (latency) { latency->add_latency(ceph::coarse_mono_clock::now() - current_time); } current_time = ceph::coarse_mono_clock::now(); if (current_time - last_renew_try_time > interval_tolerance) { // renewal should happen between 50%-90% of interval ldout(store->ctx(), 1) << *this << ": WARNING: did not renew lock " << obj << ":" << lock_name << ": within 90\% of interval. 
" << (current_time - last_renew_try_time) << " > " << interval_tolerance << dendl; } last_renew_try_time = current_time; caller->set_sleeping(false); /* will only be relevant when we return, that's why we can do it early */ if (retcode < 0) { set_locked(false); ldout(store->ctx(), 20) << *this << ": couldn't lock " << obj << ":" << lock_name << ": retcode=" << retcode << dendl; return set_state(RGWCoroutine_Error, retcode); } ldout(store->ctx(), 20) << *this << ": successfully locked " << obj << ":" << lock_name << dendl; set_locked(true); yield wait(utime_t(interval / 2, 0)); } set_locked(false); /* moot at this point anyway */ current_time = ceph::coarse_mono_clock::now(); yield call(new RGWSimpleRadosUnlockCR(async_rados, store, obj, lock_name, cookie)); if (latency) { latency->add_latency(ceph::coarse_mono_clock::now() - current_time); } return set_state(RGWCoroutine_Done); } return 0; } RGWRadosTimelogAddCR::RGWRadosTimelogAddCR(const DoutPrefixProvider *_dpp, rgw::sal::RadosStore* _store, const string& _oid, const cls_log_entry& entry) : RGWSimpleCoroutine(_store->ctx()), dpp(_dpp), store(_store), oid(_oid), cn(NULL) { stringstream& s = set_description(); s << "timelog add entry oid=" << oid << "entry={id=" << entry.id << ", section=" << entry.section << ", name=" << entry.name << "}"; entries.push_back(entry); } int RGWRadosTimelogAddCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; cn = stack->create_completion_notifier(); return store->svc()->cls->timelog.add(dpp, oid, entries, cn->completion(), true, null_yield); } int RGWRadosTimelogAddCR::request_complete() { int r = cn->completion()->get_return_value(); set_status() << "request complete; ret=" << r; return r; } RGWRadosTimelogTrimCR::RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const std::string& oid, const real_time& start_time, const real_time& end_time, const std::string& from_marker, const std::string& to_marker) : 
RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), oid(oid), start_time(start_time), end_time(end_time), from_marker(from_marker), to_marker(to_marker) { set_description() << "timelog trim oid=" << oid << " start_time=" << start_time << " end_time=" << end_time << " from_marker=" << from_marker << " to_marker=" << to_marker; } int RGWRadosTimelogTrimCR::send_request(const DoutPrefixProvider *dpp) { set_status() << "sending request"; cn = stack->create_completion_notifier(); return store->svc()->cls->timelog.trim(dpp, oid, start_time, end_time, from_marker, to_marker, cn->completion(), null_yield); } int RGWRadosTimelogTrimCR::request_complete() { int r = cn->completion()->get_return_value(); set_status() << "request complete; ret=" << r; return r; } RGWSyncLogTrimCR::RGWSyncLogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const std::string& oid, const std::string& to_marker, std::string *last_trim_marker) : RGWRadosTimelogTrimCR(dpp, store, oid, real_time{}, real_time{}, std::string{}, to_marker), cct(store->ctx()), last_trim_marker(last_trim_marker) { } int RGWSyncLogTrimCR::request_complete() { int r = RGWRadosTimelogTrimCR::request_complete(); if (r != -ENODATA) { return r; } // nothing left to trim, update last_trim_marker if (*last_trim_marker < to_marker && to_marker != max_marker) { *last_trim_marker = to_marker; } return 0; } int RGWAsyncStatObj::_send_request(const DoutPrefixProvider *dpp) { rgw_raw_obj raw_obj; store->getRados()->obj_to_raw(bucket_info.placement_rule, obj, &raw_obj); return store->getRados()->raw_obj_stat(dpp, raw_obj, psize, pmtime, pepoch, nullptr, nullptr, objv_tracker, null_yield); } RGWStatObjCR::RGWStatObjCR(const DoutPrefixProvider *dpp, RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize, real_time* pmtime, uint64_t *pepoch, RGWObjVersionTracker *objv_tracker) : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store), 
async_rados(async_rados), bucket_info(_bucket_info), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) { } void RGWStatObjCR::request_cleanup() { if (req) { req->finish(); req = NULL; } } int RGWStatObjCR::send_request(const DoutPrefixProvider *dpp) { req = new RGWAsyncStatObj(dpp, this, stack->create_completion_notifier(), store, bucket_info, obj, psize, pmtime, pepoch, objv_tracker); async_rados->queue(req); return 0; } int RGWStatObjCR::request_complete() { return req->get_ret_status(); } RGWRadosNotifyCR::RGWRadosNotifyCR(rgw::sal::RadosStore* store, const rgw_raw_obj& obj, bufferlist& request, uint64_t timeout_ms, bufferlist *response) : RGWSimpleCoroutine(store->ctx()), store(store), obj(obj), request(request), timeout_ms(timeout_ms), response(response) { set_description() << "notify dest=" << obj; } int RGWRadosNotifyCR::send_request(const DoutPrefixProvider *dpp) { int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref); if (r < 0) { ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl; return r; } set_status() << "sending request"; cn = stack->create_completion_notifier(); return ref.pool.ioctx().aio_notify(ref.obj.oid, cn->completion(), request, timeout_ms, response); } int RGWRadosNotifyCR::request_complete() { int r = cn->completion()->get_return_value(); set_status() << "request complete; ret=" << r; return r; } int RGWDataPostNotifyCR::operate(const DoutPrefixProvider* dpp) { reenter(this) { using PostNotify2 = RGWPostRESTResourceCR<bc::flat_map<int, bc::flat_set<rgw_data_notify_entry>>, int>; yield { rgw_http_param_pair pairs[] = { { "type", "data" }, { "notify2", NULL }, { "source-zone", source_zone }, { NULL, NULL } }; call(new PostNotify2(store->ctx(), conn, &http_manager, "/admin/log", pairs, shards, nullptr)); } if (retcode == -ERR_METHOD_NOT_ALLOWED) { using PostNotify1 = RGWPostRESTResourceCR<rgw_data_notify_v1_encoder, int>; yield { rgw_http_param_pair pairs[] = { { 
"type", "data" }, { "notify", NULL }, { "source-zone", source_zone }, { NULL, NULL } }; auto encoder = rgw_data_notify_v1_encoder{shards}; call(new PostNotify1(store->ctx(), conn, &http_manager, "/admin/log", pairs, encoder, nullptr)); } } if (retcode < 0) { return set_cr_error(retcode); } return set_cr_done(); } return 0; }
38,770
31.968537
181
cc
null
ceph-main/src/rgw/driver/rados/rgw_cr_rados.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp

#pragma once

#include <boost/intrusive_ptr.hpp>
#include "include/ceph_assert.h"
#include "rgw_coroutine.h"
#include "rgw_sal.h"
#include "rgw_sal_rados.h"
#include "common/WorkQueue.h"
#include "common/Throttle.h"

#include <atomic>

#include "common/ceph_time.h"

#include "services/svc_sys_obj.h"
#include "services/svc_bucket.h"

struct rgw_http_param_pair;
class RGWRESTConn;

// Base class for a blocking operation executed on the RGWAsyncRadosProcessor
// thread pool. send_request() runs the subclass's _send_request() and then
// fires the completion notifier (once); finish() cancels delivery by dropping
// the notifier ref instead. The lock serializes notifier hand-off between the
// worker thread and the canceling caller.
class RGWAsyncRadosRequest : public RefCountedObject {
  RGWCoroutine *caller;
  RGWAioCompletionNotifier *notifier;

  int retcode;            // result of _send_request(), read via get_ret_status()

  ceph::mutex lock = ceph::make_mutex("RGWAsyncRadosRequest::lock");

protected:
  // the actual blocking work; runs on a thread-pool thread
  virtual int _send_request(const DoutPrefixProvider *dpp) = 0;
public:
  RGWAsyncRadosRequest(RGWCoroutine *_caller, RGWAioCompletionNotifier *_cn)
    : caller(_caller), notifier(_cn), retcode(0) {
  }
  ~RGWAsyncRadosRequest() override {
    if (notifier) {
      notifier->put();
    }
  }

  void send_request(const DoutPrefixProvider *dpp) {
    get();
    retcode = _send_request(dpp);
    {
      std::lock_guard l{lock};
      if (notifier) {
        notifier->cb(); // drops its own ref
        notifier = nullptr;
      }
    }
    put();
  }

  int get_ret_status() { return retcode; }

  void finish() {
    {
      std::lock_guard l{lock};
      if (notifier) {
        // we won't call notifier->cb() to drop its ref, so drop it here
        notifier->put();
        notifier = nullptr;
      }
    }
    put();
  }
};

// Thread pool + work queue that executes RGWAsyncRadosRequest instances, with
// a throttle on the number of queued requests.
class RGWAsyncRadosProcessor {
  std::deque<RGWAsyncRadosRequest *> m_req_queue;
  std::atomic<bool> going_down = { false };
protected:
  CephContext *cct;
  ThreadPool m_tp;
  Throttle req_throttle;

  struct RGWWQ : public DoutPrefixProvider, public ThreadPool::WorkQueue<RGWAsyncRadosRequest> {
    RGWAsyncRadosProcessor *processor;
    RGWWQ(RGWAsyncRadosProcessor *p,
	  ceph::timespan timeout, ceph::timespan suicide_timeout,
	  ThreadPool *tp)
      : ThreadPool::WorkQueue<RGWAsyncRadosRequest>("RGWWQ", timeout, suicide_timeout, tp), processor(p) {}

    bool _enqueue(RGWAsyncRadosRequest *req) override;
    // targeted dequeue is not supported for this queue
    void _dequeue(RGWAsyncRadosRequest *req) override {
      ceph_abort();
    }
    bool _empty() override;
    RGWAsyncRadosRequest *_dequeue() override;
    using ThreadPool::WorkQueue<RGWAsyncRadosRequest>::_process;
    void _process(RGWAsyncRadosRequest *req, ThreadPool::TPHandle& handle) override;
    void _dump_queue();
    void _clear() override {
      ceph_assert(processor->m_req_queue.empty());
    }

    CephContext *get_cct() const { return processor->cct; }
    unsigned get_subsys() const { return ceph_subsys_rgw; }
    std::ostream& gen_prefix(std::ostream& out) const { return out << "rgw async rados processor: ";}
  } req_wq;

public:
  RGWAsyncRadosProcessor(CephContext *_cct, int num_threads);
  ~RGWAsyncRadosProcessor() {}
  void start();
  void stop();
  void handle_request(const DoutPrefixProvider *dpp, RGWAsyncRadosRequest *req);
  void queue(RGWAsyncRadosRequest *req);

  bool is_going_down() {
    return going_down;
  }
};

// Coroutine that runs a write-only blocking operation (parameterized by P) on
// the async processor; the per-P behavior lives in Request::_send_request().
template <class P>
class RGWSimpleWriteOnlyAsyncCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;

  P params;
  const DoutPrefixProvider *dpp;

  class Request : public RGWAsyncRadosRequest {
    rgw::sal::RadosStore* store;
    P params;
    const DoutPrefixProvider *dpp;
  protected:
    int _send_request(const DoutPrefixProvider *dpp) override;
  public:
    Request(RGWCoroutine *caller,
            RGWAioCompletionNotifier *cn,
            rgw::sal::RadosStore* store,
            const P& _params,
            const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn),
                                             store(store),
                                             params(_params),
                                             dpp(dpp) {}
  } *req{nullptr};

public:
  RGWSimpleWriteOnlyAsyncCR(RGWAsyncRadosProcessor *_async_rados,
			    rgw::sal::RadosStore* _store,
			    const P& _params,
			    const DoutPrefixProvider *_dpp) : RGWSimpleCoroutine(_store->ctx()),
                                                              async_rados(_async_rados),
                                                              store(_store),
                                                              params(_params),
                                                              dpp(_dpp) {}

  ~RGWSimpleWriteOnlyAsyncCR() override {
    request_cleanup();
  }
  void request_cleanup() override {
    if (req) {
      req->finish();
      req = NULL;
    }
  }

  int send_request(const DoutPrefixProvider *dpp) override {
    req = new Request(this,
                      stack->create_completion_notifier(),
                      store,
                      params,
                      dpp);

    async_rados->queue(req);
    return 0;
  }
  int request_complete() override {
    return req->get_ret_status();
  }
};

// Like RGWSimpleWriteOnlyAsyncCR but also produces a result R, shared with the
// caller through a shared_ptr.
template <class P, class R>
class RGWSimpleAsyncCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;

  P params;
  std::shared_ptr<R> result;
  const DoutPrefixProvider *dpp;

  class Request : public RGWAsyncRadosRequest {
    rgw::sal::RadosStore* store;
    P params;
    std::shared_ptr<R> result;
    const DoutPrefixProvider *dpp;
  protected:
    int _send_request(const DoutPrefixProvider *dpp) override;
  public:
    Request(const DoutPrefixProvider *dpp,
            RGWCoroutine *caller,
            RGWAioCompletionNotifier *cn,
            rgw::sal::RadosStore* _store,
            const P& _params,
            std::shared_ptr<R>& _result,
            const DoutPrefixProvider *_dpp) : RGWAsyncRadosRequest(caller, cn),
                                              store(_store),
                                              params(_params),
                                              result(_result),
                                              dpp(_dpp) {}
  } *req{nullptr};

public:
  RGWSimpleAsyncCR(RGWAsyncRadosProcessor *_async_rados,
                   rgw::sal::RadosStore* _store,
                   const P& _params,
                   std::shared_ptr<R>& _result,
                   const DoutPrefixProvider *_dpp) : RGWSimpleCoroutine(_store->ctx()),
                                                     async_rados(_async_rados),
                                                     store(_store),
                                                     params(_params),
                                                     result(_result),
                                                     dpp(_dpp) {}

  ~RGWSimpleAsyncCR() override {
    request_cleanup();
  }
  void request_cleanup() override {
    if (req) {
      req->finish();
      req = NULL;
    }
  }

  int send_request(const DoutPrefixProvider *dpp) override {
    req = new Request(dpp,
                      this,
                      stack->create_completion_notifier(),
                      store,
                      params,
                      result,
                      dpp);

    async_rados->queue(req);
    return 0;
  }
  int request_complete() override {
    return req->get_ret_status();
  }
};

// Coroutine that runs an arbitrary callable (Action::operate) on the async
// processor's thread pool.
class RGWGenericAsyncCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;

public:
  class Action {
  public:
    virtual ~Action() {}
    virtual int operate() = 0;
  };

private:
  std::shared_ptr<Action> action;

  class Request : public RGWAsyncRadosRequest {
    std::shared_ptr<Action> action;
  protected:
    int _send_request(const DoutPrefixProvider *dpp) override {
      if (!action) {
	return 0;
      }
      return action->operate();
    }
  public:
    Request(const DoutPrefixProvider *dpp,
            RGWCoroutine *caller,
            RGWAioCompletionNotifier *cn,
            std::shared_ptr<Action>& _action) : RGWAsyncRadosRequest(caller, cn),
                                                action(_action) {}
  } *req{nullptr};

public:
  RGWGenericAsyncCR(CephContext *_cct,
		    RGWAsyncRadosProcessor *_async_rados,
		    std::shared_ptr<Action>& _action) : RGWSimpleCoroutine(_cct),
                                                        async_rados(_async_rados),
                                                        action(_action) {}
  // convenience overload: accepts any shared_ptr convertible to Action
  template<typename T>
  RGWGenericAsyncCR(CephContext *_cct,
		    RGWAsyncRadosProcessor *_async_rados,
		    std::shared_ptr<T>& _action) : RGWSimpleCoroutine(_cct),
                                                   async_rados(_async_rados),
                                                   action(std::static_pointer_cast<Action>(_action)) {}

  ~RGWGenericAsyncCR() override {
    request_cleanup();
  }
  void request_cleanup() override {
    if (req) {
      req->finish();
      req = NULL;
    }
  }

  int send_request(const DoutPrefixProvider *dpp) override {
    req = new Request(dpp, this,
                      stack->create_completion_notifier(),
                      action);

    async_rados->queue(req);
    return 0;
  }
  int request_complete() override {
    return req->get_ret_status();
  }
};

// Async request: read a system object (data, optionally attrs) into the
// bl/attrs/objv_tracker output members.
class RGWAsyncGetSystemObj : public RGWAsyncRadosRequest {
  const DoutPrefixProvider *dpp;
  RGWSI_SysObj* svc_sysobj;
  rgw_raw_obj obj;
  const bool want_attrs;
  const bool raw_attrs;
protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncGetSystemObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
                       RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                       bool want_attrs, bool raw_attrs);

  bufferlist bl;
  std::map<std::string, bufferlist> attrs;
  RGWObjVersionTracker objv_tracker;
};

// Async request: write a system object's data (optionally exclusive-create).
class RGWAsyncPutSystemObj : public RGWAsyncRadosRequest {
  const DoutPrefixProvider *dpp;
  RGWSI_SysObj *svc;
  rgw_raw_obj obj;
  bool exclusive;
  bufferlist bl;
protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncPutSystemObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
                       RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                       bool _exclusive, bufferlist _bl);

  RGWObjVersionTracker objv_tracker;
};

// Async request: write a system object's xattrs.
class RGWAsyncPutSystemObjAttrs : public RGWAsyncRadosRequest {
  const DoutPrefixProvider *dpp;
  RGWSI_SysObj *svc;
  rgw_raw_obj obj;
  std::map<std::string, bufferlist> attrs;
  bool exclusive;
protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncPutSystemObjAttrs(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, RGWSI_SysObj *_svc,
                            RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                            std::map<std::string, bufferlist> _attrs, bool exclusive);

  RGWObjVersionTracker objv_tracker;
};

// Async request: take a timed cls lock on a raw object.
class RGWAsyncLockSystemObj : public RGWAsyncRadosRequest {
  rgw::sal::RadosStore* store;
  rgw_raw_obj obj;
  std::string lock_name;
  std::string cookie;
  uint32_t duration_secs;
protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncLockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
                        RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                        const std::string& _name, const std::string& _cookie, uint32_t _duration_secs);
};

// Async request: release a cls lock previously taken with the same
// name/cookie.
class RGWAsyncUnlockSystemObj : public RGWAsyncRadosRequest {
  rgw::sal::RadosStore* store;
  rgw_raw_obj obj;
  std::string lock_name;
  std::string cookie;
protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncUnlockSystemObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store,
                          RGWObjVersionTracker *_objv_tracker, const rgw_raw_obj& _obj,
                          const std::string& _name, const std::string& _cookie);
};

// Coroutine that reads a raw object and decodes it into *result (type T).
// Subclasses may override handle_data() to post-process the decoded value.
template <class T>
class RGWSimpleRadosReadCR : public RGWSimpleCoroutine {
  const DoutPrefixProvider* dpp;
  rgw::sal::RadosStore* store;
  rgw_raw_obj obj;
  T* result;
  /// on ENOENT, call handle_data() with an empty object instead of failing
  const bool empty_on_enoent;
  RGWObjVersionTracker* objv_tracker;

  T val;                 // fallback storage when the caller passes no result
  rgw_rados_ref ref;
  ceph::buffer::list bl;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWSimpleRadosReadCR(const DoutPrefixProvider* dpp,
		       rgw::sal::RadosStore* store,
		       const rgw_raw_obj& obj,
		       T* result, bool empty_on_enoent = true,
		       RGWObjVersionTracker* objv_tracker = nullptr)
    : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store),
      obj(obj), result(result),
      empty_on_enoent(empty_on_enoent), objv_tracker(objv_tracker) {
    if (!result) {
      result = &val;
    }
  }

  int send_request(const DoutPrefixProvider *dpp) {
    int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
    if (r < 0) {
      ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
      return r;
    }

    set_status() << "sending request";

    librados::ObjectReadOperation op;
    if (objv_tracker) {
      objv_tracker->prepare_op_for_read(&op);
    }

    op.read(0, -1, &bl, nullptr);

    cn = stack->create_completion_notifier();
    return ref.pool.ioctx().aio_operate(ref.obj.oid, cn->completion(), &op,
					nullptr);
  }

  int request_complete() {
    int ret = cn->completion()->get_return_value();
    set_status() << "request complete; ret=" << ret;

    if (ret == -ENOENT && empty_on_enoent) {
      *result = T();
    } else {
      if (ret < 0) {
	return ret;
      }
      try {
	auto iter = bl.cbegin();
	if (iter.end()) {
	  // allow successful reads with empty buffers. ReadSyncStatus coroutines
	  // depend on this to be able to read without locking, because the
	  // cls lock from InitSyncStatus will create an empty object if it didn't
	  // exist
	  *result = T();
	} else {
	  decode(*result, iter);
	}
      } catch (buffer::error& err) {
	return -EIO;
      }
    }

    return handle_data(*result);
  }

  virtual int handle_data(T& data) {
    return 0;
  }
};

// Coroutine that reads a raw object's xattrs into *pattrs; raw_attrs controls
// whether attribute names are filtered (implementation in the .cc).
class RGWSimpleRadosReadAttrsCR : public RGWSimpleCoroutine {
  const DoutPrefixProvider* dpp;
  rgw::sal::RadosStore* const store;
  const rgw_raw_obj obj;
  std::map<std::string, bufferlist>* const pattrs;
  const bool raw_attrs;
  RGWObjVersionTracker* const objv_tracker;

  rgw_rados_ref ref;
  std::map<std::string, bufferlist> unfiltered_attrs;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWSimpleRadosReadAttrsCR(const DoutPrefixProvider* dpp,
			    rgw::sal::RadosStore* store,
			    rgw_raw_obj obj,
			    std::map<std::string, bufferlist>* pattrs,
			    bool raw_attrs,
			    RGWObjVersionTracker* objv_tracker = nullptr)
    : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store),
      obj(std::move(obj)), pattrs(pattrs), raw_attrs(raw_attrs),
      objv_tracker(objv_tracker) {}

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;
};

// Coroutine that encodes a value of type T and write_full()s it to a raw
// object, optionally exclusive-create and version-tracked.
template <class T>
class RGWSimpleRadosWriteCR : public RGWSimpleCoroutine {
  const DoutPrefixProvider* dpp;
  rgw::sal::RadosStore* const store;
  rgw_raw_obj obj;
  RGWObjVersionTracker* objv_tracker;
  bool exclusive;

  bufferlist bl;         // the value is encoded once, in the constructor
  rgw_rados_ref ref;
  std::map<std::string, bufferlist> unfiltered_attrs;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWSimpleRadosWriteCR(const DoutPrefixProvider* dpp,
			rgw::sal::RadosStore* const store,
			rgw_raw_obj obj, const T& data,
			RGWObjVersionTracker* objv_tracker = nullptr,
			bool exclusive = false)
    : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store),
      obj(std::move(obj)), objv_tracker(objv_tracker), exclusive(exclusive) {
    encode(data, bl);
  }

  int send_request(const DoutPrefixProvider *dpp) override {
    int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
    if (r < 0) {
      ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
      return r;
    }

    set_status() << "sending request";

    librados::ObjectWriteOperation op;
    if (exclusive) {
      op.create(true);
    }
    if (objv_tracker) {
      objv_tracker->prepare_op_for_write(&op);
    }
    op.write_full(bl);

    cn = stack->create_completion_notifier();
    return ref.pool.ioctx().aio_operate(ref.obj.oid, cn->completion(), &op);
  }

  int request_complete() override {
    int ret = cn->completion()->get_return_value();
    set_status() << "request complete; ret=" << ret;
    if (ret >= 0 && objv_tracker) {
      objv_tracker->apply_write();
    }
    return ret;
  }
};

// Coroutine that writes a set of xattrs to a raw object; empty-valued attrs
// are skipped, and a no-op write completes immediately.
class RGWSimpleRadosWriteAttrsCR : public RGWSimpleCoroutine {
  const DoutPrefixProvider* dpp;
  rgw::sal::RadosStore* const store;
  RGWObjVersionTracker* objv_tracker;
  rgw_raw_obj obj;
  std::map<std::string, bufferlist> attrs;
  bool exclusive;

  rgw_rados_ref ref;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWSimpleRadosWriteAttrsCR(const DoutPrefixProvider* dpp,
			     rgw::sal::RadosStore* const store,
			     rgw_raw_obj obj,
			     std::map<std::string, bufferlist> attrs,
			     RGWObjVersionTracker* objv_tracker = nullptr,
			     bool exclusive = false)
    : RGWSimpleCoroutine(store->ctx()), dpp(dpp), store(store),
      objv_tracker(objv_tracker), obj(std::move(obj)),
      attrs(std::move(attrs)), exclusive(exclusive) {}

  int send_request(const DoutPrefixProvider *dpp) override {
    int r = store->getRados()->get_raw_obj_ref(dpp, obj, &ref);
    if (r < 0) {
      ldpp_dout(dpp, -1) << "ERROR: failed to get ref for (" << obj << ") ret=" << r << dendl;
      return r;
    }

    set_status() << "sending request";

    librados::ObjectWriteOperation op;
    if (exclusive) {
      op.create(true);
    }
    if (objv_tracker) {
      objv_tracker->prepare_op_for_write(&op);
    }

    for (const auto& [name, bl] : attrs) {
      if (!bl.length())
	continue;
      op.setxattr(name.c_str(), bl);
    }

    cn = stack->create_completion_notifier();
    // nothing to write: fire the completion directly instead of submitting
    if (!op.size()) {
      cn->cb();
      return 0;
    }

    return ref.pool.ioctx().aio_operate(ref.obj.oid, cn->completion(), &op);
  }

  int request_complete() override {
    int ret = cn->completion()->get_return_value();
    set_status() << "request complete; ret=" << ret;
    if (ret >= 0 && objv_tracker) {
      objv_tracker->apply_write();
    }
    return ret;
  }
};

// Coroutine: set omap key/value pairs on a raw object.
class RGWRadosSetOmapKeysCR : public RGWSimpleCoroutine {
  rgw::sal::RadosStore* store;
  std::map<std::string, bufferlist> entries;

  rgw_rados_ref ref;

  rgw_raw_obj obj;

  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWRadosSetOmapKeysCR(rgw::sal::RadosStore* _store,
		      const rgw_raw_obj& _obj,
		      std::map<std::string, bufferlist>& _entries);

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;
};

// Coroutine: list omap keys of a raw object starting after a marker; results
// (and a "more" flag) are delivered through the shared Result.
class RGWRadosGetOmapKeysCR : public RGWSimpleCoroutine {
 public:
  struct Result {
    rgw_rados_ref ref;
    std::set<std::string> entries;
    bool more = false;
  };
  using ResultPtr = std::shared_ptr<Result>;

  RGWRadosGetOmapKeysCR(rgw::sal::RadosStore* _store, const rgw_raw_obj& _obj,
                        const std::string& _marker, int _max_entries,
                        ResultPtr result);

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;

 private:
  rgw::sal::RadosStore* store;
  rgw_raw_obj obj;
  std::string marker;
  int max_entries;
  ResultPtr result;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
};

// Coroutine: list omap key/value pairs of a raw object starting after a
// marker; results (and a "more" flag) are delivered through the shared Result.
class RGWRadosGetOmapValsCR : public RGWSimpleCoroutine {
 public:
  struct Result {
    rgw_rados_ref ref;
    std::map<std::string, bufferlist> entries;
    bool more = false;
  };
  using ResultPtr = std::shared_ptr<Result>;

  RGWRadosGetOmapValsCR(rgw::sal::RadosStore* _store, const rgw_raw_obj& _obj,
                        const std::string& _marker, int _max_entries,
                        ResultPtr result);

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;

 private:
  rgw::sal::RadosStore* store;
  rgw_raw_obj obj;
  std::string marker;
  int max_entries;
  ResultPtr result;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;
};

// Coroutine: remove a set of omap keys from a raw object.
class RGWRadosRemoveOmapKeysCR : public RGWSimpleCoroutine {
  rgw::sal::RadosStore* store;

  rgw_rados_ref ref;

  std::set<std::string> keys;

  rgw_raw_obj obj;

  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWRadosRemoveOmapKeysCR(rgw::sal::RadosStore* _store,
		      const rgw_raw_obj& _obj,
		      const std::set<std::string>& _keys);

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;
};

// Coroutine: remove a raw object (optionally guarded by a version tracker).
class RGWRadosRemoveCR : public RGWSimpleCoroutine {
  rgw::sal::RadosStore* store;
  librados::IoCtx ioctx;
  const rgw_raw_obj obj;
  RGWObjVersionTracker* objv_tracker;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWRadosRemoveCR(rgw::sal::RadosStore* store, const rgw_raw_obj& obj,
                   RGWObjVersionTracker* objv_tracker = nullptr);

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;
};

// Coroutine: remove an object addressed by IoCtx + oid (or an
// RGWSI_RADOS::Obj), optionally guarded by a version tracker.
class RGWRadosRemoveOidCR : public RGWSimpleCoroutine {
  librados::IoCtx ioctx;
  const std::string oid;
  RGWObjVersionTracker* objv_tracker;
  boost::intrusive_ptr<RGWAioCompletionNotifier> cn;

public:
  RGWRadosRemoveOidCR(rgw::sal::RadosStore* store,
		      librados::IoCtx&& ioctx, std::string_view oid,
		      RGWObjVersionTracker* objv_tracker = nullptr);

  RGWRadosRemoveOidCR(rgw::sal::RadosStore* store,
		      RGWSI_RADOS::Obj& obj,
		      RGWObjVersionTracker* objv_tracker = nullptr);

  RGWRadosRemoveOidCR(rgw::sal::RadosStore* store,
		      RGWSI_RADOS::Obj&& obj,
		      RGWObjVersionTracker* objv_tracker = nullptr);

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;
};

// Coroutine wrapper around RGWAsyncLockSystemObj: take a timed cls lock.
class RGWSimpleRadosLockCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;
  std::string lock_name;
  std::string cookie;
  uint32_t duration;

  rgw_raw_obj obj;

  RGWAsyncLockSystemObj *req;

public:
  RGWSimpleRadosLockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
		      const rgw_raw_obj& _obj,
		      const std::string& _lock_name,
		      const std::string& _cookie,
		      uint32_t _duration);
  ~RGWSimpleRadosLockCR() override {
    request_cleanup();
  }
  void request_cleanup() override;

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;

  // produce a random 16-character alphanumeric lock cookie
  static std::string gen_random_cookie(CephContext* cct) {
    static constexpr std::size_t COOKIE_LEN = 16;
    char buf[COOKIE_LEN + 1];
    gen_rand_alphanumeric(cct, buf, sizeof(buf) - 1);
    return buf;
  }
};

// Coroutine wrapper around RGWAsyncUnlockSystemObj: release a cls lock.
class RGWSimpleRadosUnlockCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;
  std::string lock_name;
  std::string cookie;

  rgw_raw_obj obj;

  RGWAsyncUnlockSystemObj *req;

public:
  RGWSimpleRadosUnlockCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
		      const rgw_raw_obj& _obj,
		      const std::string& _lock_name,
		      const std::string& _cookie);
  ~RGWSimpleRadosUnlockCR() override {
    request_cleanup();
  }
  void request_cleanup() override;

  int send_request(const DoutPrefixProvider *dpp) override;
  int request_complete() override;
};

#define OMAP_APPEND_MAX_ENTRIES_DEFAULT 100

// Consumer coroutine that batches appended strings and writes them as omap
// keys on a single object; entries are flushed every window_size appends
// (see append()/flush_pending()/finish() in the .cc).
class RGWOmapAppend : public RGWConsumerCR<std::string> {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;

  rgw_raw_obj obj;

  bool going_down;

  int num_pending_entries;
  std::list<std::string> pending_entries;

  std::map<std::string, bufferlist> entries;

  uint64_t window_size;
  uint64_t total_entries;

public:
  RGWOmapAppend(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
                const rgw_raw_obj& _obj,
                uint64_t _window_size = OMAP_APPEND_MAX_ENTRIES_DEFAULT);
  int operate(const DoutPrefixProvider *dpp) override;
  void flush_pending();
  bool append(const std::string& s);
  bool finish();

  uint64_t get_total_entries() {
    return total_entries;
  }

  const rgw_raw_obj& get_obj() {
    return obj;
  }
};

// Owns one RGWOmapAppend coroutine per shard ("<oid_prefix>.<i>") and routes
// appends to the requested shard; finish() succeeds only if all shards do.
class RGWShardedOmapCRManager {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;
  RGWCoroutine *op;

  int num_shards;

  std::vector<RGWOmapAppend *> shards;
public:
  RGWShardedOmapCRManager(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store, RGWCoroutine *_op, int _num_shards, const rgw_pool& pool, const std::string& oid_prefix)
                      : async_rados(_async_rados),
		        store(_store), op(_op), num_shards(_num_shards) {
    shards.reserve(num_shards);
    for (int i = 0; i < num_shards; ++i) {
      char buf[oid_prefix.size() + 16];
      snprintf(buf, sizeof(buf), "%s.%d", oid_prefix.c_str(), i);
      // hold a ref per shard; spawned (non-blocking) on the parent coroutine
      RGWOmapAppend *shard = new RGWOmapAppend(async_rados, store, rgw_raw_obj(pool, buf));
      shard->get();
      shards.push_back(shard);
      op->spawn(shard, false);
    }
  }

  ~RGWShardedOmapCRManager() {
    for (auto shard : shards) {
      shard->put();
    }
  }

  bool append(const std::string& entry, int shard_id) {
    return shards[shard_id]->append(entry);
  }
  bool finish() {
    bool success = true;
    for (auto& append_op : shards) {
      success &= (append_op->finish() && (!append_op->is_error()));
    }
    return success;
  }

  uint64_t get_total_entries(int shard_id) {
    return shards[shard_id]->get_total_entries();
  }
};

// Async request: load bucket instance info + attrs (outputs are public
// members read by the wrapping coroutine).
class RGWAsyncGetBucketInstanceInfo : public RGWAsyncRadosRequest {
  rgw::sal::RadosStore* store;
  rgw_bucket bucket;
  const DoutPrefixProvider *dpp;

protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncGetBucketInstanceInfo(RGWCoroutine *caller, RGWAioCompletionNotifier *cn,
                                rgw::sal::RadosStore* _store, const rgw_bucket& bucket,
                                const DoutPrefixProvider *dpp)
    : RGWAsyncRadosRequest(caller, cn), store(_store), bucket(bucket), dpp(dpp) {}

  RGWBucketInfo bucket_info;
  std::map<std::string, bufferlist> attrs;
};

// Async request: store bucket instance info (optionally exclusive) with the
// given mtime and attrs.
class RGWAsyncPutBucketInstanceInfo : public RGWAsyncRadosRequest {
  rgw::sal::RadosStore* store;
  RGWBucketInfo& bucket_info;
  bool exclusive;
  real_time mtime;
  std::map<std::string, ceph::bufferlist>* attrs;
  const DoutPrefixProvider *dpp;

protected:
  int _send_request(const DoutPrefixProvider *dpp) override;
public:
  RGWAsyncPutBucketInstanceInfo(RGWCoroutine* caller,
				RGWAioCompletionNotifier* cn,
				rgw::sal::RadosStore* store,
				RGWBucketInfo& bucket_info,
				bool exclusive,
				real_time mtime,
				std::map<std::string, ceph::bufferlist>* attrs,
				const DoutPrefixProvider* dpp)
    : RGWAsyncRadosRequest(caller, cn), store(store), bucket_info(bucket_info),
      exclusive(exclusive), mtime(mtime), attrs(attrs), dpp(dpp) {}
};

// Coroutine wrapper around RGWAsyncGetBucketInstanceInfo; on completion moves
// the results into the caller-supplied bucket_info/pattrs (either may be null).
class RGWGetBucketInstanceInfoCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;
  rgw_bucket bucket;
  RGWBucketInfo *bucket_info;
  std::map<std::string, bufferlist> *pattrs;
  const DoutPrefixProvider *dpp;

  RGWAsyncGetBucketInstanceInfo *req{nullptr};

public:
  // rgw_bucket constructor
  RGWGetBucketInstanceInfoCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store,
                             const rgw_bucket& _bucket, RGWBucketInfo *_bucket_info,
                             std::map<std::string, bufferlist> *_pattrs, const DoutPrefixProvider *dpp)
    : RGWSimpleCoroutine(_store->ctx()), async_rados(_async_rados), store(_store),
      bucket(_bucket), bucket_info(_bucket_info), pattrs(_pattrs), dpp(dpp) {}
  ~RGWGetBucketInstanceInfoCR() override {
    request_cleanup();
  }
  void request_cleanup() override {
    if (req) {
      req->finish();
      req = NULL;
    }
  }

  int send_request(const DoutPrefixProvider *dpp) override {
    req = new RGWAsyncGetBucketInstanceInfo(this, stack->create_completion_notifier(), store, bucket, dpp);
    async_rados->queue(req);
    return 0;
  }
  int request_complete() override {
    if (bucket_info) {
      *bucket_info = std::move(req->bucket_info);
    }
    if (pattrs) {
      *pattrs = std::move(req->attrs);
    }
    return req->get_ret_status();
  }
};

// Coroutine wrapper around RGWAsyncPutBucketInstanceInfo.
// NOTE(review): this declaration is truncated at the end of the visible
// chunk; the constructor continues past it.
class RGWPutBucketInstanceInfoCR : public RGWSimpleCoroutine {
  RGWAsyncRadosProcessor *async_rados;
  rgw::sal::RadosStore* store;
  RGWBucketInfo& bucket_info;
  bool exclusive;
  real_time mtime;
  std::map<std::string, ceph::bufferlist>* attrs;
  const DoutPrefixProvider *dpp;

  RGWAsyncPutBucketInstanceInfo* req = nullptr;

public:
  // rgw_bucket constructor
  RGWPutBucketInstanceInfoCR(RGWAsyncRadosProcessor *async_rados,
			     rgw::sal::RadosStore* store,
			     RGWBucketInfo& bucket_info,
			     bool exclusive,
			     real_time mtime,
			     std::map<std::string, ceph::bufferlist>* attrs,
			     const DoutPrefixProvider *dpp)
    : RGWSimpleCoroutine(store->ctx()), async_rados(async_rados), store(store),
      bucket_info(bucket_info), exclusive(exclusive),
mtime(mtime), attrs(attrs), dpp(dpp) {} ~RGWPutBucketInstanceInfoCR() override { request_cleanup(); } void request_cleanup() override { if (req) { req->finish(); req = nullptr; } } int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncPutBucketInstanceInfo(this, stack->create_completion_notifier(), store, bucket_info, exclusive, mtime, attrs, dpp); async_rados->queue(req); return 0; } int request_complete() override { return req->get_ret_status(); } }; class RGWRadosBILogTrimCR : public RGWSimpleCoroutine { const RGWBucketInfo& bucket_info; int shard_id; const rgw::bucket_index_layout_generation generation; RGWRados::BucketShard bs; std::string start_marker; std::string end_marker; boost::intrusive_ptr<RGWAioCompletionNotifier> cn; public: RGWRadosBILogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const RGWBucketInfo& bucket_info, int shard_id, const rgw::bucket_index_layout_generation& generation, const std::string& start_marker, const std::string& end_marker); int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; class RGWAsyncFetchRemoteObj : public RGWAsyncRadosRequest { rgw::sal::RadosStore* store; rgw_zone_id source_zone; std::optional<rgw_user> user_id; rgw_bucket src_bucket; std::optional<rgw_placement_rule> dest_placement_rule; RGWBucketInfo dest_bucket_info; rgw_obj_key key; std::optional<rgw_obj_key> dest_key; std::optional<uint64_t> versioned_epoch; real_time src_mtime; bool copy_if_newer; std::shared_ptr<RGWFetchObjFilter> filter; bool stat_follow_olh; rgw_zone_set_entry source_trace_entry; rgw_zone_set zones_trace; PerfCounters* counters; const DoutPrefixProvider *dpp; protected: int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncFetchRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, std::optional<rgw_user>& _user_id, const rgw_bucket& _src_bucket, 
std::optional<rgw_placement_rule> _dest_placement_rule, const RGWBucketInfo& _dest_bucket_info, const rgw_obj_key& _key, const std::optional<rgw_obj_key>& _dest_key, std::optional<uint64_t> _versioned_epoch, bool _if_newer, std::shared_ptr<RGWFetchObjFilter> _filter, bool _stat_follow_olh, const rgw_zone_set_entry& source_trace_entry, rgw_zone_set *_zones_trace, PerfCounters* counters, const DoutPrefixProvider *dpp) : RGWAsyncRadosRequest(caller, cn), store(_store), source_zone(_source_zone), user_id(_user_id), src_bucket(_src_bucket), dest_placement_rule(_dest_placement_rule), dest_bucket_info(_dest_bucket_info), key(_key), dest_key(_dest_key), versioned_epoch(_versioned_epoch), copy_if_newer(_if_newer), filter(_filter), stat_follow_olh(_stat_follow_olh), source_trace_entry(source_trace_entry), counters(counters), dpp(dpp) { if (_zones_trace) { zones_trace = *_zones_trace; } } }; class RGWFetchRemoteObjCR : public RGWSimpleCoroutine { CephContext *cct; RGWAsyncRadosProcessor *async_rados; rgw::sal::RadosStore* store; rgw_zone_id source_zone; std::optional<rgw_user> user_id; rgw_bucket src_bucket; std::optional<rgw_placement_rule> dest_placement_rule; RGWBucketInfo dest_bucket_info; rgw_obj_key key; std::optional<rgw_obj_key> dest_key; std::optional<uint64_t> versioned_epoch; real_time src_mtime; bool copy_if_newer; std::shared_ptr<RGWFetchObjFilter> filter; RGWAsyncFetchRemoteObj *req; bool stat_follow_olh; const rgw_zone_set_entry& source_trace_entry; rgw_zone_set *zones_trace; PerfCounters* counters; const DoutPrefixProvider *dpp; public: RGWFetchRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, std::optional<rgw_user> _user_id, const rgw_bucket& _src_bucket, std::optional<rgw_placement_rule> _dest_placement_rule, const RGWBucketInfo& _dest_bucket_info, const rgw_obj_key& _key, const std::optional<rgw_obj_key>& _dest_key, std::optional<uint64_t> _versioned_epoch, bool _if_newer, 
std::shared_ptr<RGWFetchObjFilter> _filter, bool _stat_follow_olh, const rgw_zone_set_entry& source_trace_entry, rgw_zone_set *_zones_trace, PerfCounters* counters, const DoutPrefixProvider *dpp) : RGWSimpleCoroutine(_store->ctx()), cct(_store->ctx()), async_rados(_async_rados), store(_store), source_zone(_source_zone), user_id(_user_id), src_bucket(_src_bucket), dest_placement_rule(_dest_placement_rule), dest_bucket_info(_dest_bucket_info), key(_key), dest_key(_dest_key), versioned_epoch(_versioned_epoch), copy_if_newer(_if_newer), filter(_filter), req(NULL), stat_follow_olh(_stat_follow_olh), source_trace_entry(source_trace_entry), zones_trace(_zones_trace), counters(counters), dpp(dpp) {} ~RGWFetchRemoteObjCR() override { request_cleanup(); } void request_cleanup() override { if (req) { req->finish(); req = NULL; } } int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncFetchRemoteObj(this, stack->create_completion_notifier(), store, source_zone, user_id, src_bucket, dest_placement_rule, dest_bucket_info, key, dest_key, versioned_epoch, copy_if_newer, filter, stat_follow_olh, source_trace_entry, zones_trace, counters, dpp); async_rados->queue(req); return 0; } int request_complete() override { return req->get_ret_status(); } }; class RGWAsyncStatRemoteObj : public RGWAsyncRadosRequest { rgw::sal::RadosStore* store; rgw_zone_id source_zone; rgw_bucket src_bucket; rgw_obj_key key; ceph::real_time *pmtime; uint64_t *psize; std::string *petag; std::map<std::string, bufferlist> *pattrs; std::map<std::string, std::string> *pheaders; protected: int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncStatRemoteObj(RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, rgw_bucket& _src_bucket, const rgw_obj_key& _key, ceph::real_time *_pmtime, uint64_t *_psize, std::string *_petag, std::map<std::string, bufferlist> *_pattrs, std::map<std::string, std::string> *_pheaders) 
: RGWAsyncRadosRequest(caller, cn), store(_store), source_zone(_source_zone), src_bucket(_src_bucket), key(_key), pmtime(_pmtime), psize(_psize), petag(_petag), pattrs(_pattrs), pheaders(_pheaders) {} }; class RGWStatRemoteObjCR : public RGWSimpleCoroutine { CephContext *cct; RGWAsyncRadosProcessor *async_rados; rgw::sal::RadosStore* store; rgw_zone_id source_zone; rgw_bucket src_bucket; rgw_obj_key key; ceph::real_time *pmtime; uint64_t *psize; std::string *petag; std::map<std::string, bufferlist> *pattrs; std::map<std::string, std::string> *pheaders; RGWAsyncStatRemoteObj *req; public: RGWStatRemoteObjCR(RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, rgw_bucket& _src_bucket, const rgw_obj_key& _key, ceph::real_time *_pmtime, uint64_t *_psize, std::string *_petag, std::map<std::string, bufferlist> *_pattrs, std::map<std::string, std::string> *_pheaders) : RGWSimpleCoroutine(_store->ctx()), cct(_store->ctx()), async_rados(_async_rados), store(_store), source_zone(_source_zone), src_bucket(_src_bucket), key(_key), pmtime(_pmtime), psize(_psize), petag(_petag), pattrs(_pattrs), pheaders(_pheaders), req(NULL) {} ~RGWStatRemoteObjCR() override { request_cleanup(); } void request_cleanup() override { if (req) { req->finish(); req = NULL; } } int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncStatRemoteObj(this, stack->create_completion_notifier(), store, source_zone, src_bucket, key, pmtime, psize, petag, pattrs, pheaders); async_rados->queue(req); return 0; } int request_complete() override { return req->get_ret_status(); } }; class RGWAsyncRemoveObj : public RGWAsyncRadosRequest { const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; rgw_zone_id source_zone; std::unique_ptr<rgw::sal::Bucket> bucket; std::unique_ptr<rgw::sal::Object> obj; std::string owner; std::string owner_display_name; bool versioned; uint64_t versioned_epoch; std::string marker_version_id; bool del_if_older; 
ceph::real_time timestamp; rgw_zone_set zones_trace; protected: int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncRemoveObj(const DoutPrefixProvider *_dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, RGWBucketInfo& _bucket_info, const rgw_obj_key& _key, const std::string& _owner, const std::string& _owner_display_name, bool _versioned, uint64_t _versioned_epoch, bool _delete_marker, bool _if_older, real_time& _timestamp, rgw_zone_set* _zones_trace) : RGWAsyncRadosRequest(caller, cn), dpp(_dpp), store(_store), source_zone(_source_zone), owner(_owner), owner_display_name(_owner_display_name), versioned(_versioned), versioned_epoch(_versioned_epoch), del_if_older(_if_older), timestamp(_timestamp) { if (_delete_marker) { marker_version_id = _key.instance; } if (_zones_trace) { zones_trace = *_zones_trace; } store->get_bucket(nullptr, _bucket_info, &bucket); obj = bucket->get_object(_key); } }; class RGWRemoveObjCR : public RGWSimpleCoroutine { const DoutPrefixProvider *dpp; CephContext *cct; RGWAsyncRadosProcessor *async_rados; rgw::sal::RadosStore* store; rgw_zone_id source_zone; RGWBucketInfo bucket_info; rgw_obj_key key; bool versioned; uint64_t versioned_epoch; bool delete_marker; std::string owner; std::string owner_display_name; bool del_if_older; real_time timestamp; RGWAsyncRemoveObj *req; rgw_zone_set *zones_trace; public: RGWRemoveObjCR(const DoutPrefixProvider *_dpp, RGWAsyncRadosProcessor *_async_rados, rgw::sal::RadosStore* _store, const rgw_zone_id& _source_zone, RGWBucketInfo& _bucket_info, const rgw_obj_key& _key, bool _versioned, uint64_t _versioned_epoch, std::string *_owner, std::string *_owner_display_name, bool _delete_marker, real_time *_timestamp, rgw_zone_set *_zones_trace) : RGWSimpleCoroutine(_store->ctx()), dpp(_dpp), cct(_store->ctx()), async_rados(_async_rados), store(_store), source_zone(_source_zone), bucket_info(_bucket_info), key(_key), 
versioned(_versioned), versioned_epoch(_versioned_epoch), delete_marker(_delete_marker), req(NULL), zones_trace(_zones_trace) { del_if_older = (_timestamp != NULL); if (_timestamp) { timestamp = *_timestamp; } if (_owner) { owner = *_owner; } if (_owner_display_name) { owner_display_name = *_owner_display_name; } } ~RGWRemoveObjCR() override { request_cleanup(); } void request_cleanup() override { if (req) { req->finish(); req = NULL; } } int send_request(const DoutPrefixProvider *dpp) override { req = new RGWAsyncRemoveObj(dpp, this, stack->create_completion_notifier(), store, source_zone, bucket_info, key, owner, owner_display_name, versioned, versioned_epoch, delete_marker, del_if_older, timestamp, zones_trace); async_rados->queue(req); return 0; } int request_complete() override { return req->get_ret_status(); } }; /// \brief Collect average latency /// /// Used in data sync to back off on concurrency when latency of lock /// operations rises. /// /// \warning This class is not thread safe. We do not use a mutex /// because all coroutines spawned by RGWDataSyncCR share a single thread. class LatencyMonitor { ceph::timespan total; std::uint64_t count = 0; public: LatencyMonitor() = default; void add_latency(ceph::timespan latency) { total += latency; ++count; } ceph::timespan avg_latency() { using namespace std::literals; return count == 0 ? 
0s : total / count; } }; class RGWContinuousLeaseCR : public RGWCoroutine { RGWAsyncRadosProcessor* async_rados; rgw::sal::RadosStore* store; const rgw_raw_obj obj; const std::string lock_name; const std::string cookie{RGWSimpleRadosLockCR::gen_random_cookie(cct)}; int interval; bool going_down{false}; bool locked{false}; const ceph::timespan interval_tolerance; const ceph::timespan ts_interval; RGWCoroutine* caller; bool aborted{false}; ceph::coarse_mono_time last_renew_try_time; ceph::coarse_mono_time current_time; LatencyMonitor* latency; public: RGWContinuousLeaseCR(RGWAsyncRadosProcessor* async_rados, rgw::sal::RadosStore* _store, rgw_raw_obj obj, std::string lock_name, int interval, RGWCoroutine* caller, LatencyMonitor* const latency) : RGWCoroutine(_store->ctx()), async_rados(async_rados), store(_store), obj(std::move(obj)), lock_name(std::move(lock_name)), interval(interval), interval_tolerance(ceph::make_timespan(9*interval/10)), ts_interval(ceph::make_timespan(interval)), caller(caller), latency(latency) {} virtual ~RGWContinuousLeaseCR() override; int operate(const DoutPrefixProvider *dpp) override; bool is_locked() const { if (ceph::coarse_mono_clock::now() - last_renew_try_time > ts_interval) { return false; } return locked; } void set_locked(bool status) { locked = status; } void go_down() { going_down = true; wakeup(); } void abort() { aborted = true; } }; class RGWRadosTimelogAddCR : public RGWSimpleCoroutine { const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; std::list<cls_log_entry> entries; std::string oid; boost::intrusive_ptr<RGWAioCompletionNotifier> cn; public: RGWRadosTimelogAddCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* _store, const std::string& _oid, const cls_log_entry& entry); int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; class RGWRadosTimelogTrimCR : public RGWSimpleCoroutine { const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; 
boost::intrusive_ptr<RGWAioCompletionNotifier> cn; protected: std::string oid; real_time start_time; real_time end_time; std::string from_marker; std::string to_marker; public: RGWRadosTimelogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const std::string& oid, const real_time& start_time, const real_time& end_time, const std::string& from_marker, const std::string& to_marker); int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; // wrapper to update last_trim_marker on success class RGWSyncLogTrimCR : public RGWRadosTimelogTrimCR { CephContext *cct; std::string *last_trim_marker; public: static constexpr const char* max_marker = "99999999"; RGWSyncLogTrimCR(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* store, const std::string& oid, const std::string& to_marker, std::string *last_trim_marker); int request_complete() override; }; class RGWAsyncStatObj : public RGWAsyncRadosRequest { const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWBucketInfo bucket_info; rgw_obj obj; uint64_t *psize; real_time *pmtime; uint64_t *pepoch; RGWObjVersionTracker *objv_tracker; protected: int _send_request(const DoutPrefixProvider *dpp) override; public: RGWAsyncStatObj(const DoutPrefixProvider *dpp, RGWCoroutine *caller, RGWAioCompletionNotifier *cn, rgw::sal::RadosStore* store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr, real_time *pmtime = nullptr, uint64_t *pepoch = nullptr, RGWObjVersionTracker *objv_tracker = nullptr) : RGWAsyncRadosRequest(caller, cn), dpp(dpp), store(store), obj(obj), psize(psize), pmtime(pmtime), pepoch(pepoch), objv_tracker(objv_tracker) {} }; class RGWStatObjCR : public RGWSimpleCoroutine { const DoutPrefixProvider *dpp; rgw::sal::RadosStore* store; RGWAsyncRadosProcessor *async_rados; RGWBucketInfo bucket_info; rgw_obj obj; uint64_t *psize; real_time *pmtime; uint64_t *pepoch; RGWObjVersionTracker *objv_tracker; RGWAsyncStatObj *req = 
nullptr; public: RGWStatObjCR(const DoutPrefixProvider *dpp, RGWAsyncRadosProcessor *async_rados, rgw::sal::RadosStore* store, const RGWBucketInfo& _bucket_info, const rgw_obj& obj, uint64_t *psize = nullptr, real_time* pmtime = nullptr, uint64_t *pepoch = nullptr, RGWObjVersionTracker *objv_tracker = nullptr); ~RGWStatObjCR() override { request_cleanup(); } void request_cleanup() override; int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; /// coroutine wrapper for IoCtx::aio_notify() class RGWRadosNotifyCR : public RGWSimpleCoroutine { rgw::sal::RadosStore* const store; const rgw_raw_obj obj; bufferlist request; const uint64_t timeout_ms; bufferlist *response; rgw_rados_ref ref; boost::intrusive_ptr<RGWAioCompletionNotifier> cn; public: RGWRadosNotifyCR(rgw::sal::RadosStore* store, const rgw_raw_obj& obj, bufferlist& request, uint64_t timeout_ms, bufferlist *response); int send_request(const DoutPrefixProvider *dpp) override; int request_complete() override; }; class RGWDataPostNotifyCR : public RGWCoroutine { RGWRados *store; RGWHTTPManager& http_manager; bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& shards; const char *source_zone; RGWRESTConn *conn; public: RGWDataPostNotifyCR(RGWRados *_store, RGWHTTPManager& _http_manager, bc::flat_map<int, bc::flat_set<rgw_data_notify_entry> >& _shards, const char *_zone, RGWRESTConn *_conn) : RGWCoroutine(_store->ctx()), store(_store), http_manager(_http_manager), shards(_shards), source_zone(_zone), conn(_conn) {} int operate(const DoutPrefixProvider* dpp) override; };
51,323
29.992754
182
h
null
ceph-main/src/rgw/driver/rados/rgw_cr_tools.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "common/errno.h" #include "rgw_cr_tools.h" #include "rgw_bucket.h" #include "rgw_user.h" #include "rgw_op.h" #include "rgw_acl_s3.h" #include "rgw_zone.h" #include "services/svc_zone.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw using namespace std; template<> int RGWUserCreateCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); const int32_t default_max_buckets = cct->_conf.get_val<int64_t>("rgw_user_max_buckets"); RGWUserAdminOpState op_state(store); auto& user = params.user; op_state.set_user_id(user); op_state.set_display_name(params.display_name); op_state.set_user_email(params.email); op_state.set_caps(params.caps); op_state.set_access_key(params.access_key); op_state.set_secret_key(params.secret_key); if (!params.key_type.empty()) { int32_t key_type = KEY_TYPE_S3; if (params.key_type == "swift") { key_type = KEY_TYPE_SWIFT; } op_state.set_key_type(key_type); } op_state.set_max_buckets(params.max_buckets.value_or(default_max_buckets)); op_state.set_suspension(params.suspended); op_state.set_system(params.system); op_state.set_exclusive(params.exclusive); if (params.generate_key) { op_state.set_generate_key(); } if (params.apply_quota) { RGWQuota quota; if (cct->_conf->rgw_bucket_default_quota_max_objects >= 0) { quota.bucket_quota.max_objects = cct->_conf->rgw_bucket_default_quota_max_objects; quota.bucket_quota.enabled = true; } if (cct->_conf->rgw_bucket_default_quota_max_size >= 0) { quota.bucket_quota.max_size = cct->_conf->rgw_bucket_default_quota_max_size; quota.bucket_quota.enabled = true; } if (cct->_conf->rgw_user_default_quota_max_objects >= 0) { quota.user_quota.max_objects = cct->_conf->rgw_user_default_quota_max_objects; quota.user_quota.enabled = true; } if (cct->_conf->rgw_user_default_quota_max_size >= 0) { quota.user_quota.max_size = 
cct->_conf->rgw_user_default_quota_max_size; quota.user_quota.enabled = true; } if (quota.bucket_quota.enabled) { op_state.set_bucket_quota(quota.bucket_quota); } if (quota.user_quota.enabled) { op_state.set_user_quota(quota.user_quota); } } RGWNullFlusher flusher; return RGWUserAdminOp_User::create(dpp, store, op_state, flusher, null_yield); } template<> int RGWGetUserInfoCR::Request::_send_request(const DoutPrefixProvider *dpp) { return store->ctl()->user->get_info_by_uid(dpp, params.user, result.get(), null_yield); } template<> int RGWGetBucketInfoCR::Request::_send_request(const DoutPrefixProvider *dpp) { return store->get_bucket(dpp, nullptr, params.tenant, params.bucket_name, &result->bucket, null_yield); } template<> int RGWBucketCreateLocalCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); auto& zone_svc = store->svc()->zone; const auto& user_info = params.user_info.get(); const auto& user = user_info->user_id; const auto& bucket_name = params.bucket_name; auto& placement_rule = params.placement_rule; if (!placement_rule.empty() && !zone_svc->get_zone_params().valid_placement(placement_rule)) { ldpp_dout(dpp, 0) << "placement target (" << placement_rule << ")" << " doesn't exist in the placement targets of zonegroup" << " (" << zone_svc->get_zonegroup().api_name << ")" << dendl; return -ERR_INVALID_LOCATION_CONSTRAINT; } /* we need to make sure we read bucket info, it's not read before for this * specific request */ RGWBucketInfo bucket_info; map<string, bufferlist> bucket_attrs; int ret = store->getRados()->get_bucket_info(store->svc(), user.tenant, bucket_name, bucket_info, nullptr, null_yield, dpp, &bucket_attrs); if (ret < 0 && ret != -ENOENT) return ret; bool bucket_exists = (ret != -ENOENT); RGWAccessControlPolicy old_policy(cct); ACLOwner bucket_owner; bucket_owner.set_id(user); bucket_owner.set_name(user_info->display_name); if (bucket_exists) { ret = rgw_op_get_bucket_policy_from_attr(dpp, cct, store, 
bucket_info, bucket_attrs, &old_policy, null_yield); if (ret >= 0) { if (old_policy.get_owner().get_id().compare(user) != 0) { return -EEXIST; } } } RGWBucketInfo master_info; rgw_bucket *pmaster_bucket = nullptr; uint32_t *pmaster_num_shards = nullptr; real_time creation_time; string zonegroup_id = zone_svc->get_zonegroup().get_id(); if (bucket_exists) { rgw_placement_rule selected_placement_rule; rgw_bucket bucket; bucket.tenant = user.tenant; bucket.name = bucket_name; ret = zone_svc->select_bucket_placement(dpp, *user_info, zonegroup_id, placement_rule, &selected_placement_rule, nullptr, null_yield); if (selected_placement_rule != bucket_info.placement_rule) { ldpp_dout(dpp, 0) << "bucket already exists on a different placement rule: " << " selected_rule= " << selected_placement_rule << " existing_rule= " << bucket_info.placement_rule << dendl; return -EEXIST; } } /* Encode special metadata first as we're using std::map::emplace under * the hood. This method will add the new items only if the map doesn't * contain such keys yet. 
*/ RGWAccessControlPolicy_S3 policy(cct); policy.create_canned(bucket_owner, bucket_owner, string()); /* default private policy */ bufferlist aclbl; policy.encode(aclbl); map<string, buffer::list> attrs; attrs.emplace(std::move(RGW_ATTR_ACL), std::move(aclbl)); RGWQuotaInfo quota_info; const RGWQuotaInfo * pquota_info = nullptr; rgw_bucket bucket; bucket.tenant = user.tenant; bucket.name = bucket_name; RGWBucketInfo info; obj_version ep_objv; ret = store->getRados()->create_bucket(*user_info, bucket, zonegroup_id, placement_rule, bucket_info.swift_ver_location, pquota_info, attrs, info, nullptr, &ep_objv, creation_time, pmaster_bucket, pmaster_num_shards, null_yield, dpp, true); if (ret && ret != -EEXIST) return ret; bool existed = (ret == -EEXIST); if (existed) { if (info.owner != user) { ldpp_dout(dpp, 20) << "NOTICE: bucket already exists under a different user (bucket=" << bucket << " user=" << user << " bucket_owner=" << info.owner << dendl; return -EEXIST; } bucket = info.bucket; } ret = store->ctl()->bucket->link_bucket(user, bucket, info.creation_time, null_yield, dpp, false); if (ret && !existed && ret != -EEXIST) { /* if it exists (or previously existed), don't remove it! 
*/ int r = store->ctl()->bucket->unlink_bucket(user, bucket, null_yield, dpp); if (r < 0) { ldpp_dout(dpp, 0) << "WARNING: failed to unlink bucket: ret=" << r << dendl; } } else if (ret == -EEXIST || (ret == 0 && existed)) { ret = -ERR_BUCKET_EXISTS; } if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: bucket creation (bucket=" << bucket << ") return ret=" << ret << dendl; } return ret; } template<> int RGWObjectSimplePutCR::Request::_send_request(const DoutPrefixProvider *dpp) { RGWDataAccess::ObjectRef obj; CephContext *cct = store->ctx(); int ret = params.bucket->get_object(params.key, &obj); if (ret < 0) { lderr(cct) << "ERROR: failed to get object: " << cpp_strerror(-ret) << dendl; return -ret; } if (params.user_data) { obj->set_user_data(*params.user_data); } ret = obj->put(params.data, params.attrs, dpp, null_yield); if (ret < 0) { ldpp_dout(dpp, -1) << "ERROR: put object returned error: " << cpp_strerror(-ret) << dendl; } return 0; } template<> int RGWBucketLifecycleConfigCR::Request::_send_request(const DoutPrefixProvider *dpp) { CephContext *cct = store->ctx(); RGWLC *lc = store->getRados()->get_lc(); if (!lc) { lderr(cct) << "ERROR: lifecycle object is not initialized!" << dendl; return -EIO; } int ret = lc->set_bucket_config(params.bucket, params.bucket_attrs, &params.config); if (ret < 0) { lderr(cct) << "ERROR: failed to set lifecycle on bucke: " << cpp_strerror(-ret) << dendl; return -ret; } return 0; } template<> int RGWBucketGetSyncPolicyHandlerCR::Request::_send_request(const DoutPrefixProvider *dpp) { int r = store->ctl()->bucket->get_sync_policy_handler(params.zone, params.bucket, &result->policy_handler, null_yield, dpp); if (r < 0) { ldpp_dout(dpp, -1) << "ERROR: " << __func__ << "(): get_sync_policy_handler() returned " << r << dendl; return r; } return 0; }
9,107
30.085324
165
cc
null
ceph-main/src/rgw/driver/rados/rgw_cr_tools.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include "rgw_cr_rados.h" #include "rgw_tools.h" #include "rgw_lc.h" #include "services/svc_bucket_sync.h" struct rgw_user_create_params { rgw_user user; std::string display_name; std::string email; std::string access_key; std::string secret_key; std::string key_type; /* "swift" or "s3" */ std::string caps; bool generate_key{true}; bool suspended{false}; std::optional<int32_t> max_buckets; bool system{false}; bool exclusive{false}; bool apply_quota{true}; }; using RGWUserCreateCR = RGWSimpleWriteOnlyAsyncCR<rgw_user_create_params>; struct rgw_get_user_info_params { rgw_user user; }; using RGWGetUserInfoCR = RGWSimpleAsyncCR<rgw_get_user_info_params, RGWUserInfo>; struct rgw_get_bucket_info_params { std::string tenant; std::string bucket_name; }; struct rgw_get_bucket_info_result { std::unique_ptr<rgw::sal::Bucket> bucket; }; using RGWGetBucketInfoCR = RGWSimpleAsyncCR<rgw_get_bucket_info_params, rgw_get_bucket_info_result>; struct rgw_bucket_create_local_params { std::shared_ptr<RGWUserInfo> user_info; std::string bucket_name; rgw_placement_rule placement_rule; }; using RGWBucketCreateLocalCR = RGWSimpleWriteOnlyAsyncCR<rgw_bucket_create_local_params>; struct rgw_object_simple_put_params { RGWDataAccess::BucketRef bucket; rgw_obj_key key; bufferlist data; std::map<std::string, bufferlist> attrs; std::optional<std::string> user_data; }; using RGWObjectSimplePutCR = RGWSimpleWriteOnlyAsyncCR<rgw_object_simple_put_params>; struct rgw_bucket_lifecycle_config_params { rgw::sal::Bucket* bucket; rgw::sal::Attrs bucket_attrs; RGWLifecycleConfiguration config; }; using RGWBucketLifecycleConfigCR = RGWSimpleWriteOnlyAsyncCR<rgw_bucket_lifecycle_config_params>; struct rgw_bucket_get_sync_policy_params { std::optional<rgw_zone_id> zone; std::optional<rgw_bucket> bucket; }; struct rgw_bucket_get_sync_policy_result { RGWBucketSyncPolicyHandlerRef policy_handler; 
}; using RGWBucketGetSyncPolicyHandlerCR = RGWSimpleAsyncCR<rgw_bucket_get_sync_policy_params, rgw_bucket_get_sync_policy_result>;
2,214
24.755814
127
h
null
ceph-main/src/rgw/driver/rados/rgw_d3n_datacache.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_d3n_datacache.h" #include "rgw_rest_client.h" #include "rgw_auth_s3.h" #include "rgw_op.h" #include "rgw_common.h" #include "rgw_auth_s3.h" #include "rgw_op.h" #include "rgw_crypt_sanitize.h" #if defined(__linux__) #include <features.h> #endif #if __has_include(<filesystem>) #include <filesystem> namespace efs = std::filesystem; #else #include <experimental/filesystem> namespace efs = std::experimental::filesystem; #endif #define dout_subsys ceph_subsys_rgw using namespace std; int D3nCacheAioWriteRequest::d3n_libaio_prepare_write_op(bufferlist& bl, unsigned int len, string oid, string cache_location) { std::string location = cache_location + url_encode(oid, true); int r = 0; lsubdout(g_ceph_context, rgw_datacache, 20) << "D3nDataCache: " << __func__ << "(): Write To Cache, location=" << location << dendl; cb = new struct aiocb; mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; memset(cb, 0, sizeof(struct aiocb)); r = fd = ::open(location.c_str(), O_WRONLY | O_CREAT | O_TRUNC, mode); if (fd < 0) { ldout(cct, 0) << "ERROR: D3nCacheAioWriteRequest::create_io: open file failed, errno=" << errno << ", location='" << location.c_str() << "'" << dendl; return r; } r = 0; if (g_conf()->rgw_d3n_l1_fadvise != POSIX_FADV_NORMAL) posix_fadvise(fd, 0, 0, g_conf()->rgw_d3n_l1_fadvise); cb->aio_fildes = fd; data = malloc(len); if (!data) { ldout(cct, 0) << "ERROR: D3nCacheAioWriteRequest::create_io: memory allocation failed" << dendl; return -1; } cb->aio_buf = data; memcpy((void*)data, bl.c_str(), len); cb->aio_nbytes = len; return r; } D3nDataCache::D3nDataCache() : cct(nullptr), io_type(_io_type::ASYNC_IO), free_data_cache_size(0), outstanding_write_size(0) { lsubdout(g_ceph_context, rgw_datacache, 5) << "D3nDataCache: " << __func__ << "()" << dendl; } void D3nDataCache::init(CephContext *_cct) { cct = _cct; free_data_cache_size = 
cct->_conf->rgw_d3n_l1_datacache_size; head = nullptr; tail = nullptr; cache_location = cct->_conf->rgw_d3n_l1_datacache_persistent_path; if(cache_location.back() != '/') { cache_location += "/"; } try { if (efs::exists(cache_location)) { // d3n: evict the cache storage directory if (g_conf()->rgw_d3n_l1_evict_cache_on_start) { lsubdout(g_ceph_context, rgw, 5) << "D3nDataCache: init: evicting the persistent storage directory on start" << dendl; for (auto& p : efs::directory_iterator(cache_location)) { efs::remove_all(p.path()); } } } else { // create the cache storage directory lsubdout(g_ceph_context, rgw, 5) << "D3nDataCache: init: creating the persistent storage directory on start" << dendl; efs::create_directories(cache_location); } } catch (const efs::filesystem_error& e) { lderr(g_ceph_context) << "D3nDataCache: init: ERROR initializing the cache storage directory '" << cache_location << "' : " << e.what() << dendl; } auto conf_eviction_policy = cct->_conf.get_val<std::string>("rgw_d3n_l1_eviction_policy"); ceph_assert(conf_eviction_policy == "lru" || conf_eviction_policy == "random"); if (conf_eviction_policy == "lru") eviction_policy = _eviction_policy::LRU; if (conf_eviction_policy == "random") eviction_policy = _eviction_policy::RANDOM; #if defined(HAVE_LIBAIO) && defined(__GLIBC__) // libaio setup struct aioinit ainit{0}; ainit.aio_threads = cct->_conf.get_val<int64_t>("rgw_d3n_libaio_aio_threads"); ainit.aio_num = cct->_conf.get_val<int64_t>("rgw_d3n_libaio_aio_num"); ainit.aio_idle_time = 10; aio_init(&ainit); #endif } int D3nDataCache::d3n_io_write(bufferlist& bl, unsigned int len, std::string oid) { D3nChunkDataInfo* chunk_info = new D3nChunkDataInfo; std::string location = cache_location + url_encode(oid, true); lsubdout(g_ceph_context, rgw_datacache, 20) << "D3nDataCache: " << __func__ << "(): location=" << location << dendl; FILE *cache_file = nullptr; int r = 0; size_t nbytes = 0; cache_file = fopen(location.c_str(), "w+"); if (cache_file == 
nullptr) { ldout(cct, 0) << "ERROR: D3nDataCache::fopen file has return error, errno=" << errno << dendl; return -errno; } nbytes = fwrite(bl.c_str(), 1, len, cache_file); if (nbytes != len) { ldout(cct, 0) << "ERROR: D3nDataCache::io_write: fwrite has returned error: nbytes!=len, nbytes=" << nbytes << ", len=" << len << dendl; return -EIO; } r = fclose(cache_file); if (r != 0) { ldout(cct, 0) << "ERROR: D3nDataCache::fclsoe file has return error, errno=" << errno << dendl; return -errno; } { // update cahce_map entries for new chunk in cache const std::lock_guard l(d3n_cache_lock); chunk_info->oid = oid; chunk_info->set_ctx(cct); chunk_info->size = len; d3n_cache_map.insert(pair<string, D3nChunkDataInfo*>(oid, chunk_info)); } return r; } void d3n_libaio_write_cb(sigval sigval) { lsubdout(g_ceph_context, rgw_datacache, 30) << "D3nDataCache: " << __func__ << "()" << dendl; D3nCacheAioWriteRequest* c = static_cast<D3nCacheAioWriteRequest*>(sigval.sival_ptr); c->priv_data->d3n_libaio_write_completion_cb(c); } void D3nDataCache::d3n_libaio_write_completion_cb(D3nCacheAioWriteRequest* c) { D3nChunkDataInfo* chunk_info{nullptr}; ldout(cct, 5) << "D3nDataCache: " << __func__ << "(): oid=" << c->oid << dendl; { // update cache_map entries for new chunk in cache const std::lock_guard l(d3n_cache_lock); d3n_outstanding_write_list.erase(c->oid); chunk_info = new D3nChunkDataInfo; chunk_info->oid = c->oid; chunk_info->set_ctx(cct); chunk_info->size = c->cb->aio_nbytes; d3n_cache_map.insert(pair<string, D3nChunkDataInfo*>(c->oid, chunk_info)); } { // update free size const std::lock_guard l(d3n_eviction_lock); free_data_cache_size -= c->cb->aio_nbytes; outstanding_write_size -= c->cb->aio_nbytes; lru_insert_head(chunk_info); } delete c; c = nullptr; } int D3nDataCache::d3n_libaio_create_write_request(bufferlist& bl, unsigned int len, std::string oid) { lsubdout(g_ceph_context, rgw_datacache, 30) << "D3nDataCache: " << __func__ << "(): Write To Cache, oid=" << oid << ", len=" << 
len << dendl; auto wr = std::make_unique<struct D3nCacheAioWriteRequest>(cct); int r = 0; if ((r = wr->d3n_libaio_prepare_write_op(bl, len, oid, cache_location)) < 0) { ldout(cct, 0) << "ERROR: D3nDataCache: " << __func__ << "() prepare libaio write op r=" << r << dendl; return r; } wr->cb->aio_sigevent.sigev_notify = SIGEV_THREAD; wr->cb->aio_sigevent.sigev_notify_function = d3n_libaio_write_cb; wr->cb->aio_sigevent.sigev_notify_attributes = nullptr; wr->cb->aio_sigevent.sigev_value.sival_ptr = (void*)(wr.get()); wr->oid = oid; wr->priv_data = this; if ((r = ::aio_write(wr->cb)) != 0) { ldout(cct, 0) << "ERROR: D3nDataCache: " << __func__ << "() aio_write r=" << r << dendl; return r; } // wr will be deleted when the write is successful and d3n_libaio_write_completion_cb gets called // coverity[RESOURCE_LEAK:FALSE] wr.release(); return r; } void D3nDataCache::put(bufferlist& bl, unsigned int len, std::string& oid) { size_t sr = 0; uint64_t freed_size = 0, _free_data_cache_size = 0, _outstanding_write_size = 0; ldout(cct, 10) << "D3nDataCache::" << __func__ << "(): oid=" << oid << ", len=" << len << dendl; { const std::lock_guard l(d3n_cache_lock); std::unordered_map<string, D3nChunkDataInfo*>::iterator iter = d3n_cache_map.find(oid); if (iter != d3n_cache_map.end()) { ldout(cct, 10) << "D3nDataCache::" << __func__ << "(): data already cached, no rewrite" << dendl; return; } auto it = d3n_outstanding_write_list.find(oid); if (it != d3n_outstanding_write_list.end()) { ldout(cct, 10) << "D3nDataCache: NOTE: data put in cache already issued, no rewrite" << dendl; return; } d3n_outstanding_write_list.insert(oid); } { const std::lock_guard l(d3n_eviction_lock); _free_data_cache_size = free_data_cache_size; _outstanding_write_size = outstanding_write_size; } ldout(cct, 20) << "D3nDataCache: Before eviction _free_data_cache_size:" << _free_data_cache_size << ", _outstanding_write_size:" << _outstanding_write_size << ", freed_size:" << freed_size << dendl; while (len > 
(_free_data_cache_size - _outstanding_write_size + freed_size)) { ldout(cct, 20) << "D3nDataCache: enter eviction" << dendl; if (eviction_policy == _eviction_policy::LRU) { sr = lru_eviction(); } else if (eviction_policy == _eviction_policy::RANDOM) { sr = random_eviction(); } else { ldout(cct, 0) << "D3nDataCache: Warning: unknown cache eviction policy, defaulting to lru eviction" << dendl; sr = lru_eviction(); } if (sr == 0) { ldout(cct, 2) << "D3nDataCache: Warning: eviction was not able to free disk space, not writing to cache" << dendl; d3n_outstanding_write_list.erase(oid); return; } ldout(cct, 20) << "D3nDataCache: completed eviction of " << sr << " bytes" << dendl; freed_size += sr; } int r = 0; r = d3n_libaio_create_write_request(bl, len, oid); if (r < 0) { const std::lock_guard l(d3n_cache_lock); d3n_outstanding_write_list.erase(oid); ldout(cct, 1) << "D3nDataCache: create_aio_write_request fail, r=" << r << dendl; return; } const std::lock_guard l(d3n_eviction_lock); free_data_cache_size += freed_size; outstanding_write_size += len; } bool D3nDataCache::get(const string& oid, const off_t len) { const std::lock_guard l(d3n_cache_lock); bool exist = false; string location = cache_location + url_encode(oid, true); lsubdout(g_ceph_context, rgw_datacache, 20) << "D3nDataCache: " << __func__ << "(): location=" << location << dendl; std::unordered_map<string, D3nChunkDataInfo*>::iterator iter = d3n_cache_map.find(oid); if (!(iter == d3n_cache_map.end())) { // check inside cache whether file exists or not!!!! 
then make exist true; struct D3nChunkDataInfo* chdo = iter->second; struct stat st; int r = stat(location.c_str(), &st); if ( r != -1 && st.st_size == len) { // file exists and containes required data range length exist = true; /*LRU*/ /*get D3nChunkDataInfo*/ const std::lock_guard l(d3n_eviction_lock); lru_remove(chdo); lru_insert_head(chdo); } else { d3n_cache_map.erase(oid); const std::lock_guard l(d3n_eviction_lock); lru_remove(chdo); delete chdo; exist = false; } } return exist; } size_t D3nDataCache::random_eviction() { lsubdout(g_ceph_context, rgw_datacache, 20) << "D3nDataCache: " << __func__ << "()" << dendl; int n_entries = 0; int random_index = 0; size_t freed_size = 0; D3nChunkDataInfo* del_entry; string del_oid, location; { const std::lock_guard l(d3n_cache_lock); n_entries = d3n_cache_map.size(); if (n_entries <= 0) { return -1; } srand (time(NULL)); random_index = ceph::util::generate_random_number<int>(0, n_entries-1); std::unordered_map<string, D3nChunkDataInfo*>::iterator iter = d3n_cache_map.begin(); std::advance(iter, random_index); del_oid = iter->first; del_entry = iter->second; ldout(cct, 20) << "D3nDataCache: random_eviction: index:" << random_index << ", free size: " << del_entry->size << dendl; freed_size = del_entry->size; delete del_entry; del_entry = nullptr; d3n_cache_map.erase(del_oid); // oid } location = cache_location + url_encode(del_oid, true); ::remove(location.c_str()); return freed_size; } size_t D3nDataCache::lru_eviction() { lsubdout(g_ceph_context, rgw_datacache, 20) << "D3nDataCache: " << __func__ << "()" << dendl; int n_entries = 0; size_t freed_size = 0; D3nChunkDataInfo* del_entry; string del_oid, location; { const std::lock_guard l(d3n_eviction_lock); del_entry = tail; if (del_entry == nullptr) { ldout(cct, 2) << "D3nDataCache: lru_eviction: del_entry=null_ptr" << dendl; return 0; } lru_remove(del_entry); } { const std::lock_guard l(d3n_cache_lock); n_entries = d3n_cache_map.size(); if (n_entries <= 0) { ldout(cct, 2) 
<< "D3nDataCache: lru_eviction: cache_map.size<=0" << dendl; return -1; } del_oid = del_entry->oid; ldout(cct, 20) << "D3nDataCache: lru_eviction: oid to remove: " << del_oid << dendl; d3n_cache_map.erase(del_oid); // oid } freed_size = del_entry->size; delete del_entry; location = cache_location + url_encode(del_oid, true); ::remove(location.c_str()); return freed_size; }
12,841
33.708108
201
cc
null
ceph-main/src/rgw/driver/rados/rgw_d3n_datacache.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include "rgw_rados.h" #include <curl/curl.h> #include "rgw_common.h" #include <unistd.h> #include <signal.h> #include "include/Context.h" #include "include/lru.h" #include "rgw_d3n_cacherequest.h" /*D3nDataCache*/ struct D3nDataCache; struct D3nChunkDataInfo : public LRUObject { CephContext *cct; uint64_t size; time_t access_time; std::string address; std::string oid; bool complete; struct D3nChunkDataInfo* lru_prev; struct D3nChunkDataInfo* lru_next; D3nChunkDataInfo(): size(0) {} void set_ctx(CephContext *_cct) { cct = _cct; } void dump(Formatter *f) const; static void generate_test_instances(std::list<D3nChunkDataInfo*>& o); }; struct D3nCacheAioWriteRequest { std::string oid; void *data = nullptr; int fd = -1; struct aiocb *cb = nullptr; D3nDataCache *priv_data = nullptr; CephContext *cct = nullptr; D3nCacheAioWriteRequest(CephContext *_cct) : cct(_cct) {} int d3n_libaio_prepare_write_op(bufferlist& bl, unsigned int len, std::string oid, std::string cache_location); ~D3nCacheAioWriteRequest() { ::close(fd); free(data); cb->aio_buf = nullptr; delete(cb); } }; struct D3nDataCache { private: std::unordered_map<std::string, D3nChunkDataInfo*> d3n_cache_map; std::set<std::string> d3n_outstanding_write_list; std::mutex d3n_cache_lock; std::mutex d3n_eviction_lock; CephContext *cct; enum class _io_type { SYNC_IO = 1, ASYNC_IO = 2, SEND_FILE = 3 } io_type; enum class _eviction_policy { LRU=0, RANDOM=1 } eviction_policy; struct sigaction action; uint64_t free_data_cache_size = 0; uint64_t outstanding_write_size = 0; struct D3nChunkDataInfo* head; struct D3nChunkDataInfo* tail; private: void add_io(); public: D3nDataCache(); ~D3nDataCache() { while (lru_eviction() > 0); } std::string cache_location; bool get(const std::string& oid, const off_t len); void put(bufferlist& bl, unsigned int len, std::string& obj_key); int d3n_io_write(bufferlist& bl, 
unsigned int len, std::string oid); int d3n_libaio_create_write_request(bufferlist& bl, unsigned int len, std::string oid); void d3n_libaio_write_completion_cb(D3nCacheAioWriteRequest* c); size_t random_eviction(); size_t lru_eviction(); void init(CephContext *_cct); void lru_insert_head(struct D3nChunkDataInfo* o) { lsubdout(g_ceph_context, rgw_datacache, 30) << "D3nDataCache: " << __func__ << "()" << dendl; o->lru_next = head; o->lru_prev = nullptr; if (head) { head->lru_prev = o; } else { tail = o; } head = o; } void lru_insert_tail(struct D3nChunkDataInfo* o) { lsubdout(g_ceph_context, rgw_datacache, 30) << "D3nDataCache: " << __func__ << "()" << dendl; o->lru_next = nullptr; o->lru_prev = tail; if (tail) { tail->lru_next = o; } else { head = o; } tail = o; } void lru_remove(struct D3nChunkDataInfo* o) { lsubdout(g_ceph_context, rgw_datacache, 30) << "D3nDataCache: " << __func__ << "()" << dendl; if (o->lru_next) o->lru_next->lru_prev = o->lru_prev; else tail = o->lru_prev; if (o->lru_prev) o->lru_prev->lru_next = o->lru_next; else head = o->lru_next; o->lru_next = o->lru_prev = nullptr; } }; template <class T> class D3nRGWDataCache : public T { public: D3nRGWDataCache() {} int init_rados() override { int ret; ret = T::init_rados(); if (ret < 0) return ret; return 0; } int get_obj_iterate_cb(const DoutPrefixProvider *dpp, const rgw_raw_obj& read_obj, off_t obj_ofs, off_t read_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg) override; }; template<typename T> int D3nRGWDataCache<T>::get_obj_iterate_cb(const DoutPrefixProvider *dpp, const rgw_raw_obj& read_obj, off_t obj_ofs, off_t read_ofs, off_t len, bool is_head_obj, RGWObjState *astate, void *arg) { lsubdout(g_ceph_context, rgw_datacache, 30) << "D3nDataCache::" << __func__ << "(): is head object : " << is_head_obj << dendl; librados::ObjectReadOperation op; struct get_obj_data* d = static_cast<struct get_obj_data*>(arg); std::string oid, key; if (is_head_obj) { // only when reading from the 
head object do we need to do the atomic test int r = T::append_atomic_test(dpp, astate, op); if (r < 0) return r; if (astate && obj_ofs < astate->data.length()) { unsigned chunk_len = std::min((uint64_t)astate->data.length() - obj_ofs, (uint64_t)len); r = d->client_cb->handle_data(astate->data, obj_ofs, chunk_len); if (r < 0) return r; len -= chunk_len; d->offset += chunk_len; read_ofs += chunk_len; obj_ofs += chunk_len; if (!len) return 0; } auto obj = d->rgwrados->svc.rados->obj(read_obj); r = obj.open(dpp); if (r < 0) { lsubdout(g_ceph_context, rgw, 4) << "failed to open rados context for " << read_obj << dendl; return r; } ldpp_dout(dpp, 20) << "D3nDataCache::" << __func__ << "(): oid=" << read_obj.oid << " obj-ofs=" << obj_ofs << " read_ofs=" << read_ofs << " len=" << len << dendl; op.read(read_ofs, len, nullptr, nullptr); const uint64_t cost = len; const uint64_t id = obj_ofs; // use logical object offset for sorting replies auto& ref = obj.get_ref(); auto completed = d->aio->get(ref.obj, rgw::Aio::librados_op(ref.pool.ioctx(), std::move(op), d->yield), cost, id); return d->flush(std::move(completed)); } else { ldpp_dout(dpp, 20) << "D3nDataCache::" << __func__ << "(): oid=" << read_obj.oid << ", is_head_obj=" << is_head_obj << ", obj-ofs=" << obj_ofs << ", read_ofs=" << read_ofs << ", len=" << len << dendl; int r; op.read(read_ofs, len, nullptr, nullptr); const uint64_t cost = len; const uint64_t id = obj_ofs; // use logical object offset for sorting replies oid = read_obj.oid; auto obj = d->rgwrados->svc.rados->obj(read_obj); r = obj.open(dpp); if (r < 0) { lsubdout(g_ceph_context, rgw, 0) << "D3nDataCache: Error: failed to open rados context for " << read_obj << ", r=" << r << dendl; return r; } auto& ref = obj.get_ref(); const bool is_compressed = (astate->attrset.find(RGW_ATTR_COMPRESSION) != astate->attrset.end()); const bool is_encrypted = (astate->attrset.find(RGW_ATTR_CRYPT_MODE) != astate->attrset.end()); if (read_ofs != 0 || astate->size != 
astate->accounted_size || is_compressed || is_encrypted) { d->d3n_bypass_cache_write = true; lsubdout(g_ceph_context, rgw, 5) << "D3nDataCache: " << __func__ << "(): Note - bypassing datacache: oid=" << read_obj.oid << ", read_ofs!=0 = " << read_ofs << ", size=" << astate->size << " != accounted_size=" << astate->accounted_size << ", is_compressed=" << is_compressed << ", is_encrypted=" << is_encrypted << dendl; auto completed = d->aio->get(ref.obj, rgw::Aio::librados_op(ref.pool.ioctx(), std::move(op), d->yield), cost, id); r = d->flush(std::move(completed)); return r; } if (d->rgwrados->d3n_data_cache->get(oid, len)) { // Read From Cache ldpp_dout(dpp, 20) << "D3nDataCache: " << __func__ << "(): READ FROM CACHE: oid=" << read_obj.oid << ", obj-ofs=" << obj_ofs << ", read_ofs=" << read_ofs << ", len=" << len << dendl; auto completed = d->aio->get(ref.obj, rgw::Aio::d3n_cache_op(dpp, d->yield, read_ofs, len, d->rgwrados->d3n_data_cache->cache_location), cost, id); r = d->flush(std::move(completed)); if (r < 0) { lsubdout(g_ceph_context, rgw, 0) << "D3nDataCache: " << __func__ << "(): Error: failed to drain/flush, r= " << r << dendl; } return r; } else { // Write To Cache ldpp_dout(dpp, 20) << "D3nDataCache: " << __func__ << "(): WRITE TO CACHE: oid=" << read_obj.oid << ", obj-ofs=" << obj_ofs << ", read_ofs=" << read_ofs << " len=" << len << dendl; auto completed = d->aio->get(ref.obj, rgw::Aio::librados_op(ref.pool.ioctx(), std::move(op), d->yield), cost, id); return d->flush(std::move(completed)); } } lsubdout(g_ceph_context, rgw, 1) << "D3nDataCache: " << __func__ << "(): Warning: Check head object cache handling flow, oid=" << read_obj.oid << dendl; return 0; }
8,486
31.517241
329
h
null
ceph-main/src/rgw/driver/rados/rgw_data_sync.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "common/ceph_json.h" #include "common/RefCountedObj.h" #include "common/WorkQueue.h" #include "common/Throttle.h" #include "common/errno.h" #include "rgw_common.h" #include "rgw_zone.h" #include "rgw_sync.h" #include "rgw_data_sync.h" #include "rgw_rest_conn.h" #include "rgw_cr_rados.h" #include "rgw_cr_rest.h" #include "rgw_cr_tools.h" #include "rgw_http_client.h" #include "rgw_bucket.h" #include "rgw_bucket_sync.h" #include "rgw_bucket_sync_cache.h" #include "rgw_datalog.h" #include "rgw_metadata.h" #include "rgw_sync_counters.h" #include "rgw_sync_error_repo.h" #include "rgw_sync_module.h" #include "rgw_sal.h" #include "cls/lock/cls_lock_client.h" #include "cls/rgw/cls_rgw_client.h" #include "services/svc_zone.h" #include "services/svc_sync_modules.h" #include "include/common_fwd.h" #include "include/random.h" #include <boost/asio/yield.hpp> #include <string_view> #define dout_subsys ceph_subsys_rgw #undef dout_prefix #define dout_prefix (*_dout << "data sync: ") using namespace std; static const string datalog_sync_status_oid_prefix = "datalog.sync-status"; static const string datalog_sync_status_shard_prefix = "datalog.sync-status.shard"; static const string datalog_sync_full_sync_index_prefix = "data.full-sync.index"; static const string bucket_full_status_oid_prefix = "bucket.full-sync-status"; static const string bucket_status_oid_prefix = "bucket.sync-status"; static const string object_status_oid_prefix = "bucket.sync-status"; static const string data_sync_bids_oid = "data-sync-bids"; void rgw_datalog_info::decode_json(JSONObj *obj) { JSONDecoder::decode_json("num_objects", num_shards, obj); } void rgw_datalog_entry::decode_json(JSONObj *obj) { JSONDecoder::decode_json("key", key, obj); utime_t ut; JSONDecoder::decode_json("timestamp", ut, obj); timestamp = ut.to_real_time(); } void rgw_datalog_shard_data::decode_json(JSONObj *obj) { 
JSONDecoder::decode_json("marker", marker, obj); JSONDecoder::decode_json("truncated", truncated, obj); JSONDecoder::decode_json("entries", entries, obj); }; // print a bucket shard with [gen] std::string to_string(const rgw_bucket_shard& bs, std::optional<uint64_t> gen) { constexpr auto digits10 = std::numeric_limits<uint64_t>::digits10; constexpr auto reserve = 2 + digits10; // [value] auto str = bs.get_key('/', ':', ':', reserve); str.append(1, '['); str.append(std::to_string(gen.value_or(0))); str.append(1, ']'); return str; } class RGWReadDataSyncStatusMarkersCR : public RGWShardCollectCR { static constexpr int MAX_CONCURRENT_SHARDS = 16; RGWDataSyncCtx *sc; RGWDataSyncEnv *env; const int num_shards; int shard_id{0};; map<uint32_t, rgw_data_sync_marker>& markers; std::vector<RGWObjVersionTracker>& objvs; int handle_result(int r) override { if (r == -ENOENT) { // ENOENT is not a fatal error return 0; } if (r < 0) { ldout(cct, 4) << "failed to read data sync status: " << cpp_strerror(r) << dendl; } return r; } public: RGWReadDataSyncStatusMarkersCR(RGWDataSyncCtx *sc, int num_shards, map<uint32_t, rgw_data_sync_marker>& markers, std::vector<RGWObjVersionTracker>& objvs) : RGWShardCollectCR(sc->cct, MAX_CONCURRENT_SHARDS), sc(sc), env(sc->env), num_shards(num_shards), markers(markers), objvs(objvs) {} bool spawn_next() override; }; bool RGWReadDataSyncStatusMarkersCR::spawn_next() { if (shard_id >= num_shards) { return false; } using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>; spawn(new CR(env->dpp, env->driver, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)), &markers[shard_id], true, &objvs[shard_id]), false); shard_id++; return true; } class RGWReadDataSyncRecoveringShardsCR : public RGWShardCollectCR { static constexpr int MAX_CONCURRENT_SHARDS = 16; RGWDataSyncCtx *sc; RGWDataSyncEnv *env; uint64_t max_entries; int num_shards; int shard_id{0}; string marker; 
std::vector<RGWRadosGetOmapKeysCR::ResultPtr>& omapkeys; int handle_result(int r) override { if (r == -ENOENT) { // ENOENT is not a fatal error return 0; } if (r < 0) { ldout(cct, 4) << "failed to list recovering data sync: " << cpp_strerror(r) << dendl; } return r; } public: RGWReadDataSyncRecoveringShardsCR(RGWDataSyncCtx *sc, uint64_t _max_entries, int _num_shards, std::vector<RGWRadosGetOmapKeysCR::ResultPtr>& omapkeys) : RGWShardCollectCR(sc->cct, MAX_CONCURRENT_SHARDS), sc(sc), env(sc->env), max_entries(_max_entries), num_shards(_num_shards), omapkeys(omapkeys) {} bool spawn_next() override; }; bool RGWReadDataSyncRecoveringShardsCR::spawn_next() { if (shard_id >= num_shards) return false; string error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry"; auto& shard_keys = omapkeys[shard_id]; shard_keys = std::make_shared<RGWRadosGetOmapKeysCR::Result>(); spawn(new RGWRadosGetOmapKeysCR(env->driver, rgw_raw_obj(env->svc->zone->get_zone_params().log_pool, error_oid), marker, max_entries, shard_keys), false); ++shard_id; return true; } class RGWReadDataSyncStatusCoroutine : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; rgw_data_sync_status *sync_status; RGWObjVersionTracker* objv_tracker; std::vector<RGWObjVersionTracker>& objvs; public: RGWReadDataSyncStatusCoroutine(RGWDataSyncCtx *_sc, rgw_data_sync_status *_status, RGWObjVersionTracker* objv_tracker, std::vector<RGWObjVersionTracker>& objvs) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(sc->env), sync_status(_status), objv_tracker(objv_tracker), objvs(objvs) {} int operate(const DoutPrefixProvider *dpp) override; }; int RGWReadDataSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read sync info using ReadInfoCR = RGWSimpleRadosReadCR<rgw_data_sync_info>; yield { bool empty_on_enoent = false; // fail on ENOENT call(new ReadInfoCR(dpp, sync_env->driver, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, 
RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)), &sync_status->sync_info, empty_on_enoent, objv_tracker)); } if (retcode < 0) { ldpp_dout(dpp, 4) << "failed to read sync status info with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } // read shard markers objvs.resize(sync_status->sync_info.num_shards); using ReadMarkersCR = RGWReadDataSyncStatusMarkersCR; yield call(new ReadMarkersCR(sc, sync_status->sync_info.num_shards, sync_status->sync_markers, objvs)); if (retcode < 0) { ldpp_dout(dpp, 4) << "failed to read sync status markers with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } return set_cr_done(); } return 0; } class RGWReadRemoteDataLogShardInfoCR : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; RGWRESTReadResource *http_op; int shard_id; RGWDataChangesLogInfo *shard_info; public: RGWReadRemoteDataLogShardInfoCR(RGWDataSyncCtx *_sc, int _shard_id, RGWDataChangesLogInfo *_shard_info) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), http_op(NULL), shard_id(_shard_id), shard_info(_shard_info) { } ~RGWReadRemoteDataLogShardInfoCR() override { if (http_op) { http_op->put(); } } int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { char buf[16]; snprintf(buf, sizeof(buf), "%d", shard_id); rgw_http_param_pair pairs[] = { { "type" , "data" }, { "id", buf }, { "info" , NULL }, { NULL, NULL } }; string p = "/admin/log/"; http_op = new RGWRESTReadResource(sc->conn, p, pairs, NULL, sync_env->http_manager); init_new_io(http_op); int ret = http_op->aio_read(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; return set_cr_error(ret); } return io_block(0); } yield { int ret = http_op->wait(shard_info, null_yield); if (ret < 0) { return set_cr_error(ret); } return set_cr_done(); } } return 0; } }; struct 
read_remote_data_log_response { string marker; bool truncated; vector<rgw_data_change_log_entry> entries; read_remote_data_log_response() : truncated(false) {} void decode_json(JSONObj *obj) { JSONDecoder::decode_json("marker", marker, obj); JSONDecoder::decode_json("truncated", truncated, obj); JSONDecoder::decode_json("entries", entries, obj); }; }; class RGWReadRemoteDataLogShardCR : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; RGWRESTReadResource *http_op = nullptr; int shard_id; const std::string& marker; string *pnext_marker; vector<rgw_data_change_log_entry> *entries; bool *truncated; read_remote_data_log_response response; std::optional<TOPNSPC::common::PerfGuard> timer; public: RGWReadRemoteDataLogShardCR(RGWDataSyncCtx *_sc, int _shard_id, const std::string& marker, string *pnext_marker, vector<rgw_data_change_log_entry> *_entries, bool *_truncated) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), shard_id(_shard_id), marker(marker), pnext_marker(pnext_marker), entries(_entries), truncated(_truncated) { } ~RGWReadRemoteDataLogShardCR() override { if (http_op) { http_op->put(); } } int operate(const DoutPrefixProvider *dpp) override { reenter(this) { yield { char buf[16]; snprintf(buf, sizeof(buf), "%d", shard_id); rgw_http_param_pair pairs[] = { { "type" , "data" }, { "id", buf }, { "marker", marker.c_str() }, { "extra-info", "true" }, { NULL, NULL } }; string p = "/admin/log/"; http_op = new RGWRESTReadResource(sc->conn, p, pairs, NULL, sync_env->http_manager); init_new_io(http_op); if (sync_env->counters) { timer.emplace(sync_env->counters, sync_counters::l_poll); } int ret = http_op->aio_read(dpp); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl; log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl; if (sync_env->counters) { sync_env->counters->inc(sync_counters::l_poll_err); } return set_cr_error(ret); } return io_block(0); } yield { 
timer.reset();
      // NOTE(review): tail of a coroutine completion handler whose opening
      // lines are outside this chunk; kept verbatim.
      int ret = http_op->wait(&response, null_yield);
      if (ret < 0) {
        if (sync_env->counters && ret != -ENOENT) {
          sync_env->counters->inc(sync_counters::l_poll_err);
        }
        return set_cr_error(ret);
      }
      entries->clear();
      entries->swap(response.entries);
      *pnext_marker = response.marker;
      *truncated = response.truncated;
      return set_cr_done();
    }
  }
  return 0;
  }
};

// Collects RGWDataChangesLogInfo for every remote datalog shard by spawning
// one RGWReadRemoteDataLogShardInfoCR per shard, bounded by
// READ_DATALOG_MAX_CONCURRENT concurrent children.
class RGWReadRemoteDataLogInfoCR : public RGWShardCollectCR {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  int num_shards;
  map<int, RGWDataChangesLogInfo> *datalog_info; // output: per-shard info

  int shard_id; // next shard to spawn
#define READ_DATALOG_MAX_CONCURRENT 10

  // Called per-child; ENOENT (shard missing on the remote) is tolerated.
  int handle_result(int r) override {
    if (r == -ENOENT) { // ENOENT is not a fatal error
      return 0;
    }
    if (r < 0) {
      ldout(cct, 4) << "failed to fetch remote datalog info: "
          << cpp_strerror(r) << dendl;
    }
    return r;
  }
public:
  RGWReadRemoteDataLogInfoCR(RGWDataSyncCtx *_sc,
                             int _num_shards,
                             map<int, RGWDataChangesLogInfo> *_datalog_info)
    : RGWShardCollectCR(_sc->cct, READ_DATALOG_MAX_CONCURRENT),
      sc(_sc), sync_env(_sc->env), num_shards(_num_shards),
      datalog_info(_datalog_info), shard_id(0) {}
  bool spawn_next() override;
};

bool RGWReadRemoteDataLogInfoCR::spawn_next() {
  if (shard_id >= num_shards) {
    return false;
  }
  // second arg false: child is collected by RGWShardCollectCR, not drained here
  spawn(new RGWReadRemoteDataLogShardInfoCR(sc, shard_id, &(*datalog_info)[shard_id]), false);
  shard_id++;
  return true;
}

// Lists the entries of a single remote datalog shard via the REST admin API
// (GET /admin/log?type=data), starting from 'marker'.
class RGWListRemoteDataLogShardCR : public RGWSimpleCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  RGWRESTReadResource *http_op; // owned across send_request/request_complete

  int shard_id;
  string marker;
  uint32_t max_entries;
  rgw_datalog_shard_data *result; // output

public:
  RGWListRemoteDataLogShardCR(RGWDataSyncCtx *sc, int _shard_id,
                              const string& _marker, uint32_t _max_entries,
                              rgw_datalog_shard_data *_result)
    : RGWSimpleCoroutine(sc->cct), sc(sc), sync_env(sc->env), http_op(NULL),
      shard_id(_shard_id), marker(_marker), max_entries(_max_entries),
      result(_result) {}

  // Issues the async REST read; on failure releases http_op immediately.
  int send_request(const DoutPrefixProvider *dpp) override {
    RGWRESTConn *conn = sc->conn;

    char buf[32];
    snprintf(buf, sizeof(buf), "%d", shard_id);

    char max_entries_buf[32];
    snprintf(max_entries_buf, sizeof(max_entries_buf), "%d", (int)max_entries);

    // empty key drops the "marker" param entirely when no marker is set
    const char *marker_key = (marker.empty() ? "" : "marker");

    rgw_http_param_pair pairs[] = { { "type", "data" },
      { "id", buf },
      { "max-entries", max_entries_buf },
      { marker_key, marker.c_str() },
      { NULL, NULL } };

    string p = "/admin/log/";

    http_op = new RGWRESTReadResource(conn, p, pairs, NULL, sync_env->http_manager);
    init_new_io(http_op);

    int ret = http_op->aio_read(dpp);
    if (ret < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to read from " << p << dendl;
      log_error() << "failed to send http operation: " << http_op->to_str() << " ret=" << ret << std::endl;
      http_op->put();
      return ret;
    }

    return 0;
  }

  // Waits for the response; ENOENT (shard absent) is treated as success.
  int request_complete() override {
    int ret = http_op->wait(result, null_yield);
    http_op->put();
    if (ret < 0 && ret != -ENOENT) {
      ldpp_dout(sync_env->dpp, 0) << "ERROR: failed to list remote datalog shard, ret=" << ret << dendl;
      return ret;
    }
    return 0;
  }
};

// Lists a set of remote datalog shards (each from its own marker) with
// bounded concurrency, filling one rgw_datalog_shard_data per shard.
class RGWListRemoteDataLogCR : public RGWShardCollectCR {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  map<int, string> shards; // shard_id -> start marker
  int max_entries_per_shard;
  map<int, rgw_datalog_shard_data> *result; // output per shard
  map<int, string>::iterator iter;
#define READ_DATALOG_MAX_CONCURRENT 10

  int handle_result(int r) override {
    if (r == -ENOENT) { // ENOENT is not a fatal error
      return 0;
    }
    if (r < 0) {
      ldout(cct, 4) << "failed to list remote datalog: "
          << cpp_strerror(r) << dendl;
    }
    return r;
  }
public:
  RGWListRemoteDataLogCR(RGWDataSyncCtx *_sc,
                         map<int, string>& _shards,
                         int _max_entries_per_shard,
                         map<int, rgw_datalog_shard_data> *_result)
    : RGWShardCollectCR(_sc->cct, READ_DATALOG_MAX_CONCURRENT),
      sc(_sc), sync_env(_sc->env),
      max_entries_per_shard(_max_entries_per_shard),
      result(_result) {
    // takes ownership of the caller's marker map to avoid a copy
    shards.swap(_shards);
    iter = shards.begin();
  }
  bool spawn_next() override;
};

bool RGWListRemoteDataLogCR::spawn_next() {
  if (iter == shards.end()) {
    return false;
  }

  spawn(new RGWListRemoteDataLogShardCR(sc, iter->first, iter->second, max_entries_per_shard,
&(*result)[iter->first]), false);
  ++iter;
  return true;
}

// Initializes the persistent data-sync status objects for a source zone:
// writes the sync_info object, reads each remote datalog shard's current
// position, and writes one per-shard marker object, all while the caller
// holds the "sync_lock" lease.
class RGWInitDataSyncStatusCoroutine : public RGWCoroutine {
  static constexpr auto lock_name{ "sync_lock"sv };
  RGWDataSyncCtx* const sc;
  RGWDataSyncEnv* const sync_env{ sc->env };
  const uint32_t num_shards;
  rgw_data_sync_status* const status; // in/out: status being initialized
  RGWSyncTraceNodeRef tn;
  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr; // must stay locked
  RGWObjVersionTracker& objv_tracker; // versioning for the status object
  std::vector<RGWObjVersionTracker>& objvs; // versioning per shard marker

  const rgw_pool& pool{ sync_env->svc->zone->get_zone_params().log_pool };
  const string sync_status_oid{
    RGWDataSyncStatusManager::sync_status_oid(sc->source_zone) };

  map<int, RGWDataChangesLogInfo> shards_info; // fetched remote positions

public:
  RGWInitDataSyncStatusCoroutine(
    RGWDataSyncCtx* _sc, uint32_t num_shards, uint64_t instance_id,
    const RGWSyncTraceNodeRef& tn_parent, rgw_data_sync_status* status,
    boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr,
    RGWObjVersionTracker& objv_tracker,
    std::vector<RGWObjVersionTracker>& objvs)
    : RGWCoroutine(_sc->cct), sc(_sc), num_shards(num_shards),
      status(status),
      tn(sync_env->sync_tracer->add_node(tn_parent, "init_data_sync_status")),
      lease_cr(std::move(lease_cr)), objv_tracker(objv_tracker),
      objvs(objvs) {
    status->sync_info.instance_id = instance_id;
  }

  // Builds the lease coroutine guarding the status object; shared with
  // RGWRDL::DataSyncInitCR so both use the same lock name/location.
  static auto continuous_lease_cr(RGWDataSyncCtx* const sc,
                                  RGWCoroutine* const caller) {
    auto lock_duration = sc->cct->_conf->rgw_sync_lease_period;
    return new RGWContinuousLeaseCR(
      sc->env->async_rados, sc->env->driver,
      { sc->env->svc->zone->get_zone_params().log_pool,
        RGWDataSyncStatusManager::sync_status_oid(sc->source_zone) },
      string(lock_name), lock_duration, caller, &sc->lcc);
  }

  int operate(const DoutPrefixProvider *dpp) override {
    int ret;
    reenter(this) {
      if (!lease_cr->is_locked()) {
        drain_all();
        return set_cr_error(-ECANCELED);
      }
      using WriteInfoCR = RGWSimpleRadosWriteCR<rgw_data_sync_info>;
      yield call(new WriteInfoCR(dpp, sync_env->driver,
                                 rgw_raw_obj{pool, sync_status_oid},
                                 status->sync_info, &objv_tracker));
      if (retcode < 0) {
        tn->log(0,
SSTR("ERROR: failed to write sync status info with " << retcode));
        return set_cr_error(retcode);
      }

      // In the original code we reacquired the lock. Since
      // RGWSimpleRadosWriteCR doesn't appear to touch the attributes
      // and cls_version works across it, this should be unnecessary.
      // Putting a note here just in case. If we see ECANCELED where
      // we expect EBUSY, we can revisit this.

      /* fetch current position in logs */
      yield {
        RGWRESTConn *conn = sync_env->svc->zone->get_zone_conn(sc->source_zone);
        if (!conn) {
          tn->log(0, SSTR("ERROR: connection to zone " << sc->source_zone << " does not exist!"));
          return set_cr_error(-EIO);
        }
        for (uint32_t i = 0; i < num_shards; i++) {
          spawn(new RGWReadRemoteDataLogShardInfoCR(sc, i, &shards_info[i]), true);
        }
      }
      while (collect(&ret, NULL)) {
        if (ret < 0) {
          tn->log(0, SSTR("ERROR: failed to read remote data log shards"));
          return set_state(RGWCoroutine_Error);
        }
        yield;
      }
      yield {
        objvs.resize(num_shards);
        // seed each shard marker at the remote log's current position so
        // incremental sync starts where full sync leaves off
        for (uint32_t i = 0; i < num_shards; i++) {
          RGWDataChangesLogInfo& info = shards_info[i];
          auto& marker = status->sync_markers[i];
          marker.next_step_marker = info.marker;
          marker.timestamp = info.last_update;
          const auto& oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, i);
          auto& objv = objvs[i];
          objv.generate_new_write_ver(cct);
          using WriteMarkerCR = RGWSimpleRadosWriteCR<rgw_data_sync_marker>;
          spawn(new WriteMarkerCR(dpp, sync_env->driver,
                                  rgw_raw_obj{pool, oid}, marker, &objv), true);
        }
      }
      while (collect(&ret, NULL)) {
        if (ret < 0) {
          tn->log(0, SSTR("ERROR: failed to write data sync status markers"));
          return set_state(RGWCoroutine_Error);
        }
        yield;
      }
      status->sync_info.state = rgw_data_sync_info::StateBuildingFullSyncMaps;
      yield call(new WriteInfoCR(dpp, sync_env->driver,
                                 rgw_raw_obj{pool, sync_status_oid},
                                 status->sync_info, &objv_tracker));
      if (retcode < 0) {
        tn->log(0, SSTR("ERROR: failed to write sync status info with " << retcode));
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};
// Manages sync against a remote zone's data log; runs its coroutines on an
// internal RGWCoroutinesManager/RGWHTTPManager pair.
RGWRemoteDataLog::RGWRemoteDataLog(const DoutPrefixProvider *dpp,
                                   rgw::sal::RadosStore* driver,
                                   RGWAsyncRadosProcessor *async_rados)
  : RGWCoroutinesManager(driver->ctx(), driver->getRados()->get_cr_registry()),
    dpp(dpp), driver(driver), cct(driver->ctx()),
    cr_registry(driver->getRados()->get_cr_registry()),
    async_rados(async_rados),
    http_manager(driver->ctx(), completion_mgr),
    data_sync_cr(NULL),
    initialized(false)
{
}

// Fetches the remote datalog's global info (e.g. shard count) synchronously.
int RGWRemoteDataLog::read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info)
{
  rgw_http_param_pair pairs[] = { { "type", "data" },
                                  { NULL, NULL } };

  int ret = sc.conn->get_json_resource(dpp, "/admin/log", pairs, null_yield, *log_info);
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to fetch datalog info" << dendl;
    return ret;
  }

  ldpp_dout(dpp, 20) << "remote datalog, num_shards=" << log_info->num_shards << dendl;

  return 0;
}

// Reads per-shard info for every remote datalog shard.
int RGWRemoteDataLog::read_source_log_shards_info(const DoutPrefixProvider *dpp, map<int, RGWDataChangesLogInfo> *shards_info)
{
  rgw_datalog_info log_info;
  int ret = read_log_info(dpp, &log_info);
  if (ret < 0) {
    return ret;
  }

  return run(dpp, new RGWReadRemoteDataLogInfoCR(&sc, log_info.num_shards, shards_info));
}

// Lists the next entry (max 1) of each given shard starting at its marker.
int RGWRemoteDataLog::read_source_log_shards_next(const DoutPrefixProvider *dpp, map<int, string> shard_markers, map<int, rgw_datalog_shard_data> *result)
{
  return run(dpp, new RGWListRemoteDataLogCR(&sc, shard_markers, 1, result));
}

// One-time setup of the sync environment/context; idempotent after the
// first successful call.
int RGWRemoteDataLog::init(const rgw_zone_id& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger,
                           RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module,
                           PerfCounters* counters)
{
  sync_env.init(dpp, cct, driver, driver->svc(), async_rados, &http_manager, _error_logger,
                _sync_tracer, _sync_module, counters);
  sc.init(&sync_env, _conn, _source_zone);

  if (initialized) {
    return 0;
  }

  int ret = http_manager.start();
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
    return ret;
  }

  tn =
sync_env.sync_tracer->add_node(sync_env.sync_tracer->root_node, "data");

  initialized = true;

  return 0;
}

void RGWRemoteDataLog::finish()
{
  stop();
}

// Reads the persisted data sync status; runs in its own coroutine manager
// because it may not share the manager used by run_sync().
int RGWRemoteDataLog::read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status)
{
  // cannot run concurrently with run_sync(), so run in a separate manager
  RGWObjVersionTracker objv;
  std::vector<RGWObjVersionTracker> shard_objvs;
  RGWCoroutinesManager crs(cct, cr_registry);
  RGWHTTPManager http_manager(cct, crs.get_completion_mgr());
  int ret = http_manager.start();
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
    return ret;
  }
  RGWDataSyncEnv sync_env_local = sync_env;
  sync_env_local.http_manager = &http_manager;
  RGWDataSyncCtx sc_local = sc;
  sc_local.env = &sync_env_local;
  ret = crs.run(dpp, new RGWReadDataSyncStatusCoroutine(&sc_local, sync_status,
                                                       &objv, shard_objvs));
  http_manager.stop();
  return ret;
}

// Determines which shards still have entries in their error repo (i.e. are
// recovering); a shard is recovering if its retry omap is non-empty.
int RGWRemoteDataLog::read_recovering_shards(const DoutPrefixProvider *dpp, const int num_shards, set<int>& recovering_shards)
{
  // cannot run concurrently with run_sync(), so run in a separate manager
  RGWCoroutinesManager crs(cct, cr_registry);
  RGWHTTPManager http_manager(cct, crs.get_completion_mgr());
  int ret = http_manager.start();
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
    return ret;
  }
  RGWDataSyncEnv sync_env_local = sync_env;
  sync_env_local.http_manager = &http_manager;
  RGWDataSyncCtx sc_local = sc;
  sc_local.env = &sync_env_local;
  std::vector<RGWRadosGetOmapKeysCR::ResultPtr> omapkeys;
  omapkeys.resize(num_shards);
  uint64_t max_entries{1}; // one key is enough to know the shard is recovering

  ret = crs.run(dpp, new RGWReadDataSyncRecoveringShardsCR(&sc_local, max_entries, num_shards, omapkeys));
  http_manager.stop();

  if (ret == 0) {
    for (int i = 0; i < num_shards; i++) {
      if (omapkeys[i]->entries.size() != 0) {
        recovering_shards.insert(i);
      }
    }
  }

  return ret;
}

namespace RGWRDL {

// Acquires the sync-status lease, then delegates to
// RGWInitDataSyncStatusCoroutine to write the initial status objects.
class DataSyncInitCR : public RGWCoroutine {
  RGWDataSyncCtx* const sc;
  const
uint32_t num_shards;
  uint64_t instance_id;
  const RGWSyncTraceNodeRef& tn;
  rgw_data_sync_status* const sync_status; // out: initialized status
  std::vector<RGWObjVersionTracker>& objvs; // out: per-shard versions

  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr; // aborted in dtor if live

  RGWObjVersionTracker objv_tracker;

public:

  DataSyncInitCR(RGWDataSyncCtx* sc, uint32_t num_shards, uint64_t instance_id,
                 const RGWSyncTraceNodeRef& tn, rgw_data_sync_status* sync_status,
                 std::vector<RGWObjVersionTracker>& objvs)
    : RGWCoroutine(sc->cct), sc(sc), num_shards(num_shards),
      instance_id(instance_id), tn(tn),
      sync_status(sync_status), objvs(objvs) {}

  ~DataSyncInitCR() override {
    if (lease_cr) {
      lease_cr->abort();
    }
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      lease_cr.reset(
	RGWInitDataSyncStatusCoroutine::continuous_lease_cr(sc, this));

      yield spawn(lease_cr.get(), false);
      // block until the lease is held (or the lease CR gave up)
      while (!lease_cr->is_locked()) {
	if (lease_cr->is_done()) {
	  tn->log(5, "ERROR: failed to take data sync status lease");
	  set_status("lease lock failed, early abort");
	  drain_all();
	  return set_cr_error(lease_cr->get_ret_status());
	}
	tn->log(5, "waiting on data sync status lease");
	yield set_sleeping(true);
      }
      tn->log(5, "acquired data sync status lease");
      objv_tracker.generate_new_write_ver(sc->cct);
      yield call(new RGWInitDataSyncStatusCoroutine(sc, num_shards, instance_id,
						    tn, sync_status, lease_cr,
						    objv_tracker, objvs));
      lease_cr->go_down();
      lease_cr.reset();
      drain_all();
      if (retcode < 0) {
	// NOTE(review): set_cr_error() here has no 'return', so the
	// subsequent set_cr_done() overwrites the error state — looks like
	// the error may be swallowed; confirm intent before changing.
	set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};
}

// Initializes data sync status for this source zone from scratch, under a
// dedicated coroutine/HTTP manager pair.
int RGWRemoteDataLog::init_sync_status(const DoutPrefixProvider *dpp, int num_shards)
{
  rgw_data_sync_status sync_status;
  std::vector<RGWObjVersionTracker> objvs;
  sync_status.sync_info.num_shards = num_shards;

  RGWCoroutinesManager crs(cct, cr_registry);
  RGWHTTPManager http_manager(cct, crs.get_completion_mgr());
  int ret = http_manager.start();
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
    return ret;
  }
  RGWDataSyncEnv sync_env_local = sync_env;
  sync_env_local.http_manager = &http_manager;
  auto instance_id = ceph::util::generate_random_number<uint64_t>();
  RGWDataSyncCtx sc_local = sc;
  sc_local.env = &sync_env_local;
  ret = crs.run(dpp, new RGWRDL::DataSyncInitCR(&sc_local, num_shards,
						instance_id, tn, &sync_status, objvs));
  http_manager.stop();
  return ret;
}

// Builds the oid of the full-sync index object for one datalog shard.
static string full_data_sync_index_shard_oid(const rgw_zone_id& source_zone, int shard_id)
{
  char buf[datalog_sync_full_sync_index_prefix.size() + 1 + source_zone.id.size() + 1 + 16];
  snprintf(buf, sizeof(buf), "%s.%s.%d", datalog_sync_full_sync_index_prefix.c_str(), source_zone.id.c_str(), shard_id);
  return string(buf);
}

// JSON shape of a paged /admin/metadata listing response.
struct read_metadata_list {
  string marker;     // continuation marker for the next page
  bool truncated;    // more pages remain
  list<string> keys; // metadata keys on this page
  int count;

  read_metadata_list() : truncated(false), count(0) {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("marker", marker, obj);
    JSONDecoder::decode_json("truncated", truncated, obj);
    JSONDecoder::decode_json("keys", keys, obj);
    JSONDecoder::decode_json("count", count, obj);
  }
};

// JSON shape of a single bucket.instance metadata entry.
struct bucket_instance_meta_info {
  string key;
  obj_version ver;
  utime_t mtime;
  RGWBucketInstanceMetadataObject data;

  bucket_instance_meta_info() {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("key", key, obj);
    JSONDecoder::decode_json("ver", ver, obj);
    JSONDecoder::decode_json("mtime", mtime, obj);
    JSONDecoder::decode_json("data", data, obj);
  }
};

// Fetches a remote bucket's index-log marker info
// (GET /admin/log?type=bucket-index&info).
class RGWReadRemoteBucketIndexLogInfoCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  const string instance_key;

  rgw_bucket_index_marker_info *info; // output

public:
  RGWReadRemoteBucketIndexLogInfoCR(RGWDataSyncCtx *_sc,
                                    const rgw_bucket& bucket,
                                    rgw_bucket_index_marker_info *_info)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      instance_key(bucket.get_key()), info(_info) {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      yield {
        rgw_http_param_pair pairs[] = { { "type" , "bucket-index" },
	                                { "bucket-instance", instance_key.c_str() },
					{ "info" , NULL },
	                                { NULL, NULL
} }; string p = "/admin/log/"; call(new RGWReadRESTResourceCR<rgw_bucket_index_marker_info>(sync_env->cct, sc->conn, sync_env->http_manager, p, pairs, info)); } if (retcode < 0) { return set_cr_error(retcode); } return set_cr_done(); } return 0; } }; class RGWListBucketIndexesCR : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env = sc->env; rgw::sal::RadosStore* driver = sync_env->driver; rgw_data_sync_status *sync_status; std::vector<RGWObjVersionTracker>& objvs; int req_ret = 0; int ret = 0; list<string>::iterator iter; unique_ptr<RGWShardedOmapCRManager> entries_index; string oid_prefix = datalog_sync_full_sync_index_prefix + "." + sc->source_zone.id; string path = "/admin/metadata/bucket.instance"; bucket_instance_meta_info meta_info; string key; bool failed = false; bool truncated = false; read_metadata_list result; public: RGWListBucketIndexesCR(RGWDataSyncCtx* sc, rgw_data_sync_status* sync_status, std::vector<RGWObjVersionTracker>& objvs) : RGWCoroutine(sc->cct), sc(sc), sync_status(sync_status), objvs(objvs) {} ~RGWListBucketIndexesCR() override { } int operate(const DoutPrefixProvider *dpp) override { reenter(this) { entries_index = std::make_unique<RGWShardedOmapCRManager>( sync_env->async_rados, driver, this, cct->_conf->rgw_data_log_num_shards, sync_env->svc->zone->get_zone_params().log_pool, oid_prefix); yield; // yield so OmapAppendCRs can start do { yield { string entrypoint = "/admin/metadata/bucket.instance"s; rgw_http_param_pair pairs[] = {{"max-entries", "1000"}, {"marker", result.marker.c_str()}, {NULL, NULL}}; call(new RGWReadRESTResourceCR<read_metadata_list>( sync_env->cct, sc->conn, sync_env->http_manager, entrypoint, pairs, &result)); } if (retcode < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata for section bucket.instance" << dendl; return set_cr_error(retcode); } for (iter = result.keys.begin(); iter != result.keys.end(); ++iter) { ldpp_dout(dpp, 20) << "list metadata: section=bucket.instance key=" << *iter 
<< dendl; key = *iter; yield { rgw_http_param_pair pairs[] = {{"key", key.c_str()}, {NULL, NULL}}; call(new RGWReadRESTResourceCR<bucket_instance_meta_info>( sync_env->cct, sc->conn, sync_env->http_manager, path, pairs, &meta_info)); } if (retcode < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to fetch metadata for key: " << key << dendl; return set_cr_error(retcode); } // Now that bucket full sync is bucket-wide instead of // per-shard, we only need to register a single shard of // each bucket to guarantee that sync will see everything // that happened before data full sync starts. This also // means we don't have to care about the bucket's current // shard count. yield entries_index->append( fmt::format("{}:{}", key, 0), sync_env->svc->datalog_rados->get_log_shard_id( meta_info.data.get_bucket_info().bucket, 0)); } truncated = result.truncated; } while (truncated); yield { if (!entries_index->finish()) { failed = true; } } if (!failed) { for (auto iter = sync_status->sync_markers.begin(); iter != sync_status->sync_markers.end(); ++iter) { int shard_id = (int)iter->first; rgw_data_sync_marker& marker = iter->second; marker.total_entries = entries_index->get_total_entries(shard_id); spawn(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>( dpp, sync_env->driver, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, RGWDataSyncStatusManager::shard_obj_name( sc->source_zone, shard_id)), marker, &objvs[shard_id]), true); } } else { yield call(sync_env->error_logger->log_error_cr( dpp, sc->conn->get_remote_id(), "data.init", "", EIO, string("failed to build bucket instances map"))); } while (collect(&ret, NULL)) { if (ret < 0) { yield call(sync_env->error_logger->log_error_cr( dpp, sc->conn->get_remote_id(), "data.init", "", -ret, string("failed to driver sync status: ") + cpp_strerror(-ret))); req_ret = ret; } yield; } drain_all(); if (req_ret < 0) { yield return set_cr_error(req_ret); } yield return set_cr_done(); } return 0; } }; #define 
DATA_SYNC_UPDATE_MARKER_WINDOW 1 class RGWDataSyncShardMarkerTrack : public RGWSyncShardMarkerTrack<string, string> { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; string marker_oid; rgw_data_sync_marker sync_marker; RGWSyncTraceNodeRef tn; RGWObjVersionTracker& objv; public: RGWDataSyncShardMarkerTrack(RGWDataSyncCtx *_sc, const string& _marker_oid, const rgw_data_sync_marker& _marker, RGWSyncTraceNodeRef& _tn, RGWObjVersionTracker& objv) : RGWSyncShardMarkerTrack(DATA_SYNC_UPDATE_MARKER_WINDOW), sc(_sc), sync_env(_sc->env), marker_oid(_marker_oid), sync_marker(_marker), tn(_tn), objv(objv) {} RGWCoroutine* store_marker(const string& new_marker, uint64_t index_pos, const real_time& timestamp) override { sync_marker.marker = new_marker; sync_marker.pos = index_pos; sync_marker.timestamp = timestamp; tn->log(20, SSTR("updating marker marker_oid=" << marker_oid << " marker=" << new_marker)); return new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(sync_env->dpp, sync_env->driver, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, marker_oid), sync_marker, &objv); } RGWOrderCallCR *allocate_order_control_cr() override { return new RGWLastCallerWinsCR(sync_env->cct); } }; // ostream wrappers to print buckets without copying strings struct bucket_str { const rgw_bucket& b; explicit bucket_str(const rgw_bucket& b) : b(b) {} }; std::ostream& operator<<(std::ostream& out, const bucket_str& rhs) { auto& b = rhs.b; if (!b.tenant.empty()) { out << b.tenant << '/'; } out << b.name; if (!b.bucket_id.empty()) { out << ':' << b.bucket_id; } return out; } struct bucket_str_noinstance { const rgw_bucket& b; explicit bucket_str_noinstance(const rgw_bucket& b) : b(b) {} }; std::ostream& operator<<(std::ostream& out, const bucket_str_noinstance& rhs) { auto& b = rhs.b; if (!b.tenant.empty()) { out << b.tenant << '/'; } out << b.name; return out; } struct bucket_shard_str { const rgw_bucket_shard& bs; explicit bucket_shard_str(const rgw_bucket_shard& bs) : bs(bs) {} }; 
std::ostream& operator<<(std::ostream& out, const bucket_shard_str& rhs) {
  auto& bs = rhs.bs;
  out << bucket_str{bs.bucket};
  if (bs.shard_id >= 0) {
    out << ':' << bs.shard_id;
  }
  return out;
}
#if FMT_VERSION >= 90000
template <> struct fmt::formatter<bucket_shard_str>
  : fmt::ostream_formatter {};
#endif

// A bucket's full info plus its xattrs, as loaded from the bucket index.
struct all_bucket_info {
  RGWBucketInfo bucket_info;
  map<string, bufferlist> attrs;
};

// One endpoint (zone + bucket) of a sync pipe; bucket info may be filled in
// lazily via update_empty_bucket_info().
struct rgw_sync_pipe_info_entity
{
private:
  RGWBucketInfo bucket_info;
  map<string, bufferlist> bucket_attrs;
  bool _has_bucket_info{false};

public:
  rgw_zone_id zone;

  rgw_sync_pipe_info_entity() {}
  rgw_sync_pipe_info_entity(const rgw_sync_bucket_entity& e,
                            std::optional<all_bucket_info>& binfo) {
    if (e.zone) {
      zone = *e.zone;
    }
    if (!e.bucket) {
      return;
    }
    if (!binfo ||
        binfo->bucket_info.bucket != *e.bucket) {
      // no (matching) info available yet; remember just the bucket id
      bucket_info.bucket = *e.bucket;
    } else {
      set_bucket_info(*binfo);
    }
  }

  void update_empty_bucket_info(const std::map<rgw_bucket, all_bucket_info>& buckets_info) {
    if (_has_bucket_info) {
      return;
    }
    if (bucket_info.bucket.name.empty()) {
      return;
    }

    auto iter = buckets_info.find(bucket_info.bucket);
    if (iter == buckets_info.end()) {
      return;
    }

    set_bucket_info(iter->second);
  }

  bool has_bucket_info() const {
    return _has_bucket_info;
  }

  void set_bucket_info(const all_bucket_info& all_info) {
    bucket_info = all_info.bucket_info;
    bucket_attrs = all_info.attrs;
    _has_bucket_info = true;
  }

  const RGWBucketInfo& get_bucket_info() const {
    return bucket_info;
  }

  const rgw_bucket& get_bucket() const {
    return bucket_info.bucket;
  }

  // NOTE(review): zone comparison looks inverted (zone < e.zone -> false,
  // zone > e.zone -> true), i.e. zones order descending while buckets order
  // ascending. It is still a strict weak ordering; confirm intent before
  // "fixing".
  bool operator<(const rgw_sync_pipe_info_entity& e) const {
    if (zone < e.zone) {
      return false;
    }
    if (zone > e.zone) {
      return true;
    }
    return (bucket_info.bucket < e.bucket_info.bucket);
  }
};

std::ostream& operator<<(std::ostream& out, const rgw_sync_pipe_info_entity& e) {
  auto& bucket = e.get_bucket_info().bucket;

  out << e.zone << ":" << bucket.get_key();
  return out;
}

// A resolved sync pipe: flow-manager handler plus its source/target entities.
struct rgw_sync_pipe_handler_info {
  RGWBucketSyncFlowManager::pipe_handler handler;
  rgw_sync_pipe_info_entity
source;
  rgw_sync_pipe_info_entity target;

  rgw_sync_pipe_handler_info() {}
  rgw_sync_pipe_handler_info(const RGWBucketSyncFlowManager::pipe_handler& _handler,
                             std::optional<all_bucket_info> source_bucket_info,
                             std::optional<all_bucket_info> target_bucket_info)
    : handler(_handler),
      source(handler.source, source_bucket_info),
      target(handler.dest, target_bucket_info) {
  }

  bool operator<(const rgw_sync_pipe_handler_info& p) const {
    if (source < p.source) {
      return true;
    }
    if (p.source < source) {
      return false;
    }
    return (target < p.target);
  }

  void update_empty_bucket_info(const std::map<rgw_bucket, all_bucket_info>& buckets_info) {
    source.update_empty_bucket_info(buckets_info);
    target.update_empty_bucket_info(buckets_info);
  }
};

std::ostream& operator<<(std::ostream& out, const rgw_sync_pipe_handler_info& p) {
  out << p.source << ">" << p.target;
  return out;
}

// Ordered set of sync pipe handlers; set semantics dedupe identical pipes.
struct rgw_sync_pipe_info_set {
  std::set<rgw_sync_pipe_handler_info> handlers;

  using iterator = std::set<rgw_sync_pipe_handler_info>::iterator;

  void clear() {
    handlers.clear();
  }

  void insert(const RGWBucketSyncFlowManager::pipe_handler& handler,
              std::optional<all_bucket_info>& source_bucket_info,
              std::optional<all_bucket_info>& target_bucket_info) {
    rgw_sync_pipe_handler_info p(handler, source_bucket_info, target_bucket_info);
    handlers.insert(p);
  }

  iterator begin() {
    return handlers.begin();
  }

  iterator end() {
    return handlers.end();
  }

  size_t size() const {
    return handlers.size();
  }

  bool empty() const {
    return handlers.empty();
  }

  // rebuilds the set because filling in bucket info can change ordering
  void update_empty_bucket_info(const std::map<rgw_bucket, all_bucket_info>& buckets_info) {
    if (buckets_info.empty()) {
      return;
    }

    std::set<rgw_sync_pipe_handler_info> p;

    for (auto pipe : handlers) {
      pipe.update_empty_bucket_info(buckets_info);
      p.insert(pipe);
    }

    handlers = std::move(p);
  }
};

// Runs bucket sync for all source pipes feeding one bucket shard.
class RGWRunBucketSourcesSyncCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;

  rgw_sync_pipe_info_set pipes;
rgw_sync_pipe_info_set::iterator siter;

  rgw_bucket_sync_pair_info sync_pair;

  RGWSyncTraceNodeRef tn;
  ceph::real_time* progress; // overall progress (min over shards)
  std::vector<ceph::real_time> shard_progress;
  std::vector<ceph::real_time>::iterator cur_shard_progress;

  RGWRESTConn *conn{nullptr};
  rgw_zone_id last_zone;

  std::optional<uint64_t> gen;
  rgw_bucket_index_marker_info marker_info;
  BucketIndexShardsManager marker_mgr;

public:
  RGWRunBucketSourcesSyncCR(RGWDataSyncCtx *_sc,
                            boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                            const rgw_bucket_shard& source_bs,
                            const RGWSyncTraceNodeRef& _tn_parent,
			    std::optional<uint64_t> gen,
                            ceph::real_time* progress);

  int operate(const DoutPrefixProvider *dpp) override;
};

// Processes one datalog entry: syncs the referenced bucket shard, dedupes
// concurrent obligations via the shared bucket-shard cache, and records
// failures in the error repo for retry.
class RGWDataSyncSingleEntryCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  rgw::bucket_sync::Handle state; // cached bucket-shard state
  rgw_data_sync_obligation obligation; // input obligation
  std::optional<rgw_data_sync_obligation> complete; // obligation to complete
  uint32_t obligation_counter = 0;
  RGWDataSyncShardMarkerTrack *marker_tracker;
  rgw_raw_obj error_repo;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;
  RGWSyncTraceNodeRef tn;

  ceph::real_time progress;
  int sync_status = 0;
public:
  RGWDataSyncSingleEntryCR(RGWDataSyncCtx *_sc, rgw::bucket_sync::Handle state,
                           rgw_data_sync_obligation _obligation,
                           RGWDataSyncShardMarkerTrack *_marker_tracker,
                           const rgw_raw_obj& error_repo,
                           boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                           const RGWSyncTraceNodeRef& _tn_parent)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      state(std::move(state)), obligation(std::move(_obligation)),
      marker_tracker(_marker_tracker), error_repo(error_repo),
      lease_cr(std::move(lease_cr)) {
    set_description() << "data sync single entry (source_zone=" << sc->source_zone << ") " << obligation;
    tn =
sync_env->sync_tracer->add_node(_tn_parent, "entry", to_string(obligation.bs, obligation.gen));
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
if (state->obligation) {
        // this is already syncing in another DataSyncSingleEntryCR
        if (state->obligation->timestamp < obligation.timestamp) {
          // cancel existing obligation and overwrite it
          tn->log(10, SSTR("canceling existing obligation " << *state->obligation));
          complete = std::move(*state->obligation);
          *state->obligation = std::move(obligation);
          state->counter++;
        } else {
          // cancel new obligation
          tn->log(10, SSTR("canceling new obligation " << obligation));
          complete = std::move(obligation);
        }
      } else {
        // start syncing a new obligation
        state->obligation = obligation;
        obligation_counter = state->counter;
        state->counter++;

        // loop until the latest obligation is satisfied, because other callers
        // may update the obligation while we're syncing
        while ((state->obligation->timestamp == ceph::real_time() ||
                state->progress_timestamp < state->obligation->timestamp) &&
               obligation_counter != state->counter) {
          obligation_counter = state->counter;
          progress = ceph::real_time{};

          ldout(cct, 4) << "starting sync on " << bucket_shard_str{state->key.first}
              << ' ' << *state->obligation << " progress timestamp " << state->progress_timestamp
              << " progress " << progress << dendl;
          yield call(new RGWRunBucketSourcesSyncCR(sc, lease_cr,
                                                   state->key.first, tn,
                                                   state->obligation->gen,
                                                   &progress));
          if (retcode < 0) {
            break;
          }
          state->progress_timestamp = std::max(progress, state->progress_timestamp);
        }
        // any new obligations will process themselves
        complete = std::move(*state->obligation);
        state->obligation.reset();

        tn->log(10, SSTR("sync finished on " << bucket_shard_str{state->key.first}
                         << " progress=" << progress << ' ' << complete << " r=" << retcode));
      }
      sync_status = retcode;

      if (sync_status == -ENOENT) {
        // this was added when 'tenant/' was added to datalog entries, because
        // preexisting tenant buckets could never sync and would stay in the
        // error_repo forever
        tn->log(0, SSTR("WARNING: skipping data log entry for missing bucket " << complete->bs));
        sync_status = 0;
      }

      if
(sync_status < 0) {
        // write actual sync failures for 'radosgw-admin sync error list'
        if (sync_status != -EBUSY && sync_status != -EAGAIN) {
          yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data",
                                                          to_string(complete->bs, complete->gen),
                                                          -sync_status, string("failed to sync bucket instance: ") + cpp_strerror(-sync_status)));
          if (retcode < 0) {
            tn->log(0, SSTR("ERROR: failed to log sync failure: retcode=" << retcode));
          }
        }
        if (complete->timestamp != ceph::real_time{}) {
          tn->log(10, SSTR("writing " << *complete << " to error repo for retry"));
          yield call(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
                                              rgw::error_repo::encode_key(complete->bs, complete->gen),
                                              complete->timestamp));
          if (retcode < 0) {
            tn->log(0, SSTR("ERROR: failed to log sync failure in error repo: retcode=" << retcode));
          }
        }
      } else if (complete->retry) {
        // a previously-failed entry synced cleanly; drop its retry record
        yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo,
                                              rgw::error_repo::encode_key(complete->bs, complete->gen),
                                              complete->timestamp));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to remove omap key from error repo ("
             << error_repo << " retcode=" << retcode));
        }
      }
      /* FIXME: what to do in case of error */
      if (marker_tracker && !complete->marker.empty()) {
        /* update marker */
        yield call(marker_tracker->finish(complete->marker));
        if (retcode < 0) {
          return set_cr_error(retcode);
        }
      }
      if (sync_status == 0) {
        sync_status = retcode;
      }
      if (sync_status < 0) {
        return set_cr_error(sync_status);
      }
      return set_cr_done();
    }
    return 0;
  }
};

// Maps a bucket shard to the retry (error repo) object of the datalog shard
// that covers it.
rgw_raw_obj datalog_oid_for_error_repo(RGWDataSyncCtx *sc, rgw::sal::RadosStore* driver,
                                      rgw_pool& pool, rgw_bucket_shard& bs) {
  int datalog_shard = driver->svc()->datalog_rados->choose_oid(bs);
  string oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, datalog_shard);
  return rgw_raw_obj(pool, oid + ".retry");
  }

// Expands a full-sync obligation found in the error repo into per-shard,
// per-generation retry entries, then removes the original obligation.
class RGWDataIncrementalSyncFullObligationCR: public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
rgw_bucket_shard source_bs;
  rgw_raw_obj error_repo; // repo holding the original obligation
  std::string error_marker;
  ceph::real_time timestamp;
  RGWSyncTraceNodeRef tn;
  rgw_bucket_index_marker_info remote_info;
  rgw_pool pool;
  uint32_t sid;
  rgw_bucket_shard bs;
  std::vector<store_gen_shards>::const_iterator each;

public:
  RGWDataIncrementalSyncFullObligationCR(RGWDataSyncCtx *_sc, rgw_bucket_shard& _source_bs,
                                         const rgw_raw_obj& error_repo, const std::string& _error_marker,
                                         ceph::real_time& _timestamp, RGWSyncTraceNodeRef& _tn)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), source_bs(_source_bs),
      error_repo(error_repo), error_marker(_error_marker), timestamp(_timestamp),
      tn(sync_env->sync_tracer->add_node(_tn, "error_repo", SSTR(bucket_shard_str(source_bs))))
  {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      yield call(new RGWReadRemoteBucketIndexLogInfoCR(sc, source_bs.bucket, &remote_info));
      if (retcode == -ENOENT) {
        // don't retry if bucket instance does not exist
        tn->log(10, SSTR("bucket instance or log layout does not exist on source for bucket " << source_bs.bucket));
        yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo,
                                              error_marker, timestamp));
        return set_cr_done();
      } else if (retcode < 0) {
        return set_cr_error(retcode);
      }

      each = remote_info.generations.cbegin();
      for (; each != remote_info.generations.cend(); each++) {
        for (sid = 0; sid < each->num_shards; sid++) {
          bs.bucket = source_bs.bucket;
          bs.shard_id = sid;
          pool = sync_env->svc->zone->get_zone_params().log_pool;
          // NOTE(review): the target error repo is derived from source_bs
          // while the encoded key uses bs (current sid) — presumably
          // intentional so all retries land in the source shard's repo;
          // confirm against the retry-listing side.
          error_repo = datalog_oid_for_error_repo(sc, sync_env->driver, pool, source_bs);
          tn->log(10, SSTR("writing shard_id " << sid << " of gen " << each->gen << " to error repo for retry"));
          yield_spawn_window(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
                             rgw::error_repo::encode_key(bs, each->gen),
                             timestamp), sc->lcc.adj_concurrency(cct->_conf->rgw_data_sync_spawn_window),
                             [&](uint64_t stack_id, int ret) {
                               if (ret < 0) {
                                 retcode = ret;
                               }
                               return 0;
                             });
        }
      }
drain_all_cb([&](uint64_t stack_id, int ret) {
                   if (ret < 0) {
                     tn->log(10, SSTR("writing to error repo returned error: " << ret));
                   }
                   return ret;
                 });

      // once everything succeeds, remove the full sync obligation from the error repo
      yield call(rgw::error_repo::remove_cr(sync_env->driver->svc()->rados, error_repo,
                                            error_marker, timestamp));
      return set_cr_done();
    }
    return 0;
  }
};

// Builds a RGWDataSyncSingleEntryCR for one bucket-shard obligation, using
// the shared per-shard state cache to dedupe concurrent work.
RGWCoroutine* data_sync_single_entry(RGWDataSyncCtx *sc, const rgw_bucket_shard& src,
                                     std::optional<uint64_t> gen,
                                     const std::string marker,
                                     ceph::real_time timestamp,
                                     boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                                     boost::intrusive_ptr<rgw::bucket_sync::Cache> bucket_shard_cache,
                                     RGWDataSyncShardMarkerTrack* marker_tracker,
                                     rgw_raw_obj error_repo,
                                     RGWSyncTraceNodeRef& tn,
                                     bool retry) {
  auto state = bucket_shard_cache->get(src, gen);
  auto obligation = rgw_data_sync_obligation{src, gen, marker, timestamp, retry};
  return new RGWDataSyncSingleEntryCR(sc, std::move(state), std::move(obligation),
                                      &*marker_tracker, error_repo, lease_cr.get(), tn);
}

// Returns the datalog-shard sync timestamp that covers the given bucket
// shard, or zero when no marker exists for that shard.
static ceph::real_time timestamp_for_bucket_shard(rgw::sal::RadosStore* driver,
                                                 const rgw_data_sync_status& sync_status,
                                                 const rgw_bucket_shard& bs) {
  int datalog_shard = driver->svc()->datalog_rados->choose_oid(bs);
  auto status = sync_status.sync_markers.find(datalog_shard);
  if (status == sync_status.sync_markers.end()) {
    return ceph::real_clock::zero();
  }
  return status->second.timestamp;
}

// Full-syncs every shard of every generation of one bucket; failures are
// written to the appropriate error repos for later retry.
class RGWDataFullSyncSingleEntryCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  rgw_pool pool;
  rgw_bucket_shard source_bs;
  const std::string key;
  rgw_data_sync_status sync_status;
  rgw_raw_obj error_repo;
  ceph::real_time timestamp;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;
  boost::intrusive_ptr<rgw::bucket_sync::Cache> bucket_shard_cache;
  RGWDataSyncShardMarkerTrack* marker_tracker;
  RGWSyncTraceNodeRef tn;
  rgw_bucket_index_marker_info remote_info;
  uint32_t sid;
  std::vector<store_gen_shards>::iterator each;
  uint64_t
i{0};
  RGWCoroutine* shard_cr = nullptr;
  bool first_shard = true;
  bool error_inject;  // set from rgw_sync_data_full_inject_err_probability > 0

public:
  // NOTE(review): the parameter `sync_status` shadows the member of the same
  // name, and the member is never initialized from it in the init list below —
  // the member appears to stay default-constructed. Verify against upstream.
  RGWDataFullSyncSingleEntryCR(RGWDataSyncCtx *_sc, const rgw_pool& _pool,
                               const rgw_bucket_shard& _source_bs, const std::string& _key,
                               const rgw_data_sync_status& sync_status,
                               const rgw_raw_obj& _error_repo, ceph::real_time _timestamp,
                               boost::intrusive_ptr<const RGWContinuousLeaseCR> _lease_cr,
                               boost::intrusive_ptr<rgw::bucket_sync::Cache> _bucket_shard_cache,
                               RGWDataSyncShardMarkerTrack* _marker_tracker,
                               RGWSyncTraceNodeRef& _tn)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), pool(_pool),
      source_bs(_source_bs), key(_key), error_repo(_error_repo),
      timestamp(_timestamp), lease_cr(std::move(_lease_cr)),
      bucket_shard_cache(_bucket_shard_cache), marker_tracker(_marker_tracker),
      tn(_tn) {
    error_inject = (sync_env->cct->_conf->rgw_sync_data_full_inject_err_probability > 0);
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      // optional fault injection for testing the error-repo retry path
      if (error_inject &&
          rand() % 10000 < cct->_conf->rgw_sync_data_full_inject_err_probability * 10000.0) {
        tn->log(0, SSTR("injecting read bilog info error on key=" << key));
        retcode = -ENOENT;
      } else {
        tn->log(0, SSTR("read bilog info key=" << key));
        yield call(new RGWReadRemoteBucketIndexLogInfoCR(sc, source_bs.bucket, &remote_info));
      }

      if (retcode < 0) {
        // couldn't read remote layout: record a gen-less obligation for retry,
        // finish the marker, and bail out with the original error
        tn->log(10, SSTR("full sync: failed to read remote bucket info. Writing "
                         << source_bs.shard_id << " to error repo for retry"));
        yield call(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
                                             rgw::error_repo::encode_key(source_bs, std::nullopt),
                                             timestamp));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to log " << source_bs.shard_id
                          << " in error repo: retcode=" << retcode));
        }
        yield call(marker_tracker->finish(key));
        return set_cr_error(retcode);
      }
      //wait to sync the first shard of the oldest generation and then sync all other shards.
      //if any of the operations fail at any time, write them into error repo for later retry.
each = remote_info.generations.begin();
      for (; each != remote_info.generations.end(); each++) {
        for (sid = 0; sid < each->num_shards; sid++) {
          source_bs.shard_id = sid;
          // use the error repo and sync status timestamp from the datalog shard corresponding to source_bs
          error_repo = datalog_oid_for_error_repo(sc, sync_env->driver, pool, source_bs);
          timestamp = timestamp_for_bucket_shard(sync_env->driver, sync_status, source_bs);
          if (retcode < 0) {
            // a previous iteration failed: record the remaining shards for retry
            tn->log(10, SSTR("Write " << source_bs.shard_id << " to error repo for retry"));
            yield_spawn_window(rgw::error_repo::write_cr(sync_env->driver->svc()->rados, error_repo,
                                                         rgw::error_repo::encode_key(source_bs, each->gen),
                                                         timestamp),
                               sc->lcc.adj_concurrency(cct->_conf->rgw_data_sync_spawn_window),
                               std::nullopt);
          } else {
            shard_cr = data_sync_single_entry(sc, source_bs, each->gen, key, timestamp,
                                              lease_cr, bucket_shard_cache, nullptr,
                                              error_repo, tn, false);
            tn->log(10, SSTR("full sync: syncing shard_id " << sid << " of gen " << each->gen));
            if (first_shard) {
              // the very first shard is awaited synchronously
              yield call(shard_cr);
              first_shard = false;
            } else {
              yield_spawn_window(shard_cr,
                                 sc->lcc.adj_concurrency(cct->_conf->rgw_data_sync_spawn_window),
                                 [&](uint64_t stack_id, int ret) {
                                   if (ret < 0) {
                                     retcode = ret;
                                   }
                                   return retcode;
                                 });
            }
          }
        }
        // wait for all spawned shard syncs of this generation
        drain_all_cb([&](uint64_t stack_id, int ret) {
          if (ret < 0) {
            retcode = ret;
          }
          return retcode;
        });
      }
      yield call(marker_tracker->finish(key));
      if (retcode < 0) {
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};

// Shared state/helpers for the full-sync and incremental-sync shard coroutines.
class RGWDataBaseSyncShardCR : public RGWCoroutine {
protected:
  RGWDataSyncCtx *const sc;
  const rgw_pool& pool;
  const uint32_t shard_id;
  rgw_data_sync_marker& sync_marker;
  RGWSyncTraceNodeRef tn;
  const string& status_oid;
  const rgw_raw_obj& error_repo;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;
  const rgw_data_sync_status& sync_status;
  RGWObjVersionTracker& objv;
  boost::intrusive_ptr<rgw::bucket_sync::Cache> bucket_shard_cache;
  std::optional<RGWDataSyncShardMarkerTrack> marker_tracker;
RGWRadosGetOmapValsCR::ResultPtr omapvals;
  rgw_bucket_shard source_bs;

  // Parse a datalog key into a bucket shard.
  int parse_bucket_key(const std::string& key, rgw_bucket_shard& bs) const {
    int ret = rgw_bucket_parse_bucket_key(sc->env->cct, key,
                                          &bs.bucket, &bs.shard_id);
    //for the case of num_shards 0, shard_id gets a value of -1
    //because of the way bucket instance gets parsed in the absence of shard_id delimiter.
    //interpret it as a non-negative value.
    if (ret == 0) {
      if (bs.shard_id < 0) {
        bs.shard_id = 0;
      }
    }
    return ret;
  }

  RGWDataBaseSyncShardCR(
    RGWDataSyncCtx *const _sc, const rgw_pool& pool, const uint32_t shard_id,
    rgw_data_sync_marker& sync_marker, RGWSyncTraceNodeRef tn,
    const string& status_oid, const rgw_raw_obj& error_repo,
    boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
    const rgw_data_sync_status& sync_status,
    RGWObjVersionTracker& objv,
    const boost::intrusive_ptr<rgw::bucket_sync::Cache>& bucket_shard_cache)
    : RGWCoroutine(_sc->cct), sc(_sc), pool(pool), shard_id(shard_id),
      sync_marker(sync_marker), tn(tn), status_oid(status_oid),
      error_repo(error_repo), lease_cr(std::move(lease_cr)),
      sync_status(sync_status), objv(objv),
      bucket_shard_cache(bucket_shard_cache) {}
};

// Full sync of one datalog shard: walk the shard's full-sync omap index and
// sync every bucket shard listed, then transition the marker to incremental.
class RGWDataFullSyncShardCR : public RGWDataBaseSyncShardCR {
  static constexpr auto OMAP_GET_MAX_ENTRIES = 100;

  string oid;                    // full-sync index object for this shard
  uint64_t total_entries = 0;
  ceph::real_time entry_timestamp;
  std::map<std::string, bufferlist> entries;
  std::map<std::string, bufferlist>::iterator iter;
  string error_marker;
  bool lost_lock = false;
  bool lost_bid = false;

public:
  RGWDataFullSyncShardCR(
    RGWDataSyncCtx *const sc, const rgw_pool& pool, const uint32_t shard_id,
    rgw_data_sync_marker& sync_marker, RGWSyncTraceNodeRef tn,
    const string& status_oid, const rgw_raw_obj& error_repo,
    boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
    const rgw_data_sync_status& sync_status, RGWObjVersionTracker& objv,
    const boost::intrusive_ptr<rgw::bucket_sync::Cache>& bucket_shard_cache)
    : RGWDataBaseSyncShardCR(sc, pool, shard_id, sync_marker, tn,
status_oid, error_repo, std::move(lease_cr),
                             sync_status, objv, bucket_shard_cache) {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      tn->log(10, "start full sync");
      oid = full_data_sync_index_shard_oid(sc->source_zone, shard_id);
      marker_tracker.emplace(sc, status_oid, sync_marker, tn, objv);
      total_entries = sync_marker.pos;
      entry_timestamp = sync_marker.timestamp; // time when full sync started
      do {
        // abort if we no longer hold the lease or the shard bid
        if (!lease_cr->is_locked()) {
          tn->log(1, "lease is lost, abort");
          lost_lock = true;
          break;
        }
        if (!sc->env->bid_manager->is_highest_bidder(shard_id)) {
          tn->log(1, "lost bid");
          lost_bid = true;
          break;
        }
        // page through the full-sync omap index from the current marker
        omapvals = std::make_shared<RGWRadosGetOmapValsCR::Result>();
        yield call(new RGWRadosGetOmapValsCR(sc->env->driver, rgw_raw_obj(pool, oid),
                                             sync_marker.marker, OMAP_GET_MAX_ENTRIES, omapvals));
        if (retcode < 0) {
          drain_all();
          return set_cr_error(retcode);
        }
        entries = std::move(omapvals->entries);
        if (entries.size() > 0) {
          tn->set_flag(RGW_SNS_FLAG_ACTIVE); /* actually have entries to sync */
        }
        tn->log(20, SSTR("retrieved " << entries.size() << " entries to sync"));
        iter = entries.begin();
        for (; iter != entries.end(); ++iter) {
          retcode = parse_bucket_key(iter->first, source_bs);
          if (retcode < 0) {
            // unparseable key: advance past it so we don't get stuck
            tn->log(1, SSTR("failed to parse bucket shard: " << iter->first));
            marker_tracker->try_update_high_marker(iter->first, 0, entry_timestamp);
            continue;
          }
          tn->log(20, SSTR("full sync: " << iter->first));
          total_entries++;
          if (!marker_tracker->start(iter->first, total_entries, entry_timestamp)) {
            tn->log(0, SSTR("ERROR: cannot start syncing " << iter->first << ". Duplicate entry?"));
          } else {
            tn->log(10, SSTR("timestamp for " << iter->first << " is :" << entry_timestamp));
            yield_spawn_window(new RGWDataFullSyncSingleEntryCR(
                                 sc, pool, source_bs, iter->first, sync_status,
                                 error_repo, entry_timestamp, lease_cr,
                                 bucket_shard_cache, &*marker_tracker, tn),
                               sc->lcc.adj_concurrency(cct->_conf->rgw_data_sync_spawn_window),
                               [&](uint64_t stack_id, int ret) {
                                 if (ret < 0) {
                                   retcode = ret;
                                 }
                                 return retcode;
                               });
          }
          sync_marker.marker = iter->first;
        }
      } while (omapvals->more);
      omapvals.reset();

      drain_all();
      tn->unset_flag(RGW_SNS_FLAG_ACTIVE);

      if (lost_bid) {
        yield call(marker_tracker->flush());
      } else if (!lost_lock) {
        /* update marker to reflect we're done with full sync */
        sync_marker.state = rgw_data_sync_marker::IncrementalSync;
        sync_marker.marker = sync_marker.next_step_marker;
        sync_marker.next_step_marker.clear();
        yield call(new RGWSimpleRadosWriteCR<rgw_data_sync_marker>(
                     sc->env->dpp, sc->env->driver,
                     rgw_raw_obj(pool, status_oid), sync_marker, &objv));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to set sync marker: retcode=" << retcode));
          return set_cr_error(retcode);
        }
        // clean up full sync index, ignoring errors
        yield call(new RGWRadosRemoveCR(sc->env->driver, {pool, oid}));
        // transition to incremental sync
        return set_cr_done();
      }
      if (lost_lock || lost_bid) {
        return set_cr_error(-EBUSY);
      }
    }
    return 0;
  }
};

// Incremental sync of one datalog shard: merge out-of-band notifications,
// retry the error repo, and tail the remote datalog shard.
class RGWDataIncSyncShardCR : public RGWDataBaseSyncShardCR {
  static constexpr int max_error_entries = 10;
  static constexpr uint32_t retry_backoff_secs = 60;

  ceph::mutex& inc_lock;
  bc::flat_set<rgw_data_notify_entry>& modified_shards;  // fed by wakeup notifications
  bc::flat_set<rgw_data_notify_entry> current_modified;
  decltype(current_modified)::iterator modified_iter;

  ceph::coarse_real_time error_retry_time;
  string error_marker;
  std::map<std::string, bufferlist> error_entries;
  decltype(error_entries)::iterator iter;
  ceph::real_time entry_timestamp;
  std::optional<uint64_t> gen;

  string next_marker;
  vector<rgw_data_change_log_entry> log_entries;
decltype(log_entries)::iterator log_iter;
  bool truncated = false;
  int cbret = 0;
  bool lost_lock = false;
  bool lost_bid = false;

  // Idle wait between polls, shortened so we wake in time for the next
  // error-repo retry if one is pending sooner.
  utime_t get_idle_interval() const {
    ceph::timespan interval = std::chrono::seconds(cct->_conf->rgw_data_sync_poll_interval);
    if (!ceph::coarse_real_clock::is_zero(error_retry_time)) {
      auto now = ceph::coarse_real_clock::now();
      if (error_retry_time > now) {
        auto d = error_retry_time - now;
        if (interval > d) {
          interval = d;
        }
      }
    }
    // convert timespan -> time_point -> utime_t
    return utime_t(ceph::coarse_real_clock::zero() + interval);
  }

public:
  RGWDataIncSyncShardCR(
    RGWDataSyncCtx *const sc, const rgw_pool& pool, const uint32_t shard_id,
    rgw_data_sync_marker& sync_marker, RGWSyncTraceNodeRef tn,
    const string& status_oid, const rgw_raw_obj& error_repo,
    boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
    const rgw_data_sync_status& sync_status, RGWObjVersionTracker& objv,
    const boost::intrusive_ptr<rgw::bucket_sync::Cache>& bucket_shard_cache,
    ceph::mutex& inc_lock,
    bc::flat_set<rgw_data_notify_entry>& modified_shards)
    : RGWDataBaseSyncShardCR(sc, pool, shard_id, sync_marker, tn,
                             status_oid, error_repo, std::move(lease_cr),
                             sync_status, objv, bucket_shard_cache),
      inc_lock(inc_lock), modified_shards(modified_shards) {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      tn->log(10, "start incremental sync");
      marker_tracker.emplace(sc, status_oid, sync_marker, tn, objv);
      do {
        if (!lease_cr->is_locked()) {
          lost_lock = true;
          tn->log(1, "lease is lost, abort");
          break;
        }
        if (!sc->env->bid_manager->is_highest_bidder(shard_id)) {
          tn->log(1, "lost bid");
          lost_bid = true;
          break;
        }
        {
          // snapshot pending notifications under the lock, then release it
          current_modified.clear();
          std::unique_lock il(inc_lock);
          current_modified.swap(modified_shards);
          il.unlock();
        }
        if (current_modified.size() > 0) {
          tn->set_flag(RGW_SNS_FLAG_ACTIVE); /* actually have entries to sync */
        }
        /* process out of band updates */
        for (modified_iter = current_modified.begin();
             modified_iter != current_modified.end();
++modified_iter) {
          if (!lease_cr->is_locked()) {
            tn->log(1, "lease is lost, abort");
            lost_lock = true;
            break;
          }
          retcode = parse_bucket_key(modified_iter->key, source_bs);
          if (retcode < 0) {
            tn->log(1, SSTR("failed to parse bucket shard: " << modified_iter->key));
            continue;
          }
          tn->log(20, SSTR("received async update notification: " << modified_iter->key));
          spawn(data_sync_single_entry(sc, source_bs, modified_iter->gen, {},
                                       ceph::real_time{}, lease_cr, bucket_shard_cache,
                                       &*marker_tracker, error_repo, tn, false), false);
        }

        if (error_retry_time <= ceph::coarse_real_clock::now()) {
          /* process bucket shards that previously failed */
          omapvals = std::make_shared<RGWRadosGetOmapValsCR::Result>();
          yield call(new RGWRadosGetOmapValsCR(sc->env->driver, error_repo,
                                               error_marker, max_error_entries, omapvals));
          error_entries = std::move(omapvals->entries);
          tn->log(20, SSTR("read error repo, got " << error_entries.size() << " entries"));
          iter = error_entries.begin();
          for (; iter != error_entries.end(); ++iter) {
            if (!lease_cr->is_locked()) {
              tn->log(1, "lease is lost, abort");
              lost_lock = true;
              break;
            }
            error_marker = iter->first;
            entry_timestamp = rgw::error_repo::decode_value(iter->second);
            retcode = rgw::error_repo::decode_key(iter->first, source_bs, gen);
            if (retcode == -EINVAL) {
              // backward compatibility for string keys that don't encode a gen
              retcode = parse_bucket_key(error_marker, source_bs);
            }
            if (retcode < 0) {
              // drop undecodable entries from the error repo
              tn->log(1, SSTR("failed to parse bucket shard: " << error_marker));
              spawn(rgw::error_repo::remove_cr(sc->env->driver->svc()->rados, error_repo,
                                               error_marker, entry_timestamp), false);
              continue;
            }
            tn->log(10, SSTR("gen is " << gen));
            if (!gen) {
              // write all full sync obligations for the bucket to error repo
              spawn(new RGWDataIncrementalSyncFullObligationCR(sc, source_bs, error_repo,
                                                               error_marker, entry_timestamp,
                                                               tn), false);
            } else {
              tn->log(20, SSTR("handle error entry key="
                               << to_string(source_bs, gen)
                               << " timestamp=" << entry_timestamp));
spawn(data_sync_single_entry(sc, source_bs, gen, "", entry_timestamp,
                                           lease_cr, bucket_shard_cache, &*marker_tracker,
                                           error_repo, tn, true), false);
            }
          }
          if (!omapvals->more) {
            // finished a full pass of the error repo; back off before retrying
            error_retry_time = ceph::coarse_real_clock::now() + make_timespan(retry_backoff_secs);
            error_marker.clear();
          }
        }
        omapvals.reset();

        tn->log(20, SSTR("shard_id=" << shard_id << " sync_marker=" << sync_marker.marker));
        yield call(new RGWReadRemoteDataLogShardCR(sc, shard_id, sync_marker.marker,
                                                   &next_marker, &log_entries, &truncated));
        if (retcode < 0 && retcode != -ENOENT) {
          tn->log(0, SSTR("ERROR: failed to read remote data log info: ret=" << retcode));
          drain_all();
          return set_cr_error(retcode);
        }

        if (log_entries.size() > 0) {
          tn->set_flag(RGW_SNS_FLAG_ACTIVE); /* actually have entries to sync */
        }

        for (log_iter = log_entries.begin(); log_iter != log_entries.end(); ++log_iter) {
          if (!lease_cr->is_locked()) {
            tn->log(1, "lease is lost, abort");
            lost_lock = true;
            break;
          }
          tn->log(20, SSTR("shard_id=" << shard_id << " log_entry: " << log_iter->log_id
                           << ":" << log_iter->log_timestamp << ":" << log_iter->entry.key));
          retcode = parse_bucket_key(log_iter->entry.key, source_bs);
          if (retcode < 0) {
            tn->log(1, SSTR("failed to parse bucket shard: " << log_iter->entry.key));
            marker_tracker->try_update_high_marker(log_iter->log_id, 0, log_iter->log_timestamp);
            continue;
          }
          if (!marker_tracker->start(log_iter->log_id, 0, log_iter->log_timestamp)) {
            tn->log(0, SSTR("ERROR: cannot start syncing " << log_iter->log_id << ". Duplicate entry?"));
          } else {
            // NOTE(review): the log message below appears to be missing spaces
            // before "shard: " and "on gen " — cosmetic only; left unchanged here.
            tn->log(1, SSTR("incremental sync on " << log_iter->entry.key << "shard: "
                            << shard_id << "on gen " << log_iter->entry.gen));
            yield_spawn_window(data_sync_single_entry(sc, source_bs, log_iter->entry.gen,
                                                      log_iter->log_id, log_iter->log_timestamp,
                                                      lease_cr, bucket_shard_cache,
                                                      &*marker_tracker, error_repo, tn, false),
                               sc->lcc.adj_concurrency(cct->_conf->rgw_data_sync_spawn_window),
                               [&](uint64_t stack_id, int ret) {
                                 if (ret < 0) {
                                   tn->log(10, SSTR("data_sync_single_entry returned error: " << ret));
                                   cbret = ret;
                                 }
                                 return 0;
                               });
          }
        }
        if (cbret < 0 ) {
          retcode = cbret;
          drain_all();
          return set_cr_error(retcode);
        }

        tn->log(20, SSTR("shard_id=" << shard_id << " sync_marker=" << sync_marker.marker
                         << " next_marker=" << next_marker << " truncated=" << truncated));
        if (!next_marker.empty()) {
          sync_marker.marker = next_marker;
        } else if (!log_entries.empty()) {
          sync_marker.marker = log_entries.back().log_id;
        }
        if (!truncated) {
          // we reached the end, wait a while before checking for more
          tn->unset_flag(RGW_SNS_FLAG_ACTIVE);
          yield wait(get_idle_interval());
        }
      } while (true);

      if (lost_bid) {
        return set_cr_error(-EBUSY);
      } else if (lost_lock) {
        drain_all();
        yield marker_tracker->flush();
        return set_cr_error(-ECANCELED);
      }
    }
    return 0;
  }
};

// Per-datalog-shard driver: takes the shard lease, then runs the full-sync or
// incremental-sync coroutine according to the persisted marker state.
class RGWDataSyncShardCR : public RGWCoroutine {
  RGWDataSyncCtx *const sc;
  const rgw_pool pool;
  const uint32_t shard_id;
  rgw_data_sync_marker& sync_marker;
  rgw_data_sync_status sync_status;
  const RGWSyncTraceNodeRef tn;
  RGWObjVersionTracker& objv;
  bool *reset_backoff;

  ceph::mutex inc_lock = ceph::make_mutex("RGWDataSyncShardCR::inc_lock");
  ceph::condition_variable inc_cond;

  RGWDataSyncEnv *const sync_env{ sc->env };

  const string status_oid{ RGWDataSyncStatusManager::shard_obj_name(
      sc->source_zone, shard_id) };
  const rgw_raw_obj error_repo{ pool, status_oid + ".retry" };

  // target number of entries to cache before recycling idle ones
  static constexpr size_t target_cache_size = 256;
boost::intrusive_ptr<rgw::bucket_sync::Cache> bucket_shard_cache {
    rgw::bucket_sync::Cache::create(target_cache_size) };

  boost::intrusive_ptr<RGWContinuousLeaseCR> lease_cr;
  boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;

  bc::flat_set<rgw_data_notify_entry> modified_shards;

public:
  RGWDataSyncShardCR(RGWDataSyncCtx* const _sc, const rgw_pool& pool,
                     const uint32_t shard_id, rgw_data_sync_marker& marker,
                     const rgw_data_sync_status& sync_status,
                     RGWSyncTraceNodeRef& tn, RGWObjVersionTracker& objv,
                     bool *reset_backoff)
    : RGWCoroutine(_sc->cct), sc(_sc), pool(pool), shard_id(shard_id),
      sync_marker(marker), sync_status(sync_status), tn(tn),
      objv(objv), reset_backoff(reset_backoff) {
    set_description() << "data sync shard source_zone=" << sc->source_zone
                      << " shard_id=" << shard_id;
  }

  ~RGWDataSyncShardCR() override {
    if (lease_cr) {
      lease_cr->abort();
    }
  }

  // Queue out-of-band change notifications for the incremental sync loop.
  void append_modified_shards(bc::flat_set<rgw_data_notify_entry>& entries) {
    std::lock_guard l{inc_lock};
    modified_shards.insert(entries.begin(), entries.end());
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      if (!sc->env->bid_manager->is_highest_bidder(shard_id)) {
        tn->log(10, "not the highest bidder");
        return set_cr_error(-EBUSY);
      }
      yield init_lease_cr();
      // spin (sleeping) until the continuous lease is acquired or fails
      while (!lease_cr->is_locked()) {
        if (lease_cr->is_done()) {
          tn->log(5, "failed to take lease");
          set_status("lease lock failed, early abort");
          drain_all();
          return set_cr_error(lease_cr->get_ret_status());
        }
        set_sleeping(true);
        yield;
      }
      *reset_backoff = true;
      tn->log(10, "took lease");
      /* Reread data sync status to fetch latest marker and objv */
      objv.clear();
      yield call(new RGWSimpleRadosReadCR<rgw_data_sync_marker>(
                   sync_env->dpp, sync_env->driver,
                   rgw_raw_obj(pool, status_oid), &sync_marker, true, &objv));
      if (retcode < 0) {
        lease_cr->go_down();
        drain_all();
        return set_cr_error(retcode);
      }
      while (true) {
        if (sync_marker.state == rgw_data_sync_marker::FullSync) {
          yield call(new RGWDataFullSyncShardCR(sc, pool, shard_id, sync_marker, tn,
status_oid, error_repo, lease_cr,
                                                sync_status, objv, bucket_shard_cache));
          if (retcode < 0) {
            if (retcode != -EBUSY) {
              tn->log(10, SSTR("full sync failed (retcode=" << retcode << ")"));
            }
            lease_cr->go_down();
            drain_all();
            return set_cr_error(retcode);
          }
        } else if (sync_marker.state == rgw_data_sync_marker::IncrementalSync) {
          yield call(new RGWDataIncSyncShardCR(sc, pool, shard_id, sync_marker, tn,
                                               status_oid, error_repo, lease_cr,
                                               sync_status, objv, bucket_shard_cache,
                                               inc_lock, modified_shards));
          if (retcode < 0) {
            if (retcode != -EBUSY) {
              tn->log(10, SSTR("incremental sync failed (retcode=" << retcode << ")"));
            }
            lease_cr->go_down();
            drain_all();
            return set_cr_error(retcode);
          }
        } else {
          // unknown marker state
          lease_cr->go_down();
          drain_all();
          return set_cr_error(-EIO);
        }
      }
    }
    return 0;
  }

  // (Re)spawn the continuous lease coroutine guarding this shard's status object.
  void init_lease_cr() {
    set_status("acquiring sync lock");
    uint32_t lock_duration = cct->_conf->rgw_sync_lease_period;
    string lock_name = "sync_lock";
    if (lease_cr) {
      lease_cr->abort();
    }
    auto driver = sync_env->driver;
    lease_cr.reset(new RGWContinuousLeaseCR(sync_env->async_rados, driver,
                                            rgw_raw_obj(pool, status_oid),
                                            lock_name, lock_duration, this,
                                            &sc->lcc));
    lease_stack.reset(spawn(lease_cr.get(), false));
  }
};

// Backoff wrapper around RGWDataSyncShardCR: re-allocates the shard coroutine
// after failures and forwards wakeup notifications to the live instance.
class RGWDataSyncShardControlCR : public RGWBackoffControlCR {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  rgw_pool pool;

  uint32_t shard_id;
  rgw_data_sync_marker sync_marker;
  // NOTE(review): like RGWDataFullSyncSingleEntryCR, the ctor parameter
  // `sync_status` shadows this member, which is never initialized from it
  // in the init list below — verify against upstream.
  rgw_data_sync_status sync_status;
  RGWSyncTraceNodeRef tn;
  RGWObjVersionTracker& objv;

public:
  RGWDataSyncShardControlCR(RGWDataSyncCtx *_sc, const rgw_pool& _pool,
                            uint32_t _shard_id, rgw_data_sync_marker& _marker,
                            const rgw_data_sync_status& sync_status,
                            RGWObjVersionTracker& objv,
                            RGWSyncTraceNodeRef& _tn_parent)
    : RGWBackoffControlCR(_sc->cct, false), sc(_sc), sync_env(_sc->env),
      pool(_pool), shard_id(_shard_id), sync_marker(_marker), objv(objv) {
    tn = sync_env->sync_tracer->add_node(_tn_parent, "shard", std::to_string(shard_id));
  }

  RGWCoroutine *alloc_cr() override {
    return new RGWDataSyncShardCR(sc, pool,
shard_id, sync_marker, sync_status, tn,
                                  objv, backoff_ptr());
  }

  // Re-read the shard marker after a backoff so the next attempt resumes
  // from the latest persisted position.
  RGWCoroutine *alloc_finisher_cr() override {
    return new RGWSimpleRadosReadCR<rgw_data_sync_marker>(
      sync_env->dpp, sync_env->driver,
      rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool,
                  RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id)),
      &sync_marker, true, &objv);
  }

  void append_modified_shards(bc::flat_set<rgw_data_notify_entry>& keys) {
    std::lock_guard l{cr_lock()};
    RGWDataSyncShardCR *cr = static_cast<RGWDataSyncShardCR *>(get_cr());
    if (!cr) {
      return;  // no live shard coroutine right now; notification is dropped
    }
    cr->append_modified_shards(keys);
  }
};

// Periodically broadcasts sync-lock bidding information for the shard
// bid manager; loops forever, sleeping one lease period between rounds.
class RGWDataSyncShardNotifyCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  RGWSyncTraceNodeRef tn;

public:
  RGWDataSyncShardNotifyCR(RGWDataSyncEnv *_sync_env, RGWSyncTraceNodeRef& _tn)
    : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), tn(_tn) {}

  int operate(const DoutPrefixProvider* dpp) override {
    reenter(this) {
      for (;;) {
        set_status("sync lock notification");
        yield call(sync_env->bid_manager->notify_cr());
        if (retcode < 0) {
          tn->log(5, SSTR("ERROR: failed to notify bidding information" << retcode));
          return set_cr_error(retcode);
        }
        set_status("sleeping");
        yield wait(utime_t(cct->_conf->rgw_sync_lease_period, 0));
      }
    }
    return 0;
  }
};

// Top-level data sync driver: initializes sync status, builds full-sync maps,
// then spawns one RGWDataSyncShardControlCR per datalog shard.
class RGWDataSyncCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  uint32_t num_shards;

  rgw_data_sync_status sync_status;
  std::vector<RGWObjVersionTracker> objvs;

  ceph::mutex shard_crs_lock = ceph::make_mutex("RGWDataSyncCR::shard_crs_lock");
  map<int, RGWDataSyncShardControlCR *> shard_crs;

  bool *reset_backoff;

  RGWSyncTraceNodeRef tn;

  RGWDataSyncModule *data_sync_module{nullptr};

  boost::intrusive_ptr<RGWContinuousLeaseCR> init_lease;
  boost::intrusive_ptr<RGWCoroutinesStack> lease_stack;
  boost::intrusive_ptr<RGWCoroutinesStack> notify_stack;
  RGWObjVersionTracker obj_version;

public:
  RGWDataSyncCR(RGWDataSyncCtx *_sc, uint32_t _num_shards,
                RGWSyncTraceNodeRef& _tn, bool *_reset_backoff)
    : RGWCoroutine(_sc->cct), sc(_sc),
sync_env(_sc->env),
      num_shards(_num_shards),
      reset_backoff(_reset_backoff), tn(_tn) {
  }

  ~RGWDataSyncCR() override {
    // drop our refs on the spawned shard control coroutines
    for (auto iter : shard_crs) {
      iter.second->put();
    }
    if (init_lease) {
      init_lease->abort();
    }
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      yield {
        ldpp_dout(dpp, 10) << "broadcast sync lock notify" << dendl;
        notify_stack.reset(spawn(new RGWDataSyncShardNotifyCR(sync_env, tn), false));
      }
      /* read sync status */
      yield call(new RGWReadDataSyncStatusCoroutine(sc, &sync_status,
                                                    &obj_version, objvs));

      data_sync_module = sync_env->sync_module->get_data_handler();

      if (retcode < 0 && retcode != -ENOENT) {
        tn->log(0, SSTR("ERROR: failed to fetch sync status, retcode=" << retcode));
        return set_cr_error(retcode);
      }

      /* not yet in steady-state sync: take the status lease before mutating state */
      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state !=
          rgw_data_sync_info::StateSync) {
        init_lease.reset(
          RGWInitDataSyncStatusCoroutine::continuous_lease_cr(sc, this));
        yield lease_stack.reset(spawn(init_lease.get(), false));

        while (!init_lease->is_locked()) {
          if (init_lease->is_done()) {
            tn->log(5, "ERROR: failed to take data sync status lease");
            set_status("lease lock failed, early abort");
            drain_all_but_stack(notify_stack.get());
            return set_cr_error(init_lease->get_ret_status());
          }
          tn->log(5, "waiting on data sync status lease");
          yield set_sleeping(true);
        }
        tn->log(5, "acquired data sync status lease");

        // Reread sync status now that we've acquired the lock!
obj_version.clear();
        yield call(new RGWReadDataSyncStatusCoroutine(sc, &sync_status,
                                                      &obj_version, objvs));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to fetch sync status, retcode=" << retcode));
          return set_cr_error(retcode);
        }
      }

      /* state: init status */
      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state ==
          rgw_data_sync_info::StateInit) {
        tn->log(20, SSTR("init"));
        sync_status.sync_info.num_shards = num_shards;
        uint64_t instance_id;
        instance_id = ceph::util::generate_random_number<uint64_t>();
        yield call(new RGWInitDataSyncStatusCoroutine(sc, num_shards, instance_id, tn,
                                                      &sync_status, init_lease,
                                                      obj_version, objvs));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to init sync, retcode=" << retcode));
          init_lease->go_down();
          drain_all_but_stack(notify_stack.get());
          return set_cr_error(retcode);
        }
        // sets state = StateBuildingFullSyncMaps
        *reset_backoff = true;
      }

      data_sync_module->init(sc, sync_status.sync_info.instance_id);

      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state ==
          rgw_data_sync_info::StateBuildingFullSyncMaps) {
        tn->log(10, SSTR("building full sync maps"));
        /* call sync module init here */
        sync_status.sync_info.num_shards = num_shards;
        yield call(data_sync_module->init_sync(dpp, sc));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: sync module init_sync() failed, retcode=" << retcode));
          return set_cr_error(retcode);
        }

        // bail if the status lease was lost while the module initialized
        if (!init_lease->is_locked()) {
          init_lease->go_down();
          drain_all_but_stack(notify_stack.get());
          return set_cr_error(-ECANCELED);
        }

        /* state: building full sync maps */
        yield call(new RGWListBucketIndexesCR(sc, &sync_status, objvs));
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to build full sync maps, retcode=" << retcode));
          return set_cr_error(retcode);
        }
        sync_status.sync_info.state = rgw_data_sync_info::StateSync;

        if (!init_lease->is_locked()) {
          init_lease->go_down();
          drain_all_but_stack(notify_stack.get());
          return set_cr_error(-ECANCELED);
        }

        /* update new state */
        yield call(set_sync_info_cr());
        if
(retcode < 0) {
          tn->log(0, SSTR("ERROR: failed to write sync status, retcode=" << retcode));
          return set_cr_error(retcode);
        }

        *reset_backoff = true;
      }

      yield call(data_sync_module->start_sync(dpp, sc));
      if (retcode < 0) {
        tn->log(0, SSTR("ERROR: failed to start sync, retcode=" << retcode));
        return set_cr_error(retcode);
      }

      if ((rgw_data_sync_info::SyncState)sync_status.sync_info.state ==
          rgw_data_sync_info::StateSync) {
        // steady state reached; the init lease is no longer needed
        if (init_lease) {
          init_lease->go_down();
          drain_all_but_stack(notify_stack.get());
          init_lease.reset();
          lease_stack.reset();
        }
        yield {
          tn->log(10, SSTR("spawning " << num_shards << " shards sync"));
          // one control coroutine per datalog shard, registered for wakeup()
          for (map<uint32_t, rgw_data_sync_marker>::iterator iter = sync_status.sync_markers.begin();
               iter != sync_status.sync_markers.end(); ++iter) {
            RGWDataSyncShardControlCR *cr =
              new RGWDataSyncShardControlCR(sc, sync_env->svc->zone->get_zone_params().log_pool,
                                            iter->first, iter->second, sync_status,
                                            objvs[iter->first], tn);
            cr->get();
            shard_crs_lock.lock();
            shard_crs[iter->first] = cr;
            shard_crs_lock.unlock();
            spawn(cr, true);
          }
        }
      }

      notify_stack->cancel();

      return set_cr_done();
    }
    return 0;
  }

  // Persist sync_status.sync_info, guarded by obj_version.
  RGWCoroutine *set_sync_info_cr() {
    return new RGWSimpleRadosWriteCR<rgw_data_sync_info>(
      sync_env->dpp, sync_env->driver,
      rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool,
                  RGWDataSyncStatusManager::sync_status_oid(sc->source_zone)),
      sync_status.sync_info, &obj_version);
  }

  // Route an out-of-band notification to the matching shard control coroutine.
  void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries) {
    std::lock_guard l{shard_crs_lock};
    map<int, RGWDataSyncShardControlCR *>::iterator iter = shard_crs.find(shard_id);
    if (iter == shard_crs.end()) {
      return;
    }
    iter->second->append_modified_shards(entries);
    iter->second->wakeup();
  }
};

// Default data-sync handler: plain object copy/removal between zones.
class RGWDefaultDataSyncModule : public RGWDataSyncModule {
public:
  RGWDefaultDataSyncModule() {}

  RGWCoroutine *sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
                            rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
                            std::optional<uint64_t> versioned_epoch,
                            const rgw_zone_set_entry&
source_trace_entry,
                            rgw_zone_set *zones_trace) override;
  RGWCoroutine *remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
                              rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
                              real_time& mtime, bool versioned, uint64_t versioned_epoch,
                              rgw_zone_set *zones_trace) override;
  RGWCoroutine *create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
                                     rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
                                     real_time& mtime, rgw_bucket_entry_owner& owner,
                                     bool versioned, uint64_t versioned_epoch,
                                     rgw_zone_set *zones_trace) override;
};

class RGWDefaultSyncModuleInstance : public RGWSyncModuleInstance {
  RGWDefaultDataSyncModule data_handler;
public:
  RGWDefaultSyncModuleInstance() {}
  RGWDataSyncModule *get_data_handler() override {
    return &data_handler;
  }
  bool supports_user_writes() override {
    return true;
  }
};

int RGWDefaultSyncModule::create_instance(const DoutPrefixProvider *dpp, CephContext *cct,
                                          const JSONFormattable& config,
                                          RGWSyncModuleInstanceRef *instance)
{
  instance->reset(new RGWDefaultSyncModuleInstance());
  return 0;
}

// Loads a user's info, identity and ACL asynchronously, and exposes
// bucket/object permission checks on behalf of that user.
class RGWUserPermHandler {
  friend struct Init;
  friend class Bucket;

  RGWDataSyncEnv *sync_env;
  rgw_user uid;

  struct _info {
    RGWUserInfo user_info;
    rgw::IAM::Environment env;
    std::unique_ptr<rgw::auth::Identity> identity;
    RGWAccessControlPolicy user_acl;
  };

  std::shared_ptr<_info> info;

  struct Init;

  std::shared_ptr<Init> init_action;

  // Blocking load executed on the async thread via RGWGenericAsyncCR.
  struct Init : public RGWGenericAsyncCR::Action {
    RGWDataSyncEnv *sync_env;
    rgw_user uid;
    std::shared_ptr<RGWUserPermHandler::_info> info;
    int ret{0};

    Init(RGWUserPermHandler *handler)
      : sync_env(handler->sync_env), uid(handler->uid), info(handler->info) {}

    int operate() override {
      auto user_ctl = sync_env->driver->getRados()->ctl.user;

      ret = user_ctl->get_info_by_uid(sync_env->dpp, uid, &info->user_info, null_yield);
      if (ret < 0) {
        return ret;
      }

      info->identity = rgw::auth::transform_old_authinfo(sync_env->cct,
                                                         uid,
                                                         RGW_PERM_FULL_CONTROL,
                                                         false, /* system_request? */
                                                         TYPE_RGW);
      map<string, bufferlist> uattrs;

      ret = user_ctl->get_attrs_by_uid(sync_env->dpp, uid, &uattrs, null_yield);
      if (ret == 0) {
        ret = RGWUserPermHandler::policy_from_attrs(sync_env->cct, uattrs, &info->user_acl);
      }
      if (ret == -ENOENT) {
        // no stored ACL: fall back to the user's default policy
        info->user_acl.create_default(uid, info->user_info.display_name);
      }

      return 0;
    }
  };

public:
  RGWUserPermHandler(RGWDataSyncEnv *_sync_env, const rgw_user& _uid)
    : sync_env(_sync_env), uid(_uid) {}

  RGWCoroutine *init_cr() {
    info = make_shared<_info>();
    init_action = make_shared<Init>(this);

    return new RGWGenericAsyncCR(sync_env->cct, sync_env->async_rados, init_action);
  }

  // Permission context for one bucket: combines the user's ACL with the
  // bucket's ACL and a perm_state built from the loaded identity.
  class Bucket {
    RGWDataSyncEnv *sync_env;
    std::shared_ptr<_info> info;
    RGWAccessControlPolicy bucket_acl;
    std::optional<perm_state> ps;
  public:
    Bucket() {}

    int init(RGWUserPermHandler *handler,
             const RGWBucketInfo& bucket_info,
             const map<string, bufferlist>& bucket_attrs);

    bool verify_bucket_permission(int perm);
    bool verify_object_permission(const map<string, bufferlist>& obj_attrs, int perm);
  };

  // Decode an RGWAccessControlPolicy from the RGW_ATTR_ACL attribute.
  // Returns -ENOENT when the attribute is absent, -EIO on decode failure.
  static int policy_from_attrs(CephContext *cct,
                               const map<string, bufferlist>& attrs,
                               RGWAccessControlPolicy *acl) {
    acl->set_ctx(cct);

    auto aiter = attrs.find(RGW_ATTR_ACL);
    if (aiter == attrs.end()) {
      return -ENOENT;
    }
    auto iter = aiter->second.begin();
    try {
      acl->decode(iter);
    } catch (buffer::error& err) {
      ldout(cct, 0) << "ERROR: " << __func__ << "(): could not decode policy, caught buffer::error" << dendl;
      return -EIO;
    }

    return 0;
  }

  int init_bucket(const RGWBucketInfo& bucket_info,
                  const map<string, bufferlist>& bucket_attrs,
                  Bucket *bs) {
    return bs->init(this, bucket_info, bucket_attrs);
  }
};

int RGWUserPermHandler::Bucket::init(RGWUserPermHandler *handler,
                                     const RGWBucketInfo& bucket_info,
                                     const map<string, bufferlist>& bucket_attrs)
{
  sync_env = handler->sync_env;
  info = handler->info;

  int r = RGWUserPermHandler::policy_from_attrs(sync_env->cct, bucket_attrs, &bucket_acl);
  if (r < 0) {
    return r;
  }

  ps.emplace(sync_env->cct,
             info->env,
             info->identity.get(),
bucket_info, info->identity->get_perm_mask(), false, /* defer to bucket acls */ nullptr, /* referer */ false); /* request_payer */ return 0; } bool RGWUserPermHandler::Bucket::verify_bucket_permission(int perm) { return verify_bucket_permission_no_policy(sync_env->dpp, &(*ps), &info->user_acl, &bucket_acl, perm); } bool RGWUserPermHandler::Bucket::verify_object_permission(const map<string, bufferlist>& obj_attrs, int perm) { RGWAccessControlPolicy obj_acl; int r = policy_from_attrs(sync_env->cct, obj_attrs, &obj_acl); if (r < 0) { return r; } return verify_bucket_permission_no_policy(sync_env->dpp, &(*ps), &bucket_acl, &obj_acl, perm); } class RGWFetchObjFilter_Sync : public RGWFetchObjFilter_Default { rgw_bucket_sync_pipe sync_pipe; std::shared_ptr<RGWUserPermHandler::Bucket> bucket_perms; std::optional<rgw_sync_pipe_dest_params> verify_dest_params; std::optional<ceph::real_time> mtime; std::optional<string> etag; std::optional<uint64_t> obj_size; std::unique_ptr<rgw::auth::Identity> identity; std::shared_ptr<bool> need_retry; public: RGWFetchObjFilter_Sync(rgw_bucket_sync_pipe& _sync_pipe, std::shared_ptr<RGWUserPermHandler::Bucket>& _bucket_perms, std::optional<rgw_sync_pipe_dest_params>&& _verify_dest_params, std::shared_ptr<bool>& _need_retry) : sync_pipe(_sync_pipe), bucket_perms(_bucket_perms), verify_dest_params(std::move(_verify_dest_params)), need_retry(_need_retry) { *need_retry = false; } int filter(CephContext *cct, const rgw_obj_key& source_key, const RGWBucketInfo& dest_bucket_info, std::optional<rgw_placement_rule> dest_placement_rule, const map<string, bufferlist>& obj_attrs, std::optional<rgw_user> *poverride_owner, const rgw_placement_rule **prule) override; }; int RGWFetchObjFilter_Sync::filter(CephContext *cct, const rgw_obj_key& source_key, const RGWBucketInfo& dest_bucket_info, std::optional<rgw_placement_rule> dest_placement_rule, const map<string, bufferlist>& obj_attrs, std::optional<rgw_user> *poverride_owner, const rgw_placement_rule 
                                   **prule)
{
  int abort_err = -ERR_PRECONDITION_FAILED;

  rgw_sync_pipe_params params;

  RGWObjTags obj_tags;

  // object tags participate in pipe-rule matching; a decode failure is
  // logged and treated as "no tags"
  auto iter = obj_attrs.find(RGW_ATTR_TAGS);
  if (iter != obj_attrs.end()) {
    try {
      auto it = iter->second.cbegin();
      obj_tags.decode(it);
    } catch (buffer::error &err) {
      ldout(cct, 0) << "ERROR: " << __func__ << ": caught buffer::error couldn't decode TagSet " << dendl;
    }
  }

  // no matching pipe rule for this object: abort the fetch
  if (!sync_pipe.info.handler.find_obj_params(source_key,
                                              obj_tags.get_tags(),
                                              &params)) {
    return abort_err;
  }

  if (verify_dest_params &&
      !(*verify_dest_params == params.dest)) {
    /* raced! original dest params were different, will need to retry */
    ldout(cct, 0) << "WARNING: " << __func__ << ": pipe dest params are different than original params, must have raced with object rewrite, retrying" << dendl;
    *need_retry = true;
    return -ECANCELED;
  }

  std::optional<std::map<string, bufferlist> > new_attrs;

  if (params.dest.acl_translation) {
    rgw_user& acl_translation_owner = params.dest.acl_translation->owner;
    if (!acl_translation_owner.empty()) {
      // in user mode the translated owner must be the dest bucket owner
      if (params.mode == rgw_sync_pipe_params::MODE_USER &&
          acl_translation_owner != dest_bucket_info.owner) {
        ldout(cct, 0) << "ERROR: " << __func__ << ": acl translation was requested, but user (" << acl_translation_owner << ") is not dest bucket owner (" << dest_bucket_info.owner << ")" << dendl;
        return -EPERM;
      }
      *poverride_owner = acl_translation_owner;
    }
  }
  if (params.mode == rgw_sync_pipe_params::MODE_USER) {
    if (!bucket_perms->verify_object_permission(obj_attrs, RGW_PERM_READ)) {
      ldout(cct, 0) << "ERROR: " << __func__ << ": permission check failed: user not allowed to fetch object" << dendl;
      return -EPERM;
    }
  }

  // apply the pipe's storage-class override unless the caller already
  // pinned a placement rule (dest_rule lives in the base class)
  if (!dest_placement_rule &&
      params.dest.storage_class) {
    dest_rule.storage_class = *params.dest.storage_class;
    dest_rule.inherit_from(dest_bucket_info.placement_rule);
    dest_placement_rule = dest_rule;
    *prule = &dest_rule;
  }

  return RGWFetchObjFilter_Default::filter(cct,
                                           source_key,
                                           dest_bucket_info,
                                           dest_placement_rule,
                                           obj_attrs,
                                           poverride_owner,
                                           prule);
}

class
RGWObjFetchCR : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; rgw_bucket_sync_pipe& sync_pipe; rgw_obj_key& key; std::optional<rgw_obj_key> dest_key; std::optional<uint64_t> versioned_epoch; bool stat_follow_olh; const rgw_zone_set_entry& source_trace_entry; rgw_zone_set *zones_trace; bool need_more_info{false}; bool check_change{false}; ceph::real_time src_mtime; uint64_t src_size; string src_etag; map<string, bufferlist> src_attrs; map<string, string> src_headers; std::optional<rgw_user> param_user; rgw_sync_pipe_params::Mode param_mode; std::optional<RGWUserPermHandler> user_perms; std::shared_ptr<RGWUserPermHandler::Bucket> source_bucket_perms; RGWUserPermHandler::Bucket dest_bucket_perms; std::optional<rgw_sync_pipe_dest_params> dest_params; int try_num{0}; std::shared_ptr<bool> need_retry; public: RGWObjFetchCR(RGWDataSyncCtx *_sc, rgw_bucket_sync_pipe& _sync_pipe, rgw_obj_key& _key, std::optional<rgw_obj_key> _dest_key, std::optional<uint64_t> _versioned_epoch, bool _stat_follow_olh, const rgw_zone_set_entry& source_trace_entry, rgw_zone_set *_zones_trace) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), sync_pipe(_sync_pipe), key(_key), dest_key(_dest_key), versioned_epoch(_versioned_epoch), stat_follow_olh(_stat_follow_olh), source_trace_entry(source_trace_entry), zones_trace(_zones_trace) { } int operate(const DoutPrefixProvider *dpp) override { reenter(this) { #define MAX_RACE_RETRIES_OBJ_FETCH 10 for (try_num = 0; try_num < MAX_RACE_RETRIES_OBJ_FETCH; ++try_num) { { std::optional<rgw_user> param_acl_translation; std::optional<string> param_storage_class; if (!sync_pipe.info.handler.find_basic_info_without_tags(key, &param_user, &param_acl_translation, &param_storage_class, &param_mode, &need_more_info)) { if (!need_more_info) { return set_cr_error(-ERR_PRECONDITION_FAILED); } } } if (need_more_info) { ldout(cct, 20) << "Could not determine exact policy rule for obj=" << key << ", will read source object attributes" << dendl; 
/* * we need to fetch info about source object, so that we can determine * the correct policy configuration. This can happen if there are multiple * policy rules, and some depend on the object tagging */ yield call(new RGWStatRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone, sync_pipe.info.source_bs.bucket, key, &src_mtime, &src_size, &src_etag, &src_attrs, &src_headers)); if (retcode < 0) { return set_cr_error(retcode); } RGWObjTags obj_tags; auto iter = src_attrs.find(RGW_ATTR_TAGS); if (iter != src_attrs.end()) { try { auto it = iter->second.cbegin(); obj_tags.decode(it); } catch (buffer::error &err) { ldout(cct, 0) << "ERROR: " << __func__ << ": caught buffer::error couldn't decode TagSet " << dendl; } } rgw_sync_pipe_params params; if (!sync_pipe.info.handler.find_obj_params(key, obj_tags.get_tags(), &params)) { return set_cr_error(-ERR_PRECONDITION_FAILED); } param_user = params.user; param_mode = params.mode; dest_params = params.dest; } if (param_mode == rgw_sync_pipe_params::MODE_USER) { if (!param_user) { ldout(cct, 20) << "ERROR: " << __func__ << ": user level sync but user param not set" << dendl; return set_cr_error(-EPERM); } user_perms.emplace(sync_env, *param_user); yield call(user_perms->init_cr()); if (retcode < 0) { ldout(cct, 20) << "ERROR: " << __func__ << ": failed to init user perms manager for uid=" << *param_user << dendl; return set_cr_error(retcode); } /* verify that user is allowed to write at the target bucket */ int r = user_perms->init_bucket(sync_pipe.dest_bucket_info, sync_pipe.dest_bucket_attrs, &dest_bucket_perms); if (r < 0) { ldout(cct, 20) << "ERROR: " << __func__ << ": failed to init bucket perms manager for uid=" << *param_user << " bucket=" << sync_pipe.source_bucket_info.bucket.get_key() << dendl; return set_cr_error(retcode); } if (!dest_bucket_perms.verify_bucket_permission(RGW_PERM_WRITE)) { ldout(cct, 0) << "ERROR: " << __func__ << ": permission check failed: user not allowed to write into bucket 
(bucket=" << sync_pipe.info.dest_bucket.get_key() << ")" << dendl; return -EPERM; } /* init source bucket permission structure */ source_bucket_perms = make_shared<RGWUserPermHandler::Bucket>(); r = user_perms->init_bucket(sync_pipe.source_bucket_info, sync_pipe.source_bucket_attrs, source_bucket_perms.get()); if (r < 0) { ldout(cct, 20) << "ERROR: " << __func__ << ": failed to init bucket perms manager for uid=" << *param_user << " bucket=" << sync_pipe.source_bucket_info.bucket.get_key() << dendl; return set_cr_error(retcode); } } yield { if (!need_retry) { need_retry = make_shared<bool>(); } auto filter = make_shared<RGWFetchObjFilter_Sync>(sync_pipe, source_bucket_perms, std::move(dest_params), need_retry); call(new RGWFetchRemoteObjCR(sync_env->async_rados, sync_env->driver, sc->source_zone, nullopt, sync_pipe.source_bucket_info.bucket, std::nullopt, sync_pipe.dest_bucket_info, key, dest_key, versioned_epoch, true, std::static_pointer_cast<RGWFetchObjFilter>(filter), stat_follow_olh, source_trace_entry, zones_trace, sync_env->counters, dpp)); } if (retcode < 0) { if (*need_retry) { continue; } return set_cr_error(retcode); } return set_cr_done(); } ldout(cct, 0) << "ERROR: " << __func__ << ": Too many retries trying to fetch object, possibly a bug: bucket=" << sync_pipe.source_bucket_info.bucket.get_key() << " key=" << key << dendl; return set_cr_error(-EIO); } return 0; } }; RGWCoroutine *RGWDefaultDataSyncModule::sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, std::optional<uint64_t> versioned_epoch, const rgw_zone_set_entry& source_trace_entry, rgw_zone_set *zones_trace) { bool stat_follow_olh = false; return new RGWObjFetchCR(sc, sync_pipe, key, std::nullopt, versioned_epoch, stat_follow_olh, source_trace_entry, zones_trace); } RGWCoroutine *RGWDefaultDataSyncModule::remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& 
                                                      mtime, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
  auto sync_env = sc->env;
  // plain removal: no owner override, not a delete marker
  return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone,
                            sync_pipe.dest_bucket_info,
                            key, versioned, versioned_epoch,
                            NULL, NULL, false, &mtime, zones_trace);
}

RGWCoroutine *RGWDefaultDataSyncModule::create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime,
                                                             rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace)
{
  auto sync_env = sc->env;
  // delete marker: pass the owner identity and delete_marker=true
  return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone,
                            sync_pipe.dest_bucket_info,
                            key, versioned, versioned_epoch,
                            &owner.id, &owner.display_name, true, &mtime, zones_trace);
}

// Archive-zone data handler: keeps every object version and never deletes
// (see the overrides' definitions further below).
class RGWArchiveDataSyncModule : public RGWDefaultDataSyncModule {
public:
  RGWArchiveDataSyncModule() {}

  RGWCoroutine *sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
                            rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
                            std::optional<uint64_t> versioned_epoch,
                            const rgw_zone_set_entry& source_trace_entry,
                            rgw_zone_set *zones_trace) override;
  RGWCoroutine *remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe,
                              rgw_obj_key& key, real_time& mtime, bool versioned, uint64_t versioned_epoch,
                              rgw_zone_set *zones_trace) override;
  RGWCoroutine *create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe,
                                     rgw_obj_key& key, real_time& mtime, rgw_bucket_entry_owner& owner,
                                     bool versioned, uint64_t versioned_epoch, rgw_zone_set *zones_trace) override;
};

class RGWArchiveSyncModuleInstance : public RGWDefaultSyncModuleInstance {
  RGWArchiveDataSyncModule data_handler;
public:
  RGWArchiveSyncModuleInstance() {}

  RGWDataSyncModule *get_data_handler() override {
    return &data_handler;
  }

  // archive zone uses its own bucket metadata handlers to keep history
  RGWMetadataHandler *alloc_bucket_meta_handler() override {
    return
           RGWArchiveBucketMetaHandlerAllocator::alloc();
  }
  RGWBucketInstanceMetadataHandlerBase *alloc_bucket_instance_meta_handler(rgw::sal::Driver* driver) override {
    return RGWArchiveBucketInstanceMetaHandlerAllocator::alloc(driver);
  }
};

// Factory for the archive sync module; takes no configuration.
int RGWArchiveSyncModule::create_instance(const DoutPrefixProvider *dpp, CephContext *cct,
                                          const JSONFormattable& config,
                                          RGWSyncModuleInstanceRef *instance)
{
  instance->reset(new RGWArchiveSyncModuleInstance());
  return 0;
}

// Archive sync: every incoming object becomes (or keeps) a distinct
// version on the archive zone.
RGWCoroutine *RGWArchiveDataSyncModule::sync_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
                                                    rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key,
                                                    std::optional<uint64_t> versioned_epoch,
                                                    const rgw_zone_set_entry& source_trace_entry,
                                                    rgw_zone_set *zones_trace)
{
  auto sync_env = sc->env;
  ldout(sc->cct, 5) << "SYNC_ARCHIVE: sync_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " versioned_epoch=" << versioned_epoch.value_or(0) << dendl;
  std::optional<rgw_obj_key> dest_key;
  bool stat_follow_olh = false;

  if (versioned_epoch.value_or(0) == 0) { /* force version if not set */
    stat_follow_olh = true;
    versioned_epoch = 0;
    dest_key = key;
    if (key.instance.empty()) {
      sync_env->driver->getRados()->gen_rand_obj_instance_name(&(*dest_key));
    }
  }

  // NOTE(review): when both conditions hold (epoch unset AND instance empty),
  // this generates a second random instance name, overwriting the one
  // generated above -- confirm intended.
  if (key.instance.empty()) {
    dest_key = key;
    sync_env->driver->getRados()->gen_rand_obj_instance_name(&(*dest_key));
  }

  return new RGWObjFetchCR(sc, sync_pipe, key, dest_key, versioned_epoch,
                           stat_follow_olh, source_trace_entry, zones_trace);
}

// Archive zone never removes objects on sync; log and do nothing.
RGWCoroutine *RGWArchiveDataSyncModule::remove_object(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc, rgw_bucket_sync_pipe& sync_pipe,
                                                      rgw_obj_key& key, real_time& mtime, bool versioned, uint64_t versioned_epoch,
                                                      rgw_zone_set *zones_trace)
{
  ldout(sc->cct, 0) << "SYNC_ARCHIVE: remove_object: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " versioned_epoch=" << versioned_epoch << dendl;
  return NULL;
}

RGWCoroutine *RGWArchiveDataSyncModule::create_delete_marker(const DoutPrefixProvider *dpp, RGWDataSyncCtx *sc,
                                                             rgw_bucket_sync_pipe& sync_pipe, rgw_obj_key& key, real_time& mtime,
                                                             rgw_bucket_entry_owner& owner, bool versioned, uint64_t versioned_epoch,
                                                             rgw_zone_set *zones_trace)
{
  ldout(sc->cct, 0) << "SYNC_ARCHIVE: create_delete_marker: b=" << sync_pipe.info.source_bs.bucket << " k=" << key << " mtime=" << mtime << " versioned=" << versioned << " versioned_epoch=" << versioned_epoch << dendl;
  auto sync_env = sc->env;
  // archive zone records the delete as a delete marker (keeps history)
  return new RGWRemoveObjCR(sync_env->dpp, sync_env->async_rados, sync_env->driver, sc->source_zone,
                            sync_pipe.dest_bucket_info,
                            key, versioned, versioned_epoch,
                            &owner.id, &owner.display_name, true, &mtime, zones_trace);
}

// Top-level control coroutine for a data-sync run against one source zone;
// re-allocates the inner RGWDataSyncCR with backoff on any error.
class RGWDataSyncControlCR : public RGWBackoffControlCR
{
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  uint32_t num_shards;

  RGWSyncTraceNodeRef tn;

  static constexpr bool exit_on_error = false; // retry on all errors
public:
  RGWDataSyncControlCR(RGWDataSyncCtx *_sc, uint32_t _num_shards,
                       RGWSyncTraceNodeRef& _tn_parent) : RGWBackoffControlCR(_sc->cct, exit_on_error),
                                                          sc(_sc), sync_env(_sc->env), num_shards(_num_shards) {
    tn = sync_env->sync_tracer->add_node(_tn_parent, "sync");
  }

  RGWCoroutine *alloc_cr() override {
    return new RGWDataSyncCR(sc, num_shards, tn, backoff_ptr());
  }

  // Wake the inner sync CR for one shard; takes a ref while unlocked so the
  // CR cannot be destroyed between unlock and wakeup.
  void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries) {
    ceph::mutex& m = cr_lock();

    m.lock();
    RGWDataSyncCR *cr = static_cast<RGWDataSyncCR *>(get_cr());
    if (!cr) {
      m.unlock();
      return;
    }

    cr->get();
    m.unlock();

    if (cr) { // NOTE(review): cr is known non-null here; this check is redundant
      cr->wakeup(shard_id, entries);
    }

    cr->put();
  }
};

void RGWRemoteDataLog::wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries) {
  std::shared_lock rl{lock};
  if (!data_sync_cr) {
    return;
  }
  data_sync_cr->wakeup(shard_id, entries);
}

int RGWRemoteDataLog::run_sync(const DoutPrefixProvider *dpp, int num_shards)
{
  // construct and start bid manager for data sync fairness
  const auto& control_pool = sc.env->driver->svc()->zone->get_zone_params().control_pool;
  char buf[data_sync_bids_oid.size() + sc.source_zone.id.size() + 16];
  snprintf(buf, sizeof(buf), "%s.%s", data_sync_bids_oid.c_str(), sc.source_zone.id.c_str());
  auto control_obj = rgw_raw_obj{control_pool, string(buf)};

  auto bid_manager = rgw::sync_fairness::create_rados_bid_manager(
      driver, control_obj, num_shards);
  int r = bid_manager->start();
  if (r < 0) {
    return r;
  }
  sc.env->bid_manager = bid_manager.get();

  lock.lock();
  data_sync_cr = new RGWDataSyncControlCR(&sc, num_shards, tn);
  data_sync_cr->get(); // run() will drop a ref, so take another
  lock.unlock();

  r = run(dpp, data_sync_cr);

  lock.lock();
  data_sync_cr->put();
  data_sync_cr = NULL;
  lock.unlock();

  if (r < 0) {
    ldpp_dout(dpp, 0) << "ERROR: failed to run sync" << dendl;
    return r;
  }
  return 0;
}

CephContext *RGWDataSyncStatusManager::get_cct() const
{
  return driver->ctx();
}

// Resolve the source zone's connection and sync module, init the remote log
// handle, and discover the datalog shard count. Returns 0 on success or a
// negative errno (-EIO, -ENOTSUP, -EINVAL, or an init/read error).
int RGWDataSyncStatusManager::init(const DoutPrefixProvider *dpp)
{
  RGWZone *zone_def;

  if (!(zone_def = driver->svc()->zone->find_zone(source_zone))) {
    ldpp_dout(this, 0) << "ERROR: failed to find zone config info for zone=" << source_zone << dendl;
    return -EIO;
  }

  if (!driver->svc()->sync_modules->get_manager()->supports_data_export(zone_def->tier_type)) {
    return -ENOTSUP;
  }

  const RGWZoneParams& zone_params = driver->svc()->zone->get_zone_params();

  if (sync_module == nullptr) {
    sync_module = driver->get_sync_module();
  }

  conn = driver->svc()->zone->get_zone_conn(source_zone);
  if (!conn) {
    ldpp_dout(this, 0) << "connection object to zone " << source_zone << " does not exist" << dendl;
    return -EINVAL;
  }

  error_logger = new RGWSyncErrorLogger(driver, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS);

  int r = source_log.init(source_zone, conn, error_logger, driver->getRados()->get_sync_tracer(),
                          sync_module, counters);
  if (r < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to init remote log, r=" << r << dendl;
    finalize();
    return r;
  }

  rgw_datalog_info datalog_info;
  r = source_log.read_log_info(dpp, &datalog_info);
  if (r < 0) {
    ldpp_dout(this, 5) << "ERROR: master.read_log_info() returned r=" << r << dendl;
    finalize();
    return r;
  }

  num_shards = datalog_info.num_shards;

  for (int i = 0; i < num_shards; i++) {
    shard_objs[i] = rgw_raw_obj(zone_params.log_pool, shard_obj_name(source_zone, i));
  }

  return 0;
}

void RGWDataSyncStatusManager::finalize()
{
  delete error_logger;
  error_logger = nullptr;
}

unsigned RGWDataSyncStatusManager::get_subsys() const
{
  return dout_subsys;
}

// Log prefix: only the first 8 chars of the zone id, for readability.
std::ostream& RGWDataSyncStatusManager::gen_prefix(std::ostream& out) const
{
  auto zone = std::string_view{source_zone.id};
  return out << "data sync zone:" << zone.substr(0, 8) << ' ';
}

// "<prefix>.<zone-id>" -- oid of the overall sync status object.
string RGWDataSyncStatusManager::sync_status_oid(const rgw_zone_id& source_zone)
{
  char buf[datalog_sync_status_oid_prefix.size() + source_zone.id.size() + 16];
  snprintf(buf, sizeof(buf), "%s.%s", datalog_sync_status_oid_prefix.c_str(), source_zone.id.c_str());

  return string(buf);
}

// "<prefix>.<zone-id>.<shard>" -- oid of one shard's sync marker object.
string RGWDataSyncStatusManager::shard_obj_name(const rgw_zone_id& source_zone, int shard_id)
{
  char buf[datalog_sync_status_shard_prefix.size() + source_zone.id.size() + 16];
  snprintf(buf, sizeof(buf), "%s.%s.%d", datalog_sync_status_shard_prefix.c_str(), source_zone.id.c_str(), shard_id);

  return string(buf);
}

// Writes the initial per-bucket-shard sync status object (incremental state,
// marker positioned at the shard's current max marker).
class RGWInitBucketShardSyncStatusCoroutine : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  const rgw_bucket_sync_pair_info& sync_pair;
  const string sync_status_oid;

  rgw_bucket_shard_sync_info& status;
  RGWObjVersionTracker& objv_tracker;
  const BucketIndexShardsManager& marker_mgr;
  bool exclusive;
public:
  RGWInitBucketShardSyncStatusCoroutine(RGWDataSyncCtx *_sc,
                                        const rgw_bucket_sync_pair_info& _sync_pair,
                                        rgw_bucket_shard_sync_info& _status,
                                        uint64_t gen,
                                        const BucketIndexShardsManager& _marker_mgr,
                                        RGWObjVersionTracker& objv_tracker,
                                        bool exclusive)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      sync_pair(_sync_pair),
      sync_status_oid(RGWBucketPipeSyncStatusManager::inc_status_oid(sc->source_zone, _sync_pair, gen)),
      status(_status), objv_tracker(objv_tracker), marker_mgr(_marker_mgr), exclusive(exclusive)
  {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      yield {
        rgw_raw_obj obj(sync_env->svc->zone->get_zone_params().log_pool, sync_status_oid);

        // whether or not to do full sync, incremental sync will follow anyway
        if (sync_env->sync_module->should_full_sync()) {
          const auto max_marker = marker_mgr.get(sync_pair.source_bs.shard_id, "");
          status.inc_marker.position = max_marker;
        }
        status.inc_marker.timestamp = ceph::real_clock::now();
        status.state = rgw_bucket_shard_sync_info::StateIncrementalSync;

        map<string, bufferlist> attrs;
        status.encode_all_attrs(attrs);
        call(new RGWSimpleRadosWriteAttrsCR(dpp, sync_env->driver, obj, attrs, &objv_tracker, exclusive));
      }

      if (retcode < 0) {
        ldout(cct, 20) << "ERROR: init marker position failed. error: " << retcode << dendl;
        return set_cr_error(retcode);
      }
      ldout(cct, 20) << "init marker position: " << status.inc_marker.position << ". written to shard status object: " << sync_status_oid << dendl;
      return set_cr_done();
    }
    return 0;
  }
};

// xattr name prefix for bucket-shard sync status attributes
#define BUCKET_SYNC_ATTR_PREFIX RGW_ATTR_PREFIX "bucket-sync."
// Decode one xattr into *val. On a missing attr or decode failure *val is
// value-initialized and false is returned.
template <class T>
static bool decode_attr(CephContext *cct, map<string, bufferlist>& attrs, const string& attr_name, T *val)
{
  map<string, bufferlist>::iterator iter = attrs.find(attr_name);
  if (iter == attrs.end()) {
    *val = T();
    return false;
  }

  auto biter = iter->second.cbegin();
  try {
    decode(*val, biter);
  } catch (buffer::error& err) {
    ldout(cct, 0) << "ERROR: failed to decode attribute: " << attr_name << dendl;
    return false;
  }
  return true;
}

void rgw_bucket_shard_sync_info::decode_from_attrs(CephContext *cct, map<string, bufferlist>& attrs)
{
  // fall back to the legacy (unprefixed) attr names written by older versions
  if (!decode_attr(cct, attrs, BUCKET_SYNC_ATTR_PREFIX "state", &state)) {
    decode_attr(cct, attrs, "state", &state);
  }
  if (!decode_attr(cct, attrs, BUCKET_SYNC_ATTR_PREFIX "inc_marker", &inc_marker)) {
    decode_attr(cct, attrs, "inc_marker", &inc_marker);
  }
}

void rgw_bucket_shard_sync_info::encode_all_attrs(map<string, bufferlist>& attrs)
{
  encode_state_attr(attrs);
  inc_marker.encode_attr(attrs);
}

void rgw_bucket_shard_sync_info::encode_state_attr(map<string, bufferlist>& attrs)
{
  using ceph::encode;
  encode(state, attrs[BUCKET_SYNC_ATTR_PREFIX "state"]);
}

void rgw_bucket_shard_full_sync_marker::encode_attr(map<string, bufferlist>& attrs)
{
  using ceph::encode;
  encode(*this, attrs[BUCKET_SYNC_ATTR_PREFIX "full_marker"]);
}

void rgw_bucket_shard_inc_sync_marker::encode_attr(map<string, bufferlist>& attrs)
{
  using ceph::encode;
  encode(*this, attrs[BUCKET_SYNC_ATTR_PREFIX "inc_marker"]);
}

// Reads a bucket-shard sync status object's xattrs into *status.
class RGWReadBucketPipeSyncStatusCoroutine : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  string oid;
  rgw_bucket_shard_sync_info *status;
  RGWObjVersionTracker* objv_tracker;

  map<string, bufferlist> attrs;
public:
  RGWReadBucketPipeSyncStatusCoroutine(RGWDataSyncCtx *_sc,
                                       const rgw_bucket_sync_pair_info& sync_pair,
                                       rgw_bucket_shard_sync_info *_status,
                                       RGWObjVersionTracker* objv_tracker,
                                       uint64_t gen)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      oid(RGWBucketPipeSyncStatusManager::inc_status_oid(sc->source_zone,
                                                         sync_pair, gen)),
      status(_status), objv_tracker(objv_tracker)
  {}
  int operate(const DoutPrefixProvider *dpp) override;
};

int RGWReadBucketPipeSyncStatusCoroutine::operate(const DoutPrefixProvider *dpp)
{
  reenter(this) {
    yield call(new RGWSimpleRadosReadAttrsCR(dpp, sync_env->driver,
                                             rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, oid),
                                             &attrs, true, objv_tracker));
    if (retcode == -ENOENT) {
      // missing status object means "not started": report a default state
      *status = rgw_bucket_shard_sync_info();
      return set_cr_done();
    }
    if (retcode < 0) {
      ldpp_dout(dpp, 0) << "ERROR: failed to call fetch bucket shard info oid=" << oid << " ret=" << retcode << dendl;
      return set_cr_error(retcode);
    }
    status->decode_from_attrs(sync_env->cct, attrs);
    return set_cr_done();
  }
  return 0;
}

// wrap ReadSyncStatus and set a flag if it's not in incremental
class CheckBucketShardStatusIsIncremental : public RGWReadBucketPipeSyncStatusCoroutine {
  bool* result;
  rgw_bucket_shard_sync_info status;
public:
  CheckBucketShardStatusIsIncremental(RGWDataSyncCtx* sc,
                                      const rgw_bucket_sync_pair_info& sync_pair,
                                      bool* result)
    : RGWReadBucketPipeSyncStatusCoroutine(sc, sync_pair, &status, nullptr, 0 /*no gen in compat mode*/),
      result(result)
  {}

  int operate(const DoutPrefixProvider *dpp) override {
    int r = RGWReadBucketPipeSyncStatusCoroutine::operate(dpp);
    if (state == RGWCoroutine_Done &&
        status.state != rgw_bucket_shard_sync_info::StateIncrementalSync) {
      *result = false;
    }
    return r;
  }
};

class CheckAllBucketShardStatusIsIncremental : public RGWShardCollectCR {
  // start with 1 shard, and only spawn more if we detect an existing shard.
// this makes the backward compatilibility check far less expensive in the // general case where no shards exist static constexpr int initial_concurrent_shards = 1; static constexpr int max_concurrent_shards = 16; RGWDataSyncCtx* sc; rgw_bucket_sync_pair_info sync_pair; const int num_shards; bool* result; int shard = 0; public: CheckAllBucketShardStatusIsIncremental(RGWDataSyncCtx* sc, const rgw_bucket_sync_pair_info& sync_pair, int num_shards, bool* result) : RGWShardCollectCR(sc->cct, initial_concurrent_shards), sc(sc), sync_pair(sync_pair), num_shards(num_shards), result(result) {} bool spawn_next() override { // stop spawning if we saw any errors or non-incremental shards if (shard >= num_shards || status < 0 || !*result) { return false; } sync_pair.source_bs.shard_id = shard++; spawn(new CheckBucketShardStatusIsIncremental(sc, sync_pair, result), false); return true; } private: int handle_result(int r) override { if (r < 0) { ldout(cct, 4) << "failed to read bucket shard status: " << cpp_strerror(r) << dendl; } else if (shard == 0) { // enable concurrency once the first shard succeeds max_concurrent = max_concurrent_shards; } return r; } }; // wrap InitBucketShardSyncStatus with local storage for 'status' and 'objv' // and a loop to retry on racing writes class InitBucketShardStatusCR : public RGWCoroutine { RGWDataSyncCtx* sc; rgw_bucket_sync_pair_info pair; rgw_bucket_shard_sync_info status; RGWObjVersionTracker objv; const uint64_t gen; const BucketIndexShardsManager& marker_mgr; public: InitBucketShardStatusCR(RGWDataSyncCtx* sc, const rgw_bucket_sync_pair_info& pair, uint64_t gen, const BucketIndexShardsManager& marker_mgr) : RGWCoroutine(sc->cct), sc(sc), pair(pair), gen(gen), marker_mgr(marker_mgr) {} int operate(const DoutPrefixProvider *dpp) { reenter(this) { // non exclusive create with empty status objv.generate_new_write_ver(cct); yield call(new RGWInitBucketShardSyncStatusCoroutine(sc, pair, status, gen, marker_mgr, objv, false)); if (retcode < 0) 
{
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};

// Fan out InitBucketShardStatusCR over all shards, bounded concurrency.
class InitBucketShardStatusCollectCR : public RGWShardCollectCR {
  static constexpr int max_concurrent_shards = 16;
  RGWDataSyncCtx* sc;
  rgw_bucket_sync_pair_info sync_pair;
  const uint64_t gen;
  const BucketIndexShardsManager& marker_mgr;

  const int num_shards;
  int shard = 0;   // next shard id to spawn

  int handle_result(int r) override {
    if (r < 0) {
      ldout(cct, 4) << "failed to init bucket shard status: "
          << cpp_strerror(r) << dendl;
    }
    return r;
  }
 public:
  InitBucketShardStatusCollectCR(RGWDataSyncCtx* sc,
                                 const rgw_bucket_sync_pair_info& sync_pair,
                                 uint64_t gen,
                                 const BucketIndexShardsManager& marker_mgr,
                                 int num_shards)
    : RGWShardCollectCR(sc->cct, max_concurrent_shards),
      sc(sc), sync_pair(sync_pair), gen(gen), marker_mgr(marker_mgr),
      num_shards(num_shards)
  {}

  bool spawn_next() override {
    if (shard >= num_shards || status < 0) { // stop spawning on any errors
      return false;
    }
    sync_pair.source_bs.shard_id = shard++;
    spawn(new InitBucketShardStatusCR(sc, sync_pair, gen, marker_mgr), false);
    return true;
  }
};

// Remove one shard's incremental sync status object; ENOENT is not an error.
class RemoveBucketShardStatusCR : public RGWCoroutine {
  RGWDataSyncCtx* const sc;
  RGWDataSyncEnv* const sync_env;

  rgw_bucket_sync_pair_info sync_pair;
  rgw_raw_obj obj;
  RGWObjVersionTracker objv;

public:
  RemoveBucketShardStatusCR(RGWDataSyncCtx* sc,
                            const rgw_bucket_sync_pair_info& sync_pair, uint64_t gen)
    : RGWCoroutine(sc->cct), sc(sc), sync_env(sc->env),
      sync_pair(sync_pair),
      obj(sync_env->svc->zone->get_zone_params().log_pool,
          RGWBucketPipeSyncStatusManager::inc_status_oid(sc->source_zone, sync_pair, gen))
  {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      yield call(new RGWRadosRemoveCR(sync_env->driver, obj, &objv));
      if (retcode < 0 && retcode != -ENOENT) {
        ldout(cct, 20) << "ERROR: failed to remove bucket shard status for: " << sync_pair << ". with error: " << retcode << dendl;
        return set_cr_error(retcode);
      }
      ldout(cct, 20) << "removed bucket shard status object: " << obj.oid << dendl;
      return set_cr_done();
    }
    return 0;
  }
};

// Fan out RemoveBucketShardStatusCR over all shards, bounded concurrency.
class RemoveBucketShardStatusCollectCR : public RGWShardCollectCR {
  static constexpr int max_concurrent_shards = 16;
  RGWDataSyncCtx* const sc;
  RGWDataSyncEnv* const sync_env;

  rgw_bucket_sync_pair_info sync_pair;
  const uint64_t gen;

  const int num_shards;
  int shard = 0;   // next shard id to spawn

  int handle_result(int r) override {
    if (r < 0) {
      ldout(cct, 4) << "failed to remove bucket shard status object: "
          << cpp_strerror(r) << dendl;
    }
    return r;
  }
 public:
  RemoveBucketShardStatusCollectCR(RGWDataSyncCtx* sc,
                                   const rgw_bucket_sync_pair_info& sync_pair,
                                   uint64_t gen, int num_shards)
    : RGWShardCollectCR(sc->cct, max_concurrent_shards),
      sc(sc), sync_env(sc->env), sync_pair(sync_pair), gen(gen),
      num_shards(num_shards)
  {}

  bool spawn_next() override {
    if (shard >= num_shards) {
      return false;
    }
    sync_pair.source_bs.shard_id = shard++;
    spawn(new RemoveBucketShardStatusCR(sc, sync_pair, gen), false);
    return true;
  }
};

// Initialize the per-bucket full-sync status object, deciding whether the
// bucket can skip full sync and resume incremental sync (compat path for
// generation-zero logs whose shards were already in incremental sync).
class InitBucketFullSyncStatusCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  const rgw_bucket_sync_pair_info& sync_pair;
  const rgw_raw_obj& status_obj;
  rgw_bucket_sync_status& status;       // out: state written back to rados
  RGWObjVersionTracker& objv;
  const RGWBucketInfo& source_info;
  const bool check_compat;              // whether to probe legacy per-shard status
  const rgw_bucket_index_marker_info& info;
  BucketIndexShardsManager marker_mgr;
  bool all_incremental = true;
  bool no_zero = false;

public:
  InitBucketFullSyncStatusCR(RGWDataSyncCtx* sc,
                             const rgw_bucket_sync_pair_info& sync_pair,
                             const rgw_raw_obj& status_obj,
                             rgw_bucket_sync_status& status,
                             RGWObjVersionTracker& objv,
                             const RGWBucketInfo& source_info,
                             bool check_compat,
                             const rgw_bucket_index_marker_info& info)
    : RGWCoroutine(sc->cct), sc(sc), sync_env(sc->env),
      sync_pair(sync_pair), status_obj(status_obj),
      status(status), objv(objv), source_info(source_info),
      check_compat(check_compat), info(info)
  {}

  int
operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      // parse the remote's max bilog markers for every shard
      retcode = marker_mgr.from_string(info.max_marker, -1);
      if (retcode < 0) {
        lderr(cct) << "failed to parse bilog shard markers: "
            << cpp_strerror(retcode) << dendl;
        return set_cr_error(retcode);
      }

      status.state = BucketSyncState::Init;

      if (info.oldest_gen == 0) {
        if (check_compat) {
          // use shard count from our log gen=0
          // try to convert existing per-shard incremental status for backward compatibility
          if (source_info.layout.logs.empty() ||
              source_info.layout.logs.front().gen > 0) {
            ldpp_dout(dpp, 20) << "no generation zero when checking compatibility" << dendl;
            no_zero = true;
          } else if (auto& log = source_info.layout.logs.front();
                     log.layout.type != rgw::BucketLogType::InIndex) {
            ldpp_dout(dpp, 20) << "unrecognized log layout type when checking compatibility " << log.layout.type << dendl;
            no_zero = true;
          }
          if (!no_zero) {
            yield {
              const int num_shards0 = rgw::num_shards(
                  source_info.layout.logs.front().layout.in_index.layout);
              call(new CheckAllBucketShardStatusIsIncremental(sc, sync_pair,
                                                              num_shards0,
                                                              &all_incremental));
            }
            if (retcode < 0) {
              return set_cr_error(retcode);
            }
            if (all_incremental) {
              // we can use existing status and resume incremental sync
              status.state = BucketSyncState::Incremental;
            }
          } else {
            all_incremental = false;
          }
        }
      }

      if (status.state != BucketSyncState::Incremental) {
        // initialize all shard sync status. this will populate the log marker
        // positions where incremental sync will resume after full sync
        yield {
          const int num_shards = marker_mgr.get().size();
          call(new InitBucketShardStatusCollectCR(sc, sync_pair, info.latest_gen, marker_mgr, num_shards));
        }
        if (retcode < 0) {
          ldout(cct, 20) << "failed to init bucket shard status: "
              << cpp_strerror(retcode) << dendl;
          return set_cr_error(retcode);
        }

        // sync modules (e.g. log/archive) may opt out of full sync entirely
        if (sync_env->sync_module->should_full_sync()) {
          status.state = BucketSyncState::Full;
        } else {
          status.state = BucketSyncState::Incremental;
        }
      }

      status.shards_done_with_gen.resize(marker_mgr.get().size());
      status.incremental_gen = info.latest_gen;

      ldout(cct, 20) << "writing bucket sync status during init. state=" << status.state << ". marker=" << status.full.position << dendl;

      // write bucket sync status
      using CR = RGWSimpleRadosWriteCR<rgw_bucket_sync_status>;
      yield call(new CR(dpp, sync_env->driver,
                        status_obj, status, &objv, false));
      if (retcode < 0) {
        ldout(cct, 20) << "failed to write bucket shard status: "
            << cpp_strerror(retcode) << dendl;
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};

#define OMAP_READ_MAX_ENTRIES 10
// Scan this datalog shard's ".retry" omap object and report the bucket
// shards that previously failed and are pending recovery.
class RGWReadRecoveringBucketShardsCoroutine : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  rgw::sal::RadosStore* driver;

  const int shard_id;
  int max_entries;                  // overall cap on entries returned

  set<string>& recovering_buckets;  // out: formatted bucket-shard keys
  string marker;                    // omap listing resume position
  string error_oid;                 // "<shard status oid>.retry"

  RGWRadosGetOmapKeysCR::ResultPtr omapkeys;
  set<string> error_entries;
  int max_omap_entries;             // page size per omap listing call
  int count;

public:
  RGWReadRecoveringBucketShardsCoroutine(RGWDataSyncCtx *_sc, const int _shard_id,
                                         set<string>& _recovering_buckets, const int _max_entries)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      driver(sync_env->driver), shard_id(_shard_id), max_entries(_max_entries),
      recovering_buckets(_recovering_buckets), max_omap_entries(OMAP_READ_MAX_ENTRIES)
  {
    error_oid = RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id) + ".retry";
  }

  int operate(const DoutPrefixProvider
*dpp) override; }; int RGWReadRecoveringBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp) { reenter(this){ //read recovering bucket shards count = 0; do { omapkeys = std::make_shared<RGWRadosGetOmapKeysCR::Result>(); yield call(new RGWRadosGetOmapKeysCR(driver, rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, error_oid), marker, max_omap_entries, omapkeys)); if (retcode == -ENOENT) { break; } if (retcode < 0) { ldpp_dout(dpp, 0) << "failed to read recovering bucket shards with " << cpp_strerror(retcode) << dendl; return set_cr_error(retcode); } error_entries = std::move(omapkeys->entries); if (error_entries.empty()) { break; } count += error_entries.size(); marker = *error_entries.rbegin(); for (const std::string& key : error_entries) { rgw_bucket_shard bs; std::optional<uint64_t> gen; if (int r = rgw::error_repo::decode_key(key, bs, gen); r < 0) { // insert the key as-is recovering_buckets.insert(std::move(key)); } else if (gen) { recovering_buckets.insert(fmt::format("{}[{}]", bucket_shard_str{bs}, *gen)); } else { recovering_buckets.insert(fmt::format("{}[full]", bucket_shard_str{bs})); } } } while (omapkeys->more && count < max_entries); return set_cr_done(); } return 0; } class RGWReadPendingBucketShardsCoroutine : public RGWCoroutine { RGWDataSyncCtx *sc; RGWDataSyncEnv *sync_env; rgw::sal::RadosStore* driver; const int shard_id; int max_entries; set<string>& pending_buckets; string marker; string status_oid; rgw_data_sync_marker* sync_marker; int count; std::string next_marker; vector<rgw_data_change_log_entry> log_entries; bool truncated; public: RGWReadPendingBucketShardsCoroutine(RGWDataSyncCtx *_sc, const int _shard_id, set<string>& _pending_buckets, rgw_data_sync_marker* _sync_marker, const int _max_entries) : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), driver(sync_env->driver), shard_id(_shard_id), max_entries(_max_entries), pending_buckets(_pending_buckets), sync_marker(_sync_marker) { status_oid = 
RGWDataSyncStatusManager::shard_obj_name(sc->source_zone, shard_id);
  }

  int operate(const DoutPrefixProvider *dpp) override;
};

int RGWReadPendingBucketShardsCoroutine::operate(const DoutPrefixProvider *dpp)
{
  reenter(this){
    //read sync status marker
    using CR = RGWSimpleRadosReadCR<rgw_data_sync_marker>;
    yield call(new CR(dpp, sync_env->driver,
                      rgw_raw_obj(sync_env->svc->zone->get_zone_params().log_pool, status_oid),
                      sync_marker));
    if (retcode < 0) {
      ldpp_dout(dpp, 0) << "failed to read sync status marker with "
        << cpp_strerror(retcode) << dendl;
      return set_cr_error(retcode);
    }

    //read pending bucket shards
    marker = sync_marker->marker;
    count = 0;
    do{
      // page through the remote datalog shard from the local marker onward
      yield call(new RGWReadRemoteDataLogShardCR(sc, shard_id, marker,
                                                 &next_marker, &log_entries, &truncated));

      if (retcode == -ENOENT) {
        break;
      }

      if (retcode < 0) {
        ldpp_dout(dpp, 0) << "failed to read remote data log info with "
          << cpp_strerror(retcode) << dendl;
        return set_cr_error(retcode);
      }

      if (log_entries.empty()) {
        break;
      }

      count += log_entries.size();
      for (const auto& entry : log_entries) {
        pending_buckets.insert(entry.entry.key);
      }
    }while(truncated && count < max_entries);

    return set_cr_done();
  }
  return 0;
}

// Admin/status entry point: run the "recovering" and "pending" scans for one
// datalog shard in a private coroutine manager (cannot share the run_sync()
// manager). Both scans run concurrently on separate stacks.
int RGWRemoteDataLog::read_shard_status(const DoutPrefixProvider *dpp, int shard_id, set<string>& pending_buckets, set<string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries)
{
  // cannot run concurrently with run_sync(), so run in a separate manager
  RGWCoroutinesManager crs(driver->ctx(), driver->getRados()->get_cr_registry());
  RGWHTTPManager http_manager(driver->ctx(), crs.get_completion_mgr());
  int ret = http_manager.start();
  if (ret < 0) {
    ldpp_dout(dpp, 0) << "failed in http_manager.start() ret=" << ret << dendl;
    return ret;
  }
  RGWDataSyncEnv sync_env_local = sync_env;
  sync_env_local.http_manager = &http_manager;
  RGWDataSyncCtx sc_local = sc;
  sc_local.env = &sync_env_local;
  list<RGWCoroutinesStack *> stacks;
  // NOTE(review): stacks are raw 'new' — presumably ownership passes to the
  // coroutine manager on run(); confirm crs.run() releases them
  RGWCoroutinesStack* recovering_stack = new RGWCoroutinesStack(driver->ctx(), &crs);
  recovering_stack->call(new RGWReadRecoveringBucketShardsCoroutine(&sc_local, shard_id, recovering_buckets, max_entries));
  stacks.push_back(recovering_stack);
  RGWCoroutinesStack* pending_stack = new RGWCoroutinesStack(driver->ctx(), &crs);
  pending_stack->call(new RGWReadPendingBucketShardsCoroutine(&sc_local, shard_id, pending_buckets, sync_marker, max_entries));
  stacks.push_back(pending_stack);
  ret = crs.run(dpp, stacks);
  http_manager.stop();
  return ret;
}

CephContext *RGWBucketPipeSyncStatusManager::get_cct() const
{
  return driver->ctx();
}

// JSON decode of the "Owner" element of a remote bucket listing.
void rgw_bucket_entry_owner::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("ID", id, obj);
  JSONDecoder::decode_json("DisplayName", display_name, obj);
}

// One entry of a remote (rgwx) versioned bucket listing.
struct bucket_list_entry {
  bool delete_marker;
  rgw_obj_key key;
  bool is_latest;
  real_time mtime;
  string etag;
  uint64_t size;
  string storage_class;
  rgw_bucket_entry_owner owner;
  uint64_t versioned_epoch;
  string rgw_tag;

  bucket_list_entry() : delete_marker(false), is_latest(false), size(0), versioned_epoch(0) {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("IsDeleteMarker", delete_marker, obj);
    JSONDecoder::decode_json("Key", key.name, obj);
    JSONDecoder::decode_json("VersionId", key.instance, obj);
    JSONDecoder::decode_json("IsLatest", is_latest, obj);
    string mtime_str;
    JSONDecoder::decode_json("RgwxMtime", mtime_str, obj);

    struct tm t;
    uint32_t nsec;
    if (parse_iso8601(mtime_str.c_str(), &t, &nsec)) {
      ceph_timespec ts;
      ts.tv_sec = (uint64_t)internal_timegm(&t);
      ts.tv_nsec = nsec;
      mtime = real_clock::from_ceph_timespec(ts);
    }
    JSONDecoder::decode_json("ETag", etag, obj);
    JSONDecoder::decode_json("Size", size, obj);
    JSONDecoder::decode_json("StorageClass", storage_class, obj);
    JSONDecoder::decode_json("Owner", owner, obj);
    JSONDecoder::decode_json("VersionedEpoch", versioned_epoch, obj);
    JSONDecoder::decode_json("RgwxTag", rgw_tag, obj);
    // "null" instance with no versioned epoch means an unversioned object
    if (key.instance == "null" && !versioned_epoch) {
      key.instance.clear();
    }
  }

  RGWModifyOp
get_modify_op() const {
    // map the listing entry onto the bilog operation it represents
    if (delete_marker) {
      return CLS_RGW_OP_LINK_OLH_DM;
    } else if (!key.instance.empty() && key.instance != "null") {
      return CLS_RGW_OP_LINK_OLH;
    } else {
      return CLS_RGW_OP_ADD;
    }
  }
};

// JSON shape of one page of a remote versioned bucket listing.
struct bucket_list_result {
  string name;
  string prefix;
  string key_marker;
  string version_id_marker;
  int max_keys;
  bool is_truncated;
  list<bucket_list_entry> entries;

  bucket_list_result() : max_keys(0), is_truncated(false) {}

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("Name", name, obj);
    JSONDecoder::decode_json("Prefix", prefix, obj);
    JSONDecoder::decode_json("KeyMarker", key_marker, obj);
    JSONDecoder::decode_json("VersionIdMarker", version_id_marker, obj);
    JSONDecoder::decode_json("MaxKeys", max_keys, obj);
    JSONDecoder::decode_json("IsTruncated", is_truncated, obj);
    JSONDecoder::decode_json("Entries", entries, obj);
  }
};

// Fetch one page of the source bucket's versioned listing over REST,
// starting at marker_position.
class RGWListRemoteBucketCR: public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  const rgw_bucket_shard& bs;
  rgw_obj_key marker_position;   // key/version-id resume point

  bucket_list_result *result;    // out: decoded listing page

public:
  RGWListRemoteBucketCR(RGWDataSyncCtx *_sc, const rgw_bucket_shard& bs,
                        rgw_obj_key& _marker_position, bucket_list_result *_result)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env), bs(bs),
      marker_position(_marker_position), result(_result) {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      yield {
        rgw_http_param_pair pairs[] = { { "versions" , NULL },
                                        { "format" , "json" },
                                        { "objs-container" , "true" },
                                        { "key-marker" , marker_position.name.c_str() },
                                        { "version-id-marker" , marker_position.instance.c_str() },
                                        { NULL, NULL } };
        string p = string("/") + bs.bucket.get_key(':', 0);
        call(new RGWReadRESTResourceCR<bucket_list_result>(sync_env->cct, sc->conn, sync_env->http_manager, p, pairs, result));
      }
      if (retcode < 0) {
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};

// "next_log" element of a bilog listing: where the next log generation lives.
struct next_bilog_result {
  uint64_t generation = 0;
  int num_shards = 0;

  void decode_json(JSONObj *obj) {
JSONDecoder::decode_json("generation", generation, obj);
    JSONDecoder::decode_json("num_shards", num_shards, obj);
  }
};

// JSON shape of a bilog listing page (format-ver=2).
struct bilog_list_result {
  list<rgw_bi_log_entry> entries;
  bool truncated{false};
  std::optional<next_bilog_result> next_log;  // present when this gen is exhausted

  void decode_json(JSONObj *obj) {
    JSONDecoder::decode_json("entries", entries, obj);
    JSONDecoder::decode_json("truncated", truncated, obj);
    JSONDecoder::decode_json("next_log", next_log, obj);
  }
};

// Fetch one page of the source bucket-index log (bilog) over the REST
// admin/log endpoint, for a given shard and log generation.
class RGWListBucketIndexLogCR: public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  const string instance_key;       // bucket-instance key identifying the shard
  string marker;                   // bilog resume position

  bilog_list_result *result;       // out: decoded page
  std::optional<PerfGuard> timer;  // poll-latency perf counter scope
  uint64_t generation;
  // NOTE(review): this in-class initializer reads 'generation'; it is safe
  // only because 'generation' is declared first and therefore initialized by
  // the ctor init list before gen_str — fragile if members are reordered
  std::string gen_str = std::to_string(generation);
  // NOTE(review): format_ver appears unused — the request hardcodes
  // "format-ver"="2" below
  uint32_t format_ver{1};

public:
  RGWListBucketIndexLogCR(RGWDataSyncCtx *_sc, const rgw_bucket_shard& bs, string& _marker,
                          uint64_t _generation, bilog_list_result *_result)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      instance_key(bs.get_key()), marker(_marker), result(_result), generation(_generation) {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      if (sync_env->counters) {
        timer.emplace(sync_env->counters, sync_counters::l_poll);
      }
      yield {
        rgw_http_param_pair pairs[] = { { "bucket-instance", instance_key.c_str() },
                                        { "format" , "json" },
                                        { "marker" , marker.c_str() },
                                        { "type", "bucket-index" },
                                        { "generation", gen_str.c_str() },
                                        { "format-ver", "2"},
                                        { NULL, NULL } };

        call(new RGWReadRESTResourceCR<bilog_list_result>(sync_env->cct, sc->conn, sync_env->http_manager,
                                                          "/admin/log", pairs, result));
      }
      timer.reset();
      if (retcode < 0) {
        if (sync_env->counters) {
          sync_env->counters->inc(sync_counters::l_poll_err);
        }
        return set_cr_error(retcode);
      }
      return set_cr_done();
    }
    return 0;
  }
};

#define BUCKET_SYNC_UPDATE_MARKER_WINDOW 10

// Marker tracker for bucket full sync: persists the full-sync position and
// entry count into the bucket sync status object.
class RGWBucketFullSyncMarkerTrack : public RGWSyncShardMarkerTrack<rgw_obj_key, rgw_obj_key> {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  const rgw_raw_obj& status_obj;
  rgw_bucket_sync_status& sync_status;
  RGWSyncTraceNodeRef tn;
  RGWObjVersionTracker& objv_tracker;

public:
  RGWBucketFullSyncMarkerTrack(RGWDataSyncCtx *_sc,
                               const rgw_raw_obj& status_obj,
                               rgw_bucket_sync_status& sync_status,
                               RGWSyncTraceNodeRef tn,
                               RGWObjVersionTracker& objv_tracker)
    : RGWSyncShardMarkerTrack(BUCKET_SYNC_UPDATE_MARKER_WINDOW),
      sc(_sc), sync_env(_sc->env), status_obj(status_obj),
      sync_status(sync_status), tn(std::move(tn)), objv_tracker(objv_tracker)
  {}

  RGWCoroutine *store_marker(const rgw_obj_key& new_marker, uint64_t index_pos, const real_time& timestamp) override {
    sync_status.full.position = new_marker;
    sync_status.full.count = index_pos;

    tn->log(20, SSTR("updating marker oid=" << status_obj.oid << " marker=" << new_marker));
    return new RGWSimpleRadosWriteCR<rgw_bucket_sync_status>(
        sync_env->dpp, sync_env->driver, status_obj, sync_status, &objv_tracker);
  }

  RGWOrderCallCR *allocate_order_control_cr() override {
    return new RGWLastCallerWinsCR(sync_env->cct);
  }
};

// write the incremental sync status and update 'stable_timestamp' on success
class RGWWriteBucketShardIncSyncStatus : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  rgw_raw_obj obj;
  rgw_bucket_shard_inc_sync_marker sync_marker;
  ceph::real_time* stable_timestamp;   // optional out; may be null
  RGWObjVersionTracker& objv_tracker;
  std::map<std::string, bufferlist> attrs;
 public:
  RGWWriteBucketShardIncSyncStatus(RGWDataSyncEnv *sync_env,
                                   const rgw_raw_obj& obj,
                                   const rgw_bucket_shard_inc_sync_marker& sync_marker,
                                   ceph::real_time* stable_timestamp,
                                   RGWObjVersionTracker& objv_tracker)
    : RGWCoroutine(sync_env->cct), sync_env(sync_env), obj(obj),
      sync_marker(sync_marker), stable_timestamp(stable_timestamp),
      objv_tracker(objv_tracker)
  {}
  int operate(const DoutPrefixProvider *dpp) {
    reenter(this) {
      sync_marker.encode_attr(attrs);
      yield call(new RGWSimpleRadosWriteAttrsCR(sync_env->dpp, sync_env->driver,
                                                obj, attrs, &objv_tracker));
      if (retcode < 0) {
        return set_cr_error(retcode);
      }
      if (stable_timestamp) {
        *stable_timestamp = sync_marker.timestamp;
      }
      return
set_cr_done();
    }
    return 0;
  }
};

// Marker tracker for bucket incremental sync. Besides persisting the shard's
// position/timestamp, it serializes conflicting operations: at most one
// in-flight op per object key, and OLH (versioning) ops on the same object
// name are never run concurrently.
class RGWBucketIncSyncShardMarkerTrack : public RGWSyncShardMarkerTrack<string, rgw_obj_key> {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  rgw_raw_obj obj;
  rgw_bucket_shard_inc_sync_marker sync_marker;

  map<rgw_obj_key, string> key_to_marker;

  struct operation {
    rgw_obj_key key;
    bool is_olh;
  };
  map<string, operation> marker_to_op;
  std::set<std::string> pending_olh; // object names with pending olh operations

  RGWSyncTraceNodeRef tn;
  RGWObjVersionTracker& objv_tracker;
  ceph::real_time* stable_timestamp;

  // drop the bookkeeping for a finished marker and release its key/olh locks
  void handle_finish(const string& marker) override {
    auto iter = marker_to_op.find(marker);
    if (iter == marker_to_op.end()) {
      return;
    }
    auto& op = iter->second;
    key_to_marker.erase(op.key);
    reset_need_retry(op.key);
    if (op.is_olh) {
      pending_olh.erase(op.key.name);
    }
    marker_to_op.erase(iter);
  }

public:
  RGWBucketIncSyncShardMarkerTrack(RGWDataSyncCtx *_sc,
                                   const string& _marker_oid,
                                   const rgw_bucket_shard_inc_sync_marker& _marker,
                                   RGWSyncTraceNodeRef tn,
                                   RGWObjVersionTracker& objv_tracker,
                                   ceph::real_time* stable_timestamp)
    : RGWSyncShardMarkerTrack(BUCKET_SYNC_UPDATE_MARKER_WINDOW),
      sc(_sc), sync_env(_sc->env),
      obj(sync_env->svc->zone->get_zone_params().log_pool, _marker_oid),
      sync_marker(_marker), tn(std::move(tn)), objv_tracker(objv_tracker),
      stable_timestamp(stable_timestamp)
  {}

  const rgw_raw_obj& get_obj() const { return obj; }

  RGWCoroutine* store_marker(const string& new_marker, uint64_t index_pos, const real_time& timestamp) override {
    sync_marker.position = new_marker;
    sync_marker.timestamp = timestamp;

    tn->log(20, SSTR("updating marker marker_oid=" << obj.oid << " marker=" << new_marker << " timestamp=" << timestamp));
    return new RGWWriteBucketShardIncSyncStatus(sync_env, obj, sync_marker,
                                                stable_timestamp, objv_tracker);
  }

  /*
   * create index from key -> <op, marker>, and from marker -> key
   * this is useful so that we can ensure that we only have one
   * entry for any key that is used. This is needed when doing
   * incremental sync of data, and we don't want to run multiple
   * concurrent sync operations for the same bucket shard
   * Also, we should make sure that we don't run concurrent operations on the same key with
   * different ops.
   */
  bool index_key_to_marker(const rgw_obj_key& key, const string& marker, bool is_olh) {
    auto result = key_to_marker.emplace(key, marker);
    if (!result.second) { // exists
      set_need_retry(key);
      return false;
    }
    marker_to_op[marker] = operation{key, is_olh};
    if (is_olh) {
      // prevent other olh ops from starting on this object name
      pending_olh.insert(key.name);
    }
    return true;
  }

  bool can_do_op(const rgw_obj_key& key, bool is_olh) {
    // serialize olh ops on the same object name
    if (is_olh && pending_olh.count(key.name)) {
      tn->log(20, SSTR("sync of " << key << " waiting for pending olh op"));
      return false;
    }
    return (key_to_marker.find(key) == key_to_marker.end());
  }

  RGWOrderCallCR *allocate_order_control_cr() override {
    return new RGWLastCallerWinsCR(sync_env->cct);
  }
};

// errors that should not be recorded in the error repo (the object is gone
// or access is denied — retrying cannot help)
static bool ignore_sync_error(int err) {
  switch (err) {
    case -ENOENT:
    case -EPERM:
      return true;
    default:
      break;
  }
  return false;
}

// Sync a single bilog entry (one object change) from the source zone:
// dispatches to the sync module's sync_object/remove_object/
// create_delete_marker, retries while the marker tracker demands it, and
// logs failures to the error repo.
template <class T, class K>
class RGWBucketSyncSingleEntryCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;

  rgw_bucket_sync_pipe& sync_pipe;
  rgw_bucket_shard& bs;

  rgw_obj_key key;
  bool versioned;
  std::optional<uint64_t> versioned_epoch;
  rgw_bucket_entry_owner owner;
  real_time timestamp;
  RGWModifyOp op;
  RGWPendingState op_state;     // only CLS_RGW_STATE_COMPLETE entries are synced

  T entry_marker;
  RGWSyncShardMarkerTrack<T, K> *marker_tracker;

  int sync_status;

  stringstream error_ss;        // non-empty => log to the error repo

  bool error_injection;         // rgw_sync_data_inject_err_probability > 0

  RGWDataSyncModule *data_sync_module;

  rgw_zone_set_entry source_trace_entry;
  rgw_zone_set zones_trace;     // zones already visited, to break sync loops

  RGWSyncTraceNodeRef tn;
  std::string zone_name;        // source zone display name for pretty_print

public:
  RGWBucketSyncSingleEntryCR(RGWDataSyncCtx *_sc,
                             rgw_bucket_sync_pipe& _sync_pipe,
                             const rgw_obj_key& _key, bool _versioned,
                             std::optional<uint64_t> _versioned_epoch,
                             real_time& _timestamp,
                             const
rgw_bucket_entry_owner& _owner,
                             RGWModifyOp _op, RGWPendingState _op_state,
                             const T& _entry_marker,
                             RGWSyncShardMarkerTrack<T, K> *_marker_tracker,
                             rgw_zone_set& _zones_trace,
                             RGWSyncTraceNodeRef& _tn_parent)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      sync_pipe(_sync_pipe), bs(_sync_pipe.info.source_bs),
      key(_key), versioned(_versioned), versioned_epoch(_versioned_epoch),
      owner(_owner), timestamp(_timestamp), op(_op), op_state(_op_state),
      entry_marker(_entry_marker), marker_tracker(_marker_tracker),
      sync_status(0) {
    stringstream ss;
    ss << bucket_shard_str{bs} << "/" << key << "[" << versioned_epoch.value_or(0) << "]";
    set_description() << "bucket sync single entry (source_zone=" << sc->source_zone << ") b=" << ss.str() << " log_entry=" << entry_marker << " op=" << (int)op << " op_state=" << (int)op_state;
    set_status("init");

    tn = sync_env->sync_tracer->add_node(_tn_parent, "entry", SSTR(key));

    tn->log(20, SSTR("bucket sync single entry (source_zone=" << sc->source_zone << ") b=" << ss.str() << " log_entry=" << entry_marker << " op=" << (int)op << " op_state=" << (int)op_state));
    error_injection = (sync_env->cct->_conf->rgw_sync_data_inject_err_probability > 0);

    data_sync_module = sync_env->sync_module->get_data_handler();

    source_trace_entry.zone = sc->source_zone.id;
    source_trace_entry.location_key = _sync_pipe.info.source_bs.bucket.get_key();

    // extend the trace with our own zone so downstream zones skip us
    zones_trace = _zones_trace;
    zones_trace.insert(sync_env->svc->zone->get_zone().id, _sync_pipe.info.dest_bucket.get_key());

    if (sc->env->ostr) {
      // resolve the source zone's display name for user-facing output
      RGWZone* z;
      if ((z = sc->env->driver->svc()->zone->find_zone(sc->source_zone))) {
        zone_name = z->name;
      }
    }
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      /* skip entries that are not complete */
      if (op_state != CLS_RGW_STATE_COMPLETE) {
        goto done;
      }
      tn->set_flag(RGW_SNS_FLAG_ACTIVE);
      do {
        yield {
          marker_tracker->reset_need_retry(key);
          if (key.name.empty()) {
            /* shouldn't happen */
            set_status("skipping empty entry");
            tn->log(0, "entry with empty obj name, skipping");
            goto done;
          }
          if (error_injection &&
              rand() % 10000 < cct->_conf->rgw_sync_data_inject_err_probability * 10000.0) {
            tn->log(0, SSTR(": injecting data sync error on key=" << key.name));
            retcode = -EIO;
          } else if (op == CLS_RGW_OP_ADD ||
                     op == CLS_RGW_OP_LINK_OLH) {
            set_status("syncing obj");
            tn->log(5, SSTR("bucket sync: sync obj: " << sc->source_zone << "/" << bs.bucket << "/" << key << "[" << versioned_epoch.value_or(0) << "]"));
            if (versioned_epoch) {
              pretty_print(sc->env, "Syncing object s3://{}/{} version {} in sync from zone {}\n",
                           bs.bucket.name, key, *versioned_epoch, zone_name);
            } else {
              pretty_print(sc->env, "Syncing object s3://{}/{} in sync from zone {}\n",
                           bs.bucket.name, key, zone_name);
            }
            call(data_sync_module->sync_object(dpp, sc, sync_pipe, key, versioned_epoch,
                                               source_trace_entry, &zones_trace));
          } else if (op == CLS_RGW_OP_DEL || op == CLS_RGW_OP_UNLINK_INSTANCE) {
            set_status("removing obj");
            if (versioned_epoch) {
              pretty_print(sc->env, "Deleting object s3://{}/{} version {} in sync from zone {}\n",
                           bs.bucket.name, key, *versioned_epoch, zone_name);
            } else {
              pretty_print(sc->env, "Deleting object s3://{}/{} in sync from zone {}\n",
                           bs.bucket.name, key, zone_name);
            }
            if (op == CLS_RGW_OP_UNLINK_INSTANCE) {
              versioned = true;
            }
            tn->log(10, SSTR("removing obj: " << sc->source_zone << "/" << bs.bucket << "/" << key << "[" << versioned_epoch.value_or(0) << "]"));
            call(data_sync_module->remove_object(dpp, sc, sync_pipe, key, timestamp, versioned, versioned_epoch.value_or(0), &zones_trace));
            // our copy of the object is more recent, continue as if it succeeded
          } else if (op == CLS_RGW_OP_LINK_OLH_DM) {
            set_status("creating delete marker");
            tn->log(10, SSTR("creating delete marker: obj: " << sc->source_zone << "/" << bs.bucket << "/" << key << "[" << versioned_epoch.value_or(0) << "]"));
            call(data_sync_module->create_delete_marker(dpp, sc, sync_pipe, key, timestamp, owner, versioned, versioned_epoch.value_or(0), &zones_trace));
          }
          tn->set_resource_name(SSTR(bucket_str_noinstance(bs.bucket) << "/" << key));
        }
        if (retcode == -ERR_PRECONDITION_FAILED) {
          // the destination already has a newer change, or policy skipped it:
          // treat as success
          pretty_print(sc->env, "Skipping object s3://{}/{} in sync from zone {}\n",
                       bs.bucket.name, key, zone_name);
          set_status("Skipping object sync: precondition failed (object contains newer change or policy doesn't allow sync)");
          tn->log(0, "Skipping object sync: precondition failed (object contains newer change or policy doesn't allow sync)");
          retcode = 0;
        }
      } while (marker_tracker->need_retry(key));
      {
        tn->unset_flag(RGW_SNS_FLAG_ACTIVE);
        if (retcode >= 0) {
          tn->log(10, "success");
        } else {
          tn->log(10, SSTR("failed, retcode=" << retcode << " (" << cpp_strerror(-retcode) << ")"));
        }
      }

      if (retcode < 0 && retcode != -ENOENT) {
        set_status() << "failed to sync obj; retcode=" << retcode;
        tn->log(0, SSTR("ERROR: failed to sync object: "
            << bucket_shard_str{bs} << "/" << key.name));
        if (!ignore_sync_error(retcode)) {
          error_ss << bucket_shard_str{bs} << "/" << key.name;
          sync_status = retcode;
        }
      }
      if (!error_ss.str().empty()) {
        // record the failure in the error repo for later retry/reporting
        yield call(sync_env->error_logger->log_error_cr(dpp, sc->conn->get_remote_id(), "data", error_ss.str(), -retcode, string("failed to sync object") + cpp_strerror(-sync_status)));
      }
done:
      if (sync_status == 0) {
        /* update marker */
        set_status() << "calling marker_tracker->finish(" << entry_marker << ")";
        yield call(marker_tracker->finish(entry_marker));
        sync_status = retcode;
      }
      if (sync_status < 0) {
        return set_cr_error(sync_status);
      }
      return set_cr_done();
    }
    return 0;
  }
};

// Full sync of one bucket shard pair: lists the remote bucket and spawns a
// RGWBucketSyncSingleEntryCR per object, honoring the pipe's prefix rules,
// then flips the bucket state to Incremental on success.
class RGWBucketFullSyncCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  rgw_bucket_sync_pipe& sync_pipe;
  rgw_bucket_sync_status& sync_status;
  rgw_bucket_shard& bs;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;
  bucket_list_result list_result;
  list<bucket_list_entry>::iterator entries_iter;
  rgw_obj_key list_marker;          // resume position in the remote listing
  bucket_list_entry *entry{nullptr};

  int total_entries{0};

  int sync_result{0};

  const rgw_raw_obj& status_obj;
  RGWObjVersionTracker& objv;
  rgw_zone_set zones_trace;

  RGWSyncTraceNodeRef tn;
  RGWBucketFullSyncMarkerTrack marker_tracker;

  // Restricts the full-sync listing to the prefixes allowed by the pipe's
  // sync-policy rules, advancing the marker from one prefix to the next.
  struct _prefix_handler {
    RGWBucketSyncFlowManager::pipe_rules_ref rules;
    RGWBucketSyncFlowManager::pipe_rules::prefix_map_t::const_iterator iter;
    std::optional<string> cur_prefix;

    void set_rules(RGWBucketSyncFlowManager::pipe_rules_ref& _rules) {
      rules = _rules;
    }

    // move the marker forward to the next handled prefix; false = no more
    bool revalidate_marker(rgw_obj_key *marker) {
      if (cur_prefix &&
          boost::starts_with(marker->name, *cur_prefix)) {
        return true;
      }
      if (!rules) {
        return false;
      }
      iter = rules->prefix_search(marker->name);
      if (iter == rules->prefix_end()) {
        return false;
      }
      cur_prefix = iter->first;
      marker->name = *cur_prefix;
      marker->instance.clear();
      return true;
    }

    // does any rule prefix cover this key?
    bool check_key_handled(const rgw_obj_key& key) {
      if (!rules) {
        return false;
      }
      if (cur_prefix &&
          boost::starts_with(key.name, *cur_prefix)) {
        return true;
      }
      iter = rules->prefix_search(key.name);
      if (iter == rules->prefix_end()) {
        return false;
      }
      cur_prefix = iter->first;
      return boost::starts_with(key.name, iter->first);
    }
  } prefix_handler;

public:
  RGWBucketFullSyncCR(RGWDataSyncCtx *_sc,
                      rgw_bucket_sync_pipe& _sync_pipe,
                      const rgw_raw_obj& status_obj,
                      boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                      rgw_bucket_sync_status& sync_status,
                      RGWSyncTraceNodeRef tn_parent,
                      RGWObjVersionTracker& objv_tracker)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      sync_pipe(_sync_pipe),
      sync_status(sync_status),
      bs(_sync_pipe.info.source_bs),
      lease_cr(std::move(lease_cr)),
      status_obj(status_obj), objv(objv_tracker),
      tn(sync_env->sync_tracer->add_node(tn_parent, "full_sync",
                                         SSTR(bucket_shard_str{bs}))),
      marker_tracker(sc, status_obj, sync_status, tn, objv_tracker)
  {
    zones_trace.insert(sc->source_zone.id, sync_pipe.info.dest_bucket.get_key());
    prefix_handler.set_rules(sync_pipe.get_rules());
  }

  int operate(const DoutPrefixProvider *dpp) override;
};

int RGWBucketFullSyncCR::operate(const DoutPrefixProvider *dpp)
{
  reenter(this) {
    list_marker =
sync_status.full.position; total_entries = sync_status.full.count; do { if (lease_cr && !lease_cr->is_locked()) { tn->log(1, "no lease or lease is lost, abort"); drain_all(); yield call(marker_tracker.flush()); if (retcode < 0) { tn->log(0, SSTR("ERROR: bucket full sync marker_tracker.flush() returned retcode=" << retcode)); return set_cr_error(retcode); } return set_cr_error(-ECANCELED); } set_status("listing remote bucket"); tn->log(20, "listing bucket for full sync"); if (!prefix_handler.revalidate_marker(&list_marker)) { set_status() << "finished iterating over all available prefixes: last marker=" << list_marker; tn->log(20, SSTR("finished iterating over all available prefixes: last marker=" << list_marker)); break; } yield call(new RGWListRemoteBucketCR(sc, bs, list_marker, &list_result)); if (retcode < 0 && retcode != -ENOENT) { set_status("failed bucket listing, going down"); drain_all(); yield spawn(marker_tracker.flush(), true); return set_cr_error(retcode); } if (list_result.entries.size() > 0) { tn->set_flag(RGW_SNS_FLAG_ACTIVE); /* actually have entries to sync */ } entries_iter = list_result.entries.begin(); for (; entries_iter != list_result.entries.end(); ++entries_iter) { if (lease_cr && !lease_cr->is_locked()) { drain_all(); yield call(marker_tracker.flush()); tn->log(1, "no lease or lease is lost, abort"); if (retcode < 0) { tn->log(0, SSTR("ERROR: bucket full sync marker_tracker.flush() returned retcode=" << retcode)); return set_cr_error(retcode); } return set_cr_error(-ECANCELED); } tn->log(20, SSTR("[full sync] syncing object: " << bucket_shard_str{bs} << "/" << entries_iter->key)); entry = &(*entries_iter); list_marker = entries_iter->key; if (!prefix_handler.check_key_handled(entries_iter->key)) { set_status() << "skipping entry due to policy rules: " << entries_iter->key; tn->log(20, SSTR("skipping entry due to policy rules: " << entries_iter->key)); continue; } total_entries++; if (!marker_tracker.start(entry->key, total_entries, 
real_time())) { tn->log(0, SSTR("ERROR: cannot start syncing " << entry->key << ". Duplicate entry?")); } else { using SyncCR = RGWBucketSyncSingleEntryCR<rgw_obj_key, rgw_obj_key>; yield spawn(new SyncCR(sc, sync_pipe, entry->key, false, /* versioned, only matters for object removal */ entry->versioned_epoch, entry->mtime, entry->owner, entry->get_modify_op(), CLS_RGW_STATE_COMPLETE, entry->key, &marker_tracker, zones_trace, tn), false); } drain_with_cb(sc->lcc.adj_concurrency(cct->_conf->rgw_bucket_sync_spawn_window), [&](uint64_t stack_id, int ret) { if (ret < 0) { tn->log(10, "a sync operation returned error"); sync_result = ret; } return 0; }); } } while (list_result.is_truncated && sync_result == 0); set_status("done iterating over all objects"); /* wait for all operations to complete */ drain_all_cb([&](uint64_t stack_id, int ret) { if (ret < 0) { tn->log(10, "a sync operation returned error"); sync_result = ret; } return 0; }); tn->unset_flag(RGW_SNS_FLAG_ACTIVE); if (lease_cr && !lease_cr->is_locked()) { tn->log(1, "no lease or lease is lost, abort"); yield call(marker_tracker.flush()); if (retcode < 0) { tn->log(0, SSTR("ERROR: bucket full sync marker_tracker.flush() returned retcode=" << retcode)); return set_cr_error(retcode); } return set_cr_error(-ECANCELED); } yield call(marker_tracker.flush()); if (retcode < 0) { tn->log(0, SSTR("ERROR: bucket full sync marker_tracker.flush() returned retcode=" << retcode)); return set_cr_error(retcode); } /* update sync state to incremental */ if (sync_result == 0) { sync_status.state = BucketSyncState::Incremental; tn->log(5, SSTR("set bucket state=" << sync_status.state)); yield call(new RGWSimpleRadosWriteCR<rgw_bucket_sync_status>( dpp, sync_env->driver, status_obj, sync_status, &objv)); tn->log(5, SSTR("bucket status objv=" << objv)); } else { tn->log(10, SSTR("backing out with sync_status=" << sync_result)); } if (retcode < 0 && sync_result == 0) { /* actually tried to set incremental state and failed */ 
      tn->log(0, SSTR("ERROR: failed to set sync state on bucket "
          << bucket_shard_str{bs} << " retcode=" << retcode));
      return set_cr_error(retcode);
    }
    if (sync_result < 0) {
      return set_cr_error(sync_result);
    }
    return set_cr_done();
  }
  return 0;
}

// Returns true for bilog operations that carry an OLH (object logical head)
// epoch, i.e. versioned-object link/unlink-instance entries.
static bool has_olh_epoch(RGWModifyOp op) {
  return op == CLS_RGW_OP_LINK_OLH || op == CLS_RGW_OP_UNLINK_INSTANCE;
}

// Marks one bucket shard as done with the current incremental-sync
// generation in the bucket sync status object; when every shard is done,
// advances the status to the next bilog generation. The read-modify-write
// is guarded by an object version tracker and retried on -ECANCELED.
class RGWBucketShardIsDoneCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  rgw_bucket_sync_status bucket_status;
  const rgw_raw_obj& bucket_status_obj;
  const int shard_id;
  RGWObjVersionTracker objv_tracker;
  const next_bilog_result& next_log;
  const uint64_t generation;

public:
  RGWBucketShardIsDoneCR(RGWDataSyncCtx *_sc, const rgw_raw_obj& _bucket_status_obj,
                         int _shard_id, const next_bilog_result& _next_log, const uint64_t _gen)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      bucket_status_obj(_bucket_status_obj),
      shard_id(_shard_id), next_log(_next_log), generation(_gen) {}

  int operate(const DoutPrefixProvider* dpp) override {
    reenter(this) {
      do {
        // read bucket sync status
        objv_tracker.clear();
        using ReadCR = RGWSimpleRadosReadCR<rgw_bucket_sync_status>;
        yield call(new ReadCR(dpp, sync_env->driver,
                              bucket_status_obj, &bucket_status, false, &objv_tracker));
        if (retcode < 0) {
          ldpp_dout(dpp, 20) << "failed to read bucket shard status: "
              << cpp_strerror(retcode) << dendl;
          return set_cr_error(retcode);
        }
        if (bucket_status.state != BucketSyncState::Incremental) {
          // exit with success to avoid stale shard being
          // retried in error repo if we lost a race
          ldpp_dout(dpp, 20) << "RGWBucketShardIsDoneCR found sync state = " << bucket_status.state << dendl;
          return set_cr_done();
        }
        if (bucket_status.incremental_gen != generation) {
          // exit with success to avoid stale shard being
          // retried in error repo if we lost a race
          ldpp_dout(dpp, 20) << "RGWBucketShardIsDoneCR expected gen: " << generation
              << ", got: " << bucket_status.incremental_gen << dendl;
          return set_cr_done();
        }
        yield {
          // update bucket_status after a shard is done with current gen
          auto& done = bucket_status.shards_done_with_gen;
          done[shard_id] = true;
          // increment gen if all shards are already done with current gen
          if (std::all_of(done.begin(), done.end(),
                          [] (const bool done){return done; } )) {
            bucket_status.incremental_gen = next_log.generation;
            done.clear();
            done.resize(next_log.num_shards, false);
          }
          ldpp_dout(dpp, 20) << "bucket status incremental gen is " << bucket_status.incremental_gen << dendl;
          using WriteCR = RGWSimpleRadosWriteCR<rgw_bucket_sync_status>;
          call(new WriteCR(dpp, sync_env->driver,
                           bucket_status_obj, bucket_status, &objv_tracker, false));
        }
        if (retcode < 0 && retcode != -ECANCELED) {
          ldpp_dout(dpp, 20) << "failed to write bucket sync status: " << cpp_strerror(retcode) << dendl;
          return set_cr_error(retcode);
        } else if (retcode >= 0) {
          return set_cr_done();
        }
      } while (retcode == -ECANCELED);   // version mismatch: re-read and retry
    }
    return 0;
  }
};

// Incremental sync of one bucket shard: consumes the bucket index log
// (bilog) from the persisted position and replays each relevant entry to
// the destination bucket. (class definition continues below)
class RGWBucketShardIncrementalSyncCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  rgw_bucket_sync_pipe& sync_pipe;
  RGWBucketSyncFlowManager::pipe_rules_ref rules;
  rgw_bucket_shard& bs;
  const rgw_raw_obj& bucket_status_obj;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;
  bilog_list_result extended_result;
  list<rgw_bi_log_entry> list_result;
  int next_num_shards;
  uint64_t next_gen;
  bool truncated;

  list<rgw_bi_log_entry>::iterator entries_iter, entries_end;
  // (object, instance) -> (timestamp, op) of the latest entry; used to
  // squash older duplicate operations on the same key
  map<pair<string, string>, pair<real_time, RGWModifyOp> > squash_map;
  rgw_bucket_shard_sync_info& sync_info;
  uint64_t generation;
  rgw_obj_key key;
  rgw_bi_log_entry *entry{nullptr};
  bool updated_status{false};
  rgw_zone_id zone_id;
  string target_location_key;

  string cur_id;

  int sync_status{0};
  bool syncstopped{false};

  RGWSyncTraceNodeRef tn;
  RGWBucketIncSyncShardMarkerTrack marker_tracker;

public:
  RGWBucketShardIncrementalSyncCR(RGWDataSyncCtx *_sc,
                                  rgw_bucket_sync_pipe& _sync_pipe,
                                  const std::string& shard_status_oid,
                                  const rgw_raw_obj& _bucket_status_obj,
                                  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                                  rgw_bucket_shard_sync_info& sync_info,
                                  uint64_t generation,
                                  RGWSyncTraceNodeRef& _tn_parent,
                                  RGWObjVersionTracker& objv_tracker,
                                  ceph::real_time* stable_timestamp)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      sync_pipe(_sync_pipe), bs(_sync_pipe.info.source_bs),
      bucket_status_obj(_bucket_status_obj),
      lease_cr(std::move(lease_cr)), sync_info(sync_info),
      generation(generation),
      zone_id(sync_env->svc->zone->get_zone().id),
      tn(sync_env->sync_tracer->add_node(_tn_parent, "inc_sync",
                                         SSTR(bucket_shard_str{bs}))),
      marker_tracker(sc, shard_status_oid, sync_info.inc_marker, tn,
                     objv_tracker, stable_timestamp)
  {
    set_description() << "bucket shard incremental sync bucket="
        << bucket_shard_str{bs};
    set_status("init");
    rules = sync_pipe.get_rules();
    target_location_key = sync_pipe.info.dest_bucket.get_key();
  }

  // True if 'key' matches one of the pipe's configured prefix rules.
  bool check_key_handled(const rgw_obj_key& key) {
    if (!rules) {
      return false;
    }
    auto iter = rules->prefix_search(key.name);
    if (iter == rules->prefix_end()) {
      return false;
    }
    return boost::starts_with(key.name, iter->first);
  }

  int operate(const DoutPrefixProvider *dpp) override;
};

// Incremental sync loop: list the bilog from the saved position, build the
// squash map, then replay each surviving entry via
// RGWBucketSyncSingleEntryCR, bounded by the spawn window.
int RGWBucketShardIncrementalSyncCR::operate(const DoutPrefixProvider *dpp)
{
  int ret;
  reenter(this) {
    do {
      // abort (after flushing the marker) if the sync lease was lost
      if (lease_cr && !lease_cr->is_locked()) {
        tn->log(1, "no lease or lease is lost, abort");
        drain_all();
        yield call(marker_tracker.flush());
        if (retcode < 0) {
          tn->log(0, SSTR("ERROR: incremental sync marker_tracker.flush() returned retcode=" << retcode));
          return set_cr_error(retcode);
        }
        return set_cr_error(-ECANCELED);
      }
      tn->log(20, SSTR("listing bilog for incremental sync; position=" << sync_info.inc_marker.position));
      set_status() << "listing bilog; position=" << sync_info.inc_marker.position;
      yield call(new RGWListBucketIndexLogCR(sc, bs, sync_info.inc_marker.position,
                                             generation, &extended_result));
      if (retcode < 0 && retcode != -ENOENT) {
        /* wait for all operations to complete */
        drain_all();
        yield spawn(marker_tracker.flush(), true);
        return set_cr_error(retcode);
      }
      list_result = std::move(extended_result.entries);
      truncated = extended_result.truncated;
      if (extended_result.next_log) {
        next_gen = extended_result.next_log->generation;
        next_num_shards = extended_result.next_log->num_shards;
      }

      // First pass: build the squash map so that for each (object, instance)
      // only the newest completed operation is replayed. A SYNCSTOP entry
      // truncates processing at that point.
      squash_map.clear();
      entries_iter = list_result.begin();
      entries_end = list_result.end();
      for (; entries_iter != entries_end; ++entries_iter) {
        // NOTE(review): 'e' copies each rgw_bi_log_entry; a const reference
        // would avoid the per-entry copy — confirm nothing relies on the copy
        auto e = *entries_iter;
        if (e.op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP) {
          ldpp_dout(dpp, 20) << "syncstop at: " << e.timestamp << ". marker: " << e.id << dendl;
          syncstopped = true;
          entries_end = std::next(entries_iter); // stop after this entry
          break;
        }
        if (e.op == RGWModifyOp::CLS_RGW_OP_RESYNC) {
          ldpp_dout(dpp, 20) << "syncstart at: " << e.timestamp << ". marker: " << e.id << dendl;
          continue;
        }
        if (e.op == CLS_RGW_OP_CANCEL) {
          continue;
        }
        if (e.state != CLS_RGW_STATE_COMPLETE) {
          continue;
        }
        if (e.zones_trace.exists(zone_id.id, target_location_key)) {
          continue;  // change originated from us; skip
        }
        auto& squash_entry = squash_map[make_pair(e.object, e.instance)];
        // don't squash over olh entries - we need to apply their olh_epoch
        if (has_olh_epoch(squash_entry.second) && !has_olh_epoch(e.op)) {
          continue;
        }
        if (squash_entry.first <= e.timestamp) {
          squash_entry = make_pair<>(e.timestamp, e.op);
        }
      }

      // Second pass: replay the surviving entries.
      entries_iter = list_result.begin();
      for (; entries_iter != entries_end; ++entries_iter) {
        if (lease_cr && !lease_cr->is_locked()) {
          tn->log(1, "no lease or lease is lost, abort");
          drain_all();
          yield call(marker_tracker.flush());
          if (retcode < 0) {
            tn->log(0, SSTR("ERROR: incremental sync marker_tracker.flush() returned retcode=" << retcode));
            return set_cr_error(retcode);
          }
          return set_cr_error(-ECANCELED);
        }
        entry = &(*entries_iter);
        {
          ssize_t p = entry->id.find('#'); /* entries might have explicit shard info in them, e.g., 6#00000000004.94.3 */
          if (p < 0) {
            cur_id = entry->id;
          } else {
            cur_id = entry->id.substr(p + 1);
          }
        }
        sync_info.inc_marker.position = cur_id;
        if
        // the various skip cases below still bump the high marker so that
        // progress is recorded even for entries we do not replay
           (entry->op == RGWModifyOp::CLS_RGW_OP_SYNCSTOP || entry->op == RGWModifyOp::CLS_RGW_OP_RESYNC) {
          ldpp_dout(dpp, 20) << "detected syncstop or resync on "
              << entries_iter->timestamp << ", skipping entry" << dendl;
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (!key.set(rgw_obj_index_key{entry->object, entry->instance})) {
          set_status() << "parse_raw_oid() on " << entry->object << " returned false, skipping entry";
          tn->log(20, SSTR("parse_raw_oid() on " << entry->object << " returned false, skipping entry"));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        tn->log(20, SSTR("parsed entry: id=" << cur_id
            << " iter->object=" << entry->object << " iter->instance=" << entry->instance
            << " name=" << key.name << " instance=" << key.instance << " ns=" << key.ns));
        if (!key.ns.empty()) {
          set_status() << "skipping entry in namespace: " << entry->object;
          tn->log(20, SSTR("skipping entry in namespace: " << entry->object));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (!check_key_handled(key)) {
          set_status() << "skipping entry due to policy rules: " << entry->object;
          tn->log(20, SSTR("skipping entry due to policy rules: " << entry->object));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        set_status() << "got entry.id=" << cur_id << " key=" << key << " op=" << (int)entry->op;
        if (entry->op == CLS_RGW_OP_CANCEL) {
          set_status() << "canceled operation, skipping";
          tn->log(20, SSTR("skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": canceled operation"));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (entry->state != CLS_RGW_STATE_COMPLETE) {
          set_status() << "non-complete operation, skipping";
          tn->log(20, SSTR("skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": non-complete operation"));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        if (entry->zones_trace.exists(zone_id.id, target_location_key)) {
          set_status() << "redundant operation, skipping";
          tn->log(20, SSTR("skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": redundant operation"));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        // drop any entry that the squash map superseded with a newer op
        if (make_pair<>(entry->timestamp, entry->op) != squash_map[make_pair(entry->object, entry->instance)]) {
          set_status() << "squashed operation, skipping";
          tn->log(20, SSTR("skipping object: "
              << bucket_shard_str{bs} << "/" << key << ": squashed operation"));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        tn->set_flag(RGW_SNS_FLAG_ACTIVE);
        tn->log(20, SSTR("syncing object: " << bucket_shard_str{bs} << "/" << key));
        updated_status = false;
        // wait for any conflicting in-flight operation on the same key to
        // finish before starting this one
        while (!marker_tracker.can_do_op(key, has_olh_epoch(entry->op))) {
          if (!updated_status) {
            set_status() << "can't do op, conflicting inflight operation";
            updated_status = true;
          }
          tn->log(5, SSTR("can't do op on key=" << key << " need to wait for conflicting operation to complete"));
          yield wait_for_child();
          bool again = true;
          while (again) {
            again = collect(&ret, nullptr);
            if (ret < 0) {
              tn->log(0, SSTR("ERROR: a child operation returned error (ret=" << ret << ")"));
              sync_status = ret;
              /* we have reported this error */
            }
          }
          if (sync_status != 0)
            break;
        }
        if (sync_status != 0) {
          /* get error, stop */
          break;
        }
        if (!marker_tracker.index_key_to_marker(key, cur_id, has_olh_epoch(entry->op))) {
          set_status() << "can't do op, sync already in progress for object";
          tn->log(20, SSTR("skipping sync of entry: " << cur_id << ":" << key << " sync already in progress for object"));
          marker_tracker.try_update_high_marker(cur_id, 0, entry->timestamp);
          continue;
        }
        // yield {
        set_status() << "start object sync";
        if (!marker_tracker.start(cur_id, 0, entry->timestamp)) {
          tn->log(0, SSTR("ERROR: cannot start syncing " << cur_id << ". Duplicate entry?"));
        } else {
          std::optional<uint64_t> versioned_epoch;
          rgw_bucket_entry_owner owner(entry->owner, entry->owner_display_name);
          if (entry->ver.pool < 0) {
            versioned_epoch = entry->ver.epoch;
          }
          tn->log(20, SSTR("entry->timestamp=" << entry->timestamp));
          using SyncCR = RGWBucketSyncSingleEntryCR<string, rgw_obj_key>;
          spawn(new SyncCR(sc, sync_pipe, key,
                           entry->is_versioned(), versioned_epoch,
                           entry->timestamp, owner, entry->op, entry->state,
                           cur_id, &marker_tracker, entry->zones_trace, tn),
                false);
        }
        // }
        // keep the number of in-flight child syncs within the spawn window
        drain_with_cb(sc->lcc.adj_concurrency(cct->_conf->rgw_bucket_sync_spawn_window),
                      [&](uint64_t stack_id, int ret) {
                        if (ret < 0) {
                          tn->log(10, "a sync operation returned error");
                          sync_status = ret;
                        }
                        return 0;
                      });
      }
    } while (!list_result.empty() && sync_status == 0 && !syncstopped);

    drain_all_cb([&](uint64_t stack_id, int ret) {
      if (ret < 0) {
        tn->log(10, "a sync operation returned error");
        sync_status = ret;
      }
      return 0;
    });
    tn->unset_flag(RGW_SNS_FLAG_ACTIVE);

    if (syncstopped) {
      // transition to StateStopped in RGWSyncBucketShardCR. if sync is
      // still disabled, we'll delete the sync status object. otherwise we'll
      // restart full sync to catch any changes that happened while sync was
      // disabled
      sync_info.state = rgw_bucket_shard_sync_info::StateStopped;
      return set_cr_done();
    }

    yield call(marker_tracker.flush());
    if (retcode < 0) {
      tn->log(0, SSTR("ERROR: incremental sync marker_tracker.flush() returned retcode=" << retcode));
      return set_cr_error(retcode);
    }
    if (sync_status < 0) {
      tn->log(10, SSTR("backing out with sync_status=" << sync_status));
      return set_cr_error(sync_status);
    }

    // this shard consumed its whole log for this generation; record it as
    // done and remove the per-shard status object
    if (!truncated && extended_result.next_log) {
      yield call(new RGWBucketShardIsDoneCR(sc, bucket_status_obj, bs.shard_id, *extended_result.next_log, generation));
      if (retcode < 0) {
        ldout(cct, 20) << "failed to update bucket sync status: "
            << cpp_strerror(retcode) << dendl;
        return set_cr_error(retcode);
      }
      yield {
        // delete the shard status object
        auto status_obj = sync_env->svc->rados->obj(marker_tracker.get_obj());
        retcode = status_obj.open(dpp);
        if (retcode < 0) {
          return set_cr_error(retcode);
        }
        call(new RGWRadosRemoveOidCR(sync_env->driver, std::move(status_obj)));
        if (retcode < 0) {
          ldpp_dout(dpp, 20) << "failed to remove shard status object: " << cpp_strerror(retcode) << dendl;
          return set_cr_error(retcode);
        }
      }
    }
    return set_cr_done();
  }
  return 0;
}

// Resolves the set of sync pipes (peers) for a bucket, given an optional
// target bucket, source zone and source bucket. (class continues below)
class RGWGetBucketPeersCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;

  std::optional<rgw_bucket> target_bucket;
  std::optional<rgw_zone_id> source_zone;
  std::optional<rgw_bucket> source_bucket;

  rgw_sync_pipe_info_set *pipes;
  map<rgw_bucket, all_bucket_info> buckets_info;
  map<rgw_bucket, all_bucket_info>::iterator siiter;
  std::optional<all_bucket_info> target_bucket_info;
  std::optional<all_bucket_info> source_bucket_info;

  rgw_sync_pipe_info_set::iterator siter;

  std::shared_ptr<rgw_bucket_get_sync_policy_result> source_policy;
  std::shared_ptr<rgw_bucket_get_sync_policy_result> target_policy;

  RGWSyncTraceNodeRef tn;

  using pipe_const_iter = map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set>::const_iterator;

  static // (declaration continues on the next line)
pair<pipe_const_iter, pipe_const_iter> get_pipe_iters(const map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set>& m, std::optional<rgw_zone_id> zone) { if (!zone) { return { m.begin(), m.end() }; } auto b = m.find(*zone); if (b == m.end()) { return { b, b }; } return { b, std::next(b) }; } void filter_sources(std::optional<rgw_zone_id> source_zone, std::optional<rgw_bucket> source_bucket, const map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set>& all_sources, rgw_sync_pipe_info_set *result) { ldpp_dout(sync_env->dpp, 20) << __func__ << ": source_zone=" << source_zone.value_or(rgw_zone_id("*")).id << " source_bucket=" << source_bucket.value_or(rgw_bucket()) << " all_sources.size()=" << all_sources.size() << dendl; auto iters = get_pipe_iters(all_sources, source_zone); for (auto i = iters.first; i != iters.second; ++i) { for (auto& handler : i->second) { if (!handler.specific()) { ldpp_dout(sync_env->dpp, 20) << __func__ << ": pipe_handler=" << handler << ": skipping" << dendl; continue; } if (source_bucket && !source_bucket->match(*handler.source.bucket)) { continue; } ldpp_dout(sync_env->dpp, 20) << __func__ << ": pipe_handler=" << handler << ": adding" << dendl; result->insert(handler, source_bucket_info, target_bucket_info); } } } void filter_targets(std::optional<rgw_zone_id> target_zone, std::optional<rgw_bucket> target_bucket, const map<rgw_zone_id, RGWBucketSyncFlowManager::pipe_set>& all_targets, rgw_sync_pipe_info_set *result) { ldpp_dout(sync_env->dpp, 20) << __func__ << ": target_zone=" << source_zone.value_or(rgw_zone_id("*")).id << " target_bucket=" << source_bucket.value_or(rgw_bucket()) << " all_targets.size()=" << all_targets.size() << dendl; auto iters = get_pipe_iters(all_targets, target_zone); for (auto i = iters.first; i != iters.second; ++i) { for (auto& handler : i->second) { if (target_bucket && handler.dest.bucket && !target_bucket->match(*handler.dest.bucket)) { ldpp_dout(sync_env->dpp, 20) << __func__ << ": pipe_handler=" << handler << ": 
skipping" << dendl; continue; } ldpp_dout(sync_env->dpp, 20) << __func__ << ": pipe_handler=" << handler << ": adding" << dendl; result->insert(handler, source_bucket_info, target_bucket_info); } } } void update_from_target_bucket_policy(); void update_from_source_bucket_policy(); struct GetHintTargets : public RGWGenericAsyncCR::Action { RGWDataSyncEnv *sync_env; rgw_bucket source_bucket; std::set<rgw_bucket> targets; GetHintTargets(RGWDataSyncEnv *_sync_env, const rgw_bucket& _source_bucket) : sync_env(_sync_env), source_bucket(_source_bucket) {} int operate() override { int r = sync_env->svc->bucket_sync->get_bucket_sync_hints(sync_env->dpp, source_bucket, nullptr, &targets, null_yield); if (r < 0) { ldpp_dout(sync_env->dpp, 0) << "ERROR: " << __func__ << "(): failed to fetch bucket sync hints for bucket=" << source_bucket << dendl; return r; } return 0; } }; std::shared_ptr<GetHintTargets> get_hint_targets_action; std::set<rgw_bucket>::iterator hiter; public: RGWGetBucketPeersCR(RGWDataSyncEnv *_sync_env, std::optional<rgw_bucket> _target_bucket, std::optional<rgw_zone_id> _source_zone, std::optional<rgw_bucket> _source_bucket, rgw_sync_pipe_info_set *_pipes, const RGWSyncTraceNodeRef& _tn_parent) : RGWCoroutine(_sync_env->cct), sync_env(_sync_env), target_bucket(_target_bucket), source_zone(_source_zone), source_bucket(_source_bucket), pipes(_pipes), tn(sync_env->sync_tracer->add_node(_tn_parent, "get_bucket_peers", SSTR( "target=" << target_bucket.value_or(rgw_bucket()) << ":source=" << target_bucket.value_or(rgw_bucket()) << ":source_zone=" << source_zone.value_or(rgw_zone_id("*")).id))) { } int operate(const DoutPrefixProvider *dpp) override; }; std::ostream& operator<<(std::ostream& out, std::optional<rgw_bucket_shard>& bs) { if (!bs) { out << "*"; } else { out << *bs; } return out; } static RGWCoroutine* sync_bucket_shard_cr(RGWDataSyncCtx* sc, boost::intrusive_ptr<const RGWContinuousLeaseCR> lease, const rgw_bucket_sync_pair_info& sync_pair, 
                                          std::optional<uint64_t> gen,
                                          const RGWSyncTraceNodeRef& tn,
                                          ceph::real_time* progress);

RGWRunBucketSourcesSyncCR::RGWRunBucketSourcesSyncCR(RGWDataSyncCtx *_sc,
                                                     boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                                                     const rgw_bucket_shard& source_bs,
                                                     const RGWSyncTraceNodeRef& _tn_parent,
                                                     std::optional<uint64_t> gen,
                                                     ceph::real_time* progress)
  : RGWCoroutine(_sc->env->cct), sc(_sc), sync_env(_sc->env),
    lease_cr(std::move(lease_cr)),
    tn(sync_env->sync_tracer->add_node(
         _tn_parent, "bucket_sync_sources",
         SSTR( "source=" << source_bs << ":source_zone=" << sc->source_zone))),
    progress(progress),
    gen(gen)
{
  sync_pair.source_bs = source_bs;
}

// Resolve all sync pipes whose source is this bucket shard and run a
// bucket-shard sync per pipe (window-limited). If 'progress' is requested,
// report the minimum progress timestamp across all pipes.
int RGWRunBucketSourcesSyncCR::operate(const DoutPrefixProvider *dpp)
{
  reenter(this) {
    yield call(new RGWGetBucketPeersCR(sync_env, std::nullopt, sc->source_zone,
                                       sync_pair.source_bs.bucket, &pipes, tn));
    if (retcode < 0 && retcode != -ENOENT) {
      tn->log(0, SSTR("ERROR: failed to read sync status for bucket. error: " << retcode));
      return set_cr_error(retcode);
    }

    ldpp_dout(dpp, 20) << __func__ << "(): requested source_bs=" << sync_pair.source_bs << dendl;

    if (pipes.empty()) {
      ldpp_dout(dpp, 20) << __func__ << "(): no relevant sync pipes found" << dendl;
      return set_cr_done();
    }

    // one progress slot per pipe; cur_shard_progress advances in lockstep
    // with siter below
    shard_progress.resize(pipes.size());
    cur_shard_progress = shard_progress.begin();

    for (siter = pipes.begin(); siter != pipes.end(); ++siter, ++cur_shard_progress) {
      ldpp_dout(dpp, 20) << __func__ << "(): sync pipe=" << *siter << dendl;

      sync_pair.dest_bucket = siter->target.get_bucket();
      sync_pair.handler = siter->handler;

      ldpp_dout(dpp, 20) << __func__ << "(): sync_pair=" << sync_pair << dendl;

      yield_spawn_window(sync_bucket_shard_cr(sc, lease_cr, sync_pair,
                                              gen, tn, &*cur_shard_progress),
                         sc->lcc.adj_concurrency(cct->_conf->rgw_bucket_sync_spawn_window),
                         [&](uint64_t stack_id, int ret) {
                           if (ret < 0) {
                             tn->log(10, SSTR("ERROR: a sync operation returned error: " << ret));
                           }
                           return ret;
                         });
    }
    drain_all_cb([&](uint64_t stack_id, int ret) {
      if (ret < 0) {
        tn->log(10, SSTR("a sync operation returned error: " << ret));
      }
      return ret;
    });
    if (progress) {
      // overall progress is the slowest pipe's progress
      *progress = *std::min_element(shard_progress.begin(), shard_progress.end());
    }
    return set_cr_done();
  }
  return 0;
}

// Reads a bucket's instance info; on -ENOENT triggers a metadata sync of
// the bucket instance from the master zone and retries the read once.
class RGWSyncGetBucketInfoCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  rgw_bucket bucket;
  RGWBucketInfo *pbucket_info;
  map<string, bufferlist> *pattrs;
  RGWMetaSyncEnv meta_sync_env;

  RGWSyncTraceNodeRef tn;

public:
  RGWSyncGetBucketInfoCR(RGWDataSyncEnv *_sync_env,
                         const rgw_bucket& _bucket,
                         RGWBucketInfo *_pbucket_info,
                         map<string, bufferlist> *_pattrs,
                         const RGWSyncTraceNodeRef& _tn_parent)
    : RGWCoroutine(_sync_env->cct),
      sync_env(_sync_env),
      bucket(_bucket),
      pbucket_info(_pbucket_info),
      pattrs(_pattrs),
      tn(sync_env->sync_tracer->add_node(_tn_parent, "get_bucket_info",
                                         SSTR(bucket))) {
  }

  int operate(const DoutPrefixProvider *dpp) override;
};

int RGWSyncGetBucketInfoCR::operate(const DoutPrefixProvider *dpp)
{
  reenter(this) {
    yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->driver, bucket, pbucket_info, pattrs, dpp));
    if (retcode == -ENOENT) {
      /* bucket instance info has not been synced in yet, fetch it now */
      yield {
        // NOTE(review): message reads "no local info for bucket:: fetching
        // metadata" — looks like the bucket name was meant to appear
        // between the colons; confirm intended wording
        tn->log(10, SSTR("no local info for bucket:" << ": fetching metadata"));
        string raw_key = string("bucket.instance:") + bucket.get_key();

        meta_sync_env.init(dpp, cct, sync_env->driver, sync_env->svc->zone->get_master_conn(), sync_env->async_rados,
                           sync_env->http_manager, sync_env->error_logger, sync_env->sync_tracer);

        call(new RGWMetaSyncSingleEntryCR(&meta_sync_env, raw_key,
                                          string() /* no marker */,
                                          MDLOG_STATUS_COMPLETE,
                                          NULL /* no marker tracker */,
                                          tn));
      }
      if (retcode < 0) {
        tn->log(0, SSTR("ERROR: failed to fetch bucket instance info for " << bucket_str{bucket}));
        return set_cr_error(retcode);
      }

      yield call(new RGWGetBucketInstanceInfoCR(sync_env->async_rados, sync_env->driver, bucket, pbucket_info, pattrs, dpp));
    }
    if (retcode < 0) {
      tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" <<
                  bucket_str{bucket}));
      return set_cr_error(retcode);
    }

    return set_cr_done();
  }

  return 0;
}

// Feed the target bucket's sync policy into 'pipes' (via filter_sources)
// and queue any bucket whose info is still missing for a later fetch.
void RGWGetBucketPeersCR::update_from_target_bucket_policy()
{
  if (!target_policy ||
      !target_policy->policy_handler ||
      !pipes) {
    return;
  }

  auto handler = target_policy->policy_handler.get();

  filter_sources(source_zone,
                 source_bucket,
                 handler->get_sources(),
                 pipes);

  for (siter = pipes->begin(); siter != pipes->end(); ++siter) {
    if (!siter->source.has_bucket_info()) {
      buckets_info.emplace(siter->source.get_bucket(), all_bucket_info());
    }
    if (!siter->target.has_bucket_info()) {
      buckets_info.emplace(siter->target.get_bucket(), all_bucket_info());
    }
  }
}

// Feed the source bucket's sync policy into 'pipes' (via filter_targets,
// restricted to the local zone) and queue missing bucket info fetches.
void RGWGetBucketPeersCR::update_from_source_bucket_policy()
{
  if (!source_policy ||
      !source_policy->policy_handler ||
      !pipes) {
    return;
  }

  auto handler = source_policy->policy_handler.get();

  filter_targets(sync_env->svc->zone->get_zone().id,
                 target_bucket,
                 handler->get_targets(),
                 pipes);

  for (siter = pipes->begin(); siter != pipes->end(); ++siter) {
    if (!siter->source.has_bucket_info()) {
      buckets_info.emplace(siter->source.get_bucket(), all_bucket_info());
    }
    if (!siter->target.has_bucket_info()) {
      buckets_info.emplace(siter->target.get_bucket(), all_bucket_info());
    }
  }
}

// Fetches a bucket's sync policy handler; if the bucket instance is not
// found locally, pulls the instance metadata (RGWSyncGetBucketInfoCR) and
// retries the policy fetch once (hence the two-iteration loop).
class RGWSyncGetBucketSyncPolicyHandlerCR : public RGWCoroutine {
  RGWDataSyncEnv *sync_env;
  rgw_bucket bucket;
  rgw_bucket_get_sync_policy_params get_policy_params;

  std::shared_ptr<rgw_bucket_get_sync_policy_result> policy;

  RGWSyncTraceNodeRef tn;

  int i;

public:
  RGWSyncGetBucketSyncPolicyHandlerCR(RGWDataSyncEnv *_sync_env,
                                      std::optional<rgw_zone_id> zone,
                                      const rgw_bucket& _bucket,
                                      std::shared_ptr<rgw_bucket_get_sync_policy_result>& _policy,
                                      const RGWSyncTraceNodeRef& _tn_parent)
    : RGWCoroutine(_sync_env->cct),
      sync_env(_sync_env),
      bucket(_bucket),
      policy(_policy),
      tn(sync_env->sync_tracer->add_node(_tn_parent, "get_sync_policy_handler",
                                         SSTR(bucket))) {
    get_policy_params.zone = zone;
    get_policy_params.bucket = bucket;
  }

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      for (i = 0; i < 2; ++i) {
        yield call(new RGWBucketGetSyncPolicyHandlerCR(sync_env->async_rados,
                                                       sync_env->driver,
                                                       get_policy_params,
                                                       policy,
                                                       dpp));
        if (retcode < 0 &&
            retcode != -ENOENT) {
          return set_cr_error(retcode);
        }

        if (retcode == 0) {
          return set_cr_done();
        }

        /* bucket instance was not found,
         * try to get bucket instance info, can trigger
         * metadata sync of bucket instance
         */
        yield call(new RGWSyncGetBucketInfoCR(sync_env,
                                              bucket,
                                              nullptr,
                                              nullptr,
                                              tn));
        if (retcode < 0) {
          return set_cr_error(retcode);
        }
      }
    }

    return 0;
  }
};

// Resolve the sync peers: fetch target and/or source bucket policies,
// expand sync hints when only a source bucket is given, then fill in any
// still-missing bucket info before publishing the pipe set.
int RGWGetBucketPeersCR::operate(const DoutPrefixProvider *dpp)
{
  reenter(this) {
    if (pipes) {
      pipes->clear();
    }
    if (target_bucket) {
      target_policy = make_shared<rgw_bucket_get_sync_policy_result>();
      yield call(new RGWSyncGetBucketSyncPolicyHandlerCR(sync_env,
                                                         nullopt,
                                                         *target_bucket,
                                                         target_policy,
                                                         tn));
      if (retcode < 0 &&
          retcode != -ENOENT) {
        return set_cr_error(retcode);
      }

      update_from_target_bucket_policy();
    }

    if (source_bucket && source_zone) {
      source_policy = make_shared<rgw_bucket_get_sync_policy_result>();
      yield call(new RGWSyncGetBucketSyncPolicyHandlerCR(sync_env,
                                                         source_zone,
                                                         *source_bucket,
                                                         source_policy,
                                                         tn));
      if (retcode < 0 &&
          retcode != -ENOENT) {
        return set_cr_error(retcode);
      }

      if (source_policy->policy_handler) {
        auto& opt_bucket_info = source_policy->policy_handler->get_bucket_info();
        auto& opt_attrs = source_policy->policy_handler->get_bucket_attrs();
        if (opt_bucket_info && opt_attrs) {
          source_bucket_info.emplace();
          source_bucket_info->bucket_info = *opt_bucket_info;
          source_bucket_info->attrs = *opt_attrs;
        }
      }

      if (!target_bucket) {
        // no explicit target: expand the source bucket's sync hints into
        // candidate target buckets
        get_hint_targets_action = make_shared<GetHintTargets>(sync_env, *source_bucket);

        yield call(new RGWGenericAsyncCR(cct, sync_env->async_rados,
                                         get_hint_targets_action));
        if (retcode < 0) {
          return set_cr_error(retcode);
        }

        /* hints might have incomplete bucket ids,
         * in which case we need to figure out the current
         * bucket_id
         */
        for (hiter =
               get_hint_targets_action->targets.begin();
             hiter != get_hint_targets_action->targets.end();
             ++hiter) {
          ldpp_dout(dpp, 20) << "Got sync hint for bucket=" << *source_bucket << ": " << hiter->get_key() << dendl;

          target_policy = make_shared<rgw_bucket_get_sync_policy_result>();
          yield call(new RGWSyncGetBucketSyncPolicyHandlerCR(sync_env,
                                                             nullopt,
                                                             *hiter,
                                                             target_policy,
                                                             tn));
          if (retcode < 0 &&
              retcode != -ENOENT) {
            return set_cr_error(retcode);
          }
          update_from_target_bucket_policy();
        }
      }
    }

    update_from_source_bucket_policy();

    // fetch info for any bucket still missing it (queued by the
    // update_from_* helpers above)
    for (siiter = buckets_info.begin(); siiter != buckets_info.end(); ++siiter) {
      if (siiter->second.bucket_info.bucket.name.empty()) {
        yield call(new RGWSyncGetBucketInfoCR(sync_env, siiter->first,
                                              &siiter->second.bucket_info,
                                              &siiter->second.attrs,
                                              tn));
      }
    }

    if (pipes) {
      pipes->update_empty_bucket_info(buckets_info);
    }

    return set_cr_done();
  }
  return 0;
}

// Runs sync for one (source shard, dest bucket) pair: reads the pipe sync
// status, then drives incremental sync; reports StateStopped upward via
// 'bucket_stopped' when a syncstop was observed.
class RGWSyncBucketShardCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *sync_env;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr;
  rgw_bucket_sync_pair_info sync_pair;
  rgw_bucket_sync_pipe& sync_pipe;
  bool& bucket_stopped;
  uint64_t generation;
  ceph::real_time* progress;

  const std::string shard_status_oid;
  const rgw_raw_obj bucket_status_obj;
  rgw_bucket_shard_sync_info sync_status;
  RGWObjVersionTracker objv_tracker;

  RGWSyncTraceNodeRef tn;

public:
  RGWSyncBucketShardCR(RGWDataSyncCtx *_sc,
                       boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr,
                       const rgw_bucket_sync_pair_info& _sync_pair,
                       rgw_bucket_sync_pipe& sync_pipe,
                       bool& bucket_stopped,
                       uint64_t generation,
                       const RGWSyncTraceNodeRef& tn,
                       ceph::real_time* progress)
    : RGWCoroutine(_sc->cct), sc(_sc), sync_env(_sc->env),
      lease_cr(std::move(lease_cr)), sync_pair(_sync_pair),
      sync_pipe(sync_pipe), bucket_stopped(bucket_stopped),
      generation(generation), progress(progress),
      shard_status_oid(RGWBucketPipeSyncStatusManager::inc_status_oid(sc->source_zone, sync_pair, generation)),
      bucket_status_obj(sc->env->svc->zone->get_zone_params().log_pool,
                        RGWBucketPipeSyncStatusManager::full_status_oid(sc->source_zone,
                                                                        sync_pair.source_bs.bucket,
                                                                        sync_pair.dest_bucket)),
      tn(tn) {
  }

  int operate(const DoutPrefixProvider *dpp) override;
};

int RGWSyncBucketShardCR::operate(const DoutPrefixProvider *dpp)
{
  reenter(this) {
    objv_tracker.clear();
    yield call(new RGWReadBucketPipeSyncStatusCoroutine(sc, sync_pair, &sync_status, &objv_tracker, generation));
    if (retcode < 0 && retcode != -ENOENT) {
      tn->log(0, SSTR("ERROR: failed to read sync status for bucket. error: " << retcode));
      return set_cr_error(retcode);
    }

    tn->log(20, SSTR("sync status for source bucket shard: " << sync_status.state));
    sync_status.state = rgw_bucket_shard_sync_info::StateIncrementalSync;
    if (progress) {
      *progress = sync_status.inc_marker.timestamp;
    }

    yield call(new RGWBucketShardIncrementalSyncCR(sc, sync_pipe,
                                                   shard_status_oid, bucket_status_obj, lease_cr,
                                                   sync_status, generation, tn,
                                                   objv_tracker, progress));
    if (retcode < 0) {
      tn->log(5, SSTR("incremental sync on bucket failed, retcode=" << retcode));
      return set_cr_error(retcode);
    }

    if (sync_status.state == rgw_bucket_shard_sync_info::StateStopped) {
      tn->log(20, SSTR("syncstopped indication for source bucket shard"));
      bucket_stopped = true;
    }

    return set_cr_done();
  }
  return 0;
}

// Top-level bucket sync coroutine (definition continues past this chunk).
class RGWSyncBucketCR : public RGWCoroutine {
  RGWDataSyncCtx *sc;
  RGWDataSyncEnv *env;
  boost::intrusive_ptr<const RGWContinuousLeaseCR> data_lease_cr;
  boost::intrusive_ptr<RGWContinuousLeaseCR> bucket_lease_cr;
  rgw_bucket_sync_pair_info sync_pair;
  rgw_bucket_sync_pipe sync_pipe;
  std::optional<uint64_t> gen;
  ceph::real_time* progress;

  const std::string lock_name = "bucket sync";
  const uint32_t lock_duration;
  const rgw_raw_obj status_obj;
  rgw_bucket_sync_status bucket_status;
  bool bucket_stopped = false;
  RGWObjVersionTracker objv;
  bool init_check_compat = false;
  rgw_bucket_index_marker_info info;
  rgw_raw_obj error_repo;
  rgw_bucket_shard source_bs;
  rgw_pool pool;
uint64_t current_gen = 0; RGWSyncTraceNodeRef tn; public: RGWSyncBucketCR(RGWDataSyncCtx *_sc, boost::intrusive_ptr<const RGWContinuousLeaseCR> lease_cr, const rgw_bucket_sync_pair_info& _sync_pair, std::optional<uint64_t> gen, const RGWSyncTraceNodeRef& _tn_parent, ceph::real_time* progress) : RGWCoroutine(_sc->cct), sc(_sc), env(_sc->env), data_lease_cr(std::move(lease_cr)), sync_pair(_sync_pair), gen(gen), progress(progress), lock_duration(cct->_conf->rgw_sync_lease_period), status_obj(env->svc->zone->get_zone_params().log_pool, RGWBucketPipeSyncStatusManager::full_status_oid(sc->source_zone, sync_pair.source_bs.bucket, sync_pair.dest_bucket)), tn(env->sync_tracer->add_node(_tn_parent, "bucket", SSTR(bucket_str{_sync_pair.dest_bucket} << "<-" << bucket_shard_str{_sync_pair.source_bs} ))) { } int operate(const DoutPrefixProvider *dpp) override; }; static RGWCoroutine* sync_bucket_shard_cr(RGWDataSyncCtx* sc, boost::intrusive_ptr<const RGWContinuousLeaseCR> lease, const rgw_bucket_sync_pair_info& sync_pair, std::optional<uint64_t> gen, const RGWSyncTraceNodeRef& tn, ceph::real_time* progress) { return new RGWSyncBucketCR(sc, std::move(lease), sync_pair, gen, tn, progress); } #define RELEASE_LOCK(cr) \ if (cr) {cr->go_down(); drain_all(); cr.reset();} int RGWSyncBucketCR::operate(const DoutPrefixProvider *dpp) { reenter(this) { // read source/destination bucket info yield call(new RGWSyncGetBucketInfoCR(env, sync_pair.source_bs.bucket, &sync_pipe.source_bucket_info, &sync_pipe.source_bucket_attrs, tn)); if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{sync_pair.source_bs.bucket})); return set_cr_error(retcode); } yield call(new RGWSyncGetBucketInfoCR(env, sync_pair.dest_bucket, &sync_pipe.dest_bucket_info, &sync_pipe.dest_bucket_attrs, tn)); if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to retrieve bucket info for bucket=" << bucket_str{sync_pair.source_bs.bucket})); return set_cr_error(retcode); } 
sync_pipe.info = sync_pair; // read bucket sync status using ReadCR = RGWSimpleRadosReadCR<rgw_bucket_sync_status>; using WriteCR = RGWSimpleRadosWriteCR<rgw_bucket_sync_status>; objv.clear(); yield call(new ReadCR(dpp, env->driver, status_obj, &bucket_status, false, &objv)); if (retcode == -ENOENT) { // if the full sync status object didn't exist yet, run the backward // compatability logic in InitBucketFullSyncStatusCR below. if it did // exist, a `bucket sync init` probably requested its re-initialization, // and shouldn't try to resume incremental sync init_check_compat = true; // use exclusive create to set state=Init objv.generate_new_write_ver(cct); yield call(new WriteCR(dpp, env->driver, status_obj, bucket_status, &objv, true)); tn->log(20, "bucket status object does not exist, create a new one"); if (retcode == -EEXIST) { // raced with another create, read its status tn->log(20, "raced with another create, read its status"); objv.clear(); yield call(new ReadCR(dpp, env->driver, status_obj, &bucket_status, false, &objv)); } } if (retcode < 0) { tn->log(20, SSTR("ERROR: failed to read bucket status object. error: " << retcode)); return set_cr_error(retcode); } do { tn->log(20, SSTR("sync status for source bucket: " << bucket_status.state << ". lease is: " << (bucket_lease_cr ? "taken" : "not taken") << ". 
stop indications is: " << bucket_stopped)); if (bucket_status.state != BucketSyncState::Incremental || bucket_stopped) { if (!bucket_lease_cr) { bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->driver, status_obj, lock_name, lock_duration, this, &sc->lcc)); yield spawn(bucket_lease_cr.get(), false); while (!bucket_lease_cr->is_locked()) { if (bucket_lease_cr->is_done()) { tn->log(5, "failed to take lease"); set_status("lease lock failed, early abort"); drain_all(); return set_cr_error(bucket_lease_cr->get_ret_status()); } tn->log(5, "waiting on bucket lease"); yield set_sleeping(true); } } // if state is Init or Stopped, we query the remote RGW for ther state yield call(new RGWReadRemoteBucketIndexLogInfoCR(sc, sync_pair.source_bs.bucket, &info)); if (retcode < 0) { RELEASE_LOCK(bucket_lease_cr); return set_cr_error(retcode); } if (info.syncstopped) { // remote indicates stopped state tn->log(20, "remote bilog indicates that sync was stopped"); // if state was incremental, remove all per-shard status objects if (bucket_status.state == BucketSyncState::Incremental) { yield { const auto num_shards = bucket_status.shards_done_with_gen.size(); const auto gen = bucket_status.incremental_gen; call(new RemoveBucketShardStatusCollectCR(sc, sync_pair, gen, num_shards)); } } // check if local state is "stopped" objv.clear(); yield call(new ReadCR(dpp, env->driver, status_obj, &bucket_status, false, &objv)); if (retcode < 0) { tn->log(20, SSTR("ERROR: failed to read status before writing 'stopped'. error: " << retcode)); RELEASE_LOCK(bucket_lease_cr); return set_cr_error(retcode); } if (bucket_status.state != BucketSyncState::Stopped) { // make sure that state is changed to stopped localy bucket_status.state = BucketSyncState::Stopped; yield call(new WriteCR(dpp, env->driver, status_obj, bucket_status, &objv, false)); if (retcode < 0) { tn->log(20, SSTR("ERROR: failed to write 'stopped' status. 
error: " << retcode)); RELEASE_LOCK(bucket_lease_cr); return set_cr_error(retcode); } } RELEASE_LOCK(bucket_lease_cr); return set_cr_done(); } if (bucket_stopped) { tn->log(20, SSTR("ERROR: switched from 'stop' to 'start' sync. while state is: " << bucket_status.state)); bucket_stopped = false; bucket_status.state = BucketSyncState::Init; } } if (bucket_status.state != BucketSyncState::Incremental) { // if the state wasn't Incremental, take a bucket-wide lease to prevent // different shards from duplicating the init and full sync if (!bucket_lease_cr) { bucket_lease_cr.reset(new RGWContinuousLeaseCR(env->async_rados, env->driver, status_obj, lock_name, lock_duration, this, &sc->lcc)); yield spawn(bucket_lease_cr.get(), false); while (!bucket_lease_cr->is_locked()) { if (bucket_lease_cr->is_done()) { tn->log(5, "failed to take lease"); set_status("lease lock failed, early abort"); drain_all(); return set_cr_error(bucket_lease_cr->get_ret_status()); } tn->log(5, "waiting on bucket lease"); yield set_sleeping(true); } } // reread the status after acquiring the lock objv.clear(); yield call(new ReadCR(dpp, env->driver, status_obj, &bucket_status, false, &objv)); if (retcode < 0) { RELEASE_LOCK(bucket_lease_cr); tn->log(20, SSTR("ERROR: reading the status after acquiring the lock failed. error: " << retcode)); return set_cr_error(retcode); } tn->log(20, SSTR("status after acquiring the lock is: " << bucket_status.state)); yield call(new InitBucketFullSyncStatusCR(sc, sync_pair, status_obj, bucket_status, objv, sync_pipe.source_bucket_info, init_check_compat, info)); if (retcode < 0) { tn->log(20, SSTR("ERROR: init full sync failed. 
error: " << retcode)); RELEASE_LOCK(bucket_lease_cr); return set_cr_error(retcode); } } assert(bucket_status.state == BucketSyncState::Incremental || bucket_status.state == BucketSyncState::Full); if (bucket_status.state == BucketSyncState::Full) { assert(bucket_lease_cr); yield call(new RGWBucketFullSyncCR(sc, sync_pipe, status_obj, bucket_lease_cr, bucket_status, tn, objv)); if (retcode < 0) { tn->log(20, SSTR("ERROR: full sync failed. error: " << retcode)); RELEASE_LOCK(bucket_lease_cr); return set_cr_error(retcode); } } if (bucket_status.state == BucketSyncState::Incremental) { // lease not required for incremental sync RELEASE_LOCK(bucket_lease_cr); assert(sync_pair.source_bs.shard_id >= 0); // if a specific gen was requested, compare that to the sync status if (gen) { current_gen = bucket_status.incremental_gen; source_bs = sync_pair.source_bs; if (*gen > current_gen) { /* In case the data log entry is missing for previous gen, it may * not be marked complete and the sync can get stuck. To avoid it, * may be we can add this (shardid, gen) to error repo to force * sync and mark that shard as completed. 
*/ pool = sc->env->svc->zone->get_zone_params().log_pool; if ((static_cast<std::size_t>(source_bs.shard_id) < bucket_status.shards_done_with_gen.size()) && !bucket_status.shards_done_with_gen[source_bs.shard_id]) { // use the error repo and sync status timestamp from the datalog shard corresponding to source_bs error_repo = datalog_oid_for_error_repo(sc, sc->env->driver, pool, source_bs); yield call(rgw::error_repo::write_cr(sc->env->driver->svc()->rados, error_repo, rgw::error_repo::encode_key(source_bs, current_gen), ceph::real_clock::zero())); if (retcode < 0) { tn->log(0, SSTR("ERROR: failed to log prev gen entry (bucket=" << source_bs.bucket << ", shard_id=" << source_bs.shard_id << ", gen=" << current_gen << " in error repo: retcode=" << retcode)); } else { tn->log(20, SSTR("logged prev gen entry (bucket=" << source_bs.bucket << ", shard_id=" << source_bs.shard_id << ", gen=" << current_gen << " in error repo: retcode=" << retcode)); } } retcode = -EAGAIN; tn->log(10, SSTR("ERROR: requested sync of future generation " << *gen << " > " << current_gen << ", returning " << retcode << " for later retry")); return set_cr_error(retcode); } else if (*gen < current_gen) { tn->log(10, SSTR("WARNING: requested sync of past generation " << *gen << " < " << current_gen << ", returning success")); return set_cr_done(); } } if (static_cast<std::size_t>(sync_pair.source_bs.shard_id) >= bucket_status.shards_done_with_gen.size()) { tn->log(1, SSTR("bucket shard " << sync_pair.source_bs << " index out of bounds")); return set_cr_done(); // return success so we don't retry } if (bucket_status.shards_done_with_gen[sync_pair.source_bs.shard_id]) { tn->log(10, SSTR("bucket shard " << sync_pair.source_bs << " of gen " << gen << " already synced.")); return set_cr_done(); } yield call(new RGWSyncBucketShardCR(sc, data_lease_cr, sync_pair, sync_pipe, bucket_stopped, bucket_status.incremental_gen, tn, progress)); if (retcode < 0) { tn->log(20, SSTR("ERROR: incremental sync failed. 
error: " << retcode)); return set_cr_error(retcode); } } // loop back to previous states unless incremental sync returns normally } while (bucket_status.state != BucketSyncState::Incremental || bucket_stopped); return set_cr_done(); } return 0; } int RGWBucketPipeSyncStatusManager::do_init(const DoutPrefixProvider *dpp, std::ostream* ostr) { int ret = http_manager.start(); if (ret < 0) { ldpp_dout(this, 0) << "failed in http_manager.start() ret=" << ret << dendl; return ret; } sync_module.reset(new RGWDefaultSyncModuleInstance()); auto async_rados = driver->svc()->rados->get_async_processor(); sync_env.init(this, driver->ctx(), driver, driver->svc(), async_rados, &http_manager, error_logger.get(), driver->getRados()->get_sync_tracer(), sync_module, nullptr); sync_env.ostr = ostr; rgw_sync_pipe_info_set pipes; ret = cr_mgr.run(dpp, new RGWGetBucketPeersCR(&sync_env, dest_bucket, source_zone, source_bucket, &pipes, sync_env.sync_tracer->root_node)); if (ret < 0) { ldpp_dout(this, 0) << "failed to get bucket source peers info: (ret=" << ret << "): " << cpp_strerror(-ret) << dendl; return ret; } if (pipes.empty()) { ldpp_dout(this, 0) << "No peers. This is not a valid multisite configuration." 
<< dendl;
    return -EINVAL;
  }

  // Build one `source` entry per discovered sync pipe: resolve the REST
  // connection and zone record for the peer, then record the pipe endpoints.
  for (auto& pipe : pipes) {
    auto& szone = pipe.source.zone;

    auto conn = driver->svc()->zone->get_zone_conn(szone);
    if (!conn) {
      ldpp_dout(this, 0) << "connection object to zone " << szone << " does not exist" << dendl;
      return -EINVAL;
    }

    RGWZone* z;
    if (!(z = driver->svc()->zone->find_zone(szone))) {
      ldpp_dout(this, 0) << "zone " << szone << " does not exist" << dendl;
      return -EINVAL;
    }

    sources.emplace_back(&sync_env, szone, conn,
                         pipe.source.get_bucket_info(),
                         pipe.target.get_bucket(),
                         pipe.handler, z->name);
  }

  return 0;
}

// Query the remote zone's bilog for generation bounds and shard count.
// Any of the three output pointers may be null if the caller does not
// need that value.
int RGWBucketPipeSyncStatusManager::remote_info(const DoutPrefixProvider *dpp,
                                                source& s,
                                                uint64_t* oldest_gen,
                                                uint64_t* latest_gen,
                                                uint64_t* num_shards)
{
  rgw_bucket_index_marker_info remote_info;
  BucketIndexShardsManager remote_markers;
  auto r = rgw_read_remote_bilog_info(dpp, s.sc.conn, s.info.bucket,
                                      remote_info, remote_markers, null_yield);
  if (r < 0) {
    ldpp_dout(dpp, 0) << __PRETTY_FUNCTION__ << ":" << __LINE__
                      << " rgw_read_remote_bilog_info: r=" << r << dendl;
    return r;
  }
  if (oldest_gen)
    *oldest_gen = remote_info.oldest_gen;

  if (latest_gen)
    *latest_gen = remote_info.latest_gen;

  if (num_shards)
    // shard count is inferred from the number of markers returned
    *num_shards = remote_markers.get().size();

  return 0;
}

// Factory: construct a manager and run do_init(); returns the manager on
// success or the negative error code from do_init() on failure.
tl::expected<std::unique_ptr<RGWBucketPipeSyncStatusManager>, int>
RGWBucketPipeSyncStatusManager::construct(
  const DoutPrefixProvider* dpp,
  rgw::sal::RadosStore* driver,
  std::optional<rgw_zone_id> source_zone,
  std::optional<rgw_bucket> source_bucket,
  const rgw_bucket& dest_bucket,
  std::ostream* ostr)
{
  std::unique_ptr<RGWBucketPipeSyncStatusManager> self{
    new RGWBucketPipeSyncStatusManager(driver, source_zone, source_bucket,
                                       dest_bucket)};
  auto r = self->do_init(dpp, ostr);
  if (r < 0) {
    return tl::unexpected(r);
  }
  return self;
}

// Reset the full-sync status object of every source pipe by overwriting it
// with a default-constructed rgw_bucket_sync_status.
int RGWBucketPipeSyncStatusManager::init_sync_status(
  const DoutPrefixProvider *dpp)
{
  // Just running one at a time saves us from buildup/teardown and in
  // practice we only do one zone at a time.
  for (auto& source : sources) {
    list<RGWCoroutinesStack*> stacks;
    RGWCoroutinesStack *stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr);
    pretty_print(source.sc.env, "Initializing sync state of bucket {} with zone {}.\n",
                 source.info.bucket.name, source.zone_name);
    // Overwrite the per-source full-status object with a fresh (default)
    // rgw_bucket_sync_status, effectively restarting sync from scratch.
    stack->call(new RGWSimpleRadosWriteCR<rgw_bucket_sync_status>(
                  dpp, source.sc.env->driver,
                  {sync_env.svc->zone->get_zone_params().log_pool,
                   full_status_oid(source.sc.source_zone, source.info.bucket, source.dest)},
                  rgw_bucket_sync_status{}));
    stacks.push_back(stack);
    auto r = cr_mgr.run(dpp, stacks);
    if (r < 0) {
      // failure is reported to the operator but does not abort the loop;
      // remaining sources are still initialized
      pretty_print(source.sc.env,
                   "Initialization of sync state for bucket {} with zone {} "
                   "failed with error {}\n",
                   source.info.bucket.name, source.zone_name, cpp_strerror(r));
    }
  }
  return 0;
}

// Read the per-shard incremental sync status of this bucket from the
// source selected by source_zone. Returns a map of shard id -> status,
// or a negative error code wrapped in tl::unexpected.
tl::expected<std::map<int, rgw_bucket_shard_sync_info>, int>
RGWBucketPipeSyncStatusManager::read_sync_status(
  const DoutPrefixProvider *dpp)
{
  std::map<int, rgw_bucket_shard_sync_info> sync_status;
  list<RGWCoroutinesStack *> stacks;

  auto sz = sources.begin();

  if (source_zone) {
    sz = std::find_if(sources.begin(), sources.end(),
                      [this](const source& s) {
                        return s.sc.source_zone == *source_zone;
                      });
    if (sz == sources.end()) {
      ldpp_dout(this, 0) << "ERROR: failed to find source zone: "
                         << *source_zone << dendl;
      return tl::unexpected(-ENOENT);
    }
  } else {
    // NOTE(review): the message claims we fall back to the first source
    // zone, but we return -ENOENT unconditionally right after logging it
    // (and `sz` would be sources.end() if `sources` were empty). Confirm
    // whether the intent is to require --source-zone here or to actually
    // proceed with *sz.
    ldpp_dout(this, 5) << "No source zone specified, using source zone: "
                       << sz->sc.source_zone << dendl;
    return tl::unexpected(-ENOENT);
  }

  // ask the remote for the latest generation and the shard count of that gen
  uint64_t num_shards, latest_gen;
  auto ret = remote_info(dpp, *sz, nullptr, &latest_gen, &num_shards);
  if (ret < 0) {
    ldpp_dout(this, 5) << "Unable to get remote info: " << ret << dendl;
    return tl::unexpected(ret);
  }

  auto stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr);
  std::vector<rgw_bucket_sync_pair_info> pairs(num_shards);
  for (auto shard = 0u; shard < num_shards; ++shard) {
    auto& pair = pairs[shard];
    pair.source_bs.bucket = sz->info.bucket;
    pair.dest_bucket = sz->dest;
    pair.source_bs.shard_id = shard;
    // one read coroutine per shard; results land in sync_status[shard]
    stack->call(new RGWReadBucketPipeSyncStatusCoroutine(
                  &sz->sc, pair, &sync_status[shard], nullptr, latest_gen));
  }
  stacks.push_back(stack);
  ret = cr_mgr.run(dpp, stacks);
  if (ret < 0) {
    ldpp_dout(this, 0) << "ERROR: failed to read sync status for "
                       << bucket_str{dest_bucket} << dendl;
    return tl::unexpected(ret);
  }
  return sync_status;
}

namespace rgw::bucket_sync_run {

// Retry-loop over calls to sync_bucket_shard_cr
class ShardCR : public RGWCoroutine {
  static constexpr auto allowed_retries = 10u;

  RGWDataSyncCtx& sc;
  const rgw_bucket_sync_pair_info& pair;
  const uint64_t gen;
  unsigned retries = 0;

  // progress timestamps from the shard CR; used to reset the retry
  // counter whenever a retry made forward progress
  ceph::real_time prev_progress;
  ceph::real_time progress;

public:
  ShardCR(RGWDataSyncCtx& sc, const rgw_bucket_sync_pair_info& pair,
          const uint64_t gen)
    : RGWCoroutine(sc.cct), sc(sc), pair(pair), gen(gen) {}

  int operate(const DoutPrefixProvider *dpp) override {
    reenter(this) {
      // Since all errors (except ECANCELED) are considered retryable,
      // retry other errors so long as we're making progress.
for (retries = 0u, retcode = -EDOM; (retries < allowed_retries) && (retcode != 0); ++retries) { ldpp_dout(dpp, 5) << "ShardCR: syncing bucket shard on: " << "zone=" << sc.source_zone << ", bucket=" << pair.source_bs.bucket.name << ", shard=" << pair.source_bs.shard_id << ", gen=" << gen << dendl; yield call(sync_bucket_shard_cr(&sc, nullptr, pair, gen, sc.env->sync_tracer->root_node, &progress)); if (retcode == -ECANCELED) { ldpp_dout(dpp, -1) << "ERROR: Got -ECANCELED for " << pair.source_bs << dendl; drain_all(); return set_cr_error(retcode); } else if (retcode < 0) { ldpp_dout(dpp, 5) << "WARNING: Got error, retcode=" << retcode << " for " << pair.source_bs << "on retry " << retries + 1 << " of " << allowed_retries << " allowed" << dendl; // Reset the retry counter if we made any progress if (progress != prev_progress) { retries = 0; } prev_progress = progress; } } if (retcode < 0) { ldpp_dout(dpp, -1) << "ERROR: Exhausted retries for " << pair.source_bs << " retcode=" << retcode << dendl; drain_all(); return set_cr_error(retcode); } drain_all(); return set_cr_done(); } return 0; } }; // Loop over calls to ShardCR with limited concurrency class GenCR : public RGWShardCollectCR { static constexpr auto MAX_CONCURRENT_SHARDS = 64; RGWDataSyncCtx& sc; const uint64_t gen; std::vector<rgw_bucket_sync_pair_info> pairs; decltype(pairs)::const_iterator iter; public: GenCR(RGWDataSyncCtx& sc, const rgw_bucket& source, const rgw_bucket& dest, const uint64_t gen, const uint64_t shards, const RGWBucketSyncFlowManager::pipe_handler& handler) : RGWShardCollectCR(sc.cct, MAX_CONCURRENT_SHARDS), sc(sc), gen(gen) { pairs.resize(shards); for (auto shard = 0u; shard < shards; ++shard) { auto& pair = pairs[shard]; pair.handler = handler; pair.source_bs.bucket = source; pair.dest_bucket = dest; pair.source_bs.shard_id = shard; } iter = pairs.cbegin(); assert(pairs.size() == shards); } virtual bool spawn_next() override { if (iter == pairs.cend()) { return false; } spawn(new 
ShardCR(sc, *iter, gen), false); ++iter; return true; } int handle_result(int r) override { if (r < 0) { ldpp_dout(sc.env->dpp, 4) << "ERROR: Error syncing shard: " << cpp_strerror(r) << dendl; } return r; } }; // Read sync status, loop over calls to GenCR class SourceCR : public RGWCoroutine { RGWDataSyncCtx& sc; const RGWBucketInfo& info; const rgw_bucket& dest; const RGWBucketSyncFlowManager::pipe_handler& handler; const rgw_raw_obj status_obj{ sc.env->svc->zone->get_zone_params().log_pool, RGWBucketPipeSyncStatusManager::full_status_oid(sc.source_zone, info.bucket, dest)}; BucketSyncState state = BucketSyncState::Incremental; uint64_t gen = 0; uint64_t num_shards = 0; rgw_bucket_sync_status status; std::string zone_name; public: SourceCR(RGWDataSyncCtx& sc, const RGWBucketInfo& info, const rgw_bucket& dest, const RGWBucketSyncFlowManager::pipe_handler& handler, const std::string& zone_name) : RGWCoroutine(sc.cct), sc(sc), info(info), dest(dest), handler(handler), zone_name(zone_name) {} int operate(const DoutPrefixProvider *dpp) override { reenter(this) { // Get the source's status. In incremental sync, this gives us // the generation and shard count that is next needed to be run. yield call(new RGWSimpleRadosReadCR<rgw_bucket_sync_status>( dpp, sc.env->driver, status_obj, &status)); if (retcode < 0) { ldpp_dout(dpp, -1) << "ERROR: Unable to fetch status for zone=" << sc.source_zone << " retcode=" << retcode << dendl; drain_all(); return set_cr_error(retcode); } if (status.state == BucketSyncState::Stopped) { // Nothing to do. pretty_print(sc.env, "Sync of bucket {} from source zone {} is in state Stopped. " "Nothing to do.\n", dest.name, zone_name); ldpp_dout(dpp, 5) << "SourceCR: Bucket is in state Stopped, returning." 
<< dendl; drain_all(); return set_cr_done(); } do { state = status.state; gen = status.incremental_gen; num_shards = status.shards_done_with_gen.size(); ldpp_dout(dpp, 5) << "SourceCR: " << "state=" << state << ", gen=" << gen << ", num_shards=" << num_shards << dendl; // Special case to handle full sync. Since full sync no longer // uses shards and has no generations, we sync shard zero, // though use the current generation so a following // incremental sync can carry on. if (state != BucketSyncState::Incremental) { pretty_print(sc.env, "Beginning full sync of bucket {} from source zone {}.\n", dest.name, zone_name); ldpp_dout(dpp, 5) << "SourceCR: Calling GenCR with " << "gen=" << gen << ", num_shards=" << 1 << dendl; yield call(new GenCR(sc, info.bucket, dest, gen, 1, handler)); } else { pretty_print(sc.env, "Beginning incremental sync of bucket {}, generation {} from source zone {}.\n", dest.name, gen, zone_name); ldpp_dout(dpp, 5) << "SourceCR: Calling GenCR with " << "gen=" << gen << ", num_shards=" << num_shards << dendl; yield call(new GenCR(sc, info.bucket, dest, gen, num_shards, handler)); } if (retcode < 0) { ldpp_dout(dpp, -1) << "ERROR: Giving up syncing from " << sc.source_zone << " retcode=" << retcode << dendl; drain_all(); return set_cr_error(retcode); } pretty_print(sc.env, "Completed.\n"); yield call(new RGWSimpleRadosReadCR<rgw_bucket_sync_status>( dpp, sc.env->driver, status_obj, &status)); if (retcode < 0) { ldpp_dout(dpp, -1) << "ERROR: Unable to fetch status for zone=" << sc.source_zone << " retcode=" << retcode << dendl; drain_all(); return set_cr_error(retcode); } // Repeat until we have done an incremental run and the // generation remains unchanged. 
      ldpp_dout(dpp, 5) << "SourceCR: " << "state=" << state
                        << ", gen=" << gen
                        << ", num_shards=" << num_shards
                        << ", status.state=" << status.state
                        << ", status.incremental_gen=" << status.incremental_gen
                        << ", status.shards_done_with_gen.size()=" << status.shards_done_with_gen.size()
                        << dendl;
      // repeat until an incremental pass completes with the generation
      // unchanged, i.e. no newer generation appeared while we were syncing
      } while (state != BucketSyncState::Incremental ||
               gen != status.incremental_gen);
      drain_all();
      return set_cr_done();
    }
    return 0;
  }
};

} // namespace rgw::bucket_sync_run

// Drive a full `bucket sync run`: one SourceCR per configured source,
// all executed on their own coroutine stacks.
int RGWBucketPipeSyncStatusManager::run(const DoutPrefixProvider *dpp)
{
  list<RGWCoroutinesStack *> stacks;
  for (auto& source : sources) {
    auto stack = new RGWCoroutinesStack(driver->ctx(), &cr_mgr);
    stack->call(new rgw::bucket_sync_run::SourceCR(
                  source.sc, source.info, source.dest, source.handler,
                  source.zone_name));
    stacks.push_back(stack);
  }
  auto ret = cr_mgr.run(dpp, stacks);
  if (ret < 0) {
    ldpp_dout(this, 0) << "ERROR: Sync unsuccessful on bucket "
                       << bucket_str{dest_bucket} << dendl;
  }
  return ret;
}

unsigned RGWBucketPipeSyncStatusManager::get_subsys() const
{
  return dout_subsys;
}

// Log-line prefix: abbreviated (8-char) source zone id plus destination
// bucket; '*' stands for "any source zone".
std::ostream& RGWBucketPipeSyncStatusManager::gen_prefix(std::ostream& out) const
{
  auto zone = std::string_view{source_zone.value_or(rgw_zone_id("*")).id};
  return out << "bucket sync zone:" << zone.substr(0, 8)
             << " bucket:" << dest_bucket << ' ';
}

// Object name of the bucket-wide (full) sync status object. The source
// bucket key is appended only when it differs from the destination.
string RGWBucketPipeSyncStatusManager::full_status_oid(const rgw_zone_id& source_zone,
                                                       const rgw_bucket& source_bucket,
                                                       const rgw_bucket& dest_bucket)
{
  if (source_bucket == dest_bucket) {
    return bucket_full_status_oid_prefix + "." + source_zone.id + ":" + dest_bucket.get_key();
  } else {
    return bucket_full_status_oid_prefix + "." + source_zone.id + ":" + dest_bucket.get_key() +
           ":" + source_bucket.get_key();
  }
}

// Suffix encoding a log generation; generation 0 maps to the empty string
// so that pre-generation object names stay unchanged.
inline std::string generation_token(uint64_t gen)
{
  return (gen == 0) ? "" : (":" + std::to_string(gen));
}

// Object name of the per-shard incremental sync status object.
string RGWBucketPipeSyncStatusManager::inc_status_oid(const rgw_zone_id& source_zone,
                                                      const rgw_bucket_sync_pair_info& sync_pair,
                                                      uint64_t gen)
{
  if (sync_pair.source_bs.bucket == sync_pair.dest_bucket) {
    return bucket_status_oid_prefix + "." + source_zone.id + ":" +
           sync_pair.source_bs.get_key() + generation_token(gen);
  } else {
    return bucket_status_oid_prefix + "." + source_zone.id + ":" +
           sync_pair.dest_bucket.get_key() + ":" + sync_pair.source_bs.get_key() +
           generation_token(gen);
  }
}

// Object name used to track per-object sync status for a pipe.
string RGWBucketPipeSyncStatusManager::obj_status_oid(const rgw_bucket_sync_pipe& sync_pipe,
                                                      const rgw_zone_id& source_zone,
                                                      const rgw_obj& obj)
{
  string prefix = object_status_oid_prefix + "." + source_zone.id + ":" + obj.bucket.get_key();
  if (sync_pipe.source_bucket_info.bucket != sync_pipe.dest_bucket_info.bucket) {
    prefix += string("/") + sync_pipe.dest_bucket_info.bucket.get_key();
  }
  return prefix + ":" + obj.key.name + ":" + obj.key.instance;
}

// Fetch remote bilog info for a bucket instance over the admin REST API
// and parse the returned max markers into per-shard markers.
int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp,
                               RGWRESTConn* conn,
                               const rgw_bucket& bucket,
                               rgw_bucket_index_marker_info& info,
                               BucketIndexShardsManager& markers,
                               optional_yield y)
{
  const auto instance_key = bucket.get_key();
  const rgw_http_param_pair params[] = {
    { "type" , "bucket-index" },
    { "bucket-instance", instance_key.c_str() },
    { "info" , nullptr },
    { nullptr, nullptr }
  };
  int r = conn->get_json_resource(dpp, "/admin/log/", params, y, info);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "failed to fetch remote log markers: " << cpp_strerror(r) << dendl;
    return r;
  }
  // parse shard markers
  r = markers.from_string(info.max_marker, -1);
  if (r < 0) {
    ldpp_dout(dpp, -1) << "failed to decode remote log markers" << dendl;
    return r;
  }
  return 0;
}

// Collect per-shard sync status with bounded concurrency; shard_id is
// advanced in spawn_next() as the status vector is walked.
class RGWCollectBucketSyncStatusCR : public RGWShardCollectCR {
  static constexpr int max_concurrent_shards = 16;

  rgw::sal::RadosStore* const driver;
  RGWDataSyncCtx *const sc;
  RGWDataSyncEnv *const env;
  const uint64_t gen;

  rgw_bucket_sync_pair_info
  sync_pair;
  using Vector = std::vector<rgw_bucket_shard_sync_info>;
  Vector::iterator i, end;

  int handle_result(int r) override {
    if (r == -ENOENT) { // ENOENT is not a fatal error
      return 0;
    }
    if (r < 0) {
      ldout(cct, 4) << "failed to read bucket shard sync status: "
                    << cpp_strerror(r) << dendl;
    }
    return r;
  }
 public:
  RGWCollectBucketSyncStatusCR(rgw::sal::RadosStore* driver, RGWDataSyncCtx *sc,
                               const rgw_bucket_sync_pair_info& sync_pair,
                               uint64_t gen,
                               Vector *status)
    : RGWShardCollectCR(sc->cct, max_concurrent_shards),
      driver(driver), sc(sc), env(sc->env), gen(gen), sync_pair(sync_pair),
      i(status->begin()), end(status->end())
  {}

  // spawn one read coroutine per output slot, bumping the shard id each time
  bool spawn_next() override {
    if (i == end) {
      return false;
    }
    spawn(new RGWReadBucketPipeSyncStatusCoroutine(sc, sync_pair, &*i, nullptr, gen), false);
    ++i;
    ++sync_pair.source_bs.shard_id;
    return true;
  }
};

// Synchronously read and decode the bucket-wide full sync status object
// for the given pipe. Returns 0 on success, negative errno on read
// failure, or -EIO if the stored blob fails to decode.
int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp,
                                     rgw::sal::RadosStore *driver,
                                     const rgw_sync_bucket_pipe& pipe,
                                     rgw_bucket_sync_status *status,
                                     optional_yield y)
{
  auto get_oid = RGWBucketPipeSyncStatusManager::full_status_oid;
  const rgw_raw_obj obj{driver->svc()->zone->get_zone_params().log_pool,
                        get_oid(*pipe.source.zone, *pipe.source.bucket, *pipe.dest.bucket)};

  auto svc = driver->svc()->sysobj;
  auto sysobj = svc->get_obj(obj);
  bufferlist bl;
  int ret = sysobj.rop().read(dpp, &bl, y);
  if (ret < 0)
    return ret;

  try {
    auto iter = bl.cbegin();
    using ceph::decode;
    rgw_bucket_sync_status result;
    decode(result, iter);
    *status = result;
    return 0;
  } catch (const buffer::error& err) {
    lderr(svc->ctx()) << "error decoding " << obj << ": " << err.what() << dendl;
    return -EIO;
  }
}

// Read the incremental sync status of every shard (status->size() shards)
// for the given pipe/generation. All four pipe endpoints must be set.
int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp,
                                    rgw::sal::RadosStore *driver,
                                    const rgw_sync_bucket_pipe& pipe,
                                    uint64_t gen,
                                    std::vector<rgw_bucket_shard_sync_info> *status)
{
  if (!pipe.source.zone ||
      !pipe.source.bucket ||
      !pipe.dest.zone ||
      !pipe.dest.bucket) {
    return -EINVAL;
  }

  rgw_bucket_sync_pair_info sync_pair;
  sync_pair.source_bs.bucket = *pipe.source.bucket;
  sync_pair.source_bs.shard_id = 0;
  sync_pair.dest_bucket = *pipe.dest.bucket;

  // build a throwaway sync env/ctx just for this read; no error logger,
  // tracer or sync module is needed
  RGWDataSyncEnv env;
  RGWSyncModuleInstanceRef module; // null sync module
  env.init(dpp, driver->ctx(), driver, driver->svc(),
           driver->svc()->rados->get_async_processor(), nullptr, nullptr,
           nullptr, module, nullptr);

  RGWDataSyncCtx sc;
  sc.init(&env, nullptr, *pipe.source.zone);

  RGWCoroutinesManager crs(driver->ctx(), driver->getRados()->get_cr_registry());
  return crs.run(dpp, new RGWCollectBucketSyncStatusCR(driver, &sc, sync_pair,
                                                       gen, status));
}

// --- test-instance generators (used by ceph-dencoder) ---

void rgw_data_sync_info::generate_test_instances(list<rgw_data_sync_info*>& o)
{
  auto info = new rgw_data_sync_info;
  info->state = rgw_data_sync_info::StateBuildingFullSyncMaps;
  info->num_shards = 8;
  o.push_back(info);
  o.push_back(new rgw_data_sync_info);
}

void rgw_data_sync_marker::generate_test_instances(list<rgw_data_sync_marker*>& o)
{
  auto marker = new rgw_data_sync_marker;
  marker->state = rgw_data_sync_marker::IncrementalSync;
  marker->marker = "01234";
  marker->pos = 5;
  o.push_back(marker);
  o.push_back(new rgw_data_sync_marker);
}

void rgw_data_sync_status::generate_test_instances(list<rgw_data_sync_status*>& o)
{
  o.push_back(new rgw_data_sync_status);
}

// --- JSON encode/decode for sync-status types ---

void rgw_bucket_shard_full_sync_marker::dump(Formatter *f) const
{
  encode_json("position", position, f);
  encode_json("count", count, f);
}

void rgw_bucket_shard_inc_sync_marker::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("position", position, obj);
  JSONDecoder::decode_json("timestamp", timestamp, obj);
}

void rgw_bucket_shard_inc_sync_marker::dump(Formatter *f) const
{
  encode_json("position", position, f);
  encode_json("timestamp", timestamp, f);
}

void rgw_bucket_shard_sync_info::decode_json(JSONObj *obj)
{
  std::string s;
  JSONDecoder::decode_json("status", s, obj);
  // unrecognized status strings deliberately fall back to StateInit
  if (s == "full-sync") {
    state = StateFullSync;
  } else if (s == "incremental-sync") {
    state = StateIncrementalSync;
  } else if (s == "stopped") {
    state = StateStopped;
  } else {
    state = StateInit;
  }
  JSONDecoder::decode_json("inc_marker", inc_marker, obj);
}

void rgw_bucket_shard_full_sync_marker::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("position", position, obj);
  JSONDecoder::decode_json("count", count, obj);
}

void rgw_bucket_shard_sync_info::dump(Formatter *f) const
{
  const char *s{nullptr};
  switch ((SyncState)state) {
    case StateInit:
      s = "init";
      break;
    case StateFullSync:
      s = "full-sync";
      break;
    case StateIncrementalSync:
      s = "incremental-sync";
      break;
    case StateStopped:
      s = "stopped";
      break;
    default:
      s = "unknown";
      break;
  }
  encode_json("status", s, f);
  encode_json("inc_marker", inc_marker, f);
}

void rgw_bucket_full_sync_status::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("position", position, obj);
  JSONDecoder::decode_json("count", count, obj);
}

void rgw_bucket_full_sync_status::dump(Formatter *f) const
{
  encode_json("position", position, f);
  encode_json("count", count, f);
}

// free-function JSON codec for the BucketSyncState enum; string forms
// must stay in step with decode_json_obj() below
void encode_json(const char *name, BucketSyncState state, Formatter *f)
{
  switch (state) {
    case BucketSyncState::Init:
      encode_json(name, "init", f);
      break;
    case BucketSyncState::Full:
      encode_json(name, "full-sync", f);
      break;
    case BucketSyncState::Incremental:
      encode_json(name, "incremental-sync", f);
      break;
    case BucketSyncState::Stopped:
      encode_json(name, "stopped", f);
      break;
    default:
      encode_json(name, "unknown", f);
      break;
  }
}

void decode_json_obj(BucketSyncState& state, JSONObj *obj)
{
  std::string s;
  decode_json_obj(s, obj);
  // unknown strings (including "init" and "unknown") map to Init
  if (s == "full-sync") {
    state = BucketSyncState::Full;
  } else if (s == "incremental-sync") {
    state = BucketSyncState::Incremental;
  } else if (s == "stopped") {
    state = BucketSyncState::Stopped;
  } else {
    state = BucketSyncState::Init;
  }
}

void rgw_bucket_sync_status::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("state", state, obj);
  JSONDecoder::decode_json("full", full, obj);
  JSONDecoder::decode_json("incremental_gen", incremental_gen, obj);
}

void rgw_bucket_sync_status::dump(Formatter *f) const
{
  encode_json("state", state, f);
  encode_json("full", full, f);
  encode_json("incremental_gen", incremental_gen, f);
}

void bilog_status_v2::dump(Formatter *f) const
{
  encode_json("sync_status", sync_status, f);
  encode_json("inc_status", inc_status, f);
}

void bilog_status_v2::decode_json(JSONObj *obj)
{
  JSONDecoder::decode_json("sync_status", sync_status, obj);
  JSONDecoder::decode_json("inc_status", inc_status, obj);
}
249,851
35.437509
228
cc
null
ceph-main/src/rgw/driver/rados/rgw_data_sync.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <fmt/format.h> #include <fmt/ostream.h> #include "include/encoding.h" #include "common/ceph_json.h" #include "common/likely.h" #include "rgw_coroutine.h" #include "rgw_cr_rados.h" #include "rgw_http_client.h" #include "rgw_sal_rados.h" #include "rgw_datalog.h" #include "rgw_sync.h" #include "rgw_sync_module.h" #include "rgw_sync_trace.h" #include "rgw_sync_policy.h" #include "rgw_bucket_sync.h" #include "sync_fairness.h" // represents an obligation to sync an entry up a given time struct rgw_data_sync_obligation { rgw_bucket_shard bs; std::optional<uint64_t> gen; std::string marker; ceph::real_time timestamp; bool retry = false; }; inline std::ostream& operator<<(std::ostream& out, const rgw_data_sync_obligation& o) { out << "key=" << o.bs; if (o.gen) { out << '[' << *o.gen << ']'; } if (!o.marker.empty()) { out << " marker=" << o.marker; } if (o.timestamp != ceph::real_time{}) { out << " timestamp=" << o.timestamp; } if (o.retry) { out << " retry"; } return out; } class JSONObj; struct rgw_sync_bucket_pipe; struct rgw_bucket_sync_pair_info { RGWBucketSyncFlowManager::pipe_handler handler; /* responsible for sync filters */ rgw_bucket_shard source_bs; rgw_bucket dest_bucket; }; inline std::ostream& operator<<(std::ostream& out, const rgw_bucket_sync_pair_info& p) { if (p.source_bs.bucket == p.dest_bucket) { return out << p.source_bs; } return out << p.source_bs << "->" << p.dest_bucket; } struct rgw_bucket_sync_pipe { rgw_bucket_sync_pair_info info; RGWBucketInfo source_bucket_info; std::map<std::string, bufferlist> source_bucket_attrs; RGWBucketInfo dest_bucket_info; std::map<std::string, bufferlist> dest_bucket_attrs; RGWBucketSyncFlowManager::pipe_rules_ref& get_rules() { return info.handler.rules; } }; inline std::ostream& operator<<(std::ostream& out, const rgw_bucket_sync_pipe& p) { return out << p.info; } struct rgw_datalog_info { 
uint32_t num_shards; rgw_datalog_info() : num_shards(0) {} void decode_json(JSONObj *obj); }; struct rgw_data_sync_info { enum SyncState { StateInit = 0, StateBuildingFullSyncMaps = 1, StateSync = 2, }; uint16_t state; uint32_t num_shards; uint64_t instance_id{0}; void encode(bufferlist& bl) const { ENCODE_START(2, 1, bl); encode(state, bl); encode(num_shards, bl); encode(instance_id, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); decode(state, bl); decode(num_shards, bl); if (struct_v >= 2) { decode(instance_id, bl); } DECODE_FINISH(bl); } void dump(Formatter *f) const { std::string s; switch ((SyncState)state) { case StateInit: s = "init"; break; case StateBuildingFullSyncMaps: s = "building-full-sync-maps"; break; case StateSync: s = "sync"; break; default: s = "unknown"; break; } encode_json("status", s, f); encode_json("num_shards", num_shards, f); encode_json("instance_id", instance_id, f); } void decode_json(JSONObj *obj) { std::string s; JSONDecoder::decode_json("status", s, obj); if (s == "building-full-sync-maps") { state = StateBuildingFullSyncMaps; } else if (s == "sync") { state = StateSync; } else { state = StateInit; } JSONDecoder::decode_json("num_shards", num_shards, obj); JSONDecoder::decode_json("instance_id", instance_id, obj); } static void generate_test_instances(std::list<rgw_data_sync_info*>& o); rgw_data_sync_info() : state((int)StateInit), num_shards(0) {} }; WRITE_CLASS_ENCODER(rgw_data_sync_info) struct rgw_data_sync_marker { enum SyncState { FullSync = 0, IncrementalSync = 1, }; uint16_t state; std::string marker; std::string next_step_marker; uint64_t total_entries; uint64_t pos; real_time timestamp; rgw_data_sync_marker() : state(FullSync), total_entries(0), pos(0) {} void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(state, bl); encode(marker, bl); encode(next_step_marker, bl); encode(total_entries, bl); encode(pos, bl); encode(timestamp, bl); ENCODE_FINISH(bl); } void 
decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(state, bl); decode(marker, bl); decode(next_step_marker, bl); decode(total_entries, bl); decode(pos, bl); decode(timestamp, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const { const char *s{nullptr}; switch ((SyncState)state) { case FullSync: s = "full-sync"; break; case IncrementalSync: s = "incremental-sync"; break; default: s = "unknown"; break; } encode_json("status", s, f); encode_json("marker", marker, f); encode_json("next_step_marker", next_step_marker, f); encode_json("total_entries", total_entries, f); encode_json("pos", pos, f); encode_json("timestamp", utime_t(timestamp), f); } void decode_json(JSONObj *obj) { std::string s; JSONDecoder::decode_json("status", s, obj); if (s == "full-sync") { state = FullSync; } else if (s == "incremental-sync") { state = IncrementalSync; } JSONDecoder::decode_json("marker", marker, obj); JSONDecoder::decode_json("next_step_marker", next_step_marker, obj); JSONDecoder::decode_json("total_entries", total_entries, obj); JSONDecoder::decode_json("pos", pos, obj); utime_t t; JSONDecoder::decode_json("timestamp", t, obj); timestamp = t.to_real_time(); } static void generate_test_instances(std::list<rgw_data_sync_marker*>& o); }; WRITE_CLASS_ENCODER(rgw_data_sync_marker) struct rgw_data_sync_status { rgw_data_sync_info sync_info; std::map<uint32_t, rgw_data_sync_marker> sync_markers; rgw_data_sync_status() {} void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(sync_info, bl); /* sync markers are encoded separately */ ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(sync_info, bl); /* sync markers are decoded separately */ DECODE_FINISH(bl); } void dump(Formatter *f) const { encode_json("info", sync_info, f); encode_json("markers", sync_markers, f); } void decode_json(JSONObj *obj) { JSONDecoder::decode_json("info", sync_info, obj); JSONDecoder::decode_json("markers", sync_markers, obj); } 
static void generate_test_instances(std::list<rgw_data_sync_status*>& o); }; WRITE_CLASS_ENCODER(rgw_data_sync_status) struct rgw_datalog_entry { std::string key; ceph::real_time timestamp; void decode_json(JSONObj *obj); }; struct rgw_datalog_shard_data { std::string marker; bool truncated; std::vector<rgw_datalog_entry> entries; void decode_json(JSONObj *obj); }; class RGWAsyncRadosProcessor; class RGWDataSyncControlCR; struct rgw_bucket_entry_owner { std::string id; std::string display_name; rgw_bucket_entry_owner() {} rgw_bucket_entry_owner(const std::string& _id, const std::string& _display_name) : id(_id), display_name(_display_name) {} void decode_json(JSONObj *obj); }; class RGWSyncErrorLogger; class RGWRESTConn; class RGWServices; struct RGWDataSyncEnv { const DoutPrefixProvider *dpp{nullptr}; CephContext *cct{nullptr}; rgw::sal::RadosStore* driver{nullptr}; RGWServices *svc{nullptr}; RGWAsyncRadosProcessor *async_rados{nullptr}; RGWHTTPManager *http_manager{nullptr}; RGWSyncErrorLogger *error_logger{nullptr}; RGWSyncTraceManager *sync_tracer{nullptr}; RGWSyncModuleInstanceRef sync_module{nullptr}; PerfCounters* counters{nullptr}; rgw::sync_fairness::BidManager* bid_manager{nullptr}; RGWDataSyncEnv() {} void init(const DoutPrefixProvider *_dpp, CephContext *_cct, rgw::sal::RadosStore* _driver, RGWServices *_svc, RGWAsyncRadosProcessor *_async_rados, RGWHTTPManager *_http_manager, RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& _sync_module, PerfCounters* _counters) { dpp = _dpp; cct = _cct; driver = _driver; svc = _svc; async_rados = _async_rados; http_manager = _http_manager; error_logger = _error_logger; sync_tracer = _sync_tracer; sync_module = _sync_module; counters = _counters; } std::string shard_obj_name(int shard_id); std::string status_oid(); std::ostream* ostr{nullptr}; // For pretty printing progress }; // pretty ostream output for `radosgw-admin bucket sync run` #if FMT_VERSION >= 90000 
template<typename ...T> void pretty_print(const RGWDataSyncEnv* env, fmt::format_string<T...> fmt, T&& ...t) { #else template<typename S, typename ...T> void pretty_print(const RGWDataSyncEnv* env, const S& fmt, T&& ...t) { #endif if (unlikely(!!env->ostr)) { fmt::print(*env->ostr, fmt, std::forward<T>(t)...); env->ostr->flush(); } } /// \brief Adjust concurrency based on latency /// /// Keep a running average of operation latency and scale concurrency /// down when latency rises. class LatencyConcurrencyControl : public LatencyMonitor { static constexpr auto dout_subsys = ceph_subsys_rgw; ceph::coarse_mono_time last_warning; public: CephContext* cct; LatencyConcurrencyControl(CephContext* cct) : cct(cct) {} /// \brief Lower concurrency when latency rises /// /// Since we have multiple spawn windows (data sync overall and /// bucket), accept a number of concurrent operations to spawn and, /// if latency is high, cut it in half. If latency is really high, /// cut it to 1. int64_t adj_concurrency(int64_t concurrency) { using namespace std::literals; auto threshold = (cct->_conf->rgw_sync_lease_period * 1s) / 12; if (avg_latency() >= 2 * threshold) [[unlikely]] { auto now = ceph::coarse_mono_clock::now(); if (now - last_warning > 5min) { ldout(cct, -1) << "WARNING: The OSD cluster is overloaded and struggling to " << "complete ops. You need more capacity to serve this level " << "of demand." 
<< dendl; last_warning = now; } return 1; } else if (avg_latency() >= threshold) [[unlikely]] { return concurrency / 2; } else [[likely]] { return concurrency; } } }; struct RGWDataSyncCtx { RGWDataSyncEnv *env{nullptr}; CephContext *cct{nullptr}; RGWRESTConn *conn{nullptr}; rgw_zone_id source_zone; LatencyConcurrencyControl lcc{nullptr}; RGWDataSyncCtx() = default; RGWDataSyncCtx(RGWDataSyncEnv* env, RGWRESTConn* conn, const rgw_zone_id& source_zone) : env(env), cct(env->cct), conn(conn), source_zone(source_zone), lcc(cct) {} void init(RGWDataSyncEnv *_env, RGWRESTConn *_conn, const rgw_zone_id& _source_zone) { cct = _env->cct; env = _env; conn = _conn; source_zone = _source_zone; lcc.cct = cct; } }; class RGWRados; class RGWRemoteDataLog : public RGWCoroutinesManager { const DoutPrefixProvider *dpp; rgw::sal::RadosStore* driver; CephContext *cct; RGWCoroutinesManagerRegistry *cr_registry; RGWAsyncRadosProcessor *async_rados; RGWHTTPManager http_manager; RGWDataSyncEnv sync_env; RGWDataSyncCtx sc; ceph::shared_mutex lock = ceph::make_shared_mutex("RGWRemoteDataLog::lock"); RGWDataSyncControlCR *data_sync_cr; RGWSyncTraceNodeRef tn; bool initialized; public: RGWRemoteDataLog(const DoutPrefixProvider *dpp, rgw::sal::RadosStore* _store, RGWAsyncRadosProcessor *async_rados); int init(const rgw_zone_id& _source_zone, RGWRESTConn *_conn, RGWSyncErrorLogger *_error_logger, RGWSyncTraceManager *_sync_tracer, RGWSyncModuleInstanceRef& module, PerfCounters* _counters); void finish(); int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info); int read_source_log_shards_info(const DoutPrefixProvider *dpp, std::map<int, RGWDataChangesLogInfo> *shards_info); int read_source_log_shards_next(const DoutPrefixProvider *dpp, std::map<int, std::string> shard_markers, std::map<int, rgw_datalog_shard_data> *result); int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status); int read_recovering_shards(const DoutPrefixProvider *dpp, const 
int num_shards, std::set<int>& recovering_shards); int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, std::set<std::string>& lagging_buckets,std::set<std::string>& recovering_buckets, rgw_data_sync_marker* sync_marker, const int max_entries); int init_sync_status(const DoutPrefixProvider *dpp, int num_shards); int run_sync(const DoutPrefixProvider *dpp, int num_shards); void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries); }; class RGWDataSyncStatusManager : public DoutPrefixProvider { rgw::sal::RadosStore* driver; rgw_zone_id source_zone; RGWRESTConn *conn; RGWSyncErrorLogger *error_logger; RGWSyncModuleInstanceRef sync_module; PerfCounters* counters; RGWRemoteDataLog source_log; std::string source_status_oid; std::string source_shard_status_oid_prefix; std::map<int, rgw_raw_obj> shard_objs; int num_shards; public: RGWDataSyncStatusManager(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados, const rgw_zone_id& _source_zone, PerfCounters* counters) : driver(_driver), source_zone(_source_zone), conn(NULL), error_logger(NULL), sync_module(nullptr), counters(counters), source_log(this, driver, async_rados), num_shards(0) {} RGWDataSyncStatusManager(rgw::sal::RadosStore* _driver, RGWAsyncRadosProcessor *async_rados, const rgw_zone_id& _source_zone, PerfCounters* counters, const RGWSyncModuleInstanceRef& _sync_module) : driver(_driver), source_zone(_source_zone), conn(NULL), error_logger(NULL), sync_module(_sync_module), counters(counters), source_log(this, driver, async_rados), num_shards(0) {} ~RGWDataSyncStatusManager() { finalize(); } int init(const DoutPrefixProvider *dpp); void finalize(); static std::string shard_obj_name(const rgw_zone_id& source_zone, int shard_id); static std::string sync_status_oid(const rgw_zone_id& source_zone); int read_sync_status(const DoutPrefixProvider *dpp, rgw_data_sync_status *sync_status) { return source_log.read_sync_status(dpp, sync_status); } int read_recovering_shards(const 
DoutPrefixProvider *dpp, const int num_shards, std::set<int>& recovering_shards) { return source_log.read_recovering_shards(dpp, num_shards, recovering_shards); } int read_shard_status(const DoutPrefixProvider *dpp, int shard_id, std::set<std::string>& lagging_buckets, std::set<std::string>& recovering_buckets, rgw_data_sync_marker *sync_marker, const int max_entries) { return source_log.read_shard_status(dpp, shard_id, lagging_buckets, recovering_buckets,sync_marker, max_entries); } int init_sync_status(const DoutPrefixProvider *dpp) { return source_log.init_sync_status(dpp, num_shards); } int read_log_info(const DoutPrefixProvider *dpp, rgw_datalog_info *log_info) { return source_log.read_log_info(dpp, log_info); } int read_source_log_shards_info(const DoutPrefixProvider *dpp, std::map<int, RGWDataChangesLogInfo> *shards_info) { return source_log.read_source_log_shards_info(dpp, shards_info); } int read_source_log_shards_next(const DoutPrefixProvider *dpp, std::map<int, std::string> shard_markers, std::map<int, rgw_datalog_shard_data> *result) { return source_log.read_source_log_shards_next(dpp, shard_markers, result); } int run(const DoutPrefixProvider *dpp) { return source_log.run_sync(dpp, num_shards); } void wakeup(int shard_id, bc::flat_set<rgw_data_notify_entry>& entries) { return source_log.wakeup(shard_id, entries); } void stop() { source_log.finish(); } // implements DoutPrefixProvider CephContext *get_cct() const override; unsigned get_subsys() const override; std::ostream& gen_prefix(std::ostream& out) const override; }; class RGWBucketPipeSyncStatusManager; class RGWBucketSyncCR; struct rgw_bucket_shard_full_sync_marker { rgw_obj_key position; uint64_t count; rgw_bucket_shard_full_sync_marker() : count(0) {} void encode_attr(std::map<std::string, bufferlist>& attrs); void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(position, bl); encode(count, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, 
bl); decode(position, bl); decode(count, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_bucket_shard_full_sync_marker) struct rgw_bucket_shard_inc_sync_marker { std::string position; ceph::real_time timestamp; void encode_attr(std::map<std::string, bufferlist>& attrs); void encode(bufferlist& bl) const { ENCODE_START(2, 1, bl); encode(position, bl); encode(timestamp, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); decode(position, bl); if (struct_v >= 2) { decode(timestamp, bl); } DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_bucket_shard_inc_sync_marker) struct rgw_bucket_shard_sync_info { enum SyncState { StateInit = 0, StateFullSync = 1, StateIncrementalSync = 2, StateStopped = 3, }; uint16_t state; rgw_bucket_shard_inc_sync_marker inc_marker; void decode_from_attrs(CephContext *cct, std::map<std::string, bufferlist>& attrs); void encode_all_attrs(std::map<std::string, bufferlist>& attrs); void encode_state_attr(std::map<std::string, bufferlist>& attrs); void encode(bufferlist& bl) const { ENCODE_START(2, 1, bl); encode(state, bl); encode(inc_marker, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); decode(state, bl); if (struct_v <= 1) { rgw_bucket_shard_full_sync_marker full_marker; decode(full_marker, bl); } decode(inc_marker, bl); DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); rgw_bucket_shard_sync_info() : state((int)StateInit) {} }; WRITE_CLASS_ENCODER(rgw_bucket_shard_sync_info) struct rgw_bucket_full_sync_status { rgw_obj_key position; uint64_t count = 0; void encode(bufferlist& bl) const { ENCODE_START(1, 1, bl); encode(position, bl); encode(count, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(1, bl); decode(position, bl); decode(count, bl); DECODE_FINISH(bl); 
} void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_bucket_full_sync_status) enum class BucketSyncState : uint8_t { Init = 0, Full, Incremental, Stopped, }; inline std::ostream& operator<<(std::ostream& out, const BucketSyncState& s) { switch (s) { case BucketSyncState::Init: out << "init"; break; case BucketSyncState::Full: out << "full"; break; case BucketSyncState::Incremental: out << "incremental"; break; case BucketSyncState::Stopped: out << "stopped"; break; } return out; } void encode_json(const char *name, BucketSyncState state, Formatter *f); void decode_json_obj(BucketSyncState& state, JSONObj *obj); struct rgw_bucket_sync_status { BucketSyncState state = BucketSyncState::Init; rgw_bucket_full_sync_status full; uint64_t incremental_gen = 0; std::vector<bool> shards_done_with_gen; void encode(bufferlist& bl) const { ENCODE_START(2, 1, bl); encode(state, bl); encode(full, bl); encode(incremental_gen, bl); encode(shards_done_with_gen, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); decode(state, bl); decode(full, bl); if (struct_v > 1) { decode(incremental_gen, bl); decode(shards_done_with_gen, bl); } DECODE_FINISH(bl); } void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; WRITE_CLASS_ENCODER(rgw_bucket_sync_status) struct bilog_status_v2 { rgw_bucket_sync_status sync_status; std::vector<rgw_bucket_shard_sync_info> inc_status; void dump(Formatter *f) const; void decode_json(JSONObj *obj); }; struct store_gen_shards { uint64_t gen = 0; uint32_t num_shards = 0; void dump(Formatter *f) const { encode_json("gen", gen, f); encode_json("num_shards", num_shards, f); } void decode_json(JSONObj *obj) { JSONDecoder::decode_json("gen", gen, obj); JSONDecoder::decode_json("num_shards", num_shards, obj); } }; struct rgw_bucket_index_marker_info { std::string bucket_ver; std::string master_ver; std::string max_marker; bool syncstopped{false}; uint64_t oldest_gen = 0; 
uint64_t latest_gen = 0; std::vector<store_gen_shards> generations; void decode_json(JSONObj *obj) { JSONDecoder::decode_json("bucket_ver", bucket_ver, obj); JSONDecoder::decode_json("master_ver", master_ver, obj); JSONDecoder::decode_json("max_marker", max_marker, obj); JSONDecoder::decode_json("syncstopped", syncstopped, obj); JSONDecoder::decode_json("oldest_gen", oldest_gen, obj); JSONDecoder::decode_json("latest_gen", latest_gen, obj); JSONDecoder::decode_json("generations", generations, obj); } }; class BucketIndexShardsManager; int rgw_read_remote_bilog_info(const DoutPrefixProvider *dpp, RGWRESTConn* conn, const rgw_bucket& bucket, rgw_bucket_index_marker_info& info, BucketIndexShardsManager& markers, optional_yield y); class RGWBucketPipeSyncStatusManager : public DoutPrefixProvider { rgw::sal::RadosStore* driver; RGWDataSyncEnv sync_env; RGWCoroutinesManager cr_mgr{driver->ctx(), driver->getRados()->get_cr_registry()}; RGWHTTPManager http_manager{driver->ctx(), cr_mgr.get_completion_mgr()}; std::optional<rgw_zone_id> source_zone; std::optional<rgw_bucket> source_bucket; std::unique_ptr<RGWSyncErrorLogger> error_logger = std::make_unique<RGWSyncErrorLogger>(driver, RGW_SYNC_ERROR_LOG_SHARD_PREFIX, ERROR_LOGGER_SHARDS); RGWSyncModuleInstanceRef sync_module; rgw_bucket dest_bucket; struct source { RGWDataSyncCtx sc; RGWBucketInfo info; rgw_bucket dest; RGWBucketSyncFlowManager::pipe_handler handler; std::string zone_name; source(RGWDataSyncEnv* env, const rgw_zone_id& zone, RGWRESTConn* conn, const RGWBucketInfo& info, const rgw_bucket& dest, const RGWBucketSyncFlowManager::pipe_handler& handler, const std::string& zone_name) : sc(env, conn, zone), info(info), dest(dest), handler(handler), zone_name(zone_name) {} }; std::vector<source> sources; int do_init(const DoutPrefixProvider *dpp, std::ostream* ostr); RGWBucketPipeSyncStatusManager(rgw::sal::RadosStore* driver, std::optional<rgw_zone_id> source_zone, std::optional<rgw_bucket> source_bucket, const 
rgw_bucket& dest_bucket) : driver(driver), source_zone(source_zone), source_bucket(source_bucket), dest_bucket(dest_bucket) {} int remote_info(const DoutPrefixProvider *dpp, source& s, uint64_t* oldest_gen, uint64_t* latest_gen, uint64_t* num_shards); public: static tl::expected<std::unique_ptr<RGWBucketPipeSyncStatusManager>, int> construct(const DoutPrefixProvider* dpp, rgw::sal::RadosStore* driver, std::optional<rgw_zone_id> source_zone, std::optional<rgw_bucket> source_bucket, const rgw_bucket& dest_bucket, std::ostream *ostream); ~RGWBucketPipeSyncStatusManager() = default; static std::string full_status_oid(const rgw_zone_id& source_zone, const rgw_bucket& source_bucket, const rgw_bucket& dest_bucket); static std::string inc_status_oid(const rgw_zone_id& source_zone, const rgw_bucket_sync_pair_info& bs, uint64_t gen); // specific source obj sync status, can be used by sync modules static std::string obj_status_oid(const rgw_bucket_sync_pipe& sync_pipe, const rgw_zone_id& source_zone, const rgw_obj& obj); // implements DoutPrefixProvider CephContext *get_cct() const override; unsigned get_subsys() const override; std::ostream& gen_prefix(std::ostream& out) const override; int init_sync_status(const DoutPrefixProvider *dpp); tl::expected<std::map<int, rgw_bucket_shard_sync_info>, int> read_sync_status( const DoutPrefixProvider *dpp); int run(const DoutPrefixProvider *dpp); }; /// read the full sync status with respect to a source bucket int rgw_read_bucket_full_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore *driver, const rgw_sync_bucket_pipe& pipe, rgw_bucket_sync_status *status, optional_yield y); /// read the incremental sync status of all bucket shards from the given source zone int rgw_read_bucket_inc_sync_status(const DoutPrefixProvider *dpp, rgw::sal::RadosStore *driver, const rgw_sync_bucket_pipe& pipe, uint64_t gen, std::vector<rgw_bucket_shard_sync_info> *status); class RGWDefaultSyncModule : public RGWSyncModule { public: 
RGWDefaultSyncModule() {} bool supports_writes() override { return true; } bool supports_data_export() override { return true; } int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) override; }; class RGWArchiveSyncModule : public RGWDefaultSyncModule { public: RGWArchiveSyncModule() {} bool supports_writes() override { return true; } bool supports_data_export() override { return false; } int create_instance(const DoutPrefixProvider *dpp, CephContext *cct, const JSONFormattable& config, RGWSyncModuleInstanceRef *instance) override; };
26,605
29.546498
211
h
null
ceph-main/src/rgw/driver/rados/rgw_datalog.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include <vector> #include "common/async/yield_context.h" #include "common/debug.h" #include "common/containers.h" #include "common/errno.h" #include "common/error_code.h" #include "common/async/blocked_completion.h" #include "common/async/librados_completion.h" #include "cls/fifo/cls_fifo_types.h" #include "cls/log/cls_log_client.h" #include "cls_fifo_legacy.h" #include "rgw_bucket_layout.h" #include "rgw_datalog.h" #include "rgw_log_backing.h" #include "rgw_tools.h" #define dout_context g_ceph_context static constexpr auto dout_subsys = ceph_subsys_rgw; namespace bs = boost::system; namespace lr = librados; using ceph::containers::tiny_vector; void rgw_data_change::dump(ceph::Formatter *f) const { std::string type; switch (entity_type) { case ENTITY_TYPE_BUCKET: type = "bucket"; break; default: type = "unknown"; } encode_json("entity_type", type, f); encode_json("key", key, f); utime_t ut(timestamp); encode_json("timestamp", ut, f); encode_json("gen", gen, f); } void rgw_data_change::decode_json(JSONObj *obj) { std::string s; JSONDecoder::decode_json("entity_type", s, obj); if (s == "bucket") { entity_type = ENTITY_TYPE_BUCKET; } else { entity_type = ENTITY_TYPE_UNKNOWN; } JSONDecoder::decode_json("key", key, obj); utime_t ut; JSONDecoder::decode_json("timestamp", ut, obj); timestamp = ut.to_real_time(); JSONDecoder::decode_json("gen", gen, obj); } void rgw_data_change::generate_test_instances(std::list<rgw_data_change *>& l) { l.push_back(new rgw_data_change{}); l.push_back(new rgw_data_change); l.back()->entity_type = ENTITY_TYPE_BUCKET; l.back()->key = "bucket_name"; l.back()->timestamp = ceph::real_clock::zero(); l.back()->gen = 0; } void rgw_data_change_log_entry::dump(Formatter *f) const { encode_json("log_id", log_id, f); utime_t ut(log_timestamp); encode_json("log_timestamp", ut, f); encode_json("entry", entry, f); } void 
rgw_data_change_log_entry::decode_json(JSONObj *obj) { JSONDecoder::decode_json("log_id", log_id, obj); utime_t ut; JSONDecoder::decode_json("log_timestamp", ut, obj); log_timestamp = ut.to_real_time(); JSONDecoder::decode_json("entry", entry, obj); } void rgw_data_notify_entry::dump(Formatter *f) const { encode_json("key", key, f); encode_json("gen", gen, f); } void rgw_data_notify_entry::decode_json(JSONObj *obj) { JSONDecoder::decode_json("key", key, obj); JSONDecoder::decode_json("gen", gen, obj); } class RGWDataChangesOmap final : public RGWDataChangesBE { using centries = std::list<cls_log_entry>; std::vector<std::string> oids; public: RGWDataChangesOmap(lr::IoCtx& ioctx, RGWDataChangesLog& datalog, uint64_t gen_id, int num_shards) : RGWDataChangesBE(ioctx, datalog, gen_id) { oids.reserve(num_shards); for (auto i = 0; i < num_shards; ++i) { oids.push_back(get_oid(i)); } } ~RGWDataChangesOmap() override = default; void prepare(ceph::real_time ut, const std::string& key, ceph::buffer::list&& entry, entries& out) override { if (!std::holds_alternative<centries>(out)) { ceph_assert(std::visit([](const auto& v) { return std::empty(v); }, out)); out = centries(); } cls_log_entry e; cls_log_add_prepare_entry(e, utime_t(ut), {}, key, entry); std::get<centries>(out).push_back(std::move(e)); } int push(const DoutPrefixProvider *dpp, int index, entries&& items, optional_yield y) override { lr::ObjectWriteOperation op; cls_log_add(op, std::get<centries>(items), true); auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to push to " << oids[index] << cpp_strerror(-r) << dendl; } return r; } int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now, const std::string& key, ceph::buffer::list&& bl, optional_yield y) override { lr::ObjectWriteOperation op; cls_log_add(op, utime_t(now), {}, key, bl); auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, y); if (r < 0) { ldpp_dout(dpp, 
-1) << __PRETTY_FUNCTION__ << ": failed to push to " << oids[index] << cpp_strerror(-r) << dendl; } return r; } int list(const DoutPrefixProvider *dpp, int index, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::optional<std::string_view> marker, std::string* out_marker, bool* truncated, optional_yield y) override { std::list<cls_log_entry> log_entries; lr::ObjectReadOperation op; cls_log_list(op, {}, {}, std::string(marker.value_or("")), max_entries, log_entries, out_marker, truncated); auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, y); if (r == -ENOENT) { *truncated = false; return 0; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to list " << oids[index] << cpp_strerror(-r) << dendl; return r; } for (auto iter = log_entries.begin(); iter != log_entries.end(); ++iter) { rgw_data_change_log_entry log_entry; log_entry.log_id = iter->id; auto rt = iter->timestamp.to_real_time(); log_entry.log_timestamp = rt; auto liter = iter->data.cbegin(); try { decode(log_entry.entry, liter); } catch (ceph::buffer::error& err) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to decode data changes log entry: " << err.what() << dendl; return -EIO; } entries.push_back(log_entry); } return 0; } int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info, optional_yield y) override { cls_log_header header; lr::ObjectReadOperation op; cls_log_info(op, &header); auto r = rgw_rados_operate(dpp, ioctx, oids[index], &op, nullptr, y); if (r == -ENOENT) r = 0; if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } else { info->marker = header.max_marker; info->last_update = header.max_time.to_real_time(); } return r; } int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, optional_yield y) override { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, std::string(marker)); auto r = 
rgw_rados_operate(dpp, ioctx, oids[index], &op, y); if (r == -ENOENT) r = -ENODATA; if (r < 0 && r != -ENODATA) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } return r; } int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, lr::AioCompletion* c) override { lr::ObjectWriteOperation op; cls_log_trim(op, {}, {}, {}, std::string(marker)); auto r = ioctx.aio_operate(oids[index], c, &op, 0); if (r == -ENOENT) r = -ENODATA; if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to get info from " << oids[index] << cpp_strerror(-r) << dendl; } return r; } std::string_view max_marker() const override { return "99999999"; } int is_empty(const DoutPrefixProvider *dpp, optional_yield y) override { for (auto shard = 0u; shard < oids.size(); ++shard) { std::list<cls_log_entry> log_entries; lr::ObjectReadOperation op; std::string out_marker; bool truncated; cls_log_list(op, {}, {}, {}, 1, log_entries, &out_marker, &truncated); auto r = rgw_rados_operate(dpp, ioctx, oids[shard], &op, nullptr, y); if (r == -ENOENT) { continue; } if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to list " << oids[shard] << cpp_strerror(-r) << dendl; return r; } if (!log_entries.empty()) { return 0; } } return 1; } }; class RGWDataChangesFIFO final : public RGWDataChangesBE { using centries = std::vector<ceph::buffer::list>; tiny_vector<LazyFIFO> fifos; public: RGWDataChangesFIFO(lr::IoCtx& ioctx, RGWDataChangesLog& datalog, uint64_t gen_id, int shards) : RGWDataChangesBE(ioctx, datalog, gen_id), fifos(shards, [&ioctx, this](std::size_t i, auto emplacer) { emplacer.emplace(ioctx, get_oid(i)); }) {} ~RGWDataChangesFIFO() override = default; void prepare(ceph::real_time, const std::string&, ceph::buffer::list&& entry, entries& out) override { if (!std::holds_alternative<centries>(out)) { ceph_assert(std::visit([](auto& v) { return std::empty(v); }, out)); out = centries(); } 
std::get<centries>(out).push_back(std::move(entry)); } int push(const DoutPrefixProvider *dpp, int index, entries&& items, optional_yield y) override { auto r = fifos[index].push(dpp, std::get<centries>(items), y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to push to FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } int push(const DoutPrefixProvider *dpp, int index, ceph::real_time, const std::string&, ceph::buffer::list&& bl, optional_yield y) override { auto r = fifos[index].push(dpp, std::move(bl), y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to push to FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } int list(const DoutPrefixProvider *dpp, int index, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::optional<std::string_view> marker, std::string* out_marker, bool* truncated, optional_yield y) override { std::vector<rgw::cls::fifo::list_entry> log_entries; bool more = false; auto r = fifos[index].list(dpp, max_entries, marker, &log_entries, &more, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to list FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; return r; } for (const auto& entry : log_entries) { rgw_data_change_log_entry log_entry; log_entry.log_id = entry.marker; log_entry.log_timestamp = entry.mtime; auto liter = entry.data.cbegin(); try { decode(log_entry.entry, liter); } catch (const buffer::error& err) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": failed to decode data changes log entry: " << err.what() << dendl; return -EIO; } entries.push_back(std::move(log_entry)); } if (truncated) *truncated = more; if (out_marker && !log_entries.empty()) { *out_marker = log_entries.back().marker; } return 0; } int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info, optional_yield y) override { auto& fifo = fifos[index]; auto r = fifo.read_meta(dpp, y); if (r < 0) { 
ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to get FIFO metadata: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; return r; } rados::cls::fifo::info m; fifo.meta(dpp, m, y); auto p = m.head_part_num; if (p < 0) { info->marker = ""; info->last_update = ceph::real_clock::zero(); return 0; } rgw::cls::fifo::part_info h; r = fifo.get_part_info(dpp, p, &h, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to get part info: " << get_oid(index) << "/" << p << ": " << cpp_strerror(-r) << dendl; return r; } info->marker = rgw::cls::fifo::marker{p, h.last_ofs}.to_string(); info->last_update = h.max_time; return 0; } int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, optional_yield y) override { auto r = fifos[index].trim(dpp, marker, false, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to trim FIFO: " << get_oid(index) << ": " << cpp_strerror(-r) << dendl; } return r; } int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, librados::AioCompletion* c) override { int r = 0; if (marker == rgw::cls::fifo::marker(0, 0).to_string()) { rgw_complete_aio_completion(c, -ENODATA); } else { // This null_yield is used for lazily opening FIFOs. // // shouldn't exist, but it can't be eliminated // since your caller is an RGWCoroutine in the data sync code. // // It can be eliminated after Reef when we can get rid of // AioCompletion entirely. 
fifos[index].trim(dpp, marker, false, c, null_yield); } return r; } std::string_view max_marker() const override { static const std::string mm = rgw::cls::fifo::marker::max().to_string(); return std::string_view(mm); } int is_empty(const DoutPrefixProvider *dpp, optional_yield y) override { std::vector<rgw::cls::fifo::list_entry> log_entries; bool more = false; for (auto shard = 0u; shard < fifos.size(); ++shard) { auto r = fifos[shard].list(dpp, 1, {}, &log_entries, &more, y); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": unable to list FIFO: " << get_oid(shard) << ": " << cpp_strerror(-r) << dendl; return r; } if (!log_entries.empty()) { return 0; } } return 1; } }; RGWDataChangesLog::RGWDataChangesLog(CephContext* cct) : cct(cct), num_shards(cct->_conf->rgw_data_log_num_shards), prefix(get_prefix()), changes(cct->_conf->rgw_data_log_changes_size) {} bs::error_code DataLogBackends::handle_init(entries_t e) noexcept { std::unique_lock l(m); for (const auto& [gen_id, gen] : e) { if (gen.pruned) { lderr(datalog.cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": ERROR: given empty generation: gen_id=" << gen_id << dendl; } if (count(gen_id) != 0) { lderr(datalog.cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": ERROR: generation already exists: gen_id=" << gen_id << dendl; } try { switch (gen.type) { case log_type::omap: emplace(gen_id, boost::intrusive_ptr<RGWDataChangesBE>(new RGWDataChangesOmap(ioctx, datalog, gen_id, shards)) ); break; case log_type::fifo: emplace(gen_id, boost::intrusive_ptr<RGWDataChangesBE>(new RGWDataChangesFIFO(ioctx, datalog, gen_id, shards)) ); break; default: lderr(datalog.cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": IMPOSSIBLE: invalid log type: gen_id=" << gen_id << ", type" << gen.type << dendl; return bs::error_code(EFAULT, bs::system_category()); } } catch (const bs::system_error& err) { lderr(datalog.cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": error setting up backend: gen_id=" << gen_id << ", 
err=" << err.what() << dendl; return err.code(); } } return {}; } bs::error_code DataLogBackends::handle_new_gens(entries_t e) noexcept { return handle_init(std::move(e)); } bs::error_code DataLogBackends::handle_empty_to(uint64_t new_tail) noexcept { std::unique_lock l(m); auto i = cbegin(); if (i->first < new_tail) { return {}; } if (new_tail >= (cend() - 1)->first) { lderr(datalog.cct) << __PRETTY_FUNCTION__ << ":" << __LINE__ << ": ERROR: attempt to trim head: new_tail=" << new_tail << dendl; return bs::error_code(EFAULT, bs::system_category()); } erase(i, upper_bound(new_tail)); return {}; } int RGWDataChangesLog::start(const DoutPrefixProvider *dpp, const RGWZone* _zone, const RGWZoneParams& zoneparams, librados::Rados* lr) { zone = _zone; ceph_assert(zone); auto defbacking = to_log_type( cct->_conf.get_val<std::string>("rgw_default_data_log_backing")); // Should be guaranteed by `set_enum_allowed` ceph_assert(defbacking); auto log_pool = zoneparams.log_pool; auto r = rgw_init_ioctx(dpp, lr, log_pool, ioctx, true, false); if (r < 0) { ldpp_dout(dpp, -1) << __PRETTY_FUNCTION__ << ": Failed to initialized ioctx, r=" << r << ", pool=" << log_pool << dendl; return -r; } // This null_yield is in startup code, so it doesn't matter that much. auto besr = logback_generations::init<DataLogBackends>( dpp, ioctx, metadata_log_oid(), [this](uint64_t gen_id, int shard) { return get_oid(gen_id, shard); }, num_shards, *defbacking, null_yield, *this); if (!besr) { lderr(cct) << __PRETTY_FUNCTION__ << ": Error initializing backends: " << besr.error().message() << dendl; return ceph::from_error_code(besr.error()); } bes = std::move(*besr); renew_thread = make_named_thread("rgw_dt_lg_renew", &RGWDataChangesLog::renew_run, this); return 0; } int RGWDataChangesLog::choose_oid(const rgw_bucket_shard& bs) { const auto& name = bs.bucket.name; auto shard_shift = (bs.shard_id > 0 ? 
bs.shard_id : 0); auto r = (ceph_str_hash_linux(name.data(), name.size()) + shard_shift) % num_shards; return static_cast<int>(r); } int RGWDataChangesLog::renew_entries(const DoutPrefixProvider *dpp) { if (!zone->log_data) return 0; /* we can't keep the bucket name as part of the cls_log_entry, and we need * it later, so we keep two lists under the map */ bc::flat_map<int, std::pair<std::vector<BucketGen>, RGWDataChangesBE::entries>> m; std::unique_lock l(lock); decltype(cur_cycle) entries; entries.swap(cur_cycle); l.unlock(); auto ut = real_clock::now(); auto be = bes->head(); for (const auto& [bs, gen] : entries) { auto index = choose_oid(bs); rgw_data_change change; bufferlist bl; change.entity_type = ENTITY_TYPE_BUCKET; change.key = bs.get_key(); change.timestamp = ut; change.gen = gen; encode(change, bl); m[index].first.push_back({bs, gen}); be->prepare(ut, change.key, std::move(bl), m[index].second); } for (auto& [index, p] : m) { auto& [buckets, entries] = p; auto now = real_clock::now(); // This null_yield can stay (for now) as we're in our own thread. auto ret = be->push(dpp, index, std::move(entries), null_yield); if (ret < 0) { /* we don't really need to have a special handling for failed cases here, * as this is just an optimization. 
*/ ldpp_dout(dpp, -1) << "ERROR: svc.cls->timelog.add() returned " << ret << dendl; return ret; } auto expiration = now; expiration += ceph::make_timespan(cct->_conf->rgw_data_log_window); for (auto& [bs, gen] : buckets) { update_renewed(bs, gen, expiration); } } return 0; } auto RGWDataChangesLog::_get_change(const rgw_bucket_shard& bs, uint64_t gen) -> ChangeStatusPtr { ceph_assert(ceph_mutex_is_locked(lock)); ChangeStatusPtr status; if (!changes.find({bs, gen}, status)) { status = std::make_shared<ChangeStatus>(); changes.add({bs, gen}, status); } return status; } void RGWDataChangesLog::register_renew(const rgw_bucket_shard& bs, const rgw::bucket_log_layout_generation& gen) { std::scoped_lock l{lock}; cur_cycle.insert({bs, gen.gen}); } void RGWDataChangesLog::update_renewed(const rgw_bucket_shard& bs, uint64_t gen, real_time expiration) { std::unique_lock l{lock}; auto status = _get_change(bs, gen); l.unlock(); ldout(cct, 20) << "RGWDataChangesLog::update_renewd() bucket_name=" << bs.bucket.name << " shard_id=" << bs.shard_id << " expiration=" << expiration << dendl; std::unique_lock sl(status->lock); status->cur_expiration = expiration; } int RGWDataChangesLog::get_log_shard_id(rgw_bucket& bucket, int shard_id) { rgw_bucket_shard bs(bucket, shard_id); return choose_oid(bs); } bool RGWDataChangesLog::filter_bucket(const DoutPrefixProvider *dpp, const rgw_bucket& bucket, optional_yield y) const { if (!bucket_filter) { return true; } return bucket_filter(bucket, y, dpp); } std::string RGWDataChangesLog::get_oid(uint64_t gen_id, int i) const { return (gen_id > 0 ? 
fmt::format("{}@G{}.{}", prefix, gen_id, i) : fmt::format("{}.{}", prefix, i)); } int RGWDataChangesLog::add_entry(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_log_layout_generation& gen, int shard_id, optional_yield y) { if (!zone->log_data) { return 0; } auto& bucket = bucket_info.bucket; if (!filter_bucket(dpp, bucket, y)) { return 0; } if (observer) { observer->on_bucket_changed(bucket.get_key()); } rgw_bucket_shard bs(bucket, shard_id); int index = choose_oid(bs); mark_modified(index, bs, gen.gen); std::unique_lock l(lock); auto status = _get_change(bs, gen.gen); l.unlock(); auto now = real_clock::now(); std::unique_lock sl(status->lock); ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() bucket.name=" << bucket.name << " shard_id=" << shard_id << " now=" << now << " cur_expiration=" << status->cur_expiration << dendl; if (now < status->cur_expiration) { /* no need to send, recently completed */ sl.unlock(); register_renew(bs, gen); return 0; } RefCountedCond* cond; if (status->pending) { cond = status->cond; ceph_assert(cond); status->cond->get(); sl.unlock(); int ret = cond->wait(); cond->put(); if (!ret) { register_renew(bs, gen); } return ret; } status->cond = new RefCountedCond; status->pending = true; ceph::real_time expiration; int ret; do { status->cur_sent = now; expiration = now; expiration += ceph::make_timespan(cct->_conf->rgw_data_log_window); sl.unlock(); ceph::buffer::list bl; rgw_data_change change; change.entity_type = ENTITY_TYPE_BUCKET; change.key = bs.get_key(); change.timestamp = now; change.gen = gen.gen; encode(change, bl); ldpp_dout(dpp, 20) << "RGWDataChangesLog::add_entry() sending update with now=" << now << " cur_expiration=" << expiration << dendl; auto be = bes->head(); ret = be->push(dpp, index, now, change.key, std::move(bl), y); now = real_clock::now(); sl.lock(); } while (!ret && real_clock::now() > expiration); cond = status->cond; status->pending = false; /* time of when operation 
started, not completed */ status->cur_expiration = status->cur_sent; status->cur_expiration += make_timespan(cct->_conf->rgw_data_log_window); status->cond = nullptr; sl.unlock(); cond->done(ret); cond->put(); return ret; } int DataLogBackends::list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::string_view marker, std::string* out_marker, bool* truncated, optional_yield y) { const auto [start_id, start_cursor] = cursorgen(marker); auto gen_id = start_id; std::string out_cursor; while (max_entries > 0) { std::vector<rgw_data_change_log_entry> gentries; std::unique_lock l(m); auto i = lower_bound(gen_id); if (i == end()) return 0; auto be = i->second; l.unlock(); gen_id = be->gen_id; auto r = be->list(dpp, shard, max_entries, gentries, gen_id == start_id ? start_cursor : std::string{}, &out_cursor, truncated, y); if (r < 0) return r; if (out_marker && !out_cursor.empty()) { *out_marker = gencursor(gen_id, out_cursor); } for (auto& g : gentries) { g.log_id = gencursor(gen_id, g.log_id); } if (int s = gentries.size(); s < 0 || s > max_entries) max_entries = 0; else max_entries -= gentries.size(); std::move(gentries.begin(), gentries.end(), std::back_inserter(entries)); ++gen_id; } return 0; } int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::string_view marker, std::string* out_marker, bool* truncated, optional_yield y) { assert(shard < num_shards); return bes->list(dpp, shard, max_entries, entries, marker, out_marker, truncated, y); } int RGWDataChangesLog::list_entries(const DoutPrefixProvider *dpp, int max_entries, std::vector<rgw_data_change_log_entry>& entries, LogMarker& marker, bool *ptruncated, optional_yield y) { bool truncated; entries.clear(); for (; marker.shard < num_shards && int(entries.size()) < max_entries; marker.shard++, marker.marker.clear()) { int ret = list_entries(dpp, marker.shard, 
max_entries - entries.size(), entries, marker.marker, NULL, &truncated, y); if (ret == -ENOENT) { continue; } if (ret < 0) { return ret; } if (!truncated) { *ptruncated = false; return 0; } } *ptruncated = (marker.shard < num_shards); return 0; } int RGWDataChangesLog::get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info, optional_yield y) { assert(shard_id < num_shards); auto be = bes->head(); auto r = be->get_info(dpp, shard_id, info, y); if (!info->marker.empty()) { info->marker = gencursor(be->gen_id, info->marker); } return r; } int DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, optional_yield y) { auto [target_gen, cursor] = cursorgen(marker); std::unique_lock l(m); const auto head_gen = (end() - 1)->second->gen_id; const auto tail_gen = begin()->first; if (target_gen < tail_gen) return 0; auto r = 0; for (auto be = lower_bound(0)->second; be->gen_id <= target_gen && be->gen_id <= head_gen && r >= 0; be = upper_bound(be->gen_id)->second) { l.unlock(); auto c = be->gen_id == target_gen ? 
cursor : be->max_marker(); r = be->trim(dpp, shard_id, c, y); if (r == -ENOENT) r = -ENODATA; if (r == -ENODATA && be->gen_id < target_gen) r = 0; if (be->gen_id == target_gen) break; l.lock(); }; return r; } int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, optional_yield y) { assert(shard_id < num_shards); return bes->trim_entries(dpp, shard_id, marker, y); } class GenTrim : public rgw::cls::fifo::Completion<GenTrim> { public: DataLogBackends* const bes; const int shard_id; const uint64_t target_gen; const std::string cursor; const uint64_t head_gen; const uint64_t tail_gen; boost::intrusive_ptr<RGWDataChangesBE> be; GenTrim(const DoutPrefixProvider *dpp, DataLogBackends* bes, int shard_id, uint64_t target_gen, std::string cursor, uint64_t head_gen, uint64_t tail_gen, boost::intrusive_ptr<RGWDataChangesBE> be, lr::AioCompletion* super) : Completion(dpp, super), bes(bes), shard_id(shard_id), target_gen(target_gen), cursor(std::move(cursor)), head_gen(head_gen), tail_gen(tail_gen), be(std::move(be)) {} void handle(const DoutPrefixProvider *dpp, Ptr&& p, int r) { auto gen_id = be->gen_id; be.reset(); if (r == -ENOENT) r = -ENODATA; if (r == -ENODATA && gen_id < target_gen) r = 0; if (r < 0) { complete(std::move(p), r); return; } { std::unique_lock l(bes->m); auto i = bes->upper_bound(gen_id); if (i == bes->end() || i->first > target_gen || i->first > head_gen) { l.unlock(); complete(std::move(p), -ENODATA); return; } be = i->second; } auto c = be->gen_id == target_gen ? 
cursor : be->max_marker(); be->trim(dpp, shard_id, c, call(std::move(p))); } }; void DataLogBackends::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c) { auto [target_gen, cursor] = cursorgen(marker); std::unique_lock l(m); const auto head_gen = (end() - 1)->second->gen_id; const auto tail_gen = begin()->first; if (target_gen < tail_gen) { l.unlock(); rgw_complete_aio_completion(c, -ENODATA); return; } auto be = begin()->second; l.unlock(); auto gt = std::make_unique<GenTrim>(dpp, this, shard_id, target_gen, std::string(cursor), head_gen, tail_gen, be, c); auto cc = be->gen_id == target_gen ? cursor : be->max_marker(); be->trim(dpp, shard_id, cc, GenTrim::call(std::move(gt))); } int DataLogBackends::trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through, optional_yield y) { if (size() != 1) { std::vector<mapped_type> candidates; { std::scoped_lock l(m); auto e = cend() - 1; for (auto i = cbegin(); i < e; ++i) { candidates.push_back(i->second); } } std::optional<uint64_t> highest; for (auto& be : candidates) { auto r = be->is_empty(dpp, y); if (r < 0) { return r; } else if (r == 1) { highest = be->gen_id; } else { break; } } through = highest; if (!highest) { return 0; } auto ec = empty_to(dpp, *highest, y); if (ec) { return ceph::from_error_code(ec); } } return ceph::from_error_code(remove_empty(dpp, y)); } int RGWDataChangesLog::trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c) { assert(shard_id < num_shards); bes->trim_entries(dpp, shard_id, marker, c); return 0; } bool RGWDataChangesLog::going_down() const { return down_flag; } RGWDataChangesLog::~RGWDataChangesLog() { down_flag = true; if (renew_thread.joinable()) { renew_stop(); renew_thread.join(); } } void RGWDataChangesLog::renew_run() noexcept { static constexpr auto runs_per_prune = 150; auto run = 0; for (;;) { const DoutPrefix dp(cct, dout_subsys, "rgw data 
changes log: "); ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: start" << dendl; int r = renew_entries(&dp); if (r < 0) { ldpp_dout(&dp, 0) << "ERROR: RGWDataChangesLog::renew_entries returned error r=" << r << dendl; } if (going_down()) break; if (run == runs_per_prune) { std::optional<uint64_t> through; ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruning old generations" << dendl; // This null_yield can stay, for now, as it's in its own thread. trim_generations(&dp, through, null_yield); if (r < 0) { derr << "RGWDataChangesLog::ChangesRenewThread: failed pruning r=" << r << dendl; } else if (through) { ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: pruned generations " << "through " << *through << "." << dendl; } else { ldpp_dout(&dp, 2) << "RGWDataChangesLog::ChangesRenewThread: nothing to prune." << dendl; } run = 0; } else { ++run; } int interval = cct->_conf->rgw_data_log_window * 3 / 4; std::unique_lock locker{renew_lock}; renew_cond.wait_for(locker, std::chrono::seconds(interval)); } } void RGWDataChangesLog::renew_stop() { std::lock_guard l{renew_lock}; renew_cond.notify_all(); } void RGWDataChangesLog::mark_modified(int shard_id, const rgw_bucket_shard& bs, uint64_t gen) { if (!cct->_conf->rgw_data_notify_interval_msec) { return; } auto key = bs.get_key(); { std::shared_lock rl{modified_lock}; // read lock to check for existence auto shard = modified_shards.find(shard_id); if (shard != modified_shards.end() && shard->second.count({key, gen})) { return; } } std::unique_lock wl{modified_lock}; // write lock for insertion modified_shards[shard_id].insert(rgw_data_notify_entry{key, gen}); } std::string RGWDataChangesLog::max_marker() const { return gencursor(std::numeric_limits<uint64_t>::max(), "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"); } int RGWDataChangesLog::change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y) { return ceph::from_error_code(bes->new_backing(dpp, type, y)); } 
int RGWDataChangesLog::trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through, optional_yield y) { return bes->trim_generations(dpp, through, y); } void RGWDataChangesLogInfo::dump(Formatter *f) const { encode_json("marker", marker, f); utime_t ut(last_update); encode_json("last_update", ut, f); } void RGWDataChangesLogInfo::decode_json(JSONObj *obj) { JSONDecoder::decode_json("marker", marker, obj); utime_t ut; JSONDecoder::decode_json("last_update", ut, obj); last_update = ut.to_real_time(); }
32,702
28.515343
136
cc
null
ceph-main/src/rgw/driver/rados/rgw_datalog.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <cstdint> #include <list> #include <memory> #include <string> #include <string_view> #include <variant> #include <vector> #include <boost/container/flat_map.hpp> #include <boost/container/flat_set.hpp> #include <boost/smart_ptr/intrusive_ptr.hpp> #include <boost/smart_ptr/intrusive_ref_counter.hpp> #include <fmt/format.h> #include "common/async/yield_context.h" #include "include/buffer.h" #include "include/encoding.h" #include "include/function2.hpp" #include "include/rados/librados.hpp" #include "common/ceph_context.h" #include "common/ceph_json.h" #include "common/ceph_time.h" #include "common/Formatter.h" #include "common/lru_map.h" #include "common/RefCountedObj.h" #include "cls/log/cls_log_types.h" #include "rgw_basic_types.h" #include "rgw_log_backing.h" #include "rgw_sync_policy.h" #include "rgw_zone.h" #include "rgw_trim_bilog.h" namespace bc = boost::container; enum DataLogEntityType { ENTITY_TYPE_UNKNOWN = 0, ENTITY_TYPE_BUCKET = 1, }; struct rgw_data_change { DataLogEntityType entity_type; std::string key; ceph::real_time timestamp; uint64_t gen = 0; void encode(ceph::buffer::list& bl) const { // require decoders to recognize v2 when gen>0 const uint8_t compat = (gen == 0) ? 
1 : 2; ENCODE_START(2, compat, bl); auto t = std::uint8_t(entity_type); encode(t, bl); encode(key, bl); encode(timestamp, bl); encode(gen, bl); ENCODE_FINISH(bl); } void decode(bufferlist::const_iterator& bl) { DECODE_START(2, bl); std::uint8_t t; decode(t, bl); entity_type = DataLogEntityType(t); decode(key, bl); decode(timestamp, bl); if (struct_v < 2) { gen = 0; } else { decode(gen, bl); } DECODE_FINISH(bl); } void dump(ceph::Formatter* f) const; void decode_json(JSONObj* obj); static void generate_test_instances(std::list<rgw_data_change *>& l); }; WRITE_CLASS_ENCODER(rgw_data_change) struct rgw_data_change_log_entry { std::string log_id; ceph::real_time log_timestamp; rgw_data_change entry; void encode(ceph::buffer::list& bl) const { ENCODE_START(1, 1, bl); encode(log_id, bl); encode(log_timestamp, bl); encode(entry, bl); ENCODE_FINISH(bl); } void decode(ceph::buffer::list::const_iterator& bl) { DECODE_START(1, bl); decode(log_id, bl); decode(log_timestamp, bl); decode(entry, bl); DECODE_FINISH(bl); } void dump(ceph::Formatter* f) const; void decode_json(JSONObj* obj); }; WRITE_CLASS_ENCODER(rgw_data_change_log_entry) struct RGWDataChangesLogInfo { std::string marker; ceph::real_time last_update; void dump(ceph::Formatter* f) const; void decode_json(JSONObj* obj); }; struct RGWDataChangesLogMarker { int shard = 0; std::string marker; RGWDataChangesLogMarker() = default; }; class RGWDataChangesLog; struct rgw_data_notify_entry { std::string key; uint64_t gen = 0; void dump(ceph::Formatter* f) const; void decode_json(JSONObj* obj); rgw_data_notify_entry& operator=(const rgw_data_notify_entry&) = default; bool operator <(const rgw_data_notify_entry& d) const { if (key < d.key) { return true; } if (d.key < key) { return false; } return gen < d.gen; } friend std::ostream& operator <<(std::ostream& m, const rgw_data_notify_entry& e) { return m << "[key: " << e.key << ", gen: " << e.gen << "]"; } }; class RGWDataChangesBE; class DataLogBackends final : public 
logback_generations, private bc::flat_map<uint64_t, boost::intrusive_ptr<RGWDataChangesBE>> { friend class logback_generations; friend class GenTrim; std::mutex m; RGWDataChangesLog& datalog; DataLogBackends(librados::IoCtx& ioctx, std::string oid, fu2::unique_function<std::string( uint64_t, int) const>&& get_oid, int shards, RGWDataChangesLog& datalog) noexcept : logback_generations(ioctx, oid, std::move(get_oid), shards), datalog(datalog) {} public: boost::intrusive_ptr<RGWDataChangesBE> head() { std::unique_lock l(m); auto i = end(); --i; return i->second; } int list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::string_view marker, std::string* out_marker, bool* truncated, optional_yield y); int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, optional_yield y); void trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c); void set_zero(RGWDataChangesBE* be) { emplace(0, be); } bs::error_code handle_init(entries_t e) noexcept override; bs::error_code handle_new_gens(entries_t e) noexcept override; bs::error_code handle_empty_to(uint64_t new_tail) noexcept override; int trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through, optional_yield y); }; struct BucketGen { rgw_bucket_shard shard; uint64_t gen; BucketGen(const rgw_bucket_shard& shard, uint64_t gen) : shard(shard), gen(gen) {} BucketGen(rgw_bucket_shard&& shard, uint64_t gen) : shard(std::move(shard)), gen(gen) {} BucketGen(const BucketGen&) = default; BucketGen(BucketGen&&) = default; BucketGen& operator =(const BucketGen&) = default; BucketGen& operator =(BucketGen&&) = default; ~BucketGen() = default; }; inline bool operator ==(const BucketGen& l, const BucketGen& r) { return (l.shard == r.shard) && (l.gen == r.gen); } inline bool operator <(const BucketGen& l, const BucketGen& r) { if (l.shard < r.shard) { return true; } 
else if (l.shard == r.shard) { return l.gen < r.gen; } else { return false; } } class RGWDataChangesLog { friend DataLogBackends; CephContext *cct; librados::IoCtx ioctx; rgw::BucketChangeObserver *observer = nullptr; const RGWZone* zone; std::unique_ptr<DataLogBackends> bes; const int num_shards; std::string get_prefix() { auto prefix = cct->_conf->rgw_data_log_obj_prefix; return prefix.empty() ? prefix : "data_log"; } std::string metadata_log_oid() { return get_prefix() + "generations_metadata"; } std::string prefix; ceph::mutex lock = ceph::make_mutex("RGWDataChangesLog::lock"); ceph::shared_mutex modified_lock = ceph::make_shared_mutex("RGWDataChangesLog::modified_lock"); bc::flat_map<int, bc::flat_set<rgw_data_notify_entry>> modified_shards; std::atomic<bool> down_flag = { false }; struct ChangeStatus { std::shared_ptr<const rgw_sync_policy_info> sync_policy; ceph::real_time cur_expiration; ceph::real_time cur_sent; bool pending = false; RefCountedCond* cond = nullptr; ceph::mutex lock = ceph::make_mutex("RGWDataChangesLog::ChangeStatus"); }; using ChangeStatusPtr = std::shared_ptr<ChangeStatus>; lru_map<BucketGen, ChangeStatusPtr> changes; bc::flat_set<BucketGen> cur_cycle; ChangeStatusPtr _get_change(const rgw_bucket_shard& bs, uint64_t gen); void register_renew(const rgw_bucket_shard& bs, const rgw::bucket_log_layout_generation& gen); void update_renewed(const rgw_bucket_shard& bs, uint64_t gen, ceph::real_time expiration); ceph::mutex renew_lock = ceph::make_mutex("ChangesRenewThread::lock"); ceph::condition_variable renew_cond; void renew_run() noexcept; void renew_stop(); std::thread renew_thread; std::function<bool(const rgw_bucket& bucket, optional_yield y, const DoutPrefixProvider *dpp)> bucket_filter; bool going_down() const; bool filter_bucket(const DoutPrefixProvider *dpp, const rgw_bucket& bucket, optional_yield y) const; int renew_entries(const DoutPrefixProvider *dpp); public: RGWDataChangesLog(CephContext* cct); ~RGWDataChangesLog(); int 
start(const DoutPrefixProvider *dpp, const RGWZone* _zone, const RGWZoneParams& zoneparams, librados::Rados* lr); int choose_oid(const rgw_bucket_shard& bs); int add_entry(const DoutPrefixProvider *dpp, const RGWBucketInfo& bucket_info, const rgw::bucket_log_layout_generation& gen, int shard_id, optional_yield y); int get_log_shard_id(rgw_bucket& bucket, int shard_id); int list_entries(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::string_view marker, std::string* out_marker, bool* truncated, optional_yield y); int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, optional_yield y); int trim_entries(const DoutPrefixProvider *dpp, int shard_id, std::string_view marker, librados::AioCompletion* c); // :( int get_info(const DoutPrefixProvider *dpp, int shard_id, RGWDataChangesLogInfo *info, optional_yield y); using LogMarker = RGWDataChangesLogMarker; int list_entries(const DoutPrefixProvider *dpp, int max_entries, std::vector<rgw_data_change_log_entry>& entries, LogMarker& marker, bool* ptruncated, optional_yield y); void mark_modified(int shard_id, const rgw_bucket_shard& bs, uint64_t gen); auto read_clear_modified() { std::unique_lock wl{modified_lock}; decltype(modified_shards) modified; modified.swap(modified_shards); modified_shards.clear(); return modified; } void set_observer(rgw::BucketChangeObserver *observer) { this->observer = observer; } void set_bucket_filter(decltype(bucket_filter)&& f) { bucket_filter = std::move(f); } // a marker that compares greater than any other std::string max_marker() const; std::string get_oid(uint64_t gen_id, int shard_id) const; int change_format(const DoutPrefixProvider *dpp, log_type type, optional_yield y); int trim_generations(const DoutPrefixProvider *dpp, std::optional<uint64_t>& through, optional_yield y); }; class RGWDataChangesBE : public boost::intrusive_ref_counter<RGWDataChangesBE> { protected: librados::IoCtx& 
ioctx; CephContext* const cct; RGWDataChangesLog& datalog; std::string get_oid(int shard_id) { return datalog.get_oid(gen_id, shard_id); } public: using entries = std::variant<std::list<cls_log_entry>, std::vector<ceph::buffer::list>>; const uint64_t gen_id; RGWDataChangesBE(librados::IoCtx& ioctx, RGWDataChangesLog& datalog, uint64_t gen_id) : ioctx(ioctx), cct(static_cast<CephContext*>(ioctx.cct())), datalog(datalog), gen_id(gen_id) {} virtual ~RGWDataChangesBE() = default; virtual void prepare(ceph::real_time now, const std::string& key, ceph::buffer::list&& entry, entries& out) = 0; virtual int push(const DoutPrefixProvider *dpp, int index, entries&& items, optional_yield y) = 0; virtual int push(const DoutPrefixProvider *dpp, int index, ceph::real_time now, const std::string& key, ceph::buffer::list&& bl, optional_yield y) = 0; virtual int list(const DoutPrefixProvider *dpp, int shard, int max_entries, std::vector<rgw_data_change_log_entry>& entries, std::optional<std::string_view> marker, std::string* out_marker, bool* truncated, optional_yield y) = 0; virtual int get_info(const DoutPrefixProvider *dpp, int index, RGWDataChangesLogInfo *info, optional_yield y) = 0; virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, optional_yield y) = 0; virtual int trim(const DoutPrefixProvider *dpp, int index, std::string_view marker, librados::AioCompletion* c) = 0; virtual std::string_view max_marker() const = 0; // 1 on empty, 0 on non-empty, negative on error. virtual int is_empty(const DoutPrefixProvider *dpp, optional_yield y) = 0; };
11,839
28.89899
111
h
null
ceph-main/src/rgw/driver/rados/rgw_datalog_notify.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_datalog_notify.h" #include "rgw_datalog.h" // custom encoding for v1 notify API struct EntryEncoderV1 { const rgw_data_notify_entry& entry; }; struct SetEncoderV1 { const bc::flat_set<rgw_data_notify_entry>& entries; }; // encode rgw_data_notify_entry as string void encode_json(const char *name, const EntryEncoderV1& e, Formatter *f) { f->dump_string(name, e.entry.key); // encode the key only } // encode set<rgw_data_notify_entry> as set<string> void encode_json(const char *name, const SetEncoderV1& e, Formatter *f) { f->open_array_section(name); for (auto& entry : e.entries) { encode_json("obj", EntryEncoderV1{entry}, f); } f->close_section(); } // encode map<int, set<rgw_data_notify_entry>> as map<int, set<string>> void encode_json(const char *name, const rgw_data_notify_v1_encoder& e, Formatter *f) { f->open_array_section(name); for (auto& [key, val] : e.shards) { f->open_object_section("entry"); encode_json("key", key, f); encode_json("val", SetEncoderV1{val}, f); f->close_section(); } f->close_section(); } struct EntryDecoderV1 { rgw_data_notify_entry& entry; }; struct SetDecoderV1 { bc::flat_set<rgw_data_notify_entry>& entries; }; // decode string into rgw_data_notify_entry void decode_json_obj(EntryDecoderV1& d, JSONObj *obj) { decode_json_obj(d.entry.key, obj); d.entry.gen = 0; } // decode set<string> into set<rgw_data_notify_entry> void decode_json_obj(SetDecoderV1& d, JSONObj *obj) { for (JSONObjIter o = obj->find_first(); !o.end(); ++o) { rgw_data_notify_entry val; auto decoder = EntryDecoderV1{val}; decode_json_obj(decoder, *o); d.entries.insert(std::move(val)); } } // decode map<int, set<string>> into map<int, set<rgw_data_notify_entry>> void decode_json_obj(rgw_data_notify_v1_decoder& d, JSONObj *obj) { for (JSONObjIter o = obj->find_first(); !o.end(); ++o) { int shard_id = 0; JSONDecoder::decode_json("key", shard_id, *o); 
bc::flat_set<rgw_data_notify_entry> val; SetDecoderV1 decoder{val}; JSONDecoder::decode_json("val", decoder, *o); d.shards[shard_id] = std::move(val); } }
2,246
28.181818
85
cc
null
ceph-main/src/rgw/driver/rados/rgw_datalog_notify.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #pragma once #include <boost/container/flat_map.hpp> #include <boost/container/flat_set.hpp> #include "rgw_datalog.h" namespace bc = boost::container; namespace ceph { class Formatter; } class JSONObj; class RGWCoroutine; class RGWHTTPManager; class RGWRESTConn; struct rgw_data_notify_entry; // json encoder and decoder for notify v1 API struct rgw_data_notify_v1_encoder { const bc::flat_map<int, bc::flat_set<rgw_data_notify_entry>>& shards; }; void encode_json(const char *name, const rgw_data_notify_v1_encoder& e, ceph::Formatter *f); struct rgw_data_notify_v1_decoder { bc::flat_map<int, bc::flat_set<rgw_data_notify_entry>>& shards; }; void decode_json_obj(rgw_data_notify_v1_decoder& d, JSONObj *obj);
845
25.4375
71
h
null
ceph-main/src/rgw/driver/rados/rgw_etag_verifier.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_etag_verifier.h" #include "rgw_obj_manifest.h" #define dout_subsys ceph_subsys_rgw namespace rgw::putobj { int create_etag_verifier(const DoutPrefixProvider *dpp, CephContext* cct, rgw::sal::DataProcessor* filter, const bufferlist& manifest_bl, const std::optional<RGWCompressionInfo>& compression, etag_verifier_ptr& verifier) { RGWObjManifest manifest; try { auto miter = manifest_bl.cbegin(); decode(manifest, miter); } catch (buffer::error& err) { ldpp_dout(dpp, 0) << "ERROR: couldn't decode manifest" << dendl; return -EIO; } RGWObjManifestRule rule; bool found = manifest.get_rule(0, &rule); if (!found) { ldpp_dout(dpp, -1) << "ERROR: manifest->get_rule() could not find rule" << dendl; return -EIO; } if (rule.start_part_num == 0) { /* Atomic object */ verifier.emplace<ETagVerifier_Atomic>(cct, filter); return 0; } uint64_t cur_part_ofs = UINT64_MAX; std::vector<uint64_t> part_ofs; /* * We must store the offset of each part to calculate the ETAGs for each * MPU part. These part ETags then become the input for the MPU object * Etag. */ for (auto mi = manifest.obj_begin(dpp); mi != manifest.obj_end(dpp); ++mi) { if (cur_part_ofs == mi.get_part_ofs()) continue; cur_part_ofs = mi.get_part_ofs(); ldpp_dout(dpp, 20) << "MPU Part offset:" << cur_part_ofs << dendl; part_ofs.push_back(cur_part_ofs); } if (compression) { // if the source object was compressed, the manifest is storing // compressed part offsets. 
transform the compressed offsets back to // their original offsets by finding the first block of each part const auto& blocks = compression->blocks; auto block = blocks.begin(); for (auto& ofs : part_ofs) { // find the compression_block with new_ofs == ofs constexpr auto less = [] (const compression_block& block, uint64_t ofs) { return block.new_ofs < ofs; }; block = std::lower_bound(block, blocks.end(), ofs, less); if (block == blocks.end() || block->new_ofs != ofs) { ldpp_dout(dpp, 4) << "no match for compressed offset " << ofs << ", disabling etag verification" << dendl; return -EIO; } ofs = block->old_ofs; ldpp_dout(dpp, 20) << "MPU Part uncompressed offset:" << ofs << dendl; } } verifier.emplace<ETagVerifier_MPU>(cct, std::move(part_ofs), filter); return 0; } int ETagVerifier_Atomic::process(bufferlist&& in, uint64_t logical_offset) { bufferlist out; if (in.length() > 0) hash.Update((const unsigned char *)in.c_str(), in.length()); return Pipe::process(std::move(in), logical_offset); } void ETagVerifier_Atomic::calculate_etag() { unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE]; char calc_md5[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; /* Return early if ETag has already been calculated */ if (!calculated_etag.empty()) return; hash.Final(m); buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5); calculated_etag = calc_md5; ldout(cct, 20) << "Single part object: " << " etag:" << calculated_etag << dendl; } void ETagVerifier_MPU::process_end_of_MPU_part() { unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE]; char calc_md5_part[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + 1]; std::string calculated_etag_part; hash.Final(m); mpu_etag_hash.Update((const unsigned char *)m, sizeof(m)); hash.Restart(); if (cct->_conf->subsys.should_gather(dout_subsys, 20)) { buf_to_hex(m, CEPH_CRYPTO_MD5_DIGESTSIZE, calc_md5_part); calculated_etag_part = calc_md5_part; ldout(cct, 20) << "Part etag: " << calculated_etag_part << dendl; } cur_part_index++; next_part_index++; } int ETagVerifier_MPU::process(bufferlist&& 
in, uint64_t logical_offset) { uint64_t bl_end = in.length() + logical_offset; /* Handle the last MPU part */ if (size_t(next_part_index) == part_ofs.size()) { hash.Update((const unsigned char *)in.c_str(), in.length()); goto done; } /* Incoming bufferlist spans two MPU parts. Calculate separate ETags */ if (bl_end > part_ofs[next_part_index]) { uint64_t part_one_len = part_ofs[next_part_index] - logical_offset; hash.Update((const unsigned char *)in.c_str(), part_one_len); process_end_of_MPU_part(); hash.Update((const unsigned char *)in.c_str() + part_one_len, bl_end - part_ofs[cur_part_index]); /* * If we've moved to the last part of the MPU, avoid usage of * parts_ofs[next_part_index] as it will lead to our-of-range access. */ if (size_t(next_part_index) == part_ofs.size()) goto done; } else { hash.Update((const unsigned char *)in.c_str(), in.length()); } /* Update the MPU Etag if the current part has ended */ if (logical_offset + in.length() + 1 == part_ofs[next_part_index]) process_end_of_MPU_part(); done: return Pipe::process(std::move(in), logical_offset); } void ETagVerifier_MPU::calculate_etag() { const uint32_t parts = part_ofs.size(); constexpr auto digits10 = std::numeric_limits<uint32_t>::digits10; constexpr auto extra = 2 + digits10; // add "-%u\0" at the end unsigned char m[CEPH_CRYPTO_MD5_DIGESTSIZE], mpu_m[CEPH_CRYPTO_MD5_DIGESTSIZE]; char final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2 + extra]; /* Return early if ETag has already been calculated */ if (!calculated_etag.empty()) return; hash.Final(m); mpu_etag_hash.Update((const unsigned char *)m, sizeof(m)); /* Refer RGWCompleteMultipart::execute() for ETag calculation for MPU object */ mpu_etag_hash.Final(mpu_m); buf_to_hex(mpu_m, CEPH_CRYPTO_MD5_DIGESTSIZE, final_etag_str); snprintf(&final_etag_str[CEPH_CRYPTO_MD5_DIGESTSIZE * 2], sizeof(final_etag_str) - CEPH_CRYPTO_MD5_DIGESTSIZE * 2, "-%u", parts); calculated_etag = final_etag_str; ldout(cct, 20) << "MPU calculated ETag:" << calculated_etag << 
dendl; } } // namespace rgw::putobj
6,148
31.026042
85
cc
null
ceph-main/src/rgw/driver/rados/rgw_etag_verifier.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * RGW Etag Verifier is an RGW filter which enables the objects copied using * multisite sync to be verified using their ETag from source i.e. the MD5 * checksum of the object is computed at the destination and is verified to be * identical to the ETag stored in the object HEAD at source cluster. * * For MPU objects, a different filter named RGWMultipartEtagFilter is applied * which re-computes ETag using RGWObjManifest. This computes the ETag using the * same algorithm used at the source cluster i.e. MD5 sum of the individual ETag * on the MPU parts. */ #pragma once #include "rgw_putobj.h" #include "rgw_op.h" #include "common/static_ptr.h" namespace rgw::putobj { class ETagVerifier : public rgw::putobj::Pipe { protected: CephContext* cct; MD5 hash; std::string calculated_etag; public: ETagVerifier(CephContext* cct_, rgw::sal::DataProcessor *next) : Pipe(next), cct(cct_) { // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); } virtual void calculate_etag() = 0; std::string get_calculated_etag() { return calculated_etag;} }; /* ETagVerifier */ class ETagVerifier_Atomic : public ETagVerifier { public: ETagVerifier_Atomic(CephContext* cct_, rgw::sal::DataProcessor *next) : ETagVerifier(cct_, next) {} int process(bufferlist&& data, uint64_t logical_offset) override; void calculate_etag() override; }; /* ETagVerifier_Atomic */ class ETagVerifier_MPU : public ETagVerifier { std::vector<uint64_t> part_ofs; uint64_t cur_part_index{0}, next_part_index{1}; MD5 mpu_etag_hash; void process_end_of_MPU_part(); public: ETagVerifier_MPU(CephContext* cct, std::vector<uint64_t> part_ofs, rgw::sal::DataProcessor *next) : ETagVerifier(cct, next), part_ofs(std::move(part_ofs)) { // Allow use of MD5 digest in FIPS mode for non-cryptographic purposes hash.SetFlags(EVP_MD_CTX_FLAG_NON_FIPS_ALLOW); } int 
process(bufferlist&& data, uint64_t logical_offset) override; void calculate_etag() override; }; /* ETagVerifier_MPU */ constexpr auto max_etag_verifier_size = std::max( sizeof(ETagVerifier_Atomic), sizeof(ETagVerifier_MPU) ); using etag_verifier_ptr = ceph::static_ptr<ETagVerifier, max_etag_verifier_size>; int create_etag_verifier(const DoutPrefixProvider *dpp, CephContext* cct, rgw::sal::DataProcessor* next, const bufferlist& manifest_bl, const std::optional<RGWCompressionInfo>& compression, etag_verifier_ptr& verifier); } // namespace rgw::putobj
2,817
29.967033
81
h
null
ceph-main/src/rgw/driver/rados/rgw_gc.cc
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp #include "rgw_gc.h" #include "rgw_tools.h" #include "include/scope_guard.h" #include "include/rados/librados.hpp" #include "cls/rgw/cls_rgw_client.h" #include "cls/rgw_gc/cls_rgw_gc_client.h" #include "cls/refcount/cls_refcount_client.h" #include "cls/version/cls_version_client.h" #include "rgw_perf_counters.h" #include "cls/lock/cls_lock_client.h" #include "include/random.h" #include "rgw_gc_log.h" #include <list> // XXX #include <sstream> #include "xxhash.h" #define dout_context g_ceph_context #define dout_subsys ceph_subsys_rgw using namespace std; using namespace librados; static string gc_oid_prefix = "gc"; static string gc_index_lock_name = "gc_process"; void RGWGC::initialize(CephContext *_cct, RGWRados *_store, optional_yield y) { cct = _cct; store = _store; max_objs = min(static_cast<int>(cct->_conf->rgw_gc_max_objs), rgw_shards_max()); obj_names = new string[max_objs]; for (int i = 0; i < max_objs; i++) { obj_names[i] = gc_oid_prefix; char buf[32]; snprintf(buf, 32, ".%d", i); obj_names[i].append(buf); auto it = transitioned_objects_cache.begin() + i; transitioned_objects_cache.insert(it, false); //version = 0 -> not ready for transition //version = 1 -> marked ready for transition librados::ObjectWriteOperation op; op.create(false); const uint64_t queue_size = cct->_conf->rgw_gc_max_queue_size, num_deferred_entries = cct->_conf->rgw_gc_max_deferred; gc_log_init2(op, queue_size, num_deferred_entries); store->gc_operate(this, obj_names[i], &op, y); } } void RGWGC::finalize() { delete[] obj_names; } int RGWGC::tag_index(const string& tag) { return rgw_shards_mod(XXH64(tag.c_str(), tag.size(), seed), max_objs); } std::tuple<int, std::optional<cls_rgw_obj_chain>> RGWGC::send_split_chain(const cls_rgw_obj_chain& chain, const std::string& tag, optional_yield y) { ldpp_dout(this, 20) << "RGWGC::send_split_chain - tag is: " << tag << dendl; if 
(cct->_conf->rgw_max_chunk_size) { cls_rgw_obj_chain broken_chain; ldpp_dout(this, 20) << "RGWGC::send_split_chain - rgw_max_chunk_size is: " << cct->_conf->rgw_max_chunk_size << dendl; for (auto it = chain.objs.begin(); it != chain.objs.end(); it++) { ldpp_dout(this, 20) << "RGWGC::send_split_chain - adding obj with name: " << it->key << dendl; broken_chain.objs.emplace_back(*it); cls_rgw_gc_obj_info info; info.tag = tag; info.chain = broken_chain; cls_rgw_gc_set_entry_op op; op.info = info; size_t total_encoded_size = op.estimate_encoded_size(); ldpp_dout(this, 20) << "RGWGC::send_split_chain - total_encoded_size is: " << total_encoded_size << dendl; if (total_encoded_size > cct->_conf->rgw_max_chunk_size) { //dont add to chain, and send to gc broken_chain.objs.pop_back(); --it; ldpp_dout(this, 20) << "RGWGC::send_split_chain - more than, dont add to broken chain and send chain" << dendl; auto ret = send_chain(broken_chain, tag, y); if (ret < 0) { broken_chain.objs.insert(broken_chain.objs.end(), it, chain.objs.end()); // add all the remainder objs to the list to be deleted inline ldpp_dout(this, 0) << "RGWGC::send_split_chain - send chain returned error: " << ret << dendl; return {ret, {broken_chain}}; } broken_chain.objs.clear(); } } if (!broken_chain.objs.empty()) { //when the chain is smaller than or equal to rgw_max_chunk_size ldpp_dout(this, 20) << "RGWGC::send_split_chain - sending leftover objects" << dendl; auto ret = send_chain(broken_chain, tag, y); if (ret < 0) { ldpp_dout(this, 0) << "RGWGC::send_split_chain - send chain returned error: " << ret << dendl; return {ret, {broken_chain}}; } } } else { auto ret = send_chain(chain, tag, y); if (ret < 0) { ldpp_dout(this, 0) << "RGWGC::send_split_chain - send chain returned error: " << ret << dendl; return {ret, {std::move(chain)}}; } } return {0, {}}; } int RGWGC::send_chain(const cls_rgw_obj_chain& chain, const string& tag, optional_yield y) { ObjectWriteOperation op; cls_rgw_gc_obj_info info; info.chain 
= chain; info.tag = tag; gc_log_enqueue2(op, cct->_conf->rgw_gc_obj_min_wait, info); int i = tag_index(tag); ldpp_dout(this, 20) << "RGWGC::send_chain - on object name: " << obj_names[i] << "tag is: " << tag << dendl; auto ret = store->gc_operate(this, obj_names[i], &op, y); if (ret != -ECANCELED && ret != -EPERM) { return ret; } ObjectWriteOperation set_entry_op; cls_rgw_gc_set_entry(set_entry_op, cct->_conf->rgw_gc_obj_min_wait, info); return store->gc_operate(this, obj_names[i], &set_entry_op, y); } struct defer_chain_state { librados::AioCompletion* completion = nullptr; // TODO: hold a reference on the state in RGWGC to avoid use-after-free if // RGWGC destructs before this completion fires RGWGC* gc = nullptr; cls_rgw_gc_obj_info info; ~defer_chain_state() { if (completion) { completion->release(); } } }; static void async_defer_callback(librados::completion_t, void* arg) { std::unique_ptr<defer_chain_state> state{static_cast<defer_chain_state*>(arg)}; if (state->completion->get_return_value() == -ECANCELED) { state->gc->on_defer_canceled(state->info); } } void RGWGC::on_defer_canceled(const cls_rgw_gc_obj_info& info) { const std::string& tag = info.tag; const int i = tag_index(tag); // ECANCELED from cls_version_check() tells us that we've transitioned transitioned_objects_cache[i] = true; ObjectWriteOperation op; cls_rgw_gc_queue_defer_entry(op, cct->_conf->rgw_gc_obj_min_wait, info); cls_rgw_gc_remove(op, {tag}); auto c = librados::Rados::aio_create_completion(nullptr, nullptr); store->gc_aio_operate(obj_names[i], c, &op); c->release(); } int RGWGC::async_defer_chain(const string& tag, const cls_rgw_obj_chain& chain) { const int i = tag_index(tag); cls_rgw_gc_obj_info info; info.chain = chain; info.tag = tag; // if we've transitioned this shard object, we can rely on the cls_rgw_gc queue if (transitioned_objects_cache[i]) { ObjectWriteOperation op; cls_rgw_gc_queue_defer_entry(op, cct->_conf->rgw_gc_obj_min_wait, info); // this tag may still be present in 
omap, so remove it once the cls_rgw_gc // enqueue succeeds cls_rgw_gc_remove(op, {tag}); auto c = librados::Rados::aio_create_completion(nullptr, nullptr); int ret = store->gc_aio_operate(obj_names[i], c, &op); c->release(); return ret; } // if we haven't seen the transition yet, write the defer to omap with cls_rgw ObjectWriteOperation op; // assert that we haven't initialized cls_rgw_gc queue. this prevents us // from writing new entries to omap after the transition gc_log_defer1(op, cct->_conf->rgw_gc_obj_min_wait, info); // prepare a callback to detect the transition via ECANCELED from cls_version_check() auto state = std::make_unique<defer_chain_state>(); state->gc = this; state->info.chain = chain; state->info.tag = tag; state->completion = librados::Rados::aio_create_completion( state.get(), async_defer_callback); int ret = store->gc_aio_operate(obj_names[i], state->completion, &op); if (ret == 0) { // coverity[leaked_storage:SUPPRESS] state.release(); // release ownership until async_defer_callback() } return ret; } int RGWGC::remove(int index, const std::vector<string>& tags, AioCompletion **pc, optional_yield y) { ObjectWriteOperation op; cls_rgw_gc_remove(op, tags); auto c = librados::Rados::aio_create_completion(nullptr, nullptr); int ret = store->gc_aio_operate(obj_names[index], c, &op); if (ret < 0) { c->release(); } else { *pc = c; } return ret; } int RGWGC::remove(int index, int num_entries, optional_yield y) { ObjectWriteOperation op; cls_rgw_gc_queue_remove_entries(op, num_entries); return store->gc_operate(this, obj_names[index], &op, y); } int RGWGC::list(int *index, string& marker, uint32_t max, bool expired_only, std::list<cls_rgw_gc_obj_info>& result, bool *truncated, bool& processing_queue) { result.clear(); string next_marker; bool check_queue = false; for (; *index < max_objs && result.size() < max; (*index)++, marker.clear(), check_queue = false) { std::list<cls_rgw_gc_obj_info> entries, queue_entries; int ret = 0; //processing_queue is 
set to true from previous iteration if the queue was under process and probably has more elements in it. if (! transitioned_objects_cache[*index] && ! check_queue && ! processing_queue) { ret = cls_rgw_gc_list(store->gc_pool_ctx, obj_names[*index], marker, max - result.size(), expired_only, entries, truncated, next_marker); if (ret != -ENOENT && ret < 0) { return ret; } obj_version objv; cls_version_read(store->gc_pool_ctx, obj_names[*index], &objv); if (ret == -ENOENT || entries.size() == 0) { if (objv.ver == 0) { continue; } else { if (! expired_only) { transitioned_objects_cache[*index] = true; marker.clear(); } else { std::list<cls_rgw_gc_obj_info> non_expired_entries; ret = cls_rgw_gc_list(store->gc_pool_ctx, obj_names[*index], marker, 1, false, non_expired_entries, truncated, next_marker); if (non_expired_entries.size() == 0) { transitioned_objects_cache[*index] = true; marker.clear(); } } } } if ((objv.ver == 1) && (entries.size() < max - result.size())) { check_queue = true; marker.clear(); } } if (transitioned_objects_cache[*index] || check_queue || processing_queue) { processing_queue = false; ret = cls_rgw_gc_queue_list_entries(store->gc_pool_ctx, obj_names[*index], marker, (max - result.size()) - entries.size(), expired_only, queue_entries, truncated, next_marker); if (ret < 0) { return ret; } } if (entries.size() == 0 && queue_entries.size() == 0) continue; std::list<cls_rgw_gc_obj_info>::iterator iter; for (iter = entries.begin(); iter != entries.end(); ++iter) { result.push_back(*iter); } for (iter = queue_entries.begin(); iter != queue_entries.end(); ++iter) { result.push_back(*iter); } marker = next_marker; if (*index == max_objs - 1) { if (queue_entries.size() > 0 && *truncated) { processing_queue = true; } else { processing_queue = false; } /* we cut short here, truncated will hold the correct value */ return 0; } if (result.size() == max) { if (queue_entries.size() > 0 && *truncated) { processing_queue = true; } else { processing_queue = false; 
*index += 1; //move to next gc object } /* close approximation, it might be that the next of the objects don't hold * anything, in this case truncated should have been false, but we can find * that out on the next iteration */ *truncated = true; return 0; } } *truncated = false; processing_queue = false; return 0; } class RGWGCIOManager { const DoutPrefixProvider* dpp; CephContext *cct; RGWGC *gc; struct IO { enum Type { UnknownIO = 0, TailIO = 1, IndexIO = 2, } type{UnknownIO}; librados::AioCompletion *c{nullptr}; string oid; int index{-1}; string tag; }; deque<IO> ios; vector<std::vector<string> > remove_tags; /* tracks the number of remaining shadow objects for a given tag in order to * only remove the tag once all shadow objects have themselves been removed */ vector<map<string, size_t> > tag_io_size; #define MAX_AIO_DEFAULT 10 size_t max_aio{MAX_AIO_DEFAULT}; public: RGWGCIOManager(const DoutPrefixProvider* _dpp, CephContext *_cct, RGWGC *_gc) : dpp(_dpp), cct(_cct), gc(_gc) { max_aio = cct->_conf->rgw_gc_max_concurrent_io; remove_tags.resize(min(static_cast<int>(cct->_conf->rgw_gc_max_objs), rgw_shards_max())); tag_io_size.resize(min(static_cast<int>(cct->_conf->rgw_gc_max_objs), rgw_shards_max())); } ~RGWGCIOManager() { for (auto io : ios) { io.c->release(); } } int schedule_io(IoCtx *ioctx, const string& oid, ObjectWriteOperation *op, int index, const string& tag) { while (ios.size() > max_aio) { if (gc->going_down()) { return 0; } auto ret = handle_next_completion(); //Return error if we are using queue, else ignore it if (gc->transitioned_objects_cache[index] && ret < 0) { return ret; } } auto c = librados::Rados::aio_create_completion(nullptr, nullptr); int ret = ioctx->aio_operate(oid, c, op); if (ret < 0) { return ret; } ios.push_back(IO{IO::TailIO, c, oid, index, tag}); return 0; } int handle_next_completion() { ceph_assert(!ios.empty()); IO& io = ios.front(); io.c->wait_for_complete(); int ret = io.c->get_return_value(); io.c->release(); if (ret == 
-ENOENT) { ret = 0; } if (io.type == IO::IndexIO && ! gc->transitioned_objects_cache[io.index]) { if (ret < 0) { ldpp_dout(dpp, 0) << "WARNING: gc cleanup of tags on gc shard index=" << io.index << " returned error, ret=" << ret << dendl; } goto done; } if (ret < 0) { ldpp_dout(dpp, 0) << "WARNING: gc could not remove oid=" << io.oid << ", ret=" << ret << dendl; goto done; } if (! gc->transitioned_objects_cache[io.index]) { schedule_tag_removal(io.index, io.tag); } done: ios.pop_front(); return ret; } /* This is a request to schedule a tag removal. It will be called once when * there are no shadow objects. But it will also be called for every shadow * object when there are any. Since we do not want the tag to be removed * until all shadow objects have been successfully removed, the scheduling * will not happen until the shadow object count goes down to zero */ void schedule_tag_removal(int index, string tag) { auto& ts = tag_io_size[index]; auto ts_it = ts.find(tag); if (ts_it != ts.end()) { auto& size = ts_it->second; --size; // wait all shadow obj delete return if (size != 0) return; ts.erase(ts_it); } auto& rt = remove_tags[index]; rt.push_back(tag); if (rt.size() >= (size_t)cct->_conf->rgw_gc_max_trim_chunk) { flush_remove_tags(index, rt); } } void add_tag_io_size(int index, string tag, size_t size) { auto& ts = tag_io_size[index]; ts.emplace(tag, size); } int drain_ios() { int ret_val = 0; while (!ios.empty()) { if (gc->going_down()) { return -EAGAIN; } auto ret = handle_next_completion(); if (ret < 0) { ret_val = ret; } } return ret_val; } void drain() { drain_ios(); flush_remove_tags(); /* the tags draining might have generated more ios, drain those too */ drain_ios(); } void flush_remove_tags(int index, vector<string>& rt) { IO index_io; index_io.type = IO::IndexIO; index_io.index = index; ldpp_dout(dpp, 20) << __func__ << " removing entries from gc log shard index=" << index << ", size=" << rt.size() << ", entries=" << rt << dendl; auto rt_guard = 
make_scope_guard( [&] { rt.clear(); } ); int ret = gc->remove(index, rt, &index_io.c, null_yield); if (ret < 0) { /* we already cleared list of tags, this prevents us from * ballooning in case of a persistent problem */ ldpp_dout(dpp, 0) << "WARNING: failed to remove tags on gc shard index=" << index << " ret=" << ret << dendl; return; } if (perfcounter) { /* log the count of tags retired for rate estimation */ perfcounter->inc(l_rgw_gc_retire, rt.size()); } ios.push_back(index_io); } void flush_remove_tags() { int index = 0; for (auto& rt : remove_tags) { if (! gc->transitioned_objects_cache[index]) { flush_remove_tags(index, rt); } ++index; } } int remove_queue_entries(int index, int num_entries, optional_yield y) { int ret = gc->remove(index, num_entries, null_yield); if (ret < 0) { ldpp_dout(dpp, 0) << "ERROR: failed to remove queue entries on index=" << index << " ret=" << ret << dendl; return ret; } if (perfcounter) { /* log the count of tags retired for rate estimation */ perfcounter->inc(l_rgw_gc_retire, num_entries); } return 0; } }; // class RGWGCIOManger int RGWGC::process(int index, int max_secs, bool expired_only, RGWGCIOManager& io_manager, optional_yield y) { ldpp_dout(this, 20) << "RGWGC::process entered with GC index_shard=" << index << ", max_secs=" << max_secs << ", expired_only=" << expired_only << dendl; rados::cls::lock::Lock l(gc_index_lock_name); utime_t end = ceph_clock_now(); /* max_secs should be greater than zero. We don't want a zero max_secs * to be translated as no timeout, since we'd then need to break the * lock and that would require a manual intervention. In this case * we can just wait it out. 
*/ if (max_secs <= 0) return -EAGAIN; end += max_secs; utime_t time(max_secs, 0); l.set_duration(time); int ret = l.lock_exclusive(&store->gc_pool_ctx, obj_names[index]); if (ret == -EBUSY) { /* already locked by another gc processor */ ldpp_dout(this, 10) << "RGWGC::process failed to acquire lock on " << obj_names[index] << dendl; return 0; } if (ret < 0) return ret; string marker; string next_marker; bool truncated = false; IoCtx *ctx = new IoCtx; do { int max = 100; std::list<cls_rgw_gc_obj_info> entries; int ret = 0; if (! transitioned_objects_cache[index]) { ret = cls_rgw_gc_list(store->gc_pool_ctx, obj_names[index], marker, max, expired_only, entries, &truncated, next_marker); ldpp_dout(this, 20) << "RGWGC::process cls_rgw_gc_list returned with returned:" << ret << ", entries.size=" << entries.size() << ", truncated=" << truncated << ", next_marker='" << next_marker << "'" << dendl; obj_version objv; cls_version_read(store->gc_pool_ctx, obj_names[index], &objv); if ((objv.ver == 1) && entries.size() == 0) { std::list<cls_rgw_gc_obj_info> non_expired_entries; ret = cls_rgw_gc_list(store->gc_pool_ctx, obj_names[index], marker, 1, false, non_expired_entries, &truncated, next_marker); if (non_expired_entries.size() == 0) { transitioned_objects_cache[index] = true; marker.clear(); ldpp_dout(this, 20) << "RGWGC::process cls_rgw_gc_list returned NO non expired entries, so setting cache entry to TRUE" << dendl; } else { ret = 0; goto done; } } if ((objv.ver == 0) && (ret == -ENOENT || entries.size() == 0)) { ret = 0; goto done; } } if (transitioned_objects_cache[index]) { ret = cls_rgw_gc_queue_list_entries(store->gc_pool_ctx, obj_names[index], marker, max, expired_only, entries, &truncated, next_marker); ldpp_dout(this, 20) << "RGWGC::process cls_rgw_gc_queue_list_entries returned with return value:" << ret << ", entries.size=" << entries.size() << ", truncated=" << truncated << ", next_marker='" << next_marker << "'" << dendl; if (entries.size() == 0) { ret = 0; 
goto done; } } if (ret < 0) goto done; marker = next_marker; string last_pool; std::list<cls_rgw_gc_obj_info>::iterator iter; for (iter = entries.begin(); iter != entries.end(); ++iter) { cls_rgw_gc_obj_info& info = *iter; ldpp_dout(this, 20) << "RGWGC::process iterating over entry tag='" << info.tag << "', time=" << info.time << ", chain.objs.size()=" << info.chain.objs.size() << dendl; std::list<cls_rgw_obj>::iterator liter; cls_rgw_obj_chain& chain = info.chain; utime_t now = ceph_clock_now(); if (now >= end) { goto done; } if (! transitioned_objects_cache[index]) { if (chain.objs.empty()) { io_manager.schedule_tag_removal(index, info.tag); } else { io_manager.add_tag_io_size(index, info.tag, chain.objs.size()); } } if (! chain.objs.empty()) { for (liter = chain.objs.begin(); liter != chain.objs.end(); ++liter) { cls_rgw_obj& obj = *liter; if (obj.pool != last_pool) { delete ctx; ctx = new IoCtx; ret = rgw_init_ioctx(this, store->get_rados_handle(), obj.pool, *ctx); if (ret < 0) { if (transitioned_objects_cache[index]) { goto done; } last_pool = ""; ldpp_dout(this, 0) << "ERROR: failed to create ioctx pool=" << obj.pool << dendl; continue; } last_pool = obj.pool; } ctx->locator_set_key(obj.loc); const string& oid = obj.key.name; /* just stored raw oid there */ ldpp_dout(this, 5) << "RGWGC::process removing " << obj.pool << ":" << obj.key.name << dendl; ObjectWriteOperation op; cls_refcount_put(op, info.tag, true); ret = io_manager.schedule_io(ctx, oid, &op, index, info.tag); if (ret < 0) { ldpp_dout(this, 0) << "WARNING: failed to schedule deletion for oid=" << oid << dendl; if (transitioned_objects_cache[index]) { //If deleting oid failed for any of them, we will not delete queue entries goto done; } } if (going_down()) { // leave early, even if tag isn't removed, it's ok since it // will be picked up next time around goto done; } } // chains loop } // else -- chains not empty } // entries loop if (transitioned_objects_cache[index] && entries.size() > 0) { ret 
= io_manager.drain_ios(); if (ret < 0) { goto done; } //Remove the entries from the queue ldpp_dout(this, 5) << "RGWGC::process removing entries, marker: " << marker << dendl; ret = io_manager.remove_queue_entries(index, entries.size(), null_yield); if (ret < 0) { ldpp_dout(this, 0) << "WARNING: failed to remove queue entries" << dendl; goto done; } } } while (truncated); done: /* we don't drain here, because if we're going down we don't want to * hold the system if backend is unresponsive */ l.unlock(&store->gc_pool_ctx, obj_names[index]); delete ctx; return 0; } int RGWGC::process(bool expired_only, optional_yield y) { int max_secs = cct->_conf->rgw_gc_processor_max_time; const int start = ceph::util::generate_random_number(0, max_objs - 1); RGWGCIOManager io_manager(this, store->ctx(), this); for (int i = 0; i < max_objs; i++) { int index = (i + start) % max_objs; int ret = process(index, max_secs, expired_only, io_manager, y); if (ret < 0) return ret; } if (!going_down()) { io_manager.drain(); } return 0; } bool RGWGC::going_down() { return down_flag; } void RGWGC::start_processor() { worker = new GCWorker(this, cct, this); worker->create("rgw_gc"); } void RGWGC::stop_processor() { down_flag = true; if (worker) { worker->stop(); worker->join(); } delete worker; worker = NULL; } unsigned RGWGC::get_subsys() const { return dout_subsys; } std::ostream& RGWGC::gen_prefix(std::ostream& out) const { return out << "garbage collection: "; } void *RGWGC::GCWorker::entry() { do { utime_t start = ceph_clock_now(); ldpp_dout(dpp, 2) << "garbage collection: start" << dendl; int r = gc->process(true, null_yield); if (r < 0) { ldpp_dout(dpp, 0) << "ERROR: garbage collection process() returned error r=" << r << dendl; } ldpp_dout(dpp, 2) << "garbage collection: stop" << dendl; if (gc->going_down()) break; utime_t end = ceph_clock_now(); end -= start; int secs = cct->_conf->rgw_gc_processor_period; if (secs <= end.sec()) continue; // next round secs -= end.sec(); 
std::unique_lock locker{lock}; cond.wait_for(locker, std::chrono::seconds(secs)); } while (!gc->going_down()); return NULL; } void RGWGC::GCWorker::stop() { std::lock_guard l{lock}; cond.notify_all(); }
24,800
29.505535
182
cc