repo
stringlengths 1
152
⌀ | file
stringlengths 15
205
| code
stringlengths 0
41.6M
| file_length
int64 0
41.6M
| avg_line_length
float64 0
1.81M
| max_line_length
int64 0
12.7M
| extension_type
stringclasses 90
values |
---|---|---|---|---|---|---|
null | ceph-main/src/include/win32/sys/uio.h | #include "include/compat.h"
| 28 | 13.5 | 27 | h |
null | ceph-main/src/include/win32/sys/un.h | #include "include/win32/winsock_compat.h"
| 42 | 20.5 | 41 | h |
null | ceph-main/src/java/native/JniConstants.cpp | /*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "JniConstants.h"
#include "ScopedLocalRef.h"
#include <stdlib.h>
jclass JniConstants::inet6AddressClass;
jclass JniConstants::inetAddressClass;
jclass JniConstants::inetSocketAddressClass;
jclass JniConstants::stringClass;
/*
 * Look up a class by name and promote it to a global reference.
 *
 * Aborts the process if the class cannot be found or the global reference
 * cannot be created: these caches are required for the JNI layer to work
 * at all, so there is no sane way to continue.
 */
static jclass findClass(JNIEnv* env, const char* name) {
    ScopedLocalRef<jclass> localClass(env, env->FindClass(name));
    if (localClass.get() == NULL) {
        // FindClass failed and left a pending ClassNotFoundError; bail out
        // before making any further JNI calls, which would be undefined
        // with an exception pending.
        fprintf(stderr, "failed to find class '%s'\n", name);
        abort();
    }
    jclass result = reinterpret_cast<jclass>(env->NewGlobalRef(localClass.get()));
    if (result == NULL) {
        // NewGlobalRef returns NULL on out-of-memory.
        fprintf(stderr, "failed to create global reference for class '%s'\n", name);
        abort();
    }
    return result;
}
void JniConstants::init(JNIEnv* env) {
    // Table-driven lookup: each entry pairs a cache slot with the JNI name
    // of the class it should hold. findClass() aborts on failure, so after
    // this loop every slot holds a valid global reference.
    struct ClassEntry {
        jclass* slot;
        const char* name;
    };
    static const ClassEntry kClasses[] = {
        { &inet6AddressClass,      "java/net/Inet6Address" },
        { &inetAddressClass,       "java/net/InetAddress" },
        { &inetSocketAddressClass, "java/net/InetSocketAddress" },
        { &stringClass,            "java/lang/String" },
    };
    for (size_t i = 0; i < sizeof(kClasses) / sizeof(kClasses[0]); ++i) {
        *kClasses[i].slot = findClass(env, kClasses[i].name);
    }
}
| 1,491 | 33.697674 | 82 | cpp |
null | ceph-main/src/java/native/JniConstants.h | /*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef JNI_CONSTANTS_H_included
#define JNI_CONSTANTS_H_included
#include <jni.h>
/**
* A cache to avoid calling FindClass at runtime.
*
* Class lookup is relatively expensive (2.5us on passion-eng at the time of writing), so we do
* all such lookups eagerly at VM startup. This means that code that never uses, say,
* java.util.zip.Deflater still has to pay for the lookup, but it means that on a device the cost
* is definitely paid during boot and amortized. A central cache also removes the temptation to
* dynamically call FindClass rather than add a small cache to each file that needs one. Another
* cost is that each class cached here requires a global reference, though in practice we save
* enough by not having a global reference for each file that uses a class such as java.lang.String
* which is used in several files.
*
* FindClass is still called in a couple of situations: when throwing exceptions, and in some of
* the serialization code. The former is clearly not a performance case, and we're currently
* assuming that neither is the latter.
*
* TODO: similar arguments hold for field and method IDs; we should cache them centrally too.
*/
struct JniConstants {
// Populate every cached jclass below. Must be called once (at JNI library
// initialization) before any of the fields are read.
static void init(JNIEnv* env);
// Cached global references, filled in by init(); see findClass() in
// JniConstants.cpp. They are never released.
static jclass inet6AddressClass;
static jclass inetAddressClass;
static jclass inetSocketAddressClass;
static jclass stringClass;
};
/*
 * Build a JNINativeMethod table entry: maps the Java-visible function name
 * and signature to the C symbol <className>_<functionName>.
 */
#define NATIVE_METHOD(className, functionName, signature) \
{ #functionName, signature, reinterpret_cast<void*>(className ## _ ## functionName) }
#endif // JNI_CONSTANTS_H_included
| 2,195 | 40.433962 | 99 | h |
null | ceph-main/src/java/native/ScopedLocalRef.h | /*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef SCOPED_LOCAL_REF_H_included
#define SCOPED_LOCAL_REF_H_included
#include "jni.h"
#include <stddef.h>
// A smart pointer that deletes a JNI local reference when it goes out of scope.
template<typename T>
class ScopedLocalRef {
public:
    ScopedLocalRef(JNIEnv* env, T localRef) : mEnv(env), mLocalRef(localRef) {
    }

    ~ScopedLocalRef() {
        reset();
    }

    // Drop the current reference (if any) and start tracking |ptr| instead.
    // Resetting to the reference already held is a no-op.
    void reset(T ptr = NULL) {
        if (ptr == mLocalRef) {
            return;
        }
        if (mLocalRef != NULL) {
            mEnv->DeleteLocalRef(mLocalRef);
        }
        mLocalRef = ptr;
    }

    // Hand ownership of the reference back to the caller; this object no
    // longer deletes it.
    T release() __attribute__((warn_unused_result)) {
        T ref = mLocalRef;
        mLocalRef = NULL;
        return ref;
    }

    // Non-owning accessor.
    T get() const {
        return mLocalRef;
    }

private:
    JNIEnv* mEnv;
    T mLocalRef;

    // Disallow copy and assignment.
    ScopedLocalRef(const ScopedLocalRef&);
    void operator=(const ScopedLocalRef&);
};
#endif // SCOPED_LOCAL_REF_H_included
| 1,617 | 24.28125 | 80 | h |
null | ceph-main/src/java/native/libcephfs_jni.cc | /*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/un.h>
#include <jni.h>
#include "ScopedLocalRef.h"
#include "JniConstants.h"
#include "include/cephfs/libcephfs.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_javaclient
#include "com_ceph_fs_CephMount.h"
#define CEPH_STAT_CP "com/ceph/fs/CephStat"
#define CEPH_STAT_VFS_CP "com/ceph/fs/CephStatVFS"
#define CEPH_FILE_EXTENT_CP "com/ceph/fs/CephFileExtent"
#define CEPH_MOUNT_CP "com/ceph/fs/CephMount"
#define CEPH_NOTMOUNTED_CP "com/ceph/fs/CephNotMountedException"
#define CEPH_FILEEXISTS_CP "com/ceph/fs/CephFileAlreadyExistsException"
#define CEPH_ALREADYMOUNTED_CP "com/ceph/fs/CephAlreadyMountedException"
#define CEPH_NOTDIR_CP "com/ceph/fs/CephNotDirectoryException"
/*
* Flags to open(). must be synchronized with CephMount.java
*
* There are two versions of flags: the version in Java and the version in the
* target library (e.g. libc or libcephfs). We control the Java values and map
* to the target value with fixup_* functions below. This is much faster than
* keeping the values in Java and making a cross-JNI up-call to retrieve them,
* and makes it easy to keep any platform specific value changes in this file.
*/
#define JAVA_O_RDONLY 1
#define JAVA_O_RDWR 2
#define JAVA_O_APPEND 4
#define JAVA_O_CREAT 8
#define JAVA_O_TRUNC 16
#define JAVA_O_EXCL 32
#define JAVA_O_WRONLY 64
#define JAVA_O_DIRECTORY 128
/*
* Whence flags for seek(). sync with CephMount.java if changed.
*
* Mapping of SEEK_* done in seek function.
*/
#define JAVA_SEEK_SET 1
#define JAVA_SEEK_CUR 2
#define JAVA_SEEK_END 3
/*
* File attribute flags. sync with CephMount.java if changed.
*/
#define JAVA_SETATTR_MODE 1
#define JAVA_SETATTR_UID 2
#define JAVA_SETATTR_GID 4
#define JAVA_SETATTR_MTIME 8
#define JAVA_SETATTR_ATIME 16
/*
* Setxattr flags. sync with CephMount.java if changed.
*/
#define JAVA_XATTR_CREATE 1
#define JAVA_XATTR_REPLACE 2
#define JAVA_XATTR_NONE 3
/*
* flock flags. sync with CephMount.java if changed.
*/
#define JAVA_LOCK_SH 1
#define JAVA_LOCK_EX 2
#define JAVA_LOCK_NB 4
#define JAVA_LOCK_UN 8
using namespace std;
/* Map JAVA_O_* open flags to values in libc */
static inline int fixup_open_flags(jint jflags)
{
  /* Translation table: Java-side flag bit -> libc O_* flag. */
  static const struct {
    int java_flag;
    int libc_flag;
  } flag_map[] = {
    { JAVA_O_RDONLY,    O_RDONLY    },
    { JAVA_O_RDWR,      O_RDWR      },
    { JAVA_O_APPEND,    O_APPEND    },
    { JAVA_O_CREAT,     O_CREAT     },
    { JAVA_O_TRUNC,     O_TRUNC     },
    { JAVA_O_EXCL,      O_EXCL      },
    { JAVA_O_WRONLY,    O_WRONLY    },
    { JAVA_O_DIRECTORY, O_DIRECTORY },
  };

  int flags = 0;
  for (size_t i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
    if (jflags & flag_map[i].java_flag)
      flags |= flag_map[i].libc_flag;
  }
  return flags;
}
/* Map JAVA_SETATTR_* to values in ceph lib */
static inline int fixup_attr_mask(jint jmask)
{
  /* Translation table: Java-side setattr bit -> libcephfs CEPH_SETATTR_*. */
  static const struct {
    int java_bit;
    int ceph_bit;
  } mask_map[] = {
    { JAVA_SETATTR_MODE,  CEPH_SETATTR_MODE  },
    { JAVA_SETATTR_UID,   CEPH_SETATTR_UID   },
    { JAVA_SETATTR_GID,   CEPH_SETATTR_GID   },
    { JAVA_SETATTR_MTIME, CEPH_SETATTR_MTIME },
    { JAVA_SETATTR_ATIME, CEPH_SETATTR_ATIME },
  };

  int mask = 0;
  for (size_t i = 0; i < sizeof(mask_map) / sizeof(mask_map[0]); i++) {
    if (jmask & mask_map[i].java_bit)
      mask |= mask_map[i].ceph_bit;
  }
  return mask;
}
/* Cached field IDs for com.ceph.fs.CephStat */
static jfieldID cephstat_mode_fid;
static jfieldID cephstat_uid_fid;
static jfieldID cephstat_gid_fid;
static jfieldID cephstat_size_fid;
static jfieldID cephstat_blksize_fid;
static jfieldID cephstat_blocks_fid;
static jfieldID cephstat_a_time_fid;
static jfieldID cephstat_m_time_fid;
static jfieldID cephstat_is_file_fid;
static jfieldID cephstat_is_directory_fid;
static jfieldID cephstat_is_symlink_fid;
/* Cached field IDs for com.ceph.fs.CephStatVFS */
static jfieldID cephstatvfs_bsize_fid;
static jfieldID cephstatvfs_frsize_fid;
static jfieldID cephstatvfs_blocks_fid;
static jfieldID cephstatvfs_bavail_fid;
static jfieldID cephstatvfs_files_fid;
static jfieldID cephstatvfs_fsid_fid;
static jfieldID cephstatvfs_namemax_fid;
/* Cached field IDs for com.ceph.fs.CephMount */
static jfieldID cephmount_instance_ptr_fid;
/* Cached field IDs for com.ceph.fs.CephFileExtent */
static jclass cephfileextent_cls;
static jmethodID cephfileextent_ctor_fid;
/*
* Exception throwing helper. Adapted from Apache Hadoop header
* org_apache_hadoop.h by adding the do {} while (0) construct.
*/
#define THROW(env, exception_name, message) \
do { \
jclass ecls = env->FindClass(exception_name); \
if (ecls) { \
int ret = env->ThrowNew(ecls, message); \
if (ret < 0) { \
printf("(CephFS) Fatal Error\n"); \
} \
env->DeleteLocalRef(ecls); \
} \
} while (0)
/*
 * Convenience wrappers around THROW() for the exception types used in this
 * file. Each queues a pending Java exception; the caller must still return
 * an error value itself.
 */
static void cephThrowNullArg(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/NullPointerException", msg);
}
static void cephThrowOutOfMemory(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/OutOfMemoryError", msg);
}
static void cephThrowInternal(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/InternalError", msg);
}
static void cephThrowIndexBounds(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/IndexOutOfBoundsException", msg);
}
static void cephThrowIllegalArg(JNIEnv *env, const char *msg)
{
THROW(env, "java/lang/IllegalArgumentException", msg);
}
static void cephThrowFNF(JNIEnv *env, const char *msg)
{
THROW(env, "java/io/FileNotFoundException", msg);
}
/* The two below throw Ceph-specific exception classes (see *_CP defines). */
static void cephThrowFileExists(JNIEnv *env, const char *msg)
{
THROW(env, CEPH_FILEEXISTS_CP, msg);
}
static void cephThrowNotDir(JNIEnv *env, const char *msg)
{
THROW(env, CEPH_NOTDIR_CP, msg);
}
/*
 * Translate a negative errno returned by libcephfs into the matching Java
 * exception. Anything without a specific mapping becomes a generic
 * java.io.IOException carrying the strerror() text.
 */
static void handle_error(JNIEnv *env, int rc)
{
  if (rc == -ENOENT) {
    cephThrowFNF(env, "");
    return;
  }
  if (rc == -EEXIST) {
    cephThrowFileExists(env, "");
    return;
  }
  if (rc == -ENOTDIR) {
    cephThrowNotDir(env, "");
    return;
  }
  THROW(env, "java/io/IOException", strerror(-rc));
}
/* Throw NullPointerException with message (m) and return (r) if (v) is NULL. */
#define CHECK_ARG_NULL(v, m, r) do { \
if (!(v)) { \
cephThrowNullArg(env, (m)); \
return (r); \
} } while (0)
/* Throw IndexOutOfBoundsException with message (m) and return (r) if (c) holds. */
#define CHECK_ARG_BOUNDS(c, m, r) do { \
if ((c)) { \
cephThrowIndexBounds(env, (m)); \
return (r); \
} } while (0)
/* Throw CephNotMountedException and return (_r) unless mount (_c) is mounted. */
#define CHECK_MOUNTED(_c, _r) do { \
if (!ceph_is_mounted((_c))) { \
THROW(env, CEPH_NOTMOUNTED_CP, "not mounted"); \
return (_r); \
} } while (0)
/*
* Cast a jlong to ceph_mount_info. Each JNI function is expected to pass in
* the class instance variable instance_ptr. Passing a parameter is faster
* than reaching back into Java via an upcall to retrieve this pointer.
*/
static inline struct ceph_mount_info *get_ceph_mount(jlong j_mntp)
{
  // The Java side stores the pointer in a long field; convert it back.
  return reinterpret_cast<struct ceph_mount_info*>(j_mntp);
}
/*
* Setup cached field IDs
*/
static void setup_field_ids(JNIEnv *env, jclass clz)
{
jclass cephstat_cls;
jclass cephstatvfs_cls;
jclass tmp_cephfileextent_cls;
/*
* Get a fieldID from a class with a specific type
*
* clz: jclass
* field: field in clz
* type: integer, long, etc..
*
* This macro assumes some naming convention that is used
* only in this file:
*
* GETFID(cephstat, mode, I) gets translated into
* cephstat_mode_fid = env->GetFieldID(cephstat_cls, "mode", "I");
*/
#define GETFID(clz, field, type) do { \
clz ## _ ## field ## _fid = env->GetFieldID(clz ## _cls, #field, #type); \
if ( ! clz ## _ ## field ## _fid ) \
return; \
} while (0)
/* NOTE(review): every lookup failure below returns early with the pending
* Java exception still set; any IDs after the failing one stay uncached. */
/* Cache CephStat fields */
cephstat_cls = env->FindClass(CEPH_STAT_CP);
if (!cephstat_cls)
return;
GETFID(cephstat, mode, I);
GETFID(cephstat, uid, I);
GETFID(cephstat, gid, I);
GETFID(cephstat, size, J);
GETFID(cephstat, blksize, J);
GETFID(cephstat, blocks, J);
GETFID(cephstat, a_time, J);
GETFID(cephstat, m_time, J);
GETFID(cephstat, is_file, Z);
GETFID(cephstat, is_directory, Z);
GETFID(cephstat, is_symlink, Z);
/* Cache CephStatVFS fields */
cephstatvfs_cls = env->FindClass(CEPH_STAT_VFS_CP);
if (!cephstatvfs_cls)
return;
GETFID(cephstatvfs, bsize, J);
GETFID(cephstatvfs, frsize, J);
GETFID(cephstatvfs, blocks, J);
GETFID(cephstatvfs, bavail, J);
GETFID(cephstatvfs, files, J);
GETFID(cephstatvfs, fsid, J);
GETFID(cephstatvfs, namemax, J);
/* Cache CephFileExtent fields */
tmp_cephfileextent_cls = env->FindClass(CEPH_FILE_EXTENT_CP);
if (!tmp_cephfileextent_cls)
return;
/* Promote to a global ref so the constructor can be called later from any
* JNI entry point; it is apparently never released (lives until unload). */
cephfileextent_cls = (jclass)env->NewGlobalRef(tmp_cephfileextent_cls);
env->DeleteLocalRef(tmp_cephfileextent_cls);
cephfileextent_ctor_fid = env->GetMethodID(cephfileextent_cls, "<init>", "(JJ[I)V");
if (!cephfileextent_ctor_fid)
return;
JniConstants::init(env);
#undef GETFID
/* Field on the CephMount object that stores the native mount pointer. */
cephmount_instance_ptr_fid = env->GetFieldID(clz, "instance_ptr", "J");
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_initialize
* Signature: ()V
*/
JNIEXPORT void JNICALL Java_com_ceph_fs_CephMount_native_1initialize
(JNIEnv *env, jclass clz)
{
/* One-time setup: cache the field/method IDs used by all other JNI calls. */
setup_field_ids(env, clz);
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_create
* Signature: (Lcom/ceph/fs/CephMount;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1create
  (JNIEnv *env, jclass clz, jobject j_cephmount, jstring j_id)
{
  /*
   * Create the native ceph_mount_info for a CephMount object and stash the
   * pointer in its instance_ptr field. j_id (client id) may be null.
   */
  struct ceph_mount_info *cmount;
  const char *c_id = NULL;
  int ret;

  CHECK_ARG_NULL(j_cephmount, "@mount is null", -1);

  if (j_id) {
    c_id = env->GetStringUTFChars(j_id, NULL);
    if (!c_id) {
      cephThrowInternal(env, "Failed to pin memory");
      return -1;
    }
  }

  ret = ceph_create(&cmount, c_id);

  if (c_id)
    env->ReleaseStringUTFChars(j_id, c_id);

  if (ret) {
    THROW(env, "java/lang/RuntimeException", "failed to create Ceph mount object");
    return ret;
  }

  /*
   * Store the pointer as jlong, not long: on LLP64 platforms (e.g. 64-bit
   * Windows) long is 32 bits and the old (long) cast truncated the pointer.
   */
  env->SetLongField(j_cephmount, cephmount_instance_ptr_fid, (jlong)cmount);
  return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_mount
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1mount
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_root)
{
/* Mount the filesystem; j_root (mount root) may be null for the default. */
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_root = NULL;
int ret;
/*
* Toss a message up if we are already mounted.
*/
if (ceph_is_mounted(cmount)) {
THROW(env, CEPH_ALREADYMOUNTED_CP, "");
return -1;
}
if (j_root) {
c_root = env->GetStringUTFChars(j_root, NULL);
if (!c_root) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
}
ldout(cct, 10) << "jni: ceph_mount: " << (c_root ? c_root : "<NULL>") << dendl;
ret = ceph_mount(cmount, c_root);
ldout(cct, 10) << "jni: ceph_mount: exit ret " << ret << dendl;
/* The pinned UTF chars must be released on every path after this point. */
if (c_root)
env->ReleaseStringUTFChars(j_root, c_root);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_unmount
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1unmount
  (JNIEnv *env, jclass clz, jlong j_mntp)
{
  /* Unmount the filesystem; throws CephNotMountedException if not mounted. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  ldout(ctx, 10) << "jni: ceph_unmount enter" << dendl;

  CHECK_MOUNTED(mnt, -1);

  int rc = ceph_unmount(mnt);
  ldout(ctx, 10) << "jni: ceph_unmount exit ret " << rc << dendl;

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_release
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1release
  (JNIEnv *env, jclass clz, jlong j_mntp)
{
  /* Free the native mount object. No mounted-state check is performed. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  ldout(ctx, 10) << "jni: ceph_release called" << dendl;

  int rc = ceph_release(mnt);
  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_conf_set
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1conf_1set
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_opt, jstring j_val)
{
/* Set configuration option j_opt to j_val on this mount. */
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_opt, *c_val;
int ret;
CHECK_ARG_NULL(j_opt, "@option is null", -1);
CHECK_ARG_NULL(j_val, "@value is null", -1);
c_opt = env->GetStringUTFChars(j_opt, NULL);
if (!c_opt) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
c_val = env->GetStringUTFChars(j_val, NULL);
if (!c_val) {
/* second pin failed: release the first before bailing */
env->ReleaseStringUTFChars(j_opt, c_opt);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: conf_set: opt " << c_opt << " val " << c_val << dendl;
ret = ceph_conf_set(cmount, c_opt, c_val);
ldout(cct, 10) << "jni: conf_set: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_opt, c_opt);
env->ReleaseStringUTFChars(j_val, c_val);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_conf_get
* Signature: (JLjava/lang/String;)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1conf_1get
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_opt)
{
  /*
   * Return the value of configuration option j_opt, or NULL if the option
   * does not exist. Throws on pin/allocation failure or other errors.
   */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_opt;
  jstring value = NULL;
  int ret, buflen;
  char *buf;

  CHECK_ARG_NULL(j_opt, "@option is null", NULL);

  c_opt = env->GetStringUTFChars(j_opt, NULL);
  if (!c_opt) {
    cephThrowInternal(env, "failed to pin memory");
    return NULL;
  }

  /*
   * ceph_conf_get() reports -ENAMETOOLONG when the buffer is too small, so
   * start small and double the buffer until the value fits.
   */
  buflen = 128;
  buf = new (std::nothrow) char[buflen];
  if (!buf) {
    /* was "head allocation failed": typo, matches the listdir wording now */
    cephThrowOutOfMemory(env, "heap allocation failed");
    goto out;
  }

  while (1) {
    memset(buf, 0, sizeof(char)*buflen);
    ldout(cct, 10) << "jni: conf_get: opt " << c_opt << " len " << buflen << dendl;
    ret = ceph_conf_get(cmount, c_opt, buf, buflen);
    if (ret == -ENAMETOOLONG) {
      buflen *= 2;
      delete [] buf;
      buf = new (std::nothrow) char[buflen];
      if (!buf) {
        cephThrowOutOfMemory(env, "heap allocation failed");
        goto out;
      }
    } else
      break;
  }
  ldout(cct, 10) << "jni: conf_get: ret " << ret << dendl;

  /* -ENOENT means "option not found": return NULL without throwing */
  if (ret == 0)
    value = env->NewStringUTF(buf);
  else if (ret != -ENOENT)
    handle_error(env, ret);

  delete [] buf;

out:
  env->ReleaseStringUTFChars(j_opt, c_opt);
  return value;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_conf_read_file
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1conf_1read_1file
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  /* Load configuration for this mount from the file at j_path. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_ARG_NULL(j_path, "@path is null", -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (!path) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(ctx, 10) << "jni: conf_read_file: path " << path << dendl;
  int rc = ceph_conf_read_file(mnt, path);
  ldout(ctx, 10) << "jni: conf_read_file: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_statfs
* Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStatVFS;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1statfs
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstatvfs)
{
/* statvfs() the path and copy the result into the CephStatVFS object. */
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_path;
struct statvfs st;
int ret;
CHECK_ARG_NULL(j_path, "@path is null", -1);
CHECK_ARG_NULL(j_cephstatvfs, "@stat is null", -1);
CHECK_MOUNTED(cmount, -1);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "Failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: statfs: path " << c_path << dendl;
ret = ceph_statfs(cmount, c_path, &st);
ldout(cct, 10) << "jni: statfs: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_path, c_path);
if (ret) {
handle_error(env, ret);
return ret;
}
/* Copy statvfs fields into the Java object via the cached field IDs. */
env->SetLongField(j_cephstatvfs, cephstatvfs_bsize_fid, st.f_bsize);
env->SetLongField(j_cephstatvfs, cephstatvfs_frsize_fid, st.f_frsize);
env->SetLongField(j_cephstatvfs, cephstatvfs_blocks_fid, st.f_blocks);
env->SetLongField(j_cephstatvfs, cephstatvfs_bavail_fid, st.f_bavail);
env->SetLongField(j_cephstatvfs, cephstatvfs_files_fid, st.f_files);
env->SetLongField(j_cephstatvfs, cephstatvfs_fsid_fid, st.f_fsid);
env->SetLongField(j_cephstatvfs, cephstatvfs_namemax_fid, st.f_namemax);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_getcwd
* Signature: (J)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1getcwd
  (JNIEnv *env, jclass clz, jlong j_mntp)
{
  /* Return the mount's current working directory as a Java string. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_MOUNTED(mnt, NULL);

  ldout(ctx, 10) << "jni: getcwd: enter" << dendl;

  const char *cwd = ceph_getcwd(mnt);
  if (!cwd) {
    cephThrowOutOfMemory(env, "ceph_getcwd");
    return NULL;
  }

  ldout(ctx, 10) << "jni: getcwd: exit ret " << cwd << dendl;

  return env->NewStringUTF(cwd);
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_chdir
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1chdir
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  /* Change the mount's working directory to j_path. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(mnt, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (!path) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(ctx, 10) << "jni: chdir: path " << path << dendl;
  int rc = ceph_chdir(mnt, path);
  ldout(ctx, 10) << "jni: chdir: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_listdir
* Signature: (JLjava/lang/String;)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1listdir
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
/*
 * List the entries of directory j_path as a String[], excluding "." and
 * "..". Returns NULL (with a pending exception) on any failure.
 */
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
struct ceph_dir_result *dirp;
list<string>::iterator it;
list<string> contents;
const char *c_path;
jobjectArray dirlist;
string *ent;
int ret, buflen, bufpos, i;
jstring name;
char *buf;
CHECK_ARG_NULL(j_path, "@path is null", NULL);
CHECK_MOUNTED(cmount, NULL);
c_path = env->GetStringUTFChars(j_path, NULL);
if (!c_path) {
cephThrowInternal(env, "failed to pin memory");
return NULL;
}
ldout(cct, 10) << "jni: listdir: opendir: path " << c_path << dendl;
/* ret < 0 also includes -ENOTDIR which should return NULL */
ret = ceph_opendir(cmount, c_path, &dirp);
if (ret) {
env->ReleaseStringUTFChars(j_path, c_path);
handle_error(env, ret);
return NULL;
}
ldout(cct, 10) << "jni: listdir: opendir: exit ret " << ret << dendl;
/* buffer for ceph_getdnames() results */
buflen = 256;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
/* Read batches of names until exhausted; grow the buffer on -ERANGE. */
while (1) {
ldout(cct, 10) << "jni: listdir: getdnames: enter" << dendl;
ret = ceph_getdnames(cmount, dirp, buf, buflen);
if (ret == -ERANGE) {
delete [] buf;
buflen *= 2;
buf = new (std::nothrow) char[buflen];
if (!buf) {
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
continue;
}
ldout(cct, 10) << "jni: listdir: getdnames: exit ret " << ret << dendl;
if (ret <= 0)
break;
/* got at least one name */
bufpos = 0;
/* buf holds ret bytes of NUL-separated names; walk them one at a time */
while (bufpos < ret) {
ent = new (std::nothrow) string(buf + bufpos);
if (!ent) {
delete [] buf;
cephThrowOutOfMemory(env, "heap allocation failed");
goto out;
}
/* filter out dot files: xref: java.io.File::list() */
if (ent->compare(".") && ent->compare("..")) {
contents.push_back(*ent);
ldout(cct, 20) << "jni: listdir: take path " << *ent << dendl;
}
bufpos += ent->size() + 1;
delete ent;
}
}
delete [] buf;
if (ret < 0) {
handle_error(env, ret);
goto out;
}
/* directory list */
dirlist = env->NewObjectArray(contents.size(), env->FindClass("java/lang/String"), NULL);
if (!dirlist)
goto out;
/*
* Fill directory listing array.
*
* FIXME: how should a partially filled array be cleaned-up properly?
*/
for (i = 0, it = contents.begin(); it != contents.end(); ++it) {
name = env->NewStringUTF(it->c_str());
if (!name)
goto out;
env->SetObjectArrayElement(dirlist, i++, name);
if (env->ExceptionOccurred())
goto out;
env->DeleteLocalRef(name);
}
env->ReleaseStringUTFChars(j_path, c_path);
ceph_closedir(cmount, dirp);
return dirlist;
/* shared error exit: release pinned chars and close the dir handle */
out:
env->ReleaseStringUTFChars(j_path, c_path);
ceph_closedir(cmount, dirp);
return NULL;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_link
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1link
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_oldpath, jstring j_newpath)
{
/* Create a hard link at j_newpath pointing at j_oldpath. */
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_oldpath, *c_newpath;
int ret;
CHECK_ARG_NULL(j_oldpath, "@oldpath is null", -1);
CHECK_ARG_NULL(j_newpath, "@newpath is null", -1);
CHECK_MOUNTED(cmount, -1);
c_oldpath = env->GetStringUTFChars(j_oldpath, NULL);
if (!c_oldpath) {
cephThrowInternal(env, "failed to pin memory");
return -1;
}
c_newpath = env->GetStringUTFChars(j_newpath, NULL);
if (!c_newpath) {
/* second pin failed: release the first before bailing */
env->ReleaseStringUTFChars(j_oldpath, c_oldpath);
cephThrowInternal(env, "failed to pin memory");
return -1;
}
ldout(cct, 10) << "jni: link: oldpath " << c_oldpath <<
" newpath " << c_newpath << dendl;
ret = ceph_link(cmount, c_oldpath, c_newpath);
ldout(cct, 10) << "jni: link: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_oldpath, c_oldpath);
env->ReleaseStringUTFChars(j_newpath, c_newpath);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_unlink
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1unlink
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  /* Remove the file (or link) at j_path. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(mnt, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (!path) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(ctx, 10) << "jni: unlink: path " << path << dendl;
  int rc = ceph_unlink(mnt, path);
  ldout(ctx, 10) << "jni: unlink: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_rename
* Signature: (JLjava/lang/String;Ljava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1rename
(JNIEnv *env, jclass clz, jlong j_mntp, jstring j_from, jstring j_to)
{
/* Rename j_from to j_to. */
struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
CephContext *cct = ceph_get_mount_context(cmount);
const char *c_from, *c_to;
int ret;
CHECK_ARG_NULL(j_from, "@from is null", -1);
CHECK_ARG_NULL(j_to, "@to is null", -1);
CHECK_MOUNTED(cmount, -1);
c_from = env->GetStringUTFChars(j_from, NULL);
if (!c_from) {
cephThrowInternal(env, "Failed to pin memory!");
return -1;
}
c_to = env->GetStringUTFChars(j_to, NULL);
if (!c_to) {
/* second pin failed: release the first before bailing */
env->ReleaseStringUTFChars(j_from, c_from);
cephThrowInternal(env, "Failed to pin memory.");
return -1;
}
ldout(cct, 10) << "jni: rename: from " << c_from << " to " << c_to << dendl;
ret = ceph_rename(cmount, c_from, c_to);
ldout(cct, 10) << "jni: rename: exit ret " << ret << dendl;
env->ReleaseStringUTFChars(j_from, c_from);
env->ReleaseStringUTFChars(j_to, c_to);
if (ret)
handle_error(env, ret);
return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_mkdir
* Signature: (JLjava/lang/String;I)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1mkdir
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_mode)
{
  /* Create directory j_path with mode j_mode. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(mnt, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (!path) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(ctx, 10) << "jni: mkdir: path " << path << " mode " << (int)j_mode << dendl;
  int rc = ceph_mkdir(mnt, path, (int)j_mode);
  ldout(ctx, 10) << "jni: mkdir: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_mkdirs
* Signature: (JLjava/lang/String;I)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1mkdirs
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_mode)
{
  /* Create j_path and any missing parent directories with mode j_mode. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(mnt, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (!path) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(ctx, 10) << "jni: mkdirs: path " << path << " mode " << (int)j_mode << dendl;
  int rc = ceph_mkdirs(mnt, path, (int)j_mode);
  ldout(ctx, 10) << "jni: mkdirs: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_rmdir
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1rmdir
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  /* Remove the directory at j_path. */
  struct ceph_mount_info *mnt = get_ceph_mount(j_mntp);
  CephContext *ctx = ceph_get_mount_context(mnt);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(mnt, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (!path) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(ctx, 10) << "jni: rmdir: path " << path << dendl;
  int rc = ceph_rmdir(mnt, path);
  ldout(ctx, 10) << "jni: rmdir: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
  }
  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_readlink
 * Signature: (JLjava/lang/String;)Ljava/lang/String;
 *
 * Read the target of a symbolic link and return it as a Java string.
 * Returns NULL (with a Java exception pending) on any failure.
 */
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1readlink
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  char *linkname;
  struct ceph_statx stx;
  jstring j_linkname;

  CHECK_ARG_NULL(j_path, "@path is null", NULL);
  CHECK_MOUNTED(cmount, NULL);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "failed to pin memory");
    return NULL;
  }

  /*
   * The link target length can change between the stat and the read, so
   * stat to size a buffer, read, and retry from scratch if the target
   * grew in between (ret > stx_size means our buffer was too small).
   */
  for (;;) {
    ldout(cct, 10) << "jni: readlink: lstatx " << c_path << dendl;
    int ret = ceph_statx(cmount, c_path, &stx, CEPH_STATX_SIZE,
                         AT_SYMLINK_NOFOLLOW);
    ldout(cct, 10) << "jni: readlink: lstat exit ret " << ret << dendl;
    if (ret) {
      env->ReleaseStringUTFChars(j_path, c_path);
      handle_error(env, ret);
      return NULL;
    }

    /* +1 for the NUL terminator we append below */
    linkname = new (std::nothrow) char[stx.stx_size + 1];
    if (!linkname) {
      env->ReleaseStringUTFChars(j_path, c_path);
      /* fixed typo: was "head allocation failed" */
      cephThrowOutOfMemory(env, "heap allocation failed");
      return NULL;
    }

    ldout(cct, 10) << "jni: readlink: size " << stx.stx_size << " path " << c_path << dendl;
    ret = ceph_readlink(cmount, c_path, linkname, stx.stx_size + 1);
    ldout(cct, 10) << "jni: readlink: exit ret " << ret << dendl;
    if (ret < 0) {
      delete [] linkname;
      env->ReleaseStringUTFChars(j_path, c_path);
      handle_error(env, ret);
      return NULL;
    }

    /* link grew since the stat: re-stat and try again */
    if (ret > (int)stx.stx_size) {
      delete [] linkname;
      continue;
    }

    /* ceph_readlink does not NUL-terminate; do it ourselves */
    linkname[ret] = '\0';
    break;
  }

  env->ReleaseStringUTFChars(j_path, c_path);

  j_linkname = env->NewStringUTF(linkname);
  delete [] linkname;

  return j_linkname;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_symlink
 * Signature: (JLjava/lang/String;Ljava/lang/String;)I
 *
 * Create a symbolic link at @newpath pointing to @oldpath.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1symlink
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_oldpath, jstring j_newpath)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_ARG_NULL(j_oldpath, "@oldpath is null", -1);
  CHECK_ARG_NULL(j_newpath, "@newpath is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *oldpath = env->GetStringUTFChars(j_oldpath, NULL);
  if (oldpath == NULL) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  const char *newpath = env->GetStringUTFChars(j_newpath, NULL);
  if (newpath == NULL) {
    /* release the already-pinned string before bailing out */
    env->ReleaseStringUTFChars(j_oldpath, oldpath);
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: symlink: oldpath " << oldpath <<
    " newpath " << newpath << dendl;
  int rc = ceph_symlink(cmount, oldpath, newpath);
  ldout(cct, 10) << "jni: symlink: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_oldpath, oldpath);
  env->ReleaseStringUTFChars(j_newpath, newpath);

  if (rc)
    handle_error(env, rc);

  return rc;
}
/* statx fields the Java CephStat object consumes */
#define CEPH_J_CEPHSTAT_MASK (CEPH_STATX_UID|CEPH_STATX_GID|CEPH_STATX_SIZE|CEPH_STATX_BLOCKS|CEPH_STATX_MTIME|CEPH_STATX_ATIME)

/*
 * Copy the fields of a ceph_statx into a com.ceph.fs.CephStat object.
 * Times are converted from (tv_sec, tv_nsec) to Java-style milliseconds.
 */
static void fill_cephstat(JNIEnv *env, jobject j_cephstat, struct ceph_statx *stx)
{
  env->SetIntField(j_cephstat, cephstat_mode_fid, stx->stx_mode);
  env->SetIntField(j_cephstat, cephstat_uid_fid, stx->stx_uid);
  env->SetIntField(j_cephstat, cephstat_gid_fid, stx->stx_gid);
  env->SetLongField(j_cephstat, cephstat_size_fid, stx->stx_size);
  env->SetLongField(j_cephstat, cephstat_blksize_fid, stx->stx_blksize);
  env->SetLongField(j_cephstat, cephstat_blocks_fid, stx->stx_blocks);

  /* mtime: seconds + nanoseconds -> milliseconds */
  long long time = stx->stx_mtime.tv_sec;
  time *= 1000;
  time += stx->stx_mtime.tv_nsec / 1000000;
  env->SetLongField(j_cephstat, cephstat_m_time_fid, time);

  /* atime: same conversion */
  time = stx->stx_atime.tv_sec;
  time *= 1000;
  time += stx->stx_atime.tv_nsec / 1000000;
  env->SetLongField(j_cephstat, cephstat_a_time_fid, time);

  /* classify the entry from its mode bits */
  env->SetBooleanField(j_cephstat, cephstat_is_file_fid,
      S_ISREG(stx->stx_mode) ? JNI_TRUE : JNI_FALSE);

  env->SetBooleanField(j_cephstat, cephstat_is_directory_fid,
      S_ISDIR(stx->stx_mode) ? JNI_TRUE : JNI_FALSE);

  env->SetBooleanField(j_cephstat, cephstat_is_symlink_fid,
      S_ISLNK(stx->stx_mode) ? JNI_TRUE : JNI_FALSE);
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_lstat
 * Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStat;)I
 *
 * stat() a path without following a trailing symlink, filling @j_cephstat.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lstat
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstat)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  struct ceph_statx stx;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (path == NULL) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: lstat: path " << path << dendl;
  int rc = ceph_statx(cmount, path, &stx, CEPH_J_CEPHSTAT_MASK, AT_SYMLINK_NOFOLLOW);
  ldout(cct, 10) << "jni: lstat exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
    return rc;
  }

  fill_cephstat(env, j_cephstat, &stx);
  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_stat
 * Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStat;)I
 *
 * stat() a path (following symlinks), filling @j_cephstat.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1stat
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstat)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  struct ceph_statx stx;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (path == NULL) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: stat: path " << path << dendl;
  int rc = ceph_statx(cmount, path, &stx, CEPH_J_CEPHSTAT_MASK, 0);
  ldout(cct, 10) << "jni: stat exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc) {
    handle_error(env, rc);
    return rc;
  }

  fill_cephstat(env, j_cephstat, &stx);
  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_setattr
 * Signature: (JLjava/lang/String;Lcom/ceph/fs/CephStat;I)I
 *
 * Apply selected attributes (mode/uid/gid/mtime/atime) from a Java
 * CephStat to a path. @j_mask selects which fields are applied; it is
 * translated from Java flag values to CEPH_SETATTR_* bits.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1setattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jobject j_cephstat, jint j_mask)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  struct ceph_statx stx;
  /* translate Java attribute mask bits to native CEPH_SETATTR_* bits */
  int ret, mask = fixup_attr_mask(j_mask);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
  CHECK_MOUNTED(cmount, -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  /* build a statx from the Java object's fields */
  memset(&stx, 0, sizeof(stx));
  stx.stx_mode = env->GetIntField(j_cephstat, cephstat_mode_fid);
  stx.stx_uid = env->GetIntField(j_cephstat, cephstat_uid_fid);
  stx.stx_gid = env->GetIntField(j_cephstat, cephstat_gid_fid);

  /* Java times are in milliseconds; split into seconds + nanoseconds */
  long mtime_msec = env->GetLongField(j_cephstat, cephstat_m_time_fid);
  long atime_msec = env->GetLongField(j_cephstat, cephstat_a_time_fid);
  stx.stx_mtime.tv_sec = mtime_msec / 1000;
  stx.stx_mtime.tv_nsec = (mtime_msec % 1000) * 1000000;
  stx.stx_atime.tv_sec = atime_msec / 1000;
  stx.stx_atime.tv_nsec = (atime_msec % 1000) * 1000000;

  ldout(cct, 10) << "jni: setattr: path " << c_path << " mask " << mask << dendl;
  ret = ceph_setattrx(cmount, c_path, &stx, mask, 0);
  ldout(cct, 10) << "jni: setattr: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_chmod
 * Signature: (JLjava/lang/String;I)I
 *
 * Change the permission bits of a path.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1chmod
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_mode)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (path == NULL) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: chmod: path " << path << " mode " << (int)j_mode << dendl;
  int rc = ceph_chmod(cmount, path, (int)j_mode);
  ldout(cct, 10) << "jni: chmod: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc)
    handle_error(env, rc);

  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_fchmod
 * Signature: (JII)I
 *
 * Change the permission bits of an open file descriptor.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1fchmod
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jint j_mode)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: fchmod: fd " << (int)j_fd << " mode " << (int)j_mode << dendl;
  int rc = ceph_fchmod(cmount, (int)j_fd, (int)j_mode);
  ldout(cct, 10) << "jni: fchmod: exit ret " << rc << dendl;

  if (rc)
    handle_error(env, rc);

  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_truncate
 * Signature: (JLjava/lang/String;J)I
 *
 * Truncate (or extend) the file at @path to @j_size bytes.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1truncate
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jlong j_size)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (path == NULL) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: truncate: path " << path << " size " << (loff_t)j_size << dendl;
  int rc = ceph_truncate(cmount, path, (loff_t)j_size);
  ldout(cct, 10) << "jni: truncate: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  if (rc)
    handle_error(env, rc);

  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_open
 * Signature: (JLjava/lang/String;II)I
 *
 * Open (and possibly create) a file; returns the new fd, or a negative
 * value (with a Java exception pending) on error.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1open
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_flags, jint j_mode)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  /* translate Java open flags into native O_* flags */
  int flags = fixup_open_flags(j_flags);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (path == NULL) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: open: path " << path << " flags " << flags
    << " mode " << (int)j_mode << dendl;
  int fd = ceph_open(cmount, path, flags, (int)j_mode);
  ldout(cct, 10) << "jni: open: exit ret " << fd << dendl;

  env->ReleaseStringUTFChars(j_path, path);

  /* a non-negative return is the file descriptor */
  if (fd < 0)
    handle_error(env, fd);

  return fd;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_open_layout
 * Signature: (JLjava/lang/String;IIIIILjava/lang/String;)I
 *
 * Open/create a file with an explicit striping layout. A zero value for
 * stripe_unit/stripe_count/object_size and a NULL data pool fall back to
 * the filesystem defaults. Returns the new fd, or negative on error.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1open_1layout
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jint j_flags, jint j_mode,
   jint stripe_unit, jint stripe_count, jint object_size, jstring j_data_pool)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path, *c_data_pool = NULL;
  /* translate Java open flags into native O_* flags */
  int ret, flags = fixup_open_flags(j_flags);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_MOUNTED(cmount, -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  /* the data pool is optional; only pin it when the caller supplied one */
  if (j_data_pool) {
    c_data_pool = env->GetStringUTFChars(j_data_pool, NULL);
    if (!c_data_pool) {
      env->ReleaseStringUTFChars(j_path, c_path);
      cephThrowInternal(env, "Failed to pin memory");
      return -1;
    }
  }

  ldout(cct, 10) << "jni: open_layout: path " << c_path << " flags " << flags
    << " mode " << (int)j_mode << " stripe_unit " << stripe_unit
    << " stripe_count " << stripe_count << " object_size " << object_size
    << " data_pool " << (c_data_pool ? c_data_pool : "<NULL>") << dendl;

  ret = ceph_open_layout(cmount, c_path, flags, (int)j_mode,
      (int)stripe_unit, (int)stripe_count, (int)object_size, c_data_pool);

  ldout(cct, 10) << "jni: open_layout: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);
  if (j_data_pool)
    env->ReleaseStringUTFChars(j_data_pool, c_data_pool);

  if (ret < 0)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_close
 * Signature: (JI)I
 *
 * Close an open file descriptor.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1close
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: close: fd " << (int)j_fd << dendl;
  int rc = ceph_close(cmount, (int)j_fd);
  ldout(cct, 10) << "jni: close: ret " << rc << dendl;

  if (rc)
    handle_error(env, rc);

  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_lseek
 * Signature: (JIJI)J
 *
 * Reposition the file offset of an open fd. @j_whence is one of the
 * JAVA_SEEK_* values and is mapped to the native SEEK_* constant.
 */
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lseek
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jlong j_offset, jint j_whence)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  /* map the Java whence constant onto the native one */
  int whence;
  if (j_whence == JAVA_SEEK_SET)
    whence = SEEK_SET;
  else if (j_whence == JAVA_SEEK_CUR)
    whence = SEEK_CUR;
  else if (j_whence == JAVA_SEEK_END)
    whence = SEEK_END;
  else {
    cephThrowIllegalArg(env, "Unknown whence value");
    return -1;
  }

  ldout(cct, 10) << "jni: lseek: fd " << (int)j_fd << " offset "
    << (long)j_offset << " whence " << whence << dendl;
  jlong pos = ceph_lseek(cmount, (int)j_fd, (long)j_offset, whence);
  ldout(cct, 10) << "jni: lseek: exit ret " << pos << dendl;

  if (pos < 0)
    handle_error(env, pos);

  return pos;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_read
 * Signature: (JI[BJJ)J
 *
 * Read up to @j_size bytes from @j_fd (at @j_offset, or the current
 * position when the offset is negative) into the Java byte array.
 * Returns the number of bytes read, or negative on error.
 */
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1read
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jbyteArray j_buf, jlong j_size, jlong j_offset)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jsize buf_size;
  jbyte *c_buf;
  long ret;

  CHECK_ARG_NULL(j_buf, "@buf is null", -1);
  CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
  CHECK_MOUNTED(cmount, -1);

  buf_size = env->GetArrayLength(j_buf);
  CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);

  c_buf = env->GetByteArrayElements(j_buf, NULL);
  if (!c_buf) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: read: fd " << (int)j_fd << " len " << (long)j_size <<
    " offset " << (long)j_offset << dendl;

  ret = ceph_read(cmount, (int)j_fd, (char*)c_buf, (long)j_size, (long)j_offset);

  ldout(cct, 10) << "jni: read: exit ret " << ret << dendl;

  if (ret < 0) {
    /*
     * Bug fix: the pinned array was previously leaked on the error path.
     * JNI_ABORT: nothing was read, so don't copy anything back to Java.
     */
    env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
    handle_error(env, (int)ret);
  } else {
    /* mode 0: copy the bytes read back into the Java array and unpin */
    env->ReleaseByteArrayElements(j_buf, c_buf, 0);
  }

  return (jlong)ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_write
 * Signature: (JI[BJJ)J
 *
 * Write @j_size bytes from the Java byte array to @j_fd at @j_offset.
 * Returns the number of bytes written, or negative on error.
 */
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1write
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jbyteArray j_buf, jlong j_size, jlong j_offset)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jsize buf_size;
  jbyte *c_buf;
  long ret;

  CHECK_ARG_NULL(j_buf, "@buf is null", -1);
  CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
  CHECK_MOUNTED(cmount, -1);

  buf_size = env->GetArrayLength(j_buf);
  CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);

  c_buf = env->GetByteArrayElements(j_buf, NULL);
  if (!c_buf) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: write: fd " << (int)j_fd << " len " << (long)j_size <<
    " offset " << (long)j_offset << dendl;

  ret = ceph_write(cmount, (int)j_fd, (char*)c_buf, (long)j_size, (long)j_offset);

  ldout(cct, 10) << "jni: write: exit ret " << ret << dendl;

  /*
   * Bug fix: the pinned array was previously leaked on the error path.
   * JNI_ABORT in both cases: the Java array is input-only here, so there
   * is never anything to copy back.
   */
  env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);

  if (ret < 0)
    handle_error(env, (int)ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_ftruncate
 * Signature: (JIJ)I
 *
 * Truncate (or extend) an open file descriptor to @j_size bytes.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1ftruncate
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jlong j_size)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: ftruncate: fd " << (int)j_fd <<
    " size " << (loff_t)j_size << dendl;
  int rc = ceph_ftruncate(cmount, (int)j_fd, (loff_t)j_size);
  ldout(cct, 10) << "jni: ftruncate: exit ret " << rc << dendl;

  if (rc)
    handle_error(env, rc);

  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_fsync
 * Signature: (JIZ)I
 *
 * Flush dirty data (and, unless @j_dataonly, metadata) for an open fd.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1fsync
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jboolean j_dataonly)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  int ret;

  /* consistency fix: every other fd-based entry point guards against an
   * unmounted handle; fsync was missing the check */
  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: fsync: fd " << (int)j_fd <<
    " dataonly " << (j_dataonly ? 1 : 0) << dendl;

  ret = ceph_fsync(cmount, (int)j_fd, j_dataonly ? 1 : 0);

  ldout(cct, 10) << "jni: fsync: exit ret " << ret << dendl;

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_flock
 * Signature: (JIZ)I
 *
 * Apply or remove an advisory lock on an open fd. @j_operation is a
 * bitmask of JAVA_LOCK_* flags, mapped to the native LOCK_* flags;
 * any unrecognized bit is rejected with IllegalArgumentException.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1flock
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jint j_operation, jlong j_owner)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  int ret;

  /* consistency fix: guard against an unmounted handle like the other
   * fd-based entry points do */
  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: flock: fd " << (int)j_fd <<
    " operation " << j_operation << " owner " << j_owner << dendl;

  /* translate Java lock flags; clear each recognized bit as we go so
   * leftover bits indicate an invalid argument */
  int operation = 0;
#define MAP_FLOCK_FLAG(JNI_MASK, NATIVE_MASK) do { \
    if ((j_operation & JNI_MASK) != 0) { \
      operation |= NATIVE_MASK; \
      j_operation &= ~JNI_MASK; \
    } \
  } while(0)
  MAP_FLOCK_FLAG(JAVA_LOCK_SH, LOCK_SH);
  MAP_FLOCK_FLAG(JAVA_LOCK_EX, LOCK_EX);
  MAP_FLOCK_FLAG(JAVA_LOCK_NB, LOCK_NB);
  MAP_FLOCK_FLAG(JAVA_LOCK_UN, LOCK_UN);
  if (j_operation != 0) {
    cephThrowIllegalArg(env, "flock flags");
    return -EINVAL;
  }
#undef MAP_FLOCK_FLAG

  ret = ceph_flock(cmount, (int)j_fd, operation, (uint64_t) j_owner);

  ldout(cct, 10) << "jni: flock: exit ret " << ret << dendl;

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_fstat
 * Signature: (JILcom/ceph/fs/CephStat;)I
 *
 * stat() an open file descriptor, filling @j_cephstat.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1fstat
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd, jobject j_cephstat)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  struct ceph_statx stx;

  CHECK_ARG_NULL(j_cephstat, "@stat is null", -1);
  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: fstat: fd " << (int)j_fd << dendl;
  int rc = ceph_fstatx(cmount, (int)j_fd, &stx, CEPH_J_CEPHSTAT_MASK, 0);
  ldout(cct, 10) << "jni: fstat exit ret " << rc << dendl;

  if (rc) {
    handle_error(env, rc);
    return rc;
  }

  fill_cephstat(env, j_cephstat, &stx);
  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_sync_fs
 * Signature: (J)I
 *
 * Flush all dirty data and metadata for the whole mount.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1sync_1fs
  (JNIEnv *env, jclass clz, jlong j_mntp)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  int ret;

  /* consistency fix: guard against an unmounted handle like the other
   * entry points do */
  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: sync_fs: enter" << dendl;

  ret = ceph_sync_fs(cmount);

  ldout(cct, 10) << "jni: sync_fs: exit ret " << ret << dendl;

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_getxattr
 * Signature: (JLjava/lang/String;Ljava/lang/String;[B)J
 *
 * Read an extended attribute into @j_buf. When @j_buf is NULL the call
 * is a size probe: the return value is the attribute's size in bytes.
 * When the caller's buffer is too small (-ERANGE), the call is retried
 * with size 0 so the required size is returned instead of an error.
 */
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1getxattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name, jbyteArray j_buf)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  const char *c_name;
  jsize buf_size;
  jbyte *c_buf = NULL; /* please gcc with goto */
  long ret;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_name, "@name is null", -1);
  CHECK_MOUNTED(cmount, -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_name = env->GetStringUTFChars(j_name, NULL);
  if (!c_name) {
    env->ReleaseStringUTFChars(j_path, c_path);
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  /* just lookup the size if buf is null */
  if (!j_buf) {
    buf_size = 0;
    goto do_getxattr;
  }

  c_buf = env->GetByteArrayElements(j_buf, NULL);
  if (!c_buf) {
    env->ReleaseStringUTFChars(j_path, c_path);
    env->ReleaseStringUTFChars(j_name, c_name);
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  buf_size = env->GetArrayLength(j_buf);

do_getxattr:

  ldout(cct, 10) << "jni: getxattr: path " << c_path << " name " << c_name <<
    " len " << buf_size << dendl;

  ret = ceph_getxattr(cmount, c_path, c_name, c_buf, buf_size);
  /* buffer too small: re-query with size 0 to report the needed size */
  if (ret == -ERANGE)
    ret = ceph_getxattr(cmount, c_path, c_name, c_buf, 0);

  ldout(cct, 10) << "jni: getxattr: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);
  env->ReleaseStringUTFChars(j_name, c_name);
  if (j_buf)
    env->ReleaseByteArrayElements(j_buf, c_buf, 0);

  if (ret < 0)
    handle_error(env, (int)ret);

  return (jlong)ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_lgetxattr
 * Signature: (JLjava/lang/String;Ljava/lang/String;[B)I
 *
 * Like native_ceph_getxattr, but does not follow a trailing symlink.
 * A NULL @j_buf probes the attribute size; -ERANGE is converted into a
 * size query so the caller learns the required buffer length.
 */
JNIEXPORT jlong JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lgetxattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name, jbyteArray j_buf)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  const char *c_name;
  jsize buf_size;
  jbyte *c_buf = NULL; /* please gcc with goto */
  long ret;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_name, "@name is null", -1);
  CHECK_MOUNTED(cmount, -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_name = env->GetStringUTFChars(j_name, NULL);
  if (!c_name) {
    env->ReleaseStringUTFChars(j_path, c_path);
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  /* just lookup the size if buf is null */
  if (!j_buf) {
    buf_size = 0;
    goto do_lgetxattr;
  }

  c_buf = env->GetByteArrayElements(j_buf, NULL);
  if (!c_buf) {
    env->ReleaseStringUTFChars(j_path, c_path);
    env->ReleaseStringUTFChars(j_name, c_name);
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  buf_size = env->GetArrayLength(j_buf);

do_lgetxattr:

  ldout(cct, 10) << "jni: lgetxattr: path " << c_path << " name " << c_name <<
    " len " << buf_size << dendl;

  ret = ceph_lgetxattr(cmount, c_path, c_name, c_buf, buf_size);
  /* buffer too small: re-query with size 0 to report the needed size */
  if (ret == -ERANGE)
    ret = ceph_lgetxattr(cmount, c_path, c_name, c_buf, 0);

  ldout(cct, 10) << "jni: lgetxattr: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);
  env->ReleaseStringUTFChars(j_name, c_name);
  if (j_buf)
    env->ReleaseByteArrayElements(j_buf, c_buf, 0);

  if (ret < 0)
    handle_error(env, (int)ret);

  return (jlong)ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_listxattr
 * Signature: (JLjava/lang/String;)[Ljava/lang/String;
 *
 * List the extended-attribute names of a path as a String[]. The native
 * call fills a buffer with NUL-separated names; the buffer is doubled
 * and the call retried while it returns -ERANGE. Returns NULL (with a
 * Java exception pending) on failure.
 */
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1listxattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jobjectArray xattrlist;
  const char *c_path;
  string *ent;
  jstring name;
  list<string>::iterator it;
  list<string> contents;
  int ret, buflen, bufpos, i;
  char *buf;

  CHECK_ARG_NULL(j_path, "@path is null", NULL);
  CHECK_MOUNTED(cmount, NULL);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return NULL;
  }

  /* start with a modest buffer; grown below on -ERANGE */
  buflen = 1024;
  buf = new (std::nothrow) char[buflen];
  if (!buf) {
    /* NOTE(review): message says "head" — presumably meant "heap" */
    cephThrowOutOfMemory(env, "head allocation failed");
    goto out;
  }

  while (1) {
    ldout(cct, 10) << "jni: listxattr: path " << c_path << " len " << buflen << dendl;
    ret = ceph_listxattr(cmount, c_path, buf, buflen);
    /* buffer too small: double it and retry */
    if (ret == -ERANGE) {
      delete [] buf;
      buflen *= 2;
      buf = new (std::nothrow) char[buflen];
      if (!buf) {
        cephThrowOutOfMemory(env, "heap allocation failed");
        goto out;
      }
      continue;
    }
    break;
  }

  ldout(cct, 10) << "jni: listxattr: ret " << ret << dendl;

  if (ret < 0) {
    delete [] buf;
    handle_error(env, ret);
    goto out;
  }

  /* split the NUL-separated name list into individual strings */
  bufpos = 0;
  while (bufpos < ret) {
    ent = new (std::nothrow) string(buf + bufpos);
    if (!ent) {
      delete [] buf;
      cephThrowOutOfMemory(env, "heap allocation failed");
      goto out;
    }
    contents.push_back(*ent);
    bufpos += ent->size() + 1; /* skip the name and its terminating NUL */
    delete ent;
  }

  delete [] buf;

  xattrlist = env->NewObjectArray(contents.size(), env->FindClass("java/lang/String"), NULL);
  if (!xattrlist)
    goto out; /* NewObjectArray left an exception pending */

  for (i = 0, it = contents.begin(); it != contents.end(); ++it) {
    name = env->NewStringUTF(it->c_str());
    if (!name)
      goto out;
    env->SetObjectArrayElement(xattrlist, i++, name);
    if (env->ExceptionOccurred())
      goto out;
    /* drop the local ref early to avoid exhausting the local-ref table */
    env->DeleteLocalRef(name);
  }

  env->ReleaseStringUTFChars(j_path, c_path);
  return xattrlist;

out:
  env->ReleaseStringUTFChars(j_path, c_path);
  return NULL;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_llistxattr
 * Signature: (JLjava/lang/String;)[Ljava/lang/String;
 *
 * Like native_ceph_listxattr, but does not follow a trailing symlink.
 * The NUL-separated name buffer is doubled and the call retried while
 * it returns -ERANGE. Returns NULL (with an exception pending) on error.
 */
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1llistxattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jobjectArray xattrlist;
  const char *c_path;
  string *ent;
  jstring name;
  list<string>::iterator it;
  list<string> contents;
  int ret, buflen, bufpos, i;
  char *buf;

  CHECK_ARG_NULL(j_path, "@path is null", NULL);
  CHECK_MOUNTED(cmount, NULL);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return NULL;
  }

  /* start with a modest buffer; grown below on -ERANGE */
  buflen = 1024;
  buf = new (std::nothrow) char[buflen];
  if (!buf) {
    /* NOTE(review): message says "head" — presumably meant "heap" */
    cephThrowOutOfMemory(env, "head allocation failed");
    goto out;
  }

  while (1) {
    ldout(cct, 10) << "jni: llistxattr: path " << c_path << " len " << buflen << dendl;
    ret = ceph_llistxattr(cmount, c_path, buf, buflen);
    /* buffer too small: double it and retry */
    if (ret == -ERANGE) {
      delete [] buf;
      buflen *= 2;
      buf = new (std::nothrow) char[buflen];
      if (!buf) {
        cephThrowOutOfMemory(env, "heap allocation failed");
        goto out;
      }
      continue;
    }
    break;
  }

  ldout(cct, 10) << "jni: llistxattr: ret " << ret << dendl;

  if (ret < 0) {
    delete [] buf;
    handle_error(env, ret);
    goto out;
  }

  /* split the NUL-separated name list into individual strings */
  bufpos = 0;
  while (bufpos < ret) {
    ent = new (std::nothrow) string(buf + bufpos);
    if (!ent) {
      delete [] buf;
      cephThrowOutOfMemory(env, "heap allocation failed");
      goto out;
    }
    contents.push_back(*ent);
    bufpos += ent->size() + 1; /* skip the name and its terminating NUL */
    delete ent;
  }

  delete [] buf;

  xattrlist = env->NewObjectArray(contents.size(), env->FindClass("java/lang/String"), NULL);
  if (!xattrlist)
    goto out; /* NewObjectArray left an exception pending */

  for (i = 0, it = contents.begin(); it != contents.end(); ++it) {
    name = env->NewStringUTF(it->c_str());
    if (!name)
      goto out;
    env->SetObjectArrayElement(xattrlist, i++, name);
    if (env->ExceptionOccurred())
      goto out;
    /* drop the local ref early to avoid exhausting the local-ref table */
    env->DeleteLocalRef(name);
  }

  env->ReleaseStringUTFChars(j_path, c_path);
  return xattrlist;

out:
  env->ReleaseStringUTFChars(j_path, c_path);
  return NULL;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_removexattr
 * Signature: (JLjava/lang/String;Ljava/lang/String;)I
 *
 * Remove the extended attribute @name from @path.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1removexattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_name, "@name is null", -1);
  CHECK_MOUNTED(cmount, -1);

  const char *path = env->GetStringUTFChars(j_path, NULL);
  if (path == NULL) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  const char *xname = env->GetStringUTFChars(j_name, NULL);
  if (xname == NULL) {
    /* release the already-pinned path before bailing out */
    env->ReleaseStringUTFChars(j_path, path);
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: removexattr: path " << path << " name " << xname << dendl;
  int rc = ceph_removexattr(cmount, path, xname);
  ldout(cct, 10) << "jni: removexattr: exit ret " << rc << dendl;

  env->ReleaseStringUTFChars(j_path, path);
  env->ReleaseStringUTFChars(j_name, xname);

  if (rc)
    handle_error(env, rc);

  return rc;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_lremovexattr
 * Signature: (JLjava/lang/String;Ljava/lang/String;)I
 *
 * Remove the extended attribute @name from @path without following a
 * trailing symlink.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lremovexattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  const char *c_name;
  int ret;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_name, "@name is null", -1);
  CHECK_MOUNTED(cmount, -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_name = env->GetStringUTFChars(j_name, NULL);
  if (!c_name) {
    /* release the already-pinned path before bailing out */
    env->ReleaseStringUTFChars(j_path, c_path);
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: lremovexattr: path " << c_path << " name " << c_name << dendl;

  ret = ceph_lremovexattr(cmount, c_path, c_name);

  ldout(cct, 10) << "jni: lremovexattr: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);
  env->ReleaseStringUTFChars(j_name, c_name);

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_setxattr
 * Signature: (JLjava/lang/String;Ljava/lang/String;[BJI)I
 *
 * Set an extended attribute from the first @j_size bytes of @j_buf.
 * @j_flags selects create-only / replace-only / unconditional semantics
 * (JAVA_XATTR_* -> CEPH_XATTR_*); any other value raises
 * IllegalArgumentException.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1setxattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name,
   jbyteArray j_buf, jlong j_size, jint j_flags)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  const char *c_name;
  jsize buf_size;
  jbyte *c_buf;
  int ret, flags;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_name, "@name is null", -1);
  CHECK_ARG_NULL(j_buf, "@buf is null", -1);
  CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
  CHECK_MOUNTED(cmount, -1);

  buf_size = env->GetArrayLength(j_buf);
  CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_name = env->GetStringUTFChars(j_name, NULL);
  if (!c_name) {
    env->ReleaseStringUTFChars(j_path, c_path);
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_buf = env->GetByteArrayElements(j_buf, NULL);
  if (!c_buf) {
    /* unwind both pinned strings before throwing */
    env->ReleaseStringUTFChars(j_path, c_path);
    env->ReleaseStringUTFChars(j_name, c_name);
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  /* map the Java flag constant onto the native CEPH_XATTR_* flag */
  switch (j_flags) {
  case JAVA_XATTR_CREATE:
    flags = CEPH_XATTR_CREATE;
    break;
  case JAVA_XATTR_REPLACE:
    flags = CEPH_XATTR_REPLACE;
    break;
  case JAVA_XATTR_NONE:
    flags = 0;
    break;
  default:
    /* invalid flag: release everything before throwing */
    env->ReleaseStringUTFChars(j_path, c_path);
    env->ReleaseStringUTFChars(j_name, c_name);
    env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
    cephThrowIllegalArg(env, "setxattr flag");
    return -1;
  }

  ldout(cct, 10) << "jni: setxattr: path " << c_path << " name " << c_name
    << " len " << j_size << " flags " << flags << dendl;

  ret = ceph_setxattr(cmount, c_path, c_name, c_buf, j_size, flags);

  ldout(cct, 10) << "jni: setxattr: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);
  env->ReleaseStringUTFChars(j_name, c_name);
  /* JNI_ABORT: the byte array is input-only, nothing to copy back */
  env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
 * Class:     com_ceph_fs_CephMount
 * Method:    native_ceph_lsetxattr
 * Signature: (JLjava/lang/String;Ljava/lang/String;[BJI)I
 *
 * Like native_ceph_setxattr, but does not follow a trailing symlink.
 * @j_flags selects create-only / replace-only / unconditional semantics
 * (JAVA_XATTR_* -> CEPH_XATTR_*); any other value raises
 * IllegalArgumentException.
 */
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1lsetxattr
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring j_path, jstring j_name,
   jbyteArray j_buf, jlong j_size, jint j_flags)
{
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  const char *c_path;
  const char *c_name;
  jsize buf_size;
  jbyte *c_buf;
  int ret, flags;

  CHECK_ARG_NULL(j_path, "@path is null", -1);
  CHECK_ARG_NULL(j_name, "@name is null", -1);
  CHECK_ARG_NULL(j_buf, "@buf is null", -1);
  CHECK_ARG_BOUNDS(j_size < 0, "@size is negative", -1);
  CHECK_MOUNTED(cmount, -1);

  buf_size = env->GetArrayLength(j_buf);
  CHECK_ARG_BOUNDS(j_size > buf_size, "@size > @buf.length", -1);

  c_path = env->GetStringUTFChars(j_path, NULL);
  if (!c_path) {
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_name = env->GetStringUTFChars(j_name, NULL);
  if (!c_name) {
    env->ReleaseStringUTFChars(j_path, c_path);
    cephThrowInternal(env, "Failed to pin memory");
    return -1;
  }

  c_buf = env->GetByteArrayElements(j_buf, NULL);
  if (!c_buf) {
    /* unwind both pinned strings before throwing */
    env->ReleaseStringUTFChars(j_path, c_path);
    env->ReleaseStringUTFChars(j_name, c_name);
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  /* map the Java flag constant onto the native CEPH_XATTR_* flag */
  switch (j_flags) {
  case JAVA_XATTR_CREATE:
    flags = CEPH_XATTR_CREATE;
    break;
  case JAVA_XATTR_REPLACE:
    flags = CEPH_XATTR_REPLACE;
    break;
  case JAVA_XATTR_NONE:
    flags = 0;
    break;
  default:
    /* invalid flag: release everything before throwing */
    env->ReleaseStringUTFChars(j_path, c_path);
    env->ReleaseStringUTFChars(j_name, c_name);
    env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);
    cephThrowIllegalArg(env, "lsetxattr flag");
    return -1;
  }

  ldout(cct, 10) << "jni: lsetxattr: path " << c_path << " name " << c_name
    << " len " << j_size << " flags " << flags << dendl;

  ret = ceph_lsetxattr(cmount, c_path, c_name, c_buf, j_size, flags);

  ldout(cct, 10) << "jni: lsetxattr: exit ret " << ret << dendl;

  env->ReleaseStringUTFChars(j_path, c_path);
  env->ReleaseStringUTFChars(j_name, c_name);
  /* JNI_ABORT: the byte array is input-only, nothing to copy back */
  env->ReleaseByteArrayElements(j_buf, c_buf, JNI_ABORT);

  if (ret)
    handle_error(env, ret);

  return ret;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_stripe_unit
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1stripe_1unit
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
  /* Return the stripe unit (bytes) of the file open on descriptor @j_fd,
   * or a negative errno (with a Java exception raised) on failure. */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: get_file_stripe_unit: fd " << (int)j_fd << dendl;
  int r = ceph_get_file_stripe_unit(cmount, (int)j_fd);
  ldout(cct, 10) << "jni: get_file_stripe_unit: exit ret " << r << dendl;

  if (r < 0)
    handle_error(env, r);

  return r;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_replication
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1replication
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
  /* Return the replication factor of the file open on @j_fd, or a
   * negative errno (with a Java exception raised) on failure. */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: get_file_replication: fd " << (int)j_fd << dendl;
  int r = ceph_get_file_replication(cmount, (int)j_fd);
  ldout(cct, 10) << "jni: get_file_replication: exit ret " << r << dendl;

  if (r < 0)
    handle_error(env, r);

  return r;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_pool_name
* Signature: (JI)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1pool_1name
  (JNIEnv *env, jclass clz, jlong j_mntp, jint j_fd)
{
  /*
   * Return the name of the data pool backing the file open on @j_fd as a
   * Java String, or NULL (with a Java exception raised) on failure.
   *
   * The pool name length is queried first (len==0 call), a buffer is
   * allocated, and the query is retried if the name grows between the two
   * calls (-ERANGE).
   *
   * Fix: OOM message said "head allocation failed"; corrected to "heap".
   */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jstring pool = NULL;
  int ret, buflen = 0;
  char *buf = NULL;

  CHECK_MOUNTED(cmount, NULL);

  ldout(cct, 10) << "jni: get_file_pool_name: fd " << (int)j_fd << dendl;

  for (;;) {
    /* get pool name length (len==0) */
    ret = ceph_get_file_pool_name(cmount, (int)j_fd, NULL, 0);
    if (ret < 0)
      break;

    /* allocate buffer */
    if (buf)
      delete [] buf;
    buflen = ret;
    buf = new (std::nothrow) char[buflen+1]; /* +1 for '\0' */
    if (!buf) {
      cephThrowOutOfMemory(env, "heap allocation failed");
      goto out;
    }
    memset(buf, 0, (buflen+1)*sizeof(*buf));

    /* handle zero-length pool name!? */
    if (buflen == 0)
      break;

    /* fill buffer */
    ret = ceph_get_file_pool_name(cmount, (int)j_fd, buf, buflen);
    if (ret == -ERANGE) /* size changed! */
      continue;
    else
      break;
  }

  ldout(cct, 10) << "jni: get_file_pool_name: ret " << ret << dendl;

  if (ret < 0)
    handle_error(env, ret);
  else
    pool = env->NewStringUTF(buf);

out:
  if (buf)
    delete [] buf;
  return pool;
}
/**
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_default_data_pool_name
* Signature: (J)Ljava/lang/String;
*/
JNIEXPORT jstring JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1default_1data_1pool_1name
  (JNIEnv *env, jclass clz, jlong j_mntp)
{
  /*
   * Return the name of the filesystem's default data pool as a Java
   * String, or NULL (with a Java exception raised) on failure.
   *
   * Fixes:
   *  - an error from the initial length query was silently swallowed
   *    (returned NULL without raising a Java exception, unlike every
   *    sibling wrapper); now reported via handle_error().
   *  - OOM message said "head allocation failed"; corrected to "heap".
   */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jstring pool = NULL;
  int ret, buflen = 0;
  char *buf = NULL;

  CHECK_MOUNTED(cmount, NULL);

  ldout(cct, 10) << "jni: get_default_data_pool_name" << dendl;

  /* query the name length first (len==0 call) */
  ret = ceph_get_default_data_pool_name(cmount, NULL, 0);
  if (ret < 0) {
    handle_error(env, ret);
    goto out;
  }
  buflen = ret;

  buf = new (std::nothrow) char[buflen+1]; /* +1 for '\0' */
  if (!buf) {
    cephThrowOutOfMemory(env, "heap allocation failed");
    goto out;
  }
  memset(buf, 0, (buflen+1)*sizeof(*buf));

  ret = ceph_get_default_data_pool_name(cmount, buf, buflen);

  ldout(cct, 10) << "jni: get_default_data_pool_name: ret " << ret << dendl;

  if (ret < 0)
    handle_error(env, ret);
  else
    pool = env->NewStringUTF(buf);

out:
  if (buf)
    delete [] buf;
  return pool;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_localize_reads
* Signature: (JZ)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1localize_1reads
  (JNIEnv *env, jclass clz, jlong j_mntp, jboolean j_on)
{
  /* Enable or disable localized reads for this mount. Returns 0 on
   * success or a negative errno (with a Java exception raised). */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  const int val = j_on ? 1 : 0;
  ldout(cct, 10) << "jni: localize_reads: val " << val << dendl;
  int r = ceph_localize_reads(cmount, val);
  ldout(cct, 10) << "jni: localize_reads: exit ret " << r << dendl;

  if (r)
    handle_error(env, r);

  return r;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_stripe_unit_granularity
* Signature: (J)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1stripe_1unit_1granularity
  (JNIEnv *env, jclass clz, jlong j_mntp)
{
  /* Return the granularity (bytes) that stripe units must be a multiple
   * of, or a negative errno (with a Java exception raised) on failure. */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: get_stripe_unit_granularity" << dendl;
  int r = ceph_get_stripe_unit_granularity(cmount);
  ldout(cct, 10) << "jni: get_stripe_unit_granularity: exit ret " << r << dendl;

  if (r < 0)
    handle_error(env, r);

  return r;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_pool_id
* Signature: (JLjava/lang/String;)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1pool_1id
  (JNIEnv *env, jclass clz, jlong j_mntp, jstring jname)
{
  /* Resolve pool name @jname to its numeric pool id. Returns the id, or
   * a negative errno (with a Java exception raised) on failure. */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);
  CHECK_ARG_NULL(jname, "@name is null", -1);

  const char *pool_name = env->GetStringUTFChars(jname, NULL);
  if (!pool_name) {
    cephThrowInternal(env, "failed to pin memory");
    return -1;
  }

  ldout(cct, 10) << "jni: get_pool_id: name " << pool_name << dendl;
  int r = ceph_get_pool_id(cmount, pool_name);
  if (r < 0)
    handle_error(env, r);
  ldout(cct, 10) << "jni: get_pool_id: ret " << r << dendl;

  env->ReleaseStringUTFChars(jname, pool_name);
  return r;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_pool_replication
* Signature: (JI)I
*/
JNIEXPORT jint JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1pool_1replication
  (JNIEnv *env, jclass clz, jlong j_mntp, jint jpoolid)
{
  /* Return the replication factor of pool @jpoolid, or a negative errno
   * (with a Java exception raised) on failure. */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, -1);

  ldout(cct, 10) << "jni: get_pool_replication: poolid " << jpoolid << dendl;
  int r = ceph_get_pool_replication(cmount, jpoolid);
  if (r < 0)
    handle_error(env, r);
  ldout(cct, 10) << "jni: get_pool_replication: ret " << r << dendl;

  return r;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_file_extent_osds
* Signature: (JIJ)Lcom/ceph/fs/CephFileExtent;
*/
JNIEXPORT jobject JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1file_1extent_1osds
  (JNIEnv *env, jclass clz, jlong mntp, jint fd, jlong off)
{
  /*
   * Look up the file extent containing offset @off of the file open on
   * @fd and return it as a Java CephFileExtent (offset, length, OSD ids),
   * or NULL with a Java exception raised on failure.
   *
   * The OSD count is queried first; the query is retried if the count
   * changes between the size call and the fill call (-ERANGE).
   */
  struct ceph_mount_info *cmount = get_ceph_mount(mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jobject extent = NULL;
  int ret, nosds, *osds = NULL;
  jintArray osd_array;
  loff_t len;

  CHECK_MOUNTED(cmount, NULL);

  ldout(cct, 10) << "jni: get_file_extent_osds: fd " << fd << " off " << off << dendl;

  for (;;) {
    /* get pg size */
    ret = ceph_get_file_extent_osds(cmount, fd, off, NULL, NULL, 0);
    if (ret < 0)
      break;

    /* alloc osd id array */
    if (osds)
      delete [] osds;
    nosds = ret;
    osds = new int[nosds];

    /* get osd ids */
    ret = ceph_get_file_extent_osds(cmount, fd, off, &len, osds, nosds);
    if (ret == -ERANGE)
      continue;
    else
      break;
  }

  ldout(cct, 10) << "jni: get_file_extent_osds: ret " << ret << dendl;

  if (ret < 0) {
    handle_error(env, ret);
    goto out;
  }

  nosds = ret;

  /* copy the OSD ids into a Java int[]; any JNI failure below leaves a
   * pending Java exception and falls through to cleanup */
  osd_array = env->NewIntArray(nosds);
  if (!osd_array)
    goto out;

  env->SetIntArrayRegion(osd_array, 0, nosds, osds);
  if (env->ExceptionOccurred())
    goto out;

  extent = env->NewObject(cephfileextent_cls, cephfileextent_ctor_fid, off, len, osd_array);
  if (!extent)
    goto out;

out:
  if (osds)
    delete [] osds;
  return extent;
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_osd_crush_location
* Signature: (JI)[Ljava/lang/String;
*/
JNIEXPORT jobjectArray JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1osd_1crush_1location
  (JNIEnv *env, jclass clz, jlong j_mntp, jint osdid)
{
  /*
   * Return the CRUSH location of @osdid as a flat Java String[] of
   * alternating (type, name) pairs, or NULL with a Java exception raised
   * on failure.
   *
   * Fixes:
   *  - the buffer is allocated as buflen+1 bytes (NUL guard) but only
   *    buflen bytes were zeroed, leaving the guard byte uninitialized;
   *    now zeroes the full buflen+1 like the sibling wrappers.
   *  - uses nothrow new + explicit OOM exception for consistency with
   *    the other buffer-allocating wrappers in this file.
   */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);
  jobjectArray path = NULL;
  vector<string> str_path;
  int ret, bufpos, buflen = 0;
  char *buf = NULL;

  CHECK_MOUNTED(cmount, NULL);

  ldout(cct, 10) << "jni: osd loc: osd " << osdid << dendl;

  for (;;) {
    /* get length of the location path */
    ret = ceph_get_osd_crush_location(cmount, osdid, NULL, 0);
    if (ret < 0)
      break;

    /* alloc path buffer */
    if (buf)
      delete [] buf;
    buflen = ret;
    buf = new (std::nothrow) char[buflen+1]; /* +1 for '\0' guard */
    if (!buf) {
      cephThrowOutOfMemory(env, "heap allocation failed");
      goto out;
    }
    memset(buf, 0, (buflen+1)*sizeof(*buf));

    /* empty path */
    if (buflen == 0)
      break;

    /* get the path */
    ret = ceph_get_osd_crush_location(cmount, osdid, buf, buflen);
    if (ret == -ERANGE)
      continue;
    else
      break;
  }

  ldout(cct, 10) << "jni: osd loc: osd " << osdid << " ret " << ret << dendl;

  if (ret < 0) {
    handle_error(env, ret);
    goto out;
  }

  /* the buffer holds a sequence of NUL-terminated (type, name) strings */
  bufpos = 0;
  while (bufpos < ret) {
    string type(buf + bufpos);
    bufpos += type.size() + 1;
    string name(buf + bufpos);
    bufpos += name.size() + 1;
    str_path.push_back(type);
    str_path.push_back(name);
  }

  path = env->NewObjectArray(str_path.size(), env->FindClass("java/lang/String"), NULL);
  if (!path)
    goto out;

  for (unsigned i = 0; i < str_path.size(); i++) {
    jstring ent = env->NewStringUTF(str_path[i].c_str());
    if (!ent)
      goto out;
    env->SetObjectArrayElement(path, i, ent);
    if (env->ExceptionOccurred())
      goto out;
    env->DeleteLocalRef(ent);
  }

out:
  if (buf)
    delete [] buf;
  return path;
}
/*
* sockaddrToInetAddress uses with the following license, and is adapted for
* use in this project by using Ceph JNI exception utilities.
*
* ----
*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
jobject sockaddrToInetAddress(JNIEnv* env, const sockaddr_storage& ss, jint* port) {
    // Convert a native sockaddr_storage into a java.net.InetAddress and,
    // when @port is non-NULL, store the port number there.
    //
    // Fix: the IllegalArgumentException message read "unsupposed
    // ss_family"; corrected to "unsupported".
    //
    // Convert IPv4-mapped IPv6 addresses to IPv4 addresses.
    // The RI states "Java will never return an IPv4-mapped address".
    const sockaddr_in6& sin6 = reinterpret_cast<const sockaddr_in6&>(ss);
    if (ss.ss_family == AF_INET6 && IN6_IS_ADDR_V4MAPPED(&sin6.sin6_addr)) {
        // Copy the IPv6 address into the temporary sockaddr_storage.
        sockaddr_storage tmp;
        memset(&tmp, 0, sizeof(tmp));
        memcpy(&tmp, &ss, sizeof(sockaddr_in6));
        // Unmap it into an IPv4 address.
        sockaddr_in& sin = reinterpret_cast<sockaddr_in&>(tmp);
        sin.sin_family = AF_INET;
        sin.sin_port = sin6.sin6_port;
        memcpy(&sin.sin_addr.s_addr, &sin6.sin6_addr.s6_addr[12], 4);
        // Do the regular conversion using the unmapped address.
        return sockaddrToInetAddress(env, tmp, port);
    }

    const void* rawAddress;
    size_t addressLength;
    int sin_port = 0;
    int scope_id = 0;
    if (ss.ss_family == AF_INET) {
        const sockaddr_in& sin = reinterpret_cast<const sockaddr_in&>(ss);
        rawAddress = &sin.sin_addr.s_addr;
        addressLength = 4;
        sin_port = ntohs(sin.sin_port);
    } else if (ss.ss_family == AF_INET6) {
        const sockaddr_in6& sin6 = reinterpret_cast<const sockaddr_in6&>(ss);
        rawAddress = &sin6.sin6_addr.s6_addr;
        addressLength = 16;
        sin_port = ntohs(sin6.sin6_port);
        scope_id = sin6.sin6_scope_id;
    } else if (ss.ss_family == AF_UNIX) {
        const sockaddr_un& sun = reinterpret_cast<const sockaddr_un&>(ss);
        rawAddress = &sun.sun_path;
        addressLength = strlen(sun.sun_path);
    } else {
        // We can't throw SocketException. We aren't meant to see bad addresses, so seeing one
        // really does imply an internal error.
        //jniThrowExceptionFmt(env, "java/lang/IllegalArgumentException",
        //        "sockaddrToInetAddress unsupported ss_family: %i", ss.ss_family);
        cephThrowIllegalArg(env, "sockaddrToInetAddress unsupported ss_family");
        return NULL;
    }

    if (port != NULL) {
        *port = sin_port;
    }

    ScopedLocalRef<jbyteArray> byteArray(env, env->NewByteArray(addressLength));
    if (byteArray.get() == NULL) {
        return NULL;
    }
    env->SetByteArrayRegion(byteArray.get(), 0, addressLength,
            reinterpret_cast<jbyte*>(const_cast<void*>(rawAddress)));

    if (ss.ss_family == AF_UNIX) {
        // Note that we get here for AF_UNIX sockets on accept(2). The unix(7) man page claims
        // that the peer's sun_path will contain the path, but in practice it doesn't, and the
        // peer length is returned as 2 (meaning only the sun_family field was set).
        //
        // Ceph Note: this isn't supported. inetUnixAddress appears to just be
        // something in Dalvik/Android stuff.
        cephThrowInternal(env, "OSD address should never be a UNIX socket");
        return NULL;

        //static jmethodID ctor = env->GetMethodID(JniConstants::inetUnixAddressClass, "<init>", "([B)V");
        //return env->NewObject(JniConstants::inetUnixAddressClass, ctor, byteArray.get());
    }

    if (addressLength == 4) {
        static jmethodID getByAddressMethod = env->GetStaticMethodID(JniConstants::inetAddressClass,
                "getByAddress", "(Ljava/lang/String;[B)Ljava/net/InetAddress;");
        if (getByAddressMethod == NULL) {
            return NULL;
        }
        return env->CallStaticObjectMethod(JniConstants::inetAddressClass, getByAddressMethod,
                NULL, byteArray.get());
    } else if (addressLength == 16) {
        static jmethodID getByAddressMethod = env->GetStaticMethodID(JniConstants::inet6AddressClass,
                "getByAddress", "(Ljava/lang/String;[BI)Ljava/net/Inet6Address;");
        if (getByAddressMethod == NULL) {
            return NULL;
        }
        return env->CallStaticObjectMethod(JniConstants::inet6AddressClass, getByAddressMethod,
                NULL, byteArray.get(), scope_id);
    } else {
        abort();
        return NULL;
    }
}
/*
* Class: com_ceph_fs_CephMount
* Method: native_ceph_get_osd_addr
* Signature: (JI)Ljava/net/InetAddress;
*/
JNIEXPORT jobject JNICALL Java_com_ceph_fs_CephMount_native_1ceph_1get_1osd_1addr
  (JNIEnv *env, jclass clz, jlong j_mntp, jint osd)
{
  /* Return the network address of OSD @osd as a java.net.InetAddress,
   * or NULL with a Java exception raised on failure. */
  struct ceph_mount_info *cmount = get_ceph_mount(j_mntp);
  CephContext *cct = ceph_get_mount_context(cmount);

  CHECK_MOUNTED(cmount, NULL);

  struct sockaddr_storage ss;
  ldout(cct, 10) << "jni: get_osd_addr: osd " << osd << dendl;
  int r = ceph_get_osd_addr(cmount, osd, &ss);
  ldout(cct, 10) << "jni: get_osd_addr: ret " << r << dendl;

  if (r < 0) {
    handle_error(env, r);
    return NULL;
  }

  return sockaddrToInetAddress(env, ss, NULL);
}
| 79,866 | 25.323995 | 128 | cc |
null | ceph-main/src/journal/Entry.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Entry.h"
#include "include/encoding.h"
#include "include/stringify.h"
#include "common/Formatter.h"
#include <strstream>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "Entry: " << this << " "
namespace journal {
namespace {
const uint32_t HEADER_FIXED_SIZE = 25; /// preamble, version, entry tid, tag id
const uint32_t REMAINDER_FIXED_SIZE = 8; /// data size, crc
} // anonymous namespace
// Fixed per-entry on-wire overhead: the header (preamble, version,
// entry tid, tag tid) plus the trailing data-size and crc fields.
uint32_t Entry::get_fixed_size() {
  return HEADER_FIXED_SIZE + REMAINDER_FIXED_SIZE;
}
// Serialize this entry onto @bl in the on-wire format:
// preamble, version (1), entry tid, tag tid, length-prefixed data,
// then a crc32c over everything before the crc itself.
void Entry::encode(bufferlist &bl) const {
  using ceph::encode;
  bufferlist data_bl;
  encode(preamble, data_bl);
  encode(static_cast<uint8_t>(1), data_bl);
  encode(m_entry_tid, data_bl);
  encode(m_tag_tid, data_bl);
  encode(m_data, data_bl);

  // crc covers the whole payload built above
  uint32_t crc = data_bl.crc32c(0);
  uint32_t bl_offset = bl.length();
  bl.claim_append(data_bl);
  encode(crc, bl);
  // sanity: appended exactly the fixed overhead plus the payload bytes
  ceph_assert(get_fixed_size() + m_data.length() + bl_offset == bl.length());
}
// Deserialize an entry from @iter, validating the preamble, version and
// trailing crc. Throws buffer::malformed_input on any mismatch.
void Entry::decode(bufferlist::const_iterator &iter) {
  using ceph::decode;
  uint32_t start_offset = iter.get_off();
  uint64_t bl_preamble;
  decode(bl_preamble, iter);
  if (bl_preamble != preamble) {
    throw buffer::malformed_input("incorrect preamble: " +
                                  stringify(bl_preamble));
  }

  uint8_t version;
  decode(version, iter);
  if (version != 1) {
    throw buffer::malformed_input("unknown version: " + stringify(version));
  }

  decode(m_entry_tid, iter);
  decode(m_tag_tid, iter);
  decode(m_data, iter);
  uint32_t end_offset = iter.get_off();

  uint32_t crc;
  decode(crc, iter);

  // re-compute the crc over the bytes we just consumed (preamble through
  // data) and compare against the stored value
  bufferlist data_bl;
  data_bl.substr_of(iter.get_bl(), start_offset, end_offset - start_offset);
  uint32_t actual_crc = data_bl.crc32c(0);
  if (crc != actual_crc) {
    throw buffer::malformed_input("crc mismatch: " + stringify(crc) +
                                  " != " + stringify(actual_crc));
  }
}
// Dump the entry's identifiers and a hexdump of its payload to @f.
void Entry::dump(Formatter *f) const {
  f->dump_unsigned("tag_tid", m_tag_tid);
  f->dump_unsigned("entry_tid", m_entry_tid);

  std::stringstream hex;
  m_data.hexdump(hex);
  f->dump_string("data", hex.str());
}
// Check whether a complete, crc-valid entry starts at @iter without
// consuming it. Returns true if so; otherwise returns false and sets
// *bytes_needed to how many more bytes are required (0 means the data
// present is invalid/padding rather than merely short).
bool Entry::is_readable(bufferlist::const_iterator iter, uint32_t *bytes_needed) {
  using ceph::decode;
  uint32_t start_off = iter.get_off();
  if (iter.get_remaining() < HEADER_FIXED_SIZE) {
    bufferlist sub_bl;
    sub_bl.substr_of(iter.get_bl(), iter.get_off(), iter.get_remaining());
    if (sub_bl.length() > 0 && sub_bl.is_zero()) {
      // pad bytes
      *bytes_needed = 0;
    } else {
      *bytes_needed = HEADER_FIXED_SIZE - iter.get_remaining();
    }
    return false;
  }

  uint64_t bl_preamble;
  decode(bl_preamble, iter);
  if (bl_preamble != preamble) {
    // not an entry boundary; no amount of extra data will help
    *bytes_needed = 0;
    return false;
  }
  // skip the rest of the fixed header (version + tids)
  iter += HEADER_FIXED_SIZE - sizeof(bl_preamble);

  if (iter.get_remaining() < sizeof(uint32_t)) {
    *bytes_needed = sizeof(uint32_t) - iter.get_remaining();
    return false;
  }
  uint32_t data_size;
  decode(data_size, iter);

  if (iter.get_remaining() < data_size) {
    *bytes_needed = data_size - iter.get_remaining();
    return false;
  }
  iter += data_size;
  uint32_t end_off = iter.get_off();

  if (iter.get_remaining() < sizeof(uint32_t)) {
    *bytes_needed = sizeof(uint32_t) - iter.get_remaining();
    return false;
  }

  // verify the stored crc over [start_off, end_off)
  bufferlist crc_bl;
  crc_bl.substr_of(iter.get_bl(), start_off, end_off - start_off);

  *bytes_needed = 0;
  uint32_t crc;
  decode(crc, iter);
  if (crc != crc_bl.crc32c(0)) {
    return false;
  }
  return true;
}
// Produce test instances: one entry with an empty payload and one with a
// short payload.
void Entry::generate_test_instances(std::list<Entry *> &o) {
  o.push_back(new Entry(1, 123, bufferlist()));

  bufferlist payload;
  payload.append("data");
  o.push_back(new Entry(2, 123, payload));
}
// Entries are equal when tids match and the payload bytes compare equal.
// const_cast is needed because this bufferlist::contents_equal overload
// is non-const.
bool Entry::operator==(const Entry& rhs) const {
  return (m_tag_tid == rhs.m_tag_tid && m_entry_tid == rhs.m_entry_tid &&
          const_cast<bufferlist&>(m_data).contents_equal(
            const_cast<bufferlist&>(rhs.m_data)));
}
// Stream a compact human-readable summary of @entry.
std::ostream &operator<<(std::ostream &os, const Entry &entry) {
  os << "Entry[tag_tid=" << entry.get_tag_tid() << ", entry_tid="
     << entry.get_entry_tid() << ", data size="
     << entry.get_data().length() << "]";
  return os;
}
} // namespace journal
| 4,346 | 26.16875 | 82 | cc |
null | ceph-main/src/journal/Entry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_ENTRY_H
#define CEPH_JOURNAL_ENTRY_H
#include "include/int_types.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include <iosfwd>
#include <string>
namespace ceph {
class Formatter;
}
namespace journal {
// A single journal entry: an opaque data payload identified by a
// (tag tid, entry tid) pair, with encode/decode to the on-wire format
// used by journal objects.
class Entry {
public:
  Entry() : m_tag_tid(0), m_entry_tid() {}
  Entry(uint64_t tag_tid, uint64_t entry_tid, const bufferlist &data)
    : m_tag_tid(tag_tid), m_entry_tid(entry_tid), m_data(data)
  {
  }

  // fixed per-entry on-wire overhead (header + size/crc trailer)
  static uint32_t get_fixed_size();

  inline uint64_t get_tag_tid() const {
    return m_tag_tid;
  }
  inline uint64_t get_entry_tid() const {
    return m_entry_tid;
  }
  inline const bufferlist &get_data() const {
    return m_data;
  }

  void encode(bufferlist &bl) const;
  // throws buffer::malformed_input on bad preamble/version/crc
  void decode(bufferlist::const_iterator &iter);
  void dump(ceph::Formatter *f) const;

  bool operator==(const Entry& rhs) const;

  // non-consuming check that a full, crc-valid entry starts at @iter;
  // on false, *bytes_needed reports how many more bytes are required
  static bool is_readable(bufferlist::const_iterator iter, uint32_t *bytes_needed);
  static void generate_test_instances(std::list<Entry *> &o);

private:
  // magic marker written before every entry (digits of pi)
  static const uint64_t preamble = 0x3141592653589793;

  uint64_t m_tag_tid;
  uint64_t m_entry_tid;
  bufferlist m_data;
};
std::ostream &operator<<(std::ostream &os, const Entry &entry);
WRITE_CLASS_ENCODER(journal::Entry)
} // namespace journal
#endif // CEPH_JOURNAL_ENTRY_H
| 1,402 | 21.269841 | 83 | h |
null | ceph-main/src/journal/Future.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Future.h"
#include "journal/FutureImpl.h"
#include "include/ceph_assert.h"
namespace journal {
// Future is a thin copyable/movable handle around a ref-counted
// FutureImpl; all special members can be defaulted.
Future::Future() = default;
Future::Future(const Future& o) = default;
Future& Future::operator=(const Future& o) = default;
Future::Future(Future&& o) = default;
Future& Future::operator=(Future&& o) = default;
Future::Future(ceph::ref_t<FutureImpl> future_impl) : m_future_impl(std::move(future_impl)) {}
Future::~Future() = default;
// Request that this entry (and all prior entries) be flushed to the
// journal; @on_safe may be NULL.
void Future::flush(Context *on_safe) {
  m_future_impl->flush(on_safe);
}

// Register @on_safe to fire once the entry is safe and consistent;
// unlike flush(), the callback is mandatory here.
void Future::wait(Context *on_safe) {
  ceph_assert(on_safe != NULL);
  m_future_impl->wait(on_safe);
}

// True once the entry is both safe on disk and journal-consistent.
bool Future::is_complete() const {
  return m_future_impl->is_complete();
}

// Result code of the completed operation (only valid once complete).
int Future::get_return_value() const {
  return m_future_impl->get_return_value();
}

std::ostream &operator<<(std::ostream &os, const Future &future) {
  return os << *future.m_future_impl;
}
} // namespace journal
| 1,026 | 24.04878 | 94 | cc |
null | ceph-main/src/journal/Future.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_FUTURE_H
#define CEPH_JOURNAL_FUTURE_H
#include <iosfwd>
#include <string>
#include "include/ceph_assert.h"
#include "include/int_types.h"
#include "common/ref.h"
class Context;
namespace journal {
class FutureImpl;
// Public, copyable handle to an in-flight journal append. Wraps a
// ref-counted FutureImpl; a default-constructed Future is invalid.
class Future {
public:
  Future();
  Future(const Future&);
  Future& operator=(const Future&);
  Future(Future&&);
  Future& operator=(Future&&);
  Future(ceph::ref_t<FutureImpl> future_impl);
  ~Future();

  // true when this handle refers to an actual FutureImpl
  bool is_valid() const {
    return bool(m_future_impl);
  }

  // request a flush; @on_safe may be NULL
  void flush(Context *on_safe);
  // @on_safe is required and fires once safe + consistent
  void wait(Context *on_safe);

  bool is_complete() const;
  // only valid once is_complete() returns true
  int get_return_value() const;

private:
  friend class Journaler;
  friend std::ostream& operator<<(std::ostream&, const Future&);

  const auto& get_future_impl() const {
    return m_future_impl;
  }

  ceph::ref_t<FutureImpl> m_future_impl;
};
std::ostream &operator<<(std::ostream &os, const Future &future);
} // namespace journal
using journal::operator<<;
#endif // CEPH_JOURNAL_FUTURE_H
| 1,106 | 18.086207 | 70 | h |
null | ceph-main/src/journal/FutureImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/FutureImpl.h"
#include "journal/Utils.h"
namespace journal {
// Construct a future for the entry identified by (tag_tid, entry_tid)
// with the given commit position. m_consistent_ack holds a self-ref so
// the chained consistency callback keeps this object alive.
FutureImpl::FutureImpl(uint64_t tag_tid, uint64_t entry_tid,
                       uint64_t commit_tid)
  : m_tag_tid(tag_tid),
    m_entry_tid(entry_tid),
    m_commit_tid(commit_tid),
    m_consistent_ack(this)
{
}
void FutureImpl::init(const ceph::ref_t<FutureImpl> &prev_future) {
  // chain ourself to the prior future (if any) so that we know when the
  // journal is consistent; with no predecessor we are consistent already
  if (prev_future) {
    m_prev_future = prev_future;
    m_prev_future->wait(&m_consistent_ack);
  } else {
    m_consistent_ack.complete(0);
  }
}
// Request that this entry (and, transitively, all prior entries) be
// flushed. @on_safe, if non-NULL, is queued to fire at completion.
// Flush handlers are collected under the lock but invoked outside it to
// avoid lock-order issues with the owning journal object.
void FutureImpl::flush(Context *on_safe) {
  bool complete;
  FlushHandlers flush_handlers;
  ceph::ref_t<FutureImpl> prev_future;
  {
    std::lock_guard locker{m_lock};
    complete = (m_safe && m_consistent);
    if (!complete) {
      if (on_safe != nullptr) {
        m_contexts.push_back(on_safe);
      }

      prev_future = prepare_flush(&flush_handlers, m_lock);
    }
  }

  // instruct prior futures to flush as well
  while (prev_future) {
    prev_future = prev_future->prepare_flush(&flush_handlers);
  }

  if (complete && on_safe != NULL) {
    on_safe->complete(m_return_value);
  } else if (!flush_handlers.empty()) {
    // attached to journal object -- instruct it to flush all entries through
    // this one.  possible to become detached while lock is released, so flush
    // will be re-requested by the object if it doesn't own the future
    for (auto &pair : flush_handlers) {
      pair.first->flush(pair.second);
    }
  }
}
// Locking wrapper around the overload below.
ceph::ref_t<FutureImpl> FutureImpl::prepare_flush(FlushHandlers *flush_handlers) {
  std::lock_guard locker{m_lock};
  return prepare_flush(flush_handlers, m_lock);
}

// Mark this future as flush-requested and, if a flush handler is
// attached, record it in @flush_handlers for invocation after the lock
// is dropped. The @lock parameter documents that the caller must hold
// m_lock (asserted). Returns the previous future so the caller can walk
// the chain backwards.
ceph::ref_t<FutureImpl> FutureImpl::prepare_flush(FlushHandlers *flush_handlers,
                                                  ceph::mutex &lock) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (m_flush_state == FLUSH_STATE_NONE) {
    m_flush_state = FLUSH_STATE_REQUESTED;

    auto h = m_flush_handler;
    if (h) {
      flush_handlers->try_emplace(std::move(h), this);
    }
  }
  return m_prev_future;
}
// Queue @on_safe to fire once this future is safe and consistent; fire
// immediately (outside the lock) if it already is.
void FutureImpl::wait(Context *on_safe) {
  ceph_assert(on_safe != NULL);
  {
    std::lock_guard locker{m_lock};
    if (!m_safe || !m_consistent) {
      m_contexts.push_back(on_safe);
      return;
    }
  }

  on_safe->complete(m_return_value);
}
// Complete means both safe (written) and consistent (all prior entries
// complete).
bool FutureImpl::is_complete() const {
  std::lock_guard locker{m_lock};
  return m_safe && m_consistent;
}

// Only valid once complete (asserted).
int FutureImpl::get_return_value() const {
  std::lock_guard locker{m_lock};
  ceph_assert(m_safe && m_consistent);
  return m_return_value;
}

// Attach the journal object's flush handler. Returns true if a flush
// was already requested before attachment, so the caller knows to flush
// immediately.
bool FutureImpl::attach(FlushHandler::ref flush_handler) {
  std::lock_guard locker{m_lock};
  ceph_assert(!m_flush_handler);
  m_flush_handler = std::move(flush_handler);
  return m_flush_state != FLUSH_STATE_NONE;
}
// Record that the entry was safely written (result @r). The first
// non-zero result wins. If we are also consistent, finish_unlock()
// fires the queued callbacks (and releases the lock); otherwise the
// lock is released here — hence the manual lock()/unlock() pairing.
void FutureImpl::safe(int r) {
  m_lock.lock();
  ceph_assert(!m_safe);
  m_safe = true;
  if (m_return_value == 0) {
    m_return_value = r;
  }

  m_flush_handler.reset();
  if (m_consistent) {
    finish_unlock();
  } else {
    m_lock.unlock();
  }
}

// Record that all prior futures have completed (result @r), breaking the
// back-reference to the previous future. Same lock-handoff protocol as
// safe() above.
void FutureImpl::consistent(int r) {
  m_lock.lock();
  ceph_assert(!m_consistent);
  m_consistent = true;
  m_prev_future.reset();
  if (m_return_value == 0) {
    m_return_value = r;
  }

  if (m_safe) {
    finish_unlock();
  } else {
    m_lock.unlock();
  }
}
// Fire all queued completion callbacks with the final return value.
// Called with m_lock held (asserted); the pending contexts are swapped
// out and the lock released BEFORE invoking them, so callbacks may
// safely re-enter this future. Idiom cleanup: replaced the explicit
// iterator loop with a range-for.
void FutureImpl::finish_unlock() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(m_safe && m_consistent);

  Contexts contexts;
  contexts.swap(m_contexts);

  m_lock.unlock();
  for (auto ctx : contexts) {
    ctx->complete(m_return_value);
  }
}
// Stream a compact human-readable summary of @future.
std::ostream &operator<<(std::ostream &os, const FutureImpl &future) {
  os << "Future[tag_tid=" << future.m_tag_tid << ", entry_tid="
     << future.m_entry_tid << ", commit_tid="
     << future.m_commit_tid << "]";
  return os;
}
} // namespace journal
| 4,087 | 23.333333 | 82 | cc |
null | ceph-main/src/journal/FutureImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_FUTURE_IMPL_H
#define CEPH_JOURNAL_FUTURE_IMPL_H
#include "include/int_types.h"
#include "common/RefCountedObj.h"
#include "include/Context.h"
#include "journal/Future.h"
#include <list>
#include <map>
#include <boost/noncopyable.hpp>
#include "include/ceph_assert.h"
class Context;
namespace journal {
// Ref-counted state for one in-flight journal append. A future becomes
// "complete" once it is both safe (its entry was written) and
// consistent (every prior future in the chain completed). Futures link
// backwards via m_prev_future until consistency is reached.
class FutureImpl : public RefCountedObject, boost::noncopyable {
public:
  // Interface the owning journal object implements to receive flush
  // requests for a future it holds.
  struct FlushHandler {
    using ref = std::shared_ptr<FlushHandler>;
    virtual void flush(const ceph::ref_t<FutureImpl> &future) = 0;
    virtual ~FlushHandler() = default;
  };

  // chain to the prior future (or mark consistent if none)
  void init(const ceph::ref_t<FutureImpl> &prev_future);

  inline uint64_t get_tag_tid() const {
    return m_tag_tid;
  }
  inline uint64_t get_entry_tid() const {
    return m_entry_tid;
  }
  inline uint64_t get_commit_tid() const {
    return m_commit_tid;
  }

  // request a flush of this and all prior entries; @on_safe may be NULL
  void flush(Context *on_safe = NULL);
  // queue @on_safe (required) to fire at completion
  void wait(Context *on_safe);

  bool is_complete() const;
  // only valid once complete
  int get_return_value() const;

  inline bool is_flush_in_progress() const {
    std::lock_guard locker{m_lock};
    return (m_flush_state == FLUSH_STATE_IN_PROGRESS);
  }
  inline void set_flush_in_progress() {
    // take the handler before locking; a flush in progress consumes it
    auto h = std::move(m_flush_handler);
    ceph_assert(h);
    std::lock_guard locker{m_lock};
    m_flush_state = FLUSH_STATE_IN_PROGRESS;
  }

  // returns true if a flush was already requested before attachment
  bool attach(FlushHandler::ref flush_handler);
  inline void detach() {
    m_flush_handler.reset();
  }
  inline FlushHandler::ref get_flush_handler() const {
    return m_flush_handler;
  }

  // record that the entry was safely written with result @r
  void safe(int r);

private:
  friend std::ostream &operator<<(std::ostream &, const FutureImpl &);

  typedef std::map<FlushHandler::ref, ceph::ref_t<FutureImpl>> FlushHandlers;
  typedef std::list<Context *> Contexts;

  enum FlushState {
    FLUSH_STATE_NONE,
    FLUSH_STATE_REQUESTED,
    FLUSH_STATE_IN_PROGRESS
  };

  // completion hooked onto the previous future; holds a self-ref so this
  // future stays alive until its predecessor completes
  struct C_ConsistentAck : public Context {
    ceph::ref_t<FutureImpl> future;

    C_ConsistentAck(ceph::ref_t<FutureImpl> _future) : future(std::move(_future)) {}

    void complete(int r) override {
      future->consistent(r);
      future.reset();
    }
    void finish(int r) override {}
  };

  FRIEND_MAKE_REF(FutureImpl);
  FutureImpl(uint64_t tag_tid, uint64_t entry_tid, uint64_t commit_tid);
  ~FutureImpl() override = default;

  uint64_t m_tag_tid;
  uint64_t m_entry_tid;
  uint64_t m_commit_tid;

  mutable ceph::mutex m_lock = ceph::make_mutex("FutureImpl::m_lock", false);
  ceph::ref_t<FutureImpl> m_prev_future;
  bool m_safe = false;
  bool m_consistent = false;
  int m_return_value = 0;           // first non-zero result wins

  FlushHandler::ref m_flush_handler;
  FlushState m_flush_state = FLUSH_STATE_NONE;

  C_ConsistentAck m_consistent_ack;
  Contexts m_contexts;              // callbacks pending completion

  ceph::ref_t<FutureImpl> prepare_flush(FlushHandlers *flush_handlers);
  ceph::ref_t<FutureImpl> prepare_flush(FlushHandlers *flush_handlers, ceph::mutex &lock);

  // record that all prior futures completed with result @r
  void consistent(int r);
  // fire queued contexts; called with m_lock held, releases it
  void finish_unlock();
};
std::ostream &operator<<(std::ostream &os, const FutureImpl &future);
} // namespace journal
using journal::operator<<;
#endif // CEPH_JOURNAL_FUTURE_IMPL_H
| 3,161 | 24.707317 | 90 | h |
null | ceph-main/src/journal/JournalMetadata.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalMetadata.h"
#include "journal/Utils.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "cls/journal/cls_journal_client.h"
#include <functional>
#include <set>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalMetadata: " << this << " "
namespace journal {
using namespace cls::journal;
namespace {
// One-shot async state machine that fetches a registered journal client
// record (cls_journal get_client) and invokes @on_finish with the
// result. Registers itself with @async_op_tracker for its lifetime.
struct C_GetClient : public Context {
  CephContext *cct;
  librados::IoCtx &ioctx;
  const std::string &oid;
  AsyncOpTracker &async_op_tracker;
  std::string client_id;
  cls::journal::Client *client;    // out: populated on success
  Context *on_finish;

  bufferlist out_bl;

  C_GetClient(CephContext *cct, librados::IoCtx &ioctx, const std::string &oid,
              AsyncOpTracker &async_op_tracker, const std::string &client_id,
              cls::journal::Client *client, Context *on_finish)
    : cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
      client_id(client_id), client(client), on_finish(on_finish) {
    async_op_tracker.start_op();
  }
  ~C_GetClient() override {
    async_op_tracker.finish_op();
  }

  virtual void send() {
    send_get_client();
  }

  // issue the async get_client read; completion routes to
  // handle_get_client via the rados state callback shim
  void send_get_client() {
    ldout(cct, 20) << "C_GetClient: " << __func__ << dendl;

    librados::ObjectReadOperation op;
    client::get_client_start(&op, client_id);

    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_GetClient, &C_GetClient::handle_get_client>);

    int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
    ceph_assert(r == 0);
    comp->release();
  }

  // decode the client record on success, then complete(r) -> finish(r)
  void handle_get_client(int r) {
    ldout(cct, 20) << "C_GetClient: " << __func__ << ": r=" << r << dendl;

    if (r == 0) {
      auto it = out_bl.cbegin();
      r = client::get_client_finish(&it, client);
    }
    complete(r);
  }

  void finish(int r) override {
    on_finish->complete(r);
  }
};
// Async state machine: allocate a new tag within 'tag_class'.
// Sequence: get_next_tag_tid -> tag_create -> get_tag.  If tag_create
// races with another allocator (-ESTALE) the whole sequence restarts
// from get_next_tag_tid.
struct C_AllocateTag : public Context {
  CephContext *cct;
  librados::IoCtx &ioctx;
  const std::string &oid;
  AsyncOpTracker &async_op_tracker;
  uint64_t tag_class;
  Tag *tag;  // out: tid and decoded tag populated on success
  Context *on_finish;
  bufferlist out_bl;
  C_AllocateTag(CephContext *cct, librados::IoCtx &ioctx,
                const std::string &oid, AsyncOpTracker &async_op_tracker,
                uint64_t tag_class, const bufferlist &data, Tag *tag,
                Context *on_finish)
    : cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
      tag_class(tag_class), tag(tag), on_finish(on_finish) {
    async_op_tracker.start_op();
    // stash the caller-supplied payload in the output tag up-front
    tag->data = data;
  }
  ~C_AllocateTag() override {
    async_op_tracker.finish_op();
  }
  void send() {
    send_get_next_tag_tid();
  }
  // step 1: read the next available tag tid from the journal header
  void send_get_next_tag_tid() {
    ldout(cct, 20) << "C_AllocateTag: " << __func__ << dendl;
    librados::ObjectReadOperation op;
    client::get_next_tag_tid_start(&op);
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_AllocateTag, &C_AllocateTag::handle_get_next_tag_tid>);
    out_bl.clear();
    int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
    ceph_assert(r == 0);
    comp->release();
  }
  void handle_get_next_tag_tid(int r) {
    ldout(cct, 20) << "C_AllocateTag: " << __func__ << ": r=" << r << dendl;
    if (r == 0) {
      auto iter = out_bl.cbegin();
      r = client::get_next_tag_tid_finish(&iter, &tag->tid);
    }
    if (r < 0) {
      complete(r);
      return;
    }
    send_tag_create();
  }
  // step 2: attempt to create the tag with the tid read above
  void send_tag_create() {
    ldout(cct, 20) << "C_AllocateTag: " << __func__ << dendl;
    librados::ObjectWriteOperation op;
    client::tag_create(&op, tag->tid, tag_class, tag->data);
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_AllocateTag, &C_AllocateTag::handle_tag_create>);
    int r = ioctx.aio_operate(oid, comp, &op);
    ceph_assert(r == 0);
    comp->release();
  }
  void handle_tag_create(int r) {
    ldout(cct, 20) << "C_AllocateTag: " << __func__ << ": r=" << r << dendl;
    if (r == -ESTALE) {
      // another allocator claimed the tid; restart with a fresh tid
      send_get_next_tag_tid();
      return;
    } else if (r < 0) {
      complete(r);
      return;
    }
    send_get_tag();
  }
  // step 3: read back the created tag so the caller sees the stored state
  void send_get_tag() {
    ldout(cct, 20) << "C_AllocateTag: " << __func__ << dendl;
    librados::ObjectReadOperation op;
    client::get_tag_start(&op, tag->tid);
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_AllocateTag, &C_AllocateTag::handle_get_tag>);
    out_bl.clear();
    int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
    ceph_assert(r == 0);
    comp->release();
  }
  void handle_get_tag(int r) {
    ldout(cct, 20) << "C_AllocateTag: " << __func__ << ": r=" << r << dendl;
    if (r == 0) {
      auto iter = out_bl.cbegin();
      cls::journal::Tag journal_tag;
      r = client::get_tag_finish(&iter, &journal_tag);
      if (r == 0) {
        *tag = journal_tag;
      }
    }
    complete(r);
  }
  void finish(int r) override {
    on_finish->complete(r);
  }
};
// One-shot async op: fetch the tag record for 'tag_tid' from the journal
// header object and forward the result to 'on_finish'.
struct C_GetTag : public Context {
  CephContext *cct;
  librados::IoCtx &ioctx;
  const std::string &oid;
  AsyncOpTracker &async_op_tracker;
  uint64_t tag_tid;
  JournalMetadata::Tag *tag;  // out: populated on success
  Context *on_finish;
  bufferlist out_bl;
  C_GetTag(CephContext *cct, librados::IoCtx &ioctx, const std::string &oid,
           AsyncOpTracker &async_op_tracker, uint64_t tag_tid,
           JournalMetadata::Tag *tag, Context *on_finish)
    : cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
      tag_tid(tag_tid), tag(tag), on_finish(on_finish) {
    async_op_tracker.start_op();
  }
  ~C_GetTag() override {
    async_op_tracker.finish_op();
  }
  void send() {
    send_get_tag();
  }
  // issue the async cls_journal get_tag read
  void send_get_tag() {
    librados::ObjectReadOperation op;
    client::get_tag_start(&op, tag_tid);
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_GetTag, &C_GetTag::handle_get_tag>);
    int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
    ceph_assert(r == 0);
    comp->release();
  }
  // decode the response into *tag and complete
  void handle_get_tag(int r) {
    if (r == 0) {
      auto iter = out_bl.cbegin();
      r = client::get_tag_finish(&iter, tag);
    }
    complete(r);
  }
  void finish(int r) override {
    on_finish->complete(r);
  }
};
// Async state machine: list tags after 'start_after_tag_tid', optionally
// filtered by 'tag_class', paging MAX_RETURN entries per round-trip until
// a short page signals the end of the listing.
struct C_GetTags : public Context {
  CephContext *cct;
  librados::IoCtx &ioctx;
  const std::string &oid;
  const std::string &client_id;
  AsyncOpTracker &async_op_tracker;
  uint64_t start_after_tag_tid;  // advanced to the last tid of each page
  boost::optional<uint64_t> tag_class;
  JournalMetadata::Tags *tags;   // out: appended page by page
  Context *on_finish;
  const uint64_t MAX_RETURN = 64;
  bufferlist out_bl;
  C_GetTags(CephContext *cct, librados::IoCtx &ioctx, const std::string &oid,
            const std::string &client_id, AsyncOpTracker &async_op_tracker,
            uint64_t start_after_tag_tid,
            const boost::optional<uint64_t> &tag_class,
            JournalMetadata::Tags *tags, Context *on_finish)
    : cct(cct), ioctx(ioctx), oid(oid), client_id(client_id),
      async_op_tracker(async_op_tracker),
      start_after_tag_tid(start_after_tag_tid), tag_class(tag_class),
      tags(tags), on_finish(on_finish) {
    async_op_tracker.start_op();
  }
  ~C_GetTags() override {
    async_op_tracker.finish_op();
  }
  void send() {
    send_tag_list();
  }
  // request the next page of tags
  void send_tag_list() {
    librados::ObjectReadOperation op;
    client::tag_list_start(&op, start_after_tag_tid, MAX_RETURN, client_id,
                           tag_class);
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_GetTags, &C_GetTags::handle_tag_list>);
    out_bl.clear();
    int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
    ceph_assert(r == 0);
    comp->release();
  }
  void handle_tag_list(int r) {
    if (r == 0) {
      std::set<cls::journal::Tag> journal_tags;
      auto iter = out_bl.cbegin();
      r = client::tag_list_finish(&iter, &journal_tags);
      if (r == 0) {
        for (auto &journal_tag : journal_tags) {
          tags->push_back(journal_tag);
          start_after_tag_tid = journal_tag.tid;
        }
        // a full page implies there may be more tags to fetch
        if (journal_tags.size() == MAX_RETURN) {
          send_tag_list();
          return;
        }
      }
    }
    complete(r);
  }
  void finish(int r) override {
    on_finish->complete(r);
  }
};
struct C_FlushCommitPosition : public Context {
Context *commit_position_ctx;
Context *on_finish;
C_FlushCommitPosition(Context *commit_position_ctx, Context *on_finish)
: commit_position_ctx(commit_position_ctx), on_finish(on_finish) {
}
void finish(int r) override {
if (commit_position_ctx != nullptr) {
commit_position_ctx->complete(r);
}
on_finish->complete(r);
}
};
// Async check that 'tag_tid' is still this client's newest tag: lists up
// to two tags starting at tag_tid and completes with -ESTALE when a tag
// with a larger tid exists.
struct C_AssertActiveTag : public Context {
  CephContext *cct;
  librados::IoCtx &ioctx;
  const std::string &oid;
  AsyncOpTracker &async_op_tracker;
  std::string client_id;
  uint64_t tag_tid;
  Context *on_finish;
  bufferlist out_bl;
  C_AssertActiveTag(CephContext *cct, librados::IoCtx &ioctx,
                    const std::string &oid, AsyncOpTracker &async_op_tracker,
                    const std::string &client_id, uint64_t tag_tid,
                    Context *on_finish)
    : cct(cct), ioctx(ioctx), oid(oid), async_op_tracker(async_op_tracker),
      client_id(client_id), tag_tid(tag_tid), on_finish(on_finish) {
    async_op_tracker.start_op();
  }
  ~C_AssertActiveTag() override {
    async_op_tracker.finish_op();
  }
  void send() {
    ldout(cct, 20) << "C_AssertActiveTag: " << __func__ << dendl;
    librados::ObjectReadOperation op;
    client::tag_list_start(&op, tag_tid, 2, client_id, boost::none);
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      this, &utils::rados_state_callback<
        C_AssertActiveTag, &C_AssertActiveTag::handle_send>);
    int r = ioctx.aio_operate(oid, comp, &op, &out_bl);
    ceph_assert(r == 0);
    comp->release();
  }
  void handle_send(int r) {
    ldout(cct, 20) << "C_AssertActiveTag: " << __func__ << ": r=" << r << dendl;
    std::set<cls::journal::Tag> tags;
    if (r == 0) {
      auto it = out_bl.cbegin();
      r = client::tag_list_finish(&it, &tags);
    }
    // NOTE: since 0 is treated as an uninitialized list filter, we need to
    // load two entries and look at the last tid
    if (r == 0 && !tags.empty() && tags.rbegin()->tid > tag_tid) {
      r = -ESTALE;
    }
    complete(r);
  }
  void finish(int r) override {
    on_finish->complete(r);
  }
};
} // anonymous namespace
// Construct journal metadata state bound to the header object 'oid'.
// The IoCtx is duplicated so this object owns an independent pool handle
// for its async operations.
JournalMetadata::JournalMetadata(ContextWQ *work_queue, SafeTimer *timer,
                                 ceph::mutex *timer_lock, librados::IoCtx &ioctx,
                                 const std::string &oid,
                                 const std::string &client_id,
                                 const Settings &settings)
    : m_oid(oid),
      m_client_id(client_id), m_settings(settings),
      m_work_queue(work_queue), m_timer(timer), m_timer_lock(timer_lock),
      m_watch_ctx(this)
{
  m_ioctx.dup(ioctx);
  m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
}
JournalMetadata::~JournalMetadata() {
  std::lock_guard locker{m_lock};
  // shut_down() must complete before destruction
  ceph_assert(!m_initialized);
}
// Initialize the metadata object: establish a watch on the journal header
// object, then load the immutable metadata (order/splay width/pool id) and
// finally the mutable metadata (via C_ImmutableMetadata -> refresh()).
// 'on_finish' is completed asynchronously with the overall result; on a
// watch failure the handle is cleared and the error is propagated.
void JournalMetadata::init(Context *on_finish) {
  {
    std::lock_guard locker{m_lock};
    ceph_assert(!m_initialized);
    m_initialized = true;
  }
  // chain the init sequence (reverse order)
  on_finish = utils::create_async_context_callback(
    this, on_finish);
  on_finish = new C_ImmutableMetadata(this, on_finish);
  on_finish = new LambdaContext([this, on_finish](int r) {
      if (r < 0) {
        // ": " separator matches the equivalent message in
        // handle_watch_reset()
        lderr(m_cct) << __func__ << ": failed to watch journal: "
                     << cpp_strerror(r) << dendl;
        std::lock_guard locker{m_lock};
        m_watch_handle = 0;
        on_finish->complete(r);
        return;
      }
      get_immutable_metadata(&m_order, &m_splay_width, &m_pool_id, on_finish);
    });
  librados::AioCompletion *comp = librados::Rados::aio_create_completion(
    on_finish, utils::rados_ctx_callback);
  int r = m_ioctx.aio_watch(m_oid, comp, &m_watch_handle, &m_watch_ctx);
  ceph_assert(r == 0);
  comp->release();
}
// Tear down the metadata object.  The context chain is built in reverse,
// so the runtime order is: unwatch the header (if watching), flush the
// commit position, flush the watch callbacks, wait for in-flight async
// ops, then complete 'on_finish'.
void JournalMetadata::shut_down(Context *on_finish) {
  ldout(m_cct, 20) << __func__ << dendl;
  uint64_t watch_handle = 0;
  {
    std::lock_guard locker{m_lock};
    m_initialized = false;
    // claim the watch handle so concurrent watch resets cannot reuse it
    std::swap(watch_handle, m_watch_handle);
  }
  // chain the shut down sequence (reverse order)
  on_finish = utils::create_async_context_callback(
    this, on_finish);
  on_finish = new LambdaContext([this, on_finish](int r) {
      ldout(m_cct, 20) << "shut_down: waiting for ops" << dendl;
      m_async_op_tracker.wait_for_ops(on_finish);
    });
  on_finish = new LambdaContext([this, on_finish](int r) {
      ldout(m_cct, 20) << "shut_down: flushing watch" << dendl;
      librados::Rados rados(m_ioctx);
      librados::AioCompletion *comp = librados::Rados::aio_create_completion(
        on_finish, utils::rados_ctx_callback);
      r = rados.aio_watch_flush(comp);
      ceph_assert(r == 0);
      comp->release();
    });
  on_finish = new LambdaContext([this, on_finish](int r) {
      flush_commit_position(on_finish);
    });
  if (watch_handle != 0) {
    librados::AioCompletion *comp = librados::Rados::aio_create_completion(
      on_finish, utils::rados_ctx_callback);
    int r = m_ioctx.aio_unwatch(watch_handle, comp);
    ceph_assert(r == 0);
    comp->release();
  } else {
    // never watching: start the chain immediately
    on_finish->complete(0);
  }
}
// Thin async wrapper: read the journal's immutable metadata (object size
// order, splay width, data pool id) from the header object.
void JournalMetadata::get_immutable_metadata(uint8_t *order,
					     uint8_t *splay_width,
					     int64_t *pool_id,
					     Context *on_finish) {
  client::get_immutable_metadata(m_ioctx, m_oid, order, splay_width, pool_id,
                                 on_finish);
}
// Thin async wrapper: read the journal's mutable metadata (minimum/active
// object sets plus all registered clients) from the header object.
void JournalMetadata::get_mutable_metadata(uint64_t *minimum_set,
					   uint64_t *active_set,
					   RegisteredClients *clients,
					   Context *on_finish) {
  client::get_mutable_metadata(m_ioctx, m_oid, minimum_set, active_set, clients,
                               on_finish);
}
// Register this instance's client id (with opaque payload 'data') on the
// journal header.  C_NotifyUpdate broadcasts a header-update notification
// after the write completes.
void JournalMetadata::register_client(const bufferlist &data,
                                      Context *on_finish) {
  ldout(m_cct, 10) << __func__ << ": " << m_client_id << dendl;
  librados::ObjectWriteOperation op;
  client::client_register(&op, m_client_id, data);
  C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(ctx,
                                           utils::rados_ctx_callback);
  int r = m_ioctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Replace this client's opaque payload on the journal header and notify
// other watchers of the update.
void JournalMetadata::update_client(const bufferlist &data,
                                    Context *on_finish) {
  ldout(m_cct, 10) << __func__ << ": " << m_client_id << dendl;
  librados::ObjectWriteOperation op;
  client::client_update_data(&op, m_client_id, data);
  C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
  int r = m_ioctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Remove this client's registration from the journal header and notify
// other watchers of the update.
void JournalMetadata::unregister_client(Context *on_finish) {
  ceph_assert(!m_client_id.empty());
  ldout(m_cct, 10) << __func__ << ": " << m_client_id << dendl;
  librados::ObjectWriteOperation op;
  client::client_unregister(&op, m_client_id);
  C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
  int r = m_ioctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Allocate a new tag in 'tag_class' (see C_AllocateTag for the retry
// protocol); a header-update notification is sent once it completes.
void JournalMetadata::allocate_tag(uint64_t tag_class, const bufferlist &data,
                                   Tag *tag, Context *on_finish) {
  on_finish = new C_NotifyUpdate(this, on_finish);
  C_AllocateTag *ctx = new C_AllocateTag(m_cct, m_ioctx, m_oid,
                                         m_async_op_tracker, tag_class,
                                         data, tag, on_finish);
  ctx->send();
}
// Fetch an arbitrary registered client's record by id.
void JournalMetadata::get_client(const std::string &client_id,
                                 cls::journal::Client *client,
                                 Context *on_finish) {
  C_GetClient *ctx = new C_GetClient(m_cct, m_ioctx, m_oid, m_async_op_tracker,
                                     client_id, client, on_finish);
  ctx->send();
}
// Fetch a single tag record by tid.
void JournalMetadata::get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish) {
  C_GetTag *ctx = new C_GetTag(m_cct, m_ioctx, m_oid, m_async_op_tracker,
                               tag_tid, tag, on_finish);
  ctx->send();
}
// List tags after 'start_after_tag_tid', optionally filtered by tag class.
void JournalMetadata::get_tags(uint64_t start_after_tag_tid,
                               const boost::optional<uint64_t> &tag_class,
                               Tags *tags, Context *on_finish) {
  C_GetTags *ctx = new C_GetTags(m_cct, m_ioctx, m_oid, m_client_id,
                                 m_async_op_tracker, start_after_tag_tid,
                                 tag_class, tags, on_finish);
  ctx->send();
}
// Register a metadata-update listener.  Blocks until no update
// notifications are being dispatched (see handle_refresh_complete, which
// drops m_lock while invoking listeners) so the listener list is never
// mutated mid-iteration.
void JournalMetadata::add_listener(JournalMetadataListener *listener) {
  std::unique_lock locker{m_lock};
  m_update_cond.wait(locker, [this] {
      return m_update_notifications <= 0;
    });
  m_listeners.push_back(listener);
}
// Unregister a metadata-update listener; same dispatch barrier as
// add_listener.
void JournalMetadata::remove_listener(JournalMetadataListener *listener) {
  std::unique_lock locker{m_lock};
  m_update_cond.wait(locker, [this] {
      return m_update_notifications <= 0;
    });
  m_listeners.remove(listener);
}
// Advance the journal's minimum (trim) object set.  No-op if the new
// value does not move forward.  The local cache is updated optimistically
// while the write and update notification proceed asynchronously.
void JournalMetadata::set_minimum_set(uint64_t object_set) {
  std::lock_guard locker{m_lock};
  ldout(m_cct, 20) << __func__ << ": current=" << m_minimum_set
                   << ", new=" << object_set << dendl;
  if (m_minimum_set >= object_set) {
    return;
  }
  librados::ObjectWriteOperation op;
  client::set_minimum_set(&op, object_set);
  C_NotifyUpdate *ctx = new C_NotifyUpdate(this);
  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
  int r = m_ioctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
  m_minimum_set = object_set;
}
// Synchronous wrapper around the async set_active_set overload.
int JournalMetadata::set_active_set(uint64_t object_set) {
  C_SaferCond ctx;
  set_active_set(object_set, &ctx);
  return ctx.wait();
}
// Advance the journal's active object set.  Completes immediately (via
// the work queue) when the new value does not move forward; otherwise the
// local cache is updated optimistically while the write and update
// notification proceed asynchronously.
void JournalMetadata::set_active_set(uint64_t object_set, Context *on_finish) {
  std::lock_guard locker{m_lock};
  ldout(m_cct, 20) << __func__ << ": current=" << m_active_set
                   << ", new=" << object_set << dendl;
  if (m_active_set >= object_set) {
    m_work_queue->queue(on_finish, 0);
    return;
  }
  librados::ObjectWriteOperation op;
  client::set_active_set(&op, object_set);
  C_NotifyUpdate *ctx = new C_NotifyUpdate(this, on_finish);
  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
  int r = m_ioctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
  m_active_set = object_set;
}
// Async check that 'tag_tid' is still this client's most recent tag; the
// context completes with -ESTALE when a newer tag exists (C_AssertActiveTag).
void JournalMetadata::assert_active_tag(uint64_t tag_tid, Context *on_finish) {
  std::lock_guard locker{m_lock};
  C_AssertActiveTag *ctx = new C_AssertActiveTag(m_cct, m_ioctx, m_oid,
                                                 m_async_op_tracker,
                                                 m_client_id, tag_tid,
                                                 on_finish);
  ctx->send();
}
// Synchronous wrapper around the async flush_commit_position overload.
void JournalMetadata::flush_commit_position() {
  ldout(m_cct, 20) << __func__ << dendl;
  C_SaferCond ctx;
  flush_commit_position(&ctx);
  ctx.wait();
}
// Force any pending commit-position update to be written immediately
// instead of waiting for the commit interval timer.  'on_safe' (optional)
// is queued behind any in-progress flushes and completed when they finish.
void JournalMetadata::flush_commit_position(Context *on_safe) {
  ldout(m_cct, 20) << __func__ << dendl;
  std::scoped_lock locker{*m_timer_lock, m_lock};
  if (m_commit_position_ctx == nullptr && m_flush_commits_in_progress == 0) {
    // nothing to flush
    if (on_safe != nullptr) {
      m_work_queue->queue(on_safe, 0);
    }
    return;
  }
  if (on_safe != nullptr) {
    m_flush_commit_position_ctxs.push_back(on_safe);
  }
  if (m_commit_position_ctx == nullptr) {
    // a flush is already in-flight; on_safe rides along with it
    return;
  }
  // cancel the pending timer event and run the flush right now
  cancel_commit_task();
  handle_commit_position_task();
}
void JournalMetadata::reserve_entry_tid(uint64_t tag_tid, uint64_t entry_tid) {
std::lock_guard locker{m_lock};
uint64_t &allocated_entry_tid = m_allocated_entry_tids[tag_tid];
if (allocated_entry_tid <= entry_tid) {
allocated_entry_tid = entry_tid + 1;
}
}
bool JournalMetadata::get_last_allocated_entry_tid(uint64_t tag_tid,
uint64_t *entry_tid) const {
std::lock_guard locker{m_lock};
AllocatedEntryTids::const_iterator it = m_allocated_entry_tids.find(tag_tid);
if (it == m_allocated_entry_tids.end()) {
return false;
}
ceph_assert(it->second > 0);
*entry_tid = it->second - 1;
return true;
}
// Init-chain step: invoked once the immutable metadata read finishes;
// propagates errors, otherwise continues with a mutable-metadata refresh.
void JournalMetadata::handle_immutable_metadata(int r, Context *on_init) {
  if (r < 0) {
    lderr(m_cct) << "failed to initialize immutable metadata: "
                 << cpp_strerror(r) << dendl;
    on_init->complete(r);
    return;
  }
  ldout(m_cct, 10) << "initialized immutable metadata" << dendl;
  refresh(on_init);
}
// Re-read the mutable metadata from the header object.  Multiple
// concurrent refreshes are coalesced: each caller's context is queued and
// all are completed by the last refresh to finish (handle_refresh_complete).
void JournalMetadata::refresh(Context *on_complete) {
  ldout(m_cct, 10) << "refreshing mutable metadata" << dendl;
  {
    std::lock_guard locker{m_lock};
    if (on_complete != nullptr) {
      m_refresh_ctxs.push_back(on_complete);
    }
    ++m_refreshes_in_progress;
  }
  auto refresh = new C_Refresh(this);
  get_mutable_metadata(&refresh->minimum_set, &refresh->active_set,
                       &refresh->registered_clients, refresh);
}
// Completion of a mutable-metadata refresh: absorb the new state, notify
// listeners, then complete the queued refresh contexts.  m_lock is
// manipulated manually because it must be dropped while listener
// callbacks run and while completing contexts.
void JournalMetadata::handle_refresh_complete(C_Refresh *refresh, int r) {
  ldout(m_cct, 10) << "refreshed mutable metadata: r=" << r << dendl;
  m_lock.lock();
  if (r == 0) {
    // locate our own client record within the refreshed registration set
    Client client(m_client_id, bufferlist());
    RegisteredClients::iterator it = refresh->registered_clients.find(client);
    if (it != refresh->registered_clients.end()) {
      if (it->state == cls::journal::CLIENT_STATE_DISCONNECTED) {
        ldout(m_cct, 0) << "client flagged disconnected: " << m_client_id
                        << dendl;
      }
      // sets only ever move forward; guard against stale reads
      m_minimum_set = std::max(m_minimum_set, refresh->minimum_set);
      m_active_set = std::max(m_active_set, refresh->active_set);
      m_registered_clients = refresh->registered_clients;
      m_client = *it;
      // m_update_notifications > 0 blocks add/remove_listener while the
      // lock is dropped for the listener callbacks below
      ++m_update_notifications;
      m_lock.unlock();
      for (Listeners::iterator it = m_listeners.begin();
           it != m_listeners.end(); ++it) {
        (*it)->handle_update(this);
      }
      m_lock.lock();
      if (--m_update_notifications == 0) {
        m_update_cond.notify_all();
      }
    } else {
      lderr(m_cct) << "failed to locate client: " << m_client_id << dendl;
      r = -ENOENT;
    }
  }
  // only the last in-flight refresh completes the queued contexts
  Contexts refresh_ctxs;
  ceph_assert(m_refreshes_in_progress > 0);
  --m_refreshes_in_progress;
  if (m_refreshes_in_progress == 0) {
    std::swap(refresh_ctxs, m_refresh_ctxs);
  }
  m_lock.unlock();
  for (auto ctx : refresh_ctxs) {
    ctx->complete(r);
  }
}
// Cancel the pending deferred commit-position timer event.  Requires both
// the timer lock and m_lock, a pending commit context, and a scheduled
// timer task.
void JournalMetadata::cancel_commit_task() {
  ldout(m_cct, 20) << __func__ << dendl;
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(m_commit_position_ctx != nullptr);
  ceph_assert(m_commit_position_task_ctx != nullptr);
  m_timer->cancel_event(m_commit_position_task_ctx);
  m_commit_position_task_ctx = NULL;
}
// Arm the deferred commit-position timer (settings.commit_interval) if it
// is not already scheduled.  Requires both the timer lock and m_lock.
void JournalMetadata::schedule_commit_task() {
  ldout(m_cct, 20) << __func__ << dendl;
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(m_commit_position_ctx != nullptr);
  if (m_commit_position_task_ctx == nullptr) {
    m_commit_position_task_ctx =
      m_timer->add_event_after(m_settings.commit_interval,
                               new C_CommitPositionTask(this));
  }
}
// Timer (or forced-flush) callback: write the pending commit position to
// the header object.  The completion chain is built in reverse, so the
// runtime order is: client_commit write -> disconnect laggy clients ->
// refresh (ignoring our own notification) -> notify watchers -> complete
// the pending commit context and any queued flush contexts.
void JournalMetadata::handle_commit_position_task() {
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ldout(m_cct, 20) << __func__ << ": "
                   << "client_id=" << m_client_id << ", "
                   << "commit_position=" << m_commit_position << dendl;
  m_commit_position_task_ctx = nullptr;
  // claim the pending commit context; a new one can now be queued
  Context* commit_position_ctx = nullptr;
  std::swap(commit_position_ctx, m_commit_position_ctx);
  m_async_op_tracker.start_op();
  ++m_flush_commits_in_progress;
  Context* ctx = new LambdaContext([this, commit_position_ctx](int r) {
      // last in-flight flush drains the queued flush_commit_position ctxs
      Contexts flush_commit_position_ctxs;
      m_lock.lock();
      ceph_assert(m_flush_commits_in_progress > 0);
      --m_flush_commits_in_progress;
      if (m_flush_commits_in_progress == 0) {
        std::swap(flush_commit_position_ctxs, m_flush_commit_position_ctxs);
      }
      m_lock.unlock();
      commit_position_ctx->complete(0);
      for (auto ctx : flush_commit_position_ctxs) {
        ctx->complete(0);
      }
      m_async_op_tracker.finish_op();
    });
  ctx = new C_NotifyUpdate(this, ctx);
  ctx = new LambdaContext([this, ctx](int r) {
      // manually kick off a refresh in case the notification is missed
      // and ignore the next notification that we are about to send
      m_lock.lock();
      ++m_ignore_watch_notifies;
      m_lock.unlock();
      refresh(ctx);
    });
  ctx = new LambdaContext([this, ctx](int r) {
      schedule_laggy_clients_disconnect(ctx);
    });
  librados::ObjectWriteOperation op;
  client::client_commit(&op, m_client_id, m_commit_position);
  auto comp = librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
  int r = m_ioctx.aio_operate(m_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Schedule a watch re-establishment attempt one second from now.
void JournalMetadata::schedule_watch_reset() {
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  m_timer->add_event_after(1, new C_WatchReset(this));
}
// Timer callback: try to re-establish the watch on the header object.
// Retries on failure (unless shut down); on success refreshes the mutable
// metadata since notifications may have been missed while unwatched.
void JournalMetadata::handle_watch_reset() {
  ceph_assert(ceph_mutex_is_locked(*m_timer_lock));
  if (!m_initialized) {
    return;
  }
  int r = m_ioctx.watch2(m_oid, &m_watch_handle, &m_watch_ctx);
  if (r < 0) {
    if (r == -ENOENT) {
      ldout(m_cct, 5) << __func__ << ": journal header not found" << dendl;
    } else if (r == -EBLOCKLISTED) {
      ldout(m_cct, 5) << __func__ << ": client blocklisted" << dendl;
    } else {
      lderr(m_cct) << __func__ << ": failed to watch journal: "
                   << cpp_strerror(r) << dendl;
    }
    schedule_watch_reset();
  } else {
    ldout(m_cct, 10) << __func__ << ": reset journal watch" << dendl;
    refresh(NULL);
  }
}
// Watch callback: the journal header was updated by some client.  Ack the
// notification, then refresh — unless this notification was triggered by
// our own commit write (m_ignore_watch_notifies, see
// handle_commit_position_task) in which case it is swallowed.
void JournalMetadata::handle_watch_notify(uint64_t notify_id, uint64_t cookie) {
  ldout(m_cct, 10) << "journal header updated" << dendl;
  bufferlist bl;
  m_ioctx.notify_ack(m_oid, notify_id, cookie, bl);
  {
    std::lock_guard locker{m_lock};
    if (m_ignore_watch_notifies > 0) {
      --m_ignore_watch_notifies;
      return;
    }
  }
  refresh(NULL);
}
// Watch error callback: log the cause, drop the broken watch handle, and
// (when still initialized and the header still exists) schedule a reset.
void JournalMetadata::handle_watch_error(int err) {
  if (err == -ENOTCONN) {
    ldout(m_cct, 5) << "journal watch error: header removed" << dendl;
  } else if (err == -EBLOCKLISTED) {
    lderr(m_cct) << "journal watch error: client blocklisted" << dendl;
  } else {
    lderr(m_cct) << "journal watch error: " << cpp_strerror(err) << dendl;
  }
  std::scoped_lock locker{*m_timer_lock, m_lock};
  // release old watch on error
  if (m_watch_handle != 0) {
    m_ioctx.unwatch2(m_watch_handle);
    m_watch_handle = 0;
  }
  if (m_initialized && err != -ENOENT) {
    schedule_watch_reset();
  }
}
// Allocate a monotonically increasing commit tid and record the pending
// (object_num, tag_tid, entry_tid) entry awaiting commitment.
uint64_t JournalMetadata::allocate_commit_tid(uint64_t object_num,
                                              uint64_t tag_tid,
                                              uint64_t entry_tid) {
  std::lock_guard locker{m_lock};
  uint64_t commit_tid = ++m_commit_tid;
  m_pending_commit_tids[commit_tid] = CommitEntry(object_num, tag_tid,
                                                  entry_tid);
  ldout(m_cct, 20) << "allocated commit tid: commit_tid=" << commit_tid << " ["
                   << "object_num=" << object_num << ", "
                   << "tag_tid=" << tag_tid << ", "
                   << "entry_tid=" << entry_tid << "]"
                   << dendl;
  return commit_tid;
}
// Re-point a pending commit entry at a later journal object after the
// original object overflowed; the object number must strictly increase.
void JournalMetadata::overflow_commit_tid(uint64_t commit_tid,
                                          uint64_t object_num) {
  std::lock_guard locker{m_lock};
  auto it = m_pending_commit_tids.find(commit_tid);
  ceph_assert(it != m_pending_commit_tids.end());
  ceph_assert(it->second.object_num < object_num);
  ldout(m_cct, 20) << __func__ << ": "
                   << "commit_tid=" << commit_tid << ", "
                   << "old_object_num=" << it->second.object_num << ", "
                   << "new_object_num=" << object_num << dendl;
  it->second.object_num = object_num;
}
// Look up a pending commit entry; the tid must exist.
void JournalMetadata::get_commit_entry(uint64_t commit_tid,
                                       uint64_t *object_num,
                                       uint64_t *tag_tid, uint64_t *entry_tid) {
  std::lock_guard locker{m_lock};
  auto it = m_pending_commit_tids.find(commit_tid);
  ceph_assert(it != m_pending_commit_tids.end());
  *object_num = it->second.object_num;
  *tag_tid = it->second.tag_tid;
  *entry_tid = it->second.entry_tid;
}
// Mark 'commit_tid' as committed.  Commits may arrive out of order, so
// the in-order prefix of committed pending entries is folded into the
// commit position; a deferred write is then scheduled.  Any previously
// pending commit context is superseded and completed with -ESTALE
// (outside the locks).
void JournalMetadata::committed(uint64_t commit_tid,
                                const CreateContext &create_context) {
  ldout(m_cct, 20) << "committed tid=" << commit_tid << dendl;
  ObjectSetPosition commit_position;
  Context *stale_ctx = nullptr;
  {
    std::scoped_lock locker{*m_timer_lock, m_lock};
    ceph_assert(commit_tid > m_commit_position_tid);
    if (!m_commit_position.object_positions.empty()) {
      // in-flight commit position update
      commit_position = m_commit_position;
    } else {
      // safe commit position
      commit_position = m_client.commit_position;
    }
    CommitTids::iterator it = m_pending_commit_tids.find(commit_tid);
    ceph_assert(it != m_pending_commit_tids.end());
    CommitEntry &commit_entry = it->second;
    commit_entry.committed = true;
    // fold the contiguous committed prefix (lowest tids first) into the
    // commit position; newest position goes to the front
    bool update_commit_position = false;
    while (!m_pending_commit_tids.empty()) {
      CommitTids::iterator it = m_pending_commit_tids.begin();
      CommitEntry &commit_entry = it->second;
      if (!commit_entry.committed) {
        break;
      }
      commit_position.object_positions.emplace_front(
        commit_entry.object_num, commit_entry.tag_tid,
        commit_entry.entry_tid);
      m_pending_commit_tids.erase(it);
      update_commit_position = true;
    }
    if (!update_commit_position) {
      return;
    }
    // prune the position to have one position per splay offset
    std::set<uint8_t> in_use_splay_offsets;
    ObjectPositions::iterator ob_it = commit_position.object_positions.begin();
    while (ob_it != commit_position.object_positions.end()) {
      uint8_t splay_offset = ob_it->object_number % m_splay_width;
      if (!in_use_splay_offsets.insert(splay_offset).second) {
        ob_it = commit_position.object_positions.erase(ob_it);
      } else {
        ++ob_it;
      }
    }
    // replace any pending commit context and arm the deferred write
    stale_ctx = m_commit_position_ctx;
    m_commit_position_ctx = create_context();
    m_commit_position = commit_position;
    m_commit_position_tid = commit_tid;
    ldout(m_cct, 20) << "updated commit position: " << commit_position << ", "
                     << "on_safe=" << m_commit_position_ctx << dendl;
    schedule_commit_task();
  }
  if (stale_ctx != nullptr) {
    ldout(m_cct, 20) << "canceling stale commit: on_safe=" << stale_ctx
                     << dendl;
    stale_ctx->complete(-ESTALE);
  }
}
// Synchronously notify header watchers of an update (5000ms timeout,
// replies discarded).
void JournalMetadata::notify_update() {
  ldout(m_cct, 10) << "notifying journal header update" << dendl;
  bufferlist bl;
  m_ioctx.notify2(m_oid, bl, 5000, NULL);
}
// Asynchronously notify header watchers; completion is routed through
// handle_notified before 'on_safe' fires.
void JournalMetadata::async_notify_update(Context *on_safe) {
  ldout(m_cct, 10) << "async notifying journal header update" << dendl;
  C_AioNotify *ctx = new C_AioNotify(this, on_safe);
  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
  bufferlist bl;
  int r = m_ioctx.aio_notify(m_oid, comp, bl, 5000, NULL);
  ceph_assert(r == 0);
  comp->release();
}
// Block until all tracked async operations have completed.
void JournalMetadata::wait_for_ops() {
  C_SaferCond ctx;
  m_async_op_tracker.wait_for_ops(&ctx);
  ctx.wait();
}
// Completion callback for async_notify_update; result is log-only.
void JournalMetadata::handle_notified(int r) {
  ldout(m_cct, 10) << "notified journal header update: r=" << r << dendl;
}
// Flag registered clients as DISCONNECTED when their commit position lags
// the active set by more than settings.max_concurrent_object_sets.  One
// async client_update_state write is chained per laggy client (contexts
// built in reverse over 'ctx'); disabled when the setting is <= 0.  Skips
// ourselves, already-disconnected clients, and ignored client ids.
void JournalMetadata::schedule_laggy_clients_disconnect(Context *on_finish) {
  ldout(m_cct, 20) << __func__ << dendl;
  if (m_settings.max_concurrent_object_sets <= 0) {
    on_finish->complete(0);
    return;
  }
  Context *ctx = on_finish;
  {
    std::lock_guard locker{m_lock};
    for (auto &c : m_registered_clients) {
      if (c.state == cls::journal::CLIENT_STATE_DISCONNECTED ||
          c.id == m_client_id ||
          m_settings.ignored_laggy_clients.count(c.id) > 0) {
        continue;
      }
      const std::string &client_id = c.id;
      // derive the object set from the client's newest object position;
      // clients with no commit position count as object set 0
      uint64_t object_set = 0;
      if (!c.commit_position.object_positions.empty()) {
        auto &position = *(c.commit_position.object_positions.begin());
        object_set = position.object_number / m_splay_width;
      }
      if (m_active_set > object_set + m_settings.max_concurrent_object_sets) {
        ldout(m_cct, 1) << __func__ << ": " << client_id
                        << ": scheduling disconnect" << dendl;
        // each lambda issues its write and chains into the previous ctx
        ctx = new LambdaContext([this, client_id, ctx](int r1) {
            ldout(m_cct, 10) << __func__ << ": " << client_id
                             << ": flagging disconnected" << dendl;
            librados::ObjectWriteOperation op;
            client::client_update_state(
              &op, client_id, cls::journal::CLIENT_STATE_DISCONNECTED);
            auto comp = librados::Rados::aio_create_completion(
              ctx, utils::rados_ctx_callback);
            int r = m_ioctx.aio_operate(m_oid, comp, &op);
            ceph_assert(r == 0);
            comp->release();
          });
      }
    }
  }
  if (ctx == on_finish) {
    ldout(m_cct, 20) << __func__ << ": no laggy clients to disconnect" << dendl;
  }
  // kick the chain (or complete immediately when nothing was scheduled)
  ctx->complete(0);
}
// Render the registered client set as "[client, client, ...]".
std::ostream &operator<<(std::ostream &os,
                         const JournalMetadata::RegisteredClients &clients) {
  os << "[";
  const char *separator = "";
  for (const auto &registered_client : clients) {
    os << separator << registered_client;
    separator = ", ";
  }
  os << "]";
  return os;
}
// Debug dump of the metadata state; takes m_lock for a consistent snapshot.
std::ostream &operator<<(std::ostream &os,
			 const JournalMetadata &jm) {
  std::lock_guard locker{jm.m_lock};
  os << "[oid=" << jm.m_oid << ", "
     << "initialized=" << jm.m_initialized << ", "
     << "order=" << (int)jm.m_order << ", "
     << "splay_width=" << (int)jm.m_splay_width << ", "
     << "pool_id=" << jm.m_pool_id << ", "
     << "minimum_set=" << jm.m_minimum_set << ", "
     << "active_set=" << jm.m_active_set << ", "
     << "client_id=" << jm.m_client_id << ", "
     << "commit_tid=" << jm.m_commit_tid << ", "
     << "commit_interval=" << jm.m_settings.commit_interval << ", "
     << "commit_position=" << jm.m_commit_position << ", "
     << "registered_clients=" << jm.m_registered_clients << "]";
  return os;
}
} // namespace journal
| 35,478 | 29.427959 | 137 | cc |
null | ceph-main/src/journal/JournalMetadata.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_METADATA_H
#define CEPH_JOURNAL_JOURNAL_METADATA_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Cond.h"
#include "common/Timer.h"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "common/WorkQueue.h"
#include "cls/journal/cls_journal_types.h"
#include "journal/JournalMetadataListener.h"
#include "journal/Settings.h"
#include <boost/noncopyable.hpp>
#include <boost/optional.hpp>
#include <functional>
#include <list>
#include <map>
#include <string>
#include "include/ceph_assert.h"
namespace journal {
class JournalMetadata : public RefCountedObject, boost::noncopyable {
public:
typedef std::function<Context*()> CreateContext;
typedef cls::journal::ObjectPosition ObjectPosition;
typedef cls::journal::ObjectPositions ObjectPositions;
typedef cls::journal::ObjectSetPosition ObjectSetPosition;
typedef cls::journal::Client Client;
typedef cls::journal::Tag Tag;
typedef std::set<Client> RegisteredClients;
typedef std::list<Tag> Tags;
void init(Context *on_init);
void shut_down(Context *on_finish);
bool is_initialized() const { return m_initialized; }
void get_immutable_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id, Context *on_finish);
void get_mutable_metadata(uint64_t *minimum_set, uint64_t *active_set,
RegisteredClients *clients, Context *on_finish);
void add_listener(JournalMetadataListener *listener);
void remove_listener(JournalMetadataListener *listener);
void register_client(const bufferlist &data, Context *on_finish);
void update_client(const bufferlist &data, Context *on_finish);
void unregister_client(Context *on_finish);
void get_client(const std::string &client_id, cls::journal::Client *client,
Context *on_finish);
void allocate_tag(uint64_t tag_class, const bufferlist &data,
Tag *tag, Context *on_finish);
void get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish);
void get_tags(uint64_t start_after_tag_tid,
const boost::optional<uint64_t> &tag_class, Tags *tags,
Context *on_finish);
inline const Settings &get_settings() const {
return m_settings;
}
inline const std::string &get_client_id() const {
return m_client_id;
}
inline uint8_t get_order() const {
return m_order;
}
inline uint64_t get_object_size() const {
return 1 << m_order;
}
  /// number of objects each object set is striped across
  inline uint8_t get_splay_width() const {
    return m_splay_width;
  }
  /// pool holding the journal data objects (-1 until initialized)
  inline int64_t get_pool_id() const {
    return m_pool_id;
  }
  /// queue a context for asynchronous completion on the work queue
  inline void queue(Context *on_finish, int r) {
    m_work_queue->queue(on_finish, r);
  }
  inline ContextWQ *get_work_queue() {
    return m_work_queue;
  }
  inline SafeTimer &get_timer() {
    return *m_timer;
  }
  inline ceph::mutex &get_timer_lock() {
    return *m_timer_lock;
  }
  void set_minimum_set(uint64_t object_set);
  /// lowest (non-trimmed) object set -- guarded by m_lock
  inline uint64_t get_minimum_set() const {
    std::lock_guard locker{m_lock};
    return m_minimum_set;
  }
  int set_active_set(uint64_t object_set);
  void set_active_set(uint64_t object_set, Context *on_finish);
  /// currently active (writable) object set -- guarded by m_lock
  inline uint64_t get_active_set() const {
    std::lock_guard locker{m_lock};
    return m_active_set;
  }
  void assert_active_tag(uint64_t tag_tid, Context *on_finish);
  void flush_commit_position();
  void flush_commit_position(Context *on_safe);
  /// snapshot of this client's recorded commit position
  void get_commit_position(ObjectSetPosition *commit_position) const {
    std::lock_guard locker{m_lock};
    *commit_position = m_client.commit_position;
  }
  /// snapshot of all clients registered against the journal
  void get_registered_clients(RegisteredClients *registered_clients) {
    std::lock_guard locker{m_lock};
    *registered_clients = m_registered_clients;
  }
  /// hand out the next sequential entry tid for the given tag
  inline uint64_t allocate_entry_tid(uint64_t tag_tid) {
    std::lock_guard locker{m_lock};
    return m_allocated_entry_tids[tag_tid]++;
  }
void reserve_entry_tid(uint64_t tag_tid, uint64_t entry_tid);
bool get_last_allocated_entry_tid(uint64_t tag_tid, uint64_t *entry_tid) const;
uint64_t allocate_commit_tid(uint64_t object_num, uint64_t tag_tid,
uint64_t entry_tid);
void overflow_commit_tid(uint64_t commit_tid, uint64_t object_num);
void get_commit_entry(uint64_t commit_tid, uint64_t *object_num,
uint64_t *tag_tid, uint64_t *entry_tid);
void committed(uint64_t commit_tid, const CreateContext &create_context);
void notify_update();
void async_notify_update(Context *on_safe);
void wait_for_ops();
private:
FRIEND_MAKE_REF(JournalMetadata);
JournalMetadata(ContextWQ *work_queue, SafeTimer *timer, ceph::mutex *timer_lock,
librados::IoCtx &ioctx, const std::string &oid,
const std::string &client_id, const Settings &settings);
~JournalMetadata() override;
typedef std::map<uint64_t, uint64_t> AllocatedEntryTids;
typedef std::list<JournalMetadataListener*> Listeners;
typedef std::list<Context*> Contexts;
  /// a pending commit record: identifies the journal object, tag and
  /// entry a commit tid refers to; 'committed' is presumably flipped once
  /// the entry is acknowledged (managed by the .cc implementation)
  struct CommitEntry {
    uint64_t object_num;
    uint64_t tag_tid;
    uint64_t entry_tid;
    bool committed;

    CommitEntry() : object_num(0), tag_tid(0), entry_tid(0), committed(false) {
    }
    CommitEntry(uint64_t _object_num, uint64_t _tag_tid, uint64_t _entry_tid)
      : object_num(_object_num), tag_tid(_tag_tid), entry_tid(_entry_tid),
        committed(false) {
    }
  };
typedef std::map<uint64_t, CommitEntry> CommitTids;
  /// forwards librados watch notifications/errors to the metadata object
  struct C_WatchCtx : public librados::WatchCtx2 {
    JournalMetadata *journal_metadata;

    C_WatchCtx(JournalMetadata *_journal_metadata)
      : journal_metadata(_journal_metadata) {}

    void handle_notify(uint64_t notify_id, uint64_t cookie,
                       uint64_t notifier_id, bufferlist& bl) override {
      journal_metadata->handle_watch_notify(notify_id, cookie);
    }
    void handle_error(uint64_t cookie, int err) override {
      journal_metadata->handle_watch_error(err);
    }
  };

  /// completion that invokes handle_watch_reset(); pins the async op
  /// tracker for its entire lifetime (ctor -> dtor)
  struct C_WatchReset : public Context {
    JournalMetadata *journal_metadata;

    C_WatchReset(JournalMetadata *_journal_metadata)
      : journal_metadata(_journal_metadata) {
      journal_metadata->m_async_op_tracker.start_op();
    }
    ~C_WatchReset() override {
      journal_metadata->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      journal_metadata->handle_watch_reset();
    }
  };

  /// completion that runs handle_commit_position_task() under m_lock
  struct C_CommitPositionTask : public Context {
    JournalMetadata *journal_metadata;

    C_CommitPositionTask(JournalMetadata *_journal_metadata)
      : journal_metadata(_journal_metadata) {
      journal_metadata->m_async_op_tracker.start_op();
    }
    ~C_CommitPositionTask() override {
      journal_metadata->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      std::lock_guard locker{journal_metadata->m_lock};
      journal_metadata->handle_commit_position_task();
    };
  };

  /// completion that feeds the notify result to handle_notified() and
  /// then completes the optional user context (always with 0)
  struct C_AioNotify : public Context {
    JournalMetadata* journal_metadata;
    Context *on_safe;

    C_AioNotify(JournalMetadata *_journal_metadata, Context *_on_safe)
      : journal_metadata(_journal_metadata), on_safe(_on_safe) {
      journal_metadata->m_async_op_tracker.start_op();
    }
    ~C_AioNotify() override {
      journal_metadata->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      journal_metadata->handle_notified(r);
      if (on_safe != nullptr) {
        on_safe->complete(0);
      }
    }
  };

  /// on success, broadcasts a metadata update notification; on error the
  /// result is forwarded straight to the user context (if any)
  struct C_NotifyUpdate : public Context {
    JournalMetadata* journal_metadata;
    Context *on_safe;

    C_NotifyUpdate(JournalMetadata *_journal_metadata, Context *_on_safe = NULL)
      : journal_metadata(_journal_metadata), on_safe(_on_safe) {
      journal_metadata->m_async_op_tracker.start_op();
    }
    ~C_NotifyUpdate() override {
      journal_metadata->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      if (r == 0) {
        journal_metadata->async_notify_update(on_safe);
        return;
      }
      if (on_safe != NULL) {
        on_safe->complete(r);
      }
    }
  };

  /// completion that forwards to handle_immutable_metadata()
  struct C_ImmutableMetadata : public Context {
    JournalMetadata* journal_metadata;
    Context *on_finish;

    C_ImmutableMetadata(JournalMetadata *_journal_metadata, Context *_on_finish)
      : journal_metadata(_journal_metadata), on_finish(_on_finish) {
      std::lock_guard locker{journal_metadata->m_lock};
      journal_metadata->m_async_op_tracker.start_op();
    }
    ~C_ImmutableMetadata() override {
      journal_metadata->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      journal_metadata->handle_immutable_metadata(r, on_finish);
    }
  };

  /// completion for a mutable-metadata refresh; carries the refreshed
  /// values back into handle_refresh_complete()
  struct C_Refresh : public Context {
    JournalMetadata* journal_metadata;
    uint64_t minimum_set;
    uint64_t active_set;
    RegisteredClients registered_clients;

    C_Refresh(JournalMetadata *_journal_metadata)
      : journal_metadata(_journal_metadata), minimum_set(0), active_set(0) {
      std::lock_guard locker{journal_metadata->m_lock};
      journal_metadata->m_async_op_tracker.start_op();
    }
    ~C_Refresh() override {
      journal_metadata->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      journal_metadata->handle_refresh_complete(this, r);
    }
  };
  librados::IoCtx m_ioctx;
  CephContext *m_cct = nullptr;
  std::string m_oid;
  std::string m_client_id;
  Settings m_settings;

  // immutable journal metadata (valid once initialized)
  uint8_t m_order = 0;
  uint8_t m_splay_width = 0;
  int64_t m_pool_id = -1;
  bool m_initialized = false;

  ContextWQ *m_work_queue;
  SafeTimer *m_timer;
  ceph::mutex *m_timer_lock;

  // guards the mutable metadata state accessed by the inline accessors
  mutable ceph::mutex m_lock = ceph::make_mutex("JournalMetadata::m_lock");

  uint64_t m_commit_tid = 0;
  CommitTids m_pending_commit_tids;

  Listeners m_listeners;

  // RADOS watch plumbing for the journal header object
  C_WatchCtx m_watch_ctx;
  uint64_t m_watch_handle = 0;

  uint64_t m_minimum_set = 0;
  uint64_t m_active_set = 0;
  RegisteredClients m_registered_clients;
  Client m_client;

  // per-tag next entry tid (see allocate_entry_tid)
  AllocatedEntryTids m_allocated_entry_tids;

  size_t m_update_notifications = 0;
  ceph::condition_variable m_update_cond;

  size_t m_ignore_watch_notifies = 0;
  size_t m_refreshes_in_progress = 0;
  Contexts m_refresh_ctxs;

  // pending commit-position flush state
  uint64_t m_commit_position_tid = 0;
  ObjectSetPosition m_commit_position;
  Context *m_commit_position_ctx = nullptr;
  Context *m_commit_position_task_ctx = nullptr;

  size_t m_flush_commits_in_progress = 0;
  Contexts m_flush_commit_position_ctxs;

  // tracks outstanding async callbacks so shutdown can drain them
  AsyncOpTracker m_async_op_tracker;
void handle_immutable_metadata(int r, Context *on_init);
void refresh(Context *on_finish);
void handle_refresh_complete(C_Refresh *refresh, int r);
void cancel_commit_task();
void schedule_commit_task();
void handle_commit_position_task();
void schedule_watch_reset();
void handle_watch_reset();
void handle_watch_notify(uint64_t notify_id, uint64_t cookie);
void handle_watch_error(int err);
void handle_notified(int r);
void schedule_laggy_clients_disconnect(Context *on_finish);
friend std::ostream &operator<<(std::ostream &os,
const JournalMetadata &journal_metadata);
};
std::ostream &operator<<(std::ostream &os,
const JournalMetadata::RegisteredClients &clients);
std::ostream &operator<<(std::ostream &os,
const JournalMetadata &journal_metadata);
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_METADATA_H
| 11,486 | 29.550532 | 83 | h |
null | ceph-main/src/journal/JournalMetadataListener.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2016 SUSE LINUX GmbH
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_JOURNAL_JOURNAL_METADATA_LISTENER_H
#define CEPH_JOURNAL_JOURNAL_METADATA_LISTENER_H
namespace journal {
class JournalMetadata;
// Observer interface notified when the journal metadata is updated.
struct JournalMetadataListener {
  // defaulted virtual dtor (was an empty body with a stray ';')
  virtual ~JournalMetadataListener() = default;

  /// invoked with the metadata instance that changed
  virtual void handle_update(JournalMetadata *) = 0;
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_METADATA_LISTENER_H
| 758 | 23.483871 | 70 | h |
null | ceph-main/src/journal/JournalPlayer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "common/PriorityCache.h"
#include "include/stringify.h"
#include "journal/JournalPlayer.h"
#include "journal/Entry.h"
#include "journal/ReplayHandler.h"
#include "journal/Types.h"
#include "journal/Utils.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalPlayer: " << this << " "
namespace journal {
namespace {
// lower bound on the per-object fetch size (bytes)
static const uint64_t MIN_FETCH_BYTES = 32768;

// completion that forwards the final replay result to the handler.
// NOTE: std::move() on a raw pointer was a no-op -- store it directly.
struct C_HandleComplete : public Context {
  ReplayHandler* replay_handler;

  explicit C_HandleComplete(ReplayHandler* r) : replay_handler(r) {}
  ~C_HandleComplete() override = default;
  void finish(int r) override {
    replay_handler->handle_complete(r);
  }
};

// completion that tells the handler more entries can be popped
struct C_HandleEntriesAvailable : public Context {
  ReplayHandler* replay_handler;

  explicit C_HandleEntriesAvailable(ReplayHandler* r) : replay_handler(r) {}
  ~C_HandleEntriesAvailable() override = default;
  void finish(int r) override {
    // the result code is irrelevant for an "entries available" event
    replay_handler->handle_entries_available();
  }
};
} // anonymous namespace
// Construct a player for the journal identified by 'object_oid_prefix'.
// If a commit position was recorded, playback is primed to resume at the
// object/tag of the most recently committed entry; otherwise replay starts
// at the first object of each splay offset.  When a cache manager is
// supplied, the per-object fetch budget arrives asynchronously through
// handle_cache_rebalanced(); otherwise it defaults to 2x the object size.
JournalPlayer::JournalPlayer(librados::IoCtx &ioctx,
                             std::string_view object_oid_prefix,
                             ceph::ref_t<JournalMetadata> journal_metadata,
                             ReplayHandler* replay_handler,
                             CacheManagerHandler *cache_manager_handler)
  : m_object_oid_prefix(object_oid_prefix),
    m_journal_metadata(std::move(journal_metadata)),
    // std::move() on a raw pointer is a no-op -- store it directly
    m_replay_handler(replay_handler),
    m_cache_manager_handler(cache_manager_handler),
    m_cache_rebalance_handler(this)
{
  m_ioctx.dup(ioctx);
  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());

  ObjectSetPosition commit_position;
  m_journal_metadata->get_commit_position(&commit_position);

  if (!commit_position.object_positions.empty()) {
    ldout(m_cct, 5) << "commit position: " << commit_position << dendl;

    // start replay after the last committed entry's object
    uint8_t splay_width = m_journal_metadata->get_splay_width();
    auto &active_position = commit_position.object_positions.front();
    m_active_tag_tid = active_position.tag_tid;
    m_commit_position_valid = true;
    m_commit_position = active_position;
    m_splay_offset = active_position.object_number % splay_width;
    for (auto &position : commit_position.object_positions) {
      uint8_t splay_offset = position.object_number % splay_width;
      m_commit_positions[splay_offset] = position;
    }
  }

  if (m_cache_manager_handler != nullptr) {
    m_cache_name = "JournalPlayer/" + stringify(m_ioctx.get_id()) + "/" +
                   m_object_oid_prefix;
    auto order = m_journal_metadata->get_order();
    auto splay_width = m_journal_metadata->get_splay_width();
    uint64_t min_size = MIN_FETCH_BYTES * splay_width;
    // shift in 64 bits: '2 << order' is a signed-int expression and
    // overflows (UB) for order >= 31
    uint64_t max_size = (static_cast<uint64_t>(2) << order) * splay_width;

    m_cache_manager_handler->register_cache(m_cache_name, min_size, max_size,
                                            &m_cache_rebalance_handler);
    m_max_fetch_bytes = 0;
  } else {
    // no cache manager: fetch up to twice the object size per request
    m_max_fetch_bytes =
      static_cast<uint64_t>(2) << m_journal_metadata->get_order();
  }
}
// shut_down() must have completed before destruction: no tracked async
// ops, no in-flight fetches and no scheduled watch may remain
JournalPlayer::~JournalPlayer() {
  ceph_assert(m_async_op_tracker.empty());
  {
    std::lock_guard locker{m_lock};
    ceph_assert(m_shut_down);
    ceph_assert(m_fetch_object_numbers.empty());
    ceph_assert(!m_watch_scheduled);
  }

  if (m_cache_manager_handler != nullptr) {
    m_cache_manager_handler->unregister_cache(m_cache_name);
  }
}
// Begin replay: fetch the starting object for every splay offset.  If a
// cache manager is registered but no fetch budget has been assigned yet,
// park in STATE_WAITCACHE until handle_cache_rebalanced() restarts us.
void JournalPlayer::prefetch() {
  std::lock_guard locker{m_lock};
  ceph_assert(m_state == STATE_INIT);

  if (m_shut_down) {
    return;
  }

  if (m_cache_manager_handler != nullptr && m_max_fetch_bytes == 0) {
    // wait for the cache manager to assign a fetch budget
    m_state = STATE_WAITCACHE;
    return;
  }

  m_state = STATE_PREFETCH;

  m_active_set = m_journal_metadata->get_active_set();
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
    m_prefetch_splay_offsets.insert(splay_offset);
  }

  // compute active object for each splay offset (might be before
  // active set)
  std::map<uint8_t, uint64_t> splay_offset_to_objects;
  for (auto &position : m_commit_positions) {
    ceph_assert(splay_offset_to_objects.count(position.first) == 0);
    splay_offset_to_objects[position.first] = position.second.object_number;
  }

  // prefetch the active object for each splay offset
  std::set<uint64_t> prefetch_object_numbers;
  for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
    uint64_t object_number = splay_offset;
    if (splay_offset_to_objects.count(splay_offset) != 0) {
      object_number = splay_offset_to_objects[splay_offset];
    }
    prefetch_object_numbers.insert(object_number);
  }

  ldout(m_cct, 10) << __func__ << ": prefetching "
                   << prefetch_object_numbers.size() << " " << "objects"
                   << dendl;
  for (auto object_number : prefetch_object_numbers) {
    fetch(object_number);
  }
}
void JournalPlayer::prefetch_and_watch(double interval) {
{
std::lock_guard locker{m_lock};
m_watch_enabled = true;
m_watch_interval = interval;
m_watch_step = WATCH_STEP_FETCH_CURRENT;
}
prefetch();
}
// Stop playback: disable watching, cancel any in-flight object watch and
// fire on_finish (async, via the metadata work queue) once all tracked
// async ops have drained.
void JournalPlayer::shut_down(Context *on_finish) {
  ldout(m_cct, 20) << __func__ << dendl;
  std::lock_guard locker{m_lock};

  ceph_assert(!m_shut_down);
  m_shut_down = true;
  m_watch_enabled = false;

  on_finish = utils::create_async_context_callback(
      m_journal_metadata, on_finish);

  if (m_watch_scheduled) {
    auto object_player = get_object_player();
    switch (m_watch_step) {
    case WATCH_STEP_FETCH_FIRST:
      object_player = m_object_players.begin()->second;
      // fallthrough
    case WATCH_STEP_FETCH_CURRENT:
      object_player->unwatch();
      break;
    case WATCH_STEP_ASSERT_ACTIVE:
      // no object watch outstanding for the tag-assert step
      break;
    }
  }

  m_async_op_tracker.wait_for_ops(on_finish);
}
// Pop the next in-sequence journal entry, if one is ready.  On success
// fills *entry and *commit_tid (the tid to commit once the caller has
// safely replayed the entry) and returns true.  Returns false when no
// entry is ready; in that case the "entries available" notification is
// re-armed (or a refetch is kicked off if the object set is idle).
bool JournalPlayer::try_pop_front(Entry *entry, uint64_t *commit_tid) {
  ldout(m_cct, 20) << __func__ << dendl;
  std::lock_guard locker{m_lock};

  if (m_state != STATE_PLAYBACK) {
    m_handler_notified = false;
    return false;
  }

  if (!verify_playback_ready()) {
    if (!is_object_set_ready()) {
      // fetches still in flight -- re-arm the notification
      m_handler_notified = false;
    } else {
      refetch(true);
    }
    return false;
  }

  auto object_player = get_object_player();
  ceph_assert(object_player && !object_player->empty());

  object_player->front(entry);
  object_player->pop_front();

  // detect a gap in the per-tag entry tid sequence (lost entry)
  uint64_t last_entry_tid;
  if (m_journal_metadata->get_last_allocated_entry_tid(
        entry->get_tag_tid(), &last_entry_tid) &&
      entry->get_entry_tid() != last_entry_tid + 1) {
    lderr(m_cct) << "missing prior journal entry: " << *entry << dendl;

    m_state = STATE_ERROR;
    notify_complete(-ENOMSG);
    return false;
  }

  advance_splay_object();
  remove_empty_object_player(object_player);

  m_journal_metadata->reserve_entry_tid(entry->get_tag_tid(),
                                        entry->get_entry_tid());
  *commit_tid = m_journal_metadata->allocate_commit_tid(
    object_player->get_object_number(), entry->get_tag_tid(),
    entry->get_entry_tid());
  return true;
}
// Dispatch a fetch/watch completion to the handler for the current state
// machine state; any error (incoming or produced by the handler) moves
// the player to STATE_ERROR and completes the replay with that error.
void JournalPlayer::process_state(uint64_t object_number, int r) {
  ldout(m_cct, 10) << __func__ << ": object_num=" << object_number << ", "
                   << "r=" << r << dendl;

  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (r >= 0) {
    switch (m_state) {
    case STATE_PREFETCH:
      ldout(m_cct, 10) << "PREFETCH" << dendl;
      r = process_prefetch(object_number);
      break;
    case STATE_PLAYBACK:
      ldout(m_cct, 10) << "PLAYBACK" << dendl;
      r = process_playback(object_number);
      break;
    case STATE_ERROR:
      ldout(m_cct, 10) << "ERROR" << dendl;
      break;
    default:
      lderr(m_cct) << "UNEXPECTED STATE (" << m_state << ")" << dendl;
      ceph_abort();
      break;
    }
  }

  if (r < 0) {
    m_state = STATE_ERROR;
    notify_complete(r);
  }
}
// Handle a fetch completion while prefetching: skip entries already
// committed for this splay offset, chase the next object in the splay
// sequence if this one is exhausted, and switch to STATE_PLAYBACK once
// every splay offset has completed its prefetch.
int JournalPlayer::process_prefetch(uint64_t object_number) {
  ldout(m_cct, 10) << __func__ << ": object_num=" << object_number << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));

  uint8_t splay_width = m_journal_metadata->get_splay_width();
  uint8_t splay_offset = object_number % splay_width;

  PrefetchSplayOffsets::iterator it = m_prefetch_splay_offsets.find(
    splay_offset);
  if (it == m_prefetch_splay_offsets.end()) {
    // this splay offset already finished prefetching
    return 0;
  }

  bool prefetch_complete = false;
  ceph_assert(m_object_players.count(splay_offset) == 1);
  auto object_player = m_object_players[splay_offset];

  // prefetch in-order since a newer splay object could prefetch first
  if (m_fetch_object_numbers.count(object_player->get_object_number()) == 0) {
    // skip past known committed records
    if (m_commit_positions.count(splay_offset) != 0 &&
        !object_player->empty()) {
      ObjectPosition &position = m_commit_positions[splay_offset];

      ldout(m_cct, 15) << "seeking known commit position " << position << " in "
                       << object_player->get_oid() << dendl;

      bool found_commit = false;
      Entry entry;
      while (!object_player->empty()) {
        object_player->front(&entry);

        if (entry.get_tag_tid() == position.tag_tid &&
            entry.get_entry_tid() == position.entry_tid) {
          found_commit = true;
        } else if (found_commit) {
          ldout(m_cct, 10) << "located next uncommitted entry: " << entry
                           << dendl;
          break;
        }

        ldout(m_cct, 20) << "skipping committed entry: " << entry << dendl;
        m_journal_metadata->reserve_entry_tid(entry.get_tag_tid(),
                                              entry.get_entry_tid());
        object_player->pop_front();
      }

      // do not search for commit position for this object
      // if we've already seen it
      if (found_commit) {
        m_commit_positions.erase(splay_offset);
      }
    }

    // if the object is empty, pre-fetch the next splay object
    if (object_player->empty() && object_player->refetch_required()) {
      ldout(m_cct, 10) << "refetching potentially partially decoded object"
                       << dendl;
      object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_NONE);
      fetch(object_player);
    } else if (!remove_empty_object_player(object_player)) {
      ldout(m_cct, 10) << "prefetch of object complete" << dendl;
      prefetch_complete = true;
    }
  }

  if (!prefetch_complete) {
    return 0;
  }

  m_prefetch_splay_offsets.erase(it);
  if (!m_prefetch_splay_offsets.empty()) {
    // other splay offsets are still prefetching
    return 0;
  }

  ldout(m_cct, 10) << "switching to playback mode" << dendl;
  m_state = STATE_PLAYBACK;

  // if we have a valid commit position, our read should start with
  // the next consistent journal entry in the sequence
  if (m_commit_position_valid) {
    splay_offset = m_commit_position.object_number % splay_width;
    object_player = m_object_players[splay_offset];

    if (object_player->empty()) {
      if (!object_player->refetch_required()) {
        advance_splay_object();
      }
    } else {
      Entry entry;
      object_player->front(&entry);
      if (entry.get_tag_tid() == m_commit_position.tag_tid) {
        advance_splay_object();
      }
    }
  }

  if (verify_playback_ready()) {
    notify_entries_available();
  } else if (is_object_set_ready()) {
    refetch(false);
  }
  return 0;
}
// A fetch completed during playback: wake the replay handler if the next
// in-sequence entry is now available, otherwise (with no fetches still in
// flight) schedule a refetch of the active object.
int JournalPlayer::process_playback(uint64_t object_number) {
  ldout(m_cct, 10) << __func__ << ": object_num=" << object_number << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (verify_playback_ready()) {
    notify_entries_available();
    return 0;
  }

  if (is_object_set_ready()) {
    refetch(false);
  }
  return 0;
}
// true when no watch is scheduled and no object fetch is outstanding
bool JournalPlayer::is_object_set_ready() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  bool busy = m_watch_scheduled || !m_fetch_object_numbers.empty();
  if (busy) {
    ldout(m_cct, 20) << __func__ << ": waiting for in-flight fetch" << dendl;
  }
  return !busy;
}
// Determine whether the next in-sequence entry (for the active tag) is
// ready at the current splay position, pruning stale/abandoned tags as
// they are discovered.  Loops because pruning can expose new front
// entries; exits true when an entry is ready, false when playback must
// wait (fetch in flight, awaiting first entry, or no more entries).
bool JournalPlayer::verify_playback_ready() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  while (true) {
    if (!is_object_set_ready()) {
      ldout(m_cct, 10) << __func__ << ": waiting for full object set" << dendl;
      return false;
    }

    auto object_player = get_object_player();
    ceph_assert(object_player);
    uint64_t object_num = object_player->get_object_number();

    // Verify is the active object player has another entry available
    // in the sequence
    // NOTE: replay currently does not check tag class to playback multiple tags
    // from different classes (issue #14909). When a new tag is discovered, it
    // is assumed that the previous tag was closed at the last replayable entry.
    Entry entry;
    if (!object_player->empty()) {
      m_watch_prune_active_tag = false;
      object_player->front(&entry);

      if (!m_active_tag_tid) {
        ldout(m_cct, 10) << __func__ << ": "
                         << "object_num=" << object_num << ", "
                         << "initial tag=" << entry.get_tag_tid()
                         << dendl;
        m_active_tag_tid = entry.get_tag_tid();
        return true;
      } else if (entry.get_tag_tid() < *m_active_tag_tid ||
                 (m_prune_tag_tid && entry.get_tag_tid() <= *m_prune_tag_tid)) {
        // entry occurred before the current active tag
        ldout(m_cct, 10) << __func__ << ": detected stale entry: "
                         << "object_num=" << object_num << ", "
                         << "entry=" << entry << dendl;
        prune_tag(entry.get_tag_tid());
        continue;
      } else if (entry.get_tag_tid() > *m_active_tag_tid) {
        // new tag at current playback position -- implies that previous
        // tag ended abruptly without flushing out all records
        // search for the start record for the next tag
        ldout(m_cct, 10) << __func__ << ": new tag detected: "
                         << "object_num=" << object_num << ", "
                         << "active_tag=" << *m_active_tag_tid << ", "
                         << "new_tag=" << entry.get_tag_tid() << dendl;
        if (entry.get_entry_tid() == 0) {
          // first entry in new tag -- can promote to active
          prune_active_tag(entry.get_tag_tid());
          return true;
        } else {
          // prune current active and wait for initial entry for new tag
          prune_active_tag(boost::none);
          continue;
        }
      } else {
        // entry belongs to the active tag -- ready to pop
        ldout(m_cct, 20) << __func__ << ": "
                         << "object_num=" << object_num << ", "
                         << "entry: " << entry << dendl;
        ceph_assert(entry.get_tag_tid() == *m_active_tag_tid);
        return true;
      }
    } else {
      if (!m_active_tag_tid) {
        // waiting for our first entry
        ldout(m_cct, 10) << __func__ << ": waiting for first entry: "
                         << "object_num=" << object_num << dendl;
        return false;
      } else if (m_prune_tag_tid && *m_prune_tag_tid == *m_active_tag_tid) {
        ldout(m_cct, 10) << __func__ << ": no more entries" << dendl;
        return false;
      } else if (m_watch_enabled && m_watch_prune_active_tag) {
        // detected current tag is now longer active and we have re-read the
        // current object but it's still empty, so this tag is done
        ldout(m_cct, 10) << __func__ << ": assuming no more in-sequence entries: "
                         << "object_num=" << object_num << ", "
                         << "active_tag " << *m_active_tag_tid << dendl;
        prune_active_tag(boost::none);
        continue;
      } else if (object_player->refetch_required()) {
        // if the active object requires a refetch, don't proceed looking for a
        // new tag before this process completes
        ldout(m_cct, 10) << __func__ << ": refetch required: "
                         << "object_num=" << object_num << dendl;
        return false;
      } else if (!m_watch_enabled) {
        // current playback position is empty so this tag is done
        ldout(m_cct, 10) << __func__ << ": no more in-sequence entries: "
                         << "object_num=" << object_num << ", "
                         << "active_tag=" << *m_active_tag_tid << dendl;
        prune_active_tag(boost::none);
        continue;
      } else if (!m_watch_scheduled) {
        // no more entries and we don't have an active watch in-progress
        ldout(m_cct, 10) << __func__ << ": no more entries -- watch required"
                         << dendl;
        return false;
      }
    }
  }
  return false;
}
// Drop every remaining (stale) entry for the given tag from all splay
// players, remember the highest pruned tag tid, and trim any player that
// became empty so the next object in its sequence is prefetched.
void JournalPlayer::prune_tag(uint64_t tag_tid) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ldout(m_cct, 10) << __func__ << ": pruning remaining entries for tag "
                   << tag_tid << dendl;

  // prune records that are at or below the largest prune tag tid
  if (!m_prune_tag_tid || *m_prune_tag_tid < tag_tid) {
    m_prune_tag_tid = tag_tid;
  }

  bool pruned = false;
  for (const auto &player_pair : m_object_players) {
    auto& object_player = player_pair.second;
    ldout(m_cct, 15) << __func__ << ": checking " << object_player->get_oid()
                     << dendl;
    while (!object_player->empty()) {
      Entry entry;
      object_player->front(&entry);
      if (entry.get_tag_tid() == tag_tid) {
        ldout(m_cct, 20) << __func__ << ": pruned " << entry << dendl;
        object_player->pop_front();
        pruned = true;
      } else {
        break;
      }
    }
  }

  // avoid watch delay when pruning stale tags from journal objects
  if (pruned) {
    ldout(m_cct, 15) << __func__ << ": resetting refetch state to immediate"
                     << dendl;
    for (const auto &player_pair : m_object_players) {
      auto& object_player = player_pair.second;
      object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_IMMEDIATE);
    }
  }

  // trim empty player to prefetch the next available object
  for (const auto &player_pair : m_object_players) {
    remove_empty_object_player(player_pair.second);
  }
}
// Retire the current active tag (pruning its leftover entries) and, if a
// replacement tag tid is supplied, promote it to active.  Playback is
// rewound to splay offset zero for the new tag.
void JournalPlayer::prune_active_tag(const boost::optional<uint64_t>& tag_tid) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(m_active_tag_tid);

  // remember the tag being retired before (possibly) promoting a new one
  const uint64_t retired_tag_tid = *m_active_tag_tid;
  if (tag_tid) {
    m_active_tag_tid = tag_tid;
  }

  m_watch_step = WATCH_STEP_FETCH_CURRENT;
  m_splay_offset = 0;

  prune_tag(retired_tag_tid);
}
// player for the current splay offset (must always be registered)
ceph::ref_t<ObjectPlayer> JournalPlayer::get_object_player() const {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  auto player_it = m_object_players.find(m_splay_offset);
  ceph_assert(player_it != m_object_players.end());
  return player_it->second;
}
// player tracking the given object number; asserts that its splay slot
// is currently assigned to exactly this object
ceph::ref_t<ObjectPlayer> JournalPlayer::get_object_player(uint64_t object_number) const {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  const uint8_t offset = object_number % m_journal_metadata->get_splay_width();
  auto player_it = m_object_players.find(offset);
  ceph_assert(player_it != m_object_players.end());
  ceph_assert(player_it->second->get_object_number() == object_number);
  return player_it->second;
}
// move playback to the next splay slot, wrapping back to slot zero
void JournalPlayer::advance_splay_object() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  m_splay_offset = (m_splay_offset + 1) % m_journal_metadata->get_splay_width();
  m_watch_step = WATCH_STEP_FETCH_CURRENT;
  ldout(m_cct, 20) << __func__ << ": new offset "
                   << static_cast<uint32_t>(m_splay_offset) << dendl;
}
// If the given player is empty, fully decoded and belongs to an object
// set older than the active set, replace it with a player for the next
// object in its splay sequence.  Returns true when the replacement fetch
// was started.  A newly-advanced active set instead forces a refetch of
// all players (records may have landed after our last read).
bool JournalPlayer::remove_empty_object_player(const ceph::ref_t<ObjectPlayer> &player) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(!m_watch_scheduled);

  uint8_t splay_width = m_journal_metadata->get_splay_width();
  uint64_t object_set = player->get_object_number() / splay_width;
  uint64_t active_set = m_journal_metadata->get_active_set();
  if (!player->empty() || object_set == active_set) {
    return false;
  } else if (player->refetch_required()) {
    ldout(m_cct, 20) << __func__ << ": " << player->get_oid() << " requires "
                     << "a refetch" << dendl;
    return false;
  } else if (m_active_set != active_set) {
    ldout(m_cct, 20) << __func__ << ": new active set detected, all players "
                     << "require refetch" << dendl;
    m_active_set = active_set;
    for (const auto& pair : m_object_players) {
      pair.second->set_refetch_state(ObjectPlayer::REFETCH_STATE_IMMEDIATE);
    }
    return false;
  }

  ldout(m_cct, 15) << __func__ << ": " << player->get_oid() << " empty"
                   << dendl;

  m_watch_prune_active_tag = false;
  m_watch_step = WATCH_STEP_FETCH_CURRENT;

  uint64_t next_object_num = player->get_object_number() + splay_width;
  fetch(next_object_num);
  return true;
}
// create a player for this object, register it in its splay slot and
// kick off the initial read
void JournalPlayer::fetch(uint64_t object_num) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  auto player = ceph::make_ref<ObjectPlayer>(
    m_ioctx, m_object_oid_prefix, object_num, m_journal_metadata->get_timer(),
    m_journal_metadata->get_timer_lock(), m_journal_metadata->get_order(),
    m_max_fetch_bytes);

  m_object_players[object_num % m_journal_metadata->get_splay_width()] = player;
  fetch(player);
}
// start an asynchronous read of the player's object, recording it as an
// in-flight fetch (completion lands in handle_fetched())
void JournalPlayer::fetch(const ceph::ref_t<ObjectPlayer> &object_player) {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  const uint64_t object_num = object_player->get_object_number();

  // the same object must not already have a fetch outstanding
  ceph_assert(m_fetch_object_numbers.count(object_num) == 0);
  m_fetch_object_numbers.insert(object_num);

  ldout(m_cct, 10) << __func__ << ": "
                   << utils::get_object_name(m_object_oid_prefix, object_num)
                   << dendl;
  object_player->fetch(new C_Fetch(this, object_num));
}
// completion of an object fetch: clear the in-flight marker, trim the
// player if the object was empty, and feed the result to the state machine
void JournalPlayer::handle_fetched(uint64_t object_num, int r) {
  ldout(m_cct, 10) << __func__ << ": "
                   << utils::get_object_name(m_object_oid_prefix, object_num)
                   << ": r=" << r << dendl;

  std::lock_guard locker{m_lock};

  // the fetch is no longer in flight (it must have been recorded)
  auto erased = m_fetch_object_numbers.erase(object_num);
  ceph_assert(erased == 1);

  if (m_shut_down) {
    return;
  }

  if (r == 0) {
    remove_empty_object_player(get_object_player(object_num));
  }
  process_state(object_num, r);
}
void JournalPlayer::refetch(bool immediate) {
ldout(m_cct, 10) << __func__ << dendl;
ceph_assert(ceph_mutex_is_locked(m_lock));
m_handler_notified = false;
// if watching the object, handle the periodic re-fetch
if (m_watch_enabled) {
schedule_watch(immediate);
return;
}
auto object_player = get_object_player();
if (object_player->refetch_required()) {
object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_NONE);
fetch(object_player);
return;
}
notify_complete(0);
}
// Schedule the next step of the periodic watch cycle: either assert that
// the active tag is still current, or (re)watch the appropriate journal
// object -- immediately when requested/required, otherwise after the
// configured watch interval.
void JournalPlayer::schedule_watch(bool immediate) {
  ldout(m_cct, 10) << __func__ << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));
  if (m_watch_scheduled) {
    return;
  }

  m_watch_scheduled = true;

  if (m_watch_step == WATCH_STEP_ASSERT_ACTIVE) {
    // detect if a new tag has been created in case we are blocked
    // by an incomplete tag sequence
    ldout(m_cct, 20) << __func__ << ": asserting active tag="
                     << *m_active_tag_tid << dendl;
    m_async_op_tracker.start_op();
    auto ctx = new LambdaContext([this](int r) {
        handle_watch_assert_active(r);
      });
    m_journal_metadata->assert_active_tag(*m_active_tag_tid, ctx);
    return;
  }

  ceph::ref_t<ObjectPlayer> object_player;
  double watch_interval = m_watch_interval;

  switch (m_watch_step) {
  case WATCH_STEP_FETCH_CURRENT:
    {
      object_player = get_object_player();

      uint8_t splay_width = m_journal_metadata->get_splay_width();
      uint64_t active_set = m_journal_metadata->get_active_set();
      uint64_t object_set = object_player->get_object_number() / splay_width;
      if (immediate ||
          (object_player->get_refetch_state() ==
             ObjectPlayer::REFETCH_STATE_IMMEDIATE) ||
          (object_set < active_set && object_player->refetch_required())) {
        // no reason to delay the re-read
        ldout(m_cct, 20) << __func__ << ": immediately refetching "
                         << object_player->get_oid()
                         << dendl;
        object_player->set_refetch_state(ObjectPlayer::REFETCH_STATE_NONE);
        watch_interval = 0;
      }
    }
    break;
  case WATCH_STEP_FETCH_FIRST:
    // re-read the first splay object without delay
    object_player = m_object_players.begin()->second;
    watch_interval = 0;
    break;
  default:
    ceph_abort();
  }

  ldout(m_cct, 20) << __func__ << ": scheduling watch on "
                   << object_player->get_oid() << dendl;
  Context *ctx = utils::create_async_context_callback(
    m_journal_metadata, new C_Watch(this, object_player->get_object_number()));
  object_player->watch(ctx, watch_interval);
}
// Completion of an object watch: trim the player if the object is still
// empty, advance to the next watch step and feed the result into the
// state machine.  Ignored after shutdown or when the watch was canceled.
void JournalPlayer::handle_watch(uint64_t object_num, int r) {
  ldout(m_cct, 10) << __func__ << ": r=" << r << dendl;
  std::lock_guard locker{m_lock};
  ceph_assert(m_watch_scheduled);
  m_watch_scheduled = false;

  if (m_shut_down || r == -ECANCELED) {
    // unwatch of object player(s)
    return;
  }

  auto object_player = get_object_player(object_num);
  if (r == 0 && object_player->empty()) {
    // possibly need to prune this empty object player if we've
    // already fetched it after the active set was advanced with no
    // new records
    remove_empty_object_player(object_player);
  }

  // determine what object to query on next watch schedule tick
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  if (m_watch_step == WATCH_STEP_FETCH_CURRENT &&
      object_player->get_object_number() % splay_width != 0) {
    m_watch_step = WATCH_STEP_FETCH_FIRST;
  } else if (m_active_tag_tid) {
    m_watch_step = WATCH_STEP_ASSERT_ACTIVE;
  } else {
    m_watch_step = WATCH_STEP_FETCH_CURRENT;
  }

  process_state(object_num, r);
}
// Completion of the active-tag assertion step of the watch cycle.
// -ESTALE means a newer tag exists, so the active tag may be pruned on
// the next empty re-read; the cycle then restarts at FETCH_CURRENT.
void JournalPlayer::handle_watch_assert_active(int r) {
  ldout(m_cct, 10) << __func__ << ": r=" << r << dendl;
  std::lock_guard locker{m_lock};
  ceph_assert(m_watch_scheduled);
  m_watch_scheduled = false;

  if (r == -ESTALE) {
    // newer tag exists -- since we are at this step in the watch sequence,
    // we know we can prune the active tag if watch fails again
    ldout(m_cct, 10) << __func__ << ": tag " << *m_active_tag_tid << " "
                     << "no longer active" << dendl;
    m_watch_prune_active_tag = true;
  }

  m_watch_step = WATCH_STEP_FETCH_CURRENT;
  if (!m_shut_down && m_watch_enabled) {
    schedule_watch(false);
  }
  m_async_op_tracker.finish_op();
}
// queue a single "entries available" callback to the replay handler;
// further notifications are suppressed until the flag is re-armed
void JournalPlayer::notify_entries_available() {
  ceph_assert(ceph_mutex_is_locked(m_lock));

  if (!m_handler_notified) {
    m_handler_notified = true;
    ldout(m_cct, 10) << __func__ << ": entries available" << dendl;
    m_journal_metadata->queue(new C_HandleEntriesAvailable(m_replay_handler), 0);
  }
}
// Queue the final "replay complete" notification (result r) for the replay
// handler. Caller must hold m_lock.
void JournalPlayer::notify_complete(int r) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ldout(m_cct, 10) << __func__ << ": replay complete: r=" << r << dendl;
  m_handler_notified = true;
  auto* ctx = new C_HandleComplete(m_replay_handler);
  m_journal_metadata->queue(ctx, r);
}
// Cache manager callback: our cache allotment changed to new_cache_bytes.
// Recomputes the per-object fetch budget and either (re)starts prefetching
// if we were waiting on cache, fails replay with -ENOMEM if the budget is
// too small, or pushes the new budget down to the object players.
void JournalPlayer::handle_cache_rebalanced(uint64_t new_cache_bytes) {
  std::lock_guard locker{m_lock};
  if (m_state == STATE_ERROR || m_shut_down) {
    return;
  }
  auto splay_width = m_journal_metadata->get_splay_width();
  // budget is split evenly across the splay objects, aligned down to 4 KiB
  m_max_fetch_bytes = p2align<uint64_t>(new_cache_bytes / splay_width, 4096);
  ldout(m_cct, 10) << __func__ << ": new_cache_bytes=" << new_cache_bytes
                   << ", max_fetch_bytes=" << m_max_fetch_bytes << dendl;
  uint64_t min_bytes = MIN_FETCH_BYTES;
  if (m_state == STATE_WAITCACHE) {
    m_state = STATE_INIT;
    if (m_max_fetch_bytes >= min_bytes) {
      // enough cache now -- kick off the deferred prefetch asynchronously
      m_async_op_tracker.start_op();
      auto ctx = new LambdaContext(
        [this](int r) {
          prefetch();
          m_async_op_tracker.finish_op();
        });
      m_journal_metadata->queue(ctx, 0);
      return;
    }
  } else {
    // randomly lower the minimum threshold (by up to half, 4 KiB-aligned);
    // presumably to stagger which players give up first when the cache
    // shrinks -- TODO confirm intent
    min_bytes = p2align<uint64_t>(min_bytes - (rand() % min_bytes) / 2, 4096);
  }
  if (m_max_fetch_bytes < min_bytes) {
    lderr(m_cct) << __func__ << ": can't allocate enough memory from cache"
                 << dendl;
    m_state = STATE_ERROR;
    notify_complete(-ENOMEM);
    return;
  }
  for (auto &pair : m_object_players) {
    pair.second->set_max_fetch_bytes(m_max_fetch_bytes);
  }
}
} // namespace journal
| 28,705 | 31.919725 | 90 | cc |
null | ceph-main/src/journal/JournalPlayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_PLAYER_H
#define CEPH_JOURNAL_JOURNAL_PLAYER_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/AsyncOpTracker.h"
#include "common/Timer.h"
#include "journal/JournalMetadata.h"
#include "journal/ObjectPlayer.h"
#include "journal/Types.h"
#include "cls/journal/cls_journal_types.h"
#include <boost/none.hpp>
#include <boost/optional.hpp>
#include <map>
namespace journal {
class CacheManagerHandler;
class Entry;
class ReplayHandler;
// Replays journal entries in commit order from the splayed journal data
// objects, optionally polling ("watching") for newly appended entries.
class JournalPlayer {
public:
  typedef cls::journal::ObjectPosition ObjectPosition;
  typedef cls::journal::ObjectPositions ObjectPositions;
  typedef cls::journal::ObjectSetPosition ObjectSetPosition;
  JournalPlayer(librados::IoCtx &ioctx, std::string_view object_oid_prefix,
                ceph::ref_t<JournalMetadata> journal_metadata,
                ReplayHandler* replay_handler,
                CacheManagerHandler *cache_manager_handler);
  ~JournalPlayer();
  // start fetching journal objects for a one-shot replay
  void prefetch();
  // start fetching, then poll for new entries every `interval` seconds
  void prefetch_and_watch(double interval);
  void shut_down(Context *on_finish);
  // pop the next replayable entry (commit order); false if none is ready
  bool try_pop_front(Entry *entry, uint64_t *commit_tid);
private:
  typedef std::set<uint8_t> PrefetchSplayOffsets;
  typedef std::map<uint8_t, ceph::ref_t<ObjectPlayer>> SplayedObjectPlayers;
  typedef std::map<uint8_t, ObjectPosition> SplayedObjectPositions;
  typedef std::set<uint64_t> ObjectNumbers;
  // lifecycle of the player
  enum State {
    STATE_INIT,
    STATE_WAITCACHE,   // waiting for a cache allotment before prefetching
    STATE_PREFETCH,
    STATE_PLAYBACK,
    STATE_ERROR
  };
  // which query the next watch tick should perform
  enum WatchStep {
    WATCH_STEP_FETCH_CURRENT,
    WATCH_STEP_FETCH_FIRST,
    WATCH_STEP_ASSERT_ACTIVE
  };
  // completion for an object fetch; tracked by m_async_op_tracker
  struct C_Fetch : public Context {
    JournalPlayer *player;
    uint64_t object_num;
    C_Fetch(JournalPlayer *p, uint64_t o) : player(p), object_num(o) {
      player->m_async_op_tracker.start_op();
    }
    ~C_Fetch() override {
      player->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      player->handle_fetched(object_num, r);
    }
  };
  // completion for a scheduled watch tick; tracked by m_async_op_tracker
  struct C_Watch : public Context {
    JournalPlayer *player;
    uint64_t object_num;
    C_Watch(JournalPlayer *player, uint64_t object_num)
      : player(player), object_num(object_num) {
      player->m_async_op_tracker.start_op();
    }
    ~C_Watch() override {
      player->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
      player->handle_watch(object_num, r);
    }
  };
  // forwards cache-manager rebalance notifications to the player
  struct CacheRebalanceHandler : public journal::CacheRebalanceHandler {
    JournalPlayer *player;
    CacheRebalanceHandler(JournalPlayer *player) : player(player) {
    }
    void handle_cache_rebalanced(uint64_t new_cache_bytes) override {
      player->handle_cache_rebalanced(new_cache_bytes);
    }
  };
  librados::IoCtx m_ioctx;
  CephContext *m_cct = nullptr;
  std::string m_object_oid_prefix;
  ceph::ref_t<JournalMetadata> m_journal_metadata;
  ReplayHandler* m_replay_handler;
  CacheManagerHandler *m_cache_manager_handler;  // may be null
  std::string m_cache_name;
  CacheRebalanceHandler m_cache_rebalance_handler;
  uint64_t m_max_fetch_bytes;        // per-object fetch budget
  AsyncOpTracker m_async_op_tracker;
  // guards all mutable state below
  mutable ceph::mutex m_lock = ceph::make_mutex("JournalPlayer::m_lock");
  State m_state = STATE_INIT;
  uint8_t m_splay_offset = 0;        // next splay offset to replay from
  bool m_watch_enabled = false;
  bool m_watch_scheduled = false;
  double m_watch_interval = 0;
  WatchStep m_watch_step = WATCH_STEP_FETCH_CURRENT;
  // set when the active tag went stale; allows pruning on next watch failure
  bool m_watch_prune_active_tag = false;
  bool m_shut_down = false;
  bool m_handler_notified = false;   // replay handler already notified
  ObjectNumbers m_fetch_object_numbers;       // fetches in flight
  PrefetchSplayOffsets m_prefetch_splay_offsets;
  SplayedObjectPlayers m_object_players;      // one player per splay offset
  bool m_commit_position_valid = false;
  ObjectPosition m_commit_position;
  SplayedObjectPositions m_commit_positions;
  uint64_t m_active_set = 0;
  boost::optional<uint64_t> m_active_tag_tid = boost::none;
  boost::optional<uint64_t> m_prune_tag_tid = boost::none;
  void advance_splay_object();
  bool is_object_set_ready() const;
  bool verify_playback_ready();
  void prune_tag(uint64_t tag_tid);
  void prune_active_tag(const boost::optional<uint64_t>& tag_tid);
  ceph::ref_t<ObjectPlayer> get_object_player() const;
  ceph::ref_t<ObjectPlayer> get_object_player(uint64_t object_number) const;
  bool remove_empty_object_player(const ceph::ref_t<ObjectPlayer> &object_player);
  void process_state(uint64_t object_number, int r);
  int process_prefetch(uint64_t object_number);
  int process_playback(uint64_t object_number);
  void fetch(uint64_t object_num);
  void fetch(const ceph::ref_t<ObjectPlayer> &object_player);
  void handle_fetched(uint64_t object_num, int r);
  void refetch(bool immediate);
  void schedule_watch(bool immediate);
  void handle_watch(uint64_t object_num, int r);
  void handle_watch_assert_active(int r);
  void notify_entries_available();
  void notify_complete(int r);
  void handle_cache_rebalanced(uint64_t new_cache_bytes);
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_PLAYER_H
| 5,090 | 27.762712 | 82 | h |
null | ceph-main/src/journal/JournalRecorder.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalRecorder.h"
#include "common/errno.h"
#include "journal/Entry.h"
#include "journal/Utils.h"
#include <atomic>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalRecorder: " << this << " " << __func__ \
<< ": "
using std::shared_ptr;
namespace journal {
namespace {
// Gather-completion: fires on_finish (via the metadata work queue) once
// `pending_flushes` completions have arrived, propagating an error result
// if any completion reported one. Deletes itself after dispatching.
struct C_Flush : public Context {
  ceph::ref_t<JournalMetadata> journal_metadata;
  Context *on_finish;
  std::atomic<int64_t> pending_flushes{0};
  int ret_val = 0;   // first recorded error (0 if none)
  C_Flush(ceph::ref_t<JournalMetadata> _journal_metadata, Context *_on_finish,
          size_t _pending_flushes)
    : journal_metadata(std::move(_journal_metadata)),
      on_finish(_on_finish),
      pending_flushes(_pending_flushes) {
  }
  // complete() is overridden (instead of finish()) so the object is only
  // destroyed after the final flush arrives
  void complete(int r) override {
    if (r < 0 && ret_val == 0) {
      ret_val = r;
    }
    if (--pending_flushes == 0) {
      // ensure all prior callback have been flushed as well
      journal_metadata->queue(on_finish, ret_val);
      delete this;
    }
  }
  void finish(int r) override {
  }
};
} // anonymous namespace
// Construct the recorder: duplicates the IoCtx, creates one ObjectRecorder
// (with its own lock) per splay offset for the current active object set,
// and registers for journal metadata updates.
JournalRecorder::JournalRecorder(librados::IoCtx &ioctx,
                                 std::string_view object_oid_prefix,
                                 ceph::ref_t<JournalMetadata> journal_metadata,
                                 uint64_t max_in_flight_appends)
  : m_object_oid_prefix(object_oid_prefix),
    m_journal_metadata(std::move(journal_metadata)),
    m_max_in_flight_appends(max_in_flight_appends),
    m_listener(this),
    m_object_handler(this),
    m_current_set(m_journal_metadata->get_active_set()),
    // one named mutex per splay offset, guarding that offset's recorder
    m_object_locks{ceph::make_lock_container<ceph::mutex>(
      m_journal_metadata->get_splay_width(), [](const size_t splay_offset) {
        return ceph::make_mutex("ObjectRecorder::m_lock::" +
                                std::to_string(splay_offset));
      })}
{
  std::lock_guard locker{m_lock};
  m_ioctx.dup(ioctx);
  m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
    uint64_t object_number = splay_offset + (m_current_set * splay_width);
    std::lock_guard locker{m_object_locks[splay_offset]};
    m_object_ptrs[splay_offset] = create_object_recorder(
      object_number, &m_object_locks[splay_offset]);
  }
  m_journal_metadata->add_listener(&m_listener);
}
// Destructor: stop receiving metadata updates first, then verify no object
// set advance or object close is still in flight.
JournalRecorder::~JournalRecorder() {
  m_journal_metadata->remove_listener(&m_listener);
  std::lock_guard locker{m_lock};
  ceph_assert(m_in_flight_advance_sets == 0);
  ceph_assert(m_in_flight_object_closes == 0);
}
// Flush all pending appends, then invoke on_safe. If an object set advance
// is still in flight when the flush completes, completion is deferred via
// m_on_object_set_advanced until the advance finishes.
void JournalRecorder::shut_down(Context *on_safe) {
  on_safe = new LambdaContext(
    [this, on_safe](int r) {
      Context *ctx = nullptr;
      {
        std::lock_guard locker{m_lock};
        if (m_in_flight_advance_sets != 0) {
          // wait for the in-flight advance before declaring safe
          ceph_assert(m_on_object_set_advanced == nullptr);
          m_on_object_set_advanced = new LambdaContext(
            [on_safe, r](int) {
              on_safe->complete(r);
            });
        } else {
          ctx = on_safe;
        }
      }
      // complete outside the lock
      if (ctx != nullptr) {
        ctx->complete(r);
      }
    });
  flush(on_safe);
}
void JournalRecorder::set_append_batch_options(int flush_interval,
uint64_t flush_bytes,
double flush_age) {
ldout(m_cct, 5) << "flush_interval=" << flush_interval << ", "
<< "flush_bytes=" << flush_bytes << ", "
<< "flush_age=" << flush_age << dendl;
std::lock_guard locker{m_lock};
m_flush_interval = flush_interval;
m_flush_bytes = flush_bytes;
m_flush_age = flush_age;
uint8_t splay_width = m_journal_metadata->get_splay_width();
for (uint8_t splay_offset = 0; splay_offset < splay_width; ++splay_offset) {
std::lock_guard object_locker{m_object_locks[splay_offset]};
auto object_recorder = get_object(splay_offset);
object_recorder->set_append_batch_options(flush_interval, flush_bytes,
flush_age);
}
}
// Append a payload under the given tag, returning a Future that completes
// when the entry is safe. Allocates entry/commit tids, chains the future to
// the previous append, and hands the encoded entry to the splay offset's
// object recorder. Note the lock hand-off: the object lock is acquired
// BEFORE m_lock is released so concurrent appends stay in tid order.
Future JournalRecorder::append(uint64_t tag_tid,
                               const bufferlist &payload_bl) {
  ldout(m_cct, 20) << "tag_tid=" << tag_tid << dendl;
  m_lock.lock();
  uint64_t entry_tid = m_journal_metadata->allocate_entry_tid(tag_tid);
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  uint8_t splay_offset = entry_tid % splay_width;
  auto object_ptr = get_object(splay_offset);
  uint64_t commit_tid = m_journal_metadata->allocate_commit_tid(
    object_ptr->get_object_number(), tag_tid, entry_tid);
  auto future = ceph::make_ref<FutureImpl>(tag_tid, entry_tid, commit_tid);
  future->init(m_prev_future);
  m_prev_future = future;
  // take the object lock before dropping m_lock (ordering guarantee)
  m_object_locks[splay_offset].lock();
  m_lock.unlock();
  bufferlist entry_bl;
  encode(Entry(future->get_tag_tid(), future->get_entry_tid(), payload_bl),
         entry_bl);
  // a single entry must fit within one journal data object
  ceph_assert(entry_bl.length() <= m_journal_metadata->get_object_size());
  bool object_full = object_ptr->append({{future, entry_bl}});
  m_object_locks[splay_offset].unlock();
  if (object_full) {
    ldout(m_cct, 10) << "object " << object_ptr->get_oid() << " now full"
                     << dendl;
    std::lock_guard l{m_lock};
    close_and_advance_object_set(object_ptr->get_object_number() / splay_width);
  }
  return Future(future);
}
// Flush pending appends on every object recorder; on_safe fires after all
// of them (plus this function's own extra reference) have completed.
void JournalRecorder::flush(Context *on_safe) {
  ldout(m_cct, 20) << dendl;
  C_Flush *flush_ctx = nullptr;
  {
    std::lock_guard locker{m_lock};
    // one extra pending count is consumed below, outside the lock
    flush_ctx = new C_Flush(m_journal_metadata, on_safe,
                            m_object_ptrs.size() + 1);
    for (const auto& [splay_offset, object_recorder] : m_object_ptrs) {
      object_recorder->flush(flush_ctx);
    }
  }
  // avoid holding the lock in case there is nothing to flush
  flush_ctx->complete(0);
}
// Return the active object recorder for a splay offset (must exist and be
// non-null). Caller must hold m_lock.
ceph::ref_t<ObjectRecorder> JournalRecorder::get_object(uint8_t splay_offset) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  const auto& recorder = m_object_ptrs.at(splay_offset);
  ceph_assert(recorder);
  return recorder;
}
// Close the (full) object set and begin advancing the journal's active
// set. No-op when a close/advance for a newer set is already in progress.
// Caller must hold m_lock.
void JournalRecorder::close_and_advance_object_set(uint64_t object_set) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  // entry overflow from open object
  if (m_current_set != object_set) {
    ldout(m_cct, 20) << "close already in-progress" << dendl;
    return;
  }
  // we shouldn't overflow upon append if already closed and we
  // shouldn't receive an overflowed callback if already closed
  ceph_assert(m_in_flight_advance_sets == 0);
  ceph_assert(m_in_flight_object_closes == 0);
  uint64_t active_set = m_journal_metadata->get_active_set();
  ceph_assert(m_current_set == active_set);
  ++m_current_set;
  ++m_in_flight_advance_sets;
  ldout(m_cct, 10) << "closing active object set " << object_set << dendl;
  if (close_object_set(m_current_set)) {
    // all recorders closed synchronously -- advance immediately
    advance_object_set();
  }
}
// Persist the new active object set in the journal metadata; the result is
// delivered to handle_advance_object_set(). Caller must hold m_lock and all
// object closes must have completed.
void JournalRecorder::advance_object_set() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ceph_assert(m_in_flight_object_closes == 0);
  ldout(m_cct, 10) << "advance to object set " << m_current_set << dendl;
  m_journal_metadata->set_active_set(m_current_set, new C_AdvanceObjectSet(
    this));
}
// Completion of the metadata active-set advance. -ESTALE means a peer
// already advanced the set and is not treated as an error. Once no advance
// or close remains in flight, the new object set is opened and any waiter
// (from shut_down) is completed outside the lock.
void JournalRecorder::handle_advance_object_set(int r) {
  Context *on_object_set_advanced = nullptr;
  {
    std::lock_guard locker{m_lock};
    ldout(m_cct, 20) << __func__ << ": r=" << r << dendl;
    ceph_assert(m_in_flight_advance_sets > 0);
    --m_in_flight_advance_sets;
    if (r < 0 && r != -ESTALE) {
      lderr(m_cct) << "failed to advance object set: " << cpp_strerror(r)
                   << dendl;
    }
    if (m_in_flight_advance_sets == 0 && m_in_flight_object_closes == 0) {
      open_object_set();
      std::swap(on_object_set_advanced, m_on_object_set_advanced);
    }
  }
  if (on_object_set_advanced != nullptr) {
    on_object_set_advanced->complete(0);
  }
}
// Create object recorders for the (already advanced) current object set,
// replaying any overflowed appends into them. If replaying overflows
// again, the freshly opened set is immediately closed and advanced too.
// Caller must hold m_lock.
//
// Fix: dropped the stray empty debug statement
// (`ldout(m_cct, 10) << "" << dendl;`) that emitted a blank level-10 log
// line on every overflow -- clearly leftover debug output.
void JournalRecorder::open_object_set() {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ldout(m_cct, 10) << "opening object set " << m_current_set << dendl;
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  bool overflowed = false;
  auto lockers{lock_object_recorders()};
  for (const auto& p : m_object_ptrs) {
    const auto& object_recorder = p.second;
    uint64_t object_number = object_recorder->get_object_number();
    if (object_number / splay_width != m_current_set) {
      ceph_assert(object_recorder->is_closed());
      // ready to close object and open object in active set
      if (create_next_object_recorder(object_recorder)) {
        overflowed = true;
      }
    }
  }
  lockers.clear();
  if (overflowed) {
    ldout(m_cct, 10) << "object set " << m_current_set << " now full" << dendl;
    close_and_advance_object_set(m_current_set);
  }
}
// Ask every object recorder belonging to an older set than active_set to
// close (flush queued appends and hold future ones). Returns true if all
// closes completed synchronously; otherwise handle_closed() will fire for
// each pending one. Caller must hold m_lock.
bool JournalRecorder::close_object_set(uint64_t active_set) {
  ldout(m_cct, 10) << "active_set=" << active_set << dendl;
  ceph_assert(ceph_mutex_is_locked(m_lock));
  // object recorders will invoke overflow handler as they complete
  // closing the object to ensure correct order of future appends
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  auto lockers{lock_object_recorders()};
  for (const auto& p : m_object_ptrs) {
    const auto& object_recorder = p.second;
    if (object_recorder->get_object_number() / splay_width != active_set) {
      ldout(m_cct, 10) << "closing object " << object_recorder->get_oid()
                       << dendl;
      // flush out all queued appends and hold future appends
      if (!object_recorder->close()) {
        ++m_in_flight_object_closes;
        ldout(m_cct, 10) << "object " << object_recorder->get_oid() << " "
                         << "close in-progress" << dendl;
      } else {
        ldout(m_cct, 10) << "object " << object_recorder->get_oid() << " closed"
                         << dendl;
      }
    }
  }
  return (m_in_flight_object_closes == 0);
}
// Construct a new ObjectRecorder for the given journal data object, wired
// to this recorder's overflow/close handler and configured with the
// current append batch options.
ceph::ref_t<ObjectRecorder> JournalRecorder::create_object_recorder(
    uint64_t object_number, ceph::mutex* lock) {
  ldout(m_cct, 10) << "object_number=" << object_number << dendl;
  auto oid = utils::get_object_name(m_object_oid_prefix, object_number);
  auto recorder = ceph::make_ref<ObjectRecorder>(
    m_ioctx, oid, object_number, lock,
    m_journal_metadata->get_work_queue(), &m_object_handler,
    m_journal_metadata->get_order(), m_max_in_flight_appends);
  recorder->set_append_batch_options(m_flush_interval, m_flush_bytes,
                                     m_flush_age);
  return recorder;
}
// Replace a closed recorder with one for the same splay offset in the
// current object set, migrating its unflushed append buffers and updating
// their commit records to point at the new object. Returns true if the
// replayed appends already filled the new object. Caller must hold m_lock
// and the splay offset's object lock.
bool JournalRecorder::create_next_object_recorder(
    ceph::ref_t<ObjectRecorder> object_recorder) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  uint64_t object_number = object_recorder->get_object_number();
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  uint8_t splay_offset = object_number % splay_width;
  ldout(m_cct, 10) << "object_number=" << object_number << dendl;
  ceph_assert(ceph_mutex_is_locked(m_object_locks[splay_offset]));
  auto new_object_recorder = create_object_recorder(
     (m_current_set * splay_width) + splay_offset, &m_object_locks[splay_offset]);
  ldout(m_cct, 10) << "old oid=" << object_recorder->get_oid() << ", "
                   << "new oid=" << new_object_recorder->get_oid() << dendl;
  AppendBuffers append_buffers;
  object_recorder->claim_append_buffers(&append_buffers);
  // update the commit record to point to the correct object number
  for (auto &append_buffer : append_buffers) {
    m_journal_metadata->overflow_commit_tid(
      append_buffer.first->get_commit_tid(),
      new_object_recorder->get_object_number());
  }
  bool object_full = new_object_recorder->append(std::move(append_buffers));
  if (object_full) {
    ldout(m_cct, 10) << "object " << new_object_recorder->get_oid() << " "
                     << "now full" << dendl;
  }
  m_object_ptrs[splay_offset] = std::move(new_object_recorder);
  return object_full;
}
// Metadata listener callback: a peer journal client may have advanced the
// active object set; if so, close our current set's recorders and reopen
// on the new set (unless a local advance/close is already in flight, in
// which case its completion will take care of it).
void JournalRecorder::handle_update() {
  std::lock_guard locker{m_lock};
  uint64_t active_set = m_journal_metadata->get_active_set();
  if (m_current_set < active_set) {
    // peer journal client advanced the active set
    ldout(m_cct, 10) << "current_set=" << m_current_set << ", "
                     << "active_set=" << active_set << dendl;
    uint64_t current_set = m_current_set;
    m_current_set = active_set;
    if (m_in_flight_advance_sets == 0 && m_in_flight_object_closes == 0) {
      ldout(m_cct, 10) << "closing current object set " << current_set << dendl;
      if (close_object_set(active_set)) {
        open_object_set();
      }
    }
  }
}
// ObjectRecorder callback: an asynchronous close completed. When the last
// pending close finishes, either open the new object set (peer-forced
// close) or persist the locally advanced set.
void JournalRecorder::handle_closed(ObjectRecorder *object_recorder) {
  ldout(m_cct, 10) << object_recorder->get_oid() << dendl;
  std::lock_guard locker{m_lock};
  uint64_t object_number = object_recorder->get_object_number();
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  uint8_t splay_offset = object_number % splay_width;
  auto& active_object_recorder = m_object_ptrs.at(splay_offset);
  // the notification must come from the still-active recorder
  ceph_assert(active_object_recorder->get_object_number() == object_number);
  ceph_assert(m_in_flight_object_closes > 0);
  --m_in_flight_object_closes;
  // object closed after advance active set committed
  ldout(m_cct, 10) << "object " << active_object_recorder->get_oid()
                   << " closed" << dendl;
  if (m_in_flight_object_closes == 0) {
    if (m_in_flight_advance_sets == 0) {
      // peer forced closing of object set
      open_object_set();
    } else {
      // local overflow advanced object set
      advance_object_set();
    }
  }
}
// ObjectRecorder callback: an append overflowed the object's capacity.
// Verify the notification refers to the active recorder for its splay
// offset, then close the containing object set and advance to the next.
void JournalRecorder::handle_overflow(ObjectRecorder *object_recorder) {
  ldout(m_cct, 10) << object_recorder->get_oid() << dendl;
  std::lock_guard locker{m_lock};
  const uint64_t object_number = object_recorder->get_object_number();
  const uint8_t splay_width = m_journal_metadata->get_splay_width();
  const auto& active_recorder = m_object_ptrs.at(object_number % splay_width);
  ceph_assert(active_recorder->get_object_number() == object_number);
  ldout(m_cct, 10) << "object " << active_recorder->get_oid()
                   << " overflowed" << dendl;
  close_and_advance_object_set(object_number / splay_width);
}
// Acquire every per-splay-offset object lock (in container order),
// returning RAII lockers that release them when destroyed/cleared.
JournalRecorder::Lockers JournalRecorder::lock_object_recorders() {
  Lockers object_lockers;
  object_lockers.reserve(m_object_ptrs.size());
  for (auto& object_lock : m_object_locks) {
    object_lockers.emplace_back(object_lock);
  }
  return object_lockers;
}
} // namespace journal
| 14,655 | 32.691954 | 82 | cc |
null | ceph-main/src/journal/JournalRecorder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_RECORDER_H
#define CEPH_JOURNAL_JOURNAL_RECORDER_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/containers.h"
#include "common/Timer.h"
#include "journal/Future.h"
#include "journal/FutureImpl.h"
#include "journal/JournalMetadata.h"
#include "journal/ObjectRecorder.h"
#include <map>
#include <string>
namespace journal {
// Appends journal entries, splaying them across the data objects of the
// active object set and advancing the set when objects fill up.
class JournalRecorder {
public:
  JournalRecorder(librados::IoCtx &ioctx, std::string_view object_oid_prefix,
                  ceph::ref_t<JournalMetadata> journal_metadata,
                  uint64_t max_in_flight_appends);
  ~JournalRecorder();
  void shut_down(Context *on_safe);
  // tune append batching (also applied to existing object recorders)
  void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
                                double flush_age);
  // append a payload under a tag; the Future completes when it is safe
  Future append(uint64_t tag_tid, const bufferlist &bl);
  void flush(Context *on_safe);
  // active recorder for a splay offset (caller must hold m_lock)
  ceph::ref_t<ObjectRecorder> get_object(uint8_t splay_offset);
private:
  typedef std::map<uint8_t, ceph::ref_t<ObjectRecorder>> ObjectRecorderPtrs;
  typedef std::vector<std::unique_lock<ceph::mutex>> Lockers;
  // receives journal metadata update notifications
  struct Listener : public JournalMetadataListener {
    JournalRecorder *journal_recorder;
    Listener(JournalRecorder *_journal_recorder)
      : journal_recorder(_journal_recorder) {}
    void handle_update(JournalMetadata *) override {
      journal_recorder->handle_update();
    }
  };
  // receives close/overflow notifications from object recorders
  struct ObjectHandler : public ObjectRecorder::Handler {
    JournalRecorder *journal_recorder;
    ObjectHandler(JournalRecorder *_journal_recorder)
      : journal_recorder(_journal_recorder) {
    }
    void closed(ObjectRecorder *object_recorder) override {
      journal_recorder->handle_closed(object_recorder);
    }
    void overflow(ObjectRecorder *object_recorder) override {
      journal_recorder->handle_overflow(object_recorder);
    }
  };
  // completion for persisting an advanced active set
  struct C_AdvanceObjectSet : public Context {
    JournalRecorder *journal_recorder;
    C_AdvanceObjectSet(JournalRecorder *_journal_recorder)
      : journal_recorder(_journal_recorder) {
    }
    void finish(int r) override {
      journal_recorder->handle_advance_object_set(r);
    }
  };
  librados::IoCtx m_ioctx;
  CephContext *m_cct = nullptr;
  std::string m_object_oid_prefix;
  ceph::ref_t<JournalMetadata> m_journal_metadata;
  // append batch options (propagated to new object recorders)
  uint32_t m_flush_interval = 0;
  uint64_t m_flush_bytes = 0;
  double m_flush_age = 0;
  uint64_t m_max_in_flight_appends;
  Listener m_listener;
  ObjectHandler m_object_handler;
  ceph::mutex m_lock = ceph::make_mutex("JournalerRecorder::m_lock");
  uint32_t m_in_flight_advance_sets = 0;
  uint32_t m_in_flight_object_closes = 0;
  uint64_t m_current_set;
  // one active recorder per splay offset, each guarded by its own lock
  ObjectRecorderPtrs m_object_ptrs;
  ceph::containers::tiny_vector<ceph::mutex> m_object_locks;
  // previous append's future (chained for ordered completion)
  ceph::ref_t<FutureImpl> m_prev_future;
  // shut_down waiter, fired once an in-flight advance finishes
  Context *m_on_object_set_advanced = nullptr;
  void open_object_set();
  bool close_object_set(uint64_t active_set);
  void advance_object_set();
  void handle_advance_object_set(int r);
  void close_and_advance_object_set(uint64_t object_set);
  ceph::ref_t<ObjectRecorder> create_object_recorder(uint64_t object_number,
                                                     ceph::mutex* lock);
  bool create_next_object_recorder(ceph::ref_t<ObjectRecorder> object_recorder);
  void handle_update();
  void handle_closed(ObjectRecorder *object_recorder);
  void handle_overflow(ObjectRecorder *object_recorder);
  Lockers lock_object_recorders();
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_RECORDER_H
| 3,690 | 27.612403 | 80 | h |
null | ceph-main/src/journal/JournalTrimmer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/JournalTrimmer.h"
#include "journal/Utils.h"
#include "common/Cond.h"
#include "common/errno.h"
#include <limits>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "JournalTrimmer: " << this << " "
namespace journal {
// Gather-completion for removing all objects of one object set: each of
// the set's aio_remove completions decrements `refs`; the last one invokes
// finish(), which reports back to the trimmer.
struct JournalTrimmer::C_RemoveSet : public Context {
  JournalTrimmer *journal_trimmer;
  uint64_t object_set;
  // NOTE: this default initializer is overridden by the out-of-line
  // constructor, which names the mutex via unique_lock_name()
  ceph::mutex lock = ceph::make_mutex("JournalTrimmer::m_lock");
  uint32_t refs;        // outstanding aio_remove completions
  int return_value;     // aggregated result (see complete())
  C_RemoveSet(JournalTrimmer *_journal_trimmer, uint64_t _object_set,
              uint8_t _splay_width);
  void complete(int r) override;
  void finish(int r) override {
    journal_trimmer->handle_set_removed(r, object_set);
    // balances the start_op() performed by remove_set()
    journal_trimmer->m_async_op_tracker.finish_op();
  }
};
// Construct the trimmer: duplicates the IoCtx and registers for journal
// metadata updates (commit-position changes drive trimming).
JournalTrimmer::JournalTrimmer(librados::IoCtx &ioctx,
                               const std::string &object_oid_prefix,
                               const ceph::ref_t<JournalMetadata>& journal_metadata)
    : m_cct(NULL), m_object_oid_prefix(object_oid_prefix),
      m_journal_metadata(journal_metadata), m_metadata_listener(this),
      m_remove_set_pending(false),
      m_remove_set(0), m_remove_set_ctx(NULL) {
  m_ioctx.dup(ioctx);
  m_cct = reinterpret_cast<CephContext *>(m_ioctx.cct());
  m_journal_metadata->add_listener(&m_metadata_listener);
}
// Destructor: shut_down() must have been invoked first.
JournalTrimmer::~JournalTrimmer() {
  ceph_assert(m_shutdown);
}
// Shut down the trimmer: stop listening for metadata updates, flush the
// commit position, then wait for any in-flight async ops before invoking
// on_finish.
void JournalTrimmer::shut_down(Context *on_finish) {
  ldout(m_cct, 20) << __func__ << dendl;
  {
    std::lock_guard locker{m_lock};
    ceph_assert(!m_shutdown);
    m_shutdown = true;
  }
  m_journal_metadata->remove_listener(&m_metadata_listener);
  // chain the shut down sequence (reverse order)
  on_finish = new LambdaContext([this, on_finish](int r) {
      m_async_op_tracker.wait_for_ops(on_finish);
    });
  m_journal_metadata->flush_commit_position(on_finish);
}
// Asynchronously remove all journal data objects. Unless `force` is true,
// removal is refused with -EINVAL when no client is registered and -EBUSY
// when more than one is. on_finish fires with the final result once every
// object set has been removed (or immediately on error / if a removal is
// already in progress).
//
// Fix: added the missing `return;` after completing on_finish with -EBUSY
// when m_remove_set_pending is set. Previously the lambda fell through and
// could complete the already-completed (self-deleting) Context a second
// time and/or clobber m_remove_set_ctx -- a use-after-free.
void JournalTrimmer::remove_objects(bool force, Context *on_finish) {
  ldout(m_cct, 20) << __func__ << dendl;
  on_finish = new LambdaContext([this, force, on_finish](int r) {
      std::lock_guard locker{m_lock};
      if (m_remove_set_pending) {
        // a removal is already in flight; on_finish must only fire once
        on_finish->complete(-EBUSY);
        return;
      }
      if (!force) {
        JournalMetadata::RegisteredClients registered_clients;
        m_journal_metadata->get_registered_clients(&registered_clients);
        if (registered_clients.size() == 0) {
          on_finish->complete(-EINVAL);
          return;
        } else if (registered_clients.size() > 1) {
          on_finish->complete(-EBUSY);
          return;
        }
      }
      // remove every set up to (effectively) infinity
      m_remove_set = std::numeric_limits<uint64_t>::max();
      m_remove_set_pending = true;
      m_remove_set_ctx = on_finish;
      remove_set(m_journal_metadata->get_minimum_set());
    });
  m_async_op_tracker.wait_for_ops(on_finish);
}
// Record that the entry with the given commit tid has been committed; the
// metadata layer advances the commit position asynchronously, tracked via
// a C_CommitPositionSafe created by the stored factory.
void JournalTrimmer::committed(uint64_t commit_tid) {
  ldout(m_cct, 20) << __func__ << ": commit_tid=" << commit_tid << dendl;
  auto& create_ctx = m_create_commit_position_safe_context;
  m_journal_metadata->committed(commit_tid, create_ctx);
}
// Request removal of all object sets below minimum_set. If a removal is
// already in flight, just raise its target; otherwise start removing from
// the current minimum set. Caller must hold m_lock.
void JournalTrimmer::trim_objects(uint64_t minimum_set) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  ldout(m_cct, 20) << __func__ << ": min_set=" << minimum_set << dendl;
  if (minimum_set <= m_journal_metadata->get_minimum_set()) {
    // nothing below the target left to remove
    return;
  }
  if (m_remove_set_pending) {
    // widen the in-flight removal target instead of starting another
    if (minimum_set > m_remove_set) {
      m_remove_set = minimum_set;
    }
    return;
  }
  m_remove_set = minimum_set;
  m_remove_set_pending = true;
  remove_set(m_journal_metadata->get_minimum_set());
}
// Issue async removals for every data object in `object_set` (one per
// splay offset). A shared C_RemoveSet aggregates the completions and
// reports to handle_set_removed(). Caller must hold m_lock.
void JournalTrimmer::remove_set(uint64_t object_set) {
  ceph_assert(ceph_mutex_is_locked(m_lock));
  m_async_op_tracker.start_op();
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  C_RemoveSet *ctx = new C_RemoveSet(this, object_set, splay_width);
  ldout(m_cct, 20) << __func__ << ": removing object set " << object_set
                   << dendl;
  for (uint64_t object_number = object_set * splay_width;
       object_number < (object_set + 1) * splay_width;
       ++object_number) {
    std::string oid = utils::get_object_name(m_object_oid_prefix,
                                             object_number);
    ldout(m_cct, 20) << "removing journal object " << oid << dendl;
    auto comp =
      librados::Rados::aio_create_completion(ctx, utils::rados_ctx_callback);
    // allow removal even when the pool/cluster is full -- trimming frees space
    int r = m_ioctx.aio_remove(oid, comp,
                               CEPH_OSD_FLAG_FULL_FORCE | CEPH_OSD_FLAG_FULL_TRY);
    ceph_assert(r == 0);
    comp->release();
  }
}
// Metadata listener callback: recompute the oldest object set any
// connected registered client still references and trim everything below
// it.
void JournalTrimmer::handle_metadata_updated() {
  ldout(m_cct, 20) << __func__ << dendl;
  std::lock_guard locker{m_lock};
  JournalMetadata::RegisteredClients registered_clients;
  m_journal_metadata->get_registered_clients(&registered_clients);
  uint8_t splay_width = m_journal_metadata->get_splay_width();
  uint64_t minimum_set = m_journal_metadata->get_minimum_set();
  uint64_t active_set = m_journal_metadata->get_active_set();
  // start from the active set and lower to the oldest referenced set
  uint64_t minimum_commit_set = active_set;
  std::string minimum_client_id;
  for (auto &client : registered_clients) {
    if (client.state == cls::journal::CLIENT_STATE_DISCONNECTED) {
      // disconnected clients don't hold back trimming
      continue;
    }
    if (client.commit_position.object_positions.empty()) {
      // client hasn't recorded any commits
      minimum_commit_set = minimum_set;
      minimum_client_id = client.id;
      break;
    }
    for (auto &position : client.commit_position.object_positions) {
      uint64_t object_set = position.object_number / splay_width;
      if (object_set < minimum_commit_set) {
        minimum_client_id = client.id;
        minimum_commit_set = object_set;
      }
    }
  }
  if (minimum_commit_set > minimum_set) {
    trim_objects(minimum_commit_set);
  } else {
    ldout(m_cct, 20) << "object set " << minimum_commit_set << " still "
                     << "in-use by client " << minimum_client_id << dendl;
  }
}
// C_RemoveSet completion: the objects of `object_set` were removed
// (-ENOENT means none existed and is treated as success). Advances the
// recorded minimum set and keeps removing sets until m_remove_set is
// reached, then completes any pending remove_objects() context.
void JournalTrimmer::handle_set_removed(int r, uint64_t object_set) {
  ldout(m_cct, 20) << __func__ << ": r=" << r << ", set=" << object_set << ", "
                   << "trim=" << m_remove_set << dendl;
  std::lock_guard locker{m_lock};
  m_remove_set_pending = false;
  if (r == -ENOENT) {
    // no objects within the set existed
    r = 0;
  }
  if (r == 0) {
    // advance the minimum set to the next set
    m_journal_metadata->set_minimum_set(object_set + 1);
    uint64_t active_set = m_journal_metadata->get_active_set();
    uint64_t minimum_set = m_journal_metadata->get_minimum_set();
    if (m_remove_set > minimum_set && minimum_set <= active_set) {
      // more sets remain below the removal target
      m_remove_set_pending = true;
      remove_set(minimum_set);
    }
  }
  if (m_remove_set_ctx != nullptr && !m_remove_set_pending) {
    ldout(m_cct, 20) << "completing remove set context" << dendl;
    m_remove_set_ctx->complete(r);
    m_remove_set_ctx = nullptr;
  }
}
// One reference per splay object in the set; -ENOENT is the neutral
// starting result, upgraded/overridden as completions arrive.
JournalTrimmer::C_RemoveSet::C_RemoveSet(JournalTrimmer *_journal_trimmer,
                                         uint64_t _object_set,
                                         uint8_t _splay_width)
  : journal_trimmer(_journal_trimmer), object_set(_object_set),
    lock(ceph::make_mutex(utils::unique_lock_name("C_RemoveSet::lock", this))),
    refs(_splay_width), return_value(-ENOENT) {
}
// Per-object removal completion: fold the result into return_value (real
// errors take precedence; any success upgrades the initial -ENOENT) and,
// on the final reference, run finish() and self-destruct.
void JournalTrimmer::C_RemoveSet::complete(int r) {
  lock.lock();
  if (r < 0 && r != -ENOENT &&
      (return_value == -ENOENT || return_value == 0)) {
    // record the first real error
    return_value = r;
  } else if (r == 0 && return_value == -ENOENT) {
    // at least one object existed and was removed
    return_value = 0;
  }
  if (--refs == 0) {
    finish(return_value);
    lock.unlock();
    delete this;
  } else {
    lock.unlock();
  }
}
} // namespace journal
| 7,743 | 30.225806 | 84 | cc |
null | ceph-main/src/journal/JournalTrimmer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNAL_TRIMMER_H
#define CEPH_JOURNAL_JOURNAL_TRIMMER_H
#include "include/int_types.h"
#include "include/rados/librados.hpp"
#include "include/Context.h"
#include "common/AsyncOpTracker.h"
#include "journal/JournalMetadata.h"
#include "cls/journal/cls_journal_types.h"
#include <functional>
struct Context;
namespace journal {
// Removes journal data objects that are no longer referenced by any
// registered client, driven by commit-position updates in the metadata.
class JournalTrimmer {
public:
  typedef cls::journal::ObjectSetPosition ObjectSetPosition;
  JournalTrimmer(librados::IoCtx &ioctx, const std::string &object_oid_prefix,
                 const ceph::ref_t<JournalMetadata> &journal_metadata);
  ~JournalTrimmer();
  void shut_down(Context *on_finish);
  // remove all journal data objects (see .cc for force semantics)
  void remove_objects(bool force, Context *on_finish);
  // record a committed entry; may eventually trigger trimming
  void committed(uint64_t commit_tid);
private:
  typedef std::function<Context*()> CreateContext;
  // forwards journal metadata update notifications to the trimmer
  struct MetadataListener : public JournalMetadataListener {
    JournalTrimmer *journal_trimmer;
    MetadataListener(JournalTrimmer *journal_trimmer)
      : journal_trimmer(journal_trimmer) {
    }
    void handle_update(JournalMetadata *) override {
      journal_trimmer->handle_metadata_updated();
    }
  };
  // keeps the async-op tracker busy for the lifetime of a commit-position
  // update
  struct C_CommitPositionSafe : public Context {
    JournalTrimmer *journal_trimmer;
    C_CommitPositionSafe(JournalTrimmer *_journal_trimmer)
      : journal_trimmer(_journal_trimmer) {
      journal_trimmer->m_async_op_tracker.start_op();
    }
    ~C_CommitPositionSafe() override {
      journal_trimmer->m_async_op_tracker.finish_op();
    }
    void finish(int r) override {
    }
  };
  struct C_RemoveSet;
  librados::IoCtx m_ioctx;
  CephContext *m_cct;
  std::string m_object_oid_prefix;
  ceph::ref_t<JournalMetadata> m_journal_metadata;
  MetadataListener m_metadata_listener;
  AsyncOpTracker m_async_op_tracker;
  ceph::mutex m_lock = ceph::make_mutex("JournalTrimmer::m_lock");
  bool m_remove_set_pending;   // a set removal is in flight
  uint64_t m_remove_set;       // remove all sets below this one
  Context *m_remove_set_ctx;   // pending remove_objects() completion
  bool m_shutdown = false;
  // factory handed to JournalMetadata::committed()
  CreateContext m_create_commit_position_safe_context = [this]() {
      return new C_CommitPositionSafe(this);
    };
  void trim_objects(uint64_t minimum_set);
  void remove_set(uint64_t object_set);
  void handle_metadata_updated();
  void handle_set_removed(int r, uint64_t object_set);
};
} // namespace journal
#endif // CEPH_JOURNAL_JOURNAL_TRIMMER_H
| 2,407 | 24.617021 | 78 | h |
null | ceph-main/src/journal/Journaler.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Journaler.h"
#include "include/stringify.h"
#include "common/errno.h"
#include "common/Timer.h"
#include "common/WorkQueue.h"
#include "journal/Entry.h"
#include "journal/FutureImpl.h"
#include "journal/JournalMetadata.h"
#include "journal/JournalPlayer.h"
#include "journal/JournalRecorder.h"
#include "journal/JournalTrimmer.h"
#include "journal/ReplayEntry.h"
#include "journal/ReplayHandler.h"
#include "cls/journal/cls_journal_client.h"
#include "cls/journal/cls_journal_types.h"
#include "Utils.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "Journaler: " << this << " "
namespace journal {
namespace {
static const std::string JOURNAL_HEADER_PREFIX = "journal.";
static const std::string JOURNAL_OBJECT_PREFIX = "journal_data.";
} // anonymous namespace
using namespace cls::journal;
using utils::rados_ctx_callback;
// Build the RADOS object name holding the journal's metadata header.
std::string Journaler::header_oid(const std::string &journal_id) {
  std::string oid(JOURNAL_HEADER_PREFIX);
  oid.append(journal_id);
  return oid;
}
// Build the common prefix for this journal's data objects:
// "journal_data.<pool id>.<journal id>."
std::string Journaler::object_oid_prefix(int pool_id,
                                         const std::string &journal_id) {
  std::string prefix = JOURNAL_OBJECT_PREFIX;
  prefix += stringify(pool_id);
  prefix += '.';
  prefix += journal_id;
  prefix += '.';
  return prefix;
}
Journaler::Threads::Threads(CephContext *cct) {
thread_pool = new ThreadPool(cct, "Journaler::thread_pool", "tp_journal", 1);
thread_pool->start();
work_queue = new ContextWQ("Journaler::work_queue",
ceph::make_timespan(60),
thread_pool);
timer = new SafeTimer(cct, timer_lock, true);
timer->init();
}
// Tear down helper threads in reverse order of construction: timer first
// (shutdown performed under the timer lock the SafeTimer was constructed
// with), then the work queue (drained so no queued contexts are dropped),
// then the thread pool.
Journaler::Threads::~Threads() {
  {
    std::lock_guard timer_locker{timer_lock};
    timer->shutdown();
  }
  delete timer;
  timer = nullptr;

  work_queue->drain();
  delete work_queue;
  work_queue = nullptr;

  thread_pool->stop();
  delete thread_pool;
  thread_pool = nullptr;
}
Journaler::Journaler(librados::IoCtx &header_ioctx,
const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler)
: m_threads(new Threads(reinterpret_cast<CephContext*>(header_ioctx.cct()))),
m_client_id(client_id), m_cache_manager_handler(cache_manager_handler) {
set_up(m_threads->work_queue, m_threads->timer, &m_threads->timer_lock,
header_ioctx, journal_id, settings);
}
Journaler::Journaler(ContextWQ *work_queue, SafeTimer *timer,
ceph::mutex *timer_lock, librados::IoCtx &header_ioctx,
const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler)
: m_client_id(client_id), m_cache_manager_handler(cache_manager_handler) {
set_up(work_queue, timer, timer_lock, header_ioctx, journal_id,
settings);
}
void Journaler::set_up(ContextWQ *work_queue, SafeTimer *timer,
ceph::mutex *timer_lock, librados::IoCtx &header_ioctx,
const std::string &journal_id,
const Settings &settings) {
m_header_ioctx.dup(header_ioctx);
m_cct = reinterpret_cast<CephContext *>(m_header_ioctx.cct());
m_header_oid = header_oid(journal_id);
m_object_oid_prefix = object_oid_prefix(m_header_ioctx.get_id(), journal_id);
m_metadata = ceph::make_ref<JournalMetadata>(work_queue, timer, timer_lock,
m_header_ioctx, m_header_oid, m_client_id,
settings);
}
Journaler::~Journaler() {
if (m_metadata != nullptr) {
ceph_assert(!m_metadata->is_initialized());
if (!m_initialized) {
// never initialized -- ensure any in-flight ops are complete
// since we wouldn't expect shut_down to be invoked
m_metadata->wait_for_ops();
}
m_metadata.reset();
}
ceph_assert(m_trimmer == nullptr);
ceph_assert(m_player == nullptr);
ceph_assert(m_recorder == nullptr);
delete m_threads;
m_threads = nullptr;
}
void Journaler::exists(Context *on_finish) const {
librados::ObjectReadOperation op;
op.stat(nullptr, nullptr, nullptr);
librados::AioCompletion *comp =
librados::Rados::aio_create_completion(on_finish, rados_ctx_callback);
int r = m_header_ioctx.aio_operate(m_header_oid, comp, &op, nullptr);
ceph_assert(r == 0);
comp->release();
}
void Journaler::init(Context *on_init) {
m_initialized = true;
m_metadata->init(new C_InitJournaler(this, on_init));
}
// Finish initialization once the metadata header has loaded: select the
// IoCtx used for journal data objects and create the trimmer.
// Returns 0 on success; a negative errno if the configured data pool
// cannot be opened (-ENOENT if it no longer exists).
int Journaler::init_complete() {
  int64_t pool_id = m_metadata->get_pool_id();

  if (pool_id < 0 || pool_id == m_header_ioctx.get_id()) {
    // no separate data pool configured (or it matches the header pool)
    ldout(m_cct, 20) << "using image pool for journal data" << dendl;
    m_data_ioctx.dup(m_header_ioctx);
  } else {
    ldout(m_cct, 20) << "using pool id=" << pool_id << " for journal data"
                     << dendl;
    librados::Rados rados(m_header_ioctx);
    int r = rados.ioctx_create2(pool_id, m_data_ioctx);
    if (r < 0) {
      if (r == -ENOENT) {
        ldout(m_cct, 1) << "pool id=" << pool_id << " no longer exists"
                        << dendl;
      }
      return r;
    }
  }
  m_trimmer = new JournalTrimmer(m_data_ioctx, m_object_oid_prefix,
                                 m_metadata);
  return 0;
}
void Journaler::shut_down() {
C_SaferCond ctx;
shut_down(&ctx);
ctx.wait();
}
// Asynchronously shut the journaler down.  Completion contexts are built
// in reverse so execution order is: trimmer shut_down -> delete trimmer ->
// metadata shut_down -> user callback.  The lambdas capture the metadata
// ref (and trimmer pointer) by value to keep them alive until the chain
// finishes.  Note the caller's context is always completed with 0,
// regardless of intermediate results.
void Journaler::shut_down(Context *on_finish) {
  ceph_assert(m_player == nullptr);
  ceph_assert(m_recorder == nullptr);

  // take ownership so the destructor sees the journaler as torn down
  auto metadata = std::move(m_metadata);
  ceph_assert(metadata);

  on_finish = new LambdaContext([metadata, on_finish](int r) {
      on_finish->complete(0);
    });

  JournalTrimmer *trimmer = nullptr;
  std::swap(trimmer, m_trimmer);
  if (!trimmer) {
    // init_complete() never ran, so there is no trimmer to stop
    metadata->shut_down(on_finish);
    return;
  }

  on_finish = new LambdaContext([trimmer, metadata, on_finish](int r) {
      delete trimmer;
      metadata->shut_down(on_finish);
    });
  trimmer->shut_down(on_finish);
}
bool Journaler::is_initialized() const {
return m_metadata->is_initialized();
}
void Journaler::get_immutable_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id, Context *on_finish) {
m_metadata->get_immutable_metadata(order, splay_width, pool_id, on_finish);
}
void Journaler::get_mutable_metadata(uint64_t *minimum_set,
uint64_t *active_set,
RegisteredClients *clients,
Context *on_finish) {
m_metadata->get_mutable_metadata(minimum_set, active_set, clients, on_finish);
}
// Asynchronously create the journal header object.
// 'order': journal objects are 2^order bytes; must be within [12, 26]
// (completes with -EDOM otherwise).
// 'splay_width': must be non-zero (completes with -EINVAL otherwise).
// 'pool_id': forwarded verbatim to the cls_journal create call.
void Journaler::create(uint8_t order, uint8_t splay_width,
                       int64_t pool_id, Context *on_finish) {
  if (order > 26 || order < 12) {
    lderr(m_cct) << "order must be in the range [12, 26]" << dendl;
    on_finish->complete(-EDOM);
    return;
  }
  if (splay_width == 0) {
    on_finish->complete(-EINVAL);
    return;
  }

  ldout(m_cct, 5) << "creating new journal: " << m_header_oid << dendl;

  librados::ObjectWriteOperation op;
  client::create(&op, order, splay_width, pool_id);

  librados::AioCompletion *comp =
    librados::Rados::aio_create_completion(on_finish, rados_ctx_callback);
  int r = m_header_ioctx.aio_operate(m_header_oid, comp, &op);
  ceph_assert(r == 0);
  comp->release();
}
// Remove the journal.  The contexts are chained in reverse so execution
// order is: metadata shut_down -> trimmer remove_objects(force) ->
// header object removal -> on_finish.
// NOTE(review): each lambda ignores the incoming 'r', so failures from
// earlier steps are not propagated to later ones.
void Journaler::remove(bool force, Context *on_finish) {
  // chain journal removal (reverse order)
  on_finish = new LambdaContext([this, on_finish](int r) {
      librados::AioCompletion *comp = librados::Rados::aio_create_completion(
        on_finish, utils::rados_ctx_callback);
      r = m_header_ioctx.aio_remove(m_header_oid, comp);
      ceph_assert(r == 0);
      comp->release();
    });

  on_finish = new LambdaContext([this, force, on_finish](int r) {
      m_trimmer->remove_objects(force, on_finish);
    });

  m_metadata->shut_down(on_finish);
}
void Journaler::flush_commit_position(Context *on_safe) {
m_metadata->flush_commit_position(on_safe);
}
void Journaler::add_listener(JournalMetadataListener *listener) {
m_metadata->add_listener(listener);
}
void Journaler::remove_listener(JournalMetadataListener *listener) {
m_metadata->remove_listener(listener);
}
int Journaler::register_client(const bufferlist &data) {
C_SaferCond cond;
register_client(data, &cond);
return cond.wait();
}
int Journaler::unregister_client() {
C_SaferCond cond;
unregister_client(&cond);
return cond.wait();
}
void Journaler::register_client(const bufferlist &data, Context *on_finish) {
return m_metadata->register_client(data, on_finish);
}
void Journaler::update_client(const bufferlist &data, Context *on_finish) {
return m_metadata->update_client(data, on_finish);
}
void Journaler::unregister_client(Context *on_finish) {
return m_metadata->unregister_client(on_finish);
}
void Journaler::get_client(const std::string &client_id,
cls::journal::Client *client,
Context *on_finish) {
m_metadata->get_client(client_id, client, on_finish);
}
int Journaler::get_cached_client(const std::string &client_id,
cls::journal::Client *client) {
RegisteredClients clients;
m_metadata->get_registered_clients(&clients);
auto it = clients.find({client_id, {}});
if (it == clients.end()) {
return -ENOENT;
}
*client = *it;
return 0;
}
void Journaler::allocate_tag(const bufferlist &data, cls::journal::Tag *tag,
Context *on_finish) {
m_metadata->allocate_tag(cls::journal::Tag::TAG_CLASS_NEW, data, tag,
on_finish);
}
void Journaler::allocate_tag(uint64_t tag_class, const bufferlist &data,
cls::journal::Tag *tag, Context *on_finish) {
m_metadata->allocate_tag(tag_class, data, tag, on_finish);
}
void Journaler::get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish) {
m_metadata->get_tag(tag_tid, tag, on_finish);
}
void Journaler::get_tags(uint64_t tag_class, Tags *tags, Context *on_finish) {
m_metadata->get_tags(0, tag_class, tags, on_finish);
}
void Journaler::get_tags(uint64_t start_after_tag_tid, uint64_t tag_class,
Tags *tags, Context *on_finish) {
m_metadata->get_tags(start_after_tag_tid, tag_class, tags, on_finish);
}
void Journaler::start_replay(ReplayHandler* replay_handler) {
create_player(replay_handler);
m_player->prefetch();
}
void Journaler::start_live_replay(ReplayHandler* replay_handler,
double interval) {
create_player(replay_handler);
m_player->prefetch_and_watch(interval);
}
bool Journaler::try_pop_front(ReplayEntry *replay_entry,
uint64_t *tag_tid) {
ceph_assert(m_player != nullptr);
Entry entry;
uint64_t commit_tid;
if (!m_player->try_pop_front(&entry, &commit_tid)) {
return false;
}
*replay_entry = ReplayEntry(entry.get_data(), commit_tid);
if (tag_tid != nullptr) {
*tag_tid = entry.get_tag_tid();
}
return true;
}
void Journaler::stop_replay() {
C_SaferCond ctx;
stop_replay(&ctx);
ctx.wait();
}
// Asynchronously stop replay.  Ownership of the JournalPlayer is moved
// into the completion lambda so the player stays alive until its
// shut_down callback fires; it is destroyed when the lambda is, after
// which the caller's context completes with the shutdown result.
void Journaler::stop_replay(Context *on_finish) {
  auto player = std::move(m_player);
  // raw pointer taken before the unique_ptr is moved into the closure
  auto* playerp = player.get();

  auto f = [player=std::move(player), on_finish](int r) {
    on_finish->complete(r);
  };
  on_finish = new LambdaContext(std::move(f));
  playerp->shut_down(on_finish);
}
void Journaler::committed(const ReplayEntry &replay_entry) {
m_trimmer->committed(replay_entry.get_commit_tid());
}
void Journaler::committed(const Future &future) {
auto& future_impl = future.get_future_impl();
m_trimmer->committed(future_impl->get_commit_tid());
}
void Journaler::start_append(uint64_t max_in_flight_appends) {
ceph_assert(m_recorder == nullptr);
// TODO verify active object set >= current replay object set
m_recorder = std::make_unique<JournalRecorder>(m_data_ioctx, m_object_oid_prefix,
m_metadata, max_in_flight_appends);
}
void Journaler::set_append_batch_options(int flush_interval,
uint64_t flush_bytes,
double flush_age) {
ceph_assert(m_recorder != nullptr);
m_recorder->set_append_batch_options(flush_interval, flush_bytes, flush_age);
}
void Journaler::stop_append(Context *on_safe) {
auto recorder = std::move(m_recorder);
ceph_assert(recorder);
auto* recorderp = recorder.get();
on_safe = new LambdaContext([recorder=std::move(recorder), on_safe](int r) {
on_safe->complete(r);
});
recorderp->shut_down(on_safe);
}
// Largest payload that fits in a single journal entry: the object size
// minus the fixed per-entry framing overhead, further capped by the
// configured max_payload_bytes when that setting is non-zero.
uint64_t Journaler::get_max_append_size() const {
  uint64_t payload_limit = m_metadata->get_object_size() -
                           Entry::get_fixed_size();

  // a configured cap of 0 means "no explicit cap"
  const uint64_t configured_cap = m_metadata->get_settings().max_payload_bytes;
  if (configured_cap > 0 && configured_cap < payload_limit) {
    payload_limit = configured_cap;
  }
  return payload_limit;
}
Future Journaler::append(uint64_t tag_tid, const bufferlist &payload_bl) {
return m_recorder->append(tag_tid, payload_bl);
}
void Journaler::flush_append(Context *on_safe) {
m_recorder->flush(on_safe);
}
void Journaler::create_player(ReplayHandler* replay_handler) {
ceph_assert(m_player == nullptr);
m_player = std::make_unique<JournalPlayer>(m_data_ioctx, m_object_oid_prefix, m_metadata,
replay_handler, m_cache_manager_handler);
}
void Journaler::get_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id) {
ceph_assert(m_metadata != nullptr);
*order = m_metadata->get_order();
*splay_width = m_metadata->get_splay_width();
*pool_id = m_metadata->get_pool_id();
}
std::ostream &operator<<(std::ostream &os,
const Journaler &journaler) {
os << "[metadata=";
if (journaler.m_metadata) {
os << *journaler.m_metadata;
} else {
os << "NULL";
}
os << "]";
return os;
}
} // namespace journal
| 13,888 | 28.99784 | 91 | cc |
null | ceph-main/src/journal/Journaler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_JOURNALER_H
#define CEPH_JOURNAL_JOURNALER_H
#include "include/int_types.h"
#include "include/buffer_fwd.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "journal/Future.h"
#include "journal/JournalMetadataListener.h"
#include "cls/journal/cls_journal_types.h"
#include "common/Timer.h"
#include <list>
#include <map>
#include <string>
#include "include/ceph_assert.h"
class ContextWQ;
class ThreadPool;
namespace journal {
struct CacheManagerHandler;
class JournalTrimmer;
class ReplayEntry;
class ReplayHandler;
class Settings;
class Journaler {
public:
struct Threads {
Threads(CephContext *cct);
~Threads();
ThreadPool *thread_pool = nullptr;
ContextWQ *work_queue = nullptr;
SafeTimer *timer;
ceph::mutex timer_lock = ceph::make_mutex("Journaler::timer_lock");
};
typedef cls::journal::Tag Tag;
typedef std::list<cls::journal::Tag> Tags;
typedef std::set<cls::journal::Client> RegisteredClients;
static std::string header_oid(const std::string &journal_id);
static std::string object_oid_prefix(int pool_id,
const std::string &journal_id);
Journaler(librados::IoCtx &header_ioctx, const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler);
Journaler(ContextWQ *work_queue, SafeTimer *timer, ceph::mutex *timer_lock,
librados::IoCtx &header_ioctx, const std::string &journal_id,
const std::string &client_id, const Settings &settings,
CacheManagerHandler *cache_manager_handler);
~Journaler();
void exists(Context *on_finish) const;
void create(uint8_t order, uint8_t splay_width, int64_t pool_id, Context *ctx);
void remove(bool force, Context *on_finish);
void init(Context *on_init);
void shut_down();
void shut_down(Context *on_finish);
bool is_initialized() const;
void get_immutable_metadata(uint8_t *order, uint8_t *splay_width,
int64_t *pool_id, Context *on_finish);
void get_mutable_metadata(uint64_t *minimum_set, uint64_t *active_set,
RegisteredClients *clients, Context *on_finish);
void add_listener(JournalMetadataListener *listener);
void remove_listener(JournalMetadataListener *listener);
int register_client(const bufferlist &data);
void register_client(const bufferlist &data, Context *on_finish);
int unregister_client();
void unregister_client(Context *on_finish);
void update_client(const bufferlist &data, Context *on_finish);
void get_client(const std::string &client_id, cls::journal::Client *client,
Context *on_finish);
int get_cached_client(const std::string &client_id,
cls::journal::Client *client);
void flush_commit_position(Context *on_safe);
void allocate_tag(const bufferlist &data, cls::journal::Tag *tag,
Context *on_finish);
void allocate_tag(uint64_t tag_class, const bufferlist &data,
cls::journal::Tag *tag, Context *on_finish);
void get_tag(uint64_t tag_tid, Tag *tag, Context *on_finish);
void get_tags(uint64_t tag_class, Tags *tags, Context *on_finish);
void get_tags(uint64_t start_after_tag_tid, uint64_t tag_class, Tags *tags,
Context *on_finish);
void start_replay(ReplayHandler* replay_handler);
void start_live_replay(ReplayHandler* replay_handler, double interval);
bool try_pop_front(ReplayEntry *replay_entry, uint64_t *tag_tid = nullptr);
void stop_replay();
void stop_replay(Context *on_finish);
uint64_t get_max_append_size() const;
void start_append(uint64_t max_in_flight_appends);
void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
double flush_age);
Future append(uint64_t tag_tid, const bufferlist &bl);
void flush_append(Context *on_safe);
void stop_append(Context *on_safe);
void committed(const ReplayEntry &replay_entry);
void committed(const Future &future);
void get_metadata(uint8_t *order, uint8_t *splay_width, int64_t *pool_id);
private:
struct C_InitJournaler : public Context {
Journaler *journaler;
Context *on_safe;
C_InitJournaler(Journaler *_journaler, Context *_on_safe)
: journaler(_journaler), on_safe(_on_safe) {
}
void finish(int r) override {
if (r == 0) {
r = journaler->init_complete();
}
on_safe->complete(r);
}
};
Threads *m_threads = nullptr;
mutable librados::IoCtx m_header_ioctx;
librados::IoCtx m_data_ioctx;
CephContext *m_cct;
std::string m_client_id;
CacheManagerHandler *m_cache_manager_handler;
std::string m_header_oid;
std::string m_object_oid_prefix;
bool m_initialized = false;
ceph::ref_t<class JournalMetadata> m_metadata;
std::unique_ptr<class JournalPlayer> m_player;
std::unique_ptr<class JournalRecorder> m_recorder;
JournalTrimmer *m_trimmer = nullptr;
void set_up(ContextWQ *work_queue, SafeTimer *timer, ceph::mutex *timer_lock,
librados::IoCtx &header_ioctx, const std::string &journal_id,
const Settings &settings);
int init_complete();
void create_player(ReplayHandler* replay_handler);
friend std::ostream &operator<<(std::ostream &os,
const Journaler &journaler);
};
std::ostream &operator<<(std::ostream &os,
const Journaler &journaler);
} // namespace journal
#endif // CEPH_JOURNAL_JOURNALER_H
| 5,545 | 31.432749 | 81 | h |
null | ceph-main/src/journal/ObjectPlayer.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectPlayer.h"
#include "journal/Utils.h"
#include "common/Timer.h"
#include <limits>
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "ObjectPlayer: " << this << " "
namespace journal {
namespace {
// Journal records are padded with NUL bytes out to the next 8-byte
// boundary.  'off' is the absolute offset of the byte *iter points at;
// the pad therefore spans (8 - off % 8) bytes.  Verifies every pad byte
// is zero.  On success advances *iter to the LAST pad byte (the caller
// does the final ++iter), adds the consumed count to *pad_len, and
// returns true.  Returns false if a non-zero byte is found (corruption)
// or if the buffer ends mid-pad -- the latter also sets *partial_entry.
bool advance_to_last_pad_byte(uint32_t off, bufferlist::const_iterator *iter,
                              uint32_t *pad_len, bool *partial_entry) {
  const uint32_t MAX_PAD = 8;
  auto pad_bytes = MAX_PAD - off % MAX_PAD;

  auto next = *iter;
  ceph_assert(!next.end());
  // first pad byte must already be zero
  if (*next != '\0') {
    return false;
  }

  // scan the remaining pad_bytes - 1 bytes
  for (auto i = pad_bytes - 1; i > 0; i--) {
    if ((++next).end()) {
      // ran out of fetched data mid-pad: may become valid on re-fetch
      *partial_entry = true;
      return false;
    }

    if (*next != '\0') {
      return false;
    }
  }

  *iter = next;
  *pad_len += pad_bytes;
  return true;
}
} // anonymous namespace
ObjectPlayer::ObjectPlayer(librados::IoCtx &ioctx,
const std::string& object_oid_prefix,
uint64_t object_num, SafeTimer &timer,
ceph::mutex &timer_lock, uint8_t order,
uint64_t max_fetch_bytes)
: m_object_num(object_num),
m_oid(utils::get_object_name(object_oid_prefix, m_object_num)),
m_timer(timer), m_timer_lock(timer_lock), m_order(order),
m_max_fetch_bytes(max_fetch_bytes > 0 ? max_fetch_bytes : 2 << order),
m_lock(ceph::make_mutex(utils::unique_lock_name("ObjectPlayer::m_lock", this)))
{
m_ioctx.dup(ioctx);
m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
}
ObjectPlayer::~ObjectPlayer() {
{
std::lock_guard timer_locker{m_timer_lock};
std::lock_guard locker{m_lock};
ceph_assert(!m_fetch_in_progress);
ceph_assert(m_watch_ctx == nullptr);
}
}
void ObjectPlayer::fetch(Context *on_finish) {
ldout(m_cct, 10) << __func__ << ": " << m_oid << dendl;
std::lock_guard locker{m_lock};
ceph_assert(!m_fetch_in_progress);
m_fetch_in_progress = true;
C_Fetch *context = new C_Fetch(this, on_finish);
librados::ObjectReadOperation op;
op.read(m_read_off, m_max_fetch_bytes, &context->read_bl, NULL);
op.set_op_flags2(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
auto rados_completion =
librados::Rados::aio_create_completion(context, utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op, 0, NULL);
ceph_assert(r == 0);
rados_completion->release();
}
void ObjectPlayer::watch(Context *on_fetch, double interval) {
ldout(m_cct, 20) << __func__ << ": " << m_oid << " watch" << dendl;
std::lock_guard timer_locker{m_timer_lock};
m_watch_interval = interval;
ceph_assert(m_watch_ctx == nullptr);
m_watch_ctx = on_fetch;
schedule_watch();
}
void ObjectPlayer::unwatch() {
ldout(m_cct, 20) << __func__ << ": " << m_oid << " unwatch" << dendl;
Context *watch_ctx = nullptr;
{
std::lock_guard timer_locker{m_timer_lock};
ceph_assert(!m_unwatched);
m_unwatched = true;
if (!cancel_watch()) {
return;
}
std::swap(watch_ctx, m_watch_ctx);
}
if (watch_ctx != nullptr) {
watch_ctx->complete(-ECANCELED);
}
}
void ObjectPlayer::front(Entry *entry) const {
std::lock_guard locker{m_lock};
ceph_assert(!m_entries.empty());
*entry = m_entries.front();
}
void ObjectPlayer::pop_front() {
std::lock_guard locker{m_lock};
ceph_assert(!m_entries.empty());
auto &entry = m_entries.front();
m_entry_keys.erase({entry.get_tag_tid(), entry.get_entry_tid()});
m_entries.pop_front();
}
// Process the result of an object read: append the new bytes to the
// pending decode buffer and decode as many complete entries as possible.
// Entries are keyed by (tag_tid, entry_tid); duplicates replace the prior
// copy in place.  Undecodable regions are recorded in m_invalid_ranges.
// Returns:
//   0        success (including -ENOENT / zero-length reads)
//   r        any other read error, passed through
//   -EBADMSG definite (non-partial) corruption was detected
//   -EAGAIN  a partial entry/pad needs a re-fetch (*refetch set to true);
//            only when nothing was decoded or the fetch was already
//            full-sized
int ObjectPlayer::handle_fetch_complete(int r, const bufferlist &bl,
                                        bool *refetch) {
  ldout(m_cct, 10) << __func__ << ": " << m_oid << ", r=" << r << ", len="
                   << bl.length() << dendl;

  *refetch = false;
  if (r == -ENOENT) {
    return 0;
  } else if (r < 0) {
    return r;
  } else if (bl.length() == 0) {
    return 0;
  }

  std::lock_guard locker{m_lock};
  ceph_assert(m_fetch_in_progress);
  m_read_off += bl.length();
  m_read_bl.append(bl);
  m_refetch_state = REFETCH_STATE_REQUIRED;

  // a full-sized fetch cannot be enlarged, so a partial record there is a
  // hard error rather than a "fetch more" condition
  bool full_fetch = (m_max_fetch_bytes == 2U << m_order);
  bool partial_entry = false;
  bool invalid = false;
  uint32_t invalid_start_off = 0;

  // data previously flagged invalid may decode now that more bytes arrived
  clear_invalid_range(m_read_bl_off, m_read_bl.length());
  bufferlist::const_iterator iter{&m_read_bl, 0};
  uint32_t pad_len = 0;
  while (!iter.end()) {
    uint32_t bytes_needed;
    uint32_t bl_off = iter.get_off();
    if (!Entry::is_readable(iter, &bytes_needed)) {
      if (bytes_needed != 0) {
        // entry preamble seen but its payload is incomplete
        invalid_start_off = m_read_bl_off + bl_off;
        invalid = true;
        partial_entry = true;
        if (full_fetch) {
          lderr(m_cct) << ": partial record at offset " << invalid_start_off
                       << dendl;
        } else {
          ldout(m_cct, 20) << ": partial record detected, will re-fetch"
                           << dendl;
        }
        break;
      }

      // not an entry: expect NUL padding to the next 8-byte boundary
      if (!advance_to_last_pad_byte(m_read_bl_off + iter.get_off(), &iter,
                                    &pad_len, &partial_entry)) {
        invalid_start_off = m_read_bl_off + bl_off;
        invalid = true;
        if (partial_entry) {
          if (full_fetch) {
            lderr(m_cct) << ": partial pad at offset " << invalid_start_off
                         << dendl;
          } else {
            ldout(m_cct, 20) << ": partial pad detected, will re-fetch"
                             << dendl;
          }
        } else {
          lderr(m_cct) << ": detected corrupt journal entry at offset "
                       << invalid_start_off << dendl;
        }
        break;
      }
      ++iter;
      continue;
    }

    Entry entry;
    decode(entry, iter);
    ldout(m_cct, 20) << ": " << entry << " decoded" << dendl;

    uint32_t entry_len = iter.get_off() - bl_off;
    if (invalid) {
      // new corrupt region detected
      uint32_t invalid_end_off = m_read_bl_off + bl_off;
      lderr(m_cct) << ": corruption range [" << invalid_start_off
                   << ", " << invalid_end_off << ")" << dendl;
      m_invalid_ranges.insert(invalid_start_off,
                              invalid_end_off - invalid_start_off);
      invalid = false;
      m_read_bl_off = invalid_end_off;
    }

    EntryKey entry_key(std::make_pair(entry.get_tag_tid(),
                                      entry.get_entry_tid()));
    if (m_entry_keys.find(entry_key) == m_entry_keys.end()) {
      m_entry_keys[entry_key] = m_entries.insert(m_entries.end(), entry);
    } else {
      ldout(m_cct, 10) << ": " << entry << " is duplicate, replacing" << dendl;
      *m_entry_keys[entry_key] = entry;
    }

    // prune decoded / corrupted journal entries from front of bl
    bufferlist sub_bl;
    sub_bl.substr_of(m_read_bl, iter.get_off(),
                     m_read_bl.length() - iter.get_off());
    sub_bl.swap(m_read_bl);
    iter = bufferlist::iterator(&m_read_bl, 0);

    // advance the decoded entry offset
    m_read_bl_off += entry_len + pad_len;
    pad_len = 0;
  }

  if (invalid) {
    // trailing invalid region extends to the end of the buffered data
    uint32_t invalid_end_off = m_read_bl_off + m_read_bl.length();
    if (!partial_entry) {
      lderr(m_cct) << ": corruption range [" << invalid_start_off
                   << ", " << invalid_end_off << ")" << dendl;
    }
    m_invalid_ranges.insert(invalid_start_off,
                            invalid_end_off - invalid_start_off);
  }

  if (!m_invalid_ranges.empty() && !partial_entry) {
    return -EBADMSG;
  } else if (partial_entry && (full_fetch || m_entries.empty())) {
    *refetch = true;
    return -EAGAIN;
  }

  return 0;
}
// Remove the intersection of [off, off + len) from the set of
// known-invalid byte ranges.  Called before re-decoding a region: bytes
// that previously looked invalid (e.g. a partial record) may decode
// cleanly once more data has been fetched.
void ObjectPlayer::clear_invalid_range(uint32_t off, uint32_t len) {
  // possibly remove previously partial record region
  InvalidRanges decode_range;
  decode_range.insert(off, len);
  InvalidRanges intersect_range;
  intersect_range.intersection_of(m_invalid_ranges, decode_range);
  if (!intersect_range.empty()) {
    ldout(m_cct, 20) << ": clearing invalid range: " << intersect_range
                     << dendl;
    m_invalid_ranges.subtract(intersect_range);
  }
}
void ObjectPlayer::schedule_watch() {
ceph_assert(ceph_mutex_is_locked(m_timer_lock));
if (m_watch_ctx == NULL) {
return;
}
ldout(m_cct, 20) << __func__ << ": " << m_oid << " scheduling watch" << dendl;
ceph_assert(m_watch_task == nullptr);
m_watch_task = m_timer.add_event_after(
m_watch_interval,
new LambdaContext([this](int) {
handle_watch_task();
}));
}
bool ObjectPlayer::cancel_watch() {
ceph_assert(ceph_mutex_is_locked(m_timer_lock));
ldout(m_cct, 20) << __func__ << ": " << m_oid << " cancelling watch" << dendl;
if (m_watch_task != nullptr) {
bool canceled = m_timer.cancel_event(m_watch_task);
ceph_assert(canceled);
m_watch_task = nullptr;
return true;
}
return false;
}
void ObjectPlayer::handle_watch_task() {
ceph_assert(ceph_mutex_is_locked(m_timer_lock));
ldout(m_cct, 10) << __func__ << ": " << m_oid << " polling" << dendl;
ceph_assert(m_watch_ctx != nullptr);
ceph_assert(m_watch_task != nullptr);
m_watch_task = nullptr;
fetch(new C_WatchFetch(this));
}
void ObjectPlayer::handle_watch_fetched(int r) {
ldout(m_cct, 10) << __func__ << ": " << m_oid << " poll complete, r=" << r
<< dendl;
Context *watch_ctx = nullptr;
{
std::lock_guard timer_locker{m_timer_lock};
std::swap(watch_ctx, m_watch_ctx);
if (m_unwatched) {
m_unwatched = false;
r = -ECANCELED;
}
}
if (watch_ctx != nullptr) {
watch_ctx->complete(r);
}
}
void ObjectPlayer::C_Fetch::finish(int r) {
bool refetch = false;
r = object_player->handle_fetch_complete(r, read_bl, &refetch);
{
std::lock_guard locker{object_player->m_lock};
object_player->m_fetch_in_progress = false;
}
if (refetch) {
object_player->fetch(on_finish);
return;
}
object_player.reset();
on_finish->complete(r);
}
void ObjectPlayer::C_WatchFetch::finish(int r) {
object_player->handle_watch_fetched(r);
}
} // namespace journal
| 10,124 | 27.441011 | 83 | cc |
null | ceph-main/src/journal/ObjectPlayer.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_OBJECT_PLAYER_H
#define CEPH_JOURNAL_OBJECT_PLAYER_H
#include "include/Context.h"
#include "include/interval_set.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/Timer.h"
#include "common/RefCountedObj.h"
#include "journal/Entry.h"
#include <list>
#include <string>
#include <boost/noncopyable.hpp>
#include <boost/unordered_map.hpp>
#include "include/ceph_assert.h"
namespace journal {
class ObjectPlayer : public RefCountedObject {
public:
typedef std::list<Entry> Entries;
typedef interval_set<uint64_t> InvalidRanges;
enum RefetchState {
REFETCH_STATE_NONE,
REFETCH_STATE_REQUIRED,
REFETCH_STATE_IMMEDIATE
};
inline const std::string &get_oid() const {
return m_oid;
}
inline uint64_t get_object_number() const {
return m_object_num;
}
void fetch(Context *on_finish);
void watch(Context *on_fetch, double interval);
void unwatch();
void front(Entry *entry) const;
void pop_front();
inline bool empty() const {
std::lock_guard locker{m_lock};
return m_entries.empty();
}
inline void get_entries(Entries *entries) {
std::lock_guard locker{m_lock};
*entries = m_entries;
}
inline void get_invalid_ranges(InvalidRanges *invalid_ranges) {
std::lock_guard locker{m_lock};
*invalid_ranges = m_invalid_ranges;
}
inline bool refetch_required() const {
return (get_refetch_state() != REFETCH_STATE_NONE);
}
inline RefetchState get_refetch_state() const {
return m_refetch_state;
}
inline void set_refetch_state(RefetchState refetch_state) {
m_refetch_state = refetch_state;
}
inline void set_max_fetch_bytes(uint64_t max_fetch_bytes) {
std::lock_guard locker{m_lock};
m_max_fetch_bytes = max_fetch_bytes;
}
private:
FRIEND_MAKE_REF(ObjectPlayer);
ObjectPlayer(librados::IoCtx &ioctx, const std::string& object_oid_prefix,
uint64_t object_num, SafeTimer &timer, ceph::mutex &timer_lock,
uint8_t order, uint64_t max_fetch_bytes);
~ObjectPlayer() override;
typedef std::pair<uint64_t, uint64_t> EntryKey;
typedef boost::unordered_map<EntryKey, Entries::iterator> EntryKeys;
struct C_Fetch : public Context {
ceph::ref_t<ObjectPlayer> object_player;
Context *on_finish;
bufferlist read_bl;
C_Fetch(ObjectPlayer *o, Context *ctx) : object_player(o), on_finish(ctx) {
}
void finish(int r) override;
};
struct C_WatchFetch : public Context {
ceph::ref_t<ObjectPlayer> object_player;
C_WatchFetch(ObjectPlayer *o) : object_player(o) {
}
void finish(int r) override;
};
librados::IoCtx m_ioctx;
uint64_t m_object_num;
std::string m_oid;
CephContext *m_cct = nullptr;
SafeTimer &m_timer;
ceph::mutex &m_timer_lock;
uint8_t m_order;
uint64_t m_max_fetch_bytes;
double m_watch_interval = 0;
Context *m_watch_task = nullptr;
mutable ceph::mutex m_lock;
bool m_fetch_in_progress = false;
bufferlist m_read_bl;
uint32_t m_read_off = 0;
uint32_t m_read_bl_off = 0;
Entries m_entries;
EntryKeys m_entry_keys;
InvalidRanges m_invalid_ranges;
Context *m_watch_ctx = nullptr;
bool m_unwatched = false;
RefetchState m_refetch_state = REFETCH_STATE_IMMEDIATE;
int handle_fetch_complete(int r, const bufferlist &bl, bool *refetch);
void clear_invalid_range(uint32_t off, uint32_t len);
void schedule_watch();
bool cancel_watch();
void handle_watch_task();
void handle_watch_fetched(int r);
};
} // namespace journal
#endif // CEPH_JOURNAL_OBJECT_PLAYER_H
| 3,672 | 24.866197 | 79 | h |
null | ceph-main/src/journal/ObjectRecorder.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/ObjectRecorder.h"
#include "journal/Future.h"
#include "journal/Utils.h"
#include "include/ceph_assert.h"
#include "common/Timer.h"
#include "common/errno.h"
#include "cls/journal/cls_journal_client.h"
#define dout_subsys ceph_subsys_journaler
#undef dout_prefix
#define dout_prefix *_dout << "ObjectRecorder: " << this << " " \
<< __func__ << " (" << m_oid << "): "
using namespace cls::journal;
using std::shared_ptr;
namespace journal {
// Construct a recorder for a single journal data object.
// 'order': the object's soft size limit is 2^order bytes.
// 'max_in_flight_appends': bound on concurrent append ops.
// Queries the cluster's minimum compatible OSD release to decide whether
// pre-Octopus compatibility mode is needed.
ObjectRecorder::ObjectRecorder(librados::IoCtx &ioctx, std::string_view oid,
                               uint64_t object_number, ceph::mutex* lock,
                               ContextWQ *work_queue, Handler *handler,
                               uint8_t order, int32_t max_in_flight_appends)
  : m_oid(oid), m_object_number(object_number),
    m_op_work_queue(work_queue), m_handler(handler),
    m_order(order), m_soft_max_size(1 << m_order),
    m_max_in_flight_appends(max_in_flight_appends),
    m_lock(lock)
{
  m_ioctx.dup(ioctx);
  m_cct = reinterpret_cast<CephContext*>(m_ioctx.cct());
  ceph_assert(m_handler != NULL);

  librados::Rados rados(m_ioctx);
  int8_t require_osd_release = 0;
  int r = rados.get_min_compatible_osd(&require_osd_release);
  if (r < 0) {
    // non-fatal: release stays 0, which falls into compat mode below
    ldout(m_cct, 0) << "failed to retrieve min OSD release: "
                    << cpp_strerror(r) << dendl;
  }
  m_compat_mode = require_osd_release < CEPH_RELEASE_OCTOPUS;

  ldout(m_cct, 20) << dendl;
}
ObjectRecorder::~ObjectRecorder() {
ldout(m_cct, 20) << dendl;
ceph_assert(m_pending_buffers.empty());
ceph_assert(m_in_flight_tids.empty());
ceph_assert(m_in_flight_appends.empty());
}
// Update the batching thresholds consulted by send_appends().
// Expects m_lock to be held.
void ObjectRecorder::set_append_batch_options(int flush_interval,
uint64_t flush_bytes,
double flush_age) {
ldout(m_cct, 5) << "flush_interval=" << flush_interval << ", "
<< "flush_bytes=" << flush_bytes << ", "
<< "flush_age=" << flush_age << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
m_flush_interval = flush_interval;
m_flush_bytes = flush_bytes;
m_flush_age = flush_age;
}
// Queue a batch of entry buffers on this object and attempt to send them.
// Expects m_lock to be held.
//
// @param append_buffers future/payload pairs to queue (consumed)
// @return true if the object overflowed while sending (the caller is
// responsible for notifying the handler)
bool ObjectRecorder::append(AppendBuffers &&append_buffers) {
ldout(m_cct, 20) << "count=" << append_buffers.size() << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
// remember the most recent future that already had a flush requested so
// the send below can be forced up to (and including) it
ceph::ref_t<FutureImpl> last_flushed_future;
auto flush_handler = get_flush_handler();
for (auto& append_buffer : append_buffers) {
ldout(m_cct, 20) << *append_buffer.first << ", "
<< "size=" << append_buffer.second.length() << dendl;
bool flush_requested = append_buffer.first->attach(flush_handler);
if (flush_requested) {
last_flushed_future = append_buffer.first;
}
m_pending_buffers.push_back(append_buffer);
m_pending_bytes += append_buffer.second.length();
}
return send_appends(!!last_flushed_future, last_flushed_future);
}
// Flush all queued appends; on_safe fires once the most recently queued
// entry (at the time of this call) is safe.
void ObjectRecorder::flush(Context *on_safe) {
ldout(m_cct, 20) << dendl;
Future future;
{
std::unique_lock locker{*m_lock};
// if currently handling flush notifications, wait so that
// we notify in the correct order (since lock is dropped on
// callback)
while (m_in_flight_callbacks > 0) {
m_in_flight_callbacks_cond.wait(locker);
}
// attach the flush to the most recent append
if (!m_pending_buffers.empty()) {
future = Future(m_pending_buffers.rbegin()->first);
} else if (!m_in_flight_appends.empty()) {
AppendBuffers &append_buffers = m_in_flight_appends.rbegin()->second;
ceph_assert(!append_buffers.empty());
future = Future(append_buffers.rbegin()->first);
}
}
if (future.is_valid()) {
// cannot be invoked while the same lock context
m_op_work_queue->queue(new LambdaContext(
[future, on_safe] (int r) mutable {
future.flush(on_safe);
}));
} else {
// nothing queued or in flight: already safe
on_safe->complete(0);
}
}
// Flush at least up to (and including) the given future. Reached via
// FlushHandler when a client explicitly flushes a future.
void ObjectRecorder::flush(const ceph::ref_t<FutureImpl>& future) {
ldout(m_cct, 20) << "flushing " << *future << dendl;
std::unique_lock locker{*m_lock};
auto flush_handler = future->get_flush_handler();
auto my_handler = get_flush_handler();
if (flush_handler != my_handler) {
// if we don't own this future, re-issue the flush so that it hits the
// correct journal object owner
future->flush();
return;
} else if (future->is_flush_in_progress()) {
return;
}
// send_appends returning true means the object overflowed: notify the
// handler (lock is dropped inside notify_handler_unlock)
if (!m_object_closed && !m_overflowed && send_appends(true, future)) {
++m_in_flight_callbacks;
notify_handler_unlock(locker, true);
}
}
// Hand all still-pending buffers back to the caller (e.g. so they can be
// resent to a new journal object after close/overflow). Expects m_lock to
// be held and no appends in flight.
void ObjectRecorder::claim_append_buffers(AppendBuffers *append_buffers) {
ldout(m_cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph_assert(m_in_flight_tids.empty());
ceph_assert(m_in_flight_appends.empty());
ceph_assert(m_object_closed || m_overflowed);
for (auto& append_buffer : m_pending_buffers) {
ldout(m_cct, 20) << "detached " << *append_buffer.first << dendl;
append_buffer.first->detach();
}
append_buffers->splice(append_buffers->end(), m_pending_buffers,
m_pending_buffers.begin(), m_pending_buffers.end());
}
// Initiate close of this object: push out everything pending and mark the
// recorder closed. Expects m_lock to be held.
//
// @return true if fully quiesced now; false if appends/callbacks are still
// in flight, in which case Handler::closed will fire later
bool ObjectRecorder::close() {
ceph_assert(ceph_mutex_is_locked(*m_lock));
ldout(m_cct, 20) << dendl;
send_appends(true, {});
ceph_assert(!m_object_closed);
m_object_closed = true;
if (!m_in_flight_tids.empty() || m_in_flight_callbacks > 0) {
// defer the closed notification until the in-flight work drains
m_object_closed_notify = true;
return false;
}
return true;
}
// Completion callback for a single in-flight librados append op.
//
// @param tid append transaction id assigned by send_appends
// @param r librados result (-EOVERFLOW when the guard rejected the write)
void ObjectRecorder::handle_append_flushed(uint64_t tid, int r) {
ldout(m_cct, 20) << "tid=" << tid << ", r=" << r << dendl;
std::unique_lock locker{*m_lock};
// guard against concurrent flush()/close() while the lock is dropped below
++m_in_flight_callbacks;
auto tid_iter = m_in_flight_tids.find(tid);
ceph_assert(tid_iter != m_in_flight_tids.end());
m_in_flight_tids.erase(tid_iter);
InFlightAppends::iterator iter = m_in_flight_appends.find(tid);
ceph_assert(iter != m_in_flight_appends.end());
bool notify_overflowed = false;
AppendBuffers append_buffers;
if (r == -EOVERFLOW) {
ldout(m_cct, 10) << "append overflowed: "
<< "idle=" << m_in_flight_tids.empty() << ", "
<< "previous_overflow=" << m_overflowed << dendl;
// only requeue buffers once the last outstanding append has landed,
// preserving submission order
if (m_in_flight_tids.empty()) {
append_overflowed();
}
if (!m_object_closed && !m_overflowed) {
notify_overflowed = true;
}
m_overflowed = true;
} else {
append_buffers.swap(iter->second);
ceph_assert(!append_buffers.empty());
// account the bytes as durably stored in the object
for (auto& append_buffer : append_buffers) {
auto length = append_buffer.second.length();
m_object_bytes += length;
ceph_assert(m_in_flight_bytes >= length);
m_in_flight_bytes -= length;
}
ldout(m_cct, 20) << "object_bytes=" << m_object_bytes << dendl;
m_in_flight_appends.erase(iter);
}
// drop the lock before invoking future callbacks
locker.unlock();
// Flag the associated futures as complete.
for (auto& append_buffer : append_buffers) {
ldout(m_cct, 20) << *append_buffer.first << " marked safe" << dendl;
append_buffer.first->safe(r);
}
// attempt to kick off more appends to the object
locker.lock();
if (!m_object_closed && !m_overflowed && send_appends(false, {})) {
notify_overflowed = true;
}
ldout(m_cct, 20) << "pending tids=" << m_in_flight_tids << dendl;
// notify of overflow if one just occurred or indicate that all in-flight
// appends have completed on a closed object (or wake up stalled flush
// requests that was waiting for this strand to complete).
notify_handler_unlock(locker, notify_overflowed);
}
// After an -EOVERFLOW, rebuild m_pending_buffers so it holds every
// previously in-flight buffer followed by the still-pending ones, in the
// original submission order, ready to be claimed via claim_append_buffers.
// Expects m_lock to be held and no appends still in flight.
void ObjectRecorder::append_overflowed() {
ldout(m_cct, 10) << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph_assert(!m_in_flight_appends.empty());
InFlightAppends in_flight_appends;
in_flight_appends.swap(m_in_flight_appends);
AppendBuffers restart_append_buffers;
// in-flight map is keyed by ascending tid, so iteration restores order
for (InFlightAppends::iterator it = in_flight_appends.begin();
it != in_flight_appends.end(); ++it) {
restart_append_buffers.insert(restart_append_buffers.end(),
it->second.begin(), it->second.end());
}
// pending buffers were queued after anything in flight: append them last
restart_append_buffers.splice(restart_append_buffers.end(),
m_pending_buffers,
m_pending_buffers.begin(),
m_pending_buffers.end());
restart_append_buffers.swap(m_pending_buffers);
}
// Package as many pending buffers as batching policy and the object's soft
// size limit allow into a single librados append op and submit it.
// Expects m_lock to be held.
//
// @param force submit regardless of the batching thresholds
// @param flush_future stop batching once this future has been included
// @return true if the object overflowed (caller must notify the handler)
bool ObjectRecorder::send_appends(bool force, ceph::ref_t<FutureImpl> flush_future) {
ldout(m_cct, 20) << dendl;
ceph_assert(ceph_mutex_is_locked(*m_lock));
if (m_object_closed || m_overflowed) {
ldout(m_cct, 20) << "already closed or overflowed" << dendl;
return false;
}
if (m_pending_buffers.empty()) {
ldout(m_cct, 20) << "append buffers empty" << dendl;
return false;
}
// promote to a forced flush once any batching threshold is exceeded
if (!force &&
((m_flush_interval > 0 && m_pending_buffers.size() >= m_flush_interval) ||
(m_flush_bytes > 0 && m_pending_bytes >= m_flush_bytes) ||
(m_flush_age > 0 && !m_last_flush_time.is_zero() &&
m_last_flush_time + m_flush_age <= ceph_clock_now()))) {
ldout(m_cct, 20) << "forcing batch flush" << dendl;
force = true;
}
// start tracking flush time after the first append event
if (m_last_flush_time.is_zero()) {
m_last_flush_time = ceph_clock_now();
}
auto max_in_flight_appends = m_max_in_flight_appends;
if (m_flush_interval > 0 || m_flush_bytes > 0 || m_flush_age > 0) {
// batching enabled: restrict unforced sends to one in-flight append so
// subsequent buffers accumulate into the next batch
if (!force && max_in_flight_appends == 0) {
ldout(m_cct, 20) << "attempting to batch AIO appends" << dendl;
max_in_flight_appends = 1;
}
} else if (max_in_flight_appends < 0) {
// negative setting means unlimited when batching is disabled
max_in_flight_appends = 0;
}
if (!force && max_in_flight_appends != 0 &&
static_cast<int32_t>(m_in_flight_tids.size()) >= max_in_flight_appends) {
ldout(m_cct, 10) << "max in flight appends reached" << dendl;
return false;
}
librados::ObjectWriteOperation op;
if (m_compat_mode) {
// legacy path: explicit size guard followed by raw append ops
client::guard_append(&op, m_soft_max_size);
}
size_t append_bytes = 0;
AppendBuffers append_buffers;
bufferlist append_bl;
for (auto it = m_pending_buffers.begin(); it != m_pending_buffers.end(); ) {
auto& future = it->first;
auto& bl = it->second;
auto size = m_object_bytes + m_in_flight_bytes + append_bytes + bl.length();
if (size == m_soft_max_size) {
// buffer exactly fills the object: send it, then overflow
ldout(m_cct, 10) << "object at capacity (" << size << ") " << *future << dendl;
m_overflowed = true;
} else if (size > m_soft_max_size) {
// buffer doesn't fit: leave it pending for the next object
ldout(m_cct, 10) << "object beyond capacity (" << size << ") " << *future << dendl;
m_overflowed = true;
break;
}
bool flush_break = (force && flush_future && flush_future == future);
ldout(m_cct, 20) << "flushing " << *future << dendl;
future->set_flush_in_progress();
if (m_compat_mode) {
op.append(bl);
op.set_op_flags2(CEPH_OSD_OP_FLAG_FADVISE_DONTNEED);
} else {
append_bl.append(bl);
}
append_bytes += bl.length();
append_buffers.push_back(*it);
it = m_pending_buffers.erase(it);
if (flush_break) {
ldout(m_cct, 20) << "stopping at requested flush future" << dendl;
break;
}
}
if (append_bytes > 0) {
m_last_flush_time = ceph_clock_now();
uint64_t append_tid = m_append_tid++;
m_in_flight_tids.insert(append_tid);
m_in_flight_appends[append_tid].swap(append_buffers);
m_in_flight_bytes += append_bytes;
ceph_assert(m_pending_bytes >= append_bytes);
m_pending_bytes -= append_bytes;
if (!m_compat_mode) {
// modern path: single cls journal append with built-in size guard
client::append(&op, m_soft_max_size, append_bl);
}
auto rados_completion = librados::Rados::aio_create_completion(
new C_AppendFlush(this, append_tid), utils::rados_ctx_callback);
int r = m_ioctx.aio_operate(m_oid, rados_completion, &op);
ceph_assert(r == 0);
rados_completion->release();
ldout(m_cct, 20) << "flushing journal tid=" << append_tid << ", "
<< "append_bytes=" << append_bytes << ", "
<< "in_flight_bytes=" << m_in_flight_bytes << ", "
<< "pending_bytes=" << m_pending_bytes << dendl;
}
return m_overflowed;
}
// Drop one in-flight callback reference; wake any flush(Context*) callers
// blocked waiting for all callbacks to drain. Expects m_lock to be held.
void ObjectRecorder::wake_up_flushes() {
ceph_assert(ceph_mutex_is_locked(*m_lock));
--m_in_flight_callbacks;
if (m_in_flight_callbacks == 0) {
m_in_flight_callbacks_cond.notify_all();
}
}
// Deliver pending overflow/closed notifications to the handler and release
// the in-flight callback reference taken by the caller. The lock is held
// on entry and released before returning.
void ObjectRecorder::notify_handler_unlock(
std::unique_lock<ceph::mutex>& locker, bool notify_overflowed) {
ceph_assert(ceph_mutex_is_locked(*m_lock));
ceph_assert(m_in_flight_callbacks > 0);
if (!m_object_closed && notify_overflowed) {
// TODO need to delay completion until after aio_notify completes
ldout(m_cct, 10) << "overflow" << dendl;
ceph_assert(m_overflowed);
locker.unlock();
m_handler->overflow(this);
locker.lock();
}
// wake up blocked flush requests
wake_up_flushes();
// An overflow notification might have blocked a close. A close
// notification could lead to the immediate destruction of this object
// so the object shouldn't be referenced anymore
bool object_closed_notify = false;
if (m_in_flight_tids.empty()) {
std::swap(object_closed_notify, m_object_closed_notify);
}
ceph_assert(m_object_closed || !object_closed_notify);
locker.unlock();
if (object_closed_notify) {
ldout(m_cct, 10) << "closed" << dendl;
m_handler->closed(this);
}
}
} // namespace journal
| 13,578 | 30.950588 | 89 | cc |
null | ceph-main/src/journal/ObjectRecorder.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_OBJECT_RECORDER_H
#define CEPH_JOURNAL_OBJECT_RECORDER_H
#include "include/utime.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "common/WorkQueue.h"
#include "common/Timer.h"
#include "journal/FutureImpl.h"
#include <list>
#include <map>
#include <set>
#include <boost/noncopyable.hpp>
#include "include/ceph_assert.h"
namespace journal {
class ObjectRecorder;
typedef std::pair<ceph::ref_t<FutureImpl>, bufferlist> AppendBuffer;
typedef std::list<AppendBuffer> AppendBuffers;
// Batches journal entry appends to a single RADOS data object, enforcing a
// soft 2^order size limit and notifying its Handler on overflow/close.
// All public entry points expect the externally supplied lock to be held
// unless noted otherwise.
class ObjectRecorder : public RefCountedObject, boost::noncopyable {
public:
// Owner-provided notification sink.
struct Handler {
virtual ~Handler() {
}
// all in-flight work finished after close()
virtual void closed(ObjectRecorder *object_recorder) = 0;
// object reached capacity; remaining buffers must be claimed
virtual void overflow(ObjectRecorder *object_recorder) = 0;
};
// configure batching thresholds (count / bytes / age)
void set_append_batch_options(int flush_interval, uint64_t flush_bytes,
double flush_age);
inline uint64_t get_object_number() const {
return m_object_number;
}
inline const std::string &get_oid() const {
return m_oid;
}
// queue buffers; returns true on overflow
bool append(AppendBuffers &&append_buffers);
// flush everything queued; on_safe fires when durable (lock NOT required)
void flush(Context *on_safe);
// flush up to and including the given future
void flush(const ceph::ref_t<FutureImpl> &future);
// reclaim pending buffers after close/overflow
void claim_append_buffers(AppendBuffers *append_buffers);
bool is_closed() const {
ceph_assert(ceph_mutex_is_locked(*m_lock));
return (m_object_closed && m_in_flight_appends.empty());
}
// returns true if fully quiesced; otherwise Handler::closed fires later
bool close();
inline CephContext *cct() const {
return m_cct;
}
inline size_t get_pending_appends() const {
std::lock_guard locker{*m_lock};
return m_pending_buffers.size();
}
private:
FRIEND_MAKE_REF(ObjectRecorder);
ObjectRecorder(librados::IoCtx &ioctx, std::string_view oid,
uint64_t object_number, ceph::mutex* lock,
ContextWQ *work_queue, Handler *handler, uint8_t order,
int32_t max_in_flight_appends);
~ObjectRecorder() override;
typedef std::set<uint64_t> InFlightTids;
typedef std::map<uint64_t, AppendBuffers> InFlightAppends;
// adapter attached to futures so their flush requests route back here
struct FlushHandler : public FutureImpl::FlushHandler {
ceph::ref_t<ObjectRecorder> object_recorder;
virtual void flush(const ceph::ref_t<FutureImpl> &future) override {
object_recorder->flush(future);
}
FlushHandler(ceph::ref_t<ObjectRecorder> o) : object_recorder(std::move(o)) {}
};
// AIO completion context for one append transaction
struct C_AppendFlush : public Context {
ceph::ref_t<ObjectRecorder> object_recorder;
uint64_t tid;
C_AppendFlush(ceph::ref_t<ObjectRecorder> o, uint64_t _tid)
: object_recorder(std::move(o)), tid(_tid) {
}
void finish(int r) override {
object_recorder->handle_append_flushed(tid, r);
}
};
librados::IoCtx m_ioctx;
std::string m_oid;
uint64_t m_object_number;
CephContext *m_cct = nullptr;
ContextWQ *m_op_work_queue;
Handler *m_handler;
uint8_t m_order;
uint64_t m_soft_max_size; // 2^m_order
// batching thresholds (0 == disabled)
uint32_t m_flush_interval = 0;
uint64_t m_flush_bytes = 0;
double m_flush_age = 0;
int32_t m_max_in_flight_appends;
bool m_compat_mode; // pre-octopus append path
/* So that ObjectRecorder::FlushHandler doesn't create a circular reference: */
std::weak_ptr<FlushHandler> m_flush_handler;
auto get_flush_handler() {
auto h = m_flush_handler.lock();
if (!h) {
h = std::make_shared<FlushHandler>(this);
m_flush_handler = h;
}
return h;
}
mutable ceph::mutex* m_lock;
AppendBuffers m_pending_buffers; // queued, not yet submitted
uint64_t m_pending_bytes = 0;
utime_t m_last_flush_time;
uint64_t m_append_tid = 0; // next append transaction id
InFlightTids m_in_flight_tids;
InFlightAppends m_in_flight_appends;
uint64_t m_object_bytes = 0; // bytes known durable in the object
bool m_overflowed = false;
bool m_object_closed = false;
bool m_object_closed_notify = false; // closed notification deferred
bufferlist m_prefetch_bl;
// callbacks currently executing with the lock temporarily dropped
uint32_t m_in_flight_callbacks = 0;
ceph::condition_variable m_in_flight_callbacks_cond;
uint64_t m_in_flight_bytes = 0;
bool send_appends(bool force, ceph::ref_t<FutureImpl> flush_sentinel);
void handle_append_flushed(uint64_t tid, int r);
void append_overflowed();
void wake_up_flushes();
void notify_handler_unlock(std::unique_lock<ceph::mutex>& locker,
bool notify_overflowed);
};
} // namespace journal
#endif // CEPH_JOURNAL_OBJECT_RECORDER_H
| 4,394 | 26.298137 | 82 | h |
null | ceph-main/src/journal/ReplayEntry.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_REPLAY_ENTRY_H
#define CEPH_JOURNAL_REPLAY_ENTRY_H
#include "include/int_types.h"
#include "include/buffer.h"
namespace journal {
// Value type pairing a journal entry's payload with the commit tid used to
// acknowledge the entry once it has been replayed.
class ReplayEntry {
public:
  // empty payload, commit tid 0
  ReplayEntry() = default;

  ReplayEntry(const bufferlist &data, uint64_t commit_tid)
    : m_data(data), m_commit_tid(commit_tid) {
  }

  // raw entry payload
  inline const bufferlist &get_data() const {
    return m_data;
  }

  // tid to hand back when committing this entry
  inline uint64_t get_commit_tid() const {
    return m_commit_tid;
  }

private:
  bufferlist m_data;
  uint64_t m_commit_tid = 0;
};
} // namespace journal
#endif // CEPH_JOURNAL_REPLAY_ENTRY_H
| 695 | 18.885714 | 70 | h |
null | ceph-main/src/journal/ReplayHandler.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_REPLAY_HANDLER_H
#define CEPH_JOURNAL_REPLAY_HANDLER_H
namespace journal {
// Callback interface implemented by journal replay consumers.
struct ReplayHandler {
  // invoked when new entries may be available to fetch
  virtual void handle_entries_available() = 0;
  // invoked when replay completes; r holds the result code
  virtual void handle_complete(int r) = 0;
  virtual ~ReplayHandler() {}
};
#endif // CEPH_JOURNAL_REPLAY_HANDLER_H
| 408 | 21.722222 | 70 | h |
null | ceph-main/src/journal/Settings.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_SETTINGS_H
#define CEPH_JOURNAL_SETTINGS_H
#include "include/int_types.h"
namespace journal {
// Journal tunables; zero generally means "default / no limit".
// NOTE(review): uses std::set/std::string but this header includes neither
// <set> nor <string> -- presumably supplied transitively; confirm.
struct Settings {
  double commit_interval = 5; ///< commit position throttle (in secs)
  uint64_t max_payload_bytes = 0; ///< 0 implies object size limit
  int max_concurrent_object_sets = 0; ///< 0 implies no limit
  std::set<std::string> ignored_laggy_clients;
                              ///< clients that mustn't be disconnected
};
} // namespace journal
#endif // # CEPH_JOURNAL_SETTINGS_H
| 637 | 28 | 79 | h |
null | ceph-main/src/journal/Types.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_TYPES_H
#define CEPH_JOURNAL_TYPES_H
namespace journal {
// Notification target invoked when the cache manager changes the byte
// budget allotted to a registered cache.
struct CacheRebalanceHandler {
  virtual ~CacheRebalanceHandler() {
  }

  // @param new_cache_bytes bytes now allotted to this cache
  virtual void handle_cache_rebalanced(uint64_t new_cache_bytes) = 0;
};

// Interface through which a cache registers itself (with its size bounds
// and rebalance callback) with an external cache manager.
struct CacheManagerHandler {
  virtual ~CacheManagerHandler() {
  }

  virtual void register_cache(const std::string &cache_name,
                              uint64_t min_size, uint64_t max_size,
                              CacheRebalanceHandler* handler) = 0;
  virtual void unregister_cache(const std::string &cache_name) = 0;
};
} // namespace journal
#endif // # CEPH_JOURNAL_TYPES_H
| 720 | 23.862069 | 70 | h |
null | ceph-main/src/journal/Utils.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "journal/Utils.h"
#include "include/Context.h"
#include "include/stringify.h"
namespace journal {
namespace utils {
// Build the RADOS object name for a journal object: the journal's object
// prefix followed by the decimal object number.
//
// @param prefix per-journal object name prefix
// @param number journal object number
// @return concatenated object name
std::string get_object_name(const std::string &prefix, uint64_t number) {
  // std::to_string yields the same decimal form as the ostringstream-based
  // stringify() helper, without constructing a stream
  return prefix + std::to_string(number);
}
// Produce a lock name made unique per owner by embedding the owner's
// address, e.g. "SomeLock (0x7f12...)".
std::string unique_lock_name(const std::string &name, void *address) {
  std::string result = name;
  result += " (";
  result += stringify(address);
  result += ")";
  return result;
}
// Generic librados AIO completion callback: forwards the operation's
// return value to the Context supplied (as void*) when the completion was
// created. Context::complete also deletes the context.
//
// @param c completed rados AIO operation
// @param arg opaque pointer to the bound Context
void rados_ctx_callback(rados_completion_t c, void *arg) {
  // static_cast (not reinterpret_cast) is the correct named cast for
  // converting a void* back to the pointer type originally stored in it
  Context *comp = static_cast<Context *>(arg);
  comp->complete(rados_aio_get_return_value(c));
}
} // namespace utils
} // namespace journal
| 670 | 24.807692 | 73 | cc |
null | ceph-main/src/journal/Utils.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_JOURNAL_UTILS_H
#define CEPH_JOURNAL_UTILS_H
#include "include/int_types.h"
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include <string>
namespace journal {
namespace utils {
namespace detail {
// Completion wrapper that re-queues the wrapped context through the
// journal metadata's work queue, ensuring it runs in a clean lock context.
template <typename M>
struct C_AsyncCallback : public Context {
  M journal_metadata;
  Context *on_finish;
  C_AsyncCallback(M journal_metadata, Context *on_finish)
    : journal_metadata(journal_metadata), on_finish(on_finish) {
  }
  void finish(int r) override {
    // defer on_finish instead of invoking it inline
    journal_metadata->queue(on_finish, r);
  }
};
// Adapt a C-style rados completion callback to a member-function call:
// T is the receiver type, MF the member to invoke with the op's result.
template <typename T, void(T::*MF)(int)>
void rados_state_callback(rados_completion_t c, void *arg) {
  T *obj = reinterpret_cast<T*>(arg);
  int r = rados_aio_get_return_value(c);
  (obj->*MF)(r);
}

std::string get_object_name(const std::string &prefix, uint64_t number);
std::string unique_lock_name(const std::string &name, void *address);
void rados_ctx_callback(rados_completion_t c, void *arg);

// Wrap on_finish so it is dispatched via the journal metadata's work queue
// rather than executed inline.
template <typename M>
Context *create_async_context_callback(M journal_metadata, Context *on_finish) {
  // use async callback to acquire a clean lock context
  return new detail::C_AsyncCallback<M>(journal_metadata, on_finish);
}
} // namespace utils
} // namespace journal
#endif // CEPH_JOURNAL_UTILS_H
| 1,389 | 24.272727 | 80 | h |
null | ceph-main/src/json_spirit/json_spirit.h | #ifndef JSON_SPIRIT
#define JSON_SPIRIT
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_reader.h"
#include "json_spirit_writer.h"
#include "json_spirit_utils.h"
#endif
| 395 | 19.842105 | 71 | h |
null | ceph-main/src/json_spirit/json_spirit_error_position.h | #ifndef JSON_SPIRIT_ERROR_POSITION
#define JSON_SPIRIT_ERROR_POSITION
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include <string>
namespace json_spirit
{
// An Error_position exception is thrown by the "read_or_throw" functions below on finding an error.
    // Note: the "read_or_throw" functions are around 3 times slower than the
    // standard "read" functions that return a bool.
//
    // Location (1-based line/column) and human-readable reason of a parse
    // failure, thrown as an exception by the "read_or_throw" functions.
    struct Error_position
    {
        Error_position();
        Error_position( unsigned int line, unsigned int column, const std::string& reason );
        bool operator==( const Error_position& lhs ) const;
        unsigned int line_;
        unsigned int column_;
        std::string reason_;
    };

    // default: no position information (line/column 0), empty reason
    inline Error_position::Error_position()
    : line_( 0 )
    , column_( 0 )
    {
    }

    inline Error_position::Error_position( unsigned int line, unsigned int column, const std::string& reason )
    : line_( line )
    , column_( column )
    , reason_( reason )
    {
    }
inline bool Error_position::operator==( const Error_position& lhs ) const
{
if( this == &lhs ) return true;
return ( reason_ == lhs.reason_ ) &&
( line_ == lhs.line_ ) &&
( column_ == lhs.column_ );
}
}
#endif
| 1,461 | 25.581818 | 110 | h |
null | ceph-main/src/json_spirit/json_spirit_reader.cpp | // Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#include "json_spirit_reader.h"
#include "json_spirit_reader_template.h"
using namespace json_spirit;
#ifdef JSON_SPIRIT_VALUE_ENABLED
// Thin wrappers binding the template reader to the std::string-based Value
// type. The bool overloads return false on malformed input; the
// *_or_throw overloads throw Error_position instead.
bool json_spirit::read( const std::string& s, Value& value )
{
    return read_string( s, value );
}

void json_spirit::read_or_throw( const std::string& s, Value& value )
{
    read_string_or_throw( s, value );
}

bool json_spirit::read( std::istream& is, Value& value )
{
    return read_stream( is, value );
}

void json_spirit::read_or_throw( std::istream& is, Value& value )
{
    read_stream_or_throw( is, value );
}

bool json_spirit::read( std::string::const_iterator& begin, std::string::const_iterator end, Value& value )
{
    return read_range( begin, end, value );
}

// on success "begin" is advanced past the parsed value
void json_spirit::read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, Value& value )
{
    begin = read_range_or_throw( begin, end, value );
}
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
// Same wrappers for the wide-string wValue type (only compiled when
// std::wstring support is available).
bool json_spirit::read( const std::wstring& s, wValue& value )
{
    return read_string( s, value );
}

void json_spirit::read_or_throw( const std::wstring& s, wValue& value )
{
    read_string_or_throw( s, value );
}

bool json_spirit::read( std::wistream& is, wValue& value )
{
    return read_stream( is, value );
}

void json_spirit::read_or_throw( std::wistream& is, wValue& value )
{
    read_stream_or_throw( is, value );
}

bool json_spirit::read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value )
{
    return read_range( begin, end, value );
}

// on success "begin" is advanced past the parsed value
void json_spirit::read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value )
{
    begin = read_range_or_throw( begin, end, value );
}
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
// Same wrappers for the map-backed mValue type.
bool json_spirit::read( const std::string& s, mValue& value )
{
    return read_string( s, value );
}

void json_spirit::read_or_throw( const std::string& s, mValue& value )
{
    read_string_or_throw( s, value );
}

bool json_spirit::read( std::istream& is, mValue& value )
{
    return read_stream( is, value );
}

void json_spirit::read_or_throw( std::istream& is, mValue& value )
{
    read_stream_or_throw( is, value );
}

bool json_spirit::read( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value )
{
    return read_range( begin, end, value );
}

// on success "begin" is advanced past the parsed value
void json_spirit::read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value )
{
    begin = read_range_or_throw( begin, end, value );
}
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
// Same wrappers for the wide-string, map-backed wmValue type (only
// compiled when std::wstring support is available).
bool json_spirit::read( const std::wstring& s, wmValue& value )
{
    return read_string( s, value );
}

void json_spirit::read_or_throw( const std::wstring& s, wmValue& value )
{
    read_string_or_throw( s, value );
}

bool json_spirit::read( std::wistream& is, wmValue& value )
{
    return read_stream( is, value );
}

void json_spirit::read_or_throw( std::wistream& is, wmValue& value )
{
    read_stream_or_throw( is, value );
}

bool json_spirit::read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value )
{
    return read_range( begin, end, value );
}

// on success "begin" is advanced past the parsed value
void json_spirit::read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value )
{
    begin = read_range_or_throw( begin, end, value );
}
#endif
| 3,969 | 27.768116 | 124 | cpp |
null | ceph-main/src/json_spirit/json_spirit_reader.h | #ifndef JSON_SPIRIT_READER
#define JSON_SPIRIT_READER
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_error_position.h"
#include <iostream>
namespace json_spirit
{
    // Functions to read a JSON value
#ifdef JSON_SPIRIT_VALUE_ENABLED
bool read( const std::string& s, Value& value );
bool read( std::istream& is, Value& value );
bool read( std::string::const_iterator& begin, std::string::const_iterator end, Value& value );
void read_or_throw( const std::string& s, Value& value );
void read_or_throw( std::istream& is, Value& value );
void read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, Value& value );
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
bool read( const std::wstring& s, wValue& value );
bool read( std::wistream& is, wValue& value );
bool read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value );
void read_or_throw( const std::wstring& s, wValue& value );
void read_or_throw( std::wistream& is, wValue& value );
void read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wValue& value );
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
bool read( const std::string& s, mValue& value );
bool read( std::istream& is, mValue& value );
bool read( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value );
void read_or_throw( const std::string& s, mValue& value );
void read_or_throw( std::istream& is, mValue& value );
void read_or_throw( std::string::const_iterator& begin, std::string::const_iterator end, mValue& value );
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
bool read( const std::wstring& s, wmValue& value );
bool read( std::wistream& is, wmValue& value );
bool read( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value );
void read_or_throw( const std::wstring& s, wmValue& value );
void read_or_throw( std::wistream& is, wmValue& value );
void read_or_throw( std::wstring::const_iterator& begin, std::wstring::const_iterator end, wmValue& value );
#endif
}
#endif
| 2,509 | 38.84127 | 112 | h |
null | ceph-main/src/json_spirit/json_spirit_reader_template.h | #ifndef JSON_SPIRIT_READER_TEMPLATE
#define JSON_SPIRIT_READER_TEMPLATE
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_error_position.h"
#include "common/utf8.h"
#define BOOST_SPIRIT_THREADSAFE // uncomment for multithreaded use, requires linking to boost.thread
#include <boost/bind/bind.hpp>
#include <boost/function.hpp>
#include <boost/version.hpp>
#include <boost/spirit/include/classic_core.hpp>
#include <boost/spirit/include/classic_confix.hpp>
#include <boost/spirit/include/classic_escape_char.hpp>
#include <boost/spirit/include/classic_multi_pass.hpp>
#include <boost/spirit/include/classic_position_iterator.hpp>
#include "include/ceph_assert.h"
namespace json_spirit
{
namespace spirit_namespace = boost::spirit::classic;
const spirit_namespace::int_parser < boost::int64_t > int64_p = spirit_namespace::int_parser < boost::int64_t >();
const spirit_namespace::uint_parser< boost::uint64_t > uint64_p = spirit_namespace::uint_parser< boost::uint64_t >();
template< class Iter_type >
bool is_eq( Iter_type first, Iter_type last, const char* c_str )
{
for( Iter_type i = first; i != last; ++i, ++c_str )
{
if( *c_str == 0 ) return false;
if( *i != *c_str ) return false;
}
return true;
}
template< class Char_type >
Char_type hex_to_num( const Char_type c )
{
if( ( c >= '0' ) && ( c <= '9' ) ) return c - '0';
if( ( c >= 'a' ) && ( c <= 'f' ) ) return c - 'a' + 10;
if( ( c >= 'A' ) && ( c <= 'F' ) ) return c - 'A' + 10;
return 0;
}
template< class Char_type, class Iter_type >
Char_type hex_str_to_char( Iter_type& begin )
{
const Char_type c1( *( ++begin ) );
const Char_type c2( *( ++begin ) );
return ( hex_to_num( c1 ) << 4 ) + hex_to_num( c2 );
}
    template< class String_type, class Iter_type >
    String_type unicode_str_to_utf8( Iter_type& begin );

    // Decode a "\uHHHH" escape: consume the four hex digits following the
    // current position of "begin" (advancing it onto the last digit) and
    // return the UTF-8 encoding of the resulting code point. If encoding
    // fails, the placeholder "_" is returned instead.
    template<>
    std::string unicode_str_to_utf8( std::string::const_iterator & begin )
    {
        typedef std::string::value_type Char_type;

        const Char_type c1( *( ++begin ) );
        const Char_type c2( *( ++begin ) );
        const Char_type c3( *( ++begin ) );
        const Char_type c4( *( ++begin ) );

        unsigned long uc = ( hex_to_num( c1 ) << 12 ) +
                           ( hex_to_num( c2 ) << 8 ) +
                           ( hex_to_num( c3 ) << 4 ) +
                           hex_to_num( c4 );

        unsigned char buf[7]; // MAX_UTF8_SZ is 6 (see src/common/utf8.c)
        int r = encode_utf8(uc, buf);
        if (r >= 0) {
            return std::string(reinterpret_cast<char *>(buf), r);
        }
        return std::string("_");
    }
    // Append the unescaped form of the escape sequence at "begin" (the
    // character after the backslash) to "s", advancing "begin" past any
    // extra characters the escape consumes ("\xHH" and "\uHHHH" forms).
    // Unrecognized escape characters are silently dropped.
    template< class String_type >
    void append_esc_char_and_incr_iter( String_type& s,
                                        typename String_type::const_iterator& begin,
                                        typename String_type::const_iterator end )
    {
        typedef typename String_type::value_type Char_type;

        const Char_type c2( *begin );

        switch( c2 )
        {
            case 't':  s += '\t'; break;
            case 'b':  s += '\b'; break;
            case 'f':  s += '\f'; break;
            case 'n':  s += '\n'; break;
            case 'r':  s += '\r'; break;
            case '\\': s += '\\'; break;
            case '/':  s += '/';  break;
            case '"':  s += '"';  break;
            case 'x':
            {
                // need at least two hex digits after the 'x'
                if( end - begin >= 3 )  //  expecting "xHH..."
                {
                    s += hex_str_to_char< Char_type >( begin );
                }
                break;
            }
            case 'u':
            {
                // need at least four hex digits after the 'u'
                if( end - begin >= 5 )  //  expecting "uHHHH..."
                {
                    s += unicode_str_to_utf8< String_type >( begin );
                }
                break;
            }
        }
    }
template< class String_type >
String_type substitute_esc_chars( typename String_type::const_iterator begin,
typename String_type::const_iterator end )
{
typedef typename String_type::const_iterator Iter_type;
if( end - begin < 2 ) return String_type( begin, end );
String_type result;
result.reserve( end - begin );
const Iter_type end_minus_1( end - 1 );
Iter_type substr_start = begin;
Iter_type i = begin;
for( ; i < end_minus_1; ++i )
{
if( *i == '\\' )
{
result.append( substr_start, i );
++i; // skip the '\'
append_esc_char_and_incr_iter( result, i, end );
substr_start = i + 1;
}
}
result.append( substr_start, end );
return result;
}
    // Strip the surrounding quote characters from a raw string token and
    // resolve any escape sequences within it.
    template< class String_type >
    String_type get_str_( typename String_type::const_iterator begin,
                          typename String_type::const_iterator end )
    {
        ceph_assert( end - begin >= 2 );

        typedef typename String_type::const_iterator Iter_type;

        Iter_type str_without_quotes( ++begin );
        Iter_type end_without_quotes( --end );

        return substitute_esc_chars< String_type >( str_without_quotes, end_without_quotes );
    }

    inline std::string get_str( std::string::const_iterator begin, std::string::const_iterator end )
    {
        return get_str_< std::string >( begin, end );
    }

    // Need this guard else it tries to instantiate unicode_str_to_utf8 with a
    // std::wstring, which isn't presently implemented
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
    inline std::wstring get_str( std::wstring::const_iterator begin, std::wstring::const_iterator end )
    {
        return get_str_< std::wstring >( begin, end );
    }
#endif

    template< class String_type, class Iter_type >
    String_type get_str( Iter_type begin, Iter_type end )
    {
        const String_type tmp( begin, end ); // convert multipass iterators to string iterators

        return get_str( tmp.begin(), tmp.end() );
    }
using namespace boost::placeholders;
// this class's methods get called by the spirit parse resulting
// in the creation of a JSON object or array
//
// NB Iter_type could be a std::string iterator, wstring iterator, a position iterator or a multipass iterator
//
    // Receives the parser callbacks (one per grammar production) and builds the
    // resulting Value tree in place.  `value_` is the root handed in by the
    // caller; `current_p_` points at the object/array currently being filled
    // and `stack_` remembers the enclosing containers while a nested one is
    // being built.
    template< class Value_type, class Iter_type >
    class Semantic_actions
    {
    public:
        typedef typename Value_type::Config_type Config_type;
        typedef typename Config_type::String_type String_type;
        typedef typename Config_type::Object_type Object_type;
        typedef typename Config_type::Array_type Array_type;
        typedef typename String_type::value_type Char_type;
        Semantic_actions( Value_type& value )
        : value_( value )
        , current_p_( 0 )
        {
        }
        // '{' seen: start a new JSON object
        void begin_obj( Char_type c )
        {
            ceph_assert( c == '{' );
            begin_compound< Object_type >();
        }
        // '}' seen: finish the current object and resume its parent
        void end_obj( Char_type c )
        {
            ceph_assert( c == '}' );
            end_compound();
        }
        // '[' seen: start a new JSON array
        void begin_array( Char_type c )
        {
            ceph_assert( c == '[' );
            begin_compound< Array_type >();
        }
        // ']' seen: finish the current array and resume its parent
        void end_array( Char_type c )
        {
            ceph_assert( c == ']' );
            end_compound();
        }
        // the name half of an object's name/value pair; kept in name_ until
        // the matching value arrives
        void new_name( Iter_type begin, Iter_type end )
        {
            ceph_assert( current_p_->type() == obj_type );
            name_ = get_str< String_type >( begin, end );
        }
        void new_str( Iter_type begin, Iter_type end )
        {
            add_to_current( get_str< String_type >( begin, end ) );
        }
        void new_true( Iter_type begin, Iter_type end )
        {
            ceph_assert( is_eq( begin, end, "true" ) );
            add_to_current( true );
        }
        void new_false( Iter_type begin, Iter_type end )
        {
            ceph_assert( is_eq( begin, end, "false" ) );
            add_to_current( false );
        }
        void new_null( Iter_type begin, Iter_type end )
        {
            ceph_assert( is_eq( begin, end, "null" ) );
            add_to_current( Value_type() ); // a default-constructed Value is null
        }
        void new_int( boost::int64_t i )
        {
            add_to_current( i );
        }
        void new_uint64( boost::uint64_t ui )
        {
            add_to_current( ui );
        }
        void new_real( double d )
        {
            add_to_current( d );
        }
    private:
        Semantic_actions& operator=( const Semantic_actions& );
        // to prevent "assignment operator could not be generated" warning
        // installs `value` as the root of the tree; only legal once
        Value_type* add_first( const Value_type& value )
        {
            ceph_assert( current_p_ == 0 );
            value_ = value;
            current_p_ = &value_;
            return current_p_;
        }
        // opens a nested object or array: the container currently being
        // filled is pushed onto stack_ and the new one becomes current_p_
        template< class Array_or_obj >
        void begin_compound()
        {
            if( current_p_ == 0 )
            {
                add_first( Array_or_obj() );
            }
            else
            {
                stack_.push_back( current_p_ );
                Array_or_obj new_array_or_obj; // avoid copy by building new array or object in place
                current_p_ = add_to_current( new_array_or_obj );
            }
        }
        // closes the innermost object/array and resumes filling its parent;
        // a no-op when already at the root
        void end_compound()
        {
            if( current_p_ != &value_ )
            {
                current_p_ = stack_.back();
                stack_.pop_back();
            }
        }
        // appends `value` to the container being built (array element, or the
        // value of the pending name/value pair) and returns the stored copy
        Value_type* add_to_current( const Value_type& value )
        {
            if( current_p_ == 0 )
            {
                return add_first( value );
            }
            else if( current_p_->type() == array_type )
            {
                current_p_->get_array().push_back( value );
                return &current_p_->get_array().back();
            }
            ceph_assert( current_p_->type() == obj_type );
            return &Config_type::add( current_p_->get_obj(), name_, value );
        }
        Value_type& value_; // this is the object or array that is being created
        Value_type* current_p_; // the child object or array that is currently being constructed
        std::vector< Value_type* > stack_; // previous child objects and arrays
        String_type name_; // of current name/value pair
    };
    // position_iterator overload: converts a parse failure into an
    // Error_position carrying the line/column where parsing stopped
    template< typename Iter_type >
    void throw_error( spirit_namespace::position_iterator< Iter_type > i, const std::string& reason )
    {
        throw Error_position( i.get_position().line, i.get_position().column, reason );
    }
    // fallback overload for plain iterators, which carry no position
    // information: only the reason text itself can be thrown (as std::string)
    template< typename Iter_type >
    void throw_error( Iter_type i, const std::string& reason )
    {
        throw reason;
    }
// the spirit grammar
//
    // The Boost.Spirit (classic) grammar for JSON.  Every successful match
    // fires a Semantic_actions callback which builds the Value tree; every
    // mandatory-but-missing token falls into an eps_p[] action that throws
    // via one of the throw_not_* helpers below.
    template< class Value_type, class Iter_type >
    class Json_grammer : public spirit_namespace::grammar< Json_grammer< Value_type, Iter_type > >
    {
    public:
        typedef Semantic_actions< Value_type, Iter_type > Semantic_actions_t;
        Json_grammer( Semantic_actions_t& semantic_actions )
        : actions_( semantic_actions )
        {
        }
        static void throw_not_value( Iter_type begin, Iter_type end )
        {
            throw_error( begin, "not a value" );
        }
        static void throw_not_array( Iter_type begin, Iter_type end )
        {
            throw_error( begin, "not an array" );
        }
        static void throw_not_object( Iter_type begin, Iter_type end )
        {
            throw_error( begin, "not an object" );
        }
        static void throw_not_pair( Iter_type begin, Iter_type end )
        {
            throw_error( begin, "not a pair" );
        }
        static void throw_not_colon( Iter_type begin, Iter_type end )
        {
            throw_error( begin, "no colon in pair" );
        }
        static void throw_not_string( Iter_type begin, Iter_type end )
        {
            throw_error( begin, "not a string" );
        }
        // spirit instantiates this nested type to obtain the rule set
        template< typename ScannerT >
        class definition
        {
        public:
            definition( const Json_grammer& self )
            {
                using namespace spirit_namespace;
                typedef typename Value_type::String_type::value_type Char_type;
                // first we convert the semantic action class methods to functors with the
                // parameter signature expected by spirit
                typedef boost::function< void( Char_type ) > Char_action;
                typedef boost::function< void( Iter_type, Iter_type ) > Str_action;
                typedef boost::function< void( double ) > Real_action;
                typedef boost::function< void( boost::int64_t ) > Int_action;
                typedef boost::function< void( boost::uint64_t ) > Uint64_action;
                Char_action begin_obj ( boost::bind( &Semantic_actions_t::begin_obj, &self.actions_, _1 ) );
                Char_action end_obj ( boost::bind( &Semantic_actions_t::end_obj, &self.actions_, _1 ) );
                Char_action begin_array( boost::bind( &Semantic_actions_t::begin_array, &self.actions_, _1 ) );
                Char_action end_array ( boost::bind( &Semantic_actions_t::end_array, &self.actions_, _1 ) );
                Str_action new_name ( boost::bind( &Semantic_actions_t::new_name, &self.actions_, _1, _2 ) );
                Str_action new_str ( boost::bind( &Semantic_actions_t::new_str, &self.actions_, _1, _2 ) );
                Str_action new_true ( boost::bind( &Semantic_actions_t::new_true, &self.actions_, _1, _2 ) );
                Str_action new_false ( boost::bind( &Semantic_actions_t::new_false, &self.actions_, _1, _2 ) );
                Str_action new_null ( boost::bind( &Semantic_actions_t::new_null, &self.actions_, _1, _2 ) );
                Real_action new_real ( boost::bind( &Semantic_actions_t::new_real, &self.actions_, _1 ) );
                Int_action new_int ( boost::bind( &Semantic_actions_t::new_int, &self.actions_, _1 ) );
                Uint64_action new_uint64 ( boost::bind( &Semantic_actions_t::new_uint64, &self.actions_, _1 ) );
                // actual grammar
                json_
                    = value_ | eps_p[ &throw_not_value ]
                    ;
                value_
                    = string_[ new_str ]
                    | number_
                    | object_
                    | array_
                    | str_p( "true" ) [ new_true ]
                    | str_p( "false" )[ new_false ]
                    | str_p( "null" ) [ new_null ]
                    ;
                object_
                    = ch_p('{')[ begin_obj ]
                    >> !members_
                    >> ( ch_p('}')[ end_obj ] | eps_p[ &throw_not_object ] )
                    ;
                members_
                    = pair_ >> *( ',' >> pair_ | ch_p(',') )
                    ;
                pair_
                    = string_[ new_name ]
                    >> ( ':' | eps_p[ &throw_not_colon ] )
                    >> ( value_ | eps_p[ &throw_not_value ] )
                    ;
                array_
                    = ch_p('[')[ begin_array ]
                    >> !elements_
                    >> ( ch_p(']')[ end_array ] | eps_p[ &throw_not_array ] )
                    ;
                elements_
                    = value_ >> *( ',' >> value_ | ch_p(',') )
                    ;
                string_
                    = lexeme_d // this causes white space inside a string to be retained
                      [
                          confix_p
                          (
                              '"',
                              *lex_escape_ch_p,
                              '"'
                          )
                      ]
                    ;
                number_
                    = strict_real_p[ new_real ]
                    | int64_p [ new_int ]
                    | uint64_p [ new_uint64 ]
                    ;
            }
            spirit_namespace::rule< ScannerT > json_, object_, members_, pair_, array_, elements_, value_, string_, number_;
            const spirit_namespace::rule< ScannerT >& start() const { return json_; }
        };
    private:
        Json_grammer& operator=( const Json_grammer& ); // to prevent "assignment operator could not be generated" warning
        Semantic_actions_t& actions_;
    };
template< class Iter_type, class Value_type >
void add_posn_iter_and_read_range_or_throw( Iter_type begin, Iter_type end, Value_type& value )
{
typedef spirit_namespace::position_iterator< Iter_type > Posn_iter_t;
const Posn_iter_t posn_begin( begin, end );
const Posn_iter_t posn_end( end, end );
read_range_or_throw( posn_begin, posn_end, value );
}
    // Wraps an input stream in spirit multipass iterators (so the grammar can
    // backtrack over buffered input).  skipws is cleared so the parser sees
    // every character, including whitespace.
    template< class Istream_type >
    struct Multi_pass_iters
    {
        typedef typename Istream_type::char_type Char_type;
        typedef std::istream_iterator< Char_type, Char_type > istream_iter;
        typedef spirit_namespace::multi_pass< istream_iter > Mp_iter;
        Multi_pass_iters( Istream_type& is )
        {
            is.unsetf( std::ios::skipws );
            begin_ = spirit_namespace::make_multi_pass( istream_iter( is ) );
            end_ = spirit_namespace::make_multi_pass( istream_iter() );
        }
        Mp_iter begin_;
        Mp_iter end_;
    };
// reads a JSON Value from a pair of input iterators throwing an exception on invalid input, e.g.
//
// string::const_iterator start = str.begin();
// const string::const_iterator next = read_range_or_throw( str.begin(), str.end(), value );
//
// The iterator 'next' will point to the character past the
// last one read.
//
    template< class Iter_type, class Value_type >
    Iter_type read_range_or_throw( Iter_type begin, Iter_type end, Value_type& value )
    {
        // the actions object builds the result tree directly into `value`
        Semantic_actions< Value_type, Iter_type > semantic_actions( value );
        const spirit_namespace::parse_info< Iter_type > info =
            spirit_namespace::parse( begin, end,
                                    Json_grammer< Value_type, Iter_type >( semantic_actions ),
                                    spirit_namespace::space_p );
        // the grammar's eps_p error actions throw on malformed input, so a
        // plain "no hit" result should be unreachable
        if( !info.hit )
        {
            ceph_assert( false ); // in theory exception should already have been thrown
            throw_error( info.stop, "error" );
        }
        return info.stop;
    }
// reads a JSON Value from a pair of input iterators, e.g.
//
// string::const_iterator start = str.begin();
// const bool success = read_string( start, str.end(), value );
//
// The iterator 'start' will point to the character past the
// last one read.
//
template< class Iter_type, class Value_type >
bool read_range( Iter_type& begin, Iter_type end, Value_type& value )
{
try
{
begin = read_range_or_throw( begin, end, value );
return true;
}
catch( ... )
{
return false;
}
}
// reads a JSON Value from a string, e.g.
//
// const bool success = read_string( str, value );
//
template< class String_type, class Value_type >
bool read_string( const String_type& s, Value_type& value )
{
typename String_type::const_iterator begin = s.begin();
return read_range( begin, s.end(), value );
}
// reads a JSON Value from a string throwing an exception on invalid input, e.g.
//
// read_string_or_throw( is, value );
//
template< class String_type, class Value_type >
void read_string_or_throw( const String_type& s, Value_type& value )
{
add_posn_iter_and_read_range_or_throw( s.begin(), s.end(), value );
}
// reads a JSON Value from a stream, e.g.
//
// const bool success = read_stream( is, value );
//
    // Reads one JSON value from the stream into `value`; returns false on
    // malformed input (input is buffered via multipass iterators).
    template< class Istream_type, class Value_type >
    bool read_stream( Istream_type& is, Value_type& value )
    {
        Multi_pass_iters< Istream_type > mp_iters( is );
        return read_range( mp_iters.begin_, mp_iters.end_, value );
    }
// reads a JSON Value from a stream throwing an exception on invalid input, e.g.
//
// read_stream_or_throw( is, value );
//
    // Reads one JSON value from the stream into `value`; on malformed input
    // throws an Error_position with line/column info.
    template< class Istream_type, class Value_type >
    void read_stream_or_throw( Istream_type& is, Value_type& value )
    {
        const Multi_pass_iters< Istream_type > mp_iters( is );
        add_posn_iter_and_read_range_or_throw( mp_iters.begin_, mp_iters.end_, value );
    }
}
#endif
| 21,383 | 31.302115 | 124 | h |
null | ceph-main/src/json_spirit/json_spirit_stream_reader.h | #ifndef JSON_SPIRIT_READ_STREAM
#define JSON_SPIRIT_READ_STREAM
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_reader_template.h"
namespace json_spirit
{
// these classes allows you to read multiple top level contiguous values from a stream,
// the normal stream read functions have a bug that prevent multiple top level values
// from being read unless they are separated by spaces
    // Pulls successive top-level JSON values from a stream through the
    // non-throwing read_range(); read_next() returns false when the next
    // value cannot be parsed.
    template< class Istream_type, class Value_type >
    class Stream_reader
    {
    public:
        Stream_reader( Istream_type& is )
        : iters_( is )
        {
        }
        // parse the next top-level value; false on failure
        bool read_next( Value_type& value )
        {
            return read_range( iters_.begin_, iters_.end_, value );
        }
    private:
        typedef Multi_pass_iters< Istream_type > Mp_iters;
        Mp_iters iters_;
    };
    // Like Stream_reader, but read_next() throws on malformed input (an
    // Error_position with line/column info, thanks to the position iterators)
    // instead of returning a failure flag.
    template< class Istream_type, class Value_type >
    class Stream_reader_thrower
    {
    public:
        Stream_reader_thrower( Istream_type& is )
        : iters_( is )
        , posn_begin_( iters_.begin_, iters_.end_ )
        , posn_end_( iters_.end_, iters_.end_ )
        {
        }
        // parse the next top-level value, advancing past it on success
        void read_next( Value_type& value )
        {
            posn_begin_ = read_range_or_throw( posn_begin_, posn_end_, value );
        }
    private:
        typedef Multi_pass_iters< Istream_type > Mp_iters;
        typedef spirit_namespace::position_iterator< typename Mp_iters::Mp_iter > Posn_iter_t;
        Mp_iters iters_;
        Posn_iter_t posn_begin_, posn_end_;
    };
}
#endif
| 1,724 | 23.295775 | 94 | h |
null | ceph-main/src/json_spirit/json_spirit_utils.h | #ifndef JSON_SPIRIT_UTILS
#define JSON_SPIRIT_UTILS
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include <map>
namespace json_spirit
{
template< class Obj_t, class Map_t >
void obj_to_map( const Obj_t& obj, Map_t& mp_obj )
{
mp_obj.clear();
for( typename Obj_t::const_iterator i = obj.begin(); i != obj.end(); ++i )
{
mp_obj[ i->name_ ] = i->value_;
}
}
template< class Obj_t, class Map_t >
void map_to_obj( const Map_t& mp_obj, Obj_t& obj )
{
obj.clear();
for( typename Map_t::const_iterator i = mp_obj.begin(); i != mp_obj.end(); ++i )
{
obj.push_back( typename Obj_t::value_type( i->first, i->second ) );
}
}
#ifdef JSON_SPIRIT_VALUE_ENABLED
typedef std::map< std::string, Value > Mapped_obj;
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
typedef std::map< std::wstring, wValue > wMapped_obj;
#endif
template< class Object_type, class String_type >
const typename Object_type::value_type::Value_type& find_value( const Object_type& obj, const String_type& name )
{
for( typename Object_type::const_iterator i = obj.begin(); i != obj.end(); ++i )
{
if( i->name_ == name )
{
return i->value_;
}
}
return Object_type::value_type::Value_type::null;
}
}
#endif
| 1,644 | 24.703125 | 117 | h |
null | ceph-main/src/json_spirit/json_spirit_value.cpp | /* Copyright (c) 2007 John W Wilkinson
This source code can be used for any purpose as long as
this comment is retained. */
// json spirit version 2.00
#include "json_spirit_value.h"
| 192 | 20.444444 | 58 | cpp |
null | ceph-main/src/json_spirit/json_spirit_value.h | #ifndef JSON_SPIRIT_VALUE
#define JSON_SPIRIT_VALUE
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include <vector>
#include <map>
#include <string>
#include <sstream>
#include <stdexcept>
#include <boost/config.hpp>
#include <boost/cstdint.hpp>
#include <boost/shared_ptr.hpp>
#include <boost/variant.hpp>
// comment out the value types you don't need to reduce build times and intermediate file sizes
#define JSON_SPIRIT_VALUE_ENABLED
//#define JSON_SPIRIT_WVALUE_ENABLED
#define JSON_SPIRIT_MVALUE_ENABLED
//#define JSON_SPIRIT_WMVALUE_ENABLED
namespace json_spirit
{
enum Value_type{ obj_type, array_type, str_type, bool_type, int_type, real_type, null_type };
struct Null{};
template< class Config > // Config determines whether the value uses std::string or std::wstring and
// whether JSON Objects are represented as vectors or maps
class Value_impl
{
public:
typedef Config Config_type;
typedef typename Config::String_type String_type;
typedef typename Config::Object_type Object;
typedef typename Config::Array_type Array;
typedef typename String_type::const_pointer Const_str_ptr; // eg const char*
Value_impl(); // creates null value
Value_impl( Const_str_ptr value );
Value_impl( const String_type& value );
Value_impl( const Object& value );
Value_impl( const Array& value );
Value_impl( bool value );
Value_impl( int value );
Value_impl( boost::int64_t value );
Value_impl( boost::uint64_t value );
Value_impl( double value );
template< class Iter >
Value_impl( Iter first, Iter last ); // constructor from containers, e.g. std::vector or std::list
template< BOOST_VARIANT_ENUM_PARAMS( typename T ) >
Value_impl( const boost::variant< BOOST_VARIANT_ENUM_PARAMS(T) >& variant ); // constructor for compatible variant types
Value_impl( const Value_impl& other );
bool operator==( const Value_impl& lhs ) const;
Value_impl& operator=( const Value_impl& lhs );
Value_type type() const;
bool is_uint64() const;
bool is_null() const;
const String_type& get_str() const;
const Object& get_obj() const;
const Array& get_array() const;
bool get_bool() const;
int get_int() const;
boost::int64_t get_int64() const;
boost::uint64_t get_uint64() const;
double get_real() const;
Object& get_obj();
Array& get_array();
template< typename T > T get_value() const; // example usage: int i = value.get_value< int >();
// or double d = value.get_value< double >();
static const Value_impl null;
private:
void check_type( const Value_type vtype ) const;
typedef boost::variant< boost::recursive_wrapper< Object >, boost::recursive_wrapper< Array >,
String_type, bool, boost::int64_t, double, Null, boost::uint64_t > Variant;
Variant v_;
class Variant_converter_visitor : public boost::static_visitor< Variant >
{
public:
template< typename T, typename A, template< typename, typename > class Cont >
Variant operator()( const Cont< T, A >& cont ) const
{
return Array( cont.begin(), cont.end() );
}
Variant operator()( int i ) const
{
return static_cast< boost::int64_t >( i );
}
template<class T>
Variant operator()( const T& t ) const
{
return t;
}
};
};
// vector objects
template< class Config >
struct Pair_impl
{
typedef typename Config::String_type String_type;
typedef typename Config::Value_type Value_type;
Pair_impl()
{
}
Pair_impl( const String_type& name, const Value_type& value );
bool operator==( const Pair_impl& lhs ) const;
String_type name_;
Value_type value_;
};
#if defined( JSON_SPIRIT_VALUE_ENABLED ) || defined( JSON_SPIRIT_WVALUE_ENABLED )
template< class String >
struct Config_vector
{
typedef String String_type;
typedef Value_impl< Config_vector > Value_type;
typedef Pair_impl < Config_vector > Pair_type;
typedef std::vector< Value_type > Array_type;
typedef std::vector< Pair_type > Object_type;
static Value_type& add( Object_type& obj, const String_type& name, const Value_type& value )
{
obj.push_back( Pair_type( name , value ) );
return obj.back().value_;
}
static String_type get_name( const Pair_type& pair )
{
return pair.name_;
}
static Value_type get_value( const Pair_type& pair )
{
return pair.value_;
}
};
#endif
// typedefs for ASCII
#ifdef JSON_SPIRIT_VALUE_ENABLED
typedef Config_vector< std::string > Config;
typedef Config::Value_type Value;
typedef Config::Pair_type Pair;
typedef Config::Object_type Object;
typedef Config::Array_type Array;
#endif
// typedefs for Unicode
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
typedef Config_vector< std::wstring > wConfig;
typedef wConfig::Value_type wValue;
typedef wConfig::Pair_type wPair;
typedef wConfig::Object_type wObject;
typedef wConfig::Array_type wArray;
#endif
// map objects
#if defined( JSON_SPIRIT_MVALUE_ENABLED ) || defined( JSON_SPIRIT_WMVALUE_ENABLED )
template< class String >
struct Config_map
{
typedef String String_type;
typedef Value_impl< Config_map > Value_type;
typedef std::vector< Value_type > Array_type;
typedef std::map< String_type, Value_type > Object_type;
typedef std::pair< String_type, Value_type > Pair_type;
static Value_type& add( Object_type& obj, const String_type& name, const Value_type& value )
{
return obj[ name ] = value;
}
static String_type get_name( const Pair_type& pair )
{
return pair.first;
}
static Value_type get_value( const Pair_type& pair )
{
return pair.second;
}
};
#endif
// typedefs for ASCII
#ifdef JSON_SPIRIT_MVALUE_ENABLED
typedef Config_map< std::string > mConfig;
typedef mConfig::Value_type mValue;
typedef mConfig::Object_type mObject;
typedef mConfig::Array_type mArray;
#endif
// typedefs for Unicode
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
typedef Config_map< std::wstring > wmConfig;
typedef wmConfig::Value_type wmValue;
typedef wmConfig::Object_type wmObject;
typedef wmConfig::Array_type wmArray;
#endif
///////////////////////////////////////////////////////////////////////////////////////////////
//
// implementation
inline bool operator==( const Null&, const Null& )
{
return true;
}
template< class Config >
const Value_impl< Config > Value_impl< Config >::null;
template< class Config >
Value_impl< Config >::Value_impl()
: v_( Null() )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Const_str_ptr value )
: v_( String_type( value ) )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const String_type& value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Object& value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Array& value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( bool value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( int value )
: v_( static_cast< boost::int64_t >( value ) )
{
}
template< class Config >
Value_impl< Config >::Value_impl( boost::int64_t value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( boost::uint64_t value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( double value )
: v_( value )
{
}
template< class Config >
Value_impl< Config >::Value_impl( const Value_impl< Config >& other )
: v_( other.v_ )
{
}
template< class Config >
template< class Iter >
Value_impl< Config >::Value_impl( Iter first, Iter last )
: v_( Array( first, last ) )
{
}
template< class Config >
template< BOOST_VARIANT_ENUM_PARAMS( typename T ) >
Value_impl< Config >::Value_impl( const boost::variant< BOOST_VARIANT_ENUM_PARAMS(T) >& variant )
: v_( boost::apply_visitor( Variant_converter_visitor(), variant) )
{
}
    // Copy-and-swap assignment: building the copy first gives the strong
    // exception guarantee.  (The parameter is conventionally the right-hand
    // side despite its `lhs` name.)
    template< class Config >
    Value_impl< Config >& Value_impl< Config >::operator=( const Value_impl& lhs )
    {
        Value_impl tmp( lhs );
        std::swap( v_, tmp.v_ );
        return *this;
    }
template< class Config >
bool Value_impl< Config >::operator==( const Value_impl& lhs ) const
{
if( this == &lhs ) return true;
if( type() != lhs.type() ) return false;
return v_ == lhs.v_;
}
    // Reports the JSON type of the held value.  Unsigned 64 bit integers live
    // in their own variant slot (past null_type, see is_uint64()) but are
    // still reported as int_type; for every other slot the variant index maps
    // 1:1 onto the Value_type enum order.
    template< class Config >
    Value_type Value_impl< Config >::type() const
    {
        if( is_uint64() )
        {
            return int_type;
        }
        return static_cast< Value_type >( v_.which() );
    }
template< class Config >
bool Value_impl< Config >::is_uint64() const
{
return v_.which() == null_type + 1;
}
template< class Config >
bool Value_impl< Config >::is_null() const
{
return type() == null_type;
}
template< class Config >
void Value_impl< Config >::check_type( const Value_type vtype ) const
{
if( type() != vtype )
{
std::ostringstream os;
os << "value type is " << type() << " not " << vtype;
throw std::runtime_error( os.str() );
}
}
template< class Config >
const typename Config::String_type& Value_impl< Config >::get_str() const
{
check_type( str_type );
return *boost::get< String_type >( &v_ );
}
template< class Config >
const typename Value_impl< Config >::Object& Value_impl< Config >::get_obj() const
{
check_type( obj_type );
return *boost::get< Object >( &v_ );
}
template< class Config >
const typename Value_impl< Config >::Array& Value_impl< Config >::get_array() const
{
check_type( array_type );
return *boost::get< Array >( &v_ );
}
template< class Config >
bool Value_impl< Config >::get_bool() const
{
check_type( bool_type );
return boost::get< bool >( v_ );
}
template< class Config >
int Value_impl< Config >::get_int() const
{
check_type( int_type );
return static_cast< int >( get_int64() );
}
template< class Config >
boost::int64_t Value_impl< Config >::get_int64() const
{
check_type( int_type );
if( is_uint64() )
{
return static_cast< boost::int64_t >( get_uint64() );
}
return boost::get< boost::int64_t >( v_ );
}
template< class Config >
boost::uint64_t Value_impl< Config >::get_uint64() const
{
check_type( int_type );
if( !is_uint64() )
{
return static_cast< boost::uint64_t >( get_int64() );
}
return boost::get< boost::uint64_t >( v_ );
}
    // Returns the value as a double.  Integer values (signed or unsigned) are
    // converted on demand; anything else must already hold a real, otherwise
    // check_type() throws std::runtime_error.
    template< class Config >
    double Value_impl< Config >::get_real() const
    {
        if( type() == int_type )
        {
            return is_uint64() ? static_cast< double >( get_uint64() )
                               : static_cast< double >( get_int64() );
        }
        check_type( real_type );
        return boost::get< double >( v_ );
    }
template< class Config >
typename Value_impl< Config >::Object& Value_impl< Config >::get_obj()
{
check_type( obj_type );
return *boost::get< Object >( &v_ );
}
template< class Config >
typename Value_impl< Config >::Array& Value_impl< Config >::get_array()
{
check_type( array_type );
return *boost::get< Array >( &v_ );
}
template< class Config >
Pair_impl< Config >::Pair_impl( const String_type& name, const Value_type& value )
: name_( name )
, value_( value )
{
}
template< class Config >
bool Pair_impl< Config >::operator==( const Pair_impl< Config >& lhs ) const
{
if( this == &lhs ) return true;
return ( name_ == lhs.name_ ) && ( value_ == lhs.value_ );
}
// converts a C string, ie. 8 bit char array, to a string object
//
template < class String_type >
String_type to_str( const char* c_str )
{
String_type result;
for( const char* p = c_str; *p != 0; ++p )
{
result += *p;
}
return result;
}
//
namespace internal_
{
template< typename T >
struct Type_to_type
{
};
template< class Value >
int get_value( const Value& value, Type_to_type< int > )
{
return value.get_int();
}
template< class Value >
boost::int64_t get_value( const Value& value, Type_to_type< boost::int64_t > )
{
return value.get_int64();
}
template< class Value >
boost::uint64_t get_value( const Value& value, Type_to_type< boost::uint64_t > )
{
return value.get_uint64();
}
template< class Value >
double get_value( const Value& value, Type_to_type< double > )
{
return value.get_real();
}
template< class Value >
typename Value::String_type get_value( const Value& value, Type_to_type< typename Value::String_type > )
{
return value.get_str();
}
template< class Value >
typename Value::Array get_value( const Value& value, Type_to_type< typename Value::Array > )
{
return value.get_array();
}
template< class Value >
typename Value::Object get_value( const Value& value, Type_to_type< typename Value::Object > )
{
return value.get_obj();
}
template< class Value >
bool get_value( const Value& value, Type_to_type< bool > )
{
return value.get_bool();
}
}
template< class Config >
template< typename T >
T Value_impl< Config >::get_value() const
{
return internal_::get_value( *this, internal_::Type_to_type< T >() );
}
}
#endif
| 15,731 | 25.892308 | 128 | h |
null | ceph-main/src/json_spirit/json_spirit_writer.cpp | // Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#include "json_spirit_writer.h"
#include "json_spirit_writer_template.h"
using namespace json_spirit;
#ifdef JSON_SPIRIT_VALUE_ENABLED
void json_spirit::write( const Value& value, std::ostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::string json_spirit::write( const Value& value, unsigned int options )
{
return write_string( value, options );
}
void json_spirit::write_formatted( const Value& value, std::ostream& os )
{
write_stream( value, os, pretty_print );
}
std::string json_spirit::write_formatted( const Value& value )
{
return write_string( value, pretty_print );
}
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
void json_spirit::write( const mValue& value, std::ostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::string json_spirit::write( const mValue& value, unsigned int options )
{
return write_string( value, options );
}
void json_spirit::write_formatted( const mValue& value, std::ostream& os )
{
write_stream( value, os, pretty_print );
}
std::string json_spirit::write_formatted( const mValue& value )
{
return write_string( value, pretty_print );
}
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void json_spirit::write( const wValue& value, std::wostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::wstring json_spirit::write( const wValue& value, unsigned int options )
{
return write_string( value, options );
}
void json_spirit::write_formatted( const wValue& value, std::wostream& os )
{
write_stream( value, os, pretty_print );
}
std::wstring json_spirit::write_formatted( const wValue& value )
{
return write_string( value, pretty_print );
}
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void json_spirit::write_formatted( const wmValue& value, std::wostream& os )
{
write_stream( value, os, pretty_print );
}
std::wstring json_spirit::write_formatted( const wmValue& value )
{
return write_string( value, pretty_print );
}
void json_spirit::write( const wmValue& value, std::wostream& os, unsigned int options )
{
write_stream( value, os, options );
}
std::wstring json_spirit::write( const wmValue& value, unsigned int options )
{
return write_string( value, options );
}
#endif
| 2,764 | 27.505155 | 92 | cpp |
null | ceph-main/src/json_spirit/json_spirit_writer.h | #ifndef JSON_SPIRIT_WRITER
#define JSON_SPIRIT_WRITER
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_writer_options.h"
#include <iostream>
namespace json_spirit
{
// these functions to convert JSON Values to text
#ifdef JSON_SPIRIT_VALUE_ENABLED
void write( const Value& value, std::ostream& os, unsigned int options = 0 );
std::string write( const Value& value, unsigned int options = 0 );
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
void write( const mValue& value, std::ostream& os, unsigned int options = 0 );
std::string write( const mValue& value, unsigned int options = 0 );
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write( const wValue& value, std::wostream& os, unsigned int options = 0 );
std::wstring write( const wValue& value, unsigned int options = 0 );
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write( const wmValue& value, std::wostream& os, unsigned int options = 0 );
std::wstring write( const wmValue& value, unsigned int options = 0 );
#endif
// these "formatted" versions of the "write" functions are the equivalent of the above functions
// with option "pretty_print"
#ifdef JSON_SPIRIT_VALUE_ENABLED
void write_formatted( const Value& value, std::ostream& os );
std::string write_formatted( const Value& value );
#endif
#ifdef JSON_SPIRIT_MVALUE_ENABLED
void write_formatted( const mValue& value, std::ostream& os );
std::string write_formatted( const mValue& value );
#endif
#if defined( JSON_SPIRIT_WVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write_formatted( const wValue& value, std::wostream& os );
std::wstring write_formatted( const wValue& value );
#endif
#if defined( JSON_SPIRIT_WMVALUE_ENABLED ) && !defined( BOOST_NO_STD_WSTRING )
void write_formatted( const wmValue& value, std::wostream& os );
std::wstring write_formatted( const wmValue& value );
#endif
}
#endif
| 2,301 | 34.96875 | 100 | h |
null | ceph-main/src/json_spirit/json_spirit_writer_options.h | #ifndef JSON_SPIRIT_WRITER_OPTIONS
#define JSON_SPIRIT_WRITER_OPTIONS
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
namespace json_spirit
{
enum Output_options{ pretty_print = 0x01, // Add whitespace to format the output nicely.
raw_utf8 = 0x02, // This prevents non-printable characters from being escapted using "\uNNNN" notation.
// Note, this is an extension to the JSON standard. It disables the escaping of
// non-printable characters allowing UTF-8 sequences held in 8 bit char strings
// to pass through unaltered.
remove_trailing_zeros = 0x04,
// outputs e.g. "1.200000000000000" as "1.2"
single_line_arrays = 0x08,
// pretty printing except that arrays printed on single lines unless they contain
// composite elements, i.e. objects or arrays
};
}
#endif
| 1,341 | 42.290323 | 134 | h |
null | ceph-main/src/json_spirit/json_spirit_writer_template.h | #ifndef JSON_SPIRIT_WRITER_TEMPLATE
#define JSON_SPIRIT_WRITER_TEMPLATE
// Copyright John W. Wilkinson 2007 - 2011
// Distributed under the MIT License, see accompanying file LICENSE.txt
// json spirit version 4.05
#if defined(_MSC_VER) && (_MSC_VER >= 1020)
# pragma once
#endif
#include "json_spirit_value.h"
#include "json_spirit_writer_options.h"
#include <sstream>
#include <iomanip>
#include <boost/io/ios_state.hpp>
#include "include/ceph_assert.h"
namespace json_spirit
{
inline char to_hex_char( unsigned int c )
{
    // Map a single nibble (0x0..0xF) to its upper-case hexadecimal digit.
    ceph_assert( c <= 0xF );
    const char digit = static_cast< char >( c );
    return ( digit < 10 ) ? ( '0' + digit ) : ( 'A' - 10 + digit );
}
template< class String_type >
String_type non_printable_to_string( unsigned int c )
{
    // Render the low 16 bits of c as a JSON "\uXXXX" escape sequence,
    // upper-case hex, least significant nibble last.
    static const char hex_digits[] = "0123456789ABCDEF";
    String_type esc( 6, '\\' );
    esc[ 1 ] = 'u';
    for( int pos = 5; pos >= 2; --pos )
    {
        esc[ pos ] = hex_digits[ c & 0x000F ];
        c >>= 4;
    }
    return esc;
}
template< typename Char_type, class String_type >
bool add_esc_char( Char_type c, String_type& s )
{
    // If c has a two-character JSON escape, append it to s and report true;
    // otherwise leave s untouched and report false.
    const char* escaped = 0;
    switch( c )
    {
        case '"':  escaped = "\\\""; break;
        case '\\': escaped = "\\\\"; break;
        case '\b': escaped = "\\b";  break;
        case '\f': escaped = "\\f";  break;
        case '\n': escaped = "\\n";  break;
        case '\r': escaped = "\\r";  break;
        case '\t': escaped = "\\t";  break;
        default:   return false;
    }
    s += to_str< String_type >( escaped );
    return true;
}
// Return a copy of s with every character JSON-escaped as needed: the
// standard two-character escapes via add_esc_char(), and (unless raw_utf8
// is set) any non-printable character as a "\uXXXX" sequence.
template< class String_type >
String_type add_esc_chars( const String_type& s, bool raw_utf8 )
{
    typedef typename String_type::const_iterator Iter_type;
    typedef typename String_type::value_type Char_type;
    String_type result;
    const Iter_type end( s.end() );
    for( Iter_type i = s.begin(); i != end; ++i )
    {
        const Char_type c( *i );
        // handles ", \, and the control characters with short escapes
        if( add_esc_char( c, result ) ) continue;
        if( raw_utf8 )
        {
            // pass bytes through untouched so UTF-8 sequences survive
            result += c;
        }
        else
        {
            // negative chars (high-bit bytes on signed-char platforms) are
            // remapped into 128..255 before the printability test
            // NOTE(review): the 256 + c adjustment assumes 8-bit char — confirm
            const wint_t unsigned_c( ( c >= 0 ) ? c : 256 + c );
            if( iswprint( unsigned_c ) )
            {
                result += c;
            }
            else
            {
                result += non_printable_to_string< String_type >( unsigned_c );
            }
        }
    }
    return result;
}
template< class Ostream >
void append_double( Ostream& os, const double d, const int precision )
{
    // Write d with a forced decimal point (showpoint) and the requested
    // number of significant digits.
    os << std::showpoint;
    os << std::setprecision( precision );
    os << d;
}
template< class String_type >
void erase_and_extract_exponent( String_type& str, String_type& exp )
{
    // Split str into mantissa (left in str) and exponent (moved to exp);
    // exp is left untouched when there is no 'e'.
    const typename String_type::size_type e_pos = str.find( 'e' );
    if( e_pos == String_type::npos ) return;
    exp = str.substr( e_pos );
    str.erase( e_pos );
}

template< class String_type >
typename String_type::size_type find_first_non_zero( const String_type& str )
{
    // Scan backwards for the last character that is not '0'; never walks
    // past position 0.
    typename String_type::size_type pos = str.size() - 1;
    while( pos != 0 && str[ pos ] == '0' )
    {
        --pos;
    }
    return pos;
}

template< class String_type >
void remove_trailing( String_type& str )
{
    // Strip redundant trailing zeros from a formatted double, e.g.
    // "1.200000000000000" -> "1.2", then re-attach any exponent part.
    String_type exp;
    erase_and_extract_exponent( str, exp );
    const typename String_type::size_type last_significant = find_first_non_zero( str );
    if( last_significant != 0 )
    {
        // keep one zero after a bare decimal point ("2." -> "2.0");
        // note zero digits following a decimal point is non standard
        const int keep = str[ last_significant ] == '.' ? 2 : 1;
        str.erase( last_significant + keep );
    }
    str += exp;
}
// this class generates the JSON text,
// it keeps track of the indentation level etc.
//
template< class Value_type, class Ostream_type >
class Generator
{
    typedef typename Value_type::Config_type Config_type;
    typedef typename Config_type::String_type String_type;
    typedef typename Config_type::Object_type Object_type;
    typedef typename Config_type::Array_type Array_type;
    typedef typename String_type::value_type Char_type;
    typedef typename Object_type::value_type Obj_member_type;
public:
    // Writes the whole of "value" to "os" during construction; "options"
    // is an OR of the Output_options flags.
    Generator( const Value_type& value, Ostream_type& os, unsigned int options )
    :   os_( os )
    ,   indentation_level_( 0 )
        // single_line_arrays implies pretty printing for everything else
    ,   pretty_( ( options & pretty_print ) != 0 || ( options & single_line_arrays ) != 0 )
    ,   raw_utf8_( ( options & raw_utf8 ) != 0 )
    ,   remove_trailing_zeros_( ( options & remove_trailing_zeros ) != 0 )
    ,   single_line_arrays_( ( options & single_line_arrays ) != 0 )
    ,   ios_saver_( os )
    {
        output( value );
    }
private:
    // dispatch on the runtime type of the value
    void output( const Value_type& value )
    {
        switch( value.type() )
        {
            case obj_type: output( value.get_obj() ); break;
            case array_type: output( value.get_array() ); break;
            case str_type: output( value.get_str() ); break;
            case bool_type: output( value.get_bool() ); break;
            case real_type: output( value.get_real() ); break;
            case int_type: output_int( value ); break;
            case null_type: os_ << "null"; break;
            default: ceph_assert( false );
        }
    }
    void output( const Object_type& obj )
    {
        output_array_or_obj( obj, '{', '}' );
    }
    // one "name": value member of an object
    void output( const Obj_member_type& member )
    {
        output( Config_type::get_name( member ) ); space();
        os_ << ':'; space();
        output( Config_type::get_value( member ) );
    }
    // integers are written unsigned or signed depending on how they are held
    void output_int( const Value_type& value )
    {
        if( value.is_uint64() )
        {
            os_ << value.get_uint64();
        }
        else
        {
            os_ << value.get_int64();
        }
    }
    void output( const String_type& s )
    {
        os_ << '"' << add_esc_chars( s, raw_utf8_ ) << '"';
    }
    void output( bool b )
    {
        os_ << to_str< String_type >( b ? "true" : "false" );
    }
    void output( double d )
    {
        if( remove_trailing_zeros_ )
        {
            // format into a scratch stream first so the zeros can be trimmed
            std::basic_ostringstream< Char_type > os;
            append_double( os, d, 16 ); // note precision is 16 so that we get some trailing space that we can remove,
                                        // otherwise, 0.1234 gets converted to "0.12399999..."
            String_type str = os.str();
            remove_trailing( str );
            os_ << str;
        }
        else
        {
            append_double( os_, d, 17 );
        }
    }
    // true if the array holds any objects or nested arrays
    static bool contains_composite_elements( const Array_type& arr )
    {
        for( typename Array_type::const_iterator i = arr.begin(); i != arr.end(); ++i )
        {
            const Value_type& val = *i;
            if( val.type() == obj_type ||
                val.type() == array_type )
            {
                return true;
            }
        }
        return false;
    }
    // writes one element and, unless it is the last, a trailing comma
    template< class Iter >
    void output_composite_item( Iter i, Iter last )
    {
        output( *i );
        if( ++i != last )
        {
            os_ << ',';
        }
    }
    void output( const Array_type& arr )
    {
        // simple arrays may be kept on one line when single_line_arrays is set
        if( single_line_arrays_ && !contains_composite_elements( arr ) )
        {
            os_ << '['; space();
            for( typename Array_type::const_iterator i = arr.begin(); i != arr.end(); ++i )
            {
                output_composite_item( i, arr.end() );
                space();
            }
            os_ << ']';
        }
        else
        {
            output_array_or_obj( arr, '[', ']' );
        }
    }
    // shared layout for objects ('{'..'}') and arrays ('['..']'):
    // one element per line at the current indentation level
    template< class T >
    void output_array_or_obj( const T& t, Char_type start_char, Char_type end_char )
    {
        os_ << start_char; new_line();
        ++indentation_level_;
        for( typename T::const_iterator i = t.begin(); i != t.end(); ++i )
        {
            indent();
            output_composite_item( i, t.end() );
            new_line();
        }
        --indentation_level_;
        indent(); os_ << end_char;
    }
    void indent()
    {
        if( !pretty_ ) return;
        for( int i = 0; i < indentation_level_; ++i )
        {
            os_ << " ";
        }
    }
    void space()
    {
        if( pretty_ ) os_ << ' ';
    }
    void new_line()
    {
        if( pretty_ ) os_ << '\n';
    }
    Generator& operator=( const Generator& ); // to prevent "assignment operator could not be generated" warning
    Ostream_type& os_;
    int indentation_level_;
    bool pretty_;
    bool raw_utf8_;
    bool remove_trailing_zeros_;
    bool single_line_arrays_;
    boost::io::basic_ios_all_saver< Char_type > ios_saver_; // so that ostream state is reset after control is returned to the caller
};
// writes JSON Value to a stream, e.g.
//
// write_stream( value, os, pretty_print );
//
template< class Value_type, class Ostream_type >
void write_stream( const Value_type& value, Ostream_type& os, unsigned int options = 0 )
{
    os << std::dec; // integers must come out in base 10 regardless of stream state
    Generator< Value_type, Ostream_type > generator( value, os, options );
}

// formats a JSON Value as a string, e.g.
//
// const string json_str = write_string( value, pretty_print );
//
template< class Value_type >
typename Value_type::String_type write_string( const Value_type& value, unsigned int options = 0 )
{
    typedef typename Value_type::String_type::value_type Char_type;
    std::basic_ostringstream< Char_type > buffer;
    write_stream( value, buffer, options );
    return buffer.str();
}
#endif
| 10,833 | 27.14026 | 138 | h |
null | ceph-main/src/key_value_store/cls_kvs.cc | /*
* OSD classes for the key value store
*
* Created on: Aug 10, 2012
* Author: Eleanor Cawthon
*/
#include "include/compat.h"
#include "objclass/objclass.h"
#include <errno.h>
#include "key_value_store/kvs_arg_types.h"
#include "include/types.h"
#include <iostream>
#include <climits>
using std::string;
using std::map;
using std::set;
/**
 * finds the index_data where a key belongs.
 *
 * @param key: the key to search for
 * @param idata: the index_data for the first index value such that idata.key
 * is greater than key.
 * @param next_idata: the index_data for the next index entry after idata
 * @pre: key is not encoded
 * @post: idata contains complete information
 * stored
 */
static int get_idata_from_key(cls_method_context_t hctx, const string &key,
    index_data &idata, index_data &next_idata) {
  bufferlist raw_val;
  int r = 0;
  std::map<std::string, bufferlist> kvmap;
  bool more;
  // fetch up to two index entries at or after the encoded key in one omap
  // read: the entry that owns the range and its successor.
  r = cls_cxx_map_get_vals(hctx, key_data(key).encoded(), "", 2, &kvmap, &more);
  if (r < 0) {
    CLS_LOG(20, "error reading index for range %s: %d", key.c_str(), r);
    return r;
  }
  // check for an exact-match entry for the key itself
  r = cls_cxx_map_get_val(hctx, key_data(key).encoded(), &raw_val);
  if (r == 0){
    CLS_LOG(20, "%s is already in the index: %d", key.c_str(), r);
    auto b = raw_val.cbegin();
    idata.decode(b);
    if (!kvmap.empty()) {
      auto b = kvmap.begin()->second.cbegin();
      next_idata.decode(b);
    }
    return r;
  } else if (r == -ENOENT || r == -ENODATA) {
    // no exact match: the first entry at or after the key owns the range.
    // guard against an empty result before dereferencing (previously this
    // dereferenced kvmap.begin() unconditionally).
    if (kvmap.empty()) {
      CLS_LOG(20, "no index entry found at or after %s", key.c_str());
      return -ENODATA;
    }
    auto b = kvmap.begin()->second.cbegin();
    idata.decode(b);
    // a "1" prefix appears to mark the highest (sentinel) entry, which has
    // no successor — TODO confirm against key_data encoding.  Only decode
    // the successor when a second entry was actually returned.
    if (idata.kdata.prefix != "1" && kvmap.size() > 1) {
      auto nb = (++kvmap.begin())->second.cbegin();
      next_idata.decode(nb);
    }
    r = 0;
  } else if (r < 0) {
    CLS_LOG(20, "error reading index for duplicates %s: %d", key.c_str(), r);
    return r;
  }
  CLS_LOG(20, "idata is %s", idata.str().c_str());
  return r;
}
static int get_idata_from_key_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class entry point: decode the args, look up the index entry that
  // covers op.key, and ship the filled-in args back to the caller.
  CLS_LOG(20, "get_idata_from_key_op");
  idata_from_key_args op;
  auto iter = in->cbegin();
  try {
    decode(op, iter);
  } catch (buffer::error& err) {
    CLS_LOG(20, "error decoding idata_from_key_args.");
    return -EINVAL;
  }
  const int ret = get_idata_from_key(hctx, op.key, op.idata, op.next_idata);
  if (ret < 0) {
    return ret;
  }
  encode(op, *out);
  return 0;
}
/**
 * finds the object in the index with the lowest key value that is greater
 * than idata.key. If idata.key is the max key, returns -EOVERFLOW. If
 * idata has a prefix and has timed out, cleans up.
 *
 * @param idata: idata for the object to search for.
 * @param out_data: the idata for the next object.
 *
 * @pre: idata must contain a key.
 * @post: out_data contains complete information
 */
static int get_next_idata(cls_method_context_t hctx, const index_data &idata,
    index_data &out_data) {
  // read the single index entry that sorts immediately after idata's key
  std::map<std::string, bufferlist> kvs;
  bool more;
  const int ret = cls_cxx_map_get_vals(hctx, idata.kdata.encoded(), "", 1,
				       &kvs, &more);
  if (ret < 0){
    CLS_LOG(20, "getting kvs failed with error %d", ret);
    return ret;
  }
  if (kvs.empty()) {
    // idata was already the last entry in the index
    return -EOVERFLOW;
  }
  out_data.kdata.parse(kvs.begin()->first);
  auto b = kvs.begin()->second.cbegin();
  out_data.decode(b);
  return 0;
}
static int get_next_idata_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around get_next_idata(): decode args, look up the
  // successor entry, encode the result back out.
  CLS_LOG(20, "get_next_idata_op");
  idata_from_idata_args op;
  auto iter = in->cbegin();
  try {
    decode(op, iter);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  const int ret = get_next_idata(hctx, op.idata, op.next_idata);
  if (ret < 0) {
    return ret;
  }
  op.encode(*out);
  return 0;
}
/**
 * finds the object in the index with the highest key value that is less
 * than idata.key. If idata.key is the lowest key, returns -ERANGE If
 * idata has a prefix and has timed out, cleans up.
 *
 * @param idata: idata for the object to search for.
 * @param out_data: the idata for the next object.
 *
 * @pre: idata must contain a key.
 * @ost: out_data contains complete information
 */
static int get_prev_idata(cls_method_context_t hctx, const index_data &idata,
    index_data &out_data) {
  int r = 0;
  std::map<std::string, bufferlist> kvs;
  bool more;
  // read the whole index so we can step backwards from idata's entry
  r = cls_cxx_map_get_vals(hctx, "", "", LONG_MAX, &kvs, &more);
  if (r < 0){
    CLS_LOG(20, "getting kvs failed with error %d", r);
    return r;
  }
  std::map<std::string, bufferlist>::iterator it =
      kvs.lower_bound(idata.kdata.encoded());
  // lower_bound can return end() when the key sorts past every entry;
  // check before dereferencing (previously this dereferenced end()).
  if (it == kvs.end()) {
    CLS_LOG(20, "object %s not found in the index (expected %s, found nothing)",
        idata.str().c_str(), idata.kdata.encoded().c_str());
    return -ENODATA;
  }
  if (it->first != idata.kdata.encoded()) {
    CLS_LOG(20, "object %s not found in the index (expected %s, found %s)",
        idata.str().c_str(), idata.kdata.encoded().c_str(),
        it->first.c_str());
    return -ENODATA;
  }
  if (it == kvs.begin()) {
    //it is the first object, there is no previous.
    return -ERANGE;
  } else {
    --it;
  }
  out_data.kdata.parse(it->first);
  auto b = it->second.cbegin();
  out_data.decode(b);
  return 0;
}
static int get_prev_idata_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around get_prev_idata().
  // fixed: the debug message previously said "get_next_idata_op"
  // (copy-paste error), which made log traces misleading.
  CLS_LOG(20, "get_prev_idata_op");
  idata_from_idata_args op;
  auto it = in->cbegin();
  try {
    decode(op, it);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  int r = get_prev_idata(hctx, op.idata, op.next_idata);
  if (r < 0) {
    return r;
  } else {
    op.encode(*out);
    return 0;
  }
}
/**
 * Read all of the index entries where any keys in the map go
 */
static int read_many(cls_method_context_t hctx, const set<string> &keys,
    map<string, bufferlist> * out) {
  int r = 0;
  bool more;
  // guard before dereferencing keys.begin()/keys.rbegin() below
  // (previously an empty set was undefined behavior).
  if (keys.empty()) {
    return 0;
  }
  CLS_ERR("reading from a map of size %d, first key encoded is %s",
      (int)keys.size(), key_data(*keys.begin()).encoded().c_str());
  // read every entry from the first requested key onwards...
  r = cls_cxx_map_get_vals(hctx, key_data(*keys.begin()).encoded().c_str(),
      "", LONG_MAX, out, &more);
  if (r < 0) {
    CLS_ERR("getting omap vals failed with error %d", r);
  }
  CLS_ERR("got map of size %d ", (int)out->size());
  // ...then trim everything past the last requested key.
  if (out->size() > 1) {
    out->erase(out->upper_bound(key_data(*keys.rbegin()).encoded().c_str()),
        out->end());
  }
  CLS_ERR("returning map of size %d", (int)out->size());
  // NOTE(review): CLS_ERR is used here for routine tracing — presumably
  // leftover debugging; consider CLS_LOG(20, ...) to avoid error-level spam.
  return r;
}
static int read_many_op(cls_method_context_t hctx, bufferlist *in,
    bufferlist *out) {
  // RADOS class wrapper around read_many(): decode the requested key set,
  // gather the matching index entries, encode them back to the caller.
  CLS_LOG(20, "read_many_op");
  set<string> requested_keys;
  map<string, bufferlist> found;
  auto iter = in->cbegin();
  try {
    decode(requested_keys, iter);
  } catch (buffer::error & err) {
    return -EINVAL;
  }
  const int ret = read_many(hctx, requested_keys, &found);
  if (ret < 0) {
    return ret;
  }
  encode(found, *out);
  return 0;
}
/**
 * Checks the unwritable xattr. If it is "1" (i.e., it is unwritable), returns
 * -EACCES. otherwise, returns 0.
 */
static int check_writable(cls_method_context_t hctx) {
  bufferlist bl;
  const int ret = cls_cxx_getxattr(hctx, "unwritable", &bl);
  if (ret < 0) {
    CLS_LOG(20, "error reading xattr %s: %d", "unwritable", ret);
    return ret;
  }
  const string flag(bl.c_str(), bl.length());
  return (flag == "1") ? -EACCES : 0;
}

static int check_writable_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // thin RADOS class wrapper; the input payload is ignored.
  CLS_LOG(20, "check_writable_op");
  return check_writable(hctx);
}
/**
 * returns -EKEYREJECTED if size is outside of bound, according to comparator.
 *
 * @bound: the limit to test
 * @comparator: should be CEPH_OSD_CMPXATTR_OP_[EQ|GT|LT]
 */
static int assert_size_in_bound(cls_method_context_t hctx, int bound,
    int comparator) {
  // read the object's "size" xattr (entry count kept alongside the omap)
  bufferlist size_bl;
  const int ret = cls_cxx_getxattr(hctx, "size", &size_bl);
  if (ret < 0) {
    CLS_LOG(20, "error reading xattr %s: %d", "size", ret);
    return ret;
  }
  const int size = atoi(string(size_bl.c_str(), size_bl.length()).c_str());
  CLS_LOG(20, "size is %d, bound is %d", size, bound);

  // reject unless size relates to bound as the comparator demands
  if (comparator == CEPH_OSD_CMPXATTR_OP_EQ) {
    if (size != bound) {
      return -EKEYREJECTED;
    }
  } else if (comparator == CEPH_OSD_CMPXATTR_OP_LT) {
    if (size >= bound) {
      return -EKEYREJECTED;
    }
  } else if (comparator == CEPH_OSD_CMPXATTR_OP_GT) {
    if (size <= bound) {
      return -EKEYREJECTED;
    }
  } else {
    CLS_LOG(20, "invalid argument passed to assert_size_in_bound: %d",
        comparator);
    return -EINVAL;
  }
  return 0;
}
static int assert_size_in_bound_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around assert_size_in_bound(); nothing is written
  // back to the caller beyond the return code.
  CLS_LOG(20, "assert_size_in_bound_op");
  assert_size_args op;
  auto iter = in->cbegin();
  try {
    decode(op, iter);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  return assert_size_in_bound(hctx, op.bound, op.comparator);
}
/**
 * Attempts to insert omap into this object's omap.
 *
 * @return:
 * if unwritable, returns -EACCES.
 * if size > bound and key doesn't already exist in the omap, returns -EBALANCE.
 * if exclusive is true, returns -EEXIST if any keys already exist.
 *
 * @post: object has omap entries inserted, and size xattr is updated
 */
static int omap_insert(cls_method_context_t hctx,
    const map<string, bufferlist> &omap, int bound, bool exclusive) {
  // stat is only used to fail fast when the object does not exist
  uint64_t size;
  time_t time;
  int r = cls_cxx_stat(hctx, &size, &time);
  if (r < 0) {
    return r;
  }
  // NOTE(review): omap.begin() is dereferenced here and at the end — an
  // empty omap would be undefined behavior; presumably callers never pass
  // one, but confirm.
  CLS_LOG(20, "inserting %s", omap.begin()->first.c_str());
  // refuse to touch an object frozen for split/merge
  r = check_writable(hctx);
  if (r < 0) {
    CLS_LOG(20, "omap_insert: this object is unwritable: %d", r);
    return r;
  }
  // assert_bound is raised by one for every key that already exists, so
  // overwrites do not count against the size limit below.
  int assert_bound = bound;
  //if this is an exclusive insert, make sure the key doesn't already exist.
  for (map<string, bufferlist>::const_iterator it = omap.begin();
      it != omap.end(); ++it) {
    bufferlist bl;
    r = cls_cxx_map_get_val(hctx, it->first, &bl);
    if (r == 0 && string(bl.c_str(), bl.length()) != ""){
      if (exclusive) {
        CLS_LOG(20, "error: this is an exclusive insert and %s exists.",
            it->first.c_str());
        return -EEXIST;
      }
      assert_bound++;
      CLS_LOG(20, "increased assert_bound to %d", assert_bound);
    } else if (r != -ENODATA && r != -ENOENT) {
      CLS_LOG(20, "error reading omap val for %s: %d", it->first.c_str(), r);
      return r;
    }
  }
  // current entry count is tracked in the "size" xattr as a decimal string
  bufferlist old_size;
  r = cls_cxx_getxattr(hctx, "size", &old_size);
  if (r < 0) {
    CLS_LOG(20, "error reading xattr %s: %d", "size", r);
    return r;
  }
  int old_size_int = atoi(string(old_size.c_str(), old_size.length()).c_str());
  CLS_LOG(20, "asserting size is less than %d (bound is %d)", assert_bound, bound);
  if (old_size_int >= assert_bound) {
    // object is full enough that it should be split first
    return -EKEYREJECTED;
  }
  // new size: existing entries plus genuinely-new keys (overwrites are the
  // (assert_bound - bound) term and do not change the count)
  int new_size_int = old_size_int + omap.size() - (assert_bound - bound);
  CLS_LOG(20, "old size is %d, new size is %d", old_size_int, new_size_int);
  bufferlist new_size;
  std::stringstream s;
  s << new_size_int;
  new_size.append(s.str());
  r = cls_cxx_map_set_vals(hctx, &omap);
  if (r < 0) {
    CLS_LOG(20, "error setting omap: %d", r);
    return r;
  }
  r = cls_cxx_setxattr(hctx, "size", &new_size);
  if (r < 0) {
    CLS_LOG(20, "error setting xattr %s: %d", "size", r);
    return r;
  }
  CLS_LOG(20, "successfully inserted %s", omap.begin()->first.c_str());
  return 0;
}
static int omap_insert_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around omap_insert(); nothing is encoded back.
  CLS_LOG(20, "omap_insert");
  omap_set_args op;
  auto iter = in->cbegin();
  try {
    decode(op, iter);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  return omap_insert(hctx, op.omap, op.bound, op.exclusive);
}
/**
 * Creates the object (exclusively) and installs omap as its contents,
 * initializing the "size" and "unwritable" xattrs.
 *
 * @return 0 on success; the cls error code otherwise (e.g. -EEXIST from
 * the exclusive create).
 */
static int create_with_omap(cls_method_context_t hctx,
    const map<string, bufferlist> &omap) {
  // guard the log dereference: an empty omap used to be undefined behavior
  if (!omap.empty()) {
    CLS_LOG(20, "creating with omap: %s", omap.begin()->first.c_str());
  }
  //first make sure the object is writable
  int r = cls_cxx_create(hctx, true);
  if (r < 0) {
    CLS_LOG(20, "omap create: creating failed: %d", r);
    return r;
  }
  // the "size" xattr tracks the entry count as a decimal string
  int new_size_int = omap.size();
  CLS_LOG(20, "omap insert: new size is %d", new_size_int);
  bufferlist new_size;
  std::stringstream s;
  s << new_size_int;
  new_size.append(s.str());
  r = cls_cxx_map_set_vals(hctx, &omap);
  if (r < 0) {
    CLS_LOG(20, "omap create: error setting omap: %d", r);
    return r;
  }
  r = cls_cxx_setxattr(hctx, "size", &new_size);
  if (r < 0) {
    CLS_LOG(20, "omap create: error setting xattr %s: %d", "size", r);
    return r;
  }
  // new objects start out writable ("0")
  bufferlist u;
  u.append("0");
  r = cls_cxx_setxattr(hctx, "unwritable", &u);
  if (r < 0) {
    CLS_LOG(20, "omap create: error setting xattr %s: %d", "unwritable", r);
    return r;
  }
  if (!omap.empty()) {
    CLS_LOG(20, "successfully created %s", omap.begin()->first.c_str());
  }
  return 0;
}
static int create_with_omap_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around create_with_omap().
  // fixed: the debug message previously said "omap_insert" (copied from
  // omap_insert_op), which made log traces misleading.
  CLS_LOG(20, "create_with_omap_op");
  map<string, bufferlist> omap;
  auto it = in->cbegin();
  try {
    decode(omap, it);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  return create_with_omap(hctx, omap);
}
/**
 * Attempts to remove omap from this object's omap.
 *
 * @return:
 * if unwritable, returns -EACCES.
 * if size < bound and key doesn't already exist in the omap, returns -EBALANCE.
 * if any of the keys are not in this object, returns -ENODATA.
 *
 * @post: object has omap entries removed, and size xattr is updated
 */
static int omap_remove(cls_method_context_t hctx,
    const std::set<string> &omap, int bound) {
  int r;
  // stat is only used to fail fast when the object does not exist
  uint64_t size;
  time_t time;
  r = cls_cxx_stat(hctx, &size, &time);
  if (r < 0) {
    return r;
  }
  //first make sure the object is writable
  r = check_writable(hctx);
  if (r < 0) {
    return r;
  }
  //check for existance of the key first
  for (set<string>::const_iterator it = omap.begin();
      it != omap.end(); ++it) {
    bufferlist bl;
    r = cls_cxx_map_get_val(hctx, *it, &bl);
    if (r == -ENOENT || r == -ENODATA
	|| string(bl.c_str(), bl.length()) == ""){
      return -ENODATA;
    } else if (r < 0) {
      CLS_LOG(20, "error reading omap val for %s: %d", it->c_str(), r);
      return r;
    }
  }
  //fail if removing from an object with only bound entries.
  bufferlist old_size;
  r = cls_cxx_getxattr(hctx, "size", &old_size);
  if (r < 0) {
    CLS_LOG(20, "error reading xattr %s: %d", "size", r);
    return r;
  }
  int old_size_int = atoi(string(old_size.c_str(), old_size.length()).c_str());
  CLS_LOG(20, "asserting size is greater than %d", bound);
  if (old_size_int <= bound) {
    // object would dip below the minimum; caller must rebalance first
    return -EKEYREJECTED;
  }
  // update the "size" xattr (decimal string entry count) before the removes
  int new_size_int = old_size_int - omap.size();
  CLS_LOG(20, "old size is %d, new size is %d", old_size_int, new_size_int);
  bufferlist new_size;
  std::stringstream s;
  s << new_size_int;
  new_size.append(s.str());
  r = cls_cxx_setxattr(hctx, "size", &new_size);
  if (r < 0) {
    // NOTE(review): message names "unwritable" but the xattr written above
    // is "size" — looks like a copy-paste slip in the log text.
    CLS_LOG(20, "error setting xattr %s: %d", "unwritable", r);
    return r;
  }
  for (std::set<string>::const_iterator it = omap.begin();
      it != omap.end(); ++it) {
    r = cls_cxx_map_remove_key(hctx, *it);
    if (r < 0) {
      CLS_LOG(20, "error removing omap: %d", r);
      return r;
    }
  }
  return 0;
}
static int omap_remove_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around omap_remove(); nothing is encoded back.
  CLS_LOG(20, "omap_remove");
  omap_rm_args op;
  auto iter = in->cbegin();
  try {
    decode(op, iter);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  return omap_remove(hctx, op.omap, op.bound);
}
/**
 * checks to see if this object needs to be split or rebalanced. if so, reads
 * information about it.
 *
 * @post: if assert_size_in_bound(hctx, bound, comparator) succeeds,
 * odata contains the size, omap, and unwritable attributes for this object.
 * Otherwise, odata contains the size and unwritable attribute.
 */
static int maybe_read_for_balance(cls_method_context_t hctx,
    object_data &odata, int bound, int comparator) {
  CLS_LOG(20, "rebalance reading");
  //if unwritable, return
  int r = check_writable(hctx);
  if (r < 0) {
    odata.unwritable = true;
    CLS_LOG(20, "rebalance read: error getting xattr %s: %d", "unwritable", r);
    return r;
  } else {
    odata.unwritable = false;
  }
  //get the size attribute
  bufferlist size;
  r = cls_cxx_getxattr(hctx, "size", &size);
  if (r < 0) {
    CLS_LOG(20, "rebalance read: error getting xattr %s: %d", "size", r);
    return r;
  }
  // "size" is stored as a decimal string entry count
  odata.size = atoi(string(size.c_str(), size.length()).c_str());
  //check if it needs to be balanced
  r = assert_size_in_bound(hctx, bound, comparator);
  if (r < 0) {
    // size is within bounds: no rebalance needed, signal with -EBALANCE
    CLS_LOG(20, "rebalance read: error on asserting size: %d", r);
    return -EBALANCE;
  }
  //if the assert succeeded, it needs to be balanced
  // read the entire omap so the caller can move/merge the entries
  bool more;
  r = cls_cxx_map_get_vals(hctx, "", "", LONG_MAX, &odata.omap, &more);
  if (r < 0){
    CLS_LOG(20, "rebalance read: getting kvs failed with error %d", r);
    return r;
  }
  CLS_LOG(20, "rebalance read: size xattr is %llu, omap size is %llu",
      (unsigned long long)odata.size,
      (unsigned long long)odata.omap.size());
  return 0;
}
static int maybe_read_for_balance_op(cls_method_context_t hctx,
    bufferlist *in, bufferlist *out) {
  // RADOS class wrapper around maybe_read_for_balance(): on success the
  // filled-in rebalance_args (including the omap) are encoded back out.
  CLS_LOG(20, "maybe_read_for_balance");
  rebalance_args op;
  auto iter = in->cbegin();
  try {
    decode(op, iter);
  } catch (buffer::error& err) {
    return -EINVAL;
  }
  const int ret = maybe_read_for_balance(hctx, op.odata, op.bound,
					 op.comparator);
  if (ret < 0) {
    return ret;
  }
  op.encode(*out);
  return 0;
}
// Class-loader entry point: registers the "kvs" object class and each of
// its methods with the OSD.  RD/WR flags advertise whether a method reads
// and/or writes the object so the OSD can route and order it correctly.
CLS_INIT(kvs)
{
  CLS_LOG(20, "Loaded assert condition class!");
  cls_handle_t h_class;
  cls_method_handle_t h_get_idata_from_key;
  cls_method_handle_t h_get_next_idata;
  cls_method_handle_t h_get_prev_idata;
  cls_method_handle_t h_read_many;
  cls_method_handle_t h_check_writable;
  cls_method_handle_t h_assert_size_in_bound;
  cls_method_handle_t h_omap_insert;
  cls_method_handle_t h_create_with_omap;
  cls_method_handle_t h_omap_remove;
  cls_method_handle_t h_maybe_read_for_balance;
  cls_register("kvs", &h_class);
  // index lookups (read-only)
  cls_register_cxx_method(h_class, "get_idata_from_key",
      CLS_METHOD_RD,
      get_idata_from_key_op, &h_get_idata_from_key);
  cls_register_cxx_method(h_class, "get_next_idata",
      CLS_METHOD_RD,
      get_next_idata_op, &h_get_next_idata);
  cls_register_cxx_method(h_class, "get_prev_idata",
      CLS_METHOD_RD,
      get_prev_idata_op, &h_get_prev_idata);
  cls_register_cxx_method(h_class, "read_many",
      CLS_METHOD_RD,
      read_many_op, &h_read_many);
  // writability probe — registered RD|WR so it can be used inside write ops
  cls_register_cxx_method(h_class, "check_writable",
      CLS_METHOD_RD | CLS_METHOD_WR,
      check_writable_op, &h_check_writable);
  // mutating operations
  cls_register_cxx_method(h_class, "assert_size_in_bound",
      CLS_METHOD_WR,
      assert_size_in_bound_op, &h_assert_size_in_bound);
  cls_register_cxx_method(h_class, "omap_insert",
      CLS_METHOD_WR,
      omap_insert_op, &h_omap_insert);
  cls_register_cxx_method(h_class, "create_with_omap",
      CLS_METHOD_WR,
      create_with_omap_op, &h_create_with_omap);
  cls_register_cxx_method(h_class, "omap_remove",
      CLS_METHOD_WR,
      omap_remove_op, &h_omap_remove);
  cls_register_cxx_method(h_class, "maybe_read_for_balance",
      CLS_METHOD_RD,
      maybe_read_for_balance_op, &h_maybe_read_for_balance);
  return;
}
null | ceph-main/src/key_value_store/key_value_structure.h | /*
* Interface for key-value store using librados
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef KEY_VALUE_STRUCTURE_HPP_
#define KEY_VALUE_STRUCTURE_HPP_
#include "include/rados/librados.hpp"
#include "include/utime.h"
#include <vector>
using ceph::bufferlist;
class KeyValueStructure;
/**An injection_t is a function that is called before every
* ObjectWriteOperation to test concurrency issues. For example,
* one injection_t might cause the client to have a greater chance of dying
* mid-split/merge.
*/
typedef int (KeyValueStructure::*injection_t)();
/**
* Passed to aio methods to be called when the operation completes
*/
typedef void (*callback)(int * err, void *arg);
// Abstract interface for an ordered key-value store backed by RADOS.
// Implementations (e.g. KvFlatBtreeAsync) provide the synchronous and
// asynchronous mutators/readers below plus fault-injection hooks used by
// the concurrency tests.
class KeyValueStructure{
public:
  std::map<char, int> opmap;

  //these are injection methods. By default, nothing is called at each
  //interruption point.
  /**
   * returns 0
   */
  virtual int nothing() = 0;
  /**
   * 10% chance of waiting wait_ms seconds
   */
  virtual int wait() = 0;
  /**
   * 10% chance of killing the client.
   */
  virtual int suicide() = 0;

  ////////////////DESTRUCTOR/////////////////
  virtual ~KeyValueStructure() {}

  ////////////////UPDATERS///////////////////

  /**
   * set up the KeyValueStructure (i.e., initialize rados/io_ctx, etc.)
   */
  virtual int setup(int argc, const char** argv) = 0;

  /**
   * set the method that gets called before each ObjectWriteOperation.
   * If wait_time is set and the method passed involves waiting, it will wait
   * for that many milliseconds.
   */
  virtual void set_inject(injection_t inject, int wait_time) = 0;

  /**
   * if update_on_existing is false, returns an error if
   * key already exists in the structure
   */
  virtual int set(const std::string &key, const bufferlist &val,
      bool update_on_existing) = 0;

  /**
   * efficiently insert the contents of in_map into the structure
   */
  virtual int set_many(const std::map<std::string, bufferlist> &in_map) = 0;

  /**
   * removes the key-value for key. returns an error if key does not exist
   */
  virtual int remove(const std::string &key) = 0;

  /**
   * removes all keys and values
   */
  virtual int remove_all() = 0;

  /**
   * launches a thread to get the value of key. When complete, calls cb(cb_args)
   */
  virtual void aio_get(const std::string &key, bufferlist *val, callback cb,
      void *cb_args, int * err) = 0;

  /**
   * launches a thread to set key to val. When complete, calls cb(cb_args)
   */
  virtual void aio_set(const std::string &key, const bufferlist &val, bool exclusive,
      callback cb, void * cb_args, int * err) = 0;

  /**
   * launches a thread to remove key. When complete, calls cb(cb_args)
   */
  virtual void aio_remove(const std::string &key, callback cb, void *cb_args,
      int * err) = 0;

  ////////////////READERS////////////////////
  /**
   * gets the val associated with key.
   *
   * @param key the key to get
   * @param val the value is stored in this
   * @return error code
   */
  virtual int get(const std::string &key, bufferlist *val) = 0;

  /**
   * stores all keys in keys. set should put them in order by key.
   */
  virtual int get_all_keys(std::set<std::string> *keys) = 0;

  /**
   * stores all keys and values in kv_map. map should put them in order by key.
   */
  virtual int get_all_keys_and_values(std::map<std::string,bufferlist> *kv_map) = 0;

  /**
   * True if the structure meets its own requirements for consistency.
   */
  virtual bool is_consistent() = 0;

  /**
   * prints a string representation of the structure
   */
  virtual std::string str() = 0;
};
#endif /* KEY_VALUE_STRUCTURE_HPP_ */
| 3,898 | 25.52381 | 85 | h |
null | ceph-main/src/key_value_store/kv_flat_btree_async.cc | /*
* Key-value store using librados
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#include "include/compat.h"
#include "key_value_store/key_value_structure.h"
#include "key_value_store/kv_flat_btree_async.h"
#include "key_value_store/kvs_arg_types.h"
#include "include/rados/librados.hpp"
#include "common/ceph_context.h"
#include "common/Clock.h"
#include "include/types.h"
#include <errno.h>
#include <string>
#include <iostream>
#include <cassert>
#include <climits>
#include <cmath>
#include <sstream>
#include <stdlib.h>
#include <iterator>
using ceph::bufferlist;
using namespace std;
bool index_data::is_timed_out(utime_t now, utime_t timeout) const {
  // An entry with a prefix (an operation in progress) is considered stale
  // once its timestamp is older than the timeout; unprefixed entries never
  // time out.
  if (prefix == "") {
    return false;
  }
  return now - ts > timeout;
}

void IndexCache::clear() {
  // drop both views of the cache together so they stay in sync
  t2kmap.clear();
  k2itmap.clear();
}
// Insert idata into the cache as the entry covering `key`, evicting first
// any cached entry whose range contains the key and any stale entry for the
// same kdata.  Both maps (key->idata and time->key, used for LRU eviction)
// are kept consistent.  A cache_size of 0 disables caching entirely.
void IndexCache::push(const string &key, const index_data &idata) {
  if (cache_size == 0) {
    return;
  }
  index_data old_idata;  // NOTE(review): unused local — candidate for removal
  // evict the entry currently covering this key, if any
  std::map<key_data, std::pair<index_data, utime_t> >::iterator old_it =
      k2itmap.lower_bound(key_data(key));
  if (old_it != k2itmap.end()) {
    t2kmap.erase(old_it->second.second);
    k2itmap.erase(old_it);
  }
  // drop the old timestamp for this exact kdata so t2kmap has no dangling key
  std::map<key_data, std::pair<index_data, utime_t> >::iterator new_it =
      k2itmap.find(idata.kdata);
  if (new_it != k2itmap.end()) {
    utime_t old_time = new_it->second.second;
    t2kmap.erase(old_time);
  }
  // insert the fresh entry stamped with the current time
  utime_t time = ceph_clock_now();
  k2itmap[idata.kdata] = std::make_pair(idata, time);
  t2kmap[time] = idata.kdata;
  if ((int)k2itmap.size() > cache_size) {
    // over capacity: evict the oldest entry
    pop();
  }
}
// Insert idata keyed by its own kdata, replacing any existing entry for
// that kdata and keeping the time->key eviction map in sync.  A cache_size
// of 0 disables caching entirely.
void IndexCache::push(const index_data &idata) {
  if (cache_size == 0) {
    return;
  }
  // remove the stale timestamp/entry for this kdata, if cached
  if (k2itmap.count(idata.kdata) > 0) {
    utime_t old_time = k2itmap[idata.kdata].second;
    t2kmap.erase(old_time);
    k2itmap.erase(idata.kdata);
  }
  // insert the fresh entry stamped with the current time
  utime_t time = ceph_clock_now();
  k2itmap[idata.kdata] = std::make_pair(idata, time);
  t2kmap[time] = idata.kdata;
  if ((int)k2itmap.size() > cache_size) {
    // over capacity: evict the oldest entry
    pop();
  }
}
void IndexCache::pop() {
if (cache_size == 0) {
return;
}
std::map<utime_t, key_data>::iterator it = t2kmap.begin();
utime_t time = it->first;
key_data kdata = it->second;
k2itmap.erase(kdata);
t2kmap.erase(time);
}
void IndexCache::erase(key_data kdata) {
  // Remove the entry for kdata (if cached) from both maps.
  if (cache_size == 0) {
    return;
  }
  std::map<key_data, std::pair<index_data, utime_t> >::iterator entry =
      k2itmap.find(kdata);
  if (entry != k2itmap.end()) {
    t2kmap.erase(entry->second.second);
    k2itmap.erase(entry);
  }
}
// Look up the cached index entry covering `key`.  Returns 0 and fills
// *idata on a hit; -ENODATA when caching is disabled, the cache is empty,
// or no cached entry's range contains the key.
int IndexCache::get(const string &key, index_data *idata) const {
  if (cache_size == 0) {
    return -ENODATA;
  }
  if ((int)k2itmap.size() == 0) {
    return -ENODATA;
  }
  // candidate: first entry whose kdata is >= the key
  std::map<key_data, std::pair<index_data, utime_t> >::const_iterator it =
      k2itmap.lower_bound(key_data(key));
  // the hit only counts if the entry's lower bound (min_kdata) is below the
  // key, i.e. the key actually falls inside the entry's range
  if (it == k2itmap.end() || !(it->second.first.min_kdata < key_data(key))) {
    return -ENODATA;
  } else {
    *idata = it->second.first;
  }
  return 0;
}
// Like the two-argument get(), but also requires the cached entry AFTER the
// match to exist (it is copied to *next_idata when present).  Returns
// -ENODATA when caching is disabled, there is no covering entry, the entry
// is stale (key outside its min_kdata bound), or the match is the last
// cached entry.
int IndexCache::get(const string &key, index_data *idata,
    index_data *next_idata) const {
  if (cache_size == 0) {
    return -ENODATA;
  }
  std::map<key_data, std::pair<index_data, utime_t> >::const_iterator it =
      k2itmap.lower_bound(key_data(key));
  // NOTE(review): a match with no successor is treated as a miss here even
  // though idata could be served — presumably intentional since callers
  // need both entries; confirm.
  if (it == k2itmap.end() || ++it == k2itmap.end()) {
    return -ENODATA;
  } else {
    --it;
    if (!(it->second.first.min_kdata < key_data(key))){
      //stale, should be reread.
      return -ENODATA;
    } else {
      *idata = it->second.first;
      ++it;
      if (it != k2itmap.end()) {
	*next_idata = it->second.first;
      }
    }
  }
  return 0;
}
// No-op interruption callback: always reports "keep going" (0).
// Presumably installed as the `interrupt` member used by perform_ops
// and cleanup when failure injection is disabled - confirm at the
// call sites that set `interrupt`.
int KvFlatBtreeAsync::nothing() {
  return 0;
}
// Interruption callback for latency injection: with probability 1/10,
// sleeps for wait_ms microseconds. Always returns 0 (never requests
// suicide).
int KvFlatBtreeAsync::wait() {
  const bool inject_delay = (rand() % 10 == 0);
  if (inject_delay) {
    usleep(wait_ms);
  }
  return 0;
}
// Interruption callback for failure injection: with probability 1/10,
// reports 1, telling the caller to abort mid-operation as if the client
// had died. Otherwise returns 0.
int KvFlatBtreeAsync::suicide() {
  if (rand() % 10 != 0) {
    return 0;
  }
  if (verbose) cout << client_name << " is suiciding" << std::endl;
  return 1;
}
// Reads the index entry that follows idata in key order, storing it in
// *out_data. Returns 0 on success, -EOVERFLOW when idata is already the
// last entry, or a negative error from reading the index object. If the
// entry read is the one idata refers to and its prefix has timed out,
// triggers cleanup of the dead client's half-finished operation.
int KvFlatBtreeAsync::next(const index_data &idata, index_data * out_data)
{
  if (verbose) cout << "\t\t" << client_name << "-next: finding next of "
      << idata.str()
      << std::endl;
  int err = 0;
  librados::ObjectReadOperation oro;
  std::map<std::string, bufferlist> kvs;
  // Fetch at most one omap entry strictly after idata's encoded high key.
  oro.omap_get_vals2(idata.kdata.encoded(),1,&kvs, nullptr, &err);
  err = io_ctx.operate(index_name, &oro, NULL);
  if (err < 0){
    if (verbose) cout << "\t\t\t" << client_name
	<< "-next: getting index failed with error "
	<< err << std::endl;
    return err;
  }
  if (!kvs.empty()) {
    out_data->kdata.parse(kvs.begin()->first);
    auto b = kvs.begin()->second.cbegin();
    out_data->decode(b);
    if (idata.is_timed_out(ceph_clock_now(), timeout)) {
      if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED."
	  << std::endl;
      //the client died after deleting the object. clean up.
      cleanup(idata, err);
    }
  } else {
    // No entry after idata: it was the highest entry in the index.
    err = -EOVERFLOW;
  }
  return err;
}
// Reads the index entry that precedes idata in key order, via the
// "kvs"/"get_prev_idata" object class method on the index object, and
// stores it in *out_data. On failure, if idata's prefix has timed out,
// cleans up after the presumed-dead client and retries are left to the
// caller (err is reset to 0 unless cleanup itself suicides).
int KvFlatBtreeAsync::prev(const index_data &idata, index_data * out_data)
{
  if (verbose) cout << "\t\t" << client_name << "-prev: finding prev of "
      << idata.str() << std::endl;
  int err = 0;
  bufferlist inbl;
  idata_from_idata_args in_args;
  in_args.idata = idata;
  in_args.encode(inbl);
  bufferlist outbl;
  // Server-side lookup: the object class walks the index omap for us.
  err = io_ctx.exec(index_name,"kvs", "get_prev_idata", inbl, outbl);
  if (err < 0){
    if (verbose) cout << "\t\t\t" << client_name
	<< "-prev: getting index failed with error "
	<< err << std::endl;
    if (idata.is_timed_out(ceph_clock_now(), timeout)) {
      if (verbose) cout << client_name << " THINKS THE OTHER CLIENT DIED."
	  << std::endl;
      //the client died after deleting the object. clean up.
      err = cleanup(idata, err);
      if (err == -ESUICIDE) {
	return err;
      } else {
	err = 0;
      }
    }
    return err;
  }
  auto it = outbl.cbegin();
  in_args.decode(it);
  *out_data = in_args.next_idata;
  if (verbose) cout << "\t\t" << client_name << "-prev: prev is "
      << out_data->str()
      << std::endl;
  return err;
}
// Resolves `key` to the index entry (*idata) that covers it, and
// optionally the following entry (*next_idata, may be NULL). Consults
// the client-side cache first unless force_update is set; on a miss,
// reads a batch of entries from the index object, opportunistically
// refilling the cache. Any entry found with a timed-out prefix triggers
// cleanup of the dead client's operation and a full retry. Returns 0 on
// success or a negative error; aborts the process if the index object
// itself cannot be read.
int KvFlatBtreeAsync::read_index(const string &key, index_data * idata,
    index_data * next_idata, bool force_update) {
  int err = 0;
  if (!force_update) {
    if (verbose) cout << "\t" << client_name
	<< "-read_index: getting index_data for " << key
	<< " from cache" << std::endl;
    icache_lock.lock();
    if (next_idata != NULL) {
      err = icache.get(key, idata, next_idata);
    } else {
      err = icache.get(key, idata);
    }
    icache_lock.unlock();

    if (err == 0) {
      //if (verbose) cout << "CACHE SUCCESS" << std::endl;
      return err;
    } else {
      if (verbose) cout << "NOT IN CACHE" << std::endl;
    }
  }

  if (verbose) cout << "\t" << client_name
      << "-read_index: getting index_data for " << key
      << " from object" << std::endl;
  librados::ObjectReadOperation oro;
  bufferlist raw_val;
  std::set<std::string> key_set;
  key_set.insert(key_data(key).encoded());
  std::map<std::string, bufferlist> kvmap;
  std::map<std::string, bufferlist> dupmap;
  // Two reads in one op: the exact entry for the key (dupmap) plus a batch
  // of following entries (kvmap) used to refresh the cache.
  oro.omap_get_vals_by_keys(key_set, &dupmap, &err);
  oro.omap_get_vals2(key_data(key).encoded(),
      (cache_size / cache_refresh >= 2? cache_size / cache_refresh: 2),
      &kvmap, nullptr, &err);
  err = io_ctx.operate(index_name, &oro, NULL);
  utime_t mytime = ceph_clock_now();
  if (err < 0){
    cerr << "\t" << client_name
	<< "-read_index: getting keys failed with "
	<< err << std::endl;
    ceph_abort_msg(client_name + "-read_index: reading index failed");
    return err;
  }
  kvmap.insert(dupmap.begin(), dupmap.end());
  // Cache all entries after the first; the first is handled separately
  // below since it is the one returned to the caller.
  for (map<string, bufferlist>::iterator it = ++kvmap.begin();
      it != kvmap.end();
      ++it) {
    bufferlist bl = it->second;
    auto blit = bl.cbegin();
    index_data this_idata;
    this_idata.decode(blit);
    if (this_idata.is_timed_out(mytime, timeout)) {
      if (verbose) cout << client_name
	  << " THINKS THE OTHER CLIENT DIED. (mytime is "
	  << mytime.sec() << "." << mytime.usec() << ", idata.ts is "
	  << this_idata.ts.sec() << "." << this_idata.ts.usec()
	  << ", it has been " << (mytime - this_idata.ts).sec()
	  << '.' << (mytime - this_idata.ts).usec()
	  << ", timeout is " << timeout << ")" << std::endl;
      //the client died after deleting the object. clean up.
      if (cleanup(this_idata, -EPREFIX) == -ESUICIDE) {
	return -ESUICIDE;
      }
      // The index changed during cleanup; start over from scratch.
      return read_index(key, idata, next_idata, force_update);
    }
    std::scoped_lock l{icache_lock};
    icache.push(this_idata);
  }
  auto b = kvmap.begin()->second.cbegin();
  idata->decode(b);
  idata->kdata.parse(kvmap.begin()->first);
  if (verbose) cout << "\t" << client_name << "-read_index: kvmap_size is "
      << kvmap.size()
      << ", idata is " << idata->str() << std::endl;

  ceph_assert(idata->obj != "");
  icache_lock.lock();
  icache.push(key, *idata);
  icache_lock.unlock();

  if (next_idata != NULL && idata->kdata.prefix != "1") {
    next_idata->kdata.parse((++kvmap.begin())->first);
    auto nb = (++kvmap.begin())->second.cbegin();
    next_idata->decode(nb);
    std::scoped_lock l{icache_lock};
    icache.push(*next_idata);
  }
  return err;
}
// Splits the object referenced by idata into two objects holding the
// lower k and the remaining entries. Reads the object (asserting it
// still has > 2k - 1 entries), prepares prefix-index, create, and delete
// operations outside the critical section, then performs them via
// perform_ops. On success, updates the client-side cache to reflect the
// removed and newly created ranges.
// Returns 0 on success, -EPREFIX if idata is already prefixed, or a
// negative error from reading/writing.
int KvFlatBtreeAsync::split(const index_data &idata) {
  int err = 0;
  opmap['l']++;
  if (idata.prefix != "") {
    return -EPREFIX;
  }

  rebalance_args args;
  args.bound = 2 * k - 1;
  args.comparator = CEPH_OSD_CMPXATTR_OP_GT;
  err = read_object(idata.obj, &args);
  args.odata.max_kdata = idata.kdata;
  if (err < 0) {
    if (verbose) cout << "\t\t" << client_name << "-split: read object "
	<< args.odata.name
	<< " got " << err << std::endl;
    return err;
  }

  if (verbose) cout << "\t\t" << client_name << "-split: splitting "
      << idata.obj
      << ", which has size " << args.odata.size
      << " and actual size " << args.odata.omap.size() << std::endl;

  ///////preparations that happen outside the critical section
  //for prefix index
  vector<object_data> to_create;
  vector<object_data> to_delete;
  to_delete.push_back(object_data(idata.min_kdata,
      args.odata.max_kdata, args.odata.name, args.odata.version));

  //for lower half object
  std::map<std::string, bufferlist>::const_iterator it = args.odata.omap.begin();
  {
    // scoped_lock instead of manual lock/unlock: exception-safe, and
    // consistent with the icache_lock usage elsewhere in this file.
    std::scoped_lock l{client_index_lock};
    to_create.push_back(object_data(to_string(client_name, client_index++)));
  }
  // First k entries go into the lower-half object.
  for (int i = 0; i < k; i++) {
    to_create[0].omap.insert(*it);
    ++it;
  }
  to_create[0].min_kdata = idata.min_kdata;
  to_create[0].max_kdata = key_data(to_create[0].omap.rbegin()->first);

  //for upper half object
  {
    std::scoped_lock l{client_index_lock};
    to_create.push_back(object_data(to_create[0].max_kdata,
	args.odata.max_kdata,
	to_string(client_name, client_index++)));
  }
  // Remaining entries (after the lower half's max key) form the upper half.
  to_create[1].omap.insert(
      ++args.odata.omap.find(to_create[0].omap.rbegin()->first),
      args.odata.omap.end());

  //setting up operations
  librados::ObjectWriteOperation owos[6];
  vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
  index_data out_data;
  set_up_prefix_index(to_create, to_delete, &owos[0], &out_data, &err);
  ops.push_back(std::make_pair(
      std::pair<int, string>(ADD_PREFIX, index_name),
      &owos[0]));
  for (int i = 1; i < 6; i++) {
    ops.push_back(std::make_pair(std::make_pair(0,""), &owos[i]));
  }
  set_up_ops(to_create, to_delete, &ops, out_data, &err);

  /////BEGIN CRITICAL SECTION/////
  //put prefix on index entry for idata.val
  err = perform_ops("\t\t" + client_name + "-split:", out_data, &ops);
  if (err < 0) {
    return err;
  }
  if (verbose) cout << "\t\t" << client_name << "-split: done splitting."
      << std::endl;
  /////END CRITICAL SECTION/////
  {
    std::scoped_lock l{icache_lock};
    for (vector<delete_data>::iterator it = out_data.to_delete.begin();
	it != out_data.to_delete.end(); ++it) {
      icache.erase(it->max);
    }
    for (vector<create_data>::iterator it = out_data.to_create.begin();
	it != out_data.to_create.end(); ++it) {
      icache.push(index_data(*it));
    }
  }
  return err;
}
// Rebalances (or merges) the object for idata1 with its neighbor. If
// idata1 is the highest entry, the previous entry is used as the
// neighbor; otherwise next_idata is. Both objects are read (asserting
// each has < k + 1 or < 2k + 1 entries as appropriate); if their combined
// size fits in one object they are merged into a single new object,
// otherwise their entries are redistributed across two new objects. The
// index prefix / create / delete operations are prepared outside the
// critical section and executed via perform_ops, then the cache is
// updated. Returns 0 on success, -EBALANCE if both sizes are already in
// range, -EUCLEAN if this is the only node, -EPREFIX on a prefixed
// entry, -ECANCELED if a neighbor vanished, or another negative error.
int KvFlatBtreeAsync::rebalance(const index_data &idata1,
    const index_data &next_idata){
  opmap['m']++;
  int err = 0;

  if (idata1.prefix != "") {
    return -EPREFIX;
  }

  rebalance_args args1;
  args1.bound = k + 1;
  args1.comparator = CEPH_OSD_CMPXATTR_OP_LT;
  index_data idata2 = next_idata;
  rebalance_args args2;
  args2.bound = k + 1;
  args2.comparator = CEPH_OSD_CMPXATTR_OP_LT;

  if (idata1.kdata.prefix == "1") {
    //this is the highest key in the index, so it doesn't have a next.

    //read the index for the previous entry
    err = prev(idata1, &idata2);
    if (err == -ERANGE) {
      if (verbose) cout << "\t\t" << client_name
	  << "-rebalance: this is the only node, "
	  << "so aborting" << std::endl;
      return -EUCLEAN;
    } else if (err < 0) {
      return err;
    }

    //read the first object
    err = read_object(idata1.obj, &args2);
    if (err < 0) {
      if (verbose) cout << "reading " << idata1.obj << " failed with " << err
	  << std::endl;
      if (err == -ENOENT) {
	return -ECANCELED;
      }
      return err;
    }
    args2.odata.min_kdata = idata1.min_kdata;
    args2.odata.max_kdata = idata1.kdata;

    //read the second object
    args1.bound = 2 * k + 1;
    err = read_object(idata2.obj, &args1);
    if (err < 0) {
      // NOTE(review): this logs idata1.obj, but the read above was of
      // idata2.obj - the message may mislead during debugging.
      if (verbose) cout << "reading " << idata1.obj << " failed with " << err
	  << std::endl;
      return err;
    }
    args1.odata.min_kdata = idata2.min_kdata;
    args1.odata.max_kdata = idata2.kdata;

    if (verbose) cout << "\t\t" << client_name << "-rebalance: read "
	<< idata2.obj
	<< ". size: " << args1.odata.size << " version: "
	<< args1.odata.version
	<< std::endl;
  } else {
    // NOTE(review): plain assert here, while the rest of the file uses
    // ceph_assert - possibly intentional, confirm.
    assert (next_idata.obj != "");
    //there is a next key, so get it.
    err = read_object(idata1.obj, &args1);
    if (err < 0) {
      if (verbose) cout << "reading " << idata1.obj << " failed with " << err
	  << std::endl;
      return err;
    }
    args1.odata.min_kdata = idata1.min_kdata;
    args1.odata.max_kdata = idata1.kdata;

    args2.bound = 2 * k + 1;
    err = read_object(idata2.obj, &args2);
    if (err < 0) {
      // NOTE(review): logs idata1.obj but the read above was of idata2.obj.
      if (verbose) cout << "reading " << idata1.obj << " failed with " << err
	  << std::endl;
      if (err == -ENOENT) {
	return -ECANCELED;
      }
      return err;
    }
    args2.odata.min_kdata = idata2.min_kdata;
    args2.odata.max_kdata = idata2.kdata;

    if (verbose) cout << "\t\t" << client_name << "-rebalance: read "
	<< idata2.obj
	<< ". size: " << args2.odata.size << " version: "
	<< args2.odata.version
	<< std::endl;
  }

  if (verbose) cout << "\t\t" << client_name << "-rebalance: o1 is "
      << args1.odata.max_kdata.encoded() << ","
      << args1.odata.name << " with size " << args1.odata.size
      << " , o2 is " << args2.odata.max_kdata.encoded()
      << "," << args2.odata.name << " with size " << args2.odata.size
      << std::endl;

  //calculations
  if ((int)args1.odata.size > k && (int)args1.odata.size <= 2*k
      && (int)args2.odata.size > k
      && (int)args2.odata.size <= 2*k) {
    //nothing to do
    if (verbose) cout << "\t\t" << client_name
	<< "-rebalance: both sizes in range, so"
	<< " aborting " << std::endl;
    return -EBALANCE;
  } else if (idata1.prefix != "" || idata2.prefix != "") {
    return -EPREFIX;
  }

  //this is the high object. it gets created regardless of rebalance or merge.
  client_index_lock.lock();
  string o2w = to_string(client_name, client_index++);
  client_index_lock.unlock();
  index_data idata;
  vector<object_data> to_create;
  vector<object_data> to_delete;
  librados::ObjectWriteOperation create[2];//possibly only 1 will be used
  librados::ObjectWriteOperation other_ops[6];
  vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
  ops.push_back(std::make_pair(
      std::pair<int, string>(ADD_PREFIX, index_name),
      &other_ops[0]));

  if ((int)args1.odata.size + (int)args2.odata.size <= 2*k) {
    //merge
    if (verbose) cout << "\t\t" << client_name << "-rebalance: merging "
	<< args1.odata.name
	<< " and " << args2.odata.name << " to get " << o2w
	<< std::endl;
    std::map<string, bufferlist> write2_map;
    write2_map.insert(args1.odata.omap.begin(), args1.odata.omap.end());
    write2_map.insert(args2.odata.omap.begin(), args2.odata.omap.end());
    to_create.push_back(object_data(args1.odata.min_kdata,
	args2.odata.max_kdata, o2w, write2_map));
    ops.push_back(std::make_pair(
	std::pair<int, std::string>(MAKE_OBJECT, o2w),
	&create[0]));
    ceph_assert((int)write2_map.size() <= 2*k);
  } else {
    //rebalance
    if (verbose) cout << "\t\t" << client_name << "-rebalance: rebalancing "
	<< args1.odata.name
	<< " and " << args2.odata.name << std::endl;
    std::map<std::string, bufferlist> write1_map;
    std::map<std::string, bufferlist> write2_map;
    std::map<std::string, bufferlist>::iterator it;
    client_index_lock.lock();
    string o1w = to_string(client_name, client_index++);
    client_index_lock.unlock();
    // Split the combined entries roughly in half; rounding direction
    // depends on which object is the high one.
    int target_size_1 = ceil(((int)args1.odata.size + (int)args2.odata.size)
	/ 2.0);
    if (args1.odata.max_kdata != idata1.kdata) {
      //this should be true if idata1 is the high object
      target_size_1 = floor(((int)args1.odata.size + (int)args2.odata.size)
	  / 2.0);
    }
    for (it = args1.odata.omap.begin();
	it != args1.odata.omap.end() && (int)write1_map.size()
	    < target_size_1;
	++it) {
      write1_map.insert(*it);
    }
    if (it != args1.odata.omap.end()){
      //write1_map is full, so put the rest in write2_map
      write2_map.insert(it, args1.odata.omap.end());
      write2_map.insert(args2.odata.omap.begin(), args2.odata.omap.end());
    } else {
      //args1.odata.omap was small, and write2_map still needs more
      std::map<std::string, bufferlist>::iterator it2;
      for(it2 = args2.odata.omap.begin();
	  (it2 != args2.odata.omap.end()) && ((int)write1_map.size()
	      < target_size_1);
	  ++it2) {
	write1_map.insert(*it2);
      }
      write2_map.insert(it2, args2.odata.omap.end());
    }
    if (verbose) cout << "\t\t" << client_name
	<< "-rebalance: write1_map has size "
	<< write1_map.size() << ", write2_map.size() is " << write2_map.size()
	<< std::endl;
    //at this point, write1_map and write2_map should have the correct pairs
    to_create.push_back(object_data(args1.odata.min_kdata,
	key_data(write1_map.rbegin()->first),
	o1w,write1_map));
    to_create.push_back(object_data( key_data(write1_map.rbegin()->first),
	args2.odata.max_kdata, o2w, write2_map));
    ops.push_back(std::make_pair(
	std::pair<int, std::string>(MAKE_OBJECT, o1w),
	&create[0]));
    ops.push_back(std::make_pair(
	std::pair<int, std::string>(MAKE_OBJECT, o2w),
	&create[1]));
  }

  to_delete.push_back(object_data(args1.odata.min_kdata,
      args1.odata.max_kdata, args1.odata.name, args1.odata.version));
  to_delete.push_back(object_data(args2.odata.min_kdata,
      args2.odata.max_kdata, args2.odata.name, args2.odata.version));
  for (int i = 1; i < 6; i++) {
    ops.push_back(std::make_pair(std::make_pair(0,""), &other_ops[i]));
  }

  index_data out_data;
  set_up_prefix_index(to_create, to_delete, &other_ops[0], &out_data, &err);
  set_up_ops(to_create, to_delete, &ops, out_data, &err);

  //at this point, all operations should be completely set up.
  /////BEGIN CRITICAL SECTION/////
  err = perform_ops("\t\t" + client_name + "-rebalance:", out_data, &ops);
  if (err < 0) {
    return err;
  }
  icache_lock.lock();
  for (vector<delete_data>::iterator it = out_data.to_delete.begin();
      it != out_data.to_delete.end(); ++it) {
    icache.erase(it->max);
  }
  for (vector<create_data>::iterator it = out_data.to_create.begin();
      it != out_data.to_create.end(); ++it) {
    icache.push(index_data(*it));
  }
  icache_lock.unlock();
  if (verbose) cout << "\t\t" << client_name << "-rebalance: done rebalancing."
      << std::endl;
  /////END CRITICAL SECTION/////
  return err;
}
// Reads an object's full omap, its "unwritable" xattr, and its version
// into *odata, using a single async read operation that is waited on
// synchronously. Returns 0 on success or a negative error (e.g. -ENOENT
// if another client deleted the object).
int KvFlatBtreeAsync::read_object(const string &obj, object_data * odata) {
  librados::ObjectReadOperation get_obj;
  librados::AioCompletion * obj_aioc = rados.aio_create_completion();
  int err;
  bufferlist unw_bl;
  odata->name = obj;
  get_obj.omap_get_vals2("", LONG_MAX, &odata->omap, nullptr, &err);
  get_obj.getxattr("unwritable", &unw_bl, &err);
  io_ctx.aio_operate(obj, obj_aioc, &get_obj, NULL);
  obj_aioc->wait_for_complete();
  err = obj_aioc->get_return_value();
  if (err < 0){
    //possibly -ENOENT, meaning someone else deleted it.
    obj_aioc->release();
    return err;
  }
  // "1" in the xattr marks the object as unwritable (mid-operation).
  odata->unwritable = string(unw_bl.c_str(), unw_bl.length()) == "1";
  odata->version = obj_aioc->get_version64();
  odata->size = odata->omap.size();
  obj_aioc->release();
  return 0;
}
// Reads an object via the "kvs"/"maybe_read_for_balance" object class
// method, which returns the object's contents only if its size satisfies
// args->bound / args->comparator (used by split and rebalance to assert
// the size while reading). Fills args->odata, including the object
// version from the completion. Returns 0 on success or a negative error.
int KvFlatBtreeAsync::read_object(const string &obj, rebalance_args * args) {
  bufferlist inbl;
  args->encode(inbl);
  bufferlist outbl;
  int err;
  librados::AioCompletion * a = rados.aio_create_completion();
  io_ctx.aio_exec(obj, a, "kvs", "maybe_read_for_balance", inbl, &outbl);
  a->wait_for_complete();
  err = a->get_return_value();
  if (err < 0) {
    if (verbose) cout << "\t\t" << client_name
	<< "-read_object: reading failed with "
	<< err << std::endl;
    a->release();
    return err;
  }
  auto it = outbl.cbegin();
  args->decode(it);
  args->odata.name = obj;
  args->odata.version = a->get_version64();
  a->release();
  return err;
}
// Builds the "prefix" write operation on the index object: for each
// object being deleted, its index entry is asserted to still hold the
// un-prefixed value and is then overwritten with a prefixed entry
// (*idata) recording the planned creates/deletes and a timestamp, so
// that another client can roll the operation forward or back if this
// client dies. Fills *idata with the prefix record; *err receives the
// omap_cmp result at execution time.
void KvFlatBtreeAsync::set_up_prefix_index(
    const vector<object_data> &to_create,
    const vector<object_data> &to_delete,
    librados::ObjectWriteOperation * owo,
    index_data * idata,
    int * err) {
  std::map<std::string, std::pair<bufferlist, int> > assertions;
  std::map<string, bufferlist> to_insert;
  idata->prefix = "1";
  idata->ts = ceph_clock_now();
  for(vector<object_data>::const_iterator it = to_create.begin();
      it != to_create.end();
      ++it) {
    create_data c(it->min_kdata, it->max_kdata, it->name);
    idata->to_create.push_back(c);
  }
  for(vector<object_data>::const_iterator it = to_delete.begin();
      it != to_delete.end();
      ++it) {
    delete_data d(it->min_kdata, it->max_kdata, it->name, it->version);
    idata->to_delete.push_back(d);
  }
  for(vector<object_data>::const_iterator it = to_delete.begin();
      it != to_delete.end();
      ++it) {
    idata->obj = it->name;
    idata->min_kdata = it->min_kdata;
    idata->kdata = it->max_kdata;
    bufferlist insert;
    idata->encode(insert);
    to_insert[it->max_kdata.encoded()] = insert;
    // The assertion value is the entry WITHOUT the prefix metadata: if it
    // no longer matches, another client already modified this range.
    index_data this_entry;
    this_entry.min_kdata = idata->min_kdata;
    this_entry.kdata = idata->kdata;
    this_entry.obj = idata->obj;
    assertions[it->max_kdata.encoded()] = std::pair<bufferlist, int>
	(to_bl(this_entry), CEPH_OSD_CMPXATTR_OP_EQ);
    if (verbose) cout << "\t\t\t" << client_name
	<< "-setup_prefix: will assert "
	<< this_entry.str() << std::endl;
  }
  ceph_assert(*err == 0);
  owo->omap_cmp(assertions, err);
  // NOTE(review): prefixed entries are only written for small operations
  // (<= 2 creates) - confirm how larger operations are protected.
  if (to_create.size() <= 2) {
    owo->omap_set(to_insert);
  }
}
//some args can be null if there are no corresponding entries in p
// Fills the already-allocated write operations in *ops (after any
// ADD_PREFIX entries) with, in order: one UNWRITE_OBJECT op per object
// to delete, one MAKE_OBJECT (or AIO_MAKE_OBJECT, when creating more
// than 2) per object to create, one REMOVE_OBJECT per deletion, and a
// final REMOVE_PREFIX op that rewrites the index (asserting the prefixed
// entries are unchanged for small operations). create_vector /
// delete_vector supply omaps and versions; idata supplies the planned
// create/delete metadata. *err receives the omap_cmp result at
// execution time.
void KvFlatBtreeAsync::set_up_ops(
    const vector<object_data> &create_vector,
    const vector<object_data> &delete_vector,
    vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > * ops,
    const index_data &idata,
    int * err) {
  vector<std::pair<std::pair<int, string>,
      librados::ObjectWriteOperation* > >::iterator it;

  //skip the prefixing part
  for(it = ops->begin(); it->first.first == ADD_PREFIX; ++it) {}
  std::map<string, bufferlist> to_insert;
  std::set<string> to_remove;
  std::map<string, std::pair<bufferlist, int> > assertions;
  // Unwrite ops are only needed when rolling forward with real creates;
  // cleanup passes empty vectors and skips them.
  if (create_vector.size() > 0) {
    for (int i = 0; i < (int)idata.to_delete.size(); ++i) {
      it->first = std::pair<int, string>(UNWRITE_OBJECT, idata.to_delete[i].obj);
      set_up_unwrite_object(delete_vector[i].version, it->second);
      ++it;
    }
  }
  for (int i = 0; i < (int)idata.to_create.size(); ++i) {
    // NOTE(review): ctor args are (max, min) here but set_up_prefix_index
    // builds create_data as (min, max) - confirm against the index_data
    // constructor signature.
    index_data this_entry(idata.to_create[i].max, idata.to_create[i].min,
	idata.to_create[i].obj);
    to_insert[idata.to_create[i].max.encoded()] = to_bl(this_entry);
    if (idata.to_create.size() <= 2) {
      it->first = std::pair<int, string>(MAKE_OBJECT, idata.to_create[i].obj);
    } else {
      it->first = std::pair<int, string>(AIO_MAKE_OBJECT, idata.to_create[i].obj);
    }
    set_up_make_object(create_vector[i].omap, it->second);
    ++it;
  }
  for (int i = 0; i < (int)idata.to_delete.size(); ++i) {
    index_data this_entry = idata;
    this_entry.obj = idata.to_delete[i].obj;
    this_entry.min_kdata = idata.to_delete[i].min;
    this_entry.kdata = idata.to_delete[i].max;
    if (verbose) cout << "\t\t\t" << client_name << "-setup_ops: will assert "
	<< this_entry.str() << std::endl;
    assertions[idata.to_delete[i].max.encoded()] = std::pair<bufferlist, int>(
	to_bl(this_entry), CEPH_OSD_CMPXATTR_OP_EQ);
    to_remove.insert(idata.to_delete[i].max.encoded());
    it->first = std::pair<int, string>(REMOVE_OBJECT, idata.to_delete[i].obj);
    set_up_delete_object(it->second);
    ++it;
  }
  if ((int)idata.to_create.size() <= 2) {
    it->second->omap_cmp(assertions, err);
  }
  it->second->omap_rm_keys(to_remove);
  it->second->omap_set(to_insert);
  it->first = std::pair<int, string>(REMOVE_PREFIX, index_name);
}
// Prepares *owo to create an object pre-populated with the given omap
// entries, via the "kvs"/"create_with_omap" object class method (fails
// if the object already exists - see perform_ops' -EEXIST handling).
void KvFlatBtreeAsync::set_up_make_object(
    const std::map<std::string, bufferlist> &to_set,
    librados::ObjectWriteOperation *owo) {
  bufferlist inbl;
  encode(to_set, inbl);
  owo->exec("kvs", "create_with_omap", inbl);
}
// Prepares *owo to mark an object unwritable: asserts the object is at
// version `ver` (when ver > 0) and currently writable ("unwritable"
// xattr == "0"), then sets the xattr to "1". The version assert makes
// the op fail with -ERANGE if the object changed since it was read.
void KvFlatBtreeAsync::set_up_unwrite_object(
    const int &ver, librados::ObjectWriteOperation *owo) {
  if (ver > 0) {
    owo->assert_version(ver);
  }
  owo->cmpxattr("unwritable", CEPH_OSD_CMPXATTR_OP_EQ, to_bl("0"));
  owo->setxattr("unwritable", to_bl("1"));
}
// Prepares *owo to restore a previously-marked object to writable state:
// asserts the "unwritable" xattr is "1", then resets it to "0". Fails
// (e.g. -ECANCELED) if someone else already restored it.
void KvFlatBtreeAsync::set_up_restore_object(
    librados::ObjectWriteOperation *owo) {
  owo->cmpxattr("unwritable", CEPH_OSD_CMPXATTR_OP_EQ, to_bl("1"));
  owo->setxattr("unwritable", to_bl("0"));
}
// Prepares *owo to delete an object, but only if it is currently marked
// unwritable ("unwritable" xattr == "1") - i.e. only objects already
// claimed by an in-progress operation may be removed.
void KvFlatBtreeAsync::set_up_delete_object(
    librados::ObjectWriteOperation *owo) {
  owo->cmpxattr("unwritable", CEPH_OSD_CMPXATTR_OP_EQ, to_bl("1"));
  owo->remove();
}
// Executes the ordered operation list built by set_up_prefix_index /
// set_up_ops, dispatching on each op's tag: ADD_PREFIX (prefix the
// index), UNWRITE_OBJECT (mark old objects), MAKE_OBJECT /
// AIO_MAKE_OBJECT (create new objects, the latter batched
// asynchronously), REMOVE_OBJECT (delete old objects), REMOVE_PREFIX
// (rewrite the index), RESTORE_OBJECT (roll back a mark). The interrupt
// callback is polled before every op so failure injection can kill the
// client mid-sequence (-ESUICIDE). Marking failures trigger cleanup of
// the half-done operation; -EEXIST on create means another client
// already rolled us forward, so this client "dies".
int KvFlatBtreeAsync::perform_ops(const string &debug_prefix,
    const index_data &idata,
    vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > *ops) {
  int err = 0;
  vector<librados::AioCompletion*> aiocs(idata.to_create.size());
  int count = 0;
  for (vector<std::pair<std::pair<int, string>,
      librados::ObjectWriteOperation*> >::iterator it = ops->begin();
      it != ops->end(); ++it) {
    // Poll the failure-injection callback before every op.
    if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
      return -ESUICIDE;
    }
    switch (it->first.first) {
    case ADD_PREFIX://prefixing
      if (verbose) cout << debug_prefix << " adding prefix" << std::endl;
      err = io_ctx.operate(index_name, it->second);
      if (err < 0) {
	if (verbose) cout << debug_prefix << " prefixing the index failed with "
	    << err << std::endl;
	return -EPREFIX;
      }
      if (verbose) cout << debug_prefix << " prefix added." << std::endl;
      break;
    case UNWRITE_OBJECT://marking
      if (verbose) cout << debug_prefix << " marking " << it->first.second
	  << std::endl;
      err = io_ctx.operate(it->first.second, it->second);
      if (err < 0) {
	//most likely because it changed, in which case it will be -ERANGE
	if (verbose) cout << debug_prefix << " marking " << it->first.second
	    << "failed with code" << err << std::endl;
	// Failing on the FIRST object needs different cleanup: only the
	// index was touched so far.
	if (it->first.second == (*idata.to_delete.begin()).max.encoded()) {
	  if (cleanup(idata, -EFIRSTOBJ) == -ESUICIDE) {
	    return -ESUICIDE;
	  }
	} else {
	  if (cleanup(idata, -ERANGE) == -ESUICIDE) {
	    return -ESUICIDE;
	  }
	}
	return err;
      }
      if (verbose) cout << debug_prefix << " marked " << it->first.second
	  << std::endl;
      break;
    case MAKE_OBJECT://creating
      if (verbose) cout << debug_prefix << " creating " << it->first.second
	  << std::endl;
      err = io_ctx.operate(it->first.second, it->second);
      if (err < 0) {
	//this can happen if someone else was cleaning up after us.
	if (verbose) cout << debug_prefix << " creating " << it->first.second
	    << " failed"
	    << " with code " << err << std::endl;
	if (err == -EEXIST) {
	  //someone thinks we died, so die
	  if (verbose) cout << client_name << " is suiciding!" << std::endl;
	  return -ESUICIDE;
	} else {
	  ceph_abort();
	}
	return err;
      }
      if (verbose || idata.to_create.size() > 2) {
	cout << debug_prefix << " created object " << it->first.second
	    << std::endl;
      }
      break;
    case AIO_MAKE_OBJECT:
      // Launch creates asynchronously; wait for all of them once the
      // last one has been issued.
      cout << debug_prefix << " launching asynchronous create "
	   << it->first.second << std::endl;
      aiocs[count] = rados.aio_create_completion();
      io_ctx.aio_operate(it->first.second, aiocs[count], it->second);
      count++;
      if ((int)idata.to_create.size() == count) {
	cout << "starting aiowrite waiting loop" << std::endl;
	for (count -= 1; count >= 0; count--) {
	  aiocs[count]->wait_for_complete();
	  err = aiocs[count]->get_return_value();
	  if (err < 0) {
	    //this can happen if someone else was cleaning up after us.
	    cerr << debug_prefix << " a create failed"
		 << " with code " << err << std::endl;
	    if (err == -EEXIST) {
	      //someone thinks we died, so die
	      cerr << client_name << " is suiciding!" << std::endl;
	      return -ESUICIDE;
	    } else {
	      ceph_abort();
	    }
	    return err;
	  }
	  if (verbose || idata.to_create.size() > 2) {
	    cout << debug_prefix << " completed aio " << aiocs.size() - count
		 << "/" << aiocs.size() << std::endl;
	  }
	}
      }
      break;
    case REMOVE_OBJECT://deleting
      // Failure here is non-fatal: someone else may have already cleaned
      // up this prefix, so the error is logged and ignored.
      if (verbose) cout << debug_prefix << " deleting " << it->first.second
	  << std::endl;
      err = io_ctx.operate(it->first.second, it->second);
      if (err < 0) {
	//if someone else called cleanup on this prefix first
	if (verbose) cout << debug_prefix << " deleting " << it->first.second
	    << "failed with code" << err << std::endl;
      }
      if (verbose) cout << debug_prefix << " deleted " << it->first.second
	  << std::endl;
      break;
    case REMOVE_PREFIX://rewriting index
      if (verbose) cout << debug_prefix << " updating index " << std::endl;
      err = io_ctx.operate(index_name, it->second);
      if (err < 0) {
	if (verbose) cout << debug_prefix
	    << " rewriting the index failed with code " << err
	    << ". someone else must have thought we died, so dying" << std::endl;
	return -ETIMEDOUT;
      }
      if (verbose) cout << debug_prefix << " updated index." << std::endl;
      break;
    case RESTORE_OBJECT:
      if (verbose) cout << debug_prefix << " restoring " << it->first.second
	  << std::endl;
      err = io_ctx.operate(it->first.second, it->second);
      if (err < 0) {
	if (verbose) cout << debug_prefix << "restoring " << it->first.second
	    << " failed"
	    << " with " << err << std::endl;
	return err;
      }
      if (verbose) cout << debug_prefix << " restored " << it->first.second
	  << std::endl;
      break;
    default:
      // Unrecognized tags (e.g. the placeholder 0 ops) are applied to the
      // index object.
      if (verbose) cout << debug_prefix << " performing unknown op on "
	  << it->first.second
	  << std::endl;
      err = io_ctx.operate(index_name, it->second);
      if (err < 0) {
	if (verbose) cout << debug_prefix << " unknown op on "
	    << it->first.second
	    << " failed with " << err << std::endl;
	return err;
      }
      if (verbose) cout << debug_prefix << " unknown op on "
	  << it->first.second
	  << " succeeded." << std::endl;
      break;
    }
  }
  return err;
}
int KvFlatBtreeAsync::cleanup(const index_data &idata, const int &error) {
if (verbose) cout << "\t\t" << client_name << ": cleaning up after "
<< idata.str()
<< std::endl;
int err = 0;
ceph_assert(idata.prefix != "");
std::map<std::string,bufferlist> new_index;
std::map<std::string, std::pair<bufferlist, int> > assertions;
switch (error) {
case -EFIRSTOBJ: {
//this happens if the split or rebalance failed to mark the first object,
//meaning only the index needs to be changed.
//restore objects that had been marked unwritable.
for(vector<delete_data >::const_iterator it =
idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
index_data this_entry;
this_entry.obj = (*it).obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
new_index[it->max.encoded()] = to_bl(this_entry);
this_entry = idata;
this_entry.obj = it->obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
assertions[it->max.encoded()] =
std::pair<bufferlist, int>(to_bl(this_entry),
CEPH_OSD_CMPXATTR_OP_EQ);
}
//update the index
librados::ObjectWriteOperation update_index;
update_index.omap_cmp(assertions, &err);
update_index.omap_set(new_index);
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updating index"
<< std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = io_ctx.operate(index_name, &update_index);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: updated index. cleanup done."
<< std::endl;
break;
}
case -ERANGE: {
//this happens if a split or rebalance fails to mark an object. It is a
//special case of rolling back that does not have to deal with new objects.
//restore objects that had been marked unwritable.
vector<delete_data >::const_iterator it;
for(it = idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
index_data this_entry;
this_entry.obj = (*it).obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
new_index[it->max.encoded()] = to_bl(this_entry);
this_entry = idata;
this_entry.obj = it->obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
assertions[it->max.encoded()] =
std::pair<bufferlist, int>(to_bl(this_entry),
CEPH_OSD_CMPXATTR_OP_EQ);
}
it = idata.to_delete.begin();
librados::ObjectWriteOperation restore;
set_up_restore_object(&restore);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restoring "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &restore);
if (err < 0) {
//i.e., -ECANCELED because the object was already restored by someone
//else
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restoring "
<< it->obj
<< " failed with " << err << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restored "
<< it->obj
<< std::endl;
}
//update the index
librados::ObjectWriteOperation update_index;
update_index.omap_cmp(assertions, &err);
update_index.omap_set(new_index);
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updating index"
<< std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = io_ctx.operate(index_name, &update_index);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: updated index. cleanup done."
<< std::endl;
break;
}
case -ENOENT: {
if (verbose) cout << "\t\t" << client_name << "-cleanup: rolling forward"
<< std::endl;
//all changes were created except for updating the index and possibly
//deleting the objects. roll forward.
vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
vector<librados::ObjectWriteOperation> owos(idata.to_delete.size() + 1);
for (int i = 0; i <= (int)idata.to_delete.size(); ++i) {
ops.push_back(std::make_pair(std::pair<int, std::string>(0, ""), &owos[i]));
}
set_up_ops(vector<object_data>(),
vector<object_data>(), &ops, idata, &err);
err = perform_ops("\t\t" + client_name + "-cleanup:", idata, &ops);
if (err < 0) {
if (err == -ESUICIDE) {
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updated index"
<< std::endl;
break;
}
default: {
//roll back all changes.
if (verbose) cout << "\t\t" << client_name << "-cleanup: rolling back"
<< std::endl;
std::map<std::string,bufferlist> new_index;
std::set<std::string> to_remove;
std::map<std::string, std::pair<bufferlist, int> > assertions;
//mark the objects to be created. if someone else already has, die.
for(vector<create_data >::const_reverse_iterator it =
idata.to_create.rbegin();
it != idata.to_create.rend(); ++it) {
librados::ObjectWriteOperation rm;
set_up_unwrite_object(0, &rm);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 )
{
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: marking "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &rm);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: marking "
<< it->obj
<< " failed with " << err << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: marked "
<< it->obj
<< std::endl;
}
}
//restore objects that had been marked unwritable.
for(vector<delete_data >::const_iterator it =
idata.to_delete.begin();
it != idata.to_delete.end(); ++it) {
index_data this_entry;
this_entry.obj = (*it).obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
new_index[it->max.encoded()] = to_bl(this_entry);
this_entry = idata;
this_entry.obj = it->obj;
this_entry.min_kdata = it->min;
this_entry.kdata = it->max;
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
assertions[it->max.encoded()] =
std::pair<bufferlist, int>(to_bl(this_entry),
CEPH_OSD_CMPXATTR_OP_EQ);
librados::ObjectWriteOperation restore;
set_up_restore_object(&restore);
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: will assert index contains "
<< this_entry.str() << std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 )
{
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restoring "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &restore);
if (err == -ENOENT) {
//it had gotten far enough to be rolled forward - unmark the objects
//and roll forward.
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: roll forward instead"
<< std::endl;
for(vector<create_data >::const_iterator cit =
idata.to_create.begin();
cit != idata.to_create.end(); ++cit) {
librados::ObjectWriteOperation res;
set_up_restore_object(&res);
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)()
== 1 ) {
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: restoring " << cit->obj
<< std::endl;
err = io_ctx.operate(cit->obj, &res);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: restoring "
<< cit->obj << " failed with " << err << std::endl;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restored "
<< cit->obj
<< std::endl;
}
return cleanup(idata, -ENOENT);
} else if (err < 0) {
//i.e., -ECANCELED because the object was already restored by someone
//else
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: restoring " << it->obj
<< " failed with " << err << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: restored "
<< it->obj
<< std::endl;
}
}
//remove the new objects
for(vector<create_data >::const_reverse_iterator it =
idata.to_create.rbegin();
it != idata.to_create.rend(); ++it) {
to_remove.insert(it->max.encoded());
librados::ObjectWriteOperation rm;
rm.remove();
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 )
{
return -ESUICIDE;
}
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: removing "
<< it->obj
<< std::endl;
err = io_ctx.operate(it->obj, &rm);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: failed to remove "
<< it->obj << std::endl;
} else {
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: removed "
<< it->obj
<< std::endl;
}
}
//update the index
librados::ObjectWriteOperation update_index;
update_index.omap_cmp(assertions, &err);
update_index.omap_rm_keys(to_remove);
update_index.omap_set(new_index);
if (verbose) cout << "\t\t\t" << client_name << "-cleanup: updating index"
<< std::endl;
if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
return -ESUICIDE;
}
err = io_ctx.operate(index_name, &update_index);
if (err < 0) {
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: rewriting failed with "
<< err << ". returning -ECANCELED" << std::endl;
return -ECANCELED;
}
if (verbose) cout << "\t\t\t" << client_name
<< "-cleanup: updated index. cleanup done."
<< std::endl;
break;
}
}
return err;
}
// Concatenates a string prefix with an integer suffix, e.g. ("obj", 7)
// yields "obj7". Used to generate unique bucket object names from the
// client name and a counter.
string KvFlatBtreeAsync::to_string(string s, int i) {
  std::ostringstream name;
  name << s << i;
  return name.str();
}
/**
 * Returns this client's rados id, which identifies the
 * KvFlatBtreeAsync instance.
 */
string KvFlatBtreeAsync::get_name() {
  return rados_id;
}
// Installs the failure-injection hook used by tests: `inject` is the member
// function pointer polled at interruption points throughout the code, and
// `wait_time` is the delay (ms) stored for injected waits.
void KvFlatBtreeAsync::set_inject(injection_t inject, int wait_time) {
  wait_ms = wait_time;
  interrupt = inject;
}
/**
 * Connects to the cluster and ensures the index object and this client's
 * first bucket object exist.
 *
 * Initialization order: rados init, configuration from argv / environment /
 * config file, connect, then create an ioctx on pool_name. Any failure in
 * those steps is returned to the caller.
 *
 * The index object is then created exclusively (create(true)) with a single
 * omap entry "1" -> {obj: client_name, key: ""}. If that create fails,
 * setup prints the error but deliberately returns 0 - presumably because
 * another client has already created the index (exclusive create would
 * return -EEXIST); TODO confirm this is the intended behavior for other
 * error codes too.
 *
 * @param argc/argv command-line arguments forwarded to conf_parse_argv.
 * @return 0 on success (or when index creation fails, see above), negative
 *         error code on cluster-connection failure.
 */
int KvFlatBtreeAsync::setup(int argc, const char** argv) {
  int r = rados.init(rados_id.c_str());
  if (r < 0) {
    cerr << "error during init" << r << std::endl;
    return r;
  }
  r = rados.conf_parse_argv(argc, argv);
  if (r < 0) {
    cerr << "error during parsing args" << r << std::endl;
    return r;
  }
  r = rados.conf_parse_env(NULL);
  if (r < 0) {
    cerr << "error during parsing env" << r << std::endl;
    return r;
  }
  r = rados.conf_read_file(NULL);
  if (r < 0) {
    cerr << "error during read file: " << r << std::endl;
    return r;
  }
  r = rados.connect();
  if (r < 0) {
    cerr << "error during connect: " << r << std::endl;
    return r;
  }
  r = rados.ioctx_create(pool_name.c_str(), io_ctx);
  if (r < 0) {
    cerr << "error creating io ctx: " << r << std::endl;
    rados.shutdown();
    return r;
  }

  // Build the initial index: entry "1" maps the highest key ("") to this
  // client's first bucket object. create(true) makes this exclusive, so a
  // second client's setup will fail here (see note above).
  librados::ObjectWriteOperation make_index;
  make_index.create(true);
  std::map<std::string,bufferlist> index_map;
  index_data idata;
  idata.obj = client_name;
  idata.min_kdata.raw_key = "";
  idata.kdata = key_data("");
  index_map["1"] = to_bl(idata);
  make_index.omap_set(index_map);
  r = io_ctx.operate(index_name, &make_index);
  if (r < 0) {
    if (verbose) cout << client_name << ": Making the index failed with code "
        << r
        << std::endl;
    return 0;
  }
  if (verbose) cout << client_name << ": created index object" << std::endl;

  // Create the first bucket object, marked writable ("unwritable" = 0) and
  // empty ("size" = 0). Failure here is logged but not fatal.
  librados::ObjectWriteOperation make_max_obj;
  make_max_obj.create(true);
  make_max_obj.setxattr("unwritable", to_bl("0"));
  make_max_obj.setxattr("size", to_bl("0"));
  r = io_ctx.operate(client_name, &make_max_obj);
  if (r < 0) {
    if (verbose) cout << client_name << ": Setting xattr failed with code "
        << r
        << std::endl;
  }
  return 0;
}
/**
 * Inserts (or, when update_on_existing is true, upserts) key -> val.
 *
 * Looks the key up in the index to find the target bucket object, then
 * delegates the actual write and all retry/split handling to set_op.
 *
 * @param key the key to store.
 * @param val the value to store under key.
 * @param update_on_existing if false, the insert is exclusive and fails
 *        with -EEXIST when the key is already present.
 * @return 0 on success, a negative error code otherwise.
 */
int KvFlatBtreeAsync::set(const string &key, const bufferlist &val,
    bool update_on_existing) {
  if (verbose) cout << client_name << " is "
      << (update_on_existing? "updating " : "setting ")
      << key << std::endl;
  int err = 0;
  // (removed an unused `utime_t mytime` local that was never read)
  index_data idata(key);
  if (verbose) cout << "\t" << client_name << ": finding oid" << std::endl;
  err = read_index(key, &idata, NULL, false);
  if (err < 0) {
    if (verbose) cout << "\t" << client_name
        << ": getting oid failed with code "
        << err << std::endl;
    return err;
  }
  if (verbose) cout << "\t" << client_name << ": index data is " << idata.str()
      << ", object is " << idata.obj << std::endl;
  err = set_op(key, val, update_on_existing, idata);
  if (verbose) cout << "\t" << client_name << ": finished set with " << err
      << std::endl;
  return err;
}
/**
 * Performs the actual insert of key -> val into the bucket named by idata,
 * retrying around splits and stale index reads.
 *
 * The write goes through the "kvs" object class method "omap_insert" with
 * bound = 2k, so the OSD rejects it (-EKEYREJECTED) when the bucket is
 * already at maximum size; exclusive insert is requested when
 * update_on_existing is false.
 *
 * Error handling:
 *  - -EEXIST: key already present on an exclusive insert; returned as-is.
 *  - -EKEYREJECTED: bucket is full; re-read the index and split until the
 *    split succeeds (or returns -ENOENT/-EBALANCE, meaning someone else
 *    already changed the bucket), then retry the whole set_op.
 *  - -ENOENT / -EACCES: the cached index entry was stale; re-read the
 *    index and retry.
 *
 * @return 0 on success, negative error code otherwise (-ESUICIDE when the
 *         injected interrupt fires).
 */
int KvFlatBtreeAsync::set_op(const string &key, const bufferlist &val,
    bool update_on_existing, index_data &idata) {
  //write
  bufferlist inbl;
  omap_set_args args;
  args.bound = 2 * k;
  args.exclusive = !update_on_existing;
  args.omap[key] = val;
  args.encode(inbl);
  librados::ObjectWriteOperation owo;
  owo.exec("kvs", "omap_insert", inbl);
  // injected failure point for the test harness
  if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
    if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
    return -ESUICIDE;
  }
  if (verbose) cout << "\t" << client_name << ": inserting " << key
      << " into object "
      << idata.obj << std::endl;
  int err = io_ctx.operate(idata.obj, &owo);
  if (err < 0) {
    switch (err) {
    case -EEXIST: {
      //the key already exists and this is an exclusive insert.
      cerr << "\t" << client_name << ": writing key failed with "
          << err << std::endl;
      return err;
    }
    case -EKEYREJECTED: {
      //the object needs to be split.
      do {
        if (verbose) cout << "\t" << client_name << ": running split on "
            << idata.obj
            << std::endl;
        err = read_index(key, &idata, NULL, true);
        if (err < 0) {
          if (verbose) cout << "\t" << client_name
              << ": getting oid failed with code "
              << err << std::endl;
          return err;
        }
        err = split(idata);
        if (err < 0 && err != -ENOENT && err != -EBALANCE) {
          if (verbose) cerr << "\t" << client_name << ": split failed with "
              << err << std::endl;
          // delegate recovery: 1 = restart with fresh idata, 2 = give up
          int ret = handle_set_rm_errors(err, idata.obj, key, &idata, NULL);
          switch (ret) {
          case -ESUICIDE:
            if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
            return ret;
          case 1:
            return set_op(key, val, update_on_existing, idata);
          case 2:
            return err;
          }
        }
      } while (err < 0 && err != -EBALANCE && err != -ENOENT);
      // the split (by us or someone else) is done - refresh the index and
      // retry the insert on the (possibly new) bucket.
      err = read_index(key, &idata, NULL, true);
      if (err < 0) {
        if (verbose) cout << "\t" << client_name
            << ": getting oid failed with code "
            << err << std::endl;
        return err;
      }
      return set_op(key, val, update_on_existing, idata);
    }
    default:
      if (verbose) cerr << "\t" << client_name << ": writing obj failed with "
          << err << std::endl;
      if (err == -ENOENT || err == -EACCES) {
        if (err == -ENOENT) {
          if (verbose) cout << "CACHE FAILURE" << std::endl;
        }
        // stale cached index entry - force a fresh read and retry.
        err = read_index(key, &idata, NULL, true);
        if (err < 0) {
          if (verbose) cout << "\t" << client_name
              << ": getting oid failed with code "
              << err << std::endl;
          return err;
        }
        if (verbose) cout << "\t" << client_name << ": index data is "
            << idata.str()
            << ", object is " << idata.obj << std::endl;
        return set_op(key, val, update_on_existing, idata);
      } else {
        return err;
      }
    }
  }
  return 0;
}
/**
 * Removes key from the structure.
 *
 * Looks the key up in the index (also fetching the following index entry,
 * which remove_op needs for rebalancing) and delegates the actual removal
 * and retry handling to remove_op.
 *
 * @param key the key to remove.
 * @return 0 on success, a negative error code otherwise (e.g. -ENODATA
 *         when the key is not present).
 */
int KvFlatBtreeAsync::remove(const string &key) {
  if (verbose) cout << client_name << ": removing " << key << std::endl;
  int err = 0;
  string obj;
  // (removed an unused `utime_t mytime` local that was never read)
  index_data idata;
  index_data next_idata;
  if (verbose) cout << "\t" << client_name << ": finding oid" << std::endl;
  err = read_index(key, &idata, &next_idata, false);
  if (err < 0) {
    if (verbose) cout << "getting oid failed with code " << err << std::endl;
    return err;
  }
  obj = idata.obj;
  if (verbose) cout << "\t" << client_name << ": idata is " << idata.str()
      << ", next_idata is " << next_idata.str()
      << ", obj is " << obj << std::endl;
  err = remove_op(key, idata, next_idata);
  if (verbose) cout << "\t" << client_name << ": finished remove with " << err
      << " and exiting" << std::endl;
  return err;
}
/**
 * Performs the actual removal of key from the bucket named by idata.
 *
 * The removal goes through the "kvs" object class method "omap_remove" with
 * bound = k, so the OSD rejects it (-EKEYREJECTED) when the bucket would
 * shrink below the minimum size; in that case the bucket is rebalanced with
 * its neighbor (next_idata) and the remove retried. If the error handler
 * reports -EUCLEAN, this is the only bucket, so the remove is redone with
 * bound = 0 (allowed to go below k).
 *
 * Error handling:
 *  - -ENODATA: key not present in the bucket; returned unchanged.
 *  - -EKEYREJECTED: rebalance-and-retry loop, as above.
 *  - -ENOENT / -EACCES: stale cached index entry; re-read and retry.
 *
 * @return 0 on success, negative error code otherwise (-ESUICIDE when the
 *         injected interrupt fires).
 */
int KvFlatBtreeAsync::remove_op(const string &key, index_data &idata,
    index_data &next_idata) {
  //write
  bufferlist inbl;
  omap_rm_args args;
  args.bound = k;
  args.omap.insert(key);
  args.encode(inbl);

  librados::ObjectWriteOperation owo;
  owo.exec("kvs", "omap_remove", inbl);
  // injected failure point for the test harness
  if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
    if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
    return -ESUICIDE;
  }
  if (verbose) cout << "\t" << client_name << ": removing " << key << " from "
      << idata.obj
      << std::endl;
  int err = io_ctx.operate(idata.obj, &owo);
  if (err < 0) {
    if (verbose) cout << "\t" << client_name << ": writing obj failed with "
        << err << std::endl;
    switch (err) {
    case -ENODATA: {
      //the key does not exist in the object
      return err;
    }
    case -EKEYREJECTED: {
      //the object needs to be split.
      do {
        if (verbose) cerr << "\t" << client_name << ": running rebalance on "
            << idata.obj << std::endl;
        err = read_index(key, &idata, &next_idata, true);
        if (err < 0) {
          if (verbose) cout << "\t" << client_name
              << ": getting oid failed with code "
              << err << std::endl;
          return err;
        }
        err = rebalance(idata, next_idata);
        if (err < 0 && err != -ENOENT && err != -EBALANCE) {
          if (verbose) cerr << "\t" << client_name << ": rebalance returned "
              << err << std::endl;
          int ret = handle_set_rm_errors(err, idata.obj, key, &idata,
              &next_idata);
          switch (ret) {
          case -ESUICIDE:
            if (verbose) cout << client_name << " IS SUICIDING!" << std::endl;
            return err;
          case 1:
            // index now names a different bucket - restart from scratch
            return remove_op(key, idata, next_idata);
          case 2:
            return err;
            break;
          case -EUCLEAN:
            //this is the only node, so it's ok to go below k.
            // (retry the remove with bound = 0 so the OSD accepts any size)
            librados::ObjectWriteOperation owo;
            bufferlist inbl;
            omap_rm_args args;
            args.bound = 0;
            args.omap.insert(key);
            args.encode(inbl);
            owo.exec("kvs", "omap_remove", inbl);
            if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)()
                == 1 ) {
              if (verbose) cout << client_name << " IS SUICIDING!"
                  << std::endl;
              return -ESUICIDE;
            }
            if (verbose) cout << "\t" << client_name << ": removing " << key
                << " from "
                << idata.obj
                << std::endl;
            int err = io_ctx.operate(idata.obj, &owo);
            if (err == 0) {
              return 0;
            }
            // on failure fall out of the switch and loop again
          }
        }
      } while (err < 0 && err != -EBALANCE && err != -ENOENT);
      // rebalance done (by us or someone else) - refresh and start over
      err = read_index(key, &idata, &next_idata, true);
      if (err < 0) {
        if (verbose) cout << "\t" << client_name
            << ": getting oid failed with code "
            << err << std::endl;
        return err;
      }
      return remove(key);
    }
    default:
      if (err == -ENOENT || err == -EACCES) {
        // stale cached index entry - force a fresh read and retry
        err = read_index(key, &idata, &next_idata, true);
        if (err < 0) {
          if (verbose) cout << "\t" << client_name
              << ": getting oid failed with code "
              << err << std::endl;
          return err;
        }
        if (verbose) cout << "\t" << client_name << ": index data is "
            << idata.str()
            << ", object is " << idata.obj << std::endl;
        //idea: we read the time every time we read the index anyway - store it.
        return remove_op(key, idata, next_idata);
      } else {
        return err;
      }
    }
  }
  return 0;
}
/**
 * Common recovery logic for errors hit by set_op/remove_op while splitting
 * or rebalancing.
 *
 * For the "try again" errors (-ECANCELED, -EPREFIX, -ETIMEDOUT, -EACCES)
 * the index is re-read into *idata (and *next_idata when non-NULL).
 *
 * Return values the callers dispatch on:
 *  - -ESUICIDE: propagated unchanged so the caller can abort.
 *  - 1: the index entry now names a different object; restart the op.
 *  - 2: an unexpected error; the caller should give up and return err.
 *  - otherwise err itself (possibly updated by read_index); note that
 *    -EUCLEAN and -ERANGE reach this path unchanged, since -ETIMEDOUT and
 *    -EACCES are already consumed by the first branch.
 */
int KvFlatBtreeAsync::handle_set_rm_errors(int &err, string obj,
    string key,
    index_data * idata, index_data * next_idata) {
  if (err == -ESUICIDE) {
    return err;
  } else if (err == -ECANCELED //if an object was unwritable or index changed
      || err == -EPREFIX //if there is currently a prefix
      || err == -ETIMEDOUT// if the index changes during the op - i.e. cleanup
      || err == -EACCES) //possible if we were acting on old index data
  {
    err = read_index(key, idata, next_idata, true);
    if (err < 0) {
      return err;
    }
    if (verbose) cout << "\t" << client_name << ": prefix is " << idata->str()
        << std::endl;
    if (idata->obj != obj) {
      //someone else has split or cleaned up or something. start over.
      return 1;//meaning repeat
    }
  } else if (err != -ETIMEDOUT && err != -ERANGE && err != -EACCES
      && err != -EUCLEAN){
    if (verbose) cout << "\t" << client_name
        << ": split encountered an unexpected error: " << err
        << std::endl;
    return 2;
  }
  return err;
}
/**
 * Reads the value stored under key into *val.
 *
 * Looks the key up in the index to find the bucket object, then delegates
 * the read and stale-cache retry handling to get_op. Also bumps the 'g'
 * entry of the per-op counter map.
 *
 * @param key the key to read.
 * @param val out-parameter receiving the value.
 * @return 0 on success, a negative error code otherwise (-ESUICIDE when
 *         the injected interrupt fires).
 */
int KvFlatBtreeAsync::get(const string &key, bufferlist *val) {
  opmap['g']++;
  if (verbose) cout << client_name << ": getting " << key << std::endl;
  int err = 0;
  index_data idata;
  // (removed a `utime_t mytime` local that was assigned from
  // ceph_clock_now() but never read)
  if ((((KeyValueStructure *)this)->*KvFlatBtreeAsync::interrupt)() == 1 ) {
    return -ESUICIDE;
  }
  err = read_index(key, &idata, NULL, false);
  if (err < 0) {
    if (verbose) cout << "getting oid failed with code " << err << std::endl;
    return err;
  }
  err = get_op(key, val, idata);
  if (verbose) cout << client_name << ": got " << key << " with " << err
      << std::endl;
  return err;
}
/**
 * Reads the value for key from the bucket object named by idata.
 *
 * On -ENOENT the cached index entry was stale, so the index is re-read and
 * the read retried recursively; any other error is returned unchanged.
 * Note that when the bucket exists but the key does not, omap[key]
 * default-constructs an empty bufferlist into *val and 0 is returned.
 */
int KvFlatBtreeAsync::get_op(const string &key, bufferlist *val,
    index_data &idata) {
  int err = 0;
  std::set<std::string> key_set;
  key_set.insert(key);
  std::map<std::string,bufferlist> omap;
  librados::ObjectReadOperation read;
  read.omap_get_vals_by_keys(key_set, &omap, &err);
  err = io_ctx.operate(idata.obj, &read, NULL);
  if (err < 0) {
    if (err == -ENOENT) {
      // bucket vanished (split/merged) - refresh the index and retry
      err = read_index(key, &idata, NULL, true);
      if (err < 0) {
        if (verbose) cout << "\t" << client_name
            << ": getting oid failed with code "
            << err << std::endl;
        return err;
      }
      if (verbose) cout << "\t" << client_name << ": index data is "
          << idata.str()
          << ", object is " << idata.obj << std::endl;
      return get_op(key, val, idata);
    } else {
      if (verbose) cout << client_name
          << ": get encountered an unexpected error: " << err
          << std::endl;
      return err;
    }
  }
  *val = omap[key];
  return err;
}
void *KvFlatBtreeAsync::pset(void *ptr) {
struct aio_set_args *args = (struct aio_set_args *)ptr;
*args->err =
args->kvba->KvFlatBtreeAsync::set((string)args->key,
(bufferlist)args->val, (bool)args->exc);
args->cb(args->err, args->cb_args);
delete args;
return NULL;
}
void KvFlatBtreeAsync::aio_set(const string &key, const bufferlist &val,
bool exclusive, callback cb, void * cb_args, int * err) {
aio_set_args *args = new aio_set_args();
args->kvba = this;
args->key = key;
args->val = val;
args->exc = exclusive;
args->cb = cb;
args->cb_args = cb_args;
args->err = err;
pthread_t t;
int r = pthread_create(&t, NULL, pset, (void*)args);
if (r < 0) {
*args->err = r;
return;
}
pthread_detach(t);
}
void *KvFlatBtreeAsync::prm(void *ptr) {
struct aio_rm_args *args = (struct aio_rm_args *)ptr;
*args->err =
args->kvba->KvFlatBtreeAsync::remove((string)args->key);
args->cb(args->err, args->cb_args);
delete args;
return NULL;
}
void KvFlatBtreeAsync::aio_remove(const string &key,
callback cb, void * cb_args, int * err) {
aio_rm_args * args = new aio_rm_args();
args->kvba = this;
args->key = key;
args->cb = cb;
args->cb_args = cb_args;
args->err = err;
pthread_t t;
int r = pthread_create(&t, NULL, prm, (void*)args);
if (r < 0) {
*args->err = r;
return;
}
pthread_detach(t);
}
void *KvFlatBtreeAsync::pget(void *ptr) {
struct aio_get_args *args = (struct aio_get_args *)ptr;
*args->err =
args->kvba->KvFlatBtreeAsync::get((string)args->key,
(bufferlist *)args->val);
args->cb(args->err, args->cb_args);
delete args;
return NULL;
}
void KvFlatBtreeAsync::aio_get(const string &key, bufferlist *val,
callback cb, void * cb_args, int * err) {
aio_get_args * args = new aio_get_args();
args->kvba = this;
args->key = key;
args->val = val;
args->cb = cb;
args->cb_args = cb_args;
args->err = err;
pthread_t t;
int r = pthread_create(&t, NULL, pget, (void*)args);
if (r < 0) {
*args->err = r;
return;
}
pthread_detach(t);
}
/**
 * Inserts/updates every pair in in_map in one multi-object transaction.
 *
 * Steps:
 *  1. Call the "kvs" class method "read_many" on the index to find the
 *     index entries covering the requested keys.
 *  2. Read each affected bucket and merge its contents with in_map into
 *     big_map; those buckets become to_delete.
 *  3. Repack big_map into new buckets (to_create), starting a new bucket
 *     whenever the current one reaches 1.5k entries.
 *  4. Build the prefix-index op plus the create/delete ops and run them
 *     through perform_ops; on any failure the whole operation is retried
 *     from scratch by recursing.
 *  5. On success, update the in-memory index cache under icache_lock.
 *
 * NOTE(review): to_delete[0] is read unconditionally when seeding the first
 * new bucket's min_kdata - this assumes read_many matched at least one
 * index entry; confirm behavior for an empty result.
 *
 * @return 0 on success, negative error code otherwise.
 */
int KvFlatBtreeAsync::set_many(const std::map<string, bufferlist> &in_map) {
  int err = 0;
  bufferlist inbl;
  bufferlist outbl;
  std::set<string> keys;
  std::map<string, bufferlist> big_map;
  for (map<string, bufferlist>::const_iterator it = in_map.begin();
      it != in_map.end(); ++it) {
    keys.insert(it->first);
    big_map.insert(*it);
  }
  if (verbose) cout << "created key set and big_map" << std::endl;
  encode(keys, inbl);
  librados::AioCompletion * aioc = rados.aio_create_completion();
  io_ctx.aio_exec(index_name, aioc, "kvs", "read_many", inbl, &outbl);
  aioc->wait_for_complete();
  err = aioc->get_return_value();
  aioc->release();
  if (err < 0) {
    cerr << "getting index failed with " << err << std::endl;
    return err;
  }
  std::map<string, bufferlist> imap;//read from the index
  auto blit = outbl.cbegin();
  decode(imap, blit);
  if (verbose) cout << "finished reading index for objects. there are "
      << imap.size() << " entries that need to be changed. " << std::endl;
  vector<object_data> to_delete;
  vector<object_data> to_create;
  if (verbose) cout << "setting up to_delete and to_create vectors from index "
      << "map" << std::endl;
  //set up to_delete from index map
  for (map<string, bufferlist>::iterator it = imap.begin(); it != imap.end();
      ++it){
    index_data idata;
    blit = it->second.begin();
    idata.decode(blit);
    to_delete.push_back(object_data(idata.min_kdata, idata.kdata, idata.obj));
    err = read_object(idata.obj, &to_delete[to_delete.size() - 1]);
    if (err < 0) {
      // a bucket changed under us - start the whole set_many over
      if (verbose) cout << "reading " << idata.obj << " failed with " << err
          << std::endl;
      return set_many(in_map);
    }
    big_map.insert(to_delete[to_delete.size() - 1].omap.begin(),
        to_delete[to_delete.size() - 1].omap.end());
  }
  // repack big_map into fresh buckets of at most 1.5k entries each
  to_create.push_back(object_data(
      to_string(client_name, client_index++)));
  to_create[0].min_kdata = to_delete[0].min_kdata;
  for(map<string, bufferlist>::iterator it = big_map.begin();
      it != big_map.end(); ++it) {
    if (to_create[to_create.size() - 1].omap.size() == 1.5 * k) {
      to_create[to_create.size() - 1].max_kdata =
          key_data(to_create[to_create.size() - 1]
              .omap.rbegin()->first);
      to_create.push_back(object_data(
          to_string(client_name, client_index++)));
      to_create[to_create.size() - 1].min_kdata =
          to_create[to_create.size() - 2].max_kdata;
    }
    to_create[to_create.size() - 1].omap.insert(*it);
  }
  to_create[to_create.size() - 1].max_kdata =
      to_delete[to_delete.size() - 1].max_kdata;
  // one op for the index prefix, one per object to create, two per object
  // to delete (mark unwritable + remove), plus the final index update
  vector<librados::ObjectWriteOperation> owos(2 + 2 * to_delete.size()
      + to_create.size());
  vector<std::pair<std::pair<int, string>, librados::ObjectWriteOperation*> > ops;
  index_data idata;
  set_up_prefix_index(to_create, to_delete, &owos[0], &idata, &err);
  if (verbose) cout << "finished making to_create and to_delete. "
      << std::endl;
  ops.push_back(std::make_pair(
      std::pair<int, string>(ADD_PREFIX, index_name),
      &owos[0]));
  for (int i = 1; i < 2 + 2 * (int)to_delete.size() + (int)to_create.size();
      i++) {
    ops.push_back(std::make_pair(std::make_pair(0,""), &owos[i]));
  }
  set_up_ops(to_create, to_delete, &ops, idata, &err);
  cout << "finished setting up ops. Starting critical section..." << std::endl;
  /////BEGIN CRITICAL SECTION/////
  //put prefix on index entry for idata.val
  err = perform_ops("\t\t" + client_name + "-set_many:", idata, &ops);
  if (err < 0) {
    // the transaction failed partway - retry from scratch
    return set_many(in_map);
  }
  if (verbose) cout << "\t\t" << client_name << "-split: done splitting."
      << std::endl;
  /////END CRITICAL SECTION/////
  // keep the in-memory index cache in sync with what we just rewrote
  std::scoped_lock l{icache_lock};
  for (vector<delete_data>::iterator it = idata.to_delete.begin();
      it != idata.to_delete.end(); ++it) {
    icache.erase(it->max);
  }
  for (vector<create_data>::iterator it = idata.to_create.begin();
      it != idata.to_create.end(); ++it) {
    icache.push(index_data(*it));
  }
  return err;
}
/**
 * Deletes every key/value pair and every bucket object, resetting the
 * store to a single empty bucket.
 *
 * Reads the whole index, clears the index omap while re-inserting only the
 * "1" entry (the permanent first bucket), then clears or removes each
 * bucket object named by the old index entries, and finally empties the
 * in-memory index cache.
 *
 * @return 0 on success (including when the index does not exist yet) or a
 *         negative error code.
 */
int KvFlatBtreeAsync::remove_all() {
  if (verbose) cout << client_name << ": removing all" << std::endl;
  int err = 0;
  librados::ObjectReadOperation oro;
  librados::AioCompletion * oro_aioc = rados.aio_create_completion();
  std::map<std::string, bufferlist> index_set;
  oro.omap_get_vals2("",LONG_MAX,&index_set, nullptr, &err);
  err = io_ctx.aio_operate(index_name, oro_aioc, &oro, NULL);
  if (err < 0){
    if (err == -ENOENT) {
      // no index object means nothing has been stored yet - not an error
      return 0;
    }
    if (verbose) cout << "getting keys failed with error " << err << std::endl;
    return err;
  }
  oro_aioc->wait_for_complete();
  oro_aioc->release();

  // Rewrite the index so it contains only the "1" entry pointing at the
  // first bucket.
  librados::ObjectWriteOperation rm_index;
  librados::AioCompletion * rm_index_aioc = rados.aio_create_completion();
  std::map<std::string,bufferlist> new_index;
  new_index["1"] = index_set["1"];
  rm_index.omap_clear();
  rm_index.omap_set(new_index);
  io_ctx.aio_operate(index_name, rm_index_aioc, &rm_index);
  // BUGFIX: wait for the write to complete before asking for its return
  // value; the original read the return value of a possibly still-pending
  // operation.
  rm_index_aioc->wait_for_complete();
  err = rm_index_aioc->get_return_value();
  rm_index_aioc->release();
  if (err < 0) {
    if (verbose) cout << "rm index aioc failed with " << err
        << std::endl;
    return err;
  }

  if (!index_set.empty()) {
    for (std::map<std::string,bufferlist>::iterator it = index_set.begin();
        it != index_set.end(); ++it){
      librados::ObjectWriteOperation sub;
      if (it->first == "1") {
        // the first bucket is kept (the index still points at it) - just
        // empty it
        sub.omap_clear();
      } else {
        sub.remove();
      }
      index_data idata;
      auto b = it->second.cbegin();
      idata.decode(b);
      io_ctx.operate(idata.obj, &sub);
    }
  }
  icache.clear();
  return 0;
}
int KvFlatBtreeAsync::get_all_keys(std::set<std::string> *keys) {
if (verbose) cout << client_name << ": getting all keys" << std::endl;
int err = 0;
librados::ObjectReadOperation oro;
std::map<std::string,bufferlist> index_set;
oro.omap_get_vals2("",LONG_MAX,&index_set, nullptr, &err);
io_ctx.operate(index_name, &oro, NULL);
if (err < 0){
if (verbose) cout << "getting keys failed with error " << err << std::endl;
return err;
}
for (std::map<std::string,bufferlist>::iterator it = index_set.begin();
it != index_set.end(); ++it){
librados::ObjectReadOperation sub;
std::set<std::string> ret;
sub.omap_get_keys2("",LONG_MAX,&ret, nullptr, &err);
index_data idata;
auto b = it->second.cbegin();
idata.decode(b);
io_ctx.operate(idata.obj, &sub, NULL);
keys->insert(ret.begin(), ret.end());
}
return err;
}
int KvFlatBtreeAsync::get_all_keys_and_values(
std::map<std::string,bufferlist> *kv_map) {
if (verbose) cout << client_name << ": getting all keys and values"
<< std::endl;
int err = 0;
librados::ObjectReadOperation first_read;
std::set<std::string> index_set;
first_read.omap_get_keys2("",LONG_MAX,&index_set, nullptr, &err);
io_ctx.operate(index_name, &first_read, NULL);
if (err < 0){
if (verbose) cout << "getting keys failed with error " << err << std::endl;
return err;
}
for (std::set<std::string>::iterator it = index_set.begin();
it != index_set.end(); ++it){
librados::ObjectReadOperation sub;
std::map<std::string, bufferlist> ret;
sub.omap_get_vals2("",LONG_MAX,&ret, nullptr, &err);
io_ctx.operate(*it, &sub, NULL);
kv_map->insert(ret.begin(), ret.end());
}
return err;
}
/**
 * Verifies the structural invariants of the store; returns true when
 * everything checks out, false (with details on cerr) otherwise.
 *
 * Checks performed:
 *  1. If the index is unreadable, no other objects may exist in the pool.
 *  2. For index entries carrying an in-progress prefix older than
 *     `timeout`, the objects marked for deletion must either be gone,
 *     marked unwritable, or unmodified since the prefix was written.
 *  3. Every object in the pool must be either the index, a bucket listed
 *     in the index, or part of an in-progress operation (special_names).
 *  4. Each bucket's size must be within [k, 2k] (except the last bucket,
 *     in-progress buckets, or when there is only one bucket).
 *  5. Each key must sort within its bucket's (prev, max] range.
 */
bool KvFlatBtreeAsync::is_consistent() {
  int err;
  bool ret = true;
  if (verbose) cout << client_name << ": checking consistency" << std::endl;
  std::map<std::string,bufferlist> index;
  std::map<std::string, std::set<std::string> > sub_objs;
  librados::ObjectReadOperation oro;
  oro.omap_get_vals2("",LONG_MAX,&index, nullptr, &err);
  io_ctx.operate(index_name, &oro, NULL);
  if (err < 0){
    //probably because the index doesn't exist - this might be ok.
    for (librados::NObjectIterator oit = io_ctx.nobjects_begin();
        oit != io_ctx.nobjects_end(); ++oit) {
      //if this executes, there are floating objects.
      cerr << "Not consistent! found floating object " << oit->get_oid()
          << std::endl;
      ret = false;
    }
    return ret;
  }
  std::map<std::string, string> parsed_index;
  std::set<std::string> onames;
  std::set<std::string> special_names;
  for (map<std::string,bufferlist>::iterator it = index.begin();
      it != index.end(); ++it) {
    if (it->first != "") {
      index_data idata;
      auto b = it->second.cbegin();
      idata.decode(b);
      if (idata.prefix != "") {
        // this entry has an in-progress multi-object operation attached
        for(vector<delete_data>::iterator dit = idata.to_delete.begin();
            dit != idata.to_delete.end(); ++dit) {
          librados::ObjectReadOperation oro;
          librados::AioCompletion * aioc = rados.aio_create_completion();
          bufferlist un;
          oro.getxattr("unwritable", &un, &err);
          io_ctx.aio_operate(dit->obj, aioc, &oro, NULL);
          aioc->wait_for_complete();
          err = aioc->get_return_value();
          if (ceph_clock_now() - idata.ts > timeout) {
            // the owning client has presumably died - the object must be
            // gone, unwritable, or untouched since the prefix was set
            if (err < 0) {
              aioc->release();
              if (err == -ENOENT) {
                continue;
              } else {
                cerr << "Not consistent! reading object " << dit->obj
                    << "returned " << err << std::endl;
                ret = false;
                break;
              }
            }
            if (atoi(string(un.c_str(), un.length()).c_str()) != 1 &&
                aioc->get_version64() != dit->version) {
              cerr << "Not consistent! object " << dit->obj << " has been "
                  << " modified since the client died was not cleaned up."
                  << std::endl;
              ret = false;
            }
          }
          special_names.insert(dit->obj);
          aioc->release();
        }
        for(vector<create_data >::iterator cit = idata.to_create.begin();
            cit != idata.to_create.end(); ++cit) {
          special_names.insert(cit->obj);
        }
      }
      parsed_index.insert(std::make_pair(it->first, idata.obj));
      onames.insert(idata.obj);
    }
  }
  //make sure that an object exists iff it either is the index
  //or is listed in the index
  for (librados::NObjectIterator oit = io_ctx.nobjects_begin();
      oit != io_ctx.nobjects_end(); ++oit) {
    string name = oit->get_oid();
    if (name != index_name && onames.count(name) == 0
        && special_names.count(name) == 0) {
      cerr << "Not consistent! found floating object " << name << std::endl;
      ret = false;
    }
  }
  //check objects
  string prev = "";
  for (std::map<std::string, string>::iterator it = parsed_index.begin();
      it != parsed_index.end();
      ++it) {
    librados::ObjectReadOperation read;
    read.omap_get_keys2("", LONG_MAX, &sub_objs[it->second], nullptr, &err);
    err = io_ctx.operate(it->second, &read, NULL);
    int size_int = (int)sub_objs[it->second].size();
    //check that size is in the right range
    if (it->first != "1" && special_names.count(it->second) == 0 &&
        err != -ENOENT && (size_int > 2*k|| size_int < k)
        && parsed_index.size() > 1) {
      cerr << "Not consistent! Object " << *it << " has size " << size_int
          << ", which is outside the acceptable range." << std::endl;
      ret = false;
    }
    //check that all keys belong in that object
    for(std::set<std::string>::iterator subit = sub_objs[it->second].begin();
        subit != sub_objs[it->second].end(); ++subit) {
      if ((it->first != "1"
          && *subit > it->first.substr(1,it->first.length()))
          || *subit <= prev) {
        cerr << "Not consistent! key " << *subit << " does not belong in "
            << *it << std::endl;
        cerr << "not last element, i.e. " << it->first << " not equal to 1? "
            << (it->first != "1") << std::endl
            << "greater than " << it->first.substr(1,it->first.length())
            <<"? " << (*subit > it->first.substr(1,it->first.length()))
            << std::endl
            << "less than or equal to " << prev << "? "
            << (*subit <= prev) << std::endl;
        ret = false;
      }
    }
    prev = it->first.substr(1,it->first.length());
  }
  if (!ret) {
    if (verbose) cout << "failed consistency test - see error log"
        << std::endl;
    cerr << str();
  } else {
    if (verbose) cout << "passed consistency test" << std::endl;
  }
  return ret;
}
/**
 * Renders a human-readable dump of the whole structure: first the index
 * entries, then a table of bucket names / sizes / versions / unwritable
 * flags, then the key/value contents of every bucket laid out in columns.
 * Used for debugging and by is_consistent's failure output.
 *
 * @return the formatted dump, or a partial dump if reading fails.
 */
string KvFlatBtreeAsync::str() {
  stringstream ret;
  ret << "Top-level map:" << std::endl;
  int err = 0;
  std::set<std::string> keys;
  std::map<std::string,bufferlist> index;
  librados::ObjectReadOperation oro;
  librados::AioCompletion * top_aioc = rados.aio_create_completion();
  oro.omap_get_vals2("",LONG_MAX,&index, nullptr, &err);
  io_ctx.aio_operate(index_name, top_aioc, &oro, NULL);
  top_aioc->wait_for_complete();
  err = top_aioc->get_return_value();
  top_aioc->release();
  if (err < 0 && err != -5){
    if (verbose) cout << "getting keys failed with error " << err << std::endl;
    return ret.str();
  }
  if(index.empty()) {
    ret << "There are no objects!" << std::endl;
    return ret.str();
  }
  for (map<std::string,bufferlist>::iterator it = index.begin();
      it != index.end(); ++it) {
    keys.insert(string(it->second.c_str(), it->second.length())
        .substr(1,it->second.length()));
  }
  vector<std::string> all_names;
  vector<int> all_sizes(index.size());
  vector<int> all_versions(index.size());
  vector<bufferlist> all_unwrit(index.size());
  vector<map<std::string,bufferlist> > all_maps(keys.size());
  vector<map<std::string,bufferlist>::iterator> its(keys.size());
  unsigned done = 0;
  vector<bool> dones(keys.size());
  ret << std::endl << string(150,'-') << std::endl;
  // print the index: one centered "key | index_data" row per entry
  for (map<std::string,bufferlist>::iterator it = index.begin();
      it != index.end(); ++it){
    index_data idata;
    auto b = it->second.cbegin();
    idata.decode(b);
    string s = idata.str();
    ret << "|" << string((148 -
        ((*it).first.length()+s.length()+3))/2,' ');
    ret << (*it).first;
    ret << " | ";
    ret << string(idata.str());
    ret << string((148 -
        ((*it).first.length()+s.length()+3))/2,' ');
    ret << "|\t";
    all_names.push_back(idata.obj);
    ret << std::endl << string(150,'-') << std::endl;
  }
  int indexer = 0;
  //get the object names and sizes
  for(vector<std::string>::iterator it = all_names.begin(); it
      != all_names.end();
      ++it) {
    librados::ObjectReadOperation oro;
    librados::AioCompletion *aioc = rados.aio_create_completion();
    oro.omap_get_vals2("", LONG_MAX, &all_maps[indexer], nullptr, &err);
    oro.getxattr("unwritable", &all_unwrit[indexer], &err);
    io_ctx.aio_operate(*it, aioc, &oro, NULL);
    aioc->wait_for_complete();
    if (aioc->get_return_value() < 0) {
      ret << "reading" << *it << "failed: " << err << std::endl;
      //return ret.str();
    }
    all_sizes[indexer] = all_maps[indexer].size();
    all_versions[indexer] = aioc->get_version64();
    indexer++;
    aioc->release();
  }
  // header table: one 25-char column per bucket with name/size/version/flag
  ret << "///////////////////OBJECT NAMES////////////////" << std::endl;
  //HEADERS
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    ret << "---------------------------\t";
  }
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    ret << "|" << string((25 -
        (string("Bucket: ").length() + all_names[i].length()))/2, ' ');
    ret << "Bucket: " << all_names[i];
    ret << string((25 -
        (string("Bucket: ").length() + all_names[i].length()))/2, ' ') << "|\t";
  }
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    its[i] = all_maps[i].begin();
    ret << "|" << string((25 - (string("size: ").length()
        + to_string("",all_sizes[i]).length()))/2, ' ');
    ret << "size: " << all_sizes[i];
    ret << string((25 - (string("size: ").length()
        + to_string("",all_sizes[i]).length()))/2, ' ') << "|\t";
  }
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    its[i] = all_maps[i].begin();
    ret << "|" << string((25 - (string("version: ").length()
        + to_string("",all_versions[i]).length()))/2, ' ');
    ret << "version: " << all_versions[i];
    ret << string((25 - (string("version: ").length()
        + to_string("",all_versions[i]).length()))/2, ' ') << "|\t";
  }
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    its[i] = all_maps[i].begin();
    ret << "|" << string((25 - (string("unwritable? ").length()
        + 1))/2, ' ');
    ret << "unwritable? " << string(all_unwrit[i].c_str(),
        all_unwrit[i].length());
    ret << string((25 - (string("unwritable? ").length()
        + 1))/2, ' ') << "|\t";
  }
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    ret << "---------------------------\t";
  }
  ret << std::endl;
  ret << "///////////////////THE ACTUAL BLOCKS////////////////" << std::endl;
  ret << std::endl;
  for (int i = 0; i < indexer; i++) {
    ret << "---------------------------\t";
  }
  ret << std::endl;
  //each time through this part is two lines
  // walk all bucket maps in parallel, printing one "key | value" cell per
  // bucket column until every bucket's iterator is exhausted
  while(done < keys.size()) {
    for(int i = 0; i < indexer; i++) {
      if(dones[i]){
	ret << "                          \t";
      } else {
	if (its[i] == all_maps[i].end()){
	  done++;
	  dones[i] = true;
	  ret << "                          \t";
	} else {
	  ret << "|" << string((25 -
	      ((*its[i]).first.length()+its[i]->second.length()+3))/2,' ');
	  ret << (*its[i]).first;
	  ret << " | ";
	  ret << string(its[i]->second.c_str(), its[i]->second.length());
	  ret << string((25 -
	      ((*its[i]).first.length()+its[i]->second.length()+3))/2,' ');
	  ret << "|\t";
	  ++(its[i]);
	}
      }
    }
    ret << std::endl;
    for (int i = 0; i < indexer; i++) {
      if(dones[i]){
	ret << "                          \t";
      } else {
	ret << "---------------------------\t";
      }
    }
    ret << std::endl;
  }
  return ret.str();
}
| 74,925 | 31.019658 | 87 | cc |
null | ceph-main/src/key_value_store/kv_flat_btree_async.h | /*
* Uses a two-level B-tree to store a set of key-value pairs.
*
* September 2, 2012
* Eleanor Cawthon
* [email protected]
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef KVFLATBTREEASYNC_H_
#define KVFLATBTREEASYNC_H_
#define ESUICIDE 134
#define EPREFIX 136
#define EFIRSTOBJ 138
#include "key_value_store/key_value_structure.h"
#include "include/utime.h"
#include "include/types.h"
#include "include/encoding.h"
#include "common/ceph_mutex.h"
#include "common/Clock.h"
#include "common/Formatter.h"
#include "global/global_context.h"
#include "include/rados/librados.hpp"
#include <cfloat>
#include <queue>
#include <sstream>
#include <stdarg.h>
using ceph::bufferlist;
// Op-type identifiers: set_up_ops pairs one of these with an object name so
// that perform_ops knows how to interpret and error-handle each
// ObjectWriteOperation (see set_up_ops/perform_ops below).
enum {
  ADD_PREFIX = 1,
  MAKE_OBJECT = 2,
  UNWRITE_OBJECT = 3,
  RESTORE_OBJECT = 4,
  REMOVE_OBJECT = 5,
  REMOVE_PREFIX = 6,
  AIO_MAKE_OBJECT = 7
};
struct rebalance_args;
/**
* stores information about a key in the index.
*
* prefix is "0" unless key is "", in which case it is "1". This ensures that
* the object with key "" will always be the highest key in the index.
*/
struct key_data {
  std::string raw_key;
  std::string prefix;

  key_data()
  {}

  /**
   * @pre: key is a raw key (does not contain a prefix)
   */
  key_data(std::string key)
  : raw_key(key)
  {
    // "" gets prefix "1" so the empty key encodes higher than any other key.
    if (raw_key.empty()) {
      prefix = "1";
    } else {
      prefix = "0";
    }
  }

  bool operator==(key_data k) const {
    return prefix == k.prefix && raw_key == k.raw_key;
  }
  bool operator!=(key_data k) const {
    return !(*this == k);
  }
  // Ordering is defined on the encoded (prefixed) form.
  bool operator<(key_data k) const {
    return encoded() < k.encoded();
  }
  bool operator>(key_data k) const {
    return encoded() > k.encoded();
  }

  /**
   * parses the prefix from encoded and stores the data in this.
   *
   * @pre: encoded has a prefix
   */
  void parse(std::string encoded) {
    prefix = encoded[0];
    raw_key = encoded.substr(1);
  }

  /**
   * returns a string containing the encoded (prefixed) key
   */
  std::string encoded() const {
    return prefix + raw_key;
  }

  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(raw_key, bl);
    encode(prefix, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(raw_key, p);
    decode(prefix, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(key_data)
/**
* Stores information read from a librados object.
*/
struct object_data {
  key_data min_kdata; //the max key from the previous index entry
  key_data max_kdata; //the max key, from the index
  std::string name; //the object's name
  std::map<std::string, bufferlist> omap; // the omap of the object
  bool unwritable; // an xattr that, if false, means an op is in
                   // progress and other clients should not write to it.
  uint64_t version; //the version at time of read
  uint64_t size; //the number of elements in the omap
  // default: no name/bounds, writable, version and size 0
  object_data()
  : unwritable(false),
    version(0),
    size(0)
  {}
  // name only; bounds and omap filled in later (e.g. by a read)
  object_data(std::string the_name)
  : name(the_name),
    unwritable(false),
    version(0),
    size(0)
  {}
  // bounds + name, no omap
  object_data(key_data min, key_data kdat, std::string the_name)
  : min_kdata(min),
    max_kdata(kdat),
    name(the_name),
    unwritable(false),
    version(0),
    size(0)
  {}
  // bounds + name + omap contents
  object_data(key_data min, key_data kdat, std::string the_name,
      std::map<std::string, bufferlist> the_omap)
  : min_kdata(min),
    max_kdata(kdat),
    name(the_name),
    omap(the_omap),
    unwritable(false),
    version(0),
    size(0)
  {}
  // bounds + name + known version (no omap)
  object_data(key_data min, key_data kdat, std::string the_name, int the_version)
  : min_kdata(min),
    max_kdata(kdat),
    name(the_name),
    unwritable(false),
    version(the_version),
    size(0)
  {}
  // NOTE: field order below is the wire format - do not reorder.
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(min_kdata, bl);
    encode(max_kdata, bl);
    encode(name, bl);
    encode(omap, bl);
    encode(unwritable, bl);
    encode(version, bl);
    encode(size, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(min_kdata, p);
    decode(max_kdata, p);
    decode(name, p);
    decode(omap, p);
    decode(unwritable, p);
    decode(version, p);
    decode(size, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(object_data)
/**
* information about objects to be created by a split or merge - stored in the
* index_data.
*/
struct create_data {
  key_data min;     // lower bound (exclusive) of keys in the new object
  key_data max;     // upper bound (inclusive) of keys in the new object
  std::string obj;  // name of the object to create
  create_data()
  {}
  create_data(key_data n, key_data x, std::string o)
  : min(n),
    max(x),
    obj(o)
  {}
  create_data(object_data o)
  : min(o.min_kdata),
    max(o.max_kdata),
    obj(o.name)
  {}
  // NOTE: the hand-written copy-assignment operator was removed; it was
  // identical to the implicitly-defined one, and declaring it suppressed the
  // implicit move operations (rule of zero).
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(min, bl);
    encode(max, bl);
    encode(obj, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(min, p);
    decode(max, p);
    decode(obj, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(create_data)
/**
* information about objects to be deleted by a split or merge - stored in the
* index_data.
*/
struct delete_data {
  key_data min;      // lower bound (exclusive) of keys in the object
  key_data max;      // upper bound (inclusive) of keys in the object
  std::string obj;   // name of the object to delete
  uint64_t version;  // version of the object at the time it was read
  delete_data()
  : version(0)
  {}
  delete_data(key_data n, key_data x, std::string o, uint64_t v)
  : min(n),
    max(x),
    obj(o),
    version(v)
  {}
  // NOTE: the hand-written copy-assignment operator was removed; it was
  // identical to the implicitly-defined one, and declaring it suppressed the
  // implicit move operations (rule of zero).
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(min, bl);
    encode(max, bl);
    encode(obj, bl);
    encode(version, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(min, p);
    decode(max, p);
    decode(obj, p);
    decode(version, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(delete_data)
/**
* The index object is a key value map that stores
* the highest key stored in an object as keys, and an index_data
* as the corresponding value. The index_data contains the encoded
* high and low keys (where keys in this object are > min_kdata and
* <= kdata), the name of the librados object where keys containing
* that range of keys are located, and information about split and
* merge operations that may need to be cleaned up if a client dies.
*/
struct index_data {
  //the encoded key corresponding to the object
  key_data kdata;
  //"1" if there is a prefix (because a split or merge is
  //in progress), otherwise ""
  std::string prefix;
  //the kdata of the previous index entry
  key_data min_kdata;
  utime_t ts; //time that a split/merge started
  //objects to be created
  std::vector<create_data > to_create;
  //objects to be deleted
  std::vector<delete_data > to_delete;
  //the name of the object where the key range is located.
  std::string obj;
  index_data()
  {}
  index_data(std::string raw_key)
  : kdata(raw_key)
  {}
  index_data(key_data max, key_data min, std::string o)
  : kdata(max),
    min_kdata(min),
    obj(o)
  {}
  index_data(create_data c)
  : kdata(c.max),
    min_kdata(c.min),
    obj(c.obj)
  {}
  // entries sort by their encoded max key
  bool operator<(const index_data &other) const {
    return (kdata.encoded() < other.kdata.encoded());
  }
  //true if there is a prefix and now - ts > timeout.
  bool is_timed_out(utime_t now, utime_t timeout) const;
  // NOTE: fields are encoded in this order (prefix first), which differs from
  // declaration order; the order is part of the wire format - do not change.
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(prefix, bl);
    encode(min_kdata, bl);
    encode(kdata, bl);
    encode(ts, bl);
    encode(to_create, bl);
    encode(to_delete, bl);
    encode(obj, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(prefix, p);
    decode(min_kdata, p);
    decode(kdata, p);
    decode(ts, p);
    decode(to_create, p);
    decode(to_delete, p);
    decode(obj, p);
    DECODE_FINISH(p);
  }
  /*
   * Prints a string representation of the information, in the following format:
   * (min_kdata/
   * kdata,
   * prefix
   * ts
   * elements of to_create, organized into (high key| obj name)
   * ;
   * elements of to_delete, organized into (high key| obj name | version number)
   * :
   * val)
   */
  std::string str() const {
    std::stringstream strm;
    strm << '(' << min_kdata.encoded() << "/" << kdata.encoded() << ','
	<< prefix;
    if (prefix == "1") {
      // a split/merge is in progress - show its start time and the pending
      // create/delete lists
      strm << ts.sec() << '.' << ts.usec();
      for(std::vector<create_data>::const_iterator it = to_create.begin();
	  it != to_create.end(); ++it) {
	strm << '(' << it->min.encoded() << '/' << it->max.encoded() << '|'
	    << it->obj << ')';
      }
      strm << ';';
      for(std::vector<delete_data >::const_iterator it = to_delete.begin();
	  it != to_delete.end(); ++it) {
	strm << '(' << it->min.encoded() << '/' << it->max.encoded() << '|'
	    << it->obj << '|'
	    << it->version << ')';
      }
      strm << ':';
    }
    strm << obj << ')';
    return strm.str();
  }
};
WRITE_CLASS_ENCODER(index_data)
/**
* Structure to store information read from the index for reuse.
*/
class IndexCache {
protected:
  // key -> (cached idata, time at which it was cached)
  std::map<key_data, std::pair<index_data, utime_t> > k2itmap;
  // time -> key; the ordered map makes the oldest entry easy to find for
  // eviction (see pop())
  std::map<utime_t, key_data> t2kmap;
  int cache_size;  // maximum number of entries before pop() evicts

public:
  // NOTE(review): this class is not internally synchronized; KvFlatBtreeAsync
  // guards access with icache_lock - confirm before using elsewhere.
  IndexCache(int n)
  : cache_size(n)
  {}
  /**
   * Inserts idata into the cache and removes whatever key mapped to before.
   * If the cache is full, pops the oldest entry.
   */
  void push(const std::string &key, const index_data &idata);
  /**
   * Inserts idata into the cache. If idata.kdata is already in the cache,
   * replaces the old one. Pops the oldest entry if the cache is full.
   */
  void push(const index_data &idata);
  /**
   * Removes the oldest entry from the cache
   */
  void pop();
  /**
   * Removes the value associated with kdata from both maps
   */
  void erase(key_data kdata);
  /**
   * gets the idata where key belongs. If none, returns -ENODATA.
   */
  int get(const std::string &key, index_data *idata) const;
  /**
   * Gets the idata where key goes and the one after it. If there are not
   * valid entries for both of them, returns -ENODATA.
   */
  int get(const std::string &key, index_data *idata, index_data * next_idata) const;
  void clear();
};
class KvFlatBtreeAsync;
/**
* These are used internally to translate aio operations into useful thread
* arguments.
*/
// Argument bundle handed to the pset() thread function (see aio_set).
struct aio_set_args {
  KvFlatBtreeAsync * kvba;  // instance the operation runs against
  std::string key;
  bufferlist val;
  bool exc;                 // "exclusive" flag forwarded from aio_set
  callback cb;              // invoked when the operation completes
  void * cb_args;           // opaque argument passed to cb
  int * err;                // out-param for the operation's return code
};
// Argument bundle handed to the prm() thread function (see aio_remove).
struct aio_rm_args {
  KvFlatBtreeAsync * kvba;  // instance the operation runs against
  std::string key;
  callback cb;              // invoked when the operation completes
  void * cb_args;           // opaque argument passed to cb
  int * err;                // out-param for the operation's return code
};
// Argument bundle handed to the pget() thread function (see aio_get).
struct aio_get_args {
  KvFlatBtreeAsync * kvba;  // instance the operation runs against
  std::string key;
  bufferlist * val;         // out-param for the value read
  bool exc;
  callback cb;              // invoked when the operation completes
  void * cb_args;           // opaque argument passed to cb
  int * err;                // out-param for the operation's return code
};
class KvFlatBtreeAsync : public KeyValueStructure {
protected:
  //don't change these once operations start being called - they are not
  //protected with mutexes!
  int k; // objects are split at 2k entries and rebalanced below k
         // (see split/rebalance/is_consistent)
  std::string index_name; // name of the index object ("index_object")
  librados::IoCtx io_ctx;
  std::string rados_id;
  std::string client_name; // rados_id + "." - prefix for objects this client creates
  librados::Rados rados;
  std::string pool_name;
  injection_t interrupt; // called before ObjectWriteOperations (see set_inject)
  int wait_ms;
  utime_t timeout; //declare a client dead if it goes this long without
		   //finishing a split/merge
  int cache_size;
  double cache_refresh; //read cache_size / cache_refresh entries each time the
			//index is read
  bool verbose;//if true, display lots of debug output
  //shared variables protected with mutexes
  ceph::mutex client_index_lock = ceph::make_mutex("client_index_lock");
  int client_index; //names of new objects are client_name.client_index
  ceph::mutex icache_lock = ceph::make_mutex("icache_lock");
  IndexCache icache;
  friend struct index_data;
  /**
   * finds the object in the index with the lowest key value that is greater
   * than idata.kdata. If idata.kdata is the max key, returns -EOVERFLOW. If
   * idata has a prefix and has timed out, cleans up.
   *
   * @param idata: idata for the object to search for.
   * @param out_data: the idata for the next object.
   *
   * @pre: idata must contain a key_data.
   * @post: out_data contains complete information
   */
  int next(const index_data &idata, index_data * out_data);
  /**
   * finds the object in the index with the lowest key value that is greater
   * than idata.kdata. If idata.kdata is the lowest key, returns -ERANGE If
   * idata has a prefix and has timed out, cleans up.
   *
   * @param idata: idata for the object to search for.
   * @param out_data: the idata for the next object.
   *
   * @pre: idata must contain a key_data.
   * @post: out_data contains complete information
   */
  int prev(const index_data &idata, index_data * out_data);
  /**
   * finds the index_data where a key belongs, from cache if possible. If it
   * reads the index object, it will read the first cache_size entries after
   * key and put them in the cache.
   *
   * @param key: the key to search for
   * @param idata: the index_data for the first index value such that idata.key
   * is greater than key.
   * @param next_idata: if not NULL, this will be set to the idata after idata
   * @param force_update: if false, will try to read from cache first.
   *
   * @pre: key is not encoded
   * @post: idata contains complete information
   * stored
   */
  int read_index(const std::string &key, index_data * idata,
      index_data * next_idata, bool force_update);
  /**
   * Reads obj and generates information about it. Iff the object has >= 2k
   * entries, reads the whole omap and then splits it.
   *
   * @param idata: index data for the object being split
   * @pre: idata contains a key and an obj
   * @post: idata.obj has been split and icache has been updated
   * @return -EBALANCE if obj does not need to be split, 0 if split successful,
   * error from read_object or perform_ops if there is one.
   */
  int split(const index_data &idata);
  /**
   * reads o1 and the next object after o1 and, if necessary, rebalances them.
   * if hk1 is the highest key in the index, calls rebalance on the next highest
   * key.
   *
   * @param idata: index data for the object being rebalanced
   * @param next_idata: index data for the next object. If blank, will read.
   * @pre: idata contains a key and an obj
   * @post: idata.obj has been rebalanced and icache has been updated
   * @return -EBALANCE if no change needed, -ENOENT if o1 does not exist,
   * -ECANCELED if second object does not exist, otherwise, error from
   * perform_ops
   */
  int rebalance(const index_data &idata1, const index_data &next_idata);
  /**
   * performs an ObjectReadOperation to populate odata
   *
   * @post: odata has all information about obj except for key (which is "")
   */
  int read_object(const std::string &obj, object_data * odata);
  /**
   * performs a maybe_read_for_balance ObjectOperation so the omap is only
   * read if the object is out of bounds.
   */
  int read_object(const std::string &obj, rebalance_args * args);
  /**
   * sets up owo to change the index in preparation for a split/merge.
   *
   * @param to_create: vector of object_data to be created.
   * @param to_delete: vector of object_data to be deleted.
   * @param owo: the ObjectWriteOperation to set up
   * @param idata: will be populated by index data for this op.
   * @param err: error code reference to pass to omap_cmp
   * @pre: entries in to_create and to_delete must have keys and names.
   */
  void set_up_prefix_index(
      const std::vector<object_data> &to_create,
      const std::vector<object_data> &to_delete,
      librados::ObjectWriteOperation * owo,
      index_data * idata,
      int * err);
  /**
   * sets up all make, mark, restore, and delete ops, as well as the remove
   * prefix op, based on idata.
   *
   * @param create_vector: vector of data about the objects to be created.
   * @pre: entries in create_data must have names and omaps and be in idata
   * order
   * @param delete_vector: vector of data about the objects to be deleted
   * @pre: entries in to_delete must have versions and be in idata order
   * @param ops: the owos to set up. the pair is a pair of op identifiers
   * and names of objects - set_up_ops fills these in.
   * @pre: ops must be the correct size and the ObjectWriteOperation pointers
   * must be valid.
   * @param idata: the idata with information about how to set up the ops
   * @pre: idata has valid to_create and to_delete
   * @param err: the int to get the error value for omap_cmp
   */
  void set_up_ops(
      const std::vector<object_data> &create_vector,
      const std::vector<object_data> &delete_vector,
      std::vector<std::pair<std::pair<int, std::string>, librados::ObjectWriteOperation*> > * ops,
      const index_data &idata,
      int * err);
  /**
   * sets up owo to exclusive create, set omap to to_set, and set
   * unwritable to "0"
   */
  void set_up_make_object(
      const std::map<std::string, bufferlist> &to_set,
      librados::ObjectWriteOperation *owo);
  /**
   * sets up owo to assert object version and that object version is
   * writable,
   * then mark it unwritable.
   *
   * @param ver: if this is 0, no version is asserted.
   */
  void set_up_unwrite_object(
      const int &ver, librados::ObjectWriteOperation *owo);
  /**
   * sets up owo to assert that an object is unwritable and then mark it
   * writable
   */
  void set_up_restore_object(
      librados::ObjectWriteOperation *owo);
  /**
   * sets up owo to assert that the object is unwritable and then remove it
   */
  void set_up_delete_object(
      librados::ObjectWriteOperation *owo);
  /**
   * perform the operations in ops and handles errors.
   *
   * @param debug_prefix: what to print at the beginning of debug output
   * @param idata: the idata for the object being operated on, to be
   * passed to cleanup if necessary
   * @param ops: this contains an int identifying the type of op,
   * a string that is the name of the object to operate on, and a pointer
   * to the ObjectWriteOperation to use. All of this must be complete.
   * @post: all operations are performed and most errors are handled
   * (e.g., cleans up if an assertion fails). If an unknown error is found,
   * returns it.
   */
  int perform_ops( const std::string &debug_prefix,
      const index_data &idata,
      std::vector<std::pair<std::pair<int, std::string>, librados::ObjectWriteOperation*> > * ops);
  /**
   * Called when a client discovers that another client has died during a
   * split or a merge. cleans up after that client.
   *
   * @param idata: the index data parsed from the index entry left by the dead
   * client.
   * @param error: the error that caused the client to realize the other client
   * died (should be -ENOENT or -ETIMEDOUT)
   * @post: rolls forward if -ENOENT, otherwise rolls back.
   */
  int cleanup(const index_data &idata, const int &error);
  /**
   * does the ObjectWriteOperation and splits, reads the index, and/or retries
   * until success.
   */
  int set_op(const std::string &key, const bufferlist &val,
      bool update_on_existing, index_data &idata);
  /**
   * does the ObjectWriteOperation and merges, reads the index, and/or retries
   * until success.
   */
  int remove_op(const std::string &key, index_data &idata, index_data &next_idata);
  /**
   * does the ObjectWriteOperation and reads the index and/or retries
   * until success.
   */
  int get_op(const std::string &key, bufferlist * val, index_data &idata);
  /**
   * does the ObjectWriteOperation and splits, reads the index, and/or retries
   * until success.
   */
  int handle_set_rm_errors(int &err, std::string key, std::string obj,
      index_data * idata, index_data * next_idata);
  /**
   * called by aio_set, aio_remove, and aio_get, respectively.
   */
  static void* pset(void *ptr);
  static void* prm(void *ptr);
  static void* pget(void *ptr);
public:
  //interruption methods, for correctness testing
  /**
   * returns 0
   */
  int nothing() override;
  /**
   * 10% chance of waiting wait_ms seconds
   */
  int wait() override;
  /**
   * 10% chance of killing the client.
   */
  int suicide() override;
  KvFlatBtreeAsync(int k_val, std::string name, int cache, double cache_r,
      bool verb)
  : k(k_val),
    index_name("index_object"),
    rados_id(name),
    client_name(std::string(name).append(".")),
    pool_name("rbd"),
    interrupt(&KeyValueStructure::nothing),
    wait_ms(0),
    timeout(100000,0),
    cache_size(cache),
    cache_refresh(cache_r),
    verbose(verb),
    client_index(0),
    icache(cache)
  {}
  /**
   * creates a string with an int at the end.
   *
   * @param s: the string on the left
   * @param i: the int to be appended to the string
   * @return the string
   */
  static std::string to_string(std::string s, int i);
  /**
   * returns in encoded
   */
  static bufferlist to_bl(const std::string &in) {
    bufferlist bl;
    bl.append(in);
    return bl;
  }
  /**
   * returns idata encoded;
   */
  static bufferlist to_bl(const index_data &idata) {
    bufferlist bl;
    idata.encode(bl);
    return bl;
  }
  /**
   * returns the rados_id of this KvFlatBtreeAsync
   */
  std::string get_name();
  /**
   * sets this kvba to call inject before every ObjectWriteOperation.
   * If inject is wait and wait_time is set, wait will have a 10% chance of
   * sleeping for waite_time milliseconds.
   */
  void set_inject(injection_t inject, int wait_time) override;
  /**
   * sets up the rados and io_ctx of this KvFlatBtreeAsync. If the don't already
   * exist, creates the index and max object.
   */
  int setup(int argc, const char** argv) override;
  int set(const std::string &key, const bufferlist &val,
      bool update_on_existing) override;
  int remove(const std::string &key) override;
  /**
   * returns true if all of the following are true:
   *
   * all objects are accounted for in the index or a prefix
   * (i.e., no floating objects)
   * all objects have k <= size <= 2k
   * all keys in an object are within the specified predicted by the index
   *
   * if any of those fails, states that the problem(s) are, and prints str().
   *
   * @pre: no operations are in progress
   */
  bool is_consistent() override;
  /**
   * returns an ASCII representation of the index and sub objects, showing
   * stats about each object and all omaps. Don't use if you have more than
   * about 10 objects.
   */
  std::string str() override;
  int get(const std::string &key, bufferlist *val) override;
  //async versions of these methods
  void aio_get(const std::string &key, bufferlist *val, callback cb,
      void *cb_args, int * err) override;
  void aio_set(const std::string &key, const bufferlist &val, bool exclusive,
      callback cb, void * cb_args, int * err) override;
  void aio_remove(const std::string &key, callback cb, void *cb_args, int * err) override;
  //these methods that deal with multiple keys at once are efficient, but make
  //no guarantees about atomicity!
  /**
   * Removes all objects and resets the store as if setup had just run. Makes no
   * attempt to do this safely - make sure this is the only operation running
   * when it is called!
   */
  int remove_all() override;
  /**
   * This does not add prefixes to the index and therefore DOES NOT guarantee
   * consistency! It is ONLY safe if there is only one instance at a time.
   * It follows the same general logic as a rebalance, but
   * with all objects that contain any of the keys in in_map. It is O(n), where
   * n is the number of librados objects it has to change. Higher object sizes
   * (i.e., k values) also decrease the efficiency of this method because it
   * copies all of the entries in each object it modifies. Writing new objects
   * is done in parallel.
   *
   * This is efficient if:
   * * other clients are very unlikely to be modifying any of the objects while
   * this operation is in progress
   * * The entries in in_map are close together
   * * It is especially efficient for initially entering lots of entries into
   * an empty structure.
   *
   * It is very inefficient compared to setting one key and/or will starve if:
   * * other clients are modifying the objects it tries to modify
   * * The keys are distributed across the range of keys in the store
   * * there is a small number of keys compared to k
   */
  int set_many(const std::map<std::string, bufferlist> &in_map) override;
  int get_all_keys(std::set<std::string> *keys) override;
  int get_all_keys_and_values(std::map<std::string,bufferlist> *kv_map) override;
};
#endif /* KVFLATBTREEASYNC_H_ */
| 25,031 | 26.875278 | 99 | h |
null | ceph-main/src/key_value_store/kvs_arg_types.h | /*
* Argument types used by cls_kvs.cc
*
* Created on: Aug 10, 2012
* Author: eleanor
*/
#ifndef CLS_KVS_H_
#define CLS_KVS_H_
#define EBALANCE 137
#include "include/encoding.h"
#include "key_value_store/kv_flat_btree_async.h"
using ceph::bufferlist;
// Input blob for the cls_kvs size-assertion operation (see cls_kvs.cc).
struct assert_size_args {
  uint64_t bound; //the size to compare to - should be k or 2k
  uint64_t comparator; //should be CEPH_OSD_CMPXATTR_OP_EQ,
		       //CEPH_OSD_CMPXATTR_OP_LT, or
		       //CEPH_OSD_CMPXATTR_OP_GT
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(bound, bl);
    encode(comparator, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(bound, p);
    decode(comparator, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(assert_size_args)
// Input/output blob for the cls_kvs key-lookup operation (see cls_kvs.cc):
// carries the search key in, and the matching index entry (plus the one
// after it) out.
struct idata_from_key_args {
  std::string key;
  index_data idata;       // index entry covering key
  index_data next_idata;  // the entry after idata
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(key, bl);
    encode(idata, bl);
    encode(next_idata, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(key, p);
    decode(idata, p);
    decode(next_idata, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(idata_from_key_args)
// Input/output blob for the cls_kvs index-entry lookup keyed by an existing
// index entry (see cls_kvs.cc).
struct idata_from_idata_args {
  index_data idata;       // entry to search from
  index_data next_idata;  // the entry after idata
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(idata, bl);
    encode(next_idata, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(idata, p);
    decode(next_idata, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(idata_from_idata_args)
// Input blob for the cls_kvs omap-set operation (see cls_kvs.cc).
struct omap_set_args {
  std::map<std::string, bufferlist> omap;  // key/value pairs to set
  uint64_t bound;                          // size bound to enforce
  bool exclusive;                          // if true, fail on existing keys
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(omap, bl);
    encode(bound, bl);
    encode(exclusive, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(omap, p);
    decode(bound, p);
    decode(exclusive, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(omap_set_args)
// Input blob for the cls_kvs omap-remove operation (see cls_kvs.cc).
struct omap_rm_args {
  std::set<std::string> omap;  // keys to remove
  uint64_t bound;              // size bound to enforce
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(omap, bl);
    encode(bound, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(omap, p);
    decode(bound, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(omap_rm_args)
// Input/output blob for the maybe_read_for_balance cls_kvs operation
// (see cls_kvs.cc and KvFlatBtreeAsync::read_object(obj, rebalance_args*)).
struct rebalance_args {
  object_data odata;    // filled in with the object's contents/metadata
  uint64_t bound;       // size bound to compare against
  uint64_t comparator;  // CEPH_OSD_CMPXATTR_OP_* comparison to apply
  void encode(bufferlist &bl) const {
    ENCODE_START(1,1,bl);
    encode(odata, bl);
    encode(bound, bl);
    encode(comparator, bl);
    ENCODE_FINISH(bl);
  }
  void decode(bufferlist::const_iterator &p) {
    DECODE_START(1, p);
    decode(odata,p);
    decode(bound, p);
    decode(comparator, p);
    DECODE_FINISH(p);
  }
};
WRITE_CLASS_ENCODER(rebalance_args)
#endif /* CLS_KVS_H_ */
| 3,030 | 19.903448 | 62 | h |
null | ceph-main/src/kv/KeyValueDB.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "KeyValueDB.h"
#include "RocksDBStore.h"
using std::map;
using std::string;
// Factory: instantiate the KeyValueDB backend named by `type`.
// Only "rocksdb" is supported; any other type yields NULL (caller owns the
// returned pointer).
KeyValueDB *KeyValueDB::create(CephContext *cct, const string& type,
			       const string& dir,
			       map<string,string> options,
			       void *p)
{
  if (type != "rocksdb") {
    return NULL;
  }
  return new RocksDBStore(cct, dir, options, p);
}
// Probe whether a backend of `type` can be initialized at `dir`;
// returns -EINVAL for unknown backend types.
int KeyValueDB::test_init(const string& type, const string& dir)
{
  if (type != "rocksdb") {
    return -EINVAL;
  }
  return RocksDBStore::_test_init(dir);
}
| 603 | 20.571429 | 70 | cc |
null | ceph-main/src/kv/KeyValueDB.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef KEY_VALUE_DB_H
#define KEY_VALUE_DB_H
#include "include/buffer.h"
#include <ostream>
#include <set>
#include <map>
#include <optional>
#include <string>
#include <boost/scoped_ptr.hpp>
#include "include/encoding.h"
#include "common/Formatter.h"
#include "common/perf_counters.h"
#include "common/PriorityCache.h"
/**
* Defines virtual interface to be implemented by key value store
*
* Kyoto Cabinet should implement this
*/
class KeyValueDB {
public:
  // A batch of mutations, built up via set/rmkey/etc. and applied through
  // KeyValueDB::submit_transaction().
  class TransactionImpl {
  public:
    /// Set Keys
    void set(
      const std::string &prefix,                 ///< [in] Prefix for keys, or CF name
      const std::map<std::string, ceph::buffer::list> &to_set ///< [in] keys/values to set
    ) {
      for (auto it = to_set.cbegin(); it != to_set.cend(); ++it)
	set(prefix, it->first, it->second);
    }

    /// Set Keys (via encoded ceph::buffer::list)
    /// Expected encoding: u32 count, then `count` (key, value) pairs.
    void set(
      const std::string &prefix,      ///< [in] prefix, or CF name
      ceph::buffer::list& to_set_bl   ///< [in] encoded key/values to set
    ) {
      using ceph::decode;
      auto p = std::cbegin(to_set_bl);
      uint32_t num;
      decode(num, p);
      while (num--) {
	std::string key;
	ceph::buffer::list value;
	decode(key, p);
	decode(value, p);
	set(prefix, key, value);
      }
    }

    /// Set Key
    virtual void set(
      const std::string &prefix,      ///< [in] Prefix or CF for the key
      const std::string &k,           ///< [in] Key to set
      const ceph::buffer::list &bl    ///< [in] Value to set
    ) = 0;
    virtual void set(
      const std::string &prefix,
      const char *k,
      size_t keylen,
      const ceph::buffer::list& bl) {
      set(prefix, std::string(k, keylen), bl);
    }

    /// Removes Keys (via encoded ceph::buffer::list)
    /// Expected encoding: u32 count, then `count` keys.
    void rmkeys(
      const std::string &prefix,     ///< [in] Prefix or CF to search for
      ceph::buffer::list &keys_bl    ///< [in] Keys to remove
    ) {
      using ceph::decode;
      auto p = std::cbegin(keys_bl);
      uint32_t num;
      decode(num, p);
      while (num--) {
	std::string key;
	decode(key, p);
	rmkey(prefix, key);
      }
    }

    /// Removes Keys
    void rmkeys(
      const std::string &prefix,        ///< [in] Prefix/CF to search for
      const std::set<std::string> &keys ///< [in] Keys to remove
    ) {
      for (auto it = keys.cbegin(); it != keys.cend(); ++it)
	rmkey(prefix, *it);
    }

    /// Remove Key
    virtual void rmkey(
      const std::string &prefix,   ///< [in] Prefix/CF to search for
      const std::string &k         ///< [in] Key to remove
    ) = 0;
    virtual void rmkey(
      const std::string &prefix,   ///< [in] Prefix to search for
      const char *k,	           ///< [in] Key to remove
      size_t keylen
    ) {
      rmkey(prefix, std::string(k, keylen));
    }

    /// Remove Single Key which exists and was not overwritten.
    /// This API is only related to performance optimization, and should only be
    /// re-implemented by log-insert-merge tree based keyvalue stores(such as RocksDB).
    /// If a key is overwritten (by calling set multiple times), then the result
    /// of calling rm_single_key on this key is undefined.
    virtual void rm_single_key(
      const std::string &prefix,   ///< [in] Prefix/CF to search for
      const std::string &k	   ///< [in] Key to remove
    ) { return rmkey(prefix, k);}

    /// Removes keys beginning with prefix
    virtual void rmkeys_by_prefix(
      const std::string &prefix    ///< [in] Prefix/CF by which to remove keys
    ) = 0;

    virtual void rm_range_keys(
      const std::string &prefix,   ///< [in] Prefix by which to remove keys
      const std::string &start,    ///< [in] The start bound of remove keys
      const std::string &end       ///< [in] The end bound of remove keys
      ) = 0;

    /// Merge value into key
    virtual void merge(
      const std::string &prefix,   ///< [in] Prefix/CF ==> MUST match some established merge operator
      const std::string &key,      ///< [in] Key to be merged
      const ceph::buffer::list  &value  ///< [in] value to be merged into key
    ) { ceph_abort_msg("Not implemented"); }

    virtual ~TransactionImpl() {}
  };
/// create a new instance
static KeyValueDB *create(CephContext *cct, const std::string& type,
const std::string& dir,
std::map<std::string,std::string> options = {},
void *p = NULL);
/// test whether we can successfully initialize; may have side effects (e.g., create)
static int test_init(const std::string& type, const std::string& dir);
virtual int init(std::string option_str="") = 0;
virtual int open(std::ostream &out, const std::string& cfs="") = 0;
// std::vector cfs contains column families to be created when db is created.
virtual int create_and_open(std::ostream &out, const std::string& cfs="") = 0;
virtual int open_read_only(std::ostream &out, const std::string& cfs="") {
return -ENOTSUP;
}
virtual void close() { }
/// Try to repair K/V database. rocksdb requires that database must be not opened.
virtual int repair(std::ostream &out) { return 0; }
virtual Transaction get_transaction() = 0;
virtual int submit_transaction(Transaction) = 0;
virtual int submit_transaction_sync(Transaction t) {
return submit_transaction(t);
}
/// Retrieve Keys
virtual int get(
const std::string &prefix, ///< [in] Prefix/CF for key
const std::set<std::string> &key, ///< [in] Key to retrieve
std::map<std::string, ceph::buffer::list> *out ///< [out] Key value retrieved
) = 0;
virtual int get(const std::string &prefix, ///< [in] prefix or CF name
const std::string &key, ///< [in] key
ceph::buffer::list *value) { ///< [out] value
std::set<std::string> ks;
ks.insert(key);
std::map<std::string,ceph::buffer::list> om;
int r = get(prefix, ks, &om);
if (om.find(key) != om.end()) {
*value = std::move(om[key]);
} else {
*value = ceph::buffer::list();
r = -ENOENT;
}
return r;
}
virtual int get(const std::string &prefix,
const char *key, size_t keylen,
ceph::buffer::list *value) {
return get(prefix, std::string(key, keylen), value);
}
// This superclass is used both by kv iterators *and* by the ObjectMap
// omap iterator. The class hierarchies are unfortunately tied together
// by the legacy DBOjectMap implementation :(.
  class SimplestIteratorImpl {
  public:
    /// position at the first key; returns status (0 on success)
    virtual int seek_to_first() = 0;
    /// position at the first key strictly greater than 'after'
    virtual int upper_bound(const std::string &after) = 0;
    /// position at the first key greater than or equal to 'to'
    virtual int lower_bound(const std::string &to) = 0;
    /// true while the iterator points at a live entry
    virtual bool valid() = 0;
    virtual int next() = 0;
    virtual std::string key() = 0;
    // optional secondary key component; default is "no tail key"
    virtual std::string tail_key() {
      return "";
    }
    virtual ceph::buffer::list value() = 0;
    /// last operation's error status (0 = ok)
    virtual int status() = 0;
    virtual ~SimplestIteratorImpl() {}
  };
  // Bidirectional iterator over a single prefix/CF.
  class IteratorImpl : public SimplestIteratorImpl {
  public:
    virtual ~IteratorImpl() {}
    virtual int seek_to_last() = 0;
    virtual int prev() = 0;
    /// (prefix, key) pair for the current position
    virtual std::pair<std::string, std::string> raw_key() = 0;
    // Zero-copy access to the current value.
    // NOTE(review): this only accepts values whose total length is 0 or 1
    // byte and aborts otherwise, while the WholeSpaceIteratorImpl variant
    // returns the first buffer of any non-empty list — confirm this
    // asymmetry is intended before relying on it.
    virtual ceph::buffer::ptr value_as_ptr() {
      ceph::buffer::list bl = value();
      if (bl.length() == 1) {
        return *bl.buffers().begin();
      } else if (bl.length() == 0) {
        return ceph::buffer::ptr();
      } else {
        ceph_abort();
      }
    }
  };
typedef std::shared_ptr< IteratorImpl > Iterator;
// This is the low-level iterator implemented by the underlying KV store.
  // This is the low-level iterator implemented by the underlying KV store.
  // It spans every prefix/column family; prefix-scoped variants take the
  // target prefix explicitly.
  class WholeSpaceIteratorImpl {
  public:
    virtual int seek_to_first() = 0;
    virtual int seek_to_first(const std::string &prefix) = 0;
    virtual int seek_to_last() = 0;
    virtual int seek_to_last(const std::string &prefix) = 0;
    virtual int upper_bound(const std::string &prefix, const std::string &after) = 0;
    virtual int lower_bound(const std::string &prefix, const std::string &to) = 0;
    virtual bool valid() = 0;
    virtual int next() = 0;
    virtual int prev() = 0;
    virtual std::string key() = 0;
    /// (prefix, key) for the current entry
    virtual std::pair<std::string,std::string> raw_key() = 0;
    /// true if the current raw key belongs to 'prefix'
    virtual bool raw_key_is_prefixed(const std::string &prefix) = 0;
    virtual ceph::buffer::list value() = 0;
    // Zero-copy view of the value: the first buffer of a non-empty list,
    // or an empty ptr. (Assumes single-buffer values; see IteratorImpl.)
    virtual ceph::buffer::ptr value_as_ptr() {
      ceph::buffer::list bl = value();
      if (bl.length()) {
        return *bl.buffers().begin();
      } else {
        return ceph::buffer::ptr();
      }
    }
    virtual int status() = 0;
    // size hints; 0 means "unknown / not provided by the backend"
    virtual size_t key_size() {
      return 0;
    }
    virtual size_t value_size() {
      return 0;
    }
    virtual ~WholeSpaceIteratorImpl() { }
  };
typedef std::shared_ptr< WholeSpaceIteratorImpl > WholeSpaceIterator;
private:
// This class filters a WholeSpaceIterator by a prefix.
// Performs as a dummy wrapper over WholeSpaceIterator
// if prefix is empty
class PrefixIteratorImpl : public IteratorImpl {
const std::string prefix;
WholeSpaceIterator generic_iter;
public:
PrefixIteratorImpl(const std::string &prefix, WholeSpaceIterator iter) :
prefix(prefix), generic_iter(iter) { }
~PrefixIteratorImpl() override { }
int seek_to_first() override {
return prefix.empty() ?
generic_iter->seek_to_first() :
generic_iter->seek_to_first(prefix);
}
int seek_to_last() override {
return prefix.empty() ?
generic_iter->seek_to_last() :
generic_iter->seek_to_last(prefix);
}
int upper_bound(const std::string &after) override {
return generic_iter->upper_bound(prefix, after);
}
int lower_bound(const std::string &to) override {
return generic_iter->lower_bound(prefix, to);
}
bool valid() override {
if (!generic_iter->valid())
return false;
if (prefix.empty())
return true;
return prefix.empty() ?
true :
generic_iter->raw_key_is_prefixed(prefix);
}
int next() override {
return generic_iter->next();
}
int prev() override {
return generic_iter->prev();
}
std::string key() override {
return generic_iter->key();
}
std::pair<std::string, std::string> raw_key() override {
return generic_iter->raw_key();
}
ceph::buffer::list value() override {
return generic_iter->value();
}
ceph::buffer::ptr value_as_ptr() override {
return generic_iter->value_as_ptr();
}
int status() override {
return generic_iter->status();
}
};
protected:
Iterator make_iterator(const std::string &prefix, WholeSpaceIterator w_iter) {
return std::make_shared<PrefixIteratorImpl>(
prefix,
w_iter);
}
public:
typedef uint32_t IteratorOpts;
static const uint32_t ITERATOR_NOCACHE = 1;
  // Optional key-range hint for iterators; an unset bound means unbounded
  // on that side.
  struct IteratorBounds {
    std::optional<std::string> lower_bound;
    std::optional<std::string> upper_bound;
  };
virtual WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) = 0;
virtual Iterator get_iterator(const std::string &prefix, IteratorOpts opts = 0, IteratorBounds bounds = IteratorBounds()) {
return make_iterator(prefix,
get_wholespace_iterator(opts));
}
  /// estimated on-disk size in bytes; per-category breakdown goes in 'extra'
  virtual uint64_t get_estimated_size(std::map<std::string,uint64_t> &extra) = 0;
  virtual int get_statfs(struct store_statfs_t *buf) {
    return -EOPNOTSUPP;
  }
  // Cache-tuning hooks; all optional, defaults report "not supported".
  virtual int set_cache_size(uint64_t) {
    return -EOPNOTSUPP;
  }
  virtual int set_cache_high_pri_pool_ratio(double ratio) {
    return -EOPNOTSUPP;
  }
  virtual int64_t get_cache_usage() const {
    return -EOPNOTSUPP;
  }
  virtual int64_t get_cache_usage(std::string prefix) const {
    return -EOPNOTSUPP;
  }
  // PriorityCache integration; null means the backend has none.
  virtual std::shared_ptr<PriorityCache::PriCache> get_priority_cache() const {
    return nullptr;
  }
  virtual std::shared_ptr<PriorityCache::PriCache> get_priority_cache(std::string prefix) const {
    return nullptr;
  }
  virtual ~KeyValueDB() {}
  /// estimate space utilization for a prefix (in bytes); 0 if unknown
  virtual int64_t estimate_prefix_size(const std::string& prefix,
				       const std::string& key_prefix) {
    return 0;
  }
  /// compact the underlying store
  virtual void compact() {}
  /// compact the underlying store in async mode
  virtual void compact_async() {}
  /// compact db for all keys with a given prefix
  virtual void compact_prefix(const std::string& prefix) {}
  /// compact db for all keys with a given prefix, async
  virtual void compact_prefix_async(const std::string& prefix) {}
  /// compact keys in [start, end) under prefix; async variant below
  virtual void compact_range(const std::string& prefix,
			     const std::string& start, const std::string& end) {}
  virtual void compact_range_async(const std::string& prefix,
				   const std::string& start, const std::string& end) {}
// See RocksDB merge operator definition, we support the basic
// associative merge only right now.
  // See RocksDB merge operator definition, we support the basic
  // associative merge only right now.
  class MergeOperator {
  public:
    /// Merge into a key that doesn't exist
    virtual void merge_nonexistent(
      const char *rdata, size_t rlen,
      std::string *new_value) = 0;
    /// Merge into a key that does exist; ldata is the existing value,
    /// rdata the operand being merged in
    virtual void merge(
      const char *ldata, size_t llen,
      const char *rdata, size_t rlen,
      std::string *new_value) = 0;
    /// We use each operator name and each prefix to construct the overall RocksDB operator name for consistency check at open time.
    virtual const char *name() const = 0;

    virtual ~MergeOperator() {}
  };
  /// Setup one or more operators, this needs to be done BEFORE the DB is opened.
  virtual int set_merge_operator(const std::string& prefix,
				 std::shared_ptr<MergeOperator> mop) {
    return -EOPNOTSUPP;
  }
  /// dump backend statistics; default implementation emits nothing
  virtual void get_statistics(ceph::Formatter *f) {
    return;
  }
  /**
   * Return your perf counters if you have any.  Subclasses are not
   * required to implement this, and callers must respect a null return
   * value.
   */
  virtual PerfCounters *get_perf_counters() {
    return nullptr;
  }
  /**
   * Access implementation specific integral property corresponding
   * to passed property and prefix.
   * Return value is true if property is valid for prefix, populates out.
   */
  virtual bool get_property(
    const std::string &property,
    uint64_t *out) {
    return false;
  }
protected:
/// List of matching prefixes/ColumnFamilies and merge operators
std::vector<std::pair<std::string,
std::shared_ptr<MergeOperator> > > merge_ops;
};
#endif
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "include/stringify.h"
#include "KeyValueHistogram.h"
using std::map;
using std::string;
using ceph::Formatter;
#define KEY_SLAB 32
#define VALUE_SLAB 64
// Map a key length (bytes) to its histogram slab index.
int KeyValueHistogram::get_key_slab(size_t sz)
{
  return sz / KEY_SLAB;
}
// Render the half-open byte range a key slab covers, e.g. "[0,32)".
string KeyValueHistogram::get_key_slab_to_range(int slab)
{
  const int lo = slab * KEY_SLAB;
  const int hi = lo + KEY_SLAB;
  return "[" + stringify(lo) + "," + stringify(hi) + ")";
}
// Map a value length (bytes) to its histogram slab index.
int KeyValueHistogram::get_value_slab(size_t sz)
{
  return sz / VALUE_SLAB;
}
// Render the half-open byte range a value slab covers, e.g. "[0,64)".
string KeyValueHistogram::get_value_slab_to_range(int slab)
{
  const int lo = slab * VALUE_SLAB;
  const int hi = lo + VALUE_SLAB;
  return "[" + stringify(lo) + "," + stringify(hi) + ")";
}
// Record one (key, value) observation in the per-prefix histogram:
// bump the count and track the max length for both the key slab and the
// value slab nested under it.
void KeyValueHistogram::update_hist_entry(map<string, map<int, struct key_dist> >& key_hist,
  const string& prefix, size_t key_size, size_t value_size)
{
  uint32_t key_slab = get_key_slab(key_size);
  uint32_t value_slab = get_value_slab(value_size);
  // Hoist the nested lookups: the original re-navigated
  // key_hist[prefix][key_slab] (and its val_map) on every access.
  key_dist &kd = key_hist[prefix][key_slab];
  kd.count++;
  kd.max_len = std::max<size_t>(key_size, kd.max_len);
  value_dist &vd = kd.val_map[value_slab];
  vd.count++;
  vd.max_len = std::max<size_t>(value_size, vd.max_len);
}
// Emit the collected histograms through the given Formatter: a flat value
// distribution, then per-prefix key slabs each with their nested value slabs.
void KeyValueHistogram::dump(Formatter* f)
{
  f->open_object_section("rocksdb_value_distribution");
  // const& loops: the original iterated by value, copying each map entry
  // (including the nested key_dist maps) every iteration.
  for (const auto& i : value_hist) {
    f->dump_unsigned(get_value_slab_to_range(i.first).data(), i.second);
  }
  f->close_section();

  f->open_object_section("rocksdb_key_value_histogram");
  for (const auto& i : key_hist) {
    f->dump_string("prefix", i.first);
    f->open_object_section("key_hist");
    for (const auto& k : i.second) {
      f->dump_unsigned(get_key_slab_to_range(k.first).data(), k.second.count);
      f->dump_unsigned("max_len", k.second.max_len);
      f->open_object_section("value_hist");
      for (const auto& j : k.second.val_map) {
        f->dump_unsigned(get_value_slab_to_range(j.first).data(), j.second.count);
        f->dump_unsigned("max_len", j.second.max_len);
      }
      f->close_section();
    }
    f->close_section();
  }
  f->close_section();
}
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef KeyValueHistogram_H
#define KeyValueHistogram_H
#include <map>
#include <string>
#include "common/Formatter.h"
/**
*
* Key Value DB Histogram generator
*
*/
// Accumulates a two-level histogram of key/value sizes observed in a KV
// store: keys are bucketed into fixed-width slabs per prefix, and each key
// slab carries a nested distribution of the value sizes seen with it.
struct KeyValueHistogram {
  struct value_dist {
    uint64_t count;    // number of values in this slab
    uint32_t max_len;  // longest value seen in this slab (bytes)
  };
  struct key_dist {
    uint64_t count;    // number of keys in this slab
    uint32_t max_len;  // longest key seen in this slab (bytes)
    std::map<int, struct value_dist> val_map; ///< slab id to count, max length of value and key
  };

  // prefix -> key-slab-id -> key_dist (with nested value distribution)
  std::map<std::string, std::map<int, struct key_dist> > key_hist;
  // global value-slab-id -> count
  std::map<int, uint64_t> value_hist;
  int get_key_slab(size_t sz);
  std::string get_key_slab_to_range(int slab);
  int get_value_slab(size_t sz);
  std::string get_value_slab_to_range(int slab);
  void update_hist_entry(std::map<std::string, std::map<int, struct key_dist> >& key_hist,
    const std::string& prefix, size_t key_size, size_t value_size);
  void dump(ceph::Formatter* f);
};
#endif
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <filesystem>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <errno.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "rocksdb/db.h"
#include "rocksdb/table.h"
#include "rocksdb/env.h"
#include "rocksdb/slice.h"
#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/utilities/convenience.h"
#include "rocksdb/utilities/table_properties_collectors.h"
#include "rocksdb/merge_operator.h"
#include "common/perf_counters.h"
#include "common/PriorityCache.h"
#include "include/common_fwd.h"
#include "include/scope_guard.h"
#include "include/str_list.h"
#include "include/stringify.h"
#include "include/str_map.h"
#include "KeyValueDB.h"
#include "RocksDBStore.h"
#include "common/debug.h"
#define dout_context cct
#define dout_subsys ceph_subsys_rocksdb
#undef dout_prefix
#define dout_prefix *_dout << "rocksdb: "
namespace fs = std::filesystem;
using std::function;
using std::list;
using std::map;
using std::ostream;
using std::pair;
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;
using ceph::bufferlist;
using ceph::bufferptr;
using ceph::Formatter;
static const char* sharding_def_dir = "sharding";
static const char* sharding_def_file = "sharding/def";
static const char* sharding_recreate = "sharding/recreate_columns";
static const char* resharding_column_lock = "reshardingXcommencingXlocked";
// Copy a rocksdb slice's bytes into a freshly built bufferlist.
static bufferlist to_bufferlist(rocksdb::Slice in) {
  bufferlist out;
  out.append(bufferptr(in.data(), in.size()));
  return out;
}
// Mirror every buffer of 'bl' into the caller-provided slice array (which
// must already be sized to the buffer count) and wrap them as SliceParts.
static rocksdb::SliceParts prepare_sliceparts(const bufferlist &bl,
					      vector<rocksdb::Slice> *slices)
{
  size_t idx = 0;
  for (const auto& buf : bl.buffers()) {
    (*slices)[idx] = rocksdb::Slice(buf.c_str(), buf.length());
    ++idx;
  }
  return rocksdb::SliceParts(slices->data(), slices->size());
}
//
// One of these for the default rocksdb column family, routing each prefix
// to the appropriate MergeOperator.
//
class RocksDBStore::MergeOperatorRouter
  : public rocksdb::AssociativeMergeOperator
{
  RocksDBStore& store;
public:
  const char *Name() const override {
    // Construct a name that rocksDB will validate against. We want to
    // do this in a way that doesn't constrain the ordering of calls
    // to set_merge_operator, so sort the merge operators and then
    // construct a name from all of those parts.
    store.assoc_name.clear();
    map<std::string,std::string> names;

    // std::map sorts by prefix, making the composed name order-independent.
    for (auto& p : store.merge_ops) {
      names[p.first] = p.second->name();
    }
    for (auto& p : names) {
      store.assoc_name += '.';
      store.assoc_name += p.first;
      store.assoc_name += ':';
      store.assoc_name += p.second;
    }
    return store.assoc_name.c_str();
  }

  explicit MergeOperatorRouter(RocksDBStore &_store) : store(_store) {}

  bool Merge(const rocksdb::Slice& key,
	     const rocksdb::Slice* existing_value,
	     const rocksdb::Slice& value,
	     std::string* new_value,
	     rocksdb::Logger* logger) const override {
    // for default column family
    // extract prefix from key and compare against each registered merge op;
    // even though merge operator for explicit CF is included in merge_ops,
    // it won't be picked up, since it won't match.
    // A key matches when it starts with the registered prefix followed by
    // a 0 separator byte.
    for (auto& p : store.merge_ops) {
      if (p.first.compare(0, p.first.length(),
			  key.data(), p.first.length()) == 0 &&
	  key.data()[p.first.length()] == 0) {
	if (existing_value) {
	  p.second->merge(existing_value->data(), existing_value->size(),
			  value.data(), value.size(),
			  new_value);
	} else {
	  p.second->merge_nonexistent(value.data(), value.size(), new_value);
	}
	break;
      }
    }
    return true; // OK :)
  }
};
//
// One of these per non-default column family, linked directly to the
// merge operator for that CF/prefix (if any).
//
//
// One of these per non-default column family, forwarding straight to the
// single merge operator registered for that CF/prefix.
//
class RocksDBStore::MergeOperatorLinker
  : public rocksdb::AssociativeMergeOperator
{
private:
  std::shared_ptr<KeyValueDB::MergeOperator> mop;
public:
  explicit MergeOperatorLinker(const std::shared_ptr<KeyValueDB::MergeOperator> &o) : mop(o) {}

  const char *Name() const override {
    return mop->name();
  }

  bool Merge(const rocksdb::Slice& key,
	     const rocksdb::Slice* existing_value,
	     const rocksdb::Slice& value,
	     std::string* new_value,
	     rocksdb::Logger* logger) const override {
    if (existing_value == nullptr) {
      // no prior value for this key
      mop->merge_nonexistent(value.data(), value.size(), new_value);
    } else {
      mop->merge(existing_value->data(), existing_value->size(),
		 value.data(), value.size(),
		 new_value);
    }
    return true;
  }
};
// Register a merge operator for 'prefix'.  Only legal while the database
// is closed: once RocksDB is open its operator set is fixed.
int RocksDBStore::set_merge_operator(
  const string& prefix,
  std::shared_ptr<KeyValueDB::MergeOperator> mop)
{
  // If you fail here, it's because you can't do this on an open database
  ceph_assert(db == nullptr);
  merge_ops.emplace_back(prefix, mop);
  return 0;
}
// Bridges RocksDB's internal logging into Ceph's debug log subsystem.
class CephRocksdbLogger : public rocksdb::Logger {
  CephContext *cct;
public:
  explicit CephRocksdbLogger(CephContext *c) : cct(c) {
    cct->get();  // hold a ref on the context for the logger's lifetime
  }
  ~CephRocksdbLogger() override {
    cct->put();
  }

  // Write an entry to the log file with the specified format.
  void Logv(const char* format, va_list ap) override {
    Logv(rocksdb::INFO_LEVEL, format, ap);
  }

  // Write an entry to the log file with the specified log level
  // and format.  Any log with level under the internal log level
  // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
  // printed.
  void Logv(const rocksdb::InfoLogLevel log_level, const char* format,
	    va_list ap) override {
    // invert rocksdb's level scale to a ceph debug level
    int v = rocksdb::NUM_INFO_LOG_LEVELS - log_level - 1;
    dout(ceph::dout::need_dynamic(v));
    char buf[65536];
    vsnprintf(buf, sizeof(buf), format, ap);
    *_dout << buf << dendl;
  }
};
// Factory used by external callers; the returned logger forwards into the
// global ceph context's log.  Caller owns the returned pointer.
rocksdb::Logger *create_rocksdb_ceph_logger()
{
  return new CephRocksdbLogger(g_ceph_context);
}
// Parse 'val' as a boolean: case-insensitive "true"/"false", or any
// integer (non-zero => true).  Returns 0 on success, -EINVAL otherwise.
static int string2bool(const string &val, bool &b_val)
{
  if (strcasecmp(val.c_str(), "true") == 0) {
    b_val = true;
    return 0;
  }
  if (strcasecmp(val.c_str(), "false") == 0) {
    b_val = false;
    return 0;
  }
  std::string err;
  int n = strict_strtol(val.c_str(), 10, &err);
  if (!err.empty())
    return -EINVAL;
  b_val = (n != 0);
  return 0;
}
namespace rocksdb {
extern std::string trim(const std::string& str);
}
// this function is a modification of rocksdb's StringToMap:
// 1) accepts ' \n ; as separators
// 2) leaves compound options with enclosing { and }
rocksdb::Status StringToMap(const std::string& opts_str,
			    std::unordered_map<std::string, std::string>* opts_map)
{
  using rocksdb::Status;
  using rocksdb::trim;
  assert(opts_map);
  // Example:
  //   opts_str = "write_buffer_size=1024;max_write_buffer_number=2;"
  //              "nested_opt={opt1=1;opt2=2};max_bytes_for_level_base=100"
  size_t pos = 0;
  std::string opts = trim(opts_str);
  while (pos < opts.size()) {
    size_t eq_pos = opts.find('=', pos);
    if (eq_pos == std::string::npos) {
      return Status::InvalidArgument("Mismatched key value pair, '=' expected");
    }
    std::string key = trim(opts.substr(pos, eq_pos - pos));
    if (key.empty()) {
      return Status::InvalidArgument("Empty key found");
    }

    // skip space after '=' and look for '{' for possible nested options
    pos = eq_pos + 1;
    while (pos < opts.size() && isspace(opts[pos])) {
      ++pos;
    }
    // Empty value at the end
    if (pos >= opts.size()) {
      (*opts_map)[key] = "";
      break;
    }
    if (opts[pos] == '{') {
      // Nested value: scan for the brace matching this '{', tracking depth
      // so inner '{'/'}' pairs are skipped over.
      int count = 1;
      size_t brace_pos = pos + 1;
      while (brace_pos < opts.size()) {
	if (opts[brace_pos] == '{') {
	  ++count;
	} else if (opts[brace_pos] == '}') {
	  --count;
	  if (count == 0) {
	    break;
	  }
	}
	++brace_pos;
      }
      // found the matching closing brace
      if (count == 0) {
	//include both '{' and '}'
	(*opts_map)[key] = trim(opts.substr(pos, brace_pos - pos + 1));
	// skip all whitespace and move to the next ';,'
	// brace_pos points to the matching '}'
	pos = brace_pos + 1;
	while (pos < opts.size() && isspace(opts[pos])) {
	  ++pos;
	}
	if (pos < opts.size() && opts[pos] != ';' && opts[pos] != ',') {
	  return Status::InvalidArgument(
	      "Unexpected chars after nested options");
	}
	++pos;
      } else {
	return Status::InvalidArgument(
	    "Mismatched curly braces for nested options");
      }
    } else {
      // Plain value: runs until the next ',' or ';' separator (or the end
      // of the string for the final pair).
      size_t sc_pos = opts.find_first_of(",;", pos);
      if (sc_pos == std::string::npos) {
	(*opts_map)[key] = trim(opts.substr(pos));
	// It either ends with a trailing , ; or the last key-value pair
	break;
      } else {
	(*opts_map)[key] = trim(opts.substr(pos, sc_pos - pos));
      }
      pos = sc_pos + 1;
    }
  }

  return Status::OK();
}
// Handle the ceph-specific option keys that rocksdb's own option parser
// rejected.  Returns 0 when the key was consumed, -EINVAL otherwise.
int RocksDBStore::tryInterpret(const string &key, const string &val, rocksdb::Options &opt)
{
  if (key == "compaction_threads" || key == "flusher_threads") {
    std::string err;
    int n = strict_iecstrtoll(val, &err);
    if (!err.empty())
      return -EINVAL;
    // compactions run on the LOW-priority pool, flushes on the HIGH one
    auto pri = (key == "compaction_threads") ?
      rocksdb::Env::Priority::LOW :
      rocksdb::Env::Priority::HIGH;
    opt.env->SetBackgroundThreads(n, pri);
    return 0;
  }
  if (key == "compact_on_mount") {
    return string2bool(val, compact_on_mount);
  }
  if (key == "disableWAL") {
    return string2bool(val, disableWAL);
  }
  // unrecognized config option
  return -EINVAL;
}
int RocksDBStore::ParseOptionsFromString(const string &opt_str, rocksdb::Options &opt)
{
return ParseOptionsFromStringStatic(cct, opt_str, opt,
[&](const string& k, const string& v, rocksdb::Options& o) {
return tryInterpret(k, v, o);
}
);
}
// Parse 'opt_str' into 'opt'.  Each key=value pair is first handed to
// rocksdb's native parser; keys it rejects go to 'interp' (when supplied)
// or are tolerated silently if they belong to the known ceph-special set.
int RocksDBStore::ParseOptionsFromStringStatic(
  CephContext *cct,
  const string& opt_str,
  rocksdb::Options& opt,
  function<int(const string&, const string&, rocksdb::Options&)> interp)
{
  // keep aligned with func tryInterpret
  const set<string> need_interp_keys = {"compaction_threads", "flusher_threads", "compact_on_mount", "disableWAL"};
  rocksdb::Status status;
  std::unordered_map<std::string, std::string> str_map;
  status = StringToMap(opt_str, &str_map);
  if (!status.ok()) {
     dout(5) << __func__ << " error '" << status.getState() <<
      "' while parsing options '" << opt_str << "'" << dendl;
    return -EINVAL;
  }

  for (auto it = str_map.begin(); it != str_map.end(); ++it) {
    string this_opt = it->first + "=" + it->second;
    rocksdb::Status status =
      rocksdb::GetOptionsFromString(opt, this_opt, &opt);
    int r = 0;
    if (!status.ok()) {
      // rocksdb didn't understand the key; give the interpreter a chance,
      // or accept silently when it's a known ceph key and no interpreter
      // was supplied.
      if (interp != nullptr) {
	r = interp(it->first, it->second, opt);
      } else if (!need_interp_keys.count(it->first)) {
	r = -1;
      }
      if (r < 0) {
	derr << status.ToString() << dendl;
	return -EINVAL;
      }
    }
    lgeneric_dout(cct, 1) << " set rocksdb option " << it->first
			  << " = " << it->second << dendl;
  }
  return 0;
}
// Stash the option string for later opens, parsing it now purely to
// validate syntax (the resulting Options object is discarded).
int RocksDBStore::init(string _options_str)
{
  options_str = _options_str;
  if (options_str.empty()) {
    return 0;
  }
  rocksdb::Options opt;
  if (ParseOptionsFromString(options_str, opt) != 0) {
    return -EINVAL;
  }
  return 0;
}
// Make sure the database directory exists before opening rocksdb.
// A custom Env owns its own namespace, so we just ask it for the
// directory; otherwise we create it on the local filesystem.
int RocksDBStore::create_db_dir()
{
  if (env) {
    unique_ptr<rocksdb::Directory> dir;
    env->NewDirectory(path, &dir);
    return 0;
  }
  if (fs::exists(path)) {
    return 0;
  }
  std::error_code ec;
  if (!fs::create_directory(path, ec)) {
    derr << __func__ << " failed to create " << path
	 << ": " << ec.message() << dendl;
    return -ec.value();
  }
  // rwxr-xr-x
  fs::permissions(path,
		  fs::perms::owner_all |
		  fs::perms::group_read | fs::perms::group_exec |
		  fs::perms::others_read | fs::perms::others_exec);
  return 0;
}
int RocksDBStore::install_cf_mergeop(
const string &key_prefix,
rocksdb::ColumnFamilyOptions *cf_opt)
{
ceph_assert(cf_opt != nullptr);
cf_opt->merge_operator.reset();
for (auto& i : merge_ops) {
if (i.first == key_prefix) {
cf_opt->merge_operator.reset(new MergeOperatorLinker(i.second));
}
}
return 0;
}
// Ensure the on-disk directory exists, then open with create-if-missing.
int RocksDBStore::create_and_open(ostream &out,
				  const std::string& cfs)
{
  if (int r = create_db_dir(); r < 0) {
    return r;
  }
  return do_open(out, true, false, cfs);
}
// Build the rocksdb block cache of the configured type.  Returns nullptr
// (after logging) when the type is unknown or unavailable in this build.
std::shared_ptr<rocksdb::Cache> RocksDBStore::create_block_cache(
    const std::string& cache_type, size_t cache_size, double cache_prio_high) {
  std::shared_ptr<rocksdb::Cache> cache;
  auto shard_bits = cct->_conf->rocksdb_cache_shard_bits;
  if (cache_type == "binned_lru") {
    cache = rocksdb_cache::NewBinnedLRUCache(cct, cache_size, shard_bits, false, cache_prio_high);
  } else if (cache_type == "lru") {
    cache = rocksdb::NewLRUCache(cache_size, shard_bits);
  } else if (cache_type == "clock") {
    cache = rocksdb::NewClockCache(cache_size, shard_bits);
    if (!cache) {
      // bug fix: the old message streamed the (null) cache pointer rather
      // than the requested cache type
      derr << "rocksdb_cache_type '" << cache_type
           << "' chosen, but RocksDB not compiled with LibTBB. "
           << dendl;
    }
  } else {
    derr << "unrecognized rocksdb_cache_type '" << cache_type << "'" << dendl;
  }
  return cache;
}
// Assemble the full rocksdb::Options for an open: apply the admin option
// string, then perf statistics, WAL/db paths, logging, Env, caches, bloom
// filters, index type, block-based-table tuning, and finally the merge
// operator router.  Later settings override earlier ones.
int RocksDBStore::load_rocksdb_options(bool create_if_missing, rocksdb::Options& opt)
{
  rocksdb::Status status;

  if (options_str.length()) {
    int r = ParseOptionsFromString(options_str, opt);
    if (r != 0) {
      return -EINVAL;
    }
  }

  if (cct->_conf->rocksdb_perf)  {
    dbstats = rocksdb::CreateDBStatistics();
    opt.statistics = dbstats;
  }

  opt.create_if_missing = create_if_missing;
  if (kv_options.count("separate_wal_dir")) {
    // write-ahead log lives beside the db in "<path>.wal"
    opt.wal_dir = path + ".wal";
  }

  // Since ceph::for_each_substr doesn't return a value and
  // std::stoull does throw, we may as well just catch everything here.
  try {
    if (kv_options.count("db_paths")) {
      list<string> paths;
      get_str_list(kv_options["db_paths"], "; \t", paths);
      // each item is "<path>,<size>"
      for (auto& p : paths) {
	size_t pos = p.find(',');
	if (pos == std::string::npos) {
	  derr << __func__ << " invalid db path item " << p << " in "
	       << kv_options["db_paths"] << dendl;
	  return -EINVAL;
	}
	string path = p.substr(0, pos);
	string size_str = p.substr(pos + 1);
	uint64_t size = atoll(size_str.c_str());
	if (!size) {
	  derr << __func__ << " invalid db path item " << p << " in "
	       << kv_options["db_paths"] << dendl;
	  return -EINVAL;
	}
	opt.db_paths.push_back(rocksdb::DbPath(path, size));
	dout(10) << __func__ << " db_path " << path << " size " << size << dendl;
      }
    }
  } catch (const std::system_error& e) {
    return -e.code().value();
  }

  if (cct->_conf->rocksdb_log_to_ceph_log) {
    opt.info_log.reset(new CephRocksdbLogger(cct));
  }

  if (priv) {
    dout(10) << __func__ << " using custom Env " << priv << dendl;
    opt.env = static_cast<rocksdb::Env*>(priv);
  } else {
    env = opt.env;
  }

  opt.env->SetAllowNonOwnerAccess(false);

  // caches: split the budget between the row cache and the block cache
  if (!set_cache_flag) {
    cache_size = cct->_conf->rocksdb_cache_size;
  }
  uint64_t row_cache_size = cache_size * cct->_conf->rocksdb_cache_row_ratio;
  uint64_t block_cache_size = cache_size - row_cache_size;

  bbt_opts.block_cache = create_block_cache(cct->_conf->rocksdb_cache_type, block_cache_size);
  if (!bbt_opts.block_cache) {
    return -EINVAL;
  }
  bbt_opts.block_size = cct->_conf->rocksdb_block_size;

  if (row_cache_size > 0)
    opt.row_cache = rocksdb::NewLRUCache(row_cache_size,
				     cct->_conf->rocksdb_cache_shard_bits);
  uint64_t bloom_bits = cct->_conf.get_val<uint64_t>("rocksdb_bloom_bits_per_key");
  if (bloom_bits > 0) {
    dout(10) << __func__ << " set bloom filter bits per key to "
	     << bloom_bits << dendl;
    bbt_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(bloom_bits));
  }
  // index type: binary_search / hash_search / two_level per configuration
  using std::placeholders::_1;
  if (cct->_conf.with_val<std::string>("rocksdb_index_type",
				    std::bind(std::equal_to<std::string>(), _1,
					      "binary_search")))
    bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kBinarySearch;
  if (cct->_conf.with_val<std::string>("rocksdb_index_type",
				    std::bind(std::equal_to<std::string>(), _1,
					      "hash_search")))
    bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kHashSearch;
  if (cct->_conf.with_val<std::string>("rocksdb_index_type",
				    std::bind(std::equal_to<std::string>(), _1,
					      "two_level")))
    bbt_opts.index_type = rocksdb::BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
  if (!bbt_opts.no_block_cache) {
    bbt_opts.cache_index_and_filter_blocks =
        cct->_conf.get_val<bool>("rocksdb_cache_index_and_filter_blocks");
    bbt_opts.cache_index_and_filter_blocks_with_high_priority =
        cct->_conf.get_val<bool>("rocksdb_cache_index_and_filter_blocks_with_high_priority");
    bbt_opts.pin_l0_filter_and_index_blocks_in_cache =
      cct->_conf.get_val<bool>("rocksdb_pin_l0_filter_and_index_blocks_in_cache");
  }
  bbt_opts.partition_filters = cct->_conf.get_val<bool>("rocksdb_partition_filters");
  if (cct->_conf.get_val<Option::size_t>("rocksdb_metadata_block_size") > 0)
    bbt_opts.metadata_block_size = cct->_conf.get_val<Option::size_t>("rocksdb_metadata_block_size");
  opt.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbt_opts));
  dout(10) << __func__ << " block size " << cct->_conf->rocksdb_block_size
	   << ", block_cache size " << byte_u_t(block_cache_size)
	   << ", row_cache size " << byte_u_t(row_cache_size)
	   << "; shards "
	   << (1 << cct->_conf->rocksdb_cache_shard_bits)
	   << ", type " << cct->_conf->rocksdb_cache_type
	   << dendl;

  // default CF routes merges by key prefix to the registered operators
  opt.merge_operator.reset(new MergeOperatorRouter(*this));
  comparator = opt.comparator;
  return 0;
}
// Register one shard's column-family handle under 'cf_name'.  The first
// registration for a name fixes the [hash_l, hash_h) key-hash window; any
// later shard registration must agree with it.
void RocksDBStore::add_column_family(const std::string& cf_name, uint32_t hash_l, uint32_t hash_h,
				     size_t shard_idx, rocksdb::ColumnFamilyHandle *handle) {
  dout(10) << __func__ << " column_name=" << cf_name << " shard_idx=" << shard_idx <<
    " hash_l=" << hash_l << " hash_h=" << hash_h << " handle=" << (void*) handle << dendl;
  bool exists = cf_handles.count(cf_name) > 0;
  auto& column = cf_handles[cf_name];
  if (exists) {
    ceph_assert(hash_l == column.hash_l);
    ceph_assert(hash_h == column.hash_h);
  } else {
    ceph_assert(hash_l < hash_h);
    column.hash_l = hash_l;
    column.hash_h = hash_h;
  }
  // grow the shard vector on demand; shards may arrive out of order
  if (column.handles.size() <= shard_idx)
    column.handles.resize(shard_idx + 1);
  column.handles[shard_idx] = handle;
  // reverse map: rocksdb CF id -> prefix, used when decoding raw keys
  cf_ids_to_prefix.emplace(handle->GetID(), cf_name);
}
// True when the prefix has been carved out into its own column family.
bool RocksDBStore::is_column_family(const std::string& prefix) {
  return cf_handles.find(prefix) != cf_handles.end();
}
// View the portion of 'key' used for shard selection: the configured
// [hash_l, hash_h) window, clamped to the actual key length.
std::string_view RocksDBStore::get_key_hash_view(const prefix_shards& shards, const char* key, const size_t keylen) {
  const uint32_t lo = std::min<uint32_t>(shards.hash_l, keylen);
  const uint32_t hi = std::min<uint32_t>(shards.hash_h, keylen);
  return std::string_view(key + lo, hi - lo);
}
// Pick the shard handle for 'key': hash the shard-relevant substring and
// reduce it modulo the shard count.
rocksdb::ColumnFamilyHandle *RocksDBStore::get_key_cf(const prefix_shards& shards, const char* key, const size_t keylen) {
  std::string_view hv = get_key_hash_view(shards, key, keylen);
  const uint32_t h = ceph_str_hash_rjenkins(hv.data(), hv.size());
  return shards.handles[h % shards.handles.size()];
}
// Resolve the column-family handle for (prefix, key); nullptr means the
// prefix lives in the default CF.  Deduplicated: forwards to the
// (char*, len) overload, which implements the identical lookup and
// shard-selection logic.
rocksdb::ColumnFamilyHandle *RocksDBStore::get_cf_handle(const std::string& prefix, const std::string& key) {
  return get_cf_handle(prefix, key.data(), key.size());
}
// Resolve the column-family handle for (prefix, key); nullptr means the
// prefix has no dedicated CF (it lives in the default one).
rocksdb::ColumnFamilyHandle *RocksDBStore::get_cf_handle(const std::string& prefix, const char* key, size_t keylen) {
  auto it = cf_handles.find(prefix);
  if (it == cf_handles.end()) {
    return nullptr;
  }
  const auto& shards = it->second;
  // unsharded column: single handle; otherwise hash the key to a shard
  return shards.handles.size() == 1
    ? shards.handles[0]
    : get_key_cf(shards, key, keylen);
}
/**
* If the specified IteratorBounds arg has both an upper and a lower bound defined, and they have equal placement hash
* strings, we can be sure that the entire iteration range exists in a single CF. In that case, we return the relevant
* CF handle. In all other cases, we return a nullptr to indicate that the specified bounds cannot necessarily be mapped
* to a single CF.
*/
rocksdb::ColumnFamilyHandle *RocksDBStore::check_cf_handle_bounds(const cf_handles_iterator& iter, const IteratorBounds& bounds) {
  // Need both bounds to reason about the range at all.
  if (!bounds.lower_bound || !bounds.upper_bound) {
    return nullptr;
  }
  ceph_assert(iter != cf_handles.end());
  ceph_assert(iter->second.handles.size() != 1);
  // Only hash windows starting at offset 0 are handled here.
  if (iter->second.hash_l != 0) {
    return nullptr;
  }
  auto lower_bound_hash_str = get_key_hash_view(iter->second, bounds.lower_bound->data(), bounds.lower_bound->size());
  auto upper_bound_hash_str = get_key_hash_view(iter->second, bounds.upper_bound->data(), bounds.upper_bound->size());
  if (lower_bound_hash_str == upper_bound_hash_str) {
    // Equal hash substrings => both bounds (and everything between,
    // given hash_l == 0) map to the same shard.
    auto key = *bounds.lower_bound;
    return get_key_cf(iter->second, key.data(), key.size());
  } else {
    return nullptr;
  }
}
/**
* Definition of sharding:
* space-separated list of: column_def [ '=' options ]
* column_def := column_name '(' shard_count ')'
* column_def := column_name '(' shard_count ',' hash_begin '-' ')'
* column_def := column_name '(' shard_count ',' hash_begin '-' hash_end ')'
* I=write_buffer_size=1048576 O(6) m(7,10-) prefix(4,0-10)=disable_auto_compactions=true,max_bytes_for_level_base=1048576
*/
// Parse a space-separated sharding definition (grammar in the comment
// above) into ColumnFamily entries.  On failure, *error_position points
// into text_def_in and *error_msg describes the problem; returns true on
// full success.
bool RocksDBStore::parse_sharding_def(const std::string_view text_def_in,
				     std::vector<ColumnFamily>& sharding_def,
				     char const* *error_position,
				     std::string *error_msg)
{
  std::string_view text_def = text_def_in;
  char const* error_position_local = nullptr;
  std::string error_msg_local;
  if (error_position == nullptr) {
    error_position = &error_position_local;
  }
  *error_position = nullptr;
  if (error_msg == nullptr) {
    error_msg = &error_msg_local;
    error_msg->clear();
  }

  sharding_def.clear();
  while (!text_def.empty()) {
    std::string_view options;
    std::string_view name;
    size_t shard_cnt = 1;
    uint32_t l_bound = 0;
    uint32_t h_bound = std::numeric_limits<uint32_t>::max();

    // carve off one space-delimited column definition
    std::string_view column_def;
    size_t spos = text_def.find(' ');
    if (spos == std::string_view::npos) {
      column_def = text_def;
      text_def = std::string_view(text_def.end(), 0);
    } else {
      column_def = text_def.substr(0, spos);
      text_def = text_def.substr(spos + 1);
    }

    // split off "=options" suffix, if present
    size_t eqpos = column_def.find('=');
    if (eqpos != std::string_view::npos) {
      options = column_def.substr(eqpos + 1);
      column_def = column_def.substr(0, eqpos);
    }

    // optional "(shards[,lo-[hi]])" clause after the column name
    size_t bpos = column_def.find('(');
    if (bpos != std::string_view::npos) {
      name = column_def.substr(0, bpos);
      const char* nptr = &column_def[bpos + 1];
      char* endptr;
      shard_cnt = strtol(nptr, &endptr, 10);
      if (nptr == endptr) {
	*error_position = nptr;
	*error_msg = "expecting integer";
	break;
      }
      nptr = endptr;
      if (*nptr == ',') {
	nptr++;
	l_bound = strtol(nptr, &endptr, 10);
	if (nptr == endptr) {
	  *error_position = nptr;
	  *error_msg = "expecting integer";
	  break;
	}
	nptr = endptr;
	if (*nptr != '-') {
	  *error_position = nptr;
	  *error_msg = "expecting '-'";
	  break;
	}
	nptr++;
	h_bound = strtol(nptr, &endptr, 10);
	if (nptr == endptr) {
	  // open-ended upper bound ("lo-") keeps the max default
	  h_bound = std::numeric_limits<uint32_t>::max();
	}
	nptr = endptr;
      }
      if (*nptr != ')') {
	*error_position = nptr;
	*error_msg = "expecting ')'";
	break;
      }
    } else {
      name = column_def;
    }
    sharding_def.emplace_back(std::string(name), shard_cnt, std::string(options), l_bound, h_bound);
  }
  return *error_position == nullptr;
}
void RocksDBStore::sharding_def_to_columns(const std::vector<ColumnFamily>& sharding_def,
std::vector<std::string>& columns)
{
columns.clear();
for (size_t i = 0; i < sharding_def.size(); i++) {
if (sharding_def[i].shard_cnt == 1) {
columns.push_back(sharding_def[i].name);
} else {
for (size_t j = 0; j < sharding_def[i].shard_cnt; j++) {
columns.push_back(sharding_def[i].name + "-" + std::to_string(j));
}
}
}
}
// Create the rocksdb column families described by sharding_def and register
// each resulting handle. Returns 0 on success, negative errno on failure.
int RocksDBStore::create_shards(const rocksdb::Options& opt,
				const std::vector<ColumnFamily>& sharding_def)
{
  for (const auto& def : sharding_def) {
    // Start from the default CF settings (block cache, merge operators)
    // and layer this column's own options on top.
    rocksdb::ColumnFamilyOptions cf_opt(opt);
    int r = update_column_family_options(def.name, def.options, &cf_opt);
    if (r != 0) {
      return r;
    }
    for (size_t shard = 0; shard < def.shard_cnt; shard++) {
      // Unsharded columns keep their bare name; shards get "-<idx>".
      std::string cf_name = (def.shard_cnt == 1)
	? def.name
	: def.name + "-" + std::to_string(shard);
      rocksdb::ColumnFamilyHandle *handle = nullptr;
      rocksdb::Status status = db->CreateColumnFamily(cf_opt, cf_name, &handle);
      if (!status.ok()) {
	derr << __func__ << " Failed to create rocksdb column family: "
	     << cf_name << dendl;
	return -EINVAL;
      }
      // remember the freshly created handle
      add_column_family(def.name, def.hash_l, def.hash_h, shard, handle);
    }
  }
  return 0;
}
// Apply a sharding definition to a freshly created database:
// parse it, create all shard column families, and persist the definition
// text so later opens can verify against it. An empty sharding_text
// removes any stored definition. Returns 0 or a negative errno.
int RocksDBStore::apply_sharding(const rocksdb::Options& opt,
				 const std::string& sharding_text)
{
  // create and open column families
  if (!sharding_text.empty()) {
    bool b;
    int r;
    rocksdb::Status status;
    std::vector<ColumnFamily> sharding_def;
    char const* error_position;
    std::string error_msg;
    b = parse_sharding_def(sharding_text, sharding_def, &error_position, &error_msg);
    if (!b) {
      // Print the definition and a caret pointing at the parse error.
      dout(1) << __func__ << " bad sharding: " << dendl;
      dout(1) << __func__ << sharding_text << dendl;
      dout(1) << __func__ << std::string(error_position - &sharding_text[0], ' ') << "^" << error_msg << dendl;
      return -EINVAL;
    }
    r = create_shards(opt, sharding_def);
    if (r != 0 ) {
      derr << __func__ << " create_shards failed error=" << r << dendl;
      return r;
    }
    // Persist the definition so verify_sharding() can check it on reopen.
    opt.env->CreateDir(sharding_def_dir);
    status = rocksdb::WriteStringToFile(opt.env, sharding_text,
					sharding_def_file, true);
    if (!status.ok()) {
      derr << __func__ << " cannot write to " << sharding_def_file << dendl;
      return -EIO;
    }
  } else {
    // No sharding requested: drop any stale definition file.
    opt.env->DeleteFile(sharding_def_file);
  }
  return 0;
}
// linking to rocksdb function defined in options_helper.cc
// it can parse nested params like "nested_opt={opt1=1;opt2=2}"
extern rocksdb::Status rocksdb::StringToMap(const std::string& opts_str,
std::unordered_map<std::string, std::string>* opts_map);
// Splits a single column-family options string into a name->value map using
// the RocksDB parser, which understands nested "{...}" values. The
// Ceph-specific "block_cache" pseudo-option, when present, is moved out of
// the map into *block_cache_opt (cleared otherwise).
// Returns 0 on success, -EINVAL when RocksDB rejects the string.
int RocksDBStore::split_column_family_options(const std::string& options,
					      std::unordered_map<std::string, std::string>* opt_map,
					      std::string* block_cache_opt)
{
  dout(20) << __func__ << " options=" << options << dendl;
  rocksdb::Status status = rocksdb::StringToMap(options, opt_map);
  if (!status.ok()) {
    dout(5) << __func__ << " error '" << status.getState()
	    << "' while parsing options '" << options << "'" << dendl;
    return -EINVAL;
  }
  block_cache_opt->clear();
  auto bc = opt_map->find("block_cache");
  if (bc != opt_map->end()) {
    *block_cache_opt = bc->second;
    opt_map->erase(bc);
  }
  return 0;
}
// Updates column family options.
// Take options from more_options and apply them to cf_opt.
// Allowed options are exactly the same as allowed for column families in RocksDB.
// Ceph addition is "block_cache" option that is translated to block_cache and
// allows to specialize separate block cache for O column family.
//
// base_name - name of column without shard suffix: "-"+number
// options - additional options to apply
// cf_opt - column family options to update
// Returns 0 on success, negative errno on parse/apply failure.
int RocksDBStore::update_column_family_options(const std::string& base_name,
					       const std::string& more_options,
					       rocksdb::ColumnFamilyOptions* cf_opt)
{
  std::unordered_map<std::string, std::string> options_map;
  std::string block_cache_opt;
  rocksdb::Status status;
  // Separate the Ceph-only "block_cache" option from the RocksDB ones.
  int r = split_column_family_options(more_options, &options_map, &block_cache_opt);
  if (r != 0) {
    dout(5) << __func__ << " failed to parse options; column family=" << base_name
	    << " options=" << more_options << dendl;
    return r;
  }
  // Apply the remaining options in place over the current cf_opt.
  status = rocksdb::GetColumnFamilyOptionsFromMap(*cf_opt, options_map, cf_opt);
  if (!status.ok()) {
    dout(5) << __func__ << " invalid column family optionsp; column family="
	    << base_name << " options=" << more_options << dendl;
    dout(5) << __func__ << " RocksDB error='" << status.getState() << "'" << dendl;
    return -EINVAL;
  }
  if (base_name != rocksdb::kDefaultColumnFamilyName) {
    // default cf has its merge operator defined in load_rocksdb_options, should not override it
    install_cf_mergeop(base_name, cf_opt);
  }
  if (!block_cache_opt.empty()) {
    r = apply_block_cache_options(base_name, block_cache_opt, cf_opt);
    if (r != 0) {
      // apply_block_cache_options already does all necessary douts
      return r;
    }
  }
  // Set Compact on Deletion Factory
  if (cct->_conf->rocksdb_cf_compact_on_deletion) {
    size_t sliding_window = cct->_conf->rocksdb_cf_compact_on_deletion_sliding_window;
    size_t trigger = cct->_conf->rocksdb_cf_compact_on_deletion_trigger;
    cf_opt->table_properties_collector_factories.emplace_back(
	rocksdb::NewCompactOnDeletionCollectorFactory(sliding_window, trigger));
  }
  return 0;
}
// Apply the Ceph "block_cache" pseudo-option string to a column family.
// Recognized sub-options: "type", "size", "high_ratio" (which force creating
// a dedicated cache for this column); everything else is forwarded to
// RocksDB's BlockBasedTableOptions parser. When none of the three are given
// the column shares the store-wide block cache (bbt_opts.block_cache).
// Returns 0 on success, -EINVAL on any parse/creation failure.
int RocksDBStore::apply_block_cache_options(const std::string& column_name,
					    const std::string& block_cache_opt,
					    rocksdb::ColumnFamilyOptions* cf_opt)
{
  rocksdb::Status status;
  std::unordered_map<std::string, std::string> cache_options_map;
  status = rocksdb::StringToMap(block_cache_opt, &cache_options_map);
  if (!status.ok()) {
    dout(5) << __func__ << " invalid block cache options; column=" << column_name
	    << " options=" << block_cache_opt << dendl;
    dout(5) << __func__ << " RocksDB error='" << status.getState() << "'" << dendl;
    return -EINVAL;
  }
  // Any of type/size/high_ratio present means this column gets its own
  // cache instead of sharing the global one.
  bool require_new_block_cache = false;
  std::string cache_type = cct->_conf->rocksdb_cache_type;
  if (const auto it = cache_options_map.find("type"); it != cache_options_map.end()) {
    cache_type = it->second;
    cache_options_map.erase(it);
    require_new_block_cache = true;
  }
  size_t cache_size = cct->_conf->rocksdb_cache_size;
  if (auto it = cache_options_map.find("size"); it != cache_options_map.end()) {
    std::string error;
    // Accepts IEC suffixes (K/M/G...) via strict_iecstrtoll.
    cache_size = strict_iecstrtoll(it->second.c_str(), &error);
    if (!error.empty()) {
      dout(10) << __func__ << " invalid size: '" << it->second << "'" << dendl;
      return -EINVAL;
    }
    cache_options_map.erase(it);
    require_new_block_cache = true;
  }
  double high_pri_pool_ratio = 0.0;
  if (auto it = cache_options_map.find("high_ratio"); it != cache_options_map.end()) {
    std::string error;
    high_pri_pool_ratio = strict_strtod(it->second.c_str(), &error);
    if (!error.empty()) {
      dout(10) << __func__ << " invalid high_pri (float): '" << it->second << "'" << dendl;
      return -EINVAL;
    }
    cache_options_map.erase(it);
    require_new_block_cache = true;
  }
  // Remaining entries are regular BlockBasedTableOptions, parsed on top of
  // the store-wide defaults (bbt_opts).
  rocksdb::BlockBasedTableOptions column_bbt_opts;
  status = GetBlockBasedTableOptionsFromMap(bbt_opts, cache_options_map, &column_bbt_opts);
  if (!status.ok()) {
    dout(5) << __func__ << " invalid block cache options; column=" << column_name
	    << " options=" << block_cache_opt << dendl;
    dout(5) << __func__ << " RocksDB error='" << status.getState() << "'" << dendl;
    return -EINVAL;
  }
  std::shared_ptr<rocksdb::Cache> block_cache;
  if (column_bbt_opts.no_block_cache) {
    // clear all settings except no_block_cache
    // rocksdb does not like then
    column_bbt_opts = rocksdb::BlockBasedTableOptions();
    column_bbt_opts.no_block_cache = true;
  } else {
    if (require_new_block_cache) {
      block_cache = create_block_cache(cache_type, cache_size, high_pri_pool_ratio);
      if (!block_cache) {
	dout(5) << __func__ << " failed to create block cache for params: " << block_cache_opt << dendl;
	return -EINVAL;
      }
    } else {
      // Share the store-wide cache.
      block_cache = bbt_opts.block_cache;
    }
  }
  column_bbt_opts.block_cache = block_cache;
  // Keep the per-column table options alive for the lifetime of the store;
  // the table factory references them.
  cf_bbt_opts[column_name] = column_bbt_opts;
  cf_opt->table_factory.reset(NewBlockBasedTableFactory(cf_bbt_opts[column_name]));
  return 0;
}
// Compare the stored sharding definition against the column families that
// actually exist in the database. Expected CFs that exist are returned in
// existing_cfs (+ shard info in existing_cfs_shard); expected-but-absent
// ones in missing_cfs/missing_cfs_shard. The default CF is appended to
// existing_cfs last. Fails with -EIO if the DB contains CFs the definition
// does not predict.
int RocksDBStore::verify_sharding(const rocksdb::Options& opt,
				  std::vector<rocksdb::ColumnFamilyDescriptor>& existing_cfs,
				  std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& existing_cfs_shard,
				  std::vector<rocksdb::ColumnFamilyDescriptor>& missing_cfs,
				  std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& missing_cfs_shard)
{
  rocksdb::Status status;
  std::string stored_sharding_text;
  status = opt.env->FileExists(sharding_def_file);
  if (status.ok()) {
    status = rocksdb::ReadFileToString(opt.env,
				       sharding_def_file,
				       &stored_sharding_text);
    if(!status.ok()) {
      derr << __func__ << " cannot read from " << sharding_def_file << dendl;
      return -EIO;
    }
    dout(20) << __func__ << " sharding=" << stored_sharding_text << dendl;
  } else {
    dout(30) << __func__ << " no sharding" << dendl;
    //no "sharding_def" present
  }
  //check if sharding_def matches stored_sharding_def
  std::vector<ColumnFamily> stored_sharding_def;
  parse_sharding_def(stored_sharding_text, stored_sharding_def);
  std::sort(stored_sharding_def.begin(), stored_sharding_def.end(),
	    [](ColumnFamily& a, ColumnFamily& b) { return a.name < b.name; } );
  std::vector<string> rocksdb_cfs;
  status = rocksdb::DB::ListColumnFamilies(rocksdb::DBOptions(opt),
					   path, &rocksdb_cfs);
  if (!status.ok()) {
    derr << __func__ << " unable to list column families: " << status.ToString() << dendl;
    return -EIO;
  }
  dout(5) << __func__ << " column families from rocksdb: " << rocksdb_cfs << dendl;
  // Classify one expected shard as existing or missing.
  auto emplace_cf = [&] (const RocksDBStore::ColumnFamily& column,
			 int32_t shard_id,
			 const std::string& shard_name,
			 const rocksdb::ColumnFamilyOptions& opt) {
    if (std::find(rocksdb_cfs.begin(), rocksdb_cfs.end(), shard_name) != rocksdb_cfs.end()) {
      existing_cfs.emplace_back(shard_name, opt);
      existing_cfs_shard.emplace_back(shard_id, column);
    } else {
      missing_cfs.emplace_back(shard_name, opt);
      missing_cfs_shard.emplace_back(shard_id, column);
    }
  };
  for (auto& column : stored_sharding_def) {
    // Rebuild per-column options exactly as create_shards would.
    rocksdb::ColumnFamilyOptions cf_opt(opt);
    int r = update_column_family_options(column.name, column.options, &cf_opt);
    if (r != 0) {
      return r;
    }
    if (column.shard_cnt == 1) {
      emplace_cf(column, 0, column.name, cf_opt);
    } else {
      for (size_t i = 0; i < column.shard_cnt; i++) {
	std::string cf_name = column.name + "-" + std::to_string(i);
	emplace_cf(column, i, cf_name, cf_opt);
      }
    }
  }
  existing_cfs.emplace_back("default", opt);
  // Every CF in the DB must be accounted for by the definition (+default);
  // extras indicate corruption or a mismatched definition file.
  if (existing_cfs.size() != rocksdb_cfs.size()) {
    std::vector<std::string> columns_from_stored;
    sharding_def_to_columns(stored_sharding_def, columns_from_stored);
    derr << __func__ << " extra columns in rocksdb. rocksdb columns = " << rocksdb_cfs
	 << " target columns = " << columns_from_stored << dendl;
    return -EIO;
  }
  return 0;
}
// Render a ColumnFamily as "(name,shard_cnt,hash_l-[hash_h],options)".
// The upper hash bound is omitted when it is unbounded (uint32 max).
std::ostream& operator<<(std::ostream& out, const RocksDBStore::ColumnFamily& cf)
{
  out << "(" << cf.name
      << "," << cf.shard_cnt
      << "," << cf.hash_l << "-";
  if (cf.hash_h != std::numeric_limits<uint32_t>::max()) {
    out << cf.hash_h;
  }
  out << "," << cf.options << ")";
  return out;
}
// Open (create_if_missing) or reopen (optionally read-only) the database.
// On create: open, apply the sharding definition, done. On reopen: verify
// the stored sharding definition against the actual CFs, open all existing
// CFs, and — in "recreate mode" (set by repair()) — recreate any missing
// shards. Finally registers perf counters and optionally compacts.
// Returns 0 or a negative errno.
int RocksDBStore::do_open(ostream &out,
			  bool create_if_missing,
			  bool open_readonly,
			  const std::string& sharding_text)
{
  ceph_assert(!(create_if_missing && open_readonly));
  rocksdb::Options opt;
  int r = load_rocksdb_options(create_if_missing, opt);
  if (r) {
    dout(1) << __func__ << " load rocksdb options failed" << dendl;
    return r;
  }
  rocksdb::Status status;
  if (create_if_missing) {
    status = rocksdb::DB::Open(opt, path, &db);
    if (!status.ok()) {
      derr << status.ToString() << dendl;
      return -EINVAL;
    }
    r = apply_sharding(opt, sharding_text);
    if (r < 0) {
      return r;
    }
    default_cf = db->DefaultColumnFamily();
  } else {
    std::vector<rocksdb::ColumnFamilyDescriptor> existing_cfs;
    std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> > existing_cfs_shard;
    std::vector<rocksdb::ColumnFamilyDescriptor> missing_cfs;
    std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> > missing_cfs_shard;
    r = verify_sharding(opt,
			existing_cfs, existing_cfs_shard,
			missing_cfs, missing_cfs_shard);
    if (r < 0) {
      return r;
    }
    // The sharding_recreate marker file ("1") is written by repair() and
    // allows missing shard CFs to be recreated on the next writable open.
    std::string sharding_recreate_text;
    status = rocksdb::ReadFileToString(opt.env,
				       sharding_recreate,
				       &sharding_recreate_text);
    bool recreate_mode = status.ok() && sharding_recreate_text == "1";
    ceph_assert(!recreate_mode || !open_readonly);
    if (recreate_mode == false && missing_cfs.size() != 0) {
      // We do not accept when there are missing column families, except case that we are during resharding.
      // We can get into this case if resharding was interrupted. It gives a chance to continue.
      // Opening DB is only allowed in read-only mode.
      // NOTE(review): this rejects a writable open when the resharding
      // lock column is among the missing CFs — confirm the intended
      // polarity of the find_if test against the resharding design.
      if (open_readonly == false &&
	  std::find_if(missing_cfs.begin(), missing_cfs.end(),
		       [](const rocksdb::ColumnFamilyDescriptor& c) { return c.name == resharding_column_lock; }
		       ) != missing_cfs.end()) {
	derr << __func__ << " missing column families: " << missing_cfs_shard << dendl;
	return -EIO;
      }
    }
    if (existing_cfs.empty()) {
      // no column families
      if (open_readonly) {
	status = rocksdb::DB::OpenForReadOnly(opt, path, &db);
      } else {
	status = rocksdb::DB::Open(opt, path, &db);
      }
      if (!status.ok()) {
	derr << status.ToString() << dendl;
	return -EINVAL;
      }
      default_cf = db->DefaultColumnFamily();
    } else {
      std::vector<rocksdb::ColumnFamilyHandle*> handles;
      if (open_readonly) {
        status = rocksdb::DB::OpenForReadOnly(rocksdb::DBOptions(opt),
				              path, existing_cfs,
					      &handles, &db);
      } else {
        status = rocksdb::DB::Open(rocksdb::DBOptions(opt),
				   path, existing_cfs, &handles, &db);
      }
      if (!status.ok()) {
	derr << status.ToString() << dendl;
	return -EINVAL;
      }
      // verify_sharding appended the default CF last, so handles has one
      // extra entry beyond the shard list.
      ceph_assert(existing_cfs.size() == existing_cfs_shard.size() + 1);
      ceph_assert(handles.size() == existing_cfs.size());
      dout(10) << __func__ << " existing_cfs=" << existing_cfs.size() << dendl;
      for (size_t i = 0; i < existing_cfs_shard.size(); i++) {
	add_column_family(existing_cfs_shard[i].second.name,
			  existing_cfs_shard[i].second.hash_l,
			  existing_cfs_shard[i].second.hash_h,
			  existing_cfs_shard[i].first,
			  handles[i]);
      }
      default_cf = handles[handles.size() - 1];
      must_close_default_cf = true;
      // Recreate missing shards, but only when the resharding lock column
      // is not among them (an interrupted reshard must be resumed instead).
      if (missing_cfs.size() > 0 &&
	  std::find_if(missing_cfs.begin(), missing_cfs.end(),
		       [](const rocksdb::ColumnFamilyDescriptor& c) { return c.name == resharding_column_lock; }
		       ) == missing_cfs.end())
      {
	dout(10) << __func__ << " missing_cfs=" << missing_cfs.size() << dendl;
	ceph_assert(recreate_mode);
	ceph_assert(missing_cfs.size() == missing_cfs_shard.size());
	for (size_t i = 0; i < missing_cfs.size(); i++) {
	  rocksdb::ColumnFamilyHandle *cf;
	  status = db->CreateColumnFamily(missing_cfs[i].options, missing_cfs[i].name, &cf);
	  if (!status.ok()) {
	    derr << __func__ << " Failed to create rocksdb column family: "
		 << missing_cfs[i].name << dendl;
	    return -EINVAL;
	  }
	  add_column_family(missing_cfs_shard[i].second.name,
			    missing_cfs_shard[i].second.hash_l,
			    missing_cfs_shard[i].second.hash_h,
			    missing_cfs_shard[i].first,
			    cf);
	}
	// All shards restored; clear the recreate marker.
	opt.env->DeleteFile(sharding_recreate);
      }
    }
  }
  ceph_assert(default_cf != nullptr);
  // Register rocksdb perf counters with the Ceph perf framework.
  PerfCountersBuilder plb(cct, "rocksdb", l_rocksdb_first, l_rocksdb_last);
  plb.add_time_avg(l_rocksdb_get_latency, "get_latency", "Get latency");
  plb.add_time_avg(l_rocksdb_submit_latency, "submit_latency", "Submit Latency");
  plb.add_time_avg(l_rocksdb_submit_sync_latency, "submit_sync_latency", "Submit Sync Latency");
  plb.add_u64_counter(l_rocksdb_compact, "compact", "Compactions");
  plb.add_u64_counter(l_rocksdb_compact_range, "compact_range", "Compactions by range");
  plb.add_u64_counter(l_rocksdb_compact_queue_merge, "compact_queue_merge", "Mergings of ranges in compaction queue");
  plb.add_u64(l_rocksdb_compact_queue_len, "compact_queue_len", "Length of compaction queue");
  plb.add_time_avg(l_rocksdb_write_wal_time, "rocksdb_write_wal_time", "Rocksdb write wal time");
  plb.add_time_avg(l_rocksdb_write_memtable_time, "rocksdb_write_memtable_time", "Rocksdb write memtable time");
  plb.add_time_avg(l_rocksdb_write_delay_time, "rocksdb_write_delay_time", "Rocksdb write delay time");
  plb.add_time_avg(l_rocksdb_write_pre_and_post_process_time,
      "rocksdb_write_pre_and_post_time", "total time spent on writing a record, excluding write process");
  logger = plb.create_perf_counters();
  cct->get_perfcounters_collection()->add(logger);
  if (compact_on_mount) {
    derr << "Compacting rocksdb store..." << dendl;
    compact();
    derr << "Finished compacting rocksdb store" << dendl;
  }
  return 0;
}
int RocksDBStore::_test_init(const string& dir)
{
rocksdb::Options options;
options.create_if_missing = true;
rocksdb::DB *db;
rocksdb::Status status = rocksdb::DB::Open(options, dir, &db);
delete db;
db = nullptr;
return status.ok() ? 0 : -EIO;
}
RocksDBStore::~RocksDBStore()
{
  // Tear down the db, compaction thread, and perf counters.
  close();
  // Release the custom rocksdb::Env stored in priv, if any — presumably
  // installed by whoever constructed this store; confirm ownership there.
  if (priv) {
    delete static_cast<rocksdb::Env*>(priv);
  }
}
// Shut the store down: stop the background compaction thread, unregister
// and free the perf counters, destroy all column family handles, and
// delete the db. Safe to call more than once (members are nulled out).
void RocksDBStore::close()
{
  // stop compaction thread
  compact_queue_lock.lock();
  if (compact_thread.is_started()) {
    dout(1) << __func__ << " waiting for compaction thread to stop" << dendl;
    compact_queue_stop = true;
    compact_queue_cond.notify_all();
    compact_queue_lock.unlock();
    compact_thread.join();
    // Fixed log message grammar ("thread to stopped" -> "thread stopped").
    dout(1) << __func__ << " compaction thread stopped" << dendl;
  } else {
    compact_queue_lock.unlock();
  }
  if (logger) {
    cct->get_perfcounters_collection()->remove(logger);
    delete logger;
    logger = nullptr;
  }
  // Ensure db is destroyed before dependent db_cache and filterpolicy
  for (auto& p : cf_handles) {
    for (size_t i = 0; i < p.second.handles.size(); i++) {
      db->DestroyColumnFamilyHandle(p.second.handles[i]);
    }
  }
  cf_handles.clear();
  if (must_close_default_cf) {
    db->DestroyColumnFamilyHandle(default_cf);
    must_close_default_cf = false;
  }
  default_cf = nullptr;
  delete db;
  db = nullptr;
}
// Run rocksdb::RepairDB on the store. RepairDB deletes files it does not
// recognize, so the sharding definition is read beforehand and its marker
// files are rewritten afterwards (even if the repair itself failed), then
// a throwaway open/close recreates any shard CFs the repair dropped.
// Returns 0 on success, -1 on any failure (details written to 'out').
int RocksDBStore::repair(std::ostream &out)
{
  rocksdb::Status status;
  rocksdb::Options opt;
  int r = load_rocksdb_options(false, opt);
  if (r) {
    dout(1) << __func__ << " load rocksdb options failed" << dendl;
    out << "load rocksdb options failed" << std::endl;
    return r;
  }
  //need to save sharding definition, repairDB will delete files it does not know
  std::string stored_sharding_text;
  status = opt.env->FileExists(sharding_def_file);
  if (status.ok()) {
    status = rocksdb::ReadFileToString(opt.env,
				       sharding_def_file,
				       &stored_sharding_text);
    if (!status.ok()) {
      // unreadable definition: proceed as if unsharded
      stored_sharding_text.clear();
    }
  }
  dout(10) << __func__ << " stored_sharding: " << stored_sharding_text << dendl;
  status = rocksdb::RepairDB(path, opt);
  bool repaired = status.ok();
  if (!stored_sharding_text.empty()) {
    //recreate markers even if repair failed
    opt.env->CreateDir(sharding_def_dir);
    status = rocksdb::WriteStringToFile(opt.env, stored_sharding_text,
					sharding_def_file, true);
    if (!status.ok()) {
      derr << __func__ << " cannot write to " << sharding_def_file << dendl;
      return -1;
    }
    // "1" in sharding_recreate tells do_open() it may recreate missing CFs
    status = rocksdb::WriteStringToFile(opt.env, "1",
					sharding_recreate, true);
    if (!status.ok()) {
      derr << __func__ << " cannot write to " << sharding_recreate << dendl;
      return -1;
    }
    // finalize sharding recreate via a writable open/close cycle
    if (do_open(out, false, false)) {
      derr << __func__ << " cannot finalize repair" << dendl;
      return -1;
    }
    close();
  }
  if (repaired && status.ok()) {
    return 0;
  } else {
    out << "repair rocksdb failed : " << status.ToString() << std::endl;
    return -1;
  }
}
// Tokenize s on 'delim', appending each token to elems.
void RocksDBStore::split_stats(const std::string &s, char delim, std::vector<std::string> &elems) {
  std::stringstream stream(s);
  std::string token;
  while (std::getline(stream, token, delim)) {
    elems.push_back(token);
  }
}
// Fetch an integer-valued rocksdb property (e.g. "rocksdb.estimate-num-keys")
// into *out. Returns false if the property is unknown or not integer-valued.
bool RocksDBStore::get_property(
  const std::string &property,
  uint64_t *out)
{
  return db->GetIntProperty(property, out);
}
// Estimate the on-disk size of all keys starting with key_prefix under
// 'prefix'. For a sharded prefix the estimates of every shard CF are
// summed; otherwise the default CF is queried over the combined
// prefix+key_prefix keyspace.
int64_t RocksDBStore::estimate_prefix_size(const string& prefix,
					   const string& key_prefix)
{
  uint64_t total = 0;
  auto shard_it = cf_handles.find(prefix);
  if (shard_it == cf_handles.end()) {
    string start = combine_strings(prefix , key_prefix);
    string limit = combine_strings(prefix , key_prefix + "\xff\xff\xff\xff");
    rocksdb::Range r(start, limit);
    db->GetApproximateSizes(default_cf, &r, 1, &total);
  } else {
    for (auto cf : shard_it->second.handles) {
      string start = key_prefix + string(1, '\x00');
      string limit = key_prefix + string("\xff\xff\xff\xff");
      rocksdb::Range r(start, limit);
      uint64_t part = 0;
      db->GetApproximateSizes(cf, &r, 1, &part);
      total += part;
    }
  }
  return total;
}
// Dump rocksdb statistics into the formatter. Requires rocksdb_perf to be
// enabled; the individual sections (compaction stats, extended stats +
// perf counters, memory stats) are each gated by their own config option.
void RocksDBStore::get_statistics(Formatter *f)
{
  if (!cct->_conf->rocksdb_perf) {
    dout(20) << __func__ << " RocksDB perf is disabled, can't probe for stats"
	     << dendl;
    return;
  }
  if (cct->_conf->rocksdb_collect_compaction_stats) {
    std::string stat_str;
    // "rocksdb.stats" is a multi-line human-readable report; emit one
    // formatter entry per line.
    bool status = db->GetProperty("rocksdb.stats", &stat_str);
    if (status) {
      f->open_object_section("rocksdb_statistics");
      f->dump_string("rocksdb_compaction_statistics", "");
      vector<string> stats;
      split_stats(stat_str, '\n', stats);
      for (auto st :stats) {
	f->dump_string("", st);
      }
      f->close_section();
    }
  }
  if (cct->_conf->rocksdb_collect_extended_stats) {
    // dbstats is only non-null when extended statistics were enabled at open.
    if (dbstats) {
      f->open_object_section("rocksdb_extended_statistics");
      string stat_str = dbstats->ToString();
      vector<string> stats;
      split_stats(stat_str, '\n', stats);
      f->dump_string("rocksdb_extended_statistics", "");
      for (auto st :stats) {
	f->dump_string(".", st);
      }
      f->close_section();
    }
    f->open_object_section("rocksdbstore_perf_counters");
    logger->dump_formatted(f, false, false);
    f->close_section();
  }
  if (cct->_conf->rocksdb_collect_memory_stats) {
    f->open_object_section("rocksdb_memtable_statistics");
    std::string str;
    if (!bbt_opts.no_block_cache) {
      str.append(stringify(bbt_opts.block_cache->GetUsage()));
      f->dump_string("block_cache_usage", str.data());
      str.clear();
      str.append(stringify(bbt_opts.block_cache->GetPinnedUsage()));
      f->dump_string("block_cache_pinned_blocks_usage", str);
      str.clear();
    }
    db->GetProperty("rocksdb.cur-size-all-mem-tables", &str);
    f->dump_string("rocksdb_memtable_usage", str);
    str.clear();
    db->GetProperty("rocksdb.estimate-table-readers-mem", &str);
    f->dump_string("rocksdb_index_filter_blocks_usage", str);
    f->close_section();
  }
}
// WriteBatch::Handler that renders a batch's operations into a human
// readable string ('seen') for debug logging; used by submit_common().
// Non-CF ops are treated as column family id 0 (the default CF), whose
// keys carry a prefix that split_key() strips; CF ops look the prefix up
// via cf_ids_to_prefix.
struct RocksDBStore::RocksWBHandler: public rocksdb::WriteBatch::Handler {
  RocksWBHandler(const RocksDBStore& db) : db(db) {}
  const RocksDBStore& db;
  std::stringstream seen;   // rendered description, read by the caller
  int num_seen = 0;         // ops rendered so far (caps output, see Continue)
  // Append one operation's description to 'seen'.
  void dump(const char* op_name,
	    uint32_t column_family_id,
	    const rocksdb::Slice& key_in,
	    const rocksdb::Slice* value = nullptr) {
    string prefix;
    string key;
    // -1 marks "no value" (delete ops) so the size is not printed.
    ssize_t size = value ? value->size() : -1;
    seen << std::endl << op_name << "(";
    if (column_family_id == 0) {
      db.split_key(key_in, &prefix, &key);
    } else {
      auto it = db.cf_ids_to_prefix.find(column_family_id);
      ceph_assert(it != db.cf_ids_to_prefix.end());
      prefix = it->second;
      key = key_in.ToString();
    }
    seen << " prefix = " << prefix;
    seen << " key = " << pretty_binary_string(key);
    if (size != -1)
      seen << " value size = " << std::to_string(size);
    seen << ")";
    num_seen++;
  }
  void Put(const rocksdb::Slice& key,
	   const rocksdb::Slice& value) override {
    dump("Put", 0, key, &value);
  }
  rocksdb::Status PutCF(uint32_t column_family_id, const rocksdb::Slice& key,
			const rocksdb::Slice& value) override {
    dump("PutCF", column_family_id, key, &value);
    return rocksdb::Status::OK();
  }
  void SingleDelete(const rocksdb::Slice& key) override {
    dump("SingleDelete", 0, key);
  }
  rocksdb::Status SingleDeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
    dump("SingleDeleteCF", column_family_id, key);
    return rocksdb::Status::OK();
  }
  void Delete(const rocksdb::Slice& key) override {
    dump("Delete", 0, key);
  }
  rocksdb::Status DeleteCF(uint32_t column_family_id, const rocksdb::Slice& key) override {
    dump("DeleteCF", column_family_id, key);
    return rocksdb::Status::OK();
  }
  void Merge(const rocksdb::Slice& key,
	     const rocksdb::Slice& value) override {
    dump("Merge", 0, key, &value);
  }
  rocksdb::Status MergeCF(uint32_t column_family_id, const rocksdb::Slice& key,
			  const rocksdb::Slice& value) override {
    dump("MergeCF", column_family_id, key, &value);
    return rocksdb::Status::OK();
  }
  // Stop iterating after 50 ops to bound the size of the debug output.
  bool Continue() override { return num_seen < 50; }
};
// Common path for (a)sync transaction submission: optionally enables
// rocksdb perf instrumentation, writes the batch, logs a dump of the batch
// on failure, and feeds the perf breakdown into the logger.
// Returns 0 on success, -1 on write failure.
int RocksDBStore::submit_common(rocksdb::WriteOptions& woptions, KeyValueDB::Transaction t)
{
  // enable rocksdb breakdown
  // considering performance overhead, default is disabled
  if (cct->_conf->rocksdb_perf) {
    rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTimeExceptForMutex);
    rocksdb::get_perf_context()->Reset();
  }
  RocksDBTransactionImpl * _t =
    static_cast<RocksDBTransactionImpl *>(t.get());
  woptions.disableWAL = disableWAL;
  // NOTE: the lgeneric_subdout macro opens a conditional scope that the
  // matching dendl closes, so the batch dump below only runs when the
  // rocksdb debug level is >= 30.
  lgeneric_subdout(cct, rocksdb, 30) << __func__;
  RocksWBHandler bat_txc(*this);
  _t->bat.Iterate(&bat_txc);
  *_dout << " Rocksdb transaction: " << bat_txc.seen.str() << dendl;
  rocksdb::Status s = db->Write(woptions, &_t->bat);
  if (!s.ok()) {
    // On error, always dump the batch contents for diagnosis.
    RocksWBHandler rocks_txc(*this);
    _t->bat.Iterate(&rocks_txc);
    derr << __func__ << " error: " << s.ToString() << " code = " << s.code()
         << " Rocksdb transaction: " << rocks_txc.seen.str() << dendl;
  }
  if (cct->_conf->rocksdb_perf) {
    // Perf context counters are in nanoseconds; convert to utime_t seconds.
    utime_t write_memtable_time;
    utime_t write_delay_time;
    utime_t write_wal_time;
    utime_t write_pre_and_post_process_time;
    write_wal_time.set_from_double(
	static_cast<double>(rocksdb::get_perf_context()->write_wal_time)/1000000000);
    write_memtable_time.set_from_double(
	static_cast<double>(rocksdb::get_perf_context()->write_memtable_time)/1000000000);
    write_delay_time.set_from_double(
	static_cast<double>(rocksdb::get_perf_context()->write_delay_time)/1000000000);
    write_pre_and_post_process_time.set_from_double(
	static_cast<double>(rocksdb::get_perf_context()->write_pre_and_post_process_time)/1000000000);
    logger->tinc(l_rocksdb_write_memtable_time, write_memtable_time);
    logger->tinc(l_rocksdb_write_delay_time, write_delay_time);
    logger->tinc(l_rocksdb_write_wal_time, write_wal_time);
    logger->tinc(l_rocksdb_write_pre_and_post_process_time, write_pre_and_post_process_time);
  }
  return s.ok() ? 0 : -1;
}
// Submit a transaction without forcing a WAL sync; records submit latency.
int RocksDBStore::submit_transaction(KeyValueDB::Transaction t)
{
  const utime_t start = ceph_clock_now();
  rocksdb::WriteOptions woptions;
  woptions.sync = false;
  const int result = submit_common(woptions, t);
  logger->tinc(l_rocksdb_submit_latency, ceph_clock_now() - start);
  return result;
}
// Submit a transaction with a synced WAL write; records sync-submit latency.
int RocksDBStore::submit_transaction_sync(KeyValueDB::Transaction t)
{
  const utime_t start = ceph_clock_now();
  rocksdb::WriteOptions woptions;
  // if disableWAL, sync can't set
  woptions.sync = !disableWAL;
  const int result = submit_common(woptions, t);
  logger->tinc(l_rocksdb_submit_sync_latency, ceph_clock_now() - start);
  return result;
}
// Bind the transaction to its owning store.
RocksDBStore::RocksDBTransactionImpl::RocksDBTransactionImpl(RocksDBStore *_db)
  : db(_db)
{
}
// Append a Put of key -> to_set_bl to the batch for column family 'cf',
// avoiding a flatten of the bufferlist: a contiguous bufferlist is passed
// as a single Slice, a fragmented one as SliceParts (one Slice per buffer).
void RocksDBStore::RocksDBTransactionImpl::put_bat(
  rocksdb::WriteBatch& bat,
  rocksdb::ColumnFamilyHandle *cf,
  const string &key,
  const bufferlist &to_set_bl)
{
  // bufferlist::c_str() is non-constant, so we can't call c_str()
  if (to_set_bl.is_contiguous() && to_set_bl.length() > 0) {
    bat.Put(cf,
	    rocksdb::Slice(key),
	    rocksdb::Slice(to_set_bl.buffers().front().c_str(),
			   to_set_bl.length()));
  } else {
    rocksdb::Slice key_slice(key);
    // One Slice per underlying buffer; rocksdb concatenates them.
    vector<rocksdb::Slice> value_slices(to_set_bl.get_num_buffers());
    bat.Put(cf,
	    rocksdb::SliceParts(&key_slice, 1),
	    prepare_sliceparts(to_set_bl, &value_slices));
  }
}
void RocksDBStore::RocksDBTransactionImpl::set(
const string &prefix,
const string &k,
const bufferlist &to_set_bl)
{
auto cf = db->get_cf_handle(prefix, k);
if (cf) {
put_bat(bat, cf, k, to_set_bl);
} else {
string key = combine_strings(prefix, k);
put_bat(bat, db->default_cf, key, to_set_bl);
}
}
// Record a put of (prefix, k[0..keylen)) -> to_set_bl, analogous to the
// std::string overload of set() above: shard CF when the prefix is sharded,
// otherwise the default CF with a combined key.
void RocksDBStore::RocksDBTransactionImpl::set(
  const string &prefix,
  const char *k, size_t keylen,
  const bufferlist &to_set_bl)
{
  auto cf = db->get_cf_handle(prefix, k, keylen);
  if (cf) {
    string key(k, keylen);  // fixme?
    put_bat(bat, cf, key, to_set_bl);
  } else {
    string key;
    combine_strings(prefix, k, keylen, &key);
    // Bug fix: this previously passed 'cf' — which is nullptr on this
    // branch — instead of the default CF, matching the string overload.
    put_bat(bat, db->default_cf, key, to_set_bl);
  }
}
// Record a delete of (prefix, k): shard CF when sharded, otherwise the
// combined key in the default CF.
void RocksDBStore::RocksDBTransactionImpl::rmkey(const string &prefix,
					         const string &k)
{
  if (auto cf = db->get_cf_handle(prefix, k); cf != nullptr) {
    bat.Delete(cf, rocksdb::Slice(k));
  } else {
    bat.Delete(db->default_cf, combine_strings(prefix, k));
  }
}
void RocksDBStore::RocksDBTransactionImpl::rmkey(const string &prefix,
const char *k,
size_t keylen)
{
auto cf = db->get_cf_handle(prefix, k, keylen);
if (cf) {
bat.Delete(cf, rocksdb::Slice(k, keylen));
} else {
string key;
combine_strings(prefix, k, keylen, &key);
bat.Delete(db->default_cf, rocksdb::Slice(key));
}
}
// Record a rocksdb SingleDelete of (prefix, k), routed the same way as
// rmkey(): shard CF when sharded, combined key in the default CF otherwise.
void RocksDBStore::RocksDBTransactionImpl::rm_single_key(const string &prefix,
					                 const string &k)
{
  if (auto cf = db->get_cf_handle(prefix, k); cf != nullptr) {
    bat.SingleDelete(cf, k);
  } else {
    bat.SingleDelete(db->default_cf, combine_strings(prefix, k));
  }
}
// Delete every key under 'prefix'. Keys are deleted one by one while below
// the delete-range threshold; once the threshold would be exceeded the
// per-key deletes are rolled back (savepoint) and a single DeleteRange is
// issued instead, which is cheaper for large ranges.
void RocksDBStore::RocksDBTransactionImpl::rmkeys_by_prefix(const string &prefix)
{
  auto p_iter = db->cf_handles.find(prefix);
  if (p_iter == db->cf_handles.end()) {
    // Unsharded prefix: keys live in the default CF as "prefix\0key".
    uint64_t cnt = db->get_delete_range_threshold();
    bat.SetSavePoint();
    auto it = db->get_iterator(prefix);
    // Note: pre-decrement means at most threshold-1 per-key deletes before
    // falling back to DeleteRange.
    for (it->seek_to_first(); it->valid() && (--cnt) != 0; it->next()) {
      bat.Delete(db->default_cf, combine_strings(prefix, it->key()));
    }
    if (cnt == 0) {
      // Threshold hit: drop the per-key deletes, delete the whole
      // [prefix\0, prefix+1\0) range in one op.
      bat.RollbackToSavePoint();
      string endprefix = prefix;
      endprefix.push_back('\x01');
      bat.DeleteRange(db->default_cf,
                      combine_strings(prefix, string()),
                      combine_strings(endprefix, string()));
    } else {
      bat.PopSavePoint();
    }
  } else {
    // Sharded prefix: apply the same scheme to every shard CF, whose keys
    // carry no prefix.
    ceph_assert(p_iter->second.handles.size() >= 1);
    for (auto cf : p_iter->second.handles) {
      uint64_t cnt = db->get_delete_range_threshold();
      bat.SetSavePoint();
      auto it = db->new_shard_iterator(cf);
      for (it->seek_to_first(); it->valid() && (--cnt) != 0; it->next()) {
	bat.Delete(cf, it->key());
      }
      if (cnt == 0) {
	bat.RollbackToSavePoint();
	string endprefix = "\xff\xff\xff\xff";  // FIXME: this is cheating...
	bat.DeleteRange(cf, string(), endprefix);
      } else {
	bat.PopSavePoint();
      }
    }
  }
}
// Delete all keys of 'prefix' in [start, end). Like rmkeys_by_prefix(),
// per-key deletes are used while below the delete-range threshold; when the
// threshold is reached they are rolled back in favor of a single
// DeleteRange. A threshold of 0 selects DeleteRange immediately.
void RocksDBStore::RocksDBTransactionImpl::rm_range_keys(const string &prefix,
                                                         const string &start,
                                                         const string &end)
{
  ldout(db->cct, 10) << __func__
		     << " enter prefix=" << prefix
		     << " start=" << pretty_binary_string(start)
		     << " end=" << pretty_binary_string(end) << dendl;
  auto p_iter = db->cf_handles.find(prefix);
  uint64_t cnt = db->get_delete_range_threshold();
  if (p_iter == db->cf_handles.end()) {
    // Unsharded prefix: iterate the default CF, comparing stripped keys
    // against 'end' ourselves.
    uint64_t cnt0 = cnt;
    bat.SetSavePoint();
    auto it = db->get_iterator(prefix);
    for (it->lower_bound(start);
	 it->valid() && db->comparator->Compare(it->key(), end) < 0 && (--cnt) != 0;
	 it->next()) {
      bat.Delete(db->default_cf, combine_strings(prefix, it->key()));
    }
    ldout(db->cct, 15) << __func__
		       << " count = " << cnt0 - cnt
		       << dendl;
    if (cnt == 0) {
      ldout(db->cct, 10) << __func__ << " p_iter == end(), resorting to DeleteRange"
			 << dendl;
      bat.RollbackToSavePoint();
      bat.DeleteRange(db->default_cf,
		      rocksdb::Slice(combine_strings(prefix, start)),
		      rocksdb::Slice(combine_strings(prefix, end)));
    } else {
      bat.PopSavePoint();
    }
  } else if (cnt == 0) {
    // Threshold disabled (0): DeleteRange every shard directly.
    ceph_assert(p_iter->second.handles.size() >= 1);
    for (auto cf : p_iter->second.handles) {
      ldout(db->cct, 10) << __func__ << " p_iter != end(), resorting to DeleteRange"
			 << dendl;
      bat.DeleteRange(cf, rocksdb::Slice(start), rocksdb::Slice(end));
    }
  } else {
    // Sharded prefix: bounded iterator per shard keeps the scan inside
    // [start, end) so no explicit end comparison is needed.
    auto bounds = KeyValueDB::IteratorBounds();
    bounds.lower_bound = start;
    bounds.upper_bound = end;
    ceph_assert(p_iter->second.handles.size() >= 1);
    for (auto cf : p_iter->second.handles) {
      cnt = db->get_delete_range_threshold();
      uint64_t cnt0 = cnt;
      bat.SetSavePoint();
      auto it = db->new_shard_iterator(cf, prefix, bounds);
      for (it->lower_bound(start);
	   it->valid() && (--cnt) != 0;
	   it->next()) {
	bat.Delete(cf, it->key());
      }
      ldout(db->cct, 10) << __func__
			 << " count = " << cnt0 - cnt
			 << dendl;
      if (cnt == 0) {
	ldout(db->cct, 10) << __func__ << " p_iter != end(), resorting to DeleteRange"
			   << dendl;
	bat.RollbackToSavePoint();
	bat.DeleteRange(cf, rocksdb::Slice(start), rocksdb::Slice(end));
      } else {
	bat.PopSavePoint();
      }
    }
  }
  ldout(db->cct, 10) << __func__ << " end" << dendl;
}
// Queue a Merge of 'to_set_bl' for (prefix, k), using the shard CF when
// one matches, else the default CF with the combined "prefix\0key" key.
// A contiguous bufferlist is passed as a single Slice; otherwise each
// buffer segment becomes one element of a SliceParts so the bufferlist
// need not be flattened (the slices only have to live until bat.Merge
// copies them into the batch).
void RocksDBStore::RocksDBTransactionImpl::merge(
  const string &prefix,
  const string &k,
  const bufferlist &to_set_bl)
{
  auto cf = db->get_cf_handle(prefix, k);
  if (cf) {
    // bufferlist::c_str() is non-constant, so we can't call c_str()
    if (to_set_bl.is_contiguous() && to_set_bl.length() > 0) {
      bat.Merge(
        cf,
        rocksdb::Slice(k),
        rocksdb::Slice(to_set_bl.buffers().front().c_str(), to_set_bl.length()));
    } else {
      // make a copy
      rocksdb::Slice key_slice(k);
      vector<rocksdb::Slice> value_slices(to_set_bl.get_num_buffers());
      bat.Merge(cf, rocksdb::SliceParts(&key_slice, 1),
                prepare_sliceparts(to_set_bl, &value_slices));
    }
  } else {
    string key = combine_strings(prefix, k);
    // bufferlist::c_str() is non-constant, so we can't call c_str()
    if (to_set_bl.is_contiguous() && to_set_bl.length() > 0) {
      bat.Merge(
        db->default_cf,
        rocksdb::Slice(key),
        rocksdb::Slice(to_set_bl.buffers().front().c_str(), to_set_bl.length()));
    } else {
      // make a copy
      rocksdb::Slice key_slice(key);
      vector<rocksdb::Slice> value_slices(to_set_bl.get_num_buffers());
      bat.Merge(
        db->default_cf,
        rocksdb::SliceParts(&key_slice, 1),
        prepare_sliceparts(to_set_bl, &value_slices));
    }
  }
}
int RocksDBStore::get(
const string &prefix,
const std::set<string> &keys,
std::map<string, bufferlist> *out)
{
rocksdb::PinnableSlice value;
utime_t start = ceph_clock_now();
if (cf_handles.count(prefix) > 0) {
for (auto& key : keys) {
auto cf_handle = get_cf_handle(prefix, key);
auto status = db->Get(rocksdb::ReadOptions(),
cf_handle,
rocksdb::Slice(key),
&value);
if (status.ok()) {
(*out)[key].append(value.data(), value.size());
} else if (status.IsIOError()) {
ceph_abort_msg(status.getState());
}
value.Reset();
}
} else {
for (auto& key : keys) {
string k = combine_strings(prefix, key);
auto status = db->Get(rocksdb::ReadOptions(),
default_cf,
rocksdb::Slice(k),
&value);
if (status.ok()) {
(*out)[key].append(value.data(), value.size());
} else if (status.IsIOError()) {
ceph_abort_msg(status.getState());
}
value.Reset();
}
}
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_get_latency, lat);
return 0;
}
int RocksDBStore::get(
const string &prefix,
const string &key,
bufferlist *out)
{
ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
rocksdb::PinnableSlice value;
rocksdb::Status s;
auto cf = get_cf_handle(prefix, key);
if (cf) {
s = db->Get(rocksdb::ReadOptions(),
cf,
rocksdb::Slice(key),
&value);
} else {
string k = combine_strings(prefix, key);
s = db->Get(rocksdb::ReadOptions(),
default_cf,
rocksdb::Slice(k),
&value);
}
if (s.ok()) {
out->append(value.data(), value.size());
} else if (s.IsNotFound()) {
r = -ENOENT;
} else {
ceph_abort_msg(s.getState());
}
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_get_latency, lat);
return r;
}
int RocksDBStore::get(
const string& prefix,
const char *key,
size_t keylen,
bufferlist *out)
{
ceph_assert(out && (out->length() == 0));
utime_t start = ceph_clock_now();
int r = 0;
rocksdb::PinnableSlice value;
rocksdb::Status s;
auto cf = get_cf_handle(prefix, key, keylen);
if (cf) {
s = db->Get(rocksdb::ReadOptions(),
cf,
rocksdb::Slice(key, keylen),
&value);
} else {
string k;
combine_strings(prefix, key, keylen, &k);
s = db->Get(rocksdb::ReadOptions(),
default_cf,
rocksdb::Slice(k),
&value);
}
if (s.ok()) {
out->append(value.data(), value.size());
} else if (s.IsNotFound()) {
r = -ENOENT;
} else {
ceph_abort_msg(s.getState());
}
utime_t lat = ceph_clock_now() - start;
logger->tinc(l_rocksdb_get_latency, lat);
return r;
}
// Split a combined "prefix\0key" slice into its components.
// Either output may be null to skip it. Returns -EINVAL when no '\0'
// separator is present.
int RocksDBStore::split_key(rocksdb::Slice in, string *prefix, string *key)
{
  const char* data = in.data();
  const char* sep =
    static_cast<const char*>(memchr(data, '\0', in.size()));
  if (sep == nullptr)
    return -EINVAL;
  const size_t prefix_len = sep - data;
  if (prefix_len >= in.size())
    return -EINVAL;
  if (prefix)
    prefix->assign(data, prefix_len);
  if (key)
    key->assign(sep + 1, in.size() - prefix_len - 1);
  return 0;
}
// Trigger a full manual compaction: the default column family first,
// then every shard CF of every sharded prefix.
void RocksDBStore::compact()
{
  logger->inc(l_rocksdb_compact);
  rocksdb::CompactRangeOptions options;
  db->CompactRange(options, default_cf, nullptr, nullptr);
  for (const auto& [prefix, column] : cf_handles) {
    for (auto shard_cf : column.handles) {
      db->CompactRange(options, shard_cf, nullptr, nullptr);
    }
  }
}
// Body of the background compaction thread: drains compact_queue,
// running each full or ranged compaction with compact_queue_lock
// released, and blocks on compact_queue_cond while the queue is empty
// (until compact_queue_stop is set).
void RocksDBStore::compact_thread_entry()
{
  std::unique_lock l{compact_queue_lock};
  dout(10) << __func__ << " enter" << dendl;
  while (!compact_queue_stop) {
    if (!compact_queue.empty()) {
      auto range = compact_queue.front();
      compact_queue.pop_front();
      logger->set(l_rocksdb_compact_queue_len, compact_queue.size());
      // Drop the lock for the (potentially long) compaction itself so
      // producers can keep queueing ranges.
      l.unlock();
      logger->inc(l_rocksdb_compact_range);
      // An empty/empty pair is the sentinel for "compact everything".
      if (range.first.empty() && range.second.empty()) {
        compact();
      } else {
        compact_range(range.first, range.second);
      }
      l.lock();
      continue;
    }
    dout(10) << __func__ << " waiting" << dendl;
    compact_queue_cond.wait(l);
  }
  dout(10) << __func__ << " exit" << dendl;
}
// Queue an asynchronous compaction of [start, end], merging the new
// range with an overlapping queued range when possible, and lazily
// starting the background compaction thread.
void RocksDBStore::compact_range_async(const string& start, const string& end)
{
  std::lock_guard l(compact_queue_lock);
  // try to merge adjacent ranges. this is O(n), but the queue should
  // be short. note that we do not cover all overlap cases and merge
  // opportunities here, but we capture the ones we currently need.
  list< pair<string,string> >::iterator p = compact_queue.begin();
  while (p != compact_queue.end()) {
    if (p->first == start && p->second == end) {
      // dup; no-op
      return;
    }
    if (start <= p->first && p->first <= end) {
      // new region crosses start of existing range
      // select right bound that is bigger
      compact_queue.push_back(make_pair(start, end > p->second ? end : p->second));
      compact_queue.erase(p);
      logger->inc(l_rocksdb_compact_queue_merge);
      break;
    }
    if (start <= p->second && p->second <= end) {
      // new region crosses end of existing range
      //p->first < p->second and p->second <= end, so p->first <= end.
      //But we break if previous condition, so start > p->first.
      compact_queue.push_back(make_pair(p->first, end));
      compact_queue.erase(p);
      logger->inc(l_rocksdb_compact_queue_merge);
      break;
    }
    ++p;
  }
  if (p == compact_queue.end()) {
    // no merge, new entry.
    compact_queue.push_back(make_pair(start, end));
    logger->set(l_rocksdb_compact_queue_len, compact_queue.size());
  }
  // Wake the compaction thread; create it on first use.
  compact_queue_cond.notify_all();
  if (!compact_thread.is_started()) {
    compact_thread.create("rstore_compact");
  }
}
// Probe whether a RocksDB instance can be opened (creating it if
// absent) at the given omap directory. Returns true on success.
bool RocksDBStore::check_omap_dir(string &omap_dir)
{
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::DB *db = nullptr;
  const rocksdb::Status status = rocksdb::DB::Open(options, omap_dir, &db);
  delete db;  // delete of nullptr is a no-op if the open failed
  return status.ok();
}
// Compact the default CF over the combined-key range [start, end] and
// forward the request to any sharded column families whose prefixes
// fall within that range. start/end are "prefix\0key" strings.
void RocksDBStore::compact_range(const string& start, const string& end)
{
  rocksdb::CompactRangeOptions options;
  rocksdb::Slice cstart(start);
  rocksdb::Slice cend(end);
  string prefix_start, key_start;
  string prefix_end, key_end;
  string key_highest = "\xff\xff\xff\xff"; //cheating
  string key_lowest = "";
  // Compact every shard of one prefix over [start, end].
  auto compact_range = [&] (const decltype(cf_handles)::iterator column_it,
                            const std::string& start,
                            const std::string& end) {
    rocksdb::Slice cstart(start);
    rocksdb::Slice cend(end);
    for (const auto& shard_it : column_it->second.handles) {
      db->CompactRange(options, shard_it, &cstart, &cend);
    }
  };
  db->CompactRange(options, default_cf, &cstart, &cend);
  split_key(cstart, &prefix_start, &key_start);
  split_key(cend, &prefix_end, &key_end);
  if (prefix_start == prefix_end) {
    // Range lies within a single prefix.
    const auto& column = cf_handles.find(prefix_start);
    if (column != cf_handles.end()) {
      compact_range(column, key_start, key_end);
    }
  } else {
    // Range spans prefixes: partial range on the first and last
    // prefixes, full key range for every sharded prefix in between.
    // NOTE(review): if prefix_start has no cf_handles entry but
    // prefix_end does, 'column' is end() and the loop below would
    // advance past-the-end — confirm callers only pass known prefixes.
    auto column = cf_handles.find(prefix_start);
    if (column != cf_handles.end()) {
      compact_range(column, key_start, key_highest);
      ++column;
    }
    const auto& column_end = cf_handles.find(prefix_end);
    while (column != column_end) {
      compact_range(column, key_lowest, key_highest);
      column++;
    }
    if (column != cf_handles.end()) {
      compact_range(column, key_lowest, key_end);
    }
  }
}
// Releases the underlying rocksdb iterator owned by this wrapper.
RocksDBStore::RocksDBWholeSpaceIteratorImpl::~RocksDBWholeSpaceIteratorImpl()
{
  delete dbiter;
}
// Position on the first key in the whole key space.
// Returns 0 on success, -1 on iterator error; I/O errors abort.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_first()
{
  dbiter->SeekToFirst();
  ceph_assert(!dbiter->status().IsIOError());
  if (!dbiter->status().ok()) {
    return -1;
  }
  return 0;
}
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_first(const string &prefix)
{
rocksdb::Slice slice_prefix(prefix);
dbiter->Seek(slice_prefix);
ceph_assert(!dbiter->status().IsIOError());
return dbiter->status().ok() ? 0 : -1;
}
// Position on the very last key in the whole key space.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_last()
{
  dbiter->SeekToLast();
  ceph_assert(!dbiter->status().IsIOError());
  if (!dbiter->status().ok()) {
    return -1;
  }
  return 0;
}
// Position on the last key belonging to 'prefix': seek to the first key
// past the prefix (past_prefix appends '\x01', which sorts just after
// the '\0' separator) and step back one entry; when the seek overshoots
// the end of the DB, the candidate is the DB's last key.
// NOTE(review): the caller is expected to validate the result still has
// 'prefix' (e.g. raw_key_is_prefixed) — an empty prefix range can land
// on a foreign key.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::seek_to_last(const string &prefix)
{
  string limit = past_prefix(prefix);
  rocksdb::Slice slice_limit(limit);
  dbiter->Seek(slice_limit);
  if (!dbiter->Valid()) {
    dbiter->SeekToLast();
  } else {
    dbiter->Prev();
  }
  return dbiter->status().ok() ? 0 : -1;
}
// Position on the first key strictly after (prefix, after): take the
// lower bound and skip one entry when it is an exact match.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::upper_bound(const string &prefix, const string &after)
{
  lower_bound(prefix, after);
  if (valid()) {
    auto [cur_prefix, cur_key] = raw_key();
    if (cur_prefix == prefix && cur_key == after) {
      next();
    }
  }
  return dbiter->status().ok() ? 0 : -1;
}
// Position on the first key at or after (prefix, to) in combined form.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::lower_bound(const string &prefix, const string &to)
{
  const string bound = combine_strings(prefix, to);
  rocksdb::Slice slice_bound(bound);
  dbiter->Seek(slice_bound);
  return dbiter->status().ok() ? 0 : -1;
}
// True when the underlying rocksdb iterator is positioned on an entry.
bool RocksDBStore::RocksDBWholeSpaceIteratorImpl::valid()
{
  return dbiter->Valid();
}
// Advance one entry; a no-op when already past the end.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::next()
{
  if (dbiter->Valid()) {
    dbiter->Next();
  }
  ceph_assert(!dbiter->status().IsIOError());
  if (!dbiter->status().ok()) {
    return -1;
  }
  return 0;
}
// Step back one entry; a no-op when not positioned on an entry.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::prev()
{
  if (dbiter->Valid()) {
    dbiter->Prev();
  }
  ceph_assert(!dbiter->status().IsIOError());
  if (!dbiter->status().ok()) {
    return -1;
  }
  return 0;
}
// Current key with the prefix stripped off the combined representation.
string RocksDBStore::RocksDBWholeSpaceIteratorImpl::key()
{
  string user_key;
  split_key(dbiter->key(), nullptr, &user_key);
  return user_key;
}
// Current entry as a (prefix, key) pair.
pair<string,string> RocksDBStore::RocksDBWholeSpaceIteratorImpl::raw_key()
{
  pair<string,string> out;
  split_key(dbiter->key(), &out.first, &out.second);
  return out;
}
// Check whether the current combined key starts with "prefix\0" without
// materializing any std::string — operate on the slice directly.
bool RocksDBStore::RocksDBWholeSpaceIteratorImpl::raw_key_is_prefixed(const string &prefix) {
  rocksdb::Slice key = dbiter->key();
  if (key.size() <= prefix.length() || key[prefix.length()] != '\0') {
    return false;
  }
  return memcmp(key.data(), prefix.c_str(), prefix.length()) == 0;
}
// Copy of the current value as a bufferlist.
bufferlist RocksDBStore::RocksDBWholeSpaceIteratorImpl::value()
{
  return to_bufferlist(dbiter->value());
}
// Size of the current *combined* key (prefix + separator + key).
size_t RocksDBStore::RocksDBWholeSpaceIteratorImpl::key_size()
{
  return dbiter->key().size();
}
// Size in bytes of the current value.
size_t RocksDBStore::RocksDBWholeSpaceIteratorImpl::value_size()
{
  return dbiter->value().size();
}
// Copy of the current value wrapped in a single bufferptr.
bufferptr RocksDBStore::RocksDBWholeSpaceIteratorImpl::value_as_ptr()
{
  const rocksdb::Slice val = dbiter->value();
  return bufferptr(val.data(), val.size());
}
// 0 when the iterator is healthy, -1 on any accumulated error.
int RocksDBStore::RocksDBWholeSpaceIteratorImpl::status()
{
  return dbiter->status().ok() ? 0 : -1;
}
// First key sorting past every combined key of 'prefix': '\x01' is the
// smallest byte greater than the '\0' prefix/key separator, so
// "prefix\x01" upper-bounds all "prefix\0..." entries.
string RocksDBStore::past_prefix(const string &prefix)
{
  return prefix + '\x01';
}
// Iterator over a single (prefix, column family) pair. Shard CFs store
// bare keys, so no prefix stripping is needed; raw_key() re-attaches
// the prefix for callers. The iterate_*_bound slices are members so
// they outlive the rocksdb iterator that points at them.
class CFIteratorImpl : public KeyValueDB::IteratorImpl {
protected:
  string prefix;
  rocksdb::Iterator *dbiter;
  const KeyValueDB::IteratorBounds bounds;
  const rocksdb::Slice iterate_lower_bound;
  const rocksdb::Slice iterate_upper_bound;
public:
  explicit CFIteratorImpl(const RocksDBStore* db,
                          const std::string& p,
                          rocksdb::ColumnFamilyHandle* cf,
                          KeyValueDB::IteratorBounds bounds_)
    : prefix(p), bounds(std::move(bounds_)),
      iterate_lower_bound(make_slice(bounds.lower_bound)),
      iterate_upper_bound(make_slice(bounds.upper_bound))
    {
      // Bounds are only honored when the config toggle is on; rocksdb
      // then clips the iteration range server-side.
      auto options = rocksdb::ReadOptions();
      if (db->cct->_conf->osd_rocksdb_iterator_bounds_enabled) {
        if (bounds.lower_bound) {
          options.iterate_lower_bound = &iterate_lower_bound;
        }
        if (bounds.upper_bound) {
          options.iterate_upper_bound = &iterate_upper_bound;
        }
      }
      dbiter = db->db->NewIterator(options, cf);
  }
  ~CFIteratorImpl() {
    delete dbiter;
  }
  int seek_to_first() override {
    dbiter->SeekToFirst();
    return dbiter->status().ok() ? 0 : -1;
  }
  int seek_to_last() override {
    dbiter->SeekToLast();
    return dbiter->status().ok() ? 0 : -1;
  }
  // First key strictly after 'after'.
  int upper_bound(const string &after) override {
    lower_bound(after);
    if (valid() && (key() == after)) {
      next();
    }
    return dbiter->status().ok() ? 0 : -1;
  }
  // First key at or after 'to'.
  int lower_bound(const string &to) override {
    rocksdb::Slice slice_bound(to);
    dbiter->Seek(slice_bound);
    return dbiter->status().ok() ? 0 : -1;
  }
  int next() override {
    if (valid()) {
      dbiter->Next();
    }
    return dbiter->status().ok() ? 0 : -1;
  }
  int prev() override {
    if (valid()) {
      dbiter->Prev();
    }
    return dbiter->status().ok() ? 0 : -1;
  }
  bool valid() override {
    return dbiter->Valid();
  }
  string key() override {
    return dbiter->key().ToString();
  }
  // (prefix, key) pair; the prefix is the one this iterator was built for.
  std::pair<std::string, std::string> raw_key() override {
    return make_pair(prefix, key());
  }
  bufferlist value() override {
    return to_bufferlist(dbiter->value());
  }
  bufferptr value_as_ptr() override {
    rocksdb::Slice val = dbiter->value();
    return bufferptr(val.data(), val.size());
  }
  int status() override {
    return dbiter->status().ok() ? 0 : -1;
  }
};
// Merge column iterators and rest iterator.
// Presents one whole-space view over the default CF ("main", combined
// "prefix\0key" keys) merged with one iterator per sharded prefix.
// 'smaller' records which side currently yields the smaller key, i.e.
// which iterator the accessors (key/value/valid) must read from.
class WholeMergeIteratorImpl : public KeyValueDB::WholeSpaceIteratorImpl {
private:
  RocksDBStore* db;
  KeyValueDB::WholeSpaceIterator main;
  std::map<std::string, KeyValueDB::Iterator> shards;
  std::map<std::string, KeyValueDB::Iterator>::iterator current_shard;
  enum {on_main, on_shard} smaller;

public:
  WholeMergeIteratorImpl(RocksDBStore* db)
    : db(db)
    , main(db->get_default_cf_iterator())
  {
    for (auto& e : db->cf_handles) {
      shards.emplace(e.first, db->get_iterator(e.first));
    }
  }

  // returns true if value in main is smaller then in shards
  // invalid is larger then actual value
  bool is_main_smaller() {
    if (main->valid()) {
      if (current_shard != shards.end()) {
	auto main_rk = main->raw_key();
	ceph_assert(current_shard->second->valid());
	auto shards_rk = current_shard->second->raw_key();
	if (main_rk.first < shards_rk.first)
	  return true;
	if (main_rk.first > shards_rk.first)
	  return false;
	return main_rk.second < shards_rk.second;
      } else {
	return true;
      }
    } else {
      if (current_shard != shards.end()) {
	return false;
      } else {
	//this means that neither is valid
	//we select main to be smaller, so valid() will signal properly
	return true;
      }
    }
  }

  int seek_to_first() override {
    int r0 = main->seek_to_first();
    int r1 = 0;
    // find first shard that has some data
    current_shard = shards.begin();
    while (current_shard != shards.end()) {
      r1 = current_shard->second->seek_to_first();
      if (r1 != 0 || current_shard->second->valid()) {
	//this is the first shard that will yield some keys
	break;
      }
      ++current_shard;
    }
    smaller = is_main_smaller() ? on_main : on_shard;
    return r0 == 0 && r1 == 0 ? 0 : -1;
  }

  int seek_to_first(const std::string &prefix) override {
    int r0 = main->seek_to_first(prefix);
    int r1 = 0;
    // find first shard (at or after prefix) that has some data
    current_shard = shards.lower_bound(prefix);
    while (current_shard != shards.end()) {
      r1 = current_shard->second->seek_to_first();
      if (r1 != 0 || current_shard->second->valid()) {
	//this is the first shard that will yield some keys
	break;
      }
      ++current_shard;
    }
    smaller = is_main_smaller() ? on_main : on_shard;
    return r0 == 0 && r1 == 0 ? 0 : -1;
  };

  int seek_to_last() override {
    int r0 = main->seek_to_last();
    int r1 = 0;
    r1 = shards_seek_to_last();
    //if we have 2 candidates, we need to select the larger one and
    //advance the other past the end so only one remains valid
    if (main->valid()) {
      if (shards_valid()) {
	if (is_main_smaller()) {
	  smaller = on_shard;
	  main->next();
	} else {
	  smaller = on_main;
	  shards_next();
	}
      } else {
	smaller = on_main;
      }
    } else {
      if (shards_valid()) {
	smaller = on_shard;
      } else {
	smaller = on_main;
      }
    }
    return r0 == 0 && r1 == 0 ? 0 : -1;
  }

  int seek_to_last(const std::string &prefix) override {
    int r0 = main->seek_to_last(prefix);
    int r1 = 0;
    // find last shard at or before 'prefix' that has some data;
    // lower_bound() may return shards.end(), so step back *before*
    // dereferencing. (Decrement-first also guarantees the loop
    // terminates when a shard is empty.)
    bool found = false;
    current_shard = shards.lower_bound(prefix);
    while (current_shard != shards.begin()) {
      --current_shard;
      r1 = current_shard->second->seek_to_last();
      if (r1 != 0)
	break;
      if (current_shard->second->valid()) {
	found = true;
	break;
      }
    }
    //if we have 2 candidates, we need to select
    if (main->valid() && found) {
      if (is_main_smaller()) {
	main->next();
      } else {
	shards_next();
      }
    }
    if (!found) {
      //set shards state that properly represents eof
      current_shard = shards.end();
    }
    smaller = is_main_smaller() ? on_main : on_shard;
    return r0 == 0 && r1 == 0 ? 0 : -1;
  }

  int upper_bound(const std::string &prefix, const std::string &after) override {
    int r0 = main->upper_bound(prefix, after);
    int r1 = 0;
    if (r0 != 0)
      return r0;
    current_shard = shards.lower_bound(prefix);
    if (current_shard != shards.end()) {
      bool located = false;
      if (current_shard->first == prefix) {
	r1 = current_shard->second->upper_bound(after);
	if (r1 != 0)
	  return r1;
	if (current_shard->second->valid()) {
	  located = true;
	}
      }
      if (!located) {
	// no exact-prefix hit: fall forward to the first non-empty shard
	while (current_shard != shards.end()) {
	  r1 = current_shard->second->seek_to_first();
	  if (r1 != 0)
	    return r1;
	  if (current_shard->second->valid())
	    break;
	  ++current_shard;
	}
      }
    }
    smaller = is_main_smaller() ? on_main : on_shard;
    return 0;
  }

  int lower_bound(const std::string &prefix, const std::string &to) override {
    int r0 = main->lower_bound(prefix, to);
    int r1 = 0;
    if (r0 != 0)
      return r0;
    current_shard = shards.lower_bound(prefix);
    if (current_shard != shards.end()) {
      bool located = false;
      if (current_shard->first == prefix) {
	r1 = current_shard->second->lower_bound(to);
	if (r1 != 0)
	  return r1;
	if (current_shard->second->valid()) {
	  located = true;
	}
      }
      if (!located) {
	// no exact-prefix hit: fall forward to the first non-empty shard
	while (current_shard != shards.end()) {
	  r1 = current_shard->second->seek_to_first();
	  if (r1 != 0)
	    return r1;
	  if (current_shard->second->valid())
	    break;
	  ++current_shard;
	}
      }
    }
    smaller = is_main_smaller() ? on_main : on_shard;
    return 0;
  }

  bool valid() override {
    if (smaller == on_main) {
      return main->valid();
    } else {
      if (current_shard == shards.end())
	return false;
      return current_shard->second->valid();
    }
  };

  int next() override {
    int r;
    if (smaller == on_main) {
      r = main->next();
    } else {
      r = shards_next();
    }
    if (r != 0)
      return r;
    smaller = is_main_smaller() ? on_main : on_shard;
    return 0;
  }

  // Step back: move both sides back (resurrecting an exhausted side via
  // seek_to_last), pick the *larger* of the two candidates, and re-advance
  // the loser so the merge invariant holds again.
  int prev() override {
    int r;
    bool main_was_valid = false;
    if (main->valid()) {
      main_was_valid = true;
      r = main->prev();
    } else {
      r = main->seek_to_last();
    }
    if (r != 0)
      return r;
    bool shards_was_valid = false;
    if (shards_valid()) {
      shards_was_valid = true;
      r = shards_prev();
    } else {
      r = shards_seek_to_last();
    }
    if (r != 0)
      return r;
    if (!main->valid() && !shards_valid()) {
      //end, no previous. set marker so valid() can work
      smaller = on_main;
      return 0;
    }
    //if 1 is valid, select it
    //if 2 are valid select larger and advance the other
    if (main->valid()) {
      if (shards_valid()) {
	if (is_main_smaller()) {
	  smaller = on_shard;
	  if (main_was_valid) {
	    if (main->valid()) {
	      r = main->next();
	    } else {
	      r = main->seek_to_first();
	    }
	  } else {
	    //if we have resurrected main, kill it
	    if (main->valid()) {
	      main->next();
	    }
	  }
	} else {
	  smaller = on_main;
	  if (shards_was_valid) {
	    if (shards_valid()) {
	      r = shards_next();
	    } else {
	      r = shards_seek_to_first();
	    }
	  } else {
	    //if we have resurected shards, kill it
	    if (shards_valid()) {
	      shards_next();
	    }
	  }
	}
      } else {
	smaller = on_main;
	r = shards_seek_to_first();
      }
    } else {
      smaller = on_shard;
      r = main->seek_to_first();
    }
    return r;
  }

  std::string key() override
  {
    if (smaller == on_main) {
      return main->key();
    } else {
      return current_shard->second->key();
    }
  }

  std::pair<std::string,std::string> raw_key() override
  {
    if (smaller == on_main) {
      return main->raw_key();
    } else {
      return { current_shard->first, current_shard->second->key() };
    }
  }

  bool raw_key_is_prefixed(const std::string &prefix) override
  {
    if (smaller == on_main) {
      return main->raw_key_is_prefixed(prefix);
    } else {
      return current_shard->first == prefix;
    }
  }

  ceph::buffer::list value() override
  {
    if (smaller == on_main) {
      return main->value();
    } else {
      return current_shard->second->value();
    }
  }

  int status() override
  {
    //because we already had to inspect key, it must be ok
    return 0;
  }

  size_t key_size() override
  {
    if (smaller == on_main) {
      return main->key_size();
    } else {
      return current_shard->second->key().size();
    }
  }
  size_t value_size() override
  {
    if (smaller == on_main) {
      return main->value_size();
    } else {
      return current_shard->second->value().length();
    }
  }

  // True when current_shard points at a shard positioned on an entry.
  int shards_valid() {
    if (current_shard == shards.end())
      return false;
    return current_shard->second->valid();
  }

  // Advance within the current shard, falling forward to the first
  // non-empty following shard when the current one is exhausted.
  int shards_next() {
    if (current_shard == shards.end()) {
      //illegal to next() on !valid()
      return -1;
    }
    int r = 0;
    r = current_shard->second->next();
    if (r != 0)
      return r;
    if (current_shard->second->valid())
      return 0;
    //current shard exhaused, search for key
    ++current_shard;
    while (current_shard != shards.end()) {
      r = current_shard->second->seek_to_first();
      if (r != 0)
	return r;
      if (current_shard->second->valid())
	break;
      ++current_shard;
    }
    //either we found key or not, but it is success
    return 0;
  }

  // Step back within the current shard, falling back to the last entry
  // of earlier shards when exhausted.
  int shards_prev() {
    if (current_shard == shards.end()) {
      //illegal to prev() on !valid()
      return -1;
    }
    int r = current_shard->second->prev();
    while (r == 0) {
      if (current_shard->second->valid()) {
	break;
      }
      if (current_shard == shards.begin()) {
	//we have reached pre-first element
	//this makes it !valid(), but guarantees next() moves to first element
	break;
      }
      --current_shard;
      r = current_shard->second->seek_to_last();
    }
    return r;
  }

  // Position on the last entry of the last non-empty shard; leaves
  // current_shard == shards.end() when every shard is empty.
  int shards_seek_to_last() {
    int r = 0;
    current_shard = shards.end();
    if (current_shard == shards.begin()) {
      //no shards at all
      return 0;
    }
    while (current_shard != shards.begin()) {
      --current_shard;
      r = current_shard->second->seek_to_last();
      if (r != 0)
	return r;
      if (current_shard->second->valid()) {
	return 0;
      }
    }
    //no keys at all
    current_shard = shards.end();
    return r;
  }

  // Position on the first entry of the first non-empty shard.
  int shards_seek_to_first() {
    int r = 0;
    current_shard = shards.begin();
    while (current_shard != shards.end()) {
      r = current_shard->second->seek_to_first();
      if (r != 0)
	break;
      if (current_shard->second->valid()) {
	//this is the first shard that will yield some keys
	break;
      }
      ++current_shard;
    }
    return r;
  }
};
// Iterator over one prefix whose data is split across several shard
// column families. Keeps one rocksdb iterator per shard in 'iters',
// ordered so that iters[0] always holds the smallest current key;
// all accessors read from iters[0] only.
class ShardMergeIteratorImpl : public KeyValueDB::IteratorImpl {
private:
  struct KeyLess {
  private:
    const rocksdb::Comparator* comparator;
  public:
    KeyLess(const rocksdb::Comparator* comparator) : comparator(comparator) { };
    // Valid iterators order by key; an invalid (exhausted) iterator
    // sorts after every valid one so it never occupies the front slot.
    bool operator()(rocksdb::Iterator* a, rocksdb::Iterator* b) const
    {
      if (a->Valid()) {
	if (b->Valid()) {
	  return comparator->Compare(a->key(), b->key()) < 0;
	} else {
	  return true;
	}
      } else {
	if (b->Valid()) {
	  return false;
	} else {
	  return false;
	}
      }
    }
  };
  const RocksDBStore* db;
  KeyLess keyless;
  string prefix;
  const KeyValueDB::IteratorBounds bounds;
  // Bound slices are members so they outlive the rocksdb iterators
  // whose ReadOptions point at them.
  const rocksdb::Slice iterate_lower_bound;
  const rocksdb::Slice iterate_upper_bound;
  std::vector<rocksdb::Iterator*> iters;
public:
  explicit ShardMergeIteratorImpl(const RocksDBStore* db,
				  const std::string& prefix,
				  const std::vector<rocksdb::ColumnFamilyHandle*>& shards,
				  KeyValueDB::IteratorBounds bounds_)
    : db(db), keyless(db->comparator), prefix(prefix), bounds(std::move(bounds_)),
      iterate_lower_bound(make_slice(bounds.lower_bound)),
      iterate_upper_bound(make_slice(bounds.upper_bound))
  {
    iters.reserve(shards.size());
    auto options = rocksdb::ReadOptions();
    if (db->cct->_conf->osd_rocksdb_iterator_bounds_enabled) {
      if (bounds.lower_bound) {
        options.iterate_lower_bound = &iterate_lower_bound;
      }
      if (bounds.upper_bound) {
        options.iterate_upper_bound = &iterate_upper_bound;
      }
    }
    for (auto& s : shards) {
      iters.push_back(db->db->NewIterator(options, s));
    }
  }
  ~ShardMergeIteratorImpl() {
    for (auto& it : iters) {
      delete it;
    }
  }
  int seek_to_first() override {
    for (auto& it : iters) {
      it->SeekToFirst();
      if (!it->status().ok()) {
	return -1;
      }
    }
    //all iterators seeked, sort
    std::sort(iters.begin(), iters.end(), keyless);
    return 0;
  }
  // Land every shard on its last key, keep only the shard with the
  // globally largest key valid, and push all others past the end.
  int seek_to_last() override {
    for (auto& it : iters) {
      it->SeekToLast();
      if (!it->status().ok()) {
	return -1;
      }
    }
    for (size_t i = 1; i < iters.size(); i++) {
      if (iters[0]->Valid()) {
	if (iters[i]->Valid()) {
	  if (keyless(iters[0], iters[i])) {
	    std::swap(iters[0], iters[i]);
	  }
	} else {
	  //iters[i] empty
	}
      } else {
	if (iters[i]->Valid()) {
	  std::swap(iters[0], iters[i]);
	}
      }
      //it might happen that cf was empty
      if (iters[i]->Valid()) {
	iters[i]->Next();
      }
    }
    //no need to sort, as at most 1 iterator is valid now
    return 0;
  }
  // First key strictly greater than 'after' across all shards.
  int upper_bound(const string &after) override {
    rocksdb::Slice slice_bound(after);
    for (auto& it : iters) {
      it->Seek(slice_bound);
      if (it->Valid() && it->key() == after) {
	it->Next();
      }
      if (!it->status().ok()) {
	return -1;
      }
    }
    std::sort(iters.begin(), iters.end(), keyless);
    return 0;
  }
  // First key at or after 'to' across all shards.
  int lower_bound(const string &to) override {
    rocksdb::Slice slice_bound(to);
    for (auto& it : iters) {
      it->Seek(slice_bound);
      if (!it->status().ok()) {
	return -1;
      }
    }
    std::sort(iters.begin(), iters.end(), keyless);
    return 0;
  }
  // Advance the front iterator, then bubble it back to its sorted slot.
  int next() override {
    int r = -1;
    if (iters[0]->Valid()) {
      iters[0]->Next();
      if (iters[0]->status().ok()) {
	r = 0;
	//bubble up
	for (size_t i = 0; i < iters.size() - 1; i++) {
	  if (keyless(iters[i], iters[i + 1])) {
	    //matches, fixed
	    break;
	  }
	  std::swap(iters[i], iters[i + 1]);
	}
      }
    }
    return r;
  }
  // iters are sorted, so
  // a[0] < b[0] < c[0] < d[0]
  // a[0] > a[-1], a[0] > b[-1], a[0] > c[-1], a[0] > d[-1]
  // so, prev() will be one of:
  // a[-1], b[-1], c[-1], d[-1]
  // prev() will be the one that is *largest* of them
  //
  // alg:
  // 1. go prev() on each iterator we can
  // 2. select largest key from those iterators
  // 3. go next() on all iterators except (2)
  // 4. sort
  int prev() override {
    std::vector<rocksdb::Iterator*> prev_done;
    //1
    for (auto it: iters) {
      if (it->Valid()) {
	it->Prev();
	if (it->Valid()) {
	  prev_done.push_back(it);
	} else {
	  it->SeekToFirst();
	}
      } else {
	it->SeekToLast();
	if (it->Valid()) {
	  prev_done.push_back(it);
	}
      }
    }
    if (prev_done.size() == 0) {
      /* there is no previous element */
      if (iters[0]->Valid()) {
	iters[0]->Prev();
	ceph_assert(!iters[0]->Valid());
      }
      return 0;
    }
    //2,3
    rocksdb::Iterator* highest = prev_done[0];
    for (size_t i = 1; i < prev_done.size(); i++) {
      if (keyless(highest, prev_done[i])) {
	highest->Next();
	highest = prev_done[i];
      } else {
	prev_done[i]->Next();
      }
    }
    //4
    //insert highest in the beginning, and shift values until we pick highest
    //untouched rest is sorted - we just prev()/next() them
    rocksdb::Iterator* hold = highest;
    for (size_t i = 0; i < iters.size(); i++) {
      std::swap(hold, iters[i]);
      if (hold == highest) break;
    }
    ceph_assert(hold == highest);
    return 0;
  }
  bool valid() override {
    return iters[0]->Valid();
  }
  string key() override {
    return iters[0]->key().ToString();
  }
  std::pair<std::string, std::string> raw_key() override {
    return make_pair(prefix, key());
  }
  bufferlist value() override {
    return to_bufferlist(iters[0]->value());
  }
  bufferptr value_as_ptr() override {
    rocksdb::Slice val = iters[0]->value();
    return bufferptr(val.data(), val.size());
  }
  int status() override {
    return iters[0]->status().ok() ? 0 : -1;
  }
};
// Return a prefix-scoped iterator. Sharded prefixes get a CF-backed
// iterator (a single shard directly, or a merge over all shards);
// prefixes without a CF fall back to a prefix-filtered view over a
// whole-space iterator.
KeyValueDB::Iterator RocksDBStore::get_iterator(const std::string& prefix, IteratorOpts opts, IteratorBounds bounds)
{
  auto cf_it = cf_handles.find(prefix);
  if (cf_it != cf_handles.end()) {
    rocksdb::ColumnFamilyHandle* cf = nullptr;
    if (cf_it->second.handles.size() == 1) {
      cf = cf_it->second.handles[0];
    } else if (cct->_conf->osd_rocksdb_iterator_bounds_enabled) {
      // A multi-shard prefix may still resolve to one shard when the
      // requested bounds fall entirely within it.
      cf = check_cf_handle_bounds(cf_it, bounds);
    }
    if (cf) {
      return std::make_shared<CFIteratorImpl>(
        this,
        prefix,
        cf,
        std::move(bounds));
    } else {
      return std::make_shared<ShardMergeIteratorImpl>(
        this,
        prefix,
        cf_it->second.handles,
        std::move(bounds));
    }
  } else {
    // use wholespace engine if no cfs are configured
    // or use default cf otherwise as there is no
    // matching cf for the specified prefix.
    auto w_it = cf_handles.size() == 0 || prefix.empty() ?
      get_wholespace_iterator(opts) :
      get_default_cf_iterator();
    return KeyValueDB::make_iterator(prefix, w_it);
  }
}
// Whole-space iterator over a single shard column family.
RocksDBStore::WholeSpaceIterator RocksDBStore::new_shard_iterator(rocksdb::ColumnFamilyHandle* cf)
{
  return std::make_shared<RocksDBWholeSpaceIteratorImpl>(this, cf, 0);
}
// Prefix iterator over a single shard column family, optionally clipped
// by iterator bounds.
KeyValueDB::Iterator RocksDBStore::new_shard_iterator(rocksdb::ColumnFamilyHandle* cf,
                                                      const std::string& prefix,
                                                      IteratorBounds bounds)
{
  return std::make_shared<CFIteratorImpl>(this, prefix, cf, std::move(bounds));
}
// Whole-space iterator: a plain default-CF iterator when no sharding is
// configured, otherwise a merge over the default CF and all shard CFs.
RocksDBStore::WholeSpaceIterator RocksDBStore::get_wholespace_iterator(IteratorOpts opts)
{
  if (cf_handles.empty()) {
    return std::make_shared<RocksDBWholeSpaceIteratorImpl>(this, default_cf, opts);
  }
  return std::make_shared<WholeMergeIteratorImpl>(this);
}
// Whole-space iterator over just the default column family (combined
// "prefix\0key" keys), deliberately ignoring any shard CFs.
RocksDBStore::WholeSpaceIterator RocksDBStore::get_default_cf_iterator()
{
  return std::make_shared<RocksDBWholeSpaceIteratorImpl>(this, default_cf, 0);
}
// Put the DB into a state where keys can be migrated to a new sharding:
// parse and validate the requested sharding, persist a "resharding in
// progress" lock marker into the sharding definition file, open every
// existing column family, create the ones the new layout needs, wire
// cf_handles to the new layout, and hand all open columns back to the
// caller via to_process_columns for the actual data movement.
// Returns 0 on success or a negative errno-style code.
int RocksDBStore::prepare_for_reshard(const std::string& new_sharding,
				      RocksDBStore::columns_t& to_process_columns)
{
  //0. lock db from opening
  //1. list existing columns
  //2. apply merge operator to (main + columns) opts
  //3. prepare std::vector<rocksdb::ColumnFamilyDescriptor> existing_cfs
  //4. open db, acquire existing column handles
  //5. calculate missing columns
  //6. create missing columns
  //7. construct cf_handles according to new sharding
  //8. check is all cf_handles are filled

  bool b;
  std::vector<ColumnFamily> new_sharding_def;
  char const* error_position;
  std::string error_msg;
  b = parse_sharding_def(new_sharding, new_sharding_def, &error_position, &error_msg);
  if (!b) {
    dout(1) << __func__ << " bad sharding: " << dendl;
    dout(1) << __func__ << new_sharding << dendl;
    // print a caret under the offending character of the sharding string
    dout(1) << __func__ << std::string(error_position - &new_sharding[0], ' ') << "^" << error_msg << dendl;
    return -EINVAL;
  }

  //0. lock db from opening
  // Append the lock marker to the stored sharding text (if not already
  // present) so an interrupted reshard is detectable on the next open.
  std::string stored_sharding_text;
  rocksdb::ReadFileToString(env,
			    sharding_def_file,
			    &stored_sharding_text);
  if (stored_sharding_text.find(resharding_column_lock) == string::npos) {
    rocksdb::Status status;
    if (stored_sharding_text.size() != 0)
      stored_sharding_text += " ";
    stored_sharding_text += resharding_column_lock;
    env->CreateDir(sharding_def_dir);
    status = rocksdb::WriteStringToFile(env, stored_sharding_text,
					sharding_def_file, true);
    if (!status.ok()) {
      derr << __func__ << " cannot write to " << sharding_def_file << dendl;
      return -EIO;
    }
  }

  //1. list existing columns
  rocksdb::Status status;
  std::vector<std::string> existing_columns;
  rocksdb::Options opt;
  int r = load_rocksdb_options(false, opt);
  if (r) {
    dout(1) << __func__ << " load rocksdb options failed" << dendl;
    return r;
  }
  status = rocksdb::DB::ListColumnFamilies(rocksdb::DBOptions(opt), path, &existing_columns);
  if (!status.ok()) {
    derr << "Unable to list column families: " << status.ToString() << dendl;
    return -EINVAL;
  }
  dout(5) << "existing columns = " << existing_columns << dendl;

  //2. apply merge operator to (main + columns) opts
  //3. prepare std::vector<rocksdb::ColumnFamilyDescriptor> cfs_to_open
  std::vector<rocksdb::ColumnFamilyDescriptor> cfs_to_open;
  for (const auto& full_name : existing_columns) {
    //split col_name to <prefix>-<number>
    std::string base_name;
    size_t pos = full_name.find('-');
    if (std::string::npos == pos)
      base_name = full_name;
    else
      base_name = full_name.substr(0,pos);

    rocksdb::ColumnFamilyOptions cf_opt(opt);
    // search if we have options for this column
    std::string options;
    for (const auto& nsd : new_sharding_def) {
      if (nsd.name == base_name) {
	options = nsd.options;
	break;
      }
    }
    int r = update_column_family_options(base_name, options, &cf_opt);
    if (r != 0) {
      return r;
    }
    cfs_to_open.emplace_back(full_name, cf_opt);
  }

  //4. open db, acquire existing column handles
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  status = rocksdb::DB::Open(rocksdb::DBOptions(opt),
			     path, cfs_to_open, &handles, &db);
  if (!status.ok()) {
    derr << status.ToString() << dendl;
    return -EINVAL;
  }
  for (size_t i = 0; i < cfs_to_open.size(); i++) {
    dout(10) << "column " << cfs_to_open[i].name << " handle " << (void*)handles[i] << dendl;
  }

  //5. calculate missing columns
  std::vector<std::string> new_sharding_columns;
  std::vector<std::string> missing_columns;
  sharding_def_to_columns(new_sharding_def,
			  new_sharding_columns);
  dout(5) << "target columns = " << new_sharding_columns << dendl;
  for (const auto& n : new_sharding_columns) {
    bool found = false;
    for (const auto& e : existing_columns) {
      if (n == e) {
	found = true;
	break;
      }
    }
    if (!found) {
      missing_columns.push_back(n);
    }
  }
  dout(5) << "missing columns = " << missing_columns << dendl;

  //6. create missing columns
  for (const auto& full_name : missing_columns) {
    std::string base_name;
    size_t pos = full_name.find('-');
    if (std::string::npos == pos)
      base_name = full_name;
    else
      base_name = full_name.substr(0,pos);

    rocksdb::ColumnFamilyOptions cf_opt(opt);
    // search if we have options for this column
    std::string options;
    for (const auto& nsd : new_sharding_def) {
      if (nsd.name == base_name) {
	options = nsd.options;
	break;
      }
    }
    int r = update_column_family_options(base_name, options, &cf_opt);
    if (r != 0) {
      return r;
    }
    rocksdb::ColumnFamilyHandle *cf;
    status = db->CreateColumnFamily(cf_opt, full_name, &cf);
    if (!status.ok()) {
      derr << __func__ << " Failed to create rocksdb column family: "
	   << full_name << dendl;
      return -EINVAL;
    }
    dout(10) << "created column " << full_name << " handle = " << (void*)cf << dendl;
    // newly created columns join the common pool so step 7 treats
    // existing and fresh columns uniformly
    existing_columns.push_back(full_name);
    handles.push_back(cf);
  }

  //7. construct cf_handles according to new sharding
  for (size_t i = 0; i < existing_columns.size(); i++) {
    std::string full_name = existing_columns[i];
    rocksdb::ColumnFamilyHandle *cf = handles[i];
    std::string base_name;
    size_t shard_idx = 0;
    size_t pos = full_name.find('-');
    dout(10) << "processing column " << full_name << dendl;
    if (std::string::npos == pos) {
      base_name = full_name;
    } else {
      base_name = full_name.substr(0,pos);
      shard_idx = atoi(full_name.substr(pos+1).c_str());
    }
    if (rocksdb::kDefaultColumnFamilyName == base_name) {
      default_cf = handles[i];
      must_close_default_cf = true;
      // the default CF is owned by the store itself, so the deleter for
      // its entry in to_process_columns is a no-op
      std::unique_ptr<rocksdb::ColumnFamilyHandle, cf_deleter_t> ptr{
        cf, [](rocksdb::ColumnFamilyHandle*) {}};
      to_process_columns.emplace(full_name, std::move(ptr));
    } else {
      for (const auto& nsd : new_sharding_def) {
	if (nsd.name == base_name) {
	  if (shard_idx < nsd.shard_cnt) {
	    add_column_family(base_name, nsd.hash_l, nsd.hash_h, shard_idx, cf);
	  } else {
	    //ignore columns with index larger then shard count
	  }
	  break;
	}
      }
      std::unique_ptr<rocksdb::ColumnFamilyHandle, cf_deleter_t> ptr{
	cf, [this](rocksdb::ColumnFamilyHandle* handle) {
	  db->DestroyColumnFamilyHandle(handle);
	}};
      to_process_columns.emplace(full_name, std::move(ptr));
    }
  }

  //8. check if all cf_handles are filled
  for (const auto& col : cf_handles) {
    for (size_t i = 0; i < col.second.handles.size(); i++) {
      if (col.second.handles[i] == nullptr) {
	derr << "missing handle for column " << col.first << " shard " << i << dendl;
	return -EIO;
      }
    }
  }
  return 0;
}
int RocksDBStore::reshard_cleanup(const RocksDBStore::columns_t& current_columns)
{
  // Build the full list of column names mandated by the (already applied)
  // new sharding: either the bare base name, or "<base>-<idx>" per shard.
  std::vector<std::string> wanted;
  for (const auto& [base, shards] : cf_handles) {
    const size_t cnt = shards.handles.size();
    if (cnt == 1) {
      wanted.push_back(base);
    } else {
      for (size_t idx = 0; idx < cnt; idx++) {
        wanted.push_back(base + "-" + std::to_string(idx));
      }
    }
  }

  // Drop every open column that is neither part of the new sharding nor
  // the default column family; such columns must already be empty.
  for (auto& [name, handle] : current_columns) {
    const bool keep =
      (name == rocksdb::kDefaultColumnFamilyName) ||
      (std::find(wanted.begin(), wanted.end(), name) != wanted.end());
    if (keep) {
      dout(5) << "Column " << name << " is part of new sharding." << dendl;
      continue;
    }
    dout(5) << "Column " << name << " not part of new sharding. Deleting." << dendl;

    // verify that column is empty
    std::unique_ptr<rocksdb::Iterator> probe{
      db->NewIterator(rocksdb::ReadOptions(), handle.get())};
    ceph_assert(probe);
    probe->SeekToFirst();
    ceph_assert(!probe->Valid());

    if (rocksdb::Status status = db->DropColumnFamily(handle.get()); !status.ok()) {
      derr << __func__ << " Failed to drop column: " << name << dendl;
      return -EINVAL;
    }
  }
  return 0;
}
// Migrate all keys into the layout described by new_sharding.
// prepare_for_reshard() opens/creates every column and locks the sharding
// file; each column is then scanned and any key whose target column (per
// the new sharding) differs from its current one is moved in batches;
// empty leftover columns are dropped and the final sharding definition is
// written (which also clears the lock marker).  ctrl_in tunes batch /
// iterator-refresh sizes and fault-injection points for unit tests
// (the -1000/-1001/-1002 returns below).  Returns 0 on success.
int RocksDBStore::reshard(const std::string& new_sharding, const RocksDBStore::resharding_ctrl* ctrl_in)
{
  resharding_ctrl ctrl = ctrl_in ? *ctrl_in : resharding_ctrl();
  size_t bytes_in_batch = 0;
  size_t keys_in_batch = 0;
  size_t bytes_per_iterator = 0;
  size_t keys_per_iterator = 0;
  size_t keys_processed = 0;
  size_t keys_moved = 0;

  // Synchronously write out the accumulated batch and reset counters.
  auto flush_batch = [&](rocksdb::WriteBatch* batch) {
    dout(10) << "flushing batch, " << keys_in_batch << " keys, for "
	     << bytes_in_batch << " bytes" << dendl;
    rocksdb::WriteOptions woptions;
    woptions.sync = true;
    rocksdb::Status s = db->Write(woptions, batch);
    ceph_assert(s.ok());
    bytes_in_batch = 0;
    keys_in_batch = 0;
    batch->Clear();
  };

  // Scan one column and move misplaced keys.  fixed_prefix is empty for
  // the default CF (keys there embed "<prefix>\0<key>") and the column's
  // base name otherwise.
  auto process_column = [&](rocksdb::ColumnFamilyHandle* handle,
			    const std::string& fixed_prefix)
  {
    dout(5) << " column=" << (void*)handle << " prefix=" << fixed_prefix << dendl;
    std::unique_ptr<rocksdb::Iterator> it{
      db->NewIterator(rocksdb::ReadOptions(), handle)};
    ceph_assert(it);

    rocksdb::WriteBatch bat;
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
      rocksdb::Slice raw_key = it->key();
      dout(30) << "key=" << pretty_binary_string(raw_key.ToString()) << dendl;
      //check if need to refresh iterator
      // (a long-lived iterator pins resources; recreate it periodically
      // and re-seek to the current key)
      if (bytes_per_iterator >= ctrl.bytes_per_iterator ||
	  keys_per_iterator >= ctrl.keys_per_iterator) {
	dout(8) << "refreshing iterator" << dendl;
	bytes_per_iterator = 0;
	keys_per_iterator = 0;
	std::string raw_key_str = raw_key.ToString();
	it.reset(db->NewIterator(rocksdb::ReadOptions(), handle));
	ceph_assert(it);
	it->Seek(raw_key_str);
	ceph_assert(it->Valid());
	raw_key = it->key();
      }
      rocksdb::Slice value = it->value();
      std::string prefix, key;
      if (fixed_prefix.size() == 0) {
	split_key(raw_key, &prefix, &key);
      } else {
	prefix = fixed_prefix;
	key = raw_key.ToString();
      }
      keys_processed++;
      if ((keys_processed % 10000) == 0) {
	dout(10) << "processed " << keys_processed << " keys, moved " << keys_moved << dendl;
      }
      rocksdb::ColumnFamilyHandle* new_handle = get_cf_handle(prefix, key);
      if (new_handle == nullptr) {
	new_handle = default_cf;
      }
      if (handle == new_handle) {
	// already in the right column; nothing to move
	continue;
      }
      std::string new_raw_key;
      if (new_handle == default_cf) {
	new_raw_key = combine_strings(prefix, key);
      } else {
	new_raw_key = key;
      }
      bat.Delete(handle, raw_key);
      bat.Put(new_handle, new_raw_key, value);
      dout(25) << "moving " << (void*)handle << "/" << pretty_binary_string(raw_key.ToString()) <<
	" to " << (void*)new_handle << "/" << pretty_binary_string(new_raw_key) <<
	" size " << value.size() << dendl;
      keys_moved++;
      bytes_in_batch += new_raw_key.size() * 2 + value.size();
      keys_in_batch++;
      bytes_per_iterator += new_raw_key.size() * 2 + value.size();
      keys_per_iterator++;

      //check if need to write batch
      if (bytes_in_batch >= ctrl.bytes_per_batch ||
	  keys_in_batch >= ctrl.keys_per_batch) {
	flush_batch(&bat);
	if (ctrl.unittest_fail_after_first_batch) {
	  return -1000;
	}
      }
    }
    if (bat.Count() > 0) {
      flush_batch(&bat);
    }
    return 0;
  };

  // Ensure the DB and all column handles are closed on every exit path.
  auto close_column_handles = make_scope_guard([this] {
    cf_handles.clear();
    close();
  });
  columns_t to_process_columns;
  int r = prepare_for_reshard(new_sharding, to_process_columns);
  if (r != 0) {
    dout(1) << "failed to prepare db for reshard" << dendl;
    return r;
  }

  for (auto& [name, handle] : to_process_columns) {
    dout(5) << "Processing column=" << name
	    << " handle=" << handle.get() << dendl;
    if (name == rocksdb::kDefaultColumnFamilyName) {
      ceph_assert(handle.get() == default_cf);
      r = process_column(default_cf, std::string());
    } else {
      std::string fixed_prefix = name.substr(0, name.find('-'));
      dout(10) << "Prefix: " << fixed_prefix << dendl;
      r = process_column(handle.get(), fixed_prefix);
    }
    if (r != 0) {
      derr << "Error processing column " << name << dendl;
      return r;
    }
    if (ctrl.unittest_fail_after_processing_column) {
      return -1001;
    }
  }

  r = reshard_cleanup(to_process_columns);
  if (r != 0) {
    dout(5) << "failed to cleanup after reshard" << dendl;
    return r;
  }
  if (ctrl.unittest_fail_after_successful_processing) {
    return -1002;
  }
  // Persist the new sharding definition; this replaces the text holding
  // the resharding lock marker, marking the reshard complete.
  env->CreateDir(sharding_def_dir);
  if (auto status = rocksdb::WriteStringToFile(env, new_sharding,
					       sharding_def_file, true);
      !status.ok()) {
    derr << __func__ << " cannot write to " << sharding_def_file << dendl;
    return -EIO;
  }

  return r;
}
// Read the persisted sharding definition into `sharding`.
// Returns true and fills `sharding` when the definition file exists and
// is readable; otherwise returns false with `sharding` left empty.
bool RocksDBStore::get_sharding(std::string& sharding) {
  sharding.clear();
  if (!env->FileExists(sharding_def_file).ok()) {
    return false;
  }
  std::string text;
  if (!rocksdb::ReadFileToString(env,
                                 sharding_def_file,
                                 &text).ok()) {
    return false;
  }
  sharding = std::move(text);
  return true;
}
| 105,861 | 29.341645 | 130 | cc |
null | ceph-main/src/kv/RocksDBStore.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef ROCKS_DB_STORE_H
#define ROCKS_DB_STORE_H
#include "include/types.h"
#include "include/buffer_fwd.h"
#include "KeyValueDB.h"
#include <set>
#include <map>
#include <string>
#include <memory>
#include <boost/scoped_ptr.hpp>
#include "rocksdb/write_batch.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/iostats_context.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/db.h"
#include "kv/rocksdb_cache/BinnedLRUCache.h"
#include <errno.h>
#include "common/errno.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "include/common_fwd.h"
#include "common/Formatter.h"
#include "common/Cond.h"
#include "common/ceph_context.h"
#include "common/PriorityCache.h"
#include "common/pretty_binary.h"
// Perf-counter indices for the RocksDB backend; the range is bounded by
// l_rocksdb_first / l_rocksdb_last so a PerfCounters instance can be
// built over it.
enum {
  l_rocksdb_first = 34300,
  l_rocksdb_get_latency,
  l_rocksdb_submit_latency,
  l_rocksdb_submit_sync_latency,
  l_rocksdb_compact,
  l_rocksdb_compact_range,
  l_rocksdb_compact_queue_merge,
  l_rocksdb_compact_queue_len,
  l_rocksdb_write_wal_time,
  l_rocksdb_write_memtable_time,
  l_rocksdb_write_delay_time,
  l_rocksdb_write_pre_and_post_process_time,
  l_rocksdb_last,
};
// Forward declarations of the RocksDB types referenced below.
namespace rocksdb{
  class DB;
  class Env;
  class Cache;
  class FilterPolicy;
  class Snapshot;
  class Slice;
  class WriteBatch;
  class Iterator;
  class Logger;
  class ColumnFamilyHandle;
  struct Options;
  struct BlockBasedTableOptions;
  struct DBOptions;
  struct ColumnFamilyOptions;
}

// Declared here, defined in the implementation file.
// NOTE(review): presumably returns a rocksdb::Logger that forwards
// RocksDB's log output into Ceph's logging — confirm in the .cc.
extern rocksdb::Logger *create_rocksdb_ceph_logger();
// Convert an optional bound into a rocksdb::Slice: engaged -> a slice
// viewing the contained string, disengaged -> an empty slice.
inline rocksdb::Slice make_slice(const std::optional<std::string>& bound) {
  return bound ? rocksdb::Slice(*bound) : rocksdb::Slice();
}
/**
* Uses RocksDB to implement the KeyValueDB interface
*/
class RocksDBStore : public KeyValueDB {
  CephContext *cct;
  PerfCounters *logger;
  std::string path;                            // filesystem path of the DB
  std::map<std::string,std::string> kv_options;
  void *priv;                                  // opaque env pointer passed at construction
  rocksdb::DB *db;                             // NULL until opened (see ctor)
  rocksdb::Env *env;
  const rocksdb::Comparator* comparator;
  std::shared_ptr<rocksdb::Statistics> dbstats;
  rocksdb::BlockBasedTableOptions bbt_opts;
  std::string options_str;

  uint64_t cache_size = 0;
  bool set_cache_flag = false;
  friend class ShardMergeIteratorImpl;
  friend class CFIteratorImpl;
  friend class WholeMergeIteratorImpl;
  /*
   * See RocksDB's definition of a column family(CF) and how to use it.
   * The interfaces of KeyValueDB is extended, when a column family is created.
   * Prefix will be the name of column family to use.
   */
public:
  struct ColumnFamily {
    std::string name;      //< name of this individual column family
    size_t shard_cnt;      //< count of shards
    std::string options;   //< configure option string for this CF
    uint32_t hash_l;       //< first character to take for hash calc.
    uint32_t hash_h;       //< last character to take for hash calc.
    ColumnFamily(const std::string &name, size_t shard_cnt, const std::string &options,
		 uint32_t hash_l, uint32_t hash_h)
      : name(name), shard_cnt(shard_cnt), options(options), hash_l(hash_l), hash_h(hash_h) {}
  };
private:
  friend std::ostream& operator<<(std::ostream& out, const ColumnFamily& cf);

  bool must_close_default_cf = false;
  rocksdb::ColumnFamilyHandle *default_cf = nullptr;

  /// column families in use, name->handles
  struct prefix_shards {
    uint32_t hash_l;  //< first character to take for hash calc.
    uint32_t hash_h;  //< last character to take for hash calc.
    std::vector<rocksdb::ColumnFamilyHandle *> handles;
  };
  std::unordered_map<std::string, prefix_shards> cf_handles;
  typedef decltype(cf_handles)::iterator cf_handles_iterator;
  // reverse map: rocksdb CF id -> prefix name
  std::unordered_map<uint32_t, std::string> cf_ids_to_prefix;
  // per-column block-based table options (block cache etc.)
  std::unordered_map<std::string, rocksdb::BlockBasedTableOptions> cf_bbt_opts;

  void add_column_family(const std::string& cf_name, uint32_t hash_l, uint32_t hash_h,
			 size_t shard_idx, rocksdb::ColumnFamilyHandle *handle);
  bool is_column_family(const std::string& prefix);
  std::string_view get_key_hash_view(const prefix_shards& shards, const char* key, const size_t keylen);
  rocksdb::ColumnFamilyHandle *get_key_cf(const prefix_shards& shards, const char* key, const size_t keylen);
  rocksdb::ColumnFamilyHandle *get_cf_handle(const std::string& prefix, const std::string& key);
  rocksdb::ColumnFamilyHandle *get_cf_handle(const std::string& prefix, const char* key, size_t keylen);
  rocksdb::ColumnFamilyHandle *check_cf_handle_bounds(const cf_handles_iterator& it, const IteratorBounds& bounds);

  int submit_common(rocksdb::WriteOptions& woptions, KeyValueDB::Transaction t);
  int install_cf_mergeop(const std::string &cf_name, rocksdb::ColumnFamilyOptions *cf_opt);
  int create_db_dir();
  // common open path for open()/create_and_open()/open_read_only()
  int do_open(std::ostream &out, bool create_if_missing, bool open_readonly,
	      const std::string& cfs="");
  int load_rocksdb_options(bool create_if_missing, rocksdb::Options& opt);
public:
  static bool parse_sharding_def(const std::string_view text_def,
				 std::vector<ColumnFamily>& sharding_def,
				 char const* *error_position = nullptr,
				 std::string *error_msg = nullptr);
  const rocksdb::Comparator* get_comparator() const {
    return comparator;
  }

private:
  static void sharding_def_to_columns(const std::vector<ColumnFamily>& sharding_def,
				      std::vector<std::string>& columns);
  int create_shards(const rocksdb::Options& opt,
		    const std::vector<ColumnFamily>& sharding_def);
  int apply_sharding(const rocksdb::Options& opt,
		     const std::string& sharding_text);
  int verify_sharding(const rocksdb::Options& opt,
		      std::vector<rocksdb::ColumnFamilyDescriptor>& existing_cfs,
		      std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& existing_cfs_shard,
		      std::vector<rocksdb::ColumnFamilyDescriptor>& missing_cfs,
		      std::vector<std::pair<size_t, RocksDBStore::ColumnFamily> >& missing_cfs_shard);
  std::shared_ptr<rocksdb::Cache> create_block_cache(const std::string& cache_type, size_t cache_size, double cache_prio_high = 0.0);
  int split_column_family_options(const std::string& opts_str,
				  std::unordered_map<std::string, std::string>* column_opts_map,
				  std::string* block_cache_opt);
  int apply_block_cache_options(const std::string& column_name,
				const std::string& block_cache_opt,
				rocksdb::ColumnFamilyOptions* cf_opt);
  int update_column_family_options(const std::string& base_name,
				   const std::string& more_options,
				   rocksdb::ColumnFamilyOptions* cf_opt);
  // manage async compactions
  ceph::mutex compact_queue_lock =
    ceph::make_mutex("RocksDBStore::compact_thread_lock");
  ceph::condition_variable compact_queue_cond;
  std::list<std::pair<std::string,std::string>> compact_queue;
  bool compact_queue_stop;
  class CompactThread : public Thread {
    RocksDBStore *db;
  public:
    explicit CompactThread(RocksDBStore *d) : db(d) {}
    void *entry() override {
      db->compact_thread_entry();
      return NULL;
    }
    friend class RocksDBStore;
  } compact_thread;

  void compact_thread_entry();

  void compact_range(const std::string& start, const std::string& end);
  void compact_range_async(const std::string& start, const std::string& end);
  int tryInterpret(const std::string& key, const std::string& val,
		   rocksdb::Options& opt);

public:
  /// compact the underlying rocksdb store
  bool compact_on_mount;
  bool disableWAL;
  uint64_t get_delete_range_threshold() const {
    return cct->_conf.get_val<uint64_t>("rocksdb_delete_range_threshold");
  }

  void compact() override;

  void compact_async() override {
    compact_range_async({}, {});
  }

  int ParseOptionsFromString(const std::string& opt_str, rocksdb::Options& opt);
  static int ParseOptionsFromStringStatic(
    CephContext* cct,
    const std::string& opt_str,
    rocksdb::Options &opt,
    std::function<int(const std::string&, const std::string&, rocksdb::Options&)> interp);
  static int _test_init(const std::string& dir);
  int init(std::string options_str) override;
  /// compact rocksdb for all keys with a given prefix
  void compact_prefix(const std::string& prefix) override {
    compact_range(prefix, past_prefix(prefix));
  }
  void compact_prefix_async(const std::string& prefix) override {
    compact_range_async(prefix, past_prefix(prefix));
  }

  void compact_range(const std::string& prefix, const std::string& start,
		     const std::string& end) override {
    compact_range(combine_strings(prefix, start), combine_strings(prefix, end));
  }
  void compact_range_async(const std::string& prefix, const std::string& start,
			   const std::string& end) override {
    compact_range_async(combine_strings(prefix, start), combine_strings(prefix, end));
  }

  RocksDBStore(CephContext *c, const std::string &path, std::map<std::string,std::string> opt, void *p) :
    cct(c),
    logger(NULL),
    path(path),
    kv_options(opt),
    priv(p),
    db(NULL),
    env(static_cast<rocksdb::Env*>(p)),
    comparator(nullptr),
    dbstats(NULL),
    compact_queue_stop(false),
    compact_thread(this),
    compact_on_mount(false),
    disableWAL(false)
  {}

  ~RocksDBStore() override;

  static bool check_omap_dir(std::string &omap_dir);
  /// Opens underlying db
  int open(std::ostream &out, const std::string& cfs="") override {
    return do_open(out, false, false, cfs);
  }
  /// Creates underlying db if missing and opens it
  int create_and_open(std::ostream &out,
		      const std::string& cfs="") override;

  int open_read_only(std::ostream &out, const std::string& cfs="") override {
    return do_open(out, false, true, cfs);
  }

  void close() override;

  int repair(std::ostream &out) override;
  void split_stats(const std::string &s, char delim, std::vector<std::string> &elems);
  void get_statistics(ceph::Formatter *f) override;

  PerfCounters *get_perf_counters() override
  {
    return logger;
  }

  bool get_property(
    const std::string &property,
    uint64_t *out) final;

  int64_t estimate_prefix_size(const std::string& prefix,
			       const std::string& key_prefix) override;
  struct RocksWBHandler;
  // Transaction implementation backed by a rocksdb::WriteBatch.
  class RocksDBTransactionImpl : public KeyValueDB::TransactionImpl {
  public:
    rocksdb::WriteBatch bat;
    RocksDBStore *db;

    explicit RocksDBTransactionImpl(RocksDBStore *_db);
  private:
    void put_bat(
      rocksdb::WriteBatch& bat,
      rocksdb::ColumnFamilyHandle *cf,
      const std::string &k,
      const ceph::bufferlist &to_set_bl);
  public:
    void set(
      const std::string &prefix,
      const std::string &k,
      const ceph::bufferlist &bl) override;
    void set(
      const std::string &prefix,
      const char *k,
      size_t keylen,
      const ceph::bufferlist &bl) override;
    void rmkey(
      const std::string &prefix,
      const std::string &k) override;
    void rmkey(
      const std::string &prefix,
      const char *k,
      size_t keylen) override;
    void rm_single_key(
      const std::string &prefix,
      const std::string &k) override;
    void rmkeys_by_prefix(
      const std::string &prefix
      ) override;
    void rm_range_keys(
      const std::string &prefix,
      const std::string &start,
      const std::string &end) override;
    void merge(
      const std::string& prefix,
      const std::string& k,
      const ceph::bufferlist &bl) override;
  };

  KeyValueDB::Transaction get_transaction() override {
    return std::make_shared<RocksDBTransactionImpl>(this);
  }

  int submit_transaction(KeyValueDB::Transaction t) override;
  int submit_transaction_sync(KeyValueDB::Transaction t) override;
  int get(
    const std::string &prefix,
    const std::set<std::string> &key,
    std::map<std::string, ceph::bufferlist> *out
    ) override;
  int get(
    const std::string &prefix,
    const std::string &key,
    ceph::bufferlist *out
    ) override;
  int get(
    const std::string &prefix,
    const char *key,
    size_t keylen,
    ceph::bufferlist *out) override;

  // Iterator over the whole key space of a single column family.
  class RocksDBWholeSpaceIteratorImpl :
    public KeyValueDB::WholeSpaceIteratorImpl {
  protected:
    rocksdb::Iterator *dbiter;
  public:
    explicit RocksDBWholeSpaceIteratorImpl(const RocksDBStore* db,
					   rocksdb::ColumnFamilyHandle* cf,
					   const KeyValueDB::IteratorOpts opts)
    {
      rocksdb::ReadOptions options = rocksdb::ReadOptions();
      if (opts & ITERATOR_NOCACHE)
	options.fill_cache=false;
      dbiter = db->db->NewIterator(options, cf);
    }
    ~RocksDBWholeSpaceIteratorImpl() override;

    int seek_to_first() override;
    int seek_to_first(const std::string &prefix) override;
    int seek_to_last() override;
    int seek_to_last(const std::string &prefix) override;
    int upper_bound(const std::string &prefix, const std::string &after) override;
    int lower_bound(const std::string &prefix, const std::string &to) override;
    bool valid() override;
    int next() override;
    int prev() override;
    std::string key() override;
    std::pair<std::string,std::string> raw_key() override;
    bool raw_key_is_prefixed(const std::string &prefix) override;
    ceph::bufferlist value() override;
    ceph::bufferptr value_as_ptr() override;
    int status() override;
    size_t key_size() override;
    size_t value_size() override;
  };

  Iterator get_iterator(const std::string& prefix, IteratorOpts opts = 0, IteratorBounds = IteratorBounds()) override;
private:
  /// this iterator spans single cf
  WholeSpaceIterator new_shard_iterator(rocksdb::ColumnFamilyHandle* cf);
  Iterator new_shard_iterator(rocksdb::ColumnFamilyHandle* cf,
			      const std::string& prefix, IteratorBounds bound);
public:
  /// Utility
  static std::string combine_strings(const std::string &prefix, const std::string &value) {
    std::string out = prefix;
    out.push_back(0);
    out.append(value);
    return out;
  }
  static void combine_strings(const std::string &prefix,
			      const char *key, size_t keylen,
			      std::string *out) {
    out->reserve(prefix.size() + 1 + keylen);
    *out = prefix;
    out->push_back(0);
    out->append(key, keylen);
  }

  static int split_key(rocksdb::Slice in, std::string *prefix, std::string *key);

  static std::string past_prefix(const std::string &prefix);

  class MergeOperatorRouter;
  class MergeOperatorLinker;
  friend class MergeOperatorRouter;
  int set_merge_operator(
    const std::string& prefix,
    std::shared_ptr<KeyValueDB::MergeOperator> mop) override;
  std::string assoc_name; ///< Name of associative operator

  // Sum up on-disk sizes by file extension under `path`; fills `extra`
  // with "sst"/"log"/"misc"/"total" and returns the total (0 on error).
  uint64_t get_estimated_size(std::map<std::string,uint64_t> &extra) override {
    DIR *store_dir = opendir(path.c_str());
    if (!store_dir) {
      lderr(cct) << __func__ << " something happened opening the store: "
		 << cpp_strerror(errno) << dendl;
      return 0;
    }

    uint64_t total_size = 0;
    uint64_t sst_size = 0;
    uint64_t log_size = 0;
    uint64_t misc_size = 0;

    struct dirent *entry = NULL;
    while ((entry = readdir(store_dir)) != NULL) {
      std::string n(entry->d_name);

      if (n == "." || n == "..")
	continue;

      std::string fpath = path + '/' + n;
      struct stat s;
      int err = stat(fpath.c_str(), &s);
      if (err < 0)
	err = -errno;
      // we may race against rocksdb while reading files; this should only
      // happen when those files are being updated, data is being shuffled
      // and files get removed, in which case there's not much of a problem
      // as we'll get to them next time around.
      if (err == -ENOENT) {
	continue;
      }
      if (err < 0) {
	lderr(cct) << __func__ << " error obtaining stats for " << fpath
		   << ": " << cpp_strerror(err) << dendl;
	goto err;
      }

      size_t pos = n.find_last_of('.');
      if (pos == std::string::npos) {
	misc_size += s.st_size;
	continue;
      }

      std::string ext = n.substr(pos+1);
      if (ext == "sst") {
	sst_size += s.st_size;
      } else if (ext == "log") {
	log_size += s.st_size;
      } else {
	misc_size += s.st_size;
      }
    }

    total_size = sst_size + log_size + misc_size;

    extra["sst"] = sst_size;
    extra["log"] = log_size;
    extra["misc"] = misc_size;
    extra["total"] = total_size;

  err:
    closedir(store_dir);
    return total_size;
  }

  virtual int64_t get_cache_usage() const override {
    return static_cast<int64_t>(bbt_opts.block_cache->GetUsage());
  }

  virtual int64_t get_cache_usage(std::string prefix) const override {
    auto it = cf_bbt_opts.find(prefix);
    if (it != cf_bbt_opts.end() && it->second.block_cache) {
      return static_cast<int64_t>(it->second.block_cache->GetUsage());
    }
    return -EINVAL;
  }

  int set_cache_size(uint64_t s) override {
    // deferred: applied when the DB is (re)opened, see cache_size member
    cache_size = s;
    set_cache_flag = true;
    return 0;
  }

  virtual std::shared_ptr<PriorityCache::PriCache>
      get_priority_cache() const override {
    return std::dynamic_pointer_cast<PriorityCache::PriCache>(
        bbt_opts.block_cache);
  }

  virtual std::shared_ptr<PriorityCache::PriCache>
      get_priority_cache(std::string prefix) const override {
    auto it = cf_bbt_opts.find(prefix);
    if (it != cf_bbt_opts.end()) {
      return std::dynamic_pointer_cast<PriorityCache::PriCache>(
	  it->second.block_cache);
    }
    return nullptr;
  }

  WholeSpaceIterator get_wholespace_iterator(IteratorOpts opts = 0) override;
private:
  WholeSpaceIterator get_default_cf_iterator();

  using cf_deleter_t = std::function<void(rocksdb::ColumnFamilyHandle*)>;
  using columns_t = std::map<std::string,
			     std::unique_ptr<rocksdb::ColumnFamilyHandle,
					     cf_deleter_t>>;
  int prepare_for_reshard(const std::string& new_sharding,
			  columns_t& to_process_columns);
  int reshard_cleanup(const columns_t& current_columns);
public:
  struct resharding_ctrl {
    size_t bytes_per_iterator = 10000000; /// amount of data to process before refreshing iterator
    size_t keys_per_iterator = 10000;
    size_t bytes_per_batch = 1000000;     /// amount of data before submitting batch
    size_t keys_per_batch = 1000;
    bool unittest_fail_after_first_batch = false;
    bool unittest_fail_after_processing_column = false;
    bool unittest_fail_after_successful_processing = false;
  };
  int reshard(const std::string& new_sharding, const resharding_ctrl* ctrl = nullptr);
  bool get_sharding(std::string& sharding);

};
#endif
| 18,549 | 32.605072 | 133 | h |
null | ceph-main/src/kv/rocksdb_cache/BinnedLRUCache.cc | // Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "BinnedLRUCache.h"
#include <stdio.h>
#include <stdlib.h>
#include <string>
#define dout_context cct
#define dout_subsys ceph_subsys_rocksdb
#undef dout_prefix
#define dout_prefix *_dout << "rocksdb: "
namespace rocksdb_cache {
// Start empty; Resize() allocates the initial bucket array.
BinnedLRUHandleTable::BinnedLRUHandleTable() : list_(nullptr), length_(0), elems_(0) {
  Resize();
}

BinnedLRUHandleTable::~BinnedLRUHandleTable() {
  // Free entries that only the table itself still references (refs == 1);
  // entries with external references are left for their holders to release.
  ApplyToAllCacheEntries([](BinnedLRUHandle* h) {
    if (h->refs == 1) {
      h->Free();
    }
  });
  delete[] list_;
}
// Return the handle matching key/hash, or nullptr if absent.
BinnedLRUHandle* BinnedLRUHandleTable::Lookup(const rocksdb::Slice& key, uint32_t hash) {
  return *FindPointer(key, hash);
}
// Link h into its hash chain, replacing any existing entry with the same
// key/hash.  Returns the displaced entry (caller is responsible for it),
// or nullptr if there was none.
BinnedLRUHandle* BinnedLRUHandleTable::Insert(BinnedLRUHandle* h) {
  BinnedLRUHandle** ptr = FindPointer(h->key(), h->hash);
  BinnedLRUHandle* old = *ptr;
  h->next_hash = (old == nullptr ? nullptr : old->next_hash);
  *ptr = h;
  if (old == nullptr) {
    ++elems_;
    if (elems_ > length_) {
      // Since each cache entry is fairly large, we aim for a small
      // average linked list length (<= 1).
      Resize();
    }
  }
  return old;
}
// Unlink and return the entry matching key/hash, or nullptr if absent.
BinnedLRUHandle* BinnedLRUHandleTable::Remove(const rocksdb::Slice& key, uint32_t hash) {
  BinnedLRUHandle** slot = FindPointer(key, hash);
  BinnedLRUHandle* victim = *slot;
  if (victim == nullptr) {
    return nullptr;
  }
  // splice the entry out of its chain and update the element count
  *slot = victim->next_hash;
  --elems_;
  return victim;
}
// Return a pointer to the chain slot that points at the entry matching
// key/hash, or to the trailing nullptr slot of the bucket's chain if no
// match exists.  Callers use the returned slot to read, insert or unlink.
BinnedLRUHandle** BinnedLRUHandleTable::FindPointer(const rocksdb::Slice& key, uint32_t hash) {
  // length_ is a power of two, so the mask selects the bucket
  BinnedLRUHandle** ptr = &list_[hash & (length_ - 1)];
  while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
    ptr = &(*ptr)->next_hash;
  }
  return ptr;
}
// Grow the bucket array (powers of two, starting at 16) until the load
// factor is below ~0.67, then rehash every chained entry into it.
void BinnedLRUHandleTable::Resize() {
  uint32_t new_length = 16;
  while (new_length < elems_ * 1.5) {
    new_length *= 2;
  }
  BinnedLRUHandle** new_list = new BinnedLRUHandle*[new_length];
  memset(new_list, 0, sizeof(new_list[0]) * new_length);
  uint32_t count = 0;
  for (uint32_t i = 0; i < length_; i++) {
    BinnedLRUHandle* h = list_[i];
    while (h != nullptr) {
      BinnedLRUHandle* next = h->next_hash;
      uint32_t hash = h->hash;
      // prepend to the head of the new bucket's chain
      BinnedLRUHandle** ptr = &new_list[hash & (new_length - 1)];
      h->next_hash = *ptr;
      *ptr = h;
      h = next;
      count++;
    }
  }
  // every entry must have been rehashed exactly once
  ceph_assert(elems_ == count);
  delete[] list_;
  list_ = new_list;
  length_ = new_length;
}
// Construct one cache shard.  Usage/capacity accounting starts at zero;
// the real capacity is applied via SetCapacity() at the end so its side
// effects (if any) run with fully initialized state.
BinnedLRUCacheShard::BinnedLRUCacheShard(CephContext *c, size_t capacity, bool strict_capacity_limit,
					 double high_pri_pool_ratio)
    : cct(c),
      capacity_(0),
      high_pri_pool_usage_(0),
      strict_capacity_limit_(strict_capacity_limit),
      high_pri_pool_ratio_(high_pri_pool_ratio),
      high_pri_pool_capacity_(0),
      usage_(0),
      lru_usage_(0),
      age_bins(1) {
  // open the first age bin for accounting
  shift_bins();
  // Make empty circular linked list
  lru_.next = &lru_;
  lru_.prev = &lru_;
  lru_low_pri_ = &lru_;
  SetCapacity(capacity);
}

BinnedLRUCacheShard::~BinnedLRUCacheShard() {}
// Drop one reference from e; returns true iff that was the last one.
bool BinnedLRUCacheShard::Unref(BinnedLRUHandle* e) {
  ceph_assert(e->refs > 0);
  return --e->refs == 0;
}
// Call deleter and free
void BinnedLRUCacheShard::EraseUnRefEntries() {
  // Evict every entry held only by the cache itself (refs == 1, i.e. on
  // the LRU list); entries pinned by clients are untouched.  The handles
  // are collected under the mutex and freed afterwards, outside it.
  ceph::autovector<BinnedLRUHandle*> last_reference_list;
  {
    std::lock_guard<std::mutex> l(mutex_);
    while (lru_.next != &lru_) {
      BinnedLRUHandle* old = lru_.next;
      ceph_assert(old->InCache());
      ceph_assert(old->refs ==
                  1);  // LRU list contains elements which may be evicted
      LRU_Remove(old);
      table_.Remove(old->key(), old->hash);
      old->SetInCache(false);
      Unref(old);
      usage_ -= old->charge;
      last_reference_list.push_back(old);
    }
  }

  for (auto entry : last_reference_list) {
    entry->Free();
  }
}
// Invoke `callback` on every cached entry (key, value, charge, deleter).
// When `thread_safe` is true the shard mutex is held for the whole
// traversal so the table cannot change underneath the callback.
void BinnedLRUCacheShard::ApplyToAllCacheEntries(
  const std::function<void(const rocksdb::Slice& key,
                           void* value,
                           size_t charge,
                           DeleterFn)>& callback,
  bool thread_safe)
{
  // Use a scoped lock instead of the previous manual lock()/unlock()
  // pair: the mutex is now released even if the callback throws, so an
  // exception cannot leave the shard permanently locked.
  std::unique_lock<std::mutex> l(mutex_, std::defer_lock);
  if (thread_safe) {
    l.lock();
  }
  // capture callback by reference: the lambda is invoked synchronously,
  // so there is no need to copy the std::function per call
  table_.ApplyToAllCacheEntries(
    [&callback](BinnedLRUHandle* h) {
      callback(h->key(), h->value, h->charge, h->deleter);
    });
}
// Test-only: expose the LRU list head and the low-priority boundary.
void BinnedLRUCacheShard::TEST_GetLRUList(BinnedLRUHandle** lru, BinnedLRUHandle** lru_low_pri) {
  *lru = &lru_;
  *lru_low_pri = lru_low_pri_;
}

// Test-only: count entries on the circular LRU list by walking it.
size_t BinnedLRUCacheShard::TEST_GetLRUSize() {
  BinnedLRUHandle* lru_handle = lru_.next;
  size_t lru_size = 0;
  while (lru_handle != &lru_) {
    lru_size++;
    lru_handle = lru_handle->next;
  }
  return lru_size;
}
// Fraction of the shard capacity reserved for high-priority entries.
double BinnedLRUCacheShard::GetHighPriPoolRatio() const {
  std::lock_guard<std::mutex> l(mutex_);
  return high_pri_pool_ratio_;
}

// Bytes currently charged to the high-priority pool.
size_t BinnedLRUCacheShard::GetHighPriPoolUsage() const {
  std::lock_guard<std::mutex> l(mutex_);
  return high_pri_pool_usage_;
}
// Unlink e from the circular LRU list and roll back its accounting:
// lru_usage_, plus either the high-pri pool usage or its age bin,
// depending on which pool the entry was in.
void BinnedLRUCacheShard::LRU_Remove(BinnedLRUHandle* e) {
  ceph_assert(e->next != nullptr);
  ceph_assert(e->prev != nullptr);
  if (lru_low_pri_ == e) {
    // keep the low-pri boundary valid when removing the boundary element
    lru_low_pri_ = e->prev;
  }
  e->next->prev = e->prev;
  e->prev->next = e->next;
  e->prev = e->next = nullptr;
  lru_usage_ -= e->charge;
  if (e->InHighPriPool()) {
    ceph_assert(high_pri_pool_usage_ >= e->charge);
    high_pri_pool_usage_ -= e->charge;
  } else {
    ceph_assert(*(e->age_bin) >= e->charge);
    *(e->age_bin) -= e->charge;
  }
}
void BinnedLRUCacheShard::LRU_Insert(BinnedLRUHandle* e) {
ceph_assert(e->next == nullptr);
ceph_assert(e->prev == nullptr);
e->age_bin = age_bins.front();
if (high_pri_pool_ratio_ > 0 && e->IsHighPri()) {
// Inset "e" to head of LRU list.
e->next = &lru_;
e->prev = lru_.prev;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(true);
high_pri_pool_usage_ += e->charge;
MaintainPoolSize();
} else {
// Insert "e" to the head of low-pri pool. Note that when
// high_pri_pool_ratio is 0, head of low-pri pool is also head of LRU list.
e->next = lru_low_pri_->next;
e->prev = lru_low_pri_;
e->prev->next = e;
e->next->prev = e;
e->SetInHighPriPool(false);
lru_low_pri_ = e;
*(e->age_bin) += e->charge;
}
lru_usage_ += e->charge;
}
// Sum the byte counters of age bins in [start, end); `end` is clamped
// to the number of bins, and an out-of-range `start` yields 0.
uint64_t BinnedLRUCacheShard::sum_bins(uint32_t start, uint32_t end) const {
  std::lock_guard<std::mutex> l(mutex_);
  auto size = age_bins.size();
  if (size < start) {
    return 0;
  }
  uint64_t bytes = 0;
  end = (size < end) ? size : end;
  for (auto i = start; i < end; i++) {
    bytes += *(age_bins[i]);
  }
  return bytes;
}

// Demote entries from the tail of the high-pri pool into the low-pri
// pool until the high-pri pool fits within its capacity.  Demoted
// entries start accruing in their age bin again.  Caller holds mutex_.
void BinnedLRUCacheShard::MaintainPoolSize() {
  while (high_pri_pool_usage_ > high_pri_pool_capacity_) {
    // Overflow last entry in high-pri pool to low-pri pool.
    lru_low_pri_ = lru_low_pri_->next;
    ceph_assert(lru_low_pri_ != &lru_);
    lru_low_pri_->SetInHighPriPool(false);
    high_pri_pool_usage_ -= lru_low_pri_->charge;
    *(lru_low_pri_->age_bin) += lru_low_pri_->charge;
  }
}
// Evict entries from the cold end of the LRU until (usage_ + charge)
// fits in capacity_ or the list is empty.  Evicted handles are appended
// to `deleted` so the caller can Free() them outside the mutex.
// Caller must hold mutex_.
void BinnedLRUCacheShard::EvictFromLRU(size_t charge,
                                       ceph::autovector<BinnedLRUHandle*>* deleted) {
  while (usage_ + charge > capacity_ && lru_.next != &lru_) {
    BinnedLRUHandle* old = lru_.next;
    ceph_assert(old->InCache());
    ceph_assert(old->refs == 1);  // LRU list contains elements which may be evicted
    LRU_Remove(old);
    table_.Remove(old->key(), old->hash);
    old->SetInCache(false);
    Unref(old);
    usage_ -= old->charge;
    deleted->push_back(old);
  }
}

// Change the shard's capacity, evicting as needed to fit the new limit.
void BinnedLRUCacheShard::SetCapacity(size_t capacity) {
  ceph::autovector<BinnedLRUHandle*> last_reference_list;
  {
    std::lock_guard<std::mutex> l(mutex_);
    capacity_ = capacity;
    high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
    EvictFromLRU(0, &last_reference_list);
  }
  // we free the entries here outside of mutex for
  // performance reasons
  for (auto entry : last_reference_list) {
    entry->Free();
  }
}

// Toggle rejection of inserts once the shard is full.
void BinnedLRUCacheShard::SetStrictCapacityLimit(bool strict_capacity_limit) {
  std::lock_guard<std::mutex> l(mutex_);
  strict_capacity_limit_ = strict_capacity_limit;
}
// Look up `key` in this shard.  On a hit the entry is taken off the LRU
// list (it is now externally referenced), its refcount is bumped, and
// it is marked as hit.  Returns nullptr on miss.
rocksdb::Cache::Handle* BinnedLRUCacheShard::Lookup(const rocksdb::Slice& key, uint32_t hash) {
  std::lock_guard<std::mutex> l(mutex_);
  BinnedLRUHandle* e = table_.Lookup(key, hash);
  if (e != nullptr) {
    ceph_assert(e->InCache());
    if (e->refs == 1) {
      // Only the cache held it; remove from the evictable list.
      LRU_Remove(e);
    }
    e->refs++;
    e->SetHit();
  }
  return reinterpret_cast<rocksdb::Cache::Handle*>(e);
}

// Add an external reference to an already-held handle.
bool BinnedLRUCacheShard::Ref(rocksdb::Cache::Handle* h) {
  BinnedLRUHandle* handle = reinterpret_cast<BinnedLRUHandle*>(h);
  std::lock_guard<std::mutex> l(mutex_);
  if (handle->InCache() && handle->refs == 1) {
    LRU_Remove(handle);
  }
  handle->refs++;
  return true;
}

// Change the high-pri reservation and demote overflow immediately.
void BinnedLRUCacheShard::SetHighPriPoolRatio(double high_pri_pool_ratio) {
  std::lock_guard<std::mutex> l(mutex_);
  high_pri_pool_ratio_ = high_pri_pool_ratio;
  high_pri_pool_capacity_ = capacity_ * high_pri_pool_ratio_;
  MaintainPoolSize();
}
// Drop one external reference to `handle`.  If that leaves only the
// cache's reference, the entry either goes back on the LRU list, or —
// when the cache is over capacity or `force_erase` is set — is removed
// from the table and freed.  Returns true when the entry was destroyed.
// The Free() call (user deleter) happens outside the mutex.
bool BinnedLRUCacheShard::Release(rocksdb::Cache::Handle* handle, bool force_erase) {
  if (handle == nullptr) {
    return false;
  }
  BinnedLRUHandle* e = reinterpret_cast<BinnedLRUHandle*>(handle);
  bool last_reference = false;
  {
    std::lock_guard<std::mutex> l(mutex_);
    last_reference = Unref(e);
    if (last_reference) {
      usage_ -= e->charge;
    }
    if (e->refs == 1 && e->InCache()) {
      // The item is still in cache, and nobody else holds a reference to it
      if (usage_ > capacity_ || force_erase) {
        // the cache is full
        // The LRU list must be empty since the cache is full
        ceph_assert(!(usage_ > capacity_) || lru_.next == &lru_);
        // take this opportunity and remove the item
        table_.Remove(e->key(), e->hash);
        e->SetInCache(false);
        Unref(e);
        usage_ -= e->charge;
        last_reference = true;
      } else {
        // put the item on the list to be potentially freed
        LRU_Insert(e);
      }
    }
  }
  // free outside of mutex
  if (last_reference) {
    e->Free();
  }
  return last_reference;
}
// Insert `value` under key/hash, charging `charge` bytes to the shard.
// On success a reference is returned through `handle` when it is
// non-null.  If the shard cannot make room and either the strict
// capacity limit is on or no handle was requested, the insert is
// rejected: with a handle, Status::Incomplete is returned and the
// caller keeps ownership of `value` (the deleter is NOT run); without
// one, OK is returned and the entry is freed as if evicted immediately.
// Evicted/rejected entries are Free()d outside the mutex.
rocksdb::Status BinnedLRUCacheShard::Insert(const rocksdb::Slice& key, uint32_t hash, void* value,
                                            size_t charge,
                                            DeleterFn deleter,
                                            rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) {
  auto e = new BinnedLRUHandle();
  rocksdb::Status s;
  ceph::autovector<BinnedLRUHandle*> last_reference_list;

  e->value = value;
  e->deleter = deleter;
  e->charge = charge;
  e->key_length = key.size();
  e->key_data = new char[e->key_length];
  e->flags = 0;
  e->hash = hash;
  e->refs = (handle == nullptr
               ? 1
               : 2);  // One from BinnedLRUCache, one for the returned handle
  e->next = e->prev = nullptr;
  e->SetInCache(true);
  e->SetPriority(priority);
  std::copy_n(key.data(), e->key_length, e->key_data);

  {
    std::lock_guard<std::mutex> l(mutex_);
    // Free the space following strict LRU policy until enough space
    // is freed or the lru list is empty
    EvictFromLRU(charge, &last_reference_list);

    if (usage_ - lru_usage_ + charge > capacity_ &&
        (strict_capacity_limit_ || handle == nullptr)) {
      if (handle == nullptr) {
        // Don't insert the entry but still return ok, as if the entry inserted
        // into cache and get evicted immediately.
        last_reference_list.push_back(e);
      } else {
        // Reject the insert.  The caller retains ownership of `value`,
        // so we must not run the deleter (no e->Free()); release the
        // handle and its separately-allocated key copy.  A bare
        // `delete e` would leak key_data.
        delete[] e->key_data;
        delete e;
        *handle = nullptr;
        s = rocksdb::Status::Incomplete("Insert failed due to LRU cache being full.");
      }
    } else {
      // insert into the cache
      // note that the cache might get larger than its capacity if not enough
      // space was freed
      BinnedLRUHandle* old = table_.Insert(e);
      usage_ += e->charge;
      if (old != nullptr) {
        old->SetInCache(false);
        if (Unref(old)) {
          usage_ -= old->charge;
          // old is on LRU because it's in cache and its reference count
          // was just 1 (Unref returned true)
          LRU_Remove(old);
          last_reference_list.push_back(old);
        }
      }
      if (handle == nullptr) {
        LRU_Insert(e);
      } else {
        *handle = reinterpret_cast<rocksdb::Cache::Handle*>(e);
      }
      s = rocksdb::Status::OK();
    }
  }

  // we free the entries here outside of mutex for
  // performance reasons
  for (auto entry : last_reference_list) {
    entry->Free();
  }

  return s;
}
// Remove `key` from the table.  If no external references remain the
// entry is freed (outside the mutex); otherwise it is merely detached
// and will be freed when the last holder releases it.
void BinnedLRUCacheShard::Erase(const rocksdb::Slice& key, uint32_t hash) {
  BinnedLRUHandle* e;
  bool last_reference = false;
  {
    std::lock_guard<std::mutex> l(mutex_);
    e = table_.Remove(key, hash);
    if (e != nullptr) {
      last_reference = Unref(e);
      if (last_reference) {
        usage_ -= e->charge;
      }
      if (last_reference && e->InCache()) {
        LRU_Remove(e);
      }
      e->SetInCache(false);
    }
  }
  // mutex not held here
  // last_reference will only be true if e != nullptr
  if (last_reference) {
    e->Free();
  }
}

// Total bytes charged to this shard (cached + pinned).
size_t BinnedLRUCacheShard::GetUsage() const {
  std::lock_guard<std::mutex> l(mutex_);
  return usage_;
}

// Bytes held by externally referenced (non-evictable) entries.
size_t BinnedLRUCacheShard::GetPinnedUsage() const {
  std::lock_guard<std::mutex> l(mutex_);
  ceph_assert(usage_ >= lru_usage_);
  return usage_ - lru_usage_;
}

// Start a new (empty) front age bin; old bins shift towards eviction.
void BinnedLRUCacheShard::shift_bins() {
  std::lock_guard<std::mutex> l(mutex_);
  age_bins.push_front(std::make_shared<uint64_t>(0));
}

// Number of age bins the circular buffer can hold.
uint32_t BinnedLRUCacheShard::get_bin_count() const {
  std::lock_guard<std::mutex> l(mutex_);
  return age_bins.capacity();
}

// Resize the age-bin circular buffer.
void BinnedLRUCacheShard::set_bin_count(uint32_t count) {
  std::lock_guard<std::mutex> l(mutex_);
  age_bins.set_capacity(count);
}

// Human-readable dump of this shard's tunables.
std::string BinnedLRUCacheShard::GetPrintableOptions() const {
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  {
    std::lock_guard<std::mutex> l(mutex_);
    snprintf(buffer, kBufferSize, " high_pri_pool_ratio: %.3lf\n",
             high_pri_pool_ratio_);
  }
  return std::string(buffer);
}

// Deleter registered with the given handle at insert time.
DeleterFn BinnedLRUCacheShard::GetDeleter(rocksdb::Cache::Handle* h) const
{
  auto* handle = reinterpret_cast<BinnedLRUHandle*>(h);
  return handle->deleter;
}
// Build a sharded LRU cache of 2^num_shard_bits shards, splitting
// `capacity` evenly (rounded up) across them.  Shards are constructed
// with placement-new into one cache-line-aligned allocation.
BinnedLRUCache::BinnedLRUCache(CephContext *c,
                               size_t capacity,
                               int num_shard_bits,
                               bool strict_capacity_limit,
                               double high_pri_pool_ratio)
  : ShardedCache(capacity, num_shard_bits, strict_capacity_limit), cct(c) {
  num_shards_ = 1 << num_shard_bits;
  // TODO: Switch over to use mempool
  int rc = posix_memalign((void**) &shards_,
                          CACHE_LINE_SIZE,
                          sizeof(BinnedLRUCacheShard) * num_shards_);
  if (rc != 0) {
    throw std::bad_alloc();
  }
  size_t per_shard = (capacity + (num_shards_ - 1)) / num_shards_;
  for (int i = 0; i < num_shards_; i++) {
    new (&shards_[i])
        BinnedLRUCacheShard(c, per_shard, strict_capacity_limit, high_pri_pool_ratio);
  }
}

// Destroy each placement-new'd shard, then free the aligned allocation
// made in the constructor.
BinnedLRUCache::~BinnedLRUCache() {
  for (int i = 0; i < num_shards_; i++) {
    shards_[i].~BinnedLRUCacheShard();
  }
  aligned_free(shards_);
}

CacheShard* BinnedLRUCache::GetShard(int shard) {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

const CacheShard* BinnedLRUCache::GetShard(int shard) const {
  return reinterpret_cast<CacheShard*>(&shards_[shard]);
}

// Value stored in the handle at insert time.
void* BinnedLRUCache::Value(Handle* handle) {
  return reinterpret_cast<const BinnedLRUHandle*>(handle)->value;
}

size_t BinnedLRUCache::GetCharge(Handle* handle) const {
  return reinterpret_cast<const BinnedLRUHandle*>(handle)->charge;
}

uint32_t BinnedLRUCache::GetHash(Handle* handle) const {
  return reinterpret_cast<const BinnedLRUHandle*>(handle)->hash;
}

// Intentionally leak the shard array at shutdown (fast exit); skipped
// under ASan so the leak does not trip the sanitizer.
void BinnedLRUCache::DisownData() {
  // Do not drop data if compile with ASAN to suppress leak warning.
#ifndef __SANITIZE_ADDRESS__
  shards_ = nullptr;
#endif  // !__SANITIZE_ADDRESS__
}

#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
DeleterFn BinnedLRUCache::GetDeleter(Handle* handle) const
{
  return reinterpret_cast<const BinnedLRUHandle*>(handle)->deleter;
}
#endif
// Test hook: total LRU-list length summed across all shards.
size_t BinnedLRUCache::TEST_GetLRUSize() {
  size_t lru_size_of_all_shards = 0;
  for (int i = 0; i < num_shards_; i++) {
    lru_size_of_all_shards += shards_[i].TEST_GetLRUSize();
  }
  return lru_size_of_all_shards;
}

// Push the same high-pri ratio down to every shard.
void BinnedLRUCache::SetHighPriPoolRatio(double high_pri_pool_ratio) {
  for (int i = 0; i < num_shards_; i++) {
    shards_[i].SetHighPriPoolRatio(high_pri_pool_ratio);
  }
}

// All shards share one ratio, so shard 0 is representative.
double BinnedLRUCache::GetHighPriPoolRatio() const {
  double result = 0.0;
  if (num_shards_ > 0) {
    result = shards_[0].GetHighPriPoolRatio();
  }
  return result;
}

size_t BinnedLRUCache::GetHighPriPoolUsage() const {
  // We will not lock the cache when getting the usage from shards.
  size_t usage = 0;
  for (int s = 0; s < num_shards_; s++) {
    usage += shards_[s].GetHighPriPoolUsage();
  }
  return usage;
}
// PriCache
// Compute how many additional bytes this cache wants for priority
// level `pri`, beyond what has already been assigned to it.
// PRI0 asks for a chunk covering the high-pri pool; LAST asks for
// whatever usage is not covered by the high-pri pool or any age bin;
// intermediate priorities ask for the byte total of their bin range.
int64_t BinnedLRUCache::request_cache_bytes(PriorityCache::Priority pri, uint64_t total_cache) const
{
  int64_t assigned = get_cache_bytes(pri);
  int64_t request = 0;

  switch(pri) {
  // PRI0 is for rocksdb's high priority items (indexes/filters)
  case PriorityCache::Priority::PRI0:
    {
      // Because we want the high pri cache to grow independently of the low
      // pri cache, request a chunky allocation independent of the other
      // priorities.
      request = PriorityCache::get_chunk(GetHighPriPoolUsage(), total_cache);
      break;
    }
  case PriorityCache::Priority::LAST:
    {
      auto max = get_bin_count();
      request = GetUsage();
      request -= GetHighPriPoolUsage();
      request -= sum_bins(0, max);
      break;
    }
  default:
    {
      ceph_assert(pri > 0 && pri < PriorityCache::Priority::LAST);
      auto prev_pri = static_cast<PriorityCache::Priority>(pri - 1);
      uint64_t start = get_bins(prev_pri);
      uint64_t end = get_bins(pri);
      request = sum_bins(start, end);
      break;
    }
  }
  // Only ask for the shortfall relative to what is already assigned.
  request = (request > assigned) ? request - assigned : 0;
  ldout(cct, 10) << __func__ << " Priority: " << static_cast<uint32_t>(pri)
                 << " Request: " << request << dendl;
  return request;
}

// Apply the bytes granted by the priority-cache balancer: resize the
// cache to a chunk-aligned size and rederive the high-pri pool ratio
// from the PRI0 share.  Returns the new committed size in bytes.
int64_t BinnedLRUCache::commit_cache_size(uint64_t total_bytes)
{
  size_t old_bytes = GetCapacity();
  int64_t new_bytes = PriorityCache::get_chunk(
    get_cache_bytes(), total_bytes);
  ldout(cct, 10) << __func__ << " old: " << old_bytes
                 << " new: " << new_bytes << dendl;
  SetCapacity((size_t) new_bytes);

  double ratio = 0;
  if (new_bytes > 0) {
    int64_t pri0_bytes = get_cache_bytes(PriorityCache::Priority::PRI0);
    ratio = (double) pri0_bytes / new_bytes;
  }
  ldout(cct, 5) << __func__ << " High Pri Pool Ratio set to " << ratio << dendl;
  SetHighPriPoolRatio(ratio);
  return new_bytes;
}
// Rotate the age bins in every shard.
void BinnedLRUCache::shift_bins() {
  for (int idx = 0; idx < num_shards_; ++idx) {
    shards_[idx].shift_bins();
  }
}

// Total bytes across all shards for age bins in [start, end).
uint64_t BinnedLRUCache::sum_bins(uint32_t start, uint32_t end) const {
  uint64_t total = 0;
  for (int idx = 0; idx < num_shards_; ++idx) {
    total += shards_[idx].sum_bins(start, end);
  }
  return total;
}

// Every shard carries the same bin count; report shard 0's value
// (0 when there are no shards).
uint32_t BinnedLRUCache::get_bin_count() const {
  if (num_shards_ > 0) {
    return shards_[0].get_bin_count();
  }
  return 0;
}

// Apply the new bin count to every shard.
void BinnedLRUCache::set_bin_count(uint32_t count) {
  for (int idx = 0; idx < num_shards_; ++idx) {
    shards_[idx].set_bin_count(count);
  }
}
// Factory: validate the parameters and build a BinnedLRUCache.
// Returns nullptr for an excessive shard-bit count (>= 20) or a
// high-pri ratio outside [0, 1]; a negative num_shard_bits selects
// a default based on capacity.
std::shared_ptr<rocksdb::Cache> NewBinnedLRUCache(
    CephContext *c,
    size_t capacity,
    int num_shard_bits,
    bool strict_capacity_limit,
    double high_pri_pool_ratio) {
  if (num_shard_bits >= 20) {
    return nullptr;  // the cache cannot be sharded into too many fine pieces
  }
  if (high_pri_pool_ratio < 0.0 || high_pri_pool_ratio > 1.0) {
    // invalid high_pri_pool_ratio
    return nullptr;
  }
  if (num_shard_bits < 0) {
    num_shard_bits = GetDefaultCacheShardBits(capacity);
  }
  return std::make_shared<BinnedLRUCache>(
    c, capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio);
}
} // namespace rocksdb_cache
| 20,729 | 27.791667 | 101 | cc |
null | ceph-main/src/kv/rocksdb_cache/BinnedLRUCache.h | // Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_BINNED_LRU_CACHE
#define ROCKSDB_BINNED_LRU_CACHE
#include <string>
#include <mutex>
#include <boost/circular_buffer.hpp>
#include "ShardedCache.h"
#include "common/autovector.h"
#include "common/dout.h"
#include "include/ceph_assert.h"
#include "common/ceph_context.h"
namespace rocksdb_cache {
// LRU cache implementation
// An entry is a variable length heap-allocated structure.
// Entries are referenced by cache and/or by any external entity.
// The cache keeps all its entries in table. Some elements
// are also stored on LRU list.
//
// BinnedLRUHandle can be in these states:
// 1. Referenced externally AND in hash table.
// In that case the entry is *not* in the LRU. (refs > 1 && in_cache == true)
// 2. Not referenced externally and in hash table. In that case the entry is
// in the LRU and can be freed. (refs == 1 && in_cache == true)
// 3. Referenced externally and not in hash table. In that case the entry is
// in not on LRU and not in table. (refs >= 1 && in_cache == false)
//
// All newly created BinnedLRUHandles are in state 1. If you call
// BinnedLRUCacheShard::Release
// on entry in state 1, it will go into state 2. To move from state 1 to
// state 3, either call BinnedLRUCacheShard::Erase or BinnedLRUCacheShard::Insert with the
// same key.
// To move from state 2 to state 1, use BinnedLRUCacheShard::Lookup.
// Before destruction, make sure that no handles are in state 1. This means
// that any successful BinnedLRUCacheShard::Lookup/BinnedLRUCacheShard::Insert have a
// matching
// RUCache::Release (to move into state 2) or BinnedLRUCacheShard::Erase (for state 3)
std::shared_ptr<rocksdb::Cache> NewBinnedLRUCache(
CephContext *c,
size_t capacity,
int num_shard_bits = -1,
bool strict_capacity_limit = false,
double high_pri_pool_ratio = 0.0);
// One cache entry.  Lives in the hash table while in the cache, and on
// the intrusive LRU list whenever only the cache references it.
struct BinnedLRUHandle {
  // Age-bin byte counter this (low-pri) entry is charged against.
  std::shared_ptr<uint64_t> age_bin;
  void* value;
  DeleterFn deleter;
  BinnedLRUHandle* next_hash;
  BinnedLRUHandle* next;
  BinnedLRUHandle* prev;
  size_t charge;  // TODO(opt): Only allow uint32_t?
  size_t key_length;
  uint32_t refs;  // a number of refs to this entry
                  // cache itself is counted as 1

  // Include the following flags:
  //   bit 0 (in_cache):         whether this entry is referenced by the hash table.
  //   bit 1 (is_high_pri):      whether this entry is high priority entry.
  //   bit 2 (in_high_pri_pool): whether this entry is in high-pri pool.
  //   bit 3 (has_hit):          whether this entry has been looked up since insert.
  char flags;

  uint32_t hash;  // Hash of key(); used for fast sharding and comparisons

  char* key_data = nullptr;  // Beginning of key (owned; freed by Free())

  rocksdb::Slice key() const {
    // For cheaper lookups, we allow a temporary Handle object
    // to store a pointer to a key in "value".
    if (next == this) {
      return *(reinterpret_cast<rocksdb::Slice*>(value));
    } else {
      return rocksdb::Slice(key_data, key_length);
    }
  }

  bool InCache() { return flags & 1; }
  bool IsHighPri() { return flags & 2; }
  bool InHighPriPool() { return flags & 4; }
  bool HasHit() { return flags & 8; }

  void SetInCache(bool in_cache) {
    if (in_cache) {
      flags |= 1;
    } else {
      flags &= ~1;
    }
  }

  void SetPriority(rocksdb::Cache::Priority priority) {
    if (priority == rocksdb::Cache::Priority::HIGH) {
      flags |= 2;
    } else {
      flags &= ~2;
    }
  }

  void SetInHighPriPool(bool in_high_pri_pool) {
    if (in_high_pri_pool) {
      flags |= 4;
    } else {
      flags &= ~4;
    }
  }

  void SetHit() { flags |= 8; }

  // Run the user deleter (if any), release the key copy and the handle.
  // Only legal on the last reference.
  void Free() {
    ceph_assert((refs == 1 && InCache()) || (refs == 0 && !InCache()));
    if (deleter) {
      (*deleter)(key(), value);
    }
    delete[] key_data;
    delete this;
  }
};
// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested. E.g., readrandom speeds up by ~5% over the g++
// 4.4.3's builtin hashtable.
class BinnedLRUHandleTable {
 public:
  BinnedLRUHandleTable();
  ~BinnedLRUHandleTable();

  BinnedLRUHandle* Lookup(const rocksdb::Slice& key, uint32_t hash);
  // Insert h; returns the previous entry with the same key (if any),
  // which the caller is responsible for releasing.
  BinnedLRUHandle* Insert(BinnedLRUHandle* h);
  BinnedLRUHandle* Remove(const rocksdb::Slice& key, uint32_t hash);

  // Apply func to every handle in the table.
  template <typename T>
  void ApplyToAllCacheEntries(T func) {
    for (uint32_t i = 0; i < length_; i++) {
      BinnedLRUHandle* h = list_[i];
      while (h != nullptr) {
        auto n = h->next_hash;
        ceph_assert(h->InCache());
        func(h);
        h = n;
      }
    }
  }

 private:
  // Return a pointer to slot that points to a cache entry that
  // matches key/hash.  If there is no such cache entry, return a
  // pointer to the trailing slot in the corresponding linked list.
  BinnedLRUHandle** FindPointer(const rocksdb::Slice& key, uint32_t hash);

  void Resize();

  // The table consists of an array of buckets where each bucket is
  // a linked list of cache entries that hash into the bucket.
  BinnedLRUHandle** list_;
  uint32_t length_;
  uint32_t elems_;
};
// A single shard of sharded cache.
class alignas(CACHE_LINE_SIZE) BinnedLRUCacheShard : public CacheShard {
 public:
  BinnedLRUCacheShard(CephContext *c, size_t capacity, bool strict_capacity_limit,
                      double high_pri_pool_ratio);
  virtual ~BinnedLRUCacheShard();

  // Separate from constructor so caller can easily make an array of BinnedLRUCache
  // if current usage is more than new capacity, the function will attempt to
  // free the needed space
  virtual void SetCapacity(size_t capacity) override;

  // Set the flag to reject insertion if cache is full.
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;

  // Set percentage of capacity reserved for high-pri cache entries.
  void SetHighPriPoolRatio(double high_pri_pool_ratio);

  // Like Cache methods, but with an extra "hash" parameter.
  virtual rocksdb::Status Insert(const rocksdb::Slice& key, uint32_t hash, void* value,
                                 size_t charge,
                                 DeleterFn deleter,
                                 rocksdb::Cache::Handle** handle,
                                 rocksdb::Cache::Priority priority) override;
  virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, uint32_t hash) override;
  virtual bool Ref(rocksdb::Cache::Handle* handle) override;
  virtual bool Release(rocksdb::Cache::Handle* handle,
                       bool force_erase = false) override;
  virtual void Erase(const rocksdb::Slice& key, uint32_t hash) override;

  // Although in some platforms the update of size_t is atomic, to make sure
  // GetUsage() and GetPinnedUsage() work correctly under any platform, we'll
  // protect them with mutex_.
  virtual size_t GetUsage() const override;
  virtual size_t GetPinnedUsage() const override;

  virtual void ApplyToAllCacheEntries(
    const std::function<void(const rocksdb::Slice& key,
                             void* value,
                             size_t charge,
                             DeleterFn)>& callback,
    bool thread_safe) override;

  virtual void EraseUnRefEntries() override;

  virtual std::string GetPrintableOptions() const override;

  virtual DeleterFn GetDeleter(rocksdb::Cache::Handle* handle) const override;

  void TEST_GetLRUList(BinnedLRUHandle** lru, BinnedLRUHandle** lru_low_pri);

  // Retrieves number of elements in LRU, for unit test purpose only
  // not threadsafe
  size_t TEST_GetLRUSize();

  // Retrieves high pri pool ratio
  double GetHighPriPoolRatio() const;

  // Retrieves high pri pool usage
  size_t GetHighPriPoolUsage() const;

  // Rotate the bins
  void shift_bins();

  // Get the bin count
  uint32_t get_bin_count() const;

  // Set the bin count
  void set_bin_count(uint32_t count);

  // Get the byte counts for a range of age bins
  uint64_t sum_bins(uint32_t start, uint32_t end) const;

 private:
  CephContext *cct;
  void LRU_Remove(BinnedLRUHandle* e);
  void LRU_Insert(BinnedLRUHandle* e);

  // Overflow the last entry in high-pri pool to low-pri pool until size of
  // high-pri pool is no larger than the size specified by high_pri_pool_pct.
  void MaintainPoolSize();

  // Just reduce the reference count by 1.
  // Return true if last reference
  bool Unref(BinnedLRUHandle* e);

  // Free some space following strict LRU policy until enough space
  // to hold (usage_ + charge) is freed or the lru list is empty
  // This function is not thread safe - it needs to be executed while
  // holding the mutex_
  void EvictFromLRU(size_t charge, ceph::autovector<BinnedLRUHandle*>* deleted);

  // Initialized before use.
  size_t capacity_;

  // Memory size for entries in high-pri pool.
  size_t high_pri_pool_usage_;

  // Whether to reject insertion if cache reaches its full capacity.
  bool strict_capacity_limit_;

  // Ratio of capacity reserved for high priority cache entries.
  double high_pri_pool_ratio_;

  // High-pri pool size, equals to capacity * high_pri_pool_ratio.
  // Remember the value to avoid recomputing each time.
  double high_pri_pool_capacity_;

  // Dummy head of LRU list.
  // lru.prev is newest entry, lru.next is oldest entry.
  // LRU contains items which can be evicted, ie reference only by cache
  BinnedLRUHandle lru_;

  // Pointer to head of low-pri pool in LRU list.
  BinnedLRUHandle* lru_low_pri_;

  // ------------^^^^^^^^^^^^^-----------
  // Not frequently modified data members
  // ------------------------------------
  //
  // We separate data members that are updated frequently from the ones that
  // are not frequently updated so that they don't share the same cache line
  // which will lead into false cache sharing
  //
  // ------------------------------------
  // Frequently modified data members
  // ------------vvvvvvvvvvvvv-----------
  BinnedLRUHandleTable table_;

  // Memory size for entries residing in the cache
  size_t usage_;

  // Memory size for entries residing only in the LRU list
  size_t lru_usage_;

  // mutex_ protects the following state.
  // We don't count mutex_ as the cache's internal state so semantically we
  // don't mind mutex_ invoking the non-const actions.
  mutable std::mutex mutex_;

  // Circular buffer of byte counters for age binning
  boost::circular_buffer<std::shared_ptr<uint64_t>> age_bins;
};
// Sharded binned-LRU cache: fans every operation out to
// 2^num_shard_bits BinnedLRUCacheShard instances, and plugs into
// Ceph's PriorityCache balancer via the PriCache overrides below.
class BinnedLRUCache : public ShardedCache {
 public:
  BinnedLRUCache(CephContext *c, size_t capacity, int num_shard_bits,
                 bool strict_capacity_limit, double high_pri_pool_ratio);
  virtual ~BinnedLRUCache();
  virtual const char* Name() const override { return "BinnedLRUCache"; }
  virtual CacheShard* GetShard(int shard) override;
  virtual const CacheShard* GetShard(int shard) const override;
  virtual void* Value(Handle* handle) override;
  virtual size_t GetCharge(Handle* handle) const override;
  virtual uint32_t GetHash(Handle* handle) const override;
  virtual void DisownData() override;
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
  virtual DeleterFn GetDeleter(Handle* handle) const override;
#endif

  // Retrieves number of elements in LRU, for unit test purpose only
  size_t TEST_GetLRUSize();
  // Sets the high pri pool ratio
  void SetHighPriPoolRatio(double high_pri_pool_ratio);
  // Retrieves high pri pool ratio
  double GetHighPriPoolRatio() const;
  // Retrieves high pri pool usage
  size_t GetHighPriPoolUsage() const;

  // PriorityCache
  virtual int64_t request_cache_bytes(
    PriorityCache::Priority pri, uint64_t total_cache) const;
  virtual int64_t commit_cache_size(uint64_t total_cache);
  virtual int64_t get_committed_size() const {
    return GetCapacity();
  }
  virtual void shift_bins();
  uint64_t sum_bins(uint32_t start, uint32_t end) const;
  uint32_t get_bin_count() const;
  void set_bin_count(uint32_t count);
  virtual std::string get_cache_name() const {
    return "RocksDB Binned LRU Cache";
  }

 private:
  CephContext *cct;
  // Cache-line-aligned array of shards (posix_memalign + placement new).
  BinnedLRUCacheShard* shards_;
  int num_shards_ = 0;
};
} // namespace rocksdb_cache
#endif // ROCKSDB_BINNED_LRU_CACHE
| 12,504 | 33.073569 | 92 | h |
null | ceph-main/src/kv/rocksdb_cache/ShardedCache.cc | // Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include "ShardedCache.h"
#include <string>
namespace rocksdb_cache {
// Record the sharding parameters; shard construction is left to the
// derived class.  Handle ids start at 1 (NewId increments from here).
ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
                           bool strict_capacity_limit)
  : num_shard_bits_(num_shard_bits),
    capacity_(capacity),
    strict_capacity_limit_(strict_capacity_limit),
    last_id_(1) {}
void ShardedCache::SetCapacity(size_t capacity) {
int num_shards = 1 << num_shard_bits_;
const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
std::lock_guard<std::mutex> l(capacity_mutex_);
for (int s = 0; s < num_shards; s++) {
GetShard(s)->SetCapacity(per_shard);
}
capacity_ = capacity;
}
void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
int num_shards = 1 << num_shard_bits_;
std::lock_guard<std::mutex> l(capacity_mutex_);
for (int s = 0; s < num_shards; s++) {
GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
}
strict_capacity_limit_ = strict_capacity_limit;
}
// Route Insert to the shard selected by the key's hash.
rocksdb::Status ShardedCache::Insert(const rocksdb::Slice& key, void* value, size_t charge,
                                     DeleterFn deleter,
                                     rocksdb::Cache::Handle** handle, Priority priority) {
  uint32_t hash = HashSlice(key);
  return GetShard(Shard(hash))
      ->Insert(key, hash, value, charge, deleter, handle, priority);
}

// Route Lookup to the shard selected by the key's hash.
rocksdb::Cache::Handle* ShardedCache::Lookup(const rocksdb::Slice& key, rocksdb::Statistics* /*stats*/) {
  uint32_t hash = HashSlice(key);
  return GetShard(Shard(hash))->Lookup(key, hash);
}

// Route Ref/Release using the hash stored in the handle itself.
bool ShardedCache::Ref(rocksdb::Cache::Handle* handle) {
  uint32_t hash = GetHash(handle);
  return GetShard(Shard(hash))->Ref(handle);
}

bool ShardedCache::Release(rocksdb::Cache::Handle* handle, bool force_erase) {
  uint32_t hash = GetHash(handle);
  return GetShard(Shard(hash))->Release(handle, force_erase);
}

void ShardedCache::Erase(const rocksdb::Slice& key) {
  uint32_t hash = HashSlice(key);
  GetShard(Shard(hash))->Erase(key, hash);
}

// Hand out a process-unique id (monotonically increasing).
uint64_t ShardedCache::NewId() {
  return last_id_.fetch_add(1, std::memory_order_relaxed);
}

size_t ShardedCache::GetCapacity() const {
  std::lock_guard<std::mutex> l(capacity_mutex_);
  return capacity_;
}

bool ShardedCache::HasStrictCapacityLimit() const {
  std::lock_guard<std::mutex> l(capacity_mutex_);
  return strict_capacity_limit_;
}

size_t ShardedCache::GetUsage() const {
  // We will not lock the cache when getting the usage from shards.
  int num_shards = 1 << num_shard_bits_;
  size_t usage = 0;
  for (int s = 0; s < num_shards; s++) {
    usage += GetShard(s)->GetUsage();
  }
  return usage;
}

size_t ShardedCache::GetUsage(rocksdb::Cache::Handle* handle) const {
  return GetCharge(handle);
}

size_t ShardedCache::GetPinnedUsage() const {
  // We will not lock the cache when getting the usage from shards.
  int num_shards = 1 << num_shard_bits_;
  size_t usage = 0;
  for (int s = 0; s < num_shards; s++) {
    usage += GetShard(s)->GetPinnedUsage();
  }
  return usage;
}
// The rocksdb::Cache traversal/deleter API changed in 6.22; both
// variants below fan out to every shard.
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
DeleterFn ShardedCache::GetDeleter(Handle* handle) const
{
  uint32_t hash = GetHash(handle);
  return GetShard(Shard(hash))->GetDeleter(handle);
}

void ShardedCache::ApplyToAllEntries(
    const std::function<void(const rocksdb::Slice& key, void* value, size_t charge,
                             DeleterFn deleter)>& callback,
    const ApplyToAllEntriesOptions& opts)
{
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->ApplyToAllCacheEntries(callback, true /* thread_safe */);
  }
}
#else
// Legacy signature: adapt the (value, charge) callback to the
// richer per-shard callback.
void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                          bool thread_safe) {
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->ApplyToAllCacheEntries(
        [callback](const rocksdb::Slice&, void* value, size_t charge, DeleterFn) {
          callback(value, charge);
        },
        thread_safe);
  }
}
#endif

// Drop every entry that is only referenced by the cache, in all shards.
void ShardedCache::EraseUnRefEntries() {
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->EraseUnRefEntries();
  }
}
// Human-readable dump of the sharding parameters plus shard 0's options.
std::string ShardedCache::GetPrintableOptions() const {
  std::string ret;
  ret.reserve(20000);
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  {
    std::lock_guard<std::mutex> l(capacity_mutex_);
    snprintf(buffer, kBufferSize, " capacity : %zu\n",
             capacity_);
    ret.append(buffer);
    snprintf(buffer, kBufferSize, " num_shard_bits : %d\n", num_shard_bits_);
    ret.append(buffer);
    snprintf(buffer, kBufferSize, " strict_capacity_limit : %d\n",
             strict_capacity_limit_);
    ret.append(buffer);
  }
  ret.append(GetShard(0)->GetPrintableOptions());
  return ret;
}
// Pick a shard-bit count for `capacity` so that every shard holds at
// least 512KB, capped at 6 bits (64 shards).
int GetDefaultCacheShardBits(size_t capacity) {
  constexpr size_t kMinShardSize = 512L * 1024L;  // Every shard is at least 512KB.
  size_t shards = capacity / kMinShardSize;
  int bits = 0;
  while ((shards >>= 1) != 0) {
    ++bits;
    if (bits >= 6) {
      // No more than 6.
      return bits;
    }
  }
  return bits;
}
} // namespace rocksdb_cache
| 5,696 | 30.131148 | 105 | cc |
null | ceph-main/src/kv/rocksdb_cache/ShardedCache.h | // Copyright (c) 2018-Present Red Hat Inc. All rights reserved.
//
// Copyright (c) 2011-2018, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 and Apache 2.0 License
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#ifndef ROCKSDB_SHARDED_CACHE
#define ROCKSDB_SHARDED_CACHE
#include <atomic>
#include <string>
#include <mutex>
#include "rocksdb/version.h"
#include "rocksdb/cache.h"
#include "include/ceph_hash.h"
#include "common/PriorityCache.h"
//#include "hash.h"
#ifndef CACHE_LINE_SIZE
#define CACHE_LINE_SIZE 64 // XXX arch-specific define
#endif
namespace rocksdb_cache {
using DeleterFn = void (*)(const rocksdb::Slice& key, void* value);
// Single cache shard interface.  Mirrors rocksdb::Cache's operations,
// but every key-addressed call also receives the precomputed hash so
// the shard does not rehash.
class CacheShard {
 public:
  CacheShard() = default;
  virtual ~CacheShard() = default;

  virtual rocksdb::Status Insert(const rocksdb::Slice& key, uint32_t hash, void* value,
                                 size_t charge,
				 DeleterFn deleter,
                                 rocksdb::Cache::Handle** handle, rocksdb::Cache::Priority priority) = 0;
  virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, uint32_t hash) = 0;
  virtual bool Ref(rocksdb::Cache::Handle* handle) = 0;
  virtual bool Release(rocksdb::Cache::Handle* handle, bool force_erase = false) = 0;
  virtual void Erase(const rocksdb::Slice& key, uint32_t hash) = 0;
  virtual void SetCapacity(size_t capacity) = 0;
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0;
  virtual size_t GetUsage() const = 0;
  virtual size_t GetPinnedUsage() const = 0;
  virtual void ApplyToAllCacheEntries(
    const std::function<void(const rocksdb::Slice& key,
                             void* value,
                             size_t charge,
                             DeleterFn)>& callback,
    bool thread_safe) = 0;
  virtual void EraseUnRefEntries() = 0;
  virtual std::string GetPrintableOptions() const { return ""; }
  virtual DeleterFn GetDeleter(rocksdb::Cache::Handle* handle) const = 0;
};
// Generic cache interface which shards cache by hash of keys. 2^num_shard_bits
// shards will be created, with capacity split evenly to each of the shards.
// Keys are sharded by the highest num_shard_bits bits of hash value.
class ShardedCache : public rocksdb::Cache, public PriorityCache::PriCache {
 public:
  ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit);
  virtual ~ShardedCache() = default;
  // rocksdb::Cache
  virtual const char* Name() const override = 0;
  virtual rocksdb::Status Insert(const rocksdb::Slice& key, void* value, size_t charge,
                                 DeleterFn,
                                 rocksdb::Cache::Handle** handle, Priority priority) override;
  virtual rocksdb::Cache::Handle* Lookup(const rocksdb::Slice& key, rocksdb::Statistics* stats) override;
  virtual bool Ref(rocksdb::Cache::Handle* handle) override;
  virtual bool Release(rocksdb::Cache::Handle* handle, bool force_erase = false) override;
  virtual void* Value(Handle* handle) override = 0;
  virtual void Erase(const rocksdb::Slice& key) override;
  virtual uint64_t NewId() override;
  virtual void SetCapacity(size_t capacity) override;
  virtual void SetStrictCapacityLimit(bool strict_capacity_limit) override;
  virtual bool HasStrictCapacityLimit() const override;
  virtual size_t GetCapacity() const override;
  virtual size_t GetUsage() const override;
  virtual size_t GetUsage(rocksdb::Cache::Handle* handle) const override;
  virtual size_t GetPinnedUsage() const override;
  virtual size_t GetCharge(Handle* handle) const = 0;
// rocksdb >= 6.22 moved deleter retrieval and whole-cache iteration into the
// Cache interface; keep both signatures buildable.
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
  virtual DeleterFn GetDeleter(Handle* handle) const override;
#endif
  virtual void DisownData() override = 0;
#if (ROCKSDB_MAJOR >= 7 || (ROCKSDB_MAJOR == 6 && ROCKSDB_MINOR >= 22))
  virtual void ApplyToAllEntries(
      const std::function<void(const rocksdb::Slice& key, void* value, size_t charge,
                               DeleterFn deleter)>& callback,
      const ApplyToAllEntriesOptions& opts) override;
#else
  virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                      bool thread_safe) override;
#endif
  virtual void EraseUnRefEntries() override;
  virtual std::string GetPrintableOptions() const override;
  // Shard accessors implemented by the concrete cache (one CacheShard per
  // 2^num_shard_bits_ slot).
  virtual CacheShard* GetShard(int shard) = 0;
  virtual const CacheShard* GetShard(int shard) const = 0;
  virtual uint32_t GetHash(Handle* handle) const = 0;
  int GetNumShardBits() const { return num_shard_bits_; }
  virtual uint32_t get_bin_count() const = 0;
  virtual void set_bin_count(uint32_t count) = 0;
  // PriCache
  // Bytes currently attributed to a single priority class.
  virtual int64_t get_cache_bytes(PriorityCache::Priority pri) const {
    return cache_bytes[pri];
  }
  // Sum of bytes across all priority classes.
  virtual int64_t get_cache_bytes() const {
    int64_t total = 0;
    for (int i = 0; i < PriorityCache::Priority::LAST + 1; i++) {
      PriorityCache::Priority pri = static_cast<PriorityCache::Priority>(i);
      total += get_cache_bytes(pri);
    }
    return total;
  }
  virtual void set_cache_bytes(PriorityCache::Priority pri, int64_t bytes) {
    cache_bytes[pri] = bytes;
  }
  virtual void add_cache_bytes(PriorityCache::Priority pri, int64_t bytes) {
    cache_bytes[pri] += bytes;
  }
  virtual double get_cache_ratio() const {
    return cache_ratio;
  }
  virtual void set_cache_ratio(double ratio) {
    cache_ratio = ratio;
  }
  // Bin boundaries are only tracked for priorities strictly between PRI0 and
  // LAST; anything else reads as 0 / is ignored on write.
  virtual uint64_t get_bins(PriorityCache::Priority pri) const {
    if (pri > PriorityCache::Priority::PRI0 &&
        pri < PriorityCache::Priority::LAST) {
      return bins[pri];
    }
    return 0;
  }
  virtual void set_bins(PriorityCache::Priority pri, uint64_t end_bin) {
    if (pri <= PriorityCache::Priority::PRI0 ||
        pri >= PriorityCache::Priority::LAST) {
      return;
    }
    bins[pri] = end_bin;
    // Recompute the overall bin count as the max across all priorities.
    // (The loop variable intentionally shadows the parameter.)
    uint64_t max = 0;
    for (int pri = 1; pri < PriorityCache::Priority::LAST; pri++) {
      if (bins[pri] > max) {
        max = bins[pri];
      }
    }
    set_bin_count(max);
  }
  // Bulk-load bin boundaries; entries beyond the supplied vector are zeroed.
  virtual void import_bins(const std::vector<uint64_t> &bins_v) {
    uint64_t max = 0;
    for (int pri = 1; pri < PriorityCache::Priority::LAST; pri++) {
      unsigned i = (unsigned) pri - 1;
      if (i < bins_v.size()) {
        bins[pri] = bins_v[i];
        if (bins[pri] > max) {
          max = bins[pri];
        }
      } else {
        bins[pri] = 0;
      }
    }
    set_bin_count(max);
  }
  virtual std::string get_cache_name() const = 0;
 private:
  // Keys are hashed with Ceph's rjenkins string hash rather than rocksdb's.
  static inline uint32_t HashSlice(const rocksdb::Slice& s) {
    return ceph_str_hash(CEPH_STR_HASH_RJENKINS, s.data(), s.size());
    // return Hash(s.data(), s.size(), 0);
  }
  // Select a shard from the top num_shard_bits_ bits of the hash.
  uint32_t Shard(uint32_t hash) const {
    // Note, hash >> 32 yields hash in gcc, not the zero we expect!
    return (num_shard_bits_ > 0) ? (hash >> (32 - num_shard_bits_)) : 0;
  }
  uint64_t bins[PriorityCache::Priority::LAST+1] = {0};
  int64_t cache_bytes[PriorityCache::Priority::LAST+1] = {0};
  double cache_ratio = 0;
  int num_shard_bits_;
  mutable std::mutex capacity_mutex_;  // guards capacity_/strict_capacity_limit_
  size_t capacity_;
  bool strict_capacity_limit_;
  std::atomic<uint64_t> last_id_;      // NewId() counter
};
extern int GetDefaultCacheShardBits(size_t capacity);
} // namespace rocksdb_cache
#endif // ROCKSDB_SHARDED_CACHE
| 7,499 | 36.878788 | 105 | h |
null | ceph-main/src/librados/AioCompletionImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_AIOCOMPLETIONIMPL_H
#define CEPH_LIBRADOS_AIOCOMPLETIONIMPL_H
#include "common/ceph_mutex.h"
#include "include/buffer.h"
#include "include/xlist.h"
#include "osd/osd_types.h"
class IoCtxImpl;
// Shared state behind a librados AioCompletion.  Manually reference-counted:
// the creator holds one ref (ref starts at 1); in-flight callbacks take extra
// refs via get()/_get(); the object deletes itself when the count hits zero
// in put_unlock().  'lock' guards every field; 'cond' is signalled when the
// operation completes.
struct librados::AioCompletionImpl {
  ceph::mutex lock = ceph::make_mutex("AioCompletionImpl lock", false);
  ceph::condition_variable cond;
  int ref = 1, rval = 0;
  bool released = false;  // set once release() is called; guards double-release
  bool complete = false;
  version_t objver = 0;
  ceph_tid_t tid = 0;
  rados_callback_t callback_complete = nullptr, callback_safe = nullptr;
  void *callback_complete_arg = nullptr, *callback_safe_arg = nullptr;
  // for read
  bool is_read = false;
  bufferlist bl;
  bufferlist *blp = nullptr;
  char *out_buf = nullptr;
  IoCtxImpl *io = nullptr;
  ceph_tid_t aio_write_seq = 0;          // position in the ioctx's write ordering
  xlist<AioCompletionImpl*>::item aio_write_list_item;
  AioCompletionImpl() : aio_write_list_item(this) { }
  int set_complete_callback(void *cb_arg, rados_callback_t cb) {
    std::scoped_lock l{lock};
    callback_complete = cb;
    callback_complete_arg = cb_arg;
    return 0;
  }
  int set_safe_callback(void *cb_arg, rados_callback_t cb) {
    std::scoped_lock l{lock};
    callback_safe = cb;
    callback_safe_arg = cb_arg;
    return 0;
  }
  // Block until the operation has completed (callbacks may still be pending).
  int wait_for_complete() {
    std::unique_lock l{lock};
    cond.wait(l, [this] { return complete; });
    return 0;
  }
  // "safe" is an alias of "complete" in this implementation.
  int wait_for_safe() {
    return wait_for_complete();
  }
  int is_complete() {
    std::scoped_lock l{lock};
    return complete;
  }
  int is_safe() {
    return is_complete();
  }
  // Block until complete AND both callbacks have been consumed (they are
  // nulled out by the callback dispatcher after firing).
  int wait_for_complete_and_cb() {
    std::unique_lock l{lock};
    cond.wait(l, [this] { return complete && !callback_complete && !callback_safe; });
    return 0;
  }
  int wait_for_safe_and_cb() {
    return wait_for_complete_and_cb();
  }
  int is_complete_and_cb() {
    std::scoped_lock l{lock};
    return complete && !callback_complete && !callback_safe;
  }
  int is_safe_and_cb() {
    return is_complete_and_cb();
  }
  int get_return_value() {
    std::scoped_lock l{lock};
    return rval;
  }
  uint64_t get_version() {
    std::scoped_lock l{lock};
    return objver;
  }
  // Take a reference (locking wrapper around _get()).
  void get() {
    std::scoped_lock l{lock};
    _get();
  }
  // Caller must already hold 'lock'.
  void _get() {
    ceph_assert(ceph_mutex_is_locked(lock));
    ceph_assert(ref > 0);
    ++ref;
  }
  // Drop the user's reference; must be called exactly once.
  void release() {
    lock.lock();
    ceph_assert(!released);
    released = true;
    put_unlock();
  }
  void put() {
    lock.lock();
    put_unlock();
  }
  // Drop one ref and unlock; deletes 'this' when the count reaches zero.
  void put_unlock() {
    ceph_assert(ref > 0);
    int n = --ref;
    lock.unlock();
    if (!n)
      delete this;
  }
};
namespace librados {
// Deferred-dispatch functor that fires a completion's user callbacks.
// Takes a ref on construction (under the caller's lock) and drops it after
// the callbacks run, so the completion outlives the dispatch.
struct CB_AioComplete {
  AioCompletionImpl *c;
  explicit CB_AioComplete(AioCompletionImpl *cc) : c(cc) {
    c->_get();  // caller must hold c->lock
  }
  void operator()() {
    // Snapshot the callbacks, then invoke them outside the lock.
    rados_callback_t cb_complete = c->callback_complete;
    void *cb_complete_arg = c->callback_complete_arg;
    if (cb_complete)
      cb_complete(c, cb_complete_arg);
    rados_callback_t cb_safe = c->callback_safe;
    void *cb_safe_arg = c->callback_safe_arg;
    if (cb_safe)
      cb_safe(c, cb_safe_arg);
    // Null the callbacks so wait_for_complete_and_cb() can make progress.
    c->lock.lock();
    c->callback_complete = NULL;
    c->callback_safe = NULL;
    c->cond.notify_all();
    c->put_unlock();
  }
};
/**
* Fills in all completed request data, and calls both
* complete and safe callbacks if they exist.
*
* Not useful for usual I/O, but for special things like
* flush where we only want to wait for things to be safe,
* but allow users to specify any of the callbacks.
*/
/**
 * Fills in all completed request data, and calls both
 * complete and safe callbacks if they exist.
 *
 * Not useful for usual I/O, but for special things like
 * flush where we only want to wait for things to be safe,
 * but allow users to specify any of the callbacks.
 *
 * NOTE(review): the ctor takes a ref but there is no destructor to drop it;
 * the ref is only released when operator() runs, so the functor must be
 * invoked exactly once — TODO confirm every enqueue site guarantees this.
 */
struct CB_AioCompleteAndSafe {
  AioCompletionImpl *c;
  explicit CB_AioCompleteAndSafe(AioCompletionImpl *cc) : c(cc) {
    c->get();
  }
  // Move-only: moving transfers the reference; the moved-from functor holds
  // nullptr and must not be invoked.
  CB_AioCompleteAndSafe(const CB_AioCompleteAndSafe&) = delete;
  CB_AioCompleteAndSafe& operator =(const CB_AioCompleteAndSafe&) = delete;
  CB_AioCompleteAndSafe(CB_AioCompleteAndSafe&& rhs) {
    c = rhs.c;
    rhs.c = nullptr;
  }
  CB_AioCompleteAndSafe& operator =(CB_AioCompleteAndSafe&& rhs) {
    c = rhs.c;
    rhs.c = nullptr;
    return *this;
  }
  void operator()(int r = 0) {
    // Record the result and mark the completion done ...
    c->lock.lock();
    c->rval = r;
    c->complete = true;
    c->lock.unlock();
    // ... then fire both user callbacks outside the lock.
    rados_callback_t cb_complete = c->callback_complete;
    void *cb_complete_arg = c->callback_complete_arg;
    if (cb_complete)
      cb_complete(c, cb_complete_arg);
    rados_callback_t cb_safe = c->callback_safe;
    void *cb_safe_arg = c->callback_safe_arg;
    if (cb_safe)
      cb_safe(c, cb_safe_arg);
    c->lock.lock();
    c->callback_complete = NULL;
    c->callback_safe = NULL;
    c->cond.notify_all();
    c->put_unlock();
  }
};
}
#endif
| 5,013 | 22.990431 | 86 | h |
null | ceph-main/src/librados/IoCtxImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <limits.h>
#include "IoCtxImpl.h"
#include "librados/librados_c.h"
#include "librados/AioCompletionImpl.h"
#include "librados/PoolAsyncCompletionImpl.h"
#include "librados/RadosClient.h"
#include "include/ceph_assert.h"
#include "common/valgrind.h"
#include "common/EventTrace.h"
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
using std::string;
using std::map;
using std::unique_lock;
using std::vector;
namespace bs = boost::system;
namespace ca = ceph::async;
namespace cb = ceph::buffer;
namespace librados {
namespace {
// Completion functor for the data payload of a watch/notify: copies the
// notify reply into whichever output form the caller asked for (bufferlist
// and/or malloc'd C buffer), then completes the user context.
struct CB_notify_Finish {
  CephContext *cct;
  Context *ctx;
  Objecter *objecter;
  Objecter::LingerOp *linger_op;
  bufferlist *preply_bl;     // optional C++ reply sink
  char **preply_buf;         // optional C reply sink (malloc'd, caller frees)
  size_t *preply_buf_len;
  CB_notify_Finish(CephContext *_cct, Context *_ctx, Objecter *_objecter,
                   Objecter::LingerOp *_linger_op, bufferlist *_preply_bl,
                   char **_preply_buf, size_t *_preply_buf_len)
    : cct(_cct), ctx(_ctx), objecter(_objecter), linger_op(_linger_op),
      preply_bl(_preply_bl), preply_buf(_preply_buf),
      preply_buf_len(_preply_buf_len) {}
  // move-only
  CB_notify_Finish(const CB_notify_Finish&) = delete;
  CB_notify_Finish& operator =(const CB_notify_Finish&) = delete;
  CB_notify_Finish(CB_notify_Finish&&) = default;
  CB_notify_Finish& operator =(CB_notify_Finish&&) = default;
  void operator()(bs::error_code ec, bufferlist&& reply_bl) {
    ldout(cct, 10) << __func__ << " completed notify (linger op "
                   << linger_op << "), ec = " << ec << dendl;
    // pass result back to user
    // NOTE: we do this regardless of what error code we return
    if (preply_buf) {
      if (reply_bl.length()) {
        // NOTE(review): malloc result is not checked; an allocation failure
        // would crash in the memcpy below.
        *preply_buf = (char*)malloc(reply_bl.length());
        memcpy(*preply_buf, reply_bl.c_str(), reply_bl.length());
      } else {
        *preply_buf = NULL;
      }
    }
    if (preply_buf_len)
      *preply_buf_len = reply_bl.length();
    if (preply_bl)
      *preply_bl = std::move(reply_bl);
    ctx->complete(ceph::from_error_code(ec));
  }
};
// Deferred functor that tears down a linger (watch/notify) op on the
// Objecter; queued on the client's finish strand so cancellation happens
// outside the completion path.
struct CB_aio_linger_cancel {
  Objecter *objecter;
  Objecter::LingerOp *linger_op;
  CB_aio_linger_cancel(Objecter *_objecter, Objecter::LingerOp *_linger_op)
    : objecter(_objecter), linger_op(_linger_op)
  {
  }
  void operator()() {
    objecter->linger_cancel(linger_op);
  }
};
// Bridges a linger-op completion to a librados AioCompletionImpl: records
// the result, wakes waiters, fires user callbacks, and (on error or when
// 'cancel' is set) schedules cancellation of the linger op.
struct C_aio_linger_Complete : public Context {
  AioCompletionImpl *c;
  Objecter::LingerOp *linger_op;
  bool cancel;
  C_aio_linger_Complete(AioCompletionImpl *_c, Objecter::LingerOp *_linger_op, bool _cancel)
    : c(_c), linger_op(_linger_op), cancel(_cancel)
  {
    c->get();  // released in finish() via put_unlock()
  }
  void finish(int r) override {
    if (cancel || r < 0)
      boost::asio::defer(c->io->client->finish_strand,
			 CB_aio_linger_cancel(c->io->objecter,
					      linger_op));
    c->lock.lock();
    c->rval = r;
    c->complete = true;
    c->cond.notify_all();
    if (c->callback_complete ||
	c->callback_safe) {
      boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
    }
    c->put_unlock();
  }
};
// Rendezvous for the two halves of a notify: the OSD's ack (handle_ack) and
// the notify payload finishing (complete).  Only when both have arrived is
// the underlying aio completion fired, with the first error seen winning.
struct C_aio_notify_Complete : public C_aio_linger_Complete {
  ceph::mutex lock = ceph::make_mutex("C_aio_notify_Complete::lock");
  bool acked = false;
  bool finished = false;
  int ret_val = 0;  // first non-zero error, else 0
  C_aio_notify_Complete(AioCompletionImpl *_c, Objecter::LingerOp *_linger_op)
    : C_aio_linger_Complete(_c, _linger_op, false) {
  }
  void handle_ack(int r) {
    // invoked by C_aio_notify_Ack
    lock.lock();
    acked = true;
    complete_unlock(r);
  }
  void complete(int r) override {
    // invoked by C_notify_Finish
    lock.lock();
    finished = true;
    complete_unlock(r);
  }
  // Called with 'lock' held; releases it.  Fires the parent completion only
  // once both events have occurred.
  void complete_unlock(int r) {
    if (ret_val == 0 && r < 0) {
      ret_val = r;
    }
    if (acked && finished) {
      lock.unlock();
      cancel = true;  // always cancel the linger op once the notify is done
      C_aio_linger_Complete::complete(ret_val);
    } else {
      lock.unlock();
    }
  }
};
// Thin Context adapter that forwards the notify ack into the rendezvous
// object above.
struct C_aio_notify_Ack : public Context {
  CephContext *cct;
  C_aio_notify_Complete *oncomplete;
  C_aio_notify_Ack(CephContext *_cct,
                   C_aio_notify_Complete *_oncomplete)
    : cct(_cct), oncomplete(_oncomplete)
  {
  }
  void finish(int r) override
  {
    ldout(cct, 10) << __func__ << " linger op " << oncomplete->linger_op << " "
                   << "acked (" << r << ")" << dendl;
    oncomplete->handle_ack(r);
  }
};
// Completes an async self-managed-snap operation: records the result on the
// AioCompletionImpl, wakes waiters, and dispatches user callbacks on the
// client's finish strand.
struct C_aio_selfmanaged_snap_op_Complete : public Context {
  librados::RadosClient *client;
  librados::AioCompletionImpl *c;
  C_aio_selfmanaged_snap_op_Complete(librados::RadosClient *client,
                                     librados::AioCompletionImpl *c)
    : client(client), c(c) {
    c->get();  // released in finish() via put_unlock()
  }
  void finish(int r) override {
    c->lock.lock();
    c->rval = r;
    c->complete = true;
    c->cond.notify_all();
    if (c->callback_complete || c->callback_safe) {
      boost::asio::defer(client->finish_strand, librados::CB_AioComplete(c));
    }
    c->put_unlock();
  }
};
// Snap-create variant: 'snapid' is filled in by the Objecter before this
// fires; on success it is copied out to the user's pointer.
struct C_aio_selfmanaged_snap_create_Complete : public C_aio_selfmanaged_snap_op_Complete {
  snapid_t snapid;
  uint64_t *dest_snapid;
  C_aio_selfmanaged_snap_create_Complete(librados::RadosClient *client,
                                         librados::AioCompletionImpl *c,
                                         uint64_t *dest_snapid)
    : C_aio_selfmanaged_snap_op_Complete(client, c),
      dest_snapid(dest_snapid) {
  }
  void finish(int r) override {
    if (r >= 0) {
      *dest_snapid = snapid;
    }
    C_aio_selfmanaged_snap_op_Complete::finish(r);
  }
};
} // anonymous namespace
} // namespace librados
librados::IoCtxImpl::IoCtxImpl() = default;
// Construct an I/O context bound to a pool.  's' is the snapshot id used for
// reads (CEPH_NOSNAP for head); the notify timeout comes from client config.
librados::IoCtxImpl::IoCtxImpl(RadosClient *c, Objecter *objecter,
			       int64_t poolid, snapid_t s)
  : client(c), poolid(poolid), snap_seq(s),
    notify_timeout(c->cct->_conf->client_notify_timeout),
    oloc(poolid),
    aio_write_seq(0), objecter(objecter)
{
}
// Select the snapshot used for subsequent reads; an id of 0 means "read the
// head object" and is normalized to CEPH_NOSNAP.
void librados::IoCtxImpl::set_snap_read(snapid_t s)
{
  const snapid_t target = s ? s : CEPH_NOSNAP;
  ldout(client->cct, 10) << "set snap read " << snap_seq << " -> " << target << dendl;
  snap_seq = target;
}
// Install the snapshot context applied to subsequent writes.
// Returns -EINVAL (leaving the current context untouched) if the proposed
// context fails SnapContext::is_valid().
int librados::IoCtxImpl::set_snap_write_context(snapid_t seq, vector<snapid_t>& snaps)
{
  ldout(client->cct, 10) << "set snap write context: seq = " << seq
			 << " and snaps = " << snaps << dendl;
  ::SnapContext candidate;
  candidate.seq = seq;
  candidate.snaps = snaps;
  if (!candidate.is_valid())
    return -EINVAL;
  snapc = candidate;
  return 0;
}
// Compute the object's hash position within the pool (via the Objecter's
// crush/pg mapping helpers).  Returns 0 and fills *hash_position on success,
// or a negative error from the Objecter.
int librados::IoCtxImpl::get_object_hash_position(
    const std::string& oid, uint32_t *hash_position)
{
  const int64_t ret = objecter->get_object_hash_position(poolid, oid, oloc.nspace);
  if (ret < 0)
    return ret;
  *hash_position = static_cast<uint32_t>(ret);
  return 0;
}
// Like get_object_hash_position(), but returns the PG-level hash position.
int librados::IoCtxImpl::get_object_pg_hash_position(
    const std::string& oid, uint32_t *pg_hash_position)
{
  const int64_t ret = objecter->get_object_pg_hash_position(poolid, oid, oloc.nspace);
  if (ret < 0)
    return ret;
  *pg_hash_position = static_cast<uint32_t>(ret);
  return 0;
}
// Register an in-flight aio write: assigns it the next write sequence number
// and links it on aio_write_list.  Takes a ref on the ioctx (dropped in
// complete_aio_write) so the ctx outlives the write.
void librados::IoCtxImpl::queue_aio_write(AioCompletionImpl *c)
{
  get();
  std::scoped_lock l{aio_write_list_lock};
  ceph_assert(c->io == this);
  c->aio_write_seq = ++aio_write_seq;
  ldout(client->cct, 20) << "queue_aio_write " << this << " completion " << c
			 << " write_seq " << aio_write_seq << dendl;
  aio_write_list.push_back(&c->aio_write_list_item);
}
// Unlink a finished aio write and wake any flush waiters whose target
// sequence number is now fully retired (i.e. no older write remains
// outstanding).
void librados::IoCtxImpl::complete_aio_write(AioCompletionImpl *c)
{
  ldout(client->cct, 20) << "complete_aio_write " << c << dendl;
  aio_write_list_lock.lock();
  ceph_assert(c->io == this);
  c->aio_write_list_item.remove_myself();
  // Waiters are keyed by the sequence they are waiting on; a waiter can fire
  // once the oldest still-outstanding write is newer than its key.
  map<ceph_tid_t, std::list<AioCompletionImpl*> >::iterator waiters = aio_write_waiters.begin();
  while (waiters != aio_write_waiters.end()) {
    if (!aio_write_list.empty() &&
        aio_write_list.front()->aio_write_seq <= waiters->first) {
      ldout(client->cct, 20) << " next outstanding write is " << aio_write_list.front()->aio_write_seq
			     << " <= waiter " << waiters->first
			     << ", stopping" << dendl;
      break;
    }
    ldout(client->cct, 20) << " waking waiters on seq " << waiters->first << dendl;
    for (std::list<AioCompletionImpl*>::iterator it = waiters->second.begin();
         it != waiters->second.end(); ++it) {
      boost::asio::defer(client->finish_strand, CB_AioCompleteAndSafe(*it));
      (*it)->put();
    }
    aio_write_waiters.erase(waiters++);
  }
  aio_write_cond.notify_all();
  aio_write_list_lock.unlock();
  put();  // balances the get() in queue_aio_write()
}
// Asynchronous flush: complete 'c' once every write queued before now has
// finished.  If nothing is in flight, complete it immediately.
void librados::IoCtxImpl::flush_aio_writes_async(AioCompletionImpl *c)
{
  ldout(client->cct, 20) << "flush_aio_writes_async " << this
			 << " completion " << c << dendl;
  std::lock_guard l(aio_write_list_lock);
  ceph_tid_t seq = aio_write_seq;
  if (aio_write_list.empty()) {
    ldout(client->cct, 20) << "flush_aio_writes_async no writes. (tid "
			   << seq << ")" << dendl;
    boost::asio::defer(client->finish_strand, CB_AioCompleteAndSafe(c));
  } else {
    ldout(client->cct, 20) << "flush_aio_writes_async " << aio_write_list.size()
			   << " writes in flight; waiting on tid " << seq << dendl;
    c->get();  // released by complete_aio_write when the waiter fires
    aio_write_waiters[seq].push_back(c);
  }
}
// Synchronous flush: block until every write queued before now has finished.
void librados::IoCtxImpl::flush_aio_writes()
{
  ldout(client->cct, 20) << "flush_aio_writes" << dendl;
  std::unique_lock l{aio_write_list_lock};
  aio_write_cond.wait(l, [seq=aio_write_seq, this] {
    return (aio_write_list.empty() ||
	    aio_write_list.front()->aio_write_seq > seq);
  });
}
// Resolve this context's pool id to its name using the client's cached
// osdmap; returns an empty string if the lookup fails.
string librados::IoCtxImpl::get_cached_pool_name()
{
  std::string name;
  client->pool_get_name(get_id(), &name);
  return name;
}
// SNAPS
// Synchronously create a pool-level snapshot named 'snapName' and wait for
// the monitor to acknowledge it.  Returns the monitor's result code.
int librados::IoCtxImpl::snap_create(const char *snapName)
{
  int reply = 0;
  string sName(snapName);
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::snap_create::mylock");
  ceph::condition_variable cond;
  // Must be initialized: the wait predicate below reads 'done' before
  // C_SafeCond::finish() has run, so an uninitialized bool is UB and can
  // spuriously satisfy the wait.
  bool done = false;
  Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
  objecter->create_pool_snap(poolid, sName, onfinish);
  std::unique_lock l{mylock};
  cond.wait(l, [&done] { return done; });
  return reply;
}
// Synchronously allocate a self-managed snapshot id from the monitor.
// On success (return 0) *psnapid receives the new id.
int librados::IoCtxImpl::selfmanaged_snap_create(uint64_t *psnapid)
{
  int reply = 0;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::selfmanaged_snap_create::mylock");
  ceph::condition_variable cond;
  // Initialized to false: the condvar predicate reads 'done' before the
  // completion runs; leaving it uninitialized is UB.
  bool done = false;
  Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
  snapid_t snapid;
  objecter->allocate_selfmanaged_snap(poolid, &snapid, onfinish);
  {
    std::unique_lock l{mylock};
    cond.wait(l, [&done] { return done; });
  }
  if (reply == 0)
    *psnapid = snapid;
  return reply;
}
// Asynchronous counterpart of selfmanaged_snap_create(): the completion's
// finisher copies the allocated id into *snapid on success.
void librados::IoCtxImpl::aio_selfmanaged_snap_create(uint64_t *snapid,
                                                      AioCompletionImpl *c)
{
  C_aio_selfmanaged_snap_create_Complete *onfinish =
    new C_aio_selfmanaged_snap_create_Complete(client, c, snapid);
  objecter->allocate_selfmanaged_snap(poolid, &onfinish->snapid,
                                      onfinish);
}
// Synchronously delete the pool-level snapshot named 'snapName'.
int librados::IoCtxImpl::snap_remove(const char *snapName)
{
  int reply = 0;
  string sName(snapName);
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::snap_remove::mylock");
  ceph::condition_variable cond;
  // Must start false: the wait predicate reads 'done' before the completion
  // has set it; an uninitialized read is UB.
  bool done = false;
  Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
  objecter->delete_pool_snap(poolid, sName, onfinish);
  unique_lock l{mylock};
  cond.wait(l, [&done] { return done; });
  return reply;
}
// Synchronously roll 'oid' back to self-managed snapshot 'snapid', writing
// under the supplied snap context.  Honors any pending version assert.
int librados::IoCtxImpl::selfmanaged_snap_rollback_object(const object_t& oid,
							  ::SnapContext& snapc,
							  uint64_t snapid)
{
  int reply = 0;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::snap_rollback::mylock");
  ceph::condition_variable cond;
  // Initialized: the condvar predicate reads 'done' before the ack fires.
  bool done = false;
  Context *onack = new C_SafeCond(mylock, cond, &done, &reply);
  ::ObjectOperation op;
  prepare_assert_ops(&op);
  op.rollback(snapid);
  objecter->mutate(oid, oloc,
	           op, snapc, ceph::real_clock::now(),
		   extra_op_flags,
	           onack, NULL);
  std::unique_lock l{mylock};
  cond.wait(l, [&done] { return done; });
  return reply;
}
int librados::IoCtxImpl::rollback(const object_t& oid, const char *snapName)
{
snapid_t snap;
int r = objecter->pool_snap_by_name(poolid, snapName, &snap);
if (r < 0) {
return r;
}
return selfmanaged_snap_rollback_object(oid, snapc, snap);
}
// Synchronously delete self-managed snapshot 'snapid' and wait for the ack.
int librados::IoCtxImpl::selfmanaged_snap_remove(uint64_t snapid)
{
  int reply = 0;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::selfmanaged_snap_remove::mylock");
  ceph::condition_variable cond;
  // Must start false: the wait predicate reads 'done' before the completion
  // runs; reading an uninitialized bool is UB.
  bool done = false;
  objecter->delete_selfmanaged_snap(poolid, snapid_t(snapid),
				    new C_SafeCond(mylock, cond, &done, &reply));
  std::unique_lock l{mylock};
  cond.wait(l, [&done] { return done; });
  return reply;  // already an int; the former (int) cast was redundant
}
// Asynchronous counterpart of selfmanaged_snap_remove(); the result is
// delivered through 'c'.
void librados::IoCtxImpl::aio_selfmanaged_snap_remove(uint64_t snapid,
                                                      AioCompletionImpl *c)
{
  Context *onfinish = new C_aio_selfmanaged_snap_op_Complete(client, c);
  objecter->delete_selfmanaged_snap(poolid, snapid, onfinish);
}
// List the ids of all pool-level snapshots into *snaps.
int librados::IoCtxImpl::snap_list(vector<uint64_t> *snaps)
{
  return objecter->pool_snap_list(poolid, snaps);
}
// Resolve a pool snapshot name to its id.
int librados::IoCtxImpl::snap_lookup(const char *name, uint64_t *snapid)
{
  return objecter->pool_snap_by_name(poolid, name, (snapid_t *)snapid);
}
// Look up the pool-snapshot metadata for 'snapid' and return its name in *s.
int librados::IoCtxImpl::snap_get_name(uint64_t snapid, std::string *s)
{
  pool_snap_info_t info;
  int ret = objecter->pool_snap_get_info(poolid, snapid, &info);
  if (ret < 0) {
    return ret;
  }
  // Assign the string directly rather than round-tripping through c_str():
  // avoids a redundant strlen and does not truncate at embedded NULs.
  *s = info.name;
  return 0;
}
int librados::IoCtxImpl::snap_get_stamp(uint64_t snapid, time_t *t)
{
pool_snap_info_t info;
int ret = objecter->pool_snap_get_info(poolid, snapid, &info);
if (ret < 0) {
return ret;
}
*t = info.stamp.sec();
return 0;
}
// IO
// Synchronously fetch the next batch (up to max_entries) of object names in
// this context's namespace into 'context'.  Returns 0 immediately if the
// listing has already reached the end.
int librados::IoCtxImpl::nlist(Objecter::NListContext *context, int max_entries)
{
  // 'done' must start false: the condvar predicate below reads it before
  // C_SafeCond::finish() has run; an uninitialized read is UB.
  bool done = false;
  int r = 0;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::nlist::mylock");
  ceph::condition_variable cond;
  if (context->at_end())
    return 0;
  context->max_entries = max_entries;
  context->nspace = oloc.nspace;
  objecter->list_nobjects(context, new C_SafeCond(mylock, cond, &done, &r));
  std::unique_lock l{mylock};
  cond.wait(l, [&done] { return done; });
  return r;
}
// Reposition a listing to a hash position; any partially-fetched batch is
// discarded.  Returns the actual position seeked to.
uint32_t librados::IoCtxImpl::nlist_seek(Objecter::NListContext *context,
					 uint32_t pos)
{
  context->list.clear();
  return objecter->list_nobjects_seek(context, pos);
}
// Reposition a listing to an opaque cursor (an hobject_t under the hood).
uint32_t librados::IoCtxImpl::nlist_seek(Objecter::NListContext *context,
                                         const rados_object_list_cursor& cursor)
{
  context->list.clear();
  return objecter->list_nobjects_seek(context, *(const hobject_t *)cursor);
}
// Capture the listing's current position as a heap-allocated cursor; the
// caller owns the returned hobject_t.
rados_object_list_cursor librados::IoCtxImpl::nlist_get_cursor(Objecter::NListContext *context)
{
  hobject_t *c = new hobject_t;
  objecter->list_nobjects_get_cursor(context, c);
  return (rados_object_list_cursor)c;
}
int librados::IoCtxImpl::create(const object_t& oid, bool exclusive)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.create(exclusive);
return operate(oid, &op, NULL);
}
/*
 * add any version assert operations that are appropriate given the
 * state in the IoCtx, either the target version assert or any src
 * object asserts. these affect a single ioctx operation, so clear
 * the ioctx state when we're done.
 *
 * return a pointer to the ObjectOperation if we added any events;
 * this is convenient for passing the extra_ops argument into Objecter
 * methods.
 */
// Prepend a version assert to 'op' if one is pending on this ioctx, clearing
// the pending state.  Returns 'op' when an assert was added, NULL otherwise.
::ObjectOperation *librados::IoCtxImpl::prepare_assert_ops(::ObjectOperation *op)
{
  if (!assert_ver)
    return NULL;
  op->assert_version(assert_ver);
  assert_ver = 0;  // one-shot: consumed by this operation
  return op;
}
int librados::IoCtxImpl::write(const object_t& oid, bufferlist& bl,
size_t len, uint64_t off)
{
if (len > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
bufferlist mybl;
mybl.substr_of(bl, 0, len);
op.write(off, mybl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::append(const object_t& oid, bufferlist& bl, size_t len)
{
if (len > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
bufferlist mybl;
mybl.substr_of(bl, 0, len);
op.append(mybl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::write_full(const object_t& oid, bufferlist& bl)
{
if (bl.length() > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
op.write_full(bl);
return operate(oid, &op, NULL);
}
int librados::IoCtxImpl::writesame(const object_t& oid, bufferlist& bl,
size_t write_len, uint64_t off)
{
if ((bl.length() > UINT_MAX/2) || (write_len > UINT_MAX/2))
return -E2BIG;
if ((bl.length() == 0) || (write_len % bl.length()))
return -EINVAL;
::ObjectOperation op;
prepare_assert_ops(&op);
bufferlist mybl;
mybl.substr_of(bl, 0, bl.length());
op.writesame(off, write_len, mybl);
return operate(oid, &op, NULL);
}
// Synchronously submit a compound mutation and wait for commit.
// Fails with -EROFS when the ioctx is bound to a snapshot; an empty op is a
// no-op returning 0.  On success the returned version is recorded via
// set_sync_op_version().
int librados::IoCtxImpl::operate(const object_t& oid, ::ObjectOperation *o,
				 ceph::real_time *pmtime, int flags)
{
  ceph::real_time ut = (pmtime ? *pmtime :
    ceph::real_clock::now());
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  if (!o->size())
    return 0;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::operate::mylock");
  ceph::condition_variable cond;
  // 'done' must start false: the wait predicate reads it before the commit
  // callback runs; an uninitialized read is UB.  'r'/'ver' are initialized
  // defensively for the same reason.
  bool done = false;
  int r = 0;
  version_t ver = 0;
  Context *oncommit = new C_SafeCond(mylock, cond, &done, &r);
  int op = o->ops[0].op.op;
  ldout(client->cct, 10) << ceph_osd_op_name(op) << " oid=" << oid
			 << " nspace=" << oloc.nspace << dendl;
  Objecter::Op *objecter_op = objecter->prepare_mutate_op(
    oid, oloc,
    *o, snapc, ut,
    flags | extra_op_flags,
    oncommit, &ver);
  objecter->op_submit(objecter_op);
  {
    std::unique_lock l{mylock};
    cond.wait(l, [&done] { return done;});
  }
  ldout(client->cct, 10) << "Objecter returned from "
			 << ceph_osd_op_name(op) << " r=" << r << dendl;
  set_sync_op_version(ver);
  return r;
}
// Synchronously submit a compound read op against snap_seq; output (if any)
// lands in *pbl.  An empty op is a no-op returning 0.
int librados::IoCtxImpl::operate_read(const object_t& oid,
				      ::ObjectOperation *o,
				      bufferlist *pbl,
				      int flags)
{
  if (!o->size())
    return 0;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::operate_read::mylock");
  ceph::condition_variable cond;
  // Initialized: the condvar predicate reads 'done' before the ack callback
  // has run; an uninitialized read is UB.
  bool done = false;
  int r = 0;
  version_t ver = 0;
  Context *onack = new C_SafeCond(mylock, cond, &done, &r);
  int op = o->ops[0].op.op;
  ldout(client->cct, 10) << ceph_osd_op_name(op) << " oid=" << oid << " nspace=" << oloc.nspace << dendl;
  Objecter::Op *objecter_op = objecter->prepare_read_op(
    oid, oloc,
    *o, snap_seq, pbl,
    flags | extra_op_flags,
    onack, &ver);
  objecter->op_submit(objecter_op);
  {
    std::unique_lock l{mylock};
    cond.wait(l, [&done] { return done; });
  }
  ldout(client->cct, 10) << "Objecter returned from "
			 << ceph_osd_op_name(op) << " r=" << r << dendl;
  set_sync_op_version(ver);
  return r;
}
// Asynchronous compound read: submit '*o' against snap_seq, delivering output
// to *pbl and completion via 'c'.  Optionally attaches a blkin trace.
int librados::IoCtxImpl::aio_operate_read(const object_t &oid,
					  ::ObjectOperation *o,
					  AioCompletionImpl *c,
					  int flags,
					  bufferlist *pbl,
					  const blkin_trace_info *trace_info)
{
  FUNCTRACE(client->cct);
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->is_read = true;
  c->io = this;
  ZTracer::Trace trace;
  if (trace_info) {
    ZTracer::Trace parent_trace("", nullptr, trace_info);
    trace.init("rados operate read", &objecter->trace_endpoint, &parent_trace);
  }
  trace.event("init root span");
  Objecter::Op *objecter_op = objecter->prepare_read_op(
    oid, oloc,
    *o, snap_seq, pbl, flags | extra_op_flags,
    oncomplete, &c->objver, nullptr, 0, &trace);
  objecter->op_submit(objecter_op, &c->tid);
  trace.event("rados operate read submitted");
  return 0;
}
// Asynchronous compound mutation under 'snap_context'.  Rejected with -EROFS
// when the ioctx is bound to a snapshot.  The completion is registered on the
// aio-write list so flush_aio_writes*() can wait on it.
int librados::IoCtxImpl::aio_operate(const object_t& oid,
				     ::ObjectOperation *o, AioCompletionImpl *c,
				     const SnapContext& snap_context,
				     const ceph::real_time *pmtime, int flags,
                                     const blkin_trace_info *trace_info)
{
  FUNCTRACE(client->cct);
  OID_EVENT_TRACE(oid.name.c_str(), "RADOS_WRITE_OP_BEGIN");
  const ceph::real_time ut = (pmtime ? *pmtime : ceph::real_clock::now());
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->io = this;
  queue_aio_write(c);
  ZTracer::Trace trace;
  if (trace_info) {
    ZTracer::Trace parent_trace("", nullptr, trace_info);
    trace.init("rados operate", &objecter->trace_endpoint, &parent_trace);
  }
  trace.event("init root span");
  Objecter::Op *op = objecter->prepare_mutate_op(
    oid, oloc, *o, snap_context, ut, flags | extra_op_flags,
    oncomplete, &c->objver, osd_reqid_t(), &trace);
  objecter->op_submit(op, &c->tid);
  trace.event("rados operate op submitted");
  return 0;
}
// Asynchronous read into a caller-supplied bufferlist.  Lengths above
// INT_MAX are rejected with -EDOM.
int librados::IoCtxImpl::aio_read(const object_t oid, AioCompletionImpl *c,
				  bufferlist *pbl, size_t len, uint64_t off,
				  uint64_t snapid, const blkin_trace_info *info)
{
  FUNCTRACE(client->cct);
  if (len > (size_t) INT_MAX)
    return -EDOM;
  OID_EVENT_TRACE(oid.name.c_str(), "RADOS_READ_OP_BEGIN");
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->is_read = true;
  c->io = this;
  c->blp = pbl;
  ZTracer::Trace trace;
  if (info)
    trace.init("rados read", &objecter->trace_endpoint, info);
  Objecter::Op *o = objecter->prepare_read_op(
    oid, oloc,
    off, len, snapid, pbl, extra_op_flags,
    oncomplete, &c->objver, nullptr, 0, &trace);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Asynchronous read into a raw caller-owned buffer: the completion's
// bufferlist wraps 'buf' via a static (non-owning) buffer::raw so the read
// lands in place.
int librados::IoCtxImpl::aio_read(const object_t oid, AioCompletionImpl *c,
				  char *buf, size_t len, uint64_t off,
				  uint64_t snapid, const blkin_trace_info *info)
{
  FUNCTRACE(client->cct);
  if (len > (size_t) INT_MAX)
    return -EDOM;
  OID_EVENT_TRACE(oid.name.c_str(), "RADOS_READ_OP_BEGIN");
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->is_read = true;
  c->io = this;
  c->bl.clear();
  c->bl.push_back(buffer::create_static(len, buf));
  c->blp = &c->bl;
  c->out_buf = buf;
  ZTracer::Trace trace;
  if (info)
    trace.init("rados read", &objecter->trace_endpoint, info);
  Objecter::Op *o = objecter->prepare_read_op(
    oid, oloc,
    off, len, snapid, &c->bl, extra_op_flags,
    oncomplete, &c->objver, nullptr, 0, &trace);
  objecter->op_submit(o, &c->tid);
  return 0;
}
class C_ObjectOperation : public Context {
public:
::ObjectOperation m_ops;
explicit C_ObjectOperation(Context *c) : m_ctx(c) {}
void finish(int r) override {
m_ctx->complete(r);
}
private:
Context *m_ctx;
};
int librados::IoCtxImpl::aio_sparse_read(const object_t oid,
AioCompletionImpl *c,
std::map<uint64_t,uint64_t> *m,
bufferlist *data_bl, size_t len,
uint64_t off, uint64_t snapid)
{
FUNCTRACE(client->cct);
if (len > (size_t) INT_MAX)
return -EDOM;
Context *nested = new C_aio_Complete(c);
C_ObjectOperation *onack = new C_ObjectOperation(nested);
#if defined(WITH_EVENTTRACE)
((C_aio_Complete *) nested)->oid = oid;
#endif
c->is_read = true;
c->io = this;
onack->m_ops.sparse_read(off, len, m, data_bl, NULL);
Objecter::Op *o = objecter->prepare_read_op(
oid, oloc,
onack->m_ops, snapid, NULL, extra_op_flags,
onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
int librados::IoCtxImpl::aio_cmpext(const object_t& oid,
AioCompletionImpl *c,
uint64_t off,
bufferlist& cmp_bl)
{
if (cmp_bl.length() > UINT_MAX/2)
return -E2BIG;
Context *onack = new C_aio_Complete(c);
c->is_read = true;
c->io = this;
Objecter::Op *o = objecter->prepare_cmpext_op(
oid, oloc, off, cmp_bl, snap_seq, extra_op_flags,
onack, &c->objver);
objecter->op_submit(o, &c->tid);
return 0;
}
/* use m_ops.cmpext() + prepare_read_op() for non-bufferlist C API */
// Async compare-extent (raw-buffer variant for the C API). `cmp_buf` is
// referenced directly by the op, so it must stay valid until completion.
// Returns 0 on submission, -E2BIG for oversized compare buffers.
int librados::IoCtxImpl::aio_cmpext(const object_t& oid,
				    AioCompletionImpl *c,
				    const char *cmp_buf,
				    size_t cmp_len,
				    uint64_t off)
{
  if (cmp_len > UINT_MAX/2)
    return -E2BIG;
  // (a local bufferlist copy of cmp_buf used to be built here but was
  // never used; cmpext() below consumes the raw buffer directly)
  Context *nested = new C_aio_Complete(c);
  C_ObjectOperation *onack = new C_ObjectOperation(nested);
  c->is_read = true;
  c->io = this;
  onack->m_ops.cmpext(off, cmp_len, cmp_buf, NULL);
  Objecter::Op *o = objecter->prepare_read_op(
    oid, oloc, onack->m_ops, snap_seq, NULL, extra_op_flags, onack, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async write of `len` bytes from `bl` at object offset `off`.
// Fails with -E2BIG for oversized writes and -EROFS when the io context
// is pointed at a snapshot.
int librados::IoCtxImpl::aio_write(const object_t &oid, AioCompletionImpl *c,
				   const bufferlist& bl, size_t len,
				   uint64_t off, const blkin_trace_info *info)
{
  FUNCTRACE(client->cct);
  auto ut = ceph::real_clock::now();
  ldout(client->cct, 20) << "aio_write " << oid << " " << off << "~" << len << " snapc=" << snapc << " snap_seq=" << snap_seq << dendl;
  OID_EVENT_TRACE(oid.name.c_str(), "RADOS_WRITE_OP_BEGIN");
  if (len > UINT_MAX/2)
    return -E2BIG;
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  ZTracer::Trace trace;
  if (info)
    trace.init("rados write", &objecter->trace_endpoint, info);
  c->io = this;
  // register as an in-flight write so flush_aio_writes() can wait on it
  queue_aio_write(c);
  Objecter::Op *o = objecter->prepare_write_op(
    oid, oloc,
    off, len, snapc, bl, ut, extra_op_flags,
    oncomplete, &c->objver, nullptr, 0, &trace);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async append of `len` bytes from `bl` to the end of the object.
// Same -E2BIG / -EROFS preconditions as aio_write().
int librados::IoCtxImpl::aio_append(const object_t &oid, AioCompletionImpl *c,
				    const bufferlist& bl, size_t len)
{
  FUNCTRACE(client->cct);
  auto ut = ceph::real_clock::now();
  if (len > UINT_MAX/2)
    return -E2BIG;
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->io = this;
  // register as an in-flight write so flush_aio_writes() can wait on it
  queue_aio_write(c);
  Objecter::Op *o = objecter->prepare_append_op(
    oid, oloc,
    len, snapc, bl, ut, extra_op_flags,
    oncomplete, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async full-object write: replace the object's entire contents with `bl`.
// Same -E2BIG / -EROFS preconditions as aio_write().
int librados::IoCtxImpl::aio_write_full(const object_t &oid,
					AioCompletionImpl *c,
					const bufferlist& bl)
{
  FUNCTRACE(client->cct);
  auto ut = ceph::real_clock::now();
  if (bl.length() > UINT_MAX/2)
    return -E2BIG;
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->io = this;
  // register as an in-flight write so flush_aio_writes() can wait on it
  queue_aio_write(c);
  Objecter::Op *o = objecter->prepare_write_full_op(
    oid, oloc,
    snapc, bl, ut, extra_op_flags,
    oncomplete, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async write-same: tile the pattern in `bl` across [off, off+write_len).
// write_len must be a non-zero multiple of bl.length() (else -EINVAL);
// same -E2BIG / -EROFS preconditions as aio_write().
int librados::IoCtxImpl::aio_writesame(const object_t &oid,
				       AioCompletionImpl *c,
				       const bufferlist& bl,
				       size_t write_len,
				       uint64_t off)
{
  FUNCTRACE(client->cct);
  auto ut = ceph::real_clock::now();
  if ((bl.length() > UINT_MAX/2) || (write_len > UINT_MAX/2))
    return -E2BIG;
  // the pattern must be non-empty and divide the total write length evenly
  if ((bl.length() == 0) || (write_len % bl.length()))
    return -EINVAL;
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->io = this;
  // register as an in-flight write so flush_aio_writes() can wait on it
  queue_aio_write(c);
  Objecter::Op *o = objecter->prepare_writesame_op(
    oid, oloc,
    write_len, off,
    snapc, bl, ut, extra_op_flags,
    oncomplete, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async object delete with the given operation flags.
// Fails with -EROFS when the io context is pointed at a snapshot.
int librados::IoCtxImpl::aio_remove(const object_t &oid, AioCompletionImpl *c, int flags)
{
  FUNCTRACE(client->cct);
  auto ut = ceph::real_clock::now();
  /* can't write to a snapshot */
  if (snap_seq != CEPH_NOSNAP)
    return -EROFS;
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->io = this;
  // removal counts as a write for flush_aio_writes() tracking
  queue_aio_write(c);
  Objecter::Op *o = objecter->prepare_remove_op(
    oid, oloc,
    snapc, ut, flags | extra_op_flags,
    oncomplete, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async stat: fills *psize and *pmtime (time_t) on completion.
// Note: C_aio_stat_Ack's constructor asserts c->io is unset, so it must
// be created before c->io is bound below.
int librados::IoCtxImpl::aio_stat(const object_t& oid, AioCompletionImpl *c,
				  uint64_t *psize, time_t *pmtime)
{
  C_aio_stat_Ack *onack = new C_aio_stat_Ack(c, pmtime);
  c->is_read = true;
  c->io = this;
  Objecter::Op *o = objecter->prepare_stat_op(
    oid, oloc,
    snap_seq, psize, &onack->mtime, extra_op_flags,
    onack, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async stat with full-resolution mtime: fills *psize and *pts
// (struct timespec) on completion.
int librados::IoCtxImpl::aio_stat2(const object_t& oid, AioCompletionImpl *c,
				  uint64_t *psize, struct timespec *pts)
{
  C_aio_stat2_Ack *onack = new C_aio_stat2_Ack(c, pts);
  c->is_read = true;
  c->io = this;
  Objecter::Op *o = objecter->prepare_stat_op(
    oid, oloc,
    snap_seq, psize, &onack->mtime, extra_op_flags,
    onack, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async read of a single xattr's value into `bl`.
int librados::IoCtxImpl::aio_getxattr(const object_t& oid, AioCompletionImpl *c,
				      const char *name, bufferlist& bl)
{
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.getxattr(name, &bl, NULL);
  int r = aio_operate_read(oid, &rd, c, 0, &bl);
  return r;
}
// Async removal of a single xattr.
int librados::IoCtxImpl::aio_rmxattr(const object_t& oid, AioCompletionImpl *c,
				     const char *name)
{
  ::ObjectOperation op;
  prepare_assert_ops(&op);
  op.rmxattr(name);
  return aio_operate(oid, &op, c, snapc, nullptr, 0);
}
// Async set of a single xattr to the contents of `bl`.
int librados::IoCtxImpl::aio_setxattr(const object_t& oid, AioCompletionImpl *c,
				      const char *name, bufferlist& bl)
{
  ::ObjectOperation op;
  prepare_assert_ops(&op);
  op.setxattr(name, bl);
  return aio_operate(oid, &op, c, snapc, nullptr, 0);
}
namespace {
// Per-call state for aio_getxattrs(): holds the decoded xattrs until the
// internal completion fires, then they are copied into user_attrset and
// the user's completion is triggered via user_completion.
struct AioGetxattrsData {
  AioGetxattrsData(librados::AioCompletionImpl *c, map<string, bufferlist>* attrset,
		   librados::RadosClient *_client) :
    user_completion(c), user_attrset(attrset), client(_client) {}
  struct librados::CB_AioCompleteAndSafe user_completion;  // fires the user's callback
  map<string, bufferlist> result_attrset;                  // filled by the osd reply
  map<std::string, bufferlist>* user_attrset;              // caller-owned output map
  librados::RadosClient *client;
};
}
// Completion trampoline for aio_getxattrs(): copies the decoded xattrs
// into the user's map, fires the user's completion with the op's return
// value, then releases the internal completion and per-call state.
static void aio_getxattrs_complete(rados_completion_t c, void *arg) {
  AioGetxattrsData *cdata = static_cast<AioGetxattrsData*>(arg);
  const int ret = rados_aio_get_return_value(c);
  cdata->user_attrset->clear();
  if (ret >= 0) {
    for (const auto& kv : cdata->result_attrset) {
      ldout(cdata->client->cct, 10) << "IoCtxImpl::getxattrs: xattr=" << kv.first << dendl;
      (*cdata->user_attrset)[kv.first] = kv.second;
    }
  }
  cdata->user_completion(ret);
  ((librados::AioCompletionImpl*)c)->put();
  delete cdata;
}
// Async fetch of all xattrs into `attrset`. An internal completion is
// interposed so the results can be copied into the user's map (in
// aio_getxattrs_complete) before the user's completion `c` fires.
int librados::IoCtxImpl::aio_getxattrs(const object_t& oid, AioCompletionImpl *c,
				       map<std::string, bufferlist>& attrset)
{
  AioGetxattrsData *cdata = new AioGetxattrsData(c, &attrset, client);
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.getxattrs(&cdata->result_attrset, NULL);
  librados::AioCompletionImpl *comp = new librados::AioCompletionImpl;
  comp->set_complete_callback(cdata, aio_getxattrs_complete);
  return aio_operate_read(oid, &rd, comp, 0, NULL);
}
// Best-effort cancel of an in-flight async op; the op completes with
// -ECANCELED if the objecter can still cancel it.
int librados::IoCtxImpl::aio_cancel(AioCompletionImpl *c)
{
  return objecter->op_cancel(c->tid, -ECANCELED);
}
// Async list of hit-set intervals for the PG containing `hash`; *pls is
// filled with (start, end) time ranges on completion.
int librados::IoCtxImpl::hit_set_list(uint32_t hash, AioCompletionImpl *c,
				      std::list< std::pair<time_t, time_t> > *pls)
{
  Context *oncomplete = new C_aio_Complete(c);
  c->is_read = true;
  c->io = this;
  ::ObjectOperation rd;
  rd.hit_set_ls(pls, NULL);
  object_locator_t oloc(poolid);
  Objecter::Op *o = objecter->prepare_pg_read_op(
    hash, oloc, rd, NULL, extra_op_flags, oncomplete, NULL, NULL);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async fetch of the encoded hit set whose interval contains `stamp`,
// for the PG containing `hash`; the encoding lands in *pbl.
int librados::IoCtxImpl::hit_set_get(uint32_t hash, AioCompletionImpl *c,
				     time_t stamp,
				     bufferlist *pbl)
{
  Context *oncomplete = new C_aio_Complete(c);
  c->is_read = true;
  c->io = this;
  ::ObjectOperation rd;
  rd.hit_set_get(ceph::real_clock::from_time_t(stamp), pbl, 0);
  object_locator_t oloc(poolid);
  Objecter::Op *o = objecter->prepare_pg_read_op(
    hash, oloc, rd, NULL, extra_op_flags, oncomplete, NULL, NULL);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Synchronous object delete with the default OPERATION_FULL_FORCE flag.
// Delegates to the flags overload below to avoid duplicating the op
// construction.
int librados::IoCtxImpl::remove(const object_t& oid)
{
  return remove(oid, librados::OPERATION_FULL_FORCE);
}
// Synchronous object delete with caller-supplied operation flags.
int librados::IoCtxImpl::remove(const object_t& oid, int flags)
{
  ::ObjectOperation op;
  prepare_assert_ops(&op);
  op.remove();
  return operate(oid, &op, NULL, flags);
}
int librados::IoCtxImpl::trunc(const object_t& oid, uint64_t size)
{
::ObjectOperation op;
prepare_assert_ops(&op);
op.truncate(size);
return operate(oid, &op, NULL);
}
// Async scrub query: list up to max_to_get inconsistent objects in `pg`,
// resuming after `start_after`. *interval receives the scrub interval so
// callers can detect when results span different scrubs.
int librados::IoCtxImpl::get_inconsistent_objects(const pg_t& pg,
						  const librados::object_id_t& start_after,
						  uint64_t max_to_get,
						  AioCompletionImpl *c,
						  std::vector<inconsistent_obj_t>* objects,
						  uint32_t* interval)
{
  Context *oncomplete = new C_aio_Complete(c);
  c->is_read = true;
  c->io = this;
  ::ObjectOperation op;
  op.scrub_ls(start_after, max_to_get, objects, interval, &c->rval);
  object_locator_t oloc{poolid, pg.ps()};
  Objecter::Op *o = objecter->prepare_pg_read_op(
    oloc.hash, oloc, op, nullptr, CEPH_OSD_FLAG_PGOP | extra_op_flags, oncomplete,
    nullptr, nullptr);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async scrub query: same as above but for inconsistent snapsets.
int librados::IoCtxImpl::get_inconsistent_snapsets(const pg_t& pg,
						   const librados::object_id_t& start_after,
						   uint64_t max_to_get,
						   AioCompletionImpl *c,
						   std::vector<inconsistent_snapset_t>* snapsets,
						   uint32_t* interval)
{
  Context *oncomplete = new C_aio_Complete(c);
  c->is_read = true;
  c->io = this;
  ::ObjectOperation op;
  op.scrub_ls(start_after, max_to_get, snapsets, interval, &c->rval);
  object_locator_t oloc{poolid, pg.ps()};
  Objecter::Op *o = objecter->prepare_pg_read_op(
    oloc.hash, oloc, op, nullptr, CEPH_OSD_FLAG_PGOP | extra_op_flags, oncomplete,
    nullptr, nullptr);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Synchronously apply an encoded tmap update command to the object.
int librados::IoCtxImpl::tmap_update(const object_t& oid, bufferlist& cmdbl)
{
  ::ObjectOperation wr;
  prepare_assert_ops(&wr);
  wr.tmap_update(cmdbl);
  return operate(oid, &wr, NULL);
}
// Synchronously invoke object-class method `cls`.`method` with input
// `inbl`; any reply payload is returned in `outbl`.
int librados::IoCtxImpl::exec(const object_t& oid,
			      const char *cls, const char *method,
			      bufferlist& inbl, bufferlist& outbl)
{
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.call(cls, method, inbl);
  return operate_read(oid, &rd, &outbl);
}
// Async object-class call; the reply payload is delivered into *outbl.
int librados::IoCtxImpl::aio_exec(const object_t& oid, AioCompletionImpl *c,
				  const char *cls, const char *method,
				  bufferlist& inbl, bufferlist *outbl)
{
  FUNCTRACE(client->cct);
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->is_read = true;
  c->io = this;
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.call(cls, method, inbl);
  Objecter::Op *o = objecter->prepare_read_op(
    oid, oloc, rd, snap_seq, outbl, extra_op_flags, oncomplete, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Async object-class call for the C API: the reply payload is copied
// into the caller's `buf` (up to out_len bytes) by C_aio_Complete, via
// the static buffer wrapped into c->bl below.
int librados::IoCtxImpl::aio_exec(const object_t& oid, AioCompletionImpl *c,
				  const char *cls, const char *method,
				  bufferlist& inbl, char *buf, size_t out_len)
{
  FUNCTRACE(client->cct);
  Context *oncomplete = new C_aio_Complete(c);
#if defined(WITH_EVENTTRACE)
  ((C_aio_Complete *) oncomplete)->oid = oid;
#endif
  c->is_read = true;
  c->io = this;
  c->bl.clear();
  // expose the caller's buffer as a zero-copy target for the reply
  c->bl.push_back(buffer::create_static(out_len, buf));
  c->blp = &c->bl;
  c->out_buf = buf;
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.call(cls, method, inbl);
  Objecter::Op *o = objecter->prepare_read_op(
    oid, oloc, rd, snap_seq, &c->bl, extra_op_flags, oncomplete, &c->objver);
  objecter->op_submit(o, &c->tid);
  return 0;
}
// Synchronous read of up to `len` bytes at `off` into `bl`.
// Returns the number of bytes read (which may be short at EOF) or a
// negative error; -EDOM for oversized lengths.
int librados::IoCtxImpl::read(const object_t& oid,
			      bufferlist& bl, size_t len, uint64_t off)
{
  if (len > (size_t) INT_MAX)
    return -EDOM;
  OID_EVENT_TRACE(oid.name.c_str(), "RADOS_READ_OP_BEGIN");
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.read(off, len, &bl, NULL, NULL);
  int r = operate_read(oid, &rd, &bl);
  if (r < 0)
    return r;
  // a short read is not an error, just log it
  if (bl.length() < len) {
    ldout(client->cct, 10) << "Returned length " << bl.length()
	     << " less than original length "<< len << dendl;
  }
  return bl.length();
}
int librados::IoCtxImpl::cmpext(const object_t& oid, uint64_t off,
bufferlist& cmp_bl)
{
if (cmp_bl.length() > UINT_MAX/2)
return -E2BIG;
::ObjectOperation op;
prepare_assert_ops(&op);
op.cmpext(off, cmp_bl, NULL);
return operate_read(oid, &op, NULL);
}
// Synchronous extent map query: fills `m` with the object's allocated
// (offset,length) extents over [off, off+len). Returns the number of
// extents or a negative error. Blocks on a local condvar until the
// objecter callback fires.
int librados::IoCtxImpl::mapext(const object_t& oid,
				uint64_t off, size_t len,
				std::map<uint64_t,uint64_t>& m)
{
  bufferlist bl;
  ceph::mutex mylock = ceph::make_mutex("IoCtxImpl::read::mylock");
  ceph::condition_variable cond;
  bool done;
  int r;
  // C_SafeCond signals `done`/`r` when the op completes
  Context *onack = new C_SafeCond(mylock, cond, &done, &r);
  objecter->mapext(oid, oloc,
		   off, len, snap_seq, &bl, extra_op_flags,
		   onack);
  {
    unique_lock l{mylock};
    cond.wait(l, [&done] { return done;});
  }
  ldout(client->cct, 10) << "Objecter returned from read r=" << r << dendl;
  if (r < 0)
    return r;
  // the extent map is returned encoded; decode it into the caller's map
  auto iter = bl.cbegin();
  decode(m, iter);
  return m.size();
}
// Synchronous sparse read: fills `m` with existing extents and `data_bl`
// with their data. Returns the number of extents or a negative error;
// -EDOM for oversized lengths.
int librados::IoCtxImpl::sparse_read(const object_t& oid,
				     std::map<uint64_t,uint64_t>& m,
				     bufferlist& data_bl, size_t len,
				     uint64_t off)
{
  if (len > (size_t) INT_MAX)
    return -EDOM;
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.sparse_read(off, len, &m, &data_bl, NULL);
  int r = operate_read(oid, &rd, NULL);
  if (r < 0)
    return r;
  return m.size();
}
// Synchronous checksum of [off, off+len) using algorithm `type` seeded
// with `init_value`; one checksum per `chunk_size` bytes is encoded into
// *pbl. Returns 0 or a negative error; -EDOM for oversized lengths.
int librados::IoCtxImpl::checksum(const object_t& oid, uint8_t type,
				  const bufferlist &init_value, size_t len,
				  uint64_t off, size_t chunk_size,
				  bufferlist *pbl)
{
  if (len > (size_t) INT_MAX) {
    return -EDOM;
  }
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.checksum(type, init_value, off, len, chunk_size, pbl, nullptr, nullptr);
  int r = operate_read(oid, &rd, nullptr);
  if (r < 0) {
    return r;
  }
  return 0;
}
// Synchronous stat: fills *psize and *pmtime (time_t). Either pointer
// may be NULL; a local is substituted for psize because the underlying
// op requires a size output.
int librados::IoCtxImpl::stat(const object_t& oid, uint64_t *psize, time_t *pmtime)
{
  uint64_t size;
  real_time mtime;
  if (!psize)
    psize = &size;
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.stat(psize, &mtime, nullptr);
  int r = operate_read(oid, &rd, NULL);
  if (r >= 0 && pmtime) {
    *pmtime = real_clock::to_time_t(mtime);
  }
  return r;
}
// Synchronous stat with full-resolution mtime (struct timespec).
int librados::IoCtxImpl::stat2(const object_t& oid, uint64_t *psize, struct timespec *pts)
{
  uint64_t size;
  ceph::real_time mtime;
  if (!psize)
    psize = &size;
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.stat(psize, &mtime, nullptr);
  int r = operate_read(oid, &rd, NULL);
  if (r < 0) {
    return r;
  }
  if (pts) {
    *pts = ceph::real_clock::to_timespec(mtime);
  }
  return 0;
}
// Synchronous read of one xattr; returns the value length or a negative
// error.
int librados::IoCtxImpl::getxattr(const object_t& oid,
				    const char *name, bufferlist& bl)
{
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.getxattr(name, &bl, NULL);
  int r = operate_read(oid, &rd, &bl);
  if (r < 0)
    return r;
  return bl.length();
}
// Synchronous removal of one xattr.
int librados::IoCtxImpl::rmxattr(const object_t& oid, const char *name)
{
  ::ObjectOperation op;
  prepare_assert_ops(&op);
  op.rmxattr(name);
  return operate(oid, &op, NULL);
}
// Synchronous set of one xattr to the contents of `bl`.
int librados::IoCtxImpl::setxattr(const object_t& oid,
				    const char *name, bufferlist& bl)
{
  ::ObjectOperation op;
  prepare_assert_ops(&op);
  op.setxattr(name, bl);
  return operate(oid, &op, NULL);
}
// Synchronous fetch of all xattrs into `attrset` (cleared first).
// Returns 0 or a negative error.
int librados::IoCtxImpl::getxattrs(const object_t& oid,
				    map<std::string, bufferlist>& attrset)
{
  map<string, bufferlist> aset;
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.getxattrs(&aset, NULL);
  int r = operate_read(oid, &rd, NULL);
  attrset.clear();
  if (r >= 0) {
    for (map<string,bufferlist>::iterator p = aset.begin(); p != aset.end(); ++p) {
      ldout(client->cct, 10) << "IoCtxImpl::getxattrs: xattr=" << p->first << dendl;
      // copy the key directly; the old code rebuilt it via c_str(), which
      // forced an extra string conversion and truncated at embedded NULs
      attrset[p->first] = p->second;
    }
  }
  return r;
}
// Record the object version returned by the last synchronous op; the
// benign-race annotation covers unsynchronized readers of last_objver.
void librados::IoCtxImpl::set_sync_op_version(version_t ver)
{
  ANNOTATE_BENIGN_RACE_SIZED(&last_objver, sizeof(last_objver),
			     "IoCtxImpl last_objver");
  last_objver = ver;
}
// Hooks for boost::intrusive_ptr<IoCtxImpl>: map add-ref/release to the
// impl's own reference counting.
namespace librados {
void intrusive_ptr_add_ref(IoCtxImpl *p) { p->get(); }
void intrusive_ptr_release(IoCtxImpl *p) { p->put(); }
}
// Callback state for a watch: dispatches notify/error events from the
// objecter to the user's WatchCtx (legacy) and/or WatchCtx2 callbacks.
// Holds an intrusive ref on the IoCtxImpl so it outlives the watch.
// Contexts are borrowed here; InternalWatchInfo below owns them instead.
struct WatchInfo {
  boost::intrusive_ptr<librados::IoCtxImpl> ioctx;
  object_t oid;
  librados::WatchCtx *ctx;
  librados::WatchCtx2 *ctx2;
  WatchInfo(librados::IoCtxImpl *io, object_t o,
	    librados::WatchCtx *c, librados::WatchCtx2 *c2)
    : ioctx(io), oid(o), ctx(c), ctx2(c2) {}
  // Deliver a notify event to whichever callback interfaces are set.
  void handle_notify(uint64_t notify_id,
		     uint64_t cookie,
		     uint64_t notifier_id,
		     bufferlist& bl) {
    ldout(ioctx->client->cct, 10) << __func__ << " " << notify_id
				  << " cookie " << cookie
				  << " notifier_id " << notifier_id
				  << " len " << bl.length()
				  << dendl;
    if (ctx2)
      ctx2->handle_notify(notify_id, cookie, notifier_id, bl);
    if (ctx) {
      ctx->notify(0, 0, bl);
      // send ACK back to OSD if using legacy protocol
      bufferlist empty;
      ioctx->notify_ack(oid, notify_id, cookie, empty);
    }
  }
  // Deliver a watch error; only the WatchCtx2 interface supports errors.
  void handle_error(uint64_t cookie, int err) {
    ldout(ioctx->client->cct, 10) << __func__ << " cookie " << cookie
				  << " err " << err
				  << dendl;
    if (ctx2)
      ctx2->handle_error(cookie, err);
  }
  // Entry point invoked by the objecter: route to error or notify path.
  void operator()(bs::error_code ec,
		  uint64_t notify_id,
		  uint64_t cookie,
		  uint64_t notifier_id,
		  bufferlist&& bl) {
    if (ec) {
      handle_error(cookie, ceph::from_error_code(ec));
    } else {
      handle_notify(notify_id, cookie, notifier_id, bl);
    }
  }
};
// internal WatchInfo that owns the context memory
// internal WatchInfo that owns the context memory
// The unique_ptrs alias the base class's raw pointers, so the contexts
// are freed when this handler is destroyed.
struct InternalWatchInfo : public WatchInfo {
  std::unique_ptr<librados::WatchCtx> ctx;
  std::unique_ptr<librados::WatchCtx2> ctx2;
  InternalWatchInfo(librados::IoCtxImpl *io, object_t o,
		    librados::WatchCtx *c, librados::WatchCtx2 *c2)
    : WatchInfo(io, o, c, c2), ctx(c), ctx2(c2) {}
};
// Synchronous watch with default (0) timeout; see the overload below.
int librados::IoCtxImpl::watch(const object_t& oid, uint64_t *handle,
			       librados::WatchCtx *ctx,
			       librados::WatchCtx2 *ctx2,
			       bool internal)
{
  return watch(oid, handle, ctx, ctx2, 0, internal);
}
// Synchronous watch: registers a linger op whose cookie becomes the
// caller's *handle. `internal` selects the handler variant that owns
// (and later frees) the ctx/ctx2 callbacks. On failure the linger op is
// cancelled and *handle is zeroed.
int librados::IoCtxImpl::watch(const object_t& oid, uint64_t *handle,
			       librados::WatchCtx *ctx,
			       librados::WatchCtx2 *ctx2,
			       uint32_t timeout,
			       bool internal)
{
  ::ObjectOperation wr;
  version_t objver;
  C_SaferCond onfinish;
  Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
							    extra_op_flags);
  *handle = linger_op->get_cookie();
  if (internal) {
    linger_op->handle = InternalWatchInfo(this, oid, ctx, ctx2);
  } else {
    linger_op->handle = WatchInfo(this, oid, ctx, ctx2);
  }
  prepare_assert_ops(&wr);
  wr.watch(*handle, CEPH_OSD_WATCH_OP_WATCH, timeout);
  bufferlist bl;
  objecter->linger_watch(linger_op, wr,
			 snapc, ceph::real_clock::now(), bl,
			 &onfinish,
			 &objver);
  int r = onfinish.wait();
  set_sync_op_version(objver);
  if (r < 0) {
    objecter->linger_cancel(linger_op);
    *handle = 0;
  }
  return r;
}
// Async watch with default (0) timeout; see the overload below.
int librados::IoCtxImpl::aio_watch(const object_t& oid,
                                   AioCompletionImpl *c,
                                   uint64_t *handle,
                                   librados::WatchCtx *ctx,
                                   librados::WatchCtx2 *ctx2,
                                   bool internal) {
  return aio_watch(oid, c, handle, ctx, ctx2, 0, internal);
}
// Async watch: like watch() but completion is reported through `c`.
// *handle is set to the linger cookie before submission; the
// C_aio_linger_Complete wrapper handles linger cleanup on error.
int librados::IoCtxImpl::aio_watch(const object_t& oid,
                                   AioCompletionImpl *c,
                                   uint64_t *handle,
                                   librados::WatchCtx *ctx,
                                   librados::WatchCtx2 *ctx2,
                                   uint32_t timeout,
                                   bool internal)
{
  Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
                                                            extra_op_flags);
  c->io = this;
  Context *oncomplete = new C_aio_linger_Complete(c, linger_op, false);
  ::ObjectOperation wr;
  *handle = linger_op->get_cookie();
  if (internal) {
    linger_op->handle = InternalWatchInfo(this, oid, ctx, ctx2);
  } else {
    linger_op->handle = WatchInfo(this, oid, ctx, ctx2);
  }
  prepare_assert_ops(&wr);
  wr.watch(*handle, CEPH_OSD_WATCH_OP_WATCH, timeout);
  bufferlist bl;
  objecter->linger_watch(linger_op, wr,
                         snapc, ceph::real_clock::now(), bl,
                         oncomplete, &c->objver);
  return 0;
}
// Fire-and-forget acknowledgement of a received notify; `bl` is the
// optional reply payload returned to the notifier.
int librados::IoCtxImpl::notify_ack(
  const object_t& oid,
  uint64_t notify_id,
  uint64_t cookie,
  bufferlist& bl)
{
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  rd.notify_ack(notify_id, cookie, bl);
  // no completion is requested; the ack is best-effort
  objecter->read(oid, oloc, rd, snap_seq, (bufferlist*)NULL, extra_op_flags, 0, 0);
  return 0;
}
// Check a watch's health. On success returns a strictly positive value
// (1 + milliseconds since the watch last confirmed connectivity); on
// failure returns the linger op's error as a negative errno.
int librados::IoCtxImpl::watch_check(uint64_t cookie)
{
  // the watch cookie is the LingerOp pointer handed out at registration
  auto linger_op = reinterpret_cast<Objecter::LingerOp*>(cookie);
  auto r = objecter->linger_check(linger_op);
  if (r)
    return 1 + std::chrono::duration_cast<
      std::chrono::milliseconds>(*r).count();
  else
    return ceph::from_error_code(r.error());
}
// Synchronous unwatch: send an UNWATCH mutation for the cookie, cancel
// the linger op, and wait for the mutation to complete.
int librados::IoCtxImpl::unwatch(uint64_t cookie)
{
  // the watch cookie is the LingerOp pointer handed out at registration
  Objecter::LingerOp *linger_op = reinterpret_cast<Objecter::LingerOp*>(cookie);
  C_SaferCond onfinish;
  version_t ver = 0;
  ::ObjectOperation wr;
  prepare_assert_ops(&wr);
  wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH);
  objecter->mutate(linger_op->target.base_oid, oloc, wr,
		   snapc, ceph::real_clock::now(), extra_op_flags,
		   &onfinish, &ver);
  objecter->linger_cancel(linger_op);
  int r = onfinish.wait();
  set_sync_op_version(ver);
  return r;
}
// Async unwatch: as above, but completion is reported through `c`;
// C_aio_linger_Complete(cancel=true) cancels the linger op on finish.
int librados::IoCtxImpl::aio_unwatch(uint64_t cookie, AioCompletionImpl *c)
{
  c->io = this;
  Objecter::LingerOp *linger_op = reinterpret_cast<Objecter::LingerOp*>(cookie);
  Context *oncomplete = new C_aio_linger_Complete(c, linger_op, true);
  ::ObjectOperation wr;
  prepare_assert_ops(&wr);
  wr.watch(cookie, CEPH_OSD_WATCH_OP_UNWATCH);
  objecter->mutate(linger_op->target.base_oid, oloc, wr,
		   snapc, ceph::real_clock::now(), extra_op_flags,
		   oncomplete, &c->objver);
  return 0;
}
// Synchronous notify: send `bl` to all watchers of `oid` and wait for
// their replies (or the timeout). Replies are delivered through either
// preply_bl or the preply_buf/preply_buf_len pair by CB_notify_Finish.
// timeout_ms of 0 falls back to the context's notify_timeout (seconds).
int librados::IoCtxImpl::notify(const object_t& oid, bufferlist& bl,
				uint64_t timeout_ms,
				bufferlist *preply_bl,
				char **preply_buf, size_t *preply_buf_len)
{
  Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
							    extra_op_flags);
  C_SaferCond notify_finish_cond;
  linger_op->on_notify_finish =
    Objecter::LingerOp::OpComp::create(
      objecter->service.get_executor(),
      CB_notify_Finish(client->cct, ¬ify_finish_cond,
                       objecter, linger_op, preply_bl,
                       preply_buf, preply_buf_len));
  uint32_t timeout = notify_timeout;
  if (timeout_ms)
    timeout = timeout_ms / 1000;
  // Construct RADOS op
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  bufferlist inbl;
  rd.notify(linger_op->get_cookie(), 1, timeout, bl, &inbl);
  // Issue RADOS op
  C_SaferCond onack;
  version_t objver;
  objecter->linger_notify(linger_op,
			  rd, snap_seq, inbl, NULL,
			  &onack, &objver);
  ldout(client->cct, 10) << __func__ << " issued linger op " << linger_op << dendl;
  int r = onack.wait();
  ldout(client->cct, 10) << __func__ << " linger op " << linger_op
			 << " acked (" << r << ")" << dendl;
  if (r == 0) {
    ldout(client->cct, 10) << __func__ << " waiting for watch_notify finish "
			   << linger_op << dendl;
    r = notify_finish_cond.wait();
  } else {
    ldout(client->cct, 10) << __func__ << " failed to initiate notify, r = "
			   << r << dendl;
    // still wait for the finish callback so linger state is consistent
    notify_finish_cond.wait();
  }
  objecter->linger_cancel(linger_op);
  set_sync_op_version(objver);
  return r;
}
// Async notify: like notify() but the caller's completion `c` fires once
// both the ack (C_aio_notify_Ack) and the notify-finish callback
// (CB_notify_Finish -> C_aio_notify_Complete) have been processed.
int librados::IoCtxImpl::aio_notify(const object_t& oid, AioCompletionImpl *c,
                                    bufferlist& bl, uint64_t timeout_ms,
                                    bufferlist *preply_bl, char **preply_buf,
                                    size_t *preply_buf_len)
{
  Objecter::LingerOp *linger_op = objecter->linger_register(oid, oloc,
                                                            extra_op_flags);
  c->io = this;
  C_aio_notify_Complete *oncomplete = new C_aio_notify_Complete(c, linger_op);
  linger_op->on_notify_finish =
    Objecter::LingerOp::OpComp::create(
      objecter->service.get_executor(),
      CB_notify_Finish(client->cct, oncomplete,
                       objecter, linger_op,
                       preply_bl, preply_buf,
                       preply_buf_len));
  Context *onack = new C_aio_notify_Ack(client->cct, oncomplete);
  uint32_t timeout = notify_timeout;
  if (timeout_ms)
    timeout = timeout_ms / 1000;
  // Construct RADOS op
  ::ObjectOperation rd;
  prepare_assert_ops(&rd);
  bufferlist inbl;
  rd.notify(linger_op->get_cookie(), 1, timeout, bl, &inbl);
  // Issue RADOS op
  objecter->linger_notify(linger_op,
                          rd, snap_seq, inbl, NULL,
                          onack, &c->objver);
  return 0;
}
// Synchronously send an allocation hint (expected object/write sizes and
// hint flags) for the object.
int librados::IoCtxImpl::set_alloc_hint(const object_t& oid,
                                        uint64_t expected_object_size,
                                        uint64_t expected_write_size,
					uint32_t flags)
{
  ::ObjectOperation wr;
  prepare_assert_ops(&wr);
  wr.set_alloc_hint(expected_object_size, expected_write_size, flags);
  return operate(oid, &wr, NULL);
}
// Object version recorded by the most recent synchronous op.
version_t librados::IoCtxImpl::last_version()
{
  return last_objver;
}
// Set the version asserted on subsequent ops (see prepare_assert_ops).
void librados::IoCtxImpl::set_assert_version(uint64_t ver)
{
  assert_ver = ver;
}
// Set the default notify timeout (seconds) used when callers pass 0.
void librados::IoCtxImpl::set_notify_timeout(uint32_t timeout)
{
  notify_timeout = timeout;
}
// Synchronously pin the object in the cache tier.
int librados::IoCtxImpl::cache_pin(const object_t& oid)
{
  ::ObjectOperation wr;
  prepare_assert_ops(&wr);
  wr.cache_pin();
  return operate(oid, &wr, NULL);
}
// Synchronously unpin the object from the cache tier.
int librados::IoCtxImpl::cache_unpin(const object_t& oid)
{
  ::ObjectOperation wr;
  prepare_assert_ops(&wr);
  wr.cache_unpin();
  return operate(oid, &wr, NULL);
}
///////////////////////////// C_aio_stat_Ack ////////////////////////////
// Completion for aio_stat(): receives the mtime as real_time and, on
// success, converts it to the caller's time_t. Takes a ref on `c`; the
// assert documents that it must be created before c->io is bound.
librados::IoCtxImpl::C_aio_stat_Ack::C_aio_stat_Ack(AioCompletionImpl *_c,
						    time_t *pm)
   : c(_c), pmtime(pm)
{
  ceph_assert(!c->io);
  c->get();
}
void librados::IoCtxImpl::C_aio_stat_Ack::finish(int r)
{
  // mark the user's completion done under its lock, then publish mtime
  c->lock.lock();
  c->rval = r;
  c->complete = true;
  c->cond.notify_all();
  if (r >= 0 && pmtime) {
    *pmtime = real_clock::to_time_t(mtime);
  }
  if (c->callback_complete) {
    boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
  }
  // drops both the lock and our reference on c
  c->put_unlock();
}
///////////////////////////// C_aio_stat2_Ack ////////////////////////////
// Same as C_aio_stat_Ack but delivers the mtime as a struct timespec.
librados::IoCtxImpl::C_aio_stat2_Ack::C_aio_stat2_Ack(AioCompletionImpl *_c,
						     struct timespec *pt)
   : c(_c), pts(pt)
{
  ceph_assert(!c->io);
  c->get();
}
void librados::IoCtxImpl::C_aio_stat2_Ack::finish(int r)
{
  c->lock.lock();
  c->rval = r;
  c->complete = true;
  c->cond.notify_all();
  if (r >= 0 && pts) {
    *pts = real_clock::to_timespec(mtime);
  }
  if (c->callback_complete) {
    boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
  }
  // drops both the lock and our reference on c
  c->put_unlock();
}
//////////////////////////// C_aio_Complete ////////////////////////////////
// Generic completion used by most aio paths: marks the user completion
// done, copies read payloads out to flat buffers for the C API, defers
// user callbacks onto the client's finish strand, and unblocks
// flush_aio_writes() waiters for queued writes.
librados::IoCtxImpl::C_aio_Complete::C_aio_Complete(AioCompletionImpl *_c)
  : c(_c)
{
  c->get();
}
void librados::IoCtxImpl::C_aio_Complete::finish(int r)
{
  c->lock.lock();
  // Leave an existing rval unless r != 0
  if (r)
    c->rval = r; // This clears the error set in C_ObjectOperation_scrub_ls::finish()
  c->complete = true;
  c->cond.notify_all();
  // For reads into a flat user buffer (out_buf), copy the data out and
  // report the byte count as the result.
  if (r == 0 && c->blp && c->blp->length() > 0) {
    if (c->out_buf && !c->blp->is_contiguous()) {
      c->rval = -ERANGE;
    } else {
      if (c->out_buf && !c->blp->is_provided_buffer(c->out_buf))
	c->blp->begin().copy(c->blp->length(), c->out_buf);
      c->rval = c->blp->length();
    }
  }
  if (c->callback_complete ||
      c->callback_safe) {
    boost::asio::defer(c->io->client->finish_strand, CB_AioComplete(c));
  }
  // writes queued via queue_aio_write() must be retired in order
  if (c->aio_write_seq) {
    c->io->complete_aio_write(c);
  }
#if defined(WITH_EVENTTRACE)
  OID_EVENT_TRACE(oid.name.c_str(), "RADOS_OP_COMPLETE");
#endif
  c->put_unlock();
}
// Split the reverse-hash range [start, finish) into m equal slices and
// return the bounds of slice n in *split_start/*split_finish. Used to
// shard object listing across workers. Endpoints of the overall range
// are passed through exactly; interior bounds are synthesized hobjects.
void librados::IoCtxImpl::object_list_slice(
  const hobject_t start,
  const hobject_t finish,
  const size_t n,
  const size_t m,
  hobject_t *split_start,
  hobject_t *split_finish)
{
  if (start.is_max()) {
    // empty remainder: every slice collapses to the max sentinel
    *split_start = hobject_t::get_max();
    *split_finish = hobject_t::get_max();
    return;
  }
  // work in reversed-bit hash space, where listing order is linear
  uint64_t start_hash = hobject_t::_reverse_bits(start.get_hash());
  uint64_t finish_hash =
    finish.is_max() ? 0x100000000 :
    hobject_t::_reverse_bits(finish.get_hash());
  uint64_t diff = finish_hash - start_hash;
  uint64_t rev_start = start_hash + (diff * n / m);
  uint64_t rev_finish = start_hash + (diff * (n + 1) / m);
  if (n == 0) {
    *split_start = start;
  } else {
    *split_start = hobject_t(
      object_t(), string(), CEPH_NOSNAP,
      hobject_t::_reverse_bits(rev_start), poolid, string());
  }
  if (n == m - 1)
    *split_finish = finish;
  else if (rev_finish >= 0x100000000)
    *split_finish = hobject_t::get_max();
  else
    *split_finish = hobject_t(
      object_t(), string(), CEPH_NOSNAP,
      hobject_t::_reverse_bits(rev_finish), poolid, string());
}
// Synchronously enable `app_name` on this pool (monitor command), then
// wait for the resulting osdmap so later calls observe the change.
int librados::IoCtxImpl::application_enable(const std::string& app_name,
                                            bool force)
{
  auto c = new PoolAsyncCompletionImpl();
  application_enable_async(app_name, force, c);
  // wait() covers command delivery; the command's result comes separately
  int r = c->wait();
  ceph_assert(r == 0);
  r = c->get_return_value();
  c->release();
  c->put();
  if (r < 0) {
    return r;
  }
  return client->wait_for_latest_osdmap();
}
// Async pool-application enable: builds the "osd pool application enable"
// monitor command and dispatches it; completion fires `c`. Fails fast
// with -EOPNOTSUPP when the monitors lack the Luminous feature.
void librados::IoCtxImpl::application_enable_async(const std::string& app_name,
                                                   bool force,
                                                   PoolAsyncCompletionImpl *c)
{
  // pre-Luminous clusters will return -EINVAL and application won't be
  // preserved until Luminous is configured as the minimum version.
  if (!client->get_required_monitor_features().contains_all(
        ceph::features::mon::FEATURE_LUMINOUS)) {
    boost::asio::defer(client->finish_strand,
		       [cb = CB_PoolAsync_Safe(c)]() mutable {
			 cb(-EOPNOTSUPP);
		       });
    return;
  }
  std::stringstream cmd;
  cmd << "{"
      << "\"prefix\": \"osd pool application enable\","
      << "\"pool\": \"" << get_cached_pool_name() << "\","
      << "\"app\": \"" << app_name << "\"";
  if (force) {
    cmd << ",\"yes_i_really_mean_it\": true";
  }
  cmd << "}";
  std::vector<std::string> cmds;
  cmds.push_back(cmd.str());
  bufferlist inbl;
  client->mon_command_async(cmds, inbl, nullptr, nullptr,
                            make_lambda_context(CB_PoolAsync_Safe(c)));
}
// List the applications enabled on this pool from the cached osdmap.
// Returns -ENOENT if the pool no longer exists.
int librados::IoCtxImpl::application_list(std::set<std::string> *app_names)
{
  int r = 0;
  app_names->clear();
  objecter->with_osdmap([&](const OSDMap& o) {
      auto pg_pool = o.get_pg_pool(poolid);
      if (pg_pool == nullptr) {
	r = -ENOENT;
        return;
      }
      for (auto &pair : pg_pool->application_metadata) {
        app_names->insert(pair.first);
      }
    });
  return r;
}
// Look up one metadata value for (app_name, key) on this pool.
// Returns -ENOENT if the pool, application, or key is missing.
int librados::IoCtxImpl::application_metadata_get(const std::string& app_name,
                                                  const std::string &key,
                                                  std::string* value)
{
  int r = 0;
  objecter->with_osdmap([&](const OSDMap& o) {
      auto pg_pool = o.get_pg_pool(poolid);
      if (pg_pool == nullptr) {
	r = -ENOENT;
        return;
      }
      auto app_it = pg_pool->application_metadata.find(app_name);
      if (app_it == pg_pool->application_metadata.end()) {
        r = -ENOENT;
        return;
      }
      auto it = app_it->second.find(key);
      if (it == app_it->second.end()) {
        r = -ENOENT;
        return;
      }
      *value = it->second;
    });
  return r;
}
// Set one application metadata key on this pool via monitor command,
// then wait for the updated osdmap so later reads observe the change.
int librados::IoCtxImpl::application_metadata_set(const std::string& app_name,
                                                  const std::string &key,
                                                  const std::string& value)
{
  std::stringstream cmd;
  cmd << "{"
      << "\"prefix\":\"osd pool application set\","
      << "\"pool\":\"" << get_cached_pool_name() << "\","
      << "\"app\":\"" << app_name << "\","
      << "\"key\":\"" << key << "\","
      << "\"value\":\"" << value << "\""
      << "}";
  std::vector<std::string> cmds;
  cmds.push_back(cmd.str());
  bufferlist inbl;
  int r = client->mon_command(cmds, inbl, nullptr, nullptr);
  if (r < 0) {
    return r;
  }
  // ensure we have the latest osd map epoch before proceeding
  return client->wait_for_latest_osdmap();
}
// Remove one application metadata key from this pool via monitor command,
// then wait for the updated osdmap so later reads observe the change.
int librados::IoCtxImpl::application_metadata_remove(const std::string& app_name,
                                                     const std::string &key)
{
  std::stringstream cmd;
  cmd << "{"
      << "\"prefix\":\"osd pool application rm\","
      << "\"pool\":\"" << get_cached_pool_name() << "\","
      << "\"app\":\"" << app_name << "\","
      << "\"key\":\"" << key << "\""
      << "}";
  std::vector<std::string> cmds;
  cmds.push_back(cmd.str());
  bufferlist inbl;
  int r = client->mon_command(cmds, inbl, nullptr, nullptr);
  if (r < 0) {
    return r;
  }
  // ensure we have the latest osd map epoch before proceeding
  return client->wait_for_latest_osdmap();
}
// Fetch all metadata key/value pairs for `app_name` on this pool from
// the cached osdmap. Returns -ENOENT if the pool or application is
// missing.
int librados::IoCtxImpl::application_metadata_list(const std::string& app_name,
                                                   std::map<std::string, std::string> *values)
{
  int r = 0;
  values->clear();
  objecter->with_osdmap([&](const OSDMap& o) {
      auto pg_pool = o.get_pg_pool(poolid);
      if (pg_pool == nullptr) {
        r = -ENOENT;
        return;
      }
      auto it = pg_pool->application_metadata.find(app_name);
      if (it == pg_pool->application_metadata.end()) {
        r = -ENOENT;
        return;
      }
      *values = it->second;
    });
  return r;
}
| 59,825 | 25.90018 | 135 | cc |
null | ceph-main/src/librados/IoCtxImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_IOCTXIMPL_H
#define CEPH_LIBRADOS_IOCTXIMPL_H
#include <atomic>
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "common/snap_types.h"
#include "common/zipkin_trace.h"
#include "include/types.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/xlist.h"
#include "osd/osd_types.h"
#include "osdc/Objecter.h"
class RadosClient;
struct librados::IoCtxImpl {
std::atomic<uint64_t> ref_cnt = { 0 };
RadosClient *client = nullptr;
int64_t poolid = 0;
snapid_t snap_seq;
::SnapContext snapc;
uint64_t assert_ver = 0;
version_t last_objver = 0;
uint32_t notify_timeout = 30;
object_locator_t oloc;
int extra_op_flags = 0;
ceph::mutex aio_write_list_lock =
ceph::make_mutex("librados::IoCtxImpl::aio_write_list_lock");
ceph_tid_t aio_write_seq = 0;
ceph::condition_variable aio_write_cond;
xlist<AioCompletionImpl*> aio_write_list;
std::map<ceph_tid_t, std::list<AioCompletionImpl*> > aio_write_waiters;
Objecter *objecter = nullptr;
IoCtxImpl();
IoCtxImpl(RadosClient *c, Objecter *objecter,
int64_t poolid, snapid_t s);
void dup(const IoCtxImpl& rhs) {
// Copy everything except the ref count
client = rhs.client;
poolid = rhs.poolid;
snap_seq = rhs.snap_seq;
snapc = rhs.snapc;
assert_ver = rhs.assert_ver;
last_objver = rhs.last_objver;
notify_timeout = rhs.notify_timeout;
oloc = rhs.oloc;
extra_op_flags = rhs.extra_op_flags;
objecter = rhs.objecter;
}
void set_snap_read(snapid_t s);
int set_snap_write_context(snapid_t seq, std::vector<snapid_t>& snaps);
void get() {
ref_cnt++;
}
void put() {
if (--ref_cnt == 0)
delete this;
}
void queue_aio_write(struct AioCompletionImpl *c);
void complete_aio_write(struct AioCompletionImpl *c);
void flush_aio_writes_async(AioCompletionImpl *c);
void flush_aio_writes();
int64_t get_id() {
return poolid;
}
std::string get_cached_pool_name();
int get_object_hash_position(const std::string& oid, uint32_t *hash_position);
int get_object_pg_hash_position(const std::string& oid, uint32_t *pg_hash_position);
::ObjectOperation *prepare_assert_ops(::ObjectOperation *op);
// snaps
int snap_list(std::vector<uint64_t> *snaps);
int snap_lookup(const char *name, uint64_t *snapid);
int snap_get_name(uint64_t snapid, std::string *s);
int snap_get_stamp(uint64_t snapid, time_t *t);
int snap_create(const char* snapname);
int selfmanaged_snap_create(uint64_t *snapid);
void aio_selfmanaged_snap_create(uint64_t *snapid, AioCompletionImpl *c);
int snap_remove(const char* snapname);
int rollback(const object_t& oid, const char *snapName);
int selfmanaged_snap_remove(uint64_t snapid);
void aio_selfmanaged_snap_remove(uint64_t snapid, AioCompletionImpl *c);
int selfmanaged_snap_rollback_object(const object_t& oid,
::SnapContext& snapc, uint64_t snapid);
// io
int nlist(Objecter::NListContext *context, int max_entries);
uint32_t nlist_seek(Objecter::NListContext *context, uint32_t pos);
uint32_t nlist_seek(Objecter::NListContext *context, const rados_object_list_cursor& cursor);
rados_object_list_cursor nlist_get_cursor(Objecter::NListContext *context);
void object_list_slice(
const hobject_t start,
const hobject_t finish,
const size_t n,
const size_t m,
hobject_t *split_start,
hobject_t *split_finish);
int create(const object_t& oid, bool exclusive);
int write(const object_t& oid, bufferlist& bl, size_t len, uint64_t off);
int append(const object_t& oid, bufferlist& bl, size_t len);
int write_full(const object_t& oid, bufferlist& bl);
int writesame(const object_t& oid, bufferlist& bl,
size_t write_len, uint64_t offset);
int read(const object_t& oid, bufferlist& bl, size_t len, uint64_t off);
int mapext(const object_t& oid, uint64_t off, size_t len,
std::map<uint64_t,uint64_t>& m);
int sparse_read(const object_t& oid, std::map<uint64_t,uint64_t>& m,
bufferlist& bl, size_t len, uint64_t off);
int checksum(const object_t& oid, uint8_t type, const bufferlist &init_value,
size_t len, uint64_t off, size_t chunk_size, bufferlist *pbl);
int remove(const object_t& oid);
int remove(const object_t& oid, int flags);
int stat(const object_t& oid, uint64_t *psize, time_t *pmtime);
int stat2(const object_t& oid, uint64_t *psize, struct timespec *pts);
int trunc(const object_t& oid, uint64_t size);
int cmpext(const object_t& oid, uint64_t off, bufferlist& cmp_bl);
int tmap_update(const object_t& oid, bufferlist& cmdbl);
int exec(const object_t& oid, const char *cls, const char *method, bufferlist& inbl, bufferlist& outbl);
int getxattr(const object_t& oid, const char *name, bufferlist& bl);
int setxattr(const object_t& oid, const char *name, bufferlist& bl);
int getxattrs(const object_t& oid, std::map<std::string, bufferlist>& attrset);
int rmxattr(const object_t& oid, const char *name);
int operate(const object_t& oid, ::ObjectOperation *o, ceph::real_time *pmtime, int flags=0);
int operate_read(const object_t& oid, ::ObjectOperation *o, bufferlist *pbl, int flags=0);
int aio_operate(const object_t& oid, ::ObjectOperation *o,
AioCompletionImpl *c, const SnapContext& snap_context,
const ceph::real_time *pmtime, int flags,
const blkin_trace_info *trace_info = nullptr);
int aio_operate_read(const object_t& oid, ::ObjectOperation *o,
AioCompletionImpl *c, int flags, bufferlist *pbl, const blkin_trace_info *trace_info = nullptr);
struct C_aio_stat_Ack : public Context {
librados::AioCompletionImpl *c;
time_t *pmtime;
ceph::real_time mtime;
C_aio_stat_Ack(AioCompletionImpl *_c, time_t *pm);
void finish(int r) override;
};
struct C_aio_stat2_Ack : public Context {
librados::AioCompletionImpl *c;
struct timespec *pts;
ceph::real_time mtime;
C_aio_stat2_Ack(AioCompletionImpl *_c, struct timespec *pts);
void finish(int r) override;
};
struct C_aio_Complete : public Context {
#if defined(WITH_EVENTTRACE)
object_t oid;
#endif
AioCompletionImpl *c;
explicit C_aio_Complete(AioCompletionImpl *_c);
void finish(int r) override;
};
int aio_read(const object_t oid, AioCompletionImpl *c,
bufferlist *pbl, size_t len, uint64_t off, uint64_t snapid,
const blkin_trace_info *info = nullptr);
int aio_read(object_t oid, AioCompletionImpl *c,
char *buf, size_t len, uint64_t off, uint64_t snapid,
const blkin_trace_info *info = nullptr);
int aio_sparse_read(const object_t oid, AioCompletionImpl *c,
std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
size_t len, uint64_t off, uint64_t snapid);
int aio_cmpext(const object_t& oid, AioCompletionImpl *c, uint64_t off,
bufferlist& cmp_bl);
int aio_cmpext(const object_t& oid, AioCompletionImpl *c,
const char *cmp_buf, size_t cmp_len, uint64_t off);
int aio_write(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len, uint64_t off,
const blkin_trace_info *info = nullptr);
int aio_append(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t len);
int aio_write_full(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl);
int aio_writesame(const object_t &oid, AioCompletionImpl *c,
const bufferlist& bl, size_t write_len, uint64_t off);
int aio_remove(const object_t &oid, AioCompletionImpl *c, int flags=0);
int aio_exec(const object_t& oid, AioCompletionImpl *c, const char *cls,
const char *method, bufferlist& inbl, bufferlist *outbl);
int aio_exec(const object_t& oid, AioCompletionImpl *c, const char *cls,
const char *method, bufferlist& inbl, char *buf, size_t out_len);
int aio_stat(const object_t& oid, AioCompletionImpl *c, uint64_t *psize, time_t *pmtime);
int aio_stat2(const object_t& oid, AioCompletionImpl *c, uint64_t *psize, struct timespec *pts);
int aio_getxattr(const object_t& oid, AioCompletionImpl *c,
const char *name, bufferlist& bl);
int aio_setxattr(const object_t& oid, AioCompletionImpl *c,
const char *name, bufferlist& bl);
int aio_getxattrs(const object_t& oid, AioCompletionImpl *c,
std::map<std::string, bufferlist>& attrset);
int aio_rmxattr(const object_t& oid, AioCompletionImpl *c,
const char *name);
int aio_cancel(AioCompletionImpl *c);
int hit_set_list(uint32_t hash, AioCompletionImpl *c,
std::list< std::pair<time_t, time_t> > *pls);
int hit_set_get(uint32_t hash, AioCompletionImpl *c, time_t stamp,
bufferlist *pbl);
int get_inconsistent_objects(const pg_t& pg,
const librados::object_id_t& start_after,
uint64_t max_to_get,
AioCompletionImpl *c,
std::vector<inconsistent_obj_t>* objects,
uint32_t* interval);
int get_inconsistent_snapsets(const pg_t& pg,
const librados::object_id_t& start_after,
uint64_t max_to_get,
AioCompletionImpl *c,
std::vector<inconsistent_snapset_t>* snapsets,
uint32_t* interval);
void set_sync_op_version(version_t ver);
int watch(const object_t& oid, uint64_t *cookie, librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2, bool internal = false);
int watch(const object_t& oid, uint64_t *cookie, librados::WatchCtx *ctx,
librados::WatchCtx2 *ctx2, uint32_t timeout, bool internal = false);
int aio_watch(const object_t& oid, AioCompletionImpl *c, uint64_t *cookie,
librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2,
bool internal = false);
int aio_watch(const object_t& oid, AioCompletionImpl *c, uint64_t *cookie,
librados::WatchCtx *ctx, librados::WatchCtx2 *ctx2,
uint32_t timeout, bool internal = false);
int watch_check(uint64_t cookie);
int unwatch(uint64_t cookie);
int aio_unwatch(uint64_t cookie, AioCompletionImpl *c);
int notify(const object_t& oid, bufferlist& bl, uint64_t timeout_ms,
bufferlist *preplybl, char **preply_buf, size_t *preply_buf_len);
int notify_ack(const object_t& oid, uint64_t notify_id, uint64_t cookie,
bufferlist& bl);
int aio_notify(const object_t& oid, AioCompletionImpl *c, bufferlist& bl,
uint64_t timeout_ms, bufferlist *preplybl, char **preply_buf,
size_t *preply_buf_len);
int set_alloc_hint(const object_t& oid,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags);
version_t last_version();
void set_assert_version(uint64_t ver);
void set_notify_timeout(uint32_t timeout);
int cache_pin(const object_t& oid);
int cache_unpin(const object_t& oid);
int application_enable(const std::string& app_name, bool force);
void application_enable_async(const std::string& app_name, bool force,
PoolAsyncCompletionImpl *c);
int application_list(std::set<std::string> *app_names);
int application_metadata_get(const std::string& app_name,
const std::string &key,
std::string* value);
int application_metadata_set(const std::string& app_name,
const std::string &key,
const std::string& value);
int application_metadata_remove(const std::string& app_name,
const std::string &key);
int application_metadata_list(const std::string& app_name,
std::map<std::string, std::string> *values);
};
#endif
| 12,110 | 39.23588 | 106 | h |
null | ceph-main/src/librados/ListObjectImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 David Zafman <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_LISTOBJECTIMPL_H
#define CEPH_LIBRADOS_LISTOBJECTIMPL_H
#include <string>
#include <include/rados/librados.hpp>
namespace librados {
/**
 * Value type backing librados::ListObject: identifies one object returned
 * by pool listing as a (namespace, oid, locator) triple.
 */
struct ListObjectImpl {
  std::string nspace;   ///< object namespace ("" means the default namespace)
  std::string oid;      ///< object name
  std::string locator;  ///< object locator key ("" if none)
  ListObjectImpl() {}
  ListObjectImpl(std::string n, std::string o, std::string l):
      nspace(n), oid(o), locator(l) {}
  /// lexicographic comparison over (nspace, oid, locator), in member order
  auto operator<=>(const ListObjectImpl&) const = default;
  const std::string& get_nspace() const { return nspace; }
  const std::string& get_oid() const { return oid; }
  const std::string& get_locator() const { return locator; }
};
/// Render as "[nspace/]oid[@locator]", omitting the empty components.
inline std::ostream& operator<<(std::ostream& out, const struct ListObjectImpl& lop) {
  if (!lop.nspace.empty())
    out << lop.nspace << "/";
  out << lop.oid;
  if (!lop.locator.empty())
    out << "@" << lop.locator;
  return out;
}
/**
 * Implementation behind librados::NObjectIterator: a forward iterator over
 * the objects of a pool, backed by a shared listing context (ObjListCtx).
 * Copies share position state via that shared_ptr.
 */
class NObjectIteratorImpl {
  public:
    NObjectIteratorImpl() {}
    ~NObjectIteratorImpl();
    NObjectIteratorImpl(const NObjectIteratorImpl &rhs);
    NObjectIteratorImpl& operator=(const NObjectIteratorImpl& rhs);
    bool operator==(const NObjectIteratorImpl& rhs) const;
    bool operator!=(const NObjectIteratorImpl& rhs) const;
    const ListObject& operator*() const;
    const ListObject* operator->() const;
    NObjectIteratorImpl &operator++(); // Preincrement
    NObjectIteratorImpl operator++(int); // Postincrement
    /// borrowed pointer to the current entry; valid until the iterator advances
    const ListObject *get_listobjectp() { return &cur_obj; }
    /// get current hash position of the iterator, rounded to the current pg
    uint32_t get_pg_hash_position() const;
    /// move the iterator to a given hash position. this may (will!) be rounded to the nearest pg.
    uint32_t seek(uint32_t pos);
    /// move the iterator to a given cursor position
    uint32_t seek(const librados::ObjectCursor& cursor);
    /// get current cursor position
    librados::ObjectCursor get_cursor();
    /// install a server-side filter applied to subsequent listing results
    void set_filter(const bufferlist &bl);
    NObjectIteratorImpl(ObjListCtx *ctx_);
    /// advance to the next object, fetching another batch if needed
    void get_next();
    std::shared_ptr < ObjListCtx > ctx;  // shared listing state; null for end()
    ListObject cur_obj;                  // cached current entry
};
}
#endif
| 2,482 | 30.0375 | 99 | h |
null | ceph-main/src/librados/ObjectOperationImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#pragma once
#include "common/ceph_time.h"
#include "osdc/Objecter.h"
namespace librados {
// Wraps Objecter's ObjectOperation with storage for an optional mtime argument.
struct ObjectOperationImpl {
  ::ObjectOperation o;             // the underlying compound OSD operation
  ceph::real_time rt;              // backing storage for the mtime, when one is set
  ceph::real_time *prt = nullptr;  // points at rt when an mtime was supplied, else nullptr
};
} // namespace librados
| 677 | 23.214286 | 80 | h |
null | ceph-main/src/librados/PoolAsyncCompletionImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_POOLASYNCCOMPLETIONIMPL_H
#define CEPH_LIBRADOS_POOLASYNCCOMPLETIONIMPL_H
#include "common/ceph_mutex.h"
#include <boost/intrusive_ptr.hpp>
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
namespace librados {
  /**
   * Shared state behind librados::PoolAsyncCompletion: a refcounted
   * completion with an optional user callback, used by the async pool
   * create/delete paths.  All fields are protected by 'lock'.
   */
  struct PoolAsyncCompletionImpl {
    ceph::mutex lock = ceph::make_mutex("PoolAsyncCompletionImpl lock");
    ceph::condition_variable cond;        // signalled when 'done' flips to true
    int ref = 1;                          // refcount; creator holds the first ref
    int rval = 0;                         // operation result, meaningful once done
    bool released = false;                // user has released their handle
    bool done = false;                    // operation has completed
    rados_callback_t callback = nullptr;  // optional user completion callback
    void *callback_arg = nullptr;         // opaque argument for 'callback'
    PoolAsyncCompletionImpl() = default;
    /// Install a user callback to run on completion; always returns 0.
    int set_callback(void *cb_arg, rados_callback_t cb) {
      std::scoped_lock l(lock);
      callback = cb;
      callback_arg = cb_arg;
      return 0;
    }
    /// Block until the operation completes; always returns 0.
    int wait() {
      std::unique_lock l(lock);
      while (!done)
	cond.wait(l);
      return 0;
    }
    /// Nonzero once the operation has completed.
    int is_complete() {
      std::scoped_lock l(lock);
      return done;
    }
    /// Result code of the operation (valid only once complete).
    int get_return_value() {
      std::scoped_lock l(lock);
      return rval;
    }
    /// Take an additional reference.
    void get() {
      std::scoped_lock l(lock);
      ceph_assert(ref > 0);
      ref++;
    }
    /// Mark the user's handle as released.
    // NOTE(review): this only sets the flag; it does not drop a ref here --
    // presumably the C++/C shim pairs it with put().  Confirm against the
    // librados wrapper before relying on the lifetime.
    void release() {
      std::scoped_lock l(lock);
      ceph_assert(!released);
      released = true;
    }
    /// Drop a reference; deletes this object when the count reaches zero.
    void put() {
      std::unique_lock l(lock);
      int n = --ref;
      l.unlock();
      if (!n)
	delete this;
    }
  };
inline void intrusive_ptr_add_ref(PoolAsyncCompletionImpl* p) {
p->get();
}
inline void intrusive_ptr_release(PoolAsyncCompletionImpl* p) {
p->put();
}
  /**
   * Completion functor for async pool operations.  Holds a ref on the
   * PoolAsyncCompletionImpl via intrusive_ptr; when invoked it records the
   * result, wakes waiters, and runs any user callback.
   */
  class CB_PoolAsync_Safe {
    boost::intrusive_ptr<PoolAsyncCompletionImpl> p;
  public:
    explicit CB_PoolAsync_Safe(boost::intrusive_ptr<PoolAsyncCompletionImpl> p)
      : p(p) {}
    ~CB_PoolAsync_Safe() = default;
    void operator()(int r) {
      // move our ref into a local so it is dropped when 'c' goes out of scope
      auto c(std::move(p));
      std::unique_lock l(c->lock);
      c->rval = r;
      c->done = true;
      c->cond.notify_all();
      if (c->callback) {
	rados_callback_t cb = c->callback;
	void *cb_arg = c->callback_arg;
	// run the user callback without holding the completion's lock
	l.unlock();
	cb(c.get(), cb_arg);
	l.lock();
      }
    }
  };
}
#endif
| 2,529 | 21.792793 | 79 | h |
null | ceph-main/src/librados/RadosClient.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <iostream>
#include <string>
#include <sstream>
#include <pthread.h>
#include <errno.h>
#include "common/ceph_context.h"
#include "common/config.h"
#include "common/common_init.h"
#include "common/ceph_json.h"
#include "common/errno.h"
#include "common/ceph_json.h"
#include "common/async/blocked_completion.h"
#include "include/buffer.h"
#include "include/stringify.h"
#include "include/util.h"
#include "msg/Messenger.h"
// needed for static_cast
#include "messages/MLog.h"
#include "AioCompletionImpl.h"
#include "IoCtxImpl.h"
#include "PoolAsyncCompletionImpl.h"
#include "RadosClient.h"
#include "include/ceph_assert.h"
#include "common/EventTrace.h"
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
using std::ostringstream;
using std::string;
using std::map;
using std::vector;
namespace bc = boost::container;
namespace bs = boost::system;
namespace ca = ceph::async;
namespace cb = ceph::buffer;
librados::RadosClient::RadosClient(CephContext *cct_)
: Dispatcher(cct_->get()),
cct_deleter{cct, [](CephContext *p) {p->put();}}
{
auto& conf = cct->_conf;
conf.add_observer(this);
rados_mon_op_timeout = conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
}
int64_t librados::RadosClient::lookup_pool(const char *name)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
int64_t ret = objecter->with_osdmap(std::mem_fn(&OSDMap::lookup_pg_pool_name),
name);
if (-ENOENT == ret) {
// Make sure we have the latest map
int r = wait_for_latest_osdmap();
if (r < 0)
return r;
ret = objecter->with_osdmap(std::mem_fn(&OSDMap::lookup_pg_pool_name),
name);
}
return ret;
}
bool librados::RadosClient::pool_requires_alignment(int64_t pool_id)
{
bool required;
int r = pool_requires_alignment2(pool_id, &required);
if (r < 0) {
// Cast answer to false, this is a little bit problematic
// since we really don't know the answer yet, say.
return false;
}
return required;
}
// a safer version of pool_requires_alignment
int librados::RadosClient::pool_requires_alignment2(int64_t pool_id,
bool *req)
{
if (!req)
return -EINVAL;
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
return objecter->with_osdmap([req, pool_id](const OSDMap& o) {
if (!o.have_pg_pool(pool_id)) {
return -ENOENT;
}
*req = o.get_pg_pool(pool_id)->requires_aligned_append();
return 0;
});
}
uint64_t librados::RadosClient::pool_required_alignment(int64_t pool_id)
{
uint64_t alignment;
int r = pool_required_alignment2(pool_id, &alignment);
if (r < 0) {
return 0;
}
return alignment;
}
// a safer version of pool_required_alignment
int librados::RadosClient::pool_required_alignment2(int64_t pool_id,
uint64_t *alignment)
{
if (!alignment)
return -EINVAL;
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
return objecter->with_osdmap([alignment, pool_id](const OSDMap &o) {
if (!o.have_pg_pool(pool_id)) {
return -ENOENT;
}
*alignment = o.get_pg_pool(pool_id)->required_alignment();
return 0;
});
}
/**
 * Look up the name of a pool by id.
 *
 * @param pool_id pool to resolve
 * @param s [out] pool name on success
 * @param wait_latest_map if true and the pool is unknown, fetch the latest
 *        osdmap once and retry (the locally cached map may be stale)
 * @return 0 on success, -ENOENT if the pool does not exist, or another
 *         negative error from fetching an osdmap
 */
int librados::RadosClient::pool_get_name(uint64_t pool_id, std::string *s, bool wait_latest_map)
{
  int r = wait_for_osdmap();
  if (r < 0)
    return r;

  // structured retry loop instead of the previous goto-based flow
  while (true) {
    objecter->with_osdmap([&](const OSDMap& o) {
        if (!o.have_pg_pool(pool_id)) {
          r = -ENOENT;
        } else {
          r = 0;
          *s = o.get_pool_name(pool_id);
        }
      });
    if (r != -ENOENT || !wait_latest_map)
      return r;
    // the pool may exist only in a newer map; refresh once and retry
    r = wait_for_latest_osdmap();
    if (r < 0)
      return r;
    wait_latest_map = false;
  }
}
int librados::RadosClient::get_fsid(std::string *s)
{
if (!s)
return -EINVAL;
std::lock_guard l(lock);
ostringstream oss;
oss << monclient.get_fsid();
*s = oss.str();
return 0;
}
int librados::RadosClient::ping_monitor(const string mon_id, string *result)
{
int err = 0;
/* If we haven't yet connected, we have no way of telling whether we
* already built monc's initial monmap. IF we are in CONNECTED state,
* then it is safe to assume that we went through connect(), which does
* build a monmap.
*/
if (state != CONNECTED) {
ldout(cct, 10) << __func__ << " build monmap" << dendl;
err = monclient.build_initial_monmap();
}
if (err < 0) {
return err;
}
err = monclient.ping_monitor(mon_id, result);
return err;
}
/**
 * Establish this client's connection to the cluster: bootstrap the
 * monmap/config, create the messenger and Objecter, authenticate with the
 * monitors, and start the MgrClient.  On failure, partially constructed
 * state is torn down before returning; on success state becomes CONNECTED.
 *
 * @return 0 on success; -EINPROGRESS / -EISCONN if already connecting or
 *         connected; otherwise a negative error code.
 */
int librados::RadosClient::connect()
{
  int err;

  // already connected?
  if (state == CONNECTING)
    return -EINPROGRESS;
  if (state == CONNECTED)
    return -EISCONN;
  state = CONNECTING;

  if (!cct->_log->is_started()) {
    cct->_log->start();
  }

  {
    // fetch the monmap and mon-stored config with a throwaway MonClient
    // before committing to full initialization
    MonClient mc_bootstrap(cct, poolctx);
    err = mc_bootstrap.get_monmap_and_config();
    if (err < 0)
      return err;
  }

  common_init_finish(cct);

  poolctx.start(cct->_conf.get_val<std::uint64_t>("librados_thread_count"));

  // get monmap
  err = monclient.build_initial_monmap();
  if (err < 0)
    goto out;

  err = -ENOMEM;
  messenger = Messenger::create_client_messenger(cct, "radosclient");
  if (!messenger)
    goto out;

  // require OSDREPLYMUX feature.  this means we will fail to talk to
  // old servers.  this is necessary because otherwise we won't know
  // how to decompose the reply data into its constituent pieces.
  messenger->set_default_policy(Messenger::Policy::lossy_client(CEPH_FEATURE_OSDREPLYMUX));

  ldout(cct, 1) << "starting msgr at " << messenger->get_myaddrs() << dendl;

  ldout(cct, 1) << "starting objecter" << dendl;

  objecter = new (std::nothrow) Objecter(cct, messenger, &monclient, poolctx);
  if (!objecter)
    goto out;
  objecter->set_balanced_budget();

  monclient.set_messenger(messenger);
  mgrclient.set_messenger(messenger);

  objecter->init();
  // dispatcher order matters: mgrclient first, then objecter, then us
  messenger->add_dispatcher_head(&mgrclient);
  messenger->add_dispatcher_tail(objecter);
  messenger->add_dispatcher_tail(this);

  messenger->start();

  ldout(cct, 1) << "setting wanted keys" << dendl;
  monclient.set_want_keys(
      CEPH_ENTITY_TYPE_MON | CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MGR);
  ldout(cct, 1) << "calling monclient init" << dendl;
  err = monclient.init();
  if (err) {
    ldout(cct, 0) << conf->name << " initialization error " << cpp_strerror(-err) << dendl;
    shutdown();
    goto out;
  }
  err = monclient.authenticate(std::chrono::duration<double>(conf.get_val<std::chrono::seconds>("client_mount_timeout")).count());
  if (err) {
    ldout(cct, 0) << conf->name << " authentication error " << cpp_strerror(-err) << dendl;
    shutdown();
    goto out;
  }
  messenger->set_myname(entity_name_t::CLIENT(monclient.get_global_id()));

  // Detect older cluster, put mgrclient into compatible mode
  mgrclient.set_mgr_optional(
      !get_required_monitor_features().contains_all(
        ceph::features::mon::FEATURE_LUMINOUS));

  // MgrClient needs this (it doesn't have MonClient reference itself)
  monclient.sub_want("mgrmap", 0, 0);
  monclient.renew_subs();

  if (service_daemon) {
    ldout(cct, 10) << __func__ << " registering as " << service_name << "."
		   << daemon_name << dendl;
    mgrclient.service_daemon_register(service_name, daemon_name,
                                      daemon_metadata);
  }

  mgrclient.init();

  objecter->set_client_incarnation(0);
  objecter->start();

  lock.lock();
  state = CONNECTED;
  instance_id = monclient.get_global_id();
  lock.unlock();

  ldout(cct, 1) << "init done" << dendl;
  err = 0;

 out:
  if (err) {
    // tear down whatever was constructed before the failure
    state = DISCONNECTED;

    if (objecter) {
      delete objecter;
      objecter = NULL;
    }
    if (messenger) {
      delete messenger;
      messenger = NULL;
    }
  }

  return err;
}
/**
 * Tear down the connection: flush watch callbacks, then shut down the
 * Objecter, MgrClient, MonClient, messenger, and thread pool, in that
 * order.  Safe to call when already disconnected (no-op).
 */
void librados::RadosClient::shutdown()
{
  std::unique_lock l{lock};
  if (state == DISCONNECTED) {
    return;
  }

  bool need_objecter = false;
  if (objecter && objecter->initialized) {
    need_objecter = true;
  }

  if (state == CONNECTED) {
    if (need_objecter) {
      // make sure watch callbacks are flushed
      watch_flush();
    }
  }
  state = DISCONNECTED;
  instance_id = 0;
  // release our lock before shutting down the components below; they may
  // call back into dispatch paths that take it
  l.unlock();
  if (need_objecter) {
    objecter->shutdown();
  }
  mgrclient.shutdown();

  monclient.shutdown();
  if (messenger) {
    messenger->shutdown();
    messenger->wait();
  }
  poolctx.stop();
  ldout(cct, 1) << "shutdown" << dendl;
}
int librados::RadosClient::watch_flush()
{
ldout(cct, 10) << __func__ << " enter" << dendl;
objecter->linger_callback_flush(ca::use_blocked);
ldout(cct, 10) << __func__ << " exit" << dendl;
return 0;
}
/**
 * Completion functor for async_watch_flush(): marks the AioCompletionImpl
 * complete (rval 0) once the Objecter has flushed linger callbacks.
 * Takes a ref on the completion in the ctor; drops it via put_unlock()
 * when invoked.
 */
struct CB_aio_watch_flush_Complete {
  librados::RadosClient *client;   // not owned
  librados::AioCompletionImpl *c;  // ref held (taken below)

  CB_aio_watch_flush_Complete(librados::RadosClient *_client, librados::AioCompletionImpl *_c)
    : client(_client), c(_c) {
    c->get();
  }

  CB_aio_watch_flush_Complete(const CB_aio_watch_flush_Complete&) = delete;
  CB_aio_watch_flush_Complete operator =(const CB_aio_watch_flush_Complete&) = delete;

  // NOTE(review): the move operations copy the pointers without clearing
  // rhs or adjusting the refcount; this is safe only as long as the
  // moved-from functor is never invoked -- confirm if usage changes.
  CB_aio_watch_flush_Complete(CB_aio_watch_flush_Complete&& rhs) {
    client = rhs.client;
    c = rhs.c;
  }
  CB_aio_watch_flush_Complete& operator =(CB_aio_watch_flush_Complete&& rhs) {
    client = rhs.client;
    c = rhs.c;
    return *this;
  }

  void operator()() {
    c->lock.lock();
    c->rval = 0;
    c->complete = true;
    c->cond.notify_all();

    if (c->callback_complete ||
	c->callback_safe) {
      // user callbacks run on the client's completion strand
      boost::asio::defer(client->finish_strand, librados::CB_AioComplete(c));
    }
    c->put_unlock();  // drops the ctor's ref and releases c->lock
  }
};
int librados::RadosClient::async_watch_flush(AioCompletionImpl *c)
{
ldout(cct, 10) << __func__ << " enter" << dendl;
objecter->linger_callback_flush(CB_aio_watch_flush_Complete(this, c));
ldout(cct, 10) << __func__ << " exit" << dendl;
return 0;
}
uint64_t librados::RadosClient::get_instance_id()
{
return instance_id;
}
int librados::RadosClient::get_min_compatible_osd(int8_t* require_osd_release)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
objecter->with_osdmap(
[require_osd_release](const OSDMap& o) {
*require_osd_release = to_integer<int8_t>(o.require_osd_release);
});
return 0;
}
int librados::RadosClient::get_min_compatible_client(int8_t* min_compat_client,
int8_t* require_min_compat_client)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
objecter->with_osdmap(
[min_compat_client, require_min_compat_client](const OSDMap& o) {
*min_compat_client = to_integer<int8_t>(o.get_min_compat_client());
*require_min_compat_client =
to_integer<int8_t>(o.get_require_min_compat_client());
});
return 0;
}
// Destructor: unregister the config observer and free the messenger and
// Objecter.  shutdown() is expected to have run already if connected.
librados::RadosClient::~RadosClient()
{
  cct->_conf.remove_observer(this);
  // delete on a null pointer is a no-op, so no guards are needed
  delete messenger;
  delete objecter;
  cct = NULL;
}
int librados::RadosClient::create_ioctx(const char *name, IoCtxImpl **io)
{
int64_t poolid = lookup_pool(name);
if (poolid < 0) {
return (int)poolid;
}
*io = new librados::IoCtxImpl(this, objecter, poolid, CEPH_NOSNAP);
return 0;
}
int librados::RadosClient::create_ioctx(int64_t pool_id, IoCtxImpl **io)
{
std::string pool_name;
int r = pool_get_name(pool_id, &pool_name, true);
if (r < 0)
return r;
*io = new librados::IoCtxImpl(this, objecter, pool_id, CEPH_NOSNAP);
return 0;
}
bool librados::RadosClient::ms_dispatch(Message *m)
{
bool ret;
std::lock_guard l(lock);
if (state == DISCONNECTED) {
ldout(cct, 10) << "disconnected, discarding " << *m << dendl;
m->put();
ret = true;
} else {
ret = _dispatch(m);
}
return ret;
}
void librados::RadosClient::ms_handle_connect(Connection *con)
{
}
bool librados::RadosClient::ms_handle_reset(Connection *con)
{
return false;
}
void librados::RadosClient::ms_handle_remote_reset(Connection *con)
{
}
bool librados::RadosClient::ms_handle_refused(Connection *con)
{
return false;
}
/**
 * Handle a message already routed here by ms_dispatch(); 'lock' must be
 * held.  Returns false for message types not handled by this dispatcher.
 */
bool librados::RadosClient::_dispatch(Message *m)
{
  ceph_assert(ceph_mutex_is_locked(lock));
  switch (m->get_type()) {
  // OSD
  case CEPH_MSG_OSD_MAP:
    cond.notify_all();  // wake wait_for_osdmap() waiters
    m->put();
    break;

  case CEPH_MSG_MDS_MAP:
    m->put();  // not interesting to librados; just drop the ref
    break;

  case MSG_LOG:
    handle_log(static_cast<MLog *>(m));
    break;

  default:
    return false;
  }

  return true;
}
/**
 * Block until at least one osdmap has been received (epoch > 0).
 *
 * Must be called without 'lock' held.  Honors rados_mon_op_timeout;
 * a zero timeout waits indefinitely.
 *
 * @return 0 once a map is available, -ENOTCONN if not connected,
 *         -ETIMEDOUT if the timeout expires first.
 */
int librados::RadosClient::wait_for_osdmap()
{
  ceph_assert(ceph_mutex_is_not_locked_by_me(lock));

  if (state != CONNECTED) {
    return -ENOTCONN;
  }

  bool need_map = false;
  objecter->with_osdmap([&](const OSDMap& o) {
      if (o.get_epoch() == 0) {
        need_map = true;
      }
    });

  if (need_map) {
    std::unique_lock l(lock);

    ceph::timespan timeout = rados_mon_op_timeout;
    // re-check under the lock: a map may have arrived in the meantime
    if (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) {
      ldout(cct, 10) << __func__ << " waiting" << dendl;
      while (objecter->with_osdmap(std::mem_fn(&OSDMap::get_epoch)) == 0) {
        if (timeout == timeout.zero()) {
          cond.wait(l);  // no timeout configured; woken by _dispatch()
        } else {
          if (cond.wait_for(l, timeout) == std::cv_status::timeout) {
            lderr(cct) << "timed out waiting for first osdmap from monitors"
                       << dendl;
            return -ETIMEDOUT;
          }
        }
      }
      ldout(cct, 10) << __func__ << " done waiting" << dendl;
    }
    return 0;
  } else {
    return 0;
  }
}
int librados::RadosClient::wait_for_latest_osdmap()
{
bs::error_code ec;
objecter->wait_for_latest_osdmap(ca::use_blocked[ec]);
return ceph::from_error_code(ec);
}
/**
 * List all pools in the current osdmap as (id, name) pairs, appended to v.
 *
 * @param v [out] receives one (pool id, pool name) entry per pool
 * @return 0 on success or a negative error from waiting for the osdmap
 */
int librados::RadosClient::pool_list(std::list<std::pair<int64_t, string> >& v)
{
  int r = wait_for_osdmap();
  if (r < 0)
    return r;

  objecter->with_osdmap([&](const OSDMap& o) {
      // iterate by const reference: 'auto p' copied the full
      // pair<int64_t, pg_pool_t> (pg_pool_t is large) on every iteration
      for (const auto& p : o.get_pools())
	v.emplace_back(p.first, o.get_pool_name(p.first));
    });
  return 0;
}
int librados::RadosClient::get_pool_stats(std::list<string>& pools,
map<string,::pool_stat_t> *result,
bool *pper_pool)
{
bs::error_code ec;
std::vector<std::string> v(pools.begin(), pools.end());
auto [res, per_pool] = objecter->get_pool_stats(v, ca::use_blocked[ec]);
if (ec)
return ceph::from_error_code(ec);
if (per_pool)
*pper_pool = per_pool;
if (result)
result->insert(res.begin(), res.end());
return 0;
}
// Return true if the named pool exists and is in self-managed
// (unmanaged) snaps mode; false otherwise, including when the pool
// does not exist.
bool librados::RadosClient::get_pool_is_selfmanaged_snaps_mode(
  const std::string& pool)
{
  bool selfmanaged = false;
  objecter->with_osdmap([&](const OSDMap& map) {
      auto id = map.lookup_pg_pool_name(pool);
      if (id < 0)
	return;  // unknown pool => false
      selfmanaged = map.get_pg_pool(id)->is_unmanaged_snaps_mode();
    });
  return selfmanaged;
}
/**
 * Fetch cluster-wide filesystem statistics (synchronous: blocks until the
 * statfs reply arrives; no timeout).
 *
 * @param stats [out] filled with cluster usage totals on success
 * @return 0 on success or a negative error code from the statfs op
 */
int librados::RadosClient::get_fs_stats(ceph_statfs& stats)
{
  ceph::mutex mylock = ceph::make_mutex("RadosClient::get_fs_stats::mylock");
  ceph::condition_variable cond;
  // fix: 'done' was previously uninitialized; the wait predicate below can
  // read it before the completion fires, which is undefined behavior and
  // could let the wait fall through with a garbage 'ret'
  bool done = false;
  int ret = 0;
  {
    std::lock_guard l{mylock};
    objecter->get_fs_stats(stats, std::optional<int64_t>(),
			   new C_SafeCond(mylock, cond, &done, &ret));
  }
  {
    std::unique_lock l{mylock};
    cond.wait(l, [&done] { return done;});
  }
  return ret;
}
void librados::RadosClient::get() {
std::lock_guard l(lock);
ceph_assert(refcnt > 0);
refcnt++;
}
bool librados::RadosClient::put() {
std::lock_guard l(lock);
ceph_assert(refcnt > 0);
refcnt--;
return (refcnt == 0);
}
/**
 * Create a pool and block until the request completes.
 *
 * @param name pool name; must be non-empty
 * @param crush_rule CRUSH rule id, or -1 for the default
 * @return 0 on success, -EINVAL for an empty name, or a negative error
 *         from the monitor
 */
int librados::RadosClient::pool_create(string& name,
				       int16_t crush_rule)
{
  if (!name.length())
    return -EINVAL;

  int r = wait_for_osdmap();
  if (r < 0) {
    return r;
  }

  ceph::mutex mylock = ceph::make_mutex("RadosClient::pool_create::mylock");
  // fix: 'reply' and 'done' were previously uninitialized; the wait
  // predicate reads 'done' before the completion fires (UB)
  int reply = 0;
  ceph::condition_variable cond;
  bool done = false;
  Context *onfinish = new C_SafeCond(mylock, cond, &done, &reply);
  objecter->create_pool(name, onfinish, crush_rule);

  std::unique_lock l{mylock};
  cond.wait(l, [&done] { return done; });
  return reply;
}
int librados::RadosClient::pool_create_async(string& name,
PoolAsyncCompletionImpl *c,
int16_t crush_rule)
{
int r = wait_for_osdmap();
if (r < 0)
return r;
Context *onfinish = make_lambda_context(CB_PoolAsync_Safe(c));
objecter->create_pool(name, onfinish, crush_rule);
return r;
}
int librados::RadosClient::pool_get_base_tier(int64_t pool_id, int64_t* base_tier)
{
int r = wait_for_osdmap();
if (r < 0) {
return r;
}
objecter->with_osdmap([&](const OSDMap& o) {
const pg_pool_t* pool = o.get_pg_pool(pool_id);
if (pool) {
if (pool->tier_of < 0) {
*base_tier = pool_id;
} else {
*base_tier = pool->tier_of;
}
r = 0;
} else {
r = -ENOENT;
}
});
return r;
}
/**
 * Delete a pool by name and block until the request completes.
 *
 * @param name pool to delete
 * @return 0 on success or a negative error from the monitor
 */
int librados::RadosClient::pool_delete(const char *name)
{
  int r = wait_for_osdmap();
  if (r < 0) {
    return r;
  }

  ceph::mutex mylock = ceph::make_mutex("RadosClient::pool_delete::mylock");
  ceph::condition_variable cond;
  // fix: 'done' and 'ret' were previously uninitialized; the wait
  // predicate reads 'done' before the completion fires (UB)
  bool done = false;
  int ret = 0;
  Context *onfinish = new C_SafeCond(mylock, cond, &done, &ret);
  objecter->delete_pool(name, onfinish);

  std::unique_lock l{mylock};
  cond.wait(l, [&done] { return done;});
  return ret;
}
int librados::RadosClient::pool_delete_async(const char *name, PoolAsyncCompletionImpl *c)
{
int r = wait_for_osdmap();
if (r < 0)
return r;
Context *onfinish = make_lambda_context(CB_PoolAsync_Safe(c));
objecter->delete_pool(name, onfinish);
return r;
}
void librados::RadosClient::blocklist_self(bool set) {
std::lock_guard l(lock);
objecter->blocklist_self(set);
}
std::string librados::RadosClient::get_addrs() const {
CachedStackStringStream cos;
*cos << messenger->get_myaddrs();
return std::string(cos->strv());
}
/**
 * Add a client address to the OSD blocklist via a mon command.
 *
 * @param client_address address to blocklist (validated by entity_addr_t::parse
 *        before being embedded in the command JSON)
 * @param expire_seconds blocklist duration; 0 uses the cluster default
 * @return 0 on success, -EINVAL for an unparseable address, or a negative
 *         error from the monitor / osdmap wait
 */
int librados::RadosClient::blocklist_add(const string& client_address,
					 uint32_t expire_seconds)
{
  entity_addr_t addr;
  if (!addr.parse(client_address)) {
    lderr(cct) << "unable to parse address " << client_address << dendl;
    return -EINVAL;
  }

  // safe to splice client_address into the JSON: it parsed as an address above
  std::stringstream cmd;
  cmd << "{"
      << "\"prefix\": \"osd blocklist\", "
      << "\"blocklistop\": \"add\", "
      << "\"addr\": \"" << client_address << "\"";
  if (expire_seconds != 0) {
    cmd << ", \"expire\": " << expire_seconds << ".0";
  }
  cmd << "}";

  std::vector<std::string> cmds;
  cmds.push_back(cmd.str());
  bufferlist inbl;
  int r = mon_command(cmds, inbl, NULL, NULL);
  if (r == -EINVAL) {
    // try legacy blacklist command (pre-blocklist-rename monitors)
    std::stringstream cmd;
    cmd << "{"
	<< "\"prefix\": \"osd blacklist\", "
	<< "\"blacklistop\": \"add\", "
	<< "\"addr\": \"" << client_address << "\"";
    if (expire_seconds != 0) {
      cmd << ", \"expire\": " << expire_seconds << ".0";
    }
    cmd << "}";
    cmds.clear();
    cmds.push_back(cmd.str());
    r = mon_command(cmds, inbl, NULL, NULL);
  }
  if (r < 0) {
    return r;
  }

  // ensure we have the latest osd map epoch before proceeding
  r = wait_for_latest_osdmap();
  return r;
}
// Synchronous mon command: block until the async variant completes and
// return its result code.
int librados::RadosClient::mon_command(const vector<string>& cmd,
				       const bufferlist &inbl,
				       bufferlist *outbl, string *outs)
{
  C_SaferCond ctx;
  mon_command_async(cmd, inbl, outbl, outs, &ctx);
  return ctx.wait();
}
// Fire-and-forget mon command.  Ownership of on_finish transfers to the
// completion lambda (held in a unique_ptr) and is released only at the
// moment the Context is completed, so it is not leaked if the lambda is
// destroyed without running.
void librados::RadosClient::mon_command_async(const vector<string>& cmd,
                                             const bufferlist &inbl,
                                             bufferlist *outbl, string *outs,
                                             Context *on_finish)
{
  std::lock_guard l{lock};
  monclient.start_mon_command(cmd, inbl,
			      [outs, outbl,
			       on_finish = std::unique_ptr<Context>(on_finish)]
			      (bs::error_code e,
			       std::string&& s,
			       ceph::bufferlist&& b) mutable {
				if (outs)
				  *outs = std::move(s);
				if (outbl)
				  *outbl = std::move(b);
				if (on_finish)
				  on_finish.release()->complete(
				    ceph::from_error_code(e));
			      });
}
// Synchronous mgr command.  The client lock is dropped for the duration of
// the wait (the completion needs to run without it) and re-taken before
// lock_guard's destructor performs the final unlock.
int librados::RadosClient::mgr_command(const vector<string>& cmd,
				       const bufferlist &inbl,
				       bufferlist *outbl, string *outs)
{
  std::lock_guard l(lock);
  C_SaferCond cond;
  int r = mgrclient.start_command(cmd, inbl, outbl, outs, &cond);
  if (r < 0)
    return r;
  lock.unlock();
  // honor the configured mon op timeout, if any
  if (rados_mon_op_timeout.count() > 0) {
    r = cond.wait_for(rados_mon_op_timeout);
  } else {
    r = cond.wait();
  }
  lock.lock();
  return r;
}
// Synchronous "tell" command addressed to a specific mgr daemon by name.
// Same lock-drop-around-wait pattern as the unnamed mgr_command() overload.
int librados::RadosClient::mgr_command(
  const string& name,
  const vector<string>& cmd,
  const bufferlist &inbl,
  bufferlist *outbl, string *outs)
{
  std::lock_guard l(lock);
  C_SaferCond cond;
  int r = mgrclient.start_tell_command(name, cmd, inbl, outbl, outs, &cond);
  if (r < 0)
    return r;
  lock.unlock();
  // honor the configured mon op timeout, if any
  if (rados_mon_op_timeout.count() > 0) {
    r = cond.wait_for(rados_mon_op_timeout);
  } else {
    r = cond.wait();
  }
  lock.lock();
  return r;
}
// Blocking mon command sent to a specific monitor selected by rank.
int librados::RadosClient::mon_command(int rank, const vector<string>& cmd,
				       const bufferlist &inbl,
				       bufferlist *outbl, string *outs)
{
  bs::error_code ec;
  auto&& [s, bl] = monclient.start_mon_command(rank, cmd, inbl,
					       ca::use_blocked[ec]);
  if (outs)
    *outs = std::move(s);
  if (outbl)
    *outbl = std::move(bl);
  return ceph::from_error_code(ec);
}
// Blocking mon command sent to a specific monitor selected by name.
int librados::RadosClient::mon_command(string name, const vector<string>& cmd,
				       const bufferlist &inbl,
				       bufferlist *outbl, string *outs)
{
  bs::error_code ec;
  auto&& [s, bl] = monclient.start_mon_command(name, cmd, inbl,
					       ca::use_blocked[ec]);
  if (outs)
    *outs = std::move(s);
  if (outbl)
    *outbl = std::move(bl);
  return ceph::from_error_code(ec);
}
// Blocking command to a single OSD.  Note: 'cmd' is moved from and is left
// in a moved-from state for the caller.
int librados::RadosClient::osd_command(int osd, vector<string>& cmd,
				       const bufferlist& inbl,
				       bufferlist *poutbl, string *prs)
{
  ceph_tid_t tid;
  if (osd < 0)
    return -EINVAL;
  // XXX do anything with tid?
  bs::error_code ec;
  auto [s, bl] = objecter->osd_command(osd, std::move(cmd), cb::list(inbl),
				       &tid, ca::use_blocked[ec]);
  if (poutbl)
    *poutbl = std::move(bl);
  if (prs)
    *prs = std::move(s);
  return ceph::from_error_code(ec);
}
// Blocking command addressed to the primary of a specific PG.  Note: 'cmd'
// is moved from and is left in a moved-from state for the caller.
int librados::RadosClient::pg_command(pg_t pgid, vector<string>& cmd,
				      const bufferlist& inbl,
				      bufferlist *poutbl, string *prs)
{
  ceph_tid_t tid;
  bs::error_code ec;
  auto [s, bl] = objecter->pg_command(pgid, std::move(cmd), inbl, &tid,
				      ca::use_blocked[ec]);
  if (poutbl)
    *poutbl = std::move(bl);
  if (prs)
    *prs = std::move(s);
  return ceph::from_error_code(ec);
}
int librados::RadosClient::monitor_log(const string& level,
rados_log_callback_t cb,
rados_log_callback2_t cb2,
void *arg)
{
std::lock_guard l(lock);
if (state != CONNECTED) {
return -ENOTCONN;
}
if (cb == NULL && cb2 == NULL) {
// stop watch
ldout(cct, 10) << __func__ << " removing cb " << (void*)log_cb
<< " " << (void*)log_cb2 << dendl;
monclient.sub_unwant(log_watch);
log_watch.clear();
log_cb = NULL;
log_cb2 = NULL;
log_cb_arg = NULL;
return 0;
}
string watch_level;
if (level == "debug") {
watch_level = "log-debug";
} else if (level == "info") {
watch_level = "log-info";
} else if (level == "warn" || level == "warning") {
watch_level = "log-warn";
} else if (level == "err" || level == "error") {
watch_level = "log-error";
} else if (level == "sec") {
watch_level = "log-sec";
} else {
ldout(cct, 10) << __func__ << " invalid level " << level << dendl;
return -EINVAL;
}
if (log_cb || log_cb2)
monclient.sub_unwant(log_watch);
// (re)start watch
ldout(cct, 10) << __func__ << " add cb " << (void*)cb << " " << (void*)cb2
<< " level " << level << dendl;
monclient.sub_want(watch_level, 0, 0);
monclient.renew_subs();
log_cb = cb;
log_cb2 = cb2;
log_cb_arg = arg;
log_watch = watch_level;
return 0;
}
// Deliver newly received cluster log entries to the registered callback(s)
// and acknowledge the version to the monitor.  Caller must hold 'lock'.
void librados::RadosClient::handle_log(MLog *m)
{
  ceph_assert(ceph_mutex_is_locked(lock));
  ldout(cct, 10) << __func__ << " version " << m->version << dendl;
  // Only process messages newer than what we have already delivered.
  if (log_last_version < m->version) {
    log_last_version = m->version;
    if (log_cb || log_cb2) {
      for (std::deque<LogEntry>::iterator it = m->entries.begin(); it != m->entries.end(); ++it) {
        LogEntry e = *it;
        ostringstream ss;
        ss << e.stamp << " " << e.name << " " << e.prio << " " << e.msg;
        string line = ss.str();
        string who = stringify(e.rank) + " " + stringify(e.addrs);
        string name = stringify(e.name);
        string level = stringify(e.prio);
        struct timespec stamp;
        e.stamp.to_timespec(&stamp);
        ldout(cct, 20) << __func__ << " delivering " << ss.str() << dendl;
        // Legacy (cb) and extended (cb2, with channel/name) callbacks both
        // receive the same pre-formatted line.
        if (log_cb)
          log_cb(log_cb_arg, line.c_str(), who.c_str(),
                 stamp.tv_sec, stamp.tv_nsec,
                 e.seq, level.c_str(), e.msg.c_str());
        if (log_cb2)
          log_cb2(log_cb_arg, line.c_str(),
                  e.channel.c_str(),
                  who.c_str(), name.c_str(),
                  stamp.tv_sec, stamp.tv_nsec,
                  e.seq, level.c_str(), e.msg.c_str());
      }
    }
    // Ack the version so the mon does not resend these entries.
    monclient.sub_got(log_watch, log_last_version);
  }
  // Drop the message reference taken by the dispatcher.
  m->put();
}
// Register this client as a named service daemon with the mgr.  May be
// called before connect(); registration is then performed at connect time
// (DISCONNECTED returns 0, CONNECTING returns -EBUSY).  Returns -EEXIST if
// already registered and -EINVAL for reserved service names or empty args.
int librados::RadosClient::service_daemon_register(
  const std::string& service,  ///< service name (e.g., 'rgw')
  const std::string& name,     ///< daemon name (e.g., 'gwfoo')
  const std::map<std::string,std::string>& metadata)
{
  if (service_daemon) {
    return -EEXIST;
  }
  if (service == "osd" ||
      service == "mds" ||
      service == "client" ||
      service == "mon" ||
      service == "mgr") {
    // normal ceph entity types are not allowed!
    return -EINVAL;
  }
  if (service.empty() || name.empty()) {
    return -EINVAL;
  }
  // Base metadata is host/system info; caller-provided entries are merged
  // on top (insert() keeps existing keys on conflict).
  collect_sys_info(&daemon_metadata, cct);
  ldout(cct,10) << __func__ << " " << service << "." << name << dendl;
  service_daemon = true;
  service_name = service;
  daemon_name = name;
  daemon_metadata.insert(metadata.begin(), metadata.end());
  if (state == DISCONNECTED) {
    return 0;
  }
  if (state == CONNECTING) {
    return -EBUSY;
  }
  mgrclient.service_daemon_register(service_name, daemon_name,
				    daemon_metadata);
  return 0;
}
// Push an updated status map for a registered service daemon to the mgr.
// Only valid once connected.
int librados::RadosClient::service_daemon_update_status(
  std::map<std::string,std::string>&& status)
{
  if (state != CONNECTED) {
    return -ENOTCONN;
  }
  return mgrclient.service_daemon_update_status(std::move(status));
}
// Report the monitor feature set required by the current monmap.
mon_feature_t librados::RadosClient::get_required_monitor_features() const
{
  return monclient.with_monmap([](const MonMap &monmap) {
      return monmap.get_required_features(); } );
}
// Query the mgr for the PGs of the given pool that are in the
// "inconsistent" state and append their ids to *pgs.  Returns 0 on
// success (including "no inconsistent pgs"), or a negative error code.
int librados::RadosClient::get_inconsistent_pgs(int64_t pool_id,
						std::vector<std::string>* pgs)
{
  vector<string> cmd = {
    "{\"prefix\": \"pg ls\","
    "\"pool\": " + std::to_string(pool_id) + ","
    "\"states\": [\"inconsistent\"],"
    "\"format\": \"json\"}"
  };
  bufferlist inbl, outbl;
  string outstring;
  if (auto ret = mgr_command(cmd, inbl, &outbl, &outstring); ret) {
    return ret;
  }
  if (!outbl.length()) {
    // no pg returned
    return 0;
  }
  JSONParser parser;
  if (!parser.parse(outbl.c_str(), outbl.length())) {
    return -EINVAL;
  }
  vector<string> v;
  if (!parser.is_array()) {
    // Newer responses wrap the list in a "pg_stats" member; absence of it
    // simply means there is nothing to report.
    JSONObj *pgstat_obj = parser.find_obj("pg_stats");
    if (!pgstat_obj)
      return 0;
    auto s = pgstat_obj->get_data();
    JSONParser pg_stats;
    if (!pg_stats.parse(s.c_str(), s.length())) {
      return -EINVAL;
    }
    v = pg_stats.get_array_elements();
  } else {
    v = parser.get_array_elements();
  }
  // const ref: avoid copying every per-pg JSON blob on iteration
  for (const auto& i : v) {
    JSONParser pg_json;
    if (!pg_json.parse(i.c_str(), i.length())) {
      return -EINVAL;
    }
    string pgid;
    JSONDecoder::decode_json("pgid", pgid, &pg_json);
    pgs->emplace_back(std::move(pgid));
  }
  return 0;
}
// Config options we want handle_conf_change() notifications for;
// null-terminated, as required by the md_config_obs_t interface.
const char** librados::RadosClient::get_tracked_conf_keys() const
{
  static const char *config_keys[] = {
    "librados_thread_count",
    "rados_mon_op_timeout",
    nullptr
  };
  return config_keys;
}
// React to runtime config changes: resize the worker thread pool (by
// stopping and restarting it) and refresh the cached mon op timeout.
void librados::RadosClient::handle_conf_change(const ConfigProxy& conf,
					       const std::set<std::string> &changed)
{
  if (changed.count("librados_thread_count")) {
    poolctx.stop();
    poolctx.start(conf.get_val<std::uint64_t>("librados_thread_count"));
  }
  if (changed.count("rados_mon_op_timeout")) {
    rados_mon_op_timeout = conf.get_val<std::chrono::seconds>("rados_mon_op_timeout");
  }
}
| 28,810 | 23.29258 | 130 | cc |
null | ceph-main/src/librados/RadosClient.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_RADOSCLIENT_H
#define CEPH_LIBRADOS_RADOSCLIENT_H
#include <functional>
#include <memory>
#include <string>
#include "msg/Dispatcher.h"
#include "common/async/context_pool.h"
#include "common/config_fwd.h"
#include "common/Cond.h"
#include "common/ceph_mutex.h"
#include "common/ceph_time.h"
#include "common/config_obs.h"
#include "include/common_fwd.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "mon/MonClient.h"
#include "mgr/MgrClient.h"
#include "IoCtxImpl.h"
struct Context;
class Message;
class MLog;
class Messenger;
class AioCompletionImpl;
namespace neorados { namespace detail { class RadosClient; }}
// Core librados client: owns the messenger, mon/mgr clients and the
// Objecter, and backs both the C and C++ public APIs.
class librados::RadosClient : public Dispatcher,
			       public md_config_obs_t
{
  friend neorados::detail::RadosClient;
public:
  using Dispatcher::cct;
private:
  std::unique_ptr<CephContext,
		  std::function<void(CephContext*)>> cct_deleter;
public:
  const ConfigProxy& conf{cct->_conf};
  ceph::async::io_context_pool poolctx;
private:
  // Connection lifecycle; see connect()/shutdown().
  enum {
    DISCONNECTED,
    CONNECTING,
    CONNECTED,
  } state{DISCONNECTED};
  MonClient monclient{cct, poolctx};
  MgrClient mgrclient{cct, nullptr, &monclient.monmap};
  Messenger *messenger{nullptr};
  uint64_t instance_id{0};
  // Dispatcher interface: message and connection event handlers.
  bool _dispatch(Message *m);
  bool ms_dispatch(Message *m) override;
  void ms_handle_connect(Connection *con) override;
  bool ms_handle_reset(Connection *con) override;
  void ms_handle_remote_reset(Connection *con) override;
  bool ms_handle_refused(Connection *con) override;
  Objecter *objecter{nullptr};
  ceph::mutex lock = ceph::make_mutex("librados::RadosClient::lock");
  ceph::condition_variable cond;
  int refcnt{1};
  // Cluster log subscription state (see monitor_log()/handle_log()).
  version_t log_last_version{0};
  rados_log_callback_t log_cb{nullptr};
  rados_log_callback2_t log_cb2{nullptr};
  void *log_cb_arg{nullptr};
  std::string log_watch;
  // Service daemon registration state (see service_daemon_register()).
  bool service_daemon = false;
  std::string daemon_name, service_name;
  std::map<std::string,std::string> daemon_metadata;
  ceph::timespan rados_mon_op_timeout{};
  int wait_for_osdmap();
public:
  boost::asio::io_context::strand finish_strand{poolctx.get_io_context()};
  explicit RadosClient(CephContext *cct);
  ~RadosClient() override;
  int ping_monitor(std::string mon_id, std::string *result);
  int connect();
  void shutdown();
  int watch_flush();
  int async_watch_flush(AioCompletionImpl *c);
  uint64_t get_instance_id();
  int get_min_compatible_osd(int8_t* require_osd_release);
  int get_min_compatible_client(int8_t* min_compat_client,
                                int8_t* require_min_compat_client);
  int wait_for_latest_osdmap();
  int create_ioctx(const char *name, IoCtxImpl **io);
  int create_ioctx(int64_t, IoCtxImpl **io);
  int get_fsid(std::string *s);
  int64_t lookup_pool(const char *name);
  bool pool_requires_alignment(int64_t pool_id);
  int pool_requires_alignment2(int64_t pool_id, bool *req);
  uint64_t pool_required_alignment(int64_t pool_id);
  int pool_required_alignment2(int64_t pool_id, uint64_t *alignment);
  int pool_get_name(uint64_t pool_id, std::string *name,
		    bool wait_latest_map = false);
  int pool_list(std::list<std::pair<int64_t, std::string> >& ls);
  int get_pool_stats(std::list<std::string>& ls, std::map<std::string,::pool_stat_t> *result,
		     bool *per_pool);
  int get_fs_stats(ceph_statfs& result);
  bool get_pool_is_selfmanaged_snaps_mode(const std::string& pool);
  /*
  -1 was set as the default value and monitor will pickup the right crush rule with below order:
    a) osd pool default crush replicated rule
    b) the first rule
    c) error out if no value find
  */
  int pool_create(std::string& name, int16_t crush_rule=-1);
  int pool_create_async(std::string& name, PoolAsyncCompletionImpl *c,
			int16_t crush_rule=-1);
  int pool_get_base_tier(int64_t pool_id, int64_t* base_tier);
  int pool_delete(const char *name);
  int pool_delete_async(const char *name, PoolAsyncCompletionImpl *c);
  int blocklist_add(const std::string& client_address, uint32_t expire_seconds);
  // Command plumbing to mons, mgr, OSDs and PGs (blocking unless noted).
  int mon_command(const std::vector<std::string>& cmd, const bufferlist &inbl,
	          bufferlist *outbl, std::string *outs);
  void mon_command_async(const std::vector<std::string>& cmd, const bufferlist &inbl,
                         bufferlist *outbl, std::string *outs, Context *on_finish);
  int mon_command(int rank,
		  const std::vector<std::string>& cmd, const bufferlist &inbl,
		  bufferlist *outbl, std::string *outs);
  int mon_command(std::string name,
		  const std::vector<std::string>& cmd, const bufferlist &inbl,
		  bufferlist *outbl, std::string *outs);
  int mgr_command(const std::vector<std::string>& cmd, const bufferlist &inbl,
	          bufferlist *outbl, std::string *outs);
  int mgr_command(
    const std::string& name,
    const std::vector<std::string>& cmd, const bufferlist &inbl,
    bufferlist *outbl, std::string *outs);
  int osd_command(int osd, std::vector<std::string>& cmd, const bufferlist& inbl,
                  bufferlist *poutbl, std::string *prs);
  int pg_command(pg_t pgid, std::vector<std::string>& cmd, const bufferlist& inbl,
	         bufferlist *poutbl, std::string *prs);
  void handle_log(MLog *m);
  int monitor_log(const std::string& level, rados_log_callback_t cb,
		  rados_log_callback2_t cb2, void *arg);
  void get();
  bool put();
  void blocklist_self(bool set);
  std::string get_addrs() const;
  int service_daemon_register(
    const std::string& service,  ///< service name (e.g., 'rgw')
    const std::string& name,     ///< daemon name (e.g., 'gwfoo')
    const std::map<std::string,std::string>& metadata); ///< static metadata about daemon
  int service_daemon_update_status(
    std::map<std::string,std::string>&& status);
  mon_feature_t get_required_monitor_features() const;
  int get_inconsistent_pgs(int64_t pool_id, std::vector<std::string>* pgs);
  // md_config_obs_t interface.
  const char** get_tracked_conf_keys() const override;
  void handle_conf_change(const ConfigProxy& conf,
			  const std::set <std::string> &changed) override;
};
#endif
| 6,525 | 31.79397 | 96 | h |
null | ceph-main/src/librados/RadosXattrIter.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <stdlib.h>
#include "RadosXattrIter.h"
// Construct an empty iterator; callers populate attrset and reposition i
// before iterating.
librados::RadosXattrsIter::RadosXattrsIter()
  : val(NULL)
{
  i = attrset.end();
}
// Release the last value buffer handed out to the C API.
// NOTE(review): free() implies val is malloc-allocated by the C iterator
// code — confirm against rados_getxattrs_next().
librados::RadosXattrsIter::~RadosXattrsIter()
{
  free(val);
  val = NULL;
}
| 645 | 20.533333 | 70 | cc |
null | ceph-main/src/librados/RadosXattrIter.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOS_XATTRITER_H
#define CEPH_LIBRADOS_XATTRITER_H
#include <string>
#include <map>
#include "include/buffer.h" // for bufferlist
namespace librados {
/**
* iterator object used in implementation of the external
* attributes part of the C interface of librados
*/
  struct RadosXattrsIter {
    RadosXattrsIter();
    ~RadosXattrsIter();
    // snapshot of the object's xattrs being iterated
    std::map<std::string, bufferlist> attrset;
    // current iteration position within attrset
    std::map<std::string, bufferlist>::iterator i;
    // heap buffer for the value most recently returned to the C caller
    char *val;
  };
};
#endif
| 933 | 22.948718 | 70 | h |
null | ceph-main/src/librados/librados_asio.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2017 Red Hat, Inc.
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*/
#ifndef LIBRADOS_ASIO_H
#define LIBRADOS_ASIO_H
#include "include/rados/librados.hpp"
#include "common/async/completion.h"
/// Defines asynchronous librados operations that satisfy all of the
/// "Requirements on asynchronous operations" imposed by the C++ Networking TS
/// in section 13.2.7. Many of the type and variable names below are taken
/// directly from those requirements.
///
/// The current draft of the Networking TS (as of 2017-11-27) is available here:
/// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2017/n4711.pdf
///
/// The boost::asio documentation duplicates these requirements here:
/// http://www.boost.org/doc/libs/1_66_0/doc/html/boost_asio/reference/asynchronous_operations.html
namespace librados {
namespace detail {
#ifndef _WIN32
constexpr auto err_category = boost::system::system_category;
#else
// librados uses "errno.h" error codes. On Windows,
// boost::system::system_category refers to errors from winerror.h.
// That being considered, we'll use boost::system::generic_category.
constexpr auto err_category = boost::system::generic_category;
#endif
/// unique_ptr with custom deleter for AioCompletion
struct AioCompletionDeleter {
  void operator()(AioCompletion *c) { c->release(); }
};
using unique_aio_completion_ptr =
    std::unique_ptr<AioCompletion, AioCompletionDeleter>;
/// Invokes the given completion handler. When the type of Result is not void,
/// storage is provided for it and that result is passed as an additional
/// argument to the handler.
template <typename Result>
struct Invoker {
  using Signature = void(boost::system::error_code, Result);
  Result result;
  template <typename Completion>
  void dispatch(Completion&& completion, boost::system::error_code ec) {
    ceph::async::dispatch(std::move(completion), ec, std::move(result));
  }
};
// specialization for Result=void
template <>
struct Invoker<void> {
  using Signature = void(boost::system::error_code);
  template <typename Completion>
  void dispatch(Completion&& completion, boost::system::error_code ec) {
    ceph::async::dispatch(std::move(completion), ec);
  }
};
/// Per-operation state: owns the AioCompletion and (via Invoker) storage
/// for the result; aio_dispatch() is the C callback bridging back to asio.
template <typename Result>
struct AsyncOp : Invoker<Result> {
  unique_aio_completion_ptr aio_completion;
  using Signature = typename Invoker<Result>::Signature;
  using Completion = ceph::async::Completion<Signature, AsyncOp<Result>>;
  static void aio_dispatch(completion_t cb, void *arg) {
    // reclaim ownership of the completion
    auto p = std::unique_ptr<Completion>{static_cast<Completion*>(arg)};
    // move result out of Completion memory being freed
    auto op = std::move(p->user_data);
    const int ret = op.aio_completion->get_return_value();
    boost::system::error_code ec;
    if (ret < 0) {
      // librados returns negative errno values; map them to error_code
      ec.assign(-ret, librados::detail::err_category());
    }
    op.dispatch(std::move(p), ec);
  }
  template <typename Executor1, typename CompletionHandler>
  static auto create(const Executor1& ex1, CompletionHandler&& handler) {
    auto p = Completion::create(ex1, std::move(handler));
    p->user_data.aio_completion.reset(
        Rados::aio_create_completion(p.get(), aio_dispatch));
    return p;
  }
};
} // namespace detail
/// Calls IoCtx::aio_read() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code, bufferlist).
/// A synchronous submission failure is reported through the handler (via
/// post) rather than thrown.
template <typename ExecutionContext, typename CompletionToken>
auto async_read(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
                size_t len, uint64_t off, CompletionToken&& token)
{
  using Op = detail::AsyncOp<bufferlist>;
  using Signature = typename Op::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto p = Op::create(ctx.get_executor(), init.completion_handler);
  auto& op = p->user_data;
  int ret = io.aio_read(oid, op.aio_completion.get(), &op.result, len, off);
  if (ret < 0) {
    auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
    ceph::async::post(std::move(p), ec, bufferlist{});
  } else {
    p.release(); // release ownership until completion
  }
  return init.result.get();
}
/// Calls IoCtx::aio_write() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code).
/// A synchronous submission failure is reported through the handler (via
/// post) rather than thrown.
template <typename ExecutionContext, typename CompletionToken>
auto async_write(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
                 bufferlist &bl, size_t len, uint64_t off,
                 CompletionToken&& token)
{
  using Op = detail::AsyncOp<void>;
  using Signature = typename Op::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto p = Op::create(ctx.get_executor(), init.completion_handler);
  auto& op = p->user_data;
  int ret = io.aio_write(oid, op.aio_completion.get(), bl, len, off);
  if (ret < 0) {
    auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
    ceph::async::post(std::move(p), ec);
  } else {
    p.release(); // release ownership until completion
  }
  return init.result.get();
}
/// Calls IoCtx::aio_operate() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code, bufferlist).
/// A synchronous submission failure is reported through the handler (via
/// post) rather than thrown.
template <typename ExecutionContext, typename CompletionToken>
auto async_operate(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
                   ObjectReadOperation *read_op, int flags,
                   CompletionToken&& token)
{
  using Op = detail::AsyncOp<bufferlist>;
  using Signature = typename Op::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto p = Op::create(ctx.get_executor(), init.completion_handler);
  auto& op = p->user_data;
  int ret = io.aio_operate(oid, op.aio_completion.get(), read_op,
                           flags, &op.result);
  if (ret < 0) {
    auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
    ceph::async::post(std::move(p), ec, bufferlist{});
  } else {
    p.release(); // release ownership until completion
  }
  return init.result.get();
}
/// Calls IoCtx::aio_operate() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code).
/// A synchronous submission failure is reported through the handler (via
/// post) rather than thrown.
template <typename ExecutionContext, typename CompletionToken>
auto async_operate(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
                   ObjectWriteOperation *write_op, int flags,
                   CompletionToken &&token)
{
  using Op = detail::AsyncOp<void>;
  using Signature = typename Op::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto p = Op::create(ctx.get_executor(), init.completion_handler);
  auto& op = p->user_data;
  int ret = io.aio_operate(oid, op.aio_completion.get(), write_op, flags);
  if (ret < 0) {
    auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
    ceph::async::post(std::move(p), ec);
  } else {
    p.release(); // release ownership until completion
  }
  return init.result.get();
}
/// Calls IoCtx::aio_notify() and arranges for the AioCompletion to call a
/// given handler with signature (boost::system::error_code, bufferlist).
/// A synchronous submission failure is reported through the handler (via
/// post) rather than thrown.
template <typename ExecutionContext, typename CompletionToken>
auto async_notify(ExecutionContext& ctx, IoCtx& io, const std::string& oid,
                  bufferlist& bl, uint64_t timeout_ms, CompletionToken &&token)
{
  using Op = detail::AsyncOp<bufferlist>;
  using Signature = typename Op::Signature;
  boost::asio::async_completion<CompletionToken, Signature> init(token);
  auto p = Op::create(ctx.get_executor(), init.completion_handler);
  auto& op = p->user_data;
  int ret = io.aio_notify(oid, op.aio_completion.get(),
                          bl, timeout_ms, &op.result);
  if (ret < 0) {
    auto ec = boost::system::error_code{-ret, librados::detail::err_category()};
    ceph::async::post(std::move(p), ec, bufferlist{});
  } else {
    p.release(); // release ownership until completion
  }
  return init.result.get();
}
} // namespace librados
#endif // LIBRADOS_ASIO_H
| 8,461 | 36.946188 | 99 | h |
null | ceph-main/src/librados/librados_c.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <limits.h>
#include "acconfig.h"
#include "common/config.h"
#include "common/errno.h"
#include "common/ceph_argparse.h"
#include "common/ceph_json.h"
#include "common/common_init.h"
#include "common/TracepointProvider.h"
#include "common/hobject.h"
#include "common/async/waiter.h"
#include "include/rados/librados.h"
#include "include/types.h"
#include <include/stringify.h>
#include "librados/librados_c.h"
#include "librados/AioCompletionImpl.h"
#include "librados/IoCtxImpl.h"
#include "librados/ObjectOperationImpl.h"
#include "librados/PoolAsyncCompletionImpl.h"
#include "librados/RadosClient.h"
#include "librados/RadosXattrIter.h"
#include "librados/ListObjectImpl.h"
#include "librados/librados_util.h"
#include <cls/lock/cls_lock_client.h>
#include <string>
#include <map>
#include <set>
#include <vector>
#include <list>
#include <stdexcept>
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
#if defined(HAVE_ASM_SYMVER) || defined(HAVE_ATTR_SYMVER)
// prefer __attribute__() over global asm(".symver"). because the latter
// is not parsed by the compiler and is partitioned away by GCC if
// lto-partitions is enabled, in other words, these asm() statements
// are dropped by the -flto option by default. the way to address it is
// to use __attribute__. so this information can be processed by the
// C compiler, and be preserved after LTO partitions the code
#ifdef HAVE_ATTR_SYMVER
#define LIBRADOS_C_API_BASE(fn) \
extern __typeof (_##fn##_base) _##fn##_base __attribute__((__symver__ (#fn "@")))
#define LIBRADOS_C_API_BASE_DEFAULT(fn) \
extern __typeof (_##fn) _##fn __attribute__((__symver__ (#fn "@@")))
#define LIBRADOS_C_API_DEFAULT(fn, ver) \
extern __typeof (_##fn) _##fn __attribute__((__symver__ (#fn "@@LIBRADOS_" #ver)))
#else
#define LIBRADOS_C_API_BASE(fn) \
asm(".symver _" #fn "_base, " #fn "@")
#define LIBRADOS_C_API_BASE_DEFAULT(fn) \
asm(".symver _" #fn ", " #fn "@@")
#define LIBRADOS_C_API_DEFAULT(fn, ver) \
asm(".symver _" #fn ", " #fn "@@LIBRADOS_" #ver)
#endif
#define LIBRADOS_C_API_BASE_F(fn) _ ## fn ## _base
#define LIBRADOS_C_API_DEFAULT_F(fn) _ ## fn
#else
#define LIBRADOS_C_API_BASE(fn)
#define LIBRADOS_C_API_BASE_DEFAULT(fn)
#define LIBRADOS_C_API_DEFAULT(fn, ver)
#define LIBRADOS_C_API_BASE_F(fn) _ ## fn ## _base
// There shouldn't be multiple default versions of the same
// function.
#define LIBRADOS_C_API_DEFAULT_F(fn) fn
#endif
using std::ostringstream;
using std::pair;
using std::string;
using std::map;
using std::set;
using std::vector;
using std::list;
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
#define RADOS_LIST_MAX_ENTRIES 1024
static TracepointProvider::Traits tracepoint_traits("librados_tp.so", "rados_tracing");
/*
* Structure of this file
*
* RadosClient and the related classes are the internal implementation of librados.
* Above that layer sits the C API, found in include/rados/librados.h, and
* the C++ API, found in include/rados/librados.hpp
*
* The C++ API sometimes implements things in terms of the C API.
* Both the C++ and C API rely on RadosClient.
*
* Visually:
* +--------------------------------------+
* | C++ API |
* +--------------------+ |
* | C API | |
* +--------------------+-----------------+
* | RadosClient |
* +--------------------------------------+
*/
///////////////////////////// C API //////////////////////////////
// Build a CephContext for library use (no global_init): applies the cluster
// name, environment overrides, and wires up tracing.
static CephContext *rados_create_cct(
  const char * const clustername,
  CephInitParameters *iparams)
{
  // missing things compared to global_init:
  // g_ceph_context, g_conf, g_lockdep, signal handlers
  CephContext *cct = common_preinit(*iparams, CODE_ENVIRONMENT_LIBRARY, 0);
  if (clustername)
    cct->_conf->cluster = clustername;
  cct->_conf.parse_env(cct->get_module_type()); // environment variables override
  cct->_conf.apply_changes(nullptr);
  TracepointProvider::initialize<tracepoint_traits>(cct);
  return cct;
}
// C API: create a cluster handle as "client.<id>" (id may be NULL).
// The RadosClient takes its own reference on cct, so we drop ours.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_create)(
  rados_t *pcluster,
  const char * const id)
{
  CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
  if (id) {
    iparams.name.set(CEPH_ENTITY_TYPE_CLIENT, id);
  }
  CephContext *cct = rados_create_cct("", &iparams);
  tracepoint(librados, rados_create_enter, id);
  *pcluster = reinterpret_cast<rados_t>(new librados::RadosClient(cct));
  tracepoint(librados, rados_create_exit, 0, *pcluster);
  cct->put();
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create);
// as above, but
// 1) don't assume 'client.'; name is a full type.id namestr
// 2) allow setting clustername
// 3) flags is for future expansion (maybe some of the global_init()
//    behavior is appropriate for some consumers of librados, for instance)
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_create2)(
  rados_t *pcluster,
  const char *const clustername,
  const char * const name,
  uint64_t flags)
{
  // client is assumed, but from_str will override
  int retval = 0;
  CephInitParameters iparams(CEPH_ENTITY_TYPE_CLIENT);
  if (!name || !iparams.name.from_str(name)) {
    retval = -EINVAL;
  }
  CephContext *cct = rados_create_cct(clustername, &iparams);
  tracepoint(librados, rados_create2_enter, clustername, name, flags);
  if (retval == 0) {
    *pcluster = reinterpret_cast<rados_t>(new librados::RadosClient(cct));
  }
  // NOTE(review): on the -EINVAL path *pcluster was never written, so this
  // tracepoint reads an indeterminate pointer value — confirm whether the
  // exit trace should be guarded on retval == 0.
  tracepoint(librados, rados_create2_exit, retval, *pcluster);
  cct->put();
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create2);
/* This function is intended for use by Ceph daemons. These daemons have
 * already called global_init and want to use that particular configuration for
 * their cluster.
 */
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_create_with_context)(
  rados_t *pcluster,
  rados_config_t cct_)
{
  // Caller retains ownership of the CephContext; no ref is dropped here.
  CephContext *cct = (CephContext *)cct_;
  TracepointProvider::initialize<tracepoint_traits>(cct);
  tracepoint(librados, rados_create_with_context_enter, cct_);
  librados::RadosClient *radosp = new librados::RadosClient(cct);
  *pcluster = (void *)radosp;
  tracepoint(librados, rados_create_with_context_exit, 0, *pcluster);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create_with_context);
// C API: expose the cluster handle's CephContext as an opaque config handle.
extern "C" rados_config_t LIBRADOS_C_API_DEFAULT_F(rados_cct)(rados_t cluster)
{
  tracepoint(librados, rados_cct_enter, cluster);
  librados::RadosClient *client = (librados::RadosClient *)cluster;
  rados_config_t retval = (rados_config_t)client->cct;
  tracepoint(librados, rados_cct_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cct);
// C API: connect the cluster handle to the cluster (blocking).
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_connect)(rados_t cluster)
{
  tracepoint(librados, rados_connect_enter, cluster);
  librados::RadosClient *client = (librados::RadosClient *)cluster;
  int retval = client->connect();
  tracepoint(librados, rados_connect_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_connect);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_shutdown)(rados_t cluster)
{
tracepoint(librados, rados_shutdown_enter, cluster);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
radosp->shutdown();
delete radosp;
tracepoint(librados, rados_shutdown_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_shutdown);
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_get_instance_id)(
rados_t cluster)
{
tracepoint(librados, rados_get_instance_id_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
uint64_t retval = client->get_instance_id();
tracepoint(librados, rados_get_instance_id_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_instance_id);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_get_min_compatible_osd)(
rados_t cluster,
int8_t* require_osd_release)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
return client->get_min_compatible_osd(require_osd_release);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_min_compatible_osd);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_get_min_compatible_client)(
rados_t cluster,
int8_t* min_compat_client,
int8_t* require_min_compat_client)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
return client->get_min_compatible_client(min_compat_client,
require_min_compat_client);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_min_compatible_client);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_version)(
int *major, int *minor, int *extra)
{
tracepoint(librados, rados_version_enter, major, minor, extra);
if (major)
*major = LIBRADOS_VER_MAJOR;
if (minor)
*minor = LIBRADOS_VER_MINOR;
if (extra)
*extra = LIBRADOS_VER_EXTRA;
tracepoint(librados, rados_version_exit, LIBRADOS_VER_MAJOR, LIBRADOS_VER_MINOR, LIBRADOS_VER_EXTRA);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_version);
// -- config --
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_read_file)(
rados_t cluster,
const char *path_list)
{
tracepoint(librados, rados_conf_read_file_enter, cluster, path_list);
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
ostringstream warnings;
int ret = conf.parse_config_files(path_list, &warnings, 0);
if (ret) {
if (warnings.tellp() > 0)
lderr(client->cct) << warnings.str() << dendl;
client->cct->_conf.complain_about_parse_error(client->cct);
tracepoint(librados, rados_conf_read_file_exit, ret);
return ret;
}
conf.parse_env(client->cct->get_module_type()); // environment variables override
conf.apply_changes(nullptr);
client->cct->_conf.complain_about_parse_error(client->cct);
tracepoint(librados, rados_conf_read_file_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_read_file);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_parse_argv)(
rados_t cluster,
int argc,
const char **argv)
{
tracepoint(librados, rados_conf_parse_argv_enter, cluster, argc);
int i;
for(i = 0; i < argc; i++) {
tracepoint(librados, rados_conf_parse_argv_arg, argv[i]);
}
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
auto args = argv_to_vec(argc, argv);
int ret = conf.parse_argv(args);
if (ret) {
tracepoint(librados, rados_conf_parse_argv_exit, ret);
return ret;
}
conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_parse_argv_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_parse_argv);
// like above, but return the remainder of argv to contain remaining
// unparsed args. Must be allocated to at least argc by caller.
// remargv will contain n <= argc pointers to original argv[], the end
// of which may be NULL
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_parse_argv_remainder)(
rados_t cluster, int argc,
const char **argv,
const char **remargv)
{
tracepoint(librados, rados_conf_parse_argv_remainder_enter, cluster, argc);
unsigned int i;
for(i = 0; i < (unsigned int) argc; i++) {
tracepoint(librados, rados_conf_parse_argv_remainder_arg, argv[i]);
}
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
vector<const char*> args;
for (int i=0; i<argc; i++)
args.push_back(argv[i]);
int ret = conf.parse_argv(args);
if (ret) {
tracepoint(librados, rados_conf_parse_argv_remainder_exit, ret);
return ret;
}
conf.apply_changes(NULL);
ceph_assert(args.size() <= (unsigned int)argc);
for (i = 0; i < (unsigned int)argc; ++i) {
if (i < args.size())
remargv[i] = args[i];
else
remargv[i] = (const char *)NULL;
tracepoint(librados, rados_conf_parse_argv_remainder_remarg, remargv[i]);
}
tracepoint(librados, rados_conf_parse_argv_remainder_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_parse_argv_remainder);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_parse_env)(
rados_t cluster, const char *env)
{
tracepoint(librados, rados_conf_parse_env_enter, cluster, env);
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
conf.parse_env(client->cct->get_module_type(), env);
conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_parse_env_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_parse_env);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_set)(
rados_t cluster,
const char *option,
const char *value)
{
tracepoint(librados, rados_conf_set_enter, cluster, option, value);
librados::RadosClient *client = (librados::RadosClient *)cluster;
auto& conf = client->cct->_conf;
int ret = conf.set_val(option, value);
if (ret) {
tracepoint(librados, rados_conf_set_exit, ret);
return ret;
}
conf.apply_changes(nullptr);
tracepoint(librados, rados_conf_set_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_set);
/* cluster info */
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cluster_stat)(
rados_t cluster,
rados_cluster_stat_t *result)
{
tracepoint(librados, rados_cluster_stat_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
ceph_statfs stats;
int r = client->get_fs_stats(stats);
result->kb = stats.kb;
result->kb_used = stats.kb_used;
result->kb_avail = stats.kb_avail;
result->num_objects = stats.num_objects;
tracepoint(librados, rados_cluster_stat_exit, r, result->kb, result->kb_used, result->kb_avail, result->num_objects);
return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cluster_stat);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_conf_get)(
rados_t cluster,
const char *option,
char *buf, size_t len)
{
tracepoint(librados, rados_conf_get_enter, cluster, option, len);
char *tmp = buf;
librados::RadosClient *client = (librados::RadosClient *)cluster;
const auto& conf = client->cct->_conf;
int retval = conf.get_val(option, &tmp, len);
tracepoint(librados, rados_conf_get_exit, retval, retval ? "" : option);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_conf_get);
extern "C" int64_t LIBRADOS_C_API_DEFAULT_F(rados_pool_lookup)(
rados_t cluster,
const char *name)
{
tracepoint(librados, rados_pool_lookup_enter, cluster, name);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
int64_t retval = radosp->lookup_pool(name);
tracepoint(librados, rados_pool_lookup_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_lookup);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_reverse_lookup)(
rados_t cluster,
int64_t id,
char *buf,
size_t maxlen)
{
tracepoint(librados, rados_pool_reverse_lookup_enter, cluster, id, maxlen);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
std::string name;
int r = radosp->pool_get_name(id, &name, true);
if (r < 0) {
tracepoint(librados, rados_pool_reverse_lookup_exit, r, "");
return r;
}
if (name.length() >= maxlen) {
tracepoint(librados, rados_pool_reverse_lookup_exit, -ERANGE, "");
return -ERANGE;
}
strcpy(buf, name.c_str());
int retval = name.length();
tracepoint(librados, rados_pool_reverse_lookup_exit, retval, buf);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_reverse_lookup);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cluster_fsid)(
rados_t cluster,
char *buf,
size_t maxlen)
{
tracepoint(librados, rados_cluster_fsid_enter, cluster, maxlen);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
std::string fsid;
radosp->get_fsid(&fsid);
if (fsid.length() >= maxlen) {
tracepoint(librados, rados_cluster_fsid_exit, -ERANGE, "");
return -ERANGE;
}
strcpy(buf, fsid.c_str());
int retval = fsid.length();
tracepoint(librados, rados_cluster_fsid_exit, retval, buf);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cluster_fsid);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_wait_for_latest_osdmap)(
rados_t cluster)
{
tracepoint(librados, rados_wait_for_latest_osdmap_enter, cluster);
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
int retval = radosp->wait_for_latest_osdmap();
tracepoint(librados, rados_wait_for_latest_osdmap_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_wait_for_latest_osdmap);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_blocklist_add)(
rados_t cluster,
char *client_address,
uint32_t expire_seconds)
{
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
return radosp->blocklist_add(client_address, expire_seconds);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_blocklist_add);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_blacklist_add)(
rados_t cluster,
char *client_address,
uint32_t expire_seconds)
{
return LIBRADOS_C_API_DEFAULT_F(rados_blocklist_add)(
cluster, client_address, expire_seconds);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_blacklist_add);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getaddrs)(
rados_t cluster,
char** addrs)
{
librados::RadosClient *radosp = (librados::RadosClient *)cluster;
auto s = radosp->get_addrs();
*addrs = strdup(s.c_str());
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getaddrs);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_set_osdmap_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_osdmap_full_try);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_unset_osdmap_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unset_osdmap_full_try);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_set_pool_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_pool_full_try);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_unset_pool_full_try)(
rados_ioctx_t io)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unset_pool_full_try);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_enable)(
rados_ioctx_t io,
const char *app_name,
int force)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->application_enable(app_name, force != 0);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_enable);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_list)(
rados_ioctx_t io,
char *values,
size_t *values_len)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::set<std::string> app_names;
int r = ctx->application_list(&app_names);
if (r < 0) {
return r;
}
size_t total_len = 0;
for (auto app_name : app_names) {
total_len += app_name.size() + 1;
}
if (*values_len < total_len) {
*values_len = total_len;
return -ERANGE;
}
char *values_p = values;
for (auto app_name : app_names) {
size_t len = app_name.size() + 1;
strncpy(values_p, app_name.c_str(), len);
values_p += len;
}
*values_p = '\0';
*values_len = total_len;
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_list);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_get)(
rados_ioctx_t io,
const char *app_name,
const char *key,
char *value,
size_t *value_len)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::string value_str;
int r = ctx->application_metadata_get(app_name, key, &value_str);
if (r < 0) {
return r;
}
size_t len = value_str.size() + 1;
if (*value_len < len) {
*value_len = len;
return -ERANGE;
}
strncpy(value, value_str.c_str(), len);
*value_len = len;
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_get);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_set)(
rados_ioctx_t io,
const char *app_name,
const char *key,
const char *value)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->application_metadata_set(app_name, key, value);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_set);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_remove)(
rados_ioctx_t io,
const char *app_name,
const char *key)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->application_metadata_remove(app_name, key);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_application_metadata_list)(
rados_ioctx_t io,
const char *app_name,
char *keys, size_t *keys_len,
char *values, size_t *vals_len)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
std::map<std::string, std::string> metadata;
int r = ctx->application_metadata_list(app_name, &metadata);
if (r < 0) {
return r;
}
size_t total_key_len = 0;
size_t total_val_len = 0;
for (auto pair : metadata) {
total_key_len += pair.first.size() + 1;
total_val_len += pair.second.size() + 1;
}
if (*keys_len < total_key_len || *vals_len < total_val_len) {
*keys_len = total_key_len;
*vals_len = total_val_len;
return -ERANGE;
}
char *keys_p = keys;
char *vals_p = values;
for (auto pair : metadata) {
size_t key_len = pair.first.size() + 1;
strncpy(keys_p, pair.first.c_str(), key_len);
keys_p += key_len;
size_t val_len = pair.second.size() + 1;
strncpy(vals_p, pair.second.c_str(), val_len);
vals_p += val_len;
}
*keys_p = '\0';
*keys_len = total_key_len;
*vals_p = '\0';
*vals_len = total_val_len;
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_application_metadata_list);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_list)(
rados_t cluster,
char *buf,
size_t len)
{
tracepoint(librados, rados_pool_list_enter, cluster, len);
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::list<std::pair<int64_t, std::string> > pools;
int r = client->pool_list(pools);
if (r < 0) {
tracepoint(librados, rados_pool_list_exit, r);
return r;
}
if (len > 0 && !buf) {
tracepoint(librados, rados_pool_list_exit, -EINVAL);
return -EINVAL;
}
char *b = buf;
if (b) {
// FIPS zeroization audit 20191116: this memset is not security related.
memset(b, 0, len);
}
int needed = 0;
std::list<std::pair<int64_t, std::string> >::const_iterator i = pools.begin();
std::list<std::pair<int64_t, std::string> >::const_iterator p_end =
pools.end();
for (; i != p_end; ++i) {
int rl = i->second.length() + 1;
if (len < (unsigned)rl)
break;
const char* pool = i->second.c_str();
tracepoint(librados, rados_pool_list_pool, pool);
if (b) {
strncat(b, pool, rl);
b += rl;
}
needed += rl;
len -= rl;
}
for (; i != p_end; ++i) {
int rl = i->second.length() + 1;
needed += rl;
}
int retval = needed + 1;
tracepoint(librados, rados_pool_list_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_list);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_inconsistent_pg_list)(
rados_t cluster,
int64_t pool_id,
char *buf,
size_t len)
{
tracepoint(librados, rados_inconsistent_pg_list_enter, cluster, pool_id, len);
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::vector<std::string> pgs;
if (int r = client->get_inconsistent_pgs(pool_id, &pgs); r < 0) {
tracepoint(librados, rados_inconsistent_pg_list_exit, r);
return r;
}
if (len > 0 && !buf) {
tracepoint(librados, rados_inconsistent_pg_list_exit, -EINVAL);
return -EINVAL;
}
char *b = buf;
if (b) {
// FIPS zeroization audit 20191116: this memset is not security related.
memset(b, 0, len);
}
int needed = 0;
for (const auto& s : pgs) {
unsigned rl = s.length() + 1;
if (b && len >= rl) {
tracepoint(librados, rados_inconsistent_pg_list_pg, s.c_str());
strncat(b, s.c_str(), rl);
b += rl;
len -= rl;
}
needed += rl;
}
int retval = needed + 1;
tracepoint(librados, rados_inconsistent_pg_list_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_inconsistent_pg_list);
// Unpack a flat C "dict" — alternating NUL-terminated key and value strings,
// terminated by an empty key (a double NUL) — into an STL map.  Later
// occurrences of a key overwrite earlier ones.
static void dict_to_map(const char *dict,
                        std::map<std::string, std::string>* dict_map)
{
  const char *cursor = dict;
  while (*cursor != '\0') {
    std::string key(cursor);
    cursor += key.size() + 1;
    std::string value(cursor);
    cursor += value.size() + 1;
    (*dict_map)[key] = value;
  }
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_service_register)(
rados_t cluster,
const char *service,
const char *daemon,
const char *metadata_dict)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::map<std::string, std::string> metadata;
dict_to_map(metadata_dict, &metadata);
return client->service_daemon_register(service, daemon, metadata);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_service_register);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_service_update_status)(
rados_t cluster,
const char *status_dict)
{
librados::RadosClient *client = (librados::RadosClient *)cluster;
std::map<std::string, std::string> status;
dict_to_map(status_dict, &status);
return client->service_daemon_update_status(std::move(status));
}
LIBRADOS_C_API_BASE_DEFAULT(rados_service_update_status);
// Copy a bufferlist into a fresh malloc()ed buffer for C callers.
// - outbuf (optional): receives the allocation, or NULL when outbl is empty;
//   the caller frees it (rados_buffer_free()).
// - outbuflen (optional): receives the length in bytes.
// NOTE(review): the malloc() result is not checked; on allocation failure
// the memcpy would dereference NULL.
static void do_out_buffer(bufferlist& outbl, char **outbuf, size_t *outbuflen)
{
  if (outbuf) {
    if (outbl.length() > 0) {
      *outbuf = (char *)malloc(outbl.length());
      memcpy(*outbuf, outbl.c_str(), outbl.length());
    } else {
      *outbuf = NULL;
    }
  }
  if (outbuflen)
    *outbuflen = outbl.length();
}
// std::string overload of do_out_buffer(): hand the string's bytes to a C
// caller in a malloc()ed buffer (NULL when empty), and report the length.
// Both out-pointers are optional.
static void do_out_buffer(string& outbl, char **outbuf, size_t *outbuflen)
{
  const size_t n = outbl.length();
  if (outbuf) {
    if (n == 0) {
      *outbuf = NULL;
    } else {
      char *copy = (char *)malloc(n);
      memcpy(copy, outbl.data(), n);
      *outbuf = copy;
    }
  }
  if (outbuflen)
    *outbuflen = n;
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ping_monitor)(
rados_t cluster,
const char *mon_id,
char **outstr,
size_t *outstrlen)
{
tracepoint(librados, rados_ping_monitor_enter, cluster, mon_id);
librados::RadosClient *client = (librados::RadosClient *)cluster;
string str;
if (!mon_id) {
tracepoint(librados, rados_ping_monitor_exit, -EINVAL, NULL, NULL);
return -EINVAL;
}
int ret = client->ping_monitor(mon_id, &str);
if (ret == 0) {
do_out_buffer(str, outstr, outstrlen);
}
tracepoint(librados, rados_ping_monitor_exit, ret, ret < 0 ? NULL : outstr, ret < 0 ? NULL : outstrlen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ping_monitor);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mon_command)(
rados_t cluster,
const char **cmd, size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mon_command_enter, cluster, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mon_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->mon_command(cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mon_command_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mon_command);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mon_command_target)(
rados_t cluster,
const char *name,
const char **cmd, size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mon_command_target_enter, cluster, name, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
// is this a numeric id?
char *endptr;
errno = 0;
long rank = strtol(name, &endptr, 10);
if ((errno == ERANGE && (rank == LONG_MAX || rank == LONG_MIN)) ||
(errno != 0 && rank == 0) ||
endptr == name || // no digits
*endptr != '\0') { // extra characters
rank = -1;
}
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mon_command_target_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret;
if (rank >= 0)
ret = client->mon_command(rank, cmdvec, inbl, &outbl, &outstring);
else
ret = client->mon_command(name, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mon_command_target_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mon_command_target);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_osd_command)(
rados_t cluster, int osdid, const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_osd_command_enter, cluster, osdid, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_osd_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->osd_command(osdid, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_osd_command_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_osd_command);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mgr_command)(
rados_t cluster, const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mgr_command_enter, cluster, cmdlen, inbuf,
inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mgr_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->mgr_command(cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mgr_command_exit, ret, outbuf, outbuflen, outs,
outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mgr_command);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_mgr_command_target)(
rados_t cluster,
const char *name,
const char **cmd,
size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_mgr_command_target_enter, cluster, name, cmdlen,
inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_mgr_command_target_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
int ret = client->mgr_command(name, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_mgr_command_target_exit, ret, outbuf, outbuflen,
outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_mgr_command_target);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pg_command)(
rados_t cluster, const char *pgstr,
const char **cmd, size_t cmdlen,
const char *inbuf, size_t inbuflen,
char **outbuf, size_t *outbuflen,
char **outs, size_t *outslen)
{
tracepoint(librados, rados_pg_command_enter, cluster, pgstr, cmdlen, inbuf, inbuflen);
librados::RadosClient *client = (librados::RadosClient *)cluster;
bufferlist inbl;
bufferlist outbl;
string outstring;
pg_t pgid;
vector<string> cmdvec;
for (size_t i = 0; i < cmdlen; i++) {
tracepoint(librados, rados_pg_command_cmd, cmd[i]);
cmdvec.push_back(cmd[i]);
}
inbl.append(inbuf, inbuflen);
if (!pgid.parse(pgstr))
return -EINVAL;
int ret = client->pg_command(pgid, cmdvec, inbl, &outbl, &outstring);
do_out_buffer(outbl, outbuf, outbuflen);
do_out_buffer(outstring, outs, outslen);
tracepoint(librados, rados_pg_command_exit, ret, outbuf, outbuflen, outs, outslen);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pg_command);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(char *buf)
{
tracepoint(librados, rados_buffer_free_enter, buf);
if (buf)
free(buf);
tracepoint(librados, rados_buffer_free_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_buffer_free);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_monitor_log)(
rados_t cluster,
const char *level,
rados_log_callback_t cb,
void *arg)
{
tracepoint(librados, rados_monitor_log_enter, cluster, level, cb, arg);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->monitor_log(level, cb, nullptr, arg);
tracepoint(librados, rados_monitor_log_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_monitor_log);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_monitor_log2)(
rados_t cluster,
const char *level,
rados_log_callback2_t cb,
void *arg)
{
tracepoint(librados, rados_monitor_log2_enter, cluster, level, cb, arg);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->monitor_log(level, nullptr, cb, arg);
tracepoint(librados, rados_monitor_log2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_monitor_log2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_create)(
rados_t cluster,
const char *name,
rados_ioctx_t *io)
{
tracepoint(librados, rados_ioctx_create_enter, cluster, name);
librados::RadosClient *client = (librados::RadosClient *)cluster;
librados::IoCtxImpl *ctx;
int r = client->create_ioctx(name, &ctx);
if (r < 0) {
tracepoint(librados, rados_ioctx_create_exit, r, NULL);
return r;
}
*io = ctx;
ctx->get();
tracepoint(librados, rados_ioctx_create_exit, 0, ctx);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_create);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_create2)(
rados_t cluster,
int64_t pool_id,
rados_ioctx_t *io)
{
tracepoint(librados, rados_ioctx_create2_enter, cluster, pool_id);
librados::RadosClient *client = (librados::RadosClient *)cluster;
librados::IoCtxImpl *ctx;
int r = client->create_ioctx(pool_id, &ctx);
if (r < 0) {
tracepoint(librados, rados_ioctx_create2_exit, r, NULL);
return r;
}
*io = ctx;
ctx->get();
tracepoint(librados, rados_ioctx_create2_exit, 0, ctx);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_create2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_destroy)(rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_destroy_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (ctx) {
ctx->put();
}
tracepoint(librados, rados_ioctx_destroy_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_destroy);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_stat)(
rados_ioctx_t io,
struct rados_pool_stat_t *stats)
{
tracepoint(librados, rados_ioctx_pool_stat_enter, io);
librados::IoCtxImpl *io_ctx_impl = (librados::IoCtxImpl *)io;
list<string> ls;
std::string pool_name;
int err = io_ctx_impl->client->pool_get_name(io_ctx_impl->get_id(), &pool_name);
if (err) {
tracepoint(librados, rados_ioctx_pool_stat_exit, err, stats);
return err;
}
ls.push_back(pool_name);
map<string, ::pool_stat_t> rawresult;
bool per_pool = false;
err = io_ctx_impl->client->get_pool_stats(ls, &rawresult, &per_pool);
if (err) {
tracepoint(librados, rados_ioctx_pool_stat_exit, err, stats);
return err;
}
::pool_stat_t& r = rawresult[pool_name];
uint64_t allocated_bytes = r.get_allocated_data_bytes(per_pool) +
r.get_allocated_omap_bytes(per_pool);
// FIXME: raw_used_rate is unknown hence use 1.0 here
// meaning we keep net amount aggregated over all replicas
// Not a big deal so far since this field isn't exposed
uint64_t user_bytes = r.get_user_data_bytes(1.0, per_pool) +
r.get_user_omap_bytes(1.0, per_pool);
stats->num_kb = shift_round_up(allocated_bytes, 10);
stats->num_bytes = allocated_bytes;
stats->num_objects = r.stats.sum.num_objects;
stats->num_object_clones = r.stats.sum.num_object_clones;
stats->num_object_copies = r.stats.sum.num_object_copies;
stats->num_objects_missing_on_primary = r.stats.sum.num_objects_missing_on_primary;
stats->num_objects_unfound = r.stats.sum.num_objects_unfound;
stats->num_objects_degraded =
r.stats.sum.num_objects_degraded +
r.stats.sum.num_objects_misplaced; // FIXME: this is imprecise
stats->num_rd = r.stats.sum.num_rd;
stats->num_rd_kb = r.stats.sum.num_rd_kb;
stats->num_wr = r.stats.sum.num_wr;
stats->num_wr_kb = r.stats.sum.num_wr_kb;
stats->num_user_bytes = user_bytes;
stats->compressed_bytes_orig = r.store_stats.data_compressed_original;
stats->compressed_bytes = r.store_stats.data_compressed;
stats->compressed_bytes_alloc = r.store_stats.data_compressed_allocated;
tracepoint(librados, rados_ioctx_pool_stat_exit, 0, stats);
return 0;
}
LIBRADOS_C_API_DEFAULT(rados_ioctx_pool_stat, 14.2.0);
extern "C" int LIBRADOS_C_API_BASE_F(rados_ioctx_pool_stat)(
rados_ioctx_t io, struct __librados_base::rados_pool_stat_t *stats)
{
struct rados_pool_stat_t new_stats;
int r = LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_stat)(io, &new_stats);
if (r < 0) {
return r;
}
stats->num_bytes = new_stats.num_bytes;
stats->num_kb = new_stats.num_kb;
stats->num_objects = new_stats.num_objects;
stats->num_object_clones = new_stats.num_object_clones;
stats->num_object_copies = new_stats.num_object_copies;
stats->num_objects_missing_on_primary = new_stats.num_objects_missing_on_primary;
stats->num_objects_unfound = new_stats.num_objects_unfound;
stats->num_objects_degraded = new_stats.num_objects_degraded;
stats->num_rd = new_stats.num_rd;
stats->num_rd_kb = new_stats.num_rd_kb;
stats->num_wr = new_stats.num_wr;
stats->num_wr_kb = new_stats.num_wr_kb;
return 0;
}
LIBRADOS_C_API_BASE(rados_ioctx_pool_stat);
extern "C" rados_config_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_cct)(
rados_ioctx_t io)
{
tracepoint(librados, rados_ioctx_cct_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
rados_config_t retval = (rados_config_t)ctx->client->cct;
tracepoint(librados, rados_ioctx_cct_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_cct);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_set_read)(
rados_ioctx_t io,
rados_snap_t seq)
{
tracepoint(librados, rados_ioctx_snap_set_read_enter, io, seq);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->set_snap_read((snapid_t)seq);
tracepoint(librados, rados_ioctx_snap_set_read_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_set_read);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_set_write_ctx)(
rados_ioctx_t io,
rados_snap_t seq,
rados_snap_t *snaps,
int num_snaps)
{
tracepoint(librados, rados_ioctx_selfmanaged_snap_set_write_ctx_enter, io, seq, snaps, num_snaps);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
vector<snapid_t> snv;
snv.resize(num_snaps);
for (int i=0; i<num_snaps; i++) {
snv[i] = (snapid_t)snaps[i];
}
int retval = ctx->set_snap_write_context((snapid_t)seq, snv);
tracepoint(librados, rados_ioctx_selfmanaged_snap_set_write_ctx_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_set_write_ctx);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write)(
rados_ioctx_t io,
const char *o,
const char *buf,
size_t len,
uint64_t off)
{
tracepoint(librados, rados_write_enter, io, o, buf, len, off);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->write(oid, bl, len, off);
tracepoint(librados, rados_write_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write);
// Synchronously append len bytes from buf to the end of object o.
// Returns 0 on success or a negative errno; oversized appends are rejected
// with -E2BIG, matching rados_write().
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_append)(
  rados_ioctx_t io,
  const char *o,
  const char *buf,
  size_t len)
{
  tracepoint(librados, rados_append_enter, io, o, buf, len);
  if (len > UINT_MAX/2)
    return -E2BIG;
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist bl;
  bl.append(buf, len);
  int retval = ctx->append(oid, bl, len);
  tracepoint(librados, rados_append_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_append);
// Atomically replace the entire contents of object o with the len bytes in
// buf (truncating any previous data).  Returns 0 or a negative errno;
// oversized payloads are rejected with -E2BIG.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write_full)(
  rados_ioctx_t io,
  const char *o,
  const char *buf,
  size_t len)
{
  tracepoint(librados, rados_write_full_enter, io, o, buf, len);
  if (len > UINT_MAX/2)
    return -E2BIG;
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist bl;
  bl.append(buf, len);
  int retval = ctx->write_full(oid, bl);
  tracepoint(librados, rados_write_full_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_full);
// Write the data_len-byte pattern in buf repeatedly to object o, covering
// write_len bytes starting at offset off.  Returns 0 or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_writesame)(
  rados_ioctx_t io,
  const char *o,
  const char *buf,
  size_t data_len,
  size_t write_len,
  uint64_t off)
{
  tracepoint(librados, rados_writesame_enter, io, o, buf, data_len, write_len, off);
  // Reject oversized lengths up front with -E2BIG, consistent with
  // rados_write()/rados_append()/rados_write_full() above (this path was
  // previously missing the guard).
  if (data_len > UINT_MAX/2 || write_len > UINT_MAX/2) {
    tracepoint(librados, rados_writesame_exit, -E2BIG);
    return -E2BIG;
  }
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist bl;
  bl.append(buf, data_len);
  int retval = ctx->writesame(oid, bl, write_len, off);
  tracepoint(librados, rados_writesame_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_writesame);
// Resize object o to exactly size bytes (truncate or zero-extend).
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_trunc)(
  rados_ioctx_t io,
  const char *o,
  uint64_t size)
{
  tracepoint(librados, rados_trunc_enter, io, o, size);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->trunc(oid, size);
  tracepoint(librados, rados_trunc_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_trunc);
// Delete object o from the pool.  Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_remove)(
  rados_ioctx_t io,
  const char *o)
{
  tracepoint(librados, rados_remove_enter, io, o);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->remove(oid);
  tracepoint(librados, rados_remove_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_remove);
// Synchronously read up to len bytes from object o at offset off into buf.
// Returns the number of bytes read (may be short at EOF), or a negative
// errno; -ERANGE if the reply unexpectedly exceeds the caller's buffer.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_read)(
  rados_ioctx_t io,
  const char *o,
  char *buf,
  size_t len,
  uint64_t off)
{
  tracepoint(librados, rados_read_enter, io, o, buf, len, off);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int ret;
  object_t oid(o);
  bufferlist bl;
  // Wrap the caller's buffer so the read can land in it directly, avoiding a
  // copy when the messenger delivers into our provided buffer.
  bufferptr bp = buffer::create_static(len, buf);
  bl.push_back(bp);
  ret = ctx->read(oid, bl, len, off);
  if (ret >= 0) {
    if (bl.length() > len) {
      tracepoint(librados, rados_read_exit, -ERANGE, NULL);
      return -ERANGE;
    }
    // If the data ended up in an internal buffer instead, copy it out now.
    if (!bl.is_provided_buffer(buf))
      bl.begin().copy(bl.length(), buf);
    ret = bl.length(); // hrm :/
  }
  tracepoint(librados, rados_read_exit, ret, buf);
  return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read);
// Compute a checksum of type `type` over [off, off+len) of object o, seeded
// with init_value, optionally chunked into chunk_size pieces.  The encoded
// result is copied into pchecksum; -ERANGE if it exceeds checksum_len.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_checksum)(
  rados_ioctx_t io, const char *o,
  rados_checksum_type_t type,
  const char *init_value, size_t init_value_len,
  size_t len, uint64_t off, size_t chunk_size,
  char *pchecksum, size_t checksum_len)
{
  tracepoint(librados, rados_checksum_enter, io, o, type, init_value,
	     init_value_len, len, off, chunk_size);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist init_value_bl;
  init_value_bl.append(init_value, init_value_len);
  bufferlist checksum_bl;
  int retval = ctx->checksum(oid, get_checksum_op_type(type), init_value_bl,
			     len, off, chunk_size, &checksum_bl);
  if (retval >= 0) {
    if (checksum_bl.length() > checksum_len) {
      tracepoint(librados, rados_checksum_exit, -ERANGE, NULL, 0);
      return -ERANGE;
    }
    checksum_bl.begin().copy(checksum_bl.length(), pchecksum);
  }
  tracepoint(librados, rados_checksum_exit, retval, pchecksum, checksum_len);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_checksum);
// Return the object version of the last I/O completed on this ioctx.
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_get_last_version)(
  rados_ioctx_t io)
{
  tracepoint(librados, rados_get_last_version_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  uint64_t retval = ctx->last_version();
  tracepoint(librados, rados_get_last_version_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_get_last_version);
// Create a new pool with the given name using cluster defaults.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create)(
  rados_t cluster,
  const char *name)
{
  tracepoint(librados, rados_pool_create_enter, cluster, name);
  librados::RadosClient *radosp = (librados::RadosClient *)cluster;
  string sname(name);
  int retval = radosp->pool_create(sname);
  tracepoint(librados, rados_pool_create_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create);
// Create a pool with an explicit auid.  Pool auids are no longer supported,
// so any auid other than the default is rejected with -EINVAL; otherwise this
// behaves like rados_pool_create().
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create_with_auid)(
  rados_t cluster,
  const char *name,
  uint64_t auid)
{
  tracepoint(librados, rados_pool_create_with_auid_enter, cluster, name, auid);
  librados::RadosClient *radosp = (librados::RadosClient *)cluster;
  string sname(name);
  int retval = 0;
  if (auid != CEPH_AUTH_UID_DEFAULT) {
    retval = -EINVAL;
  } else {
    retval = radosp->pool_create(sname);
  }
  tracepoint(librados, rados_pool_create_with_auid_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create_with_auid);
// Create a pool that uses the given CRUSH rule number for data placement.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create_with_crush_rule)(
  rados_t cluster,
  const char *name,
  __u8 crush_rule_num)
{
  tracepoint(librados, rados_pool_create_with_crush_rule_enter, cluster, name, crush_rule_num);
  librados::RadosClient *radosp = (librados::RadosClient *)cluster;
  string sname(name);
  int retval = radosp->pool_create(sname, crush_rule_num);
  tracepoint(librados, rados_pool_create_with_crush_rule_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create_with_crush_rule);
// Create a pool specifying both auid and CRUSH rule.  Non-default auids are
// rejected with -EINVAL (auids are no longer supported); otherwise delegates
// to pool_create with the CRUSH rule.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_create_with_all)(
  rados_t cluster,
  const char *name,
  uint64_t auid,
  __u8 crush_rule_num)
{
  tracepoint(librados, rados_pool_create_with_all_enter, cluster, name, auid, crush_rule_num);
  librados::RadosClient *radosp = (librados::RadosClient *)cluster;
  string sname(name);
  int retval = 0;
  if (auid != CEPH_AUTH_UID_DEFAULT) {
    retval = -EINVAL;
  } else {
    retval = radosp->pool_create(sname, crush_rule_num);
  }
  tracepoint(librados, rados_pool_create_with_all_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_create_with_all);
// Look up the base tier of a (possibly cache-tiered) pool; writes the result
// into *base_tier.  Returns 0 on success or a negative errno.
// NOTE(review): the exit tracepoint reads *base_tier even on error, when the
// impl may not have written it — confirm pool_get_base_tier always sets it.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_get_base_tier)(
  rados_t cluster,
  int64_t pool_id,
  int64_t* base_tier)
{
  tracepoint(librados, rados_pool_get_base_tier_enter, cluster, pool_id);
  librados::RadosClient *client = (librados::RadosClient *)cluster;
  int retval = client->pool_get_base_tier(pool_id, base_tier);
  tracepoint(librados, rados_pool_get_base_tier_exit, retval, *base_tier);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_get_base_tier);
// Delete the named pool.  Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_pool_delete)(
  rados_t cluster,
  const char *pool_name)
{
  tracepoint(librados, rados_pool_delete_enter, cluster, pool_name);
  librados::RadosClient *client = (librados::RadosClient *)cluster;
  int retval = client->pool_delete(pool_name);
  tracepoint(librados, rados_pool_delete_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_pool_delete);
// Deprecated stub: pool auids were removed, so setting one always fails
// with -EOPNOTSUPP.  Kept for ABI compatibility.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_set_auid)(
  rados_ioctx_t io,
  uint64_t auid)
{
  tracepoint(librados, rados_ioctx_pool_set_auid_enter, io, auid);
  int retval = -EOPNOTSUPP;
  tracepoint(librados, rados_ioctx_pool_set_auid_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_set_auid);
// Deprecated stub: pool auids were removed, so querying one always fails
// with -EOPNOTSUPP and *auid is left untouched.  Kept for ABI compatibility.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_get_auid)(
  rados_ioctx_t io,
  uint64_t *auid)
{
  tracepoint(librados, rados_ioctx_pool_get_auid_enter, io);
  int retval = -EOPNOTSUPP;
  // Trace a literal 0 rather than *auid: this stub never writes to the
  // caller's buffer, so dereferencing it would read an indeterminate value.
  tracepoint(librados, rados_ioctx_pool_get_auid_exit, retval, 0);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_get_auid);
// Legacy query: returns nonzero if the ioctx's pool requires aligned writes
// (e.g. an erasure-coded pool).  Prefer the *_alignment2 variant, which
// separates the error code from the boolean.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_requires_alignment)(
  rados_ioctx_t io)
{
  tracepoint(librados, rados_ioctx_pool_requires_alignment_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->client->pool_requires_alignment(ctx->get_id());
  tracepoint(librados, rados_ioctx_pool_requires_alignment_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_requires_alignment);
// Query whether the ioctx's pool requires aligned writes.  On success *req is
// set to 0/1 (only written when req is non-NULL).  Returns 0 or a negative
// errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_requires_alignment2)(
  rados_ioctx_t io,
  int *req)
{
  tracepoint(librados, rados_ioctx_pool_requires_alignment_enter2, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  bool requires_alignment;
  int retval = ctx->client->pool_requires_alignment2(ctx->get_id(), 
  	&requires_alignment);
  tracepoint(librados, rados_ioctx_pool_requires_alignment_exit2, retval, 
  	requires_alignment);
  if (req)
    *req = requires_alignment;
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_requires_alignment2);
// Legacy query: return the required write alignment (stripe width) of the
// ioctx's pool.  Prefer the *_alignment2 variant, which can report errors.
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_required_alignment)(
  rados_ioctx_t io)
{
  tracepoint(librados, rados_ioctx_pool_required_alignment_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  uint64_t retval = ctx->client->pool_required_alignment(ctx->get_id());
  tracepoint(librados, rados_ioctx_pool_required_alignment_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_required_alignment);
// Query the required write alignment of the ioctx's pool into *alignment.
// Returns 0 or a negative errno.
// NOTE(review): the exit tracepoint reads *alignment even on error — confirm
// the impl writes it unconditionally.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_pool_required_alignment2)(
  rados_ioctx_t io,
  uint64_t *alignment)
{
  tracepoint(librados, rados_ioctx_pool_required_alignment_enter2, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->client->pool_required_alignment2(ctx->get_id(),
  	alignment);
  tracepoint(librados, rados_ioctx_pool_required_alignment_exit2, retval, 
  	*alignment);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_pool_required_alignment2);
// Set the object-locator key used by subsequent operations on this ioctx.
// Passing NULL clears the key back to the empty string.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_locator_set_key)(
  rados_ioctx_t io,
  const char *key)
{
  tracepoint(librados, rados_ioctx_locator_set_key_enter, io, key);
  librados::IoCtxImpl *ioctx = (librados::IoCtxImpl *)io;
  ioctx->oloc.key = key ? key : "";
  tracepoint(librados, rados_ioctx_locator_set_key_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_locator_set_key);
// Set the object namespace used by subsequent operations on this ioctx.
// Passing NULL resets it to the default (empty) namespace.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_ioctx_set_namespace)(
  rados_ioctx_t io,
  const char *nspace)
{
  tracepoint(librados, rados_ioctx_set_namespace_enter, io, nspace);
  librados::IoCtxImpl *ioctx = (librados::IoCtxImpl *)io;
  ioctx->oloc.nspace = nspace ? nspace : "";
  tracepoint(librados, rados_ioctx_set_namespace_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_set_namespace);
// Copy the ioctx's current namespace (NUL-terminated) into s.  Returns the
// namespace length on success, or -ERANGE if maxlen is too small to hold it
// plus the terminator.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_namespace)(
  rados_ioctx_t io,
  char *s,
  unsigned maxlen)
{
  tracepoint(librados, rados_ioctx_get_namespace_enter, io, maxlen);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  auto length = ctx->oloc.nspace.length();
  if (length >= maxlen) {
    tracepoint(librados, rados_ioctx_get_namespace_exit, -ERANGE, "");
    return -ERANGE;
  }
  strcpy(s, ctx->oloc.nspace.c_str());
  int retval = (int)length;
  tracepoint(librados, rados_ioctx_get_namespace_exit, retval, s);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_namespace);
// Return the cluster handle (rados_t) that this ioctx was created from.
extern "C" rados_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_cluster)(
  rados_ioctx_t io)
{
  tracepoint(librados, rados_ioctx_get_cluster_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  rados_t retval = (rados_t)ctx->client;
  tracepoint(librados, rados_ioctx_get_cluster_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_cluster);
// Return the numeric id of the pool this ioctx operates on.
extern "C" int64_t LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_id)(
  rados_ioctx_t io)
{
  tracepoint(librados, rados_ioctx_get_id_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int64_t retval = ctx->get_id();
  tracepoint(librados, rados_ioctx_get_id_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_id);
// Copy the name of the ioctx's pool (NUL-terminated) into s.  Returns the
// name length on success, -ERANGE if maxlen is too small, or another
// negative errno if the pool cannot be resolved.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_get_pool_name)(
  rados_ioctx_t io,
  char *s,
  unsigned maxlen)
{
  tracepoint(librados, rados_ioctx_get_pool_name_enter, io, maxlen);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  std::string pool_name;
  int err = ctx->client->pool_get_name(ctx->get_id(), &pool_name);
  if (err) {
    tracepoint(librados, rados_ioctx_get_pool_name_exit, err, "");
    return err;
  }
  if (pool_name.length() >= maxlen) {
    tracepoint(librados, rados_ioctx_get_pool_name_exit, -ERANGE, "");
    return -ERANGE;
  }
  strcpy(s, pool_name.c_str());
  int retval = pool_name.length();
  tracepoint(librados, rados_ioctx_get_pool_name_exit, retval, s);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_get_pool_name);
// snaps
// Create a pool-managed snapshot with the given name.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_create)(
  rados_ioctx_t io,
  const char *snapname)
{
  tracepoint(librados, rados_ioctx_snap_create_enter, io, snapname);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->snap_create(snapname);
  tracepoint(librados, rados_ioctx_snap_create_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_create);
// Remove the named pool-managed snapshot.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_remove)(
  rados_ioctx_t io,
  const char *snapname)
{
  tracepoint(librados, rados_ioctx_snap_remove_enter, io, snapname);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->snap_remove(snapname);
  tracepoint(librados, rados_ioctx_snap_remove_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_remove);
// Roll back object oid to its state in the named pool snapshot.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_rollback)(
  rados_ioctx_t io,
  const char *oid,
  const char *snapname)
{
  tracepoint(librados, rados_ioctx_snap_rollback_enter, io, oid, snapname);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->rollback(oid, snapname);
  tracepoint(librados, rados_ioctx_snap_rollback_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_rollback);
// Deprecated name kept for backward compatibility
// Deprecated alias for rados_ioctx_snap_rollback(); forwards unchanged.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_rollback)(
  rados_ioctx_t io,
  const char *oid,
  const char *snapname)
{
  return LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_rollback)(io, oid, snapname);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_rollback);
// Allocate a new self-managed snapshot id for this pool, returned in *snapid.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_create)(
  rados_ioctx_t io,
  uint64_t *snapid)
{
  tracepoint(librados, rados_ioctx_selfmanaged_snap_create_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->selfmanaged_snap_create(snapid);
  tracepoint(librados, rados_ioctx_selfmanaged_snap_create_exit, retval, *snapid);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_create);
// Asynchronous variant of selfmanaged_snap_create: *snapid is filled in and
// the completion fired when the operation finishes.  snapid must stay valid
// until completion.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_aio_ioctx_selfmanaged_snap_create)(
  rados_ioctx_t io,
  rados_snap_t *snapid,
  rados_completion_t completion)
{
  tracepoint(librados, rados_ioctx_selfmanaged_snap_create_enter, io);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
  ctx->aio_selfmanaged_snap_create(snapid, c);
  tracepoint(librados, rados_ioctx_selfmanaged_snap_create_exit, 0, 0);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_ioctx_selfmanaged_snap_create);
// Delete a self-managed snapshot id from this pool.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_remove)(
  rados_ioctx_t io,
  uint64_t snapid)
{
  tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_enter, io, snapid);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->selfmanaged_snap_remove(snapid);
  tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_remove);
// Asynchronous variant of selfmanaged_snap_remove; the completion fires when
// the snapshot deletion finishes.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_aio_ioctx_selfmanaged_snap_remove)(
  rados_ioctx_t io,
  rados_snap_t snapid,
  rados_completion_t completion)
{
  tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_enter, io, snapid);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
  ctx->aio_selfmanaged_snap_remove(snapid, c);
  tracepoint(librados, rados_ioctx_selfmanaged_snap_remove_exit, 0);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_ioctx_selfmanaged_snap_remove);
// Roll back object oid to its state at self-managed snapshot snapid, using
// the ioctx's current snap context.  Returns 0 or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_selfmanaged_snap_rollback)(
  rados_ioctx_t io,
  const char *oid,
  uint64_t snapid)
{
  tracepoint(librados, rados_ioctx_selfmanaged_snap_rollback_enter, io, oid, snapid);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->selfmanaged_snap_rollback_object(oid, ctx->snapc, snapid);
  tracepoint(librados, rados_ioctx_selfmanaged_snap_rollback_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_selfmanaged_snap_rollback);
// List the pool's snapshot ids into the caller's snaps array.  Returns the
// number of snapshots on success, -ERANGE if more than maxlen exist, or
// another negative errno from the listing itself.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_list)(
  rados_ioctx_t io,
  rados_snap_t *snaps,
  int maxlen)
{
  tracepoint(librados, rados_ioctx_snap_list_enter, io, maxlen);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  vector<uint64_t> snapvec;
  int r = ctx->snap_list(&snapvec);
  if (r < 0) {
    tracepoint(librados, rados_ioctx_snap_list_exit, r, snaps, 0);
    return r;
  }
  if ((int)snapvec.size() <= maxlen) {
    for (unsigned i=0; i<snapvec.size(); i++) {
      snaps[i] = snapvec[i];
    }
    int retval = snapvec.size();
    tracepoint(librados, rados_ioctx_snap_list_exit, retval, snaps, retval);
    return retval;
  }
  int retval = -ERANGE;
  tracepoint(librados, rados_ioctx_snap_list_exit, retval, snaps, 0);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_list);
// Resolve a pool snapshot name to its id, written to *id.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_lookup)(
  rados_ioctx_t io,
  const char *name,
  rados_snap_t *id)
{
  tracepoint(librados, rados_ioctx_snap_lookup_enter, io, name);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->snap_lookup(name, (uint64_t *)id);
  tracepoint(librados, rados_ioctx_snap_lookup_exit, retval, *id);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_lookup);
// Resolve a pool snapshot id to its name, copied (NUL-terminated) into name.
// Returns 0 on success, -ERANGE if maxlen cannot hold the name plus the
// terminator, or another negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_get_name)(
  rados_ioctx_t io,
  rados_snap_t id,
  char *name,
  int maxlen)
{
  tracepoint(librados, rados_ioctx_snap_get_name_enter, io, id, maxlen);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  std::string sname;
  int r = ctx->snap_get_name(id, &sname);
  if (r < 0) {
    tracepoint(librados, rados_ioctx_snap_get_name_exit, r, "");
    return r;
  }
  if ((int)sname.length() >= maxlen) {
    int retval = -ERANGE;
    tracepoint(librados, rados_ioctx_snap_get_name_exit, retval, "");
    return retval;
  }
  // sname.length() < maxlen is already guaranteed, so the copy is
  // NUL-terminated within bounds.
  strncpy(name, sname.c_str(), maxlen);
  tracepoint(librados, rados_ioctx_snap_get_name_exit, 0, name);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_get_name);
// Fetch the creation timestamp of pool snapshot id into *t.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_ioctx_snap_get_stamp)(
  rados_ioctx_t io,
  rados_snap_t id,
  time_t *t)
{
  tracepoint(librados, rados_ioctx_snap_get_stamp_enter, io, id);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->snap_get_stamp(id, t);
  tracepoint(librados, rados_ioctx_snap_get_stamp_exit, retval, *t);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_ioctx_snap_get_stamp);
// Compare cmp_len bytes at offset off of object o against cmp_buf.
// Returns 0 when equal, or a negative value encoding the first mismatch
// offset / error from the impl.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cmpext)(
  rados_ioctx_t io,
  const char *o,
  const char *cmp_buf,
  size_t cmp_len,
  uint64_t off)
{
  tracepoint(librados, rados_cmpext_enter, io, o, cmp_buf, cmp_len, off);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int ret;
  object_t oid(o);
  bufferlist cmp_bl;
  cmp_bl.append(cmp_buf, cmp_len);
  ret = ctx->cmpext(oid, off, cmp_bl);
  tracepoint(librados, rados_cmpext_exit, ret);
  return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cmpext);
// Read extended attribute `name` of object o into buf (at most len bytes).
// Returns the attribute length on success, -ERANGE if it exceeds len, or
// another negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getxattr)(
  rados_ioctx_t io,
  const char *o,
  const char *name,
  char *buf,
  size_t len)
{
  tracepoint(librados, rados_getxattr_enter, io, o, name, len);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int ret;
  object_t oid(o);
  bufferlist bl;
  // Wrap the caller's buffer so the value can land in it without a copy.
  bl.push_back(buffer::create_static(len, buf));
  ret = ctx->getxattr(oid, name, bl);
  if (ret >= 0) {
    if (bl.length() > len) {
      tracepoint(librados, rados_getxattr_exit, -ERANGE, buf, 0);
      return -ERANGE;
    }
    // Copy out only if the data ended up in an internal buffer.
    if (!bl.is_provided_buffer(buf))
      bl.begin().copy(bl.length(), buf);
    ret = bl.length();
  }
  tracepoint(librados, rados_getxattr_exit, ret, buf, ret);
  return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattr);
// Fetch all extended attributes of `oid` and return an iterator handle in
// *iter for use with rados_getxattrs_next()/rados_getxattrs_end().
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getxattrs)(
  rados_ioctx_t io,
  const char *oid,
  rados_xattrs_iter_t *iter)
{
  tracepoint(librados, rados_getxattrs_enter, io, oid);
  librados::RadosXattrsIter *it = new librados::RadosXattrsIter();
  // Defensive NULL check; with a standard (throwing) operator new this
  // branch is unreachable.
  if (!it) {
    tracepoint(librados, rados_getxattrs_exit, -ENOMEM, NULL);
    return -ENOMEM;
  }
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t obj(oid);
  int ret = ctx->getxattrs(obj, it->attrset);
  if (ret) {
    delete it;
    tracepoint(librados, rados_getxattrs_exit, ret, NULL);
    return ret;
  }
  it->i = it->attrset.begin();
  *iter = it;
  tracepoint(librados, rados_getxattrs_exit, 0, *iter);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattrs);
// Advance an xattr iterator, yielding the next (name, value, length) triple.
// At the end of the set, *name and *val are NULL and *len is 0 (return 0).
// The returned value buffer is owned by the iterator and freed on the next
// call or by rados_getxattrs_end().  Returns 0 or -ENOMEM.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_getxattrs_next)(
  rados_xattrs_iter_t iter,
  const char **name,
  const char **val,
  size_t *len)
{
  tracepoint(librados, rados_getxattrs_next_enter, iter);
  librados::RadosXattrsIter *it = static_cast<librados::RadosXattrsIter*>(iter);
  // Release the value buffer handed out on the previous call.
  if (it->val) {
    free(it->val);
    it->val = NULL;
  }
  if (it->i == it->attrset.end()) {
    *name = NULL;
    *val = NULL;
    *len = 0;
    tracepoint(librados, rados_getxattrs_next_exit, 0, NULL, NULL, 0);
    return 0;
  }
  const std::string &s(it->i->first);
  *name = s.c_str();
  bufferlist &bl(it->i->second);
  size_t bl_len = bl.length();
  if (!bl_len) {
    // malloc(0) is not guaranteed to return a valid pointer
    *val = (char *)NULL;
  } else {
    it->val = (char*)malloc(bl_len);
    if (!it->val) {
      tracepoint(librados, rados_getxattrs_next_exit, -ENOMEM, *name, NULL, 0);
      return -ENOMEM;
    }
    memcpy(it->val, bl.c_str(), bl_len);
    *val = it->val;
  }
  *len = bl_len;
  ++it->i;
  tracepoint(librados, rados_getxattrs_next_exit, 0, *name, *val, *len);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattrs_next);
// Destroy an xattr iterator, freeing any value buffer it still owns.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_getxattrs_end)(
  rados_xattrs_iter_t iter)
{
  tracepoint(librados, rados_getxattrs_end_enter, iter);
  librados::RadosXattrsIter *it = static_cast<librados::RadosXattrsIter*>(iter);
  delete it;
  tracepoint(librados, rados_getxattrs_end_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_getxattrs_end);
// Set extended attribute `name` of object o to the len bytes in buf.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_setxattr)(
  rados_ioctx_t io,
  const char *o,
  const char *name,
  const char *buf,
  size_t len)
{
  tracepoint(librados, rados_setxattr_enter, io, o, name, buf, len);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist bl;
  bl.append(buf, len);
  int retval = ctx->setxattr(oid, name, bl);
  tracepoint(librados, rados_setxattr_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_setxattr);
// Remove extended attribute `name` from object o.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_rmxattr)(
  rados_ioctx_t io,
  const char *o,
  const char *name)
{
  tracepoint(librados, rados_rmxattr_enter, io, o, name);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->rmxattr(oid, name);
  tracepoint(librados, rados_rmxattr_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_rmxattr);
// Stat object o: size into *psize and mtime (second resolution) into *pmtime.
// Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_stat)(
  rados_ioctx_t io,
  const char *o,
  uint64_t *psize,
  time_t *pmtime)
{
  tracepoint(librados, rados_stat_enter, io, o);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->stat(oid, psize, pmtime);
  tracepoint(librados, rados_stat_exit, retval, psize, pmtime);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_stat);
// Stat object o with nanosecond mtime resolution (struct timespec variant
// of rados_stat).  Returns 0 on success or a negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_stat2)(
  rados_ioctx_t io,
  const char *o,
  uint64_t *psize,
  struct timespec *pmtime)
{
  tracepoint(librados, rados_stat2_enter, io, o);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->stat2(oid, psize, pmtime);
  tracepoint(librados, rados_stat2_exit, retval, psize, pmtime);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_stat2);
// Legacy (pre-14.2.0 symbol version) tmap update: apply the encoded command
// buffer to object o's tmap.  Newer symbol versions return -ENOTSUP instead.
extern "C" int LIBRADOS_C_API_BASE_F(rados_tmap_update)(
  rados_ioctx_t io,
  const char *o,
  const char *cmdbuf,
  size_t cmdbuflen)
{
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist cmdbl;
  cmdbl.append(cmdbuf, cmdbuflen);
  return ctx->tmap_update(oid, cmdbl);
}
LIBRADOS_C_API_BASE(rados_tmap_update);
// tmap operations were removed; the current symbol version always fails
// with -ENOTSUP.  The legacy versioned symbol above keeps old binaries
// working.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_tmap_update)(
  rados_ioctx_t io,
  const char *o,
  const char *cmdbuf,
  size_t cmdbuflen)
{
  return -ENOTSUP;
}
LIBRADOS_C_API_DEFAULT(rados_tmap_update, 14.2.0);
// Legacy tmap put: decode the caller's (header, map) encoding to validate it,
// re-encode, and store it as the full object contents via rados_write_full.
extern "C" int LIBRADOS_C_API_BASE_F(rados_tmap_put)(
  rados_ioctx_t io,
  const char *o,
  const char *buf,
  size_t buflen)
{
  bufferlist bl;
  bl.append(buf, buflen);

  bufferlist header;
  std::map<std::string, bufferlist> m;
  bufferlist::const_iterator bl_it = bl.begin();
  decode(header, bl_it);
  decode(m, bl_it);

  bufferlist out_bl;
  encode(header, out_bl);
  encode(m, out_bl);

  return LIBRADOS_C_API_DEFAULT_F(rados_write_full)(
    io, o, out_bl.c_str(), out_bl.length());
}
LIBRADOS_C_API_BASE(rados_tmap_put);
// tmap operations were removed; the current symbol version always fails
// with -EOPNOTSUPP.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_tmap_put)(
  rados_ioctx_t io,
  const char *o,
  const char *buf,
  size_t buflen)
{
  return -EOPNOTSUPP;
}
LIBRADOS_C_API_DEFAULT(rados_tmap_put, 14.2.0);
// Legacy tmap get: a tmap object's encoding is its full contents, so this is
// just a read from offset 0.
extern "C" int LIBRADOS_C_API_BASE_F(rados_tmap_get)(
  rados_ioctx_t io,
  const char *o,
  char *buf,
  size_t buflen)
{
  return LIBRADOS_C_API_DEFAULT_F(rados_read)(io, o, buf, buflen, 0);
}
LIBRADOS_C_API_BASE(rados_tmap_get);
// tmap operations were removed; the current symbol version always fails
// with -EOPNOTSUPP.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_tmap_get)(
  rados_ioctx_t io,
  const char *o,
  char *buf,
  size_t buflen)
{
  return -EOPNOTSUPP;
}
LIBRADOS_C_API_DEFAULT(rados_tmap_get, 14.2.0);
// Execute object-class method cls::method on object o with inbuf as input.
// On success the method's output (up to out_len bytes) is copied into buf and
// its length returned; -ERANGE if the output exceeds out_len.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_exec)(
  rados_ioctx_t io,
  const char *o,
  const char *cls,
  const char *method,
  const char *inbuf,
  size_t in_len,
  char *buf,
  size_t out_len)
{
  tracepoint(librados, rados_exec_enter, io, o, cls, method, inbuf, in_len, out_len);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  bufferlist inbl, outbl;
  int ret;
  inbl.append(inbuf, in_len);
  ret = ctx->exec(oid, cls, method, inbl, outbl);
  if (ret >= 0) {
    if (outbl.length()) {
      if (outbl.length() > out_len) {
	tracepoint(librados, rados_exec_exit, -ERANGE, buf, 0);
	return -ERANGE;
      }
      outbl.begin().copy(outbl.length(), buf);
      ret = outbl.length();   // hrm :/
    }
  }
  tracepoint(librados, rados_exec_exit, ret, buf, ret);
  return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_exec);
// Allocate a cursor positioned at the start of object enumeration.  The
// caller must release it with rados_object_list_cursor_free().
extern "C" rados_object_list_cursor LIBRADOS_C_API_DEFAULT_F(rados_object_list_begin)(
  rados_ioctx_t io)
{
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;

  hobject_t *result = new hobject_t(ctx->objecter->enumerate_objects_begin());
  return (rados_object_list_cursor)result;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_begin);
// Allocate a cursor positioned past the end of object enumeration.  The
// caller must release it with rados_object_list_cursor_free().
extern "C" rados_object_list_cursor LIBRADOS_C_API_DEFAULT_F(rados_object_list_end)(
  rados_ioctx_t io)
{
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;

  hobject_t *result = new hobject_t(ctx->objecter->enumerate_objects_end());
  return (rados_object_list_cursor)result;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_end);
// Return nonzero if the cursor is at (or past) the end of enumeration.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_object_list_is_end)(
  rados_ioctx_t io,
  rados_object_list_cursor cur)
{
  hobject_t *hobj = (hobject_t*)cur;
  return hobj->is_max();
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_is_end);
// Free a cursor previously returned by the object-list API.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_object_list_cursor_free)(
  rados_ioctx_t io,
  rados_object_list_cursor cur)
{
  hobject_t *hobj = (hobject_t*)cur;
  delete hobj;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_cursor_free);
// Three-way compare two enumeration cursors: <0, 0, or >0 as lhs is before,
// equal to, or after rhs in enumeration order.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_object_list_cursor_cmp)(
  rados_ioctx_t io,
  rados_object_list_cursor lhs_cur,
  rados_object_list_cursor rhs_cur)
{
  hobject_t *lhs = (hobject_t*)lhs_cur;
  hobject_t *rhs = (hobject_t*)rhs_cur;
  return cmp(*lhs, *rhs);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_cursor_cmp);
// Enumerate up to result_item_count objects in [start, finish), optionally
// filtered, filling result_items (caller frees with rados_object_list_free)
// and advancing *next to the resume cursor.  Returns the number of items, or
// a negative errno (with *next set to the max cursor on error).
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_object_list)(rados_ioctx_t io,
    const rados_object_list_cursor start,
    const rados_object_list_cursor finish,
    const size_t result_item_count,
    const char *filter_buf,
    const size_t filter_buf_len,
    rados_object_list_item *result_items,
    rados_object_list_cursor *next)
{
  ceph_assert(next);

  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;

  // Zero out items so that they will be safe to free later
  // FIPS zeroization audit 20191116: this memset is not security related.
  memset(result_items, 0, sizeof(rados_object_list_item) * result_item_count);

  bufferlist filter_bl;
  if (filter_buf != nullptr) {
    filter_bl.append(filter_buf, filter_buf_len);
  }

  // Block synchronously on the async enumeration via a waiter.
  ceph::async::waiter<boost::system::error_code,
		      std::vector<librados::ListObjectImpl>,
		      hobject_t> w;
  ctx->objecter->enumerate_objects<librados::ListObjectImpl>(
      ctx->poolid,
      ctx->oloc.nspace,
      *((hobject_t*)start),
      *((hobject_t*)finish),
      result_item_count,
      filter_bl,
      w);

  hobject_t *next_hobj = (hobject_t*)(*next);
  ceph_assert(next_hobj);

  auto [ec, result, next_hash] = w.wait();

  if (ec) {
    *next_hobj = hobject_t::get_max();
    return ceph::from_error_code(ec);
  }

  ceph_assert(result.size() <= result_item_count);  // Don't overflow!

  int k = 0;
  for (auto i = result.begin(); i != result.end(); ++i) {
    rados_object_list_item &item = result_items[k++];
    do_out_buffer(i->oid, &item.oid, &item.oid_length);
    do_out_buffer(i->nspace, &item.nspace, &item.nspace_length);
    do_out_buffer(i->locator, &item.locator, &item.locator_length);
  }

  *next_hobj = next_hash;

  return result.size();
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list);
// Free the per-item buffers allocated by rados_object_list() for
// result_size entries of results.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_object_list_free)(
    const size_t result_size,
    rados_object_list_item *results)
{
  ceph_assert(results);

  for (unsigned int i = 0; i < result_size; ++i) {
    LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(results[i].oid);
    LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(results[i].locator);
    LIBRADOS_C_API_DEFAULT_F(rados_buffer_free)(results[i].nspace);
  }
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_free);
/* list objects */
// Open an object-listing handle over the ioctx's pool/namespace/snap, stored
// in *listh for use with the rados_nobjects_list_* calls.  Always returns 0.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_open)(
  rados_ioctx_t io,
  rados_list_ctx_t *listh)
{
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  tracepoint(librados, rados_nobjects_list_open_enter, io);

  Objecter::NListContext *h = new Objecter::NListContext;
  h->pool_id = ctx->poolid;
  h->pool_snap_seq = ctx->snap_seq;
  h->nspace = ctx->oloc.nspace;	// After dropping compatibility need nspace
  *listh = (void *)new librados::ObjListCtx(ctx, h);
  tracepoint(librados, rados_nobjects_list_open_exit, 0, *listh);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_open);
// Close an object-listing handle and free its resources.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_close)(
  rados_list_ctx_t h)
{
  tracepoint(librados, rados_nobjects_list_close_enter, h);
  librados::ObjListCtx *lh = (librados::ObjListCtx *)h;
  delete lh;
  tracepoint(librados, rados_nobjects_list_close_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_close);
// Seek the listing to a PG hash position; returns the resulting position.
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_seek)(
  rados_list_ctx_t listctx,
  uint32_t pos)
{
  librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
  tracepoint(librados, rados_nobjects_list_seek_enter, listctx, pos);
  uint32_t r = lh->ctx->nlist_seek(lh->nlc, pos);
  tracepoint(librados, rados_nobjects_list_seek_exit, r);
  return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_seek);
// Seek the listing to an exact cursor; returns the resulting hash position.
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_seek_cursor)(
  rados_list_ctx_t listctx,
  rados_object_list_cursor cursor)
{
  librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;

  tracepoint(librados, rados_nobjects_list_seek_cursor_enter, listctx);
  uint32_t r = lh->ctx->nlist_seek(lh->nlc, cursor);
  tracepoint(librados, rados_nobjects_list_seek_cursor_exit, r);
  return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_seek_cursor);
// Capture the listing's current position as a cursor in *cursor.
// Always returns 0.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_get_cursor)(
  rados_list_ctx_t listctx,
  rados_object_list_cursor *cursor)
{
  librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;

  tracepoint(librados, rados_nobjects_list_get_cursor_enter, listctx);
  *cursor = lh->ctx->nlist_get_cursor(lh->nlc);
  tracepoint(librados, rados_nobjects_list_get_cursor_exit, 0);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_get_cursor);
// Return the listing's current PG hash position (for coarse resume/seek).
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_get_pg_hash_position)(
  rados_list_ctx_t listctx)
{
  librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
  tracepoint(librados, rados_nobjects_list_get_pg_hash_position_enter, listctx);
  uint32_t retval = lh->nlc->get_pg_hash_position();
  tracepoint(librados, rados_nobjects_list_get_pg_hash_position_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_get_pg_hash_position);
// Advance the listing and yield the next object name (and, optionally,
// locator key and namespace).  Thin wrapper over the _next2 variant that
// drops the length outputs.  Returns 0 on success, -ENOENT at the end of
// the listing, or another negative errno.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_next)(
  rados_list_ctx_t listctx,
  const char **entry,
  const char **key,
  const char **nspace)
{
  tracepoint(librados, rados_nobjects_list_next_enter, listctx);
  // Use a signed int: _next2 returns negative errno values, and funnelling
  // them through a uint32_t relied on implementation-defined conversion.
  // Call through the versioned-symbol macro, consistent with the other
  // intra-file forwards (e.g. rados_rollback, rados_tmap_get).
  int retval = LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_next2)(
    listctx, entry, key, nspace, NULL, NULL, NULL);
  // On error *entry was never written, so don't dereference the caller's
  // (possibly uninitialized) pointer; trace NULL instead.
  tracepoint(librados, rados_nobjects_list_next_exit, retval,
	     retval == 0 ? *entry : NULL, key, nspace);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_next);
// Advance the listing to the next object and expose pointers into the
// listing's internal front entry (valid only until the next call on this
// listctx). Any of key/nspace/*_size outputs may be NULL if unwanted.
// Returns 0 on success, -ENOENT when the listing is exhausted, or a
// negative errno from fetching the next batch.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_nobjects_list_next2)(
  rados_list_ctx_t listctx,
  const char **entry,
  const char **key,
  const char **nspace,
  size_t *entry_size,
  size_t *key_size,
  size_t *nspace_size)
{
  tracepoint(librados, rados_nobjects_list_next2_enter, listctx);
  librados::ObjListCtx *lh = (librados::ObjListCtx *)listctx;
  Objecter::NListContext *h = lh->nlc;
  // if the list is non-empty, this method has been called before
  if (!h->list.empty())
    // so let's kill the previously-returned object
    h->list.pop_front();
  if (h->list.empty()) {
    // batch is drained: fetch the next page of entries from the cluster
    int ret = lh->ctx->nlist(lh->nlc, RADOS_LIST_MAX_ENTRIES);
    if (ret < 0) {
      tracepoint(librados, rados_nobjects_list_next2_exit, ret, NULL, NULL, NULL, NULL, NULL, NULL);
      return ret;
    }
    // an empty page after a successful fetch means the listing is exhausted
    if (h->list.empty()) {
      tracepoint(librados, rados_nobjects_list_next2_exit, -ENOENT, NULL, NULL, NULL, NULL, NULL, NULL);
      return -ENOENT;
    }
  }
  // return pointers into the front entry; ownership stays with the context
  *entry = h->list.front().oid.c_str();
  if (key) {
    // an empty locator is reported as NULL, not as an empty string
    if (h->list.front().locator.size())
      *key = h->list.front().locator.c_str();
    else
      *key = NULL;
  }
  if (nspace)
    *nspace = h->list.front().nspace.c_str();
  if (entry_size)
    *entry_size = h->list.front().oid.size();
  if (key_size)
    *key_size = h->list.front().locator.size();
  if (nspace_size)
    *nspace_size = h->list.front().nspace.size();
  tracepoint(librados, rados_nobjects_list_next2_exit, 0, entry, key, nspace,
             entry_size, key_size, nspace_size);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_nobjects_list_next2);
/*
* removed legacy v2 list objects stubs
*
 * these return -ENOTSUP where possible.
*/
// Legacy stub: listing must be opened with rados_nobjects_list_open() instead.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_objects_list_open)(
  rados_ioctx_t io,
  rados_list_ctx_t *ctx)
{
  return -ENOTSUP;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_open);
// Legacy stub: unsigned return type cannot carry an errno, so report 0.
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_objects_list_get_pg_hash_position)(
  rados_list_ctx_t ctx)
{
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_get_pg_hash_position);
// Legacy stub: unsigned return type cannot carry an errno, so report 0.
extern "C" uint32_t LIBRADOS_C_API_DEFAULT_F(rados_objects_list_seek)(
  rados_list_ctx_t ctx,
  uint32_t pos)
{
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_seek);
// Legacy stub: always fails; use rados_nobjects_list_next() instead.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_objects_list_next)(
  rados_list_ctx_t ctx,
  const char **entry,
  const char **key)
{
  return -ENOTSUP;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_next);
// Legacy stub: nothing to release since _open never succeeds.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_objects_list_close)(
  rados_list_ctx_t ctx)
{
}
LIBRADOS_C_API_BASE_DEFAULT(rados_objects_list_close);
// -------------------------
// aio
// Allocate an AIO completion with optional complete/safe callbacks, both
// invoked with cb_arg. The caller must release it with rados_aio_release().
// Always returns 0.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_create_completion)(
  void *cb_arg,
  rados_callback_t cb_complete,
  rados_callback_t cb_safe,
  rados_completion_t *pc)
{
  tracepoint(librados, rados_aio_create_completion_enter, cb_arg, cb_complete, cb_safe);
  librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
  if (cb_complete)
    c->set_complete_callback(cb_arg, cb_complete);
  if (cb_safe)
    c->set_safe_callback(cb_arg, cb_safe);
  *pc = c;
  tracepoint(librados, rados_aio_create_completion_exit, 0, *pc);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_create_completion);
// Allocate an AIO completion with a single optional completion callback
// (the modern replacement for the complete/safe pair). Always returns 0;
// the caller releases the completion with rados_aio_release().
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_create_completion2)(
  void *cb_arg,
  rados_callback_t cb_complete,
  rados_completion_t *pc)
{
  tracepoint(librados, rados_aio_create_completion2_enter, cb_arg, cb_complete);
  auto *impl = new librados::AioCompletionImpl;
  if (cb_complete) {
    impl->set_complete_callback(cb_arg, cb_complete);
  }
  *pc = impl;
  tracepoint(librados, rados_aio_create_completion2_exit, 0, *pc);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_create_completion2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_complete)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_complete_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_complete();
tracepoint(librados, rados_aio_wait_for_complete_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_complete);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_safe)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_safe_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_complete();
tracepoint(librados, rados_aio_wait_for_safe_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_safe);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_complete)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_complete_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_complete();
tracepoint(librados, rados_aio_is_complete_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_complete);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_safe)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_safe_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_safe();
tracepoint(librados, rados_aio_is_safe_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_safe);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_complete_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_complete_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_complete_and_cb();
tracepoint(librados, rados_aio_wait_for_complete_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_complete_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_wait_for_safe_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_wait_for_safe_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->wait_for_safe_and_cb();
tracepoint(librados, rados_aio_wait_for_safe_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_wait_for_safe_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_complete_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_complete_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_complete_and_cb();
tracepoint(librados, rados_aio_is_complete_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_complete_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_is_safe_and_cb)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_is_safe_and_cb_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->is_safe_and_cb();
tracepoint(librados, rados_aio_is_safe_and_cb_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_is_safe_and_cb);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_get_return_value)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_get_return_value_enter, c);
int retval = ((librados::AioCompletionImpl*)c)->get_return_value();
tracepoint(librados, rados_aio_get_return_value_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_get_return_value);
extern "C" uint64_t LIBRADOS_C_API_DEFAULT_F(rados_aio_get_version)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_get_version_enter, c);
uint64_t retval = ((librados::AioCompletionImpl*)c)->get_version();
tracepoint(librados, rados_aio_get_version_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_get_version);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_aio_release)(
rados_completion_t c)
{
tracepoint(librados, rados_aio_release_enter, c);
((librados::AioCompletionImpl*)c)->put();
tracepoint(librados, rados_aio_release_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_release);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_read)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
char *buf, size_t len, uint64_t off)
{
tracepoint(librados, rados_aio_read_enter, io, o, completion, len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_read(oid, (librados::AioCompletionImpl*)completion,
buf, len, off, ctx->snap_seq);
tracepoint(librados, rados_aio_read_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_read);
#ifdef WITH_BLKIN
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_read_traced)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
char *buf, size_t len, uint64_t off,
struct blkin_trace_info *info)
{
tracepoint(librados, rados_aio_read_enter, io, o, completion, len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_read(oid, (librados::AioCompletionImpl*)completion,
buf, len, off, ctx->snap_seq, info);
tracepoint(librados, rados_aio_read_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_read_traced);
#endif
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len, uint64_t off)
{
tracepoint(librados, rados_aio_write_enter, io, o, completion, buf, len, off);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_write(oid, (librados::AioCompletionImpl*)completion,
bl, len, off);
tracepoint(librados, rados_aio_write_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write);
#ifdef WITH_BLKIN
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_traced)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len, uint64_t off,
struct blkin_trace_info *info)
{
tracepoint(librados, rados_aio_write_enter, io, o, completion, buf, len, off);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_write(oid, (librados::AioCompletionImpl*)completion,
bl, len, off, info);
tracepoint(librados, rados_aio_write_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_traced);
#endif
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_append)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len)
{
tracepoint(librados, rados_aio_append_enter, io, o, completion, buf, len);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_append(oid, (librados::AioCompletionImpl*)completion,
bl, len);
tracepoint(librados, rados_aio_append_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_append);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_full)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t len)
{
tracepoint(librados, rados_aio_write_full_enter, io, o, completion, buf, len);
if (len > UINT_MAX/2)
return -E2BIG;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_write_full(oid, (librados::AioCompletionImpl*)completion, bl);
tracepoint(librados, rados_aio_write_full_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_full);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_writesame)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, size_t data_len,
size_t write_len, uint64_t off)
{
tracepoint(librados, rados_aio_writesame_enter, io, o, completion, buf,
data_len, write_len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, data_len);
int retval = ctx->aio_writesame(o, (librados::AioCompletionImpl*)completion,
bl, write_len, off);
tracepoint(librados, rados_aio_writesame_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_writesame);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_remove)(
rados_ioctx_t io, const char *o,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_remove_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_remove(oid, (librados::AioCompletionImpl*)completion);
tracepoint(librados, rados_aio_remove_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_remove);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_flush_async)(
rados_ioctx_t io,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_flush_async_enter, io, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->flush_aio_writes_async((librados::AioCompletionImpl*)completion);
tracepoint(librados, rados_aio_flush_async_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_flush_async);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_flush)(rados_ioctx_t io)
{
tracepoint(librados, rados_aio_flush_enter, io);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
ctx->flush_aio_writes();
tracepoint(librados, rados_aio_flush_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_flush);
struct AioGetxattrData {
AioGetxattrData(char* buf, rados_completion_t c, size_t l) :
user_buf(buf), len(l), user_completion((librados::AioCompletionImpl*)c) {}
bufferlist bl;
char* user_buf;
size_t len;
struct librados::CB_AioCompleteAndSafe user_completion;
};
static void rados_aio_getxattr_complete(rados_completion_t c, void *arg) {
AioGetxattrData *cdata = reinterpret_cast<AioGetxattrData*>(arg);
int rc = LIBRADOS_C_API_DEFAULT_F(rados_aio_get_return_value)(c);
if (rc >= 0) {
if (cdata->bl.length() > cdata->len) {
rc = -ERANGE;
} else {
if (!cdata->bl.is_provided_buffer(cdata->user_buf))
cdata->bl.begin().copy(cdata->bl.length(), cdata->user_buf);
rc = cdata->bl.length();
}
}
cdata->user_completion(rc);
reinterpret_cast<librados::AioCompletionImpl*>(c)->put();
delete cdata;
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_getxattr)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *name, char *buf, size_t len)
{
tracepoint(librados, rados_aio_getxattr_enter, io, o, completion, name, len);
// create data object to be passed to async callback
AioGetxattrData *cdata = new AioGetxattrData(buf, completion, len);
if (!cdata) {
tracepoint(librados, rados_aio_getxattr_exit, -ENOMEM, NULL, 0);
return -ENOMEM;
}
cdata->bl.push_back(buffer::create_static(len, buf));
// create completion callback
librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
c->set_complete_callback(cdata, rados_aio_getxattr_complete);
// call async getxattr of IoCtx
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int ret = ctx->aio_getxattr(oid, c, name, cdata->bl);
tracepoint(librados, rados_aio_getxattr_exit, ret, buf, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_getxattr);
namespace {
struct AioGetxattrsData {
AioGetxattrsData(rados_completion_t c, rados_xattrs_iter_t *_iter) :
iter(_iter), user_completion((librados::AioCompletionImpl*)c) {
it = new librados::RadosXattrsIter();
}
~AioGetxattrsData() {
if (it) delete it;
}
librados::RadosXattrsIter *it;
rados_xattrs_iter_t *iter;
struct librados::CB_AioCompleteAndSafe user_completion;
};
}
static void rados_aio_getxattrs_complete(rados_completion_t c, void *arg) {
AioGetxattrsData *cdata = reinterpret_cast<AioGetxattrsData*>(arg);
int rc = LIBRADOS_C_API_DEFAULT_F(rados_aio_get_return_value)(c);
if (rc) {
cdata->user_completion(rc);
} else {
cdata->it->i = cdata->it->attrset.begin();
*cdata->iter = cdata->it;
cdata->it = 0;
cdata->user_completion(0);
}
reinterpret_cast<librados::AioCompletionImpl*>(c)->put();
delete cdata;
}
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_getxattrs)(
rados_ioctx_t io, const char *oid,
rados_completion_t completion,
rados_xattrs_iter_t *iter)
{
tracepoint(librados, rados_aio_getxattrs_enter, io, oid, completion);
// create data object to be passed to async callback
AioGetxattrsData *cdata = new AioGetxattrsData(completion, iter);
if (!cdata) {
tracepoint(librados, rados_getxattrs_exit, -ENOMEM, NULL);
return -ENOMEM;
}
// create completion callback
librados::AioCompletionImpl *c = new librados::AioCompletionImpl;
c->set_complete_callback(cdata, rados_aio_getxattrs_complete);
// call async getxattrs of IoCtx
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t obj(oid);
int ret = ctx->aio_getxattrs(obj, c, cdata->it->attrset);
tracepoint(librados, rados_aio_getxattrs_exit, ret, cdata->it);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_getxattrs);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_setxattr)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *name, const char *buf, size_t len)
{
tracepoint(librados, rados_aio_setxattr_enter, io, o, completion, name, buf, len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
bl.append(buf, len);
int retval = ctx->aio_setxattr(oid, (librados::AioCompletionImpl*)completion, name, bl);
tracepoint(librados, rados_aio_setxattr_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_setxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_rmxattr)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *name)
{
tracepoint(librados, rados_aio_rmxattr_enter, io, o, completion, name);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_rmxattr(oid, (librados::AioCompletionImpl*)completion, name);
tracepoint(librados, rados_aio_rmxattr_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_rmxattr);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_stat)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *psize, time_t *pmtime)
{
tracepoint(librados, rados_aio_stat_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_stat(oid, (librados::AioCompletionImpl*)completion,
psize, pmtime);
tracepoint(librados, rados_aio_stat_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_stat);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_stat2)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *psize, struct timespec *pmtime)
{
tracepoint(librados, rados_aio_stat2_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_stat2(oid, (librados::AioCompletionImpl*)completion,
psize, pmtime);
tracepoint(librados, rados_aio_stat2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_stat2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_cmpext)(
rados_ioctx_t io, const char *o,
rados_completion_t completion, const char *cmp_buf,
size_t cmp_len, uint64_t off)
{
tracepoint(librados, rados_aio_cmpext_enter, io, o, completion, cmp_buf,
cmp_len, off);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->aio_cmpext(oid, (librados::AioCompletionImpl*)completion,
cmp_buf, cmp_len, off);
tracepoint(librados, rados_aio_cmpext_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_cmpext);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_cancel)(
rados_ioctx_t io,
rados_completion_t completion)
{
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
return ctx->aio_cancel((librados::AioCompletionImpl*)completion);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_cancel);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_exec)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *cls, const char *method,
const char *inbuf, size_t in_len,
char *buf, size_t out_len)
{
tracepoint(librados, rados_aio_exec_enter, io, o, completion);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist inbl;
inbl.append(inbuf, in_len);
int retval = ctx->aio_exec(oid, (librados::AioCompletionImpl*)completion,
cls, method, inbl, buf, out_len);
tracepoint(librados, rados_aio_exec_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_exec);
struct C_WatchCB : public librados::WatchCtx {
rados_watchcb_t wcb;
void *arg;
C_WatchCB(rados_watchcb_t _wcb, void *_arg) : wcb(_wcb), arg(_arg) {}
void notify(uint8_t opcode, uint64_t ver, bufferlist& bl) override {
wcb(opcode, ver, arg);
}
};
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch)(
rados_ioctx_t io, const char *o, uint64_t ver,
uint64_t *handle,
rados_watchcb_t watchcb, void *arg)
{
tracepoint(librados, rados_watch_enter, io, o, ver, watchcb, arg);
uint64_t *cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
C_WatchCB *wc = new C_WatchCB(watchcb, arg);
int retval = ctx->watch(oid, cookie, wc, NULL, true);
tracepoint(librados, rados_watch_exit, retval, *handle);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch);
struct C_WatchCB2 : public librados::WatchCtx2 {
rados_watchcb2_t wcb;
rados_watcherrcb_t errcb;
void *arg;
C_WatchCB2(rados_watchcb2_t _wcb,
rados_watcherrcb_t _errcb,
void *_arg) : wcb(_wcb), errcb(_errcb), arg(_arg) {}
void handle_notify(uint64_t notify_id,
uint64_t cookie,
uint64_t notifier_gid,
bufferlist& bl) override {
wcb(arg, notify_id, cookie, notifier_gid, bl.c_str(), bl.length());
}
void handle_error(uint64_t cookie, int err) override {
if (errcb)
errcb(arg, cookie, err);
}
};
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch3)(
rados_ioctx_t io, const char *o, uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb,
uint32_t timeout,
void *arg)
{
tracepoint(librados, rados_watch3_enter, io, o, handle, watchcb, timeout, arg);
int ret;
if (!watchcb || !o || !handle) {
ret = -EINVAL;
} else {
uint64_t *cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
C_WatchCB2 *wc = new C_WatchCB2(watchcb, watcherrcb, arg);
ret = ctx->watch(oid, cookie, NULL, wc, timeout, true);
}
tracepoint(librados, rados_watch3_exit, ret, handle ? *handle : 0);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch3);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch2)(
rados_ioctx_t io, const char *o, uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb,
void *arg)
{
return LIBRADOS_C_API_DEFAULT_F(rados_watch3)(
io, o, handle, watchcb, watcherrcb, 0, arg);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_watch2)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb,
uint32_t timeout, void *arg)
{
tracepoint(librados, rados_aio_watch2_enter, io, o, completion, handle, watchcb, timeout, arg);
int ret;
if (!completion || !watchcb || !o || !handle) {
ret = -EINVAL;
} else {
uint64_t *cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
librados::AioCompletionImpl *c =
reinterpret_cast<librados::AioCompletionImpl*>(completion);
C_WatchCB2 *wc = new C_WatchCB2(watchcb, watcherrcb, arg);
ret = ctx->aio_watch(oid, c, cookie, NULL, wc, timeout, true);
}
tracepoint(librados, rados_aio_watch2_exit, ret, handle ? *handle : 0);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_watch2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_watch)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
uint64_t *handle,
rados_watchcb2_t watchcb,
rados_watcherrcb_t watcherrcb, void *arg)
{
return LIBRADOS_C_API_DEFAULT_F(rados_aio_watch2)(
io, o, completion, handle, watchcb, watcherrcb, 0, arg);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_watch);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_unwatch)(
rados_ioctx_t io,
const char *o,
uint64_t handle)
{
tracepoint(librados, rados_unwatch_enter, io, o, handle);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->unwatch(cookie);
tracepoint(librados, rados_unwatch_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unwatch);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_unwatch2)(
rados_ioctx_t io,
uint64_t handle)
{
tracepoint(librados, rados_unwatch2_enter, io, handle);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->unwatch(cookie);
tracepoint(librados, rados_unwatch2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unwatch2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_unwatch)(
rados_ioctx_t io, uint64_t handle,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_unwatch_enter, io, handle, completion);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c =
reinterpret_cast<librados::AioCompletionImpl*>(completion);
int retval = ctx->aio_unwatch(cookie, c);
tracepoint(librados, rados_aio_unwatch_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_unwatch);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch_check)(
rados_ioctx_t io,
uint64_t handle)
{
tracepoint(librados, rados_watch_check_enter, io, handle);
uint64_t cookie = handle;
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
int retval = ctx->watch_check(cookie);
tracepoint(librados, rados_watch_check_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch_check);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_notify)(
rados_ioctx_t io, const char *o,
uint64_t ver, const char *buf, int buf_len)
{
tracepoint(librados, rados_notify_enter, io, o, ver, buf, buf_len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bufferptr p = buffer::create(buf_len);
memcpy(p.c_str(), buf, buf_len);
bl.push_back(p);
}
int retval = ctx->notify(oid, bl, 0, NULL, NULL, NULL);
tracepoint(librados, rados_notify_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_notify);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_notify2)(
rados_ioctx_t io, const char *o,
const char *buf, int buf_len,
uint64_t timeout_ms,
char **reply_buffer,
size_t *reply_buffer_len)
{
tracepoint(librados, rados_notify2_enter, io, o, buf, buf_len, timeout_ms);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bufferptr p = buffer::create(buf_len);
memcpy(p.c_str(), buf, buf_len);
bl.push_back(p);
}
int ret = ctx->notify(oid, bl, timeout_ms, NULL, reply_buffer, reply_buffer_len);
tracepoint(librados, rados_notify2_exit, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_notify2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_decode_notify_response)(
char *reply_buffer, size_t reply_buffer_len,
struct notify_ack_t **acks, size_t *nr_acks,
struct notify_timeout_t **timeouts, size_t *nr_timeouts)
{
if (!reply_buffer || !reply_buffer_len) {
return -EINVAL;
}
bufferlist bl;
bl.append(reply_buffer, reply_buffer_len);
map<pair<uint64_t,uint64_t>,bufferlist> acked;
set<pair<uint64_t,uint64_t>> missed;
auto iter = bl.cbegin();
decode(acked, iter);
decode(missed, iter);
*acks = nullptr;
*nr_acks = acked.size();
if (*nr_acks) {
*acks = new notify_ack_t[*nr_acks];
struct notify_ack_t *ack = *acks;
for (auto &[who, payload] : acked) {
ack->notifier_id = who.first;
ack->cookie = who.second;
ack->payload = nullptr;
ack->payload_len = payload.length();
if (ack->payload_len) {
ack->payload = (char *)malloc(ack->payload_len);
memcpy(ack->payload, payload.c_str(), ack->payload_len);
}
ack++;
}
}
*timeouts = nullptr;
*nr_timeouts = missed.size();
if (*nr_timeouts) {
*timeouts = new notify_timeout_t[*nr_timeouts];
struct notify_timeout_t *timeout = *timeouts;
for (auto &[notifier_id, cookie] : missed) {
timeout->notifier_id = notifier_id;
timeout->cookie = cookie;
timeout++;
}
}
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_decode_notify_response);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_free_notify_response)(
struct notify_ack_t *acks, size_t nr_acks,
struct notify_timeout_t *timeouts)
{
for (uint64_t n = 0; n < nr_acks; ++n) {
assert(acks);
if (acks[n].payload) {
free(acks[n].payload);
}
}
if (acks) {
delete[] acks;
}
if (timeouts) {
delete[] timeouts;
}
}
LIBRADOS_C_API_BASE_DEFAULT(rados_free_notify_response);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_notify)(
rados_ioctx_t io, const char *o,
rados_completion_t completion,
const char *buf, int buf_len,
uint64_t timeout_ms, char **reply_buffer,
size_t *reply_buffer_len)
{
tracepoint(librados, rados_aio_notify_enter, io, o, completion, buf, buf_len,
timeout_ms);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bl.push_back(buffer::copy(buf, buf_len));
}
librados::AioCompletionImpl *c =
reinterpret_cast<librados::AioCompletionImpl*>(completion);
int ret = ctx->aio_notify(oid, c, bl, timeout_ms, NULL, reply_buffer,
reply_buffer_len);
tracepoint(librados, rados_aio_notify_exit, ret);
return ret;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_notify);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_notify_ack)(
rados_ioctx_t io, const char *o,
uint64_t notify_id, uint64_t handle,
const char *buf, int buf_len)
{
tracepoint(librados, rados_notify_ack_enter, io, o, notify_id, handle, buf, buf_len);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
bufferlist bl;
if (buf) {
bufferptr p = buffer::create(buf_len);
memcpy(p.c_str(), buf, buf_len);
bl.push_back(p);
}
ctx->notify_ack(oid, notify_id, handle, bl);
tracepoint(librados, rados_notify_ack_exit, 0);
return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_notify_ack);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_watch_flush)(rados_t cluster)
{
tracepoint(librados, rados_watch_flush_enter, cluster);
librados::RadosClient *client = (librados::RadosClient *)cluster;
int retval = client->watch_flush();
tracepoint(librados, rados_watch_flush_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_watch_flush);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_watch_flush)(
rados_t cluster,
rados_completion_t completion)
{
tracepoint(librados, rados_aio_watch_flush_enter, cluster, completion);
librados::RadosClient *client = (librados::RadosClient *)cluster;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
int retval = client->async_watch_flush(c);
tracepoint(librados, rados_aio_watch_flush_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_watch_flush);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_set_alloc_hint)(
rados_ioctx_t io, const char *o,
uint64_t expected_object_size,
uint64_t expected_write_size)
{
tracepoint(librados, rados_set_alloc_hint_enter, io, o, expected_object_size, expected_write_size);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->set_alloc_hint(oid, expected_object_size,
expected_write_size, 0);
tracepoint(librados, rados_set_alloc_hint_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_alloc_hint);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_set_alloc_hint2)(
rados_ioctx_t io, const char *o,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
tracepoint(librados, rados_set_alloc_hint2_enter, io, o, expected_object_size, expected_write_size, flags);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
object_t oid(o);
int retval = ctx->set_alloc_hint(oid, expected_object_size,
expected_write_size, flags);
tracepoint(librados, rados_set_alloc_hint2_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_set_alloc_hint2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_lock_exclusive)(
rados_ioctx_t io, const char * o,
const char * name, const char * cookie,
const char * desc,
struct timeval * duration, uint8_t flags)
{
tracepoint(librados, rados_lock_exclusive_enter, io, o, name, cookie, desc, duration, flags);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.lock_exclusive(o, name, cookie, desc, duration, flags);
tracepoint(librados, rados_lock_exclusive_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_lock_exclusive);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_lock_shared)(
rados_ioctx_t io, const char * o,
const char * name, const char * cookie,
const char * tag, const char * desc,
struct timeval * duration, uint8_t flags)
{
tracepoint(librados, rados_lock_shared_enter, io, o, name, cookie, tag, desc, duration, flags);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.lock_shared(o, name, cookie, tag, desc, duration, flags);
tracepoint(librados, rados_lock_shared_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_lock_shared);
// Release the advisory lock `name` (with matching `cookie`) on object o.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_unlock)(
  rados_ioctx_t io, const char *o, const char *name,
  const char *cookie)
{
  tracepoint(librados, rados_unlock_enter, io, o, name, cookie);
  librados::IoCtx ioctx;
  librados::IoCtx::from_rados_ioctx_t(io, ioctx);
  const int r = ioctx.unlock(o, name, cookie);
  tracepoint(librados, rados_unlock_exit, r);
  return r;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_unlock);
// Asynchronously release the advisory lock `name` (with matching `cookie`)
// on object o; completion fires when the unlock is acknowledged.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_unlock)(
  rados_ioctx_t io, const char *o, const char *name,
  const char *cookie, rados_completion_t completion)
{
  tracepoint(librados, rados_aio_unlock_enter, io, o, name, cookie, completion);
  librados::IoCtx ctx;
  librados::IoCtx::from_rados_ioctx_t(io, ctx);
  librados::AioCompletionImpl *comp = (librados::AioCompletionImpl*)completion;
  // Take an extra reference on the impl: presumably the stack-allocated
  // AioCompletion wrapper below drops one when it is destroyed — confirm
  // against the AioCompletion destructor before changing this.
  comp->get();
  librados::AioCompletion c(comp);
  int retval = ctx.aio_unlock(o, name, cookie, &c);
  tracepoint(librados, rados_aio_unlock_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_unlock);
extern "C" ssize_t LIBRADOS_C_API_DEFAULT_F(rados_list_lockers)(
rados_ioctx_t io, const char *o,
const char *name, int *exclusive,
char *tag, size_t *tag_len,
char *clients, size_t *clients_len,
char *cookies, size_t *cookies_len,
char *addrs, size_t *addrs_len)
{
tracepoint(librados, rados_list_lockers_enter, io, o, name, *tag_len, *clients_len, *cookies_len, *addrs_len);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
std::string name_str = name;
std::string oid = o;
std::string tag_str;
int tmp_exclusive;
std::list<librados::locker_t> lockers;
int r = ctx.list_lockers(oid, name_str, &tmp_exclusive, &tag_str, &lockers);
if (r < 0) {
tracepoint(librados, rados_list_lockers_exit, r, *exclusive, "", *tag_len, *clients_len, *cookies_len, *addrs_len);
return r;
}
size_t clients_total = 0;
size_t cookies_total = 0;
size_t addrs_total = 0;
list<librados::locker_t>::const_iterator it;
for (it = lockers.begin(); it != lockers.end(); ++it) {
clients_total += it->client.length() + 1;
cookies_total += it->cookie.length() + 1;
addrs_total += it->address.length() + 1;
}
bool too_short = ((clients_total > *clients_len) ||
(cookies_total > *cookies_len) ||
(addrs_total > *addrs_len) ||
(tag_str.length() + 1 > *tag_len));
*clients_len = clients_total;
*cookies_len = cookies_total;
*addrs_len = addrs_total;
*tag_len = tag_str.length() + 1;
if (too_short) {
tracepoint(librados, rados_list_lockers_exit, -ERANGE, *exclusive, "", *tag_len, *clients_len, *cookies_len, *addrs_len);
return -ERANGE;
}
strcpy(tag, tag_str.c_str());
char *clients_p = clients;
char *cookies_p = cookies;
char *addrs_p = addrs;
for (it = lockers.begin(); it != lockers.end(); ++it) {
strcpy(clients_p, it->client.c_str());
strcpy(cookies_p, it->cookie.c_str());
strcpy(addrs_p, it->address.c_str());
tracepoint(librados, rados_list_lockers_locker, clients_p, cookies_p, addrs_p);
clients_p += it->client.length() + 1;
cookies_p += it->cookie.length() + 1;
addrs_p += it->address.length() + 1;
}
if (tmp_exclusive)
*exclusive = 1;
else
*exclusive = 0;
int retval = lockers.size();
tracepoint(librados, rados_list_lockers_exit, retval, *exclusive, tag, *tag_len, *clients_len, *cookies_len, *addrs_len);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_list_lockers);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_break_lock)(
rados_ioctx_t io, const char *o,
const char *name, const char *client,
const char *cookie)
{
tracepoint(librados, rados_break_lock_enter, io, o, name, client, cookie);
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(io, ctx);
int retval = ctx.break_lock(o, name, client, cookie);
tracepoint(librados, rados_break_lock_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_break_lock);
extern "C" rados_write_op_t LIBRADOS_C_API_DEFAULT_F(rados_create_write_op)()
{
tracepoint(librados, rados_create_write_op_enter);
rados_write_op_t retval = new (std::nothrow) librados::ObjectOperationImpl;
tracepoint(librados, rados_create_write_op_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create_write_op);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_release_write_op)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_release_write_op_enter, write_op);
delete static_cast<librados::ObjectOperationImpl*>(write_op);
tracepoint(librados, rados_release_write_op_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_release_write_op);
static ::ObjectOperation* to_object_operation(rados_write_op_t write_op)
{
return &static_cast<librados::ObjectOperationImpl*>(write_op)->o;
}
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_set_flags)(
rados_write_op_t write_op,
int flags)
{
tracepoint(librados, rados_write_op_set_flags_enter, write_op, flags);
to_object_operation(write_op)->set_last_op_flags(get_op_flags(flags));
tracepoint(librados, rados_write_op_set_flags_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_set_flags);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_assert_version)(
rados_write_op_t write_op,
uint64_t ver)
{
tracepoint(librados, rados_write_op_assert_version_enter, write_op, ver);
to_object_operation(write_op)->assert_version(ver);
tracepoint(librados, rados_write_op_assert_version_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_assert_version);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_assert_exists)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_write_op_assert_exists_enter, write_op);
to_object_operation(write_op)->stat(nullptr, nullptr, nullptr);
tracepoint(librados, rados_write_op_assert_exists_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_assert_exists);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_cmpext)(
rados_write_op_t write_op,
const char *cmp_buf,
size_t cmp_len,
uint64_t off,
int *prval)
{
tracepoint(librados, rados_write_op_cmpext_enter, write_op, cmp_buf,
cmp_len, off, prval);
to_object_operation(write_op)->cmpext(off, cmp_len, cmp_buf, prval);
tracepoint(librados, rados_write_op_cmpext_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_cmpext);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_cmpxattr)(
rados_write_op_t write_op,
const char *name,
uint8_t comparison_operator,
const char *value,
size_t value_len)
{
tracepoint(librados, rados_write_op_cmpxattr_enter, write_op, name, comparison_operator, value, value_len);
bufferlist bl;
bl.append(value, value_len);
to_object_operation(write_op)->cmpxattr(name,
comparison_operator,
CEPH_OSD_CMPXATTR_MODE_STRING,
bl);
tracepoint(librados, rados_write_op_cmpxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_cmpxattr);
static void rados_c_omap_cmp(ObjectOperation *op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t key_len,
size_t val_len,
int *prval)
{
bufferlist bl;
bl.append(val, val_len);
std::map<std::string, pair<bufferlist, int> > assertions;
string lkey = string(key, key_len);
assertions[lkey] = std::make_pair(bl, comparison_operator);
op->omap_cmp(assertions, prval);
}
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_cmp)(
rados_write_op_t write_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_write_op_omap_cmp_enter, write_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp(to_object_operation(write_op), key, comparison_operator,
val, strlen(key), val_len, prval);
tracepoint(librados, rados_write_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_cmp);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_cmp2)(
rados_write_op_t write_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t key_len,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_write_op_omap_cmp_enter, write_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp(to_object_operation(write_op), key, comparison_operator,
val, key_len, val_len, prval);
tracepoint(librados, rados_write_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_cmp2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_setxattr)(
rados_write_op_t write_op,
const char *name,
const char *value,
size_t value_len)
{
tracepoint(librados, rados_write_op_setxattr_enter, write_op, name, value, value_len);
bufferlist bl;
bl.append(value, value_len);
to_object_operation(write_op)->setxattr(name, bl);
tracepoint(librados, rados_write_op_setxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_setxattr);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_rmxattr)(
rados_write_op_t write_op,
const char *name)
{
tracepoint(librados, rados_write_op_rmxattr_enter, write_op, name);
to_object_operation(write_op)->rmxattr(name);
tracepoint(librados, rados_write_op_rmxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_rmxattr);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_create)(
rados_write_op_t write_op,
int exclusive,
const char* category) // unused
{
tracepoint(librados, rados_write_op_create_enter, write_op, exclusive);
to_object_operation(write_op)->create(!!exclusive);
tracepoint(librados, rados_write_op_create_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_create);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_write)(
rados_write_op_t write_op,
const char *buffer,
size_t len,
uint64_t offset)
{
tracepoint(librados, rados_write_op_write_enter, write_op, buffer, len, offset);
bufferlist bl;
bl.append(buffer,len);
to_object_operation(write_op)->write(offset, bl);
tracepoint(librados, rados_write_op_write_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_write);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_write_full)(
rados_write_op_t write_op,
const char *buffer,
size_t len)
{
tracepoint(librados, rados_write_op_write_full_enter, write_op, buffer, len);
bufferlist bl;
bl.append(buffer,len);
to_object_operation(write_op)->write_full(bl);
tracepoint(librados, rados_write_op_write_full_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_write_full);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_writesame)(
rados_write_op_t write_op,
const char *buffer,
size_t data_len,
size_t write_len,
uint64_t offset)
{
tracepoint(librados, rados_write_op_writesame_enter, write_op, buffer, data_len, write_len, offset);
bufferlist bl;
bl.append(buffer, data_len);
to_object_operation(write_op)->writesame(offset, write_len, bl);
tracepoint(librados, rados_write_op_writesame_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_writesame);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_append)(
rados_write_op_t write_op,
const char *buffer,
size_t len)
{
tracepoint(librados, rados_write_op_append_enter, write_op, buffer, len);
bufferlist bl;
bl.append(buffer,len);
to_object_operation(write_op)->append(bl);
tracepoint(librados, rados_write_op_append_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_append);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_remove)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_write_op_remove_enter, write_op);
to_object_operation(write_op)->remove();
tracepoint(librados, rados_write_op_remove_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_remove);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_truncate)(
rados_write_op_t write_op,
uint64_t offset)
{
tracepoint(librados, rados_write_op_truncate_enter, write_op, offset);
to_object_operation(write_op)->truncate(offset);
tracepoint(librados, rados_write_op_truncate_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_truncate);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_zero)(
rados_write_op_t write_op,
uint64_t offset,
uint64_t len)
{
tracepoint(librados, rados_write_op_zero_enter, write_op, offset, len);
to_object_operation(write_op)->zero(offset, len);
tracepoint(librados, rados_write_op_zero_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_zero);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_exec)(
rados_write_op_t write_op,
const char *cls,
const char *method,
const char *in_buf,
size_t in_len,
int *prval)
{
tracepoint(librados, rados_write_op_exec_enter, write_op, cls, method, in_buf, in_len, prval);
bufferlist inbl;
inbl.append(in_buf, in_len);
to_object_operation(write_op)->call(cls, method, inbl, NULL, NULL, prval);
tracepoint(librados, rados_write_op_exec_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_exec);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_set)(
rados_write_op_t write_op,
char const* const* keys,
char const* const* vals,
const size_t *lens,
size_t num)
{
tracepoint(librados, rados_write_op_omap_set_enter, write_op, num);
std::map<std::string, bufferlist> entries;
for (size_t i = 0; i < num; ++i) {
tracepoint(librados, rados_write_op_omap_set_entry, keys[i], vals[i], lens[i]);
bufferlist bl(lens[i]);
bl.append(vals[i], lens[i]);
entries[keys[i]] = bl;
}
to_object_operation(write_op)->omap_set(entries);
tracepoint(librados, rados_write_op_omap_set_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_set);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_set2)(
rados_write_op_t write_op,
char const* const* keys,
char const* const* vals,
const size_t *key_lens,
const size_t *val_lens,
size_t num)
{
tracepoint(librados, rados_write_op_omap_set_enter, write_op, num);
std::map<std::string, bufferlist> entries;
for (size_t i = 0; i < num; ++i) {
bufferlist bl(val_lens[i]);
bl.append(vals[i], val_lens[i]);
string key(keys[i], key_lens[i]);
entries[key] = bl;
}
to_object_operation(write_op)->omap_set(entries);
tracepoint(librados, rados_write_op_omap_set_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_set2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_rm_keys)(
rados_write_op_t write_op,
char const* const* keys,
size_t keys_len)
{
tracepoint(librados, rados_write_op_omap_rm_keys_enter, write_op, keys_len);
for(size_t i = 0; i < keys_len; i++) {
tracepoint(librados, rados_write_op_omap_rm_keys_entry, keys[i]);
}
std::set<std::string> to_remove(keys, keys + keys_len);
to_object_operation(write_op)->omap_rm_keys(to_remove);
tracepoint(librados, rados_write_op_omap_rm_keys_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_rm_keys);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_rm_keys2)(
rados_write_op_t write_op,
char const* const* keys,
const size_t* key_lens,
size_t keys_len)
{
tracepoint(librados, rados_write_op_omap_rm_keys_enter, write_op, keys_len);
std::set<std::string> to_remove;
for(size_t i = 0; i < keys_len; i++) {
to_remove.emplace(keys[i], key_lens[i]);
}
to_object_operation(write_op)->omap_rm_keys(to_remove);
tracepoint(librados, rados_write_op_omap_rm_keys_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_rm_keys2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_rm_range2)(
rados_write_op_t write_op,
const char *key_begin,
size_t key_begin_len,
const char *key_end,
size_t key_end_len)
{
tracepoint(librados, rados_write_op_omap_rm_range_enter,
write_op, key_begin, key_end);
to_object_operation(write_op)->omap_rm_range({key_begin, key_begin_len},
{key_end, key_end_len});
tracepoint(librados, rados_write_op_omap_rm_range_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_rm_range2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_omap_clear)(
rados_write_op_t write_op)
{
tracepoint(librados, rados_write_op_omap_clear_enter, write_op);
to_object_operation(write_op)->omap_clear();
tracepoint(librados, rados_write_op_omap_clear_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_omap_clear);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_set_alloc_hint)(
rados_write_op_t write_op,
uint64_t expected_object_size,
uint64_t expected_write_size)
{
tracepoint(librados, rados_write_op_set_alloc_hint_enter, write_op, expected_object_size, expected_write_size);
to_object_operation(write_op)->set_alloc_hint(expected_object_size,
expected_write_size, 0);
tracepoint(librados, rados_write_op_set_alloc_hint_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_set_alloc_hint);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_write_op_set_alloc_hint2)(
rados_write_op_t write_op,
uint64_t expected_object_size,
uint64_t expected_write_size,
uint32_t flags)
{
tracepoint(librados, rados_write_op_set_alloc_hint2_enter, write_op, expected_object_size, expected_write_size, flags);
to_object_operation(write_op)->set_alloc_hint(expected_object_size,
expected_write_size,
flags);
tracepoint(librados, rados_write_op_set_alloc_hint2_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_set_alloc_hint2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write_op_operate)(
rados_write_op_t write_op,
rados_ioctx_t io,
const char *oid,
time_t *mtime,
int flags)
{
tracepoint(librados, rados_write_op_operate_enter, write_op, io, oid, mtime, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (mtime) {
oimpl->rt = ceph::real_clock::from_time_t(*mtime);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->operate(obj, &oimpl->o, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_operate);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_write_op_operate2)(
rados_write_op_t write_op,
rados_ioctx_t io,
const char *oid,
struct timespec *ts,
int flags)
{
tracepoint(librados, rados_write_op_operate2_enter, write_op, io, oid, ts, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
if (ts) {
oimpl->rt = ceph::real_clock::from_timespec(*ts);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->operate(obj, &oimpl->o, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_write_op_operate2);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_op_operate)(
rados_write_op_t write_op,
rados_ioctx_t io,
rados_completion_t completion,
const char *oid,
time_t *mtime,
int flags)
{
tracepoint(librados, rados_aio_write_op_operate_enter, write_op, io, completion, oid, mtime, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
if (mtime) {
oimpl->rt = ceph::real_clock::from_time_t(*mtime);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->aio_operate(obj, &oimpl->o, c, ctx->snapc, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_aio_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_op_operate);
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_write_op_operate2)(
rados_write_op_t write_op,
rados_ioctx_t io,
rados_completion_t completion,
const char *oid,
struct timespec *mtime,
int flags)
{
tracepoint(librados, rados_aio_write_op_operate2_enter, write_op, io, completion, oid, mtime, flags);
object_t obj(oid);
auto oimpl = static_cast<librados::ObjectOperationImpl*>(write_op);
librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
if (mtime) {
oimpl->rt = ceph::real_clock::from_timespec(*mtime);
oimpl->prt = &oimpl->rt;
}
int retval = ctx->aio_operate(obj, &oimpl->o, c, ctx->snapc, oimpl->prt, translate_flags(flags));
tracepoint(librados, rados_aio_write_op_operate_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_write_op_operate2);
extern "C" rados_read_op_t LIBRADOS_C_API_DEFAULT_F(rados_create_read_op)()
{
tracepoint(librados, rados_create_read_op_enter);
rados_read_op_t retval = new (std::nothrow)::ObjectOperation;
tracepoint(librados, rados_create_read_op_exit, retval);
return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_create_read_op);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_release_read_op)(
rados_read_op_t read_op)
{
tracepoint(librados, rados_release_read_op_enter, read_op);
delete (::ObjectOperation *)read_op;
tracepoint(librados, rados_release_read_op_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_release_read_op);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_set_flags)(
rados_read_op_t read_op,
int flags)
{
tracepoint(librados, rados_read_op_set_flags_enter, read_op, flags);
((::ObjectOperation *)read_op)->set_last_op_flags(get_op_flags(flags));
tracepoint(librados, rados_read_op_set_flags_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_set_flags);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_assert_version)(
rados_read_op_t read_op,
uint64_t ver)
{
tracepoint(librados, rados_read_op_assert_version_enter, read_op, ver);
((::ObjectOperation *)read_op)->assert_version(ver);
tracepoint(librados, rados_read_op_assert_version_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_assert_version);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_assert_exists)(
rados_read_op_t read_op)
{
tracepoint(librados, rados_read_op_assert_exists_enter, read_op);
((::ObjectOperation *)read_op)->stat(nullptr, nullptr, nullptr);
tracepoint(librados, rados_read_op_assert_exists_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_assert_exists);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_cmpext)(
rados_read_op_t read_op,
const char *cmp_buf,
size_t cmp_len,
uint64_t off,
int *prval)
{
tracepoint(librados, rados_read_op_cmpext_enter, read_op, cmp_buf,
cmp_len, off, prval);
((::ObjectOperation *)read_op)->cmpext(off, cmp_len, cmp_buf, prval);
tracepoint(librados, rados_read_op_cmpext_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_cmpext);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_cmpxattr)(
rados_read_op_t read_op,
const char *name,
uint8_t comparison_operator,
const char *value,
size_t value_len)
{
tracepoint(librados, rados_read_op_cmpxattr_enter, read_op, name, comparison_operator, value, value_len);
bufferlist bl;
bl.append(value, value_len);
((::ObjectOperation *)read_op)->cmpxattr(name,
comparison_operator,
CEPH_OSD_CMPXATTR_MODE_STRING,
bl);
tracepoint(librados, rados_read_op_cmpxattr_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_cmpxattr);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_cmp)(
rados_read_op_t read_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_read_op_omap_cmp_enter, read_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp((::ObjectOperation *)read_op, key, comparison_operator,
val, strlen(key), val_len, prval);
tracepoint(librados, rados_read_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_cmp);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_cmp2)(
rados_read_op_t read_op,
const char *key,
uint8_t comparison_operator,
const char *val,
size_t key_len,
size_t val_len,
int *prval)
{
tracepoint(librados, rados_read_op_omap_cmp_enter, read_op, key, comparison_operator, val, val_len, prval);
rados_c_omap_cmp((::ObjectOperation *)read_op, key, comparison_operator,
val, key_len, val_len, prval);
tracepoint(librados, rados_read_op_omap_cmp_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_cmp2);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_stat)(
rados_read_op_t read_op,
uint64_t *psize,
time_t *pmtime,
int *prval)
{
tracepoint(librados, rados_read_op_stat_enter, read_op, psize, pmtime, prval);
((::ObjectOperation *)read_op)->stat(psize, pmtime, prval);
tracepoint(librados, rados_read_op_stat_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_stat);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_stat2)(
rados_read_op_t read_op,
uint64_t *psize,
struct timespec *pmtime,
int *prval)
{
tracepoint(librados, rados_read_op_stat2_enter, read_op, psize, pmtime, prval);
((::ObjectOperation *)read_op)->stat(psize, pmtime, prval);
tracepoint(librados, rados_read_op_stat2_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_stat2);
class C_bl_to_buf : public Context {
char *out_buf;
size_t out_len;
size_t *bytes_read;
int *prval;
public:
bufferlist out_bl;
C_bl_to_buf(char *out_buf,
size_t out_len,
size_t *bytes_read,
int *prval) : out_buf(out_buf), out_len(out_len),
bytes_read(bytes_read), prval(prval) {}
void finish(int r) override {
if (out_bl.length() > out_len) {
if (prval)
*prval = -ERANGE;
if (bytes_read)
*bytes_read = 0;
return;
}
if (bytes_read)
*bytes_read = out_bl.length();
if (out_buf && !out_bl.is_provided_buffer(out_buf))
out_bl.begin().copy(out_bl.length(), out_buf);
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_read)(
rados_read_op_t read_op,
uint64_t offset,
size_t len,
char *buf,
size_t *bytes_read,
int *prval)
{
tracepoint(librados, rados_read_op_read_enter, read_op, offset, len, buf, bytes_read, prval);
C_bl_to_buf *ctx = new C_bl_to_buf(buf, len, bytes_read, prval);
ctx->out_bl.push_back(buffer::create_static(len, buf));
((::ObjectOperation *)read_op)->read(offset, len, &ctx->out_bl, prval, ctx);
tracepoint(librados, rados_read_op_read_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_read);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_checksum)(
rados_read_op_t read_op,
rados_checksum_type_t type,
const char *init_value,
size_t init_value_len,
uint64_t offset, size_t len,
size_t chunk_size, char *pchecksum,
size_t checksum_len, int *prval)
{
tracepoint(librados, rados_read_op_checksum_enter, read_op, type, init_value,
init_value_len, offset, len, chunk_size);
bufferlist init_value_bl;
init_value_bl.append(init_value, init_value_len);
C_bl_to_buf *ctx = nullptr;
if (pchecksum != nullptr) {
ctx = new C_bl_to_buf(pchecksum, checksum_len, nullptr, prval);
}
((::ObjectOperation *)read_op)->checksum(get_checksum_op_type(type),
init_value_bl, offset, len,
chunk_size,
(ctx ? &ctx->out_bl : nullptr),
prval, ctx);
tracepoint(librados, rados_read_op_checksum_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_checksum);
class C_out_buffer : public Context {
char **out_buf;
size_t *out_len;
public:
bufferlist out_bl;
C_out_buffer(char **out_buf, size_t *out_len) : out_buf(out_buf),
out_len(out_len) {}
void finish(int r) override {
// ignore r since we don't know the meaning of return values
// from custom class methods
do_out_buffer(out_bl, out_buf, out_len);
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_exec)(
rados_read_op_t read_op,
const char *cls,
const char *method,
const char *in_buf,
size_t in_len,
char **out_buf,
size_t *out_len,
int *prval)
{
tracepoint(librados, rados_read_op_exec_enter, read_op, cls, method, in_buf, in_len, out_buf, out_len, prval);
bufferlist inbl;
inbl.append(in_buf, in_len);
C_out_buffer *ctx = new C_out_buffer(out_buf, out_len);
((::ObjectOperation *)read_op)->call(cls, method, inbl, &ctx->out_bl, ctx,
prval);
tracepoint(librados, rados_read_op_exec_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_exec);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_exec_user_buf)(
rados_read_op_t read_op,
const char *cls,
const char *method,
const char *in_buf,
size_t in_len,
char *out_buf,
size_t out_len,
size_t *used_len,
int *prval)
{
tracepoint(librados, rados_read_op_exec_user_buf_enter, read_op, cls, method, in_buf, in_len, out_buf, out_len, used_len, prval);
C_bl_to_buf *ctx = new C_bl_to_buf(out_buf, out_len, used_len, prval);
bufferlist inbl;
inbl.append(in_buf, in_len);
((::ObjectOperation *)read_op)->call(cls, method, inbl, &ctx->out_bl, ctx,
prval);
tracepoint(librados, rados_read_op_exec_user_buf_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_exec_user_buf);
struct RadosOmapIter {
std::map<std::string, bufferlist> values;
std::map<std::string, bufferlist>::iterator i;
};
class C_OmapIter : public Context {
RadosOmapIter *iter;
public:
explicit C_OmapIter(RadosOmapIter *iter) : iter(iter) {}
void finish(int r) override {
iter->i = iter->values.begin();
}
};
class C_XattrsIter : public Context {
librados::RadosXattrsIter *iter;
public:
explicit C_XattrsIter(librados::RadosXattrsIter *iter) : iter(iter) {}
void finish(int r) override {
iter->i = iter->attrset.begin();
}
};
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_getxattrs)(
rados_read_op_t read_op,
rados_xattrs_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_getxattrs_enter, read_op, prval);
librados::RadosXattrsIter *xattrs_iter = new librados::RadosXattrsIter;
((::ObjectOperation *)read_op)->getxattrs(&xattrs_iter->attrset, prval);
((::ObjectOperation *)read_op)->set_handler(new C_XattrsIter(xattrs_iter));
*iter = xattrs_iter;
tracepoint(librados, rados_read_op_getxattrs_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_getxattrs);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals)(
rados_read_op_t read_op,
const char *start_after,
const char *filter_prefix,
uint64_t max_return,
rados_omap_iter_t *iter,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_vals_enter, read_op, start_after, filter_prefix, max_return, prval);
RadosOmapIter *omap_iter = new RadosOmapIter;
const char *start = start_after ? start_after : "";
const char *filter = filter_prefix ? filter_prefix : "";
((::ObjectOperation *)read_op)->omap_get_vals(
start,
filter,
max_return,
&omap_iter->values,
nullptr,
prval);
((::ObjectOperation *)read_op)->set_handler(new C_OmapIter(omap_iter));
*iter = omap_iter;
tracepoint(librados, rados_read_op_omap_get_vals_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals);
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals2)(
rados_read_op_t read_op,
const char *start_after,
const char *filter_prefix,
uint64_t max_return,
rados_omap_iter_t *iter,
unsigned char *pmore,
int *prval)
{
tracepoint(librados, rados_read_op_omap_get_vals_enter, read_op, start_after, filter_prefix, max_return, prval);
RadosOmapIter *omap_iter = new RadosOmapIter;
const char *start = start_after ? start_after : "";
const char *filter = filter_prefix ? filter_prefix : "";
((::ObjectOperation *)read_op)->omap_get_vals(
start,
filter,
max_return,
&omap_iter->values,
(bool*)pmore,
prval);
((::ObjectOperation *)read_op)->set_handler(new C_OmapIter(omap_iter));
*iter = omap_iter;
tracepoint(librados, rados_read_op_omap_get_vals_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals2);
struct C_OmapKeysIter : public Context {
RadosOmapIter *iter;
std::set<std::string> keys;
explicit C_OmapKeysIter(RadosOmapIter *iter) : iter(iter) {}
void finish(int r) override {
// map each key to an empty bl
for (std::set<std::string>::const_iterator i = keys.begin();
i != keys.end(); ++i) {
iter->values[*i];
}
iter->i = iter->values.begin();
}
};
// Queue a key-only omap listing on a read op (no truncation indicator).
// Keys greater than 'start_after' are returned through '*iter' on
// completion; release with rados_omap_get_end().
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_keys)(
  rados_read_op_t read_op,
  const char *start_after,
  uint64_t max_return,
  rados_omap_iter_t *iter,
  int *prval)
{
  tracepoint(librados, rados_read_op_omap_get_keys_enter, read_op, start_after, max_return, prval);
  RadosOmapIter *omap_iter = new RadosOmapIter;
  C_OmapKeysIter *ctx = new C_OmapKeysIter(omap_iter);
  ((::ObjectOperation *)read_op)->omap_get_keys(
    start_after ? start_after : "",
    max_return, &ctx->keys, nullptr, prval);
  ((::ObjectOperation *)read_op)->set_handler(ctx);
  *iter = omap_iter;
  tracepoint(librados, rados_read_op_omap_get_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_keys);

// Same as rados_read_op_omap_get_keys, but also reports through '*pmore'
// whether the listing was truncated at 'max_return'.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_keys2)(
  rados_read_op_t read_op,
  const char *start_after,
  uint64_t max_return,
  rados_omap_iter_t *iter,
  unsigned char *pmore,
  int *prval)
{
  tracepoint(librados, rados_read_op_omap_get_keys_enter, read_op, start_after, max_return, prval);
  RadosOmapIter *omap_iter = new RadosOmapIter;
  C_OmapKeysIter *ctx = new C_OmapKeysIter(omap_iter);
  ((::ObjectOperation *)read_op)->omap_get_keys(
    start_after ? start_after : "",
    max_return, &ctx->keys,
    (bool*)pmore, prval);
  ((::ObjectOperation *)read_op)->set_handler(ctx);
  *iter = omap_iter;
  tracepoint(librados, rados_read_op_omap_get_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_keys2);
// Shared implementation for the two by-keys C wrappers below: queues an
// omap lookup for the exact key set 'to_get' and hands ownership of the
// result map to a new RadosOmapIter returned via '*iter'.
static void internal_rados_read_op_omap_get_vals_by_keys(rados_read_op_t read_op,
                                                         set<string>& to_get,
                                                         rados_omap_iter_t *iter,
                                                         int *prval)
{
  RadosOmapIter *omap_iter = new RadosOmapIter;
  ((::ObjectOperation *)read_op)->omap_get_vals_by_keys(to_get,
                                                        &omap_iter->values,
                                                        prval);
  ((::ObjectOperation *)read_op)->set_handler(new C_OmapIter(omap_iter));
  *iter = omap_iter;
}

// Fetch omap values for an array of NUL-terminated keys.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals_by_keys)(
  rados_read_op_t read_op,
  char const* const* keys,
  size_t keys_len,
  rados_omap_iter_t *iter,
  int *prval)
{
  tracepoint(librados, rados_read_op_omap_get_vals_by_keys_enter, read_op, keys, keys_len, iter, prval);
  std::set<std::string> to_get(keys, keys + keys_len);
  internal_rados_read_op_omap_get_vals_by_keys(read_op, to_get, iter, prval);
  tracepoint(librados, rados_read_op_omap_get_vals_by_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals_by_keys);

// Variant with explicit per-key lengths, allowing keys that contain
// embedded NUL bytes.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_read_op_omap_get_vals_by_keys2)(
  rados_read_op_t read_op,
  char const* const* keys,
  size_t num_keys,
  const size_t* key_lens,
  rados_omap_iter_t *iter,
  int *prval)
{
  tracepoint(librados, rados_read_op_omap_get_vals_by_keys_enter, read_op, keys, num_keys, iter, prval);
  std::set<std::string> to_get;
  for (size_t i = 0; i < num_keys; i++) {
    to_get.emplace(keys[i], key_lens[i]);
  }
  internal_rados_read_op_omap_get_vals_by_keys(read_op, to_get, iter, prval);
  tracepoint(librados, rados_read_op_omap_get_vals_by_keys_exit, *iter);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_omap_get_vals_by_keys2);
// Advance an omap iterator and return the current key/value plus lengths.
// At end-of-iteration the outputs are set to NULL/0 and 0 is returned.
// Any output pointer may be NULL if the caller is not interested in it.
// Returned pointers reference memory owned by the iterator and stay valid
// until rados_omap_get_end().
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_omap_get_next2)(
  rados_omap_iter_t iter,
  char **key,
  char **val,
  size_t *key_len,
  size_t *val_len)
{
  tracepoint(librados, rados_omap_get_next_enter, iter);
  RadosOmapIter *it = static_cast<RadosOmapIter *>(iter);
  if (it->i == it->values.end()) {
    if (key)
      *key = NULL;
    if (val)
      *val = NULL;
    if (key_len)
      *key_len = 0;
    if (val_len)
      *val_len = 0;
    tracepoint(librados, rados_omap_get_next_exit, 0, key, val, val_len);
    return 0;
  }
  if (key)
    *key = (char*)it->i->first.c_str();
  if (val)
    // bufferlist::c_str() may rebuild a contiguous buffer internally
    *val = it->i->second.c_str();
  if (key_len)
    *key_len = it->i->first.length();
  if (val_len)
    *val_len = it->i->second.length();
  ++it->i;
  tracepoint(librados, rados_omap_get_next_exit, 0, key, val, val_len);
  return 0;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_get_next2);

// Legacy variant without a separate key length; keys are assumed
// NUL-terminated.  Delegates to rados_omap_get_next2.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_omap_get_next)(
  rados_omap_iter_t iter,
  char **key,
  char **val,
  size_t *len)
{
  return LIBRADOS_C_API_DEFAULT_F(rados_omap_get_next2)(iter, key, val, nullptr, len);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_get_next);
// Total number of key/value pairs held by the iterator (independent of the
// current iteration position).
extern "C" unsigned int LIBRADOS_C_API_DEFAULT_F(rados_omap_iter_size)(
  rados_omap_iter_t iter)
{
  RadosOmapIter *it = static_cast<RadosOmapIter *>(iter);
  return it->values.size();
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_iter_size);

// Release the iterator and all key/value memory it owns.  Any pointers
// previously returned by rados_omap_get_next*() become invalid.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_omap_get_end)(
  rados_omap_iter_t iter)
{
  tracepoint(librados, rados_omap_get_end_enter, iter);
  RadosOmapIter *it = static_cast<RadosOmapIter *>(iter);
  delete it;
  tracepoint(librados, rados_omap_get_end_exit);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_omap_get_end);
// Synchronously execute a prepared read op against object 'oid'.
// 'flags' are LIBRADOS_OPERATION_* values, translated to internal
// CEPH_OSD_FLAG_* bits.  Returns the operation result code.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_read_op_operate)(
  rados_read_op_t read_op,
  rados_ioctx_t io,
  const char *oid,
  int flags)
{
  tracepoint(librados, rados_read_op_operate_enter, read_op, io, oid, flags);
  object_t obj(oid);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  int retval = ctx->operate_read(obj, (::ObjectOperation *)read_op, NULL,
                                 translate_flags(flags));
  tracepoint(librados, rados_read_op_operate_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_read_op_operate);

// Asynchronous counterpart of rados_read_op_operate: queues the op and
// signals 'completion' when it finishes.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_aio_read_op_operate)(
  rados_read_op_t read_op,
  rados_ioctx_t io,
  rados_completion_t completion,
  const char *oid,
  int flags)
{
  tracepoint(librados, rados_aio_read_op_operate_enter, read_op, io, completion, oid, flags);
  object_t obj(oid);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  librados::AioCompletionImpl *c = (librados::AioCompletionImpl*)completion;
  int retval = ctx->aio_operate_read(obj, (::ObjectOperation *)read_op,
                                     c, translate_flags(flags), NULL);
  tracepoint(librados, rados_aio_read_op_operate_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_aio_read_op_operate);
// Pin object 'o' in the cache tier (synchronous).  Returns the operation
// result code.
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cache_pin)(
  rados_ioctx_t io,
  const char *o)
{
  tracepoint(librados, rados_cache_pin_enter, io, o);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->cache_pin(oid);
  tracepoint(librados, rados_cache_pin_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cache_pin);

// Release a previous cache-tier pin on object 'o' (synchronous).
extern "C" int LIBRADOS_C_API_DEFAULT_F(rados_cache_unpin)(
  rados_ioctx_t io,
  const char *o)
{
  tracepoint(librados, rados_cache_unpin_enter, io, o);
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  object_t oid(o);
  int retval = ctx->cache_unpin(oid);
  tracepoint(librados, rados_cache_unpin_exit, retval);
  return retval;
}
LIBRADOS_C_API_BASE_DEFAULT(rados_cache_unpin);
// Compute the m-th of n equal slices of the object-listing range
// [start, finish), writing the slice bounds into the caller-provided
// cursors '*split_start' / '*split_finish'.  All four cursors must be
// valid (non-NULL) hobject_t handles; violations abort via ceph_assert.
extern "C" void LIBRADOS_C_API_DEFAULT_F(rados_object_list_slice)(
  rados_ioctx_t io,
  const rados_object_list_cursor start,
  const rados_object_list_cursor finish,
  const size_t n,
  const size_t m,
  rados_object_list_cursor *split_start,
  rados_object_list_cursor *split_finish)
{
  librados::IoCtxImpl *ctx = (librados::IoCtxImpl *)io;
  ceph_assert(split_start);
  ceph_assert(split_finish);
  hobject_t *split_start_hobj = (hobject_t*)(*split_start);
  hobject_t *split_finish_hobj = (hobject_t*)(*split_finish);
  ceph_assert(split_start_hobj);
  ceph_assert(split_finish_hobj);
  hobject_t *start_hobj = (hobject_t*)(start);
  hobject_t *finish_hobj = (hobject_t*)(finish);
  ctx->object_list_slice(
    *start_hobj,
    *finish_hobj,
    n,
    m,
    split_start_hobj,
    split_finish_hobj);
}
LIBRADOS_C_API_BASE_DEFAULT(rados_object_list_slice);
| 150,658 | 31.157737 | 131 | cc |
null | ceph-main/src/librados/librados_c.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef LIBRADOS_C_H
#define LIBRADOS_C_H
#include "include/types.h"
#include "include/rados/librados.h"
namespace __librados_base {

// NOTE(review): this appears to be the base-version (pre-versioned-symbol)
// ABI layout of the public rados_pool_stat_t — field order and sizes must
// not change, or old binaries linked against the base symbols will read
// garbage.  Confirm against the versioned C API before touching.
struct rados_pool_stat_t {
  uint64_t num_bytes;
  uint64_t num_kb;
  uint64_t num_objects;
  uint64_t num_object_clones;
  uint64_t num_object_copies;
  uint64_t num_objects_missing_on_primary;
  uint64_t num_objects_unfound;
  uint64_t num_objects_degraded;
  uint64_t num_rd;
  uint64_t num_rd_kb;
  uint64_t num_wr;
  uint64_t num_wr_kb;
};

} // namespace __librados_base
#endif // LIBRADOS_C_H
| 636 | 20.233333 | 70 | h |
null | ceph-main/src/librados/librados_cxx.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2004-2012 Sage Weil <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <limits.h>
#include "common/config.h"
#include "common/errno.h"
#include "common/ceph_argparse.h"
#include "common/ceph_json.h"
#include "common/common_init.h"
#include "common/TracepointProvider.h"
#include "common/hobject.h"
#include "common/async/waiter.h"
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/types.h"
#include <include/stringify.h>
#include "librados/AioCompletionImpl.h"
#include "librados/IoCtxImpl.h"
#include "librados/ObjectOperationImpl.h"
#include "librados/PoolAsyncCompletionImpl.h"
#include "librados/RadosClient.h"
#include "librados/RadosXattrIter.h"
#include "librados/ListObjectImpl.h"
#include "librados/librados_util.h"
#include "cls/lock/cls_lock_client.h"
#include <string>
#include <map>
#include <set>
#include <vector>
#include <list>
#include <stdexcept>
#include <system_error>
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#else
#define tracepoint(...)
#endif
using std::list;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::stringstream;
using std::vector;
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "librados: "
static TracepointProvider::Traits tracepoint_traits("librados_tp.so", "rados_tracing");
/*
* Structure of this file
*
* RadosClient and the related classes are the internal implementation of librados.
* Above that layer sits the C API, found in include/rados/librados.h, and
* the C++ API, found in include/rados/librados.hpp
*
* The C++ API sometimes implements things in terms of the C API.
* Both the C++ and C API rely on RadosClient.
*
* Visually:
* +--------------------------------------+
* | C++ API |
* +--------------------+ |
* | C API | |
* +--------------------+-----------------+
* | RadosClient |
* +--------------------------------------+
*/
// Number of sub-operations queued on this compound operation.
// Returns 0 when the operation has no implementation (e.g. moved-from).
size_t librados::ObjectOperation::size()
{
  // The old code computed '&impl->o' first and then null-checked that
  // pointer — but the address of a member is never null, and evaluating
  // it when 'impl' is null is already undefined behavior.  Guard on
  // 'impl' itself instead.
  if (!impl)
    return 0;
  return impl->o.size();
}
// Deprecated: use set_op_flags2(int) instead.
void librados::ObjectOperation::set_op_flags(ObjectOperationFlags flags)
{
  set_op_flags2((int)flags);
}

// Apply LIBRADOS_OP_FLAG_* bits to the most recently queued sub-op.
void librados::ObjectOperation::set_op_flags2(int flags)
{
  ceph_assert(impl);
  impl->o.set_last_op_flags(get_op_flags(flags));
}

// Queue an extent compare: fails the op if the object's bytes at 'off'
// differ from 'cmp_bl'; per-op result goes to '*prval'.
void librados::ObjectOperation::cmpext(uint64_t off,
                                       const bufferlist &cmp_bl,
                                       int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  // copy: the internal call takes a mutable bufferlist
  bufferlist c = cmp_bl;
  o->cmpext(off, c, prval);
}

// Queue a string-mode xattr comparison ('op' is a CEPH_OSD_CMPXATTR_OP_*).
void librados::ObjectOperation::cmpxattr(const char *name, uint8_t op, const bufferlist& v)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->cmpxattr(name, op, CEPH_OSD_CMPXATTR_MODE_STRING, v);
}

// Queue a u64-mode xattr comparison; 'v' is encoded before sending.
void librados::ObjectOperation::cmpxattr(const char *name, uint8_t op, uint64_t v)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  bufferlist bl;
  encode(v, bl);
  o->cmpxattr(name, op, CEPH_OSD_CMPXATTR_MODE_U64, bl);
}

// Fail the op unless the object's user version equals 'ver'.
void librados::ObjectOperation::assert_version(uint64_t ver)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->assert_version(ver);
}

// Fail the op unless the object exists (implemented as a stat with all
// outputs discarded).
void librados::ObjectOperation::assert_exists()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->stat(nullptr, nullptr, nullptr);
}

// Queue an object-class method call; output and result are discarded.
void librados::ObjectOperation::exec(const char *cls, const char *method,
				     bufferlist& inbl)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->call(cls, method, inbl);
}

// Queue an object-class method call capturing output in '*outbl' and the
// per-op result in '*prval'.
void librados::ObjectOperation::exec(const char *cls, const char *method, bufferlist& inbl, bufferlist *outbl, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->call(cls, method, inbl, outbl, NULL, prval);
}

// Adapts an ObjectOperationCompletion callback to the internal Context
// interface.  Owns (and deletes) the completion after delivering the
// result and output buffer.
class ObjectOpCompletionCtx : public Context {
  librados::ObjectOperationCompletion *completion;
  bufferlist bl;
public:
  explicit ObjectOpCompletionCtx(librados::ObjectOperationCompletion *c) : completion(c) {}
  void finish(int r) override {
    completion->handle_completion(r, bl);
    delete completion;
  }
  bufferlist *outbl() {
    return &bl;
  }
};

// Queue an object-class method call whose result is delivered through a
// caller-supplied completion object (ownership is transferred here).
void librados::ObjectOperation::exec(const char *cls, const char *method, bufferlist& inbl, librados::ObjectOperationCompletion *completion)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  ObjectOpCompletionCtx *ctx = new ObjectOpCompletionCtx(completion);
  o->call(cls, method, inbl, ctx->outbl(), ctx, NULL);
}
// The ObjectReadOperation methods below are thin forwarders: each asserts
// the pimpl exists and queues one sub-op on the internal ::ObjectOperation.
// Output pointers are filled in only when the compound op later executes;
// '*prval' (where present) receives that sub-op's result code.

// Queue a stat returning size and mtime (seconds resolution).
void librados::ObjectReadOperation::stat(uint64_t *psize, time_t *pmtime, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->stat(psize, pmtime, prval);
}

// Queue a stat returning size and mtime with timespec resolution.
void librados::ObjectReadOperation::stat2(uint64_t *psize, struct timespec *pts, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->stat(psize, pts, prval);
}

// Queue a read of 'len' bytes at 'off' into '*pbl'.
void librados::ObjectReadOperation::read(size_t off, uint64_t len, bufferlist *pbl, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->read(off, len, pbl, prval, NULL);
}

// Queue a sparse read: '*m' maps extent offset -> length; data for all
// extents is concatenated into '*data_bl'.
void librados::ObjectReadOperation::sparse_read(uint64_t off, uint64_t len,
						std::map<uint64_t,uint64_t> *m,
						bufferlist *data_bl, int *prval,
						uint64_t truncate_size,
						uint32_t truncate_seq)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->sparse_read(off, len, m, data_bl, prval, truncate_size, truncate_seq);
}

// Queue a checksum of [off, off+len) in chunks of 'chunk_size'; results
// are encoded into '*pbl'.
void librados::ObjectReadOperation::checksum(rados_checksum_type_t type,
					     const bufferlist &init_value_bl,
					     uint64_t off, size_t len,
					     size_t chunk_size, bufferlist *pbl,
					     int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->checksum(get_checksum_op_type(type), init_value_bl, off, len, chunk_size,
	      pbl, prval, nullptr);
}

// Queue a single-xattr fetch.
void librados::ObjectReadOperation::getxattr(const char *name, bufferlist *pbl, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->getxattr(name, pbl, prval);
}

// Queue an omap value listing (no truncation indicator).
void librados::ObjectReadOperation::omap_get_vals(
  const std::string &start_after,
  const std::string &filter_prefix,
  uint64_t max_return,
  std::map<std::string, bufferlist> *out_vals,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_vals(start_after, filter_prefix, max_return, out_vals, nullptr,
                   prval);
}

// Queue an omap value listing; '*pmore' reports truncation at max_return.
void librados::ObjectReadOperation::omap_get_vals2(
  const std::string &start_after,
  const std::string &filter_prefix,
  uint64_t max_return,
  std::map<std::string, bufferlist> *out_vals,
  bool *pmore,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_vals(start_after, filter_prefix, max_return, out_vals, pmore,
                   prval);
}

// Convenience overload without a filter prefix.
void librados::ObjectReadOperation::omap_get_vals(
  const std::string &start_after,
  uint64_t max_return,
  std::map<std::string, bufferlist> *out_vals,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_vals(start_after, "", max_return, out_vals, nullptr, prval);
}

// Convenience overload without a filter prefix, with truncation indicator.
void librados::ObjectReadOperation::omap_get_vals2(
  const std::string &start_after,
  uint64_t max_return,
  std::map<std::string, bufferlist> *out_vals,
  bool *pmore,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_vals(start_after, "", max_return, out_vals, pmore, prval);
}

// Queue a key-only omap listing (no truncation indicator).
void librados::ObjectReadOperation::omap_get_keys(
  const std::string &start_after,
  uint64_t max_return,
  std::set<std::string> *out_keys,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_keys(start_after, max_return, out_keys, nullptr, prval);
}

// Queue a key-only omap listing; '*pmore' reports truncation.
void librados::ObjectReadOperation::omap_get_keys2(
  const std::string &start_after,
  uint64_t max_return,
  std::set<std::string> *out_keys,
  bool *pmore,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_keys(start_after, max_return, out_keys, pmore, prval);
}

// Queue a fetch of the omap header blob.
void librados::ObjectReadOperation::omap_get_header(bufferlist *bl, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_header(bl, prval);
}

// Queue a lookup of specific omap keys into '*map'.
void librados::ObjectReadOperation::omap_get_vals_by_keys(
  const std::set<std::string> &keys,
  std::map<std::string, bufferlist> *map,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_get_vals_by_keys(keys, map, prval);
}

// Queue omap assertions: each key maps to (expected value, comparison op).
void librados::ObjectOperation::omap_cmp(
  const std::map<std::string, pair<bufferlist, int> > &assertions,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_cmp(assertions, prval);
}

// Queue a listing of current watchers on the object.
void librados::ObjectReadOperation::list_watchers(
  list<obj_watch_t> *out_watchers,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->list_watchers(out_watchers, prval);
}

// Queue a listing of the object's snapshots/clones.
void librados::ObjectReadOperation::list_snaps(
  snap_set_t *out_snaps,
  int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->list_snaps(out_snaps, prval);
}

// Queue a cache-tier dirty check.
void librados::ObjectReadOperation::is_dirty(bool *is_dirty, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->is_dirty(is_dirty, prval);
}
// Synchronously fetch up to 'max_return' omap values, transparently paging
// past the server-side per-request limit: each round issues one
// omap_get_vals2 op and, while the server reports more data, continues
// from the last key received.  Returns 0 on success or the first error.
int librados::IoCtx::omap_get_vals(const std::string& oid,
                                   const std::string& orig_start_after,
                                   const std::string& filter_prefix,
                                   uint64_t max_return,
                                   std::map<std::string, bufferlist> *out_vals)
{
  bool first = true;
  string start_after = orig_start_after;
  bool more = true;
  while (max_return > 0 && more) {
    std::map<std::string,bufferlist> out;
    ObjectReadOperation op;
    op.omap_get_vals2(start_after, filter_prefix, max_return, &out, &more,
		      nullptr);
    bufferlist bl;
    int ret = operate(oid, &op, &bl);
    if (ret < 0) {
      return ret;
    }
    if (more) {
      if (out.empty()) {
	return -EINVAL;  // wth: server says "more" but sent nothing
      }
      // resume the next round after the last key we received
      start_after = out.rbegin()->first;
    }
    if (out.size() <= max_return) {
      max_return -= out.size();
    } else {
      max_return = 0;
    }
    if (first) {
      // first page: cheap swap instead of element-wise insert
      out_vals->swap(out);
      first = false;
    } else {
      out_vals->insert(out.begin(), out.end());
      out.clear();
    }
  }
  return 0;
}
// Synchronously fetch up to 'max_return' omap values in a single request
// (no paging; see omap_get_vals for the paging variant).  '*pmore' is set
// if the listing was truncated.  Returns a negative errno on transport
// failure, otherwise the omap sub-op's result code.
int librados::IoCtx::omap_get_vals2(
  const std::string& oid,
  const std::string& start_after,
  const std::string& filter_prefix,
  uint64_t max_return,
  std::map<std::string, bufferlist> *out_vals,
  bool *pmore)
{
  ObjectReadOperation op;
  // Initialize: 'r' was previously left uninitialized and would be
  // returned unchanged if the sub-op completed without writing it.
  int r = 0;
  op.omap_get_vals2(start_after, filter_prefix, max_return, out_vals, pmore, &r);
  bufferlist bl;
  int ret = operate(oid, &op, &bl);
  if (ret < 0)
    return ret;
  return r;
}
// Queue a fetch of all xattrs into '*pattrs'.
void librados::ObjectReadOperation::getxattrs(map<string, bufferlist> *pattrs, int *prval)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->getxattrs(pattrs, prval);
}
// The ObjectWriteOperation methods below forward to the internal
// ::ObjectOperation after asserting the pimpl exists.  Methods that take
// a 'const bufferlist&' copy it first because the internal calls take a
// mutable bufferlist (bufferlist copies are cheap: refcounted buffers).

// Record a mtime (seconds resolution) to apply when the op executes;
// a NULL pointer leaves the mtime unset.
void librados::ObjectWriteOperation::mtime(time_t *pt)
{
  ceph_assert(impl);
  if (pt) {
    impl->rt = ceph::real_clock::from_time_t(*pt);
    impl->prt = &impl->rt;
  }
}

// Record a mtime with timespec resolution.
void librados::ObjectWriteOperation::mtime2(struct timespec *pts)
{
  ceph_assert(impl);
  if (pts) {
    impl->rt = ceph::real_clock::from_timespec(*pts);
    impl->prt = &impl->rt;
  }
}

// Queue object creation; with 'exclusive' the op fails if it exists.
void librados::ObjectWriteOperation::create(bool exclusive)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->create(exclusive);
}

// Deprecated overload: 'category' is ignored.
void librados::ObjectWriteOperation::create(bool exclusive,
					    const std::string& category) // unused
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->create(exclusive);
}

// Queue a write of 'bl' at offset 'off'.
void librados::ObjectWriteOperation::write(uint64_t off, const bufferlist& bl)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  bufferlist c = bl;
  o->write(off, c);
}

// Queue a full-object replace with 'bl'.
void librados::ObjectWriteOperation::write_full(const bufferlist& bl)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  bufferlist c = bl;
  o->write_full(c);
}

// Queue a repeated write of 'bl' covering 'write_len' bytes at 'off'.
void librados::ObjectWriteOperation::writesame(uint64_t off, uint64_t write_len,
					       const bufferlist& bl)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  bufferlist c = bl;
  o->writesame(off, write_len, c);
}

// Queue an append of 'bl' to the object.
void librados::ObjectWriteOperation::append(const bufferlist& bl)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  bufferlist c = bl;
  o->append(c);
}

// Queue object deletion.
void librados::ObjectWriteOperation::remove()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->remove();
}

// Queue a truncate to 'off' bytes.
void librados::ObjectWriteOperation::truncate(uint64_t off)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->truncate(off);
}

// Queue zeroing of [off, off+len).
void librados::ObjectWriteOperation::zero(uint64_t off, uint64_t len)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->zero(off, len);
}

// Queue removal of xattr 'name'.
void librados::ObjectWriteOperation::rmxattr(const char *name)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->rmxattr(name);
}

// Queue setting xattr 'name' to 'v'.
void librados::ObjectWriteOperation::setxattr(const char *name, const bufferlist& v)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->setxattr(name, v);
}

// Rvalue overload: moves the buffer instead of copying.
void librados::ObjectWriteOperation::setxattr(const char *name,
					      const buffer::list&& v)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->setxattr(name, std::move(v));
}

// Queue setting/overwriting the given omap key/value pairs.
void librados::ObjectWriteOperation::omap_set(
  const map<string, bufferlist> &map)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_set(map);
}

// Queue replacing the omap header with 'bl'.
void librados::ObjectWriteOperation::omap_set_header(const bufferlist &bl)
{
  ceph_assert(impl);
  bufferlist c = bl;
  ::ObjectOperation *o = &impl->o;
  o->omap_set_header(c);
}

// Queue removal of all omap keys and the header.
void librados::ObjectWriteOperation::omap_clear()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_clear();
}

// Queue removal of the specified omap keys.
void librados::ObjectWriteOperation::omap_rm_keys(
  const std::set<std::string> &to_rm)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->omap_rm_keys(to_rm);
}
// Queue a server-side copy from 'src' (in 'src_ioctx') at 'src_version'
// into the target object of this op.
void librados::ObjectWriteOperation::copy_from(const std::string& src,
					       const IoCtx& src_ioctx,
					       uint64_t src_version,
					       uint32_t src_fadvise_flags)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->copy_from(object_t(src), src_ioctx.io_ctx_impl->snap_seq,
	       src_ioctx.io_ctx_impl->oloc, src_version, 0, src_fadvise_flags);
}

// copy_from with explicit truncate_seq/truncate_size to preserve the
// source's truncation metadata.
void librados::ObjectWriteOperation::copy_from2(const std::string& src,
					        const IoCtx& src_ioctx,
					        uint64_t src_version,
					        uint32_t truncate_seq,
					        uint64_t truncate_size,
					        uint32_t src_fadvise_flags)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->copy_from2(object_t(src), src_ioctx.io_ctx_impl->snap_seq,
		src_ioctx.io_ctx_impl->oloc, src_version, 0,
		truncate_seq, truncate_size, src_fadvise_flags);
}

// Queue clearing of the cache-tier dirty flag.
void librados::ObjectWriteOperation::undirty()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->undirty();
}

// Queue a blocking cache-tier flush.
void librados::ObjectReadOperation::cache_flush()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->cache_flush();
}

// Queue a non-blocking cache-tier flush attempt.
void librados::ObjectReadOperation::cache_try_flush()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->cache_try_flush();
}

// Queue eviction of the object from the cache tier.
void librados::ObjectReadOperation::cache_evict()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->cache_evict();
}

// Queue a tiering flush (manifest/dedup tiering).
void librados::ObjectReadOperation::tier_flush()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->tier_flush();
}

// Queue a tiering eviction.
void librados::ObjectReadOperation::tier_evict()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->tier_evict();
}

// Queue redirection of this object to 'tgt_obj' in 'tgt_ioctx'.
void librados::ObjectWriteOperation::set_redirect(const std::string& tgt_obj,
						  const IoCtx& tgt_ioctx,
						  uint64_t tgt_version,
						  int flag)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->set_redirect(object_t(tgt_obj), tgt_ioctx.io_ctx_impl->snap_seq,
		  tgt_ioctx.io_ctx_impl->oloc, tgt_version, flag);
}

// Queue mapping of source extent [src_offset, src_offset+src_length) onto
// the target object's extent at 'tgt_offset' (manifest chunking).
void librados::ObjectReadOperation::set_chunk(uint64_t src_offset,
					      uint64_t src_length,
					      const IoCtx& tgt_ioctx,
					      string tgt_oid,
					      uint64_t tgt_offset,
					      int flag)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->set_chunk(src_offset, src_length,
	       tgt_ioctx.io_ctx_impl->oloc, object_t(tgt_oid), tgt_offset, flag);
}

// Queue promotion of a tiered object into the base tier.
void librados::ObjectWriteOperation::tier_promote()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->tier_promote();
}

// Queue removal of the object's manifest (redirect/chunk state).
void librados::ObjectWriteOperation::unset_manifest()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->unset_manifest();
}

// Queue a legacy tmap update described by 'cmdbl'.
void librados::ObjectWriteOperation::tmap_update(const bufferlist& cmdbl)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  bufferlist c = cmdbl;
  o->tmap_update(c);
}

// Queue a rollback to the given self-managed snapshot id.
void librados::ObjectWriteOperation::selfmanaged_snap_rollback(snap_t snapid)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->rollback(snapid);
}

// You must specify the snapid not the name normally used with pool snapshots
void librados::ObjectWriteOperation::snap_rollback(snap_t snapid)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->rollback(snapid);
}

// Queue an allocation hint (no flags).
void librados::ObjectWriteOperation::set_alloc_hint(
                                            uint64_t expected_object_size,
                                            uint64_t expected_write_size)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->set_alloc_hint(expected_object_size, expected_write_size, 0);
}

// Queue an allocation hint with LIBRADOS_ALLOC_HINT_FLAG_* bits.
void librados::ObjectWriteOperation::set_alloc_hint2(
                                            uint64_t expected_object_size,
                                            uint64_t expected_write_size,
					    uint32_t flags)
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->set_alloc_hint(expected_object_size, expected_write_size, flags);
}

// Queue pinning of the object in the cache tier.
void librados::ObjectWriteOperation::cache_pin()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->cache_pin();
}

// Queue unpinning of the object in the cache tier.
void librados::ObjectWriteOperation::cache_unpin()
{
  ceph_assert(impl);
  ::ObjectOperation *o = &impl->o;
  o->cache_unpin();
}

// Out-of-line virtual destructors anchor the vtables for the public
// watch-callback interfaces.
librados::WatchCtx::
~WatchCtx()
{
}

librados::WatchCtx2::
~WatchCtx2()
{
}
///////////////////////////// NObjectIteratorImpl /////////////////////////////
// Wrap an object-listing context; a null ctx denotes the end iterator.
librados::NObjectIteratorImpl::NObjectIteratorImpl(ObjListCtx *ctx_)
  : ctx(ctx_)
{
}

librados::NObjectIteratorImpl::~NObjectIteratorImpl()
{
  ctx.reset();
}

// Copy construction delegates to the deep-copying assignment below.
librados::NObjectIteratorImpl::NObjectIteratorImpl(const NObjectIteratorImpl &rhs)
{
  *this = rhs;
}

// Deep copy: clones the Objecter listing state so the two iterators can
// advance independently.
librados::NObjectIteratorImpl& librados::NObjectIteratorImpl::operator=(const librados::NObjectIteratorImpl &rhs)
{
  if (&rhs == this)
    return *this;
  if (rhs.ctx.get() == NULL) {
    // copying the end iterator: become the end iterator
    ctx.reset();
    return *this;
  }
  Objecter::NListContext *list_ctx = new Objecter::NListContext(*rhs.ctx->nlc);
  ctx.reset(new ObjListCtx(rhs.ctx->ctx, list_ctx));
  cur_obj = rhs.cur_obj;
  return *this;
}
// Equality: a null-ctx iterator is the end sentinel and compares equal to
// any iterator whose listing has reached its end; otherwise iterators are
// equal only if they share the same context object.
bool librados::NObjectIteratorImpl::operator==(const librados::NObjectIteratorImpl& rhs) const {

  if (ctx.get() == NULL) {
    if (rhs.ctx.get() == NULL)
      return true;
    return rhs.ctx->nlc->at_end();
  }
  if (rhs.ctx.get() == NULL) {
    // Redundant but same as ObjectIterator version
    if (ctx.get() == NULL)
      return true;
    return ctx->nlc->at_end();
  }
  return ctx.get() == rhs.ctx.get();
}

bool librados::NObjectIteratorImpl::operator!=(const librados::NObjectIteratorImpl& rhs) const {
  return !(*this == rhs);
}

// Dereference yields the most recently fetched listing entry.
const librados::ListObject& librados::NObjectIteratorImpl::operator*() const {
  return cur_obj;
}

const librados::ListObject* librados::NObjectIteratorImpl::operator->() const {
  return &cur_obj;
}
// Pre-increment: fetch the next listing entry.
librados::NObjectIteratorImpl& librados::NObjectIteratorImpl::operator++()
{
  get_next();
  return *this;
}

// Post-increment: returns a deep copy of the pre-advance state.
librados::NObjectIteratorImpl librados::NObjectIteratorImpl::operator++(int)
{
  librados::NObjectIteratorImpl ret(*this);
  get_next();
  return ret;
}

// Seek to a PG hash position; returns the position actually landed on and
// loads the first entry there.
uint32_t librados::NObjectIteratorImpl::seek(uint32_t pos)
{
  uint32_t r = rados_nobjects_list_seek(ctx.get(), pos);
  get_next();
  return r;
}

// Seek to an opaque listing cursor; same return semantics as above.
uint32_t librados::NObjectIteratorImpl::seek(const ObjectCursor& cursor)
{
  uint32_t r = rados_nobjects_list_seek_cursor(ctx.get(), (rados_object_list_cursor)cursor.c_cursor);
  get_next();
  return r;
}

// Snapshot the current listing position as an opaque cursor.
librados::ObjectCursor librados::NObjectIteratorImpl::get_cursor()
{
  librados::ObjListCtx *lh = (librados::ObjListCtx *)ctx.get();
  librados::ObjectCursor oc;
  oc.set(lh->ctx->nlist_get_cursor(lh->nlc));
  return oc;
}

// Install a server-side object filter (encoded in 'bl') on the listing.
void librados::NObjectIteratorImpl::set_filter(const bufferlist &bl)
{
  ceph_assert(ctx);
  ctx->nlc->filter = bl;
}
// Fetch the next entry from the C listing API into 'cur_obj'.  A no-op at
// end of listing; throws std::system_error on any other failure.
void librados::NObjectIteratorImpl::get_next()
{
  const char *entry, *key, *nspace;
  size_t entry_size, key_size, nspace_size;
  if (ctx->nlc->at_end())
    return;
  int ret = rados_nobjects_list_next2(ctx.get(), &entry, &key, &nspace,
                                      &entry_size, &key_size, &nspace_size);
  if (ret == -ENOENT) {
    return;
  }
  else if (ret) {
    throw std::system_error(-ret, std::system_category(),
                            "rados_nobjects_list_next2");
  }

  if (cur_obj.impl == NULL)
    cur_obj.impl = new ListObjectImpl();
  cur_obj.impl->nspace = string{nspace, nspace_size};
  cur_obj.impl->oid = string{entry, entry_size};
  // 'key' (the locator) may be NULL when the object has no explicit locator
  cur_obj.impl->locator = key ? string(key, key_size) : string();
}

uint32_t librados::NObjectIteratorImpl::get_pg_hash_position() const
{
  return ctx->nlc->get_pg_hash_position();
}
///////////////////////////// NObjectIterator /////////////////////////////
// Public iterator facade; all state lives in the heap-allocated impl.
// A null impl mirrors a null-ctx impl: it represents the end iterator.
librados::NObjectIterator::NObjectIterator(ObjListCtx *ctx_)
{
  impl = new NObjectIteratorImpl(ctx_);
}

librados::NObjectIterator::~NObjectIterator()
{
  delete impl;
}

librados::NObjectIterator::NObjectIterator(const NObjectIterator &rhs)
{
  if (rhs.impl == NULL) {
    impl = NULL;
    return;
  }
  impl = new NObjectIteratorImpl();
  *impl = *(rhs.impl);
}

librados::NObjectIterator& librados::NObjectIterator::operator=(const librados::NObjectIterator &rhs)
{
  if (rhs.impl == NULL) {
    delete impl;
    impl = NULL;
    return *this;
  }
  if (impl == NULL)
    impl = new NObjectIteratorImpl();
  // deep copy via NObjectIteratorImpl::operator= (self-assignment safe)
  *impl = *(rhs.impl);
  return *this;
}

bool librados::NObjectIterator::operator==(const librados::NObjectIterator& rhs) const
{
  if (impl && rhs.impl) {
    return *impl == *(rhs.impl);
  } else {
    return impl == rhs.impl;
  }
}

bool librados::NObjectIterator::operator!=(const librados::NObjectIterator& rhs) const
{
  return !(*this == rhs);
}
// Dereference/advance/seek forwarders; all require a non-null impl
// (i.e. not the end iterator).
const librados::ListObject& librados::NObjectIterator::operator*() const {
  ceph_assert(impl);
  return *(impl->get_listobjectp());
}

const librados::ListObject* librados::NObjectIterator::operator->() const {
  ceph_assert(impl);
  return impl->get_listobjectp();
}

librados::NObjectIterator& librados::NObjectIterator::operator++()
{
  ceph_assert(impl);
  impl->get_next();
  return *this;
}

librados::NObjectIterator librados::NObjectIterator::operator++(int)
{
  librados::NObjectIterator ret(*this);
  impl->get_next();
  return ret;
}

uint32_t librados::NObjectIterator::seek(uint32_t pos)
{
  ceph_assert(impl);
  return impl->seek(pos);
}

uint32_t librados::NObjectIterator::seek(const ObjectCursor& cursor)
{
  ceph_assert(impl);
  return impl->seek(cursor);
}

librados::ObjectCursor librados::NObjectIterator::get_cursor()
{
  ceph_assert(impl);
  return impl->get_cursor();
}
void librados::NObjectIterator::set_filter(const bufferlist &bl)
{
impl->set_filter(bl);
}
// Advance to the next listing entry.
void librados::NObjectIterator::get_next()
{
  ceph_assert(impl);
  impl->get_next();
}

uint32_t librados::NObjectIterator::get_pg_hash_position() const
{
  ceph_assert(impl);
  return impl->get_pg_hash_position();
}

// Shared end-of-listing sentinel (null impl).
const librados::NObjectIterator librados::NObjectIterator::__EndObjectIterator(NULL);
///////////////////////////// PoolAsyncCompletion //////////////////////////////
// Public wrapper over the refcounted PoolAsyncCompletionImpl stored in 'pc'.
librados::PoolAsyncCompletion::PoolAsyncCompletion::~PoolAsyncCompletion()
{
  auto c = reinterpret_cast<PoolAsyncCompletionImpl *>(pc);
  c->release();
}

int librados::PoolAsyncCompletion::PoolAsyncCompletion::set_callback(void *cb_arg,
								     rados_callback_t cb)
{
  PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
  return c->set_callback(cb_arg, cb);
}

// Block until the pool operation completes.
int librados::PoolAsyncCompletion::PoolAsyncCompletion::wait()
{
  PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
  return c->wait();
}

bool librados::PoolAsyncCompletion::PoolAsyncCompletion::is_complete()
{
  PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
  return c->is_complete();
}

int librados::PoolAsyncCompletion::PoolAsyncCompletion::get_return_value()
{
  PoolAsyncCompletionImpl *c = (PoolAsyncCompletionImpl *)pc;
  return c->get_return_value();
}

// Destroys this wrapper; the destructor drops the impl reference.
void librados::PoolAsyncCompletion::PoolAsyncCompletion::release()
{
  delete this;
}
///////////////////////////// AioCompletion //////////////////////////////
// Public wrapper over the refcounted AioCompletionImpl stored in 'pc'.
// The historical complete/safe distinction is collapsed: "safe" methods
// forward to their "complete" counterparts where the impl no longer
// distinguishes them.
librados::AioCompletion::AioCompletion::~AioCompletion()
{
  auto c = reinterpret_cast<AioCompletionImpl *>(pc);
  c->release();
}

int librados::AioCompletion::AioCompletion::set_complete_callback(void *cb_arg, rados_callback_t cb)
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->set_complete_callback(cb_arg, cb);
}

int librados::AioCompletion::AioCompletion::set_safe_callback(void *cb_arg, rados_callback_t cb)
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->set_safe_callback(cb_arg, cb);
}

int librados::AioCompletion::AioCompletion::wait_for_complete()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->wait_for_complete();
}

// Deprecated alias for wait_for_complete().
int librados::AioCompletion::AioCompletion::wait_for_safe()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->wait_for_complete();
}

bool librados::AioCompletion::AioCompletion::is_complete()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->is_complete();
}

bool librados::AioCompletion::AioCompletion::is_safe()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->is_safe();
}

// Wait until completion AND until any registered callback has run.
int librados::AioCompletion::AioCompletion::wait_for_complete_and_cb()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->wait_for_complete_and_cb();
}

int librados::AioCompletion::AioCompletion::wait_for_safe_and_cb()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->wait_for_safe_and_cb();
}

bool librados::AioCompletion::AioCompletion::is_complete_and_cb()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->is_complete_and_cb();
}

bool librados::AioCompletion::AioCompletion::is_safe_and_cb()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->is_safe_and_cb();
}

int librados::AioCompletion::AioCompletion::get_return_value()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->get_return_value();
}

// Deprecated: truncates the 64-bit version; prefer get_version64().
int librados::AioCompletion::AioCompletion::get_version()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->get_version();
}

uint64_t librados::AioCompletion::AioCompletion::get_version64()
{
  AioCompletionImpl *c = (AioCompletionImpl *)pc;
  return c->get_version();
}

// Destroys this wrapper; the destructor drops the impl reference.
void librados::AioCompletion::AioCompletion::release()
{
  delete this;
}
///////////////////////////// IoCtx //////////////////////////////
// Default-constructed IoCtx is "closed": no impl attached.
librados::IoCtx::IoCtx() : io_ctx_impl(NULL)
{
}

// Wrap a C-API rados_ioctx_t in a C++ IoCtx, taking an additional
// reference on the shared impl (p may be null, yielding a closed IoCtx).
void librados::IoCtx::from_rados_ioctx_t(rados_ioctx_t p, IoCtx &io)
{
  IoCtxImpl *io_ctx_impl = (IoCtxImpl*)p;
  io.io_ctx_impl = io_ctx_impl;
  if (io_ctx_impl) {
    io_ctx_impl->get();
  }
}

// Copy construction shares the underlying impl via reference counting.
librados::IoCtx::IoCtx(const IoCtx& rhs)
{
  io_ctx_impl = rhs.io_ctx_impl;
  if (io_ctx_impl) {
    io_ctx_impl->get();
  }
}
// Copy assignment: drop our reference (if any) and share rhs's impl.
// Two fixes over the naive form:
//  - self-assignment guard: put() on a refcount of 1 would destroy the
//    impl before the subsequent get() could re-acquire it;
//  - null check on rhs.io_ctx_impl, mirroring the copy constructor, so
//    assigning from a default-constructed/closed IoCtx does not
//    dereference a null pointer.
librados::IoCtx& librados::IoCtx::operator=(const IoCtx& rhs)
{
  if (this == &rhs)
    return *this;
  if (io_ctx_impl)
    io_ctx_impl->put();
  io_ctx_impl = rhs.io_ctx_impl;
  if (io_ctx_impl)
    io_ctx_impl->get();
  return *this;
}
// Move construction steals rhs's impl reference, leaving rhs closed.
librados::IoCtx::IoCtx(IoCtx&& rhs) noexcept
  : io_ctx_impl(std::exchange(rhs.io_ctx_impl, nullptr))
{
}

// Move assignment: release our reference, then take over rhs's.
librados::IoCtx& librados::IoCtx::operator=(IoCtx&& rhs) noexcept
{
  if (io_ctx_impl)
    io_ctx_impl->put();
  io_ctx_impl = std::exchange(rhs.io_ctx_impl, nullptr);
  return *this;
}

librados::IoCtx::~IoCtx()
{
  close();
}

// True while this IoCtx holds a live impl (i.e. close() not yet called).
bool librados::IoCtx::is_valid() const {
  return io_ctx_impl != nullptr;
}

// Drop our reference and mark this IoCtx closed; idempotent.
void librados::IoCtx::close()
{
  if (io_ctx_impl)
    io_ctx_impl->put();
  io_ctx_impl = 0;
}

// Replace our impl with a fresh one cloned from rhs (does not share it,
// unlike copy assignment). The new impl starts with our reference held.
void librados::IoCtx::dup(const IoCtx& rhs)
{
  if (io_ctx_impl)
    io_ctx_impl->put();
  io_ctx_impl = new IoCtxImpl();
  io_ctx_impl->get();
  io_ctx_impl->dup(*rhs.io_ctx_impl);
}
// Pool auid manipulation was removed from RADOS; these stubs remain only
// for ABI compatibility and always report "not supported".
int librados::IoCtx::set_auid(uint64_t auid_)
{
  return -EOPNOTSUPP;
}

int librados::IoCtx::set_auid_async(uint64_t auid_, PoolAsyncCompletion *c)
{
  return -EOPNOTSUPP;
}

int librados::IoCtx::get_auid(uint64_t *auid_)
{
  return -EOPNOTSUPP;
}

// Alignment queries (relevant for append on erasure-coded pools); the
// non-*2 variants are the legacy forms without an error return.
bool librados::IoCtx::pool_requires_alignment()
{
  return io_ctx_impl->client->pool_requires_alignment(get_id());
}

int librados::IoCtx::pool_requires_alignment2(bool *req)
{
  return io_ctx_impl->client->pool_requires_alignment2(get_id(), req);
}

uint64_t librados::IoCtx::pool_required_alignment()
{
  return io_ctx_impl->client->pool_required_alignment(get_id());
}

int librados::IoCtx::pool_required_alignment2(uint64_t *alignment)
{
  return io_ctx_impl->client->pool_required_alignment2(get_id(), alignment);
}

// Non-const overload: looks the name up via the client (errors are
// swallowed, yielding an empty string).
std::string librados::IoCtx::get_pool_name()
{
  std::string s;
  io_ctx_impl->client->pool_get_name(get_id(), &s);
  return s;
}

// Const overload: returns the name cached in the impl — note the two
// overloads intentionally use different lookup paths.
std::string librados::IoCtx::get_pool_name() const
{
  return io_ctx_impl->get_cached_pool_name();
}

uint64_t librados::IoCtx::get_instance_id() const
{
  return io_ctx_impl->client->get_instance_id();
}
// ---- Synchronous per-object I/O ----
// Each wrapper converts the string oid into an object_t and delegates to
// the impl; return values are 0/positive on success, -errno on failure.

// Create the object; with exclusive=true, fail with -EEXIST if present.
int librados::IoCtx::create(const std::string& oid, bool exclusive)
{
  object_t obj(oid);
  return io_ctx_impl->create(obj, exclusive);
}

// Legacy overload: the category argument is ignored.
int librados::IoCtx::create(const std::string& oid, bool exclusive,
			    const std::string& category) // unused
{
  object_t obj(oid);
  return io_ctx_impl->create(obj, exclusive);
}

int librados::IoCtx::write(const std::string& oid, bufferlist& bl, size_t len, uint64_t off)
{
  object_t obj(oid);
  return io_ctx_impl->write(obj, bl, len, off);
}

int librados::IoCtx::append(const std::string& oid, bufferlist& bl, size_t len)
{
  object_t obj(oid);
  return io_ctx_impl->append(obj, bl, len);
}

// Replace the whole object's contents with bl.
int librados::IoCtx::write_full(const std::string& oid, bufferlist& bl)
{
  object_t obj(oid);
  return io_ctx_impl->write_full(obj, bl);
}

// Write bl repeatedly to fill [off, off+write_len).
int librados::IoCtx::writesame(const std::string& oid, bufferlist& bl,
			       size_t write_len, uint64_t off)
{
  object_t obj(oid);
  return io_ctx_impl->writesame(obj, bl, write_len, off);
}

int librados::IoCtx::read(const std::string& oid, bufferlist& bl, size_t len, uint64_t off)
{
  object_t obj(oid);
  return io_ctx_impl->read(obj, bl, len, off);
}

// Server-side checksum over [off, off+len), chunked by chunk_size.
int librados::IoCtx::checksum(const std::string& oid,
			      rados_checksum_type_t type,
			      const bufferlist &init_value_bl, size_t len,
			      uint64_t off, size_t chunk_size, bufferlist *pbl)
{
  object_t obj(oid);
  return io_ctx_impl->checksum(obj, get_checksum_op_type(type), init_value_bl,
			       len, off, chunk_size, pbl);
}

int librados::IoCtx::remove(const std::string& oid)
{
  object_t obj(oid);
  return io_ctx_impl->remove(obj);
}

int librados::IoCtx::remove(const std::string& oid, int flags)
{
  object_t obj(oid);
  return io_ctx_impl->remove(obj, flags);
}

int librados::IoCtx::trunc(const std::string& oid, uint64_t size)
{
  object_t obj(oid);
  return io_ctx_impl->trunc(obj, size);
}

// Map of allocated extents (offset -> length) intersecting [off, off+len).
int librados::IoCtx::mapext(const std::string& oid, uint64_t off, size_t len,
			    std::map<uint64_t,uint64_t>& m)
{
  object_t obj(oid);
  return io_ctx_impl->mapext(obj, off, len, m);
}

// Compare object bytes at off against cmp_bl on the OSD.
int librados::IoCtx::cmpext(const std::string& oid, uint64_t off, bufferlist& cmp_bl)
{
  object_t obj(oid);
  return io_ctx_impl->cmpext(obj, off, cmp_bl);
}

int librados::IoCtx::sparse_read(const std::string& oid, std::map<uint64_t,uint64_t>& m,
				 bufferlist& bl, size_t len, uint64_t off)
{
  object_t obj(oid);
  return io_ctx_impl->sparse_read(obj, m, bl, len, off);
}

// ---- Extended attributes ----
int librados::IoCtx::getxattr(const std::string& oid, const char *name, bufferlist& bl)
{
  object_t obj(oid);
  return io_ctx_impl->getxattr(obj, name, bl);
}

int librados::IoCtx::getxattrs(const std::string& oid, map<std::string, bufferlist>& attrset)
{
  object_t obj(oid);
  return io_ctx_impl->getxattrs(obj, attrset);
}

int librados::IoCtx::setxattr(const std::string& oid, const char *name, bufferlist& bl)
{
  object_t obj(oid);
  return io_ctx_impl->setxattr(obj, name, bl);
}

int librados::IoCtx::rmxattr(const std::string& oid, const char *name)
{
  object_t obj(oid);
  return io_ctx_impl->rmxattr(obj, name);
}

// stat with second-resolution mtime; stat2 gives nanosecond resolution.
int librados::IoCtx::stat(const std::string& oid, uint64_t *psize, time_t *pmtime)
{
  object_t obj(oid);
  return io_ctx_impl->stat(obj, psize, pmtime);
}

int librados::IoCtx::stat2(const std::string& oid, uint64_t *psize, struct timespec *pts)
{
  object_t obj(oid);
  return io_ctx_impl->stat2(obj, psize, pts);
}

// Invoke an object-class method (cls/method) with inbl, output in outbl.
int librados::IoCtx::exec(const std::string& oid, const char *cls, const char *method,
			  bufferlist& inbl, bufferlist& outbl)
{
  object_t obj(oid);
  return io_ctx_impl->exec(obj, cls, method, inbl, outbl);
}

// Legacy tmap (trivial map) update; superseded by omap.
int librados::IoCtx::tmap_update(const std::string& oid, bufferlist& cmdbl)
{
  object_t obj(oid);
  return io_ctx_impl->tmap_update(obj, cmdbl);
}
// Fetch up to max_return omap key/value pairs after start_after.
// Delegates to the filter-prefix overload (declared elsewhere) with an
// empty prefix.
int librados::IoCtx::omap_get_vals(const std::string& oid,
                                   const std::string& start_after,
                                   uint64_t max_return,
                                   std::map<std::string, bufferlist> *out_vals)
{
  return omap_get_vals(oid, start_after, string(), max_return, out_vals);
}

// Same as omap_get_vals but also reports via *pmore whether further
// entries remain past the returned page.
int librados::IoCtx::omap_get_vals2(
  const std::string& oid,
  const std::string& start_after,
  uint64_t max_return,
  std::map<std::string, bufferlist> *out_vals,
  bool *pmore)
{
  ObjectReadOperation op;
  int r;
  op.omap_get_vals2(start_after, max_return, out_vals, pmore, &r);
  bufferlist bl;
  int ret = operate(oid, &op, &bl);
  // operate() reports transport/op failure; r is the omap sub-op result.
  if (ret < 0)
    return ret;
  return r;
}
int librados::IoCtx::omap_get_keys(const std::string& oid,
const std::string& orig_start_after,
uint64_t max_return,
std::set<std::string> *out_keys)
{
bool first = true;
string start_after = orig_start_after;
bool more = true;
while (max_return > 0 && more) {
std::set<std::string> out;
ObjectReadOperation op;
op.omap_get_keys2(start_after, max_return, &out, &more, nullptr);
bufferlist bl;
int ret = operate(oid, &op, &bl);
if (ret < 0) {
return ret;
}
if (more) {
if (out.empty()) {
return -EINVAL; // wth
}
start_after = *out.rbegin();
}
if (out.size() <= max_return) {
max_return -= out.size();
} else {
max_return = 0;
}
if (first) {
out_keys->swap(out);
first = false;
} else {
out_keys->insert(out.begin(), out.end());
out.clear();
}
}
return 0;
}
// One page of omap keys after start_after; *pmore set if more remain.
int librados::IoCtx::omap_get_keys2(
  const std::string& oid,
  const std::string& start_after,
  uint64_t max_return,
  std::set<std::string> *out_keys,
  bool *pmore)
{
  ObjectReadOperation op;
  int r;
  op.omap_get_keys2(start_after, max_return, out_keys, pmore, &r);
  bufferlist bl;
  int ret = operate(oid, &op, &bl);
  // ret is the overall op result; r is the omap sub-op's result.
  if (ret < 0)
    return ret;
  return r;
}

// Read the object's omap header blob into *bl.
int librados::IoCtx::omap_get_header(const std::string& oid,
                                     bufferlist *bl)
{
  ObjectReadOperation op;
  int r;
  op.omap_get_header(bl, &r);
  bufferlist b;
  int ret = operate(oid, &op, &b);
  if (ret < 0)
    return ret;
  return r;
}

// Look up the given omap keys; missing keys are simply absent from *vals.
int librados::IoCtx::omap_get_vals_by_keys(const std::string& oid,
                                           const std::set<std::string>& keys,
                                           std::map<std::string, bufferlist> *vals)
{
  ObjectReadOperation op;
  int r;
  bufferlist bl;
  op.omap_get_vals_by_keys(keys, vals, &r);
  int ret = operate(oid, &op, &bl);
  if (ret < 0)
    return ret;
  return r;
}

// Insert/overwrite the given omap key/value pairs.
int librados::IoCtx::omap_set(const std::string& oid,
                              const map<string, bufferlist>& m)
{
  ObjectWriteOperation op;
  op.omap_set(m);
  return operate(oid, &op);
}

// Replace the omap header blob.
int librados::IoCtx::omap_set_header(const std::string& oid,
                                     const bufferlist& bl)
{
  ObjectWriteOperation op;
  op.omap_set_header(bl);
  return operate(oid, &op);
}

// Remove all omap entries (header included).
int librados::IoCtx::omap_clear(const std::string& oid)
{
  ObjectWriteOperation op;
  op.omap_clear();
  return operate(oid, &op);
}

// Remove the listed omap keys.
int librados::IoCtx::omap_rm_keys(const std::string& oid,
                                  const std::set<std::string>& keys)
{
  ObjectWriteOperation op;
  op.omap_rm_keys(keys);
  return operate(oid, &op);
}
// Execute a compound write operation synchronously. An operation whose
// impl was never populated (no ops added / moved-from) is rejected early.
int librados::IoCtx::operate(const std::string& oid, librados::ObjectWriteOperation *o)
{
  object_t obj(oid);
  if (unlikely(!o->impl))
    return -EINVAL;
  // prt carries an optional caller-supplied mtime override.
  return io_ctx_impl->operate(obj, &o->impl->o, (ceph::real_time *)o->impl->prt);
}

// As above, with librados operation flags (translated to OSD flags).
int librados::IoCtx::operate(const std::string& oid, librados::ObjectWriteOperation *o, int flags)
{
  object_t obj(oid);
  if (unlikely(!o->impl))
    return -EINVAL;
  return io_ctx_impl->operate(obj, &o->impl->o, (ceph::real_time *)o->impl->prt, translate_flags(flags));
}

// Execute a compound read operation synchronously; pbl receives payload.
int librados::IoCtx::operate(const std::string& oid, librados::ObjectReadOperation *o, bufferlist *pbl)
{
  object_t obj(oid);
  if (unlikely(!o->impl))
    return -EINVAL;
  return io_ctx_impl->operate_read(obj, &o->impl->o, pbl);
}

int librados::IoCtx::operate(const std::string& oid, librados::ObjectReadOperation *o, bufferlist *pbl, int flags)
{
  object_t obj(oid);
  if (unlikely(!o->impl))
    return -EINVAL;
  return io_ctx_impl->operate_read(obj, &o->impl->o, pbl, translate_flags(flags));
}
// ---- Asynchronous compound operations ----
// All overloads validate that the operation impl exists, then hand off to
// the impl with the completion's internal AioCompletionImpl (c->pc).

// Async write op using the IoCtx's current snapshot context.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectWriteOperation *o)
{
  object_t obj(oid);
  if (unlikely(!o->impl))
    return -EINVAL;
  return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
                                  io_ctx_impl->snapc, o->impl->prt, 0);
}

// As above with librados flags.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 ObjectWriteOperation *o, int flags)
{
  object_t obj(oid);
  if (unlikely(!o->impl))
    return -EINVAL;
  return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
                                  io_ctx_impl->snapc, o->impl->prt,
                                  translate_flags(flags));
}

// Async write with an explicit snapshot context (snap_seq + snap ids),
// overriding the IoCtx's default.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectWriteOperation *o,
                                 snap_t snap_seq, std::vector<snap_t>& snaps)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  vector<snapid_t> snv;
  snv.resize(snaps.size());
  for (size_t i = 0; i < snaps.size(); ++i)
    snv[i] = snaps[i];
  SnapContext snapc(snap_seq, snv);
  return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
                                  snapc, o->impl->prt, 0);
}

// As above, propagating a blkin trace for distributed tracing.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectWriteOperation *o,
                                 snap_t snap_seq, std::vector<snap_t>& snaps,
                                 const blkin_trace_info *trace_info)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  vector<snapid_t> snv;
  snv.resize(snaps.size());
  for (size_t i = 0; i < snaps.size(); ++i)
    snv[i] = snaps[i];
  SnapContext snapc(snap_seq, snv);
  return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc,
                                  snapc, o->impl->prt, 0, trace_info);
}

// Explicit snap context + flags + trace.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectWriteOperation *o,
                                 snap_t snap_seq, std::vector<snap_t>& snaps, int flags,
                                 const blkin_trace_info *trace_info)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  vector<snapid_t> snv;
  snv.resize(snaps.size());
  for (size_t i = 0; i < snaps.size(); ++i)
    snv[i] = snaps[i];
  SnapContext snapc(snap_seq, snv);
  return io_ctx_impl->aio_operate(obj, &o->impl->o, c->pc, snapc, o->impl->prt,
                                  translate_flags(flags), trace_info);
}

// Async read op; pbl receives the payload on completion.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectReadOperation *o,
                                 bufferlist *pbl)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
                                       0, pbl);
}

// deprecated
// Legacy overload: the snapid parameter is ignored; only the read-placement
// flags below are honored (translated manually rather than via
// translate_flags, preserving the original subset).
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectReadOperation *o,
                                 snap_t snapid_unused_deprecated,
                                 int flags, bufferlist *pbl)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  int op_flags = 0;
  if (flags & OPERATION_BALANCE_READS)
    op_flags |= CEPH_OSD_FLAG_BALANCE_READS;
  if (flags & OPERATION_LOCALIZE_READS)
    op_flags |= CEPH_OSD_FLAG_LOCALIZE_READS;
  if (flags & OPERATION_ORDER_READS_WRITES)
    op_flags |= CEPH_OSD_FLAG_RWORDERED;
  return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
                                       op_flags, pbl);
}

int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectReadOperation *o,
                                 int flags, bufferlist *pbl)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
                                       translate_flags(flags), pbl);
}

// Async read with flags and a blkin trace.
int librados::IoCtx::aio_operate(const std::string& oid, AioCompletion *c,
                                 librados::ObjectReadOperation *o,
                                 int flags, bufferlist *pbl, const blkin_trace_info *trace_info)
{
  if (unlikely(!o->impl))
    return -EINVAL;
  object_t obj(oid);
  return io_ctx_impl->aio_operate_read(obj, &o->impl->o, c->pc,
                                       translate_flags(flags), pbl, trace_info);
}
// ---- Snapshot management ----

// Direct subsequent reads at the given snapshot (CEPH_NOSNAP = head).
void librados::IoCtx::snap_set_read(snap_t seq)
{
  io_ctx_impl->set_snap_read(seq);
}

// Install a self-managed snapshot write context (seq + snap id list).
int librados::IoCtx::selfmanaged_snap_set_write_ctx(snap_t seq, vector<snap_t>& snaps)
{
  vector<snapid_t> snv;
  snv.resize(snaps.size());
  for (unsigned i=0; i<snaps.size(); i++)
    snv[i] = snaps[i];
  return io_ctx_impl->set_snap_write_context(seq, snv);
}

// Pool-managed snapshot operations (by name).
int librados::IoCtx::snap_create(const char *snapname)
{
  return io_ctx_impl->snap_create(snapname);
}

int librados::IoCtx::snap_lookup(const char *name, snap_t *snapid)
{
  return io_ctx_impl->snap_lookup(name, snapid);
}

int librados::IoCtx::snap_get_stamp(snap_t snapid, time_t *t)
{
  return io_ctx_impl->snap_get_stamp(snapid, t);
}

int librados::IoCtx::snap_get_name(snap_t snapid, std::string *s)
{
  return io_ctx_impl->snap_get_name(snapid, s);
}

int librados::IoCtx::snap_remove(const char *snapname)
{
  return io_ctx_impl->snap_remove(snapname);
}

int librados::IoCtx::snap_list(std::vector<snap_t> *snaps)
{
  return io_ctx_impl->snap_list(snaps);
}

// Roll an object back to a named pool snapshot.
int librados::IoCtx::snap_rollback(const std::string& oid, const char *snapname)
{
  return io_ctx_impl->rollback(oid, snapname);
}

// Deprecated name kept for backward compatibility
int librados::IoCtx::rollback(const std::string& oid, const char *snapname)
{
  return snap_rollback(oid, snapname);
}

// Self-managed (application-controlled) snapshot operations.
int librados::IoCtx::selfmanaged_snap_create(uint64_t *snapid)
{
  return io_ctx_impl->selfmanaged_snap_create(snapid);
}

void librados::IoCtx::aio_selfmanaged_snap_create(uint64_t *snapid,
                                                  AioCompletion *c)
{
  io_ctx_impl->aio_selfmanaged_snap_create(snapid, c->pc);
}

int librados::IoCtx::selfmanaged_snap_remove(uint64_t snapid)
{
  return io_ctx_impl->selfmanaged_snap_remove(snapid);
}

void librados::IoCtx::aio_selfmanaged_snap_remove(uint64_t snapid,
                                                  AioCompletion *c)
{
  io_ctx_impl->aio_selfmanaged_snap_remove(snapid, c->pc);
}

// Roll an object back to a self-managed snapshot, using the IoCtx's
// current write snap context.
int librados::IoCtx::selfmanaged_snap_rollback(const std::string& oid, uint64_t snapid)
{
  return io_ctx_impl->selfmanaged_snap_rollback_object(oid,
                                                       io_ctx_impl->snapc,
                                                       snapid);
}
// ---- Advisory locks (cls_lock object class) ----

// Take an exclusive advisory lock on oid. A null duration means the lock
// never expires; otherwise it is converted to a utime_t expiry.
int librados::IoCtx::lock_exclusive(const std::string &oid, const std::string &name,
                                    const std::string &cookie,
                                    const std::string &description,
                                    struct timeval * duration, uint8_t flags)
{
  utime_t dur = utime_t();
  if (duration)
    dur.set_from_timeval(duration);
  // Exclusive locks carry no tag (empty string).
  return rados::cls::lock::lock(this, oid, name, ClsLockType::EXCLUSIVE, cookie, "",
                                description, dur, flags);
}

// Take a shared advisory lock; holders sharing the same tag may coexist.
int librados::IoCtx::lock_shared(const std::string &oid, const std::string &name,
                                 const std::string &cookie, const std::string &tag,
                                 const std::string &description,
                                 struct timeval * duration, uint8_t flags)
{
  utime_t dur = utime_t();
  if (duration)
    dur.set_from_timeval(duration);
  return rados::cls::lock::lock(this, oid, name, ClsLockType::SHARED, cookie, tag,
                                description, dur, flags);
}

// Release a lock previously acquired with the same name+cookie.
int librados::IoCtx::unlock(const std::string &oid, const std::string &name,
                            const std::string &cookie)
{
  return rados::cls::lock::unlock(this, oid, name, cookie);
}
// Adapter that lets a cls_lock async unlock drive a user-visible
// AioCompletion: holds an extra ref on the completion impl for the
// duration of the operation and fires the user's callback on completion.
struct AioUnlockCompletion : public librados::ObjectOperationCompletion {
  librados::AioCompletionImpl *completion;
  AioUnlockCompletion(librados::AioCompletion *c) : completion(c->pc) {
    completion->get();
  };
  void handle_completion(int r, bufferlist& outbl) override {
    // NOTE(review): the user callback is invoked before taking the
    // completion lock, and callback_complete is assumed non-null here —
    // presumably callers always register one; verify against aio_unlock
    // usage before changing this ordering.
    rados_callback_t cb = completion->callback_complete;
    void *cb_arg = completion->callback_complete_arg;
    cb(completion, cb_arg);
    completion->lock.lock();
    completion->callback_complete = NULL;
    completion->cond.notify_all();
    // Drops our extra reference and releases the lock taken above.
    completion->put_unlock();
  }
};
// Asynchronously release an advisory lock.
int librados::IoCtx::aio_unlock(const std::string &oid, const std::string &name,
                                const std::string &cookie, AioCompletion *c)
{
  return rados::cls::lock::aio_unlock(this, oid, name, cookie, c);
}

// Forcibly break another client's lock; `client` must parse as an
// entity name (e.g. "client.1234").
int librados::IoCtx::break_lock(const std::string &oid, const std::string &name,
                                const std::string &client, const std::string &cookie)
{
  entity_name_t locker;
  if (!locker.parse(client))
    return -EINVAL;
  return rados::cls::lock::break_lock(this, oid, name, cookie, locker);
}

// Enumerate current holders of the named lock. On success returns the
// number of lockers; output pointers are optional and only filled when
// non-null.
int librados::IoCtx::list_lockers(const std::string &oid, const std::string &name,
                                  int *exclusive,
                                  std::string *tag,
                                  std::list<librados::locker_t> *lockers)
{
  std::list<librados::locker_t> tmp_lockers;
  map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t> rados_lockers;
  std::string tmp_tag;
  ClsLockType tmp_type;
  int r = rados::cls::lock::get_lock_info(this, oid, name, &rados_lockers, &tmp_type, &tmp_tag);
  if (r < 0)
    return r;
  // Flatten the cls_lock representation into the public locker_t form.
  map<rados::cls::lock::locker_id_t, rados::cls::lock::locker_info_t>::iterator map_it;
  for (map_it = rados_lockers.begin(); map_it != rados_lockers.end(); ++map_it) {
    librados::locker_t locker;
    locker.client = stringify(map_it->first.locker);
    locker.cookie = map_it->first.cookie;
    locker.address = stringify(map_it->second.addr);
    tmp_lockers.push_back(locker);
  }
  if (lockers)
    *lockers = tmp_lockers;
  if (tag)
    *tag = tmp_tag;
  if (exclusive) {
    if (tmp_type == ClsLockType::EXCLUSIVE)
      *exclusive = 1;
    else
      *exclusive = 0;
  }
  return tmp_lockers.size();
}
// ---- Object listing ----
// Each overload opens a listing handle over this IoCtx, optionally
// installs a server-side filter, positions the iterator, and returns it.

// Begin iteration from the start of the pool/namespace.
librados::NObjectIterator librados::IoCtx::nobjects_begin(
    const bufferlist &filter)
{
  rados_list_ctx_t listh;
  rados_nobjects_list_open(io_ctx_impl, &listh);
  NObjectIterator iter((ObjListCtx*)listh);
  if (filter.length() > 0) {
    iter.set_filter(filter);
  }
  // Prime the iterator with the first object (or end).
  iter.get_next();
  return iter;
}

// Begin iteration at a PG hash position.
librados::NObjectIterator librados::IoCtx::nobjects_begin(
    uint32_t pos, const bufferlist &filter)
{
  rados_list_ctx_t listh;
  rados_nobjects_list_open(io_ctx_impl, &listh);
  NObjectIterator iter((ObjListCtx*)listh);
  if (filter.length() > 0) {
    iter.set_filter(filter);
  }
  iter.seek(pos);
  return iter;
}

// Begin iteration at an opaque cursor previously obtained from a listing.
librados::NObjectIterator librados::IoCtx::nobjects_begin(
    const ObjectCursor& cursor, const bufferlist &filter)
{
  rados_list_ctx_t listh;
  rados_nobjects_list_open(io_ctx_impl, &listh);
  NObjectIterator iter((ObjListCtx*)listh);
  if (filter.length() > 0) {
    iter.set_filter(filter);
  }
  iter.seek(cursor);
  return iter;
}

// Shared sentinel marking the end of any object listing.
const librados::NObjectIterator& librados::IoCtx::nobjects_end() const
{
  return NObjectIterator::__EndObjectIterator;
}
// List the cache-tier HitSet intervals recorded for the PG containing
// `hash`; completes asynchronously into *pls.
int librados::IoCtx::hit_set_list(uint32_t hash, AioCompletion *c,
                                  std::list< std::pair<time_t, time_t> > *pls)
{
  return io_ctx_impl->hit_set_list(hash, c->pc, pls);
}

// Fetch the serialized HitSet covering `stamp` for that PG.
int librados::IoCtx::hit_set_get(uint32_t hash, AioCompletion *c, time_t stamp,
                                 bufferlist *pbl)
{
  return io_ctx_impl->hit_set_get(hash, c->pc, stamp, pbl);
}

// Version of the last object touched through this IoCtx.
uint64_t librados::IoCtx::get_last_version()
{
  return io_ctx_impl->last_version();
}
// ---- Asynchronous per-object reads and writes ----

// Async read at the IoCtx's current read snapshot.
int librados::IoCtx::aio_read(const std::string& oid, librados::AioCompletion *c,
                              bufferlist *pbl, size_t len, uint64_t off)
{
  return io_ctx_impl->aio_read(oid, c->pc, pbl, len, off,
                               io_ctx_impl->snap_seq);
}

// Async read at an explicit snapshot id.
int librados::IoCtx::aio_read(const std::string& oid, librados::AioCompletion *c,
                              bufferlist *pbl, size_t len, uint64_t off,
                              uint64_t snapid)
{
  return io_ctx_impl->aio_read(oid, c->pc, pbl, len, off, snapid);
}

// Async object-class method invocation.
int librados::IoCtx::aio_exec(const std::string& oid,
                              librados::AioCompletion *c, const char *cls,
                              const char *method, bufferlist& inbl,
                              bufferlist *outbl)
{
  object_t obj(oid);
  return io_ctx_impl->aio_exec(obj, c->pc, cls, method, inbl, outbl);
}

// Async server-side byte comparison at offset off.
int librados::IoCtx::aio_cmpext(const std::string& oid,
                                librados::AioCompletion *c,
                                uint64_t off,
                                bufferlist& cmp_bl)
{
  return io_ctx_impl->aio_cmpext(oid, c->pc, off, cmp_bl);
}

// Async sparse read at the current read snapshot; *m receives the
// extent map, *data_bl the concatenated data.
int librados::IoCtx::aio_sparse_read(const std::string& oid, librados::AioCompletion *c,
                                     std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
                                     size_t len, uint64_t off)
{
  return io_ctx_impl->aio_sparse_read(oid, c->pc,
                                      m, data_bl, len, off,
                                      io_ctx_impl->snap_seq);
}

int librados::IoCtx::aio_sparse_read(const std::string& oid, librados::AioCompletion *c,
                                     std::map<uint64_t,uint64_t> *m, bufferlist *data_bl,
                                     size_t len, uint64_t off, uint64_t snapid)
{
  return io_ctx_impl->aio_sparse_read(oid, c->pc,
                                      m, data_bl, len, off, snapid);
}

int librados::IoCtx::aio_write(const std::string& oid, librados::AioCompletion *c,
                               const bufferlist& bl, size_t len, uint64_t off)
{
  return io_ctx_impl->aio_write(oid, c->pc, bl, len, off);
}

int librados::IoCtx::aio_append(const std::string& oid, librados::AioCompletion *c,
                                const bufferlist& bl, size_t len)
{
  return io_ctx_impl->aio_append(oid, c->pc, bl, len);
}

// Async full-object overwrite.
int librados::IoCtx::aio_write_full(const std::string& oid, librados::AioCompletion *c,
                                    const bufferlist& bl)
{
  object_t obj(oid);
  return io_ctx_impl->aio_write_full(obj, c->pc, bl);
}

// Async pattern fill of [off, off+write_len) with bl.
int librados::IoCtx::aio_writesame(const std::string& oid, librados::AioCompletion *c,
                                   const bufferlist& bl, size_t write_len,
                                   uint64_t off)
{
  return io_ctx_impl->aio_writesame(oid, c->pc, bl, write_len, off);
}

int librados::IoCtx::aio_remove(const std::string& oid, librados::AioCompletion *c)
{
  return io_ctx_impl->aio_remove(oid, c->pc);
}

int librados::IoCtx::aio_remove(const std::string& oid, librados::AioCompletion *c, int flags)
{
  return io_ctx_impl->aio_remove(oid, c->pc, flags);
}

// Signal completion c once all in-flight aio writes have completed.
int librados::IoCtx::aio_flush_async(librados::AioCompletion *c)
{
  io_ctx_impl->flush_aio_writes_async(c->pc);
  return 0;
}

// Block until all in-flight aio writes have completed.
int librados::IoCtx::aio_flush()
{
  io_ctx_impl->flush_aio_writes();
  return 0;
}
// Per-call context for aio_getxattr: remembers the destination bufferlist
// so the completion callback can convert "success" into the attribute's
// byte length, then forwards to the user's completion.
struct AioGetxattrDataPP {
  AioGetxattrDataPP(librados::AioCompletionImpl *c, bufferlist *_bl) :
    bl(_bl), completion(c) {}
  bufferlist *bl;                                    // user's output buffer
  struct librados::CB_AioCompleteAndSafe completion; // fires user callbacks
};

// Internal completion hook: on success, replace the return value with the
// xattr length (matching the synchronous getxattr contract), then invoke
// the user's completion and free the context.
static void rados_aio_getxattr_completepp(rados_completion_t c, void *arg) {
  AioGetxattrDataPP *cdata = reinterpret_cast<AioGetxattrDataPP*>(arg);
  int rc = rados_aio_get_return_value(c);
  if (rc >= 0) {
    rc = cdata->bl->length();
  }
  cdata->completion(rc);
  delete cdata;
}
// Async getxattr. Wraps the caller's completion in an internal one whose
// callback rewrites the return value to the attribute length (see
// rados_aio_getxattr_completepp above).
int librados::IoCtx::aio_getxattr(const std::string& oid, librados::AioCompletion *c,
                                  const char *name, bufferlist& bl)
{
  // create data object to be passed to async callback
  AioGetxattrDataPP *cdata = new AioGetxattrDataPP(c->pc, &bl);
  if (!cdata) {
    return -ENOMEM;
  }
  // create completion callback
  librados::AioCompletionImpl *comp = new librados::AioCompletionImpl;
  comp->set_complete_callback(cdata, rados_aio_getxattr_completepp);
  // call actual getxattr from IoCtxImpl
  // NOTE(review): if the impl call fails to queue the op, comp/cdata are
  // presumably cleaned up by the callback path — verify there is no leak
  // on the synchronous-error path before relying on this.
  object_t obj(oid);
  return io_ctx_impl->aio_getxattr(obj, comp, name, bl);
}
// Async fetch of all xattrs into attrset.
int librados::IoCtx::aio_getxattrs(const std::string& oid, AioCompletion *c,
                                   map<std::string, bufferlist>& attrset)
{
  object_t obj(oid);
  return io_ctx_impl->aio_getxattrs(obj, c->pc, attrset);
}

int librados::IoCtx::aio_setxattr(const std::string& oid, AioCompletion *c,
                                  const char *name, bufferlist& bl)
{
  object_t obj(oid);
  return io_ctx_impl->aio_setxattr(obj, c->pc, name, bl);
}

int librados::IoCtx::aio_rmxattr(const std::string& oid, AioCompletion *c,
                                 const char *name)
{
  object_t obj(oid);
  return io_ctx_impl->aio_rmxattr(obj, c->pc, name);
}

// Async stat with second-resolution mtime.
int librados::IoCtx::aio_stat(const std::string& oid, librados::AioCompletion *c,
                              uint64_t *psize, time_t *pmtime)
{
  object_t obj(oid);
  return io_ctx_impl->aio_stat(obj, c->pc, psize, pmtime);
}

// Best-effort cancellation of an in-flight aio operation.
int librados::IoCtx::aio_cancel(librados::AioCompletion *c)
{
  return io_ctx_impl->aio_cancel(c->pc);
}
// ---- Watch/notify registration ----

// Legacy watch (WatchCtx, v1); the `ver` parameter is ignored.
int librados::IoCtx::watch(const string& oid, uint64_t ver, uint64_t *cookie,
                           librados::WatchCtx *ctx)
{
  object_t obj(oid);
  return io_ctx_impl->watch(obj, cookie, ctx, NULL);
}

// Modern watch (WatchCtx2): delivers notify and error callbacks.
int librados::IoCtx::watch2(const string& oid, uint64_t *cookie,
                            librados::WatchCtx2 *ctx2)
{
  object_t obj(oid);
  return io_ctx_impl->watch(obj, cookie, NULL, ctx2);
}

// As watch2 with an explicit timeout (seconds) for watch liveness.
int librados::IoCtx::watch3(const string& oid, uint64_t *cookie,
                            librados::WatchCtx2 *ctx2, uint32_t timeout)
{
  object_t obj(oid);
  return io_ctx_impl->watch(obj, cookie, NULL, ctx2, timeout);
}

// Async variants of watch2/watch3.
int librados::IoCtx::aio_watch(const string& oid, AioCompletion *c,
                               uint64_t *cookie,
                               librados::WatchCtx2 *ctx2)
{
  object_t obj(oid);
  return io_ctx_impl->aio_watch(obj, c->pc, cookie, NULL, ctx2);
}

int librados::IoCtx::aio_watch2(const string& oid, AioCompletion *c,
                                uint64_t *cookie,
                                librados::WatchCtx2 *ctx2,
                                uint32_t timeout)
{
  object_t obj(oid);
  return io_ctx_impl->aio_watch(obj, c->pc, cookie, NULL, ctx2, timeout);
}

// Unregister a watch by handle; the oid parameter is unused (the handle
// identifies the watch).
int librados::IoCtx::unwatch(const string& oid, uint64_t handle)
{
  return io_ctx_impl->unwatch(handle);
}

int librados::IoCtx::unwatch2(uint64_t handle)
{
  return io_ctx_impl->unwatch(handle);
}

int librados::IoCtx::aio_unwatch(uint64_t handle, AioCompletion *c)
{
  return io_ctx_impl->aio_unwatch(handle, c->pc);
}

// Check watch liveness; returns time since last confirmed ping or error.
int librados::IoCtx::watch_check(uint64_t handle)
{
  return io_ctx_impl->watch_check(handle);
}
// Legacy notify (v1): `ver` is ignored; no timeout or reply collection.
int librados::IoCtx::notify(const string& oid, uint64_t ver, bufferlist& bl)
{
  object_t obj(oid);
  return io_ctx_impl->notify(obj, bl, 0, NULL, NULL, NULL);
}

// Notify watchers of oid with payload bl; waits up to timeout_ms (0 =
// default) and optionally collects the encoded replies in *preplybl.
int librados::IoCtx::notify2(const string& oid, bufferlist& bl,
                             uint64_t timeout_ms, bufferlist *preplybl)
{
  object_t obj(oid);
  return io_ctx_impl->notify(obj, bl, timeout_ms, preplybl, NULL, NULL);
}

// Async form of notify2.
int librados::IoCtx::aio_notify(const string& oid, AioCompletion *c,
                                bufferlist& bl, uint64_t timeout_ms,
                                bufferlist *preplybl)
{
  object_t obj(oid);
  return io_ctx_impl->aio_notify(obj, c->pc, bl, timeout_ms, preplybl, NULL,
                                 NULL);
}
// Unpack the reply blob produced by a notify: first a map keyed by
// (notifier_id, cookie) holding each acking watcher's payload, then the
// set of (notifier_id, cookie) pairs that timed out. Results are appended
// to *acks and *timeouts.
void librados::IoCtx::decode_notify_response(bufferlist &bl,
                                             std::vector<librados::notify_ack_t> *acks,
                                             std::vector<librados::notify_timeout_t> *timeouts)
{
  std::map<std::pair<uint64_t,uint64_t>,bufferlist> replies;
  std::set<std::pair<uint64_t,uint64_t>> timed_out;
  auto p = bl.cbegin();
  decode(replies, p);
  decode(timed_out, p);
  for (const auto &reply : replies) {
    acks->push_back(librados::notify_ack_t{reply.first.first,
                                           reply.first.second,
                                           reply.second});
  }
  for (const auto &who : timed_out) {
    timeouts->push_back(librados::notify_timeout_t{who.first, who.second});
  }
}
// Acknowledge receipt of a notify (from within a watch callback),
// optionally returning a reply payload to the notifier.
void librados::IoCtx::notify_ack(const std::string& o,
                                 uint64_t notify_id, uint64_t handle,
                                 bufferlist& bl)
{
  io_ctx_impl->notify_ack(o, notify_id, handle, bl);
}

// Enumerate the watchers currently registered on oid.
int librados::IoCtx::list_watchers(const std::string& oid,
                                   std::list<obj_watch_t> *out_watchers)
{
  ObjectReadOperation op;
  int r;
  op.list_watchers(out_watchers, &r);
  bufferlist bl;
  int ret = operate(oid, &op, &bl);
  if (ret < 0)
    return ret;
  return r;
}

// Enumerate an object's clones/snapshots. Requires the IoCtx to be in
// snapdir mode (snap_set_read(CEPH_SNAPDIR)); otherwise -EINVAL.
int librados::IoCtx::list_snaps(const std::string& oid,
                                snap_set_t *out_snaps)
{
  ObjectReadOperation op;
  int r;
  if (io_ctx_impl->snap_seq != CEPH_SNAPDIR)
    return -EINVAL;
  op.list_snaps(out_snaps, &r);
  bufferlist bl;
  int ret = operate(oid, &op, &bl);
  if (ret < 0)
    return ret;
  return r;
}
// Default timeout (seconds) for notify operations issued via this IoCtx.
void librados::IoCtx::set_notify_timeout(uint32_t timeout)
{
  io_ctx_impl->set_notify_timeout(timeout);
}

// Advise the OSD about expected object/write sizes (allocation hint).
int librados::IoCtx::set_alloc_hint(const std::string& o,
                                    uint64_t expected_object_size,
                                    uint64_t expected_write_size)
{
  object_t oid(o);
  return io_ctx_impl->set_alloc_hint(oid, expected_object_size,
                                     expected_write_size, 0);
}

// As above with LIBRADOS_ALLOC_HINT_* flags.
int librados::IoCtx::set_alloc_hint2(const std::string& o,
                                     uint64_t expected_object_size,
                                     uint64_t expected_write_size,
                                     uint32_t flags)
{
  object_t oid(o);
  return io_ctx_impl->set_alloc_hint(oid, expected_object_size,
                                     expected_write_size, flags);
}

// Require subsequent ops to see exactly this object version.
void librados::IoCtx::set_assert_version(uint64_t ver)
{
  io_ctx_impl->set_assert_version(ver);
}

// Override the placement key used to hash objects to PGs.
void librados::IoCtx::locator_set_key(const string& key)
{
  io_ctx_impl->oloc.key = key;
}

// Select the RADOS namespace used for subsequent operations.
void librados::IoCtx::set_namespace(const string& nspace)
{
  io_ctx_impl->oloc.nspace = nspace;
}

std::string librados::IoCtx::get_namespace() const
{
  return io_ctx_impl->oloc.nspace;
}
// Pool id this IoCtx is bound to.
int64_t librados::IoCtx::get_id()
{
  return io_ctx_impl->get_id();
}

// Legacy hash accessors: errors are flattened to a hash of 0, which is
// indistinguishable from a real hash — the *2 variants below return the
// error code instead.
uint32_t librados::IoCtx::get_object_hash_position(const std::string& oid)
{
  uint32_t hash;
  int r = io_ctx_impl->get_object_hash_position(oid, &hash);
  if (r < 0)
    hash = 0;
  return hash;
}

uint32_t librados::IoCtx::get_object_pg_hash_position(const std::string& oid)
{
  uint32_t hash;
  int r = io_ctx_impl->get_object_pg_hash_position(oid, &hash);
  if (r < 0)
    hash = 0;
  return hash;
}

// Error-reporting variants of the hash accessors.
int librados::IoCtx::get_object_hash_position2(
    const std::string& oid, uint32_t *hash_position)
{
  return io_ctx_impl->get_object_hash_position(oid, hash_position);
}

int librados::IoCtx::get_object_pg_hash_position2(
    const std::string& oid, uint32_t *pg_hash_position)
{
  return io_ctx_impl->get_object_pg_hash_position(oid, pg_hash_position);
}
// Opaque handle to the cluster's CephContext (configuration).
librados::config_t librados::IoCtx::cct()
{
  return (config_t)io_ctx_impl->client->cct;
}

// Private constructor used internally to wrap an existing impl without
// taking an extra reference (caller transfers ownership).
librados::IoCtx::IoCtx(IoCtxImpl *io_ctx_impl_)
  : io_ctx_impl(io_ctx_impl_)
{
}

// Deprecated names for the pool-full-try flag; kept as aliases of the
// set_pool_full_try/unset_pool_full_try pair below.
void librados::IoCtx::set_osdmap_full_try()
{
  io_ctx_impl->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}

void librados::IoCtx::unset_osdmap_full_try()
{
  io_ctx_impl->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}

bool librados::IoCtx::get_pool_full_try()
{
  return (io_ctx_impl->extra_op_flags & CEPH_OSD_FLAG_FULL_TRY) != 0;
}

// Allow writes to proceed even when the pool/cluster reports full.
void librados::IoCtx::set_pool_full_try()
{
  io_ctx_impl->extra_op_flags |= CEPH_OSD_FLAG_FULL_TRY;
}

void librados::IoCtx::unset_pool_full_try()
{
  io_ctx_impl->extra_op_flags &= ~CEPH_OSD_FLAG_FULL_TRY;
}
///////////////////////////// Rados //////////////////////////////
// Report the librados version triple.
void librados::Rados::version(int *major, int *minor, int *extra)
{
  rados_version(major, minor, extra);
}

librados::Rados::Rados() : client(NULL)
{
}

// Construct a Rados handle sharing the cluster connection behind an
// existing IoCtx (takes a reference on the RadosClient).
librados::Rados::Rados(IoCtx &ioctx)
{
  client = ioctx.io_ctx_impl->client;
  ceph_assert(client != NULL);
  client->get();
}

librados::Rados::~Rados()
{
  shutdown();
}

// Rebind `rados` to the C-API cluster handle, dropping any previous
// client reference and taking one on the new handle (may be null).
void librados::Rados::from_rados_t(rados_t cluster, Rados &rados) {
  if (rados.client) {
    rados.client->put();
  }
  rados.client = static_cast<RadosClient*>(cluster);
  if (rados.client) {
    rados.client->get();
  }
}

// Initialization wrappers over the C API; `id` is the client id (without
// the "client." prefix), init2 takes a fully-qualified name + cluster.
int librados::Rados::init(const char * const id)
{
  return rados_create((rados_t *)&client, id);
}

int librados::Rados::init2(const char * const name,
                           const char * const clustername, uint64_t flags)
{
  return rados_create2((rados_t *)&client, clustername, name, flags);
}

// Share an existing CephContext (configuration) with this handle.
int librados::Rados::init_with_context(config_t cct_)
{
  return rados_create_with_context((rados_t *)&client, (rados_config_t)cct_);
}
// Establish the connection to the cluster (monitors).
int librados::Rados::connect()
{
  return client->connect();
}

librados::config_t librados::Rados::cct()
{
  return (config_t)client->cct;
}

// Block until all pending watch/notify callbacks have been delivered.
int librados::Rados::watch_flush()
{
  if (!client)
    return -EINVAL;
  return client->watch_flush();
}

// Async variant of watch_flush.
int librados::Rados::aio_watch_flush(AioCompletion *c)
{
  if (!client)
    return -EINVAL;
  return client->async_watch_flush(c->pc);
}

// Drop our reference; the client is shut down and destroyed only when
// the last reference goes away. Safe to call on an uninitialized handle.
void librados::Rados::shutdown()
{
  if (!client)
    return;
  if (client->put()) {
    client->shutdown();
    delete client;
    client = NULL;
  }
}

// Globally-unique id of this client instance within the cluster.
uint64_t librados::Rados::get_instance_id()
{
  return client->get_instance_id();
}

// Minimum OSD release the cluster currently requires.
int librados::Rados::get_min_compatible_osd(int8_t* require_osd_release)
{
  return client->get_min_compatible_osd(require_osd_release);
}

// Minimum client releases (observed / enforced) for this cluster.
int librados::Rados::get_min_compatible_client(int8_t* min_compat_client,
                                               int8_t* require_min_compat_client)
{
  return client->get_min_compatible_client(min_compat_client,
                                           require_min_compat_client);
}
int librados::Rados::conf_read_file(const char * const path) const
{
return rados_conf_read_file((rados_t)client, path);
}
int librados::Rados::conf_parse_argv(int argc, const char ** argv) const
{
return rados_conf_parse_argv((rados_t)client, argc, argv);
}
int librados::Rados::conf_parse_argv_remainder(int argc, const char ** argv,
const char ** remargv) const
{
return rados_conf_parse_argv_remainder((rados_t)client, argc, argv, remargv);
}
int librados::Rados::conf_parse_env(const char *name) const
{
return rados_conf_parse_env((rados_t)client, name);
}
int librados::Rados::conf_set(const char *option, const char *value)
{
return rados_conf_set((rados_t)client, option, value);
}
// Look up a config option and return its value as a std::string.
// Returns 0 on success, or a negative error code if the option is unknown.
int librados::Rados::conf_get(const char *option, std::string &val)
{
  // get_val(..., -1) asks the conf layer to malloc() a buffer of the
  // required size; we copy it into val and always release the buffer.
  char *buf = NULL;
  const auto& conf = client->cct->_conf;
  int r = conf.get_val(option, &buf, -1);
  if (r == 0) {
    val = buf;
  }
  free(buf);
  return r;
}
int librados::Rados::service_daemon_register(
const std::string& service, ///< service name (e.g., 'rgw')
const std::string& name, ///< daemon name (e.g., 'gwfoo')
const std::map<std::string,std::string>& metadata) ///< static metadata about daemon
{
return client->service_daemon_register(service, name, metadata);
}
int librados::Rados::service_daemon_update_status(
std::map<std::string,std::string>&& status)
{
return client->service_daemon_update_status(std::move(status));
}
int librados::Rados::pool_create(const char *name)
{
string str(name);
return client->pool_create(str);
}
int librados::Rados::pool_create(const char *name, uint64_t auid)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create(str);
}
int librados::Rados::pool_create(const char *name, uint64_t auid, __u8 crush_rule)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create(str, crush_rule);
}
int librados::Rados::pool_create_with_rule(const char *name, __u8 crush_rule)
{
string str(name);
return client->pool_create(str, crush_rule);
}
int librados::Rados::pool_create_async(const char *name, PoolAsyncCompletion *c)
{
string str(name);
return client->pool_create_async(str, c->pc);
}
int librados::Rados::pool_create_async(const char *name, uint64_t auid, PoolAsyncCompletion *c)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create_async(str, c->pc);
}
int librados::Rados::pool_create_async(const char *name, uint64_t auid, __u8 crush_rule,
PoolAsyncCompletion *c)
{
if (auid != CEPH_AUTH_UID_DEFAULT) {
return -EINVAL;
}
string str(name);
return client->pool_create_async(str, c->pc, crush_rule);
}
int librados::Rados::pool_create_with_rule_async(
const char *name, __u8 crush_rule,
PoolAsyncCompletion *c)
{
string str(name);
return client->pool_create_async(str, c->pc, crush_rule);
}
int librados::Rados::pool_get_base_tier(int64_t pool_id, int64_t* base_tier)
{
tracepoint(librados, rados_pool_get_base_tier_enter, (rados_t)client, pool_id);
int retval = client->pool_get_base_tier(pool_id, base_tier);
tracepoint(librados, rados_pool_get_base_tier_exit, retval, *base_tier);
return retval;
}
int librados::Rados::pool_delete(const char *name)
{
return client->pool_delete(name);
}
int librados::Rados::pool_delete_async(const char *name, PoolAsyncCompletion *c)
{
return client->pool_delete_async(name, c->pc);
}
// List the names of all pools in the cluster.
// On success v is replaced with the pool names; on error the negative
// error code from the underlying lookup is returned and v is untouched.
int librados::Rados::pool_list(std::list<std::string>& v)
{
  // Fetch (id, name) pairs from the cluster, then keep only the names.
  std::list<std::pair<int64_t, std::string> > pools;
  int r = client->pool_list(pools);
  if (r < 0) {
    return r;
  }
  v.clear();
  for (const auto& pool : pools) {
    v.push_back(pool.second);
  }
  return 0;
}
int librados::Rados::pool_list2(std::list<std::pair<int64_t, std::string> >& v)
{
return client->pool_list(v);
}
int64_t librados::Rados::pool_lookup(const char *name)
{
return client->lookup_pool(name);
}
int librados::Rados::pool_reverse_lookup(int64_t id, std::string *name)
{
return client->pool_get_name(id, name, true);
}
int librados::Rados::mon_command(string cmd, const bufferlist& inbl,
bufferlist *outbl, string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
return client->mon_command(cmdvec, inbl, outbl, outs);
}
int librados::Rados::osd_command(int osdid, std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
return client->osd_command(osdid, cmdvec, inbl, outbl, outs);
}
int librados::Rados::mgr_command(std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
return client->mgr_command(cmdvec, inbl, outbl, outs);
}
int librados::Rados::pg_command(const char *pgstr, std::string cmd, const bufferlist& inbl,
bufferlist *outbl, std::string *outs)
{
vector<string> cmdvec;
cmdvec.push_back(cmd);
pg_t pgid;
if (!pgid.parse(pgstr))
return -EINVAL;
return client->pg_command(pgid, cmdvec, inbl, outbl, outs);
}
int librados::Rados::ioctx_create(const char *name, IoCtx &io)
{
rados_ioctx_t p;
int ret = rados_ioctx_create((rados_t)client, name, &p);
if (ret)
return ret;
io.close();
io.io_ctx_impl = (IoCtxImpl*)p;
return 0;
}
int librados::Rados::ioctx_create2(int64_t pool_id, IoCtx &io)
{
rados_ioctx_t p;
int ret = rados_ioctx_create2((rados_t)client, pool_id, &p);
if (ret)
return ret;
io.close();
io.io_ctx_impl = (IoCtxImpl*)p;
return 0;
}
void librados::Rados::test_blocklist_self(bool set)
{
client->blocklist_self(set);
}
// Gather per-pool usage statistics for the pools named in v.
// Raw ::pool_stat_t results from the cluster are converted into the public
// librados::stats_map representation, keyed by pool name.  `per_pool`
// reports whether the OSDs track allocation per pool, which changes how
// the allocated/user byte counts are derived below.
int librados::Rados::get_pool_stats(std::list<string>& v,
				    stats_map& result)
{
  map<string,::pool_stat_t> rawresult;
  bool per_pool = false;
  int r = client->get_pool_stats(v, &rawresult, &per_pool);
  for (map<string,::pool_stat_t>::iterator p = rawresult.begin();
       p != rawresult.end();
       ++p) {
    pool_stat_t& pv = result[p->first];
    auto& pstat = p->second;
    store_statfs_t &statfs = pstat.store_stats;
    // Allocated space = data + omap, honoring per-pool accounting mode.
    uint64_t allocated_bytes = pstat.get_allocated_data_bytes(per_pool) +
      pstat.get_allocated_omap_bytes(per_pool);
    // FIXME: raw_used_rate is unknown hence use 1.0 here
    // meaning we keep net amount aggregated over all replicas
    // Not a big deal so far since this field isn't exposed
    uint64_t user_bytes = pstat.get_user_data_bytes(1.0, per_pool) +
      pstat.get_user_omap_bytes(1.0, per_pool);
    object_stat_sum_t *sum = &p->second.stats.sum;
    // num_kb is the allocated size rounded up to the next KiB.
    pv.num_kb = shift_round_up(allocated_bytes, 10);
    pv.num_bytes = allocated_bytes;
    pv.num_objects = sum->num_objects;
    pv.num_object_clones = sum->num_object_clones;
    pv.num_object_copies = sum->num_object_copies;
    pv.num_objects_missing_on_primary = sum->num_objects_missing_on_primary;
    pv.num_objects_unfound = sum->num_objects_unfound;
    pv.num_objects_degraded = sum->num_objects_degraded;
    pv.num_rd = sum->num_rd;
    pv.num_rd_kb = sum->num_rd_kb;
    pv.num_wr = sum->num_wr;
    pv.num_wr_kb = sum->num_wr_kb;
    pv.num_user_bytes = user_bytes;
    pv.compressed_bytes_orig = statfs.data_compressed_original;
    pv.compressed_bytes = statfs.data_compressed;
    pv.compressed_bytes_alloc = statfs.data_compressed_allocated;
  }
  return r;
}
int librados::Rados::get_pool_stats(std::list<string>& v,
std::map<string, stats_map>& result)
{
stats_map m;
int r = get_pool_stats(v, m);
if (r < 0)
return r;
for (map<string,pool_stat_t>::iterator p = m.begin();
p != m.end();
++p) {
result[p->first][string()] = p->second;
}
return r;
}
int librados::Rados::get_pool_stats(std::list<string>& v,
string& category, // unused
std::map<string, stats_map>& result)
{
return -EOPNOTSUPP;
}
bool librados::Rados::get_pool_is_selfmanaged_snaps_mode(const std::string& pool)
{
return client->get_pool_is_selfmanaged_snaps_mode(pool);
}
int librados::Rados::cluster_stat(cluster_stat_t& result)
{
ceph_statfs stats;
int r = client->get_fs_stats(stats);
result.kb = stats.kb;
result.kb_used = stats.kb_used;
result.kb_avail = stats.kb_avail;
result.num_objects = stats.num_objects;
return r;
}
int librados::Rados::cluster_fsid(string *fsid)
{
return client->get_fsid(fsid);
}
namespace librados {
struct PlacementGroupImpl {
pg_t pgid;
};
PlacementGroup::PlacementGroup()
: impl{new PlacementGroupImpl}
{}
PlacementGroup::PlacementGroup(const PlacementGroup& pg)
: impl{new PlacementGroupImpl}
{
impl->pgid = pg.impl->pgid;
}
PlacementGroup::~PlacementGroup()
{}
bool PlacementGroup::parse(const char* s)
{
return impl->pgid.parse(s);
}
}
std::ostream& librados::operator<<(std::ostream& out,
const librados::PlacementGroup& pg)
{
return out << pg.impl->pgid;
}
int librados::Rados::get_inconsistent_pgs(int64_t pool_id,
std::vector<PlacementGroup>* pgs)
{
std::vector<string> pgids;
if (auto ret = client->get_inconsistent_pgs(pool_id, &pgids); ret) {
return ret;
}
for (const auto& pgid : pgids) {
librados::PlacementGroup pg;
if (!pg.parse(pgid.c_str())) {
return -EINVAL;
}
pgs->emplace_back(pg);
}
return 0;
}
int librados::Rados::get_inconsistent_objects(const PlacementGroup& pg,
const object_id_t &start_after,
unsigned max_return,
AioCompletion *c,
std::vector<inconsistent_obj_t>* objects,
uint32_t* interval)
{
IoCtx ioctx;
const pg_t pgid = pg.impl->pgid;
int r = ioctx_create2(pgid.pool(), ioctx);
if (r < 0) {
return r;
}
return ioctx.io_ctx_impl->get_inconsistent_objects(pgid,
start_after,
max_return,
c->pc,
objects,
interval);
}
int librados::Rados::get_inconsistent_snapsets(const PlacementGroup& pg,
const object_id_t &start_after,
unsigned max_return,
AioCompletion *c,
std::vector<inconsistent_snapset_t>* snapsets,
uint32_t* interval)
{
IoCtx ioctx;
const pg_t pgid = pg.impl->pgid;
int r = ioctx_create2(pgid.pool(), ioctx);
if (r < 0) {
return r;
}
return ioctx.io_ctx_impl->get_inconsistent_snapsets(pgid,
start_after,
max_return,
c->pc,
snapsets,
interval);
}
int librados::Rados::wait_for_latest_osdmap()
{
return client->wait_for_latest_osdmap();
}
int librados::Rados::blocklist_add(const std::string& client_address,
uint32_t expire_seconds)
{
return client->blocklist_add(client_address, expire_seconds);
}
std::string librados::Rados::get_addrs() const {
return client->get_addrs();
}
librados::PoolAsyncCompletion *librados::Rados::pool_async_create_completion()
{
PoolAsyncCompletionImpl *c = new PoolAsyncCompletionImpl;
return new PoolAsyncCompletion(c);
}
librados::AioCompletion *librados::Rados::aio_create_completion()
{
AioCompletionImpl *c = new AioCompletionImpl;
return new AioCompletion(c);
}
librados::AioCompletion *librados::Rados::aio_create_completion(void *cb_arg,
callback_t cb_complete,
callback_t cb_safe)
{
AioCompletionImpl *c;
int r = rados_aio_create_completion(cb_arg, cb_complete, cb_safe, (void**)&c);
ceph_assert(r == 0);
return new AioCompletion(c);
}
librados::AioCompletion *librados::Rados::aio_create_completion(void *cb_arg,
callback_t cb_complete)
{
AioCompletionImpl *c;
int r = rados_aio_create_completion2(cb_arg, cb_complete, (void**)&c);
ceph_assert(r == 0);
return new AioCompletion(c);
}
librados::ObjectOperation::ObjectOperation() : impl(new ObjectOperationImpl) {}
librados::ObjectOperation::ObjectOperation(ObjectOperation&& rhs)
: impl(rhs.impl) {
rhs.impl = nullptr;
}
librados::ObjectOperation&
librados::ObjectOperation::operator =(ObjectOperation&& rhs) {
delete impl;
impl = rhs.impl;
rhs.impl = nullptr;
return *this;
}
librados::ObjectOperation::~ObjectOperation() {
delete impl;
}
///////////////////////////// ListObject //////////////////////////////
librados::ListObject::ListObject() : impl(NULL)
{
}
librados::ListObject::ListObject(librados::ListObjectImpl *i): impl(i)
{
}
librados::ListObject::ListObject(const ListObject& rhs)
{
if (rhs.impl == NULL) {
impl = NULL;
return;
}
impl = new ListObjectImpl();
*impl = *(rhs.impl);
}
librados::ListObject& librados::ListObject::operator=(const ListObject& rhs)
{
if (rhs.impl == NULL) {
delete impl;
impl = NULL;
return *this;
}
if (impl == NULL)
impl = new ListObjectImpl();
*impl = *(rhs.impl);
return *this;
}
librados::ListObject::~ListObject()
{
if (impl)
delete impl;
impl = NULL;
}
const std::string& librados::ListObject::get_nspace() const
{
return impl->get_nspace();
}
const std::string& librados::ListObject::get_oid() const
{
return impl->get_oid();
}
const std::string& librados::ListObject::get_locator() const
{
return impl->get_locator();
}
std::ostream& librados::operator<<(std::ostream& out, const librados::ListObject& lop)
{
out << *(lop.impl);
return out;
}
librados::ObjectCursor::ObjectCursor()
{
c_cursor = (rados_object_list_cursor)new hobject_t();
}
librados::ObjectCursor::~ObjectCursor()
{
hobject_t *h = (hobject_t *)c_cursor;
delete h;
}
librados::ObjectCursor::ObjectCursor(rados_object_list_cursor c)
{
if (!c) {
c_cursor = nullptr;
} else {
c_cursor = (rados_object_list_cursor)new hobject_t(*(hobject_t *)c);
}
}
// Deep-copy assignment.  Releases the cursor we currently own before
// copying rhs's underlying hobject_t: the previous implementation simply
// overwrote c_cursor, leaking an hobject_t on every reassignment, and had
// no self-assignment guard.
librados::ObjectCursor& librados::ObjectCursor::operator=(const librados::ObjectCursor& rhs)
{
  if (this == &rhs) {
    return *this;
  }
  delete (hobject_t*)c_cursor;
  if (rhs.c_cursor != nullptr) {
    hobject_t *h = (hobject_t*)rhs.c_cursor;
    c_cursor = (rados_object_list_cursor)(new hobject_t(*h));
  } else {
    c_cursor = nullptr;
  }
  return *this;
}
// Order cursors by their underlying hobject_t; a null cursor compares as
// a default-constructed hobject_t.
bool librados::ObjectCursor::operator<(const librados::ObjectCursor &rhs) const
{
  const hobject_t lhs_hobj = (c_cursor == nullptr) ? hobject_t() : *((hobject_t*)c_cursor);
  const hobject_t rhs_hobj = (rhs.c_cursor == nullptr) ? hobject_t() : *((hobject_t*)(rhs.c_cursor));
  return lhs_hobj < rhs_hobj;
}
// Equality on the underlying hobject_t, with null treated as a
// default-constructed hobject_t.
bool librados::ObjectCursor::operator==(const librados::ObjectCursor &rhs) const
{
  const hobject_t lhs_hobj = (c_cursor == nullptr) ? hobject_t() : *((hobject_t*)c_cursor);
  const hobject_t rhs_hobj = (rhs.c_cursor == nullptr) ? hobject_t() : *((hobject_t*)(rhs.c_cursor));
  return cmp(lhs_hobj, rhs_hobj) == 0;
}
// Copy constructor delegates to operator=.  c_cursor must be initialized
// first: operator= now deletes the previously-owned cursor, and deleting
// an indeterminate pointer would be undefined behavior.
librados::ObjectCursor::ObjectCursor(const librados::ObjectCursor &rhs)
{
  c_cursor = nullptr;
  *this = rhs;
}
librados::ObjectCursor librados::IoCtx::object_list_begin()
{
hobject_t *h = new hobject_t(io_ctx_impl->objecter->enumerate_objects_begin());
ObjectCursor oc;
oc.set((rados_object_list_cursor)h);
return oc;
}
librados::ObjectCursor librados::IoCtx::object_list_end()
{
hobject_t *h = new hobject_t(io_ctx_impl->objecter->enumerate_objects_end());
librados::ObjectCursor oc;
oc.set((rados_object_list_cursor)h);
return oc;
}
void librados::ObjectCursor::set(rados_object_list_cursor c)
{
delete (hobject_t*)c_cursor;
c_cursor = c;
}
string librados::ObjectCursor::to_str() const
{
stringstream ss;
ss << *(hobject_t *)c_cursor;
return ss.str();
}
bool librados::ObjectCursor::from_str(const string& s)
{
if (s.empty()) {
*(hobject_t *)c_cursor = hobject_t();
return true;
}
return ((hobject_t *)c_cursor)->parse(s);
}
CEPH_RADOS_API std::ostream& librados::operator<<(std::ostream& os, const librados::ObjectCursor& oc)
{
if (oc.c_cursor) {
os << *(hobject_t *)oc.c_cursor;
} else {
os << hobject_t();
}
return os;
}
bool librados::IoCtx::object_list_is_end(const ObjectCursor &oc)
{
hobject_t *h = (hobject_t *)oc.c_cursor;
return h->is_max();
}
// Synchronously list objects in the range [start, finish), returning up to
// result_item_count entries, optionally filtered server-side by `filter`.
// On success fills *result, sets *next to the cursor to resume from, and
// returns the number of items found; on error returns a negative
// errno-style code and *next is parked at the end-of-listing cursor.
int librados::IoCtx::object_list(const ObjectCursor &start,
                const ObjectCursor &finish,
                const size_t result_item_count,
                const bufferlist &filter,
                std::vector<ObjectItem> *result,
                ObjectCursor *next)
{
  ceph_assert(result != nullptr);
  ceph_assert(next != nullptr);
  result->clear();
  // Drive the objecter's asynchronous enumeration and block on a waiter.
  ceph::async::waiter<boost::system::error_code,
                      std::vector<librados::ListObjectImpl>,
                      hobject_t> w;
  io_ctx_impl->objecter->enumerate_objects<librados::ListObjectImpl>(
    io_ctx_impl->poolid,
    io_ctx_impl->oloc.nspace,
    *((hobject_t*)start.c_cursor),
    *((hobject_t*)finish.c_cursor),
    result_item_count,
    filter,
    w);
  auto [ec, obj_result, next_hash] = w.wait();
  if (ec) {
    // On failure leave the caller's cursor at the maximal hobject so a
    // resume loop terminates.  ObjectCursor::set frees the old cursor.
    next->set((rados_object_list_cursor)(new hobject_t(hobject_t::get_max())));
    return ceph::from_error_code(ec);
  }
  next->set((rados_object_list_cursor)(new hobject_t(next_hash)));
  // Convert internal ListObjectImpl entries to the public ObjectItem type.
  for (auto i = obj_result.begin();
      i != obj_result.end(); ++i) {
    ObjectItem oi;
    oi.oid = i->oid;
    oi.nspace = i->nspace;
    oi.locator = i->locator;
    result->push_back(oi);
  }
  return obj_result.size();
}
void librados::IoCtx::object_list_slice(
const ObjectCursor start,
const ObjectCursor finish,
const size_t n,
const size_t m,
ObjectCursor *split_start,
ObjectCursor *split_finish)
{
ceph_assert(split_start != nullptr);
ceph_assert(split_finish != nullptr);
io_ctx_impl->object_list_slice(
*((hobject_t*)(start.c_cursor)),
*((hobject_t*)(finish.c_cursor)),
n,
m,
(hobject_t*)(split_start->c_cursor),
(hobject_t*)(split_finish->c_cursor));
}
int librados::IoCtx::application_enable(const std::string& app_name,
bool force)
{
return io_ctx_impl->application_enable(app_name, force);
}
int librados::IoCtx::application_enable_async(const std::string& app_name,
bool force,
PoolAsyncCompletion *c)
{
io_ctx_impl->application_enable_async(app_name, force, c->pc);
return 0;
}
int librados::IoCtx::application_list(std::set<std::string> *app_names)
{
return io_ctx_impl->application_list(app_names);
}
int librados::IoCtx::application_metadata_get(const std::string& app_name,
const std::string &key,
std::string* value)
{
return io_ctx_impl->application_metadata_get(app_name, key, value);
}
int librados::IoCtx::application_metadata_set(const std::string& app_name,
const std::string &key,
const std::string& value)
{
return io_ctx_impl->application_metadata_set(app_name, key, value);
}
int librados::IoCtx::application_metadata_remove(const std::string& app_name,
const std::string &key)
{
return io_ctx_impl->application_metadata_remove(app_name, key);
}
int librados::IoCtx::application_metadata_list(const std::string& app_name,
std::map<std::string, std::string> *values)
{
return io_ctx_impl->application_metadata_list(app_name, values);
}
| 82,069 | 24.881425 | 140 | cc |
null | ceph-main/src/librados/librados_tp.cc | #include "acconfig.h"
#ifdef WITH_LTTNG
#define TRACEPOINT_DEFINE
#define TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#include "tracing/librados.h"
#undef TRACEPOINT_PROBE_DYNAMIC_LINKAGE
#undef TRACEPOINT_DEFINE
#endif
| 210 | 20.1 | 40 | cc |
null | ceph-main/src/librados/librados_util.cc | #include "librados_util.h"
// Translate a public librados checksum type into the corresponding
// CEPH_OSD_CHECKSUM_OP_TYPE_* wire value.
// NOTE(review): on an unknown type this returns -1, which wraps to 0xff
// because the return type is uint8_t; presumably callers treat that value
// as invalid — confirm against the call sites.
uint8_t get_checksum_op_type(rados_checksum_type_t type) {
  switch (type) {
  case LIBRADOS_CHECKSUM_TYPE_XXHASH32:
    return CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH32;
  case LIBRADOS_CHECKSUM_TYPE_XXHASH64:
    return CEPH_OSD_CHECKSUM_OP_TYPE_XXHASH64;
  case LIBRADOS_CHECKSUM_TYPE_CRC32C:
    return CEPH_OSD_CHECKSUM_OP_TYPE_CRC32C;
  default:
    return -1;
  }
}
// Translate public LIBRADOS_OP_FLAG_* per-op flag bits into their
// CEPH_OSD_OP_FLAG_* wire equivalents.  Unknown bits are ignored.
int get_op_flags(int flags)
{
  static const struct {
    int lib_flag;
    int osd_flag;
  } mappings[] = {
    {LIBRADOS_OP_FLAG_EXCL, CEPH_OSD_OP_FLAG_EXCL},
    {LIBRADOS_OP_FLAG_FAILOK, CEPH_OSD_OP_FLAG_FAILOK},
    {LIBRADOS_OP_FLAG_FADVISE_RANDOM, CEPH_OSD_OP_FLAG_FADVISE_RANDOM},
    {LIBRADOS_OP_FLAG_FADVISE_SEQUENTIAL, CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL},
    {LIBRADOS_OP_FLAG_FADVISE_WILLNEED, CEPH_OSD_OP_FLAG_FADVISE_WILLNEED},
    {LIBRADOS_OP_FLAG_FADVISE_DONTNEED, CEPH_OSD_OP_FLAG_FADVISE_DONTNEED},
    {LIBRADOS_OP_FLAG_FADVISE_NOCACHE, CEPH_OSD_OP_FLAG_FADVISE_NOCACHE},
  };
  int rados_flags = 0;
  for (const auto& m : mappings) {
    if (flags & m.lib_flag)
      rados_flags |= m.osd_flag;
  }
  return rados_flags;
}
// Translate public librados::OPERATION_* operation flags into their
// CEPH_OSD_FLAG_* wire equivalents.  Unknown bits are ignored.
int translate_flags(int flags)
{
  static const struct {
    int lib_flag;
    int osd_flag;
  } mappings[] = {
    {librados::OPERATION_BALANCE_READS, CEPH_OSD_FLAG_BALANCE_READS},
    {librados::OPERATION_LOCALIZE_READS, CEPH_OSD_FLAG_LOCALIZE_READS},
    {librados::OPERATION_ORDER_READS_WRITES, CEPH_OSD_FLAG_RWORDERED},
    {librados::OPERATION_IGNORE_CACHE, CEPH_OSD_FLAG_IGNORE_CACHE},
    {librados::OPERATION_SKIPRWLOCKS, CEPH_OSD_FLAG_SKIPRWLOCKS},
    {librados::OPERATION_IGNORE_OVERLAY, CEPH_OSD_FLAG_IGNORE_OVERLAY},
    {librados::OPERATION_FULL_TRY, CEPH_OSD_FLAG_FULL_TRY},
    {librados::OPERATION_FULL_FORCE, CEPH_OSD_FLAG_FULL_FORCE},
    {librados::OPERATION_IGNORE_REDIRECT, CEPH_OSD_FLAG_IGNORE_REDIRECT},
    {librados::OPERATION_ORDERSNAP, CEPH_OSD_FLAG_ORDERSNAP},
    {librados::OPERATION_RETURNVEC, CEPH_OSD_FLAG_RETURNVEC},
  };
  int op_flags = 0;
  for (const auto& m : mappings) {
    if (flags & m.lib_flag)
      op_flags |= m.osd_flag;
  }
  return op_flags;
}
| 2,228 | 33.828125 | 58 | cc |
null | ceph-main/src/librados/librados_util.h | #include <cstdint>
#include "acconfig.h"
#include "include/rados/librados.h"
#include "IoCtxImpl.h"
#ifdef WITH_LTTNG
#include "tracing/librados.h"
#else
#define tracepoint(...)
#endif
uint8_t get_checksum_op_type(rados_checksum_type_t type);
int get_op_flags(int flags);
int translate_flags(int flags);
// Context object threaded through the object-listing C/C++ APIs.
// Owns a private duplicate of the caller's IoCtxImpl so that changes the
// caller makes between list calls (e.g. switching namespace) do not
// affect an in-progress listing, plus the objecter's paging state.
struct librados::ObjListCtx {
  librados::IoCtxImpl dupctx;      // private copy of the caller's io ctx
  librados::IoCtxImpl *ctx;        // always points at dupctx after construction
  Objecter::NListContext *nlc;     // objecter paging state; owned, freed in dtor
  bool legacy_list_api;            // true when driving the legacy list API
  ObjListCtx(IoCtxImpl *c, Objecter::NListContext *nl, bool legacy=false)
    : nlc(nl),
      legacy_list_api(legacy) {
    // Get our own private IoCtxImpl so that namespace setting isn't
    // changed by caller between uses.
    ctx = &dupctx;
    dupctx.dup(*c);
  }
  ~ObjListCtx() {
    ctx = NULL;
    delete nlc;
  }
};
| 780 | 21.314286 | 73 | h |
null | ceph-main/src/librados/snap_set_diff.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <vector>
#include "snap_set_diff.h"
#include "common/ceph_context.h"
#include "include/rados/librados.hpp"
#include "include/interval_set.h"
#include "common/debug.h"
#define dout_subsys ceph_subsys_rados
using namespace std;
/**
 * calculate intervals/extents that vary between two snapshots
 *
 * Walks snap_set.clones (ordered oldest first, HEAD last) and accumulates
 * into *diff the byte ranges that changed between snapshots `start` and
 * `end`.  Also reports:
 *  - *end_size / *end_exists: size and existence of the object at `end`
 *  - *clone_end_snap_id: last snap id of the clone covering `end`
 *    (0 when no clone covers it)
 *  - *whole_object: set when a clone carries no snap information, in
 *    which case *diff is cleared and the caller must treat the entire
 *    object as changed
 */
void calc_snap_set_diff(CephContext *cct, const librados::snap_set_t& snap_set,
			librados::snap_t start, librados::snap_t end,
			interval_set<uint64_t> *diff, uint64_t *end_size,
			bool *end_exists, librados::snap_t *clone_end_snap_id,
			bool *whole_object)
{
  ldout(cct, 10) << "calc_snap_set_diff start " << start << " end " << end
		 << ", snap_set seq " << snap_set.seq << dendl;
  bool saw_start = false;
  uint64_t start_size = 0;
  // Reset all outputs before scanning.
  diff->clear();
  *end_size = 0;
  *end_exists = false;
  *clone_end_snap_id = 0;
  *whole_object = false;
  for (vector<librados::clone_info_t>::const_iterator r = snap_set.clones.begin();
       r != snap_set.clones.end();
       ) {
    // make an interval, and hide the fact that the HEAD doesn't
    // include itself in the snaps list
    librados::snap_t a, b;
    if (r->cloneid == librados::SNAP_HEAD) {
      // head is valid starting from right after the last seen seq
      a = snap_set.seq + 1;
      b = librados::SNAP_HEAD;
    } else if (r->snaps.empty()) {
      // No snap info for this clone: cannot compute a diff; tell the
      // caller to treat the whole object as changed.
      ldout(cct, 1) << "clone " << r->cloneid
		    << ": empty snaps, return whole object" << dendl;
      diff->clear();
      *whole_object = true;
      return;
    } else {
      a = r->snaps[0];
      // note: b might be < r->cloneid if a snap has been trimmed.
      b = r->snaps[r->snaps.size()-1];
    }
    ldout(cct, 20) << " clone " << r->cloneid << " snaps " << r->snaps
		   << " -> [" << a << "," << b << "]"
		   << " size " << r->size << " overlap to next " << r->overlap << dendl;
    if (b < start) {
      // this is before start
      ++r;
      continue;
    }
    if (!saw_start) {
      if (start < a) {
	ldout(cct, 20) << " start, after " << start << dendl;
	// this means the object didn't exist at start
	if (r->size)
	  diff->insert(0, r->size);
	start_size = 0;
      } else {
	ldout(cct, 20) << " start" << dendl;
	start_size = r->size;
      }
      saw_start = true;
    }
    *end_size = r->size;
    if (end < a) {
      // `end` falls in a gap before this clone: the object was deleted by
      // then, so the diff is just whatever existed at start.
      ldout(cct, 20) << " past end " << end << ", end object does not exist" << dendl;
      *end_exists = false;
      diff->clear();
      if (start_size) {
	diff->insert(0, start_size);
      }
      break;
    }
    if (end <= b) {
      // This clone covers `end`; we are done scanning.
      ldout(cct, 20) << " end" << dendl;
      *end_exists = true;
      *clone_end_snap_id = b;
      break;
    }
    // start with the max(this size, next size), and subtract off any
    // overlap
    const vector<pair<uint64_t, uint64_t> > *overlap = &r->overlap;
    interval_set<uint64_t> diff_to_next;
    uint64_t max_size = r->size;
    ++r;
    if (r != snap_set.clones.end()) {
      if (r->size > max_size)
	max_size = r->size;
    }
    if (max_size)
      diff_to_next.insert(0, max_size);
    for (vector<pair<uint64_t, uint64_t> >::const_iterator p = overlap->begin();
	 p != overlap->end();
	 ++p) {
      diff_to_next.erase(p->first, p->second);
    }
    ldout(cct, 20) << " diff_to_next " << diff_to_next << dendl;
    diff->union_of(diff_to_next);
    ldout(cct, 20) << " diff now " << *diff << dendl;
  }
}
| 3,506 | 28.720339 | 86 | cc |
null | ceph-main/src/librados/snap_set_diff.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef __CEPH_OSDC_SNAP_SET_DIFF_H
#define __CEPH_OSDC_SNAP_SET_DIFF_H
#include "include/common_fwd.h"
#include "include/rados/rados_types.hpp"
#include "include/interval_set.h"
void calc_snap_set_diff(CephContext *cct,
const librados::snap_set_t& snap_set,
librados::snap_t start, librados::snap_t end,
interval_set<uint64_t> *diff, uint64_t *end_size,
bool *end_exists, librados::snap_t *clone_end_snap_id,
bool *whole_object);
#endif
| 555 | 28.263158 | 70 | h |
null | ceph-main/src/libradosstriper/MultiAioCompletionImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include "common/dout.h"
#include "libradosstriper/MultiAioCompletionImpl.h"
// Record completion of one sub-request: fold its return value into the
// aggregate rval (first error wins, except -EEXIST which is ignored;
// positive values accumulate), then fire the overall complete callback
// once all sub-requests are in and the caller has finished adding them.
// Releases the per-request reference taken in add_request().
void libradosstriper::MultiAioCompletionImpl::complete_request(ssize_t r)
{
  lock.lock();
  if (rval >= 0) {
    if (r < 0 && r != -EEXIST)
      rval = r;
    else if (r > 0)
      rval += r;  // accumulate byte counts from successful sub-ops
  }
  ceph_assert(pending_complete);
  int count = --pending_complete;
  if (!count && !building) {
    complete();
  }
  put_unlock();  // drops the reference and releases the lock
}
// Record that one sub-request is safe (durable).  Errors other than
// -EEXIST are captured in rval, but sizes are not accumulated here.
// Fires the overall safe callback once all sub-requests are safe and the
// caller has finished adding requests.
void libradosstriper::MultiAioCompletionImpl::safe_request(ssize_t r)
{
  lock.lock();
  if (rval >= 0) {
    if (r < 0 && r != -EEXIST)
      rval = r;
  }
  ceph_assert(pending_safe);
  int count = --pending_safe;
  if (!count && !building) {
    safe();
  }
  put_unlock();
}
// Called once the caller has queued every sub-request.  If they have all
// already completed/become safe, fire the callbacks now; otherwise the
// last complete_request()/safe_request() will do it.
void libradosstriper::MultiAioCompletionImpl::finish_adding_requests()
{
  std::scoped_lock l{lock};
  ceph_assert(building);
  building = false;
  if (!pending_complete)
    complete();
  if (!pending_safe)
    safe();
}
| 1,347 | 21.098361 | 73 | cc |
null | ceph-main/src/libradosstriper/MultiAioCompletionImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOSSTRIPERSTRIPER_MULTIAIOCOMPLETIONIMPL_H
#define CEPH_LIBRADOSSTRIPERSTRIPER_MULTIAIOCOMPLETIONIMPL_H
#include <list>
#include <mutex>
#include "common/ceph_mutex.h"
#include "include/radosstriper/libradosstriper.hpp"
namespace libradosstriper {
struct MultiAioCompletionImpl {
ceph::mutex lock = ceph::make_mutex("MultiAioCompletionImpl lock", false);
ceph::condition_variable cond;
int ref, rval;
int pending_complete, pending_safe;
rados_callback_t callback_complete, callback_safe;
void *callback_complete_arg, *callback_safe_arg;
bool building; ///< true if we are still building this completion
bufferlist bl; /// only used for read case in C api of rados striper
std::list<bufferlist*> bllist; /// keep temporary buffer lists used for destriping
MultiAioCompletionImpl()
: ref(1), rval(0),
pending_complete(0), pending_safe(0),
callback_complete(0), callback_safe(0),
callback_complete_arg(0), callback_safe_arg(0),
building(true) {};
~MultiAioCompletionImpl() {
// deallocate temporary buffer lists
for (std::list<bufferlist*>::iterator it = bllist.begin();
it != bllist.end();
it++) {
delete *it;
}
bllist.clear();
}
int set_complete_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l{lock};
callback_complete = cb;
callback_complete_arg = cb_arg;
return 0;
}
int set_safe_callback(void *cb_arg, rados_callback_t cb) {
std::scoped_lock l{lock};
callback_safe = cb;
callback_safe_arg = cb_arg;
return 0;
}
int wait_for_complete() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_complete; });
return 0;
}
int wait_for_safe() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_safe; });
return 0;
}
bool is_complete() {
std::scoped_lock l{lock};
return pending_complete == 0;
}
bool is_safe() {
std::scoped_lock l{lock};
return pending_safe == 0;
}
void wait_for_complete_and_cb() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_complete && !callback_complete; });
}
void wait_for_safe_and_cb() {
std::unique_lock l{lock};
cond.wait(l, [this] { return !pending_safe && !callback_safe; });
}
bool is_complete_and_cb() {
std::scoped_lock l{lock};
return ((0 == pending_complete) && !callback_complete);
}
bool is_safe_and_cb() {
std::scoped_lock l{lock};
return ((0 == pending_safe) && !callback_safe);
}
int get_return_value() {
std::scoped_lock l{lock};
return rval;
}
void get() {
std::scoped_lock l{lock};
_get();
}
void _get() {
ceph_assert(ceph_mutex_is_locked(lock));
ceph_assert(ref > 0);
++ref;
}
void put() {
lock.lock();
put_unlock();
}
void put_unlock() {
ceph_assert(ref > 0);
int n = --ref;
lock.unlock();
if (!n)
delete this;
}
void add_request() {
std::scoped_lock l{lock};
pending_complete++;
_get();
pending_safe++;
_get();
}
void add_safe_request() {
std::scoped_lock l{lock};
pending_complete++;
_get();
}
void complete() {
ceph_assert(ceph_mutex_is_locked(lock));
if (callback_complete) {
callback_complete(this, callback_complete_arg);
callback_complete = 0;
}
cond.notify_all();
}
void safe() {
ceph_assert(ceph_mutex_is_locked(lock));
if (callback_safe) {
callback_safe(this, callback_safe_arg);
callback_safe = 0;
}
cond.notify_all();
};
void complete_request(ssize_t r);
void safe_request(ssize_t r);
void finish_adding_requests();
};
inline void intrusive_ptr_add_ref(MultiAioCompletionImpl* ptr)
{
ptr->get();
}
inline void intrusive_ptr_release(MultiAioCompletionImpl* ptr)
{
ptr->put();
}
}
#endif // CEPH_LIBRADOSSTRIPERSTRIPER_MULTIAIOCOMPLETIONIMPL_H
| 4,333 | 24.494118 | 84 | h |
null | ceph-main/src/libradosstriper/RadosStriperImpl.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <boost/algorithm/string/replace.hpp>
#include "libradosstriper/RadosStriperImpl.h"
#include <errno.h>
#include <sstream>
#include <iomanip>
#include <algorithm>
#include "include/types.h"
#include "include/uuid.h"
#include "include/ceph_fs.h"
#include "common/dout.h"
#include "common/strtol.h"
#include "common/RefCountedObj.h"
#include "osdc/Striper.h"
#include "librados/AioCompletionImpl.h"
#include <cls/lock/cls_lock_client.h>
/*
 * This file contains the actual implementation of the rados striped objects interface.
*
* Striped objects are stored in rados in a set of regular rados objects, after their
* content has been striped using the osdc/Striper interface.
*
* The external attributes of the striped object are mapped to the attributes of the
* first underlying object. This first object has a set of extra external attributes
* storing the layout of the striped object for future read back. These attributes are :
* - striper.layout.object_size : the size of rados objects used.
* Must be a multiple of striper.layout.stripe_unit
* - striper.layout.stripe_unit : the size of a stripe unit
* - striper.layout.stripe_count : the number of stripes used
* - striper.size : total striped object size
*
* In general operations on striped objects are not atomic.
* However, a certain number of safety guards have been put to make the interface closer
* to atomicity :
* - each data operation takes a shared lock on the first rados object for the
* whole time of the operation
* - the remove and trunc operations take an exclusive lock on the first rados object
* for the whole time of the operation
* This makes sure that no removal/truncation of a striped object occurs while
* data operations are happening and vice versa. It thus makes sure that the layout
* of a striped object does not change during data operation, which is essential for
* data consistency.
*
* Still the writing to a striped object is not atomic. This means in particular that
* the size of an object may not be in sync with its content at all times.
* As the size is always guaranteed to be updated first and in an atomic way, and as
* sparse striped objects are supported (see below), what will typically happen is
* that a reader that comes too soon after a write will read 0s instead of the actual
* data.
*
* Note that remove handles the pieces of the striped object in reverse order,
* so that the head object is removed last, making the completion of the deletion atomic.
*
* Striped objects can be sparse, typically in case data was written at the end of the
* striped object only. In such a case, some rados objects constituing the striped object
* may be missing. Other can be partial (only the beginning will have data)
* When dealing with such sparse striped files, missing objects are detected and
* considered as full of 0s. They are however not created until real data is written
* to them.
*
* There are a number of missing features/improvements that could be implemented.
* Here are some ideas :
* - implementation of missing entry points (compared to rados)
* In particular : clone_range, sparse_read, exec, aio_flush_async, tmaps, omaps, ...
*
*/
#define dout_subsys ceph_subsys_rados
#undef dout_prefix
#define dout_prefix *_dout << "libradosstriper: "
/// size of xattr buffer
#define XATTR_BUFFER_SIZE 32
/// names of the different xattr entries
#define XATTR_LAYOUT_STRIPE_UNIT "striper.layout.stripe_unit"
#define XATTR_LAYOUT_STRIPE_COUNT "striper.layout.stripe_count"
#define XATTR_LAYOUT_OBJECT_SIZE "striper.layout.object_size"
#define XATTR_SIZE "striper.size"
#define LOCK_PREFIX "lock."
/// name of the lock used on objects to ensure layout stability during IO
#define RADOS_LOCK_NAME "striper.lock"
/// format of the extension of rados objects created for a given striped object
#define RADOS_OBJECT_EXTENSION_FORMAT ".%016llx"
/// default object layout
/// default layout applied when the caller configures nothing:
/// 4MB objects with a single 4MB stripe unit (i.e. no real striping)
static const struct ceph_file_layout default_file_layout = {
  ceph_le32(1<<22), // fl_stripe_unit
  ceph_le32(1), // fl_stripe_count
  ceph_le32(1<<22), // fl_object_size
  ceph_le32(0), // fl_cas_hash
  ceph_le32(0), // fl_object_stripe_unit
  ceph_le32(-1), // fl_unused
  ceph_le32(-1), // fl_pg_pool
};
using std::map;
using std::pair;
using std::string;
using std::vector;
using libradosstriper::MultiAioCompletionImplPtr;
namespace {
///////////////////////// CompletionData /////////////////////////////
/**
* struct handling the data needed to pass to the call back
* function in asynchronous operations
*/
struct CompletionData : RefCountedObject {
  /// complete method: forwards r to the user's completion handler, if any
  void complete(int r);
  /// striper to be used to handle the write completion
  libradosstriper::RadosStriperImpl *m_striper;
  /// striped object concerned by the write operation
  std::string m_soid;
  /// shared lock to be released at completion
  std::string m_lockCookie;
  /// completion handler; null when no user completion was provided
  librados::IoCtxImpl::C_aio_Complete *m_ack;
protected:
  CompletionData(libradosstriper::RadosStriperImpl * striper,
		 const std::string& soid,
		 const std::string& lockCookie,
		 librados::AioCompletionImpl *userCompletion = 0);
  ~CompletionData() override;
};
// Constructor: takes a reference on the striper (released in the
// destructor) and, when a user completion is given, wraps it in a
// C_aio_Complete handler and binds it to the striper's io context.
CompletionData::CompletionData
(libradosstriper::RadosStriperImpl* striper,
 const std::string& soid,
 const std::string& lockCookie,
 librados::AioCompletionImpl *userCompletion) :
  RefCountedObject(striper->cct()),
  m_striper(striper), m_soid(soid), m_lockCookie(lockCookie), m_ack(0) {
  m_striper->get();
  if (userCompletion) {
    m_ack = new librados::IoCtxImpl::C_aio_Complete(userCompletion);
    userCompletion->io = striper->m_ioCtxImpl;
  }
}
// Destructor: frees the optional ack handler and drops the striper
// reference taken in the constructor.
CompletionData::~CompletionData() {
  // deleting a null pointer is a no-op, so no null check is needed
  delete m_ack;
  m_striper->put();
}
// Forward the final return code to the user's completion, if one exists.
void CompletionData::complete(int r) {
  if (m_ack) m_ack->finish(r);
}
/**
* struct handling the data needed to pass to the call back
* function in asynchronous read operations
*/
struct ReadCompletionData : CompletionData {
  /// bufferlist containing final result
  bufferlist* m_bl;
  /// extents that will be read; owned by this object (deleted in dtor)
  std::vector<ObjectExtent>* m_extents;
  /// intermediate results, one per extent; owned by this object
  std::vector<bufferlist>* m_resultbl;
  /// return code of read completion, to be remembered until unlocking happened
  int m_readRc;
  /// completion object for the unlocking of the striped object at the end of the read
  librados::AioCompletion *m_unlockCompletion;
  /// complete method for when reading is over
  void complete_read(int r);
  /// complete method for when object is unlocked
  void complete_unlock(int r);
private:
  FRIEND_MAKE_REF(ReadCompletionData);
  ReadCompletionData(libradosstriper::RadosStriperImpl * striper,
		     const std::string& soid,
		     const std::string& lockCookie,
		     librados::AioCompletionImpl *userCompletion,
		     bufferlist* bl,
		     std::vector<ObjectExtent>* extents,
		     std::vector<bufferlist>* resultbl);
  ~ReadCompletionData() override;
};
// Constructor: records the output bufferlist and takes ownership of the
// extent list and per-extent result buffers allocated by aio_read().
ReadCompletionData::ReadCompletionData
(libradosstriper::RadosStriperImpl* striper,
 const std::string& soid,
 const std::string& lockCookie,
 librados::AioCompletionImpl *userCompletion,
 bufferlist* bl,
 std::vector<ObjectExtent>* extents,
 std::vector<bufferlist>* resultbl) :
  CompletionData(striper, soid, lockCookie, userCompletion),
  m_bl(bl), m_extents(extents), m_resultbl(resultbl), m_readRc(0),
  m_unlockCompletion(0) {}
// Destructor: releases the unlock completion and the owned containers.
// NOTE(review): assumes m_unlockCompletion was set (aio_read assigns it
// right after construction) — it is dereferenced without a null check.
ReadCompletionData::~ReadCompletionData() {
  m_unlockCompletion->release();
  delete m_extents;
  delete m_resultbl;
}
// All sub-reads are done: reassemble the per-extent buffers into the
// user's bufferlist and remember the return code until the striped
// object has been unlocked (complete_unlock reports it).
void ReadCompletionData::complete_read(int r) {
  // gather data into final buffer
  Striper::StripedReadResult readResult;
  vector<bufferlist>::iterator bit = m_resultbl->begin();
  for (vector<ObjectExtent>::iterator eit = m_extents->begin();
       eit != m_extents->end();
       ++eit, ++bit) {
    readResult.add_partial_result(m_striper->cct(), *bit, eit->buffer_extents);
  }
  m_bl->clear();
  readResult.assemble_result(m_striper->cct(), *m_bl, true);
  // Remember return code
  m_readRc = r;
}
// The shared lock has been released: notify the user completion with
// either the remembered read error or the number of bytes assembled.
void ReadCompletionData::complete_unlock(int r) {
  // call parent's completion method
  // Note that we ignore the return code of the unlock as we cannot do much about it
  CompletionData::complete(m_readRc?m_readRc:m_bl->length());
}
/**
* struct handling the data needed to pass to the call back
* function in asynchronous write operations
*/
struct WriteCompletionData : CompletionData {
  /// safe completion handler; null when no user completion was provided
  librados::IoCtxImpl::C_aio_Complete *m_safe;
  /// completion object for the unlocking of the striped object at the end of the write
  librados::AioCompletion *m_unlockCompletion;
  /// return code of write completion, to be remembered until unlocking happened
  int m_writeRc;
  /// complete method for when writing is over
  void complete_write(int r);
  /// complete method for when object is unlocked
  void complete_unlock(int r);
  /// safe method
  void safe(int r);
private:
  FRIEND_MAKE_REF(WriteCompletionData);
  /// constructor
  WriteCompletionData(libradosstriper::RadosStriperImpl * striper,
		      const std::string& soid,
		      const std::string& lockCookie,
		      librados::AioCompletionImpl *userCompletion);
  /// destructor
  ~WriteCompletionData() override;
};
// Constructor: in addition to the base completion handler, wraps the
// user completion in a second C_aio_Complete used for the "safe" event.
WriteCompletionData::WriteCompletionData
(libradosstriper::RadosStriperImpl* striper,
 const std::string& soid,
 const std::string& lockCookie,
 librados::AioCompletionImpl *userCompletion) :
  CompletionData(striper, soid, lockCookie, userCompletion),
  m_safe(0), m_unlockCompletion(0), m_writeRc(0) {
  if (userCompletion) {
    m_safe = new librados::IoCtxImpl::C_aio_Complete(userCompletion);
  }
}
// Destructor: releases the unlock completion and the optional "safe"
// handler.
// NOTE(review): assumes m_unlockCompletion was set before destruction —
// it is dereferenced without a null check; verify all construction paths.
WriteCompletionData::~WriteCompletionData() {
  m_unlockCompletion->release();
  // deleting a null pointer is a no-op, so no null check is needed
  delete m_safe;
}
// The shared lock has been released: report the remembered write result.
void WriteCompletionData::complete_unlock(int r) {
  // call parent's completion method
  // Note that we ignore the return code of the unlock as we cannot do much about it
  CompletionData::complete(m_writeRc);
}
// Writing finished: stash the return code until the unlock completes.
void WriteCompletionData::complete_write(int r) {
  // Remember return code
  m_writeRc = r;
}
// Forward the "safe" (data durable) notification to the user, if any.
void WriteCompletionData::safe(int r) {
  if (m_safe) m_safe->finish(r);
}
/**
 * Completion data for asynchronous remove operations; carries the
 * librados removal flags in addition to the base completion state.
 */
struct RemoveCompletionData : CompletionData {
  /// removal flags forwarded to the final remove of the head object
  int flags;
private:
  FRIEND_MAKE_REF(RemoveCompletionData);
  /**
   * constructor
   * note that the constructed object will take ownership of the lock
   */
  RemoveCompletionData(libradosstriper::RadosStriperImpl * striper,
		       const std::string& soid,
		       const std::string& lockCookie,
		       librados::AioCompletionImpl *userCompletion,
		       int flags = 0) :
    CompletionData(striper, soid, lockCookie, userCompletion), flags(flags) {}
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous truncate operations
*/
struct TruncateCompletionData : RefCountedObject {
  /// striper to be used; a reference is held for this object's lifetime
  libradosstriper::RadosStriperImpl *m_striper;
  /// striped object concerned by the truncate operation
  std::string m_soid;
  /// the final size of the truncated object
  uint64_t m_size;
private:
  FRIEND_MAKE_REF(TruncateCompletionData);
  /// constructor: pins the striper with an extra reference
  TruncateCompletionData(libradosstriper::RadosStriperImpl* striper,
			 const std::string& soid,
			 uint64_t size) :
    RefCountedObject(striper->cct()),
    m_striper(striper), m_soid(soid), m_size(size) {
    m_striper->get();
  }
  /// destructor: drops the striper reference
  ~TruncateCompletionData() override {
    m_striper->put();
  }
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous read operations of a Rados File
*/
struct RadosReadCompletionData : RefCountedObject {
  /// the multi asynch io completion object to be used (shared ownership)
  MultiAioCompletionImplPtr m_multiAioCompl;
  /// the expected number of bytes; shortfalls are zero-padded (sparse files)
  uint64_t m_expectedBytes;
  /// the bufferlist object where data have been written
  bufferlist *m_bl;
private:
  FRIEND_MAKE_REF(RadosReadCompletionData);
  /// constructor
  RadosReadCompletionData(MultiAioCompletionImplPtr multiAioCompl,
			  uint64_t expectedBytes,
			  bufferlist *bl,
			  CephContext *context) :
    RefCountedObject(context),
    m_multiAioCompl(multiAioCompl), m_expectedBytes(expectedBytes), m_bl(bl) {}
};
/**
* struct handling (most of) the data needed to pass to the call back
* function in asynchronous stat operations.
* Inherited by the actual type for adding time information in different
* versions (time_t or struct timespec)
*/
struct BasicStatCompletionData : CompletionData {
  // MultiAioCompletionImpl used to handle the double async
  // call in the back (stat + getxattr)
  libradosstriper::MultiAioCompletionImpl *m_multiCompletion;
  // where to store the size of the first object
  // this will be ignored but we need a place to store it when
  // async stat is called
  uint64_t m_objectSize;
  // where to store the file size (parsed from the striper.size xattr)
  uint64_t *m_psize;
  /// the bufferlist object used for the getxattr call
  bufferlist m_bl;
  /// return code of the stat
  int m_statRC;
  /// return code of the getxattr
  int m_getxattrRC;
protected:
  /// constructor
  BasicStatCompletionData(libradosstriper::RadosStriperImpl* striper,
			  const std::string& soid,
			  librados::AioCompletionImpl *userCompletion,
			  libradosstriper::MultiAioCompletionImpl *multiCompletion,
			  uint64_t *psize) :
    CompletionData(striper, soid, "", userCompletion),
    m_multiCompletion(multiCompletion), m_psize(psize),
    m_statRC(0), m_getxattrRC(0) {};
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous stat operations.
* Simple templated extension of BasicStatCompletionData.
* The template parameter is the type of the time information
* (used with time_t for stat and struct timespec for stat2)
*/
// Templated on the time representation: time_t for stat(),
// struct timespec for stat2().
template<class TimeType>
struct StatCompletionData : BasicStatCompletionData {
  // where to store the file time
  TimeType *m_pmtime;
private:
  FRIEND_MAKE_REF(StatCompletionData);
  /// constructor
  StatCompletionData(libradosstriper::RadosStriperImpl* striper,
		     const std::string& soid,
		     librados::AioCompletionImpl *userCompletion,
		     libradosstriper::MultiAioCompletionImpl *multiCompletion,
		     uint64_t *psize,
		     TimeType *pmtime) :
    BasicStatCompletionData(striper, soid, userCompletion, multiCompletion, psize),
    m_pmtime(pmtime) {};
};
/**
* struct handling the data needed to pass to the call back
* function in asynchronous remove operations of a Rados File
*/
struct RadosRemoveCompletionData : RefCountedObject {
  /// the multi asynch io completion object to be used (shared ownership)
  MultiAioCompletionImplPtr m_multiAioCompl;
private:
  FRIEND_MAKE_REF(RadosRemoveCompletionData);
  /// constructor
  RadosRemoveCompletionData(MultiAioCompletionImplPtr multiAioCompl,
			    CephContext *context) :
    RefCountedObject(context),
    m_multiAioCompl(multiAioCompl) {};
};
} // namespace {
///////////////////////// constructor /////////////////////////////
// Constructor: wraps the given rados io context (both the C++ wrapper
// and its implementation pointer), starting with a zero reference count
// and the default, non-striped layout.
libradosstriper::RadosStriperImpl::RadosStriperImpl(librados::IoCtx& ioctx, librados::IoCtxImpl *ioctx_impl) :
  m_refCnt(0), m_radosCluster(ioctx), m_ioCtx(ioctx), m_ioCtxImpl(ioctx_impl),
  m_layout(default_file_layout) {}
///////////////////////// layout /////////////////////////////
// Configure the stripe unit of the layout used for new striped objects.
// Returns -EINVAL unless the value is a non-zero multiple of
// CEPH_MIN_STRIPE_UNIT (64k).
int libradosstriper::RadosStriperImpl::setObjectLayoutStripeUnit
(unsigned int stripe_unit)
{
  const bool aligned =
    stripe_unit != 0 && (stripe_unit & (CEPH_MIN_STRIPE_UNIT - 1)) == 0;
  if (!aligned)
    return -EINVAL;
  m_layout.fl_stripe_unit = stripe_unit;
  return 0;
}
// Configure the stripe count of the layout used for new striped objects.
// At least one stripe is required; 0 yields -EINVAL.
int libradosstriper::RadosStriperImpl::setObjectLayoutStripeCount
(unsigned int stripe_count)
{
  if (stripe_count == 0)
    return -EINVAL;
  m_layout.fl_stripe_count = stripe_count;
  return 0;
}
// Configure the rados object size of the layout.
// The size must be a non-zero multiple of CEPH_MIN_STRIPE_UNIT (64k)
// and also a multiple (>= 1x) of the currently configured stripe unit.
int libradosstriper::RadosStriperImpl::setObjectLayoutObjectSize
(unsigned int object_size)
{
  if (object_size == 0 || (object_size & (CEPH_MIN_STRIPE_UNIT - 1)) != 0)
    return -EINVAL;
  const bool multipleOfStripeUnit =
    object_size >= m_layout.fl_stripe_unit &&
    object_size % m_layout.fl_stripe_unit == 0;
  if (!multipleOfStripeUnit)
    return -EINVAL;
  m_layout.fl_object_size = object_size;
  return 0;
}
///////////////////////// xattrs /////////////////////////////
// Read an extended attribute of a striped object.
// Xattrs of a striped object live on its first rados object.
int libradosstriper::RadosStriperImpl::getxattr(const object_t& soid,
						const char *name,
						bufferlist& bl)
{
  return m_ioCtx.getxattr(getObjectId(soid, 0), name, bl);
}
// Set an extended attribute on a striped object.
// Xattrs of a striped object live on its first rados object.
int libradosstriper::RadosStriperImpl::setxattr(const object_t& soid,
						const char *name,
						bufferlist& bl)
{
  return m_ioCtx.setxattr(getObjectId(soid, 0), name, bl);
}
int libradosstriper::RadosStriperImpl::getxattrs(const object_t& soid,
map<string, bufferlist>& attrset)
{
std::string firstObjOid = getObjectId(soid, 0);
int rc = m_ioCtx.getxattrs(firstObjOid, attrset);
if (rc) return rc;
// cleanup internal attributes dedicated to striping and locking
attrset.erase(XATTR_LAYOUT_STRIPE_UNIT);
attrset.erase(XATTR_LAYOUT_STRIPE_COUNT);
attrset.erase(XATTR_LAYOUT_OBJECT_SIZE);
attrset.erase(XATTR_SIZE);
attrset.erase(std::string(LOCK_PREFIX) + RADOS_LOCK_NAME);
return rc;
}
// Remove an extended attribute from a striped object.
// Xattrs of a striped object live on its first rados object.
int libradosstriper::RadosStriperImpl::rmxattr(const object_t& soid,
					       const char *name)
{
  return m_ioCtx.rmxattr(getObjectId(soid, 0), name);
}
///////////////////////// io /////////////////////////////
// Synchronous write of len bytes at offset off.
// Creates the object if needed, extends its size to at least len+off
// and holds a shared lock on the head object for the whole operation.
int libradosstriper::RadosStriperImpl::write(const std::string& soid,
					     const bufferlist& bl,
					     size_t len,
					     uint64_t off)
{
  // open the object. This will create it if needed, retrieve its layout
  // and size and take a shared lock on it
  ceph_file_layout layout;
  std::string lockCookie;
  int rc = createAndOpenStripedObject(soid, &layout, len+off, &lockCookie, true);
  if (rc) return rc;
  return write_in_open_object(soid, layout, lockCookie, bl, len, off);
}
// Synchronous append: writes len bytes at the current end of the
// striped object, holding a shared lock for the whole operation.
int libradosstriper::RadosStriperImpl::append(const std::string& soid,
					      const bufferlist& bl,
					      size_t len)
{
  // open the object. This will create it if needed, retrieve its layout
  // and size and take a shared lock on it
  ceph_file_layout layout;
  uint64_t size = len;
  std::string lockCookie;
  int rc = openStripedObjectForWrite(soid, &layout, &size, &lockCookie, false);
  if (rc) return rc;
  // 'size' now holds the previous object size, i.e. the append offset
  return write_in_open_object(soid, layout, lockCookie, bl, len, size);
}
// Replace the whole content of a striped object: truncate to 0, then
// write the given buffer from offset 0. A missing object (-ENOENT on
// the truncate) is fine — the write will simply create it.
int libradosstriper::RadosStriperImpl::write_full(const std::string& soid,
						  const bufferlist& bl)
{
  int rc = trunc(soid, 0);
  if (rc != 0 && rc != -ENOENT)
    return rc;
  return write(soid, bl, bl.length(), 0);
}
int libradosstriper::RadosStriperImpl::read(const std::string& soid,
bufferlist* bl,
size_t len,
uint64_t off)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous method
int rc = aio_read(soid, &c, bl, len, off);
// and wait for completion
if (!rc) {
// wait for completion
c.wait_for_complete_and_cb();
// return result
rc = c.get_return_value();
}
return rc;
}
///////////////////////// asynchronous io /////////////////////////////
// Asynchronous write of len bytes at offset off.
// Synchronously creates/opens the object (layout + shared lock), then
// launches the actual striped writes asynchronously.
int libradosstriper::RadosStriperImpl::aio_write(const std::string& soid,
						 librados::AioCompletionImpl *c,
						 const bufferlist& bl,
						 size_t len,
						 uint64_t off)
{
  ceph_file_layout layout;
  std::string lockCookie;
  int rc = createAndOpenStripedObject(soid, &layout, len+off, &lockCookie, true);
  if (rc) return rc;
  return aio_write_in_open_object(soid, c, layout, lockCookie, bl, len, off);
}
// Asynchronous append: opens the object (retrieving its current size,
// which becomes the write offset) and launches the writes asynchronously.
int libradosstriper::RadosStriperImpl::aio_append(const std::string& soid,
						  librados::AioCompletionImpl *c,
						  const bufferlist& bl,
						  size_t len)
{
  ceph_file_layout layout;
  uint64_t size = len;
  std::string lockCookie;
  int rc = openStripedObjectForWrite(soid, &layout, &size, &lockCookie, false);
  if (rc) return rc;
  // create a completion object
  // 'size' now holds the previous object size, i.e. the append offset
  return aio_write_in_open_object(soid, c, layout, lockCookie, bl, len, size);
}
// Asynchronous full overwrite: truncate to 0 (synchronously), then
// launch an asynchronous write of the whole buffer at offset 0.
// A missing object (-ENOENT on the truncate) is tolerated, matching the
// synchronous write_full(): the subsequent write creates the object.
int libradosstriper::RadosStriperImpl::aio_write_full(const std::string& soid,
						      librados::AioCompletionImpl *c,
						      const bufferlist& bl)
{
  int rc = trunc(soid, 0);
  if (rc && rc != -ENOENT) return rc;
  return aio_write(soid, c, bl, bl.length(), 0);
}
// Callback run when the post-read unlock of the striped object is done.
// Adopts (and thus releases on return) the reference created for this
// callback, then reports the final read result to the user.
static void rados_read_aio_unlock_complete(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = ceph::ref_t<ReadCompletionData>(static_cast<ReadCompletionData*>(arg), false);
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  cdata->complete_unlock(comp->rval);
}
// Callback run when all sub-reads of a striped read are done.
// Launches the asynchronous unlock of the striped object and, in
// parallel, assembles the read result. Note: 'arg' is used as a
// non-owning pointer here — the object is kept alive by the reference
// held by the unlock completion until complete_unlock runs.
static void striper_read_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = static_cast<ReadCompletionData*>(arg);
  // launch the async unlocking of the object
  cdata->m_striper->aio_unlockObject(cdata->m_soid, cdata->m_lockCookie, cdata->m_unlockCompletion);
  // complete the read part in parallel
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  cdata->complete_read(comp->rval);
}
// Callback run when one underlying rados read completes.
// Handles sparse striped objects: a missing object or a short read is
// padded with zeros up to the expected length. Finally forwards both
// the "complete" (padded length) and "safe" (raw rc) notifications to
// the multi-completion.
static void rados_req_read_complete(rados_completion_t c, void *arg)
{
  // Adopt the reference that aio_read() detached into the callback
  // argument so it is released when this handler returns (the previous
  // raw static_cast leaked one RadosReadCompletionData per sub-read).
  auto data = ceph::ref_t<RadosReadCompletionData>(static_cast<RadosReadCompletionData*>(arg), false);
  int rc = rados_aio_get_return_value(c);
  // We need to handle the case of sparse files here
  if (rc == -ENOENT) {
    // the object did not exist at all. This can happen for sparse files.
    // we consider we've read 0 bytes and it will fall into next case
    rc = 0;
  }
  ssize_t nread = rc;
  if (rc >= 0 && (((uint64_t)rc) < data->m_expectedBytes)) {
    // only partial data were present in the object (or the object did not
    // even exist if we've gone through previous case).
    // This is typical of sparse file and we need to complete with 0s.
    unsigned int lenOfZeros = data->m_expectedBytes-rc;
    unsigned int existingDataToZero = std::min(data->m_bl->length()-rc, lenOfZeros);
    if (existingDataToZero > 0) {
      data->m_bl->zero(rc, existingDataToZero);
    }
    if (lenOfZeros > existingDataToZero) {
      ceph::bufferptr zeros(ceph::buffer::create(lenOfZeros-existingDataToZero));
      zeros.zero();
      data->m_bl->push_back(zeros);
    }
    nread = data->m_expectedBytes;
  }
  auto multi_aio_comp = data->m_multiAioCompl;
  multi_aio_comp->complete_request(nread);
  multi_aio_comp->safe_request(rc);
}
// Asynchronous read of up to len bytes at offset off into *bl.
// Opens the object (layout + size + shared lock), maps the byte range
// to per-rados-object extents, and issues one aio read per extent.
// Completion chain: all sub-reads -> striper_read_aio_req_complete
// (assembles result, starts unlock) -> rados_read_aio_unlock_complete
// (reports to the user completion).
int libradosstriper::RadosStriperImpl::aio_read(const std::string& soid,
						librados::AioCompletionImpl *c,
						bufferlist* bl,
						size_t len,
						uint64_t off)
{
  // open the object. This will retrieve its layout and size
  // and take a shared lock on it
  ceph_file_layout layout;
  uint64_t size;
  std::string lockCookie;
  int rc = openStripedObjectForRead(soid, &layout, &size, &lockCookie);
  if (rc) return rc;
  // find out the actual number of bytes we can read
  uint64_t read_len;
  if (off >= size) {
    // nothing to read ! We are done.
    read_len = 0;
  } else {
    read_len = std::min(len, (size_t)(size-off));
  }
  // get list of extents to be read from
  vector<ObjectExtent> *extents = new vector<ObjectExtent>();
  if (read_len > 0) {
    std::string format = soid;
    // escape '%' in the object name so it survives the printf-style
    // expansion of RADOS_OBJECT_EXTENSION_FORMAT
    boost::replace_all(format, "%", "%%");
    format += RADOS_OBJECT_EXTENSION_FORMAT;
    file_layout_t l;
    l.from_legacy(layout);
    Striper::file_to_extents(cct(), format.c_str(), &l, off, read_len,
			     0, *extents);
  }
  // create a completion object and transfer ownership of extents and resultbl
  vector<bufferlist> *resultbl = new vector<bufferlist>(extents->size());
  auto cdata = ceph::make_ref<ReadCompletionData>(this, soid, lockCookie, c, bl, extents, resultbl);
  c->is_read = true;
  c->io = m_ioCtxImpl;
  // create a completion for the unlocking of the striped object at the end of the read
  librados::AioCompletion *unlock_completion =
    librados::Rados::aio_create_completion(cdata->get() /* create ref! */, rados_read_aio_unlock_complete);
  cdata->m_unlockCompletion = unlock_completion;
  // create the multiCompletion object handling the reads
  MultiAioCompletionImplPtr nc{new libradosstriper::MultiAioCompletionImpl,
			       false};
  nc->set_complete_callback(cdata.get(), striper_read_aio_req_complete);
  // go through the extents
  int r = 0, i = 0;
  for (vector<ObjectExtent>::iterator p = extents->begin(); p != extents->end(); ++p) {
    // create a buffer list describing where to place data read from current extend
    bufferlist *oid_bl = &((*resultbl)[i++]);
    for (vector<pair<uint64_t,uint64_t> >::iterator q = p->buffer_extents.begin();
	 q != p->buffer_extents.end();
	 ++q) {
      bufferlist buffer_bl;
      buffer_bl.substr_of(*bl, q->first, q->second);
      oid_bl->append(buffer_bl);
    }
    // read all extends of a given object in one go
    nc->add_request();
    // ownership of one reference on 'data' is detached into the rados
    // callback argument; rados_req_read_complete is responsible for
    // releasing it. (An older comment mentioned a rados_req_read_safe
    // callback that no longer exists.)
    auto data = ceph::make_ref<RadosReadCompletionData>(nc, p->length, oid_bl, cct());
    librados::AioCompletion *rados_completion =
      librados::Rados::aio_create_completion(data.detach(), rados_req_read_complete);
    r = m_ioCtx.aio_read(p->oid.name, rados_completion, oid_bl, p->length, p->offset);
    rados_completion->release();
    if (r < 0)
      break;
  }
  nc->finish_adding_requests();
  return r;
}
// Asynchronous read into a raw caller-owned buffer: wraps the buffer in
// a static bufferptr stored in the completion, then delegates to the
// bufferlist overload. The caller must keep 'buf' alive until completion.
int libradosstriper::RadosStriperImpl::aio_read(const std::string& soid,
						librados::AioCompletionImpl *c,
						char* buf,
						size_t len,
						uint64_t off)
{
  // create a buffer list and store it inside the completion object
  c->bl.clear();
  c->bl.push_back(buffer::create_static(len, buf));
  // call the bufferlist version of this method
  return aio_read(soid, c, &c->bl, len, off);
}
// Flush all in-flight asynchronous operations: flushes at the rados
// level, then waits until every outstanding CompletionData has dropped
// its reference on this striper (m_refCnt back down to 1).
int libradosstriper::RadosStriperImpl::aio_flush()
{
  int ret;
  // pass to the rados level
  ret = m_ioCtx.aio_flush();
  if (ret < 0)
    return ret;
  //wait all CompletionData are released
  std::unique_lock l{lock};
  cond.wait(l, [this] {return m_refCnt <= 1;});
  return ret;
}
///////////////////////// stat and deletion /////////////////////////////
int libradosstriper::RadosStriperImpl::stat(const std::string& soid, uint64_t *psize, time_t *pmtime)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous version of stat
int rc = aio_stat(soid, &c, psize, pmtime);
if (rc == 0) {
// wait for completion of the remove
c.wait_for_complete();
// get result
rc = c.get_return_value();
}
return rc;
}
// Callback for the underlying rados stat of the head object.
// Adopts (and releases on return) the reference created for this
// callback; remembers a missing-object error for the final aggregation.
static void striper_stat_aio_stat_complete(rados_completion_t c, void *arg) {
  auto data = ceph::ref_t<BasicStatCompletionData>(static_cast<BasicStatCompletionData*>(arg), false);
  int rc = rados_aio_get_return_value(c);
  if (rc == -ENOENT) {
    // remember this has failed
    data->m_statRC = rc;
  }
  data->m_multiCompletion->complete_request(rc);
}
// Callback for the getxattr of the striper.size attribute.
// Parses the decimal size string into *m_psize; records any read or
// parse error for the final aggregation in striper_stat_aio_req_complete.
static void striper_stat_aio_getxattr_complete(rados_completion_t c, void *arg) {
  auto data = ceph::ref_t<BasicStatCompletionData>(static_cast<BasicStatCompletionData*>(arg), false);
  int rc = rados_aio_get_return_value(c);
  // We need to handle the case of sparse files here
  if (rc < 0) {
    // remember this has failed
    data->m_getxattrRC = rc;
  } else {
    // this intermediate string allows to add a null terminator before calling strtol
    std::string strsize(data->m_bl.c_str(), data->m_bl.length());
    std::string err;
    *data->m_psize = strict_strtoll(strsize.c_str(), 10, &err);
    if (!err.empty()) {
      lderr(data->m_striper->cct()) << XATTR_SIZE << " : " << err << dendl;
      data->m_getxattrRC = -EINVAL;
    }
    rc = 0;
  }
  data->m_multiCompletion->complete_request(rc);
}
// Final aggregation callback once both sub-operations (stat + getxattr)
// are done: report the stat error first, then a getxattr/parse error,
// otherwise success. Adopts and releases the callback's reference.
static void striper_stat_aio_req_complete(rados_striper_multi_completion_t c,
					  void *arg) {
  auto data = ceph::ref_t<BasicStatCompletionData>(static_cast<BasicStatCompletionData*>(arg), false);
  int rc = 0;
  if (data->m_statRC)
    rc = data->m_statRC;
  else if (data->m_getxattrRC < 0)
    rc = data->m_getxattrRC;
  data->complete(rc);
}
// Common implementation of aio_stat/aio_stat2: launches, in parallel,
// a rados stat of the head object (for the mtime) and a getxattr of
// striper.size (for the real striped size), aggregated through a
// MultiAioCompletion whose final callback notifies the user completion.
template<class TimeType>
int libradosstriper::RadosStriperImpl::aio_generic_stat
(const std::string& soid,
 librados::AioCompletionImpl *c,
 uint64_t *psize,
 TimeType *pmtime,
 typename libradosstriper::RadosStriperImpl::StatFunction<TimeType>::Type statFunction)
{
  // use a MultiAioCompletion object for dealing with the fact
  // that we'll do 2 asynchronous calls in parallel
  MultiAioCompletionImplPtr multi_completion{
    new libradosstriper::MultiAioCompletionImpl, false};
  // Data object used for passing context to asynchronous calls
  std::string firstObjOid = getObjectId(soid, 0);
  auto cdata = ceph::make_ref<StatCompletionData<TimeType>>(this, firstObjOid, c, multi_completion.get(), psize, pmtime);
  multi_completion->set_complete_callback(cdata->get() /* create ref! */, striper_stat_aio_req_complete);
  // use a regular AioCompletion for the stat async call
  librados::AioCompletion *stat_completion =
    librados::Rados::aio_create_completion(cdata->get() /* create ref! */, striper_stat_aio_stat_complete);
  multi_completion->add_safe_request();
  object_t obj(firstObjOid);
  int rc = (m_ioCtxImpl->*statFunction)(obj, stat_completion->pc,
					&cdata->m_objectSize, cdata->m_pmtime);
  stat_completion->release();
  if (rc < 0) {
    // nothing is really started so cancel everything
    // NOTE(review): this deletes cdata directly while references were
    // created above for the two callbacks — verify this teardown path
    // cannot leave a dangling reference.
    delete cdata.detach();
    return rc;
  }
  // use a regular AioCompletion for the getxattr async call
  librados::AioCompletion *getxattr_completion =
    librados::Rados::aio_create_completion(cdata->get() /* create ref! */, striper_stat_aio_getxattr_complete);
  multi_completion->add_safe_request();
  // in parallel, get the pmsize from the first object asynchronously
  rc = m_ioCtxImpl->aio_getxattr(obj, getxattr_completion->pc,
				 XATTR_SIZE, cdata->m_bl);
  getxattr_completion->release();
  multi_completion->finish_adding_requests();
  if (rc < 0) {
    // the async stat is ongoing, so we need to go on
    // we mark the getxattr as failed in the data object
    cdata->m_getxattrRC = rc;
    multi_completion->complete_request(rc);
    return rc;
  }
  return 0;
}
// Asynchronous stat with a time_t mtime: delegates to the generic
// templated implementation with the time_t flavor of IoCtxImpl::aio_stat.
int libradosstriper::RadosStriperImpl::aio_stat(const std::string& soid,
						librados::AioCompletionImpl *c,
						uint64_t *psize,
						time_t *pmtime)
{
  return aio_generic_stat<time_t>(soid, c, psize, pmtime, &librados::IoCtxImpl::aio_stat);
}
int libradosstriper::RadosStriperImpl::stat2(const std::string& soid, uint64_t *psize, struct timespec *pts)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous version of stat
int rc = aio_stat2(soid, &c, psize, pts);
if (rc == 0) {
// wait for completion of the remove
c.wait_for_complete_and_cb();
// get result
rc = c.get_return_value();
}
return rc;
}
// Asynchronous stat with a struct timespec mtime: delegates to the
// generic templated implementation with IoCtxImpl::aio_stat2.
int libradosstriper::RadosStriperImpl::aio_stat2(const std::string& soid,
						 librados::AioCompletionImpl *c,
						 uint64_t *psize,
						 struct timespec *pts)
{
  return aio_generic_stat<struct timespec>(soid, c, psize, pts, &librados::IoCtxImpl::aio_stat2);
}
// Callback run when one underlying rados object removal completes.
// A missing object (-ENOENT) is fine: striped objects can be sparse.
static void rados_req_remove_complete(rados_completion_t c, void *arg)
{
  // Adopt the reference created for this callback in internal_aio_remove
  // ("create ref!") so it is released when this handler returns (the
  // previous raw static_cast leaked one RadosRemoveCompletionData per
  // removed object).
  auto cdata = ceph::ref_t<RadosRemoveCompletionData>(static_cast<RadosRemoveCompletionData*>(arg), false);
  int rc = rados_aio_get_return_value(c);
  // in case the object did not exist, it means we had a sparse file, all is fine
  if (rc == -ENOENT) {
    rc = 0;
  }
  cdata->m_multiAioCompl->complete_request(rc);
  cdata->m_multiAioCompl->safe_request(rc);
}
// Final callback once all non-head objects of a striped object have
// been removed. Only if every removal succeeded is the head object
// removed (synchronously), which makes the overall deletion appear
// atomic; otherwise the partially-deleted state is logged.
static void striper_remove_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = ceph::ref_t<RemoveCompletionData>(static_cast<RemoveCompletionData*>(arg), false);
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  ldout(cdata->m_striper->cct(), 10)
    << "RadosStriperImpl : striper_remove_aio_req_complete called for "
    << cdata->m_soid << dendl;
  int rc = comp->rval;
  if (rc == 0) {
    // All went fine, synchronously remove first object
    rc = cdata->m_striper->m_ioCtx.remove(cdata->m_striper->getObjectId(cdata->m_soid, 0),
					  cdata->flags);
  } else {
    lderr(cdata->m_striper->cct())
      << "RadosStriperImpl : deletion/truncation incomplete for " << cdata->m_soid
      << ", as errors were encountered. The file is left present but it's content "
      << " has been partially removed"
      << dendl;
  }
  cdata->complete(rc);
}
int libradosstriper::RadosStriperImpl::remove(const std::string& soid, int flags)
{
// create a completion object
librados::AioCompletionImpl c;
// call asynchronous version of remove
int rc = aio_remove(soid, &c, flags);
if (rc == 0) {
// wait for completion of the remove
c.wait_for_complete_and_cb();
// get result
rc = c.get_return_value();
}
return rc;
}
// Asynchronous removal of a striped object.
// Takes an exclusive lock on the head object for the duration of the
// whole removal (released via the RemoveCompletionData lock cookie),
// then delegates the actual per-object deletions to
// internal_aio_remove; the head object itself is removed last, in
// striper_remove_aio_req_complete.
int libradosstriper::RadosStriperImpl::aio_remove(const std::string& soid,
						  librados::AioCompletionImpl *c,
						  int flags)
{
  // the RemoveCompletionData object will lock the given soid for the duration
  // of the removal
  std::string lockCookie = getUUID();
  int rc = m_ioCtx.lock_exclusive(getObjectId(soid, 0), RADOS_LOCK_NAME, lockCookie, "", 0, 0);
  if (rc) return rc;
  // create CompletionData for the async remove call
  auto cdata = ceph::make_ref<RemoveCompletionData>(this, soid, lockCookie, c, flags);
  MultiAioCompletionImplPtr multi_completion{
    new libradosstriper::MultiAioCompletionImpl, false};
  multi_completion->set_complete_callback(cdata->get() /* create ref! */, striper_remove_aio_req_complete);
  // call asynchronous internal version of remove
  ldout(cct(), 10)
    << "RadosStriperImpl : Aio_remove starting for "
    << soid << dendl;
  rc = internal_aio_remove(soid, multi_completion);
  return rc;
}
/**
 * Removes all rados objects of a striped object but the first one,
 * asynchronously, aggregating the removals in @multi_completion.
 * The number of rados objects is derived from the size xattr when it can
 * be read, otherwise by stat-ing objects one by one.
 * The first rados object is deliberately kept : it is only dropped by the
 * multi-completion callback, once all other removals have succeeded.
 *
 * @param soid name of the striped object
 * @param multi_completion aggregator for the individual removals
 * @param flags flags forwarded to the underlying rados removals
 * @return 0 if all removals were launched, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::internal_aio_remove(
 const std::string& soid,
 MultiAioCompletionImplPtr multi_completion,
 int flags)
{
  std::string firstObjOid = getObjectId(soid, 0);
  try {
    // check size and get number of rados objects to delete
    uint64_t nb_objects = 0;
    bufferlist bl2;
    int rc = getxattr(soid, XATTR_SIZE, bl2);
    if (rc < 0) {
      // no object size (or not able to get it)
      // try to find the number of object "by hand"
      uint64_t psize;
      time_t pmtime;
      while (!m_ioCtx.stat(getObjectId(soid, nb_objects), &psize, &pmtime)) {
        nb_objects++;
      }
    } else {
      // count total number of rados objects in the striped object
      std::string err;
      // this intermediate string allows to add a null terminator before calling strtol
      std::string strsize(bl2.c_str(), bl2.length());
      uint64_t size = strict_strtoll(strsize.c_str(), 10, &err);
      if (!err.empty()) {
        lderr(cct()) << XATTR_SIZE << " : " << err << dendl;
        return -EINVAL;
      }
      uint64_t object_size = m_layout.fl_object_size;
      uint64_t su = m_layout.fl_stripe_unit;
      uint64_t stripe_count = m_layout.fl_stripe_count;
      uint64_t nb_complete_sets = size / (object_size*stripe_count);
      uint64_t remaining_data = size % (object_size*stripe_count);
      uint64_t remaining_stripe_units = (remaining_data + su -1) / su;
      uint64_t remaining_objects = std::min(remaining_stripe_units, stripe_count);
      nb_objects = nb_complete_sets * stripe_count + remaining_objects;
    }
    // delete rados objects in reverse order
    // Note that we do not drop the first object. This one will only be dropped
    // if all other removals have been successful, and this is done in the
    // callback of the multi_completion object
    int rcr = 0;
    for (int i = nb_objects-1; i >= 1; i--) {
      multi_completion->add_request();
      auto data = ceph::make_ref<RadosRemoveCompletionData>(multi_completion, cct());
      librados::AioCompletion *rados_completion =
        librados::Rados::aio_create_completion(data->get() /* create ref! */,
                                               rados_req_remove_complete);
      if (flags == 0) {
        rcr = m_ioCtx.aio_remove(getObjectId(soid, i), rados_completion);
      } else {
        rcr = m_ioCtx.aio_remove(getObjectId(soid, i), rados_completion, flags);
      }
      rados_completion->release();
      if (rcr < 0 and -ENOENT != rcr) {
        // log the return code of the failing removal (rcr), not the
        // unrelated status of the earlier getxattr call
        lderr(cct()) << "RadosStriperImpl::remove : deletion incomplete for " << soid
                     << ", as " << getObjectId(soid, i) << " could not be deleted (rc=" << rcr << ")"
                     << dendl;
        break;
      }
    }
    // we are over adding requests to the multi_completion object
    multi_completion->finish_adding_requests();
    // return
    return rcr;
  } catch (ErrorCode &e) {
    // error caught when trying to take the exclusive lock
    return e.m_code;
  }
}
/**
 * Synchronous truncation (or growth) of a striped object to @size bytes.
 * Takes an exclusive lock on the first rados object for the duration of
 * the operation, then dispatches to truncate() or grow() depending on how
 * @size compares to the current logical size.
 *
 * @param soid name of the striped object
 * @param size new logical size in bytes
 * @return 0 on success, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::trunc(const std::string& soid, uint64_t size)
{
  // lock the object in exclusive mode
  std::string firstObjOid = getObjectId(soid, 0);
  librados::ObjectWriteOperation op;
  op.assert_exists();
  std::string lockCookie = RadosStriperImpl::getUUID();
  utime_t dur = utime_t();
  rados::cls::lock::lock(&op, RADOS_LOCK_NAME, ClsLockType::EXCLUSIVE, lockCookie, "", "", dur, 0);
  int rc = m_ioCtx.operate(firstObjOid, &op);
  if (rc) return rc;
  // load layout and size
  ceph_file_layout layout;
  uint64_t original_size;
  rc = internal_get_layout_and_size(firstObjOid, &layout, &original_size);
  if (!rc) {
    if (size < original_size) {
      // shrinking : drop/truncate trailing rados objects
      rc = truncate(soid, original_size, size, layout);
    } else if (size > original_size) {
      // growing : objects are sparse, only the size xattr changes
      rc = grow(soid, original_size, size, layout);
    }
    // size == original_size : nothing to do
  }
  // unlock object, ignore return code as we cannot do much
  m_ioCtx.unlock(firstObjOid, RADOS_LOCK_NAME, lockCookie);
  // final return
  return rc;
}
///////////////////////// private helpers /////////////////////////////
/**
 * Builds the name of the n-th rados object backing a striped object :
 * "<soid>.<16-digit zero-padded hexadecimal object number>".
 */
std::string libradosstriper::RadosStriperImpl::getObjectId(const object_t& soid,
							   long long unsigned objectno)
{
  std::ostringstream name;
  name << soid << '.'
       << std::hex << std::setw(16) << std::setfill('0') << objectno;
  return name.str();
}
/**
 * Releases the lock identified by @lockCookie on a striped object.
 * The lock protecting a striped object lives on its first rados object.
 */
void libradosstriper::RadosStriperImpl::unlockObject(const std::string& soid,
						     const std::string& lockCookie)
{
  m_ioCtx.unlock(getObjectId(soid, 0), RADOS_LOCK_NAME, lockCookie);
}
/**
 * Asynchronously releases the lock identified by @lockCookie on a striped
 * object; @c is signalled when the unlock completes. The lock protecting
 * a striped object lives on its first rados object.
 */
void libradosstriper::RadosStriperImpl::aio_unlockObject(const std::string& soid,
							 const std::string& lockCookie,
							 librados::AioCompletion *c)
{
  m_ioCtx.aio_unlock(getObjectId(soid, 0), RADOS_LOCK_NAME, lockCookie, c);
}
/**
 * Callback fired when the asynchronous unlock issued at the end of a
 * striped write completes. Adopts (does not add) a reference to the
 * WriteCompletionData, released when cdata goes out of scope.
 */
static void rados_write_aio_unlock_complete(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = ceph::ref_t<WriteCompletionData>(static_cast<WriteCompletionData*>(arg), false);
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  // propagate the unlock status to the write completion data
  cdata->complete_unlock(comp->rval);
}
/**
 * "complete" callback of the multi-completion aggregating a striped write.
 * Starts the asynchronous unlock of the striped object and, in parallel,
 * propagates the aggregated write status. Adopts one reference to the
 * WriteCompletionData, released when cdata goes out of scope.
 */
static void striper_write_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = ceph::ref_t<WriteCompletionData>(static_cast<WriteCompletionData*>(arg), false);
  // launch the async unlocking of the object
  cdata->m_striper->aio_unlockObject(cdata->m_soid, cdata->m_lockCookie, cdata->m_unlockCompletion);
  // complete the write part in parallel
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  cdata->complete_write(comp->rval);
}
/**
 * "safe" callback of the multi-completion aggregating a striped write :
 * propagates the aggregated status to the safe side of the completion
 * data. Adopts one reference to the WriteCompletionData, released when
 * cdata goes out of scope.
 */
static void striper_write_aio_req_safe(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = ceph::ref_t<WriteCompletionData>(static_cast<WriteCompletionData*>(arg), false);
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  cdata->safe(comp->rval);
}
/**
 * Synchronous write to an already opened (and locked) striped object.
 * Internally drives the asynchronous write machinery and waits for
 * completion, safety and the final unlock before returning the status.
 *
 * @param soid name of the striped object
 * @param layout striping layout to apply
 * @param lockCookie cookie of the lock taken when opening the object
 * @param bl data to write
 * @param len number of bytes to write
 * @param off logical offset of the write
 * @return 0 on success, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::write_in_open_object(const std::string& soid,
							    const ceph_file_layout& layout,
							    const std::string& lockCookie,
							    const bufferlist& bl,
							    size_t len,
							    uint64_t off) {
  // create a completion object to be passed to the callbacks of the multicompletion
  // we need 3 references as striper_write_aio_req_complete will release two and
  // striper_write_aio_req_safe will release one
  auto cdata = ceph::make_ref<WriteCompletionData>(this, soid, lockCookie, nullptr);
  // create a completion object for the unlocking of the striped object at the end of the write
  librados::AioCompletion *unlock_completion =
    librados::Rados::aio_create_completion(cdata->get() /* create ref! */, rados_write_aio_unlock_complete);
  cdata->m_unlockCompletion = unlock_completion;
  // create the multicompletion that will handle the write completion
  MultiAioCompletionImplPtr c{new libradosstriper::MultiAioCompletionImpl,
                              false};
  c->set_complete_callback(cdata->get() /* create ref! */, striper_write_aio_req_complete);
  c->set_safe_callback(cdata->get() /* create ref! */, striper_write_aio_req_safe);
  // call the asynchronous API
  int rc = internal_aio_write(soid, c, bl, len, off, layout);
  if (!rc) {
    // wait for completion and safety of data
    c->wait_for_complete_and_cb();
    c->wait_for_safe_and_cb();
    // wait for the unlocking
    unlock_completion->wait_for_complete();
    // return result
    rc = c->get_return_value();
  }
  return rc;
}
/**
 * Asynchronous flavour of write_in_open_object : same completion plumbing,
 * but the user's completion @c is signalled instead of blocking here.
 *
 * @param soid name of the striped object
 * @param c user completion signalled when the write completes
 * @param layout striping layout to apply
 * @param lockCookie cookie of the lock taken when opening the object
 * @param bl data to write
 * @param len number of bytes to write
 * @param off logical offset of the write
 * @return 0 if the write was launched, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::aio_write_in_open_object(const std::string& soid,
								librados::AioCompletionImpl *c,
								const ceph_file_layout& layout,
								const std::string& lockCookie,
								const bufferlist& bl,
								size_t len,
								uint64_t off) {
  // create a completion object to be passed to the callbacks of the multicompletion
  // we need 3 references as striper_write_aio_req_complete will release two and
  // striper_write_aio_req_safe will release one
  auto cdata = ceph::make_ref<WriteCompletionData>(this, soid, lockCookie, c);
  // the user completion keeps a reference to the ioctx implementation
  m_ioCtxImpl->get();
  c->io = m_ioCtxImpl;
  // create a completion object for the unlocking of the striped object at the end of the write
  librados::AioCompletion *unlock_completion =
    librados::Rados::aio_create_completion(cdata->get() /* create ref! */, rados_write_aio_unlock_complete);
  cdata->m_unlockCompletion = unlock_completion;
  // create the multicompletion that will handle the write completion
  libradosstriper::MultiAioCompletionImplPtr nc{
    new libradosstriper::MultiAioCompletionImpl, false};
  nc->set_complete_callback(cdata->get() /* create ref! */, striper_write_aio_req_complete);
  nc->set_safe_callback(cdata->get() /* create ref! */, striper_write_aio_req_safe);
  // internal asynchronous API
  int rc = internal_aio_write(soid, nc, bl, len, off, layout);
  return rc;
}
/**
 * Completion callback of a single rados object write belonging to a
 * striped write : forwards the rados return code to the aggregating
 * multi-completion, on both the "complete" and the "safe" sides.
 */
static void rados_req_write_complete(rados_completion_t c, void *arg)
{
  auto comp = reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(arg);
  // fetch the return value once rather than once per notification
  const int rc = rados_aio_get_return_value(c);
  comp->complete_request(rc);
  comp->safe_request(rc);
}
/**
 * Maps a logical write [off, off+len) on the striped object to the
 * underlying rados objects and issues one asynchronous write per touched
 * rados object, all aggregated by the given multi-completion.
 *
 * @param soid name of the striped object
 * @param c multi-completion aggregating the per-object writes
 * @param bl data to write
 * @param len number of bytes to write (0 is a no-op)
 * @param off logical offset of the write
 * @param layout striping layout to apply
 * @return 0 if all writes were launched, negative errno otherwise
 */
int
libradosstriper::RadosStriperImpl::internal_aio_write(const std::string& soid,
						      libradosstriper::MultiAioCompletionImplPtr c,
						      const bufferlist& bl,
						      size_t len,
						      uint64_t off,
						      const ceph_file_layout& layout)
{
  int r = 0;
  // Do not try anything if we are called with empty buffer,
  // file_to_extents would raise an exception
  if (len > 0) {
    // get list of extents to be written to
    vector<ObjectExtent> extents;
    std::string format = soid;
    // escape any '%' in the object name since the format string is printf-like
    boost::replace_all(format, "%", "%%");
    format += RADOS_OBJECT_EXTENSION_FORMAT;
    file_layout_t l;
    l.from_legacy(layout);
    Striper::file_to_extents(cct(), format.c_str(), &l, off, len, 0, extents);
    // go through the extents
    for (vector<ObjectExtent>::iterator p = extents.begin(); p != extents.end(); ++p) {
      // assemble pieces of a given object into a single buffer list
      bufferlist oid_bl;
      for (vector<pair<uint64_t,uint64_t> >::iterator q = p->buffer_extents.begin();
	   q != p->buffer_extents.end();
	   ++q) {
	bufferlist buffer_bl;
	buffer_bl.substr_of(bl, q->first, q->second);
	oid_bl.append(buffer_bl);
      }
      // and write the object
      c->add_request();
      librados::AioCompletion *rados_completion =
	librados::Rados::aio_create_completion(c.get(),
					       rados_req_write_complete);
      r = m_ioCtx.aio_write(p->oid.name, rados_completion, oid_bl,
			    p->length, p->offset);
      rados_completion->release();
      if (r < 0)
	break;
    }
  }
  c->finish_adding_requests();
  return r;
}
int libradosstriper::RadosStriperImpl::extract_uint32_attr
(std::map<std::string, bufferlist> &attrs,
const std::string& key,
ceph_le32 *value)
{
std::map<std::string, bufferlist>::iterator attrsIt = attrs.find(key);
if (attrsIt != attrs.end()) {
// this intermediate string allows to add a null terminator before calling strtol
std::string strvalue(attrsIt->second.c_str(), attrsIt->second.length());
std::string err;
*value = strict_strtol(strvalue.c_str(), 10, &err);
if (!err.empty()) {
lderr(cct()) << key << " : " << err << dendl;
return -EINVAL;
}
} else {
return -ENOENT;
}
return 0;
}
int libradosstriper::RadosStriperImpl::extract_sizet_attr
(std::map<std::string, bufferlist> &attrs,
const std::string& key,
size_t *value)
{
std::map<std::string, bufferlist>::iterator attrsIt = attrs.find(key);
if (attrsIt != attrs.end()) {
// this intermediate string allows to add a null terminator before calling strtol
std::string strvalue(attrsIt->second.c_str(), attrsIt->second.length());
std::string err;
*value = strict_strtoll(strvalue.c_str(), 10, &err);
if (!err.empty()) {
lderr(cct()) << key << " : " << err << dendl;
return -EINVAL;
}
} else {
return -ENOENT;
}
return 0;
}
/**
 * Reads the striping layout and the logical size of a striped object from
 * the xattrs of its first rados object @oid.
 *
 * @param oid name of the first rados object of the striped object
 * @param layout filled with stripe unit, stripe count and object size
 * @param size filled with the logical size of the striped object
 * @return 0 on success, negative errno otherwise (in particular -ENOENT
 *         when one of the expected xattrs is missing)
 */
int libradosstriper::RadosStriperImpl::internal_get_layout_and_size(
  const std::string& oid,
  ceph_file_layout *layout,
  uint64_t *size)
{
  // get external attributes of the first rados object
  std::map<std::string, bufferlist> attrs;
  int rc = m_ioCtx.getxattrs(oid, attrs);
  if (rc) return rc;
  // deal with stripe_unit
  rc = extract_uint32_attr(attrs, XATTR_LAYOUT_STRIPE_UNIT, &layout->fl_stripe_unit);
  if (rc) return rc;
  // deal with stripe_count
  rc = extract_uint32_attr(attrs, XATTR_LAYOUT_STRIPE_COUNT, &layout->fl_stripe_count);
  if (rc) return rc;
  // deal with object_size
  rc = extract_uint32_attr(attrs, XATTR_LAYOUT_OBJECT_SIZE, &layout->fl_object_size);
  if (rc) return rc;
  // deal with size
  size_t ssize;
  rc = extract_sizet_attr(attrs, XATTR_SIZE, &ssize);
  if (rc) {
    return rc;
  }
  *size = ssize;
  // make valgrind happy by setting unused fl_pg_pool
  layout->fl_pg_pool = 0;
  return 0;
}
/**
 * Opens an existing striped object for reading : atomically checks its
 * existence and takes a shared lock on its first rados object, then reads
 * its layout and logical size. On any failure no lock is held on return.
 *
 * @param soid name of the striped object
 * @param layout filled with the striping layout
 * @param size filled with the logical size
 * @param lockCookie filled with the cookie of the shared lock taken
 * @return 0 on success, negative errno otherwise (-ENOENT if absent)
 */
int libradosstriper::RadosStriperImpl::openStripedObjectForRead(
  const std::string& soid,
  ceph_file_layout *layout,
  uint64_t *size,
  std::string *lockCookie)
{
  // take a lock the first rados object, if it exists and gets its size
  // check, lock and size reading must be atomic and are thus done within a single operation
  librados::ObjectWriteOperation op;
  op.assert_exists();
  *lockCookie = getUUID();
  utime_t dur = utime_t();
  rados::cls::lock::lock(&op, RADOS_LOCK_NAME, ClsLockType::SHARED, *lockCookie, "Tag", "", dur, 0);
  std::string firstObjOid = getObjectId(soid, 0);
  int rc = m_ioCtx.operate(firstObjOid, &op);
  if (rc) {
    // error case (including -ENOENT)
    return rc;
  }
  rc = internal_get_layout_and_size(firstObjOid, layout, size);
  if (rc) {
    // release the lock taken above : the caller gets an error, not a lock
    unlockObject(soid, *lockCookie);
    lderr(cct()) << "RadosStriperImpl::openStripedObjectForRead : "
		 << "could not load layout and size for "
		 << soid << " : rc = " << rc << dendl;
  }
  return rc;
}
/**
 * Opens a striped object for writing : atomically checks its existence and
 * takes a shared lock on its first rados object, then updates the size
 * xattr to the post-write size (creating the object first if needed).
 * On success *size holds the size of the object before the opening.
 * On any failure no lock is held on return.
 *
 * @param soid name of the striped object
 * @param layout filled with the striping layout
 * @param size in: new size (absolute, or delta when !isFileSizeAbsolute);
 *             out: size of the object before the opening
 * @param lockCookie filled with the cookie of the shared lock taken
 * @param isFileSizeAbsolute false means append mode (@size is added to the
 *        current size)
 * @return 0 on success, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::openStripedObjectForWrite(const std::string& soid,
								 ceph_file_layout *layout,
								 uint64_t *size,
								 std::string *lockCookie,
								 bool isFileSizeAbsolute)
{
  // take a lock the first rados object, if it exists
  // check and lock must be atomic and are thus done within a single operation
  librados::ObjectWriteOperation op;
  op.assert_exists();
  *lockCookie = getUUID();
  utime_t dur = utime_t();
  rados::cls::lock::lock(&op, RADOS_LOCK_NAME, ClsLockType::SHARED, *lockCookie, "Tag", "", dur, 0);
  std::string firstObjOid = getObjectId(soid, 0);
  int rc = m_ioCtx.operate(firstObjOid, &op);
  if (rc) {
    if (rc == -ENOENT) {
      // object does not exist, delegate to createEmptyStripedObject
      int rc = createAndOpenStripedObject(soid, layout, *size, lockCookie, isFileSizeAbsolute);
      // return original size
      *size = 0;
      return rc;
    } else {
      return rc;
    }
  }
  // all fine
  uint64_t curSize;
  rc = internal_get_layout_and_size(firstObjOid, layout, &curSize);
  if (rc) {
    unlockObject(soid, *lockCookie);
    lderr(cct()) << "RadosStriperImpl::openStripedObjectForWrite : "
		 << "could not load layout and size for "
		 << soid << " : rc = " << rc << dendl;
    return rc;
  }
  // atomically update object size, only if smaller than current one
  if (!isFileSizeAbsolute)
    *size += curSize;
  librados::ObjectWriteOperation writeOp;
  // cmpxattr makes the update conditional : it fails with -ECANCELED
  // when the stored size is already >= the requested one
  writeOp.cmpxattr(XATTR_SIZE, LIBRADOS_CMPXATTR_OP_GT, *size);
  std::ostringstream oss;
  oss << *size;
  bufferlist bl;
  bl.append(oss.str());
  writeOp.setxattr(XATTR_SIZE, bl);
  rc = m_ioCtx.operate(firstObjOid, &writeOp);
  // return current size
  *size = curSize;
  // handle case where objectsize is already bigger than size
  if (-ECANCELED == rc)
    rc = 0;
  if (rc) {
    unlockObject(soid, *lockCookie);
    lderr(cct()) << "RadosStriperImpl::openStripedObjectForWrite : "
		 << "could not set new size for "
		 << soid << " : rc = " << rc << dendl;
  }
  return rc;
}
int libradosstriper::RadosStriperImpl::createAndOpenStripedObject(const std::string& soid,
ceph_file_layout *layout,
uint64_t size,
std::string *lockCookie,
bool isFileSizeAbsolute)
{
// build atomic write operation
librados::ObjectWriteOperation writeOp;
writeOp.create(true);
// object_size
std::ostringstream oss_object_size;
oss_object_size << m_layout.fl_object_size;
bufferlist bl_object_size;
bl_object_size.append(oss_object_size.str());
writeOp.setxattr(XATTR_LAYOUT_OBJECT_SIZE, bl_object_size);
// stripe unit
std::ostringstream oss_stripe_unit;
oss_stripe_unit << m_layout.fl_stripe_unit;
bufferlist bl_stripe_unit;
bl_stripe_unit.append(oss_stripe_unit.str());
writeOp.setxattr(XATTR_LAYOUT_STRIPE_UNIT, bl_stripe_unit);
// stripe count
std::ostringstream oss_stripe_count;
oss_stripe_count << m_layout.fl_stripe_count;
bufferlist bl_stripe_count;
bl_stripe_count.append(oss_stripe_count.str());
writeOp.setxattr(XATTR_LAYOUT_STRIPE_COUNT, bl_stripe_count);
// size
std::ostringstream oss_size;
oss_size << (isFileSizeAbsolute?size:0);
bufferlist bl_size;
bl_size.append(oss_size.str());
writeOp.setxattr(XATTR_SIZE, bl_size);
// effectively change attributes
std::string firstObjOid = getObjectId(soid, 0);
int rc = m_ioCtx.operate(firstObjOid, &writeOp);
// in case of error (but no EEXIST which would mean the object existed), return
if (rc && -EEXIST != rc) return rc;
// Otherwise open the object
uint64_t fileSize = size;
return openStripedObjectForWrite(soid, layout, &fileSize, lockCookie, isFileSizeAbsolute);
}
/**
 * Completion callback of an asynchronous truncation : when all underlying
 * removals/truncations succeeded, records the new logical size in the
 * striped object's xattrs. On failure the size xattr is left untouched so
 * that no rados object stays behind after a later removal.
 * Adopts one reference to the TruncateCompletionData, released when cdata
 * goes out of scope.
 */
static void striper_truncate_aio_req_complete(rados_striper_multi_completion_t c, void *arg)
{
  auto cdata = ceph::ref_t<TruncateCompletionData>(static_cast<TruncateCompletionData*>(arg), false);
  libradosstriper::MultiAioCompletionImpl *comp =
    reinterpret_cast<libradosstriper::MultiAioCompletionImpl*>(c);
  if (0 == comp->rval) {
    // all went fine, change size in the external attributes
    std::ostringstream oss;
    oss << cdata->m_size;
    bufferlist bl;
    bl.append(oss.str());
    cdata->m_striper->setxattr(cdata->m_soid, XATTR_SIZE, bl);
  }
}
/**
 * Synchronous truncation helper. Must only be called with
 * @size < @original_size; drives aio_truncate and waits for it.
 *
 * @param soid name of the striped object
 * @param original_size current logical size
 * @param size new (smaller) logical size
 * @param layout striping layout of the object
 * @return 0 on success, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::truncate(const std::string& soid,
						uint64_t original_size,
						uint64_t size,
						ceph_file_layout &layout)
{
  auto cdata = ceph::make_ref<TruncateCompletionData>(this, soid, size);
  libradosstriper::MultiAioCompletionImplPtr multi_completion{
    new libradosstriper::MultiAioCompletionImpl, false};
  // the callback (which updates the size xattr) adopts the reference
  multi_completion->set_complete_callback(cdata->get() /* create ref! */, striper_truncate_aio_req_complete);
  // call asynchronous version of truncate
  int rc = aio_truncate(soid, multi_completion, original_size, size, layout);
  // wait for completion of the truncation
  multi_completion->finish_adding_requests();
  multi_completion->wait_for_complete_and_cb();
  // return result
  if (rc == 0) {
    rc = multi_completion->get_return_value();
  }
  return rc;
}
/**
 * Asynchronous truncation helper. Must only be called with
 * @size < @original_size. Removals of whole rados objects are issued
 * asynchronously via @multi_completion; the single per-object truncation
 * at the cut point is synchronous (no async truncate in rados).
 *
 * @param soid name of the striped object
 * @param multi_completion aggregator for the asynchronous removals
 * @param original_size current logical size
 * @param size new (smaller) logical size
 * @param layout striping layout of the object
 * @return 0 if everything was launched, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::aio_truncate
(const std::string& soid,
 libradosstriper::MultiAioCompletionImplPtr multi_completion,
 uint64_t original_size,
 uint64_t size,
 ceph_file_layout &layout)
{
  // handle the underlying rados objects. 3 cases here :
  //  -- the objects belonging to object sets entirely located
  //     before the truncation are unchanged
  //  -- the objects belonging to the object set where the
  //     truncation took place are truncated or removed
  //  -- the objects belonging to object sets entirely located
  //     after the truncation are removed
  // Note that we do it backward and that we change the size in
  // the external attributes only at the end. This make sure that
  // no rados object stays behind if we remove the striped object
  // after a truncation has failed
  uint64_t trunc_objectsetno = size / layout.fl_object_size / layout.fl_stripe_count;
  uint64_t last_objectsetno = original_size / layout.fl_object_size / layout.fl_stripe_count;
  bool exists = false;
  // first pass : object sets entirely beyond the truncation point -> remove
  for (int64_t objectno = (last_objectsetno+1) * layout.fl_stripe_count-1;
       objectno >= (int64_t)((trunc_objectsetno + 1) * layout.fl_stripe_count);
       objectno--) {
    // if no object existed so far, check object existence
    if (!exists) {
      uint64_t nb_full_object_set = objectno / layout.fl_stripe_count;
      uint64_t object_index_in_set = objectno % layout.fl_stripe_count;
      uint64_t set_start_off = nb_full_object_set * layout.fl_object_size * layout.fl_stripe_count;
      uint64_t object_start_off = set_start_off + object_index_in_set * layout.fl_stripe_unit;
      exists = (original_size > object_start_off);
    }
    if (exists) {
      // remove asynchronously
      multi_completion->add_request();
      auto data = ceph::make_ref<RadosRemoveCompletionData>(multi_completion, cct());
      librados::AioCompletion *rados_completion =
	librados::Rados::aio_create_completion(data->get() /* create ref! */,
					       rados_req_remove_complete);
      int rc = m_ioCtx.aio_remove(getObjectId(soid, objectno), rados_completion);
      rados_completion->release();
      // in case the object did not exist, it means we had a sparse file, all is fine
      if (rc && rc != -ENOENT) return rc;
    }
  }
  // second pass : the object set containing the truncation point -> truncate or remove
  for (int64_t objectno = ((trunc_objectsetno + 1) * layout.fl_stripe_count) -1;
       objectno >= (int64_t)(trunc_objectsetno * layout.fl_stripe_count);
       objectno--) {
    // if no object existed so far, check object existence
    if (!exists) {
      uint64_t object_start_off = ((objectno / layout.fl_stripe_count) * layout.fl_object_size) +
	((objectno % layout.fl_stripe_count) * layout.fl_stripe_unit);
      exists = (original_size > object_start_off);
    }
    if (exists) {
      // truncate
      file_layout_t l;
      l.from_legacy(layout);
      uint64_t new_object_size = Striper::object_truncate_size(cct(), &l, objectno, size);
      int rc;
      if (new_object_size > 0 or 0 == objectno) {
	// trunc is synchronous as there is no async version
	// but note that only a single object will be truncated
	// reducing the overload to a fixed amount
	rc = m_ioCtx.trunc(getObjectId(soid, objectno), new_object_size);
      } else {
	// removes are asynchronous in order to speed up truncations of big files
	multi_completion->add_request();
	auto data = ceph::make_ref<RadosRemoveCompletionData>(multi_completion, cct());
	librados::AioCompletion *rados_completion =
	  librados::Rados::aio_create_completion(data->get() /* create ref! */,
						 rados_req_remove_complete);
	rc = m_ioCtx.aio_remove(getObjectId(soid, objectno), rados_completion);
	rados_completion->release();
      }
      // in case the object did not exist, it means we had a sparse file, all is fine
      if (rc && rc != -ENOENT) return rc;
    }
  }
  return 0;
}
/**
 * Grows a striped object. Must only be called with @size > @original_size.
 * Striped objects are sparse, so growing one only requires bumping the
 * logical size stored in the xattrs of the first rados object.
 *
 * @param soid name of the striped object
 * @param original_size current logical size (unused, kept for symmetry)
 * @param size new (larger) logical size
 * @param layout striping layout of the object (unused here)
 * @return 0 on success, negative errno otherwise
 */
int libradosstriper::RadosStriperImpl::grow(const std::string& soid,
					    uint64_t original_size,
					    uint64_t size,
					    ceph_file_layout &layout)
{
  std::ostringstream new_size;
  new_size << size;
  bufferlist bl;
  bl.append(new_size.str());
  return m_ioCtx.setxattr(getObjectId(soid, 0), XATTR_SIZE, bl);
}
std::string libradosstriper::RadosStriperImpl::getUUID()
{
struct uuid_d uuid;
uuid.generate_random();
char suuid[37];
uuid.print(suuid);
return std::string(suuid);
}
| 58,551 | 35.345127 | 121 | cc |
null | ceph-main/src/libradosstriper/RadosStriperImpl.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#ifndef CEPH_LIBRADOSSTRIPER_RADOSSTRIPERIMPL_H
#define CEPH_LIBRADOSSTRIPER_RADOSSTRIPERIMPL_H
#include <string>
#include <boost/intrusive_ptr.hpp>
#include "include/rados/librados.h"
#include "include/rados/librados.hpp"
#include "include/radosstriper/libradosstriper.h"
#include "include/radosstriper/libradosstriper.hpp"
#include "MultiAioCompletionImpl.h"
#include "librados/IoCtxImpl.h"
#include "librados/AioCompletionImpl.h"
#include "common/RefCountedObj.h"
#include "common/ceph_context.h"
namespace libradosstriper {
using MultiAioCompletionImplPtr =
  boost::intrusive_ptr<MultiAioCompletionImpl>;
/**
 * Implementation of the rados striper API : stripes logical objects over
 * several rados objects according to a ceph_file_layout. The striping
 * parameters and the logical size are handled via xattrs of the first
 * rados object, which also carries the lock protecting the striped object.
 * Instances are reference counted through get()/put().
 */
struct RadosStriperImpl {
  /**
   * exception wrapper around an error code
   */
  struct ErrorCode {
    ErrorCode(int error) : m_code(error) {};
    int m_code;
  };
  /*
   * Constructor
   * @param ioctx ioctx of the pool hosting the striped objects
   * @param ioctx_impl implementation object backing ioctx
   */
  RadosStriperImpl(librados::IoCtx& ioctx, librados::IoCtxImpl *ioctx_impl);
  /// Destructor
  ~RadosStriperImpl() {};
  // configuration of the default striping layout (m_layout)
  int setObjectLayoutStripeUnit(unsigned int stripe_unit);
  int setObjectLayoutStripeCount(unsigned int stripe_count);
  int setObjectLayoutObjectSize(unsigned int object_size);
  // xattrs
  int getxattr(const object_t& soid, const char *name, bufferlist& bl);
  int setxattr(const object_t& soid, const char *name, bufferlist& bl);
  int getxattrs(const object_t& soid, std::map<std::string, bufferlist>& attrset);
  int rmxattr(const object_t& soid, const char *name);
  // io
  int write(const std::string& soid, const bufferlist& bl, size_t len, uint64_t off);
  int append(const std::string& soid, const bufferlist& bl, size_t len);
  int write_full(const std::string& soid, const bufferlist& bl);
  int read(const std::string& soid, bufferlist* pbl, size_t len, uint64_t off);
  // asynchronous io
  int aio_write(const std::string& soid, librados::AioCompletionImpl *c,
		const bufferlist& bl, size_t len, uint64_t off);
  int aio_append(const std::string& soid, librados::AioCompletionImpl *c,
		 const bufferlist& bl, size_t len);
  int aio_write_full(const std::string& soid, librados::AioCompletionImpl *c,
		     const bufferlist& bl);
  int aio_read(const std::string& soid, librados::AioCompletionImpl *c,
	       bufferlist* pbl, size_t len, uint64_t off);
  int aio_read(const std::string& soid, librados::AioCompletionImpl *c,
	       char* buf, size_t len, uint64_t off);
  int aio_flush();
  // stat, deletion and truncation
  int stat(const std::string& soid, uint64_t *psize, time_t *pmtime);
  int stat2(const std::string& soid, uint64_t *psize, struct timespec *pts);
  // pointer-to-member type of the IoCtxImpl stat flavour to call, so that
  // time_t and timespec stats share one implementation (aio_generic_stat)
  template<class TimeType>
  struct StatFunction {
    typedef int (librados::IoCtxImpl::*Type) (const object_t& oid,
					      librados::AioCompletionImpl *c,
					      uint64_t *psize, TimeType *pmtime);
  };
  template<class TimeType>
  int aio_generic_stat(const std::string& soid, librados::AioCompletionImpl *c,
		       uint64_t *psize, TimeType *pmtime,
		       typename StatFunction<TimeType>::Type statFunction);
  int aio_stat(const std::string& soid, librados::AioCompletionImpl *c,
	       uint64_t *psize, time_t *pmtime);
  int aio_stat2(const std::string& soid, librados::AioCompletionImpl *c,
		uint64_t *psize, struct timespec *pts);
  int remove(const std::string& soid, int flags=0);
  int trunc(const std::string& soid, uint64_t size);
  // asynchronous remove. Note that the removal is not 100% parallelized :
  // the removal of the first rados object of the striped object will be
  // done via a syncrhonous call after the completion of all other removals.
  // These are done asynchrounously and in parallel
  int aio_remove(const std::string& soid, librados::AioCompletionImpl *c, int flags=0);
  // reference counting
  void get() {
    std::lock_guard l{lock};
    m_refCnt ++ ;
  }
  void put() {
    // last put() destroys this object, outside of the critical section
    bool deleteme = false;
    lock.lock();
    m_refCnt --;
    if (m_refCnt == 0)
      deleteme = true;
    cond.notify_all();
    lock.unlock();
    if (deleteme)
      delete this;
  }
  // objectid manipulation
  std::string getObjectId(const object_t& soid, long long unsigned objectno);
  // opening and closing of striped objects
  void unlockObject(const std::string& soid,
		    const std::string& lockCookie);
  void aio_unlockObject(const std::string& soid,
                        const std::string& lockCookie,
                        librados::AioCompletion *c);
  // internal versions of IO method
  int write_in_open_object(const std::string& soid,
			   const ceph_file_layout& layout,
			   const std::string& lockCookie,
			   const bufferlist& bl,
			   size_t len,
			   uint64_t off);
  int aio_write_in_open_object(const std::string& soid,
			       librados::AioCompletionImpl *c,
			       const ceph_file_layout& layout,
			       const std::string& lockCookie,
			       const bufferlist& bl,
			       size_t len,
			       uint64_t off);
  int internal_aio_write(const std::string& soid,
			 MultiAioCompletionImplPtr c,
			 const bufferlist& bl,
			 size_t len,
			 uint64_t off,
			 const ceph_file_layout& layout);
  // xattr parsing helpers (decimal string -> integer)
  int extract_uint32_attr(std::map<std::string, bufferlist> &attrs,
			  const std::string& key,
			  ceph_le32 *value);
  int extract_sizet_attr(std::map<std::string, bufferlist> &attrs,
			 const std::string& key,
			 size_t *value);
  // reads layout and logical size from the xattrs of the first rados object
  int internal_get_layout_and_size(const std::string& oid,
				   ceph_file_layout *layout,
				   uint64_t *size);
  int internal_aio_remove(const std::string& soid,
			  MultiAioCompletionImplPtr multi_completion,
			  int flags=0);
  /**
   * opens an existing striped object and takes a shared lock on it
   * @return 0 if everything is ok and the lock was taken. -errcode otherwise
   * In particulae, if the striped object does not exists, -ENOENT is returned
   * In case the return code in not 0, no lock is taken
   */
  int openStripedObjectForRead(const std::string& soid,
			       ceph_file_layout *layout,
			       uint64_t *size,
			       std::string *lockCookie);
  /**
   * opens an existing striped object, takes a shared lock on it
   * and sets its size to the size it will have after the write.
   * In case the striped object does not exists, it will create it by
   * calling createOrOpenStripedObject.
   * @param layout this is filled with the layout of the file
   * @param size new size of the file (together with isFileSizeAbsolute)
   * In case of success, this is filled with the size of the file before the opening
   * @param isFileSizeAbsolute if false, this means that the given size should
   * be added to the current file size (append mode)
   * @return 0 if everything is ok and the lock was taken. -errcode otherwise
   * In case the return code in not 0, no lock is taken
   */
  int openStripedObjectForWrite(const std::string& soid,
				ceph_file_layout *layout,
				uint64_t *size,
				std::string *lockCookie,
				bool isFileSizeAbsolute);
  /**
   * creates an empty striped object with the given size and opens it calling
   * openStripedObjectForWrite, which implies taking a shared lock on it
   * Also deals with the cases where the object was created in the mean time
   * @param isFileSizeAbsolute if false, this means that the given size should
   * be added to the current file size (append mode). This of course only makes
   * sense in case the striped object already exists
   * @return 0 if everything is ok and the lock was taken. -errcode otherwise
   * In case the return code in not 0, no lock is taken
   */
  int createAndOpenStripedObject(const std::string& soid,
				 ceph_file_layout *layout,
				 uint64_t size,
				 std::string *lockCookie,
				 bool isFileSizeAbsolute);
  /**
   * truncates an object synchronously. Should only be called with size < original_size
   */
  int truncate(const std::string& soid,
	       uint64_t original_size,
	       uint64_t size,
	       ceph_file_layout &layout);
  /**
   * truncates an object asynchronously. Should only be called with size < original_size
   * note that the method is not 100% asynchronous, only the removal of rados objects
   * is, the (potential) truncation of the rados object residing just at the truncation
   * point is synchronous for lack of asynchronous truncation in the rados layer
   */
  int aio_truncate(const std::string& soid,
		   MultiAioCompletionImplPtr c,
		   uint64_t original_size,
		   uint64_t size,
		   ceph_file_layout &layout);
  /**
   * grows an object (adding 0s). Should only be called with size > original_size
   */
  int grow(const std::string& soid,
	   uint64_t original_size,
	   uint64_t size,
	   ceph_file_layout &layout);
  /**
   * creates a unique identifier
   */
  static std::string getUUID();
  CephContext *cct() {
    return (CephContext*)m_radosCluster.cct();
  }
  // reference counting
  std::condition_variable cond;
  // current reference count, protected by 'lock'
  int m_refCnt;
  std::mutex lock;
  // Context
  librados::Rados m_radosCluster;
  librados::IoCtx m_ioCtx;
  librados::IoCtxImpl *m_ioCtxImpl;
  // Default layout
  ceph_file_layout m_layout;
};
}
#endif
| 9,688 | 33.978339 | 88 | h |
null | ceph-main/src/libradosstriper/libradosstriper.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
* Ceph - scalable distributed file system
*
* Copyright (C) 2014 Sebastien Ponce <[email protected]>
*
* This is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2.1, as published by the Free Software
* Foundation. See file COPYING.
*
*/
#include <errno.h>
#include "libradosstriper/RadosStriperImpl.h"
#include "libradosstriper/MultiAioCompletionImpl.h"
#include "include/types.h"
#include "include/radosstriper/libradosstriper.h"
#include "include/radosstriper/libradosstriper.hpp"
#include "librados/RadosXattrIter.h"
/*
* This file implements the rados striper API.
* There are 2 flavours of it :
* - the C API, found in include/rados/libradosstriper.h
* - the C++ API, found in include/rados/libradosstriper.hpp
*/
///////////////////////////// C++ API //////////////////////////////
// Destructor : the wrapper must hold the last reference to the
// implementation object when it is destroyed.
// NOTE(review): release() also calls pc->put() before `delete this`, which
// would then run this destructor — looks like a double put; confirm which
// destruction path callers are expected to use.
libradosstriper::MultiAioCompletion::~MultiAioCompletion()
{
  ceph_assert(pc->ref == 1);
  pc->put();
}
int libradosstriper::MultiAioCompletion::set_complete_callback
(void *cb_arg, rados_callback_t cb)
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->set_complete_callback(cb_arg, cb);
}
int libradosstriper::MultiAioCompletion::set_safe_callback
(void *cb_arg, rados_callback_t cb)
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->set_safe_callback(cb_arg, cb);
}
void libradosstriper::MultiAioCompletion::wait_for_complete()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_complete();
}
void libradosstriper::MultiAioCompletion::wait_for_safe()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_safe();
}
bool libradosstriper::MultiAioCompletion::is_complete()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_complete();
}
bool libradosstriper::MultiAioCompletion::is_safe()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_safe();
}
void libradosstriper::MultiAioCompletion::wait_for_complete_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_complete_and_cb();
}
void libradosstriper::MultiAioCompletion::MultiAioCompletion::wait_for_safe_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->wait_for_safe_and_cb();
}
bool libradosstriper::MultiAioCompletion::is_complete_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_complete_and_cb();
}
bool libradosstriper::MultiAioCompletion::is_safe_and_cb()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->is_safe_and_cb();
}
int libradosstriper::MultiAioCompletion::get_return_value()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
return c->get_return_value();
}
void libradosstriper::MultiAioCompletion::release()
{
MultiAioCompletionImpl *c = (MultiAioCompletionImpl *)pc;
c->put();
delete this;
}
// Default-construct an unbound striper; the impl pointer stays null until
// striper_create() attaches one.
libradosstriper::RadosStriper::RadosStriper() :
  rados_striper_impl(0)
{
}

// Export the C++ striper as an opaque C handle; takes an extra reference
// that rados_striper_destroy() is responsible for dropping.
// NOTE(review): dereferences the impl unconditionally — assumes the striper
// was successfully created (non-null impl); confirm all callers guarantee it.
void libradosstriper::RadosStriper::to_rados_striper_t(RadosStriper &striper, rados_striper_t *s)
{
  *s = (rados_striper_t)striper.rados_striper_impl;
  striper.rados_striper_impl->get();
}

// Copy constructor: shares the underlying impl, bumping its refcount.
// A null impl (default-constructed source) is tolerated.
libradosstriper::RadosStriper::RadosStriper(const RadosStriper& rs)
{
  rados_striper_impl = rs.rados_striper_impl;
  if (rados_striper_impl) {
    rados_striper_impl->get();
  }
}
// Copy assignment: shares the source's impl, reference-counted.
//
// Take the new reference *before* dropping the old one: this makes
// self-assignment safe (the original put()-then-get() order could destroy
// the impl when our reference was the last one, then touch it dangling) and,
// like the copy constructor, tolerates a source with a null impl.
libradosstriper::RadosStriper& libradosstriper::RadosStriper::operator=(const RadosStriper& rs)
{
  if (rs.rados_striper_impl)
    rs.rados_striper_impl->get();
  if (rados_striper_impl)
    rados_striper_impl->put();
  rados_striper_impl = rs.rados_striper_impl;
  return *this;
}
// Destructor: drop our reference on the shared impl, if any.
libradosstriper::RadosStriper::~RadosStriper()
{
  if (rados_striper_impl)
    rados_striper_impl->put();
  rados_striper_impl = 0;
}

// Bind a new striper implementation to the given IoCtx.
// The RadosStriperImpl constructor signals failure by throwing an int error
// code, which is translated back into a plain return value here.
int libradosstriper::RadosStriper::striper_create(librados::IoCtx& ioctx,
                                                  RadosStriper *striper)
{
  try {
    striper->rados_striper_impl = new libradosstriper::RadosStriperImpl(ioctx, ioctx.io_ctx_impl);
    striper->rados_striper_impl->get();
  } catch (int rc) {
    return rc;
  }
  return 0;
}

// Layout knobs for objects created through this striper; all three simply
// delegate to the impl.
int libradosstriper::RadosStriper::set_object_layout_stripe_unit
(unsigned int stripe_unit)
{
  return rados_striper_impl->setObjectLayoutStripeUnit(stripe_unit);
}

int libradosstriper::RadosStriper::set_object_layout_stripe_count
(unsigned int stripe_count)
{
  return rados_striper_impl->setObjectLayoutStripeCount(stripe_count);
}

int libradosstriper::RadosStriper::set_object_layout_object_size
(unsigned int object_size)
{
  return rados_striper_impl->setObjectLayoutObjectSize(object_size);
}
// ---- xattr passthroughs: delegate straight to RadosStriperImpl ----

int libradosstriper::RadosStriper::getxattr(const std::string& oid, const char *name, bufferlist& bl)
{
  return rados_striper_impl->getxattr(oid, name, bl);
}

int libradosstriper::RadosStriper::setxattr(const std::string& oid, const char *name, bufferlist& bl)
{
  return rados_striper_impl->setxattr(oid, name, bl);
}

int libradosstriper::RadosStriper::rmxattr(const std::string& oid, const char *name)
{
  return rados_striper_impl->rmxattr(oid, name);
}

int libradosstriper::RadosStriper::getxattrs(const std::string& oid,
                                             std::map<std::string, bufferlist>& attrset)
{
  return rados_striper_impl->getxattrs(oid, attrset);
}

// ---- data path: synchronous and asynchronous I/O passthroughs ----

int libradosstriper::RadosStriper::write(const std::string& soid,
                                         const bufferlist& bl,
                                         size_t len,
                                         uint64_t off)
{
  return rados_striper_impl->write(soid, bl, len, off);
}

int libradosstriper::RadosStriper::write_full(const std::string& soid,
                                              const bufferlist& bl)
{
  return rados_striper_impl->write_full(soid, bl);
}

int libradosstriper::RadosStriper::append(const std::string& soid,
                                          const bufferlist& bl,
                                          size_t len)
{
  return rados_striper_impl->append(soid, bl, len);
}

// async variants hand the librados completion's impl (c->pc) to the striper
int libradosstriper::RadosStriper::aio_write(const std::string& soid,
                                             librados::AioCompletion *c,
                                             const bufferlist& bl,
                                             size_t len,
                                             uint64_t off)
{
  return rados_striper_impl->aio_write(soid, c->pc, bl, len, off);
}

int libradosstriper::RadosStriper::aio_write_full(const std::string& soid,
                                                  librados::AioCompletion *c,
                                                  const bufferlist& bl)
{
  return rados_striper_impl->aio_write_full(soid, c->pc, bl);
}

int libradosstriper::RadosStriper::aio_append(const std::string& soid,
                                              librados::AioCompletion *c,
                                              const bufferlist& bl,
                                              size_t len)
{
  return rados_striper_impl->aio_append(soid, c->pc, bl, len);
}

int libradosstriper::RadosStriper::read(const std::string& soid,
                                        bufferlist* bl,
                                        size_t len,
                                        uint64_t off)
{
  // pre-allocate a single contiguous buffer of the requested length
  bl->clear();
  bl->push_back(buffer::create(len));
  return rados_striper_impl->read(soid, bl, len, off);
}

int libradosstriper::RadosStriper::aio_read(const std::string& soid,
                                            librados::AioCompletion *c,
                                            bufferlist* bl,
                                            size_t len,
                                            uint64_t off)
{
  // pre-allocate a single contiguous buffer of the requested length
  bl->clear();
  bl->push_back(buffer::create(len));
  return rados_striper_impl->aio_read(soid, c->pc, bl, len, off);
}

int libradosstriper::RadosStriper::stat(const std::string& soid, uint64_t *psize, time_t *pmtime)
{
  return rados_striper_impl->stat(soid, psize, pmtime);
}

int libradosstriper::RadosStriper::aio_stat(const std::string& soid,
                                            librados::AioCompletion *c,
                                            uint64_t *psize,
                                            time_t *pmtime)
{
  return rados_striper_impl->aio_stat(soid, c->pc, psize, pmtime);
}

// stat2 variants report the mtime with timespec (sub-second) resolution
int libradosstriper::RadosStriper::stat2(const std::string& soid, uint64_t *psize, struct timespec *pts)
{
  return rados_striper_impl->stat2(soid, psize, pts);
}

int libradosstriper::RadosStriper::aio_stat2(const std::string& soid,
                                             librados::AioCompletion *c,
                                             uint64_t *psize,
                                             struct timespec *pts)
{
  return rados_striper_impl->aio_stat2(soid, c->pc, psize, pts);
}

int libradosstriper::RadosStriper::remove(const std::string& soid)
{
  return rados_striper_impl->remove(soid);
}

int libradosstriper::RadosStriper::aio_remove(const std::string& soid,
                                              librados::AioCompletion *c)
{
  return rados_striper_impl->aio_remove(soid, c->pc);
}

// flag-taking overloads of remove
int libradosstriper::RadosStriper::remove(const std::string& soid, int flags)
{
  return rados_striper_impl->remove(soid, flags);
}

int libradosstriper::RadosStriper::aio_remove(const std::string& soid,
                                              librados::AioCompletion *c,
                                              int flags)
{
  return rados_striper_impl->aio_remove(soid, c->pc, flags);
}

int libradosstriper::RadosStriper::trunc(const std::string& soid, uint64_t size)
{
  return rados_striper_impl->trunc(soid, size);
}

// Block until all in-flight aio on this striper has completed.
int libradosstriper::RadosStriper::aio_flush()
{
  return rados_striper_impl->aio_flush();
}

// Create a multi-aio completion with no callbacks; caller must release() it.
libradosstriper::MultiAioCompletion* libradosstriper::RadosStriper::multi_aio_create_completion()
{
  MultiAioCompletionImpl *c = new MultiAioCompletionImpl;
  return new MultiAioCompletion(c);
}

// Create a multi-aio completion with optional complete/safe callbacks,
// reusing the C entry point to wire them up; caller must release() it.
libradosstriper::MultiAioCompletion*
libradosstriper::RadosStriper::multi_aio_create_completion(void *cb_arg,
                                                           librados::callback_t cb_complete,
                                                           librados::callback_t cb_safe)
{
  MultiAioCompletionImpl *c;
  int r = rados_striper_multi_aio_create_completion(cb_arg, cb_complete, cb_safe, (void**)&c);
  // the C call above cannot fail (always returns 0)
  ceph_assert(r == 0);
  return new MultiAioCompletion(c);
}
///////////////////////////// C API //////////////////////////////
extern "C" int rados_striper_create(rados_ioctx_t ioctx,
rados_striper_t *striper)
{
librados::IoCtx ctx;
librados::IoCtx::from_rados_ioctx_t(ioctx, ctx);
libradosstriper::RadosStriper striperp;
int rc = libradosstriper::RadosStriper::striper_create(ctx, &striperp);
if (0 == rc)
libradosstriper::RadosStriper::to_rados_striper_t(striperp, striper);
return rc;
}
extern "C" void rados_striper_destroy(rados_striper_t striper)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
impl->put();
}
extern "C" int rados_striper_set_object_layout_stripe_unit(rados_striper_t striper,
unsigned int stripe_unit)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->setObjectLayoutStripeUnit(stripe_unit);
}
extern "C" int rados_striper_set_object_layout_stripe_count(rados_striper_t striper,
unsigned int stripe_count)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->setObjectLayoutStripeCount(stripe_count);
}
extern "C" int rados_striper_set_object_layout_object_size(rados_striper_t striper,
unsigned int object_size)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->setObjectLayoutObjectSize(object_size);
}
extern "C" int rados_striper_write(rados_striper_t striper,
const char *soid,
const char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->write(soid, bl, len, off);
}
extern "C" int rados_striper_write_full(rados_striper_t striper,
const char *soid,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->write_full(soid, bl);
}
extern "C" int rados_striper_append(rados_striper_t striper,
const char *soid,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->append(soid, bl, len);
}
extern "C" int rados_striper_read(rados_striper_t striper,
const char *soid,
char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bufferptr bp = buffer::create_static(len, buf);
bl.push_back(bp);
int ret = impl->read(soid, &bl, len, off);
if (ret >= 0) {
if (bl.length() > len)
return -ERANGE;
if (!bl.is_provided_buffer(buf))
bl.begin().copy(bl.length(), buf);
ret = bl.length(); // hrm :/
}
return ret;
}
extern "C" int rados_striper_remove(rados_striper_t striper, const char* soid)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->remove(soid);
}
extern "C" int rados_striper_trunc(rados_striper_t striper, const char* soid, uint64_t size)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->trunc(soid, size);
}
extern "C" int rados_striper_getxattr(rados_striper_t striper,
const char *oid,
const char *name,
char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
bufferlist bl;
int ret = impl->getxattr(oid, name, bl);
if (ret >= 0) {
if (bl.length() > len)
return -ERANGE;
bl.begin().copy(bl.length(), buf);
ret = bl.length();
}
return ret;
}
extern "C" int rados_striper_setxattr(rados_striper_t striper,
const char *oid,
const char *name,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
bufferlist bl;
bl.append(buf, len);
return impl->setxattr(obj, name, bl);
}
extern "C" int rados_striper_rmxattr(rados_striper_t striper,
const char *oid,
const char *name)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
return impl->rmxattr(obj, name);
}
extern "C" int rados_striper_getxattrs(rados_striper_t striper,
const char *oid,
rados_xattrs_iter_t *iter)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
object_t obj(oid);
librados::RadosXattrsIter *it = new librados::RadosXattrsIter();
if (!it)
return -ENOMEM;
int ret = impl->getxattrs(obj, it->attrset);
if (ret) {
delete it;
return ret;
}
it->i = it->attrset.begin();
*iter = it;
return 0;
}
extern "C" int rados_striper_getxattrs_next(rados_xattrs_iter_t iter,
const char **name,
const char **val,
size_t *len)
{
return rados_getxattrs_next(iter, name, val, len);
}
extern "C" void rados_striper_getxattrs_end(rados_xattrs_iter_t iter)
{
return rados_getxattrs_end(iter);
}
extern "C" int rados_striper_stat(rados_striper_t striper,
const char* soid,
uint64_t *psize,
time_t *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->stat(soid, psize, pmtime);
}
extern "C" int rados_striper_stat2(rados_striper_t striper,
const char* soid,
uint64_t *psize,
struct timespec *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->stat2(soid, psize, pmtime);
}
extern "C" int rados_striper_multi_aio_create_completion(void *cb_arg,
rados_callback_t cb_complete,
rados_callback_t cb_safe,
rados_striper_multi_completion_t *pc)
{
libradosstriper::MultiAioCompletionImpl *c = new libradosstriper::MultiAioCompletionImpl;
if (cb_complete)
c->set_complete_callback(cb_arg, cb_complete);
if (cb_safe)
c->set_safe_callback(cb_arg, cb_safe);
*pc = c;
return 0;
}
extern "C" void rados_striper_multi_aio_wait_for_complete(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_complete();
}
extern "C" void rados_striper_multi_aio_wait_for_safe(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_safe();
}
extern "C" int rados_striper_multi_aio_is_complete(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_complete();
}
extern "C" int rados_striper_multi_aio_is_safe(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_safe();
}
extern "C" void rados_striper_multi_aio_wait_for_complete_and_cb(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_complete_and_cb();
}
extern "C" void rados_striper_multi_aio_wait_for_safe_and_cb(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->wait_for_safe_and_cb();
}
extern "C" int rados_striper_multi_aio_is_complete_and_cb(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_complete_and_cb();
}
extern "C" int rados_striper_multi_aio_is_safe_and_cb(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->is_safe_and_cb();
}
extern "C" int rados_striper_multi_aio_get_return_value(rados_striper_multi_completion_t c)
{
return ((libradosstriper::MultiAioCompletionImpl*)c)->get_return_value();
}
extern "C" void rados_striper_multi_aio_release(rados_striper_multi_completion_t c)
{
((libradosstriper::MultiAioCompletionImpl*)c)->put();
}
extern "C" int rados_striper_aio_write(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
const char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->aio_write(soid, (librados::AioCompletionImpl*)completion, bl, len, off);
}
extern "C" int rados_striper_aio_append(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->aio_append(soid, (librados::AioCompletionImpl*)completion, bl, len);
}
extern "C" int rados_striper_aio_write_full(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
const char *buf,
size_t len)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
bufferlist bl;
bl.append(buf, len);
return impl->aio_write_full(soid, (librados::AioCompletionImpl*)completion, bl);
}
extern "C" int rados_striper_aio_read(rados_striper_t striper,
const char *soid,
rados_completion_t completion,
char *buf,
size_t len,
uint64_t off)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_read(soid, (librados::AioCompletionImpl*)completion, buf, len, off);
}
extern "C" int rados_striper_aio_remove(rados_striper_t striper,
const char* soid,
rados_completion_t completion)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_remove(soid, (librados::AioCompletionImpl*)completion);
}
extern "C" void rados_striper_aio_flush(rados_striper_t striper)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
impl->aio_flush();
}
extern "C" int rados_striper_aio_stat(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
uint64_t *psize,
time_t *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_stat(soid, (librados::AioCompletionImpl*)completion, psize, pmtime);
}
extern "C" int rados_striper_aio_stat2(rados_striper_t striper,
const char* soid,
rados_completion_t completion,
uint64_t *psize,
struct timespec *pmtime)
{
libradosstriper::RadosStriperImpl *impl = (libradosstriper::RadosStriperImpl *)striper;
return impl->aio_stat2(soid, (librados::AioCompletionImpl*)completion, psize, pmtime);
}
| 20,082 | 28.148041 | 104 | cc |
null | ceph-main/src/librbd/AsioEngine.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsioEngine.h"
#include "include/Context.h"
#include "include/neorados/RADOS.hpp"
#include "include/rados/librados.hpp"
#include "common/dout.h"
#include "librbd/asio/ContextWQ.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::AsioEngine: " \
<< this << " " << __func__ << ": "
namespace librbd {
// Primary constructor: wraps an existing librados client in a neorados
// instance and reuses its asio io_context for all librbd async work.
AsioEngine::AsioEngine(std::shared_ptr<librados::Rados> rados)
  : m_rados_api(std::make_shared<neorados::RADOS>(
      neorados::RADOS::make_with_librados(*rados))),
    m_cct(m_rados_api->cct()),
    m_io_context(m_rados_api->get_io_context()),
    m_api_strand(std::make_unique<boost::asio::io_context::strand>(
      m_io_context)),
    m_context_wq(std::make_unique<asio::ContextWQ>(m_cct, m_io_context)) {
  ldout(m_cct, 20) << dendl;

  auto rados_threads = m_cct->_conf.get_val<uint64_t>("librados_thread_count");
  auto rbd_threads = m_cct->_conf.get_val<uint64_t>("rbd_op_threads");
  if (rbd_threads > rados_threads) {
    // inherit the librados thread count -- but increase it if librbd wants to
    // utilize more threads
    m_cct->_conf.set_val_or_die("librados_thread_count",
                                std::to_string(rbd_threads));
    m_cct->_conf.apply_changes(nullptr);
  }
}

// Convenience constructor: builds a Rados client from an IoCtx and
// delegates to the primary constructor.
AsioEngine::AsioEngine(librados::IoCtx& io_ctx)
  : AsioEngine(std::make_shared<librados::Rados>(io_ctx)) {
}

AsioEngine::~AsioEngine() {
  ldout(m_cct, 20) << dendl;
  // tear down the strand before the remaining members
  m_api_strand.reset();
}

// Run the Context inline if safe, otherwise queue it (asio dispatch
// semantics).
void AsioEngine::dispatch(Context* ctx, int r) {
  dispatch([ctx, r]() { ctx->complete(r); });
}

// Always queue the Context for later execution (asio post semantics).
void AsioEngine::post(Context* ctx, int r) {
  post([ctx, r]() { ctx->complete(r); });
}
} // namespace librbd
| 1,798 | 30.561404 | 79 | cc |
null | ceph-main/src/librbd/AsioEngine.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASIO_ENGINE_H
#define CEPH_LIBRBD_ASIO_ENGINE_H
#include "include/common_fwd.h"
#include "include/rados/librados_fwd.hpp"
#include <memory>
#include <boost/asio/dispatch.hpp>
#include <boost/asio/io_context.hpp>
#include <boost/asio/io_context_strand.hpp>
#include <boost/asio/post.hpp>
struct Context;
namespace neorados { struct RADOS; }
namespace librbd {
namespace asio { struct ContextWQ; }
// Owns the shared asio execution machinery used by librbd: a neorados
// client, its io_context, a strand that serializes API client callbacks,
// and a Context-based work queue. Non-copyable and non-movable.
class AsioEngine {
public:
  explicit AsioEngine(std::shared_ptr<librados::Rados> rados);
  explicit AsioEngine(librados::IoCtx& io_ctx);
  ~AsioEngine();

  AsioEngine(AsioEngine&&) = delete;
  AsioEngine(const AsioEngine&) = delete;
  AsioEngine& operator=(const AsioEngine&) = delete;

  // underlying neorados client
  inline neorados::RADOS& get_rados_api() {
    return *m_rados_api;
  }

  // the io_context shared with the rados client
  inline boost::asio::io_context& get_io_context() {
    return m_io_context;
  }
  inline operator boost::asio::io_context&() {
    return m_io_context;
  }

  using executor_type = boost::asio::io_context::executor_type;
  inline executor_type get_executor() {
    return m_io_context.get_executor();
  }

  inline boost::asio::io_context::strand& get_api_strand() {
    // API client callbacks should never fire concurrently
    return *m_api_strand;
  }

  inline asio::ContextWQ* get_work_queue() {
    return m_context_wq.get();
  }

  // asio dispatch: may run the handler inline on the calling thread
  template <typename T>
  void dispatch(T&& t) {
    boost::asio::dispatch(m_io_context, std::forward<T>(t));
  }
  void dispatch(Context* ctx, int r);

  // asio post: always defers the handler to the io_context
  template <typename T>
  void post(T&& t) {
    boost::asio::post(m_io_context, std::forward<T>(t));
  }
  void post(Context* ctx, int r);

private:
  std::shared_ptr<neorados::RADOS> m_rados_api;
  CephContext* m_cct;
  // references the io_context owned by m_rados_api
  boost::asio::io_context& m_io_context;
  std::unique_ptr<boost::asio::io_context::strand> m_api_strand;
  std::unique_ptr<asio::ContextWQ> m_context_wq;
};
} // namespace librbd
#endif // CEPH_LIBRBD_ASIO_ENGINE_H
| 2,015 | 23.888889 | 70 | h |
null | ceph-main/src/librbd/AsyncObjectThrottle.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsyncObjectThrottle.h"
#include "librbd/AsyncRequest.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
namespace librbd
{
// Construct a throttle that walks objects [object_no, end_object_no) and
// issues per-object sub-requests produced by context_factory, completing
// ctx when done; prog_ctx (optional) receives per-object progress updates.
template <typename T>
AsyncObjectThrottle<T>::AsyncObjectThrottle(
    const AsyncRequest<T>* async_request, T &image_ctx,
    const ContextFactory& context_factory, Context *ctx,
    ProgressContext *prog_ctx, uint64_t object_no, uint64_t end_object_no)
  : m_lock(ceph::make_mutex(
      util::unique_lock_name("librbd::AsyncThrottle::m_lock", this))),
    m_async_request(async_request), m_image_ctx(image_ctx),
    m_context_factory(context_factory), m_ctx(ctx), m_prog_ctx(prog_ctx),
    m_object_no(object_no), m_end_object_no(end_object_no), m_current_ops(0),
    m_ret(0)
{
}
// Kick off up to max_concurrent per-object ops. Caller must hold
// image_ctx.owner_lock. If everything completed (or failed) synchronously,
// the finish context is queued to the op work queue (avoiding re-entrant
// callbacks) and the throttle deletes itself.
template <typename T>
void AsyncObjectThrottle<T>::start_ops(uint64_t max_concurrent) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  bool complete;
  {
    std::lock_guard l{m_lock};
    for (uint64_t i = 0; i < max_concurrent; ++i) {
      start_next_op();
      if (m_ret < 0 && m_current_ops == 0) {
        break;
      }
    }
    complete = (m_current_ops == 0);
  }
  if (complete) {
    // avoid re-entrant callback
    m_image_ctx.op_work_queue->queue(m_ctx, m_ret);
    delete this;
  }
}

// Completion callback for a single per-object op: record the first error
// (-ENOENT is treated as success), start a replacement op, and finish the
// whole throttle (self-deleting) once no ops remain in flight.
template <typename T>
void AsyncObjectThrottle<T>::finish_op(int r) {
  bool complete;
  {
    std::shared_lock owner_locker{m_image_ctx.owner_lock};
    std::lock_guard locker{m_lock};
    --m_current_ops;
    if (r < 0 && r != -ENOENT && m_ret == 0) {
      m_ret = r;
    }

    start_next_op();
    complete = (m_current_ops == 0);
  }
  if (complete) {
    m_ctx->complete(m_ret);
    delete this;
  }
}

// Launch the op for the next object number (m_lock must be held by the
// caller). Loops past ops that complete synchronously (send() > 0); stops
// when an op is actually in flight, the range is exhausted, an error was
// recorded, or the parent request was canceled (recorded as -ERESTART).
template <typename T>
void AsyncObjectThrottle<T>::start_next_op() {
  bool done = false;
  while (!done) {
    if (m_async_request != NULL && m_async_request->is_canceled() &&
        m_ret == 0) {
      // allow in-flight ops to complete, but don't start new ops
      m_ret = -ERESTART;
      return;
    } else if (m_ret != 0 || m_object_no >= m_end_object_no) {
      return;
    }

    uint64_t ono = m_object_no++;
    C_AsyncObjectThrottle<T> *ctx = m_context_factory(*this, ono);

    int r = ctx->send();
    if (r < 0) {
      m_ret = r;
      delete ctx;
      return;
    } else if (r > 0) {
      // op completed immediately
      delete ctx;
    } else {
      ++m_current_ops;
      done = true;
    }

    if (m_prog_ctx != NULL) {
      r = m_prog_ctx->update_progress(ono, m_end_object_no);
      if (r < 0) {
        m_ret = r;
      }
    }
  }
}
} // namespace librbd
#ifndef TEST_F
template class librbd::AsyncObjectThrottle<librbd::ImageCtx>;
#endif
| 2,763 | 24.357798 | 77 | cc |
null | ceph-main/src/librbd/AsyncObjectThrottle.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
#define CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
#include "include/int_types.h"
#include "include/Context.h"
#include <boost/function.hpp>
namespace librbd
{
template <typename ImageCtxT> class AsyncRequest;
class ProgressContext;
struct ImageCtx;
// Interface through which per-object ops report their completion back to
// the owning throttle.
class AsyncObjectThrottleFinisher {
public:
  virtual ~AsyncObjectThrottleFinisher() {};
  virtual void finish_op(int r) = 0;
};

// Base class for a single per-object operation managed by the throttle.
// send() starts the op; its completion (via Context::finish) is routed to
// the finisher.
template <typename ImageCtxT = ImageCtx>
class C_AsyncObjectThrottle : public Context {
public:
  C_AsyncObjectThrottle(AsyncObjectThrottleFinisher &finisher,
                        ImageCtxT &image_ctx)
    : m_image_ctx(image_ctx), m_finisher(finisher) {
  }

  // start the op; return value semantics are interpreted by the throttle
  // (see AsyncObjectThrottle<T>::start_next_op in the .cc)
  virtual int send() = 0;

protected:
  ImageCtxT &m_image_ctx;

  void finish(int r) override {
    m_finisher.finish_op(r);
  }

private:
  AsyncObjectThrottleFinisher &m_finisher;
};

// Fans a per-object operation out over an object range with bounded
// concurrency; see AsyncObjectThrottle.cc for the lifecycle (the instance
// deletes itself on completion).
template <typename ImageCtxT = ImageCtx>
class AsyncObjectThrottle : public AsyncObjectThrottleFinisher {
public:
  // builds the op for a given object number
  typedef boost::function<
    C_AsyncObjectThrottle<ImageCtxT>* (AsyncObjectThrottle&,
                                       uint64_t)> ContextFactory;

  AsyncObjectThrottle(const AsyncRequest<ImageCtxT> *async_request,
                      ImageCtxT &image_ctx,
                      const ContextFactory& context_factory, Context *ctx,
                      ProgressContext *prog_ctx, uint64_t object_no,
                      uint64_t end_object_no);

  void start_ops(uint64_t max_concurrent);
  void finish_op(int r) override;

private:
  ceph::mutex m_lock;
  const AsyncRequest<ImageCtxT> *m_async_request;
  ImageCtxT &m_image_ctx;
  ContextFactory m_context_factory;
  Context *m_ctx;                 // completed with the final result
  ProgressContext *m_prog_ctx;    // optional progress reporting
  uint64_t m_object_no;           // next object to dispatch
  uint64_t m_end_object_no;       // one past the last object
  uint64_t m_current_ops;         // ops currently in flight
  int m_ret;                      // first recorded error (0 == ok)

  void start_next_op();
};
} // namespace librbd
extern template class librbd::AsyncObjectThrottle<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_ASYNC_OBJECT_THROTTLE_H
| 2,043 | 24.55 | 74 | h |
null | ceph-main/src/librbd/AsyncRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/AsyncRequest.h"
#include "librbd/ImageCtx.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
namespace librbd
{
// Construct and immediately register the request on the image context's
// async request list (see start_request()).
template <typename T>
AsyncRequest<T>::AsyncRequest(T &image_ctx, Context *on_finish)
  : m_image_ctx(image_ctx), m_on_finish(on_finish), m_canceled(false),
    m_xlist_item(this) {
  ceph_assert(m_on_finish != NULL);
  start_request();
}

template <typename T>
AsyncRequest<T>::~AsyncRequest() {
}

// Complete the request from the op work queue instead of the current
// thread (avoids re-entrant callbacks).
template <typename T>
void AsyncRequest<T>::async_complete(int r) {
  m_image_ctx.op_work_queue->queue(create_callback_context(), r);
}

// rados completion that routes into this->complete()
template <typename T>
librados::AioCompletion *AsyncRequest<T>::create_callback_completion() {
  return util::create_rados_callback(this);
}

// Context that routes into this->complete()
template <typename T>
Context *AsyncRequest<T>::create_callback_context() {
  return util::create_context_callback(this);
}

// Context that routes into this->async_complete() (deferred completion)
template <typename T>
Context *AsyncRequest<T>::create_async_callback_context() {
  return util::create_context_callback<AsyncRequest<T>,
                                       &AsyncRequest<T>::async_complete>(this);
}

// Track this request on the image context so shutdown can wait for it.
template <typename T>
void AsyncRequest<T>::start_request() {
  std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
  m_image_ctx.async_requests.push_back(&m_xlist_item);
}

// Deregister from the image context; if this was the last outstanding
// request, wake up anyone waiting for the list to drain.
template <typename T>
void AsyncRequest<T>::finish_request() {
  decltype(m_image_ctx.async_requests_waiters) waiters;
  {
    std::lock_guard async_ops_locker{m_image_ctx.async_ops_lock};
    ceph_assert(m_xlist_item.remove_myself());

    if (m_image_ctx.async_requests.empty()) {
      waiters = std::move(m_image_ctx.async_requests_waiters);
    }
  }

  for (auto ctx : waiters) {
    ctx->complete(0);
  }
}
} // namespace librbd
#ifndef TEST_F
template class librbd::AsyncRequest<librbd::ImageCtx>;
#endif
| 1,866 | 24.930556 | 79 | cc |
null | ceph-main/src/librbd/AsyncRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_ASYNC_REQUEST_H
#define CEPH_LIBRBD_ASYNC_REQUEST_H
#include "include/Context.h"
#include "include/rados/librados.hpp"
#include "include/xlist.h"
#include "include/compat.h"
namespace librbd {
class ImageCtx;
template <typename ImageCtxT = ImageCtx>
class AsyncRequest
{
public:
AsyncRequest(ImageCtxT &image_ctx, Context *on_finish);
virtual ~AsyncRequest();
void complete(int r) {
if (should_complete(r)) {
r = filter_return_code(r);
finish_and_destroy(r);
}
}
virtual void send() = 0;
inline bool is_canceled() const {
return m_canceled;
}
inline void cancel() {
m_canceled = true;
}
protected:
ImageCtxT &m_image_ctx;
librados::AioCompletion *create_callback_completion();
Context *create_callback_context();
Context *create_async_callback_context();
void async_complete(int r);
virtual bool should_complete(int r) = 0;
virtual int filter_return_code(int r) const {
return r;
}
// NOTE: temporary until converted to new state machine format
virtual void finish_and_destroy(int r) {
finish(r);
delete this;
}
virtual void finish(int r) {
finish_request();
m_on_finish->complete(r);
}
private:
Context *m_on_finish;
bool m_canceled;
typename xlist<AsyncRequest<ImageCtxT> *>::item m_xlist_item;
void start_request();
void finish_request();
};
} // namespace librbd
extern template class librbd::AsyncRequest<librbd::ImageCtx>;
#endif //CEPH_LIBRBD_ASYNC_REQUEST_H
| 1,604 | 19.844156 | 70 | h |
null | ceph-main/src/librbd/BlockGuard.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_IO_BLOCK_GUARD_H
#define CEPH_LIBRBD_IO_BLOCK_GUARD_H
#include "include/int_types.h"
#include "common/dout.h"
#include "common/ceph_mutex.h"
#include <boost/intrusive/list.hpp>
#include <boost/intrusive/set.hpp>
#include <deque>
#include <list>
#include "include/ceph_assert.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::BlockGuard: " << this << " " \
<< __func__ << ": "
namespace librbd {
// Half-open block range [block_start, block_end).
struct BlockExtent {
  uint64_t block_start = 0;
  uint64_t block_end = 0;

  BlockExtent() = default;
  BlockExtent(uint64_t start, uint64_t end)
    : block_start(start), block_end(end) {
  }

  // Render as "[block_start=S, block_end=E]" for log output.
  friend std::ostream& operator<<(std::ostream& os, const BlockExtent& be) {
    return os << "[block_start=" << be.block_start
              << ", block_end=" << be.block_end << "]";
  }
};
// Opaque token returned by BlockGuard::detain(); internally it is a
// reinterpret_cast of the guard's DetainedBlockExtent.
struct BlockGuardCell {
};

/**
 * Helper class to restrict and order concurrent IO to the same block. The
 * definition of a block is dependent upon the user of this class. It might
 * represent a backing object, 512 byte sectors, etc.
 */
template <typename BlockOperation>
class BlockGuard {
private:
  struct DetainedBlockExtent;

public:
  typedef std::list<BlockOperation> BlockOperations;

  BlockGuard(CephContext *cct)
    : m_cct(cct) {
  }

  BlockGuard(const BlockGuard&) = delete;
  BlockGuard &operator=(const BlockGuard&) = delete;

  /**
   * Detain future IO for a range of blocks. the guard will keep
   * ownership of the provided operation if the operation is blocked.
   * @return 0 upon success and IO can be issued
   *         >0 if the IO is blocked,
   *         <0 upon error
   */
  int detain(const BlockExtent &block_extent, BlockOperation *block_operation,
             BlockGuardCell **cell) {
    std::lock_guard locker{m_lock};
    ldout(m_cct, 20) << block_extent
                     << ", free_slots="
                     << m_free_detained_block_extents.size()
                     << dendl;

    DetainedBlockExtent *detained_block_extent;
    // NOTE: the set's comparator treats *overlapping* extents as equivalent,
    // so find() locates any already-detained extent that intersects ours
    auto it = m_detained_block_extents.find(block_extent);
    if (it != m_detained_block_extents.end()) {
      // request against an already detained block
      detained_block_extent = &(*it);
      if (block_operation != nullptr) {
        detained_block_extent->block_operations.emplace_back(
          std::move(*block_operation));
      }

      // alert the caller that the IO was detained
      *cell = nullptr;
      return detained_block_extent->block_operations.size();
    } else {
      // reuse a recycled slot if possible; otherwise grow the pool
      // (deque never shrinks, so element addresses stay stable for the
      // intrusive hooks)
      if (!m_free_detained_block_extents.empty()) {
        detained_block_extent = &m_free_detained_block_extents.front();
        detained_block_extent->block_operations.clear();
        m_free_detained_block_extents.pop_front();
      } else {
        ldout(m_cct, 20) << "no free detained block cells" << dendl;
        m_detained_block_extent_pool.emplace_back();
        detained_block_extent = &m_detained_block_extent_pool.back();
      }

      detained_block_extent->block_extent = block_extent;
      m_detained_block_extents.insert(*detained_block_extent);
      *cell = reinterpret_cast<BlockGuardCell*>(detained_block_extent);
      return 0;
    }
  }

  /**
   * Release any detained IO operations from the provided cell.
   * Moves the queued operations out to the caller and recycles the slot.
   */
  void release(BlockGuardCell *cell, BlockOperations *block_operations) {
    std::lock_guard locker{m_lock};

    ceph_assert(cell != nullptr);
    auto &detained_block_extent = reinterpret_cast<DetainedBlockExtent &>(
      *cell);
    ldout(m_cct, 20) << detained_block_extent.block_extent
                     << ", pending_ops="
                     << detained_block_extent.block_operations.size()
                     << dendl;

    *block_operations = std::move(detained_block_extent.block_operations);
    m_detained_block_extents.erase(detained_block_extent.block_extent);
    m_free_detained_block_extents.push_back(detained_block_extent);
  }

private:
  // pool-owned node carrying intrusive hooks for both the free list and
  // the detained set
  struct DetainedBlockExtent : public boost::intrusive::list_base_hook<>,
                               public boost::intrusive::set_base_hook<> {
    BlockExtent block_extent;
    BlockOperations block_operations;
  };

  // key extractor for the intrusive set
  struct DetainedBlockExtentKey {
    typedef BlockExtent type;
    const BlockExtent &operator()(const DetainedBlockExtent &value) {
      return value.block_extent;
    }
  };

  // lhs < rhs only when lhs ends at/before rhs starts; overlapping extents
  // compare equivalent, which is what makes find() detect intersections
  struct DetainedBlockExtentCompare {
    bool operator()(const BlockExtent &lhs,
                    const BlockExtent &rhs) const {
      // check for range overlap (lhs < rhs)
      if (lhs.block_end <= rhs.block_start) {
        return true;
      }
      return false;
    }
  };

  typedef std::deque<DetainedBlockExtent> DetainedBlockExtentsPool;
  typedef boost::intrusive::list<DetainedBlockExtent> DetainedBlockExtents;
  typedef boost::intrusive::set<
    DetainedBlockExtent,
    boost::intrusive::compare<DetainedBlockExtentCompare>,
    boost::intrusive::key_of_value<DetainedBlockExtentKey> >
      BlockExtentToDetainedBlockExtents;

  CephContext *m_cct;
  ceph::mutex m_lock = ceph::make_mutex("librbd::BlockGuard::m_lock");
  DetainedBlockExtentsPool m_detained_block_extent_pool;  // owns all nodes
  DetainedBlockExtents m_free_detained_block_extents;     // recycled nodes
  BlockExtentToDetainedBlockExtents m_detained_block_extents;  // active
};
} // namespace librbd
#undef dout_subsys
#undef dout_prefix
#define dout_prefix *_dout
#endif // CEPH_LIBRBD_IO_BLOCK_GUARD_H
| 5,596 | 30.44382 | 87 | h |
null | ceph-main/src/librbd/ConfigWatcher.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/ConfigWatcher.h"
#include "common/config_obs.h"
#include "common/dout.h"
#include "common/errno.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageState.h"
#include "librbd/api/Config.h"
#include <deque>
#include <string>
#include <vector>
#include <boost/algorithm/string/predicate.hpp>
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ConfigWatcher: " \
<< __func__ << ": "
namespace librbd {
template <typename I>
struct ConfigWatcher<I>::Observer : public md_config_obs_t {
ConfigWatcher<I>* m_config_watcher;
std::deque<std::string> m_config_key_strs;
mutable std::vector<const char*> m_config_keys;
Observer(CephContext* cct, ConfigWatcher<I>* config_watcher)
: m_config_watcher(config_watcher) {
const std::string rbd_key_prefix("rbd_");
auto& schema = cct->_conf.get_schema();
for (auto& pair : schema) {
// watch all "rbd_" keys for simplicity
if (!boost::starts_with(pair.first, rbd_key_prefix)) {
continue;
}
m_config_key_strs.emplace_back(pair.first);
}
m_config_keys.reserve(m_config_key_strs.size());
for (auto& key : m_config_key_strs) {
m_config_keys.emplace_back(key.c_str());
}
m_config_keys.emplace_back(nullptr);
}
const char** get_tracked_conf_keys() const override {
ceph_assert(!m_config_keys.empty());
return &m_config_keys[0];
}
void handle_conf_change(const ConfigProxy& conf,
const std::set <std::string> &changed) override {
m_config_watcher->handle_global_config_change(changed);
}
};
template <typename I>
ConfigWatcher<I>::ConfigWatcher(I& image_ctx)
  : m_image_ctx(image_ctx) {
}

template <typename I>
ConfigWatcher<I>::~ConfigWatcher() {
  // shut_down() must have been called before destruction
  ceph_assert(m_observer == nullptr);
}

// Register the config observer so global "rbd_" key changes are tracked.
template <typename I>
void ConfigWatcher<I>::init() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  m_observer = new Observer(cct, this);
  cct->_conf.add_observer(m_observer);
}

// Deregister and destroy the config observer.
template <typename I>
void ConfigWatcher<I>::shut_down() {
  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << dendl;

  ceph_assert(m_observer != nullptr);
  cct->_conf.remove_observer(m_observer);

  delete m_observer;
  m_observer = nullptr;
}
// Invoked by the Observer when tracked global config keys change; triggers
// an image refresh unless every changed key is locally overridden.
template <typename I>
void ConfigWatcher<I>::handle_global_config_change(
    std::set<std::string> changed_keys) {

  {
    // ignore any global changes that are being overridden
    std::shared_lock image_locker{m_image_ctx.image_lock};
    for (auto& key : m_image_ctx.config_overrides) {
      changed_keys.erase(key);
    }
  }
  if (changed_keys.empty()) {
    return;
  }

  auto cct = m_image_ctx.cct;
  ldout(cct, 10) << "changed_keys=" << changed_keys << dendl;

  // refresh the image to pick up any global config overrides
  m_image_ctx.state->handle_update_notification();
}
} // namespace librbd
template class librbd::ConfigWatcher<librbd::ImageCtx>;
| 3,053 | 25.102564 | 75 | cc |
null | ceph-main/src/librbd/ConfigWatcher.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_CONFIG_WATCHER_H
#define CEPH_LIBRBD_CONFIG_WATCHER_H
#include <set>
#include <string>
struct Context;
namespace librbd {
struct ImageCtx;
// Watches global "rbd_"-prefixed configuration keys on behalf of an open
// image and requests an image refresh when an unoverridden key changes.
template <typename ImageCtxT>
class ConfigWatcher {
public:
  static ConfigWatcher* create(ImageCtxT& image_ctx) {
    return new ConfigWatcher(image_ctx);
  }

  ConfigWatcher(ImageCtxT& image_ctx);
  ~ConfigWatcher();

  ConfigWatcher(const ConfigWatcher&) = delete;
  ConfigWatcher& operator=(const ConfigWatcher&) = delete;

  // init() registers the observer; shut_down() must be called before
  // destruction to deregister it
  void init();
  void shut_down();

private:
  struct Observer;

  ImageCtxT& m_image_ctx;
  Observer* m_observer = nullptr;

  void handle_global_config_change(std::set<std::string> changed);
};
} // namespace librbd
extern template class librbd::ConfigWatcher<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_CONFIG_WATCHER_H
| 917 | 18.125 | 70 | h |
null | ceph-main/src/librbd/DeepCopyRequest.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "DeepCopyRequest.h"
#include "common/errno.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ObjectMap.h"
#include "librbd/Utils.h"
#include "librbd/deep_copy/ImageCopyRequest.h"
#include "librbd/deep_copy/MetadataCopyRequest.h"
#include "librbd/deep_copy/SnapshotCopyRequest.h"
#include "librbd/internal.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::DeepCopyRequest: " \
<< this << " " << __func__ << ": "
namespace librbd {
using namespace librbd::deep_copy;
using librbd::util::create_context_callback;
using librbd::util::create_rados_callback;
using librbd::util::unique_lock_name;
template <typename I>
DeepCopyRequest<I>::DeepCopyRequest(I *src_image_ctx, I *dst_image_ctx,
                                    librados::snap_t src_snap_id_start,
                                    librados::snap_t src_snap_id_end,
                                    librados::snap_t dst_snap_id_start,
                                    bool flatten,
                                    const ObjectNumber &object_number,
                                    asio::ContextWQ *work_queue,
                                    SnapSeqs *snap_seqs,
                                    deep_copy::Handler *handler,
                                    Context *on_finish)
  : RefCountedObject(dst_image_ctx->cct), m_src_image_ctx(src_image_ctx),
    m_dst_image_ctx(dst_image_ctx), m_src_snap_id_start(src_snap_id_start),
    m_src_snap_id_end(src_snap_id_end), m_dst_snap_id_start(dst_snap_id_start),
    m_flatten(flatten), m_object_number(object_number),
    m_work_queue(work_queue), m_snap_seqs(snap_seqs), m_handler(handler),
    m_on_finish(on_finish), m_cct(dst_image_ctx->cct),
    m_lock(ceph::make_mutex(unique_lock_name("DeepCopyRequest::m_lock", this))) {
}

template <typename I>
DeepCopyRequest<I>::~DeepCopyRequest() {
  // each sub-request reference is released in its completion handler
  ceph_assert(m_snapshot_copy_request == nullptr);
  ceph_assert(m_image_copy_request == nullptr);
}
// Kick off the deep-copy state machine: verify both images have a usable
// data pool and that the copy points reference valid source snapshots,
// then start the snapshot-copy step.
template <typename I>
void DeepCopyRequest<I>::send() {
  int r = 0;
  if (!m_src_image_ctx->data_ctx.is_valid()) {
    lderr(m_cct) << "missing data pool for source image" << dendl;
    r = -ENODEV;
  } else if (!m_dst_image_ctx->data_ctx.is_valid()) {
    lderr(m_cct) << "missing data pool for destination image" << dendl;
    r = -ENODEV;
  } else {
    r = validate_copy_points();
  }

  if (r < 0) {
    finish(r);
    return;
  }

  send_copy_snapshots();
}
// Flag the request as canceled and propagate the cancellation to any
// in-flight sub-request; the corresponding completion handler will then
// finish with -ECANCELED.
template <typename I>
void DeepCopyRequest<I>::cancel() {
  std::lock_guard locker{m_lock};

  ldout(m_cct, 20) << dendl;

  m_canceled = true;
  if (auto *snap_req = m_snapshot_copy_request; snap_req != nullptr) {
    snap_req->cancel();
  }
  if (auto *image_req = m_image_copy_request; image_req != nullptr) {
    image_req->cancel();
  }
}
// Copy the snapshot metadata (names, protection status, parent links) from
// the source image to the destination image.
template <typename I>
void DeepCopyRequest<I>::send_copy_snapshots() {
  m_lock.lock();
  if (m_canceled) {
    m_lock.unlock();
    finish(-ECANCELED);
    return;
  }

  ldout(m_cct, 20) << dendl;

  Context *ctx = create_context_callback<
    DeepCopyRequest<I>, &DeepCopyRequest<I>::handle_copy_snapshots>(this);
  m_snapshot_copy_request = SnapshotCopyRequest<I>::create(
    m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_src_snap_id_end,
    m_dst_snap_id_start, m_flatten, m_work_queue, m_snap_seqs, ctx);
  // extra reference keeps the request alive for a concurrent cancel()
  m_snapshot_copy_request->get();
  m_lock.unlock();

  // dispatched after dropping m_lock
  m_snapshot_copy_request->send();
}

template <typename I>
void DeepCopyRequest<I>::handle_copy_snapshots(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  {
    std::lock_guard locker{m_lock};
    m_snapshot_copy_request->put();
    m_snapshot_copy_request = nullptr;
    // a cancel() racing with success is surfaced as -ECANCELED
    if (r == 0 && m_canceled) {
      r = -ECANCELED;
    }
  }

  if (r == -ECANCELED) {
    ldout(m_cct, 10) << "snapshot copy canceled" << dendl;
    finish(r);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to copy snapshot metadata: " << cpp_strerror(r)
                 << dendl;
    finish(r);
    return;
  }

  if (m_src_snap_id_end == CEPH_NOSNAP) {
    // copying through HEAD: map the source HEAD revision onto the
    // destination HEAD revision
    (*m_snap_seqs)[CEPH_NOSNAP] = CEPH_NOSNAP;
  }

  send_copy_image();
}
// Copy the actual image data objects between the two images.
template <typename I>
void DeepCopyRequest<I>::send_copy_image() {
  m_lock.lock();
  if (m_canceled) {
    m_lock.unlock();
    finish(-ECANCELED);
    return;
  }

  ldout(m_cct, 20) << dendl;

  Context *ctx = create_context_callback<
    DeepCopyRequest<I>, &DeepCopyRequest<I>::handle_copy_image>(this);
  m_image_copy_request = ImageCopyRequest<I>::create(
    m_src_image_ctx, m_dst_image_ctx, m_src_snap_id_start, m_src_snap_id_end,
    m_dst_snap_id_start, m_flatten, m_object_number, *m_snap_seqs, m_handler,
    ctx);
  // extra reference keeps the request alive for a concurrent cancel()
  m_image_copy_request->get();
  m_lock.unlock();

  m_image_copy_request->send();
}

template <typename I>
void DeepCopyRequest<I>::handle_copy_image(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  {
    std::lock_guard locker{m_lock};
    m_image_copy_request->put();
    m_image_copy_request = nullptr;
    // a cancel() racing with success is surfaced as -ECANCELED
    if (r == 0 && m_canceled) {
      r = -ECANCELED;
    }
  }

  if (r == -ECANCELED) {
    ldout(m_cct, 10) << "image copy canceled" << dendl;
    finish(r);
    return;
  } else if (r < 0) {
    lderr(m_cct) << "failed to copy image: " << cpp_strerror(r) << dendl;
    finish(r);
    return;
  }

  send_copy_object_map();
}
// Roll the destination HEAD object map back to the object map of the final
// copied snapshot. Skipped entirely when the object-map feature is
// disabled; skipped in favor of a plain refresh when copying through HEAD.
template <typename I>
void DeepCopyRequest<I>::send_copy_object_map() {
  m_dst_image_ctx->owner_lock.lock_shared();
  m_dst_image_ctx->image_lock.lock_shared();

  if (!m_dst_image_ctx->test_features(RBD_FEATURE_OBJECT_MAP,
                                      m_dst_image_ctx->image_lock)) {
    m_dst_image_ctx->image_lock.unlock_shared();
    m_dst_image_ctx->owner_lock.unlock_shared();
    send_copy_metadata();
    return;
  }
  if (m_src_snap_id_end == CEPH_NOSNAP) {
    m_dst_image_ctx->image_lock.unlock_shared();
    m_dst_image_ctx->owner_lock.unlock_shared();
    send_refresh_object_map();
    return;
  }

  ceph_assert(m_dst_image_ctx->object_map != nullptr);

  ldout(m_cct, 20) << dendl;

  Context *finish_op_ctx = nullptr;
  // initialize so finish(r) reports a sane error even when the exclusive
  // lock pointer is null and start_op() is never invoked (previously this
  // read an uninitialized value)
  int r = -EROFS;
  if (m_dst_image_ctx->exclusive_lock != nullptr) {
    finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r);
  }
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    m_dst_image_ctx->image_lock.unlock_shared();
    m_dst_image_ctx->owner_lock.unlock_shared();
    finish(r);
    return;
  }

  // rollback the object map (copy snapshot object map to HEAD)
  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_copy_object_map(r);
      finish_op_ctx->complete(0);
    });
  ceph_assert(m_snap_seqs->count(m_src_snap_id_end) > 0);
  librados::snap_t copy_snap_id = (*m_snap_seqs)[m_src_snap_id_end];
  m_dst_image_ctx->object_map->rollback(copy_snap_id, ctx);
  m_dst_image_ctx->image_lock.unlock_shared();
  m_dst_image_ctx->owner_lock.unlock_shared();
}
// Completion handler for the object-map rollback.
template <typename I>
void DeepCopyRequest<I>::handle_copy_object_map(int r) {
  ldout(m_cct, 20) << dendl;

  if (r >= 0) {
    // rollback succeeded -- reload the now up-to-date object map
    send_refresh_object_map();
    return;
  }

  lderr(m_cct) << "failed to roll back object map: " << cpp_strerror(r)
               << dendl;
  finish(r);
}
// Open a fresh HEAD object map for the destination image so in-memory
// state matches what was just copied.
template <typename I>
void DeepCopyRequest<I>::send_refresh_object_map() {
  // initialize so finish(r) reports a sane error even when the exclusive
  // lock pointer is null and start_op() is never invoked (previously this
  // read an uninitialized value)
  int r = -EROFS;
  Context *finish_op_ctx = nullptr;
  {
    std::shared_lock owner_locker{m_dst_image_ctx->owner_lock};
    if (m_dst_image_ctx->exclusive_lock != nullptr) {
      finish_op_ctx = m_dst_image_ctx->exclusive_lock->start_op(&r);
    }
  }
  if (finish_op_ctx == nullptr) {
    lderr(m_cct) << "lost exclusive lock" << dendl;
    finish(r);
    return;
  }

  ldout(m_cct, 20) << dendl;

  auto ctx = new LambdaContext([this, finish_op_ctx](int r) {
      handle_refresh_object_map(r);
      finish_op_ctx->complete(0);
    });
  m_object_map = m_dst_image_ctx->create_object_map(CEPH_NOSNAP);
  m_object_map->open(ctx);
}
template <typename I>
void DeepCopyRequest<I>::handle_refresh_object_map(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r < 0) {
    lderr(m_cct) << "failed to open object map: " << cpp_strerror(r)
                 << dendl;
    // NOTE(review): the failed map is deleted directly here while the
    // success path releases via put() -- presumably safe because no other
    // reference was taken; confirm against ObjectMap ref counting
    delete m_object_map;

    finish(r);
    return;
  }

  {
    std::unique_lock image_locker{m_dst_image_ctx->image_lock};
    // install the freshly opened map; m_object_map now holds the old one
    std::swap(m_dst_image_ctx->object_map, m_object_map);
  }
  // release the previous object map reference
  m_object_map->put();

  send_copy_metadata();
}
// Copy the image-meta key/value pairs from the source to the destination.
template <typename I>
void DeepCopyRequest<I>::send_copy_metadata() {
  ldout(m_cct, 20) << dendl;

  Context *ctx = create_context_callback<
    DeepCopyRequest<I>, &DeepCopyRequest<I>::handle_copy_metadata>(this);
  MetadataCopyRequest<I>::create(m_src_image_ctx, m_dst_image_ctx,
                                 ctx)->send();
}
// Completion handler for the metadata copy -- the final deep-copy step.
template <typename I>
void DeepCopyRequest<I>::handle_copy_metadata(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  if (r >= 0) {
    finish(0);
    return;
  }

  lderr(m_cct) << "failed to copy metadata: " << cpp_strerror(r) << dendl;
  finish(r);
}
// Ensure both copy boundaries reference snapshots that actually exist on
// the source image (0 / CEPH_NOSNAP denote the implicit start / HEAD).
template <typename I>
int DeepCopyRequest<I>::validate_copy_points() {
  std::shared_lock image_locker{m_src_image_ctx->image_lock};

  auto snap_exists = [this](librados::snap_t snap_id) {
    return (m_src_image_ctx->snap_info.find(snap_id) !=
              m_src_image_ctx->snap_info.end());
  };

  if (m_src_snap_id_start != 0 && !snap_exists(m_src_snap_id_start)) {
    lderr(m_cct) << "invalid start snap_id " << m_src_snap_id_start << dendl;
    return -EINVAL;
  }

  if (m_src_snap_id_end != CEPH_NOSNAP && !snap_exists(m_src_snap_id_end)) {
    lderr(m_cct) << "invalid end snap_id " << m_src_snap_id_end << dendl;
    return -EINVAL;
  }

  return 0;
}
template <typename I>
void DeepCopyRequest<I>::finish(int r) {
  ldout(m_cct, 20) << "r=" << r << dendl;

  m_on_finish->complete(r);
  // drop the reference held on behalf of the in-flight request
  put();
}
template class librbd::DeepCopyRequest<librbd::ImageCtx>;
| 9,954 | 26.5 | 81 | cc |
null | ceph-main/src/librbd/DeepCopyRequest.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_DEEP_COPY_REQUEST_H
#define CEPH_LIBRBD_DEEP_COPY_REQUEST_H
#include "common/ceph_mutex.h"
#include "common/RefCountedObj.h"
#include "include/int_types.h"
#include "librbd/ImageCtx.h"
#include "librbd/Types.h"
#include "librbd/deep_copy/Types.h"
#include <map>
#include <vector>
class Context;
namespace librbd {
class ImageCtx;
namespace asio { struct ContextWQ; }
namespace deep_copy {
template <typename> class ImageCopyRequest;
template <typename> class SnapshotCopyRequest;
struct Handler;
}
// Orchestrates a full deep copy of one image to another: snapshot
// metadata, image data, object map and image metadata.
template <typename ImageCtxT = ImageCtx>
class DeepCopyRequest : public RefCountedObject {
public:
  static DeepCopyRequest* create(ImageCtxT *src_image_ctx,
                                 ImageCtxT *dst_image_ctx,
                                 librados::snap_t src_snap_id_start,
                                 librados::snap_t src_snap_id_end,
                                 librados::snap_t dst_snap_id_start,
                                 bool flatten,
                                 const deep_copy::ObjectNumber &object_number,
                                 asio::ContextWQ *work_queue,
                                 SnapSeqs *snap_seqs,
                                 deep_copy::Handler *handler,
                                 Context *on_finish) {
    return new DeepCopyRequest(src_image_ctx, dst_image_ctx, src_snap_id_start,
                               src_snap_id_end, dst_snap_id_start, flatten,
                               object_number, work_queue, snap_seqs, handler,
                               on_finish);
  }

  DeepCopyRequest(ImageCtxT *src_image_ctx, ImageCtxT *dst_image_ctx,
                  librados::snap_t src_snap_id_start,
                  librados::snap_t src_snap_id_end,
                  librados::snap_t dst_snap_id_start,
                  bool flatten, const deep_copy::ObjectNumber &object_number,
                  asio::ContextWQ *work_queue, SnapSeqs *snap_seqs,
                  deep_copy::Handler *handler, Context *on_finish);
  ~DeepCopyRequest();

  void send();
  void cancel();

private:
  /**
   * @verbatim
   *
   * <start>
   *    |
   *    v
   * COPY_SNAPSHOTS
   *    |
   *    v
   * COPY_IMAGE . . . . . . . . . . . . . .
   *    |                                 .
   *    v                                 .
   * COPY_OBJECT_MAP (skip if object      .
   *    |             map disabled)       .
   *    v                                 .
   * REFRESH_OBJECT_MAP (skip if object   . (image copy canceled)
   *    |                map disabled)    .
   *    v                                 .
   * COPY_METADATA                        .
   *    |                                 .
   *    v                                 .
   * <finish> < . . . . . . . . . . . . . .
   *
   * @endverbatim
   */

  typedef std::vector<librados::snap_t> SnapIds;
  typedef std::map<librados::snap_t, SnapIds> SnapMap;

  ImageCtxT *m_src_image_ctx;
  ImageCtxT *m_dst_image_ctx;
  // copy range: [m_src_snap_id_start, m_src_snap_id_end] on the source,
  // applied on top of m_dst_snap_id_start on the destination
  librados::snap_t m_src_snap_id_start;
  librados::snap_t m_src_snap_id_end;
  librados::snap_t m_dst_snap_id_start;
  bool m_flatten;
  deep_copy::ObjectNumber m_object_number;
  asio::ContextWQ *m_work_queue;
  SnapSeqs *m_snap_seqs;
  deep_copy::Handler *m_handler;
  Context *m_on_finish;

  CephContext *m_cct;
  // guards the sub-request pointers and the canceled flag
  ceph::mutex m_lock;
  bool m_canceled = false;

  deep_copy::SnapshotCopyRequest<ImageCtxT> *m_snapshot_copy_request = nullptr;
  deep_copy::ImageCopyRequest<ImageCtxT> *m_image_copy_request = nullptr;
  decltype(ImageCtxT::object_map) m_object_map = nullptr;

  void send_copy_snapshots();
  void handle_copy_snapshots(int r);

  void send_copy_image();
  void handle_copy_image(int r);

  void send_copy_object_map();
  void handle_copy_object_map(int r);

  void send_refresh_object_map();
  void handle_refresh_object_map(int r);

  void send_copy_metadata();
  void handle_copy_metadata(int r);

  int validate_copy_points();

  void finish(int r);
};
extern template class librbd::DeepCopyRequest<librbd::ImageCtx>;
#endif // CEPH_LIBRBD_DEEP_COPY_REQUEST_H
| 4,194 | 29.179856 | 79 | h |
null | ceph-main/src/librbd/ExclusiveLock.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include "librbd/cache/Utils.h"
#include "librbd/ExclusiveLock.h"
#include "librbd/ImageCtx.h"
#include "librbd/ImageWatcher.h"
#include "librbd/ImageState.h"
#include "librbd/exclusive_lock/ImageDispatch.h"
#include "librbd/exclusive_lock/PreAcquireRequest.h"
#include "librbd/exclusive_lock/PostAcquireRequest.h"
#include "librbd/exclusive_lock/PreReleaseRequest.h"
#include "librbd/io/ImageDispatcherInterface.h"
#include "librbd/Utils.h"
#include "librbd/asio/ContextWQ.h"
#include "common/ceph_mutex.h"
#include "common/dout.h"
#define dout_subsys ceph_subsys_rbd
#undef dout_prefix
#define dout_prefix *_dout << "librbd::ExclusiveLock: " << this << " " \
<< __func__
namespace librbd {
using namespace exclusive_lock;
using librbd::util::create_context_callback;
template <typename I>
using ML = ManagedLock<I>;
template <typename I>
ExclusiveLock<I>::ExclusiveLock(I &image_ctx)
  : RefCountedObject(image_ctx.cct),
    ML<I>(image_ctx.md_ctx, *image_ctx.asio_engine, image_ctx.header_oid,
          image_ctx.image_watcher, managed_lock::EXCLUSIVE,
          image_ctx.config.template get_val<bool>("rbd_blocklist_on_break_lock"),
          image_ctx.config.template get_val<uint64_t>("rbd_blocklist_expire_seconds")),
    m_image_ctx(image_ctx) {
  // the lock starts uninitialized until init() is invoked
  std::lock_guard locker{ML<I>::m_lock};
  ML<I>::set_state_uninitialized();
}
// Whether a maintenance operation request may proceed: the lock must be
// held and requests must not be blocked (unless the policy explicitly
// accepts this request type while blocked).
template <typename I>
bool ExclusiveLock<I>::accept_request(OperationRequestType request_type,
                                      int *ret_val) const {
  std::lock_guard locker{ML<I>::m_lock};

  bool accept_request =
    (!ML<I>::is_state_shutdown() && ML<I>::is_state_locked() &&
     (m_request_blocked_count == 0 ||
      m_image_ctx.get_exclusive_lock_policy()->accept_blocked_request(
        request_type)));
  if (ret_val != nullptr) {
    // on rejection, surface the error recorded by block_requests()
    *ret_val = accept_request ? 0 : m_request_blocked_ret_val;
  }

  ldout(m_image_ctx.cct, 20) << "=" << accept_request << " (request_type="
                             << request_type << ")" << dendl;
  return accept_request;
}
template <typename I>
bool ExclusiveLock<I>::accept_ops() const {
  std::lock_guard locker{ML<I>::m_lock};
  bool accept = accept_ops(ML<I>::m_lock);
  ldout(m_image_ctx.cct, 20) << "=" << accept << dendl;
  return accept;
}

// lock-held variant: ops are accepted once the lock is owned, including
// while the post-acquire state machine is still running
template <typename I>
bool ExclusiveLock<I>::accept_ops(const ceph::mutex &lock) const {
  return (!ML<I>::is_state_shutdown() &&
          (ML<I>::is_state_locked() || ML<I>::is_state_post_acquiring()));
}
// Delegate lock-requirement toggling for the given IO direction(s) to the
// exclusive-lock image dispatch layer.
template <typename I>
void ExclusiveLock<I>::set_require_lock(bool init_shutdown,
                                        io::Direction direction,
                                        Context* on_finish) {
  m_image_dispatch->set_require_lock(init_shutdown, direction, on_finish);
}

template <typename I>
void ExclusiveLock<I>::unset_require_lock(io::Direction direction) {
  m_image_dispatch->unset_require_lock(direction);
}
// Temporarily reject maintenance operation requests; calls nest, and the
// first caller's error code is reported until fully unblocked.
template <typename I>
void ExclusiveLock<I>::block_requests(int r) {
  std::lock_guard locker{ML<I>::m_lock};

  m_request_blocked_count++;
  if (m_request_blocked_ret_val == 0) {
    m_request_blocked_ret_val = r;
  }

  ldout(m_image_ctx.cct, 20) << ": r=" << r << dendl;
}

template <typename I>
void ExclusiveLock<I>::unblock_requests() {
  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(m_request_blocked_count > 0);
  m_request_blocked_count--;
  if (m_request_blocked_count == 0) {
    m_request_blocked_ret_val = 0;
  }

  ldout(m_image_ctx.cct, 20) << dendl;
}
// Error to report for an operation attempted without the lock: a
// blocklisted client can never reacquire it, so surface that instead of
// the generic read-only error.
template <typename I>
int ExclusiveLock<I>::get_unlocked_op_error() const {
  return m_image_ctx.image_watcher->is_blocklisted() ? -EBLOCKLISTED
                                                     : -EROFS;
}
// Initialize the lock state machine and register the exclusive-lock image
// dispatch layer, requiring the lock for future IO.
template <typename I>
void ExclusiveLock<I>::init(uint64_t features, Context *on_init) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));

  on_init = create_context_callback<Context>(on_init, this);

  ldout(m_image_ctx.cct, 10) << ": features=" << features << dendl;

  {
    std::lock_guard locker{ML<I>::m_lock};
    ML<I>::set_state_initializing();
  }

  m_image_dispatch = exclusive_lock::ImageDispatch<I>::create(&m_image_ctx);
  m_image_ctx.io_image_dispatcher->register_dispatch(m_image_dispatch);

  on_init = new LambdaContext([this, on_init](int r) {
      {
        std::lock_guard locker{ML<I>::m_lock};
        ML<I>::set_state_unlocked();
      }

      on_init->complete(r);
    });

  // journaling, a persistent writeback cache or copy-on-read all mutate
  // state on reads, so the lock is required for both directions; otherwise
  // only writes need it
  bool pwl_enabled = cache::util::is_pwl_enabled(m_image_ctx);
  if (m_image_ctx.clone_copy_on_read ||
      (features & RBD_FEATURE_JOURNALING) != 0 ||
      pwl_enabled) {
    m_image_dispatch->set_require_lock(true, io::DIRECTION_BOTH, on_init);
  } else {
    m_image_dispatch->set_require_lock(true, io::DIRECTION_WRITE, on_init);
  }
}
template <typename I>
void ExclusiveLock<I>::shut_down(Context *on_shut_down) {
  ldout(m_image_ctx.cct, 10) << dendl;

  // keep this instance alive for the duration of the call
  auto ref = ceph::ref_t<ExclusiveLock<I>>(this);
  on_shut_down = create_context_callback<Context>(on_shut_down, this);

  ML<I>::shut_down(on_shut_down);

  // if stalled in request state machine -- abort
  handle_peer_notification(0);
}

// Invoked when a peer responds to (or this client abandons) a lock
// request while waiting for the lock.
template <typename I>
void ExclusiveLock<I>::handle_peer_notification(int r) {
  std::lock_guard locker{ML<I>::m_lock};
  if (!ML<I>::is_state_waiting_for_lock()) {
    return;
  }

  ldout(m_image_ctx.cct, 10) << dendl;
  ceph_assert(ML<I>::is_action_acquire_lock());

  // resume the acquire action with the peer's result
  m_acquire_lock_peer_ret_val = r;
  ML<I>::execute_next_action();
}
// Begin a tracked operation while the lock is held. Returns a completion
// context that must be completed when the op finishes, or nullptr (with
// *ret_val set) if the lock is not currently owned.
template <typename I>
Context *ExclusiveLock<I>::start_op(int* ret_val) {
  ceph_assert(ceph_mutex_is_locked(m_image_ctx.owner_lock));
  std::lock_guard locker{ML<I>::m_lock};

  if (!accept_ops(ML<I>::m_lock)) {
    *ret_val = get_unlocked_op_error();
    return nullptr;
  }

  m_async_op_tracker.start_op();
  return new LambdaContext([this](int r) {
      m_async_op_tracker.finish_op();
    });
}
// ManagedLock shutdown hook: detach from the image context, tear down the
// dispatch layer and flush the watcher before completing.
template <typename I>
void ExclusiveLock<I>::shutdown_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  {
    std::unique_lock owner_locker{m_image_ctx.owner_lock};
    m_image_ctx.exclusive_lock = nullptr;
  }

  on_finish = new LambdaContext([this, on_finish](int r) {
      m_image_dispatch = nullptr;
      m_image_ctx.image_watcher->flush(on_finish);
    });
  m_image_ctx.io_image_dispatcher->shut_down_dispatch(
    m_image_dispatch->get_dispatch_layer(), on_finish);
}
// ManagedLock pre-acquire hook: short-circuit if a peer already refused
// the lock request, otherwise run the PreAcquire state machine.
template <typename I>
void ExclusiveLock<I>::pre_acquire_lock_handler(Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  int acquire_lock_peer_ret_val = 0;
  {
    std::lock_guard locker{ML<I>::m_lock};
    // consume any pending peer response recorded by handle_peer_notification()
    std::swap(acquire_lock_peer_ret_val, m_acquire_lock_peer_ret_val);
  }

  if (acquire_lock_peer_ret_val == -EROFS) {
    ldout(m_image_ctx.cct, 10) << ": peer nacked lock request" << dendl;
    on_finish->complete(acquire_lock_peer_ret_val);
    return;
  }

  PreAcquireRequest<I> *req = PreAcquireRequest<I>::create(m_image_ctx,
                                                           on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}
// ManagedLock post-acquire hook: on failure, either hand the request off
// to the current lock owner or propagate the error; on success, run the
// PostAcquire state machine.
template <typename I>
void ExclusiveLock<I>::post_acquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  if (r == -EROFS) {
    // peer refused to release the exclusive lock
    on_finish->complete(r);
    return;
  } else if (r < 0) {
    ML<I>::m_lock.lock();
    ceph_assert(ML<I>::is_state_acquiring());

    // PostAcquire state machine will not run, so we need complete prepare
    m_image_ctx.state->handle_prepare_lock_complete();

    // if lock is in-use by another client, request the lock
    if (ML<I>::is_action_acquire_lock() && (r == -EBUSY || r == -EAGAIN)) {
      ML<I>::set_state_waiting_for_lock();
      ML<I>::m_lock.unlock();

      // request the lock from a peer
      m_image_ctx.image_watcher->notify_request_lock();

      // inform manage lock that we have interrupted the state machine
      r = -ECANCELED;
    } else {
      ML<I>::m_lock.unlock();

      // clear error if peer owns lock
      if (r == -EAGAIN) {
        r = 0;
      }
    }
    on_finish->complete(r);
    return;
  }

  std::lock_guard locker{ML<I>::m_lock};
  m_pre_post_callback = on_finish;
  using EL = ExclusiveLock<I>;
  PostAcquireRequest<I> *req = PostAcquireRequest<I>::create(m_image_ctx,
      util::create_context_callback<EL, &EL::handle_post_acquiring_lock>(this),
      util::create_context_callback<EL, &EL::handle_post_acquired_lock>(this));

  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}
// PostAcquireRequest intermediate callback, fired once the on-disk lock
// has been claimed but before the post-acquire steps complete.
template <typename I>
void ExclusiveLock<I>::handle_post_acquiring_lock(int r) {
  ldout(m_image_ctx.cct, 10) << dendl;

  std::lock_guard locker{ML<I>::m_lock};

  ceph_assert(r == 0);

  // lock is owned at this point
  ML<I>::set_state_post_acquiring();
}
// PostAcquireRequest final callback: record acquisition, notify watchers
// and lift the lock requirement from the IO dispatch layer.
template <typename I>
void ExclusiveLock<I>::handle_post_acquired_lock(int r) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << dendl;

  Context *on_finish = nullptr;
  {
    std::lock_guard locker{ML<I>::m_lock};
    ceph_assert(ML<I>::is_state_acquiring() ||
                ML<I>::is_state_post_acquiring());

    // ceph_assert (not bare assert) for consistency with the rest of the
    // file and so the check survives NDEBUG builds
    ceph_assert(m_pre_post_callback != nullptr);
    std::swap(m_pre_post_callback, on_finish);
  }

  if (r < 0) {
    on_finish->complete(r);
    return;
  }

  m_image_ctx.perfcounter->tset(l_librbd_lock_acquired_time,
                                ceph_clock_now());
  m_image_ctx.image_watcher->notify_acquired_lock();
  m_image_dispatch->unset_require_lock(io::DIRECTION_BOTH);

  on_finish->complete(0);
}
// ManagedLock pre-release hook: run the PreReleaseRequest state machine
// (flush in-flight ops, re-block IO) before the lock is dropped.
template <typename I>
void ExclusiveLock<I>::pre_release_lock_handler(bool shutting_down,
                                                Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  std::lock_guard locker{ML<I>::m_lock};

  auto req = PreReleaseRequest<I>::create(
    m_image_ctx, m_image_dispatch, shutting_down, m_async_op_tracker,
    on_finish);
  m_image_ctx.op_work_queue->queue(new LambdaContext([req](int r) {
    req->send();
  }));
}
// ManagedLock post-release hook: notify peers the lock is free; on
// shutdown additionally detach from the image and tear down the dispatch
// layer.
template <typename I>
void ExclusiveLock<I>::post_release_lock_handler(bool shutting_down, int r,
                                                 Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << ": r=" << r << " shutting_down="
                             << shutting_down << dendl;

  if (!shutting_down) {
    {
      std::lock_guard locker{ML<I>::m_lock};
      ceph_assert(ML<I>::is_state_pre_releasing() ||
                  ML<I>::is_state_releasing());
    }

    if (r >= 0) {
      m_image_ctx.image_watcher->notify_released_lock();
    }

    on_finish->complete(r);
  } else {
    {
      std::unique_lock owner_locker{m_image_ctx.owner_lock};
      m_image_ctx.exclusive_lock = nullptr;
    }

    // preserve the release result while the dispatch layer shuts down
    on_finish = new LambdaContext([this, r, on_finish](int) {
        m_image_dispatch = nullptr;
        m_image_ctx.image_watcher->notify_released_lock();
        on_finish->complete(r);
      });
    m_image_ctx.io_image_dispatcher->shut_down_dispatch(
      m_image_dispatch->get_dispatch_layer(), on_finish);
  }
}
// ManagedLock post-reacquire hook: on a successful reacquire, remind
// watchers that this client still owns the lock.
template <typename I>
void ExclusiveLock<I>::post_reacquire_lock_handler(int r, Context *on_finish) {
  ldout(m_image_ctx.cct, 10) << dendl;

  const bool reacquired = (r >= 0);
  if (reacquired) {
    m_image_ctx.image_watcher->notify_acquired_lock();
  }

  on_finish->complete(r);
}
} // namespace librbd
template class librbd::ExclusiveLock<librbd::ImageCtx>;
| 11,476 | 28.503856 | 87 | cc |
null | ceph-main/src/librbd/ExclusiveLock.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef CEPH_LIBRBD_EXCLUSIVE_LOCK_H
#define CEPH_LIBRBD_EXCLUSIVE_LOCK_H
#include "common/AsyncOpTracker.h"
#include "librbd/ManagedLock.h"
#include "librbd/exclusive_lock/Policy.h"
#include "librbd/io/Types.h"
#include "common/RefCountedObj.h"
struct Context;
namespace librbd {
namespace exclusive_lock { template <typename> struct ImageDispatch; }
// Cooperative exclusive lock over an image header object, layered on
// ManagedLock with RBD-specific acquire/release state machines and an IO
// dispatch layer that blocks IO while the lock is not held.
template <typename ImageCtxT = ImageCtx>
class ExclusiveLock : public RefCountedObject,
                      public ManagedLock<ImageCtxT> {
public:
  static ExclusiveLock *create(ImageCtxT &image_ctx) {
    return new ExclusiveLock<ImageCtxT>(image_ctx);
  }

  ExclusiveLock(ImageCtxT &image_ctx);

  bool accept_request(exclusive_lock::OperationRequestType request_type,
                      int *ret_val) const;
  bool accept_ops() const;

  void set_require_lock(bool init_shutdown, io::Direction direction,
                        Context* on_finish);
  void unset_require_lock(io::Direction direction);

  // nested blocking of maintenance operation requests
  void block_requests(int r);
  void unblock_requests();

  void init(uint64_t features, Context *on_init);
  void shut_down(Context *on_shutdown);

  void handle_peer_notification(int r);

  int get_unlocked_op_error() const;
  // returns nullptr (with *ret_val set) when the lock is not held
  Context *start_op(int* ret_val);

protected:
  void shutdown_handler(int r, Context *on_finish) override;
  void pre_acquire_lock_handler(Context *on_finish) override;
  void post_acquire_lock_handler(int r, Context *on_finish) override;
  void pre_release_lock_handler(bool shutting_down,
                                Context *on_finish) override;
  void post_release_lock_handler(bool shutting_down, int r,
                                 Context *on_finish) override;
  void post_reacquire_lock_handler(int r, Context *on_finish) override;

private:
  /**
   * @verbatim
   *
   * <start>                               * * > WAITING_FOR_REGISTER --------\
   *    |                                  * (watch not registered)           |
   *    |                                  *                                  |
   *    |                                  * * > WAITING_FOR_PEER ------------\
   *    |                                  * (request_lock busy)              |
   *    |                                  *                                  |
   *    |                                  * * * * * * * * * * * * * *       |
   *    |                                                              *      |
   *    v            (init)            (try_lock/request_lock)         *      |
   * UNINITIALIZED  -------> UNLOCKED ------------------------> ACQUIRING <--/
   *                            ^                                   |
   *                            |                                   v
   *                         RELEASING                        POST_ACQUIRING
   *                            |                                   |
   *                            |                                   |
   *                            |          (release_lock)           v
   *                      PRE_RELEASING <------------------------ LOCKED
   *
   * <LOCKED state>
   *    |
   *    v
   * REACQUIRING -------------------------------------> <finish>
   *    .                                                 ^
   *    .                                                 |
   *    . . . > <RELEASE action> ---> <ACQUIRE action> ---/
   *
   * <UNLOCKED/LOCKED states>
   *    |
   *    |
   *    v
   * PRE_SHUTTING_DOWN ---> SHUTTING_DOWN ---> SHUTDOWN ---> <finish>
   *
   * @endverbatim
   */

  ImageCtxT& m_image_ctx;
  exclusive_lock::ImageDispatch<ImageCtxT>* m_image_dispatch = nullptr;
  Context *m_pre_post_callback = nullptr;

  AsyncOpTracker m_async_op_tracker;

  // nested block_requests() bookkeeping
  uint32_t m_request_blocked_count = 0;
  int m_request_blocked_ret_val = 0;

  // result recorded by handle_peer_notification() while waiting for a peer
  int m_acquire_lock_peer_ret_val = 0;

  bool accept_ops(const ceph::mutex &lock) const;

  void handle_post_acquiring_lock(int r);
  void handle_post_acquired_lock(int r);
};
} // namespace librbd
#endif // CEPH_LIBRBD_EXCLUSIVE_LOCK_H
| 4,124 | 33.957627 | 78 | h |
null | ceph-main/src/librbd/Features.cc | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#include <boost/lexical_cast.hpp>
#include <boost/algorithm/string.hpp>
#include "librbd/Features.h"
#include "include/rbd/features.h"
#include <map>
#include <vector>
static const std::map<std::string, uint64_t> RBD_FEATURE_MAP = {
{RBD_FEATURE_NAME_LAYERING, RBD_FEATURE_LAYERING},
{RBD_FEATURE_NAME_STRIPINGV2, RBD_FEATURE_STRIPINGV2},
{RBD_FEATURE_NAME_EXCLUSIVE_LOCK, RBD_FEATURE_EXCLUSIVE_LOCK},
{RBD_FEATURE_NAME_OBJECT_MAP, RBD_FEATURE_OBJECT_MAP},
{RBD_FEATURE_NAME_FAST_DIFF, RBD_FEATURE_FAST_DIFF},
{RBD_FEATURE_NAME_DEEP_FLATTEN, RBD_FEATURE_DEEP_FLATTEN},
{RBD_FEATURE_NAME_JOURNALING, RBD_FEATURE_JOURNALING},
{RBD_FEATURE_NAME_DATA_POOL, RBD_FEATURE_DATA_POOL},
{RBD_FEATURE_NAME_OPERATIONS, RBD_FEATURE_OPERATIONS},
{RBD_FEATURE_NAME_MIGRATING, RBD_FEATURE_MIGRATING},
{RBD_FEATURE_NAME_NON_PRIMARY, RBD_FEATURE_NON_PRIMARY},
{RBD_FEATURE_NAME_DIRTY_CACHE, RBD_FEATURE_DIRTY_CACHE},
};
static_assert((RBD_FEATURE_DIRTY_CACHE << 1) > RBD_FEATURES_ALL,
"new RBD feature added");
namespace librbd {
// Render a feature bitmask as a comma-separated list of feature names.
// Bits with no known name are dropped and reported via *err (if supplied).
std::string rbd_features_to_string(uint64_t features,
                                   std::ostream *err)
{
  std::string result;
  for (const auto& entry : RBD_FEATURE_MAP) {
    if ((features & entry.second) == 0) {
      continue;
    }
    if (!result.empty()) {
      result += ",";
    }
    result += entry.first;
    // clear the bit so leftovers identify unknown features below
    features &= ~entry.second;
  }

  if (err && features) {
    *err << "ignoring unknown feature mask 0x"
         << std::hex << features << std::dec;
  }
  return result;
}
// Parse a feature specification that is either a numeric bitmask or a
// comma-separated list of feature names. Unknown bits/names and
// internal-only bits are dropped and reported via *err (if supplied).
// An empty string yields the default feature set.
uint64_t rbd_features_from_string(const std::string& orig_value,
                                  std::ostream *err)
{
  uint64_t features = 0;
  std::string value = orig_value;
  boost::trim(value);

  // empty string means default features
  if (!value.size()) {
    return RBD_FEATURES_DEFAULT;
  }

  try {
    // numeric?
    features = boost::lexical_cast<uint64_t>(value);

    // drop unrecognized bits
    uint64_t unsupported_features = (features & ~RBD_FEATURES_ALL);
    if (unsupported_features != 0ull) {
      features &= RBD_FEATURES_ALL;
      if (err) {
        *err << "ignoring unknown feature mask 0x"
             << std::hex << unsupported_features << std::dec;
      }
    }

    uint64_t ignore_features_mask = (
        RBD_FEATURES_INTERNAL | RBD_FEATURES_MUTABLE_INTERNAL);
    uint64_t ignored_features = (features & ignore_features_mask);
    if (ignored_features != 0ULL) {
      features &= ~ignore_features_mask;
      if (err) {
        *err << "ignoring feature mask 0x" << std::hex << ignored_features;
      }
    }
  } catch (boost::bad_lexical_cast&) {
    // feature name list?
    bool errors = false;
    std::vector<std::string> feature_names;
    boost::split(feature_names, value, boost::is_any_of(","));
    for (auto feature_name: feature_names) {
      boost::trim(feature_name);
      auto feature_it = RBD_FEATURE_MAP.find(feature_name);
      if (feature_it != RBD_FEATURE_MAP.end()) {
        // OR (not add) the bit so a duplicated feature name (e.g.
        // "layering,layering") cannot carry into other feature bits
        features |= feature_it->second;
      } else if (err) {
        if (errors) {
          *err << ", ";
        } else {
          errors = true;
        }
        *err << "ignoring unknown feature " << feature_name;
      }
    }
  }
  return features;
}
} // namespace librbd
| 3,181 | 27.410714 | 70 | cc |
null | ceph-main/src/librbd/Features.h | // -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once
#include <string>
#include <ostream>
namespace librbd {
std::string rbd_features_to_string(uint64_t features,
std::ostream *err);
uint64_t rbd_features_from_string(const std::string& value,
std::ostream *err);
} // librbd
| 359 | 20.176471 | 70 | h |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.